author     Linus Torvalds <torvalds@linux-foundation.org>  2008-05-14 10:08:24 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2008-05-14 10:08:24 -0700
commit     6aa5fc434958d15a4d66d922d0416dfb03c07def (patch)
tree       c68811b0e38afe68156022bed324d8df25fb45be /net/ipv4
parent     362a61ad61199e19a61b8e432015e2586b288f5b (diff)
parent     9ee6b7f1556e7889eff4666483b1b554d4686cd4 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (73 commits)
  net: Fix typo in net/core/sock.c.
  ppp: Do not free not yet unregistered net device.
  netfilter: xt_iprange: module aliases for xt_iprange
  netfilter: ctnetlink: dump conntrack ID in event messages
  irda: Fix a misalign access issue. (v2)
  sctp: Fix use of uninitialized pointer
  cipso: Relax too much careful cipso hash function.
  tcp FRTO: work-around inorder receivers
  tcp FRTO: Fix fallback to conventional recovery
  New maintainer for Intel ethernet adapters
  DM9000: Use delayed work to update MII PHY state
  DM9000: Update and fix driver debugging messages
  DM9000: Add __devinit and __devexit attributes to probe and remove
  sky2: fix simple define thinko
  [netdrvr] sfc: sfc: Add self-test support
  [netdrvr] sfc: Increment rx_reset when reported as driver event
  [netdrvr] sfc: Remove unused macro EFX_XAUI_RETRAIN_MAX
  [netdrvr] sfc: Fix code formatting
  [netdrvr] sfc: Remove kernel-doc comments for removed members of struct efx_nic
  [netdrvr] sfc: Remove garbage from comment
  ...
Diffstat (limited to 'net/ipv4')
-rw-r--r--  net/ipv4/arp.c        |  2
-rw-r--r--  net/ipv4/cipso_ipv4.c |  4
-rw-r--r--  net/ipv4/igmp.c       |  4
-rw-r--r--  net/ipv4/ipconfig.c   |  6
-rw-r--r--  net/ipv4/raw.c        | 10
-rw-r--r--  net/ipv4/tcp_input.c  | 17
6 files changed, 24 insertions(+), 19 deletions(-)
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 68b72a7a1806..418862f1bf22 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -570,7 +570,7 @@ struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip,
* Allocate a buffer
*/
- skb = alloc_skb(arp_hdr_len(dev) + LL_RESERVED_SPACE(dev), GFP_ATOMIC);
+ skb = alloc_skb(arp_hdr_len(dev) + LL_ALLOCATED_SPACE(dev), GFP_ATOMIC);
if (skb == NULL)
return NULL;
diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
index 05afb576d935..2c0e4572cc90 100644
--- a/net/ipv4/cipso_ipv4.c
+++ b/net/ipv4/cipso_ipv4.c
@@ -338,7 +338,7 @@ static int cipso_v4_cache_check(const unsigned char *key,
return -ENOENT;
hash = cipso_v4_map_cache_hash(key, key_len);
- bkt = hash & (CIPSO_V4_CACHE_BUCKETBITS - 1);
+ bkt = hash & (CIPSO_V4_CACHE_BUCKETS - 1);
spin_lock_bh(&cipso_v4_cache[bkt].lock);
list_for_each_entry(entry, &cipso_v4_cache[bkt].list, list) {
if (entry->hash == hash &&
@@ -417,7 +417,7 @@ int cipso_v4_cache_add(const struct sk_buff *skb,
atomic_inc(&secattr->cache->refcount);
entry->lsm_data = secattr->cache;
- bkt = entry->hash & (CIPSO_V4_CACHE_BUCKETBITS - 1);
+ bkt = entry->hash & (CIPSO_V4_CACHE_BUCKETS - 1);
spin_lock_bh(&cipso_v4_cache[bkt].lock);
if (cipso_v4_cache[bkt].size < cipso_v4_cache_bucketsize) {
list_add(&entry->list, &cipso_v4_cache[bkt].list);
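Both cipso_ipv4.c hunks swap the bucket mask from CIPSO_V4_CACHE_BUCKETBITS - 1 to CIPSO_V4_CACHE_BUCKETS - 1. Below is a minimal userspace sketch of why that matters, assuming the usual power-of-two layout; the macro values here are illustrative assumptions, and only the masking change itself comes from the hunks above.

#include <stdint.h>

/* Illustrative values (assumed): a bit count and the power-of-two
 * bucket count derived from it. */
#define CIPSO_V4_CACHE_BUCKETBITS 7
#define CIPSO_V4_CACHE_BUCKETS    (1 << CIPSO_V4_CACHE_BUCKETBITS)

static unsigned int cipso_bucket(uint32_t hash)
{
	/* Masking with (CIPSO_V4_CACHE_BUCKETBITS - 1) == 6 can only yield
	 * bucket indices 0, 2, 4 and 6, leaving most of the cache unused.
	 * Masking with (CIPSO_V4_CACHE_BUCKETS - 1) == 127 spreads entries
	 * across all 128 buckets. */
	return hash & (CIPSO_V4_CACHE_BUCKETS - 1);
}
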
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 6250f4239b61..2769dc4a4c84 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -292,7 +292,7 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size)
struct iphdr *pip;
struct igmpv3_report *pig;
- skb = alloc_skb(size + LL_RESERVED_SPACE(dev), GFP_ATOMIC);
+ skb = alloc_skb(size + LL_ALLOCATED_SPACE(dev), GFP_ATOMIC);
if (skb == NULL)
return NULL;
@@ -653,7 +653,7 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc,
return -1;
}
- skb=alloc_skb(IGMP_SIZE+LL_RESERVED_SPACE(dev), GFP_ATOMIC);
+ skb=alloc_skb(IGMP_SIZE+LL_ALLOCATED_SPACE(dev), GFP_ATOMIC);
if (skb == NULL) {
ip_rt_put(rt);
return -1;
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index 89dee4346f60..ed45037ce9be 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -710,14 +710,14 @@ static void __init ic_bootp_send_if(struct ic_device *d, unsigned long jiffies_d
struct net_device *dev = d->dev;
struct sk_buff *skb;
struct bootp_pkt *b;
- int hh_len = LL_RESERVED_SPACE(dev);
struct iphdr *h;
/* Allocate packet */
- skb = alloc_skb(sizeof(struct bootp_pkt) + hh_len + 15, GFP_KERNEL);
+ skb = alloc_skb(sizeof(struct bootp_pkt) + LL_ALLOCATED_SPACE(dev) + 15,
+ GFP_KERNEL);
if (!skb)
return;
- skb_reserve(skb, hh_len);
+ skb_reserve(skb, LL_RESERVED_SPACE(dev));
b = (struct bootp_pkt *) skb_put(skb, sizeof(struct bootp_pkt));
memset(b, 0, sizeof(struct bootp_pkt));
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 11d7f753a820..fead049daf43 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -322,7 +322,6 @@ static int raw_send_hdrinc(struct sock *sk, void *from, size_t length,
unsigned int flags)
{
struct inet_sock *inet = inet_sk(sk);
- int hh_len;
struct iphdr *iph;
struct sk_buff *skb;
unsigned int iphlen;
@@ -336,13 +335,12 @@ static int raw_send_hdrinc(struct sock *sk, void *from, size_t length,
if (flags&MSG_PROBE)
goto out;
- hh_len = LL_RESERVED_SPACE(rt->u.dst.dev);
-
- skb = sock_alloc_send_skb(sk, length+hh_len+15,
- flags&MSG_DONTWAIT, &err);
+ skb = sock_alloc_send_skb(sk,
+ length + LL_ALLOCATED_SPACE(rt->u.dst.dev) + 15,
+ flags & MSG_DONTWAIT, &err);
if (skb == NULL)
goto error;
- skb_reserve(skb, hh_len);
+ skb_reserve(skb, LL_RESERVED_SPACE(rt->u.dst.dev));
skb->priority = sk->sk_priority;
skb->mark = sk->sk_mark;
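The arp.c, igmp.c, ipconfig.c and raw.c hunks above all follow the same conversion: size the skb with LL_ALLOCATED_SPACE() but keep reserving only LL_RESERVED_SPACE() of headroom, so any extra room the device asks for stays at the tail of the buffer. A sketch of that pattern follows; the helper name is hypothetical, the alloc/reserve split is taken from the ipconfig.c and raw.c hunks, and the exact macro definitions are not shown in this diff.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Hypothetical helper illustrating the allocation pattern used above. */
static struct sk_buff *example_ll_alloc(struct net_device *dev,
					unsigned int payload_len)
{
	struct sk_buff *skb;

	/* Budget for link-layer headroom *and* any needed tailroom. */
	skb = alloc_skb(payload_len + LL_ALLOCATED_SPACE(dev), GFP_ATOMIC);
	if (skb == NULL)
		return NULL;

	/* Reserve only the headroom; the remaining space stays available
	 * at the end of the buffer. */
	skb_reserve(skb, LL_RESERVED_SPACE(dev));
	return skb;
}
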
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 26c936930e92..b54d9d37b636 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1842,9 +1842,16 @@ static void tcp_enter_frto_loss(struct sock *sk, int allowed_segments, int flag)
TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
}
- /* Don't lost mark skbs that were fwd transmitted after RTO */
- if (!(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED) &&
- !after(TCP_SKB_CB(skb)->end_seq, tp->frto_highmark)) {
+ /* Marking forward transmissions that were made after RTO lost
+ * can cause unnecessary retransmissions in some scenarios,
+ * SACK blocks will mitigate that in some but not in all cases.
+ * We used to not mark them but it was causing break-ups with
+ * receivers that do only in-order receival.
+ *
+ * TODO: we could detect presence of such receiver and select
+ * different behavior per flow.
+ */
+ if (!(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) {
TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
tp->lost_out += tcp_skb_pcount(skb);
}
@@ -1860,7 +1867,7 @@ static void tcp_enter_frto_loss(struct sock *sk, int allowed_segments, int flag)
tp->reordering = min_t(unsigned int, tp->reordering,
sysctl_tcp_reordering);
tcp_set_ca_state(sk, TCP_CA_Loss);
- tp->high_seq = tp->frto_highmark;
+ tp->high_seq = tp->snd_nxt;
TCP_ECN_queue_cwr(tp);
tcp_clear_retrans_hints_partial(tp);
@@ -2482,7 +2489,7 @@ static void tcp_try_to_open(struct sock *sk, int flag)
tcp_verify_left_out(tp);
- if (tp->retrans_out == 0)
+ if (!tp->frto_counter && tp->retrans_out == 0)
tp->retrans_stamp = 0;
if (flag & FLAG_ECE)