author     nbd <nbd@3c298f89-4303-0410-b956-a3cf2f4a3e73>   2009-05-10 22:17:50 +0000
committer  nbd <nbd@3c298f89-4303-0410-b956-a3cf2f4a3e73>   2009-05-10 22:17:50 +0000
commit     7858dcd34a17716736305e6bb5b32b0fd08149e3 (patch)
tree       32a654d37cf7688c9c4579b889aba996abed54c0
parent     582aef75bb3cabfab6ec6c4c02651bb2fa13593d (diff)
improve the skb padding performance change to avoid unnecessary reallocations in the routing code
git-svn-id: svn://svn.openwrt.org/openwrt/trunk@15761 3c298f89-4303-0410-b956-a3cf2f4a3e73
-rw-r--r--  target/linux/generic-2.6/patches-2.6.28/205-skb_padding.patch  48
-rw-r--r--  target/linux/generic-2.6/patches-2.6.29/205-skb_padding.patch  48
-rw-r--r--  target/linux/generic-2.6/patches-2.6.30/205-skb_padding.patch  48
3 files changed, 132 insertions, 12 deletions
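The commit reworks the earlier approach of raising NET_SKB_PAD itself (from 16, or 32 on 2.6.30, to 64). It introduces a separate NET_SKB_PAD_ALLOC constant: allocations in __dev_alloc_skb() and __netdev_alloc_skb() reserve 64 bytes of headroom up front, while headroom checks such as __skb_cow() keep requiring only the smaller NET_SKB_PAD, so packets routed between ethernet and wireless no longer trigger a head reallocation merely because the check demanded more headroom than is actually consumed. Below is a minimal userspace sketch of that idea (hypothetical, self-contained C; buf, buf_alloc, buf_cow, PAD, and PAD_ALLOC are illustrative stand-ins, not kernel API):

/*
 * Hypothetical illustration, not kernel code: allocation reserves the
 * larger PAD_ALLOC headroom, while the copy-on-write style check only
 * insists on the smaller PAD, so fresh buffers never hit the expensive
 * reallocation path when an extra header is pushed in front.
 */
#include <stdio.h>
#include <stdlib.h>

#define PAD        16   /* stand-in for NET_SKB_PAD       */
#define PAD_ALLOC  64   /* stand-in for NET_SKB_PAD_ALLOC */

struct buf {
	unsigned char *head;    /* start of the allocation     */
	unsigned char *data;    /* start of the packet payload */
	size_t len;             /* payload length              */
};

/* Like __dev_alloc_skb(): reserve the larger pad at allocation time. */
static struct buf *buf_alloc(size_t length)
{
	struct buf *b = malloc(sizeof(*b));
	if (!b)
		return NULL;
	b->head = malloc(length + PAD_ALLOC);
	if (!b->head) {
		free(b);
		return NULL;
	}
	b->data = b->head + PAD_ALLOC;  /* headroom reserved up front */
	b->len = 0;
	return b;
}

/* Like skb_cow(): only reallocate when headroom drops below `needed`. */
static int buf_cow(struct buf *b, size_t needed)
{
	size_t headroom = (size_t)(b->data - b->head);

	if (headroom >= needed)
		return 0;  /* common case after buf_alloc(): no copy */
	/* ...the expensive head expansion the commit avoids... */
	return 1;
}

int main(void)
{
	struct buf *b = buf_alloc(1500);

	if (!b)
		return 1;
	/* Routing pushes a header needing up to PAD bytes of headroom: */
	printf("reallocation needed: %d\n", buf_cow(b, PAD));  /* -> 0 */
	free(b->head);
	free(b);
	return 0;
}

Note that the __skb_cow() hunks below also round any unavoidable expansion up to NET_SKB_PAD_ALLOC rather than NET_SKB_PAD, so a reallocated head again carries the full 64-byte headroom on subsequent hops.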
diff --git a/target/linux/generic-2.6/patches-2.6.28/205-skb_padding.patch b/target/linux/generic-2.6/patches-2.6.28/205-skb_padding.patch
index 07e730a20..855f0fedd 100644
--- a/target/linux/generic-2.6/patches-2.6.28/205-skb_padding.patch
+++ b/target/linux/generic-2.6/patches-2.6.28/205-skb_padding.patch
@@ -1,16 +1,56 @@
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
-@@ -1270,9 +1270,12 @@
+@@ -1256,11 +1256,18 @@ static inline int skb_network_offset(con
*
* Various parts of the networking layer expect at least 16 bytes of
* headroom, you should not reduce this.
+ *
+ * This has been changed to 64 to accommodate routing between ethernet
-+ * and wireless
++ * and wireless, but only for new allocations
*/
#ifndef NET_SKB_PAD
--#define NET_SKB_PAD 16
-+#define NET_SKB_PAD 64
+ #define NET_SKB_PAD 16
#endif
++#ifndef NET_SKB_PAD_ALLOC
++#define NET_SKB_PAD_ALLOC 64
++#endif
++
extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
+
+ static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
+@@ -1350,9 +1357,9 @@ static inline void __skb_queue_purge(str
+ static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
+ gfp_t gfp_mask)
+ {
+- struct sk_buff *skb = alloc_skb(length + NET_SKB_PAD, gfp_mask);
++ struct sk_buff *skb = alloc_skb(length + NET_SKB_PAD_ALLOC, gfp_mask);
+ if (likely(skb))
+- skb_reserve(skb, NET_SKB_PAD);
++ skb_reserve(skb, NET_SKB_PAD_ALLOC);
+ return skb;
+ }
+
+@@ -1425,7 +1432,7 @@ static inline int __skb_cow(struct sk_bu
+ delta = headroom - skb_headroom(skb);
+
+ if (delta || cloned)
+- return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 0,
++ return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD_ALLOC), 0,
+ GFP_ATOMIC);
+ return 0;
+ }
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -243,9 +243,9 @@ struct sk_buff *__netdev_alloc_skb(struc
+ int node = dev->dev.parent ? dev_to_node(dev->dev.parent) : -1;
+ struct sk_buff *skb;
+
+- skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask, 0, node);
++ skb = __alloc_skb(length + NET_SKB_PAD_ALLOC, gfp_mask, 0, node);
+ if (likely(skb)) {
+- skb_reserve(skb, NET_SKB_PAD);
++ skb_reserve(skb, NET_SKB_PAD_ALLOC);
+ skb->dev = dev;
+ }
+ return skb;
diff --git a/target/linux/generic-2.6/patches-2.6.29/205-skb_padding.patch b/target/linux/generic-2.6/patches-2.6.29/205-skb_padding.patch
index c11bffe55..ff05b8d93 100644
--- a/target/linux/generic-2.6/patches-2.6.29/205-skb_padding.patch
+++ b/target/linux/generic-2.6/patches-2.6.29/205-skb_padding.patch
@@ -1,16 +1,56 @@
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
-@@ -1306,9 +1306,12 @@
+@@ -1306,11 +1306,18 @@ static inline int skb_network_offset(con
*
* Various parts of the networking layer expect at least 16 bytes of
* headroom, you should not reduce this.
+ *
+ * This has been changed to 64 to accommodate routing between ethernet
-+ * and wireless
++ * and wireless, but only for new allocations
*/
#ifndef NET_SKB_PAD
--#define NET_SKB_PAD 16
-+#define NET_SKB_PAD 64
+ #define NET_SKB_PAD 16
#endif
++#ifndef NET_SKB_PAD_ALLOC
++#define NET_SKB_PAD_ALLOC 64
++#endif
++
extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
+
+ static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
+@@ -1400,9 +1407,9 @@ static inline void __skb_queue_purge(str
+ static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
+ gfp_t gfp_mask)
+ {
+- struct sk_buff *skb = alloc_skb(length + NET_SKB_PAD, gfp_mask);
++ struct sk_buff *skb = alloc_skb(length + NET_SKB_PAD_ALLOC, gfp_mask);
+ if (likely(skb))
+- skb_reserve(skb, NET_SKB_PAD);
++ skb_reserve(skb, NET_SKB_PAD_ALLOC);
+ return skb;
+ }
+
+@@ -1475,7 +1482,7 @@ static inline int __skb_cow(struct sk_bu
+ delta = headroom - skb_headroom(skb);
+
+ if (delta || cloned)
+- return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 0,
++ return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD_ALLOC), 0,
+ GFP_ATOMIC);
+ return 0;
+ }
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -320,9 +320,9 @@ struct sk_buff *__netdev_alloc_skb(struc
+ int node = dev->dev.parent ? dev_to_node(dev->dev.parent) : -1;
+ struct sk_buff *skb;
+
+- skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask, 0, node);
++ skb = __alloc_skb(length + NET_SKB_PAD_ALLOC, gfp_mask, 0, node);
+ if (likely(skb)) {
+- skb_reserve(skb, NET_SKB_PAD);
++ skb_reserve(skb, NET_SKB_PAD_ALLOC);
+ skb->dev = dev;
+ }
+ return skb;
diff --git a/target/linux/generic-2.6/patches-2.6.30/205-skb_padding.patch b/target/linux/generic-2.6/patches-2.6.30/205-skb_padding.patch
index 549fa50c9..06b494b91 100644
--- a/target/linux/generic-2.6/patches-2.6.30/205-skb_padding.patch
+++ b/target/linux/generic-2.6/patches-2.6.30/205-skb_padding.patch
@@ -1,16 +1,56 @@
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
-@@ -1369,9 +1369,12 @@ static inline int skb_network_offset(con
+@@ -1369,11 +1369,18 @@ static inline int skb_network_offset(con
*
* Various parts of the networking layer expect at least 32 bytes of
* headroom, you should not reduce this.
+ *
+ * This has been changed to 64 to accommodate routing between ethernet
-+ * and wireless
++ * and wireless, but only for new allocations
*/
#ifndef NET_SKB_PAD
--#define NET_SKB_PAD 32
-+#define NET_SKB_PAD 64
+ #define NET_SKB_PAD 32
#endif
++#ifndef NET_SKB_PAD_ALLOC
++#define NET_SKB_PAD_ALLOC 64
++#endif
++
extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
+
+ static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
+@@ -1463,9 +1470,9 @@ static inline void __skb_queue_purge(str
+ static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
+ gfp_t gfp_mask)
+ {
+- struct sk_buff *skb = alloc_skb(length + NET_SKB_PAD, gfp_mask);
++ struct sk_buff *skb = alloc_skb(length + NET_SKB_PAD_ALLOC, gfp_mask);
+ if (likely(skb))
+- skb_reserve(skb, NET_SKB_PAD);
++ skb_reserve(skb, NET_SKB_PAD_ALLOC);
+ return skb;
+ }
+
+@@ -1538,7 +1545,7 @@ static inline int __skb_cow(struct sk_bu
+ delta = headroom - skb_headroom(skb);
+
+ if (delta || cloned)
+- return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 0,
++ return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD_ALLOC), 0,
+ GFP_ATOMIC);
+ return 0;
+ }
+--- a/net/core/skbuff.c
++++ b/net/core/skbuff.c
+@@ -327,9 +327,9 @@ struct sk_buff *__netdev_alloc_skb(struc
+ int node = dev->dev.parent ? dev_to_node(dev->dev.parent) : -1;
+ struct sk_buff *skb;
+
+- skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask, 0, node);
++ skb = __alloc_skb(length + NET_SKB_PAD_ALLOC, gfp_mask, 0, node);
+ if (likely(skb)) {
+- skb_reserve(skb, NET_SKB_PAD);
++ skb_reserve(skb, NET_SKB_PAD_ALLOC);
+ skb->dev = dev;
+ }
+ return skb;