--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1103,6 +1103,11 @@ struct net_device {
const struct net_device_ops *netdev_ops;
const struct ethtool_ops *ethtool_ops;
+#ifdef CONFIG_ETHERNET_PACKET_MANGLE
+ void (*eth_mangle_rx)(struct net_device *dev, struct sk_buff *skb);
+ struct sk_buff *(*eth_mangle_tx)(struct net_device *dev, struct sk_buff *skb);
+#endif
+
/* Hardware header description */
const struct header_ops *header_ops;
@@ -1158,6 +1163,9 @@ struct net_device {
void *ax25_ptr; /* AX.25 specific data */
struct wireless_dev *ieee80211_ptr; /* IEEE 802.11 specific data,
assign before registering */
+#ifdef CONFIG_ETHERNET_PACKET_MANGLE
+ void *phy_ptr; /* PHY device specific data */
+#endif
/*
* Cache lines mostly used on receive path (including eth_type_trans())
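The two function pointers above are the whole driver-facing API: a PHY or switch driver that selects ETHERNET_PACKET_MANGLE fills them in on the ethernet device it is attached to, and can park its private state in phy_ptr. A minimal sketch of the wiring, using hypothetical example_* names that are not part of this patch:

#include <linux/netdevice.h>

/* Illustrative only: hook installation from a hypothetical PHY driver's
 * attach path (requires CONFIG_ETHERNET_PACKET_MANGLE). */
static void example_phy_attach(struct net_device *dev, void *priv)
{
	dev->phy_ptr = priv;			/* driver-private state */
	dev->eth_mangle_rx = example_mangle_rx;	/* sketched after the eth.c hunk */
	dev->eth_mangle_tx = example_mangle_tx;	/* sketched after the dev.c hunk */
}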
--- a/include/linux/if.h
+++ b/include/linux/if.h
@@ -79,6 +79,7 @@
#define IFF_TX_SKB_SHARING 0x10000 /* The interface supports sharing
* skbs on transmit */
#define IFF_UNICAST_FLT 0x20000 /* Supports unicast filtering */
+#define IFF_NO_IP_ALIGN 0x40000 /* do not ip-align allocated rx pkts */
#define IF_GET_IFACE 0x0001 /* for querying only */
#define IF_GET_PROTO 0x0002
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1636,6 +1636,10 @@ extern struct sk_buff *dev_alloc_skb(uns
extern struct sk_buff *__netdev_alloc_skb(struct net_device *dev,
unsigned int length, gfp_t gfp_mask);
+extern struct sk_buff *__netdev_alloc_skb_ip_align(struct net_device *dev,
+ unsigned int length, gfp_t gfp);
+
+
/**
* netdev_alloc_skb - allocate an skbuff for rx on a specific device
* @dev: network device to receive on
@@ -1655,16 +1659,6 @@ static inline struct sk_buff *netdev_all
return __netdev_alloc_skb(dev, length, GFP_ATOMIC);
}
-static inline struct sk_buff *__netdev_alloc_skb_ip_align(struct net_device *dev,
- unsigned int length, gfp_t gfp)
-{
- struct sk_buff *skb = __netdev_alloc_skb(dev, length + NET_IP_ALIGN, gfp);
-
- if (NET_IP_ALIGN && skb)
- skb_reserve(skb, NET_IP_ALIGN);
- return skb;
-}
-
static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev,
unsigned int length)
{
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -23,6 +23,12 @@ menuconfig NET
if NET
+config ETHERNET_PACKET_MANGLE
+ bool
+ help
+ This option can be selected by phy drivers that need to mangle
+ packets going in or out of an ethernet device.
+
config WANT_COMPAT_NETLINK_MESSAGES
bool
help
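ETHERNET_PACKET_MANGLE has no prompt, so it only gets enabled when a driver selects it. A hypothetical driver entry would pull it in like this (the driver symbol below is an assumption, not part of this patch):

config EXAMPLE_SWITCH_PHY
	tristate "Example switch PHY driver"
	select ETHERNET_PACKET_MANGLE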
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2224,9 +2224,19 @@ int dev_hard_start_xmit(struct sk_buff *
}
}
- skb_len = skb->len;
- rc = ops->ndo_start_xmit(skb, dev);
- trace_net_dev_xmit(skb, rc, dev, skb_len);
+#ifdef CONFIG_ETHERNET_PACKET_MANGLE
+ if (!dev->eth_mangle_tx ||
+ (skb = dev->eth_mangle_tx(dev, skb)) != NULL)
+#else
+ if (1)
+#endif
+ {
+ skb_len = skb->len;
+ rc = ops->ndo_start_xmit(skb, dev);
+ trace_net_dev_xmit(skb, rc, dev, skb_len);
+ } else {
+ rc = NETDEV_TX_OK;
+ }
if (rc == NETDEV_TX_OK)
txq_trans_update(txq);
return rc;
@@ -2246,9 +2256,19 @@ gso:
if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
skb_dst_drop(nskb);
- skb_len = nskb->len;
- rc = ops->ndo_start_xmit(nskb, dev);
- trace_net_dev_xmit(nskb, rc, dev, skb_len);
+#ifdef CONFIG_ETHERNET_PACKET_MANGLE
+ if (!dev->eth_mangle_tx ||
+ (nskb = dev->eth_mangle_tx(dev, nskb)) != NULL)
+#else
+ if (1)
+#endif
+ {
+ skb_len = nskb->len;
+ rc = ops->ndo_start_xmit(nskb, dev);
+ trace_net_dev_xmit(nskb, rc, dev, skb_len);
+ } else {
+ rc = NETDEV_TX_OK;
+ }
if (unlikely(rc != NETDEV_TX_OK)) {
if (rc & ~NETDEV_TX_MASK)
goto out_kfree_gso_skb;
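Both the linear and the GSO-segment paths now give the TX hook a chance to rewrite or consume each skb before ndo_start_xmit(). The contract is: return the (possibly modified or reallocated) skb to have it transmitted, or free/consume it and return NULL, in which case the core reports NETDEV_TX_OK without touching the hardware. A minimal conforming callback, assuming a hypothetical 4-byte switch tag and example_* names:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Illustrative TX hook: prepend a hypothetical 4-byte tag for the switch. */
static struct sk_buff *example_mangle_tx(struct net_device *dev,
					 struct sk_buff *skb)
{
	/* make sure there is writable headroom for the tag */
	if (skb_cow_head(skb, 4) < 0) {
		dev_kfree_skb_any(skb);
		return NULL;		/* consumed: core reports NETDEV_TX_OK */
	}

	memset(skb_push(skb, 4), 0, 4);	/* port/priority bits would go here */
	return skb;			/* handed on to ndo_start_xmit() */
}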
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -271,6 +271,22 @@ struct sk_buff *__netdev_alloc_skb(struc
}
EXPORT_SYMBOL(__netdev_alloc_skb);
+struct sk_buff *__netdev_alloc_skb_ip_align(struct net_device *dev,
+ unsigned int length, gfp_t gfp)
+{
+ struct sk_buff *skb = __netdev_alloc_skb(dev, length + NET_IP_ALIGN, gfp);
+
+#ifdef CONFIG_ETHERNET_PACKET_MANGLE
+ if (dev->priv_flags & IFF_NO_IP_ALIGN)
+ return skb;
+#endif
+
+ if (NET_IP_ALIGN && skb)
+ skb_reserve(skb, NET_IP_ALIGN);
+ return skb;
+}
+EXPORT_SYMBOL(__netdev_alloc_skb_ip_align);
+
void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, int off,
int size)
{
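Moving __netdev_alloc_skb_ip_align() out of line lets it look at priv_flags: a driver whose hardware cannot tolerate the usual 2-byte NET_IP_ALIGN offset (for example a DMA engine that needs word-aligned RX buffers) sets IFF_NO_IP_ALIGN and keeps using the same allocation helpers. A sketch, again with assumed example_* names:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Illustrative only: RX buffer setup in a driver that opts out of the
 * 2-byte IP alignment offset. */
static void example_setup(struct net_device *dev)
{
	dev->priv_flags |= IFF_NO_IP_ALIGN;
}

static struct sk_buff *example_rx_alloc(struct net_device *dev, unsigned int len)
{
	/* with IFF_NO_IP_ALIGN set, no skb_reserve(NET_IP_ALIGN) is done */
	return netdev_alloc_skb_ip_align(dev, len);
}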
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -160,6 +160,12 @@ __be16 eth_type_trans(struct sk_buff *sk
struct ethhdr *eth;
skb->dev = dev;
+
+#ifdef CONFIG_ETHERNET_PACKET_MANGLE
+ if (dev->eth_mangle_rx)
+ dev->eth_mangle_rx(dev, skb);
+#endif
+
skb_reset_mac_header(skb);
skb_pull_inline(skb, ETH_HLEN);
eth = eth_hdr(skb);
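The RX hook runs at the very top of eth_type_trans(), while skb->data still points at the first byte the device delivered and before the MAC header is reset and parsed, so the driver can strip its tag in place. Minimal counterpart to the TX sketch above (tag length and names remain assumptions):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Illustrative RX hook: strip the hypothetical 4-byte tag prepended by the
 * switch so the stack sees a plain Ethernet frame. */
static void example_mangle_rx(struct net_device *dev, struct sk_buff *skb)
{
	if (skb->len < 4)
		return;		/* runt frame, leave it untouched */

	skb_pull(skb, 4);	/* eth_type_trans() continues from the new skb->data */
}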