--- a/drivers/net/ar2313/ar2313.c
+++ b/drivers/net/ar2313/ar2313.c
@@ -841,6 +841,7 @@ static void ar2313_load_rx_ring(struct n
 	for (i = 0; i < nr_bufs; i++) {
 		struct sk_buff *skb;
 		ar2313_descr_t *rd;
+		int offset = RX_OFFSET;
 
 		if (sp->rx_skb[idx]) {
 #if DEBUG_RX
@@ -862,7 +863,9 @@ static void ar2313_load_rx_ring(struct n
 		 * Make sure IP header starts on a fresh cache line.
 		 */
 		skb->dev = dev;
-		skb_reserve(skb, RX_OFFSET);
+		if (sp->phy_dev)
+			offset += sp->phy_dev->pkt_align;
+		skb_reserve(skb, offset);
 		sp->rx_skb[idx] = skb;
 
 		rd = (ar2313_descr_t *) & sp->rx_ring[idx];
@@ -953,6 +956,7 @@ static int ar2313_rx_int(struct net_devi
 			/* alloc new buffer. */
 			skb_new = dev_alloc_skb(AR2313_BUFSIZE + RX_OFFSET + 128);
 			if (skb_new != NULL) {
+				int offset;
 				skb = sp->rx_skb[idx];
 
 				/* set skb */
@@ -960,13 +964,17 @@ static int ar2313_rx_int(struct net_devi
 					((status >> DMA_RX_LEN_SHIFT) & 0x3fff) - CRC_LEN);
 
 				dev->stats.rx_bytes += skb->len;
-				skb->protocol = eth_type_trans(skb, dev);
+
 				/* pass the packet to upper layers */
-				netif_rx(skb);
+				sp->rx(skb);
 
 				skb_new->dev = dev;
+
 				/* 16 bit align */
-				skb_reserve(skb_new, RX_OFFSET + 32);
+				offset = RX_OFFSET + 32;
+				if (sp->phy_dev)
+					offset += sp->phy_dev->pkt_align;
+				skb_reserve(skb_new, offset);
 
 				/* reset descriptor's curr_addr */
 				rxdesc->addr = virt_to_phys(skb_new->data);
@@ -1392,6 +1400,8 @@ static int ar2313_mdiobus_probe (struct
 		return PTR_ERR(phydev);
 	}
 
+	sp->rx = phydev->netif_rx;
+
 	/* mask with MAC supported features */
 	phydev->supported &= (SUPPORTED_10baseT_Half
 			      | SUPPORTED_10baseT_Full
--- a/drivers/net/ar2313/ar2313.h
+++ b/drivers/net/ar2313/ar2313.h
@@ -107,6 +107,8 @@ typedef struct {
  */
 struct ar2313_private {
 	struct net_device *dev;
+	int (*rx)(struct sk_buff *skb);
+
 	int version;
 	u32 mb[2];
 
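
The hunks above read phydev->pkt_align and phydev->netif_rx, neither of which exists in the stock kernel's struct phy_device, so this driver patch only builds on top of a companion kernel patch that adds them (e.g. OpenWrt's generic phy_packets patch). A minimal sketch of what that companion change is assumed to provide, with field names taken from the usage above:

/*
 * Sketch only -- not part of this diff.  Rough shape of the
 * struct phy_device extension (include/linux/phy.h) that the
 * driver hunks above are assumed to build against.
 */
struct phy_device {
	/* ... existing fields ... */

	/*
	 * RX-path hook a PHY/switch driver may override, e.g. to strip
	 * a switch tag and run eth_type_trans() itself before handing
	 * the frame up; assumed to default to the kernel's netif_rx()
	 * in phy_device_create() so that sp->rx() is always callable.
	 */
	int (*netif_rx)(struct sk_buff *skb);

	/*
	 * Extra headroom (in bytes) to reserve in front of received
	 * packets; the MAC driver adds it to its own RX_OFFSET before
	 * calling skb_reserve(), as in the hunks above.
	 */
	int pkt_align;
};

This assumption would also explain why the eth_type_trans() call disappears from ar2313_rx_int(): once received frames may carry a switch tag, only the PHY driver's hook knows where the Ethernet header really ends, so setting skb->protocol presumably moves into that hook.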