path: root/target/linux/rb532/patches-2.6.28/010-korina_rework_korina_rx.patch
This function needs an early exit condition to work properly; otherwise
the caller assumes the NAPI budget wasn't enough to handle all received
packets, and korina_rx is called again (and again and again and ...).
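
For context, a minimal sketch of the poll side that relies on this
(simplified from the 2.6.28-era korina_poll from memory; the interrupt
unmask write is elided, and helper names changed in later kernels):

	/* Sketch only: illustrates the NAPI contract, not the
	 * literal 2.6.28 korina_poll body. */
	static int korina_poll(struct napi_struct *napi, int budget)
	{
		struct korina_private *lp =
			container_of(napi, struct korina_private, napi);
		struct net_device *dev = lp->dev;
		int work_done;

		work_done = korina_rx(dev, budget);
		if (work_done < budget) {
			/* Ring drained: leave polled mode and
			 * re-enable Rx interrupts (register write
			 * elided here). */
			netif_rx_complete(dev, napi);
		}
		/* Returning work_done == budget tells the core more
		 * packets are pending, so it calls korina_poll again.
		 * Without the early break added below, korina_rx
		 * always consumes the full budget even on an empty
		 * ring, so polling never stops. */
		return work_done;
	}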

Signed-off-by: Phil Sutter <n0-1@freewrt.org>
---
--- a/drivers/net/korina.c	2009-01-19 23:19:10.000000000 +0100
+++ b/drivers/net/korina.c	2009-01-19 23:25:31.000000000 +0100
@@ -353,15 +353,20 @@
 	struct dma_desc *rd = &lp->rd_ring[lp->rx_next_done];
 	struct sk_buff *skb, *skb_new;
 	u8 *pkt_buf;
-	u32 devcs, pkt_len, dmas, rx_free_desc;
+	u32 devcs, pkt_len, dmas;
 	int count;
 
 	dma_cache_inv((u32)rd, sizeof(*rd));
 
 	for (count = 0; count < limit; count++) {
+		skb = lp->rx_skb[lp->rx_next_done];
+		skb_new = NULL;
 
 		devcs = rd->devcs;
 
+		if ((KORINA_RBSIZE - (u32)DMA_COUNT(rd->control)) == 0)
+			break;
+
 		/* Update statistics counters */
 		if (devcs & ETH_RX_CRC)
 			dev->stats.rx_crc_errors++;
@@ -384,64 +389,53 @@
 			 * in Rc32434 (errata ref #077) */
 			dev->stats.rx_errors++;
 			dev->stats.rx_dropped++;
-		}
-
-		while ((rx_free_desc = KORINA_RBSIZE - (u32)DMA_COUNT(rd->control)) != 0) {
-			/* init the var. used for the later
-			 * operations within the while loop */
-			skb_new = NULL;
+		} else if ((devcs & ETH_RX_ROK)) {
 			pkt_len = RCVPKT_LENGTH(devcs);
-			skb = lp->rx_skb[lp->rx_next_done];
+			/* must be the (first and) last
+			 * descriptor then */
+			pkt_buf = (u8 *)lp->rx_skb[lp->rx_next_done]->data;
+
+			/* invalidate the cache */
+			dma_cache_inv((unsigned long)pkt_buf, pkt_len - 4);
+
+			/* Malloc up new buffer. */
+			skb_new = netdev_alloc_skb(dev, KORINA_RBSIZE + 2);
+
+			if (!skb_new)
+				break;
+			/* Do not count the CRC */
+			skb_put(skb, pkt_len - 4);
+			skb->protocol = eth_type_trans(skb, dev);
+
+			/* Pass the packet to upper layers */
+			netif_receive_skb(skb);
+			dev->stats.rx_packets++;
+			dev->stats.rx_bytes += pkt_len;
+
+			/* Update the mcast stats */
+			if (devcs & ETH_RX_MP)
+				dev->stats.multicast++;
 
-			if ((devcs & ETH_RX_ROK)) {
-				/* must be the (first and) last
-				 * descriptor then */
-				pkt_buf = (u8 *)lp->rx_skb[lp->rx_next_done]->data;
-
-				/* invalidate the cache */
-				dma_cache_inv((unsigned long)pkt_buf, pkt_len - 4);
-
-				/* Malloc up new buffer. */
-				skb_new = netdev_alloc_skb(dev, KORINA_RBSIZE + 2);
-
-				if (!skb_new)
-					break;
-				/* Do not count the CRC */
-				skb_put(skb, pkt_len - 4);
-				skb->protocol = eth_type_trans(skb, dev);
-
-				/* Pass the packet to upper layers */
-				netif_receive_skb(skb);
-				dev->last_rx = jiffies;
-				dev->stats.rx_packets++;
-				dev->stats.rx_bytes += pkt_len;
-
-				/* Update the mcast stats */
-				if (devcs & ETH_RX_MP)
-					dev->stats.multicast++;
-
-				lp->rx_skb[lp->rx_next_done] = skb_new;
-			}
-
-			rd->devcs = 0;
-
-			/* Restore descriptor's curr_addr */
-			if (skb_new)
-				rd->ca = CPHYSADDR(skb_new->data);
-			else
-				rd->ca = CPHYSADDR(skb->data);
+			lp->rx_skb[lp->rx_next_done] = skb_new;
+		}
+		rd->devcs = 0;
+
+		/* Restore descriptor's curr_addr */
+		if (skb_new)
+			rd->ca = CPHYSADDR(skb_new->data);
+		else
+			rd->ca = CPHYSADDR(skb->data);
 
-			rd->control = DMA_COUNT(KORINA_RBSIZE) |
+		rd->control = DMA_COUNT(KORINA_RBSIZE) |
 				DMA_DESC_COD | DMA_DESC_IOD;
-			lp->rd_ring[(lp->rx_next_done - 1) &
-				KORINA_RDS_MASK].control &=
-				~DMA_DESC_COD;
-
-			lp->rx_next_done = (lp->rx_next_done + 1) & KORINA_RDS_MASK;
-			dma_cache_wback((u32)rd, sizeof(*rd));
-			rd = &lp->rd_ring[lp->rx_next_done];
-			writel(~DMA_STAT_DONE, &lp->rx_dma_regs->dmas);
-		}
+		lp->rd_ring[(lp->rx_next_done - 1) &
+			KORINA_RDS_MASK].control &=
+			~DMA_DESC_COD;
+
+		lp->rx_next_done = (lp->rx_next_done + 1) & KORINA_RDS_MASK;
+		dma_cache_wback((u32)rd, sizeof(*rd));
+		rd = &lp->rd_ring[lp->rx_next_done];
+		writel(~DMA_STAT_DONE, &lp->rx_dma_regs->dmas);
 	}
 
 	dmas = readl(&lp->rx_dma_regs->dmas);