From 769ee01960e4241017ab9f9f760fc51c1141bf90 Mon Sep 17 00:00:00 2001
From: nbd
Date: Sat, 13 Oct 2007 02:04:37 +0000
Subject: add patches for 2.6.23 on brcm47xx (not enabled yet)

git-svn-id: svn://svn.openwrt.org/openwrt/trunk@9279 3c298f89-4303-0410-b956-a3cf2f4a3e73
---
 .../patches-2.6.23/200-b44_ssb_fixup.patch | 274 +++++++++++++++++++++
 1 file changed, 274 insertions(+)
 create mode 100644 target/linux/brcm47xx/patches-2.6.23/200-b44_ssb_fixup.patch

diff --git a/target/linux/brcm47xx/patches-2.6.23/200-b44_ssb_fixup.patch b/target/linux/brcm47xx/patches-2.6.23/200-b44_ssb_fixup.patch
new file mode 100644
index 000000000..e0456c6d8
--- /dev/null
+++ b/target/linux/brcm47xx/patches-2.6.23/200-b44_ssb_fixup.patch
@@ -0,0 +1,274 @@
+Index: linux-2.6.23/drivers/net/b44.c
+===================================================================
+--- linux-2.6.23.orig/drivers/net/b44.c 2007-10-13 02:46:38.946989430 +0200
++++ linux-2.6.23/drivers/net/b44.c 2007-10-13 03:15:34.889915180 +0200
+@@ -129,7 +129,7 @@
+ unsigned long offset,
+ enum dma_data_direction dir)
+ {
+- dma_sync_single_range_for_device(&sdev->dev, dma_base,
++ dma_sync_single_range_for_device(sdev->dev, dma_base,
+ offset & dma_desc_align_mask,
+ dma_desc_sync_size, dir);
+ }
+@@ -139,7 +139,7 @@
+ unsigned long offset,
+ enum dma_data_direction dir)
+ {
+- dma_sync_single_range_for_cpu(&sdev->dev, dma_base,
++ dma_sync_single_range_for_cpu(sdev->dev, dma_base,
+ offset & dma_desc_align_mask,
+ dma_desc_sync_size, dir);
+ }
+@@ -563,7 +563,7 @@
+
+ BUG_ON(skb == NULL);
+
+- dma_unmap_single(&bp->sdev->dev,
++ dma_unmap_single(bp->sdev->dev,
+ pci_unmap_addr(rp, mapping),
+ skb->len,
+ DMA_TO_DEVICE);
+@@ -603,7 +603,7 @@
+ if (skb == NULL)
+ return -ENOMEM;
+
+- mapping = dma_map_single(&bp->sdev->dev, skb->data,
++ mapping = dma_map_single(bp->sdev->dev, skb->data,
+ RX_PKT_BUF_SZ,
+ DMA_FROM_DEVICE);
+
+@@ -613,18 +613,18 @@
+ mapping + RX_PKT_BUF_SZ > DMA_30BIT_MASK) {
+ /* Sigh... */
+ if (!dma_mapping_error(mapping))
+- dma_unmap_single(&bp->sdev->dev, mapping, RX_PKT_BUF_SZ,DMA_FROM_DEVICE);
++ dma_unmap_single(bp->sdev->dev, mapping, RX_PKT_BUF_SZ,DMA_FROM_DEVICE);
+ dev_kfree_skb_any(skb);
+ skb = __netdev_alloc_skb(bp->dev, RX_PKT_BUF_SZ, GFP_ATOMIC|GFP_DMA);
+ if (skb == NULL)
+ return -ENOMEM;
+- mapping = dma_map_single(&bp->sdev->dev, skb->data,
++ mapping = dma_map_single(bp->sdev->dev, skb->data,
+ RX_PKT_BUF_SZ,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(mapping) ||
+ mapping + RX_PKT_BUF_SZ > DMA_30BIT_MASK) {
+ if (!dma_mapping_error(mapping))
+- dma_unmap_single(&bp->sdev->dev, mapping, RX_PKT_BUF_SZ,DMA_FROM_DEVICE);
++ dma_unmap_single(bp->sdev->dev, mapping, RX_PKT_BUF_SZ,DMA_FROM_DEVICE);
+ dev_kfree_skb_any(skb);
+ return -ENOMEM;
+ }
+@@ -700,7 +700,7 @@
+ dest_idx * sizeof(dest_desc),
+ DMA_BIDIRECTIONAL);
+
+- dma_sync_single_for_device(&bp->sdev->dev, le32_to_cpu(src_desc->addr),
++ dma_sync_single_for_device(bp->sdev->dev, le32_to_cpu(src_desc->addr),
+ RX_PKT_BUF_SZ,
+ DMA_FROM_DEVICE);
+ }
+@@ -722,7 +722,7 @@
+ struct rx_header *rh;
+ u16 len;
+
+- dma_sync_single_for_cpu(&bp->sdev->dev, map,
++ dma_sync_single_for_cpu(bp->sdev->dev, map,
+ RX_PKT_BUF_SZ,
+ DMA_FROM_DEVICE);
+ rh = (struct rx_header *) skb->data;
+@@ -756,7 +756,7 @@
+ skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod);
+ if (skb_size < 0)
+ goto drop_it;
+- dma_unmap_single(&bp->sdev->dev, map,
++ dma_unmap_single(bp->sdev->dev, map,
+ skb_size, DMA_FROM_DEVICE);
+ /* Leave out rx_header */
+ skb_put(skb, len + RX_PKT_OFFSET);
+@@ -928,23 +928,23 @@
+ goto err_out;
+ }
+
+- mapping = dma_map_single(&bp->sdev->dev, skb->data, len, DMA_TO_DEVICE);
++ mapping = dma_map_single(bp->sdev->dev, skb->data, len, DMA_TO_DEVICE);
+ if (dma_mapping_error(mapping) || mapping + len > DMA_30BIT_MASK) {
+ struct sk_buff *bounce_skb;
+
+ /* Chip can't handle DMA to/from >1GB, use bounce buffer */
+ if (!dma_mapping_error(mapping))
+- dma_unmap_single(&bp->sdev->dev, mapping, len, DMA_TO_DEVICE);
++ dma_unmap_single(bp->sdev->dev, mapping, len, DMA_TO_DEVICE);
+
+ bounce_skb = __dev_alloc_skb(len, GFP_ATOMIC | GFP_DMA);
+ if (!bounce_skb)
+ goto err_out;
+
+- mapping = dma_map_single(&bp->sdev->dev, bounce_skb->data,
++ mapping = dma_map_single(bp->sdev->dev, bounce_skb->data,
+ len, DMA_TO_DEVICE);
+ if (dma_mapping_error(mapping) || mapping + len > DMA_30BIT_MASK) {
+ if (!dma_mapping_error(mapping))
+- dma_unmap_single(&bp->sdev->dev, mapping,
++ dma_unmap_single(bp->sdev->dev, mapping,
+ len, DMA_TO_DEVICE);
+ dev_kfree_skb_any(bounce_skb);
+ goto err_out;
+@@ -1043,7 +1043,7 @@
+
+ if (rp->skb == NULL)
+ continue;
+- dma_unmap_single(&bp->sdev->dev,
++ dma_unmap_single(bp->sdev->dev,
+ pci_unmap_addr(rp, mapping),
+ RX_PKT_BUF_SZ,
+ DMA_FROM_DEVICE);
+@@ -1057,7 +1057,7 @@
+
+ if (rp->skb == NULL)
+ continue;
+- dma_unmap_single(&bp->sdev->dev,
++ dma_unmap_single(bp->sdev->dev,
+ pci_unmap_addr(rp, mapping),
+ rp->skb->len,
+ DMA_TO_DEVICE);
+@@ -1082,12 +1082,12 @@
+ memset(bp->tx_ring, 0, B44_TX_RING_BYTES);
+
+ if (bp->flags & B44_FLAG_RX_RING_HACK)
+- dma_sync_single_for_device(&bp->sdev->dev, bp->rx_ring_dma,
++ dma_sync_single_for_device(bp->sdev->dev, bp->rx_ring_dma,
+ DMA_TABLE_BYTES,
+ DMA_BIDIRECTIONAL);
+
+ if (bp->flags & B44_FLAG_TX_RING_HACK)
+- dma_sync_single_for_device(&bp->sdev->dev, bp->tx_ring_dma,
++ dma_sync_single_for_device(bp->sdev->dev, bp->tx_ring_dma,
+ DMA_TABLE_BYTES,
+ DMA_TO_DEVICE);
+
+@@ -1109,24 +1109,24 @@
+ bp->tx_buffers = NULL;
+ if (bp->rx_ring) {
+ if (bp->flags & B44_FLAG_RX_RING_HACK) {
+- dma_unmap_single(&bp->sdev->dev, bp->rx_ring_dma,
++ dma_unmap_single(bp->sdev->dev, bp->rx_ring_dma,
+ DMA_TABLE_BYTES,
+ DMA_BIDIRECTIONAL);
+ kfree(bp->rx_ring);
+ } else
+- dma_free_coherent(&bp->sdev->dev, DMA_TABLE_BYTES,
++ dma_free_coherent(bp->sdev->dev, DMA_TABLE_BYTES,
+ bp->rx_ring, bp->rx_ring_dma);
+ bp->rx_ring = NULL;
+ bp->flags &= ~B44_FLAG_RX_RING_HACK;
+ }
+ if (bp->tx_ring) {
+ if (bp->flags & B44_FLAG_TX_RING_HACK) {
+- dma_unmap_single(&bp->sdev->dev, bp->tx_ring_dma,
++ dma_unmap_single(bp->sdev->dev, bp->tx_ring_dma,
+ DMA_TABLE_BYTES,
+ DMA_TO_DEVICE);
+ kfree(bp->tx_ring);
+ } else
+- dma_free_coherent(&bp->sdev->dev, DMA_TABLE_BYTES,
++ dma_free_coherent(bp->sdev->dev, DMA_TABLE_BYTES,
+ bp->tx_ring, bp->tx_ring_dma);
+ bp->tx_ring = NULL;
+ bp->flags &= ~B44_FLAG_TX_RING_HACK;
+@@ -1152,7 +1152,7 @@
+ goto out_err;
+
+ size = DMA_TABLE_BYTES;
+- bp->rx_ring = dma_alloc_coherent(&bp->sdev->dev, size, &bp->rx_ring_dma, GFP_ATOMIC);
++ bp->rx_ring = dma_alloc_coherent(bp->sdev->dev, size, &bp->rx_ring_dma, GFP_ATOMIC);
+ if (!bp->rx_ring) {
+ /* Allocation may have failed due to pci_alloc_consistent
+ insisting on use of GFP_DMA, which is more restrictive
+@@ -1164,7 +1164,7 @@
+ if (!rx_ring)
+ goto out_err;
+
+- rx_ring_dma = dma_map_single(&bp->sdev->dev, rx_ring,
++ rx_ring_dma = dma_map_single(bp->sdev->dev, rx_ring,
+ DMA_TABLE_BYTES,
+ DMA_BIDIRECTIONAL);
+
+@@ -1179,7 +1179,7 @@
+ bp->flags |= B44_FLAG_RX_RING_HACK;
+ }
+
+- bp->tx_ring = dma_alloc_coherent(&bp->sdev->dev, size, &bp->tx_ring_dma, GFP_ATOMIC);
++ bp->tx_ring = dma_alloc_coherent(bp->sdev->dev, size, &bp->tx_ring_dma, GFP_ATOMIC);
+ if (!bp->tx_ring) {
+ /* Allocation may have failed due to dma_alloc_coherent
+ insisting on use of GFP_DMA, which is more restrictive
+@@ -1191,7 +1191,7 @@
+ if (!tx_ring)
+ goto out_err;
+
+- tx_ring_dma = dma_map_single(&bp->sdev->dev, tx_ring,
++ tx_ring_dma = dma_map_single(bp->sdev->dev, tx_ring,
+ DMA_TABLE_BYTES,
+ DMA_TO_DEVICE);
+
+@@ -2288,7 +2288,7 @@
+ bp->flags |= B44_FLAG_BUGGY_TXPTR;
+ */
+
+- if (bp->sdev->dev->id.revision >= 7)
++ if (bp->sdev->id.revision >= 7)
+ bp->flags |= B44_FLAG_B0_ANDLATER;
+
+ return err;
+@@ -2298,7 +2298,6 @@
+ const struct ssb_device_id *ent)
+ {
+ static int b44_version_printed = 0;
+- unsigned long b44reg_base, b44reg_len;
+ struct net_device *dev;
+ struct b44 *bp;
+ int err, i;
+@@ -2310,13 +2309,13 @@
+
+ dev = alloc_etherdev(sizeof(*bp));
+ if (!dev) {
+- dev_err(&pdev->dev, "Etherdev alloc failed, aborting.\n");
++ dev_err(sdev->dev, "Etherdev alloc failed, aborting.\n");
+ err = -ENOMEM;
+ goto out;
+ }
+
+ SET_MODULE_OWNER(dev);
+- SET_NETDEV_DEV(dev,&sdev->dev);
++ SET_NETDEV_DEV(dev,sdev->dev);
+
+ /* No interesting netdevice features in this card... */
+ dev->features |= 0;
+@@ -2354,7 +2353,7 @@
+
+ err = b44_get_invariants(bp);
+ if (err) {
+- dev_err(&sdev->dev,
++ dev_err(sdev->dev,
+ "Problem fetching invariants of chip, aborting.\n");
+ goto err_out_free_dev;
+ }
+@@ -2375,7 +2374,7 @@
+
+ err = register_netdev(dev);
+ if (err) {
+- dev_err(&sdev->dev, "Cannot register net device, aborting.\n");
++ dev_err(sdev->dev, "Cannot register net device, aborting.\n");
+ goto out;
+ }
+
+@@ -2454,7 +2453,6 @@
+ rc = request_irq(dev->irq, b44_interrupt, IRQF_SHARED, dev->name, dev);
+ if (rc) {
+ printk(KERN_ERR PFX "%s: request_irq failed\n", dev->name);
+- pci_disable_device(pdev);
+ return rc;
+ }
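
Note on the recurring change: every hunk above applies the same mechanical fix. The unpatched call sites pass &sdev->dev / &bp->sdev->dev to the DMA and driver-core helpers, while the patched ones pass sdev->dev / bp->sdev->dev directly (and read bp->sdev->id.revision straight from the SSB device), which implies that in the SSB layer this patch targets, the ssb_device's dev member is a struct device pointer rather than an embedded struct device; leftover PCI-era calls such as dev_err(&pdev->dev, ...) and pci_disable_device(pdev) are redirected or dropped for the same reason. The snippet below is only a minimal stand-alone illustration of that pointer-vs-embedded distinction; the struct and function names are simplified stand-ins, not the real ssb or kernel API.

/* build with: cc -Wall pointer_vs_embedded.c */
#include <stdio.h>

struct device { const char *name; };

/* Layout the old call sites assumed: the device struct is embedded. */
struct ssb_device_embedded { struct device dev; };

/* Layout the patched call sites assume: a pointer to the bus device. */
struct ssb_device_pointer { struct device *dev; };

/* Stand-in for a DMA helper that takes a struct device pointer. */
static void dma_helper(struct device *dev)
{
	printf("mapping against %s\n", dev->name);
}

int main(void)
{
	struct device bus_dev = { "ssb0:0" };
	struct ssb_device_embedded a = { .dev = bus_dev };
	struct ssb_device_pointer b = { .dev = &bus_dev };

	dma_helper(&a.dev); /* old style: take the address of the embedded member */
	dma_helper(b.dev);  /* patched style: the member already is the pointer */
	return 0;
}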