[PATCH] MIPS: BCM63XX: enable ethernet for BCM6345 BCM6345 has a slightly older DMA engine which is not supported by default by the bcm63xx_enet driver. This patch adds the missing Ethernet DMA definitions as well as patches all the places in the ethernet driver where the DMA reading/writing is different. Signed-off-by: Florian Fainelli --- --- a/arch/mips/bcm63xx/dev-enet.c +++ b/arch/mips/bcm63xx/dev-enet.c @@ -172,7 +172,7 @@ int __init bcm63xx_enet_register(int uni if (unit > 1) return -ENODEV; - if (unit == 1 && BCMCPU_IS_6338()) + if (unit == 1 && (BCMCPU_IS_6338() || BCMCPU_IS_6345())) return -ENODEV; ret = register_shared(); --- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h +++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_regs.h @@ -764,6 +764,37 @@ /* State Ram Word 4 */ #define ENETDMA_SRAM4_REG(x) (0x20c + (x) * 0x10) +/* Broadcom 6345 ENET DMA definitions */ +#define ENETDMA_6345_CHANCFG_REG(x) (0x00 + (x) * 0x40) +#define ENETDMA_6345_CHANCFG_EN_SHIFT 0 +#define ENETDMA_6345_CHANCFG_EN_MASK (1 << ENETDMA_6345_CHANCFG_EN_SHIFT) +#define ENETDMA_6345_PKTHALT_SHIFT 1 +#define ENETDMA_6345_PKTHALT_MASK (1 << ENETDMA_6345_PKTHALT_SHIFT) +#define ENETDMA_6345_CHAINING_SHIFT 2 +#define ENETDMA_6345_CHAINING_MASK (1 << ENETDMA_6345_CHAINING_SHIFT) +#define ENETDMA_6345_WRAP_EN_SHIFT 3 +#define ENETDMA_6345_WRAP_EN_MASK (1 << ENETDMA_6345_WRAP_EN_SHIFT) +#define ENETDMA_6345_FLOWC_EN_SHIFT 4 +#define ENETDMA_6345_FLOWC_EN_MASK (1 << ENETDMA_6345_FLOWC_EN_SHIFT) + +#define ENETDMA_6345_MAXBURST_REG(x) (0x04 + (x) * 0x40) + +#define ENETDMA_6345_RSTART_REG(x) (0x08 + (x) * 0x40) + +#define ENETDMA_6345_LEN_REG(x) (0x0C + (x) * 0x40) + +#define ENETDMA_6345_BSTAT_REG(x) (0x10 + (x) * 0x40) + +#define ENETDMA_6345_IR_REG(x) (0x14 + (x) * 0x40) +#define ENETDMA_6345_IR_BUFDONE_MASK (1 << 0) +#define ENETDMA_6345_IR_PKTDONE_MASK (1 << 1) +#define ENETDMA_6345_IR_NOTOWNER_MASK (1 << 2) + +#define ENETDMA_6345_IRMASK_REG(x) (0x18 + (x) * 0x40) + +#define 
ENETDMA_6345_FC_REG(x) (0x1C + (x) * 0x40) + +#define ENETDMA_6345_BUFALLOC_REG(x) (0x20 + (x) * 0x40) /************************************************************************* * _REG relative to RSET_ENETDMAC --- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c +++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c @@ -32,6 +32,7 @@ #include #include +#include #include "bcm63xx_enet.h" static char bcm_enet_driver_name[] = "bcm63xx_enet"; @@ -243,6 +244,7 @@ static void bcm_enet_mdio_write_mii(stru static int bcm_enet_refill_rx(struct net_device *dev) { struct bcm_enet_priv *priv; + unsigned int desc_shift = BCMCPU_IS_6345() ? DMADESC_6345_SHIFT : 0; priv = netdev_priv(dev); @@ -270,7 +272,7 @@ static int bcm_enet_refill_rx(struct net len_stat = priv->rx_skb_size << DMADESC_LENGTH_SHIFT; len_stat |= DMADESC_OWNER_MASK; if (priv->rx_dirty_desc == priv->rx_ring_size - 1) { - len_stat |= DMADESC_WRAP_MASK; + len_stat |= (DMADESC_WRAP_MASK >> desc_shift); priv->rx_dirty_desc = 0; } else { priv->rx_dirty_desc++; @@ -281,7 +283,10 @@ static int bcm_enet_refill_rx(struct net priv->rx_desc_count++; /* tell dma engine we allocated one buffer */ - enet_dma_writel(priv, 1, ENETDMA_BUFALLOC_REG(priv->rx_chan)); + if (!BCMCPU_IS_6345()) + enet_dma_writel(priv, 1, ENETDMA_BUFALLOC_REG(priv->rx_chan)); + else + enet_dma_writel(priv, 1, ENETDMA_6345_BUFALLOC_REG(priv->rx_chan)); } /* If rx ring is still empty, set a timer to try allocating @@ -319,6 +324,7 @@ static int bcm_enet_receive_queue(struct struct bcm_enet_priv *priv; struct device *kdev; int processed; + unsigned int desc_shift = BCMCPU_IS_6345() ? 
DMADESC_6345_SHIFT : 0; priv = netdev_priv(dev); kdev = &priv->pdev->dev; @@ -357,7 +363,7 @@ static int bcm_enet_receive_queue(struct /* if the packet does not have start of packet _and_ * end of packet flag set, then just recycle it */ - if ((len_stat & DMADESC_ESOP_MASK) != DMADESC_ESOP_MASK) { + if ((len_stat & (DMADESC_ESOP_MASK >> desc_shift)) != (DMADESC_ESOP_MASK >> desc_shift)) { dev->stats.rx_dropped++; continue; } @@ -418,8 +424,15 @@ static int bcm_enet_receive_queue(struct bcm_enet_refill_rx(dev); /* kick rx dma */ - enet_dmac_writel(priv, ENETDMAC_CHANCFG_EN_MASK, - ENETDMAC_CHANCFG_REG(priv->rx_chan)); + if (!BCMCPU_IS_6345()) + enet_dmac_writel(priv, ENETDMAC_CHANCFG_EN_MASK, + ENETDMAC_CHANCFG_REG(priv->rx_chan)); + else + enet_dma_writel(priv, ENETDMA_6345_CHANCFG_EN_MASK | + ENETDMA_6345_CHAINING_MASK | + ENETDMA_6345_WRAP_EN_MASK | + ENETDMA_6345_FLOWC_EN_MASK, + ENETDMA_6345_CHANCFG_REG(priv->rx_chan)); } return processed; @@ -494,10 +507,21 @@ static int bcm_enet_poll(struct napi_str dev = priv->net_dev; /* ack interrupts */ - enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK, - ENETDMAC_IR_REG(priv->rx_chan)); - enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK, - ENETDMAC_IR_REG(priv->tx_chan)); + if (!BCMCPU_IS_6345()) { + enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK, + ENETDMAC_IR_REG(priv->rx_chan)); + enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK, + ENETDMAC_IR_REG(priv->tx_chan)); + } else { + enet_dma_writel(priv, ENETDMA_IR_BUFDONE_MASK | + ENETDMA_IR_PKTDONE_MASK | + ENETDMA_IR_NOTOWNER_MASK, + ENETDMA_6345_IR_REG(priv->rx_chan)); + enet_dma_writel(priv, ENETDMA_IR_BUFDONE_MASK | + ENETDMA_IR_PKTDONE_MASK | + ENETDMA_IR_NOTOWNER_MASK, + ENETDMA_6345_IR_REG(priv->tx_chan)); + } /* reclaim sent skb */ tx_work_done = bcm_enet_tx_reclaim(dev, 0); @@ -516,10 +540,21 @@ static int bcm_enet_poll(struct napi_str napi_complete(napi); /* restore rx/tx interrupt */ - enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK, - 
ENETDMAC_IRMASK_REG(priv->rx_chan)); - enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK, - ENETDMAC_IRMASK_REG(priv->tx_chan)); + if (!BCMCPU_IS_6345()) { + enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK, + ENETDMAC_IRMASK_REG(priv->rx_chan)); + enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK, + ENETDMAC_IRMASK_REG(priv->tx_chan)); + } else { + enet_dma_writel(priv, ENETDMA_IR_BUFDONE_MASK | + ENETDMA_IR_PKTDONE_MASK | + ENETDMA_IR_NOTOWNER_MASK, + ENETDMA_6345_IRMASK_REG(priv->rx_chan)); + enet_dma_writel(priv, ENETDMA_IR_BUFDONE_MASK | + ENETDMA_IR_PKTDONE_MASK | + ENETDMA_IR_NOTOWNER_MASK, + ENETDMA_6345_IRMASK_REG(priv->tx_chan)); + } return rx_work_done; } @@ -562,8 +597,13 @@ static irqreturn_t bcm_enet_isr_dma(int priv = netdev_priv(dev); /* mask rx/tx interrupts */ - enet_dmac_writel(priv, 0, ENETDMAC_IRMASK_REG(priv->rx_chan)); - enet_dmac_writel(priv, 0, ENETDMAC_IRMASK_REG(priv->tx_chan)); + if (!BCMCPU_IS_6345()) { + enet_dmac_writel(priv, 0, ENETDMAC_IRMASK_REG(priv->rx_chan)); + enet_dmac_writel(priv, 0, ENETDMAC_IRMASK_REG(priv->tx_chan)); + } else { + enet_dma_writel(priv, 0, ENETDMA_6345_IRMASK_REG(priv->rx_chan)); + enet_dma_writel(priv, 0, ENETDMA_6345_IRMASK_REG(priv->tx_chan)); + } napi_schedule(&priv->napi); @@ -579,6 +619,7 @@ static int bcm_enet_start_xmit(struct sk struct bcm_enet_desc *desc; u32 len_stat; int ret; + unsigned int desc_shift = BCMCPU_IS_6345() ? 
DMADESC_6345_SHIFT : 0; priv = netdev_priv(dev); @@ -624,14 +665,14 @@ static int bcm_enet_start_xmit(struct sk DMA_TO_DEVICE); len_stat = (skb->len << DMADESC_LENGTH_SHIFT) & DMADESC_LENGTH_MASK; - len_stat |= DMADESC_ESOP_MASK | + len_stat |= (DMADESC_ESOP_MASK >> desc_shift) | DMADESC_APPEND_CRC | DMADESC_OWNER_MASK; priv->tx_curr_desc++; if (priv->tx_curr_desc == priv->tx_ring_size) { priv->tx_curr_desc = 0; - len_stat |= DMADESC_WRAP_MASK; + len_stat |= (DMADESC_WRAP_MASK >> desc_shift); } priv->tx_desc_count--; @@ -642,8 +683,15 @@ static int bcm_enet_start_xmit(struct sk wmb(); /* kick tx dma */ - enet_dmac_writel(priv, ENETDMAC_CHANCFG_EN_MASK, - ENETDMAC_CHANCFG_REG(priv->tx_chan)); + if (!BCMCPU_IS_6345()) + enet_dmac_writel(priv, ENETDMAC_CHANCFG_EN_MASK, + ENETDMAC_CHANCFG_REG(priv->tx_chan)); + else + enet_dma_writel(priv, ENETDMA_6345_CHANCFG_EN_MASK | + ENETDMA_6345_CHAINING_MASK | + ENETDMA_6345_WRAP_EN_MASK | + ENETDMA_6345_FLOWC_EN_MASK, + ENETDMA_6345_CHANCFG_REG(priv->tx_chan)); /* stop queue if no more desc available */ if (!priv->tx_desc_count) @@ -771,6 +819,9 @@ static void bcm_enet_set_flow(struct bcm val &= ~ENET_RXCFG_ENFLOW_MASK; enet_writel(priv, val, ENET_RXCFG_REG); + if (BCMCPU_IS_6345()) + return; + /* tx flow control (pause frame generation) */ val = enet_dma_readl(priv, ENETDMA_CFG_REG); if (tx_en) @@ -886,8 +937,13 @@ static int bcm_enet_open(struct net_devi /* mask all interrupts and request them */ enet_writel(priv, 0, ENET_IRMASK_REG); - enet_dmac_writel(priv, 0, ENETDMAC_IRMASK_REG(priv->rx_chan)); - enet_dmac_writel(priv, 0, ENETDMAC_IRMASK_REG(priv->tx_chan)); + if (!BCMCPU_IS_6345()) { + enet_dmac_writel(priv, 0, ENETDMAC_IRMASK_REG(priv->rx_chan)); + enet_dmac_writel(priv, 0, ENETDMAC_IRMASK_REG(priv->tx_chan)); + } else { + enet_dma_writel(priv, 0, ENETDMA_6345_IRMASK_REG(priv->rx_chan)); + enet_dma_writel(priv, 0, ENETDMA_6345_IRMASK_REG(priv->tx_chan)); + } ret = request_irq(dev->irq, bcm_enet_isr_mac, 0, dev->name, 
dev); if (ret) @@ -966,8 +1022,12 @@ static int bcm_enet_open(struct net_devi priv->rx_curr_desc = 0; /* initialize flow control buffer allocation */ - enet_dma_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0, - ENETDMA_BUFALLOC_REG(priv->rx_chan)); + if (!BCMCPU_IS_6345()) + enet_dma_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0, + ENETDMA_BUFALLOC_REG(priv->rx_chan)); + else + enet_dma_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0, + ENETDMA_6345_BUFALLOC_REG(priv->rx_chan)); if (bcm_enet_refill_rx(dev)) { dev_err(kdev, "cannot allocate rx skb queue\n"); @@ -976,37 +1036,62 @@ static int bcm_enet_open(struct net_devi } /* write rx & tx ring addresses */ - enet_dmas_writel(priv, priv->rx_desc_dma, - ENETDMAS_RSTART_REG(priv->rx_chan)); - enet_dmas_writel(priv, priv->tx_desc_dma, + if (!BCMCPU_IS_6345()) { + enet_dmas_writel(priv, priv->rx_desc_dma, + ENETDMAS_RSTART_REG(priv->rx_chan)); + enet_dmas_writel(priv, priv->tx_desc_dma, ENETDMAS_RSTART_REG(priv->tx_chan)); + } else { + enet_dma_writel(priv, priv->rx_desc_dma, + ENETDMA_6345_RSTART_REG(priv->rx_chan)); + enet_dma_writel(priv, priv->tx_desc_dma, + ENETDMA_6345_RSTART_REG(priv->tx_chan)); + } /* clear remaining state ram for rx & tx channel */ - enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG(priv->rx_chan)); - enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG(priv->tx_chan)); - enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG(priv->rx_chan)); - enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG(priv->tx_chan)); - enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG(priv->rx_chan)); - enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG(priv->tx_chan)); + if (!BCMCPU_IS_6345()) { + enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG(priv->rx_chan)); + enet_dmas_writel(priv, 0, ENETDMAS_SRAM2_REG(priv->tx_chan)); + enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG(priv->rx_chan)); + enet_dmas_writel(priv, 0, ENETDMAS_SRAM3_REG(priv->tx_chan)); + enet_dmas_writel(priv, 0, ENETDMAS_SRAM4_REG(priv->rx_chan)); + enet_dmas_writel(priv, 0, 
ENETDMAS_SRAM4_REG(priv->tx_chan)); + } else { + enet_dma_writel(priv, 0, ENETDMA_6345_FC_REG(priv->rx_chan)); + enet_dma_writel(priv, 0, ENETDMA_6345_FC_REG(priv->tx_chan)); + } /* set max rx/tx length */ enet_writel(priv, priv->hw_mtu, ENET_RXMAXLEN_REG); enet_writel(priv, priv->hw_mtu, ENET_TXMAXLEN_REG); /* set dma maximum burst len */ - enet_dmac_writel(priv, priv->dma_maxburst, - ENETDMAC_MAXBURST_REG(priv->rx_chan)); - enet_dmac_writel(priv, priv->dma_maxburst, - ENETDMAC_MAXBURST_REG(priv->tx_chan)); + if (!BCMCPU_IS_6345()) { + enet_dmac_writel(priv, priv->dma_maxburst, + ENETDMAC_MAXBURST_REG(priv->rx_chan)); + enet_dmac_writel(priv, priv->dma_maxburst, + ENETDMAC_MAXBURST_REG(priv->tx_chan)); + } else { + enet_dma_writel(priv, BCMENET_DMA_MAXBURST, + ENETDMA_6345_MAXBURST_REG(priv->rx_chan)); + enet_dma_writel(priv, BCMENET_DMA_MAXBURST, + ENETDMA_6345_MAXBURST_REG(priv->tx_chan)); + } /* set correct transmit fifo watermark */ enet_writel(priv, BCMENET_TX_FIFO_TRESH, ENET_TXWMARK_REG); /* set flow control low/high threshold to 1/3 / 2/3 */ - val = priv->rx_ring_size / 3; - enet_dma_writel(priv, val, ENETDMA_FLOWCL_REG(priv->rx_chan)); - val = (priv->rx_ring_size * 2) / 3; - enet_dma_writel(priv, val, ENETDMA_FLOWCH_REG(priv->rx_chan)); + if (!BCMCPU_IS_6345()) { + val = priv->rx_ring_size / 3; + enet_dma_writel(priv, val, ENETDMA_FLOWCL_REG(priv->rx_chan)); + val = (priv->rx_ring_size * 2) / 3; + enet_dma_writel(priv, val, ENETDMA_FLOWCH_REG(priv->rx_chan)); + } else { + enet_dma_writel(priv, 5, ENETDMA_6345_FC_REG(priv->rx_chan)); + enet_dma_writel(priv, priv->rx_ring_size, ENETDMA_6345_LEN_REG(priv->rx_chan)); + enet_dma_writel(priv, priv->tx_ring_size, ENETDMA_6345_LEN_REG(priv->tx_chan)); + } /* all set, enable mac and interrupts, start dma engine and * kick rx dma channel */ @@ -1014,27 +1099,57 @@ static int bcm_enet_open(struct net_devi val = enet_readl(priv, ENET_CTL_REG); val |= ENET_CTL_ENABLE_MASK; enet_writel(priv, val, ENET_CTL_REG); - 
enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG); - enet_dmac_writel(priv, ENETDMAC_CHANCFG_EN_MASK, - ENETDMAC_CHANCFG_REG(priv->rx_chan)); + if (!BCMCPU_IS_6345()) { + enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG); + enet_dmac_writel(priv, ENETDMAC_CHANCFG_EN_MASK, + ENETDMAC_CHANCFG_REG(priv->rx_chan)); + } else { + enet_dma_writel(priv, ENETDMA_6345_CHANCFG_EN_MASK | + ENETDMA_6345_CHAINING_MASK | + ENETDMA_6345_WRAP_EN_MASK | + ENETDMA_6345_FLOWC_EN_MASK, + ENETDMA_6345_CHANCFG_REG(priv->rx_chan)); + } /* watch "mib counters about to overflow" interrupt */ enet_writel(priv, ENET_IR_MIB, ENET_IR_REG); enet_writel(priv, ENET_IR_MIB, ENET_IRMASK_REG); /* watch "packet transferred" interrupt in rx and tx */ - enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK, - ENETDMAC_IR_REG(priv->rx_chan)); - enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK, - ENETDMAC_IR_REG(priv->tx_chan)); + if (!BCMCPU_IS_6345()) { + enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK, + ENETDMAC_IR_REG(priv->rx_chan)); + enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK, + ENETDMAC_IR_REG(priv->tx_chan)); + } else { + enet_dma_writel(priv, ENETDMA_IR_BUFDONE_MASK | + ENETDMA_IR_PKTDONE_MASK | + ENETDMA_IR_NOTOWNER_MASK, + ENETDMA_6345_IR_REG(priv->rx_chan)); + enet_dma_writel(priv, ENETDMA_IR_BUFDONE_MASK | + ENETDMA_IR_PKTDONE_MASK | + ENETDMA_IR_NOTOWNER_MASK, + ENETDMA_6345_IR_REG(priv->tx_chan)); + } /* make sure we enable napi before rx interrupt */ napi_enable(&priv->napi); - enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK, - ENETDMAC_IRMASK_REG(priv->rx_chan)); - enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK, - ENETDMAC_IRMASK_REG(priv->tx_chan)); + if (!BCMCPU_IS_6345()) { + enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK, + ENETDMAC_IRMASK_REG(priv->rx_chan)); + enet_dmac_writel(priv, ENETDMAC_IR_PKTDONE_MASK, + ENETDMAC_IRMASK_REG(priv->tx_chan)); + } else { + enet_dma_writel(priv, ENETDMA_IR_BUFDONE_MASK | + ENETDMA_IR_PKTDONE_MASK | + ENETDMA_IR_NOTOWNER_MASK, 
+ ENETDMA_6345_IRMASK_REG(priv->rx_chan)); + enet_dma_writel(priv, ENETDMA_IR_BUFDONE_MASK | + ENETDMA_IR_PKTDONE_MASK | + ENETDMA_IR_NOTOWNER_MASK, + ENETDMA_6345_IRMASK_REG(priv->tx_chan)); + } if (priv->has_phy) phy_start(priv->phydev); @@ -1111,13 +1226,19 @@ static void bcm_enet_disable_dma(struct { int limit; - enet_dmac_writel(priv, 0, ENETDMAC_CHANCFG_REG(chan)); + if (!BCMCPU_IS_6345()) + enet_dmac_writel(priv, 0, ENETDMAC_CHANCFG_REG(chan)); + else + enet_dma_writel(priv, 0, ENETDMA_6345_CHANCFG_REG(chan)); limit = 1000; do { u32 val; - val = enet_dmac_readl(priv, ENETDMAC_CHANCFG_REG(chan)); + if (!BCMCPU_IS_6345()) + val = enet_dmac_readl(priv, ENETDMAC_CHANCFG_REG(chan)); + else + val = enet_dma_readl(priv, ENETDMA_6345_CHANCFG_REG(chan)); if (!(val & ENETDMAC_CHANCFG_EN_MASK)) break; udelay(1); @@ -1144,8 +1265,13 @@ static int bcm_enet_stop(struct net_devi /* mask all interrupts */ enet_writel(priv, 0, ENET_IRMASK_REG); - enet_dmac_writel(priv, 0, ENETDMAC_IRMASK_REG(priv->rx_chan)); - enet_dmac_writel(priv, 0, ENETDMAC_IRMASK_REG(priv->tx_chan)); + if (!BCMCPU_IS_6345()) { + enet_dmac_writel(priv, 0, ENETDMAC_IRMASK_REG(priv->rx_chan)); + enet_dmac_writel(priv, 0, ENETDMAC_IRMASK_REG(priv->tx_chan)); + } else { + enet_dma_writel(priv, 0, ENETDMA_6345_IRMASK_REG(priv->rx_chan)); + enet_dma_writel(priv, 0, ENETDMA_6345_IRMASK_REG(priv->tx_chan)); + } /* make sure no mib update is scheduled */ cancel_work_sync(&priv->mib_update_task); @@ -1680,6 +1806,7 @@ static int __devinit bcm_enet_probe(stru struct mii_bus *bus; const char *clk_name; int i, ret; + unsigned int chan_offset = 0; /* stop if shared driver failed, assume driver->probe will be * called in the same order we register devices (correct ?) 
*/ @@ -1722,10 +1849,13 @@ static int __devinit bcm_enet_probe(stru priv->irq_tx = res_irq_tx->start; priv->mac_id = pdev->id; + if (BCMCPU_IS_6345()) + chan_offset = 1; + /* get rx & tx dma channel id for this mac */ if (priv->mac_id == 0) { - priv->rx_chan = 0; - priv->tx_chan = 1; + priv->rx_chan = 0 + chan_offset; + priv->tx_chan = 1 + chan_offset; clk_name = "enet0"; } else { priv->rx_chan = 2; --- a/drivers/net/ethernet/broadcom/bcm63xx_enet.h +++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.h @@ -47,6 +47,9 @@ struct bcm_enet_desc { #define DMADESC_ESOP_MASK (DMADESC_EOP_MASK | DMADESC_SOP_MASK) #define DMADESC_WRAP_MASK (1 << 12) +/* Shift down for EOP, SOP and WRAP bits */ +#define DMADESC_6345_SHIFT (3) + #define DMADESC_UNDER_MASK (1 << 9) #define DMADESC_APPEND_CRC (1 << 8) #define DMADESC_OVSIZE_MASK (1 << 4)