From 4c8d6ad4d00835073b97d6bacdc119a58ac22350 Mon Sep 17 00:00:00 2001
From: blogic
Date: Sun, 25 Mar 2012 08:50:09 +0000
Subject: [lantiq] bump kernel to 3.2.12

git-svn-id: svn://svn.openwrt.org/openwrt/trunk@31060 3c298f89-4303-0410-b956-a3cf2f4a3e73
---
 ...044-MIPS-NET-several-fixes-to-etop-driver.patch | 445 +++++++++++++++++++++
 1 file changed, 445 insertions(+)
 create mode 100644 target/linux/lantiq/patches-3.2/0044-MIPS-NET-several-fixes-to-etop-driver.patch

(limited to 'target/linux/lantiq/patches-3.2/0044-MIPS-NET-several-fixes-to-etop-driver.patch')

diff --git a/target/linux/lantiq/patches-3.2/0044-MIPS-NET-several-fixes-to-etop-driver.patch b/target/linux/lantiq/patches-3.2/0044-MIPS-NET-several-fixes-to-etop-driver.patch
new file mode 100644
index 000000000..dbb205be6
--- /dev/null
+++ b/target/linux/lantiq/patches-3.2/0044-MIPS-NET-several-fixes-to-etop-driver.patch
@@ -0,0 +1,445 @@
+From 06663beb0230c02d1962eca8d9f6709c2e852328 Mon Sep 17 00:00:00 2001
+From: John Crispin
+Date: Wed, 21 Mar 2012 18:14:06 +0100
+Subject: [PATCH 44/70] MIPS: NET: several fixes to etop driver
+
+---
+ drivers/net/ethernet/lantiq_etop.c |  208 +++++++++++++++++++-----------------
+ 1 files changed, 108 insertions(+), 100 deletions(-)
+
+diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c
+index a084d74..1a807d8 100644
+--- a/drivers/net/ethernet/lantiq_etop.c
++++ b/drivers/net/ethernet/lantiq_etop.c
+@@ -103,15 +103,6 @@
+ /* the newer xway socks have a embedded 3/7 port gbit multiplexer */
+ #define ltq_has_gbit()		(ltq_is_ar9() || ltq_is_vr9())
+ 
+-/* use 2 static channels for TX/RX
+-   depending on the SoC we need to use different DMA channels for ethernet */
+-#define LTQ_ETOP_TX_CHANNEL	1
+-#define LTQ_ETOP_RX_CHANNEL	((ltq_is_ase()) ? (5) : \
+-				((ltq_has_gbit()) ? (0) : (6)))
+-
+-#define IS_TX(x)		(x == LTQ_ETOP_TX_CHANNEL)
+-#define IS_RX(x)		(x == LTQ_ETOP_RX_CHANNEL)
+-
+ #define ltq_etop_r32(x)		ltq_r32(ltq_etop_membase + (x))
+ #define ltq_etop_w32(x, y)	ltq_w32(x, ltq_etop_membase + (y))
+ #define ltq_etop_w32_mask(x, y, z)	\
+@@ -128,8 +119,8 @@ static void __iomem *ltq_etop_membase;
+ static void __iomem *ltq_gbit_membase;
+ 
+ struct ltq_etop_chan {
+-	int idx;
+ 	int tx_free;
++	int irq;
+ 	struct net_device *netdev;
+ 	struct napi_struct napi;
+ 	struct ltq_dma_channel dma;
+@@ -144,8 +135,8 @@ struct ltq_etop_priv {
+ 	struct mii_bus *mii_bus;
+ 	struct phy_device *phydev;
+ 
+-	struct ltq_etop_chan ch[MAX_DMA_CHAN];
+-	int tx_free[MAX_DMA_CHAN >> 1];
++	struct ltq_etop_chan txch;
++	struct ltq_etop_chan rxch;
+ 
+ 	spinlock_t lock;
+ 
+@@ -206,8 +197,10 @@ ltq_etop_poll_rx(struct napi_struct *napi, int budget)
+ {
+ 	struct ltq_etop_chan *ch = container_of(napi,
+ 				struct ltq_etop_chan, napi);
++	struct ltq_etop_priv *priv = netdev_priv(ch->netdev);
+ 	int rx = 0;
+ 	int complete = 0;
++	unsigned long flags;
+ 
+ 	while ((rx < budget) && !complete) {
+ 		struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
+@@ -221,7 +214,9 @@ ltq_etop_poll_rx(struct napi_struct *napi, int budget)
+ 	}
+ 	if (complete || !rx) {
+ 		napi_complete(&ch->napi);
++		spin_lock_irqsave(&priv->lock, flags);
+ 		ltq_dma_ack_irq(&ch->dma);
++		spin_unlock_irqrestore(&priv->lock, flags);
+ 	}
+ 	return rx;
+ }
+@@ -233,7 +228,7 @@ ltq_etop_poll_tx(struct napi_struct *napi, int budget)
+ 		container_of(napi, struct ltq_etop_chan, napi);
+ 	struct ltq_etop_priv *priv = netdev_priv(ch->netdev);
+ 	struct netdev_queue *txq =
+-		netdev_get_tx_queue(ch->netdev, ch->idx >> 1);
++		netdev_get_tx_queue(ch->netdev, ch->dma.nr >> 1);
+ 	unsigned long flags;
+ 
+ 	spin_lock_irqsave(&priv->lock, flags);
+@@ -251,7 +246,9 @@ ltq_etop_poll_tx(struct napi_struct *napi, int budget)
+ 	if (netif_tx_queue_stopped(txq))
+ 		netif_tx_start_queue(txq);
+ 	napi_complete(&ch->napi);
++	spin_lock_irqsave(&priv->lock, flags);
+ 	ltq_dma_ack_irq(&ch->dma);
++	spin_unlock_irqrestore(&priv->lock, flags);
+ 	return 1;
+ }
+ 
+@@ -259,9 +256,10 @@ static irqreturn_t
+ ltq_etop_dma_irq(int irq, void *_priv)
+ {
+ 	struct ltq_etop_priv *priv = _priv;
+-	int ch = irq - LTQ_DMA_ETOP;
+-
+-	napi_schedule(&priv->ch[ch].napi);
++	if (irq == priv->txch.dma.irq)
++		napi_schedule(&priv->txch.napi);
++	else
++		napi_schedule(&priv->rxch.napi);
+ 	return IRQ_HANDLED;
+ }
+ 
+@@ -273,7 +271,7 @@ ltq_etop_free_channel(struct net_device *dev, struct ltq_etop_chan *ch)
+ 	ltq_dma_free(&ch->dma);
+ 	if (ch->dma.irq)
+ 		free_irq(ch->dma.irq, priv);
+-	if (IS_RX(ch->idx)) {
++	if (ch == &priv->txch) {
+ 		int desc;
+ 		for (desc = 0; desc < LTQ_DESC_NUM; desc++)
+ 			dev_kfree_skb_any(ch->skb[ch->dma.desc]);
+@@ -284,7 +282,6 @@ static void
+ ltq_etop_hw_exit(struct net_device *dev)
+ {
+ 	struct ltq_etop_priv *priv = netdev_priv(dev);
+-	int i;
+ 
+ 	clk_disable(priv->clk_ppe);
+ 
+@@ -296,9 +293,8 @@ ltq_etop_hw_exit(struct net_device *dev)
+ 		clk_disable(priv->clk_ephycgu);
+ 	}
+ 
+-	for (i = 0; i < MAX_DMA_CHAN; i++)
+-		if (IS_TX(i) || IS_RX(i))
+-			ltq_etop_free_channel(dev, &priv->ch[i]);
++	ltq_etop_free_channel(dev, &priv->txch);
++	ltq_etop_free_channel(dev, &priv->rxch);
+ }
+ 
+ static void
+@@ -326,8 +322,6 @@ ltq_etop_hw_init(struct net_device *dev)
+ {
+ 	struct ltq_etop_priv *priv = netdev_priv(dev);
+ 	unsigned int mii_mode = priv->pldata->mii_mode;
+-	int err = 0;
+-	int i;
+ 
+ 	clk_enable(priv->clk_ppe);
+ 
+@@ -369,31 +363,50 @@ ltq_etop_hw_init(struct net_device *dev)
+ 	/* enable crc generation */
+ 	ltq_etop_w32(PPE32_CGEN, LQ_PPE32_ENET_MAC_CFG);
+ 
++	return 0;
++}
++
++static int
++ltq_etop_dma_init(struct net_device *dev)
++{
++	struct ltq_etop_priv *priv = netdev_priv(dev);
++	int tx = 1;
++	int rx = ((ltq_is_ase()) ? (5) : \
++		((ltq_is_ar9()) ? (0) : (6)));
++	int tx_irq = LTQ_DMA_ETOP + tx;
++	int rx_irq = LTQ_DMA_ETOP + rx;
++	int err;
++
+ 	ltq_dma_init_port(DMA_PORT_ETOP);
+ 
+-	for (i = 0; i < MAX_DMA_CHAN && !err; i++) {
+-		int irq = LTQ_DMA_ETOP + i;
+-		struct ltq_etop_chan *ch = &priv->ch[i];
+-
+-		ch->idx = ch->dma.nr = i;
+-
+-		if (IS_TX(i)) {
+-			ltq_dma_alloc_tx(&ch->dma);
+-			err = request_irq(irq, ltq_etop_dma_irq, IRQF_DISABLED,
+-				"etop_tx", priv);
+-		} else if (IS_RX(i)) {
+-			ltq_dma_alloc_rx(&ch->dma);
+-			for (ch->dma.desc = 0; ch->dma.desc < LTQ_DESC_NUM;
+-					ch->dma.desc++)
+-				if (ltq_etop_alloc_skb(ch))
+-					err = -ENOMEM;
+-			ch->dma.desc = 0;
+-			err = request_irq(irq, ltq_etop_dma_irq, IRQF_DISABLED,
+-				"etop_rx", priv);
++	priv->txch.dma.nr = tx;
++	ltq_dma_alloc_tx(&priv->txch.dma);
++	err = request_irq(tx_irq, ltq_etop_dma_irq, IRQF_DISABLED,
++		"eth_tx", priv);
++	if (err) {
++		netdev_err(dev, "failed to allocate tx irq\n");
++		goto err_out;
++	}
++	priv->txch.dma.irq = tx_irq;
++
++	priv->rxch.dma.nr = rx;
++	ltq_dma_alloc_rx(&priv->rxch.dma);
++	for (priv->rxch.dma.desc = 0; priv->rxch.dma.desc < LTQ_DESC_NUM;
++			priv->rxch.dma.desc++) {
++		if (ltq_etop_alloc_skb(&priv->rxch)) {
++			netdev_err(dev, "failed to allocate skbs\n");
++			err = -ENOMEM;
++			goto err_out;
+ 		}
+-		if (!err)
+-			ch->dma.irq = irq;
+ 	}
++	priv->rxch.dma.desc = 0;
++	err = request_irq(rx_irq, ltq_etop_dma_irq, IRQF_DISABLED,
++		"eth_rx", priv);
++	if (err)
++		netdev_err(dev, "failed to allocate rx irq\n");
++	else
++		priv->rxch.dma.irq = rx_irq;
++err_out:
+ 	return err;
+ }
+ 
+@@ -410,7 +423,10 @@ ltq_etop_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+ {
+ 	struct ltq_etop_priv *priv = netdev_priv(dev);
+ 
+-	return phy_ethtool_gset(priv->phydev, cmd);
++	if (priv->phydev)
++		return phy_ethtool_gset(priv->phydev, cmd);
++	else
++		return 0;
+ }
+ 
+ static int
+@@ -418,7 +434,10 @@ ltq_etop_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+ {
+ 	struct ltq_etop_priv *priv = netdev_priv(dev);
+ 
+-	return phy_ethtool_sset(priv->phydev, cmd);
++	if (priv->phydev)
++		return phy_ethtool_sset(priv->phydev, cmd);
++	else
++		return 0;
+ }
+ 
+ static int
+@@ -426,7 +445,10 @@ ltq_etop_nway_reset(struct net_device *dev)
+ {
+ 	struct ltq_etop_priv *priv = netdev_priv(dev);
+ 
+-	return phy_start_aneg(priv->phydev);
++	if (priv->phydev)
++		return phy_start_aneg(priv->phydev);
++	else
++		return 0;
+ }
+ 
+ static const struct ethtool_ops ltq_etop_ethtool_ops = {
+@@ -618,18 +640,19 @@ static int
+ ltq_etop_open(struct net_device *dev)
+ {
+ 	struct ltq_etop_priv *priv = netdev_priv(dev);
+-	int i;
++	unsigned long flags;
+ 
+-	for (i = 0; i < MAX_DMA_CHAN; i++) {
+-		struct ltq_etop_chan *ch = &priv->ch[i];
++	napi_enable(&priv->txch.napi);
++	napi_enable(&priv->rxch.napi);
++
++	spin_lock_irqsave(&priv->lock, flags);
++	ltq_dma_open(&priv->txch.dma);
++	ltq_dma_open(&priv->rxch.dma);
++	spin_unlock_irqrestore(&priv->lock, flags);
+ 
+-		if (!IS_TX(i) && (!IS_RX(i)))
+-			continue;
+-		ltq_dma_open(&ch->dma);
+-		napi_enable(&ch->napi);
+-	}
+ 	if (priv->phydev)
+ 		phy_start(priv->phydev);
++
+ 	netif_tx_start_all_queues(dev);
+ 	return 0;
+ }
+@@ -638,19 +661,19 @@ static int
+ ltq_etop_stop(struct net_device *dev)
+ {
+ 	struct ltq_etop_priv *priv = netdev_priv(dev);
+-	int i;
++	unsigned long flags;
+ 
+ 	netif_tx_stop_all_queues(dev);
+ 	if (priv->phydev)
+ 		phy_stop(priv->phydev);
+-	for (i = 0; i < MAX_DMA_CHAN; i++) {
+-		struct ltq_etop_chan *ch = &priv->ch[i];
++	napi_disable(&priv->txch.napi);
++	napi_disable(&priv->rxch.napi);
++
++	spin_lock_irqsave(&priv->lock, flags);
++	ltq_dma_close(&priv->txch.dma);
++	ltq_dma_close(&priv->rxch.dma);
++	spin_unlock_irqrestore(&priv->lock, flags);
+ 
+-		if (!IS_RX(i) && !IS_TX(i))
+-			continue;
+-		napi_disable(&ch->napi);
+-		ltq_dma_close(&ch->dma);
+-	}
+ 	return 0;
+ }
+ 
+@@ -660,16 +683,16 @@ ltq_etop_tx(struct sk_buff *skb, struct net_device *dev)
+ 	int queue = skb_get_queue_mapping(skb);
+ 	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue);
+ 	struct ltq_etop_priv *priv = netdev_priv(dev);
+-	struct ltq_etop_chan *ch = &priv->ch[(queue << 1) | 1];
+-	struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
++	struct ltq_dma_desc *desc =
++		&priv->txch.dma.desc_base[priv->txch.dma.desc];
+ 	unsigned long flags;
+ 	u32 byte_offset;
+ 	int len;
+ 
+ 	len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;
+ 
+-	if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) || ch->skb[ch->dma.desc]) {
+-		dev_kfree_skb_any(skb);
++	if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) ||
++			priv->txch.skb[priv->txch.dma.desc]) {
+ 		netdev_err(dev, "tx ring full\n");
+ 		netif_tx_stop_queue(txq);
+ 		return NETDEV_TX_BUSY;
+@@ -677,7 +700,7 @@ ltq_etop_tx(struct sk_buff *skb, struct net_device *dev)
+ 
+ 	/* dma needs to start on a 16 byte aligned address */
+ 	byte_offset = CPHYSADDR(skb->data) % 16;
+-	ch->skb[ch->dma.desc] = skb;
++	priv->txch.skb[priv->txch.dma.desc] = skb;
+ 
+ 	dev->trans_start = jiffies;
+ 
+@@ -687,11 +710,11 @@ ltq_etop_tx(struct sk_buff *skb, struct net_device *dev)
+ 	wmb();
+ 	desc->ctl = LTQ_DMA_OWN | LTQ_DMA_SOP | LTQ_DMA_EOP |
+ 		LTQ_DMA_TX_OFFSET(byte_offset) | (len & LTQ_DMA_SIZE_MASK);
+-	ch->dma.desc++;
+-	ch->dma.desc %= LTQ_DESC_NUM;
++	priv->txch.dma.desc++;
++	priv->txch.dma.desc %= LTQ_DESC_NUM;
+ 	spin_unlock_irqrestore(&priv->lock, flags);
+ 
+-	if (ch->dma.desc_base[ch->dma.desc].ctl & LTQ_DMA_OWN)
++	if (priv->txch.dma.desc_base[priv->txch.dma.desc].ctl & LTQ_DMA_OWN)
+ 		netif_tx_stop_queue(txq);
+ 
+ 	return NETDEV_TX_OK;
+@@ -776,6 +799,10 @@ ltq_etop_init(struct net_device *dev)
+ 	err = ltq_etop_hw_init(dev);
+ 	if (err)
+ 		goto err_hw;
++	err = ltq_etop_dma_init(dev);
++	if (err)
++		goto err_hw;
++
+ 	ltq_etop_change_mtu(dev, 1500);
+ 
+ 	memcpy(&mac, &priv->pldata->mac, sizeof(struct sockaddr));
+@@ -811,6 +838,9 @@ ltq_etop_tx_timeout(struct net_device *dev)
+ 	err = ltq_etop_hw_init(dev);
+ 	if (err)
+ 		goto err_hw;
++	err = ltq_etop_dma_init(dev);
++	if (err)
++		goto err_hw;
+ 	dev->trans_start = jiffies;
+ 	netif_wake_queue(dev);
+ 	return;
+@@ -834,14 +864,13 @@ static const struct net_device_ops ltq_eth_netdev_ops = {
+ 	.ndo_tx_timeout = ltq_etop_tx_timeout,
+ };
+ 
+-static int __init
++static int __devinit
+ ltq_etop_probe(struct platform_device *pdev)
+ {
+ 	struct net_device *dev;
+ 	struct ltq_etop_priv *priv;
+ 	struct resource *res, *gbit_res;
+ 	int err;
+-	int i;
+ 
+ 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ 	if (!res) {
+@@ -917,15 +946,10 @@ ltq_etop_probe(struct platform_device *pdev)
+ 
+ 	spin_lock_init(&priv->lock);
+ 
+-	for (i = 0; i < MAX_DMA_CHAN; i++) {
+-		if (IS_TX(i))
+-			netif_napi_add(dev, &priv->ch[i].napi,
+-				ltq_etop_poll_tx, 8);
+-		else if (IS_RX(i))
+-			netif_napi_add(dev, &priv->ch[i].napi,
+-				ltq_etop_poll_rx, 32);
+-		priv->ch[i].netdev = dev;
+-	}
++	netif_napi_add(dev, &priv->txch.napi, ltq_etop_poll_tx, 8);
++	netif_napi_add(dev, &priv->rxch.napi, ltq_etop_poll_rx, 32);
++	priv->txch.netdev = dev;
++	priv->rxch.netdev = dev;
+ 
+ 	err = register_netdev(dev);
+ 	if (err)
+@@ -955,6 +979,7 @@ ltq_etop_remove(struct platform_device *pdev)
+ }
+ 
+ static struct platform_driver ltq_mii_driver = {
++	.probe = ltq_etop_probe,
+ 	.remove = __devexit_p(ltq_etop_remove),
+ 	.driver = {
+ 		.name = "ltq_etop",
+@@ -962,24 +987,7 @@ static struct platform_driver ltq_mii_driver = {
+ 	},
+ };
+ 
+-int __init
+-init_ltq_etop(void)
+-{
+-	int ret = platform_driver_probe(&ltq_mii_driver, ltq_etop_probe);
+-
+-	if (ret)
+-		pr_err("ltq_etop: Error registering platfom driver!");
+-	return ret;
+-}
+-
+-static void __exit
+-exit_ltq_etop(void)
+-{
+-	platform_driver_unregister(&ltq_mii_driver);
+-}
+-
+-module_init(init_ltq_etop);
+-module_exit(exit_ltq_etop);
++module_platform_driver(ltq_mii_driver);
+ 
+ MODULE_AUTHOR("John Crispin ");
+ MODULE_DESCRIPTION("Lantiq SoC ETOP");
+-- 
+1.7.7.1
+
-- 
cgit v1.2.3