author      florian <florian@3c298f89-4303-0410-b956-a3cf2f4a3e73>    2010-01-31 21:00:50 +0000
committer   florian <florian@3c298f89-4303-0410-b956-a3cf2f4a3e73>    2010-01-31 21:00:50 +0000
commit      aa058f66664b53ab0314ed0591b65a1e66a988bf (patch)
tree        d1c96d3d47d05d91e1c8739777e8cc30b038c3b9 /target/linux/brcm63xx/files-2.6.30/drivers
parent      e2ed0595652a45beb67f7542c450373a57e20cff (diff)
[brcm63xx] move files to files-2.6.30, to ease newer kernel integration
git-svn-id: svn://svn.openwrt.org/openwrt/trunk@19471 3c298f89-4303-0410-b956-a3cf2f4a3e73
Diffstat (limited to 'target/linux/brcm63xx/files-2.6.30/drivers')
11 files changed, 5379 insertions(+), 0 deletions(-)
diff --git a/target/linux/brcm63xx/files-2.6.30/drivers/mtd/maps/bcm963xx-flash.c b/target/linux/brcm63xx/files-2.6.30/drivers/mtd/maps/bcm963xx-flash.c new file mode 100644 index 000000000..e24a08110 --- /dev/null +++ b/target/linux/brcm63xx/files-2.6.30/drivers/mtd/maps/bcm963xx-flash.c @@ -0,0 +1,399 @@ +/* + * Copyright (C) 2006-2008 Florian Fainelli <florian@openwrt.org> + * Mike Albon <malbon@openwrt.org> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/mtd/map.h> +#include <linux/mtd/mtd.h> +#include <linux/mtd/partitions.h> +#include <linux/vmalloc.h> +#include <linux/platform_device.h> + +#include <bcm_tag.h> +#include <asm/io.h> + +#define BUSWIDTH 2 /* Buswidth */ +#define EXTENDED_SIZE 0xBFC00000 /* Extended flash address */ + +#define PFX KBUILD_MODNAME ": " + +extern int parse_redboot_partitions(struct mtd_info *master, struct mtd_partition **pparts, unsigned long fis_origin); +static struct mtd_partition *parsed_parts; + +static struct mtd_info *bcm963xx_mtd_info; + +static struct map_info bcm963xx_map = { + .name = "bcm963xx", + .bankwidth = BUSWIDTH, +}; + +static struct tagiddesc_t tagidtab[NUM_TAGID] = TAGID_DEFINITIONS; + +static uint32_t tagcrc32tab[256] = { + 0x00000000, 0x77073096, 0xEE0E612C, 0x990951BA, 0x076DC419, 0x706AF48F, 0xE963A535, 0x9E6495A3, + 0x0EDB8832, 0x79DCB8A4, 0xE0D5E91E, 0x97D2D988, 0x09B64C2B, 0x7EB17CBD, 0xE7B82D07, 0x90BF1D91, + 0x1DB71064, 0x6AB020F2, 0xF3B97148, 0x84BE41DE, 0x1ADAD47D, 0x6DDDE4EB, 0xF4D4B551, 0x83D385C7, + 0x136C9856, 0x646BA8C0, 0xFD62F97A, 0x8A65C9EC, 0x14015C4F, 0x63066CD9, 0xFA0F3D63, 0x8D080DF5, + 0x3B6E20C8, 0x4C69105E, 0xD56041E4, 0xA2677172, 0x3C03E4D1, 0x4B04D447, 0xD20D85FD, 0xA50AB56B, + 0x35B5A8FA, 0x42B2986C, 0xDBBBC9D6, 0xACBCF940, 0x32D86CE3, 0x45DF5C75, 0xDCD60DCF, 0xABD13D59, + 0x26D930AC, 0x51DE003A, 0xC8D75180, 0xBFD06116, 0x21B4F4B5, 0x56B3C423, 0xCFBA9599, 0xB8BDA50F, + 0x2802B89E, 0x5F058808, 0xC60CD9B2, 0xB10BE924, 0x2F6F7C87, 0x58684C11, 0xC1611DAB, 0xB6662D3D, + 0x76DC4190, 0x01DB7106, 0x98D220BC, 0xEFD5102A, 0x71B18589, 0x06B6B51F, 0x9FBFE4A5, 0xE8B8D433, + 0x7807C9A2, 0x0F00F934, 0x9609A88E, 0xE10E9818, 0x7F6A0DBB, 0x086D3D2D, 0x91646C97, 0xE6635C01, + 0x6B6B51F4, 0x1C6C6162, 0x856530D8, 0xF262004E, 0x6C0695ED, 0x1B01A57B, 0x8208F4C1, 0xF50FC457, + 0x65B0D9C6, 0x12B7E950, 0x8BBEB8EA, 0xFCB9887C, 0x62DD1DDF, 0x15DA2D49, 0x8CD37CF3, 0xFBD44C65, + 0x4DB26158, 0x3AB551CE, 0xA3BC0074, 0xD4BB30E2, 0x4ADFA541, 0x3DD895D7, 0xA4D1C46D, 0xD3D6F4FB, + 0x4369E96A, 0x346ED9FC, 0xAD678846, 0xDA60B8D0, 0x44042D73, 0x33031DE5, 0xAA0A4C5F, 0xDD0D7CC9, + 0x5005713C, 0x270241AA, 0xBE0B1010, 0xC90C2086, 0x5768B525, 0x206F85B3, 0xB966D409, 0xCE61E49F, + 0x5EDEF90E, 0x29D9C998, 0xB0D09822, 0xC7D7A8B4, 0x59B33D17, 0x2EB40D81, 0xB7BD5C3B, 0xC0BA6CAD, + 0xEDB88320, 0x9ABFB3B6, 
0x03B6E20C, 0x74B1D29A, 0xEAD54739, 0x9DD277AF, 0x04DB2615, 0x73DC1683, + 0xE3630B12, 0x94643B84, 0x0D6D6A3E, 0x7A6A5AA8, 0xE40ECF0B, 0x9309FF9D, 0x0A00AE27, 0x7D079EB1, + 0xF00F9344, 0x8708A3D2, 0x1E01F268, 0x6906C2FE, 0xF762575D, 0x806567CB, 0x196C3671, 0x6E6B06E7, + 0xFED41B76, 0x89D32BE0, 0x10DA7A5A, 0x67DD4ACC, 0xF9B9DF6F, 0x8EBEEFF9, 0x17B7BE43, 0x60B08ED5, + 0xD6D6A3E8, 0xA1D1937E, 0x38D8C2C4, 0x4FDFF252, 0xD1BB67F1, 0xA6BC5767, 0x3FB506DD, 0x48B2364B, + 0xD80D2BDA, 0xAF0A1B4C, 0x36034AF6, 0x41047A60, 0xDF60EFC3, 0xA867DF55, 0x316E8EEF, 0x4669BE79, + 0xCB61B38C, 0xBC66831A, 0x256FD2A0, 0x5268E236, 0xCC0C7795, 0xBB0B4703, 0x220216B9, 0x5505262F, + 0xC5BA3BBE, 0xB2BD0B28, 0x2BB45A92, 0x5CB36A04, 0xC2D7FFA7, 0xB5D0CF31, 0x2CD99E8B, 0x5BDEAE1D, + 0x9B64C2B0, 0xEC63F226, 0x756AA39C, 0x026D930A, 0x9C0906A9, 0xEB0E363F, 0x72076785, 0x05005713, + 0x95BF4A82, 0xE2B87A14, 0x7BB12BAE, 0x0CB61B38, 0x92D28E9B, 0xE5D5BE0D, 0x7CDCEFB7, 0x0BDBDF21, + 0x86D3D2D4, 0xF1D4E242, 0x68DDB3F8, 0x1FDA836E, 0x81BE16CD, 0xF6B9265B, 0x6FB077E1, 0x18B74777, + 0x88085AE6, 0xFF0F6A70, 0x66063BCA, 0x11010B5C, 0x8F659EFF, 0xF862AE69, 0x616BFFD3, 0x166CCF45, + 0xA00AE278, 0xD70DD2EE, 0x4E048354, 0x3903B3C2, 0xA7672661, 0xD06016F7, 0x4969474D, 0x3E6E77DB, + 0xAED16A4A, 0xD9D65ADC, 0x40DF0B66, 0x37D83BF0, 0xA9BCAE53, 0xDEBB9EC5, 0x47B2CF7F, 0x30B5FFE9, + 0xBDBDF21C, 0xCABAC28A, 0x53B39330, 0x24B4A3A6, 0xBAD03605, 0xCDD70693, 0x54DE5729, 0x23D967BF, + 0xB3667A2E, 0xC4614AB8, 0x5D681B02, 0x2A6F2B94, 0xB40BBE37, 0xC30C8EA1, 0x5A05DF1B, 0x2D02EF8D +}; + +static uint32_t tagcrc32(uint32_t crc, uint8_t *data, size_t len) +{ + while (len--) + crc = (crc >> 8) ^ tagcrc32tab[(crc ^ *data++) & 0xFF]; + + return crc; +} + +static int parse_cfe_partitions( struct mtd_info *master, struct mtd_partition **pparts) +{ + int nrparts = 3, curpart = 0; /* CFE,NVRAM and global LINUX are always present. */ + union bcm_tag *buf; + struct mtd_partition *parts; + int ret; + size_t retlen; + unsigned int rootfsaddr, kerneladdr, spareaddr; + unsigned int rootfslen, kernellen, sparelen, totallen; + unsigned char *tagid; + int namelen = 0; + int i; + uint32_t tagidcrc; + uint32_t calctagidcrc; + bool tagid_match = false; + char *boardid; + char *tagversion; + char *matchtagid; + + /* Allocate memory for buffer */ + buf = vmalloc(sizeof(union bcm_tag)); + if (!buf) + return -ENOMEM; + + /* Get the tag */ + ret = master->read(master,master->erasesize,sizeof(union bcm_tag), &retlen, (void *)buf); + if (retlen != sizeof(union bcm_tag)){ + vfree(buf); + return -EIO; + } + + /* tagId isn't in the same location, so we check each tagid against the + * tagid CRC. 
If the CRC is valid we have found the right tag and so + * use that tag + */ + + for (i = 0; i < NUM_TAGID; i++) { + switch(i) { + case 0: + matchtagid = "bccfe"; + tagid = &(buf->bccfe.tagId[0]); + sscanf(buf->bccfe.rootAddress,"%u", &rootfsaddr); + sscanf(buf->bccfe.rootLength, "%u", &rootfslen); + sscanf(buf->bccfe.kernelAddress, "%u", &kerneladdr); + sscanf(buf->bccfe.kernelLength, "%u", &kernellen); + sscanf(buf->bccfe.totalLength, "%u", &totallen); + tagidcrc = *(uint32_t *)&(buf->bccfe.tagIdCRC[0]); + tagversion = &(buf->bccfe.tagVersion[0]); + boardid = &(buf->bccfe.boardid[0]); + break; + case 1: + matchtagid = "bc300"; + tagid = &(buf->bc300.tagId[0]); + sscanf(buf->bc300.rootAddress,"%u", &rootfsaddr); + sscanf(buf->bc300.rootLength, "%u", &rootfslen); + sscanf(buf->bc300.kernelAddress, "%u", &kerneladdr); + sscanf(buf->bc300.kernelLength, "%u", &kernellen); + sscanf(buf->bc300.totalLength, "%u", &totallen); + tagidcrc = *(uint32_t *)&(buf->bc300.tagIdCRC[0]); + tagversion = &(buf->bc300.tagVersion[0]); + boardid = &(buf->bc300.boardid[0]); + break; + case 2: + matchtagid = "ag306"; + tagid = &(buf->ag306.tagId[0]); + sscanf(buf->ag306.rootAddress,"%u", &rootfsaddr); + sscanf(buf->ag306.rootLength, "%u", &rootfslen); + sscanf(buf->ag306.kernelAddress, "%u", &kerneladdr); + sscanf(buf->ag306.kernelLength, "%u", &kernellen); + sscanf(buf->ag306.totalLength, "%u", &totallen); + tagidcrc = *(uint32_t *)&(buf->ag306.tagIdCRC[0]); + tagversion = &(buf->ag306.tagVersion[0]); + boardid = &(buf->ag306.boardid[0]); + break; + case 3: + matchtagid = "bc221"; + tagid = &(buf->bc221.tagId[0]); + sscanf(buf->bc221.rootAddress,"%u", &rootfsaddr); + sscanf(buf->bc221.rootLength, "%u", &rootfslen); + sscanf(buf->bc221.kernelAddress, "%u", &kerneladdr); + sscanf(buf->bc221.kernelLength, "%u", &kernellen); + sscanf(buf->bc221.totalLength, "%u", &totallen); + tagidcrc = *(uint32_t *)&(buf->bc221.tagIdCRC[0]); + tagversion = &(buf->bc221.tagVersion[0]); + boardid = &(buf->bc221.boardid[0]); + break; + case 4: + matchtagid = "bc310"; + tagid = &(buf->bc310.tagId[0]); + sscanf(buf->bc310.rootAddress,"%u", &rootfsaddr); + sscanf(buf->bc310.rootLength, "%u", &rootfslen); + sscanf(buf->bc310.kernelAddress, "%u", &kerneladdr); + sscanf(buf->bc310.kernelLength, "%u", &kernellen); + sscanf(buf->bc310.totalLength, "%u", &totallen); + tagidcrc = *(uint32_t *)&(buf->bc310.tagIdCRC[0]); + tagversion = &(buf->bc310.tagVersion[0]); + boardid = &(buf->bc310.boardid[0]); + break; + } + if (strncmp(tagid, matchtagid, TAGID_LEN) != 0) { + continue; + } + + calctagidcrc = htonl(tagcrc32(IMAGETAG_CRC_START, tagid, TAGID_LEN)); + if (tagidcrc == calctagidcrc) { + tagid_match = true; + break; + } + } + + if (!tagid_match) { + tagid = "bcram"; + sscanf(buf->bccfe.rootAddress,"%u", &rootfsaddr); + sscanf(buf->bccfe.rootLength, "%u", &rootfslen); + sscanf(buf->bccfe.kernelAddress, "%u", &kerneladdr); + sscanf(buf->bccfe.kernelLength, "%u", &kernellen); + sscanf(buf->bccfe.totalLength, "%u", &totallen); + tagidcrc = *(uint32_t *)&(buf->bccfe.tagIdCRC[0]); + tagversion = &(buf->bccfe.tagVersion[0]); + boardid = &(buf->bccfe.boardid[0]); + } + + printk(KERN_INFO PFX "CFE boot tag found with version %s, board type %s, and tagid %s.\n",tagversion,boardid,tagid); + + rootfsaddr = rootfsaddr - EXTENDED_SIZE; + kerneladdr = kerneladdr - EXTENDED_SIZE; + spareaddr = roundup(totallen,master->erasesize) + master->erasesize; + sparelen = master->size - spareaddr - master->erasesize; + + /* Determine number of partitions */ + namelen = 
8; + if (rootfslen > 0){ + nrparts++; + namelen =+ 6; + }; + if (kernellen > 0) { + nrparts++; + namelen =+ 6; + }; + + /* Ask kernel for more memory */ + parts = kzalloc(sizeof(*parts) * nrparts + 10 * nrparts, GFP_KERNEL); + if (!parts) { + vfree(buf); + return -ENOMEM; + }; + + /* Start building partition list */ + parts[curpart].name = "CFE"; + parts[curpart].offset = 0; + parts[curpart].size = master->erasesize; + curpart++; + + if (kernellen > 0) { + parts[curpart].name = "kernel"; + parts[curpart].offset = kerneladdr; + parts[curpart].size = kernellen; + curpart++; + }; + + if (rootfslen > 0) { + parts[curpart].name = "rootfs"; + parts[curpart].offset = rootfsaddr; + parts[curpart].size = rootfslen; + if (sparelen > 0) + parts[curpart].size += sparelen; + curpart++; + }; + + parts[curpart].name = "nvram"; + parts[curpart].offset = master->size - master->erasesize; + parts[curpart].size = master->erasesize; + + /* Global partition "linux" to make easy firmware upgrade */ + curpart++; + parts[curpart].name = "linux"; + parts[curpart].offset = parts[0].size; + parts[curpart].size = master->size - parts[0].size - parts[3].size; + + for (i = 0; i < nrparts; i++) + printk(KERN_INFO PFX "Partition %d is %s offset %llx and length %llx\n", i, parts[i].name, parts[i].offset, parts[i].size); + + printk(KERN_INFO PFX "Spare partition is %x offset and length %x\n", spareaddr, sparelen); + *pparts = parts; + vfree(buf); + + return nrparts; +}; + +static int bcm963xx_detect_cfe(struct mtd_info *master) +{ + int idoffset = 0x4e0; + static char idstring[8] = "CFE1CFE1"; + char buf[9]; + int ret; + size_t retlen; + + ret = master->read(master, idoffset, 8, &retlen, (void *)buf); + buf[retlen] = 0; + printk(KERN_INFO PFX "Read Signature value of %s\n", buf); + + return strncmp(idstring, buf, 8); +} + +static int bcm963xx_probe(struct platform_device *pdev) +{ + int err = 0; + int parsed_nr_parts = 0; + char *part_type; + struct resource *r; + + r = platform_get_resource(pdev, IORESOURCE_MEM, 0); + bcm963xx_map.phys = r->start; + bcm963xx_map.size = (r->end - r->start) + 1; + bcm963xx_map.virt = ioremap(r->start, r->end - r->start + 1); + + if (!bcm963xx_map.virt) { + printk(KERN_ERR PFX "Failed to ioremap\n"); + return -EIO; + } + printk(KERN_INFO PFX "0x%08lx at 0x%08x\n", bcm963xx_map.size, bcm963xx_map.phys); + + simple_map_init(&bcm963xx_map); + + bcm963xx_mtd_info = do_map_probe("cfi_probe", &bcm963xx_map); + if (!bcm963xx_mtd_info) { + printk(KERN_ERR PFX "Failed to probe using CFI\n"); + err = -EIO; + goto err_probe; + } + + bcm963xx_mtd_info->owner = THIS_MODULE; + + /* This is mutually exclusive */ + if (bcm963xx_detect_cfe(bcm963xx_mtd_info) == 0) { + printk(KERN_INFO PFX "CFE bootloader detected\n"); + if (parsed_nr_parts == 0) { + int ret = parse_cfe_partitions(bcm963xx_mtd_info, &parsed_parts); + if (ret > 0) { + part_type = "CFE"; + parsed_nr_parts = ret; + } + } + } else { + printk(KERN_INFO PFX "assuming RedBoot bootloader\n"); + if (bcm963xx_mtd_info->size > 0x00400000) { + printk(KERN_INFO PFX "Support for extended flash memory size : 0x%llx ; ONLY 64MBIT SUPPORT\n", bcm963xx_mtd_info->size); + bcm963xx_map.virt = (u32)(EXTENDED_SIZE); + } + +#ifdef CONFIG_MTD_REDBOOT_PARTS + if (parsed_nr_parts == 0) { + int ret = parse_redboot_partitions(bcm963xx_mtd_info, &parsed_parts, 0); + if (ret > 0) { + part_type = "RedBoot"; + parsed_nr_parts = ret; + } + } +#endif + } + + return add_mtd_partitions(bcm963xx_mtd_info, parsed_parts, parsed_nr_parts); + +err_probe: + 
iounmap(bcm963xx_map.virt); + return err; +} + +static int bcm963xx_remove(struct platform_device *pdev) +{ + if (bcm963xx_mtd_info) { + del_mtd_partitions(bcm963xx_mtd_info); + map_destroy(bcm963xx_mtd_info); + } + + if (bcm963xx_map.virt) { + iounmap(bcm963xx_map.virt); + bcm963xx_map.virt = 0; + } + + return 0; +} + +static struct platform_driver bcm63xx_mtd_dev = { + .probe = bcm963xx_probe, + .remove = bcm963xx_remove, + .driver = { + .name = "bcm963xx-flash", + .owner = THIS_MODULE, + }, +}; + +static int __init bcm963xx_mtd_init(void) +{ + return platform_driver_register(&bcm63xx_mtd_dev); +} + +static void __exit bcm963xx_mtd_exit(void) +{ + platform_driver_unregister(&bcm63xx_mtd_dev); +} + +module_init(bcm963xx_mtd_init); +module_exit(bcm963xx_mtd_exit); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("Broadcom BCM63xx MTD partition parser/mapping for CFE and RedBoot"); +MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>"); +MODULE_AUTHOR("Mike Albon <malbon@openwrt.org>"); diff --git a/target/linux/brcm63xx/files-2.6.30/drivers/net/bcm63xx_enet.c b/target/linux/brcm63xx/files-2.6.30/drivers/net/bcm63xx_enet.c new file mode 100644 index 000000000..d338bbc08 --- /dev/null +++ b/target/linux/brcm63xx/files-2.6.30/drivers/net/bcm63xx_enet.c @@ -0,0 +1,1964 @@ +/* + * Driver for BCM963xx builtin Ethernet mac + * + * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ */ +#include <linux/init.h> +#include <linux/module.h> +#include <linux/clk.h> +#include <linux/etherdevice.h> +#include <linux/delay.h> +#include <linux/ethtool.h> +#include <linux/crc32.h> +#include <linux/err.h> +#include <linux/dma-mapping.h> +#include <linux/platform_device.h> +#include <linux/if_vlan.h> +#include <linux/version.h> + +#include <bcm63xx_dev_enet.h> +#include "bcm63xx_enet.h" + +static char bcm_enet_driver_name[] = "bcm63xx_enet"; +static char bcm_enet_driver_version[] = "1.0"; + +static int copybreak __read_mostly = 128; +module_param(copybreak, int, 0); +MODULE_PARM_DESC(copybreak, "Receive copy threshold"); + +/* io memory shared between all devices */ +static void __iomem *bcm_enet_shared_base; + +/* + * io helpers to access mac registers + */ +static inline u32 enet_readl(struct bcm_enet_priv *priv, u32 off) +{ + return bcm_readl(priv->base + off); +} + +static inline void enet_writel(struct bcm_enet_priv *priv, + u32 val, u32 off) +{ + bcm_writel(val, priv->base + off); +} + +/* + * io helpers to access shared registers + */ +static inline u32 enet_dma_readl(struct bcm_enet_priv *priv, u32 off) +{ + return bcm_readl(bcm_enet_shared_base + off); +} + +static inline void enet_dma_writel(struct bcm_enet_priv *priv, + u32 val, u32 off) +{ + bcm_writel(val, bcm_enet_shared_base + off); +} + +/* + * write given data into mii register and wait for transfer to end + * with timeout (average measured transfer time is 25us) + */ +static int do_mdio_op(struct bcm_enet_priv *priv, unsigned int data) +{ + int limit; + + /* make sure mii interrupt status is cleared */ + enet_writel(priv, ENET_IR_MII, ENET_IR_REG); + + enet_writel(priv, data, ENET_MIIDATA_REG); + wmb(); + + /* busy wait on mii interrupt bit, with timeout */ + limit = 1000; + do { + if (enet_readl(priv, ENET_IR_REG) & ENET_IR_MII) + break; + udelay(1); + } while (limit-- >= 0); + + return (limit < 0) ? 
1 : 0; +} + +/* + * MII internal read callback + */ +static int bcm_enet_mdio_read(struct bcm_enet_priv *priv, int mii_id, + int regnum) +{ + u32 tmp, val; + + tmp = regnum << ENET_MIIDATA_REG_SHIFT; + tmp |= 0x2 << ENET_MIIDATA_TA_SHIFT; + tmp |= mii_id << ENET_MIIDATA_PHYID_SHIFT; + tmp |= ENET_MIIDATA_OP_READ_MASK; + + if (do_mdio_op(priv, tmp)) + return -1; + + val = enet_readl(priv, ENET_MIIDATA_REG); + val &= 0xffff; + return val; +} + +/* + * MII internal write callback + */ +static int bcm_enet_mdio_write(struct bcm_enet_priv *priv, int mii_id, + int regnum, u16 value) +{ + u32 tmp; + + tmp = (value & 0xffff) << ENET_MIIDATA_DATA_SHIFT; + tmp |= 0x2 << ENET_MIIDATA_TA_SHIFT; + tmp |= regnum << ENET_MIIDATA_REG_SHIFT; + tmp |= mii_id << ENET_MIIDATA_PHYID_SHIFT; + tmp |= ENET_MIIDATA_OP_WRITE_MASK; + + (void)do_mdio_op(priv, tmp); + return 0; +} + +/* + * MII read callback from phylib + */ +static int bcm_enet_mdio_read_phylib(struct mii_bus *bus, int mii_id, + int regnum) +{ + return bcm_enet_mdio_read(bus->priv, mii_id, regnum); +} + +/* + * MII write callback from phylib + */ +static int bcm_enet_mdio_write_phylib(struct mii_bus *bus, int mii_id, + int regnum, u16 value) +{ + return bcm_enet_mdio_write(bus->priv, mii_id, regnum, value); +} + +/* + * MII read callback from mii core + */ +static int bcm_enet_mdio_read_mii(struct net_device *dev, int mii_id, + int regnum) +{ + return bcm_enet_mdio_read(netdev_priv(dev), mii_id, regnum); +} + +/* + * MII write callback from mii core + */ +static void bcm_enet_mdio_write_mii(struct net_device *dev, int mii_id, + int regnum, int value) +{ + bcm_enet_mdio_write(netdev_priv(dev), mii_id, regnum, value); +} + +/* + * refill rx queue + */ +static int bcm_enet_refill_rx(struct net_device *dev) +{ + struct bcm_enet_priv *priv; + + priv = netdev_priv(dev); + + while (priv->rx_desc_count < priv->rx_ring_size) { + struct bcm_enet_desc *desc; + struct sk_buff *skb; + dma_addr_t p; + int desc_idx; + u32 len_stat; + + desc_idx = priv->rx_dirty_desc; + desc = &priv->rx_desc_cpu[desc_idx]; + + if (!priv->rx_skb[desc_idx]) { + skb = netdev_alloc_skb(dev, priv->rx_skb_size); + if (!skb) + break; + priv->rx_skb[desc_idx] = skb; + + p = dma_map_single(&priv->pdev->dev, skb->data, + priv->rx_skb_size, + DMA_FROM_DEVICE); + desc->address = p; + } + + len_stat = priv->rx_skb_size << DMADESC_LENGTH_SHIFT; + len_stat |= DMADESC_OWNER_MASK; + if (priv->rx_dirty_desc == priv->rx_ring_size - 1) { + len_stat |= DMADESC_WRAP_MASK; + priv->rx_dirty_desc = 0; + } else { + priv->rx_dirty_desc++; + } + wmb(); + desc->len_stat = len_stat; + + priv->rx_desc_count++; + + /* tell dma engine we allocated one buffer */ + enet_dma_writel(priv, 1, ENETDMA_BUFALLOC_REG(priv->rx_chan)); + } + + /* If rx ring is still empty, set a timer to try allocating + * again at a later time. 
*/ + if (priv->rx_desc_count == 0 && netif_running(dev)) { + dev_warn(&priv->pdev->dev, "unable to refill rx ring\n"); + priv->rx_timeout.expires = jiffies + HZ; + add_timer(&priv->rx_timeout); + } + + return 0; +} + +/* + * timer callback to defer refill rx queue in case we're OOM + */ +static void bcm_enet_refill_rx_timer(unsigned long data) +{ + struct net_device *dev; + struct bcm_enet_priv *priv; + + dev = (struct net_device *)data; + priv = netdev_priv(dev); + + spin_lock(&priv->rx_lock); + bcm_enet_refill_rx((struct net_device *)data); + spin_unlock(&priv->rx_lock); +} + +/* + * extract packet from rx queue + */ +static int bcm_enet_receive_queue(struct net_device *dev, int budget) +{ + struct bcm_enet_priv *priv; + struct device *kdev; + int processed; + + priv = netdev_priv(dev); + kdev = &priv->pdev->dev; + processed = 0; + + /* don't scan ring further than number of refilled + * descriptor */ + if (budget > priv->rx_desc_count) + budget = priv->rx_desc_count; + + do { + struct bcm_enet_desc *desc; + struct sk_buff *skb; + int desc_idx; + u32 len_stat; + unsigned int len; + + desc_idx = priv->rx_curr_desc; + desc = &priv->rx_desc_cpu[desc_idx]; + + /* make sure we actually read the descriptor status at + * each loop */ + rmb(); + + len_stat = desc->len_stat; + + /* break if dma ownership belongs to hw */ + if (len_stat & DMADESC_OWNER_MASK) + break; + + processed++; + priv->rx_curr_desc++; + if (priv->rx_curr_desc == priv->rx_ring_size) + priv->rx_curr_desc = 0; + priv->rx_desc_count--; + + /* if the packet does not have start of packet _and_ + * end of packet flag set, then just recycle it */ + if ((len_stat & DMADESC_ESOP_MASK) != DMADESC_ESOP_MASK) { + priv->stats.rx_dropped++; + continue; + } + + /* recycle packet if it's marked as bad */ + if (unlikely(len_stat & DMADESC_ERR_MASK)) { + priv->stats.rx_errors++; + + if (len_stat & DMADESC_OVSIZE_MASK) + priv->stats.rx_length_errors++; + if (len_stat & DMADESC_CRC_MASK) + priv->stats.rx_crc_errors++; + if (len_stat & DMADESC_UNDER_MASK) + priv->stats.rx_frame_errors++; + if (len_stat & DMADESC_OV_MASK) + priv->stats.rx_fifo_errors++; + continue; + } + + /* valid packet */ + skb = priv->rx_skb[desc_idx]; + len = (len_stat & DMADESC_LENGTH_MASK) >> DMADESC_LENGTH_SHIFT; + /* don't include FCS */ + len -= 4; + + if (len < copybreak) { + struct sk_buff *nskb; + + nskb = netdev_alloc_skb(dev, len + 2); + if (!nskb) { + /* forget packet, just rearm desc */ + priv->stats.rx_dropped++; + continue; + } + + /* since we're copying the data, we can align + * them properly */ + skb_reserve(nskb, NET_IP_ALIGN); + dma_sync_single_for_cpu(kdev, desc->address, + len, DMA_FROM_DEVICE); + memcpy(nskb->data, skb->data, len); + dma_sync_single_for_device(kdev, desc->address, + len, DMA_FROM_DEVICE); + skb = nskb; + } else { + dma_unmap_single(&priv->pdev->dev, desc->address, + priv->rx_skb_size, DMA_FROM_DEVICE); + priv->rx_skb[desc_idx] = NULL; + } + + skb_put(skb, len); + skb->dev = dev; + skb->protocol = eth_type_trans(skb, dev); + priv->stats.rx_packets++; + priv->stats.rx_bytes += len; + dev->last_rx = jiffies; + netif_receive_skb(skb); + + } while (--budget > 0); + + if (processed || !priv->rx_desc_count) { + bcm_enet_refill_rx(dev); + + /* kick rx dma */ + enet_dma_writel(priv, ENETDMA_CHANCFG_EN_MASK, + ENETDMA_CHANCFG_REG(priv->rx_chan)); + } + + return processed; +} + + +/* + * try to or force reclaim of transmitted buffers + */ +static int bcm_enet_tx_reclaim(struct net_device *dev, int force) +{ + struct bcm_enet_priv *priv; + int 
released; + + priv = netdev_priv(dev); + released = 0; + + while (priv->tx_desc_count < priv->tx_ring_size) { + struct bcm_enet_desc *desc; + struct sk_buff *skb; + + /* We run in a bh and fight against start_xmit, which + * is called with bh disabled */ + spin_lock(&priv->tx_lock); + + desc = &priv->tx_desc_cpu[priv->tx_dirty_desc]; + + if (!force && (desc->len_stat & DMADESC_OWNER_MASK)) { + spin_unlock(&priv->tx_lock); + break; + } + + /* ensure other field of the descriptor were not read + * before we checked ownership */ + rmb(); + + skb = priv->tx_skb[priv->tx_dirty_desc]; + priv->tx_skb[priv->tx_dirty_desc] = NULL; + dma_unmap_single(&priv->pdev->dev, desc->address, skb->len, + DMA_TO_DEVICE); + + priv->tx_dirty_desc++; + if (priv->tx_dirty_desc == priv->tx_ring_size) + priv->tx_dirty_desc = 0; + priv->tx_desc_count++; + + spin_unlock(&priv->tx_lock); + + if (desc->len_stat & DMADESC_UNDER_MASK) + priv->stats.tx_errors++; + + dev_kfree_skb(skb); + released++; + } + + if (netif_queue_stopped(dev) && released) + netif_wake_queue(dev); + + return released; +} + +/* + * poll func, called by network core + */ +static int bcm_enet_poll(struct napi_struct *napi, int budget) +{ + struct bcm_enet_priv *priv; + struct net_device *dev; + int tx_work_done, rx_work_done; + + priv = container_of(napi, struct bcm_enet_priv, napi); + dev = priv->net_dev; + + /* ack interrupts */ + enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK, + ENETDMA_IR_REG(priv->rx_chan)); + enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK, + ENETDMA_IR_REG(priv->tx_chan)); + + /* reclaim sent skb */ + tx_work_done = bcm_enet_tx_reclaim(dev, 0); + + spin_lock(&priv->rx_lock); + rx_work_done = bcm_enet_receive_queue(dev, budget); + spin_unlock(&priv->rx_lock); + + if (rx_work_done >= budget || tx_work_done > 0) { + /* rx/tx queue is not yet empty/clean */ + return rx_work_done; + } + + /* no more packet in rx/tx queue, remove device from poll + * queue */ +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30) + netif_rx_complete(dev, napi); +#else + napi_complete(napi); +#endif + + /* restore rx/tx interrupt */ + enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK, + ENETDMA_IRMASK_REG(priv->rx_chan)); + enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK, + ENETDMA_IRMASK_REG(priv->tx_chan)); + + return rx_work_done; +} + +/* + * mac interrupt handler + */ +static irqreturn_t bcm_enet_isr_mac(int irq, void *dev_id) +{ + struct net_device *dev; + struct bcm_enet_priv *priv; + u32 stat; + + dev = dev_id; + priv = netdev_priv(dev); + + stat = enet_readl(priv, ENET_IR_REG); + if (!(stat & ENET_IR_MIB)) + return IRQ_NONE; + + /* clear & mask interrupt */ + enet_writel(priv, ENET_IR_MIB, ENET_IR_REG); + enet_writel(priv, 0, ENET_IRMASK_REG); + + /* read mib registers in workqueue */ + schedule_work(&priv->mib_update_task); + + return IRQ_HANDLED; +} + +/* + * rx/tx dma interrupt handler + */ +static irqreturn_t bcm_enet_isr_dma(int irq, void *dev_id) +{ + struct net_device *dev; + struct bcm_enet_priv *priv; + + dev = dev_id; + priv = netdev_priv(dev); + + /* mask rx/tx interrupts */ + enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->rx_chan)); + enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->tx_chan)); + +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30) + netif_rx_schedule(dev, &priv->napi); +#else + napi_schedule(&priv->napi); +#endif + + return IRQ_HANDLED; +} + +/* + * tx request callback + */ +static int bcm_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) +{ + struct bcm_enet_priv *priv; + struct bcm_enet_desc *desc; + u32 len_stat; 
+ int ret; + + priv = netdev_priv(dev); + + /* lock against tx reclaim */ + spin_lock(&priv->tx_lock); + + /* make sure the tx hw queue is not full, should not happen + * since we stop queue before it's the case */ + if (unlikely(!priv->tx_desc_count)) { + netif_stop_queue(dev); + dev_err(&priv->pdev->dev, "xmit called with no tx desc " + "available?\n"); + ret = NETDEV_TX_BUSY; + goto out_unlock; + } + + /* point to the next available desc */ + desc = &priv->tx_desc_cpu[priv->tx_curr_desc]; + priv->tx_skb[priv->tx_curr_desc] = skb; + + /* fill descriptor */ + desc->address = dma_map_single(&priv->pdev->dev, skb->data, skb->len, + DMA_TO_DEVICE); + + len_stat = (skb->len << DMADESC_LENGTH_SHIFT) & DMADESC_LENGTH_MASK; + len_stat |= DMADESC_ESOP_MASK | + DMADESC_APPEND_CRC | + DMADESC_OWNER_MASK; + + priv->tx_curr_desc++; + if (priv->tx_curr_desc == priv->tx_ring_size) { + priv->tx_curr_desc = 0; + len_stat |= DMADESC_WRAP_MASK; + } + priv->tx_desc_count--; + + /* dma might be already polling, make sure we update desc + * fields in correct order */ + wmb(); + desc->len_stat = len_stat; + wmb(); + + /* kick tx dma */ + enet_dma_writel(priv, ENETDMA_CHANCFG_EN_MASK, + ENETDMA_CHANCFG_REG(priv->tx_chan)); + + /* stop queue if no more desc available */ + if (!priv->tx_desc_count) + netif_stop_queue(dev); + + priv->stats.tx_bytes += skb->len; + priv->stats.tx_packets++; + dev->trans_start = jiffies; + ret = NETDEV_TX_OK; + +out_unlock: + spin_unlock(&priv->tx_lock); + return ret; +} + +/* + * Change the interface's mac address. + */ +static int bcm_enet_set_mac_address(struct net_device *dev, void *p) +{ + struct bcm_enet_priv *priv; + struct sockaddr *addr = p; + u32 val; + + priv = netdev_priv(dev); + memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); + + /* use perfect match register 0 to store my mac address */ + val = (dev->dev_addr[2] << 24) | (dev->dev_addr[3] << 16) | + (dev->dev_addr[4] << 8) | dev->dev_addr[5]; + enet_writel(priv, val, ENET_PML_REG(0)); + + val = (dev->dev_addr[0] << 8 | dev->dev_addr[1]); + val |= ENET_PMH_DATAVALID_MASK; + enet_writel(priv, val, ENET_PMH_REG(0)); + + return 0; +} + +/* + * Change rx mode (promiscous/allmulti) and update multicast list + */ +static void bcm_enet_set_multicast_list(struct net_device *dev) +{ + struct bcm_enet_priv *priv; + struct dev_mc_list *mc_list; + u32 val; + int i; + + priv = netdev_priv(dev); + + val = enet_readl(priv, ENET_RXCFG_REG); + + if (dev->flags & IFF_PROMISC) + val |= ENET_RXCFG_PROMISC_MASK; + else + val &= ~ENET_RXCFG_PROMISC_MASK; + + /* only 3 perfect match registers left, first one is used for + * own mac address */ + if ((dev->flags & IFF_ALLMULTI) || dev->mc_count > 3) + val |= ENET_RXCFG_ALLMCAST_MASK; + else + val &= ~ENET_RXCFG_ALLMCAST_MASK; + + /* no need to set perfect match registers if we catch all + * multicast */ + if (val & ENET_RXCFG_ALLMCAST_MASK) { + enet_writel(priv, val, ENET_RXCFG_REG); + return; + } + + for (i = 0, mc_list = dev->mc_list; + (mc_list != NULL) && (i < dev->mc_count) && (i < 3); + i++, mc_list = mc_list->next) { + u8 *dmi_addr; + u32 tmp; + + /* filter non ethernet address */ + if (mc_list->dmi_addrlen != 6) + continue; + + /* update perfect match registers */ + dmi_addr = mc_list->dmi_addr; + tmp = (dmi_addr[2] << 24) | (dmi_addr[3] << 16) | + (dmi_addr[4] << 8) | dmi_addr[5]; + enet_writel(priv, tmp, ENET_PML_REG(i + 1)); + + tmp = (dmi_addr[0] << 8 | dmi_addr[1]); + tmp |= ENET_PMH_DATAVALID_MASK; + enet_writel(priv, tmp, ENET_PMH_REG(i + 1)); + } + + for (; i < 3; i++) { + 
enet_writel(priv, 0, ENET_PML_REG(i + 1)); + enet_writel(priv, 0, ENET_PMH_REG(i + 1)); + } + + enet_writel(priv, val, ENET_RXCFG_REG); +} + +/* + * set mac duplex parameters + */ +static void bcm_enet_set_duplex(struct bcm_enet_priv *priv, int fullduplex) +{ + u32 val; + + val = enet_readl(priv, ENET_TXCTL_REG); + if (fullduplex) + val |= ENET_TXCTL_FD_MASK; + else + val &= ~ENET_TXCTL_FD_MASK; + enet_writel(priv, val, ENET_TXCTL_REG); +} + +/* + * set mac flow control parameters + */ +static void bcm_enet_set_flow(struct bcm_enet_priv *priv, int rx_en, int tx_en) +{ + u32 val; + + /* rx flow control (pause frame handling) */ + val = enet_readl(priv, ENET_RXCFG_REG); + if (rx_en) + val |= ENET_RXCFG_ENFLOW_MASK; + else + val &= ~ENET_RXCFG_ENFLOW_MASK; + enet_writel(priv, val, ENET_RXCFG_REG); + + /* tx flow control (pause frame generation) */ + val = enet_dma_readl(priv, ENETDMA_CFG_REG); + if (tx_en) + val |= ENETDMA_CFG_FLOWCH_MASK(priv->rx_chan); + else + val &= ~ENETDMA_CFG_FLOWCH_MASK(priv->rx_chan); + enet_dma_writel(priv, val, ENETDMA_CFG_REG); +} + +/* + * link changed callback (from phylib) + */ +static void bcm_enet_adjust_phy_link(struct net_device *dev) +{ + struct bcm_enet_priv *priv; + struct phy_device *phydev; + int status_changed; + + priv = netdev_priv(dev); + phydev = priv->phydev; + status_changed = 0; + + if (priv->old_link != phydev->link) { + status_changed = 1; + priv->old_link = phydev->link; + } + + /* reflect duplex change in mac configuration */ + if (phydev->link && phydev->duplex != priv->old_duplex) { + bcm_enet_set_duplex(priv, + (phydev->duplex == DUPLEX_FULL) ? 1 : 0); + status_changed = 1; + priv->old_duplex = phydev->duplex; + } + + /* enable flow control if remote advertise it (trust phylib to + * check that duplex is full */ + if (phydev->link && phydev->pause != priv->old_pause) { + int rx_pause_en, tx_pause_en; + + if (phydev->pause) { + /* pause was advertised by lpa and us */ + rx_pause_en = 1; + tx_pause_en = 1; + } else if (!priv->pause_auto) { + /* pause setting overrided by user */ + rx_pause_en = priv->pause_rx; + tx_pause_en = priv->pause_tx; + } else { + rx_pause_en = 0; + tx_pause_en = 0; + } + + bcm_enet_set_flow(priv, rx_pause_en, tx_pause_en); + status_changed = 1; + priv->old_pause = phydev->pause; + } + + if (status_changed) { + pr_info("%s: link %s", dev->name, phydev->link ? + "UP" : "DOWN"); + if (phydev->link) + printk(" - %d/%s - flow control %s", phydev->speed, + DUPLEX_FULL == phydev->duplex ? "full" : "half", + phydev->pause == 1 ? "rx&tx" : "off"); + + printk("\n"); + } +} + +/* + * link changed callback (if phylib is not used) + */ +static void bcm_enet_adjust_link(struct net_device *dev) +{ + struct bcm_enet_priv *priv; + + priv = netdev_priv(dev); + bcm_enet_set_duplex(priv, priv->force_duplex_full); + bcm_enet_set_flow(priv, priv->pause_rx, priv->pause_tx); + + pr_info("%s: link forced UP - %d/%s - flow control %s/%s\n", + dev->name, + priv->force_speed_100 ? 100 : 10, + priv->force_duplex_full ? "full" : "half", + priv->pause_rx ? "rx" : "off", + priv->pause_tx ? 
"tx" : "off"); +} + +/* + * open callback, allocate dma rings & buffers and start rx operation + */ +static int bcm_enet_open(struct net_device *dev) +{ + struct bcm_enet_priv *priv; + struct sockaddr addr; + struct device *kdev; + struct phy_device *phydev; + int irq_requested, i, ret; + unsigned int size; + char phy_id[BUS_ID_SIZE]; + void *p; + u32 val; + + priv = netdev_priv(dev); + priv->rx_desc_cpu = priv->tx_desc_cpu = NULL; + priv->rx_skb = priv->tx_skb = NULL; + + kdev = &priv->pdev->dev; + + if (priv->has_phy) { + /* connect to PHY */ + snprintf(phy_id, BUS_ID_SIZE, PHY_ID_FMT, + priv->mac_id ? "1" : "0", priv->phy_id); + + phydev = phy_connect(dev, phy_id, &bcm_enet_adjust_phy_link, 0, + PHY_INTERFACE_MODE_MII); + + if (IS_ERR(phydev)) { + dev_err(kdev, "could not attach to PHY\n"); + return PTR_ERR(phydev); + } + + /* mask with MAC supported features */ + phydev->supported &= (SUPPORTED_10baseT_Half | + SUPPORTED_10baseT_Full | + SUPPORTED_100baseT_Half | + SUPPORTED_100baseT_Full | + SUPPORTED_Autoneg | + SUPPORTED_Pause | + SUPPORTED_MII); + phydev->advertising = phydev->supported; + + if (priv->pause_auto && priv->pause_rx && priv->pause_tx) + phydev->advertising |= SUPPORTED_Pause; + else + phydev->advertising &= ~SUPPORTED_Pause; + + dev_info(kdev, "attached PHY at address %d [%s]\n", + phydev->addr, phydev->drv->name); + + priv->old_link = 0; + priv->old_duplex = -1; + priv->old_pause = -1; + priv->phydev = phydev; + } + + /* mask all interrupts and request them */ + enet_writel(priv, 0, ENET_IRMASK_REG); + enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->rx_chan)); + enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->tx_chan)); + + irq_requested = 0; + ret = request_irq(dev->irq, bcm_enet_isr_mac, 0, dev->name, dev); + if (ret) + goto out; + irq_requested++; + + ret = request_irq(priv->irq_rx, bcm_enet_isr_dma, + IRQF_SAMPLE_RANDOM | IRQF_DISABLED, dev->name, dev); + if (ret) + goto out; + irq_requested++; + + ret = request_irq(priv->irq_tx, bcm_enet_isr_dma, + IRQF_DISABLED, dev->name, dev); + if (ret) + goto out; + irq_requested++; + + /* initialize perfect match registers */ + for (i = 0; i < 4; i++) { + enet_writel(priv, 0, ENET_PML_REG(i)); + enet_writel(priv, 0, ENET_PMH_REG(i)); + } + + /* write device mac address */ + memcpy(addr.sa_data, dev->dev_addr, ETH_ALEN); + bcm_enet_set_mac_address(dev, &addr); + + /* allocate rx dma ring */ + size = priv->rx_ring_size * sizeof(struct bcm_enet_desc); + p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL); + if (!p) { + dev_err(kdev, "cannot allocate rx ring %u\n", size); + ret = -ENOMEM; + goto out; + } + + memset(p, 0, size); + priv->rx_desc_alloc_size = size; + priv->rx_desc_cpu = p; + + /* allocate tx dma ring */ + size = priv->tx_ring_size * sizeof(struct bcm_enet_desc); + p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL); + if (!p) { + dev_err(kdev, "cannot allocate tx ring\n"); + ret = -ENOMEM; + goto out; + } + + memset(p, 0, size); + priv->tx_desc_alloc_size = size; + priv->tx_desc_cpu = p; + + priv->tx_skb = kzalloc(sizeof(struct sk_buff *) * priv->tx_ring_size, + GFP_KERNEL); + if (!priv->tx_skb) { + dev_err(kdev, "cannot allocate rx skb queue\n"); + ret = -ENOMEM; + goto out; + } + + priv->tx_desc_count = priv->tx_ring_size; + priv->tx_dirty_desc = 0; + priv->tx_curr_desc = 0; + spin_lock_init(&priv->tx_lock); + + /* init & fill rx ring with skbs */ + priv->rx_skb = kzalloc(sizeof(struct sk_buff *) * priv->rx_ring_size, + GFP_KERNEL); + if (!priv->rx_skb) { + dev_err(kdev, 
"cannot allocate rx skb queue\n"); + ret = -ENOMEM; + goto out; + } + + priv->rx_desc_count = 0; + priv->rx_dirty_desc = 0; + priv->rx_curr_desc = 0; + + /* initialize flow control buffer allocation */ + enet_dma_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0, + ENETDMA_BUFALLOC_REG(priv->rx_chan)); + + if (bcm_enet_refill_rx(dev)) { + dev_err(kdev, "cannot allocate rx skb queue\n"); + ret = -ENOMEM; + goto out; + } + + /* write rx & tx ring addresses */ + enet_dma_writel(priv, priv->rx_desc_dma, + ENETDMA_RSTART_REG(priv->rx_chan)); + enet_dma_writel(priv, priv->tx_desc_dma, + ENETDMA_RSTART_REG(priv->tx_chan)); + + /* clear remaining state ram for rx & tx channel */ + enet_dma_writel(priv, 0, ENETDMA_SRAM2_REG(priv->rx_chan)); + enet_dma_writel(priv, 0, ENETDMA_SRAM2_REG(priv->tx_chan)); + enet_dma_writel(priv, 0, ENETDMA_SRAM3_REG(priv->rx_chan)); + enet_dma_writel(priv, 0, ENETDMA_SRAM3_REG(priv->tx_chan)); + enet_dma_writel(priv, 0, ENETDMA_SRAM4_REG(priv->rx_chan)); + enet_dma_writel(priv, 0, ENETDMA_SRAM4_REG(priv->tx_chan)); + + /* set max rx/tx length */ + enet_writel(priv, priv->hw_mtu, ENET_RXMAXLEN_REG); + enet_writel(priv, priv->hw_mtu, ENET_TXMAXLEN_REG); + + /* set dma maximum burst len */ + enet_dma_writel(priv, BCMENET_DMA_MAXBURST, + ENETDMA_MAXBURST_REG(priv->rx_chan)); + enet_dma_writel(priv, BCMENET_DMA_MAXBURST, + ENETDMA_MAXBURST_REG(priv->tx_chan)); + + /* set correct transmit fifo watermark */ + enet_writel(priv, BCMENET_TX_FIFO_TRESH, ENET_TXWMARK_REG); + + /* set flow control low/high threshold to 1/3 / 2/3 */ + val = priv->rx_ring_size / 3; + enet_dma_writel(priv, val, ENETDMA_FLOWCL_REG(priv->rx_chan)); + val = (priv->rx_ring_size * 2) / 3; + enet_dma_writel(priv, val, ENETDMA_FLOWCH_REG(priv->rx_chan)); + + /* all set, enable mac and interrupts, start dma engine and + * kick rx dma channel */ + wmb(); + enet_writel(priv, ENET_CTL_ENABLE_MASK, ENET_CTL_REG); + enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG); + enet_dma_writel(priv, ENETDMA_CHANCFG_EN_MASK, + ENETDMA_CHANCFG_REG(priv->rx_chan)); + + /* watch "mib counters about to overflow" interrupt */ + enet_writel(priv, ENET_IR_MIB, ENET_IR_REG); + enet_writel(priv, ENET_IR_MIB, ENET_IRMASK_REG); + + /* watch "packet transferred" interrupt in rx and tx */ + enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK, + ENETDMA_IR_REG(priv->rx_chan)); + enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK, + ENETDMA_IR_REG(priv->tx_chan)); + + /* make sure we enable napi before rx interrupt */ + napi_enable(&priv->napi); + + enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK, + ENETDMA_IRMASK_REG(priv->rx_chan)); + enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK, + ENETDMA_IRMASK_REG(priv->tx_chan)); + + if (priv->has_phy) + phy_start(priv->phydev); + else + bcm_enet_adjust_link(dev); + + netif_start_queue(dev); + return 0; + +out: + phy_disconnect(priv->phydev); + if (irq_requested > 2) + free_irq(priv->irq_tx, dev); + if (irq_requested > 1) + free_irq(priv->irq_rx, dev); + if (irq_requested > 0) + free_irq(dev->irq, dev); + for (i = 0; i < priv->rx_ring_size; i++) { + struct bcm_enet_desc *desc; + + if (!priv->rx_skb[i]) + continue; + + desc = &priv->rx_desc_cpu[i]; + dma_unmap_single(kdev, desc->address, priv->rx_skb_size, + DMA_FROM_DEVICE); + kfree_skb(priv->rx_skb[i]); + } + if (priv->rx_desc_cpu) + dma_free_coherent(kdev, priv->rx_desc_alloc_size, + priv->rx_desc_cpu, priv->rx_desc_dma); + if (priv->tx_desc_cpu) + dma_free_coherent(kdev, priv->tx_desc_alloc_size, + priv->tx_desc_cpu, priv->tx_desc_dma); + 
kfree(priv->rx_skb); + kfree(priv->tx_skb); + return ret; +} + +/* + * disable mac + */ +static void bcm_enet_disable_mac(struct bcm_enet_priv *priv) +{ + int limit; + u32 val; + + val = enet_readl(priv, ENET_CTL_REG); + val |= ENET_CTL_DISABLE_MASK; + enet_writel(priv, val, ENET_CTL_REG); + + limit = 1000; + do { + u32 val; + + val = enet_readl(priv, ENET_CTL_REG); + if (!(val & ENET_CTL_DISABLE_MASK)) + break; + udelay(1); + } while (limit--); +} + +/* + * disable dma in given channel + */ +static void bcm_enet_disable_dma(struct bcm_enet_priv *priv, int chan) +{ + int limit; + + enet_dma_writel(priv, 0, ENETDMA_CHANCFG_REG(chan)); + + limit = 1000; + do { + u32 val; + + val = enet_dma_readl(priv, ENETDMA_CHANCFG_REG(chan)); + if (!(val & ENETDMA_CHANCFG_EN_MASK)) + break; + udelay(1); + } while (limit--); +} + +/* + * stop callback + */ +static int bcm_enet_stop(struct net_device *dev) +{ + struct bcm_enet_priv *priv; + struct device *kdev; + int i; + + priv = netdev_priv(dev); + kdev = &priv->pdev->dev; + + netif_stop_queue(dev); + napi_disable(&priv->napi); + if (priv->has_phy) + phy_stop(priv->phydev); + del_timer_sync(&priv->rx_timeout); + + /* mask all interrupts */ + enet_writel(priv, 0, ENET_IRMASK_REG); + enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->rx_chan)); + enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->tx_chan)); + + /* make sure no mib update is scheduled */ + flush_scheduled_work(); + + /* disable dma & mac */ + bcm_enet_disable_dma(priv, priv->tx_chan); + bcm_enet_disable_dma(priv, priv->rx_chan); + bcm_enet_disable_mac(priv); + + /* force reclaim of all tx buffers */ + bcm_enet_tx_reclaim(dev, 1); + + /* free the rx skb ring */ + for (i = 0; i < priv->rx_ring_size; i++) { + struct bcm_enet_desc *desc; + + if (!priv->rx_skb[i]) + continue; + + desc = &priv->rx_desc_cpu[i]; + dma_unmap_single(kdev, desc->address, priv->rx_skb_size, + DMA_FROM_DEVICE); + kfree_skb(priv->rx_skb[i]); + } + + /* free remaining allocated memory */ + kfree(priv->rx_skb); + kfree(priv->tx_skb); + dma_free_coherent(kdev, priv->rx_desc_alloc_size, + priv->rx_desc_cpu, priv->rx_desc_dma); + dma_free_coherent(kdev, priv->tx_desc_alloc_size, + priv->tx_desc_cpu, priv->tx_desc_dma); + free_irq(priv->irq_tx, dev); + free_irq(priv->irq_rx, dev); + free_irq(dev->irq, dev); + + /* release phy */ + if (priv->has_phy) { + phy_disconnect(priv->phydev); + priv->phydev = NULL; + } + + return 0; +} + +/* + * core request to return device rx/tx stats + */ +static struct net_device_stats *bcm_enet_get_stats(struct net_device *dev) +{ + struct bcm_enet_priv *priv; + + priv = netdev_priv(dev); + return &priv->stats; +} + +/* + * ethtool callbacks + */ +struct bcm_enet_stats { + char stat_string[ETH_GSTRING_LEN]; + int sizeof_stat; + int stat_offset; + int mib_reg; +}; + +#define GEN_STAT(m) sizeof(((struct bcm_enet_priv *)0)->m), \ + offsetof(struct bcm_enet_priv, m) + +static const struct bcm_enet_stats bcm_enet_gstrings_stats[] = { + { "rx_packets", GEN_STAT(stats.rx_packets), -1 }, + { "tx_packets", GEN_STAT(stats.tx_packets), -1 }, + { "rx_bytes", GEN_STAT(stats.rx_bytes), -1 }, + { "tx_bytes", GEN_STAT(stats.tx_bytes), -1 }, + { "rx_errors", GEN_STAT(stats.rx_errors), -1 }, + { "tx_errors", GEN_STAT(stats.tx_errors), -1 }, + { "rx_dropped", GEN_STAT(stats.rx_dropped), -1 }, + { "tx_dropped", GEN_STAT(stats.tx_dropped), -1 }, + + { "rx_good_octets", GEN_STAT(mib.rx_gd_octets), ETH_MIB_RX_GD_OCTETS}, + { "rx_good_pkts", GEN_STAT(mib.rx_gd_pkts), ETH_MIB_RX_GD_PKTS }, + { "rx_broadcast", 
GEN_STAT(mib.rx_brdcast), ETH_MIB_RX_BRDCAST }, + { "rx_multicast", GEN_STAT(mib.rx_mult), ETH_MIB_RX_MULT }, + { "rx_64_octets", GEN_STAT(mib.rx_64), ETH_MIB_RX_64 }, + { "rx_65_127_oct", GEN_STAT(mib.rx_65_127), ETH_MIB_RX_65_127 }, + { "rx_128_255_oct", GEN_STAT(mib.rx_128_255), ETH_MIB_RX_128_255 }, + { "rx_256_511_oct", GEN_STAT(mib.rx_256_511), ETH_MIB_RX_256_511 }, + { "rx_512_1023_oct", GEN_STAT(mib.rx_512_1023), ETH_MIB_RX_512_1023 }, + { "rx_1024_max_oct", GEN_STAT(mib.rx_1024_max), ETH_MIB_RX_1024_MAX }, + { "rx_jabber", GEN_STAT(mib.rx_jab), ETH_MIB_RX_JAB }, + { "rx_oversize", GEN_STAT(mib.rx_ovr), ETH_MIB_RX_OVR }, + { "rx_fragment", GEN_STAT(mib.rx_frag), ETH_MIB_RX_FRAG }, + { "rx_dropped", GEN_STAT(mib.rx_drop), ETH_MIB_RX_DROP }, + { "rx_crc_align", GEN_STAT(mib.rx_crc_align), ETH_MIB_RX_CRC_ALIGN }, + { "rx_undersize", GEN_STAT(mib.rx_und), ETH_MIB_RX_UND }, + { "rx_crc", GEN_STAT(mib.rx_crc), ETH_MIB_RX_CRC }, + { "rx_align", GEN_STAT(mib.rx_align), ETH_MIB_RX_ALIGN }, + { "rx_symbol_error", GEN_STAT(mib.rx_sym), ETH_MIB_RX_SYM }, + { "rx_pause", GEN_STAT(mib.rx_pause), ETH_MIB_RX_PAUSE }, + { "rx_control", GEN_STAT(mib.rx_cntrl), ETH_MIB_RX_CNTRL }, + + { "tx_good_octets", GEN_STAT(mib.tx_gd_octets), ETH_MIB_TX_GD_OCTETS }, + { "tx_good_pkts", GEN_STAT(mib.tx_gd_pkts), ETH_MIB_TX_GD_PKTS }, + { "tx_broadcast", GEN_STAT(mib.tx_brdcast), ETH_MIB_TX_BRDCAST }, + { "tx_multicast", GEN_STAT(mib.tx_mult), ETH_MIB_TX_MULT }, + { "tx_64_oct", GEN_STAT(mib.tx_64), ETH_MIB_TX_64 }, + { "tx_65_127_oct", GEN_STAT(mib.tx_65_127), ETH_MIB_TX_65_127 }, + { "tx_128_255_oct", GEN_STAT(mib.tx_128_255), ETH_MIB_TX_128_255 }, + { "tx_256_511_oct", GEN_STAT(mib.tx_256_511), ETH_MIB_TX_256_511 }, + { "tx_512_1023_oct", GEN_STAT(mib.tx_512_1023), ETH_MIB_TX_512_1023}, + { "tx_1024_max_oct", GEN_STAT(mib.tx_1024_max), ETH_MIB_TX_1024_MAX }, + { "tx_jabber", GEN_STAT(mib.tx_jab), ETH_MIB_TX_JAB }, + { "tx_oversize", GEN_STAT(mib.tx_ovr), ETH_MIB_TX_OVR }, + { "tx_fragment", GEN_STAT(mib.tx_frag), ETH_MIB_TX_FRAG }, + { "tx_underrun", GEN_STAT(mib.tx_underrun), ETH_MIB_TX_UNDERRUN }, + { "tx_collisions", GEN_STAT(mib.tx_col), ETH_MIB_TX_COL }, + { "tx_single_collision", GEN_STAT(mib.tx_1_col), ETH_MIB_TX_1_COL }, + { "tx_multiple_collision", GEN_STAT(mib.tx_m_col), ETH_MIB_TX_M_COL }, + { "tx_excess_collision", GEN_STAT(mib.tx_ex_col), ETH_MIB_TX_EX_COL }, + { "tx_late_collision", GEN_STAT(mib.tx_late), ETH_MIB_TX_LATE }, + { "tx_deferred", GEN_STAT(mib.tx_def), ETH_MIB_TX_DEF }, + { "tx_carrier_sense", GEN_STAT(mib.tx_crs), ETH_MIB_TX_CRS }, + { "tx_pause", GEN_STAT(mib.tx_pause), ETH_MIB_TX_PAUSE }, + +}; + +#define BCM_ENET_STATS_LEN \ + (sizeof(bcm_enet_gstrings_stats) / sizeof(struct bcm_enet_stats)) + +static const u32 unused_mib_regs[] = { + ETH_MIB_TX_ALL_OCTETS, + ETH_MIB_TX_ALL_PKTS, + ETH_MIB_RX_ALL_OCTETS, + ETH_MIB_RX_ALL_PKTS, +}; + + +static void bcm_enet_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + strncpy(drvinfo->driver, bcm_enet_driver_name, 32); + strncpy(drvinfo->version, bcm_enet_driver_version, 32); + strncpy(drvinfo->fw_version, "N/A", 32); + strncpy(drvinfo->bus_info, "bcm63xx", 32); + drvinfo->n_stats = BCM_ENET_STATS_LEN; +} + +static int bcm_enet_get_stats_count(struct net_device *netdev) +{ + return BCM_ENET_STATS_LEN; +} + +static void bcm_enet_get_strings(struct net_device *netdev, + u32 stringset, u8 *data) +{ + int i; + + switch (stringset) { + case ETH_SS_STATS: + for (i = 0; i < BCM_ENET_STATS_LEN; i++) { + memcpy(data + i * 
ETH_GSTRING_LEN, + bcm_enet_gstrings_stats[i].stat_string, + ETH_GSTRING_LEN); + } + break; + } +} + +static void update_mib_counters(struct bcm_enet_priv *priv) +{ + int i; + + for (i = 0; i < BCM_ENET_STATS_LEN; i++) { + const struct bcm_enet_stats *s; + u32 val; + char *p; + + s = &bcm_enet_gstrings_stats[i]; + if (s->mib_reg == -1) + continue; + + val = enet_readl(priv, ENET_MIB_REG(s->mib_reg)); + p = (char *)priv + s->stat_offset; + + if (s->sizeof_stat == sizeof(u64)) + *(u64 *)p += val; + else + *(u32 *)p += val; + } + + /* also empty unused mib counters to make sure mib counter + * overflow interrupt is cleared */ + for (i = 0; i < ARRAY_SIZE(unused_mib_regs); i++) + (void)enet_readl(priv, ENET_MIB_REG(unused_mib_regs[i])); +} + +static void bcm_enet_update_mib_counters_defer(struct work_struct *t) +{ + struct bcm_enet_priv *priv; + + priv = container_of(t, struct bcm_enet_priv, mib_update_task); + mutex_lock(&priv->mib_update_lock); + update_mib_counters(priv); + mutex_unlock(&priv->mib_update_lock); + + /* reenable mib interrupt */ + if (netif_running(priv->net_dev)) + enet_writel(priv, ENET_IR_MIB, ENET_IRMASK_REG); +} + +static void bcm_enet_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, + u64 *data) +{ + struct bcm_enet_priv *priv; + int i; + + priv = netdev_priv(netdev); + + mutex_lock(&priv->mib_update_lock); + update_mib_counters(priv); + + for (i = 0; i < BCM_ENET_STATS_LEN; i++) { + const struct bcm_enet_stats *s; + char *p; + + s = &bcm_enet_gstrings_stats[i]; + p = (char *)priv + s->stat_offset; + data[i] = (s->sizeof_stat == sizeof(u64)) ? + *(u64 *)p : *(u32 *)p; + } + mutex_unlock(&priv->mib_update_lock); +} + +static int bcm_enet_get_settings(struct net_device *dev, + struct ethtool_cmd *cmd) +{ + struct bcm_enet_priv *priv; + + priv = netdev_priv(dev); + + cmd->maxrxpkt = 0; + cmd->maxtxpkt = 0; + + if (priv->has_phy) { + if (!priv->phydev) + return -ENODEV; + return phy_ethtool_gset(priv->phydev, cmd); + } else { + cmd->autoneg = 0; + cmd->speed = (priv->force_speed_100) ? SPEED_100 : SPEED_10; + cmd->duplex = (priv->force_duplex_full) ? + DUPLEX_FULL : DUPLEX_HALF; + cmd->supported = ADVERTISED_10baseT_Half | + ADVERTISED_10baseT_Full | + ADVERTISED_100baseT_Half | + ADVERTISED_100baseT_Full; + cmd->advertising = 0; + cmd->port = PORT_MII; + cmd->transceiver = XCVR_EXTERNAL; + } + return 0; +} + +static int bcm_enet_set_settings(struct net_device *dev, + struct ethtool_cmd *cmd) +{ + struct bcm_enet_priv *priv; + + priv = netdev_priv(dev); + if (priv->has_phy) { + if (!priv->phydev) + return -ENODEV; + return phy_ethtool_sset(priv->phydev, cmd); + } else { + + if (cmd->autoneg || + (cmd->speed != SPEED_100 && cmd->speed != SPEED_10) || + cmd->port != PORT_MII) + return -EINVAL; + + priv->force_speed_100 = (cmd->speed == SPEED_100) ? 1 : 0; + priv->force_duplex_full = (cmd->duplex == DUPLEX_FULL) ? 
1 : 0; + + if (netif_running(dev)) + bcm_enet_adjust_link(dev); + return 0; + } +} + +static void bcm_enet_get_ringparam(struct net_device *dev, + struct ethtool_ringparam *ering) +{ + struct bcm_enet_priv *priv; + + priv = netdev_priv(dev); + + /* rx/tx ring is actually only limited by memory */ + ering->rx_max_pending = 8192; + ering->tx_max_pending = 8192; + ering->rx_mini_max_pending = 0; + ering->rx_jumbo_max_pending = 0; + ering->rx_pending = priv->rx_ring_size; + ering->tx_pending = priv->tx_ring_size; +} + +static int bcm_enet_set_ringparam(struct net_device *dev, + struct ethtool_ringparam *ering) +{ + struct bcm_enet_priv *priv; + int was_running; + + priv = netdev_priv(dev); + + was_running = 0; + if (netif_running(dev)) { + bcm_enet_stop(dev); + was_running = 1; + } + + priv->rx_ring_size = ering->rx_pending; + priv->tx_ring_size = ering->tx_pending; + + if (was_running) { + int err; + + err = bcm_enet_open(dev); + if (err) + dev_close(dev); + else + bcm_enet_set_multicast_list(dev); + } + return 0; +} + +static void bcm_enet_get_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *ecmd) +{ + struct bcm_enet_priv *priv; + + priv = netdev_priv(dev); + ecmd->autoneg = priv->pause_auto; + ecmd->rx_pause = priv->pause_rx; + ecmd->tx_pause = priv->pause_tx; +} + +static int bcm_enet_set_pauseparam(struct net_device *dev, + struct ethtool_pauseparam *ecmd) +{ + struct bcm_enet_priv *priv; + + priv = netdev_priv(dev); + + if (priv->has_phy) { + if (ecmd->autoneg && (ecmd->rx_pause != ecmd->tx_pause)) { + /* asymetric pause mode not supported, + * actually possible but integrated PHY has RO + * asym_pause bit */ + return -EINVAL; + } + } else { + /* no pause autoneg on direct mii connection */ + if (ecmd->autoneg) + return -EINVAL; + } + + priv->pause_auto = ecmd->autoneg; + priv->pause_rx = ecmd->rx_pause; + priv->pause_tx = ecmd->tx_pause; + + return 0; +} + +static struct ethtool_ops bcm_enet_ethtool_ops = { + .get_strings = bcm_enet_get_strings, + .get_stats_count = bcm_enet_get_stats_count, + .get_ethtool_stats = bcm_enet_get_ethtool_stats, + .get_settings = bcm_enet_get_settings, + .set_settings = bcm_enet_set_settings, + .get_drvinfo = bcm_enet_get_drvinfo, + .get_link = ethtool_op_get_link, + .get_ringparam = bcm_enet_get_ringparam, + .set_ringparam = bcm_enet_set_ringparam, + .get_pauseparam = bcm_enet_get_pauseparam, + .set_pauseparam = bcm_enet_set_pauseparam, +}; + +static int bcm_enet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +{ + struct bcm_enet_priv *priv; + + priv = netdev_priv(dev); + if (priv->has_phy) { + if (!priv->phydev) + return -ENODEV; + return phy_mii_ioctl(priv->phydev, if_mii(rq), cmd); + } else { + struct mii_if_info mii; + + mii.dev = dev; + mii.mdio_read = bcm_enet_mdio_read_mii; + mii.mdio_write = bcm_enet_mdio_write_mii; + mii.phy_id = 0; + mii.phy_id_mask = 0x3f; + mii.reg_num_mask = 0x1f; + return generic_mii_ioctl(&mii, if_mii(rq), cmd, NULL); + } +} + +/* + * calculate actual hardware mtu + */ +static int compute_hw_mtu(struct bcm_enet_priv *priv, int mtu) +{ + int actual_mtu; + + actual_mtu = mtu; + + /* add ethernet header + vlan tag size */ + actual_mtu += VLAN_ETH_HLEN; + + if (actual_mtu < 64 || actual_mtu > BCMENET_MAX_MTU) + return -EINVAL; + + /* + * setup maximum size before we get overflow mark in + * descriptor, note that this will not prevent reception of + * big frames, they will be split into multiple buffers + * anyway + */ + priv->hw_mtu = actual_mtu; + + /* + * align rx buffer size to dma burst len, account 
FCS since + * it's appended + */ + priv->rx_skb_size = ALIGN(actual_mtu + ETH_FCS_LEN, + BCMENET_DMA_MAXBURST * 4); + return 0; +} + +/* + * adjust mtu, can't be called while device is running + */ +static int bcm_enet_change_mtu(struct net_device *dev, int new_mtu) +{ + int ret; + + if (netif_running(dev)) + return -EBUSY; + + ret = compute_hw_mtu(netdev_priv(dev), new_mtu); + if (ret) + return ret; + dev->mtu = new_mtu; + return 0; +} + +/* + * preinit hardware to allow mii operation while device is down + */ +static void bcm_enet_hw_preinit(struct bcm_enet_priv *priv) +{ + u32 val; + int limit; + + /* make sure mac is disabled */ + bcm_enet_disable_mac(priv); + + /* soft reset mac */ + val = ENET_CTL_SRESET_MASK; + enet_writel(priv, val, ENET_CTL_REG); + wmb(); + + limit = 1000; + do { + val = enet_readl(priv, ENET_CTL_REG); + if (!(val & ENET_CTL_SRESET_MASK)) + break; + udelay(1); + } while (limit--); + + /* select correct mii interface */ + val = enet_readl(priv, ENET_CTL_REG); + if (priv->use_external_mii) + val |= ENET_CTL_EPHYSEL_MASK; + else + val &= ~ENET_CTL_EPHYSEL_MASK; + enet_writel(priv, val, ENET_CTL_REG); + + /* turn on mdc clock */ + enet_writel(priv, (0x1f << ENET_MIISC_MDCFREQDIV_SHIFT) | + ENET_MIISC_PREAMBLEEN_MASK, ENET_MIISC_REG); + + /* set mib counters to self-clear when read */ + val = enet_readl(priv, ENET_MIBCTL_REG); + val |= ENET_MIBCTL_RDCLEAR_MASK; + enet_writel(priv, val, ENET_MIBCTL_REG); +} + +/* + * allocate netdevice, request register memory and register device. + */ +static int __devinit bcm_enet_probe(struct platform_device *pdev) +{ + struct bcm_enet_priv *priv; + struct net_device *dev; + struct bcm63xx_enet_platform_data *pd; + struct resource *res_mem, *res_irq, *res_irq_rx, *res_irq_tx; + struct mii_bus *bus; + const char *clk_name; + unsigned int iomem_size; + int i, ret, mdio_registered, mem_requested; + + /* stop if shared driver failed, assume driver->probe will be + * called in the same order we register devices (correct ?) 
*/ + if (!bcm_enet_shared_base) + return -ENODEV; + + mdio_registered = mem_requested = 0; + + res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); + res_irq_rx = platform_get_resource(pdev, IORESOURCE_IRQ, 1); + res_irq_tx = platform_get_resource(pdev, IORESOURCE_IRQ, 2); + if (!res_mem || !res_irq || !res_irq_rx || !res_irq_tx) + return -ENODEV; + + ret = 0; + dev = alloc_etherdev(sizeof(*priv)); + if (!dev) + return -ENOMEM; + priv = netdev_priv(dev); + memset(priv, 0, sizeof(*priv)); + + ret = compute_hw_mtu(priv, dev->mtu); + if (ret) + goto out; + + iomem_size = res_mem->end - res_mem->start + 1; + if (!request_mem_region(res_mem->start, iomem_size, "bcm63xx_enet")) { + ret = -EBUSY; + goto err; + } + mem_requested = 1; + + priv->base = ioremap(res_mem->start, iomem_size); + if (priv->base == NULL) { + ret = -ENOMEM; + goto err; + } + dev->irq = priv->irq = res_irq->start; + priv->irq_rx = res_irq_rx->start; + priv->irq_tx = res_irq_tx->start; + priv->mac_id = pdev->id; + + /* get rx & tx dma channel id for this mac */ + if (priv->mac_id == 0) { + priv->rx_chan = 0; + priv->tx_chan = 1; + clk_name = "enet0"; + } else { + priv->rx_chan = 2; + priv->tx_chan = 3; + clk_name = "enet1"; + } + + priv->mac_clk = clk_get(&pdev->dev, clk_name); + if (IS_ERR(priv->mac_clk)) { + ret = PTR_ERR(priv->mac_clk); + priv->mac_clk = NULL; + goto err; + } + clk_enable(priv->mac_clk); + + /* initialize default and fetch platform data */ + priv->rx_ring_size = BCMENET_DEF_RX_DESC; + priv->tx_ring_size = BCMENET_DEF_TX_DESC; + + pd = pdev->dev.platform_data; + if (pd) { + memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN); + priv->has_phy = pd->has_phy; + priv->phy_id = pd->phy_id; + priv->has_phy_interrupt = pd->has_phy_interrupt; + priv->phy_interrupt = pd->phy_interrupt; + priv->use_external_mii = !pd->use_internal_phy; + priv->pause_auto = pd->pause_auto; + priv->pause_rx = pd->pause_rx; + priv->pause_tx = pd->pause_tx; + priv->force_duplex_full = pd->force_duplex_full; + priv->force_speed_100 = pd->force_speed_100; + } + + if (priv->mac_id == 0 && priv->has_phy && !priv->use_external_mii) { + /* using internal PHY, enable clock */ + priv->phy_clk = clk_get(&pdev->dev, "ephy"); + if (IS_ERR(priv->phy_clk)) { + ret = PTR_ERR(priv->phy_clk); + priv->phy_clk = NULL; + goto err; + } + clk_enable(priv->phy_clk); + } + + /* do minimal hardware init to be able to probe mii bus */ + bcm_enet_hw_preinit(priv); + + /* MII bus registration */ + if (priv->has_phy) { + bus = &priv->mii_bus; + bus->name = "bcm63xx_enet MII bus"; +#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) + bus->dev = &pdev->dev; +#else + bus->parent = &pdev->dev; +#endif + bus->priv = priv; + bus->read = bcm_enet_mdio_read_phylib; + bus->write = bcm_enet_mdio_write_phylib; + sprintf(bus->id, "%d", priv->mac_id); + + /* only probe bus where we think the PHY is, because + * the mdio read operation return 0 instead of 0xffff + * if a slave is not present on hw */ + bus->phy_mask = ~(1 << priv->phy_id); + + bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); + if (!bus->irq) { + ret = -ENOMEM; + goto err; + } + + if (priv->has_phy_interrupt) + bus->irq[priv->phy_id] = priv->phy_interrupt; + else + bus->irq[priv->phy_id] = PHY_POLL; + + ret = mdiobus_register(bus); + if (ret) { + dev_err(&pdev->dev, "unable to register mdio bus\n"); + goto err; + } + mdio_registered = 1; + } else { + + /* run platform code to initialize PHY device */ + if (pd->mii_config && + pd->mii_config(dev, 
1, bcm_enet_mdio_read_mii, + bcm_enet_mdio_write_mii)) { + dev_err(&pdev->dev, "unable to configure mdio bus\n"); + goto err; + } + } + + spin_lock_init(&priv->rx_lock); + + /* init rx timeout (used for oom) */ + init_timer(&priv->rx_timeout); + priv->rx_timeout.function = bcm_enet_refill_rx_timer; + priv->rx_timeout.data = (unsigned long)dev; + + /* init the mib update lock&work */ + mutex_init(&priv->mib_update_lock); + INIT_WORK(&priv->mib_update_task, bcm_enet_update_mib_counters_defer); + + /* zero mib counters */ + for (i = 0; i < ENET_MIB_REG_COUNT; i++) + enet_writel(priv, 0, ENET_MIB_REG(i)); + + /* register netdevice */ + dev->open = bcm_enet_open; + dev->stop = bcm_enet_stop; + dev->hard_start_xmit = bcm_enet_start_xmit; + dev->get_stats = bcm_enet_get_stats; + dev->set_mac_address = bcm_enet_set_mac_address; + dev->set_multicast_list = bcm_enet_set_multicast_list; + netif_napi_add(dev, &priv->napi, bcm_enet_poll, 16); + dev->do_ioctl = bcm_enet_ioctl; +#ifdef CONFIG_NET_POLL_CONTROLLER + dev->poll_controller = bcm_enet_netpoll; +#endif + dev->change_mtu = bcm_enet_change_mtu; + + SET_ETHTOOL_OPS(dev, &bcm_enet_ethtool_ops); + SET_NETDEV_DEV(dev, &pdev->dev); + + ret = register_netdev(dev); + if (ret) + goto err; + + platform_set_drvdata(pdev, dev); + priv->pdev = pdev; + priv->net_dev = dev; + + return 0; + +err: + if (mem_requested) + release_mem_region(res_mem->start, iomem_size); + if (mdio_registered) + mdiobus_unregister(&priv->mii_bus); + kfree(priv->mii_bus.irq); + if (priv->mac_clk) { + clk_disable(priv->mac_clk); + clk_put(priv->mac_clk); + } + if (priv->phy_clk) { + clk_disable(priv->phy_clk); + clk_put(priv->phy_clk); + } + if (priv->base) { + /* turn off mdc clock */ + enet_writel(priv, 0, ENET_MIISC_REG); + iounmap(priv->base); + } +out: + free_netdev(dev); + return ret; +} + + +/* + * exit func, stops hardware and unregisters netdevice + */ +static int __devexit bcm_enet_remove(struct platform_device *pdev) +{ + struct bcm_enet_priv *priv; + struct net_device *dev; + struct resource *res; + + /* stop netdevice */ + dev = platform_get_drvdata(pdev); + priv = netdev_priv(dev); + unregister_netdev(dev); + + /* turn off mdc clock */ + enet_writel(priv, 0, ENET_MIISC_REG); + + if (priv->has_phy) { + mdiobus_unregister(&priv->mii_bus); + kfree(priv->mii_bus.irq); + } else { + struct bcm63xx_enet_platform_data *pd; + + pd = pdev->dev.platform_data; + if (pd && pd->mii_config) + pd->mii_config(dev, 0, bcm_enet_mdio_read_mii, + bcm_enet_mdio_write_mii); + } + + /* release device resources */ + iounmap(priv->base); + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + release_mem_region(res->start, res->end - res->start + 1); + + /* disable hw block clocks */ + if (priv->phy_clk) { + clk_disable(priv->phy_clk); + clk_put(priv->phy_clk); + } + clk_disable(priv->mac_clk); + clk_put(priv->mac_clk); + + platform_set_drvdata(pdev, NULL); + free_netdev(dev); + return 0; +} + +struct platform_driver bcm63xx_enet_driver = { + .probe = bcm_enet_probe, + .remove = __devexit_p(bcm_enet_remove), + .driver = { + .name = "bcm63xx_enet", + .owner = THIS_MODULE, + }, +}; + +/* + * reserve & remap memory space shared between all macs + */ +static int __devinit bcm_enet_shared_probe(struct platform_device *pdev) +{ + struct resource *res; + unsigned int iomem_size; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + return -ENODEV; + + iomem_size = res->end - res->start + 1; + if (!request_mem_region(res->start, iomem_size, "bcm63xx_enet_dma")) + return -EBUSY; + + 
bcm_enet_shared_base = ioremap(res->start, iomem_size); + if (!bcm_enet_shared_base) { + release_mem_region(res->start, iomem_size); + return -ENOMEM; + } + return 0; +} + +static int __devexit bcm_enet_shared_remove(struct platform_device *pdev) +{ + struct resource *res; + + iounmap(bcm_enet_shared_base); + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + release_mem_region(res->start, res->end - res->start + 1); + return 0; +} + +/* + * this "shared" driver is needed because both macs share a single + * address space + */ +struct platform_driver bcm63xx_enet_shared_driver = { + .probe = bcm_enet_shared_probe, + .remove = __devexit_p(bcm_enet_shared_remove), + .driver = { + .name = "bcm63xx_enet_shared", + .owner = THIS_MODULE, + }, +}; + +/* + * entry point + */ +static int __init bcm_enet_init(void) +{ + int ret; + + ret = platform_driver_register(&bcm63xx_enet_shared_driver); + if (ret) + return ret; + + ret = platform_driver_register(&bcm63xx_enet_driver); + if (ret) + platform_driver_unregister(&bcm63xx_enet_shared_driver); + + return ret; +} + +static void __exit bcm_enet_exit(void) +{ + platform_driver_unregister(&bcm63xx_enet_driver); + platform_driver_unregister(&bcm63xx_enet_shared_driver); +} + + +module_init(bcm_enet_init); +module_exit(bcm_enet_exit); + +MODULE_DESCRIPTION("BCM63xx internal ethernet mac driver"); +MODULE_AUTHOR("Maxime Bizon <mbizon@freebox.fr>"); +MODULE_LICENSE("GPL"); diff --git a/target/linux/brcm63xx/files-2.6.30/drivers/net/bcm63xx_enet.h b/target/linux/brcm63xx/files-2.6.30/drivers/net/bcm63xx_enet.h new file mode 100644 index 000000000..dc78f5ae2 --- /dev/null +++ b/target/linux/brcm63xx/files-2.6.30/drivers/net/bcm63xx_enet.h @@ -0,0 +1,303 @@ +#ifndef BCM63XX_ENET_H_ +#define BCM63XX_ENET_H_ + +#include <linux/types.h> +#include <linux/mii.h> +#include <linux/mutex.h> +#include <linux/phy.h> +#include <linux/platform_device.h> + +#include <bcm63xx_regs.h> +#include <bcm63xx_irq.h> +#include <bcm63xx_io.h> + +/* default number of descriptor */ +#define BCMENET_DEF_RX_DESC 64 +#define BCMENET_DEF_TX_DESC 32 + +/* maximum burst len for dma (4 bytes unit) */ +#define BCMENET_DMA_MAXBURST 16 + +/* tx transmit threshold (4 bytes unit), fifo is 256 bytes, the value + * must be low enough so that a DMA transfer of above burst length can + * not overflow the fifo */ +#define BCMENET_TX_FIFO_TRESH 32 + +/* + * hardware maximum rx/tx packet size including FCS, max mtu is + * actually 2047, but if we set max rx size register to 2047 we won't + * get overflow information if packet size is 2048 or above + */ +#define BCMENET_MAX_MTU 2046 + +/* + * rx/tx dma descriptor + */ +struct bcm_enet_desc { + u32 len_stat; + u32 address; +}; + +#define DMADESC_LENGTH_SHIFT 16 +#define DMADESC_LENGTH_MASK (0xfff << DMADESC_LENGTH_SHIFT) +#define DMADESC_OWNER_MASK (1 << 15) +#define DMADESC_EOP_MASK (1 << 14) +#define DMADESC_SOP_MASK (1 << 13) +#define DMADESC_ESOP_MASK (DMADESC_EOP_MASK | DMADESC_SOP_MASK) +#define DMADESC_WRAP_MASK (1 << 12) + +#define DMADESC_UNDER_MASK (1 << 9) +#define DMADESC_APPEND_CRC (1 << 8) +#define DMADESC_OVSIZE_MASK (1 << 4) +#define DMADESC_RXER_MASK (1 << 2) +#define DMADESC_CRC_MASK (1 << 1) +#define DMADESC_OV_MASK (1 << 0) +#define DMADESC_ERR_MASK (DMADESC_UNDER_MASK | \ + DMADESC_OVSIZE_MASK | \ + DMADESC_RXER_MASK | \ + DMADESC_CRC_MASK | \ + DMADESC_OV_MASK) + + +/* + * MIB Counters register definitions +*/ +#define ETH_MIB_TX_GD_OCTETS 0 +#define ETH_MIB_TX_GD_PKTS 1 +#define ETH_MIB_TX_ALL_OCTETS 2 +#define 
ETH_MIB_TX_ALL_PKTS 3 +#define ETH_MIB_TX_BRDCAST 4 +#define ETH_MIB_TX_MULT 5 +#define ETH_MIB_TX_64 6 +#define ETH_MIB_TX_65_127 7 +#define ETH_MIB_TX_128_255 8 +#define ETH_MIB_TX_256_511 9 +#define ETH_MIB_TX_512_1023 10 +#define ETH_MIB_TX_1024_MAX 11 +#define ETH_MIB_TX_JAB 12 +#define ETH_MIB_TX_OVR 13 +#define ETH_MIB_TX_FRAG 14 +#define ETH_MIB_TX_UNDERRUN 15 +#define ETH_MIB_TX_COL 16 +#define ETH_MIB_TX_1_COL 17 +#define ETH_MIB_TX_M_COL 18 +#define ETH_MIB_TX_EX_COL 19 +#define ETH_MIB_TX_LATE 20 +#define ETH_MIB_TX_DEF 21 +#define ETH_MIB_TX_CRS 22 +#define ETH_MIB_TX_PAUSE 23 + +#define ETH_MIB_RX_GD_OCTETS 32 +#define ETH_MIB_RX_GD_PKTS 33 +#define ETH_MIB_RX_ALL_OCTETS 34 +#define ETH_MIB_RX_ALL_PKTS 35 +#define ETH_MIB_RX_BRDCAST 36 +#define ETH_MIB_RX_MULT 37 +#define ETH_MIB_RX_64 38 +#define ETH_MIB_RX_65_127 39 +#define ETH_MIB_RX_128_255 40 +#define ETH_MIB_RX_256_511 41 +#define ETH_MIB_RX_512_1023 42 +#define ETH_MIB_RX_1024_MAX 43 +#define ETH_MIB_RX_JAB 44 +#define ETH_MIB_RX_OVR 45 +#define ETH_MIB_RX_FRAG 46 +#define ETH_MIB_RX_DROP 47 +#define ETH_MIB_RX_CRC_ALIGN 48 +#define ETH_MIB_RX_UND 49 +#define ETH_MIB_RX_CRC 50 +#define ETH_MIB_RX_ALIGN 51 +#define ETH_MIB_RX_SYM 52 +#define ETH_MIB_RX_PAUSE 53 +#define ETH_MIB_RX_CNTRL 54 + + +struct bcm_enet_mib_counters { + u64 tx_gd_octets; + u32 tx_gd_pkts; + u32 tx_all_octets; + u32 tx_all_pkts; + u32 tx_brdcast; + u32 tx_mult; + u32 tx_64; + u32 tx_65_127; + u32 tx_128_255; + u32 tx_256_511; + u32 tx_512_1023; + u32 tx_1024_max; + u32 tx_jab; + u32 tx_ovr; + u32 tx_frag; + u32 tx_underrun; + u32 tx_col; + u32 tx_1_col; + u32 tx_m_col; + u32 tx_ex_col; + u32 tx_late; + u32 tx_def; + u32 tx_crs; + u32 tx_pause; + u64 rx_gd_octets; + u32 rx_gd_pkts; + u32 rx_all_octets; + u32 rx_all_pkts; + u32 rx_brdcast; + u32 rx_mult; + u32 rx_64; + u32 rx_65_127; + u32 rx_128_255; + u32 rx_256_511; + u32 rx_512_1023; + u32 rx_1024_max; + u32 rx_jab; + u32 rx_ovr; + u32 rx_frag; + u32 rx_drop; + u32 rx_crc_align; + u32 rx_und; + u32 rx_crc; + u32 rx_align; + u32 rx_sym; + u32 rx_pause; + u32 rx_cntrl; +}; + + +struct bcm_enet_priv { + + /* mac id (from platform device id) */ + int mac_id; + + /* base remapped address of device */ + void __iomem *base; + + /* mac irq, rx_dma irq, tx_dma irq */ + int irq; + int irq_rx; + int irq_tx; + + /* hw view of rx & tx dma ring */ + dma_addr_t rx_desc_dma; + dma_addr_t tx_desc_dma; + + /* allocated size (in bytes) for rx & tx dma ring */ + unsigned int rx_desc_alloc_size; + unsigned int tx_desc_alloc_size; + + + struct napi_struct napi; + + /* dma channel id for rx */ + int rx_chan; + + /* number of dma desc in rx ring */ + int rx_ring_size; + + /* cpu view of rx dma ring */ + struct bcm_enet_desc *rx_desc_cpu; + + /* current number of armed descriptor given to hardware for rx */ + int rx_desc_count; + + /* next rx descriptor to fetch from hardware */ + int rx_curr_desc; + + /* next dirty rx descriptor to refill */ + int rx_dirty_desc; + + /* size of allocated rx skbs */ + unsigned int rx_skb_size; + + /* list of skb given to hw for rx */ + struct sk_buff **rx_skb; + + /* used when rx skb allocation failed, so we defer rx queue + * refill */ + struct timer_list rx_timeout; + + /* lock rx_timeout against rx normal operation */ + spinlock_t rx_lock; + + + /* dma channel id for tx */ + int tx_chan; + + /* number of dma desc in tx ring */ + int tx_ring_size; + + /* cpu view of rx dma ring */ + struct bcm_enet_desc *tx_desc_cpu; + + /* number of available descriptor for tx */ + int tx_desc_count; 
+ + /* next tx descriptor avaiable */ + int tx_curr_desc; + + /* next dirty tx descriptor to reclaim */ + int tx_dirty_desc; + + /* list of skb given to hw for tx */ + struct sk_buff **tx_skb; + + /* lock used by tx reclaim and xmit */ + spinlock_t tx_lock; + + + /* set if internal phy is ignored and external mii interface + * is selected */ + int use_external_mii; + + /* set if a phy is connected, phy address must be known, + * probing is not possible */ + int has_phy; + int phy_id; + + /* set if connected phy has an associated irq */ + int has_phy_interrupt; + int phy_interrupt; + + /* used when a phy is connected (phylib used) */ + struct mii_bus mii_bus; + struct phy_device *phydev; + int old_link; + int old_duplex; + int old_pause; + + /* used when no phy is connected */ + int force_speed_100; + int force_duplex_full; + + /* pause parameters */ + int pause_auto; + int pause_rx; + int pause_tx; + + /* stats */ + struct net_device_stats stats; + struct bcm_enet_mib_counters mib; + + /* after mib interrupt, mib registers update is done in this + * work queue */ + struct work_struct mib_update_task; + + /* lock mib update between userspace request and workqueue */ + struct mutex mib_update_lock; + + /* mac clock */ + struct clk *mac_clk; + + /* phy clock if internal phy is used */ + struct clk *phy_clk; + + /* network device reference */ + struct net_device *net_dev; + + /* platform device reference */ + struct platform_device *pdev; + + /* maximum hardware transmit/receive size */ + unsigned int hw_mtu; +}; + +#endif /* ! BCM63XX_ENET_H_ */ diff --git a/target/linux/brcm63xx/files-2.6.30/drivers/net/phy/bcm63xx.c b/target/linux/brcm63xx/files-2.6.30/drivers/net/phy/bcm63xx.c new file mode 100644 index 000000000..4fed95e83 --- /dev/null +++ b/target/linux/brcm63xx/files-2.6.30/drivers/net/phy/bcm63xx.c @@ -0,0 +1,132 @@ +/* + * Driver for Broadcom 63xx SOCs integrated PHYs + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ +#include <linux/module.h> +#include <linux/phy.h> + +#define MII_BCM63XX_IR 0x1a /* interrupt register */ +#define MII_BCM63XX_IR_EN 0x4000 /* global interrupt enable */ +#define MII_BCM63XX_IR_DUPLEX 0x0800 /* duplex changed */ +#define MII_BCM63XX_IR_SPEED 0x0400 /* speed changed */ +#define MII_BCM63XX_IR_LINK 0x0200 /* link changed */ +#define MII_BCM63XX_IR_GMASK 0x0100 /* global interrupt mask */ + +MODULE_DESCRIPTION("Broadcom 63xx internal PHY driver"); +MODULE_AUTHOR("Maxime Bizon <mbizon@freebox.fr>"); +MODULE_LICENSE("GPL"); + +static int bcm63xx_config_init(struct phy_device *phydev) +{ + int reg, err; + + reg = phy_read(phydev, MII_BCM63XX_IR); + if (reg < 0) + return reg; + + /* Mask interrupts globally. */ + reg |= MII_BCM63XX_IR_GMASK; + err = phy_write(phydev, MII_BCM63XX_IR, reg); + if (err < 0) + return err; + + /* Unmask events we are interested in */ + reg = ~(MII_BCM63XX_IR_DUPLEX | + MII_BCM63XX_IR_SPEED | + MII_BCM63XX_IR_LINK) | + MII_BCM63XX_IR_EN; + err = phy_write(phydev, MII_BCM63XX_IR, reg); + if (err < 0) + return err; + return 0; +} + +static int bcm63xx_ack_interrupt(struct phy_device *phydev) +{ + int reg; + + /* Clear pending interrupts. 
*/ + reg = phy_read(phydev, MII_BCM63XX_IR); + if (reg < 0) + return reg; + + return 0; +} + +static int bcm63xx_config_intr(struct phy_device *phydev) +{ + int reg, err; + + reg = phy_read(phydev, MII_BCM63XX_IR); + if (reg < 0) + return reg; + + if (phydev->interrupts == PHY_INTERRUPT_ENABLED) + reg &= ~MII_BCM63XX_IR_GMASK; + else + reg |= MII_BCM63XX_IR_GMASK; + + err = phy_write(phydev, MII_BCM63XX_IR, reg); + return err; +} + +static struct phy_driver bcm63xx_1_driver = { + .phy_id = 0x00406000, + .phy_id_mask = 0xfffffc00, + .name = "Broadcom BCM63XX (1)", + /* ASYM_PAUSE bit is marked RO in datasheet, so don't cheat */ + .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause), + .flags = PHY_HAS_INTERRUPT, + .config_init = bcm63xx_config_init, + .config_aneg = genphy_config_aneg, + .read_status = genphy_read_status, + .ack_interrupt = bcm63xx_ack_interrupt, + .config_intr = bcm63xx_config_intr, + .driver = { .owner = THIS_MODULE }, +}; + +/* same phy as above, with just a different OUI */ +static struct phy_driver bcm63xx_2_driver = { + .phy_id = 0x002bdc00, + .phy_id_mask = 0xfffffc00, + .name = "Broadcom BCM63XX (2)", + .features = (PHY_BASIC_FEATURES | SUPPORTED_Pause), + .flags = PHY_HAS_INTERRUPT, + .config_init = bcm63xx_config_init, + .config_aneg = genphy_config_aneg, + .read_status = genphy_read_status, + .ack_interrupt = bcm63xx_ack_interrupt, + .config_intr = bcm63xx_config_intr, + .driver = { .owner = THIS_MODULE }, +}; + +static int __init bcm63xx_phy_init(void) +{ + int ret; + + ret = phy_driver_register(&bcm63xx_1_driver); + if (ret) + goto out_63xx_1; + ret = phy_driver_register(&bcm63xx_2_driver); + if (ret) + goto out_63xx_2; + return ret; + +out_63xx_2: + phy_driver_unregister(&bcm63xx_1_driver); +out_63xx_1: + return ret; +} + +static void __exit bcm63xx_phy_exit(void) +{ + phy_driver_unregister(&bcm63xx_1_driver); + phy_driver_unregister(&bcm63xx_2_driver); +} + +module_init(bcm63xx_phy_init); +module_exit(bcm63xx_phy_exit); diff --git a/target/linux/brcm63xx/files-2.6.30/drivers/pcmcia/bcm63xx_pcmcia.c b/target/linux/brcm63xx/files-2.6.30/drivers/pcmcia/bcm63xx_pcmcia.c new file mode 100644 index 000000000..383e322d0 --- /dev/null +++ b/target/linux/brcm63xx/files-2.6.30/drivers/pcmcia/bcm63xx_pcmcia.c @@ -0,0 +1,536 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr> + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/ioport.h> +#include <linux/timer.h> +#include <linux/platform_device.h> +#include <linux/delay.h> +#include <linux/pci.h> +#include <linux/gpio.h> + +#include <bcm63xx_regs.h> +#include <bcm63xx_io.h> +#include "bcm63xx_pcmcia.h" + +#define PFX "bcm63xx_pcmcia: " + +#ifdef CONFIG_CARDBUS +/* if cardbus is used, platform device needs reference to actual pci + * device */ +static struct pci_dev *bcm63xx_cb_dev; +#endif + +/* + * read/write helper for pcmcia regs + */ +static inline u32 pcmcia_readl(struct bcm63xx_pcmcia_socket *skt, u32 off) +{ + return bcm_readl(skt->base + off); +} + +static inline void pcmcia_writel(struct bcm63xx_pcmcia_socket *skt, + u32 val, u32 off) +{ + bcm_writel(val, skt->base + off); +} + +/* + * (Re-)Initialise the socket, turning on status interrupts and PCMCIA + * bus. This must wait for power to stabilise so that the card status + * signals report correctly. 
+ */ +static int bcm63xx_pcmcia_sock_init(struct pcmcia_socket *sock) +{ + struct bcm63xx_pcmcia_socket *skt; + skt = sock->driver_data; + return 0; +} + +/* + * Remove power on the socket, disable IRQs from the card. + * Turn off status interrupts, and disable the PCMCIA bus. + */ +static int bcm63xx_pcmcia_suspend(struct pcmcia_socket *sock) +{ + struct bcm63xx_pcmcia_socket *skt; + skt = sock->driver_data; + return 0; +} + +/* + * Implements the set_socket() operation for the in-kernel PCMCIA + * service (formerly SS_SetSocket in Card Services). We more or + * less punt all of this work and let the kernel handle the details + * of power configuration, reset, &c. We also record the value of + * `state' in order to regurgitate it to the PCMCIA core later. + */ +static int bcm63xx_pcmcia_set_socket(struct pcmcia_socket *sock, + socket_state_t *state) +{ + struct bcm63xx_pcmcia_socket *skt; + unsigned long flags; + u32 val; + + skt = sock->driver_data; + + spin_lock_irqsave(&skt->lock, flags); + + /* apply requested socket power */ + /* FIXME: hardware can't do this */ + + /* apply socket reset */ + val = pcmcia_readl(skt, PCMCIA_C1_REG); + if (state->flags & SS_RESET) + val |= PCMCIA_C1_RESET_MASK; + else + val &= ~PCMCIA_C1_RESET_MASK; + + /* reverse reset logic for cardbus card */ + if (skt->card_detected && (skt->card_type & CARD_CARDBUS)) + val ^= PCMCIA_C1_RESET_MASK; + + pcmcia_writel(skt, val, PCMCIA_C1_REG); + + /* keep requested state for event reporting */ + skt->requested_state = *state; + + spin_unlock_irqrestore(&skt->lock, flags); + + return 0; +} + +/* + * identity cardtype from VS[12] input, CD[12] input while only VS2 is + * floating, and CD[12] input while only VS1 is floating + */ +enum { + IN_VS1 = (1 << 0), + IN_VS2 = (1 << 1), + IN_CD1_VS2H = (1 << 2), + IN_CD2_VS2H = (1 << 3), + IN_CD1_VS1H = (1 << 4), + IN_CD2_VS1H = (1 << 5), +}; + +static const u8 vscd_to_cardtype[] = { + + /* VS1 float, VS2 float */ + [IN_VS1 | IN_VS2] = (CARD_PCCARD | CARD_5V), + + /* VS1 grounded, VS2 float */ + [IN_VS2] = (CARD_PCCARD | CARD_5V | CARD_3V), + + /* VS1 grounded, VS2 grounded */ + [0] = (CARD_PCCARD | CARD_5V | CARD_3V | CARD_XV), + + /* VS1 tied to CD1, VS2 float */ + [IN_VS1 | IN_VS2 | IN_CD1_VS1H] = (CARD_CARDBUS | CARD_3V), + + /* VS1 grounded, VS2 tied to CD2 */ + [IN_VS2 | IN_CD2_VS2H] = (CARD_CARDBUS | CARD_3V | CARD_XV), + + /* VS1 tied to CD2, VS2 grounded */ + [IN_VS1 | IN_CD2_VS1H] = (CARD_CARDBUS | CARD_3V | CARD_XV | CARD_YV), + + /* VS1 float, VS2 grounded */ + [IN_VS1] = (CARD_PCCARD | CARD_XV), + + /* VS1 float, VS2 tied to CD2 */ + [IN_VS1 | IN_VS2 | IN_CD2_VS2H] = (CARD_CARDBUS | CARD_3V), + + /* VS1 float, VS2 tied to CD1 */ + [IN_VS1 | IN_VS2 | IN_CD1_VS2H] = (CARD_CARDBUS | CARD_XV | CARD_YV), + + /* VS1 tied to CD2, VS2 float */ + [IN_VS1 | IN_VS2 | IN_CD2_VS1H] = (CARD_CARDBUS | CARD_YV), + + /* VS2 grounded, VS1 is tied to CD1, CD2 is grounded */ + [IN_VS1 | IN_CD1_VS1H] = 0, /* ignore cardbay */ +}; + +/* + * poll hardware to check card insertion status + */ +static unsigned int __get_socket_status(struct bcm63xx_pcmcia_socket *skt) +{ + unsigned int stat; + u32 val; + + stat = 0; + + /* check CD for card presence */ + val = pcmcia_readl(skt, PCMCIA_C1_REG); + + if (!(val & PCMCIA_C1_CD1_MASK) && !(val & PCMCIA_C1_CD2_MASK)) + stat |= SS_DETECT; + + /* if new insertion, detect cardtype */ + if ((stat & SS_DETECT) && !skt->card_detected) { + unsigned int stat = 0; + + /* float VS1, float VS2 */ + val |= PCMCIA_C1_VS1OE_MASK; + val |= PCMCIA_C1_VS2OE_MASK; 
+ pcmcia_writel(skt, val, PCMCIA_C1_REG); + + /* wait for output to stabilize and read VS[12] */ + udelay(10); + val = pcmcia_readl(skt, PCMCIA_C1_REG); + stat |= (val & PCMCIA_C1_VS1_MASK) ? IN_VS1 : 0; + stat |= (val & PCMCIA_C1_VS2_MASK) ? IN_VS2 : 0; + + /* drive VS1 low, float VS2 */ + val &= ~PCMCIA_C1_VS1OE_MASK; + val |= PCMCIA_C1_VS2OE_MASK; + pcmcia_writel(skt, val, PCMCIA_C1_REG); + + /* wait for output to stabilize and read CD[12] */ + udelay(10); + val = pcmcia_readl(skt, PCMCIA_C1_REG); + stat |= (val & PCMCIA_C1_CD1_MASK) ? IN_CD1_VS2H : 0; + stat |= (val & PCMCIA_C1_CD2_MASK) ? IN_CD2_VS2H : 0; + + /* float VS1, drive VS2 low */ + val |= PCMCIA_C1_VS1OE_MASK; + val &= ~PCMCIA_C1_VS2OE_MASK; + pcmcia_writel(skt, val, PCMCIA_C1_REG); + + /* wait for output to stabilize and read CD[12] */ + udelay(10); + val = pcmcia_readl(skt, PCMCIA_C1_REG); + stat |= (val & PCMCIA_C1_CD1_MASK) ? IN_CD1_VS1H : 0; + stat |= (val & PCMCIA_C1_CD2_MASK) ? IN_CD2_VS1H : 0; + + /* guess cardtype from all this */ + skt->card_type = vscd_to_cardtype[stat]; + if (!skt->card_type) + printk(KERN_ERR PFX "unsupported card type\n"); + + /* drive both VS pin to 0 again */ + val &= ~(PCMCIA_C1_VS1OE_MASK | PCMCIA_C1_VS2OE_MASK); + + /* enable correct logic */ + val &= ~(PCMCIA_C1_EN_PCMCIA_MASK | PCMCIA_C1_EN_CARDBUS_MASK); + if (skt->card_type & CARD_PCCARD) + val |= PCMCIA_C1_EN_PCMCIA_MASK; + else + val |= PCMCIA_C1_EN_CARDBUS_MASK; + + pcmcia_writel(skt, val, PCMCIA_C1_REG); + } + skt->card_detected = (stat & SS_DETECT) ? 1 : 0; + + /* report card type/voltage */ + if (skt->card_type & CARD_CARDBUS) + stat |= SS_CARDBUS; + if (skt->card_type & CARD_3V) + stat |= SS_3VCARD; + if (skt->card_type & CARD_XV) + stat |= SS_XVCARD; + stat |= SS_POWERON; + + if (gpio_get_value(skt->pd->ready_gpio)) + stat |= SS_READY; + + return stat; +} + +/* + * core request to get current socket status + */ +static int bcm63xx_pcmcia_get_status(struct pcmcia_socket *sock, + unsigned int *status) +{ + struct bcm63xx_pcmcia_socket *skt; + + skt = sock->driver_data; + + spin_lock_bh(&skt->lock); + *status = __get_socket_status(skt); + spin_unlock_bh(&skt->lock); + + return 0; +} + +/* + * socket polling timer callback + */ +static void bcm63xx_pcmcia_poll(unsigned long data) +{ + struct bcm63xx_pcmcia_socket *skt; + unsigned int stat, events; + + skt = (struct bcm63xx_pcmcia_socket *)data; + + spin_lock_bh(&skt->lock); + + stat = __get_socket_status(skt); + + /* keep only changed bits, and mask with required one from the + * core */ + events = (stat ^ skt->old_status) & skt->requested_state.csc_mask; + skt->old_status = stat; + spin_unlock_bh(&skt->lock); + + if (events) + pcmcia_parse_events(&skt->socket, events); + + mod_timer(&skt->timer, + jiffies + msecs_to_jiffies(BCM63XX_PCMCIA_POLL_RATE)); +} + +static int bcm63xx_pcmcia_set_io_map(struct pcmcia_socket *sock, + struct pccard_io_map *map) +{ + /* this doesn't seem to be called by pcmcia layer if static + * mapping is used */ + return 0; +} + +static int bcm63xx_pcmcia_set_mem_map(struct pcmcia_socket *sock, + struct pccard_mem_map *map) +{ + struct bcm63xx_pcmcia_socket *skt; + struct resource *res; + + skt = sock->driver_data; + if (map->flags & MAP_ATTRIB) + res = skt->attr_res; + else + res = skt->common_res; + + map->static_start = res->start + map->card_start; + return 0; +} + +static struct pccard_operations bcm63xx_pcmcia_operations = { + .init = bcm63xx_pcmcia_sock_init, + .suspend = bcm63xx_pcmcia_suspend, + .get_status = bcm63xx_pcmcia_get_status, + 
.set_socket = bcm63xx_pcmcia_set_socket, + .set_io_map = bcm63xx_pcmcia_set_io_map, + .set_mem_map = bcm63xx_pcmcia_set_mem_map, +}; + +/* + * register pcmcia socket to core + */ +static int bcm63xx_drv_pcmcia_probe(struct platform_device *pdev) +{ + struct bcm63xx_pcmcia_socket *skt; + struct pcmcia_socket *sock; + struct resource *res, *irq_res; + unsigned int regmem_size = 0, iomem_size = 0; + u32 val; + int ret; + + skt = kzalloc(sizeof(*skt), GFP_KERNEL); + if (!skt) + return -ENOMEM; + spin_lock_init(&skt->lock); + sock = &skt->socket; + sock->driver_data = skt; + + /* make sure we have all resources we need */ + skt->common_res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + skt->attr_res = platform_get_resource(pdev, IORESOURCE_MEM, 2); + irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); + skt->pd = pdev->dev.platform_data; + if (!skt->common_res || !skt->attr_res || !irq_res || !skt->pd) { + ret = -EINVAL; + goto err; + } + + /* remap pcmcia registers */ + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + regmem_size = res->end - res->start + 1; + if (!request_mem_region(res->start, regmem_size, "bcm63xx_pcmcia")) { + ret = -EINVAL; + goto err; + } + skt->reg_res = res; + + skt->base = ioremap(res->start, regmem_size); + if (!skt->base) { + ret = -ENOMEM; + goto err; + } + + /* remap io registers */ + res = platform_get_resource(pdev, IORESOURCE_MEM, 3); + iomem_size = res->end - res->start + 1; + skt->io_base = ioremap(res->start, iomem_size); + if (!skt->io_base) { + ret = -ENOMEM; + goto err; + } + + /* resources are static */ + sock->resource_ops = &pccard_static_ops; + sock->ops = &bcm63xx_pcmcia_operations; + sock->owner = THIS_MODULE; + sock->dev.parent = &pdev->dev; + sock->features = SS_CAP_STATIC_MAP | SS_CAP_PCCARD; + sock->io_offset = (unsigned long)skt->io_base; + sock->pci_irq = irq_res->start; + +#ifdef CONFIG_CARDBUS + sock->cb_dev = bcm63xx_cb_dev; + if (bcm63xx_cb_dev) + sock->features |= SS_CAP_CARDBUS; +#endif + + /* assume common & attribute memory have the same size */ + sock->map_size = skt->common_res->end - skt->common_res->start + 1; + + /* initialize polling timer */ + setup_timer(&skt->timer, bcm63xx_pcmcia_poll, (unsigned long)skt); + + /* initialize pcmcia control register, drive VS[12] to 0, + * leave CB IDSEL to the old value since it is set by the PCI + * layer */ + val = pcmcia_readl(skt, PCMCIA_C1_REG); + val &= PCMCIA_C1_CBIDSEL_MASK; + val |= PCMCIA_C1_EN_PCMCIA_GPIO_MASK; + pcmcia_writel(skt, val, PCMCIA_C1_REG); + + /* FIXME set correct pcmcia timings */ + val = PCMCIA_C2_DATA16_MASK; + val |= 10 << PCMCIA_C2_RWCOUNT_SHIFT; + val |= 6 << PCMCIA_C2_INACTIVE_SHIFT; + val |= 3 << PCMCIA_C2_SETUP_SHIFT; + val |= 3 << PCMCIA_C2_HOLD_SHIFT; + pcmcia_writel(skt, val, PCMCIA_C2_REG); + + /* request and setup ready gpio */ + ret = gpio_request(skt->pd->ready_gpio, "bcm63xx_pcmcia"); + if (ret < 0) + goto err; + + ret = gpio_direction_input(skt->pd->ready_gpio); + if (ret < 0) + goto err_gpio; + + ret = pcmcia_register_socket(sock); + if (ret) + goto err_gpio; + + /* start polling socket */ + mod_timer(&skt->timer, + jiffies + msecs_to_jiffies(BCM63XX_PCMCIA_POLL_RATE)); + + platform_set_drvdata(pdev, skt); + return 0; + +err_gpio: + gpio_free(skt->pd->ready_gpio); + +err: + if (skt->io_base) + iounmap(skt->io_base); + if (skt->base) + iounmap(skt->base); + if (skt->reg_res) + release_mem_region(skt->reg_res->start, regmem_size); + kfree(skt); + return ret; +} + +static int bcm63xx_drv_pcmcia_remove(struct platform_device *pdev) +{ + 
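+ /* undo what probe set up: stop status polling, unmap registers, release the register memory region and the ready gpio */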
struct bcm63xx_pcmcia_socket *skt; + struct resource *res; + + skt = platform_get_drvdata(pdev); + del_timer_sync(&skt->timer); + iounmap(skt->base); + iounmap(skt->io_base); + res = skt->reg_res; + release_mem_region(res->start, res->end - res->start + 1); + gpio_free(skt->pd->ready_gpio); + platform_set_drvdata(pdev, NULL); + kfree(skt); + return 0; +} + +struct platform_driver bcm63xx_pcmcia_driver = { + .probe = bcm63xx_drv_pcmcia_probe, + .remove = __devexit_p(bcm63xx_drv_pcmcia_remove), + .driver = { + .name = "bcm63xx_pcmcia", + .owner = THIS_MODULE, + }, +}; + +#ifdef CONFIG_CARDBUS +static int __devinit bcm63xx_cb_probe(struct pci_dev *dev, + const struct pci_device_id *id) +{ + /* keep pci device */ + bcm63xx_cb_dev = dev; + return platform_driver_register(&bcm63xx_pcmcia_driver); +} + +static void __devexit bcm63xx_cb_exit(struct pci_dev *dev) +{ + platform_driver_unregister(&bcm63xx_pcmcia_driver); + bcm63xx_cb_dev = NULL; +} + +static struct pci_device_id bcm63xx_cb_table[] = { + { + .vendor = PCI_VENDOR_ID_BROADCOM, + .device = PCI_ANY_ID, + .subvendor = PCI_VENDOR_ID_BROADCOM, + .subdevice = PCI_ANY_ID, + .class = PCI_CLASS_BRIDGE_CARDBUS << 8, + .class_mask = ~0, + }, + {} +}; + +MODULE_DEVICE_TABLE(pci, bcm63xx_cb_table); + +static struct pci_driver bcm63xx_cardbus_driver = { + .name = "yenta_cardbus", + .id_table = bcm63xx_cb_table, + .probe = bcm63xx_cb_probe, + .remove = __devexit_p(bcm63xx_cb_exit), +}; +#endif + +/* + * if cardbus support is enabled, register our platform device after + * our fake cardbus bridge has been registered + */ +static int __init bcm63xx_pcmcia_init(void) +{ +#ifdef CONFIG_CARDBUS + return pci_register_driver(&bcm63xx_cardbus_driver); +#else + return platform_driver_register(&bcm63xx_pcmcia_driver); +#endif +} + +static void __exit bcm63xx_pcmcia_exit(void) +{ +#ifdef CONFIG_CARDBUS + return pci_unregister_driver(&bcm63xx_cardbus_driver); +#else + platform_driver_unregister(&bcm63xx_pcmcia_driver); +#endif +} + +module_init(bcm63xx_pcmcia_init); +module_exit(bcm63xx_pcmcia_exit); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Maxime Bizon <mbizon@freebox.fr>"); +MODULE_DESCRIPTION("Linux PCMCIA Card Services: bcm63xx Socket Controller"); diff --git a/target/linux/brcm63xx/files-2.6.30/drivers/pcmcia/bcm63xx_pcmcia.h b/target/linux/brcm63xx/files-2.6.30/drivers/pcmcia/bcm63xx_pcmcia.h new file mode 100644 index 000000000..85de86696 --- /dev/null +++ b/target/linux/brcm63xx/files-2.6.30/drivers/pcmcia/bcm63xx_pcmcia.h @@ -0,0 +1,65 @@ +#ifndef BCM63XX_PCMCIA_H_ +#define BCM63XX_PCMCIA_H_ + +#include <linux/types.h> +#include <linux/timer.h> +#include <pcmcia/ss.h> +#include <bcm63xx_dev_pcmcia.h> + +/* socket polling rate in ms */ +#define BCM63XX_PCMCIA_POLL_RATE 500 + +enum { + CARD_CARDBUS = (1 << 0), + + CARD_PCCARD = (1 << 1), + + CARD_5V = (1 << 2), + + CARD_3V = (1 << 3), + + CARD_XV = (1 << 4), + + CARD_YV = (1 << 5), +}; + +struct bcm63xx_pcmcia_socket { + struct pcmcia_socket socket; + + /* platform specific data */ + struct bcm63xx_pcmcia_platform_data *pd; + + /* all regs access are protected by this spinlock */ + spinlock_t lock; + + /* pcmcia registers resource */ + struct resource *reg_res; + + /* base remapped address of registers */ + void __iomem *base; + + /* whether a card is detected at the moment */ + int card_detected; + + /* type of detected card (mask of above enum) */ + u8 card_type; + + /* keep last socket status to implement event reporting */ + unsigned int old_status; + + /* backup of requested socket state */ + 
socket_state_t requested_state; + + /* timer used for socket status polling */ + struct timer_list timer; + + /* attribute/common memory resources */ + struct resource *attr_res; + struct resource *common_res; + struct resource *io_res; + + /* base address of io memory */ + void __iomem *io_base; +}; + +#endif /* BCM63XX_PCMCIA_H_ */ diff --git a/target/linux/brcm63xx/files-2.6.30/drivers/serial/bcm63xx_uart.c b/target/linux/brcm63xx/files-2.6.30/drivers/serial/bcm63xx_uart.c new file mode 100644 index 000000000..606f4d68c --- /dev/null +++ b/target/linux/brcm63xx/files-2.6.30/drivers/serial/bcm63xx_uart.c @@ -0,0 +1,890 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. + * + * Derived from many drivers using generic_serial interface. + * + * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr> + * + * Serial driver for BCM63xx integrated UART. + * + * Hardware flow control was _not_ tested since I only have RX/TX on + * my board. + */ + +#if defined(CONFIG_SERIAL_BCM63XX_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ) +#define SUPPORT_SYSRQ +#endif + +#include <linux/kernel.h> +#include <linux/platform_device.h> +#include <linux/init.h> +#include <linux/delay.h> +#include <linux/module.h> +#include <linux/console.h> +#include <linux/clk.h> +#include <linux/tty.h> +#include <linux/tty_flip.h> +#include <linux/sysrq.h> +#include <linux/serial.h> +#include <linux/serial_core.h> + +#include <bcm63xx_clk.h> +#include <bcm63xx_irq.h> +#include <bcm63xx_regs.h> +#include <bcm63xx_io.h> + +#define BCM63XX_NR_UARTS 1 + +static struct uart_port ports[BCM63XX_NR_UARTS]; + +/* + * rx interrupt mask / stat + * + * mask: + * - rx fifo full + * - rx fifo above threshold + * - rx fifo not empty for too long + */ +#define UART_RX_INT_MASK (UART_IR_MASK(UART_IR_RXOVER) | \ + UART_IR_MASK(UART_IR_RXTHRESH) | \ + UART_IR_MASK(UART_IR_RXTIMEOUT)) + +#define UART_RX_INT_STAT (UART_IR_STAT(UART_IR_RXOVER) | \ + UART_IR_STAT(UART_IR_RXTHRESH) | \ + UART_IR_STAT(UART_IR_RXTIMEOUT)) + +/* + * tx interrupt mask / stat + * + * mask: + * - tx fifo empty + * - tx fifo below threshold + */ +#define UART_TX_INT_MASK (UART_IR_MASK(UART_IR_TXEMPTY) | \ + UART_IR_MASK(UART_IR_TXTRESH)) + +#define UART_TX_INT_STAT (UART_IR_STAT(UART_IR_TXEMPTY) | \ + UART_IR_STAT(UART_IR_TXTRESH)) + +/* + * external input interrupt + * + * mask: any edge on CTS, DCD + */ +#define UART_EXTINP_INT_MASK (UART_EXTINP_IRMASK(UART_EXTINP_IR_CTS) | \ + UART_EXTINP_IRMASK(UART_EXTINP_IR_DCD)) + +/* + * handy uart register accessor + */ +static inline unsigned int bcm_uart_readl(struct uart_port *port, + unsigned int offset) +{ + return bcm_readl(port->membase + offset); +} + +static inline void bcm_uart_writel(struct uart_port *port, + unsigned int value, unsigned int offset) +{ + bcm_writel(value, port->membase + offset); +} + +/* + * serial core request to check if uart tx fifo is empty + */ +static unsigned int bcm_uart_tx_empty(struct uart_port *port) +{ + unsigned int val; + + val = bcm_uart_readl(port, UART_IR_REG); + return (val & UART_IR_STAT(UART_IR_TXEMPTY)) ? 
1 : 0; +} + +/* + * serial core request to set RTS and DTR pin state and loopback mode + */ +static void bcm_uart_set_mctrl(struct uart_port *port, unsigned int mctrl) +{ + unsigned int val; + + val = bcm_uart_readl(port, UART_MCTL_REG); + val &= ~(UART_MCTL_DTR_MASK | UART_MCTL_RTS_MASK); + /* invert of written value is reflected on the pin */ + if (!(mctrl & TIOCM_DTR)) + val |= UART_MCTL_DTR_MASK; + if (!(mctrl & TIOCM_RTS)) + val |= UART_MCTL_RTS_MASK; + bcm_uart_writel(port, val, UART_MCTL_REG); + + val = bcm_uart_readl(port, UART_CTL_REG); + if (mctrl & TIOCM_LOOP) + val |= UART_CTL_LOOPBACK_MASK; + else + val &= ~UART_CTL_LOOPBACK_MASK; + bcm_uart_writel(port, val, UART_CTL_REG); +} + +/* + * serial core request to return RI, CTS, DCD and DSR pin state + */ +static unsigned int bcm_uart_get_mctrl(struct uart_port *port) +{ + unsigned int val, mctrl; + + mctrl = 0; + val = bcm_uart_readl(port, UART_EXTINP_REG); + if (val & UART_EXTINP_RI_MASK) + mctrl |= TIOCM_RI; + if (val & UART_EXTINP_CTS_MASK) + mctrl |= TIOCM_CTS; + if (val & UART_EXTINP_DCD_MASK) + mctrl |= TIOCM_CD; + if (val & UART_EXTINP_DSR_MASK) + mctrl |= TIOCM_DSR; + return mctrl; +} + +/* + * serial core request to disable tx ASAP (used for flow control) + */ +static void bcm_uart_stop_tx(struct uart_port *port) +{ + unsigned int val; + + val = bcm_uart_readl(port, UART_CTL_REG); + val &= ~(UART_CTL_TXEN_MASK); + bcm_uart_writel(port, val, UART_CTL_REG); + + val = bcm_uart_readl(port, UART_IR_REG); + val &= ~UART_TX_INT_MASK; + bcm_uart_writel(port, val, UART_IR_REG); +} + +/* + * serial core request to (re)enable tx + */ +static void bcm_uart_start_tx(struct uart_port *port) +{ + unsigned int val; + + val = bcm_uart_readl(port, UART_IR_REG); + val |= UART_TX_INT_MASK; + bcm_uart_writel(port, val, UART_IR_REG); + + val = bcm_uart_readl(port, UART_CTL_REG); + val |= UART_CTL_TXEN_MASK; + bcm_uart_writel(port, val, UART_CTL_REG); +} + +/* + * serial core request to stop rx, called before port shutdown + */ +static void bcm_uart_stop_rx(struct uart_port *port) +{ + unsigned int val; + + val = bcm_uart_readl(port, UART_IR_REG); + val &= ~UART_RX_INT_MASK; + bcm_uart_writel(port, val, UART_IR_REG); +} + +/* + * serial core request to enable modem status interrupt reporting + */ +static void bcm_uart_enable_ms(struct uart_port *port) +{ + unsigned int val; + + val = bcm_uart_readl(port, UART_IR_REG); + val |= UART_IR_MASK(UART_IR_EXTIP); + bcm_uart_writel(port, val, UART_IR_REG); +} + +/* + * serial core request to start/stop emitting break char + */ +static void bcm_uart_break_ctl(struct uart_port *port, int ctl) +{ + unsigned long flags; + unsigned int val; + + spin_lock_irqsave(&port->lock, flags); + + val = bcm_uart_readl(port, UART_CTL_REG); + if (ctl) + val |= UART_CTL_XMITBRK_MASK; + else + val &= ~UART_CTL_XMITBRK_MASK; + bcm_uart_writel(port, val, UART_CTL_REG); + + spin_unlock_irqrestore(&port->lock, flags); +} + +/* + * return port type in string format + */ +static const char *bcm_uart_type(struct uart_port *port) +{ + return (port->type == PORT_BCM63XX) ? 
"bcm63xx_uart" : NULL; +} + +/* + * read all chars in rx fifo and send them to core + */ +static void bcm_uart_do_rx(struct uart_port *port) +{ + struct tty_struct *tty; + unsigned int max_count; + + /* limit number of char read in interrupt, should not be + * higher than fifo size anyway since we're much faster than + * serial port */ + max_count = 32; + tty = port->info->port.tty; + do { + unsigned int iestat, c, cstat; + char flag; + + /* get overrun/fifo empty information from ier + * register */ + iestat = bcm_uart_readl(port, UART_IR_REG); + if (!(iestat & UART_IR_STAT(UART_IR_RXNOTEMPTY))) + break; + + cstat = c = bcm_uart_readl(port, UART_FIFO_REG); + port->icount.rx++; + flag = TTY_NORMAL; + c &= 0xff; + + if (unlikely((cstat & UART_FIFO_ANYERR_MASK))) { + /* do stats first */ + if (cstat & UART_FIFO_BRKDET_MASK) { + port->icount.brk++; + if (uart_handle_break(port)) + continue; + } + + if (cstat & UART_FIFO_PARERR_MASK) + port->icount.parity++; + if (cstat & UART_FIFO_FRAMEERR_MASK) + port->icount.frame++; + + /* update flag wrt read_status_mask */ + cstat &= port->read_status_mask; + if (cstat & UART_FIFO_BRKDET_MASK) + flag = TTY_BREAK; + if (cstat & UART_FIFO_FRAMEERR_MASK) + flag = TTY_FRAME; + if (cstat & UART_FIFO_PARERR_MASK) + flag = TTY_PARITY; + } + + if (uart_handle_sysrq_char(port, c)) + continue; + + if (unlikely(iestat & UART_IR_STAT(UART_IR_RXOVER))) { + port->icount.overrun++; + tty_insert_flip_char(tty, 0, TTY_OVERRUN); + } + + if ((cstat & port->ignore_status_mask) == 0) + tty_insert_flip_char(tty, c, flag); + + } while (--max_count); + + tty_flip_buffer_push(tty); +} + +/* + * fill tx fifo with chars to send, stop when fifo is about to be full + * or when all chars have been sent. + */ +static void bcm_uart_do_tx(struct uart_port *port) +{ + struct circ_buf *xmit; + unsigned int val, max_count; + + if (port->x_char) { + bcm_uart_writel(port, port->x_char, UART_FIFO_REG); + port->icount.tx++; + port->x_char = 0; + return; + } + + if (uart_tx_stopped(port)) { + bcm_uart_stop_tx(port); + return; + } + + xmit = &port->info->xmit; + if (uart_circ_empty(xmit)) + goto txq_empty; + + val = bcm_uart_readl(port, UART_MCTL_REG); + val = (val & UART_MCTL_TXFIFOFILL_MASK) >> UART_MCTL_TXFIFOFILL_SHIFT; + max_count = port->fifosize - val; + + while (max_count--) { + unsigned int c; + + c = xmit->buf[xmit->tail]; + bcm_uart_writel(port, c, UART_FIFO_REG); + xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); + port->icount.tx++; + if (uart_circ_empty(xmit)) + break; + } + + if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) + uart_write_wakeup(port); + + if (uart_circ_empty(xmit)) + goto txq_empty; + return; + +txq_empty: + /* nothing to send, disable transmit interrupt */ + val = bcm_uart_readl(port, UART_IR_REG); + val &= ~UART_TX_INT_MASK; + bcm_uart_writel(port, val, UART_IR_REG); + return; +} + +/* + * process uart interrupt + */ +static irqreturn_t bcm_uart_interrupt(int irq, void *dev_id) +{ + struct uart_port *port; + unsigned int irqstat; + + port = dev_id; + spin_lock(&port->lock); + + irqstat = bcm_uart_readl(port, UART_IR_REG); + if (irqstat & UART_RX_INT_STAT) + bcm_uart_do_rx(port); + + if (irqstat & UART_TX_INT_STAT) + bcm_uart_do_tx(port); + + if (irqstat & UART_IR_MASK(UART_IR_EXTIP)) { + unsigned int estat; + + estat = bcm_uart_readl(port, UART_EXTINP_REG); + if (estat & UART_EXTINP_IRSTAT(UART_EXTINP_IR_CTS)) + uart_handle_cts_change(port, + estat & UART_EXTINP_CTS_MASK); + if (estat & UART_EXTINP_IRSTAT(UART_EXTINP_IR_DCD)) + uart_handle_dcd_change(port, 
+ estat & UART_EXTINP_DCD_MASK); + } + + spin_unlock(&port->lock); + return IRQ_HANDLED; +} + +/* + * enable rx & tx operation on uart + */ +static void bcm_uart_enable(struct uart_port *port) +{ + unsigned int val; + + val = bcm_uart_readl(port, UART_CTL_REG); + val |= (UART_CTL_BRGEN_MASK | UART_CTL_TXEN_MASK | UART_CTL_RXEN_MASK); + bcm_uart_writel(port, val, UART_CTL_REG); +} + +/* + * disable rx & tx operation on uart + */ +static void bcm_uart_disable(struct uart_port *port) +{ + unsigned int val; + + val = bcm_uart_readl(port, UART_CTL_REG); + val &= ~(UART_CTL_BRGEN_MASK | UART_CTL_TXEN_MASK | + UART_CTL_RXEN_MASK); + bcm_uart_writel(port, val, UART_CTL_REG); +} + +/* + * clear all unread data in rx fifo and unsent data in tx fifo + */ +static void bcm_uart_flush(struct uart_port *port) +{ + unsigned int val; + + /* empty rx and tx fifo */ + val = bcm_uart_readl(port, UART_CTL_REG); + val |= UART_CTL_RSTRXFIFO_MASK | UART_CTL_RSTTXFIFO_MASK; + bcm_uart_writel(port, val, UART_CTL_REG); + + /* read any pending char to make sure all irq status are + * cleared */ + (void)bcm_uart_readl(port, UART_FIFO_REG); +} + +/* + * serial core request to initialize uart and start rx operation + */ +static int bcm_uart_startup(struct uart_port *port) +{ + unsigned int val; + int ret; + + /* mask all irq and flush port */ + bcm_uart_disable(port); + bcm_uart_writel(port, 0, UART_IR_REG); + bcm_uart_flush(port); + + /* clear any pending external input interrupt */ + (void)bcm_uart_readl(port, UART_EXTINP_REG); + + /* set rx/tx fifo thresh to fifo half size */ + val = bcm_uart_readl(port, UART_MCTL_REG); + val &= ~(UART_MCTL_RXFIFOTHRESH_MASK | UART_MCTL_TXFIFOTHRESH_MASK); + val |= (port->fifosize / 2) << UART_MCTL_RXFIFOTHRESH_SHIFT; + val |= (port->fifosize / 2) << UART_MCTL_TXFIFOTHRESH_SHIFT; + bcm_uart_writel(port, val, UART_MCTL_REG); + + /* set rx fifo timeout to 1 char time */ + val = bcm_uart_readl(port, UART_CTL_REG); + val &= ~UART_CTL_RXTMOUTCNT_MASK; + val |= 1 << UART_CTL_RXTMOUTCNT_SHIFT; + bcm_uart_writel(port, val, UART_CTL_REG); + + /* report any edge on dcd and cts */ + val = UART_EXTINP_INT_MASK; + val |= UART_EXTINP_DCD_NOSENSE_MASK; + val |= UART_EXTINP_CTS_NOSENSE_MASK; + bcm_uart_writel(port, val, UART_EXTINP_REG); + + /* register irq and enable rx interrupts */ + ret = request_irq(port->irq, bcm_uart_interrupt, 0, + bcm_uart_type(port), port); + if (ret) + return ret; + bcm_uart_writel(port, UART_RX_INT_MASK, UART_IR_REG); + bcm_uart_enable(port); + return 0; +} + +/* + * serial core request to flush & disable uart + */ +static void bcm_uart_shutdown(struct uart_port *port) +{ + unsigned long flags; + + spin_lock_irqsave(&port->lock, flags); + bcm_uart_writel(port, 0, UART_IR_REG); + spin_unlock_irqrestore(&port->lock, flags); + + bcm_uart_disable(port); + bcm_uart_flush(port); + free_irq(port->irq, port); +} + +/* + * serial core request to change current uart setting + */ +static void bcm_uart_set_termios(struct uart_port *port, + struct ktermios *new, + struct ktermios *old) +{ + unsigned int ctl, baud, quot, ier; + unsigned long flags; + + spin_lock_irqsave(&port->lock, flags); + + /* disable uart while changing speed */ + bcm_uart_disable(port); + bcm_uart_flush(port); + + /* update Control register */ + ctl = bcm_uart_readl(port, UART_CTL_REG); + ctl &= ~UART_CTL_BITSPERSYM_MASK; + + switch (new->c_cflag & CSIZE) { + case CS5: + ctl |= (0 << UART_CTL_BITSPERSYM_SHIFT); + break; + case CS6: + ctl |= (1 << UART_CTL_BITSPERSYM_SHIFT); + break; + case CS7: + ctl |= (2 << 
UART_CTL_BITSPERSYM_SHIFT); + break; + default: + ctl |= (3 << UART_CTL_BITSPERSYM_SHIFT); + break; + } + + ctl &= ~UART_CTL_STOPBITS_MASK; + if (new->c_cflag & CSTOPB) + ctl |= UART_CTL_STOPBITS_2; + else + ctl |= UART_CTL_STOPBITS_1; + + ctl &= ~(UART_CTL_RXPAREN_MASK | UART_CTL_TXPAREN_MASK); + if (new->c_cflag & PARENB) + ctl |= (UART_CTL_RXPAREN_MASK | UART_CTL_TXPAREN_MASK); + ctl &= ~(UART_CTL_RXPAREVEN_MASK | UART_CTL_TXPAREVEN_MASK); + if (new->c_cflag & PARODD) + ctl |= (UART_CTL_RXPAREVEN_MASK | UART_CTL_TXPAREVEN_MASK); + bcm_uart_writel(port, ctl, UART_CTL_REG); + + /* update Baudword register */ + baud = uart_get_baud_rate(port, new, old, 0, port->uartclk / 16); + quot = uart_get_divisor(port, baud) - 1; + bcm_uart_writel(port, quot, UART_BAUD_REG); + + /* update Interrupt register */ + ier = bcm_uart_readl(port, UART_IR_REG); + + ier &= ~UART_IR_MASK(UART_IR_EXTIP); + if (UART_ENABLE_MS(port, new->c_cflag)) + ier |= UART_IR_MASK(UART_IR_EXTIP); + + bcm_uart_writel(port, ier, UART_IR_REG); + + /* update read/ignore mask */ + port->read_status_mask = UART_FIFO_VALID_MASK; + if (new->c_iflag & INPCK) { + port->read_status_mask |= UART_FIFO_FRAMEERR_MASK; + port->read_status_mask |= UART_FIFO_PARERR_MASK; + } + if (new->c_iflag & (BRKINT)) + port->read_status_mask |= UART_FIFO_BRKDET_MASK; + + port->ignore_status_mask = 0; + if (new->c_iflag & IGNPAR) + port->ignore_status_mask |= UART_FIFO_PARERR_MASK; + if (new->c_iflag & IGNBRK) + port->ignore_status_mask |= UART_FIFO_BRKDET_MASK; + if (!(new->c_cflag & CREAD)) + port->ignore_status_mask |= UART_FIFO_VALID_MASK; + + uart_update_timeout(port, new->c_cflag, baud); + bcm_uart_enable(port); + spin_unlock_irqrestore(&port->lock, flags); +} + +/* + * serial core request to claim uart iomem + */ +static int bcm_uart_request_port(struct uart_port *port) +{ + unsigned int size; + + size = RSET_UART_SIZE; + if (!request_mem_region(port->mapbase, size, "bcm63xx")) { + dev_err(port->dev, "Memory region busy\n"); + return -EBUSY; + } + + port->membase = ioremap(port->mapbase, size); + if (!port->membase) { + dev_err(port->dev, "Unable to map registers\n"); + release_mem_region(port->mapbase, size); + return -EBUSY; + } + return 0; +} + +/* + * serial core request to release uart iomem + */ +static void bcm_uart_release_port(struct uart_port *port) +{ + release_mem_region(port->mapbase, RSET_UART_SIZE); + iounmap(port->membase); +} + +/* + * serial core request to do any port required autoconfiguration + */ +static void bcm_uart_config_port(struct uart_port *port, int flags) +{ + if (flags & UART_CONFIG_TYPE) { + if (bcm_uart_request_port(port)) + return; + port->type = PORT_BCM63XX; + } +} + +/* + * serial core request to check that port information in serinfo are + * suitable + */ +static int bcm_uart_verify_port(struct uart_port *port, + struct serial_struct *serinfo) +{ + if (port->type != PORT_BCM63XX) + return -EINVAL; + if (port->irq != serinfo->irq) + return -EINVAL; + if (port->iotype != serinfo->io_type) + return -EINVAL; + if (port->mapbase != (unsigned long)serinfo->iomem_base) + return -EINVAL; + return 0; +} + +/* serial core callbacks */ +static struct uart_ops bcm_uart_ops = { + .tx_empty = bcm_uart_tx_empty, + .get_mctrl = bcm_uart_get_mctrl, + .set_mctrl = bcm_uart_set_mctrl, + .start_tx = bcm_uart_start_tx, + .stop_tx = bcm_uart_stop_tx, + .stop_rx = bcm_uart_stop_rx, + .enable_ms = bcm_uart_enable_ms, + .break_ctl = bcm_uart_break_ctl, + .startup = bcm_uart_startup, + .shutdown = bcm_uart_shutdown, + .set_termios = 
bcm_uart_set_termios, + .type = bcm_uart_type, + .release_port = bcm_uart_release_port, + .request_port = bcm_uart_request_port, + .config_port = bcm_uart_config_port, + .verify_port = bcm_uart_verify_port, +}; + + + +#ifdef CONFIG_SERIAL_BCM63XX_CONSOLE +static inline void wait_for_xmitr(struct uart_port *port) +{ + unsigned int tmout; + + /* Wait up to 10ms for the character(s) to be sent. */ + tmout = 10000; + while (--tmout) { + unsigned int val; + + val = bcm_uart_readl(port, UART_IR_REG); + if (val & UART_IR_STAT(UART_IR_TXEMPTY)) + break; + udelay(1); + } + + /* Wait up to 1s for flow control if necessary */ + if (port->flags & UPF_CONS_FLOW) { + tmout = 1000000; + while (--tmout) { + unsigned int val; + + val = bcm_uart_readl(port, UART_EXTINP_REG); + if (val & UART_EXTINP_CTS_MASK) + break; + udelay(1); + } + } +} + +/* + * output given char + */ +static void bcm_console_putchar(struct uart_port *port, int ch) +{ + wait_for_xmitr(port); + bcm_uart_writel(port, ch, UART_FIFO_REG); +} + +/* + * console core request to output given string + */ +static void bcm_console_write(struct console *co, const char *s, + unsigned int count) +{ + struct uart_port *port; + unsigned long flags; + int locked; + + port = &ports[co->index]; + + local_irq_save(flags); + if (port->sysrq) { + /* bcm_uart_interrupt() already took the lock */ + locked = 0; + } else if (oops_in_progress) { + locked = spin_trylock(&port->lock); + } else { + spin_lock(&port->lock); + locked = 1; + } + + /* call helper to deal with \r\n */ + uart_console_write(port, s, count, bcm_console_putchar); + + /* and wait for char to be transmitted */ + wait_for_xmitr(port); + + if (locked) + spin_unlock(&port->lock); + local_irq_restore(flags); +} + +/* + * console core request to setup given console, find matching uart + * port and setup it. 
+ */ +static int bcm_console_setup(struct console *co, char *options) +{ + struct uart_port *port; + int baud = 9600; + int bits = 8; + int parity = 'n'; + int flow = 'n'; + + if (co->index < 0 || co->index >= BCM63XX_NR_UARTS) + return -EINVAL; + port = &ports[co->index]; + if (!port->membase) + return -ENODEV; + if (options) + uart_parse_options(options, &baud, &parity, &bits, &flow); + + return uart_set_options(port, co, baud, parity, bits, flow); +} + +static struct uart_driver bcm_uart_driver; + +static struct console bcm63xx_console = { + .name = "ttyS", + .write = bcm_console_write, + .device = uart_console_device, + .setup = bcm_console_setup, + .flags = CON_PRINTBUFFER, + .index = -1, + .data = &bcm_uart_driver, +}; + +static int __init bcm63xx_console_init(void) +{ + register_console(&bcm63xx_console); + return 0; +} + +console_initcall(bcm63xx_console_init); + +#define BCM63XX_CONSOLE &bcm63xx_console +#else +#define BCM63XX_CONSOLE NULL +#endif /* CONFIG_SERIAL_BCM63XX_CONSOLE */ + +static struct uart_driver bcm_uart_driver = { + .owner = THIS_MODULE, + .driver_name = "bcm63xx_uart", + .dev_name = "ttyS", + .major = TTY_MAJOR, + .minor = 64, + .nr = 1, + .cons = BCM63XX_CONSOLE, +}; + +/* + * platform driver probe/remove callback + */ +static int __devinit bcm_uart_probe(struct platform_device *pdev) +{ + struct resource *res_mem, *res_irq; + struct uart_port *port; + struct clk *clk; + int ret; + + if (pdev->id < 0 || pdev->id >= BCM63XX_NR_UARTS) + return -EINVAL; + + if (ports[pdev->id].membase) + return -EBUSY; + + res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res_mem) + return -ENODEV; + + res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); + if (!res_irq) + return -ENODEV; + + clk = clk_get(&pdev->dev, "periph"); + if (IS_ERR(clk)) + return -ENODEV; + + port = &ports[pdev->id]; + memset(port, 0, sizeof(*port)); + port->iotype = UPIO_MEM; + port->mapbase = res_mem->start; + port->irq = res_irq->start; + port->ops = &bcm_uart_ops; + port->flags = UPF_BOOT_AUTOCONF; + port->dev = &pdev->dev; + port->fifosize = 16; + port->uartclk = clk_get_rate(clk) / 2; + clk_put(clk); + + ret = uart_add_one_port(&bcm_uart_driver, port); + if (ret) { + kfree(port); + return ret; + } + platform_set_drvdata(pdev, port); + return 0; +} + +static int __devexit bcm_uart_remove(struct platform_device *pdev) +{ + struct uart_port *port; + + port = platform_get_drvdata(pdev); + uart_remove_one_port(&bcm_uart_driver, port); + platform_set_drvdata(pdev, NULL); + /* mark port as free */ + ports[pdev->id].membase = 0; + return 0; +} + +/* + * platform driver stuff + */ +static struct platform_driver bcm_uart_platform_driver = { + .probe = bcm_uart_probe, + .remove = __devexit_p(bcm_uart_remove), + .driver = { + .owner = THIS_MODULE, + .name = "bcm63xx_uart", + }, +}; + +static int __init bcm_uart_init(void) +{ + int ret; + + ret = uart_register_driver(&bcm_uart_driver); + if (ret) + return ret; + + ret = platform_driver_register(&bcm_uart_platform_driver); + if (ret) + uart_unregister_driver(&bcm_uart_driver); + + return ret; +} + +static void __exit bcm_uart_exit(void) +{ + platform_driver_unregister(&bcm_uart_platform_driver); + uart_unregister_driver(&bcm_uart_driver); +} + +module_init(bcm_uart_init); +module_exit(bcm_uart_exit); + +MODULE_AUTHOR("Maxime Bizon <mbizon@freebox.fr>"); +MODULE_DESCRIPTION("Broadcom 63<xx integrated uart driver"); +MODULE_LICENSE("GPL"); diff --git a/target/linux/brcm63xx/files-2.6.30/drivers/spi/bcm63xx_spi.c 
b/target/linux/brcm63xx/files-2.6.30/drivers/spi/bcm63xx_spi.c new file mode 100644 index 000000000..a20de302d --- /dev/null +++ b/target/linux/brcm63xx/files-2.6.30/drivers/spi/bcm63xx_spi.c @@ -0,0 +1,445 @@ +/* + * Broadcom BCM63xx SPI controller support + * + * Copyright (C) 2009 Florian Fainelli <florian@openwrt.org> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the + * Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, + */ + +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/clk.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/delay.h> +#include <linux/interrupt.h> +#include <linux/spi/spi.h> +#include <linux/spi/spi_bitbang.h> +#include <linux/gpio.h> +#include <linux/completion.h> +#include <linux/err.h> + +#include <bcm63xx_io.h> +#include <bcm63xx_regs.h> +#include <bcm63xx_dev_spi.h> + +#define PFX KBUILD_MODNAME +#define DRV_VER "0.1.2" + +struct bcm63xx_spi { + /* bitbang has to be first */ + struct spi_bitbang bitbang; + struct completion done; + + void __iomem *regs; + int irq; + + /* Platform data */ + u32 speed_hz; + unsigned fifo_size; + + /* Data buffers */ + const unsigned char *tx_ptr; + unsigned char *rx_ptr; + int remaining_bytes; + + struct clk *clk; + struct resource *ioarea; + struct platform_device *pdev; +}; + +static void bcm63xx_spi_chipselect(struct spi_device *spi, int is_on) +{ + struct bcm63xx_spi *bs = spi_master_get_devdata(spi->master); + u16 val; + + val = bcm_spi_readw(bs->regs, SPI_CMD); + if (is_on == BITBANG_CS_INACTIVE) + val |= SPI_CMD_NOOP; + else if (is_on == BITBANG_CS_ACTIVE) + val |= (1 << spi->chip_select << SPI_CMD_DEVICE_ID_SHIFT); + + bcm_spi_writew(val, bs->regs, SPI_CMD); +} + +static int bcm63xx_spi_setup_transfer(struct spi_device *spi, + struct spi_transfer *t) +{ + u8 bits_per_word; + u8 clk_cfg; + u32 hz; + unsigned int div; + + struct bcm63xx_spi *bs = spi_master_get_devdata(spi->master); + + bits_per_word = (t) ? t->bits_per_word : spi->bits_per_word; + hz = (t) ? 
t->speed_hz : spi->max_speed_hz; + if (bits_per_word != 8) { + dev_err(&spi->dev, "%s, unsupported bits_per_word=%d\n", + __func__, bits_per_word); + return -EINVAL; + } + + if (spi->chip_select > spi->master->num_chipselect) { + dev_err(&spi->dev, "%s, unsupported slave %d\n", + __func__, spi->chip_select); + return -EINVAL; + } + + /* Check clock setting */ + div = (bs->speed_hz / hz); + switch (div) { + case 2: + clk_cfg = SPI_CLK_25MHZ; + break; + case 4: + clk_cfg = SPI_CLK_12_50MHZ; + break; + case 8: + clk_cfg = SPI_CLK_6_250MHZ; + break; + case 16: + clk_cfg = SPI_CLK_3_125MHZ; + break; + case 32: + clk_cfg = SPI_CLK_1_563MHZ; + break; + case 128: + clk_cfg = SPI_CLK_0_781MHZ; + break; + case 64: + default: + /* Set to slowest mode for compatibility */ + clk_cfg = SPI_CLK_0_781MHZ; + break; + } + + bcm_spi_writeb(clk_cfg, bs->regs, SPI_CLK_CFG); + dev_dbg(&spi->dev, "Setting clock register to %d (hz %d, cmd %02x)\n", + div, hz, clk_cfg); + + return 0; +} + +/* the spi->mode bits understood by this driver: */ +#define MODEBITS (SPI_CPOL | SPI_CPHA) + +static int bcm63xx_spi_setup(struct spi_device *spi) +{ + struct spi_bitbang *bitbang; + struct bcm63xx_spi *bs; + int retval; + + bs = spi_master_get_devdata(spi->master); + bitbang = &bs->bitbang; + + if (!spi->bits_per_word) + spi->bits_per_word = 8; + + if (spi->mode & ~MODEBITS) { + dev_err(&spi->dev, "%s, unsupported mode bits %x\n", + __func__, spi->mode & ~MODEBITS); + return -EINVAL; + } + + retval = bcm63xx_spi_setup_transfer(spi, NULL); + if (retval < 0) { + dev_err(&spi->dev, "setup: unsupported mode bits %x\n", + spi->mode & ~MODEBITS); + return retval; + } + + dev_dbg(&spi->dev, "%s, mode %d, %u bits/w, %u nsec/bit\n", + __func__, spi->mode & MODEBITS, spi->bits_per_word, 0); + + return 0; +} + +/* Fill the TX FIFO with as many bytes as possible */ +static void bcm63xx_spi_fill_tx_fifo(struct bcm63xx_spi *bs) +{ + u8 tail; + + /* Fill the Tx FIFO with as many bytes as possible */ + tail = bcm_spi_readb(bs->regs, SPI_MSG_TAIL); + while ((tail < bs->fifo_size) && (bs->remaining_bytes > 0)) { + if (bs->tx_ptr) + bcm_spi_writeb(*bs->tx_ptr++, bs->regs, SPI_MSG_DATA); + else + bcm_spi_writeb(0, bs->regs, SPI_MSG_DATA); + bs->remaining_bytes--; + tail = bcm_spi_readb(bs->regs, SPI_MSG_TAIL); + } +} + +static int bcm63xx_txrx_bufs(struct spi_device *spi, struct spi_transfer *t) +{ + struct bcm63xx_spi *bs = spi_master_get_devdata(spi->master); + u8 msg_ctl; + u16 cmd; + + dev_dbg(&spi->dev, "txrx: tx %p, rx %p, len %d\n", + t->tx_buf, t->rx_buf, t->len); + + /* Transmitter is inhibited */ + bs->tx_ptr = t->tx_buf; + bs->rx_ptr = t->rx_buf; + bs->remaining_bytes = t->len; + init_completion(&bs->done); + + bcm63xx_spi_fill_tx_fifo(bs); + + /* Enable the command done interrupt which + * we use to determine completion of a command */ + bcm_spi_writeb(SPI_INTR_CMD_DONE, bs->regs, SPI_INT_MASK); + + /* Fill in the Message control register */ + msg_ctl = bcm_spi_readb(bs->regs, SPI_MSG_CTL); + msg_ctl |= (t->len << SPI_BYTE_CNT_SHIFT); + msg_ctl |= (SPI_FD_RW << SPI_MSG_TYPE_SHIFT); + bcm_spi_writeb(msg_ctl, bs->regs, SPI_MSG_CTL); + + /* Issue the transfer */ + cmd = bcm_spi_readb(bs->regs, SPI_CMD); + cmd |= SPI_CMD_START_IMMEDIATE; + cmd |= (0 << SPI_CMD_PREPEND_BYTE_CNT_SHIFT); + bcm_spi_writeb(cmd, bs->regs, SPI_CMD); + + wait_for_completion(&bs->done); + + /* Disable the CMD_DONE interrupt */ + bcm_spi_writeb(~(SPI_INTR_CMD_DONE), bs->regs, SPI_INT_MASK); + + return t->len - bs->remaining_bytes; +} + +/* This driver supports 
single master mode only. Hence + * CMD_DONE is the only interrupt we care about + */ +static irqreturn_t bcm63xx_spi_interrupt(int irq, void *dev_id) +{ + struct spi_master *master = (struct spi_master *)dev_id; + struct bcm63xx_spi *bs = spi_master_get_devdata(master); + u8 intr; + u16 cmd; + + /* Read interrupts and clear them immediately */ + intr = bcm_spi_readb(bs->regs, SPI_INT_STATUS); + bcm_spi_writeb(SPI_INTR_CLEAR_ALL, bs->regs, SPI_INT_MASK); + + /* A transfer completed */ + if (intr & SPI_INTR_CMD_DONE) { + u8 rx_empty; + + rx_empty = bcm_spi_readb(bs->regs, SPI_ST); + /* Read out all the data */ + while ((rx_empty & SPI_RX_EMPTY) == 0) { + u8 data; + + data = bcm_spi_readb(bs->regs, SPI_RX_DATA); + if (bs->rx_ptr) + *bs->rx_ptr++ = data; + + rx_empty = bcm_spi_readb(bs->regs, SPI_RX_EMPTY); + } + + /* See if there is more data to send */ + if (bs->remaining_bytes > 0) { + bcm63xx_spi_fill_tx_fifo(bs); + + /* Start the transfer */ + cmd = bcm_spi_readb(bs->regs, SPI_CMD); + cmd |= SPI_CMD_START_IMMEDIATE; + cmd |= (0 << SPI_CMD_PREPEND_BYTE_CNT_SHIFT); + bcm_spi_writeb(cmd, bs->regs, SPI_CMD); + } else + complete(&bs->done); + } + + return IRQ_HANDLED; +} + + +static int __init bcm63xx_spi_probe(struct platform_device *pdev) +{ + struct resource *r; + struct bcm63xx_spi_pdata *pdata = pdev->dev.platform_data; + int irq; + struct spi_master *master; + struct clk *clk; + struct bcm63xx_spi *bs; + int ret; + + r = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!r) { + ret = -ENXIO; + goto out; + } + + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + ret = -ENXIO; + goto out; + } + + clk = clk_get(&pdev->dev, "spi"); + if (IS_ERR(clk)) { + dev_err(&pdev->dev, "No clock for device\n"); + ret = -ENODEV; + goto out; + } + + master = spi_alloc_master(&pdev->dev, sizeof(struct bcm63xx_spi)); + if (!master) { + ret = -ENOMEM; + goto out_free; + } + + bs = spi_master_get_devdata(master); + bs->bitbang.master = spi_master_get(master); + bs->bitbang.chipselect = bcm63xx_spi_chipselect; + bs->bitbang.setup_transfer = bcm63xx_spi_setup_transfer; + bs->bitbang.txrx_bufs = bcm63xx_txrx_bufs; + bs->bitbang.master->setup = bcm63xx_spi_setup; + init_completion(&bs->done); + + platform_set_drvdata(pdev, master); + bs->pdev = pdev; + + if (!request_mem_region(r->start, + r->end - r->start, PFX)) { + ret = -ENXIO; + goto out_free; + } + + bs->regs = ioremap_nocache(r->start, r->end - r->start); + if (!bs->regs) { + printk(KERN_ERR PFX " unable to ioremap regs\n"); + ret = -ENOMEM; + goto out_free; + } + bs->irq = irq; + bs->clk = clk; + bs->fifo_size = pdata->fifo_size; + + ret = request_irq(irq, bcm63xx_spi_interrupt, 0, + pdev->name, master); + if (ret) { + printk(KERN_ERR PFX " unable to request irq\n"); + goto out_unmap; + } + + master->bus_num = pdata->bus_num; + master->num_chipselect = pdata->num_chipselect; + bs->speed_hz = pdata->speed_hz; + + /* Initialize hardware */ + clk_enable(bs->clk); + bcm_spi_writeb(SPI_INTR_CLEAR_ALL, bs->regs, SPI_INT_MASK); + + dev_info(&pdev->dev, " at 0x%08x (irq %d, FIFOs size %d) v%s\n", + r->start, irq, bs->fifo_size, DRV_VER); + + ret = spi_bitbang_start(&bs->bitbang); + if (ret) { + dev_err(&pdev->dev, "spi_bitbang_start FAILED\n"); + goto out_reset_hw; + } + + return ret; + +out_reset_hw: + clk_disable(clk); + free_irq(irq, master); +out_unmap: + iounmap(bs->regs); +out_free: + clk_put(clk); + spi_master_put(master); +out: + return ret; +} + +static int __exit bcm63xx_spi_remove(struct platform_device *pdev) +{ + struct spi_master *master =
platform_get_drvdata(pdev); + struct bcm63xx_spi *bs = spi_master_get_devdata(master); + + spi_bitbang_stop(&bs->bitbang); + clk_disable(bs->clk); + clk_put(bs->clk); + free_irq(bs->irq, master); + iounmap(bs->regs); + platform_set_drvdata(pdev, 0); + spi_master_put(bs->bitbang.master); + + return 0; +} + +#ifdef CONFIG_PM +static int bcm63xx_spi_suspend(struct platform_device *pdev, pm_message_t mesg) +{ + struct spi_master *master = platform_get_drvdata(pdev); + struct bcm63xx_spi *bs = spi_master_get_devdata(master); + + clk_disable(bs->clk); + + return 0; +} + +static int bcm63xx_spi_resume(struct platform_device *pdev) +{ + struct spi_master *master = platform_get_drvdata(pdev); + struct bcm63xx_spi *bs = spi_master_get_devdata(master); + + clk_enable(bs->clk); + + return 0; +} +#else +#define bcm63xx_spi_suspend NULL +#define bcm63xx_spi_resume NULL +#endif + +static struct platform_driver bcm63xx_spi_driver = { + .driver = { + .name = "bcm63xx-spi", + .owner = THIS_MODULE, + }, + .probe = bcm63xx_spi_probe, + .remove = bcm63xx_spi_remove, + .suspend = bcm63xx_spi_suspend, + .resume = bcm63xx_spi_resume, +}; + + +static int __init bcm63xx_spi_init(void) +{ + return platform_driver_register(&bcm63xx_spi_driver); +} + +static void __exit bcm63xx_spi_exit(void) +{ + platform_driver_unregister(&bcm63xx_spi_driver); +} + +module_init(bcm63xx_spi_init); +module_exit(bcm63xx_spi_exit); + +MODULE_ALIAS("platform:bcm63xx_spi"); +MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>"); +MODULE_DESCRIPTION("Broadcom BCM63xx SPI Controller driver"); +MODULE_LICENSE("GPL"); +MODULE_VERSION(DRV_VER); diff --git a/target/linux/brcm63xx/files-2.6.30/drivers/usb/host/ehci-bcm63xx.c b/target/linux/brcm63xx/files-2.6.30/drivers/usb/host/ehci-bcm63xx.c new file mode 100644 index 000000000..2fef5716e --- /dev/null +++ b/target/linux/brcm63xx/files-2.6.30/drivers/usb/host/ehci-bcm63xx.c @@ -0,0 +1,152 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details.
+ * + * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr> + */ + +#include <linux/init.h> +#include <linux/platform_device.h> +#include <bcm63xx_cpu.h> +#include <bcm63xx_regs.h> +#include <bcm63xx_io.h> + +static int ehci_bcm63xx_setup(struct usb_hcd *hcd) +{ + struct ehci_hcd *ehci = hcd_to_ehci(hcd); + int retval; + + retval = ehci_halt(ehci); + if (retval) + return retval; + + retval = ehci_init(hcd); + if (retval) + return retval; + + hcd->has_tt = 1; + ehci_reset(ehci); + ehci_port_power(ehci, 0); + + return retval; +} + + +static const struct hc_driver ehci_bcm63xx_hc_driver = { + .description = hcd_name, + .product_desc = "BCM63XX integrated EHCI controller", + .hcd_priv_size = sizeof(struct ehci_hcd), + + .irq = ehci_irq, + .flags = HCD_MEMORY | HCD_USB2, + + .reset = ehci_bcm63xx_setup, + .start = ehci_run, + .stop = ehci_stop, + .shutdown = ehci_shutdown, + + .urb_enqueue = ehci_urb_enqueue, + .urb_dequeue = ehci_urb_dequeue, + .endpoint_disable = ehci_endpoint_disable, + + .get_frame_number = ehci_get_frame, + + .hub_status_data = ehci_hub_status_data, + .hub_control = ehci_hub_control, + .bus_suspend = ehci_bus_suspend, + .bus_resume = ehci_bus_resume, + .relinquish_port = ehci_relinquish_port, + .port_handed_over = ehci_port_handed_over, +}; + +static int __devinit ehci_hcd_bcm63xx_drv_probe(struct platform_device *pdev) +{ + struct resource *res_mem, *res_irq; + struct usb_hcd *hcd; + struct ehci_hcd *ehci; + u32 reg; + int ret; + + res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); + if (!res_mem || !res_irq) + return -ENODEV; + + reg = bcm_rset_readl(RSET_USBH_PRIV, USBH_PRIV_SWAP_REG); + reg &= ~USBH_PRIV_SWAP_EHCI_DATA_MASK; + reg |= USBH_PRIV_SWAP_EHCI_ENDN_MASK; + bcm_rset_writel(RSET_USBH_PRIV, reg, USBH_PRIV_SWAP_REG); + + /* don't ask... 
*/ + bcm_rset_writel(RSET_USBH_PRIV, 0x1c0020, USBH_PRIV_TEST_REG); + + hcd = usb_create_hcd(&ehci_bcm63xx_hc_driver, &pdev->dev, "bcm63xx"); + if (!hcd) + return -ENOMEM; + hcd->rsrc_start = res_mem->start; + hcd->rsrc_len = res_mem->end - res_mem->start + 1; + + if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) { + pr_debug("request_mem_region failed\n"); + ret = -EBUSY; + goto out; + } + + hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len); + if (!hcd->regs) { + pr_debug("ioremap failed\n"); + ret = -EIO; + goto out1; + } + + ehci = hcd_to_ehci(hcd); + ehci->big_endian_mmio = 1; + ehci->big_endian_desc = 0; + ehci->caps = hcd->regs; + ehci->regs = hcd->regs + + HC_LENGTH(ehci_readl(ehci, &ehci->caps->hc_capbase)); + ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params); + ehci->sbrn = 0x20; + + ret = usb_add_hcd(hcd, res_irq->start, IRQF_DISABLED); + if (ret) + goto out2; + + platform_set_drvdata(pdev, hcd); + return 0; + +out2: + iounmap(hcd->regs); +out1: + release_mem_region(hcd->rsrc_start, hcd->rsrc_len); +out: + usb_put_hcd(hcd); + return ret; +} + +static int __devexit ehci_hcd_bcm63xx_drv_remove(struct platform_device *pdev) +{ + struct usb_hcd *hcd; + + hcd = platform_get_drvdata(pdev); + usb_remove_hcd(hcd); + iounmap(hcd->regs); + usb_put_hcd(hcd); + release_mem_region(hcd->rsrc_start, hcd->rsrc_len); + platform_set_drvdata(pdev, NULL); + return 0; +} + +static struct platform_driver ehci_hcd_bcm63xx_driver = { + .probe = ehci_hcd_bcm63xx_drv_probe, + .remove = __devexit_p(ehci_hcd_bcm63xx_drv_remove), + .shutdown = usb_hcd_platform_shutdown, + .driver = { + .name = "bcm63xx_ehci", + .owner = THIS_MODULE, + .bus = &platform_bus_type + }, +}; + +MODULE_ALIAS("platform:bcm63xx_ehci"); diff --git a/target/linux/brcm63xx/files-2.6.30/drivers/usb/host/ohci-bcm63xx.c b/target/linux/brcm63xx/files-2.6.30/drivers/usb/host/ohci-bcm63xx.c new file mode 100644 index 000000000..08807d989 --- /dev/null +++ b/target/linux/brcm63xx/files-2.6.30/drivers/usb/host/ohci-bcm63xx.c @@ -0,0 +1,159 @@ +/* + * This file is subject to the terms and conditions of the GNU General Public + * License. See the file "COPYING" in the main directory of this archive + * for more details. 
+ * + * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr> + */ + +#include <linux/init.h> +#include <linux/clk.h> +#include <linux/platform_device.h> +#include <bcm63xx_cpu.h> +#include <bcm63xx_regs.h> +#include <bcm63xx_io.h> + +static struct clk *usb_host_clock; + +static int __devinit ohci_bcm63xx_start(struct usb_hcd *hcd) +{ + struct ohci_hcd *ohci = hcd_to_ohci(hcd); + int ret; + + ret = ohci_init(ohci); + if (ret < 0) + return ret; + + /* FIXME: autodetected port 2 is shared with USB slave */ + + ret = ohci_run(ohci); + if (ret < 0) { + err("can't start %s", hcd->self.bus_name); + ohci_stop(hcd); + return ret; + } + return 0; +} + +static const struct hc_driver ohci_bcm63xx_hc_driver = { + .description = hcd_name, + .product_desc = "BCM63XX integrated OHCI controller", + .hcd_priv_size = sizeof(struct ohci_hcd), + + .irq = ohci_irq, + .flags = HCD_USB11 | HCD_MEMORY, + .start = ohci_bcm63xx_start, + .stop = ohci_stop, + .shutdown = ohci_shutdown, + .urb_enqueue = ohci_urb_enqueue, + .urb_dequeue = ohci_urb_dequeue, + .endpoint_disable = ohci_endpoint_disable, + .get_frame_number = ohci_get_frame, + .hub_status_data = ohci_hub_status_data, + .hub_control = ohci_hub_control, + .start_port_reset = ohci_start_port_reset, +}; + +static int __devinit ohci_hcd_bcm63xx_drv_probe(struct platform_device *pdev) +{ + struct resource *res_mem, *res_irq; + struct usb_hcd *hcd; + struct ohci_hcd *ohci; + u32 reg; + int ret; + + res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); + if (!res_mem || !res_irq) + return -ENODEV; + + if (BCMCPU_IS_6348()) { + struct clk *clk; + /* enable USB host clock */ + clk = clk_get(&pdev->dev, "usbh"); + if (IS_ERR(clk)) + return -ENODEV; + + clk_enable(clk); + usb_host_clock = clk; + bcm_rset_writel(RSET_OHCI_PRIV, 0, OHCI_PRIV_REG); + + } else if (BCMCPU_IS_6358()) { + reg = bcm_rset_readl(RSET_USBH_PRIV, USBH_PRIV_SWAP_REG); + reg &= ~USBH_PRIV_SWAP_OHCI_ENDN_MASK; + reg |= USBH_PRIV_SWAP_OHCI_DATA_MASK; + bcm_rset_writel(RSET_USBH_PRIV, reg, USBH_PRIV_SWAP_REG); + /* don't ask... 
*/ + bcm_rset_writel(RSET_USBH_PRIV, 0x1c0020, USBH_PRIV_TEST_REG); + } else + return 0; + + hcd = usb_create_hcd(&ohci_bcm63xx_hc_driver, &pdev->dev, "bcm63xx"); + if (!hcd) + return -ENOMEM; + hcd->rsrc_start = res_mem->start; + hcd->rsrc_len = res_mem->end - res_mem->start + 1; + + if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) { + pr_debug("request_mem_region failed\n"); + ret = -EBUSY; + goto out; + } + + hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len); + if (!hcd->regs) { + pr_debug("ioremap failed\n"); + ret = -EIO; + goto out1; + } + + ohci = hcd_to_ohci(hcd); + ohci->flags |= OHCI_QUIRK_BE_MMIO | OHCI_QUIRK_BE_DESC | + OHCI_QUIRK_FRAME_NO; + ohci_hcd_init(ohci); + + ret = usb_add_hcd(hcd, res_irq->start, IRQF_DISABLED); + if (ret) + goto out2; + + platform_set_drvdata(pdev, hcd); + return 0; + +out2: + iounmap(hcd->regs); +out1: + release_mem_region(hcd->rsrc_start, hcd->rsrc_len); +out: + usb_put_hcd(hcd); + return ret; +} + +static int __devexit ohci_hcd_bcm63xx_drv_remove(struct platform_device *pdev) +{ + struct usb_hcd *hcd; + + hcd = platform_get_drvdata(pdev); + usb_remove_hcd(hcd); + iounmap(hcd->regs); + usb_put_hcd(hcd); + release_mem_region(hcd->rsrc_start, hcd->rsrc_len); + if (usb_host_clock) { + clk_disable(usb_host_clock); + clk_put(usb_host_clock); + } + platform_set_drvdata(pdev, NULL); + return 0; +} + +static struct platform_driver ohci_hcd_bcm63xx_driver = { + .probe = ohci_hcd_bcm63xx_drv_probe, + .remove = __devexit_p(ohci_hcd_bcm63xx_drv_remove), + .shutdown = usb_hcd_platform_shutdown, + .driver = { + .name = "bcm63xx_ohci", + .owner = THIS_MODULE, + .bus = &platform_bus_type + }, +}; + +MODULE_ALIAS("platform:bcm63xx_ohci"); diff --git a/target/linux/brcm63xx/files-2.6.30/drivers/watchdog/bcm63xx_wdt.c b/target/linux/brcm63xx/files-2.6.30/drivers/watchdog/bcm63xx_wdt.c new file mode 100644 index 000000000..8d58ccd8b --- /dev/null +++ b/target/linux/brcm63xx/files-2.6.30/drivers/watchdog/bcm63xx_wdt.c @@ -0,0 +1,334 @@ +/* + * Broadcom BCM63xx SoC watchdog driver + * + * Copyright (C) 2007, Miguel Gaio <miguel.gaio@efixo.com> + * Copyright (C) 2008, Florian Fainelli <florian@openwrt.org> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ */ + +#include <linux/bitops.h> +#include <linux/errno.h> +#include <linux/fs.h> +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/miscdevice.h> +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/reboot.h> +#include <linux/types.h> +#include <linux/uaccess.h> +#include <linux/watchdog.h> +#include <linux/timer.h> +#include <linux/jiffies.h> +#include <linux/resource.h> +#include <linux/platform_device.h> + +#include <bcm63xx_cpu.h> +#include <bcm63xx_io.h> +#include <bcm63xx_regs.h> + +#define PFX KBUILD_MODNAME + +#define WDT_HZ 50000000 /* Fclk */ +#define WDT_DEFAULT_TIME 30 /* seconds */ +#define WDT_MAX_TIME 256 /* seconds */ + +static struct { + void __iomem *regs; + struct timer_list timer; + int default_ticks; + unsigned long inuse; + atomic_t ticks; +} bcm63xx_wdt_device; + +static int expect_close; +static int timeout; + +static int wdt_time = WDT_DEFAULT_TIME; +static int nowayout = WATCHDOG_NOWAYOUT; +module_param(nowayout, int, 0); +MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" + __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); + +/* HW functions */ +static void bcm63xx_wdt_hw_start(void) +{ + bcm_writel(0xfffffffe, bcm63xx_wdt_device.regs + WDT_DEFVAL_REG); + bcm_writel(WDT_START_1, bcm63xx_wdt_device.regs + WDT_CTL_REG); + bcm_writel(WDT_START_2, bcm63xx_wdt_device.regs + WDT_CTL_REG); +} + +static void bcm63xx_wdt_hw_stop(void) +{ + bcm_writel(WDT_STOP_1, bcm63xx_wdt_device.regs + WDT_CTL_REG); + bcm_writel(WDT_STOP_2, bcm63xx_wdt_device.regs + WDT_CTL_REG); +} + +static void bcm63xx_timer_tick(unsigned long unused) +{ + if (!atomic_dec_and_test(&bcm63xx_wdt_device.ticks)) { + bcm63xx_wdt_hw_start(); + mod_timer(&bcm63xx_wdt_device.timer, jiffies + HZ); + } else + printk(KERN_CRIT PFX ": watchdog will restart system\n"); +} + +static void bcm63xx_wdt_pet(void) +{ + atomic_set(&bcm63xx_wdt_device.ticks, wdt_time); +} + +static void bcm63xx_wdt_start(void) +{ + bcm63xx_wdt_pet(); + bcm63xx_timer_tick(0); +} + +static void bcm63xx_wdt_pause(void) +{ + del_timer_sync(&bcm63xx_wdt_device.timer); + bcm63xx_wdt_hw_stop(); +} + +static int bcm63xx_wdt_settimeout(int new_time) +{ + if ((new_time <= 0) || (new_time > WDT_MAX_TIME)) + return -EINVAL; + + wdt_time = new_time; + + return 0; +} + +static int bcm63xx_wdt_open(struct inode *inode, struct file *file) +{ + if (test_and_set_bit(0, &bcm63xx_wdt_device.inuse)) + return -EBUSY; + + bcm63xx_wdt_start(); + return nonseekable_open(inode, file); +} + +static int bcm63xx_wdt_release(struct inode *inode, struct file *file) +{ + if (expect_close == 42) + bcm63xx_wdt_pause(); + else { + printk(KERN_CRIT PFX + ": Unexpected close, not stopping watchdog!\n"); + bcm63xx_wdt_start(); + } + clear_bit(0, &bcm63xx_wdt_device.inuse); + expect_close = 0; + return 0; +} + +static ssize_t bcm63xx_wdt_write(struct file *file, const char *data, + size_t len, loff_t *ppos) +{ + if (len) { + if (!nowayout) { + size_t i; + + /* In case it was set long ago */ + expect_close = 0; + + for (i = 0; i != len; i++) { + char c; + if (get_user(c, data + i)) + return -EFAULT; + if (c == 'V') + expect_close = 42; + } + } + bcm63xx_wdt_pet(); + } + return len; +} + +static struct watchdog_info bcm63xx_wdt_info = { + .identity = PFX, + .options = WDIOF_SETTIMEOUT | + WDIOF_KEEPALIVEPING | + WDIOF_MAGICCLOSE, +}; + + +static long bcm63xx_wdt_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + void __user *argp = (void __user *)arg; + int __user *p = argp; + int new_value, retval 
= -EINVAL; + + switch (cmd) { + case WDIOC_GETSUPPORT: + return copy_to_user(argp, &bcm63xx_wdt_info, + sizeof(bcm63xx_wdt_info)) ? -EFAULT : 0; + + case WDIOC_GETSTATUS: + case WDIOC_GETBOOTSTATUS: + return put_user(0, p); + + case WDIOC_SETOPTIONS: + if (get_user(new_value, p)) + return -EFAULT; + + if (new_value & WDIOS_DISABLECARD) { + bcm63xx_wdt_pause(); + retval = 0; + } + if (new_value & WDIOS_ENABLECARD) { + bcm63xx_wdt_start(); + retval = 0; + } + + return retval; + + case WDIOC_KEEPALIVE: + bcm63xx_wdt_pet(); + return 0; + + case WDIOC_SETTIMEOUT: + if (get_user(new_value, p)) + return -EFAULT; + + if (bcm63xx_wdt_settimeout(new_value)) + return -EINVAL; + + bcm63xx_wdt_pet(); + + case WDIOC_GETTIMEOUT: + return put_user(wdt_time, p); + + default: + return -ENOTTY; + + } +} + +static int bcm63xx_wdt_notify_sys(struct notifier_block *this, + unsigned long code, void *unused) +{ + if (code == SYS_DOWN || code == SYS_HALT) + bcm63xx_wdt_pause(); + return NOTIFY_DONE; +} + +static const struct file_operations bcm63xx_wdt_fops = { + .owner = THIS_MODULE, + .llseek = no_llseek, + .write = bcm63xx_wdt_write, + .unlocked_ioctl = bcm63xx_wdt_ioctl, + .open = bcm63xx_wdt_open, + .release = bcm63xx_wdt_release, +}; + +static struct miscdevice bcm63xx_wdt_miscdev = { + .minor = WATCHDOG_MINOR, + .name = "watchdog", + .fops = &bcm63xx_wdt_fops, +}; + +static struct notifier_block bcm63xx_wdt_notifier = { + .notifier_call = bcm63xx_wdt_notify_sys, +}; + + +static int bcm63xx_wdt_probe(struct platform_device *pdev) +{ + int ret; + struct resource *r; + + setup_timer(&bcm63xx_wdt_device.timer, bcm63xx_timer_tick, 0L); + + r = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!r) { + printk(KERN_ERR PFX + "failed to retrieve resources\n"); + return -ENODEV; + } + + bcm63xx_wdt_device.regs = ioremap_nocache(r->start, r->end - r->start); + if (!bcm63xx_wdt_device.regs) { + printk(KERN_ERR PFX + "failed to remap I/O resources\n"); + return -ENXIO; + } + + if (bcm63xx_wdt_settimeout(wdt_time)) { + bcm63xx_wdt_settimeout(WDT_DEFAULT_TIME); + printk(KERN_INFO PFX + ": wdt_time value must be 1 <= wdt_time <= 256, using %d\n", + wdt_time); + } + + ret = register_reboot_notifier(&bcm63xx_wdt_notifier); + if (ret) { + printk(KERN_ERR PFX + "failed to register reboot_notifier\n"); + return ret; + } + + ret = misc_register(&bcm63xx_wdt_miscdev); + if (ret < 0) { + printk(KERN_ERR PFX + "failed to register watchdog device\n"); + goto unmap; + } + + printk(KERN_INFO PFX " started, timer margin: %d sec\n", WDT_DEFAULT_TIME); + + return 0; + +unmap: + unregister_reboot_notifier(&bcm63xx_wdt_notifier); + iounmap(bcm63xx_wdt_device.regs); + return ret; +} + +static int bcm63xx_wdt_remove(struct platform_device *pdev) +{ + if (!nowayout) + bcm63xx_wdt_pause(); + + misc_deregister(&bcm63xx_wdt_miscdev); + + iounmap(bcm63xx_wdt_device.regs); + + unregister_reboot_notifier(&bcm63xx_wdt_notifier); + + return 0; +} + +static struct platform_driver bcm63xx_wdt = { + .probe = bcm63xx_wdt_probe, + .remove = bcm63xx_wdt_remove, + .driver = { + .name = "bcm63xx-wdt", + } +}; + +static int __init bcm63xx_wdt_init(void) +{ + return platform_driver_register(&bcm63xx_wdt); +} + +static void __exit bcm63xx_wdt_exit(void) +{ + platform_driver_unregister(&bcm63xx_wdt); +} + +module_init(bcm63xx_wdt_init); +module_exit(bcm63xx_wdt_exit); + +MODULE_AUTHOR("Miguel Gaio <miguel.gaio@efixo.com>"); +MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>"); +MODULE_DESCRIPTION("Driver for the Broadcom BCM63xx SoC watchdog"); 
+MODULE_LICENSE("GPL"); +MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); +MODULE_ALIAS("platform:bcm63xx-wdt"); |