author	nbd <nbd@3c298f89-4303-0410-b956-a3cf2f4a3e73>	2007-11-10 19:00:53 +0000
committer	nbd <nbd@3c298f89-4303-0410-b956-a3cf2f4a3e73>	2007-11-10 19:00:53 +0000
commit	ed261aac7dd5c71351f720ab16a12b5bd8c1073b (patch)
tree	dbe4bee46c2ad4388d4b86fc720c94a461edb4ff /package/broadcom-wl/src/driver
parent	46d61531218d6313175cdbaead9df078e1cf7cba (diff)
move wlcompat to the broadcom-wl package
git-svn-id: svn://svn.openwrt.org/openwrt/trunk@9528 3c298f89-4303-0410-b956-a3cf2f4a3e73
Diffstat (limited to 'package/broadcom-wl/src/driver')
-rw-r--r--	package/broadcom-wl/src/driver/Makefile	|   31
-rw-r--r--	package/broadcom-wl/src/driver/bcmip.h	|  101
-rw-r--r--	package/broadcom-wl/src/driver/bcmutils.c	|  873
-rw-r--r--	package/broadcom-wl/src/driver/hnddma.c	| 1893
-rw-r--r--	package/broadcom-wl/src/driver/hnddma.h	|  156
-rw-r--r--	package/broadcom-wl/src/driver/linux_osl.c	|  274
-rw-r--r--	package/broadcom-wl/src/driver/linux_osl.h	|  171
-rw-r--r--	package/broadcom-wl/src/driver/patchtable.pl	|   61
-rw-r--r--	package/broadcom-wl/src/driver/pktq.h	|   97
-rw-r--r--	package/broadcom-wl/src/driver/sbhnddma.h	|  284
10 files changed, 3941 insertions, 0 deletions
diff --git a/package/broadcom-wl/src/driver/Makefile b/package/broadcom-wl/src/driver/Makefile
new file mode 100644
index 000000000..0a16bdcec
--- /dev/null
+++ b/package/broadcom-wl/src/driver/Makefile
@@ -0,0 +1,31 @@
+#
+# Makefile for the Broadcom wl driver
+#
+# Copyright 2004, Broadcom Corporation
+# All Rights Reserved.
+#
+# THIS SOFTWARE IS OFFERED "AS IS", AND BROADCOM GRANTS NO WARRANTIES OF ANY
+# KIND, EXPRESS OR IMPLIED, BY STATUTE, COMMUNICATION OR OTHERWISE. BROADCOM
+# SPECIFICALLY DISCLAIMS ANY IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS
+# FOR A SPECIFIC PURPOSE OR NONINFRINGEMENT CONCERNING THIS SOFTWARE.
+#
+# $Id: Makefile,v 1.2 2005/03/29 03:32:18 mbm Exp $
+
+EXTRA_CFLAGS += -I$(TOPDIR)/arch/mips/bcm947xx/include -DBCMDRIVER=1 -DBCMDMA64=1
+
+O_TARGET := wl$(MOD_NAME).o
+
+obj-y := wl_mod$(MOD_NAME).o
+obj-y += bcmutils.o hnddma.o linux_osl.o
+
+obj-m := $(O_TARGET)
+
+wl_mod$(MOD_NAME).o: wl_apsta$(MOD_NAME).o
+ perl -ne 's,eth%d,wl%d\x00,g,print' < $< > $@
+
+wl$(MOD_NAME).o.patch: wl$(MOD_NAME).o
+ $(OBJDUMP) -d $< | perl patchtable.pl > $@
+
+modules: wl$(MOD_NAME).o.patch
+
+include $(TOPDIR)/Rules.make
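Editor's note: the perl step above patches the proprietary wl_apsta object in place, renaming its interface name template from "eth%d" to "wl%d". The "\x00" pad keeps the replacement the same length as the original string, so no offset in the object file shifts. A minimal C sanity check of that size property (illustrative only, not part of the patch):

	#include <assert.h>

	int main(void)
	{
		/* "eth%d" and "wl%d\x00" both occupy 5 bytes, so the in-place
		 * substitution never moves any other byte in the object */
		assert(sizeof("eth%d") - 1 == sizeof("wl%d\x00") - 1);
		return 0;
	}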
diff --git a/package/broadcom-wl/src/driver/bcmip.h b/package/broadcom-wl/src/driver/bcmip.h
new file mode 100644
index 000000000..423a0e5c5
--- /dev/null
+++ b/package/broadcom-wl/src/driver/bcmip.h
@@ -0,0 +1,101 @@
+/*
+ * Copyright 2006, Broadcom Corporation
+ * All Rights Reserved.
+ *
+ * THIS SOFTWARE IS OFFERED "AS IS", AND BROADCOM GRANTS NO WARRANTIES OF ANY
+ * KIND, EXPRESS OR IMPLIED, BY STATUTE, COMMUNICATION OR OTHERWISE. BROADCOM
+ * SPECIFICALLY DISCLAIMS ANY IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A SPECIFIC PURPOSE OR NONINFRINGEMENT CONCERNING THIS SOFTWARE.
+ *
+ * Fundamental constants relating to IP Protocol
+ *
+ * $Id: bcmip.h,v 1.1.1.3 2006/02/27 03:43:16 honor Exp $
+ */
+
+#ifndef _bcmip_h_
+#define _bcmip_h_
+
+/* IPV4 and IPV6 common */
+#define IP_VER_OFFSET 0x0 /* offset to version field */
+#define IP_VER_MASK 0xf0 /* version mask */
+#define IP_VER_SHIFT 4 /* version shift */
+#define IP_VER_4 4 /* version number for IPV4 */
+#define IP_VER_6 6 /* version number for IPV6 */
+
+#define IP_VER(ip_body) \
+ ((((uint8 *)(ip_body))[IP_VER_OFFSET] & IP_VER_MASK) >> IP_VER_SHIFT)
+
+#define IP_PROT_ICMP 0x1 /* ICMP protocol */
+#define IP_PROT_TCP 0x6 /* TCP protocol */
+#define IP_PROT_UDP 0x11 /* UDP protocol type */
+
+/* IPV4 field offsets */
+#define IPV4_VER_HL_OFFSET 0 /* version and ihl byte offset */
+#define IPV4_TOS_OFFSET 1 /* type of service offset */
+#define IPV4_PROT_OFFSET 9 /* protocol type offset */
+#define IPV4_CHKSUM_OFFSET 10 /* IP header checksum offset */
+#define IPV4_SRC_IP_OFFSET 12 /* src IP addr offset */
+#define IPV4_DEST_IP_OFFSET 16 /* dest IP addr offset */
+
+/* IPV4 field decodes */
+#define IPV4_VER_MASK 0xf0 /* IPV4 version mask */
+#define IPV4_VER_SHIFT 4 /* IPV4 version shift */
+
+#define IPV4_HLEN_MASK 0x0f /* IPV4 header length mask */
+#define IPV4_HLEN(ipv4_body) (4 * (((uint8 *)(ipv4_body))[IPV4_VER_HL_OFFSET] & IPV4_HLEN_MASK))
+
+#define IPV4_ADDR_LEN 4 /* IPV4 address length */
+
+#define IPV4_ADDR_NULL(a) ((((uint8 *)(a))[0] | ((uint8 *)(a))[1] | \
+ ((uint8 *)(a))[2] | ((uint8 *)(a))[3]) == 0)
+
+#define IPV4_TOS_DSCP_MASK 0xfc /* DiffServ codepoint mask */
+#define IPV4_TOS_DSCP_SHIFT 2 /* DiffServ codepoint shift */
+
+#define IPV4_TOS(ipv4_body) (((uint8 *)(ipv4_body))[IPV4_TOS_OFFSET])
+
+#define IPV4_TOS_PREC_MASK 0xe0 /* Historical precedence mask */
+#define IPV4_TOS_PREC_SHIFT 5 /* Historical precedence shift */
+
+#define IPV4_TOS_LOWDELAY 0x10 /* Lowest delay requested */
+#define IPV4_TOS_THROUGHPUT 0x8 /* Best throughput requested */
+#define IPV4_TOS_RELIABILITY 0x4 /* Most reliable delivery requested */
+
+#define IPV4_PROT(ipv4_body) (((uint8 *)(ipv4_body))[IPV4_PROT_OFFSET])
+
+#define IPV4_ADDR_STR_LEN 16 /* Max IP address length in string format */
+
+/* IPV6 field offsets */
+#define IPV6_PAYLOAD_LEN_OFFSET 4 /* payload length offset */
+#define IPV6_NEXT_HDR_OFFSET 6 /* next header/protocol offset */
+#define IPV6_HOP_LIMIT_OFFSET 7 /* hop limit offset */
+#define IPV6_SRC_IP_OFFSET 8 /* src IP addr offset */
+#define IPV6_DEST_IP_OFFSET 24 /* dst IP addr offset */
+
+/* IPV6 field decodes */
+#define IPV6_TRAFFIC_CLASS(ipv6_body) \
+ (((((uint8 *)(ipv6_body))[0] & 0x0f) << 4) | \
+ ((((uint8 *)(ipv6_body))[1] & 0xf0) >> 4))
+
+#define IPV6_FLOW_LABEL(ipv6_body) \
+ (((((uint8 *)(ipv6_body))[1] & 0x0f) << 16) | \
+ (((uint8 *)(ipv6_body))[2] << 8) | \
+ (((uint8 *)(ipv6_body))[3]))
+
+#define IPV6_PAYLOAD_LEN(ipv6_body) \
+ ((((uint8 *)(ipv6_body))[IPV6_PAYLOAD_LEN_OFFSET + 0] << 8) | \
+ ((uint8 *)(ipv6_body))[IPV6_PAYLOAD_LEN_OFFSET + 1])
+
+#define IPV6_NEXT_HDR(ipv6_body) \
+ (((uint8 *)(ipv6_body))[IPV6_NEXT_HDR_OFFSET])
+
+#define IPV6_PROT(ipv6_body) IPV6_NEXT_HDR(ipv6_body)
+
+#define IPV6_ADDR_LEN 16 /* IPV6 address length */
+
+/* IPV4 TOS or IPV6 Traffic Classifier or 0 */
+#define IP_TOS(ip_body) \
+ (IP_VER(ip_body) == IP_VER_4 ? IPV4_TOS(ip_body) : \
+ IP_VER(ip_body) == IP_VER_6 ? IPV6_TRAFFIC_CLASS(ip_body) : 0)
+
+#endif /* _bcmip_h_ */
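Editor's note: a minimal sketch of how the accessors above compose on a raw IP header; classify() is hypothetical, and pkt must point at the first byte of the IP header:

	static void classify(uint8 *pkt)
	{
		if (IP_VER(pkt) == IP_VER_4) {
			uint hlen = IPV4_HLEN(pkt);	/* IHL * 4 bytes */
			uint8 dscp = (IPV4_TOS(pkt) & IPV4_TOS_DSCP_MASK)
				>> IPV4_TOS_DSCP_SHIFT;
			if (IPV4_PROT(pkt) == IP_PROT_TCP) {
				/* TCP header begins at pkt + hlen */
			}
		} else if (IP_VER(pkt) == IP_VER_6) {
			uint8 tc = IPV6_TRAFFIC_CLASS(pkt);	/* IPv4-TOS analogue */
		}
	}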
diff --git a/package/broadcom-wl/src/driver/bcmutils.c b/package/broadcom-wl/src/driver/bcmutils.c
new file mode 100644
index 000000000..7592f230a
--- /dev/null
+++ b/package/broadcom-wl/src/driver/bcmutils.c
@@ -0,0 +1,873 @@
+/*
+ * Misc useful OS-independent routines.
+ *
+ * Copyright 2006, Broadcom Corporation
+ * All Rights Reserved.
+ *
+ * THIS SOFTWARE IS OFFERED "AS IS", AND BROADCOM GRANTS NO WARRANTIES OF ANY
+ * KIND, EXPRESS OR IMPLIED, BY STATUTE, COMMUNICATION OR OTHERWISE. BROADCOM
+ * SPECIFICALLY DISCLAIMS ANY IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A SPECIFIC PURPOSE OR NONINFRINGEMENT CONCERNING THIS SOFTWARE.
+ * $Id: bcmutils.c,v 1.1.1.12 2006/02/27 03:43:16 honor Exp $
+ */
+
+#include <typedefs.h>
+#include <bcmdefs.h>
+#include <stdarg.h>
+#include <osl.h>
+#include "linux_osl.h"
+#include "pktq.h"
+#include <bcmutils.h>
+#include <sbutils.h>
+#include <bcmnvram.h>
+#include <bcmendian.h>
+#include <bcmdevs.h>
+#include "bcmip.h"
+
+#define ETHER_TYPE_8021Q 0x8100
+#define ETHER_TYPE_IP 0x0800
+#define VLAN_PRI_SHIFT 13
+#define VLAN_PRI_MASK 7
+
+
+struct ether_header {
+ uint8 ether_dhost[6];
+ uint8 ether_shost[6];
+ uint16 ether_type;
+} __attribute__((packed));
+
+
+struct ethervlan_header {
+ uint8 ether_dhost[6];
+ uint8 ether_shost[6];
+ uint16 vlan_type; /* 0x8100 */
+ uint16 vlan_tag; /* priority, cfi and vid */
+ uint16 ether_type;
+};
+
+/* copy a pkt buffer chain into a buffer */
+uint
+pktcopy(osl_t *osh, void *p, uint offset, int len, uchar *buf)
+{
+ uint n, ret = 0;
+
+ if (len < 0)
+ len = 4096; /* "infinite" */
+
+ /* skip 'offset' bytes */
+ for (; p && offset; p = PKTNEXT(osh, p)) {
+ if (offset < (uint)PKTLEN(osh, p))
+ break;
+ offset -= PKTLEN(osh, p);
+ }
+
+ if (!p)
+ return 0;
+
+ /* copy the data */
+ for (; p && len; p = PKTNEXT(osh, p)) {
+ n = MIN((uint)PKTLEN(osh, p) - offset, (uint)len);
+ bcopy(PKTDATA(osh, p) + offset, buf, n);
+ buf += n;
+ len -= n;
+ ret += n;
+ offset = 0;
+ }
+
+ return ret;
+}
+
+/* return total length of buffer chain */
+uint
+pkttotlen(osl_t *osh, void *p)
+{
+ uint total;
+
+ total = 0;
+ for (; p; p = PKTNEXT(osh, p))
+ total += PKTLEN(osh, p);
+ return (total);
+}
+
+/* return the last buffer of chained pkt */
+void *
+pktlast(osl_t *osh, void *p)
+{
+ for (; PKTNEXT(osh, p); p = PKTNEXT(osh, p))
+ ;
+
+ return (p);
+}
+
+
+/*
+ * osl multiple-precedence packet queue
+ * hi_prec is always >= the number of the highest non-empty queue
+ */
+void *
+pktq_penq(struct pktq *pq, int prec, void *p)
+{
+ struct pktq_prec *q;
+
+ ASSERT(prec >= 0 && prec < pq->num_prec);
+ ASSERT(PKTLINK(p) == NULL); /* queueing chains not allowed */
+
+ ASSERT(!pktq_full(pq));
+ ASSERT(!pktq_pfull(pq, prec));
+
+ q = &pq->q[prec];
+
+ if (q->head)
+ PKTSETLINK(q->tail, p);
+ else
+ q->head = p;
+
+ q->tail = p;
+ q->len++;
+
+ pq->len++;
+
+ if (pq->hi_prec < prec)
+ pq->hi_prec = (uint8)prec;
+
+ return p;
+}
+
+void *
+pktq_penq_head(struct pktq *pq, int prec, void *p)
+{
+ struct pktq_prec *q;
+
+ ASSERT(prec >= 0 && prec < pq->num_prec);
+ ASSERT(PKTLINK(p) == NULL); /* queueing chains not allowed */
+
+ ASSERT(!pktq_full(pq));
+ ASSERT(!pktq_pfull(pq, prec));
+
+ q = &pq->q[prec];
+
+ if (q->head == NULL)
+ q->tail = p;
+
+ PKTSETLINK(p, q->head);
+ q->head = p;
+ q->len++;
+
+ pq->len++;
+
+ if (pq->hi_prec < prec)
+ pq->hi_prec = (uint8)prec;
+
+ return p;
+}
+
+void *
+pktq_pdeq(struct pktq *pq, int prec)
+{
+ struct pktq_prec *q;
+ void *p;
+
+ ASSERT(prec >= 0 && prec < pq->num_prec);
+
+ q = &pq->q[prec];
+
+ if ((p = q->head) == NULL)
+ return NULL;
+
+ if ((q->head = PKTLINK(p)) == NULL)
+ q->tail = NULL;
+
+ q->len--;
+
+ pq->len--;
+
+ PKTSETLINK(p, NULL);
+
+ return p;
+}
+
+void *
+pktq_pdeq_tail(struct pktq *pq, int prec)
+{
+ struct pktq_prec *q;
+ void *p, *prev;
+
+ ASSERT(prec >= 0 && prec < pq->num_prec);
+
+ q = &pq->q[prec];
+
+ if ((p = q->head) == NULL)
+ return NULL;
+
+ for (prev = NULL; p != q->tail; p = PKTLINK(p))
+ prev = p;
+
+ if (prev)
+ PKTSETLINK(prev, NULL);
+ else
+ q->head = NULL;
+
+ q->tail = prev;
+ q->len--;
+
+ pq->len--;
+
+ return p;
+}
+
+void
+pktq_pflush(osl_t *osh, struct pktq *pq, int prec, bool dir)
+{
+ struct pktq_prec *q;
+ void *p;
+
+ q = &pq->q[prec];
+ p = q->head;
+ while (p) {
+ q->head = PKTLINK(p);
+ PKTSETLINK(p, NULL);
+ PKTFREE(osh, p, dir);
+ q->len--;
+ pq->len--;
+ p = q->head;
+ }
+ ASSERT(q->len == 0);
+ q->tail = NULL;
+}
+
+bool
+pktq_pdel(struct pktq *pq, void *pktbuf, int prec)
+{
+ struct pktq_prec *q;
+ void *p;
+
+ ASSERT(prec >= 0 && prec < pq->num_prec);
+
+ if (!pktbuf)
+ return FALSE;
+
+ q = &pq->q[prec];
+
+ if (q->head == pktbuf) {
+ if ((q->head = PKTLINK(pktbuf)) == NULL)
+ q->tail = NULL;
+ } else {
+ for (p = q->head; p && PKTLINK(p) != pktbuf; p = PKTLINK(p))
+ ;
+ if (p == NULL)
+ return FALSE;
+
+ PKTSETLINK(p, PKTLINK(pktbuf));
+ if (q->tail == pktbuf)
+ q->tail = p;
+ }
+
+ q->len--;
+ pq->len--;
+ PKTSETLINK(pktbuf, NULL);
+ return TRUE;
+}
+
+void
+pktq_init(struct pktq *pq, int num_prec, int max_len)
+{
+ int prec;
+
+ ASSERT(num_prec > 0 && num_prec <= PKTQ_MAX_PREC);
+
+ bzero(pq, sizeof(*pq));
+
+ pq->num_prec = (uint16)num_prec;
+
+ pq->max = (uint16)max_len;
+
+ for (prec = 0; prec < num_prec; prec++)
+ pq->q[prec].max = pq->max;
+}
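/* Editor's note, not part of the original patch: a minimal usage sketch of
 * the precedence queue above. hi_prec is only an upper bound on the highest
 * non-empty precedence, so pktq_deq() walks it downward lazily:
 *
 *	struct pktq q;
 *	int prec;
 *	void *p;
 *
 *	pktq_init(&q, 4, 64);		// 4 precedences, 64-packet limit
 *	pktq_penq(&q, 3, pkt);		// tail-enqueue pkt at precedence 3
 *	p = pktq_deq(&q, &prec);	// returns pkt with prec == 3
 */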
+
+void *
+pktq_deq(struct pktq *pq, int *prec_out)
+{
+ struct pktq_prec *q;
+ void *p;
+ int prec;
+
+ if (pq->len == 0)
+ return NULL;
+
+ while ((prec = pq->hi_prec) > 0 && pq->q[prec].head == NULL)
+ pq->hi_prec--;
+
+ q = &pq->q[prec];
+
+ if ((p = q->head) == NULL)
+ return NULL;
+
+ if ((q->head = PKTLINK(p)) == NULL)
+ q->tail = NULL;
+
+ q->len--;
+
+ pq->len--;
+
+ if (prec_out)
+ *prec_out = prec;
+
+ PKTSETLINK(p, NULL);
+
+ return p;
+}
+
+void *
+pktq_deq_tail(struct pktq *pq, int *prec_out)
+{
+ struct pktq_prec *q;
+ void *p, *prev;
+ int prec;
+
+ if (pq->len == 0)
+ return NULL;
+
+ for (prec = 0; prec < pq->hi_prec; prec++)
+ if (pq->q[prec].head)
+ break;
+
+ q = &pq->q[prec];
+
+ if ((p = q->head) == NULL)
+ return NULL;
+
+ for (prev = NULL; p != q->tail; p = PKTLINK(p))
+ prev = p;
+
+ if (prev)
+ PKTSETLINK(prev, NULL);
+ else
+ q->head = NULL;
+
+ q->tail = prev;
+ q->len--;
+
+ pq->len--;
+
+ if (prec_out)
+ *prec_out = prec;
+
+ PKTSETLINK(p, NULL);
+
+ return p;
+}
+
+void *
+pktq_peek(struct pktq *pq, int *prec_out)
+{
+ int prec;
+
+ if (pq->len == 0)
+ return NULL;
+
+ while ((prec = pq->hi_prec) > 0 && pq->q[prec].head == NULL)
+ pq->hi_prec--;
+
+ if (prec_out)
+ *prec_out = prec;
+
+ return (pq->q[prec].head);
+}
+
+void *
+pktq_peek_tail(struct pktq *pq, int *prec_out)
+{
+ int prec;
+
+ if (pq->len == 0)
+ return NULL;
+
+ for (prec = 0; prec < pq->hi_prec; prec++)
+ if (pq->q[prec].head)
+ break;
+
+ if (prec_out)
+ *prec_out = prec;
+
+ return (pq->q[prec].tail);
+}
+
+void
+pktq_flush(osl_t *osh, struct pktq *pq, bool dir)
+{
+ int prec;
+ for (prec = 0; prec < pq->num_prec; prec++)
+ pktq_pflush(osh, pq, prec, dir);
+ ASSERT(pq->len == 0);
+}
+
+/* Return sum of lengths of a specific set of precedences */
+int
+pktq_mlen(struct pktq *pq, uint prec_bmp)
+{
+ int prec, len;
+
+ len = 0;
+
+ for (prec = 0; prec <= pq->hi_prec; prec++)
+ if (prec_bmp & (1 << prec))
+ len += pq->q[prec].len;
+
+ return len;
+}
+
+/* Priority dequeue from a specific set of precedences */
+void *
+pktq_mdeq(struct pktq *pq, uint prec_bmp, int *prec_out)
+{
+ struct pktq_prec *q;
+ void *p;
+ int prec;
+
+ if (pq->len == 0)
+ return NULL;
+
+ while ((prec = pq->hi_prec) > 0 && pq->q[prec].head == NULL)
+ pq->hi_prec--;
+
+ while ((prec_bmp & (1 << prec)) == 0 || pq->q[prec].head == NULL)
+ if (prec-- == 0)
+ return NULL;
+
+ q = &pq->q[prec];
+
+ if ((p = q->head) == NULL)
+ return NULL;
+
+ if ((q->head = PKTLINK(p)) == NULL)
+ q->tail = NULL;
+
+ q->len--;
+
+ if (prec_out)
+ *prec_out = prec;
+
+ pq->len--;
+
+ PKTSETLINK(p, NULL);
+
+ return p;
+}
+
+char*
+bcmstrcat(char *dest, const char *src)
+{
+ strcpy(&dest[strlen(dest)], src);
+ return (dest);
+}
+
+char*
+bcm_ether_ntoa(struct ether_addr *ea, char *buf)
+{
+ sprintf(buf, "%02x:%02x:%02x:%02x:%02x:%02x",
+ ea->octet[0]&0xff, ea->octet[1]&0xff, ea->octet[2]&0xff,
+ ea->octet[3]&0xff, ea->octet[4]&0xff, ea->octet[5]&0xff);
+ return (buf);
+}
+
+/* parse an xx:xx:xx:xx:xx:xx format ethernet address */
+int
+bcm_ether_atoe(char *p, struct ether_addr *ea)
+{
+ int i = 0;
+
+ for (;;) {
+ ea->octet[i++] = (char) bcm_strtoul(p, &p, 16);
+ if (!*p++ || i == 6)
+ break;
+ }
+
+ return (i == 6);
+}
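/* Editor's note, not part of the original patch: a round-trip sketch for the
 * two helpers above (buf sized for "xx:xx:xx:xx:xx:xx" plus NUL):
 *
 *	struct ether_addr ea;
 *	char buf[18];
 *
 *	if (bcm_ether_atoe("00:90:4c:5f:00:2a", &ea))
 *		printf("parsed %s\n", bcm_ether_ntoa(&ea, buf));
 */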
+
+/* Takes an Ethernet frame and sets the out-of-band PKTPRIO.
+ * Also updates the in-place VLAN tag if requested.
+ */
+void
+pktsetprio(void *pkt, bool update_vtag)
+{
+ struct ether_header *eh;
+ struct ethervlan_header *evh;
+ uint8 *pktdata;
+ int priority = 0;
+
+ pktdata = (uint8 *) PKTDATA(NULL, pkt);
+ ASSERT(ISALIGNED((uintptr)pktdata, sizeof(uint16)));
+
+ eh = (struct ether_header *) pktdata;
+
+ if (ntoh16(eh->ether_type) == ETHER_TYPE_8021Q) {
+ uint16 vlan_tag;
+ int vlan_prio, dscp_prio = 0;
+
+ evh = (struct ethervlan_header *)eh;
+
+ vlan_tag = ntoh16(evh->vlan_tag);
+ vlan_prio = (int) (vlan_tag >> VLAN_PRI_SHIFT) & VLAN_PRI_MASK;
+
+ if (ntoh16(evh->ether_type) == ETHER_TYPE_IP) {
+ uint8 *ip_body = pktdata + sizeof(struct ethervlan_header);
+ uint8 tos_tc = IP_TOS(ip_body);
+ dscp_prio = (int)(tos_tc >> IPV4_TOS_PREC_SHIFT);
+ }
+
+ /* DSCP priority gets precedence over 802.1P (vlan tag) */
+ priority = (dscp_prio != 0) ? dscp_prio : vlan_prio;
+
+		/*
+		 * If the DSCP priority is not the same as the VLAN priority,
+		 * overwrite the priority field in the vlan tag with the
+		 * DSCP priority value. This is required for Linux APs because
+		 * the Linux VLAN driver overwrites the skb->priority field
+		 * with the priority value in the vlan tag.
+		 */
+ if (update_vtag && (priority != vlan_prio)) {
+ vlan_tag &= ~(VLAN_PRI_MASK << VLAN_PRI_SHIFT);
+ vlan_tag |= (uint16)priority << VLAN_PRI_SHIFT;
+ evh->vlan_tag = hton16(vlan_tag);
+ }
+ } else if (ntoh16(eh->ether_type) == ETHER_TYPE_IP) {
+ uint8 *ip_body = pktdata + sizeof(struct ether_header);
+ uint8 tos_tc = IP_TOS(ip_body);
+ priority = (int)(tos_tc >> IPV4_TOS_PREC_SHIFT);
+ }
+
+ ASSERT(priority >= 0 && priority <= MAXPRIO);
+ PKTSETPRIO(pkt, priority);
+}
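/* Editor's note, not part of the original patch: a worked example of the
 * precedence rule above. For an 802.1Q frame carrying IP with TOS 0xb8
 * (DSCP 46, precedence bits 0xb8 >> 5 == 5) and VLAN user priority 2:
 * dscp_prio == 5 wins over vlan_prio == 2, PKTPRIO becomes 5, and with
 * update_vtag the PRI field of the tag is rewritten from 2 to 5.
 */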
+
+static char bcm_undeferrstr[BCME_STRLEN];
+
+static const char *bcmerrorstrtable[] = BCMERRSTRINGTABLE;
+
+/* Convert the Error codes into related Error strings */
+const char *
+bcmerrorstr(int bcmerror)
+{
+ int abs_bcmerror;
+
+ abs_bcmerror = ABS(bcmerror);
+
+ /* check if someone added a bcmerror code but forgot to add errorstring */
+ ASSERT(ABS(BCME_LAST) == (ARRAYSIZE(bcmerrorstrtable) - 1));
+ if ((bcmerror > 0) || (abs_bcmerror > ABS(BCME_LAST))) {
+ sprintf(bcm_undeferrstr, "undefined Error %d", bcmerror);
+ return bcm_undeferrstr;
+ }
+
+ ASSERT((strlen((char*)bcmerrorstrtable[abs_bcmerror])) < BCME_STRLEN);
+
+ return bcmerrorstrtable[abs_bcmerror];
+}
+
+
+int
+bcm_iovar_lencheck(const bcm_iovar_t *vi, void *arg, int len, bool set)
+{
+ int bcmerror = 0;
+
+ /* length check on io buf */
+ switch (vi->type) {
+ case IOVT_BOOL:
+ case IOVT_INT8:
+ case IOVT_INT16:
+ case IOVT_INT32:
+ case IOVT_UINT8:
+ case IOVT_UINT16:
+ case IOVT_UINT32:
+ /* all integers are int32 sized args at the ioctl interface */
+ if (len < (int)sizeof(int)) {
+ bcmerror = BCME_BUFTOOSHORT;
+ }
+ break;
+
+ case IOVT_BUFFER:
+ /* buffer must meet minimum length requirement */
+ if (len < vi->minlen) {
+ bcmerror = BCME_BUFTOOSHORT;
+ }
+ break;
+
+ case IOVT_VOID:
+ if (!set) {
+ /* Cannot return nil... */
+ bcmerror = BCME_UNSUPPORTED;
+ } else if (len) {
+ /* Set is an action w/o parameters */
+ bcmerror = BCME_BUFTOOLONG;
+ }
+ break;
+
+ default:
+ /* unknown type for length check in iovar info */
+ ASSERT(0);
+ bcmerror = BCME_UNSUPPORTED;
+ }
+
+ return bcmerror;
+}
+
+#define CRC_INNER_LOOP(n, c, x) \
+ (c) = ((c) >> 8) ^ crc##n##_table[((c) ^ (x)) & 0xff]
+
+static uint32 crc32_table[256] = {
+ 0x00000000, 0x77073096, 0xEE0E612C, 0x990951BA,
+ 0x076DC419, 0x706AF48F, 0xE963A535, 0x9E6495A3,
+ 0x0EDB8832, 0x79DCB8A4, 0xE0D5E91E, 0x97D2D988,
+ 0x09B64C2B, 0x7EB17CBD, 0xE7B82D07, 0x90BF1D91,
+ 0x1DB71064, 0x6AB020F2, 0xF3B97148, 0x84BE41DE,
+ 0x1ADAD47D, 0x6DDDE4EB, 0xF4D4B551, 0x83D385C7,
+ 0x136C9856, 0x646BA8C0, 0xFD62F97A, 0x8A65C9EC,
+ 0x14015C4F, 0x63066CD9, 0xFA0F3D63, 0x8D080DF5,
+ 0x3B6E20C8, 0x4C69105E, 0xD56041E4, 0xA2677172,
+ 0x3C03E4D1, 0x4B04D447, 0xD20D85FD, 0xA50AB56B,
+ 0x35B5A8FA, 0x42B2986C, 0xDBBBC9D6, 0xACBCF940,
+ 0x32D86CE3, 0x45DF5C75, 0xDCD60DCF, 0xABD13D59,
+ 0x26D930AC, 0x51DE003A, 0xC8D75180, 0xBFD06116,
+ 0x21B4F4B5, 0x56B3C423, 0xCFBA9599, 0xB8BDA50F,
+ 0x2802B89E, 0x5F058808, 0xC60CD9B2, 0xB10BE924,
+ 0x2F6F7C87, 0x58684C11, 0xC1611DAB, 0xB6662D3D,
+ 0x76DC4190, 0x01DB7106, 0x98D220BC, 0xEFD5102A,
+ 0x71B18589, 0x06B6B51F, 0x9FBFE4A5, 0xE8B8D433,
+ 0x7807C9A2, 0x0F00F934, 0x9609A88E, 0xE10E9818,
+ 0x7F6A0DBB, 0x086D3D2D, 0x91646C97, 0xE6635C01,
+ 0x6B6B51F4, 0x1C6C6162, 0x856530D8, 0xF262004E,
+ 0x6C0695ED, 0x1B01A57B, 0x8208F4C1, 0xF50FC457,
+ 0x65B0D9C6, 0x12B7E950, 0x8BBEB8EA, 0xFCB9887C,
+ 0x62DD1DDF, 0x15DA2D49, 0x8CD37CF3, 0xFBD44C65,
+ 0x4DB26158, 0x3AB551CE, 0xA3BC0074, 0xD4BB30E2,
+ 0x4ADFA541, 0x3DD895D7, 0xA4D1C46D, 0xD3D6F4FB,
+ 0x4369E96A, 0x346ED9FC, 0xAD678846, 0xDA60B8D0,
+ 0x44042D73, 0x33031DE5, 0xAA0A4C5F, 0xDD0D7CC9,
+ 0x5005713C, 0x270241AA, 0xBE0B1010, 0xC90C2086,
+ 0x5768B525, 0x206F85B3, 0xB966D409, 0xCE61E49F,
+ 0x5EDEF90E, 0x29D9C998, 0xB0D09822, 0xC7D7A8B4,
+ 0x59B33D17, 0x2EB40D81, 0xB7BD5C3B, 0xC0BA6CAD,
+ 0xEDB88320, 0x9ABFB3B6, 0x03B6E20C, 0x74B1D29A,
+ 0xEAD54739, 0x9DD277AF, 0x04DB2615, 0x73DC1683,
+ 0xE3630B12, 0x94643B84, 0x0D6D6A3E, 0x7A6A5AA8,
+ 0xE40ECF0B, 0x9309FF9D, 0x0A00AE27, 0x7D079EB1,
+ 0xF00F9344, 0x8708A3D2, 0x1E01F268, 0x6906C2FE,
+ 0xF762575D, 0x806567CB, 0x196C3671, 0x6E6B06E7,
+ 0xFED41B76, 0x89D32BE0, 0x10DA7A5A, 0x67DD4ACC,
+ 0xF9B9DF6F, 0x8EBEEFF9, 0x17B7BE43, 0x60B08ED5,
+ 0xD6D6A3E8, 0xA1D1937E, 0x38D8C2C4, 0x4FDFF252,
+ 0xD1BB67F1, 0xA6BC5767, 0x3FB506DD, 0x48B2364B,
+ 0xD80D2BDA, 0xAF0A1B4C, 0x36034AF6, 0x41047A60,
+ 0xDF60EFC3, 0xA867DF55, 0x316E8EEF, 0x4669BE79,
+ 0xCB61B38C, 0xBC66831A, 0x256FD2A0, 0x5268E236,
+ 0xCC0C7795, 0xBB0B4703, 0x220216B9, 0x5505262F,
+ 0xC5BA3BBE, 0xB2BD0B28, 0x2BB45A92, 0x5CB36A04,
+ 0xC2D7FFA7, 0xB5D0CF31, 0x2CD99E8B, 0x5BDEAE1D,
+ 0x9B64C2B0, 0xEC63F226, 0x756AA39C, 0x026D930A,
+ 0x9C0906A9, 0xEB0E363F, 0x72076785, 0x05005713,
+ 0x95BF4A82, 0xE2B87A14, 0x7BB12BAE, 0x0CB61B38,
+ 0x92D28E9B, 0xE5D5BE0D, 0x7CDCEFB7, 0x0BDBDF21,
+ 0x86D3D2D4, 0xF1D4E242, 0x68DDB3F8, 0x1FDA836E,
+ 0x81BE16CD, 0xF6B9265B, 0x6FB077E1, 0x18B74777,
+ 0x88085AE6, 0xFF0F6A70, 0x66063BCA, 0x11010B5C,
+ 0x8F659EFF, 0xF862AE69, 0x616BFFD3, 0x166CCF45,
+ 0xA00AE278, 0xD70DD2EE, 0x4E048354, 0x3903B3C2,
+ 0xA7672661, 0xD06016F7, 0x4969474D, 0x3E6E77DB,
+ 0xAED16A4A, 0xD9D65ADC, 0x40DF0B66, 0x37D83BF0,
+ 0xA9BCAE53, 0xDEBB9EC5, 0x47B2CF7F, 0x30B5FFE9,
+ 0xBDBDF21C, 0xCABAC28A, 0x53B39330, 0x24B4A3A6,
+ 0xBAD03605, 0xCDD70693, 0x54DE5729, 0x23D967BF,
+ 0xB3667A2E, 0xC4614AB8, 0x5D681B02, 0x2A6F2B94,
+ 0xB40BBE37, 0xC30C8EA1, 0x5A05DF1B, 0x2D02EF8D
+};
+
+uint32
+hndcrc32(
+ uint8 *pdata, /* pointer to array of data to process */
+ uint nbytes, /* number of input data bytes to process */
+ uint32 crc /* either CRC32_INIT_VALUE or previous return value */
+)
+{
+ uint8 *pend;
+#ifdef __mips__
+ uint8 tmp[4];
+ ulong *tptr = (ulong *)tmp;
+
+ /* in case the beginning of the buffer isn't aligned */
+ pend = (uint8 *)((uint)(pdata + 3) & 0xfffffffc);
+ nbytes -= (pend - pdata);
+ while (pdata < pend)
+ CRC_INNER_LOOP(32, crc, *pdata++);
+
+ /* handle bulk of data as 32-bit words */
+ pend = pdata + (nbytes & 0xfffffffc);
+ while (pdata < pend) {
+ *tptr = *(ulong *)pdata;
+		pdata += sizeof(ulong);
+ CRC_INNER_LOOP(32, crc, tmp[0]);
+ CRC_INNER_LOOP(32, crc, tmp[1]);
+ CRC_INNER_LOOP(32, crc, tmp[2]);
+ CRC_INNER_LOOP(32, crc, tmp[3]);
+ }
+
+ /* 1-3 bytes at end of buffer */
+ pend = pdata + (nbytes & 0x03);
+ while (pdata < pend)
+ CRC_INNER_LOOP(32, crc, *pdata++);
+#else
+ pend = pdata + nbytes;
+ while (pdata < pend)
+ CRC_INNER_LOOP(32, crc, *pdata++);
+#endif /* __mips__ */
+
+ return crc;
+}
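/* Editor's note, not part of the original patch: typical use of hndcrc32(),
 * assuming the usual CRC32_INIT_VALUE definition from bcmutils.h. The crc
 * argument lets the CRC be accumulated across a fragmented buffer:
 *
 *	uint32 crc = CRC32_INIT_VALUE;
 *	crc = hndcrc32(frag1, len1, crc);
 *	crc = hndcrc32(frag2, len2, crc);
 */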
+
+
+/*
+ * Advance from the current 1-byte tag/1-byte length/variable-length value
+ * triple, to the next, returning a pointer to the next.
+ * If the current or next TLV is invalid (does not fit in given buffer length),
+ * NULL is returned.
+ * *buflen is not modified if the TLV elt parameter is invalid; otherwise it
+ * is decremented by the TLV parameter's length.
+ */
+bcm_tlv_t *
+bcm_next_tlv(bcm_tlv_t *elt, int *buflen)
+{
+ int len;
+
+ /* validate current elt */
+ if (!bcm_valid_tlv(elt, *buflen))
+ return NULL;
+
+ /* advance to next elt */
+ len = elt->len;
+ elt = (bcm_tlv_t*)(elt->data + len);
+ *buflen -= (2 + len);
+
+ /* validate next elt */
+ if (!bcm_valid_tlv(elt, *buflen))
+ return NULL;
+
+ return elt;
+}
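/* Editor's note, not part of the original patch: a minimal traversal sketch
 * for bcm_next_tlv(); handle_elt() is a placeholder:
 *
 *	bcm_tlv_t *elt = (bcm_tlv_t *)buf;
 *	int buflen = len;
 *
 *	if (bcm_valid_tlv(elt, buflen))
 *		do {
 *			handle_elt(elt->id, elt->len, elt->data);
 *		} while ((elt = bcm_next_tlv(elt, &buflen)) != NULL);
 */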
+
+/*
+ * Traverse a string of 1-byte tag/1-byte length/variable-length value
+ * triples, returning a pointer to the substring whose first element
+ * matches tag
+ */
+bcm_tlv_t *
+bcm_parse_tlvs(void *buf, int buflen, uint key)
+{
+ bcm_tlv_t *elt;
+ int totlen;
+
+ elt = (bcm_tlv_t*)buf;
+ totlen = buflen;
+
+ /* find tagged parameter */
+ while (totlen >= 2) {
+ int len = elt->len;
+
+ /* validate remaining totlen */
+ if ((elt->id == key) && (totlen >= (len + 2)))
+ return (elt);
+
+ elt = (bcm_tlv_t*)((uint8*)elt + (len + 2));
+ totlen -= (len + 2);
+ }
+
+ return NULL;
+}
+
+/*
+ * Traverse a string of 1-byte tag/1-byte length/variable-length value
+ * triples, returning a pointer to the substring whose first element
+ * matches tag. Stop parsing when we see an element whose ID is greater
+ * than the target key.
+ */
+bcm_tlv_t *
+bcm_parse_ordered_tlvs(void *buf, int buflen, uint key)
+{
+ bcm_tlv_t *elt;
+ int totlen;
+
+ elt = (bcm_tlv_t*)buf;
+ totlen = buflen;
+
+ /* find tagged parameter */
+ while (totlen >= 2) {
+ uint id = elt->id;
+ int len = elt->len;
+
+ /* Punt if we start seeing IDs > than target key */
+ if (id > key)
+ return (NULL);
+
+ /* validate remaining totlen */
+ if ((id == key) && (totlen >= (len + 2)))
+ return (elt);
+
+ elt = (bcm_tlv_t*)((uint8*)elt + (len + 2));
+ totlen -= (len + 2);
+ }
+ return NULL;
+}
+
+
+/* Initialization of bcmstrbuf structure */
+void
+bcm_binit(struct bcmstrbuf *b, char *buf, uint size)
+{
+ b->origsize = b->size = size;
+ b->origbuf = b->buf = buf;
+}
+
+/* Buffer sprintf wrapper to guard against buffer overflow */
+int
+bcm_bprintf(struct bcmstrbuf *b, const char *fmt, ...)
+{
+ va_list ap;
+ int r;
+
+ va_start(ap, fmt);
+ r = vsnprintf(b->buf, b->size, fmt, ap);
+
+	/* Non-ANSI-C99-compliant implementations return -1,
+	 * ANSI-compliant ones return r >= b->size,
+	 * bcmstdlib returns 0; handle all three
+	 */
+ if ((r == -1) || (r >= (int)b->size) || (r == 0))
+ {
+ b->size = 0;
+ }
+ else
+ {
+ b->size -= r;
+ b->buf += r;
+ }
+
+ va_end(ap);
+
+ return r;
+}
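/* Editor's note, not part of the original patch: intended use of the
 * bounded-printf pair above, e.g. when formatting a diagnostic dump:
 *
 *	struct bcmstrbuf b;
 *
 *	bcm_binit(&b, buf, buflen);
 *	bcm_bprintf(&b, "txavail %u\n", txavail);
 *	bcm_bprintf(&b, "rxgiants %u\n", rxgiants);	// no-op once b.size == 0
 */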
+
+uint
+bcm_bitcount(uint8 *bitmap, uint length)
+{
+ uint bitcount = 0, i;
+ uint8 tmp;
+ for (i = 0; i < length; i++) {
+ tmp = bitmap[i];
+ while (tmp) {
+ bitcount++;
+ tmp &= (tmp - 1);
+ }
+ }
+ return bitcount;
+}
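/* Editor's note, not part of the original patch: the inner loop above is
 * Kernighan's population count; tmp &= (tmp - 1) clears the lowest set bit,
 * so it iterates once per set bit: 0xb0 -> 0xa0 -> 0x80 -> 0, three bits.
 */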
+
diff --git a/package/broadcom-wl/src/driver/hnddma.c b/package/broadcom-wl/src/driver/hnddma.c
new file mode 100644
index 000000000..1b79dff4c
--- /dev/null
+++ b/package/broadcom-wl/src/driver/hnddma.c
@@ -0,0 +1,1893 @@
+/*
+ * Generic Broadcom Home Networking Division (HND) DMA module.
+ * This supports the following chips: BCM42xx, 44xx, 47xx.
+ *
+ * Copyright 2006, Broadcom Corporation
+ * All Rights Reserved.
+ *
+ * THIS SOFTWARE IS OFFERED "AS IS", AND BROADCOM GRANTS NO WARRANTIES OF ANY
+ * KIND, EXPRESS OR IMPLIED, BY STATUTE, COMMUNICATION OR OTHERWISE. BROADCOM
+ * SPECIFICALLY DISCLAIMS ANY IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A SPECIFIC PURPOSE OR NONINFRINGEMENT CONCERNING THIS SOFTWARE.
+ *
+ * $Id: hnddma.c,v 1.11 2006/04/08 07:12:42 honor Exp $
+ */
+
+#include <typedefs.h>
+#include <bcmdefs.h>
+#include <osl.h>
+#include "linux_osl.h"
+#include <bcmendian.h>
+#include <sbconfig.h>
+#include <bcmutils.h>
+#include <bcmdevs.h>
+#include <sbutils.h>
+
+#include "sbhnddma.h"
+#include "hnddma.h"
+
+/* debug/trace */
+#define DMA_ERROR(args)
+#define DMA_TRACE(args)
+
+/* default dma message level (if input msg_level pointer is null in dma_attach()) */
+static uint dma_msg_level = 0;
+
+#define MAXNAMEL 8 /* 8 char names */
+
+#define DI_INFO(dmah) (dma_info_t *)dmah
+
+/* dma engine software state */
+typedef struct dma_info {
+ struct hnddma_pub hnddma; /* exported structure, don't use hnddma_t,
+ * which could be const
+ */
+ uint *msg_level; /* message level pointer */
+ char name[MAXNAMEL]; /* callers name for diag msgs */
+
+ void *osh; /* os handle */
+ sb_t *sbh; /* sb handle */
+
+ bool dma64; /* dma64 enabled */
+ bool addrext; /* this dma engine supports DmaExtendedAddrChanges */
+
+ dma32regs_t *d32txregs; /* 32 bits dma tx engine registers */
+ dma32regs_t *d32rxregs; /* 32 bits dma rx engine registers */
+ dma64regs_t *d64txregs; /* 64 bits dma tx engine registers */
+ dma64regs_t *d64rxregs; /* 64 bits dma rx engine registers */
+
+	uint32 dma64align;	/* either 8k or 4k, depending on the number of dd */
+ dma32dd_t *txd32; /* pointer to dma32 tx descriptor ring */
+ dma64dd_t *txd64; /* pointer to dma64 tx descriptor ring */
+ uint ntxd; /* # tx descriptors tunable */
+ uint txin; /* index of next descriptor to reclaim */
+ uint txout; /* index of next descriptor to post */
+ void **txp; /* pointer to parallel array of pointers to packets */
+ osldma_t *tx_dmah; /* DMA TX descriptor ring handle */
+ osldma_t **txp_dmah; /* DMA TX packet data handle */
+ ulong txdpa; /* physical address of descriptor ring */
+ uint txdalign; /* #bytes added to alloc'd mem to align txd */
+ uint txdalloc; /* #bytes allocated for the ring */
+
+ dma32dd_t *rxd32; /* pointer to dma32 rx descriptor ring */
+ dma64dd_t *rxd64; /* pointer to dma64 rx descriptor ring */
+ uint nrxd; /* # rx descriptors tunable */
+ uint rxin; /* index of next descriptor to reclaim */
+ uint rxout; /* index of next descriptor to post */
+ void **rxp; /* pointer to parallel array of pointers to packets */
+ osldma_t *rx_dmah; /* DMA RX descriptor ring handle */
+ osldma_t **rxp_dmah; /* DMA RX packet data handle */
+ ulong rxdpa; /* physical address of descriptor ring */
+ uint rxdalign; /* #bytes added to alloc'd mem to align rxd */
+ uint rxdalloc; /* #bytes allocated for the ring */
+
+ /* tunables */
+ uint rxbufsize; /* rx buffer size in bytes,
+ not including the extra headroom
+ */
+ uint nrxpost; /* # rx buffers to keep posted */
+ uint rxoffset; /* rxcontrol offset */
+ uint ddoffsetlow; /* add to get dma address of descriptor ring, low 32 bits */
+ uint ddoffsethigh; /* high 32 bits */
+ uint dataoffsetlow; /* add to get dma address of data buffer, low 32 bits */
+ uint dataoffsethigh; /* high 32 bits */
+} dma_info_t;
+
+#ifdef BCMDMA64
+#define DMA64_ENAB(di) ((di)->dma64)
+#define DMA64_CAP TRUE
+#else
+#define DMA64_ENAB(di) (0)
+#define DMA64_CAP FALSE
+#endif
+
+/* descriptor bumping macros */
+#define XXD(x, n) ((x) & ((n) - 1)) /* faster than %, but n must be power of 2 */
+#define TXD(x) XXD((x), di->ntxd)
+#define RXD(x) XXD((x), di->nrxd)
+#define NEXTTXD(i) TXD(i + 1)
+#define PREVTXD(i) TXD(i - 1)
+#define NEXTRXD(i) RXD(i + 1)
+#define NTXDACTIVE(h, t) TXD(t - h)
+#define NRXDACTIVE(h, t) RXD(t - h)
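/* Editor's note, not part of the original patch: XXD() relies on the
 * power-of-2 identity x % n == x & (n - 1). With di->ntxd == 64,
 * NEXTTXD(63) == 0 and PREVTXD(0) == TXD(-1) == 63, so ring indexes wrap
 * with a single AND instead of a modulo.
 */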
+
+/* macros to convert between byte offsets and indexes */
+#define B2I(bytes, type) ((bytes) / sizeof(type))
+#define I2B(index, type) ((index) * sizeof(type))
+
+#define PCI32ADDR_HIGH 0xc0000000 /* address[31:30] */
+#define PCI32ADDR_HIGH_SHIFT 30 /* address[31:30] */
+
+
+/* common prototypes */
+static bool _dma_isaddrext(dma_info_t *di);
+static bool _dma_alloc(dma_info_t *di, uint direction);
+static void _dma_detach(dma_info_t *di);
+static void _dma_ddtable_init(dma_info_t *di, uint direction, ulong pa);
+static void _dma_rxinit(dma_info_t *di);
+static void *_dma_rx(dma_info_t *di);
+static void _dma_rxfill(dma_info_t *di);
+static void _dma_rxreclaim(dma_info_t *di);
+static void _dma_rxenable(dma_info_t *di);
+static void * _dma_getnextrxp(dma_info_t *di, bool forceall);
+
+static void _dma_txblock(dma_info_t *di);
+static void _dma_txunblock(dma_info_t *di);
+static uint _dma_txactive(dma_info_t *di);
+
+static void* _dma_peeknexttxp(dma_info_t *di);
+static uintptr _dma_getvar(dma_info_t *di, const char *name);
+static void _dma_counterreset(dma_info_t *di);
+static void _dma_fifoloopbackenable(dma_info_t *di);
+
+/* ** 32 bit DMA prototypes */
+static bool dma32_alloc(dma_info_t *di, uint direction);
+static bool dma32_txreset(dma_info_t *di);
+static bool dma32_rxreset(dma_info_t *di);
+static bool dma32_txsuspendedidle(dma_info_t *di);
+static int dma32_txfast(dma_info_t *di, void *p0, bool commit);
+static void *dma32_getnexttxp(dma_info_t *di, bool forceall);
+static void *dma32_getnextrxp(dma_info_t *di, bool forceall);
+static void dma32_txrotate(dma_info_t *di);
+static bool dma32_rxidle(dma_info_t *di);
+static void dma32_txinit(dma_info_t *di);
+static bool dma32_txenabled(dma_info_t *di);
+static void dma32_txsuspend(dma_info_t *di);
+static void dma32_txresume(dma_info_t *di);
+static bool dma32_txsuspended(dma_info_t *di);
+static void dma32_txreclaim(dma_info_t *di, bool forceall);
+static bool dma32_txstopped(dma_info_t *di);
+static bool dma32_rxstopped(dma_info_t *di);
+static bool dma32_rxenabled(dma_info_t *di);
+static bool _dma32_addrext(osl_t *osh, dma32regs_t *dma32regs);
+
+/* ** 64 bit DMA prototypes and stubs */
+#ifdef BCMDMA64
+static bool dma64_alloc(dma_info_t *di, uint direction);
+static bool dma64_txreset(dma_info_t *di);
+static bool dma64_rxreset(dma_info_t *di);
+static bool dma64_txsuspendedidle(dma_info_t *di);
+static int dma64_txfast(dma_info_t *di, void *p0, bool commit);
+static void *dma64_getnexttxp(dma_info_t *di, bool forceall);
+static void *dma64_getnextrxp(dma_info_t *di, bool forceall);
+static void dma64_txrotate(dma_info_t *di);
+
+static bool dma64_rxidle(dma_info_t *di);
+static void dma64_txinit(dma_info_t *di);
+static bool dma64_txenabled(dma_info_t *di);
+static void dma64_txsuspend(dma_info_t *di);
+static void dma64_txresume(dma_info_t *di);
+static bool dma64_txsuspended(dma_info_t *di);
+static void dma64_txreclaim(dma_info_t *di, bool forceall);
+static bool dma64_txstopped(dma_info_t *di);
+static bool dma64_rxstopped(dma_info_t *di);
+static bool dma64_rxenabled(dma_info_t *di);
+static bool _dma64_addrext(osl_t *osh, dma64regs_t *dma64regs);
+
+#else
+static bool dma64_alloc(dma_info_t *di, uint direction) { return FALSE; }
+static bool dma64_txreset(dma_info_t *di) { return FALSE; }
+static bool dma64_rxreset(dma_info_t *di) { return FALSE; }
+static bool dma64_txsuspendedidle(dma_info_t *di) { return FALSE;}
+static int dma64_txfast(dma_info_t *di, void *p0, bool commit) { return 0; }
+static void *dma64_getnexttxp(dma_info_t *di, bool forceall) { return NULL; }
+static void *dma64_getnextrxp(dma_info_t *di, bool forceall) { return NULL; }
+static void dma64_txrotate(dma_info_t *di) { return; }
+
+static bool dma64_rxidle(dma_info_t *di) { return FALSE; }
+static void dma64_txinit(dma_info_t *di) { return; }
+static bool dma64_txenabled(dma_info_t *di) { return FALSE; }
+static void dma64_txsuspend(dma_info_t *di) { return; }
+static void dma64_txresume(dma_info_t *di) { return; }
+static bool dma64_txsuspended(dma_info_t *di) {return FALSE; }
+static void dma64_txreclaim(dma_info_t *di, bool forceall) { return; }
+static bool dma64_txstopped(dma_info_t *di) { return FALSE; }
+static bool dma64_rxstopped(dma_info_t *di) { return FALSE; }
+static bool dma64_rxenabled(dma_info_t *di) { return FALSE; }
+static bool _dma64_addrext(osl_t *osh, dma64regs_t *dma64regs) { return FALSE; }
+
+#endif /* BCMDMA64 */
+
+
+
+static di_fcn_t dma64proc = {
+ (di_detach_t)_dma_detach,
+ (di_txinit_t)dma64_txinit,
+ (di_txreset_t)dma64_txreset,
+ (di_txenabled_t)dma64_txenabled,
+ (di_txsuspend_t)dma64_txsuspend,
+ (di_txresume_t)dma64_txresume,
+ (di_txsuspended_t)dma64_txsuspended,
+ (di_txsuspendedidle_t)dma64_txsuspendedidle,
+ (di_txfast_t)dma64_txfast,
+ (di_txstopped_t)dma64_txstopped,
+ (di_txreclaim_t)dma64_txreclaim,
+ (di_getnexttxp_t)dma64_getnexttxp,
+ (di_peeknexttxp_t)_dma_peeknexttxp,
+ (di_txblock_t)_dma_txblock,
+ (di_txunblock_t)_dma_txunblock,
+ (di_txactive_t)_dma_txactive,
+ (di_txrotate_t)dma64_txrotate,
+
+ (di_rxinit_t)_dma_rxinit,
+ (di_rxreset_t)dma64_rxreset,
+ (di_rxidle_t)dma64_rxidle,
+ (di_rxstopped_t)dma64_rxstopped,
+ (di_rxenable_t)_dma_rxenable,
+ (di_rxenabled_t)dma64_rxenabled,
+ (di_rx_t)_dma_rx,
+ (di_rxfill_t)_dma_rxfill,
+ (di_rxreclaim_t)_dma_rxreclaim,
+ (di_getnextrxp_t)_dma_getnextrxp,
+
+ (di_fifoloopbackenable_t)_dma_fifoloopbackenable,
+ (di_getvar_t)_dma_getvar,
+ (di_counterreset_t)_dma_counterreset,
+
+ NULL,
+ NULL,
+ NULL,
+ 34
+};
+
+static di_fcn_t dma32proc = {
+ (di_detach_t)_dma_detach,
+ (di_txinit_t)dma32_txinit,
+ (di_txreset_t)dma32_txreset,
+ (di_txenabled_t)dma32_txenabled,
+ (di_txsuspend_t)dma32_txsuspend,
+ (di_txresume_t)dma32_txresume,
+ (di_txsuspended_t)dma32_txsuspended,
+ (di_txsuspendedidle_t)dma32_txsuspendedidle,
+ (di_txfast_t)dma32_txfast,
+ (di_txstopped_t)dma32_txstopped,
+ (di_txreclaim_t)dma32_txreclaim,
+ (di_getnexttxp_t)dma32_getnexttxp,
+ (di_peeknexttxp_t)_dma_peeknexttxp,
+ (di_txblock_t)_dma_txblock,
+ (di_txunblock_t)_dma_txunblock,
+ (di_txactive_t)_dma_txactive,
+ (di_txrotate_t)dma32_txrotate,
+
+ (di_rxinit_t)_dma_rxinit,
+ (di_rxreset_t)dma32_rxreset,
+ (di_rxidle_t)dma32_rxidle,
+ (di_rxstopped_t)dma32_rxstopped,
+ (di_rxenable_t)_dma_rxenable,
+ (di_rxenabled_t)dma32_rxenabled,
+ (di_rx_t)_dma_rx,
+ (di_rxfill_t)_dma_rxfill,
+ (di_rxreclaim_t)_dma_rxreclaim,
+ (di_getnextrxp_t)_dma_getnextrxp,
+
+ (di_fifoloopbackenable_t)_dma_fifoloopbackenable,
+ (di_getvar_t)_dma_getvar,
+ (di_counterreset_t)_dma_counterreset,
+
+ NULL,
+ NULL,
+ NULL,
+ 34
+};
+
+hnddma_t *
+dma_attach(osl_t *osh, char *name, sb_t *sbh, void *dmaregstx, void *dmaregsrx,
+ uint ntxd, uint nrxd, uint rxbufsize, uint nrxpost, uint rxoffset, uint *msg_level)
+{
+ dma_info_t *di;
+ uint size;
+
+ /* allocate private info structure */
+ if ((di = MALLOC(osh, sizeof (dma_info_t))) == NULL) {
+ return (NULL);
+ }
+ bzero((char *)di, sizeof(dma_info_t));
+
+ di->msg_level = msg_level ? msg_level : &dma_msg_level;
+
+	/* old chips w/o sb are no longer supported */
+ ASSERT(sbh != NULL);
+
+ di->dma64 = ((sb_coreflagshi(sbh, 0, 0) & SBTMH_DMA64) == SBTMH_DMA64);
+
+#ifndef BCMDMA64
+ if (di->dma64) {
+ DMA_ERROR(("dma_attach: driver doesn't have the capability to support "
+ "64 bits DMA\n"));
+ goto fail;
+ }
+#endif
+
+ /* check arguments */
+ ASSERT(ISPOWEROF2(ntxd));
+ ASSERT(ISPOWEROF2(nrxd));
+ if (nrxd == 0)
+ ASSERT(dmaregsrx == NULL);
+ if (ntxd == 0)
+ ASSERT(dmaregstx == NULL);
+
+
+ /* init dma reg pointer */
+ if (di->dma64) {
+ ASSERT(ntxd <= D64MAXDD);
+ ASSERT(nrxd <= D64MAXDD);
+ di->d64txregs = (dma64regs_t *)dmaregstx;
+ di->d64rxregs = (dma64regs_t *)dmaregsrx;
+
+ di->dma64align = D64RINGALIGN;
+ if ((ntxd < D64MAXDD / 2) && (nrxd < D64MAXDD / 2)) {
+			/* for a smaller dd table, HW relaxes the alignment requirement */
+ di->dma64align = D64RINGALIGN / 2;
+ }
+ } else {
+ ASSERT(ntxd <= D32MAXDD);
+ ASSERT(nrxd <= D32MAXDD);
+ di->d32txregs = (dma32regs_t *)dmaregstx;
+ di->d32rxregs = (dma32regs_t *)dmaregsrx;
+ }
+
+ DMA_TRACE(("%s: dma_attach: %s osh %p ntxd %d nrxd %d rxbufsize %d nrxpost %d "
+ "rxoffset %d dmaregstx %p dmaregsrx %p\n",
+ name, (di->dma64 ? "DMA64" : "DMA32"), osh, ntxd, nrxd, rxbufsize,
+ nrxpost, rxoffset, dmaregstx, dmaregsrx));
+
+ /* make a private copy of our callers name */
+ strncpy(di->name, name, MAXNAMEL);
+ di->name[MAXNAMEL-1] = '\0';
+
+ di->osh = osh;
+ di->sbh = sbh;
+
+ /* save tunables */
+ di->ntxd = ntxd;
+ di->nrxd = nrxd;
+
+ /* the actual dma size doesn't include the extra headroom */
+ if (rxbufsize > BCMEXTRAHDROOM)
+ di->rxbufsize = rxbufsize - BCMEXTRAHDROOM;
+ else
+ di->rxbufsize = rxbufsize;
+
+ di->nrxpost = nrxpost;
+ di->rxoffset = rxoffset;
+
+ /*
+ * figure out the DMA physical address offset for dd and data
+ * for old chips w/o sb, use zero
+ * for new chips w sb,
+	 *     PCI/PCIE: they map silicon backplane addresses to zero-based memory, need offset
+ * Other bus: use zero
+ * SB_BUS BIGENDIAN kludge: use sdram swapped region for data buffer, not descriptor
+ */
+ di->ddoffsetlow = 0;
+ di->dataoffsetlow = 0;
+ /* for pci bus, add offset */
+ if (sbh->bustype == PCI_BUS) {
+ if ((sbh->buscoretype == SB_PCIE) && di->dma64) {
+ /* pcie with DMA64 */
+ di->ddoffsetlow = 0;
+ di->ddoffsethigh = SB_PCIE_DMA_H32;
+ } else {
+ /* pci(DMA32/DMA64) or pcie with DMA32 */
+ di->ddoffsetlow = SB_PCI_DMA;
+ di->ddoffsethigh = 0;
+ }
+ di->dataoffsetlow = di->ddoffsetlow;
+ di->dataoffsethigh = di->ddoffsethigh;
+ }
+
+#if defined(__mips__) && defined(IL_BIGENDIAN)
+ di->dataoffsetlow = di->dataoffsetlow + SB_SDRAM_SWAPPED;
+#endif
+
+ di->addrext = _dma_isaddrext(di);
+
+ /* allocate tx packet pointer vector */
+ if (ntxd) {
+ size = ntxd * sizeof(void *);
+ if ((di->txp = MALLOC(osh, size)) == NULL) {
+ DMA_ERROR(("%s: dma_attach: out of tx memory, malloced %d bytes\n",
+ di->name, MALLOCED(osh)));
+ goto fail;
+ }
+ bzero((char *)di->txp, size);
+ }
+
+ /* allocate rx packet pointer vector */
+ if (nrxd) {
+ size = nrxd * sizeof(void *);
+ if ((di->rxp = MALLOC(osh, size)) == NULL) {
+ DMA_ERROR(("%s: dma_attach: out of rx memory, malloced %d bytes\n",
+ di->name, MALLOCED(osh)));
+ goto fail;
+ }
+ bzero((char *)di->rxp, size);
+ }
+
+ /* allocate transmit descriptor ring, only need ntxd descriptors but it must be aligned */
+ if (ntxd) {
+ if (!_dma_alloc(di, DMA_TX))
+ goto fail;
+ }
+
+ /* allocate receive descriptor ring, only need nrxd descriptors but it must be aligned */
+ if (nrxd) {
+ if (!_dma_alloc(di, DMA_RX))
+ goto fail;
+ }
+
+ if ((di->ddoffsetlow == SB_PCI_DMA) && (di->txdpa > SB_PCI_DMA_SZ) && !di->addrext) {
+ DMA_ERROR(("%s: dma_attach: txdpa 0x%lx: addrext not supported\n",
+ di->name, di->txdpa));
+ goto fail;
+ }
+ if ((di->ddoffsetlow == SB_PCI_DMA) && (di->rxdpa > SB_PCI_DMA_SZ) && !di->addrext) {
+ DMA_ERROR(("%s: dma_attach: rxdpa 0x%lx: addrext not supported\n",
+ di->name, di->rxdpa));
+ goto fail;
+ }
+
+ DMA_TRACE(("ddoffsetlow 0x%x ddoffsethigh 0x%x dataoffsetlow 0x%x dataoffsethigh "
+ "0x%x addrext %d\n", di->ddoffsetlow, di->ddoffsethigh, di->dataoffsetlow,
+ di->dataoffsethigh, di->addrext));
+
+ /* allocate tx packet pointer vector and DMA mapping vectors */
+ if (ntxd) {
+
+ size = ntxd * sizeof(osldma_t **);
+ if ((di->txp_dmah = (osldma_t **)MALLOC(osh, size)) == NULL)
+ goto fail;
+ bzero((char*)di->txp_dmah, size);
+	} else
+		di->txp_dmah = NULL;
+
+ /* allocate rx packet pointer vector and DMA mapping vectors */
+ if (nrxd) {
+
+ size = nrxd * sizeof(osldma_t **);
+ if ((di->rxp_dmah = (osldma_t **)MALLOC(osh, size)) == NULL)
+ goto fail;
+ bzero((char*)di->rxp_dmah, size);
+
+ } else
+ di->rxp_dmah = NULL;
+
+ /* initialize opsvec of function pointers */
+ di->hnddma.di_fn = DMA64_ENAB(di) ? dma64proc : dma32proc;
+
+ return ((hnddma_t *)di);
+
+fail:
+ _dma_detach(di);
+ return (NULL);
+}
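/* Editor's note, not part of the original patch: a minimal attach sketch.
 * The register pointers, ring sizes, rx tunables and msglevel below are
 * hypothetical; ntxd/nrxd must be powers of 2, and NULL means failure:
 *
 *	hnddma_t *di;
 *
 *	di = dma_attach(osh, "wl0", sbh, txregs, rxregs,
 *	                64, 64, 2048, 32, 30, &msglevel);
 *	if (di == NULL)
 *		return FALSE;
 */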
+
+/* init the tx or rx descriptor */
+static INLINE void
+dma32_dd_upd(dma_info_t *di, dma32dd_t *ddring, ulong pa, uint outidx, uint32 *flags,
+ uint32 bufcount)
+{
+ /* dma32 uses 32 bits control to fit both flags and bufcounter */
+ *flags = *flags | (bufcount & CTRL_BC_MASK);
+
+ if ((di->dataoffsetlow != SB_PCI_DMA) || !(pa & PCI32ADDR_HIGH)) {
+ W_SM(&ddring[outidx].addr, BUS_SWAP32(pa + di->dataoffsetlow));
+ W_SM(&ddring[outidx].ctrl, BUS_SWAP32(*flags));
+ } else {
+ /* address extension */
+ uint32 ae;
+ ASSERT(di->addrext);
+ ae = (pa & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;
+ pa &= ~PCI32ADDR_HIGH;
+
+ *flags |= (ae << CTRL_AE_SHIFT);
+ W_SM(&ddring[outidx].addr, BUS_SWAP32(pa + di->dataoffsetlow));
+ W_SM(&ddring[outidx].ctrl, BUS_SWAP32(*flags));
+ }
+}
+
+static INLINE void
+dma64_dd_upd(dma_info_t *di, dma64dd_t *ddring, ulong pa, uint outidx, uint32 *flags,
+ uint32 bufcount)
+{
+ uint32 ctrl2 = bufcount & D64_CTRL2_BC_MASK;
+
+ /* PCI bus with big(>1G) physical address, use address extension */
+ if ((di->dataoffsetlow != SB_PCI_DMA) || !(pa & PCI32ADDR_HIGH)) {
+ W_SM(&ddring[outidx].addrlow, BUS_SWAP32(pa + di->dataoffsetlow));
+ W_SM(&ddring[outidx].addrhigh, BUS_SWAP32(0 + di->dataoffsethigh));
+ W_SM(&ddring[outidx].ctrl1, BUS_SWAP32(*flags));
+ W_SM(&ddring[outidx].ctrl2, BUS_SWAP32(ctrl2));
+ } else {
+ /* address extension */
+ uint32 ae;
+ ASSERT(di->addrext);
+
+ ae = (pa & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;
+ pa &= ~PCI32ADDR_HIGH;
+
+ ctrl2 |= (ae << D64_CTRL2_AE_SHIFT) & D64_CTRL2_AE;
+ W_SM(&ddring[outidx].addrlow, BUS_SWAP32(pa + di->dataoffsetlow));
+ W_SM(&ddring[outidx].addrhigh, BUS_SWAP32(0 + di->dataoffsethigh));
+ W_SM(&ddring[outidx].ctrl1, BUS_SWAP32(*flags));
+ W_SM(&ddring[outidx].ctrl2, BUS_SWAP32(ctrl2));
+ }
+}
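/* Editor's note, not part of the original patch: a worked example of the
 * address extension in the two helpers above. For pa == 0xc0012345,
 * ae == (pa & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT == 3, and the
 * descriptor is written with pa == 0x00012345; the engine recombines the
 * AE field and the low 30 bits to address the full 32-bit PCI window.
 */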
+
+static bool
+_dma32_addrext(osl_t *osh, dma32regs_t *dma32regs)
+{
+ uint32 w;
+
+ OR_REG(osh, &dma32regs->control, XC_AE);
+ w = R_REG(osh, &dma32regs->control);
+ AND_REG(osh, &dma32regs->control, ~XC_AE);
+ return ((w & XC_AE) == XC_AE);
+}
+
+static bool
+_dma_alloc(dma_info_t *di, uint direction)
+{
+ if (DMA64_ENAB(di)) {
+ return dma64_alloc(di, direction);
+ } else {
+ return dma32_alloc(di, direction);
+ }
+}
+
+/* !! may be called with core in reset */
+static void
+_dma_detach(dma_info_t *di)
+{
+ if (di == NULL)
+ return;
+
+ DMA_TRACE(("%s: dma_detach\n", di->name));
+
+ /* shouldn't be here if descriptors are unreclaimed */
+ ASSERT(di->txin == di->txout);
+ ASSERT(di->rxin == di->rxout);
+
+ /* free dma descriptor rings */
+ if (DMA64_ENAB(di)) {
+ if (di->txd64)
+ DMA_FREE_CONSISTENT(di->osh, ((int8*)(uintptr)di->txd64 - di->txdalign),
+ di->txdalloc, (di->txdpa - di->txdalign), &di->tx_dmah);
+ if (di->rxd64)
+ DMA_FREE_CONSISTENT(di->osh, ((int8*)(uintptr)di->rxd64 - di->rxdalign),
+ di->rxdalloc, (di->rxdpa - di->rxdalign), &di->rx_dmah);
+ } else {
+ if (di->txd32)
+ DMA_FREE_CONSISTENT(di->osh, ((int8*)(uintptr)di->txd32 - di->txdalign),
+ di->txdalloc, (di->txdpa - di->txdalign), &di->tx_dmah);
+ if (di->rxd32)
+ DMA_FREE_CONSISTENT(di->osh, ((int8*)(uintptr)di->rxd32 - di->rxdalign),
+ di->rxdalloc, (di->rxdpa - di->rxdalign), &di->rx_dmah);
+ }
+
+ /* free packet pointer vectors */
+ if (di->txp)
+ MFREE(di->osh, (void *)di->txp, (di->ntxd * sizeof(void *)));
+ if (di->rxp)
+ MFREE(di->osh, (void *)di->rxp, (di->nrxd * sizeof(void *)));
+
+ /* free tx packet DMA handles */
+ if (di->txp_dmah)
+ MFREE(di->osh, (void *)di->txp_dmah, di->ntxd * sizeof(osldma_t **));
+
+ /* free rx packet DMA handles */
+ if (di->rxp_dmah)
+ MFREE(di->osh, (void *)di->rxp_dmah, di->nrxd * sizeof(osldma_t **));
+
+ /* free our private info structure */
+ MFREE(di->osh, (void *)di, sizeof(dma_info_t));
+
+}
+
+/* return TRUE if this dma engine supports DmaExtendedAddrChanges, otherwise FALSE */
+static bool
+_dma_isaddrext(dma_info_t *di)
+{
+ if (DMA64_ENAB(di)) {
+ /* DMA64 supports full 32 bits or 64 bits. AE is always valid */
+
+		/* not all tx or rx channels are available */
+ if (di->d64txregs != NULL) {
+ if (!_dma64_addrext(di->osh, di->d64txregs)) {
+ DMA_ERROR(("%s: _dma_isaddrext: DMA64 tx doesn't have AE set\n",
+ di->name));
+ ASSERT(0);
+ }
+ return TRUE;
+ } else if (di->d64rxregs != NULL) {
+ if (!_dma64_addrext(di->osh, di->d64rxregs)) {
+ DMA_ERROR(("%s: _dma_isaddrext: DMA64 rx doesn't have AE set\n",
+ di->name));
+ ASSERT(0);
+ }
+ return TRUE;
+ }
+ return FALSE;
+ } else if (di->d32txregs)
+ return (_dma32_addrext(di->osh, di->d32txregs));
+ else if (di->d32rxregs)
+ return (_dma32_addrext(di->osh, di->d32rxregs));
+ return FALSE;
+}
+
+/* initialize descriptor table base address */
+static void
+_dma_ddtable_init(dma_info_t *di, uint direction, ulong pa)
+{
+ if (DMA64_ENAB(di)) {
+
+ if ((di->ddoffsetlow != SB_PCI_DMA) || !(pa & PCI32ADDR_HIGH)) {
+ if (direction == DMA_TX) {
+ W_REG(di->osh, &di->d64txregs->addrlow, (pa + di->ddoffsetlow));
+ W_REG(di->osh, &di->d64txregs->addrhigh, di->ddoffsethigh);
+ } else {
+ W_REG(di->osh, &di->d64rxregs->addrlow, (pa + di->ddoffsetlow));
+ W_REG(di->osh, &di->d64rxregs->addrhigh, di->ddoffsethigh);
+ }
+ } else {
+ /* DMA64 32bits address extension */
+ uint32 ae;
+ ASSERT(di->addrext);
+
+ /* shift the high bit(s) from pa to ae */
+ ae = (pa & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;
+ pa &= ~PCI32ADDR_HIGH;
+
+ if (direction == DMA_TX) {
+ W_REG(di->osh, &di->d64txregs->addrlow, (pa + di->ddoffsetlow));
+ W_REG(di->osh, &di->d64txregs->addrhigh, di->ddoffsethigh);
+ SET_REG(di->osh, &di->d64txregs->control, D64_XC_AE,
+ (ae << D64_XC_AE_SHIFT));
+ } else {
+ W_REG(di->osh, &di->d64rxregs->addrlow, (pa + di->ddoffsetlow));
+ W_REG(di->osh, &di->d64rxregs->addrhigh, di->ddoffsethigh);
+ SET_REG(di->osh, &di->d64rxregs->control, D64_RC_AE,
+ (ae << D64_RC_AE_SHIFT));
+ }
+ }
+
+ } else {
+ if ((di->ddoffsetlow != SB_PCI_DMA) || !(pa & PCI32ADDR_HIGH)) {
+ if (direction == DMA_TX)
+ W_REG(di->osh, &di->d32txregs->addr, (pa + di->ddoffsetlow));
+ else
+ W_REG(di->osh, &di->d32rxregs->addr, (pa + di->ddoffsetlow));
+ } else {
+ /* dma32 address extension */
+ uint32 ae;
+ ASSERT(di->addrext);
+
+ /* shift the high bit(s) from pa to ae */
+ ae = (pa & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT;
+ pa &= ~PCI32ADDR_HIGH;
+
+ if (direction == DMA_TX) {
+ W_REG(di->osh, &di->d32txregs->addr, (pa + di->ddoffsetlow));
+ SET_REG(di->osh, &di->d32txregs->control, XC_AE, ae <<XC_AE_SHIFT);
+ } else {
+ W_REG(di->osh, &di->d32rxregs->addr, (pa + di->ddoffsetlow));
+ SET_REG(di->osh, &di->d32rxregs->control, RC_AE, ae <<RC_AE_SHIFT);
+ }
+ }
+ }
+}
+
+static void
+_dma_fifoloopbackenable(dma_info_t *di)
+{
+ DMA_TRACE(("%s: dma_fifoloopbackenable\n", di->name));
+ if (DMA64_ENAB(di))
+ OR_REG(di->osh, &di->d64txregs->control, D64_XC_LE);
+ else
+ OR_REG(di->osh, &di->d32txregs->control, XC_LE);
+}
+
+static void
+_dma_rxinit(dma_info_t *di)
+{
+ DMA_TRACE(("%s: dma_rxinit\n", di->name));
+
+ if (di->nrxd == 0)
+ return;
+
+ di->rxin = di->rxout = 0;
+
+ /* clear rx descriptor ring */
+ if (DMA64_ENAB(di)) {
+ BZERO_SM((void *)(uintptr)di->rxd64, (di->nrxd * sizeof(dma64dd_t)));
+ _dma_rxenable(di);
+ _dma_ddtable_init(di, DMA_RX, di->rxdpa);
+ } else {
+ BZERO_SM((void *)(uintptr)di->rxd32, (di->nrxd * sizeof(dma32dd_t)));
+ _dma_rxenable(di);
+ _dma_ddtable_init(di, DMA_RX, di->rxdpa);
+ }
+}
+
+static void
+_dma_rxenable(dma_info_t *di)
+{
+ DMA_TRACE(("%s: dma_rxenable\n", di->name));
+
+ if (DMA64_ENAB(di))
+ W_REG(di->osh, &di->d64rxregs->control,
+ ((di->rxoffset << D64_RC_RO_SHIFT) | D64_RC_RE));
+ else
+ W_REG(di->osh, &di->d32rxregs->control, ((di->rxoffset << RC_RO_SHIFT) | RC_RE));
+}
+
+/* !! rx entry routine, returns a pointer to the next frame received,
+ * or NULL if there are no more
+ */
+static void *
+_dma_rx(dma_info_t *di)
+{
+ void *p;
+ uint len;
+ int skiplen = 0;
+
+ while ((p = _dma_getnextrxp(di, FALSE))) {
+ /* skip giant packets which span multiple rx descriptors */
+ if (skiplen > 0) {
+ skiplen -= di->rxbufsize;
+ if (skiplen < 0)
+ skiplen = 0;
+ PKTFREE(di->osh, p, FALSE);
+ continue;
+ }
+
+ len = ltoh16(*(uint16*)(PKTDATA(di->osh, p)));
+ DMA_TRACE(("%s: dma_rx len %d\n", di->name, len));
+
+ /* bad frame length check */
+ if (len > (di->rxbufsize - di->rxoffset)) {
+ DMA_ERROR(("%s: dma_rx: bad frame length (%d)\n", di->name, len));
+ if (len > 0)
+ skiplen = len - (di->rxbufsize - di->rxoffset);
+ PKTFREE(di->osh, p, FALSE);
+ di->hnddma.rxgiants++;
+ continue;
+ }
+
+ /* set actual length */
+ PKTSETLEN(di->osh, p, (di->rxoffset + len));
+
+ break;
+ }
+
+ return (p);
+}
+
+/* post receive buffers */
+static void
+_dma_rxfill(dma_info_t *di)
+{
+ void *p;
+ uint rxin, rxout;
+ uint32 flags = 0;
+ uint n;
+ uint i;
+ uint32 pa;
+ uint extra_offset = 0;
+
+ /*
+ * Determine how many receive buffers we're lacking
+ * from the full complement, allocate, initialize,
+ * and post them, then update the chip rx lastdscr.
+ */
+
+ rxin = di->rxin;
+ rxout = di->rxout;
+
+ n = di->nrxpost - NRXDACTIVE(rxin, rxout);
+
+ DMA_TRACE(("%s: dma_rxfill: post %d\n", di->name, n));
+
+ if (di->rxbufsize > BCMEXTRAHDROOM)
+ extra_offset = BCMEXTRAHDROOM;
+
+ for (i = 0; i < n; i++) {
+		/* di->rxbufsize doesn't include the extra headroom, so add it to
+		 * the size to be allocated
+		 */
+ if ((p = PKTGET(di->osh, di->rxbufsize + extra_offset,
+ FALSE)) == NULL) {
+ DMA_ERROR(("%s: dma_rxfill: out of rxbufs\n", di->name));
+ di->hnddma.rxnobuf++;
+ break;
+ }
+ /* reserve an extra headroom, if applicable */
+ if (extra_offset)
+ PKTPULL(di->osh, p, extra_offset);
+
+ /* Do a cached write instead of uncached write since DMA_MAP
+ * will flush the cache.
+ */
+ *(uint32*)(PKTDATA(di->osh, p)) = 0;
+
+ pa = (uint32) DMA_MAP(di->osh, PKTDATA(di->osh, p),
+ di->rxbufsize, DMA_RX, p);
+
+ ASSERT(ISALIGNED(pa, 4));
+
+ /* save the free packet pointer */
+ ASSERT(di->rxp[rxout] == NULL);
+ di->rxp[rxout] = p;
+
+ /* reset flags for each descriptor */
+ flags = 0;
+ if (DMA64_ENAB(di)) {
+ if (rxout == (di->nrxd - 1))
+ flags = D64_CTRL1_EOT;
+
+ dma64_dd_upd(di, di->rxd64, pa, rxout, &flags, di->rxbufsize);
+ } else {
+ if (rxout == (di->nrxd - 1))
+ flags = CTRL_EOT;
+
+ dma32_dd_upd(di, di->rxd32, pa, rxout, &flags, di->rxbufsize);
+ }
+ rxout = NEXTRXD(rxout);
+ }
+
+ di->rxout = rxout;
+
+ /* update the chip lastdscr pointer */
+ if (DMA64_ENAB(di)) {
+ W_REG(di->osh, &di->d64rxregs->ptr, I2B(rxout, dma64dd_t));
+ } else {
+ W_REG(di->osh, &di->d32rxregs->ptr, I2B(rxout, dma32dd_t));
+ }
+}
+
+/* like getnexttxp but no reclaim */
+static void *
+_dma_peeknexttxp(dma_info_t *di)
+{
+ uint end, i;
+
+ if (di->ntxd == 0)
+ return (NULL);
+
+ if (DMA64_ENAB(di)) {
+ end = B2I(R_REG(di->osh, &di->d64txregs->status0) & D64_XS0_CD_MASK, dma64dd_t);
+ } else {
+ end = B2I(R_REG(di->osh, &di->d32txregs->status) & XS_CD_MASK, dma32dd_t);
+ }
+
+ for (i = di->txin; i != end; i = NEXTTXD(i))
+ if (di->txp[i])
+ return (di->txp[i]);
+
+ return (NULL);
+}
+
+static void
+_dma_rxreclaim(dma_info_t *di)
+{
+ void *p;
+
+ /* "unused local" warning suppression for OSLs that
+ * define PKTFREE() without using the di->osh arg
+ */
+ di = di;
+
+ DMA_TRACE(("%s: dma_rxreclaim\n", di->name));
+
+ while ((p = _dma_getnextrxp(di, TRUE)))
+ PKTFREE(di->osh, p, FALSE);
+}
+
+static void *
+_dma_getnextrxp(dma_info_t *di, bool forceall)
+{
+ if (di->nrxd == 0)
+ return (NULL);
+
+ if (DMA64_ENAB(di)) {
+ return dma64_getnextrxp(di, forceall);
+ } else {
+ return dma32_getnextrxp(di, forceall);
+ }
+}
+
+static void
+_dma_txblock(dma_info_t *di)
+{
+ di->hnddma.txavail = 0;
+}
+
+static void
+_dma_txunblock(dma_info_t *di)
+{
+ di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
+}
+
+static uint
+_dma_txactive(dma_info_t *di)
+{
+ return (NTXDACTIVE(di->txin, di->txout));
+}
+
+static void
+_dma_counterreset(dma_info_t *di)
+{
+	/* reset all software counters */
+ di->hnddma.rxgiants = 0;
+ di->hnddma.rxnobuf = 0;
+ di->hnddma.txnobuf = 0;
+}
+
+/* get the address of the var in order to change later */
+static uintptr
+_dma_getvar(dma_info_t *di, const char *name)
+{
+ if (!strcmp(name, "&txavail"))
+ return ((uintptr) &(di->hnddma.txavail));
+ else {
+ ASSERT(0);
+ }
+ return (0);
+}
+
+void
+dma_txpioloopback(osl_t *osh, dma32regs_t *regs)
+{
+ OR_REG(osh, &regs->control, XC_LE);
+}
+
+
+
+/* 32 bits DMA functions */
+static void
+dma32_txinit(dma_info_t *di)
+{
+ DMA_TRACE(("%s: dma_txinit\n", di->name));
+
+ if (di->ntxd == 0)
+ return;
+
+ di->txin = di->txout = 0;
+ di->hnddma.txavail = di->ntxd - 1;
+
+ /* clear tx descriptor ring */
+ BZERO_SM((void *)(uintptr)di->txd32, (di->ntxd * sizeof(dma32dd_t)));
+ W_REG(di->osh, &di->d32txregs->control, XC_XE);
+ _dma_ddtable_init(di, DMA_TX, di->txdpa);
+}
+
+static bool
+dma32_txenabled(dma_info_t *di)
+{
+ uint32 xc;
+
+ /* If the chip is dead, it is not enabled :-) */
+ xc = R_REG(di->osh, &di->d32txregs->control);
+ return ((xc != 0xffffffff) && (xc & XC_XE));
+}
+
+static void
+dma32_txsuspend(dma_info_t *di)
+{
+ DMA_TRACE(("%s: dma_txsuspend\n", di->name));
+
+ if (di->ntxd == 0)
+ return;
+
+ OR_REG(di->osh, &di->d32txregs->control, XC_SE);
+}
+
+static void
+dma32_txresume(dma_info_t *di)
+{
+ DMA_TRACE(("%s: dma_txresume\n", di->name));
+
+ if (di->ntxd == 0)
+ return;
+
+ AND_REG(di->osh, &di->d32txregs->control, ~XC_SE);
+}
+
+static bool
+dma32_txsuspended(dma_info_t *di)
+{
+ return (di->ntxd == 0) || ((R_REG(di->osh, &di->d32txregs->control) & XC_SE) == XC_SE);
+}
+
+static void
+dma32_txreclaim(dma_info_t *di, bool forceall)
+{
+ void *p;
+
+ DMA_TRACE(("%s: dma_txreclaim %s\n", di->name, forceall ? "all" : ""));
+
+ while ((p = dma32_getnexttxp(di, forceall)))
+ PKTFREE(di->osh, p, TRUE);
+}
+
+static bool
+dma32_txstopped(dma_info_t *di)
+{
+ return ((R_REG(di->osh, &di->d32txregs->status) & XS_XS_MASK) == XS_XS_STOPPED);
+}
+
+static bool
+dma32_rxstopped(dma_info_t *di)
+{
+ return ((R_REG(di->osh, &di->d32rxregs->status) & RS_RS_MASK) == RS_RS_STOPPED);
+}
+
+static bool
+dma32_alloc(dma_info_t *di, uint direction)
+{
+ uint size;
+ uint ddlen;
+ void *va;
+
+ ddlen = sizeof(dma32dd_t);
+
+ size = (direction == DMA_TX) ? (di->ntxd * ddlen) : (di->nrxd * ddlen);
+
+ if (!ISALIGNED(DMA_CONSISTENT_ALIGN, D32RINGALIGN))
+ size += D32RINGALIGN;
+
+
+ if (direction == DMA_TX) {
+ if ((va = DMA_ALLOC_CONSISTENT(di->osh, size, &di->txdpa, &di->tx_dmah)) == NULL) {
+ DMA_ERROR(("%s: dma_attach: DMA_ALLOC_CONSISTENT(ntxd) failed\n",
+ di->name));
+ return FALSE;
+ }
+
+ di->txd32 = (dma32dd_t *) ROUNDUP((uintptr)va, D32RINGALIGN);
+ di->txdalign = (uint)((int8*)(uintptr)di->txd32 - (int8*)va);
+ di->txdpa += di->txdalign;
+ di->txdalloc = size;
+ ASSERT(ISALIGNED((uintptr)di->txd32, D32RINGALIGN));
+ } else {
+ if ((va = DMA_ALLOC_CONSISTENT(di->osh, size, &di->rxdpa, &di->rx_dmah)) == NULL) {
+ DMA_ERROR(("%s: dma_attach: DMA_ALLOC_CONSISTENT(nrxd) failed\n",
+ di->name));
+ return FALSE;
+ }
+ di->rxd32 = (dma32dd_t *) ROUNDUP((uintptr)va, D32RINGALIGN);
+ di->rxdalign = (uint)((int8*)(uintptr)di->rxd32 - (int8*)va);
+ di->rxdpa += di->rxdalign;
+ di->rxdalloc = size;
+ ASSERT(ISALIGNED((uintptr)di->rxd32, D32RINGALIGN));
+ }
+
+ return TRUE;
+}
+
+static bool
+dma32_txreset(dma_info_t *di)
+{
+ uint32 status;
+
+ if (di->ntxd == 0)
+ return TRUE;
+
+ /* suspend tx DMA first */
+ W_REG(di->osh, &di->d32txregs->control, XC_SE);
+ SPINWAIT(((status = (R_REG(di->osh, &di->d32txregs->status) & XS_XS_MASK))
+ != XS_XS_DISABLED) &&
+ (status != XS_XS_IDLE) &&
+ (status != XS_XS_STOPPED),
+ (10000));
+
+ W_REG(di->osh, &di->d32txregs->control, 0);
+ SPINWAIT(((status = (R_REG(di->osh,
+ &di->d32txregs->status) & XS_XS_MASK)) != XS_XS_DISABLED),
+ 10000);
+
+ /* wait for the last transaction to complete */
+ OSL_DELAY(300);
+
+ return (status == XS_XS_DISABLED);
+}
+
+static bool
+dma32_rxidle(dma_info_t *di)
+{
+ DMA_TRACE(("%s: dma_rxidle\n", di->name));
+
+ if (di->nrxd == 0)
+ return TRUE;
+
+ return ((R_REG(di->osh, &di->d32rxregs->status) & RS_CD_MASK) ==
+ R_REG(di->osh, &di->d32rxregs->ptr));
+}
+
+static bool
+dma32_rxreset(dma_info_t *di)
+{
+ uint32 status;
+
+ if (di->nrxd == 0)
+ return TRUE;
+
+ W_REG(di->osh, &di->d32rxregs->control, 0);
+ SPINWAIT(((status = (R_REG(di->osh,
+ &di->d32rxregs->status) & RS_RS_MASK)) != RS_RS_DISABLED),
+ 10000);
+
+ return (status == RS_RS_DISABLED);
+}
+
+static bool
+dma32_rxenabled(dma_info_t *di)
+{
+ uint32 rc;
+
+ rc = R_REG(di->osh, &di->d32rxregs->control);
+ return ((rc != 0xffffffff) && (rc & RC_RE));
+}
+
+static bool
+dma32_txsuspendedidle(dma_info_t *di)
+{
+ if (di->ntxd == 0)
+ return TRUE;
+
+ if (!(R_REG(di->osh, &di->d32txregs->control) & XC_SE))
+ return 0;
+
+ if ((R_REG(di->osh, &di->d32txregs->status) & XS_XS_MASK) != XS_XS_IDLE)
+ return 0;
+
+ OSL_DELAY(2);
+ return ((R_REG(di->osh, &di->d32txregs->status) & XS_XS_MASK) == XS_XS_IDLE);
+}
+
+/* !! tx entry routine
+ * supports full 32-bit dma engine buffer addressing, so
+ * dma buffers can cross 4 KByte page boundaries.
+ */
+static int
+dma32_txfast(dma_info_t *di, void *p0, bool commit)
+{
+ void *p, *next;
+ uchar *data;
+ uint len;
+ uint txout;
+ uint32 flags = 0;
+ uint32 pa;
+
+ DMA_TRACE(("%s: dma_txfast\n", di->name));
+
+ txout = di->txout;
+
+ /*
+ * Walk the chain of packet buffers
+ * allocating and initializing transmit descriptor entries.
+ */
+ for (p = p0; p; p = next) {
+ data = PKTDATA(di->osh, p);
+ len = PKTLEN(di->osh, p);
+ next = PKTNEXT(di->osh, p);
+
+ /* return nonzero if out of tx descriptors */
+ if (NEXTTXD(txout) == di->txin)
+ goto outoftxd;
+
+ if (len == 0)
+ continue;
+
+ /* get physical address of buffer start */
+ pa = (uint32) DMA_MAP(di->osh, data, len, DMA_TX, p);
+
+ flags = 0;
+ if (p == p0)
+ flags |= CTRL_SOF;
+ if (next == NULL)
+ flags |= (CTRL_IOC | CTRL_EOF);
+ if (txout == (di->ntxd - 1))
+ flags |= CTRL_EOT;
+
+ dma32_dd_upd(di, di->txd32, pa, txout, &flags, len);
+ ASSERT(di->txp[txout] == NULL);
+
+ txout = NEXTTXD(txout);
+ }
+
+ /* if last txd eof not set, fix it */
+ if (!(flags & CTRL_EOF))
+ W_SM(&di->txd32[PREVTXD(txout)].ctrl, BUS_SWAP32(flags | CTRL_IOC | CTRL_EOF));
+
+ /* save the packet */
+ di->txp[PREVTXD(txout)] = p0;
+
+ /* bump the tx descriptor index */
+ di->txout = txout;
+
+ /* kick the chip */
+ if (commit)
+ W_REG(di->osh, &di->d32txregs->ptr, I2B(txout, dma32dd_t));
+
+ /* tx flow control */
+ di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
+
+ return (0);
+
+outoftxd:
+ DMA_ERROR(("%s: dma_txfast: out of txds\n", di->name));
+ PKTFREE(di->osh, p0, TRUE);
+ di->hnddma.txavail = 0;
+ di->hnddma.txnobuf++;
+ return (-1);
+}
+
+/*
+ * Reclaim next completed txd (txds if using chained buffers) and
+ * return associated packet.
+ * If 'forceall' is true, reclaim txd(s) and return associated packet
+ * regardless of the value of the hardware "curr" pointer.
+ */
+static void *
+dma32_getnexttxp(dma_info_t *di, bool forceall)
+{
+ uint start, end, i;
+ void *txp;
+
+ DMA_TRACE(("%s: dma_getnexttxp %s\n", di->name, forceall ? "all" : ""));
+
+ if (di->ntxd == 0)
+ return (NULL);
+
+ txp = NULL;
+
+ start = di->txin;
+ if (forceall)
+ end = di->txout;
+ else
+ end = B2I(R_REG(di->osh, &di->d32txregs->status) & XS_CD_MASK, dma32dd_t);
+
+ if ((start == 0) && (end > di->txout))
+ goto bogus;
+
+ for (i = start; i != end && !txp; i = NEXTTXD(i)) {
+ DMA_UNMAP(di->osh, (BUS_SWAP32(R_SM(&di->txd32[i].addr)) - di->dataoffsetlow),
+ (BUS_SWAP32(R_SM(&di->txd32[i].ctrl)) & CTRL_BC_MASK),
+ DMA_TX, di->txp[i]);
+
+ W_SM(&di->txd32[i].addr, 0xdeadbeef);
+ txp = di->txp[i];
+ di->txp[i] = NULL;
+ }
+
+ di->txin = i;
+
+ /* tx flow control */
+ di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
+
+ return (txp);
+
+bogus:
+/*
+ DMA_ERROR(("dma_getnexttxp: bogus curr: start %d end %d txout %d force %d\n",
+ start, end, di->txout, forceall));
+*/
+ return (NULL);
+}
+
+static void *
+dma32_getnextrxp(dma_info_t *di, bool forceall)
+{
+ uint i;
+ void *rxp;
+
+ /* if forcing, dma engine must be disabled */
+ ASSERT(!forceall || !dma32_rxenabled(di));
+
+ i = di->rxin;
+
+ /* return if no packets posted */
+ if (i == di->rxout)
+ return (NULL);
+
+ /* ignore curr if forceall */
+ if (!forceall && (i == B2I(R_REG(di->osh, &di->d32rxregs->status) & RS_CD_MASK, dma32dd_t)))
+ return (NULL);
+
+ /* get the packet pointer that corresponds to the rx descriptor */
+ rxp = di->rxp[i];
+ ASSERT(rxp);
+ di->rxp[i] = NULL;
+
+ /* clear this packet from the descriptor ring */
+ DMA_UNMAP(di->osh, (BUS_SWAP32(R_SM(&di->rxd32[i].addr)) - di->dataoffsetlow),
+ di->rxbufsize, DMA_RX, rxp);
+
+ W_SM(&di->rxd32[i].addr, 0xdeadbeef);
+
+ di->rxin = NEXTRXD(i);
+
+ return (rxp);
+}
+
+/*
+ * Rotate all active tx dma ring entries "forward" by (ActiveDescriptor - txin).
+ */
+static void
+dma32_txrotate(dma_info_t *di)
+{
+ uint ad;
+ uint nactive;
+ uint rot;
+ uint old, new;
+ uint32 w;
+ uint first, last;
+
+ ASSERT(dma32_txsuspendedidle(di));
+
+ nactive = _dma_txactive(di);
+ ad = B2I(((R_REG(di->osh, &di->d32txregs->status) & XS_AD_MASK) >> XS_AD_SHIFT), dma32dd_t);
+ rot = TXD(ad - di->txin);
+
+ ASSERT(rot < di->ntxd);
+
+ /* full-ring case is a lot harder - don't worry about this */
+ if (rot >= (di->ntxd - nactive)) {
+ DMA_ERROR(("%s: dma_txrotate: ring full - punt\n", di->name));
+ return;
+ }
+
+ first = di->txin;
+ last = PREVTXD(di->txout);
+
+ /* move entries starting at last and moving backwards to first */
+ for (old = last; old != PREVTXD(first); old = PREVTXD(old)) {
+ new = TXD(old + rot);
+
+ /*
+ * Move the tx dma descriptor.
+ * EOT is set only in the last entry in the ring.
+ */
+ w = BUS_SWAP32(R_SM(&di->txd32[old].ctrl)) & ~CTRL_EOT;
+ if (new == (di->ntxd - 1))
+ w |= CTRL_EOT;
+ W_SM(&di->txd32[new].ctrl, BUS_SWAP32(w));
+ W_SM(&di->txd32[new].addr, R_SM(&di->txd32[old].addr));
+
+ /* zap the old tx dma descriptor address field */
+ W_SM(&di->txd32[old].addr, BUS_SWAP32(0xdeadbeef));
+
+ /* move the corresponding txp[] entry */
+ ASSERT(di->txp[new] == NULL);
+ di->txp[new] = di->txp[old];
+ di->txp[old] = NULL;
+ }
+
+ /* update txin and txout */
+ di->txin = ad;
+ di->txout = TXD(di->txout + rot);
+ di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
+
+ /* kick the chip */
+ W_REG(di->osh, &di->d32txregs->ptr, I2B(di->txout, dma32dd_t));
+}
+
+/* 64-bit DMA functions */
+
+#ifdef BCMDMA64
+static void
+dma64_txinit(dma_info_t *di)
+{
+ DMA_TRACE(("%s: dma_txinit\n", di->name));
+
+ if (di->ntxd == 0)
+ return;
+
+ di->txin = di->txout = 0;
+ di->hnddma.txavail = di->ntxd - 1;
+
+ /* clear tx descriptor ring */
+ BZERO_SM((void *)(uintptr)di->txd64, (di->ntxd * sizeof(dma64dd_t)));
+ W_REG(di->osh, &di->d64txregs->control, D64_XC_XE);
+ _dma_ddtable_init(di, DMA_TX, di->txdpa);
+}
+
+static bool
+dma64_txenabled(dma_info_t *di)
+{
+ uint32 xc;
+
+ /* If the chip is dead, it is not enabled :-) */
+ xc = R_REG(di->osh, &di->d64txregs->control);
+ return ((xc != 0xffffffff) && (xc & D64_XC_XE));
+}
+
+static void
+dma64_txsuspend(dma_info_t *di)
+{
+ DMA_TRACE(("%s: dma_txsuspend\n", di->name));
+
+ if (di->ntxd == 0)
+ return;
+
+ OR_REG(di->osh, &di->d64txregs->control, D64_XC_SE);
+}
+
+static void
+dma64_txresume(dma_info_t *di)
+{
+ DMA_TRACE(("%s: dma_txresume\n", di->name));
+
+ if (di->ntxd == 0)
+ return;
+
+ AND_REG(di->osh, &di->d64txregs->control, ~D64_XC_SE);
+}
+
+static bool
+dma64_txsuspended(dma_info_t *di)
+{
+ return (di->ntxd == 0) || ((R_REG(di->osh, &di->d64txregs->control) & D64_XC_SE)
+ == D64_XC_SE);
+}
+
+static void
+dma64_txreclaim(dma_info_t *di, bool forceall)
+{
+ void *p;
+
+ DMA_TRACE(("%s: dma_txreclaim %s\n", di->name, forceall ? "all" : ""));
+
+ while ((p = dma64_getnexttxp(di, forceall)))
+ PKTFREE(di->osh, p, TRUE);
+}
+
+static bool
+dma64_txstopped(dma_info_t *di)
+{
+ return ((R_REG(di->osh, &di->d64txregs->status0) & D64_XS0_XS_MASK) == D64_XS0_XS_STOPPED);
+}
+
+static bool
+dma64_rxstopped(dma_info_t *di)
+{
+ return ((R_REG(di->osh, &di->d64rxregs->status0) & D64_RS0_RS_MASK) == D64_RS0_RS_STOPPED);
+}
+
+static bool
+dma64_alloc(dma_info_t *di, uint direction)
+{
+ uint size;
+ uint ddlen;
+ uint32 alignbytes;
+ void *va;
+
+ ddlen = sizeof(dma64dd_t);
+
+ size = (direction == DMA_TX) ? (di->ntxd * ddlen) : (di->nrxd * ddlen);
+
+ alignbytes = di->dma64align;
+
+ if (!ISALIGNED(DMA_CONSISTENT_ALIGN, alignbytes))
+ size += alignbytes;
+
+ if (direction == DMA_TX) {
+ if ((va = DMA_ALLOC_CONSISTENT(di->osh, size, &di->txdpa, &di->tx_dmah)) == NULL) {
+ DMA_ERROR(("%s: dma_attach: DMA_ALLOC_CONSISTENT(ntxd) failed\n",
+ di->name));
+ return FALSE;
+ }
+
+ di->txd64 = (dma64dd_t *) ROUNDUP((uintptr)va, alignbytes);
+ di->txdalign = (uint)((int8*)(uintptr)di->txd64 - (int8*)va);
+ di->txdpa += di->txdalign;
+ di->txdalloc = size;
+ ASSERT(ISALIGNED((uintptr)di->txd64, alignbytes));
+ } else {
+ if ((va = DMA_ALLOC_CONSISTENT(di->osh, size, &di->rxdpa, &di->rx_dmah)) == NULL) {
+ DMA_ERROR(("%s: dma_attach: DMA_ALLOC_CONSISTENT(nrxd) failed\n",
+ di->name));
+ return FALSE;
+ }
+ di->rxd64 = (dma64dd_t *) ROUNDUP((uintptr)va, alignbytes);
+ di->rxdalign = (uint)((int8*)(uintptr)di->rxd64 - (int8*)va);
+ di->rxdpa += di->rxdalign;
+ di->rxdalloc = size;
+ ASSERT(ISALIGNED((uintptr)di->rxd64, alignbytes));
+ }
+
+ return TRUE;
+}
+
+static bool
+dma64_txreset(dma_info_t *di)
+{
+ uint32 status;
+
+ if (di->ntxd == 0)
+ return TRUE;
+
+ /* suspend tx DMA first */
+ W_REG(di->osh, &di->d64txregs->control, D64_XC_SE);
+ SPINWAIT(((status = (R_REG(di->osh, &di->d64txregs->status0) & D64_XS0_XS_MASK)) !=
+ D64_XS0_XS_DISABLED) &&
+ (status != D64_XS0_XS_IDLE) &&
+ (status != D64_XS0_XS_STOPPED),
+ 10000);
+
+ W_REG(di->osh, &di->d64txregs->control, 0);
+ SPINWAIT(((status = (R_REG(di->osh, &di->d64txregs->status0) & D64_XS0_XS_MASK)) !=
+ D64_XS0_XS_DISABLED),
+ 10000);
+
+ /* wait for the last transaction to complete */
+ OSL_DELAY(300);
+
+ return (status == D64_XS0_XS_DISABLED);
+}
+
+static bool
+dma64_rxidle(dma_info_t *di)
+{
+ DMA_TRACE(("%s: dma_rxidle\n", di->name));
+
+ if (di->nrxd == 0)
+ return TRUE;
+
+ return ((R_REG(di->osh, &di->d64rxregs->status0) & D64_RS0_CD_MASK) ==
+ R_REG(di->osh, &di->d64rxregs->ptr));
+}
+
+static bool
+dma64_rxreset(dma_info_t *di)
+{
+ uint32 status;
+
+ if (di->nrxd == 0)
+ return TRUE;
+
+ W_REG(di->osh, &di->d64rxregs->control, 0);
+ SPINWAIT(((status = (R_REG(di->osh, &di->d64rxregs->status0) & D64_RS0_RS_MASK)) !=
+ D64_RS0_RS_DISABLED),
+ 10000);
+
+ return (status == D64_RS0_RS_DISABLED);
+}
+
+static bool
+dma64_rxenabled(dma_info_t *di)
+{
+ uint32 rc;
+
+ rc = R_REG(di->osh, &di->d64rxregs->control);
+ return ((rc != 0xffffffff) && (rc & D64_RC_RE));
+}
+
+static bool
+dma64_txsuspendedidle(dma_info_t *di)
+{
+
+ if (di->ntxd == 0)
+ return TRUE;
+
+ if (!(R_REG(di->osh, &di->d64txregs->control) & D64_XC_SE))
+ return 0;
+
+ if ((R_REG(di->osh, &di->d64txregs->status0) & D64_XS0_XS_MASK) == D64_XS0_XS_IDLE)
+ return 1;
+
+ return 0;
+}
+
+
+/* !! tx entry routine */
+static int
+dma64_txfast(dma_info_t *di, void *p0, bool commit)
+{
+ void *p, *next;
+ uchar *data;
+ uint len;
+ uint txout;
+ uint32 flags = 0;
+ uint32 pa;
+
+ DMA_TRACE(("%s: dma_txfast\n", di->name));
+
+ txout = di->txout;
+
+ /*
+ * Walk the chain of packet buffers
+ * allocating and initializing transmit descriptor entries.
+ */
+ for (p = p0; p; p = next) {
+ data = PKTDATA(di->osh, p);
+ len = PKTLEN(di->osh, p);
+ next = PKTNEXT(di->osh, p);
+
+ /* return nonzero if out of tx descriptors */
+ if (NEXTTXD(txout) == di->txin)
+ goto outoftxd;
+
+ if (len == 0)
+ continue;
+
+ /* get physical address of buffer start */
+ pa = (uint32) DMA_MAP(di->osh, data, len, DMA_TX, p);
+
+ flags = 0;
+ if (p == p0)
+ flags |= D64_CTRL1_SOF;
+ if (next == NULL)
+ flags |= (D64_CTRL1_IOC | D64_CTRL1_EOF);
+ if (txout == (di->ntxd - 1))
+ flags |= D64_CTRL1_EOT;
+
+ dma64_dd_upd(di, di->txd64, pa, txout, &flags, len);
+ ASSERT(di->txp[txout] == NULL);
+
+ txout = NEXTTXD(txout);
+ }
+
+ /* if last txd eof not set, fix it */
+ if (!(flags & D64_CTRL1_EOF))
+ W_SM(&di->txd64[PREVTXD(txout)].ctrl1,
+ BUS_SWAP32(flags | D64_CTRL1_IOC | D64_CTRL1_EOF));
+
+ /* save the packet */
+ di->txp[PREVTXD(txout)] = p0;
+
+ /* bump the tx descriptor index */
+ di->txout = txout;
+
+ /* kick the chip */
+ if (commit)
+ W_REG(di->osh, &di->d64txregs->ptr, I2B(txout, dma64dd_t));
+
+ /* tx flow control */
+ di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
+
+ return (0);
+
+outoftxd:
+ DMA_ERROR(("%s: dma_txfast: out of txds\n", di->name));
+ PKTFREE(di->osh, p0, TRUE);
+ di->hnddma.txavail = 0;
+ di->hnddma.txnobuf++;
+ return (-1);
+}
+
+/*
+ * Reclaim next completed txd (txds if using chained buffers) and
+ * return associated packet.
+ * If 'forceall' is true, reclaim txd(s) and return associated packet
+ * regardless of the value of the hardware "curr" pointer.
+ */
+static void *
+dma64_getnexttxp(dma_info_t *di, bool forceall)
+{
+ uint start, end, i;
+ void *txp;
+
+ DMA_TRACE(("%s: dma_getnexttxp %s\n", di->name, forceall ? "all" : ""));
+
+ if (di->ntxd == 0)
+ return (NULL);
+
+ txp = NULL;
+
+ start = di->txin;
+ if (forceall)
+ end = di->txout;
+ else
+ end = B2I(R_REG(di->osh, &di->d64txregs->status0) & D64_XS0_CD_MASK, dma64dd_t);
+
+ if ((start == 0) && (end > di->txout))
+ goto bogus;
+
+ for (i = start; i != end && !txp; i = NEXTTXD(i)) {
+ DMA_UNMAP(di->osh, (BUS_SWAP32(R_SM(&di->txd64[i].addrlow)) - di->dataoffsetlow),
+ (BUS_SWAP32(R_SM(&di->txd64[i].ctrl2)) & D64_CTRL2_BC_MASK),
+ DMA_TX, di->txp[i]);
+
+ W_SM(&di->txd64[i].addrlow, 0xdeadbeef);
+ W_SM(&di->txd64[i].addrhigh, 0xdeadbeef);
+
+ txp = di->txp[i];
+ di->txp[i] = NULL;
+ }
+
+ di->txin = i;
+
+ /* tx flow control */
+ di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
+
+ return (txp);
+
+bogus:
+/*
+ DMA_ERROR(("dma_getnexttxp: bogus curr: start %d end %d txout %d force %d\n",
+ start, end, di->txout, forceall));
+*/
+ return (NULL);
+}
+
+static void *
+dma64_getnextrxp(dma_info_t *di, bool forceall)
+{
+ uint i;
+ void *rxp;
+
+ /* if forcing, dma engine must be disabled */
+ ASSERT(!forceall || !dma64_rxenabled(di));
+
+ i = di->rxin;
+
+ /* return if no packets posted */
+ if (i == di->rxout)
+ return (NULL);
+
+ /* ignore curr if forceall */
+ if (!forceall &&
+ (i == B2I(R_REG(di->osh, &di->d64rxregs->status0) & D64_RS0_CD_MASK, dma64dd_t)))
+ return (NULL);
+
+ /* get the packet pointer that corresponds to the rx descriptor */
+ rxp = di->rxp[i];
+ ASSERT(rxp);
+ di->rxp[i] = NULL;
+
+ /* clear this packet from the descriptor ring */
+ DMA_UNMAP(di->osh, (BUS_SWAP32(R_SM(&di->rxd64[i].addrlow)) - di->dataoffsetlow),
+ di->rxbufsize, DMA_RX, rxp);
+
+ W_SM(&di->rxd64[i].addrlow, 0xdeadbeef);
+ W_SM(&di->rxd64[i].addrhigh, 0xdeadbeef);
+
+ di->rxin = NEXTRXD(i);
+
+ return (rxp);
+}
+
+static bool
+_dma64_addrext(osl_t *osh, dma64regs_t *dma64regs)
+{
+ uint32 w;
+ OR_REG(osh, &dma64regs->control, D64_XC_AE);
+ w = R_REG(osh, &dma64regs->control);
+ AND_REG(osh, &dma64regs->control, ~D64_XC_AE);
+ return ((w & D64_XC_AE) == D64_XC_AE);
+}
+
+/*
+ * Rotate all active tx dma ring entries "forward" by (ActiveDescriptor - txin).
+ */
+static void
+dma64_txrotate(dma_info_t *di)
+{
+ uint ad;
+ uint nactive;
+ uint rot;
+ uint old, new;
+ uint32 w;
+ uint first, last;
+
+ ASSERT(dma64_txsuspendedidle(di));
+
+ nactive = _dma_txactive(di);
+ ad = B2I((R_REG(di->osh, &di->d64txregs->status1) & D64_XS1_AD_MASK), dma64dd_t);
+ rot = TXD(ad - di->txin);
+
+ ASSERT(rot < di->ntxd);
+
+ /* full-ring case is a lot harder - don't worry about this */
+ if (rot >= (di->ntxd - nactive)) {
+ DMA_ERROR(("%s: dma_txrotate: ring full - punt\n", di->name));
+ return;
+ }
+
+ first = di->txin;
+ last = PREVTXD(di->txout);
+
+ /* move entries starting at last and moving backwards to first */
+ for (old = last; old != PREVTXD(first); old = PREVTXD(old)) {
+ new = TXD(old + rot);
+
+ /*
+ * Move the tx dma descriptor.
+ * EOT is set only in the last entry in the ring.
+ */
+ w = BUS_SWAP32(R_SM(&di->txd64[old].ctrl1)) & ~D64_CTRL1_EOT;
+ if (new == (di->ntxd - 1))
+ w |= D64_CTRL1_EOT;
+ W_SM(&di->txd64[new].ctrl1, BUS_SWAP32(w));
+
+ w = BUS_SWAP32(R_SM(&di->txd64[old].ctrl2));
+ W_SM(&di->txd64[new].ctrl2, BUS_SWAP32(w));
+
+ W_SM(&di->txd64[new].addrlow, R_SM(&di->txd64[old].addrlow));
+ W_SM(&di->txd64[new].addrhigh, R_SM(&di->txd64[old].addrhigh));
+
+ /* zap the old tx dma descriptor address field */
+ W_SM(&di->txd64[old].addrlow, BUS_SWAP32(0xdeadbeef));
+ W_SM(&di->txd64[old].addrhigh, BUS_SWAP32(0xdeadbeef));
+
+ /* move the corresponding txp[] entry */
+ ASSERT(di->txp[new] == NULL);
+ di->txp[new] = di->txp[old];
+ di->txp[old] = NULL;
+ }
+
+ /* update txin and txout */
+ di->txin = ad;
+ di->txout = TXD(di->txout + rot);
+ di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1;
+
+ /* kick the chip */
+ W_REG(di->osh, &di->d64txregs->ptr, I2B(di->txout, dma64dd_t));
+}
+
+#endif /* BCMDMA64 */
+
+uint
+dma_addrwidth(sb_t *sbh, void *dmaregs)
+{
+ dma32regs_t *dma32regs;
+ osl_t *osh;
+
+ osh = sb_osh(sbh);
+
+ if (DMA64_CAP) {
+ /* DMA engine is 64-bit capable */
+ if (((sb_coreflagshi(sbh, 0, 0) & SBTMH_DMA64) == SBTMH_DMA64)) {
+			/* backplane is 64-bit capable */
+#if 0
+ if (sb_backplane64(sbh))
+ /* If bus is System Backplane or PCIE then we can access 64-bits */
+ if ((BUSTYPE(sbh->bustype) == SB_BUS) ||
+ ((BUSTYPE(sbh->bustype) == PCI_BUS) &&
+ sbh->buscoretype == SB_PCIE))
+ return (DMADDRWIDTH_64);
+#endif
+
+			/* DMA64 is always 32-bit capable, AE is always TRUE */
+#ifdef BCMDMA64
+ ASSERT(_dma64_addrext(osh, (dma64regs_t *)dmaregs));
+#endif
+ return (DMADDRWIDTH_32);
+ }
+ }
+
+ /* Start checking for 32-bit / 30-bit addressing */
+ dma32regs = (dma32regs_t *)dmaregs;
+
+	/* For System Backplane, PCIE bus, or the addrext feature, 32 bits are OK */
+ if ((BUSTYPE(sbh->bustype) == SB_BUS) ||
+ ((BUSTYPE(sbh->bustype) == PCI_BUS) && sbh->buscoretype == SB_PCIE) ||
+ (_dma32_addrext(osh, dma32regs)))
+ return (DMADDRWIDTH_32);
+
+ /* Fallthru */
+ return (DMADDRWIDTH_30);
+}
diff --git a/package/broadcom-wl/src/driver/hnddma.h b/package/broadcom-wl/src/driver/hnddma.h
new file mode 100644
index 000000000..de74c067b
--- /dev/null
+++ b/package/broadcom-wl/src/driver/hnddma.h
@@ -0,0 +1,156 @@
+/*
+ * Generic Broadcom Home Networking Division (HND) DMA engine SW interface
+ * This supports the following chips: BCM42xx, 44xx, 47xx.
+ *
+ * Copyright 2006, Broadcom Corporation
+ * All Rights Reserved.
+ *
+ * THIS SOFTWARE IS OFFERED "AS IS", AND BROADCOM GRANTS NO WARRANTIES OF ANY
+ * KIND, EXPRESS OR IMPLIED, BY STATUTE, COMMUNICATION OR OTHERWISE. BROADCOM
+ * SPECIFICALLY DISCLAIMS ANY IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A SPECIFIC PURPOSE OR NONINFRINGEMENT CONCERNING THIS SOFTWARE.
+ * $Id: hnddma.h,v 1.1.1.13 2006/04/08 06:13:39 honor Exp $
+ */
+
+#ifndef _hnddma_h_
+#define _hnddma_h_
+
+typedef const struct hnddma_pub hnddma_t;
+
+/* dma function type */
+typedef void (*di_detach_t)(hnddma_t *dmah);
+typedef bool (*di_txreset_t)(hnddma_t *dmah);
+typedef bool (*di_rxreset_t)(hnddma_t *dmah);
+typedef bool (*di_rxidle_t)(hnddma_t *dmah);
+typedef void (*di_txinit_t)(hnddma_t *dmah);
+typedef bool (*di_txenabled_t)(hnddma_t *dmah);
+typedef void (*di_rxinit_t)(hnddma_t *dmah);
+typedef void (*di_txsuspend_t)(hnddma_t *dmah);
+typedef void (*di_txresume_t)(hnddma_t *dmah);
+typedef bool (*di_txsuspended_t)(hnddma_t *dmah);
+typedef bool (*di_txsuspendedidle_t)(hnddma_t *dmah);
+typedef int (*di_txfast_t)(hnddma_t *dmah, void *p, bool commit);
+typedef void (*di_fifoloopbackenable_t)(hnddma_t *dmah);
+typedef bool (*di_txstopped_t)(hnddma_t *dmah);
+typedef bool (*di_rxstopped_t)(hnddma_t *dmah);
+typedef bool (*di_rxenable_t)(hnddma_t *dmah);
+typedef bool (*di_rxenabled_t)(hnddma_t *dmah);
+typedef void* (*di_rx_t)(hnddma_t *dmah);
+typedef void (*di_rxfill_t)(hnddma_t *dmah);
+typedef void (*di_txreclaim_t)(hnddma_t *dmah, bool forceall);
+typedef void (*di_rxreclaim_t)(hnddma_t *dmah);
+typedef uintptr (*di_getvar_t)(hnddma_t *dmah, char *name);
+typedef void* (*di_getnexttxp_t)(hnddma_t *dmah, bool forceall);
+typedef void* (*di_getnextrxp_t)(hnddma_t *dmah, bool forceall);
+typedef void* (*di_peeknexttxp_t)(hnddma_t *dmah);
+typedef void (*di_txblock_t)(hnddma_t *dmah);
+typedef void (*di_txunblock_t)(hnddma_t *dmah);
+typedef uint (*di_txactive_t)(hnddma_t *dmah);
+typedef void (*di_txrotate_t)(hnddma_t *dmah);
+typedef void (*di_counterreset_t)(hnddma_t *dmah);
+typedef char* (*di_dump_t)(hnddma_t *dmah, struct bcmstrbuf *b, bool dumpring);
+typedef char* (*di_dumptx_t)(hnddma_t *dmah, struct bcmstrbuf *b, bool dumpring);
+typedef char* (*di_dumprx_t)(hnddma_t *dmah, struct bcmstrbuf *b, bool dumpring);
+
+/* dma opsvec */
+typedef struct di_fcn_s {
+ di_detach_t detach;
+ di_txinit_t txinit;
+ di_txreset_t txreset;
+ di_txenabled_t txenabled;
+ di_txsuspend_t txsuspend;
+ di_txresume_t txresume;
+ di_txsuspended_t txsuspended;
+ di_txsuspendedidle_t txsuspendedidle;
+ di_txfast_t txfast;
+ di_txstopped_t txstopped;
+ di_txreclaim_t txreclaim;
+ di_getnexttxp_t getnexttxp;
+ di_peeknexttxp_t peeknexttxp;
+ di_txblock_t txblock;
+ di_txunblock_t txunblock;
+ di_txactive_t txactive;
+ di_txrotate_t txrotate;
+
+ di_rxinit_t rxinit;
+ di_rxreset_t rxreset;
+ di_rxidle_t rxidle;
+ di_rxstopped_t rxstopped;
+ di_rxenable_t rxenable;
+ di_rxenabled_t rxenabled;
+ di_rx_t rx;
+ di_rxfill_t rxfill;
+ di_rxreclaim_t rxreclaim;
+ di_getnextrxp_t getnextrxp;
+
+ di_fifoloopbackenable_t fifoloopbackenable;
+ di_getvar_t d_getvar;
+ di_counterreset_t counterreset;
+ di_dump_t dump;
+ di_dumptx_t dumptx;
+ di_dumprx_t dumprx;
+ uint endnum;
+} di_fcn_t;
+
+/*
+ * Exported data structure (read-only)
+ */
+struct hnddma_pub {
+ di_fcn_t di_fn; /* DMA function pointers */
+ uint txavail; /* # free tx descriptors */
+
+ /* rx error counters */
+ uint rxgiants; /* rx giant frames */
+ uint rxnobuf; /* rx out of dma descriptors */
+ /* tx error counters */
+ uint txnobuf; /* tx out of dma descriptors */
+};
+
+
+extern hnddma_t * dma_attach(osl_t *osh, char *name, sb_t *sbh, void *dmaregstx, void *dmaregsrx,
+ uint ntxd, uint nrxd, uint rxbufsize, uint nrxpost, uint rxoffset,
+ uint *msg_level);
+#define dma_detach(di) ((di)->di_fn.detach(di))
+#define dma_txreset(di) ((di)->di_fn.txreset(di))
+#define dma_rxreset(di) ((di)->di_fn.rxreset(di))
+#define dma_rxidle(di) ((di)->di_fn.rxidle(di))
+#define dma_txinit(di) ((di)->di_fn.txinit(di))
+#define dma_txenabled(di) ((di)->di_fn.txenabled(di))
+#define dma_rxinit(di) ((di)->di_fn.rxinit(di))
+#define dma_txsuspend(di) ((di)->di_fn.txsuspend(di))
+#define dma_txresume(di) ((di)->di_fn.txresume(di))
+#define dma_txsuspended(di) ((di)->di_fn.txsuspended(di))
+#define dma_txsuspendedidle(di) ((di)->di_fn.txsuspendedidle(di))
+#define dma_txfast(di, p, commit) ((di)->di_fn.txfast(di, p, commit))
+#define dma_fifoloopbackenable(di) ((di)->di_fn.fifoloopbackenable(di))
+#define dma_txstopped(di) ((di)->di_fn.txstopped(di))
+#define dma_rxstopped(di) ((di)->di_fn.rxstopped(di))
+#define dma_rxenable(di) ((di)->di_fn.rxenable(di))
+#define dma_rxenabled(di) ((di)->di_fn.rxenabled(di))
+#define dma_rx(di) ((di)->di_fn.rx(di))
+#define dma_rxfill(di) ((di)->di_fn.rxfill(di))
+#define dma_txreclaim(di, forceall) ((di)->di_fn.txreclaim(di, forceall))
+#define dma_rxreclaim(di) ((di)->di_fn.rxreclaim(di))
+#define dma_getvar(di, name) ((di)->di_fn.d_getvar(di, name))
+#define dma_getnexttxp(di, forceall) ((di)->di_fn.getnexttxp(di, forceall))
+#define dma_getnextrxp(di, forceall) ((di)->di_fn.getnextrxp(di, forceall))
+#define dma_peeknexttxp(di) ((di)->di_fn.peeknexttxp(di))
+#define dma_txblock(di) ((di)->di_fn.txblock(di))
+#define dma_txunblock(di) ((di)->di_fn.txunblock(di))
+#define dma_txactive(di) ((di)->di_fn.txactive(di))
+#define dma_txrotate(di) ((di)->di_fn.txrotate(di))
+#define dma_counterreset(di) ((di)->di_fn.counterreset(di))
+
+#define DMA_DUMP_SIZE 2048
+/* return the allowed address width
+ * This must be called after SB attach but before dma attach:
+ * SB attach provides the ability to probe backplane and dma core
+ * capabilities, and this info is needed by DMA_ALLOC_CONSISTENT in
+ * dma attach.
+ */
+extern uint dma_addrwidth(sb_t *sbh, void *dmaregs);
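+
+/*
+ * Illustrative call sequence (a sketch, not part of the interface proper;
+ * NTXD, NRXD, RXBUFSZ, NRXPOST and RXOFFSET are placeholder values chosen
+ * by the caller):
+ *
+ *	addrwidth = dma_addrwidth(sbh, dmaregstx);
+ *	di = dma_attach(osh, "wl0", sbh, dmaregstx, dmaregsrx,
+ *	                NTXD, NRXD, RXBUFSZ, NRXPOST, RXOFFSET, &msg_level);
+ *	dma_txinit(di);
+ *	dma_rxinit(di);
+ *	dma_rxfill(di);
+ *	if (dma_txfast(di, p, TRUE) < 0)
+ *		;	-- ring full: p has already been freed and txnobuf bumped
+ */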
+
+/* pio helpers */
+void dma_txpioloopback(osl_t *osh, dma32regs_t *);
+
+#endif /* _hnddma_h_ */
diff --git a/package/broadcom-wl/src/driver/linux_osl.c b/package/broadcom-wl/src/driver/linux_osl.c
new file mode 100644
index 000000000..24fd77dae
--- /dev/null
+++ b/package/broadcom-wl/src/driver/linux_osl.c
@@ -0,0 +1,274 @@
+/*
+ * Linux OS Independent Layer
+ *
+ * Copyright 2006, Broadcom Corporation
+ * All Rights Reserved.
+ *
+ * THIS SOFTWARE IS OFFERED "AS IS", AND BROADCOM GRANTS NO WARRANTIES OF ANY
+ * KIND, EXPRESS OR IMPLIED, BY STATUTE, COMMUNICATION OR OTHERWISE. BROADCOM
+ * SPECIFICALLY DISCLAIMS ANY IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A SPECIFIC PURPOSE OR NONINFRINGEMENT CONCERNING THIS SOFTWARE.
+ *
+ * $Id: linux_osl.c,v 1.1.1.14 2006/04/08 06:13:39 honor Exp $
+ */
+
+#define LINUX_OSL
+
+#include <typedefs.h>
+#include <bcmendian.h>
+#include <linux/module.h>
+#include <linuxver.h>
+#include <bcmdefs.h>
+#include <osl.h>
+#include "linux_osl.h"
+#include <bcmutils.h>
+#include <linux/delay.h>
+#ifdef mips
+#include <asm/paccess.h>
+#endif /* mips */
+#include <pcicfg.h>
+
+#define PCI_CFG_RETRY 10
+
+#define OS_HANDLE_MAGIC 0x1234abcd /* Magic # to recognise osh */
+#define BCM_MEM_FILENAME_LEN 24 /* Mem. filename length */
+
+typedef struct bcm_mem_link {
+ struct bcm_mem_link *prev;
+ struct bcm_mem_link *next;
+ uint size;
+ int line;
+ char file[BCM_MEM_FILENAME_LEN];
+} bcm_mem_link_t;
+
+static int16 linuxbcmerrormap[] =
+{ 0, /* 0 */
+ -EINVAL, /* BCME_ERROR */
+ -EINVAL, /* BCME_BADARG */
+ -EINVAL, /* BCME_BADOPTION */
+ -EINVAL, /* BCME_NOTUP */
+ -EINVAL, /* BCME_NOTDOWN */
+ -EINVAL, /* BCME_NOTAP */
+ -EINVAL, /* BCME_NOTSTA */
+ -EINVAL, /* BCME_BADKEYIDX */
+ -EINVAL, /* BCME_RADIOOFF */
+ -EINVAL, /* BCME_NOTBANDLOCKED */
+ -EINVAL, /* BCME_NOCLK */
+ -EINVAL, /* BCME_BADRATESET */
+ -EINVAL, /* BCME_BADBAND */
+ -E2BIG, /* BCME_BUFTOOSHORT */
+ -E2BIG, /* BCME_BUFTOOLONG */
+ -EBUSY, /* BCME_BUSY */
+ -EINVAL, /* BCME_NOTASSOCIATED */
+ -EINVAL, /* BCME_BADSSIDLEN */
+ -EINVAL, /* BCME_OUTOFRANGECHAN */
+ -EINVAL, /* BCME_BADCHAN */
+ -EFAULT, /* BCME_BADADDR */
+ -ENOMEM, /* BCME_NORESOURCE */
+ -EOPNOTSUPP, /* BCME_UNSUPPORTED */
+ -EMSGSIZE, /* BCME_BADLENGTH */
+ -EINVAL, /* BCME_NOTREADY */
+ -EPERM, /* BCME_NOTPERMITTED */
+ -ENOMEM, /* BCME_NOMEM */
+ -EINVAL, /* BCME_ASSOCIATED */
+ -ERANGE, /* BCME_RANGE */
+ -EINVAL, /* BCME_NOTFOUND */
+ -EINVAL, /* BCME_WME_NOT_ENABLED */
+ -EINVAL, /* BCME_TSPEC_NOTFOUND */
+ -EINVAL, /* BCME_ACM_NOTSUPPORTED */
+ -EINVAL, /* BCME_NOT_WME_ASSOCIATION */
+ -EIO, /* BCME_SDIO_ERROR */
+ -ENODEV /* BCME_DONGLE_DOWN */
+};
+
+/* translate bcmerrors into linux errors */
+int
+osl_error(int bcmerror)
+{
+ int abs_bcmerror;
+ int array_size = ARRAYSIZE(linuxbcmerrormap);
+
+ abs_bcmerror = ABS(bcmerror);
+
+ if (bcmerror > 0)
+ abs_bcmerror = 0;
+
+ else if (abs_bcmerror >= array_size)
+		abs_bcmerror = ABS(BCME_ERROR);
+
+ return linuxbcmerrormap[abs_bcmerror];
+}
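+
+/*
+ * For example, osl_error(BCME_NOMEM) yields -ENOMEM; a positive "error"
+ * code maps to 0, and anything outside the table falls back to the
+ * BCME_ERROR entry (-EINVAL).
+ */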
+
+osl_t *
+osl_attach(void *pdev, bool pkttag)
+{
+ osl_t *osh;
+
+ osh = kmalloc(sizeof(osl_t), GFP_ATOMIC);
+ ASSERT(osh);
+
+ bzero(osh, sizeof(osl_t));
+
+	/*
+	 * Check for two easy-to-miss cases:
+	 * 1. An error code was added to the bcmerror table but not to the
+	 *    OS-dependent error map above.
+	 * 2. An error code was added to the bcmerror table but the
+	 *    corresponding error string was not (hence the dummy call to
+	 *    bcmerrorstr).
+	 */
+ bcmerrorstr(0);
+ ASSERT(ABS(BCME_LAST) == (ARRAYSIZE(linuxbcmerrormap) - 1));
+
+ osh->magic = OS_HANDLE_MAGIC;
+ osh->malloced = 0;
+ osh->failed = 0;
+ osh->dbgmem_list = NULL;
+ osh->pdev = pdev;
+ osh->pub.pkttag = pkttag;
+
+ return osh;
+}
+
+void
+osl_detach(osl_t *osh)
+{
+ if (osh == NULL)
+ return;
+
+ ASSERT(osh->magic == OS_HANDLE_MAGIC);
+ kfree(osh);
+}
+
+/* Return a new packet. zero out pkttag */
+void*
+osl_pktget(osl_t *osh, uint len, bool send)
+{
+ struct sk_buff *skb;
+
+ if ((skb = dev_alloc_skb(len))) {
+ skb_put(skb, len);
+ skb->priority = 0;
+
+#ifdef BCMDBG_PKT
+ pktlist_add(&(osh->pktlist), (void *) skb);
+#endif /* BCMDBG_PKT */
+
+ osh->pub.pktalloced++;
+ }
+
+ return ((void*) skb);
+}
+
+typedef void (*pktfree_cb_fn_t)(void *ctx, void *pkt, uint16 status);
+/* Free the driver packet. Free the tag if present */
+void
+osl_pktfree(osl_t *osh, void *p, bool send)
+{
+ struct sk_buff *skb, *nskb;
+ pktfree_cb_fn_t tx_fn = osh->pub.tx_fn;
+
+ skb = (struct sk_buff*) p;
+
+ if (send && tx_fn)
+ tx_fn(osh->pub.tx_ctx, p, 0);
+
+ /* perversion: we use skb->next to chain multi-skb packets */
+ while (skb) {
+ nskb = skb->next;
+ skb->next = NULL;
+
+#ifdef BCMDBG_PKT
+ pktlist_remove(&(osh->pktlist), (void *) skb);
+#endif /* BCMDBG_PKT */
+
+ if (skb->destructor) {
+ /* cannot kfree_skb() on hard IRQ (net/core/skbuff.c) if destructor exists
+ */
+ dev_kfree_skb_any(skb);
+ } else {
+ /* can free immediately (even in_irq()) if destructor does not exist */
+ dev_kfree_skb(skb);
+ }
+
+ osh->pub.pktalloced--;
+
+ skb = nskb;
+ }
+}
+
+void*
+osl_malloc(osl_t *osh, uint size)
+{
+ void *addr;
+
+ /* only ASSERT if osh is defined */
+ if (osh)
+ ASSERT(osh->magic == OS_HANDLE_MAGIC);
+
+ if ((addr = kmalloc(size, GFP_ATOMIC)) == NULL) {
+ if (osh)
+ osh->failed++;
+ return (NULL);
+ }
+ if (osh)
+ osh->malloced += size;
+
+ return (addr);
+}
+
+void
+osl_mfree(osl_t *osh, void *addr, uint size)
+{
+ if (osh) {
+ ASSERT(osh->magic == OS_HANDLE_MAGIC);
+ osh->malloced -= size;
+ }
+ kfree(addr);
+}
+
+uint
+osl_malloced(osl_t *osh)
+{
+ ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
+ return (osh->malloced);
+}
+
+uint
+osl_malloc_failed(osl_t *osh)
+{
+ ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC)));
+ return (osh->failed);
+}
+
+#undef osl_delay
+void
+osl_delay(uint usec)
+{
+ OSL_DELAY(usec);
+}
+
+/* Clone a packet.
+ * The pkttag contents are NOT cloned.
+ */
+void *
+osl_pktdup(osl_t *osh, void *skb)
+{
+ void * p;
+
+ if ((p = skb_clone((struct sk_buff*)skb, GFP_ATOMIC)) == NULL)
+ return NULL;
+
+	/* skb_clone copies skb->cb; we don't want that */
+ if (osh->pub.pkttag)
+ bzero((void*)((struct sk_buff *)p)->cb, OSL_PKTTAG_SZ);
+
+ /* Increment the packet counter */
+ osh->pub.pktalloced++;
+ return (p);
+}
+
+uint
+osl_pktalloced(osl_t *osh)
+{
+ return (osh->pub.pktalloced);
+}
+
diff --git a/package/broadcom-wl/src/driver/linux_osl.h b/package/broadcom-wl/src/driver/linux_osl.h
new file mode 100644
index 000000000..d9c5533b8
--- /dev/null
+++ b/package/broadcom-wl/src/driver/linux_osl.h
@@ -0,0 +1,171 @@
+/*
+ * Linux OS Independent Layer
+ *
+ * Copyright 2006, Broadcom Corporation
+ * All Rights Reserved.
+ *
+ * THIS SOFTWARE IS OFFERED "AS IS", AND BROADCOM GRANTS NO WARRANTIES OF ANY
+ * KIND, EXPRESS OR IMPLIED, BY STATUTE, COMMUNICATION OR OTHERWISE. BROADCOM
+ * SPECIFICALLY DISCLAIMS ANY IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A SPECIFIC PURPOSE OR NONINFRINGEMENT CONCERNING THIS SOFTWARE.
+ *
+ * $Id: linux_osl.h,v 1.1.1.13 2006/04/08 06:13:39 honor Exp $
+ */
+
+#ifndef _linux_osl_h_
+#define _linux_osl_h_
+
+#include <typedefs.h>
+#include <linuxver.h>
+#include <osl.h>
+
+#define OSL_PKTTAG_SZ 32 /* Size of PktTag */
+
+/* osl handle type forward declaration */
+typedef struct osl_dmainfo osldma_t;
+
+/* OSL initialization */
+extern osl_t *osl_attach(void *pdev, bool pkttag);
+extern void osl_detach(osl_t *osh);
+
+/* host/bus architecture-specific byte swap */
+#define BUS_SWAP32(v) (v)
+#define MALLOC_FAILED(osh) osl_malloc_failed((osh))
+
+extern void *osl_malloc(osl_t *osh, uint size);
+extern void osl_mfree(osl_t *osh, void *addr, uint size);
+extern uint osl_malloced(osl_t *osh);
+extern uint osl_malloc_failed(osl_t *osh);
+
+/* API for DMA addressing capability */
+#define DMA_MAP(osh, va, size, direction, p) \
+ osl_dma_map((osh), (va), (size), (direction))
+#define DMA_UNMAP(osh, pa, size, direction, p) \
+ osl_dma_unmap((osh), (pa), (size), (direction))
+static inline uint
+osl_dma_map(void *osh, void *va, uint size, int direction)
+{
+ int dir;
+ struct pci_dev *dev;
+
+ dev = (osh == NULL ? NULL : ((osl_t *)osh)->pdev);
+ dir = (direction == DMA_TX)? PCI_DMA_TODEVICE: PCI_DMA_FROMDEVICE;
+ return (pci_map_single(dev, va, size, dir));
+}
+
+static inline void
+osl_dma_unmap(void *osh, uint pa, uint size, int direction)
+{
+ int dir;
+ struct pci_dev *dev;
+
+ dev = (osh == NULL ? NULL : ((osl_t *)osh)->pdev);
+ dir = (direction == DMA_TX)? PCI_DMA_TODEVICE: PCI_DMA_FROMDEVICE;
+ pci_unmap_single(dev, (uint32)pa, size, dir);
+}
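+
+/*
+ * A typical map/unmap pairing, as used by the DMA tx path (a sketch;
+ * "data", "len" and "p" are the caller's buffer, length and packet):
+ *
+ *	uint32 pa = DMA_MAP(osh, data, len, DMA_TX, p);
+ *	... post pa to the DMA engine and wait for completion ...
+ *	DMA_UNMAP(osh, pa, len, DMA_TX, p);
+ */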
+
+#define OSL_DMADDRWIDTH(osh, addrwidth) do {} while (0)
+#define DMA_CONSISTENT_ALIGN PAGE_SIZE
+#define DMA_ALLOC_CONSISTENT(osh, size, pap, dmah) \
+ osl_dma_alloc_consistent((osh), (size), (pap))
+#define DMA_FREE_CONSISTENT(osh, va, size, pa, dmah) \
+ osl_dma_free_consistent((osh), (void*)(va), (size), (pa))
+static inline void*
+osl_dma_alloc_consistent(osl_t *osh, uint size, ulong *pap)
+{
+ return (pci_alloc_consistent(osh->pdev, size, (dma_addr_t*)pap));
+}
+
+static inline void
+osl_dma_free_consistent(osl_t *osh, void *va, uint size, ulong pa)
+{
+ pci_free_consistent(osh->pdev, size, va, (dma_addr_t)pa);
+}
+
+
+/* register access macros */
+#if defined(BCMJTAG)
+#include <bcmjtag.h>
+#define R_REG(osh, r) bcmjtag_read(NULL, (uint32)(r), sizeof(*(r)))
+#define W_REG(osh, r, v) bcmjtag_write(NULL, (uint32)(r), (uint32)(v), sizeof(*(r)))
+#endif /* defined(BCMJTAG) */
+
+/* packet primitives */
+#define PKTGET(osh, len, send) osl_pktget((osh), (len), (send))
+#define PKTFREE(osh, skb, send) osl_pktfree((osh), (skb), (send))
+#define PKTDATA(osh, skb) (((struct sk_buff*)(skb))->data)
+#define PKTLEN(osh, skb) (((struct sk_buff*)(skb))->len)
+#define PKTHEADROOM(osh, skb) (PKTDATA(osh, skb)-(((struct sk_buff*)(skb))->head))
+#define PKTTAILROOM(osh, skb) ((((struct sk_buff*)(skb))->end)-(((struct sk_buff*)(skb))->tail))
+#define PKTNEXT(osh, skb) (((struct sk_buff*)(skb))->next)
+#define PKTSETNEXT(osh, skb, x) (((struct sk_buff*)(skb))->next = (struct sk_buff*)(x))
+#define PKTSETLEN(osh, skb, len) __skb_trim((struct sk_buff*)(skb), (len))
+#define PKTPUSH(osh, skb, bytes) skb_push((struct sk_buff*)(skb), (bytes))
+#define PKTPULL(osh, skb, bytes) skb_pull((struct sk_buff*)(skb), (bytes))
+#define PKTDUP(osh, skb) osl_pktdup((osh), (skb))
+#define PKTTAG(skb) ((void*)(((struct sk_buff*)(skb))->cb))
+#define PKTALLOCED(osh) osl_pktalloced((osh))
+#define PKTLIST_DUMP(osh, buf)
+
+/* Convert a native (OS) packet to a driver packet.
+ * The native packet is consumed in the process (there is no copying),
+ * and the packet tag is zeroed out.
+ */
+static INLINE void *
+osl_pkt_frmnative(osl_pubinfo_t *osh, struct sk_buff *skb)
+{
+ struct sk_buff *nskb;
+
+ if (osh->pkttag)
+ bzero((void*)skb->cb, OSL_PKTTAG_SZ);
+
+ /* Increment the packet counter */
+ for (nskb = skb; nskb; nskb = nskb->next) {
+ osh->pktalloced++;
+ }
+
+ return (void *)skb;
+}
+#define PKTFRMNATIVE(osh, skb) osl_pkt_frmnative(((osl_pubinfo_t*)osh), \
+ (struct sk_buff*)(skb))
+
+/* Convert a driver packet to a native (OS) packet.
+ * The packet tag is zeroed out before sending the packet up, since the
+ * IP code depends on skb->cb being set up correctly with various options;
+ * in our case that means it must be 0.
+ */
+static INLINE struct sk_buff *
+osl_pkt_tonative(osl_pubinfo_t *osh, void *pkt)
+{
+ struct sk_buff *nskb;
+
+ if (osh->pkttag)
+ bzero(((struct sk_buff*)pkt)->cb, OSL_PKTTAG_SZ);
+
+ /* Decrement the packet counter */
+ for (nskb = (struct sk_buff *)pkt; nskb; nskb = nskb->next) {
+ osh->pktalloced--;
+ }
+
+ return (struct sk_buff *)pkt;
+}
+#define PKTTONATIVE(osh, pkt) osl_pkt_tonative((osl_pubinfo_t*)(osh), (pkt))
+
+#define PKTLINK(skb) (((struct sk_buff*)(skb))->prev)
+#define PKTSETLINK(skb, x) (((struct sk_buff*)(skb))->prev = (struct sk_buff*)(x))
+#define PKTPRIO(skb) (((struct sk_buff*)(skb))->priority)
+#define PKTSETPRIO(skb, x) (((struct sk_buff*)(skb))->priority = (x))
+#define PKTSHARED(skb) (((struct sk_buff*)(skb))->cloned)
+
+extern void *osl_pktget(osl_t *osh, uint len, bool send);
+extern void osl_pktfree(osl_t *osh, void *skb, bool send);
+extern void *osl_pktdup(osl_t *osh, void *skb);
+extern uint osl_pktalloced(osl_t *osh);
+
+#define OSL_ERROR(bcmerror) osl_error(bcmerror)
+extern int osl_error(int bcmerror);
+
+/* the largest reasonable packet buffer the driver uses for the ethernet MTU, in bytes */
+#define PKTBUFSZ 2048
+
+#endif /* _linux_osl_h_ */
diff --git a/package/broadcom-wl/src/driver/patchtable.pl b/package/broadcom-wl/src/driver/patchtable.pl
new file mode 100644
index 000000000..699973512
--- /dev/null
+++ b/package/broadcom-wl/src/driver/patchtable.pl
@@ -0,0 +1,61 @@
+#!/usr/bin/perl
+#
+# Copyright (C) 2006 OpenWrt.org
+# Copyright (C) 2006 Felix Fietkau
+#
+# This is free software, licensed under the GNU General Public License v2.
+# See /LICENSE for more information.
+#
+use strict;
+
+my $TABLE = pack("V", 0xbadc0ded);
+my $TABLE_SIZE = 512;
+my $SLT1 = "\x01\x00\x00\x00";
+my $SLT2 = "\x02\x00\x00\x00";
+my $ACKW = "\x03\x00\x00\x00";
+my $PTABLE_END = "\xff\xff\xff\xff";
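+
+# The emitted table is a sequence of little-endian 32-bit words: the
+# 0xbadc0ded magic, then one {key, address, original opcode} triplet per
+# patched instruction, a 0xffffffff terminator, and zero padding up to
+# $TABLE_SIZE bytes.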
+
+my $addr = "";
+my $opcode = "";
+my $function = "";
+
+sub add_entry {
+ my $key = shift;
+ my $value = shift;
+ my $default = shift;
+
+ $TABLE .= $key;
+ $TABLE .= pack("V", $value);
+ $TABLE .= pack("V", $default);
+}
+
+while (<>) {
+ $addr = $opcode = "";
+ /^\w{8}\s*<(.*)>:$/ and $function = $1;
+ /^\s*(\w+):\s*(\w{8})\s*/ and do {
+ $addr = $1;
+ $opcode = $2;
+ };
+
+ ($function eq 'wlc_update_slot_timing') and do {
+ # li a2,9 -- short slot time
+ ($opcode eq '24060009') and add_entry($SLT1, hex($addr), hex($opcode));
+ # li v0,519 -- 510 + short slot time
+ ($opcode eq '24020207') and add_entry($SLT2, hex($addr), hex($opcode));
+
+ # li a2,20 -- long slot time
+ ($opcode eq '24060014') and add_entry($SLT1, hex($addr), hex($opcode));
+ # li v0,530 -- 510 + long slot time
+ ($opcode eq '24020212') and add_entry($SLT2, hex($addr), hex($opcode));
+ };
+ ($function eq 'wlc_d11hdrs') and do {
+ # ori s6,s6,0x1 -- ack flag (new)
+ ($opcode eq '36d60001') and add_entry($ACKW, hex($addr), hex($opcode));
+ # ori s3,s3,0x1 -- ack flag (old)
+ ($opcode eq '36730001') and add_entry($ACKW, hex($addr), hex($opcode));
+ }
+}
+
+$TABLE .= $PTABLE_END;
+$TABLE .= ("\x00" x ($TABLE_SIZE - length($TABLE)));
+print $TABLE;
diff --git a/package/broadcom-wl/src/driver/pktq.h b/package/broadcom-wl/src/driver/pktq.h
new file mode 100644
index 000000000..7fe21815e
--- /dev/null
+++ b/package/broadcom-wl/src/driver/pktq.h
@@ -0,0 +1,97 @@
+/*
+ * Misc useful os-independent macros and functions.
+ *
+ * Copyright 2006, Broadcom Corporation
+ * All Rights Reserved.
+ *
+ * THIS SOFTWARE IS OFFERED "AS IS", AND BROADCOM GRANTS NO WARRANTIES OF ANY
+ * KIND, EXPRESS OR IMPLIED, BY STATUTE, COMMUNICATION OR OTHERWISE. BROADCOM
+ * SPECIFICALLY DISCLAIMS ANY IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A SPECIFIC PURPOSE OR NONINFRINGEMENT CONCERNING THIS SOFTWARE.
+ * $Id: bcmutils.h,v 1.1.1.16 2006/04/08 06:13:39 honor Exp $
+ */
+
+#ifndef _pktq_h_
+#define _pktq_h_
+#include <osl.h>
+
+/* osl multi-precedence packet queue */
+
+#define PKTQ_LEN_DEFAULT 128 /* Max 128 packets */
+#define PKTQ_MAX_PREC 16 /* Maximum precedence levels */
+
+struct pktq {
+ struct pktq_prec {
+ void *head; /* first packet to dequeue */
+ void *tail; /* last packet to dequeue */
+ uint16 len; /* number of queued packets */
+ uint16 max; /* maximum number of queued packets */
+ } q[PKTQ_MAX_PREC];
+ uint16 num_prec; /* number of precedences in use */
+ uint16 hi_prec; /* rapid dequeue hint (>= highest non-empty prec) */
+ uint16 max; /* total max packets */
+ uint16 len; /* total number of packets */
+};
+
+#define PKTQ_PREC_ITER(pq, prec) for (prec = (pq)->num_prec - 1; prec >= 0; prec--)
+
+/* forward definition of ether_addr structure used by some function prototypes */
+
+struct ether_addr;
+
+/* operations on a specific precedence in packet queue */
+
+#define pktq_psetmax(pq, prec, _max) ((pq)->q[prec].max = (_max))
+#define pktq_plen(pq, prec) ((pq)->q[prec].len)
+#define pktq_pavail(pq, prec) ((pq)->q[prec].max - (pq)->q[prec].len)
+#define pktq_pfull(pq, prec) ((pq)->q[prec].len >= (pq)->q[prec].max)
+#define pktq_pempty(pq, prec) ((pq)->q[prec].len == 0)
+
+#define pktq_ppeek(pq, prec) ((pq)->q[prec].head)
+#define pktq_ppeek_tail(pq, prec) ((pq)->q[prec].tail)
+
+extern void *pktq_penq(struct pktq *pq, int prec, void *p);
+extern void *pktq_penq_head(struct pktq *pq, int prec, void *p);
+extern void *pktq_pdeq(struct pktq *pq, int prec);
+extern void *pktq_pdeq_tail(struct pktq *pq, int prec);
+/* Empty the queue at a particular precedence level */
+extern void pktq_pflush(osl_t *osh, struct pktq *pq, int prec, bool dir);
+/* Remove a specified packet from its queue */
+extern bool pktq_pdel(struct pktq *pq, void *p, int prec);
+
+/* operations on a set of precedences in packet queue */
+
+extern int pktq_mlen(struct pktq *pq, uint prec_bmp);
+extern void *pktq_mdeq(struct pktq *pq, uint prec_bmp, int *prec_out);
+
+/* operations on packet queue as a whole */
+
+#define pktq_len(pq) ((int)(pq)->len)
+#define pktq_max(pq) ((int)(pq)->max)
+#define pktq_avail(pq) ((int)((pq)->max - (pq)->len))
+#define pktq_full(pq) ((pq)->len >= (pq)->max)
+#define pktq_empty(pq) ((pq)->len == 0)
+
+/* operations for single precedence queues */
+#define pktenq(pq, p) pktq_penq((pq), 0, (p))
+#define pktenq_head(pq, p) pktq_penq_head((pq), 0, (p))
+#define pktdeq(pq) pktq_pdeq((pq), 0)
+#define pktdeq_tail(pq) pktq_pdeq_tail((pq), 0)
+
+extern void pktq_init(struct pktq *pq, int num_prec, int max_len);
+/* prec_out may be NULL if the caller is not interested in the dequeued precedence */
+extern void *pktq_deq(struct pktq *pq, int *prec_out);
+extern void *pktq_deq_tail(struct pktq *pq, int *prec_out);
+extern void *pktq_peek(struct pktq *pq, int *prec_out);
+extern void *pktq_peek_tail(struct pktq *pq, int *prec_out);
+extern void pktq_flush(osl_t *osh, struct pktq *pq, bool dir); /* Empty the entire queue */
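+
+/*
+ * Usage sketch ("osh" and the packet "p" are assumed to come from the
+ * caller; pktq_deq is expected to serve the highest non-empty precedence
+ * first, per the hi_prec hint above):
+ *
+ *	struct pktq q;
+ *	int prec;
+ *
+ *	pktq_init(&q, 4, PKTQ_LEN_DEFAULT);
+ *	if (!pktq_pfull(&q, 2))
+ *		pktq_penq(&q, 2, p);
+ *	p = pktq_deq(&q, &prec);
+ */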
+
+/* externs */
+/* packet */
+extern uint pktcopy(osl_t *osh, void *p, uint offset, int len, uchar *buf);
+extern uint pkttotlen(osl_t *osh, void *p);
+extern void *pktlast(osl_t *osh, void *p);
+
+extern void pktsetprio(void *pkt, bool update_vtag);
+
+#endif /* _pktq_h_ */
diff --git a/package/broadcom-wl/src/driver/sbhnddma.h b/package/broadcom-wl/src/driver/sbhnddma.h
new file mode 100644
index 000000000..a26db7395
--- /dev/null
+++ b/package/broadcom-wl/src/driver/sbhnddma.h
@@ -0,0 +1,284 @@
+/*
+ * Generic Broadcom Home Networking Division (HND) DMA engine HW interface
+ * This supports the following chips: BCM42xx, 44xx, 47xx.
+ *
+ * Copyright 2006, Broadcom Corporation
+ * All Rights Reserved.
+ *
+ * THIS SOFTWARE IS OFFERED "AS IS", AND BROADCOM GRANTS NO WARRANTIES OF ANY
+ * KIND, EXPRESS OR IMPLIED, BY STATUTE, COMMUNICATION OR OTHERWISE. BROADCOM
+ * SPECIFICALLY DISCLAIMS ANY IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A SPECIFIC PURPOSE OR NONINFRINGEMENT CONCERNING THIS SOFTWARE.
+ *
+ * $Id: sbhnddma.h,v 1.1.1.2 2006/02/27 03:43:16 honor Exp $
+ */
+
+#ifndef _sbhnddma_h_
+#define _sbhnddma_h_
+
+/* DMA structure:
+ * supports two DMA engines: 32-bit or 64-bit addressing
+ * the basic DMA register set is per channel (transmit or receive)
+ * a pair of channels is defined for convenience
+ */
+
+
+/* 32 bits addressing */
+
+/* dma registers per channel (xmt or rcv) */
+typedef volatile struct {
+ uint32 control; /* enable, et al */
+ uint32 addr; /* descriptor ring base address (4K aligned) */
+ uint32 ptr; /* last descriptor posted to chip */
+ uint32 status; /* current active descriptor, et al */
+} dma32regs_t;
+
+typedef volatile struct {
+ dma32regs_t xmt; /* dma tx channel */
+ dma32regs_t rcv; /* dma rx channel */
+} dma32regp_t;
+
+typedef volatile struct { /* diag access */
+ uint32 fifoaddr; /* diag address */
+ uint32 fifodatalow; /* low 32bits of data */
+ uint32 fifodatahigh; /* high 32bits of data */
+ uint32 pad; /* reserved */
+} dma32diag_t;
+
+/*
+ * DMA Descriptor
+ * Descriptors are only read by the hardware, never written back.
+ */
+typedef volatile struct {
+ uint32 ctrl; /* misc control bits & bufcount */
+ uint32 addr; /* data buffer address */
+} dma32dd_t;
+
+/*
+ * Each descriptor ring must be 4096-byte aligned, and fit within a single 4096-byte page.
+ */
+#define D32MAXRINGSZ 4096
+#define D32RINGALIGN 4096
+#define D32MAXDD (D32MAXRINGSZ / sizeof (dma32dd_t))
+
+/* transmit channel control */
+#define XC_XE ((uint32)1 << 0) /* transmit enable */
+#define XC_SE ((uint32)1 << 1) /* transmit suspend request */
+#define XC_LE ((uint32)1 << 2) /* loopback enable */
+#define XC_FL ((uint32)1 << 4) /* flush request */
+#define XC_AE ((uint32)3 << 16) /* address extension bits */
+#define XC_AE_SHIFT 16
+
+/* transmit descriptor table pointer */
+#define XP_LD_MASK 0xfff /* last valid descriptor */
+
+/* transmit channel status */
+#define XS_CD_MASK 0x0fff /* current descriptor pointer */
+#define XS_XS_MASK 0xf000 /* transmit state */
+#define XS_XS_SHIFT 12
+#define XS_XS_DISABLED 0x0000 /* disabled */
+#define XS_XS_ACTIVE 0x1000 /* active */
+#define XS_XS_IDLE 0x2000 /* idle wait */
+#define XS_XS_STOPPED 0x3000 /* stopped */
+#define XS_XS_SUSP 0x4000 /* suspend pending */
+#define XS_XE_MASK 0xf0000 /* transmit errors */
+#define XS_XE_SHIFT 16
+#define XS_XE_NOERR 0x00000 /* no error */
+#define XS_XE_DPE 0x10000 /* descriptor protocol error */
+#define XS_XE_DFU 0x20000 /* data fifo underrun */
+#define XS_XE_BEBR 0x30000 /* bus error on buffer read */
+#define XS_XE_BEDA 0x40000 /* bus error on descriptor access */
+#define XS_AD_MASK 0xfff00000 /* active descriptor */
+#define XS_AD_SHIFT 20
+
+/* receive channel control */
+#define RC_RE ((uint32)1 << 0) /* receive enable */
+#define RC_RO_MASK 0xfe /* receive frame offset */
+#define RC_RO_SHIFT 1
+#define RC_FM ((uint32)1 << 8) /* direct fifo receive (pio) mode */
+#define RC_AE ((uint32)3 << 16) /* address extension bits */
+#define RC_AE_SHIFT 16
+
+/* receive descriptor table pointer */
+#define RP_LD_MASK 0xfff /* last valid descriptor */
+
+/* receive channel status */
+#define RS_CD_MASK 0x0fff /* current descriptor pointer */
+#define RS_RS_MASK 0xf000 /* receive state */
+#define RS_RS_SHIFT 12
+#define RS_RS_DISABLED 0x0000 /* disabled */
+#define RS_RS_ACTIVE 0x1000 /* active */
+#define RS_RS_IDLE 0x2000 /* idle wait */
+#define	RS_RS_STOPPED	0x3000	/* stopped */
+#define RS_RE_MASK 0xf0000 /* receive errors */
+#define RS_RE_SHIFT 16
+#define RS_RE_NOERR 0x00000 /* no error */
+#define RS_RE_DPE 0x10000 /* descriptor protocol error */
+#define RS_RE_DFO 0x20000 /* data fifo overflow */
+#define RS_RE_BEBW 0x30000 /* bus error on buffer write */
+#define RS_RE_BEDA 0x40000 /* bus error on descriptor access */
+#define RS_AD_MASK 0xfff00000 /* active descriptor */
+#define RS_AD_SHIFT 20
+
+/* fifoaddr */
+#define FA_OFF_MASK 0xffff /* offset */
+#define FA_SEL_MASK 0xf0000 /* select */
+#define FA_SEL_SHIFT 16
+#define FA_SEL_XDD 0x00000 /* transmit dma data */
+#define FA_SEL_XDP 0x10000 /* transmit dma pointers */
+#define FA_SEL_RDD 0x40000 /* receive dma data */
+#define FA_SEL_RDP 0x50000 /* receive dma pointers */
+#define FA_SEL_XFD 0x80000 /* transmit fifo data */
+#define FA_SEL_XFP 0x90000 /* transmit fifo pointers */
+#define FA_SEL_RFD 0xc0000 /* receive fifo data */
+#define FA_SEL_RFP 0xd0000 /* receive fifo pointers */
+#define FA_SEL_RSD 0xe0000 /* receive frame status data */
+#define FA_SEL_RSP 0xf0000 /* receive frame status pointers */
+
+/* descriptor control flags */
+#define CTRL_BC_MASK 0x1fff /* buffer byte count */
+#define CTRL_AE ((uint32)3 << 16) /* address extension bits */
+#define CTRL_AE_SHIFT 16
+#define CTRL_EOT ((uint32)1 << 28) /* end of descriptor table */
+#define CTRL_IOC ((uint32)1 << 29) /* interrupt on completion */
+#define CTRL_EOF ((uint32)1 << 30) /* end of frame */
+#define CTRL_SOF ((uint32)1 << 31) /* start of frame */
+
+/* control flags in the range [27:20] are core-specific and not defined here */
+#define CTRL_CORE_MASK 0x0ff00000
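+
+/*
+ * Example (sketch): the control word for a frame that fits in a single
+ * buffer and happens to land on the last descriptor of the ring, as the
+ * tx path composes it:
+ *
+ *	ctrl = CTRL_SOF | CTRL_EOF | CTRL_IOC | CTRL_EOT | (len & CTRL_BC_MASK);
+ */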
+
+/* 64 bits addressing */
+
+/* dma registers per channel (xmt or rcv) */
+typedef volatile struct {
+ uint32 control; /* enable, et al */
+ uint32 ptr; /* last descriptor posted to chip */
+ uint32 addrlow; /* descriptor ring base address low 32-bits (8K aligned) */
+ uint32 addrhigh; /* descriptor ring base address bits 63:32 (8K aligned) */
+ uint32 status0; /* current descriptor, xmt state */
+ uint32 status1; /* active descriptor, xmt error */
+} dma64regs_t;
+
+typedef volatile struct {
+ dma64regs_t tx; /* dma64 tx channel */
+ dma64regs_t rx; /* dma64 rx channel */
+} dma64regp_t;
+
+typedef volatile struct { /* diag access */
+ uint32 fifoaddr; /* diag address */
+ uint32 fifodatalow; /* low 32bits of data */
+ uint32 fifodatahigh; /* high 32bits of data */
+ uint32 pad; /* reserved */
+} dma64diag_t;
+
+/*
+ * DMA Descriptor
+ * Descriptors are only read by the hardware, never written back.
+ */
+typedef volatile struct {
+ uint32 ctrl1; /* misc control bits & bufcount */
+ uint32 ctrl2; /* buffer count and address extension */
+	uint32	addrlow;	/* memory address of the data buffer, bits 31:0 */
+	uint32	addrhigh;	/* memory address of the data buffer, bits 63:32 */
+} dma64dd_t;
+
+/*
+ * Each descriptor ring must be 8 KB aligned, and fit within a contiguous 8 KB physical address range.
+ */
+#define D64MAXRINGSZ 8192
+#define D64RINGALIGN 8192
+#define D64MAXDD (D64MAXRINGSZ / sizeof (dma64dd_t))
+
+/* transmit channel control */
+#define D64_XC_XE 0x00000001 /* transmit enable */
+#define D64_XC_SE 0x00000002 /* transmit suspend request */
+#define D64_XC_LE 0x00000004 /* loopback enable */
+#define D64_XC_FL 0x00000010 /* flush request */
+#define D64_XC_AE 0x00030000 /* address extension bits */
+#define D64_XC_AE_SHIFT 16
+
+/* transmit descriptor table pointer */
+#define D64_XP_LD_MASK 0x00000fff /* last valid descriptor */
+
+/* transmit channel status */
+#define D64_XS0_CD_MASK 0x00001fff /* current descriptor pointer */
+#define D64_XS0_XS_MASK 0xf0000000 /* transmit state */
+#define D64_XS0_XS_SHIFT 28
+#define D64_XS0_XS_DISABLED 0x00000000 /* disabled */
+#define D64_XS0_XS_ACTIVE 0x10000000 /* active */
+#define D64_XS0_XS_IDLE 0x20000000 /* idle wait */
+#define D64_XS0_XS_STOPPED 0x30000000 /* stopped */
+#define D64_XS0_XS_SUSP 0x40000000 /* suspend pending */
+
+#define D64_XS1_AD_MASK 0x0001ffff /* active descriptor */
+#define D64_XS1_XE_MASK 0xf0000000 /* transmit errors */
+#define D64_XS1_XE_SHIFT 28
+#define D64_XS1_XE_NOERR 0x00000000 /* no error */
+#define D64_XS1_XE_DPE 0x10000000 /* descriptor protocol error */
+#define D64_XS1_XE_DFU 0x20000000 /* data fifo underrun */
+#define D64_XS1_XE_DTE 0x30000000 /* data transfer error */
+#define D64_XS1_XE_DESRE 0x40000000 /* descriptor read error */
+#define D64_XS1_XE_COREE 0x50000000 /* core error */
+
+/* receive channel control */
+#define D64_RC_RE 0x00000001 /* receive enable */
+#define D64_RC_RO_MASK 0x000000fe /* receive frame offset */
+#define D64_RC_RO_SHIFT 1
+#define D64_RC_FM 0x00000100 /* direct fifo receive (pio) mode */
+#define D64_RC_AE 0x00030000 /* address extension bits */
+#define D64_RC_AE_SHIFT 16
+
+/* receive descriptor table pointer */
+#define D64_RP_LD_MASK 0x00000fff /* last valid descriptor */
+
+/* receive channel status */
+#define D64_RS0_CD_MASK 0x00001fff /* current descriptor pointer */
+#define D64_RS0_RS_MASK 0xf0000000 /* receive state */
+#define D64_RS0_RS_SHIFT 28
+#define D64_RS0_RS_DISABLED 0x00000000 /* disabled */
+#define D64_RS0_RS_ACTIVE 0x10000000 /* active */
+#define D64_RS0_RS_IDLE 0x20000000 /* idle wait */
+#define D64_RS0_RS_STOPPED 0x30000000 /* stopped */
+#define D64_RS0_RS_SUSP 0x40000000 /* suspend pending */
+
+#define D64_RS1_AD_MASK 0x0001ffff /* active descriptor */
+#define D64_RS1_RE_MASK 0xf0000000 /* receive errors */
+#define D64_RS1_RE_SHIFT 28
+#define D64_RS1_RE_NOERR 0x00000000 /* no error */
+#define D64_RS1_RE_DPO 0x10000000 /* descriptor protocol error */
+#define D64_RS1_RE_DFU 0x20000000 /* data fifo overflow */
+#define D64_RS1_RE_DTE 0x30000000 /* data transfer error */
+#define D64_RS1_RE_DESRE 0x40000000 /* descriptor read error */
+#define D64_RS1_RE_COREE 0x50000000 /* core error */
+
+/* fifoaddr */
+#define D64_FA_OFF_MASK 0xffff /* offset */
+#define D64_FA_SEL_MASK 0xf0000 /* select */
+#define D64_FA_SEL_SHIFT 16
+#define D64_FA_SEL_XDD 0x00000 /* transmit dma data */
+#define D64_FA_SEL_XDP 0x10000 /* transmit dma pointers */
+#define D64_FA_SEL_RDD 0x40000 /* receive dma data */
+#define D64_FA_SEL_RDP 0x50000 /* receive dma pointers */
+#define D64_FA_SEL_XFD 0x80000 /* transmit fifo data */
+#define D64_FA_SEL_XFP 0x90000 /* transmit fifo pointers */
+#define D64_FA_SEL_RFD 0xc0000 /* receive fifo data */
+#define D64_FA_SEL_RFP 0xd0000 /* receive fifo pointers */
+#define D64_FA_SEL_RSD 0xe0000 /* receive frame status data */
+#define D64_FA_SEL_RSP 0xf0000 /* receive frame status pointers */
+
+/* descriptor control flags 1 */
+#define D64_CTRL1_EOT ((uint32)1 << 28) /* end of descriptor table */
+#define D64_CTRL1_IOC ((uint32)1 << 29) /* interrupt on completion */
+#define D64_CTRL1_EOF ((uint32)1 << 30) /* end of frame */
+#define D64_CTRL1_SOF ((uint32)1 << 31) /* start of frame */
+
+/* descriptor control flags 2 */
+#define D64_CTRL2_BC_MASK 0x00007fff /* buffer byte count mask */
+#define D64_CTRL2_AE 0x00030000 /* address extension bits */
+#define D64_CTRL2_AE_SHIFT 16
+
+/* control flags in the range [27:20] are core-specific and not defined here */
+#define D64_CTRL_CORE_MASK 0x0ff00000
+
+
+#endif /* _sbhnddma_h_ */