Diffstat (limited to 'target/linux/adm5120')
5 files changed, 604 insertions, 767 deletions
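Most of the churn below comes from one structural change: the OHCI-style shared pending list, per-ED td_list shadow lists and done-list hashing are dropped in favour of a per-endpoint queue, with one active URB (ed->urb_active) and the rest waiting on ed->urb_pending, refilled from the SOF interrupt. The following is a minimal user-space model of that queueing discipline only, using hypothetical names (urb_model, ed_model, enqueue, next_urb, complete_active); it is not code from the patch and it ignores locking, DMA and the hardware ED/TD layout.

/* Illustrative model of the per-ED queue introduced by this patch:
 * one URB is active on an endpoint at a time, the rest wait on a
 * pending list. All names here are hypothetical. */
#include <stdio.h>
#include <stddef.h>

struct urb_model {
	int id;
	struct urb_model *next;		/* simple singly linked pending list */
};

struct ed_model {
	struct urb_model *active;	/* mirrors ed->urb_active */
	struct urb_model *pending;	/* mirrors ed->urb_pending */
	struct urb_model **pending_tail;
};

/* like admhc_urb_enqueue(): append the URB to the ED's pending queue */
static void enqueue(struct ed_model *ed, struct urb_model *urb)
{
	urb->next = NULL;
	*ed->pending_tail = urb;
	ed->pending_tail = &urb->next;
}

/* like ed_next_urb(): promote the first pending URB if the ED is idle;
 * returns nonzero while there is still work on this ED */
static int next_urb(struct ed_model *ed)
{
	if (ed->active || !ed->pending)
		return ed->active != NULL;
	ed->active = ed->pending;
	ed->pending = ed->pending->next;
	if (!ed->pending)
		ed->pending_tail = &ed->pending;
	return 1;
}

/* like ed_update(): retire the active URB and give it back */
static void complete_active(struct ed_model *ed)
{
	if (!ed->active)
		return;
	printf("gave back urb %d\n", ed->active->id);
	ed->active = NULL;
}

int main(void)
{
	struct ed_model ed = { .pending_tail = &ed.pending };
	struct urb_model u1 = { .id = 1 }, u2 = { .id = 2 };

	enqueue(&ed, &u1);
	enqueue(&ed, &u2);
	/* promote and (in this model) immediately retire; the real driver
	 * retires in ed_update() only once the URB's TDs have completed */
	while (next_urb(&ed))
		complete_active(&ed);
	return 0;
}

The driver equivalents are admhc_urb_enqueue() appending the urb_priv to ed->urb_pending, ed_next_urb() promoting the head of that list from the SOF refill path, and ed_update() giving the finished URB back and returning the ED to idle.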
diff --git a/target/linux/adm5120/files/drivers/usb/host/adm5120-dbg.c b/target/linux/adm5120/files/drivers/usb/host/adm5120-dbg.c index 817d97548..bb83450eb 100644 --- a/target/linux/adm5120/files/drivers/usb/host/adm5120-dbg.c +++ b/target/linux/adm5120/files/drivers/usb/host/adm5120-dbg.c @@ -11,55 +11,108 @@ #ifdef DEBUG -#define edstring(ed_type) ({ char *temp; \ - switch (ed_type) { \ - case PIPE_CONTROL: temp = "ctrl"; break; \ - case PIPE_BULK: temp = "bulk"; break; \ - case PIPE_INTERRUPT: temp = "intr"; break; \ - default: temp = "isoc"; break; \ - }; temp;}) -#define pipestring(pipe) edstring(usb_pipetype(pipe)) +static inline char *ed_typestring(int ed_type) +{ + switch (ed_type) { + case PIPE_CONTROL: + return "ctrl"; + case PIPE_BULK: + return "bulk"; + case PIPE_INTERRUPT: + return "intr"; + case PIPE_ISOCHRONOUS: + return "isoc"; + } + return "(bad ed_type)"; +} + +static inline char *ed_statestring(int state) +{ + switch (state) { + case ED_IDLE: + return "IDLE"; + case ED_UNLINK: + return "UNLINK"; + case ED_OPER: + return "OPER"; + case ED_NEW: + return "NEW"; + } + return "?STATE"; +} + +static inline char *pipestring(int pipe) +{ + return ed_typestring(usb_pipetype(pipe)); +} + +static inline char *td_pidstring(u32 info) +{ + switch (info & TD_DP) { + case TD_DP_SETUP: + return "SETUP"; + case TD_DP_IN: + return "IN"; + case TD_DP_OUT: + return "OUT"; + } + return "?PID"; +} + +static inline char *td_togglestring(u32 info) +{ + switch (info & TD_T) { + case TD_T_DATA0: + return "DATA0"; + case TD_T_DATA1: + return "DATA1"; + case TD_T_CARRY: + return "CARRY"; + } + return "?TOGGLE"; +} /* debug| print the main components of an URB * small: 0) header + data packets 1) just header */ static void __attribute__((unused)) -urb_print(struct urb * urb, char * str, int small) +urb_print(struct admhcd *ahcd, struct urb * urb, char * str, int small) { - unsigned int pipe= urb->pipe; + unsigned int pipe = urb->pipe; if (!urb->dev || !urb->dev->bus) { - dbg("%s URB: no dev", str); + admhc_dbg(ahcd, "%s URB: no dev", str); return; } #ifndef ADMHC_VERBOSE_DEBUG if (urb->status != 0) #endif - dbg("%s %p dev=%d ep=%d%s-%s flags=%x len=%d/%d stat=%d", - str, - urb, - usb_pipedevice (pipe), - usb_pipeendpoint (pipe), - usb_pipeout(pipe)? "out" : "in", - pipestring(pipe), - urb->transfer_flags, - urb->actual_length, - urb->transfer_buffer_length, - urb->status); + admhc_dbg(ahcd, "URB-%s %p dev=%d ep=%d%s-%s flags=%x len=%d/%d " + "stat=%d\n", + str, + urb, + usb_pipedevice(pipe), + usb_pipeendpoint(pipe), + usb_pipeout(pipe)? "out" : "in", + pipestring(pipe), + urb->transfer_flags, + urb->actual_length, + urb->transfer_buffer_length, + urb->status); #ifdef ADMHC_VERBOSE_DEBUG if (!small) { int i, len; if (usb_pipecontrol(pipe)) { - printk(KERN_DEBUG __FILE__ ": setup(8):"); + admhc_dbg(admhc, "setup(8): "); for (i = 0; i < 8 ; i++) printk (" %02x", ((__u8 *) urb->setup_packet) [i]); printk ("\n"); } if (urb->transfer_buffer_length > 0 && urb->transfer_buffer) { - printk(KERN_DEBUG __FILE__ ": data(%d/%d):", + admhc_dbg(admhc, "data(%d/%d): ", urb->actual_length, urb->transfer_buffer_length); len = usb_pipeout(pipe)? 
@@ -83,12 +136,8 @@ urb_print(struct urb * urb, char * str, int small) } while (0); -static void admhc_dump_intr_mask ( - struct admhcd *ahcd, - char *label, - u32 mask, - char **next, - unsigned *size) +static void admhc_dump_intr_mask(struct admhcd *ahcd, char *label, u32 mask, + char **next, unsigned *size) { admhc_dbg_sw(ahcd, next, size, "%s 0x%08x%s%s%s%s%s%s%s%s%s%s\n", label, @@ -106,12 +155,8 @@ static void admhc_dump_intr_mask ( ); } -static void maybe_print_eds ( - struct admhcd *ahcd, - char *label, - u32 value, - char **next, - unsigned *size) +static void maybe_print_eds(struct admhcd *ahcd, char *label, u32 value, + char **next, unsigned *size) { if (value) admhc_dbg_sw(ahcd, next, size, "%s %08x\n", label, value); @@ -129,7 +174,7 @@ static char *buss2string (int state) case ADMHC_BUSS_SUSPEND: return "suspend"; } - return "(bad state)"; + return "?state"; } static void @@ -152,7 +197,7 @@ admhc_dump_status(struct admhcd *ahcd, char **next, unsigned *size) admhc_dbg_sw(ahcd, next, size, "host_control 0x%08x BUSS=%s%s\n", temp, - buss2string (temp & ADMHC_HC_BUSS), + buss2string(temp & ADMHC_HC_BUSS), (temp & ADMHC_HC_DMAE) ? " DMAE" : "" ); @@ -245,59 +290,42 @@ static const char data1[] = "DATA1"; static void admhc_dump_td(const struct admhcd *ahcd, const char *label, const struct td *td) { - u32 tmp = hc32_to_cpup(ahcd, &td->hwINFO); + u32 tmp; admhc_dbg(ahcd, "%s td %p; urb %p index %d; hwNextTD %08x\n", label, td, td->urb, td->index, hc32_to_cpup(ahcd, &td->hwNextTD)); - if ((td->flags & TD_FLAG_ISO) == 0) { - const char *toggle, *pid; - - switch (tmp & TD_T) { - case TD_T_DATA0: toggle = data0; break; - case TD_T_DATA1: toggle = data1; break; - case TD_T_CARRY: toggle = "CARRY"; break; - default: toggle = "(bad toggle)"; break; - } - switch (tmp & TD_DP) { - case TD_DP_SETUP: pid = "SETUP"; break; - case TD_DP_IN: pid = "IN"; break; - case TD_DP_OUT: pid = "OUT"; break; - default: pid = "(bad pid)"; break; - } - admhc_dbg(ahcd, - " status %08x%s CC=%x EC=%d %s %s ISI=%x FN=%x\n", - tmp, - (tmp & TD_OWN) ? " OWN" : "", - TD_CC_GET(tmp), - TD_EC_GET(tmp), - toggle, - pid, - TD_ISI_GET(tmp), - TD_FN_GET(tmp)); - } else { -#if 0 /* TODO: remove */ - unsigned i; - admhc_dbg(ahcd, " info %08x CC=%x FC=%d DI=%d SF=%04x\n", tmp, - TD_CC_GET(tmp), - (tmp >> 24) & 0x07, - (tmp & TD_DI) >> 21, - tmp & 0x0000ffff); - admhc_dbg(ahcd, " bp0 %08x be %08x\n", - hc32_to_cpup (ahcd, &td->hwCBP) & ~0x0fff, - hc32_to_cpup (ahcd, &td->hwBE)); -#endif - } + tmp = hc32_to_cpup(ahcd, &td->hwINFO); + admhc_dbg(ahcd, " status %08x%s CC=%x EC=%d %s %s ISI=%x FN=%x\n", + tmp, + (tmp & TD_OWN) ? " OWN" : "", + TD_CC_GET(tmp), + TD_EC_GET(tmp), + td_togglestring(tmp), + td_pidstring(tmp), + TD_ISI_GET(tmp), + TD_FN_GET(tmp)); tmp = hc32_to_cpup(ahcd, &td->hwCBL); admhc_dbg(ahcd, " dbp %08x; cbl %08x; LEN=%d%s\n", - hc32_to_cpup (ahcd, &td->hwDBP), + hc32_to_cpup(ahcd, &td->hwDBP), tmp, TD_BL_GET(tmp), - (tmp & TD_IE) ? " IE" : "" - ); + (tmp & TD_IE) ? " IE" : ""); +} + +static void admhc_dump_up(const struct admhcd *ahcd, const char *label, + const struct urb_priv *up) +{ + int i; + + admhc_dbg(ahcd, "%s urb/%p:\n", label, up->urb); + for (i = 0; i < up->td_cnt; i++) { + struct td *td = up->td[i]; + admhc_dump_td(ahcd, " ->", td); + } } /* caller MUST own hcd spinlock if verbose is set! 
*/ @@ -307,10 +335,10 @@ admhc_dump_ed(const struct admhcd *ahcd, const char *label, { u32 tmp = hc32_to_cpu(ahcd, ed->hwINFO); - admhc_dbg(ahcd, "%s ed %p state 0x%x type %s; next ed %08x\n", + admhc_dbg(ahcd, "%s ed %p %s type %s; next ed %08x\n", label, - ed, ed->state, edstring (ed->type), - hc32_to_cpup (ahcd, &ed->hwNextED)); + ed, ed_statestring(ed->state), ed_typestring(ed->type), + hc32_to_cpup(ahcd, &ed->hwNextED)); admhc_dbg(ahcd, " info %08x MAX=%d%s%s%s%s EP=%d DEV=%d\n", tmp, ED_MPS_GET(tmp), @@ -322,23 +350,22 @@ admhc_dump_ed(const struct admhcd *ahcd, const char *label, ED_FA_GET(tmp)); tmp = hc32_to_cpup(ahcd, &ed->hwHeadP); - admhc_dbg(ahcd, " tds: head %08x tail %08x %s%s%s\n", + admhc_dbg(ahcd, " tds: head %08x tail %08x %s%s\n", tmp & TD_MASK, - hc32_to_cpup (ahcd, &ed->hwTailP), + hc32_to_cpup(ahcd, &ed->hwTailP), (tmp & ED_C) ? data1 : data0, - (tmp & ED_H) ? " HALT" : "", - verbose ? " td list follows" : " (not listing)"); + (tmp & ED_H) ? " HALT" : ""); - if (verbose) { - struct list_head *tmp; - - /* use ed->td_list because HC concurrently modifies - * hwNextTD as it accumulates ed_donelist. - */ - list_for_each(tmp, &ed->td_list) { - struct td *td; - td = list_entry(tmp, struct td, td_list); - admhc_dump_td (ahcd, " ->", td); + if (ed->urb_active) + admhc_dump_up(ahcd, " active ", ed->urb_active); + + if ((verbose) && (!list_empty(&ed->urb_pending))) { + struct list_head *entry; + /* dump pending URBs */ + list_for_each(entry, &ed->urb_pending) { + struct urb_priv *up; + up = list_entry(entry, struct urb_priv, pending); + admhc_dump_up(ahcd, " pending ", up); } } } @@ -346,6 +373,8 @@ admhc_dump_ed(const struct admhcd *ahcd, const char *label, #else /* ifdef DEBUG */ static inline void urb_print(struct urb * urb, char * str, int small) {} +static inline void admhc_dump_up(const struct admhcd *ahcd, const char *label, + const struct urb_priv *up) {} static inline void admhc_dump_ed(const struct admhcd *ahcd, const char *label, const struct ed *ed, int verbose) {} static inline void admhc_dump(struct admhcd *ahcd, int verbose) {} @@ -364,6 +393,44 @@ static inline void remove_debug_files(struct admhcd *bus) { } #else static ssize_t +show_urb_priv(struct admhcd *ahcd, char *buf, size_t count, + struct urb_priv *up) +{ + unsigned temp, size = count; + int i; + + if (!up) + return 0; + + temp = scnprintf(buf, size,"\n\turb %p ", up->urb); + size -= temp; + buf += temp; + + for (i = 0; i< up->td_cnt; i++) { + struct td *td; + u32 dbp, cbl, info; + + td = up->td[i]; + info = hc32_to_cpup(ahcd, &td->hwINFO); + dbp = hc32_to_cpup(ahcd, &td->hwDBP); + cbl = hc32_to_cpup(ahcd, &td->hwCBL); + + temp = scnprintf(buf, size, + "\n\t\ttd %p %s %d %s%scc=%x (%08x,%08x)", + td, + td_pidstring(info), + TD_BL_GET(cbl), + (info & TD_OWN) ? "WORK " : "DONE ", + (cbl & TD_IE) ? 
"IE " : "", + TD_CC_GET(info), info, cbl); + size -= temp; + buf += temp; + } + + return count - size; +} + +static ssize_t show_list(struct admhcd *ahcd, char *buf, size_t count, struct ed *ed) { unsigned temp, size = count; @@ -371,23 +438,15 @@ show_list(struct admhcd *ahcd, char *buf, size_t count, struct ed *ed) if (!ed) return 0; -#if 0 - /* print first --> last */ - while (ed->ed_prev) - ed = ed->ed_prev; -#endif - /* dump a snapshot of the bulk or control schedule */ while (ed) { u32 info = hc32_to_cpu(ahcd, ed->hwINFO); u32 headp = hc32_to_cpu(ahcd, ed->hwHeadP); - struct list_head *entry; - struct td *td; temp = scnprintf(buf, size, "ed/%p %s %cs dev%d ep%d %s%smax %d %08x%s%s %s", ed, - edstring (ed->type), + ed_typestring (ed->type), (info & ED_SPEED_FULL) ? 'f' : 'l', info & ED_FA_MASK, (info >> ED_EN_SHIFT) & ED_EN_MASK, @@ -397,34 +456,36 @@ show_list(struct admhcd *ahcd, char *buf, size_t count, struct ed *ed) info, (info & ED_SKIP) ? " S" : "", (headp & ED_H) ? " H" : "", - (headp & ED_C) ? data1 : data0); + (headp & ED_C) ? "DATA1" : "DATA0"); size -= temp; buf += temp; - list_for_each(entry, &ed->td_list) { - u32 dbp, cbl; - - td = list_entry(entry, struct td, td_list); - info = hc32_to_cpup (ahcd, &td->hwINFO); - dbp = hc32_to_cpup (ahcd, &td->hwDBP); - cbl = hc32_to_cpup (ahcd, &td->hwCBL); - - temp = scnprintf(buf, size, - "\n\ttd %p %s %d %s%scc=%x urb %p (%08x,%08x)", - td, - ({ char *pid; - switch (info & TD_DP) { - case TD_DP_SETUP: pid = "setup"; break; - case TD_DP_IN: pid = "in"; break; - case TD_DP_OUT: pid = "out"; break; - default: pid = "(bad pid)"; break; - } pid;}), - TD_BL_GET(cbl), - (info & TD_OWN) ? "" : "DONE ", - (cbl & TD_IE) ? "IE " : "", - TD_CC_GET (info), td->urb, info, cbl); + if (ed->urb_active) { + temp = scnprintf(buf, size, "\nactive urb:"); size -= temp; buf += temp; + + temp = show_urb_priv(ahcd, buf, size, ed->urb_active); + size -= temp; + buf += temp; + } + + if (!list_empty(&ed->urb_pending)) { + struct list_head *entry; + + temp = scnprintf(buf, size, "\npending urbs:"); + size -= temp; + buf += temp; + + list_for_each(entry, &ed->urb_pending) { + struct urb_priv *up; + up = list_entry(entry, struct urb_priv, + pending); + + temp = show_urb_priv(ahcd, buf, size, up); + size -= temp; + buf += temp; + } } temp = scnprintf(buf, size, "\n"); @@ -433,6 +494,7 @@ show_list(struct admhcd *ahcd, char *buf, size_t count, struct ed *ed) ed = ed->ed_next; } + return count - size; } @@ -451,17 +513,7 @@ show_async(struct class_device *class_dev, char *buf) /* display control and bulk lists together, for simplicity */ spin_lock_irqsave(&ahcd->lock, flags); -#if 0 - temp = show_list (ahcd, buf, PAGE_SIZE, ahcd->ed_tails[ED_TAIL_CONTROL]); - temp += show_list (ahcd, buf + temp, PAGE_SIZE - temp, - ahcd->ed_tails[ED_TAIL_BULK]); -#else -#ifdef ED_TAIL_ARRAY - temp = show_list(ahcd, buf, PAGE_SIZE, ahcd->ed_head); -#else temp = show_list(ahcd, buf, PAGE_SIZE, ahcd->ed_head); -#endif -#endif spin_unlock_irqrestore(&ahcd->lock, flags); return temp; @@ -474,6 +526,7 @@ static CLASS_DEVICE_ATTR(async, S_IRUGO, show_async, NULL); static ssize_t show_periodic(struct class_device *class_dev, char *buf) { +#if 0 struct usb_bus *bus; struct usb_hcd *hcd; struct admhcd *ahcd; @@ -564,6 +617,9 @@ show_periodic(struct class_device *class_dev, char *buf) kfree (seen); return PAGE_SIZE - size; +#else + return 0; +#endif } static CLASS_DEVICE_ATTR(periodic, S_IRUGO, show_periodic, NULL); diff --git a/target/linux/adm5120/files/drivers/usb/host/adm5120-hcd.c 
b/target/linux/adm5120/files/drivers/usb/host/adm5120-hcd.c index 3365a1d0a..fae5aa64f 100644 --- a/target/linux/adm5120/files/drivers/usb/host/adm5120-hcd.c +++ b/target/linux/adm5120/files/drivers/usb/host/adm5120-hcd.c @@ -45,7 +45,7 @@ #include "../core/hcd.h" #include "../core/hub.h" -#define DRIVER_VERSION "v0.01" +#define DRIVER_VERSION "v0.02" #define DRIVER_AUTHOR "Gabor Juhos <juhosg at openwrt.org>" #define DRIVER_DESC "ADMtek USB 1.1 Host Controller Driver" @@ -90,12 +90,14 @@ static int admhc_urb_enqueue(struct usb_hcd *hcd, struct usb_host_endpoint *ep, struct ed *ed; struct urb_priv *urb_priv; unsigned int pipe = urb->pipe; - int i, td_cnt = 0; + int td_cnt = 0; unsigned long flags; int retval = 0; #ifdef ADMHC_VERBOSE_DEBUG - urb_print(urb, "ENQ", usb_pipein(pipe)); + spin_lock_irqsave(&ahcd->lock, flags); + urb_print(ahcd, urb, "ENQEUE", usb_pipein(pipe)); + spin_unlock_irqrestore(&ahcd->lock, flags); #endif /* every endpoint has an ed, locate and maybe (re)initialize it */ @@ -112,10 +114,12 @@ static int admhc_urb_enqueue(struct usb_hcd *hcd, struct usb_host_endpoint *ep, /* 1 TD for setup, 1 for ACK, plus ... */ td_cnt = 2; - /* FALLTHROUGH */ + if (urb->transfer_buffer_length) + td_cnt++; + break; case PIPE_BULK: /* one TD for every 4096 Bytes (can be upto 8K) */ - td_cnt += urb->transfer_buffer_length / TD_DATALEN_MAX; + td_cnt = urb->transfer_buffer_length / TD_DATALEN_MAX; /* ... and for any remaining bytes ... */ if ((urb->transfer_buffer_length % TD_DATALEN_MAX) != 0) td_cnt++; @@ -151,6 +155,7 @@ static int admhc_urb_enqueue(struct usb_hcd *hcd, struct usb_host_endpoint *ep, return -ENOMEM; urb_priv->ed = ed; + urb_priv->urb = urb; spin_lock_irqsave(&ahcd->lock, flags); /* don't submit to a dead HC */ @@ -173,14 +178,8 @@ static int admhc_urb_enqueue(struct usb_hcd *hcd, struct usb_host_endpoint *ep, goto fail; } - /* schedule the ed if needed */ - if (ed->state == ED_IDLE) { -#ifndef LATE_ED_SCHEDULE - retval = ed_schedule(ahcd, ed); - if (retval < 0) - goto fail0; -#endif - if (ed->type == PIPE_ISOCHRONOUS) { + if (ed->type == PIPE_ISOCHRONOUS) { + if (ed->state == ED_NEW) { u16 frame = admhc_frame_no(ahcd); /* delay a few frames before the first TD */ @@ -192,29 +191,26 @@ static int admhc_urb_enqueue(struct usb_hcd *hcd, struct usb_host_endpoint *ep, /* yes, only URB_ISO_ASAP is supported, and * urb->start_frame is never used as input. 
*/ - } - } else if (ed->type == PIPE_ISOCHRONOUS) - urb->start_frame = ed->last_iso + ed->interval; + } else + urb->start_frame = ed->last_iso + ed->interval; + } - /* fill the TDs and link them to the ed; and - * enable that part of the schedule, if needed - * and update count of queued periodic urbs - */ urb->hcpriv = urb_priv; - td_submit_urb(ahcd, urb); + td_submit_urb(ahcd, urb_priv->urb); -#ifdef LATE_ED_SCHEDULE - if (ed->state == ED_IDLE) - retval = ed_schedule(ahcd, ed); -#endif + /* append it to the ED's queue */ + list_add_tail(&urb_priv->pending, &ed->urb_pending); - admhc_dump_ed(ahcd, "admhc_urb_enqueue", urb_priv->ed, 1); + /* schedule the ED */ + retval = ed_schedule(ahcd, ed); fail0: spin_unlock(&urb->lock); fail: - if (retval) + if (retval) { + urb_priv = urb->hcpriv; urb_priv_free(ahcd, urb_priv); + } spin_unlock_irqrestore(&ahcd->lock, flags); return retval; @@ -228,33 +224,44 @@ fail: */ static int admhc_urb_dequeue(struct usb_hcd *hcd, struct urb *urb) { - struct admhcd *ahcd = hcd_to_admhcd(hcd); - unsigned long flags; + struct admhcd *ahcd = hcd_to_admhcd(hcd); + struct urb_priv *up; + unsigned long flags; + + up = urb->hcpriv; + if (!up) + return 0; + + spin_lock_irqsave(&ahcd->lock, flags); #ifdef ADMHC_VERBOSE_DEBUG - urb_print(urb, "DEQ", 1); + urb_print(ahcd, urb, "DEQEUE", 1); #endif - spin_lock_irqsave(&ahcd->lock, flags); if (HC_IS_RUNNING(hcd->state)) { - struct urb_priv *urb_priv; - /* Unless an IRQ completed the unlink while it was being * handed to us, flag it for unlink and giveback, and force * some upcoming INTR_SF to call finish_unlinks() */ - urb_priv = urb->hcpriv; - if (urb_priv) { - if (urb_priv->ed->state == ED_OPER) - start_ed_unlink(ahcd, urb_priv->ed); + if (up->ed->urb_active != up) { + list_del(&up->pending); + finish_urb(ahcd, urb); + } else { + ed_start_deschedule(ahcd, up->ed); } } else { /* * with HC dead, we won't respect hc queue pointers * any more ... just clean up every urb's memory. 
*/ - if (urb->hcpriv) + if (up->ed->urb_active != up) { + list_del(&up->pending); finish_urb(ahcd, urb); + } else { + finish_urb(ahcd, urb); + up->ed->urb_active = NULL; + up->ed->state = ED_IDLE; + } } spin_unlock_irqrestore(&ahcd->lock, flags); @@ -266,9 +273,8 @@ static int admhc_urb_dequeue(struct usb_hcd *hcd, struct urb *urb) /* frees config/altsetting state for endpoints, * including ED memory, dummy TD, and bulk/intr data toggle */ - -static void -admhc_endpoint_disable(struct usb_hcd *hcd, struct usb_host_endpoint *ep) +static void admhc_endpoint_disable(struct usb_hcd *hcd, + struct usb_host_endpoint *ep) { struct admhcd *ahcd = hcd_to_admhcd(hcd); unsigned long flags; @@ -283,7 +289,7 @@ admhc_endpoint_disable(struct usb_hcd *hcd, struct usb_host_endpoint *ep) #ifdef ADMHC_VERBOSE_DEBUG spin_lock_irqsave(&ahcd->lock, flags); - admhc_dump_ed(ahcd, "ep_disable", ed, 1); + admhc_dump_ed(ahcd, "EP-DISABLE", ed, 1); spin_unlock_irqrestore(&ahcd->lock, flags); #endif @@ -292,8 +298,8 @@ rescan: if (!HC_IS_RUNNING(hcd->state)) { sanitize: - ed->state = ED_IDLE; - finish_unlinks(ahcd, 0); + ed->state = ED_UNLINK; + admhc_finish_unlinks(ahcd, 0); } switch (ed->state) { @@ -306,10 +312,11 @@ sanitize: spin_unlock_irqrestore(&ahcd->lock, flags); schedule_timeout_uninterruptible(1); goto rescan; - case ED_IDLE: /* fully unlinked */ - if (list_empty(&ed->td_list)) { - td_free (ahcd, ed->dummy); - ed_free (ahcd, ed); + case ED_IDLE: + case ED_NEW: /* fully unlinked */ + if (list_empty(&ed->urb_pending)) { + td_free(ahcd, ed->dummy); + ed_free(ahcd, ed); break; } /* else FALL THROUGH */ @@ -317,13 +324,16 @@ sanitize: /* caller was supposed to have unlinked any requests; * that's not our job. can't recover; must leak ed. */ - admhc_err(ahcd, "leak ed %p (#%02x) state %d%s\n", - ed, ep->desc.bEndpointAddress, ed->state, - list_empty(&ed->td_list) ? "" : " (has tds)"); - td_free(ahcd, ed->dummy); + admhc_err(ahcd, "leak ed %p (#%02x) %s act %p%s\n", + ed, ep->desc.bEndpointAddress, + ed_statestring(ed->state), + ed->urb_active, + list_empty(&ed->urb_pending) ? "" : " (has urbs)"); break; } + ep->hcpriv = NULL; + spin_unlock_irqrestore(&ahcd->lock, flags); return; } @@ -337,15 +347,8 @@ static int admhc_get_frame(struct usb_hcd *hcd) static void admhc_usb_reset(struct admhcd *ahcd) { -#if 0 - ahcd->hc_control = admhc_readl(ahcd, &ahcd->regs->control); - ahcd->hc_control &= OHCI_CTRL_RWC; - admhc_writel(ahcd, ahcd->hc_control, &ahcd->regs->control); -#else - /* FIXME */ ahcd->host_control = ADMHC_BUSS_RESET; admhc_writel(ahcd, ahcd->host_control ,&ahcd->regs->host_control); -#endif } /* admhc_shutdown forcibly disables IRQs and DMA, helping kexec and @@ -510,17 +513,6 @@ static int admhc_run(struct admhcd *ahcd) /* also: power/overcurrent flags in rhdesc */ } -#if 0 /* TODO: not applicable */ - /* Reset USB nearly "by the book". RemoteWakeupConnected was - * saved if boot firmware (BIOS/SMM/...) told us it's connected, - * or if bus glue did the same (e.g. for PCI add-in cards with - * PCI PM support). - */ - if ((ahcd->hc_control & OHCI_CTRL_RWC) != 0 - && !device_may_wakeup(hcd->self.controller)) - device_init_wakeup(hcd->self.controller, 1); -#endif - switch (ahcd->host_control & ADMHC_HC_BUSS) { case ADMHC_BUSS_OPER: temp = 0; @@ -658,6 +650,7 @@ static irqreturn_t admhc_irq(struct usb_hcd *hcd) ints &= admhc_readl(ahcd, ®s->int_enable); + spin_lock(&ahcd->lock); if (ints & ADMHC_INTR_FATI) { /* e.g. 
due to PCI Master/Target Abort */ admhc_disable(ahcd); @@ -666,6 +659,13 @@ static irqreturn_t admhc_irq(struct usb_hcd *hcd) admhc_usb_reset(ahcd); } + if (ints & ADMHC_INTR_BABI) { + admhc_intr_disable(ahcd, ADMHC_INTR_MIE); + admhc_err(ahcd, "Babble Detected\n"); + admhc_disable(ahcd); + admhc_usb_reset(ahcd); + } + if (ints & ADMHC_INTR_INSM) { admhc_vdbg(ahcd, "Root Hub Status Change\n"); ahcd->next_statechange = jiffies + STATECHANGE_DELAY; @@ -690,21 +690,17 @@ static irqreturn_t admhc_irq(struct usb_hcd *hcd) admhc_intr_ack(ahcd, ADMHC_INTR_RESI); hcd->poll_rh = 1; if (ahcd->autostop) { - spin_lock(&ahcd->lock); admhc_rh_resume(ahcd); - spin_unlock(&ahcd->lock); } else usb_hcd_resume_root_hub(hcd); } if (ints & ADMHC_INTR_TDC) { - admhc_vdbg(ahcd, "Transfer Descriptor Complete\n"); admhc_intr_ack(ahcd, ADMHC_INTR_TDC); if (HC_IS_RUNNING(hcd->state)) admhc_intr_disable(ahcd, ADMHC_INTR_TDC); - spin_lock(&ahcd->lock); + admhc_vdbg(ahcd, "Transfer Descriptor Complete\n"); admhc_td_complete(ahcd); - spin_unlock(&ahcd->lock); if (HC_IS_RUNNING(hcd->state)) admhc_intr_enable(ahcd, ADMHC_INTR_TDC); } @@ -714,49 +710,19 @@ static irqreturn_t admhc_irq(struct usb_hcd *hcd) admhc_vdbg(ahcd, "Schedule Overrun\n"); } - if (ints & ADMHC_INTR_BABI) { - admhc_intr_disable(ahcd, ADMHC_INTR_BABI); - admhc_intr_ack(ahcd, ADMHC_INTR_BABI); - admhc_err(ahcd, "Babble Detected\n"); - } - -#if 1 - spin_lock(&ahcd->lock); - if (ahcd->ed_rm_list) - finish_unlinks(ahcd, admhc_frame_no(ahcd)); - - if ((ints & ADMHC_INTR_SOFI) != 0 && !ahcd->ed_rm_list - && HC_IS_RUNNING(hcd->state)) - admhc_intr_disable(ahcd, ADMHC_INTR_SOFI); - spin_unlock(&ahcd->lock); -#else if (ints & ADMHC_INTR_SOFI) { - admhc_vdbg(ahcd, "Start Of Frame\n"); - spin_lock(&ahcd->lock); - + admhc_intr_ack(ahcd, ADMHC_INTR_SOFI); /* handle any pending ED removes */ - finish_unlinks(ahcd, admhc_frameno(ahcd)); - - /* leaving INTR_SOFI enabled when there's still unlinking - * to be done in the (next frame). 
- */ - if ((ahcd->ed_rm_list == NULL) || - HC_IS_RUNNING(hcd->state) == 0) - /* - * disable INTR_SOFI if there are no unlinking to be - * done (in the next frame) - */ - admhc_intr_disable(ahcd, ADMHC_INTR_SOFI); - - spin_unlock(&ahcd->lock); + admhc_finish_unlinks(ahcd, admhc_frame_no(ahcd)); + admhc_sof_refill(ahcd); } -#endif if (HC_IS_RUNNING(hcd->state)) { admhc_intr_ack(ahcd, ints); admhc_intr_enable(ahcd, ADMHC_INTR_MIE); admhc_writel_flush(ahcd); } + spin_unlock(&ahcd->lock); return IRQ_HANDLED; } @@ -804,7 +770,7 @@ static int admhc_restart(struct admhcd *ahcd) usb_root_hub_lost_power(admhcd_to_hcd(ahcd)->self.root_hub); if (!list_empty(&ahcd->pending)) admhc_dbg(ahcd, "abort schedule...\n"); - list_for_each_entry (priv, &ahcd->pending, pending) { + list_for_each_entry(priv, &ahcd->pending, pending) { struct urb *urb = priv->td[0]->urb; struct ed *ed = priv->ed; @@ -829,7 +795,7 @@ static int admhc_restart(struct admhcd *ahcd) urb->status = -ESHUTDOWN; spin_unlock(&urb->lock); } - finish_unlinks (ahcd, 0); + finish_unlinks(ahcd, 0); spin_unlock_irq(&ahcd->lock); /* paranoia, in case that didn't work: */ diff --git a/target/linux/adm5120/files/drivers/usb/host/adm5120-mem.c b/target/linux/adm5120/files/drivers/usb/host/adm5120-mem.c index 924221be1..3e9c2f0b9 100644 --- a/target/linux/adm5120/files/drivers/usb/host/adm5120-mem.c +++ b/target/linux/adm5120/files/drivers/usb/host/adm5120-mem.c @@ -27,7 +27,7 @@ static void admhc_hcd_init(struct admhcd *ahcd) { ahcd->next_statechange = jiffies; spin_lock_init(&ahcd->lock); - INIT_LIST_HEAD(&ahcd->pending); + spin_lock_init(&ahcd->dma_lock); } /*-------------------------------------------------------------------------*/ @@ -76,19 +76,6 @@ static void admhc_mem_cleanup(struct admhcd *ahcd) /*-------------------------------------------------------------------------*/ -/* ahcd "done list" processing needs this mapping */ -static inline struct td *dma_to_td(struct admhcd *ahcd, dma_addr_t td_dma) -{ - struct td *td; - - td_dma &= TD_MASK; - td = ahcd->td_hash[TD_HASH_FUNC(td_dma)]; - while (td && td->td_dma != td_dma) - td = td->td_hash; - - return td; -} - /* TDs ... 
*/ static struct td *td_alloc(struct admhcd *ahcd, gfp_t mem_flags) { @@ -101,29 +88,13 @@ static struct td *td_alloc(struct admhcd *ahcd, gfp_t mem_flags) /* in case ahcd fetches it, make it look dead */ memset(td, 0, sizeof *td); - td->hwNextTD = cpu_to_hc32(ahcd, dma); td->td_dma = dma; - /* hashed in td_fill */ return td; } static void td_free(struct admhcd *ahcd, struct td *td) { - struct td **prev = &ahcd->td_hash[TD_HASH_FUNC(td->td_dma)]; - - while (*prev && *prev != td) - prev = &(*prev)->td_hash; - if (*prev) - *prev = td->td_hash; -#if 0 - /* TODO: remove */ - else if ((td->hwINFO & cpu_to_hc32(ahcd, TD_DONE)) != 0) - admhc_dbg (ahcd, "no hash for td %p\n", td); -#else - else if ((td->flags & TD_FLAG_DONE) != 0) - admhc_dbg (ahcd, "no hash for td %p\n", td); -#endif dma_pool_free(ahcd->td_cache, td, td->td_dma); } @@ -142,8 +113,7 @@ static struct ed *ed_alloc(struct admhcd *ahcd, gfp_t mem_flags) memset(ed, 0, sizeof(*ed)); ed->dma = dma; - INIT_LIST_HEAD(&ed->td_list); - INIT_LIST_HEAD(&ed->urb_list); + INIT_LIST_HEAD(&ed->urb_pending); return ed; } @@ -164,7 +134,6 @@ static void urb_priv_free(struct admhcd *ahcd, struct urb_priv *urb_priv) if (urb_priv->td[i]) td_free(ahcd, urb_priv->td[i]); - list_del(&urb_priv->pending); kfree(urb_priv); } @@ -172,6 +141,7 @@ static struct urb_priv *urb_priv_alloc(struct admhcd *ahcd, int num_tds, gfp_t mem_flags) { struct urb_priv *priv; + int i; /* allocate the private part of the URB */ priv = kzalloc(sizeof(*priv) + sizeof(struct td) * num_tds, mem_flags); @@ -179,13 +149,15 @@ static struct urb_priv *urb_priv_alloc(struct admhcd *ahcd, int num_tds, goto err; /* allocate the TDs (deferring hash chain updates) */ - for (priv->td_cnt = 0; priv->td_cnt < num_tds; priv->td_cnt++) { - priv->td[priv->td_cnt] = td_alloc(ahcd, mem_flags); - if (priv->td[priv->td_cnt] == NULL) + for (i = 0; i < num_tds; i++) { + priv->td[i] = td_alloc(ahcd, mem_flags); + if (priv->td[i] == NULL) goto err_free; + priv->td[i]->index = i; } INIT_LIST_HEAD(&priv->pending); + priv->td_cnt = num_tds; return priv; diff --git a/target/linux/adm5120/files/drivers/usb/host/adm5120-q.c b/target/linux/adm5120/files/drivers/usb/host/adm5120-q.c index 24542d273..2eafecc4f 100644 --- a/target/linux/adm5120/files/drivers/usb/host/adm5120-q.c +++ b/target/linux/adm5120/files/drivers/usb/host/adm5120-q.c @@ -38,7 +38,7 @@ __acquires(ahcd->lock) && urb->status == 0) { urb->status = -EREMOTEIO; #ifdef ADMHC_VERBOSE_DEBUG - urb_print(urb, "SHORT", usb_pipeout (urb->pipe)); + urb_print(ahcd, urb, "SHORT", usb_pipeout (urb->pipe)); #endif } spin_unlock(&urb->lock); @@ -53,7 +53,7 @@ __acquires(ahcd->lock) } #ifdef ADMHC_VERBOSE_DEBUG - urb_print(urb, "RET", usb_pipeout (urb->pipe)); + urb_print(ahcd, urb, "FINISH", 0); #endif /* urb->complete() can reenter this HCD */ @@ -67,189 +67,6 @@ __acquires(ahcd->lock) * ED handling functions *-------------------------------------------------------------------------*/ -#if 0 /* FIXME */ -/* search for the right schedule branch to use for a periodic ed. - * does some load balancing; returns the branch, or negative errno. - */ -static int balance(struct admhcd *ahcd, int interval, int load) -{ - int i, branch = -ENOSPC; - - /* iso periods can be huge; iso tds specify frame numbers */ - if (interval > NUM_INTS) - interval = NUM_INTS; - - /* search for the least loaded schedule branch of that period - * that has enough bandwidth left unreserved. 
- */ - for (i = 0; i < interval ; i++) { - if (branch < 0 || ahcd->load [branch] > ahcd->load [i]) { - int j; - - /* usb 1.1 says 90% of one frame */ - for (j = i; j < NUM_INTS; j += interval) { - if ((ahcd->load [j] + load) > 900) - break; - } - if (j < NUM_INTS) - continue; - branch = i; - } - } - return branch; -} -#endif - -/*-------------------------------------------------------------------------*/ - -#if 0 /* FIXME */ -/* both iso and interrupt requests have periods; this routine puts them - * into the schedule tree in the apppropriate place. most iso devices use - * 1msec periods, but that's not required. - */ -static void periodic_link (struct admhcd *ahcd, struct ed *ed) -{ - unsigned i; - - admhc_vdbg (ahcd, "link %sed %p branch %d [%dus.], interval %d\n", - (ed->hwINFO & cpu_to_hc32 (ahcd, ED_ISO)) ? "iso " : "", - ed, ed->branch, ed->load, ed->interval); - - for (i = ed->branch; i < NUM_INTS; i += ed->interval) { - struct ed **prev = &ahcd->periodic [i]; - __hc32 *prev_p = &ahcd->hcca->int_table [i]; - struct ed *here = *prev; - - /* sorting each branch by period (slow before fast) - * lets us share the faster parts of the tree. - * (plus maybe: put interrupt eds before iso) - */ - while (here && ed != here) { - if (ed->interval > here->interval) - break; - prev = &here->ed_next; - prev_p = &here->hwNextED; - here = *prev; - } - if (ed != here) { - ed->ed_next = here; - if (here) - ed->hwNextED = *prev_p; - wmb (); - *prev = ed; - *prev_p = cpu_to_hc32(ahcd, ed->dma); - wmb(); - } - ahcd->load [i] += ed->load; - } - admhcd_to_hcd(ahcd)->self.bandwidth_allocated += ed->load / ed->interval; -} -#endif - -/* link an ed into the HC chain */ - -static int ed_schedule(struct admhcd *ahcd, struct ed *ed) -{ - struct ed *old_tail; - - if (admhcd_to_hcd(ahcd)->state == HC_STATE_QUIESCING) - return -EAGAIN; - - ed->state = ED_OPER; - - old_tail = ahcd->ed_tails[ed->type]; - - ed->ed_next = old_tail->ed_next; - if (ed->ed_next) { - ed->ed_next->ed_prev = ed; - ed->hwNextED = cpu_to_hc32(ahcd, ed->ed_next->dma); - } - ed->ed_prev = old_tail; - - old_tail->ed_next = ed; - old_tail->hwNextED = cpu_to_hc32(ahcd, ed->dma); - - ahcd->ed_tails[ed->type] = ed; - - admhc_dma_enable(ahcd); - - return 0; -} - -/*-------------------------------------------------------------------------*/ - -#if 0 /* FIXME */ -/* scan the periodic table to find and unlink this ED */ -static void periodic_unlink (struct admhcd *ahcd, struct ed *ed) -{ - int i; - - for (i = ed->branch; i < NUM_INTS; i += ed->interval) { - struct ed *temp; - struct ed **prev = &ahcd->periodic [i]; - __hc32 *prev_p = &ahcd->hcca->int_table [i]; - - while (*prev && (temp = *prev) != ed) { - prev_p = &temp->hwNextED; - prev = &temp->ed_next; - } - if (*prev) { - *prev_p = ed->hwNextED; - *prev = ed->ed_next; - } - ahcd->load [i] -= ed->load; - } - - admhcd_to_hcd(ahcd)->self.bandwidth_allocated -= ed->load / ed->interval; - admhc_vdbg (ahcd, "unlink %sed %p branch %d [%dus.], interval %d\n", - (ed->hwINFO & cpu_to_hc32 (ahcd, ED_ISO)) ? "iso " : "", - ed, ed->branch, ed->load, ed->interval); -} -#endif - -/* unlink an ed from the HC chain. - * just the link to the ed is unlinked. - * the link from the ed still points to another operational ed or 0 - * so the HC can eventually finish the processing of the unlinked ed - * (assuming it already started that, which needn't be true). - * - * ED_UNLINK is a transient state: the HC may still see this ED, but soon - * it won't. 
ED_SKIP means the HC will finish its current transaction, - * but won't start anything new. The TD queue may still grow; device - * drivers don't know about this HCD-internal state. - * - * When the HC can't see the ED, something changes ED_UNLINK to one of: - * - * - ED_OPER: when there's any request queued, the ED gets rescheduled - * immediately. HC should be working on them. - * - * - ED_IDLE: when there's no TD queue. there's no reason for the HC - * to care about this ED; safe to disable the endpoint. - * - * When finish_unlinks() runs later, after SOF interrupt, it will often - * complete one or more URB unlinks before making that state change. - */ -static void ed_deschedule(struct admhcd *ahcd, struct ed *ed) -{ - ed->hwINFO |= cpu_to_hc32(ahcd, ED_SKIP); - wmb(); - ed->state = ED_UNLINK; - - /* remove this ED from the HC list */ - ed->ed_prev->hwNextED = ed->hwNextED; - - /* and remove it from our list also */ - ed->ed_prev->ed_next = ed->ed_next; - - if (ed->ed_next) - ed->ed_next->ed_prev = ed->ed_prev; - - if (ahcd->ed_tails[ed->type] == ed) - ahcd->ed_tails[ed->type] = ed->ed_prev; -} - -/*-------------------------------------------------------------------------*/ - static struct ed *ed_create(struct admhcd *ahcd, unsigned int type, u32 info) { struct ed *ed; @@ -274,12 +91,12 @@ static struct ed *ed_create(struct admhcd *ahcd, unsigned int type, u32 info) } ed->dummy = td; - ed->state = ED_IDLE; + ed->state = ED_NEW; ed->type = type; ed->hwINFO = cpu_to_hc32(ahcd, info); ed->hwTailP = cpu_to_hc32(ahcd, td->td_dma); - ed->hwHeadP = ed->hwTailP; /* ED_C, ED_H zeroed */ + ed->hwHeadP = cpu_to_hc32(ahcd, td->td_dma); return ed; @@ -321,76 +138,118 @@ static struct ed *ed_get(struct admhcd *ahcd, struct usb_host_endpoint *ep, return ed; } -/*-------------------------------------------------------------------------*/ +/* link an ed into the HC chain */ +static int ed_schedule(struct admhcd *ahcd, struct ed *ed) +{ + struct ed *old_tail; -/* request unlinking of an endpoint from an operational HC. 
- * put the ep on the rm_list - * real work is done at the next start frame (SOFI) hardware interrupt - * caller guarantees HCD is running, so hardware access is safe, - * and that ed->state is ED_OPER - */ -static void start_ed_unlink(struct admhcd *ahcd, struct ed *ed) + if (admhcd_to_hcd(ahcd)->state == HC_STATE_QUIESCING) + return -EAGAIN; + + if (ed->state != ED_NEW) + return 0; + + admhc_dump_ed(ahcd, "ED-SCHED", ed, 0); + + ed->state = ED_IDLE; + + admhc_dma_lock(ahcd); + ed->hwINFO &= ~cpu_to_hc32(ahcd, ED_SKIP); + + old_tail = ahcd->ed_tails[ed->type]; + + ed->ed_next = old_tail->ed_next; + if (ed->ed_next) { + ed->ed_next->ed_prev = ed; + ed->hwNextED = cpu_to_hc32(ahcd, ed->ed_next->dma); + } + ed->ed_prev = old_tail; + + old_tail->ed_next = ed; + old_tail->hwNextED = cpu_to_hc32(ahcd, ed->dma); + + ahcd->ed_tails[ed->type] = ed; + admhc_dma_unlock(ahcd); + + admhc_intr_enable(ahcd, ADMHC_INTR_SOFI); + + return 0; +} + +static void ed_deschedule(struct admhcd *ahcd, struct ed *ed) { - ed->hwINFO |= cpu_to_hc32 (ahcd, ED_DEQUEUE); - ed_deschedule(ahcd, ed); + admhc_dump_ed(ahcd, "ED-DESCHED", ed, 0); + + /* remove this ED from the HC list */ + admhc_dma_lock(ahcd); + ed->ed_prev->hwNextED = ed->hwNextED; + admhc_dma_unlock(ahcd); + + /* and remove it from our list */ + ed->ed_prev->ed_next = ed->ed_next; + + if (ed->ed_next) { + ed->ed_next->ed_prev = ed->ed_prev; + ed->ed_next = NULL; + } + + if (ahcd->ed_tails[ed->type] == ed) + ahcd->ed_tails[ed->type] = ed->ed_prev; + + ed->state = ED_NEW; +} + +static void ed_start_deschedule(struct admhcd *ahcd, struct ed *ed) +{ + admhc_dump_ed(ahcd, "ED-UNLINK", ed, 0); + + admhc_dma_lock(ahcd); + ed->hwINFO |= cpu_to_hc32(ahcd, ED_SKIP); + admhc_dma_unlock(ahcd); + + ed->state = ED_UNLINK; /* add this ED into the remove list */ ed->ed_rm_next = ahcd->ed_rm_list; ahcd->ed_rm_list = ed; - /* enable SOF interrupt */ - admhc_intr_ack(ahcd, ADMHC_INTR_SOFI); - admhc_intr_enable(ahcd, ADMHC_INTR_SOFI); - /* flush those writes */ - admhc_writel_flush(ahcd); - /* SOF interrupt might get delayed; record the frame counter value that * indicates when the HC isn't looking at it, so concurrent unlinks * behave. frame_no wraps every 2^16 msec, and changes right before * SOF is triggered. 
*/ ed->tick = admhc_frame_no(ahcd) + 1; + + /* enable SOF interrupt */ + admhc_intr_enable(ahcd, ADMHC_INTR_SOFI); } /*-------------------------------------------------------------------------* * TD handling functions *-------------------------------------------------------------------------*/ -/* enqueue next TD for this URB (OHCI spec 5.2.8.2) */ - -static void -td_fill(struct admhcd *ahcd, u32 info, dma_addr_t data, int len, - struct urb *urb, int index) +static void td_fill(struct admhcd *ahcd, u32 info, dma_addr_t data, int len, + struct urb_priv *up) { - struct td *td, *td_pt; - struct urb_priv *urb_priv = urb->hcpriv; - int hash; - u32 cbl = 0; - -#if 1 - if (index == (urb_priv->td_cnt - 1) && - ((urb->transfer_flags & URB_NO_INTERRUPT) == 0)) - cbl |= TD_IE; -#else - if (index == (urb_priv->td_cnt - 1)) - cbl |= TD_IE; -#endif - - /* use this td as the next dummy */ - td_pt = urb_priv->td[index]; + struct td *td; + u32 cbl = 0; - /* fill the old dummy TD */ - td = urb_priv->td[index] = urb_priv->ed->dummy; - urb_priv->ed->dummy = td_pt; + if (up->td_idx >= up->td_cnt) { + admhc_dbg(ahcd, "td_fill error, idx=%d, cnt=%d\n", up->td_idx, + up->td_cnt); + return; + } - td->ed = urb_priv->ed; - td->next_dl_td = NULL; - td->index = index; - td->urb = urb; + td = up->td[up->td_idx]; td->data_dma = data; if (!len) data = 0; +#if 1 + if (up->td_idx == up->td_cnt-1) +#endif + cbl |= TD_IE; + if (data) cbl |= (len & TD_BL_MASK); @@ -400,19 +259,11 @@ td_fill(struct admhcd *ahcd, u32 info, dma_addr_t data, int len, td->hwINFO = cpu_to_hc32(ahcd, info); td->hwDBP = cpu_to_hc32(ahcd, data); td->hwCBL = cpu_to_hc32(ahcd, cbl); - td->hwNextTD = cpu_to_hc32(ahcd, td_pt->td_dma); - - /* append to queue */ - list_add_tail(&td->td_list, &td->ed->td_list); - /* hash it for later reverse mapping */ - hash = TD_HASH_FUNC(td->td_dma); - td->td_hash = ahcd->td_hash[hash]; - ahcd->td_hash[hash] = td; + if (up->td_idx > 0) + up->td[up->td_idx-1]->hwNextTD = cpu_to_hc32(ahcd, td->td_dma); - /* HC might read the TD (or cachelines) right away ... */ - wmb(); - td->ed->hwTailP = td->hwNextTD; + up->td_idx++; } /*-------------------------------------------------------------------------*/ @@ -430,9 +281,7 @@ static void td_submit_urb(struct admhcd *ahcd, struct urb *urb) int cnt = 0; u32 info = 0; int is_out = usb_pipeout(urb->pipe); - int periodic = 0; u32 toggle = 0; - struct td *td; /* OHCI handles the bulk/interrupt data toggles itself. We just * use the device toggle bits for resetting, and rely on the fact @@ -448,7 +297,6 @@ static void td_submit_urb(struct admhcd *ahcd, struct urb *urb) } urb_priv->td_idx = 0; - list_add(&urb_priv->pending, &ahcd->pending); if (data_len) data = urb->transfer_dma; @@ -469,7 +317,7 @@ static void td_submit_urb(struct admhcd *ahcd, struct urb *urb) info |= (urb->start_frame & TD_FN_MASK); info |= (urb->interval & TD_ISI_MASK) << TD_ISI_SHIFT; - td_fill(ahcd, info, data, data_len, urb, cnt); + td_fill(ahcd, info, data, data_len, urb_priv); cnt++; admhcd_to_hcd(ahcd)->self.bandwidth_int_reqs++; @@ -483,20 +331,20 @@ static void td_submit_urb(struct admhcd *ahcd, struct urb *urb) /* TDs _could_ transfer up to 8K each */ while (data_len > TD_DATALEN_MAX) { td_fill(ahcd, info | ((cnt) ? TD_T_CARRY : toggle), - data, TD_DATALEN_MAX, urb, cnt); + data, TD_DATALEN_MAX, urb_priv); data += TD_DATALEN_MAX; data_len -= TD_DATALEN_MAX; cnt++; } td_fill(ahcd, info | ((cnt) ? 
TD_T_CARRY : toggle), data, - data_len, urb, cnt); + data_len, urb_priv); cnt++; if ((urb->transfer_flags & URB_ZERO_PACKET) && (cnt < urb_priv->td_cnt)) { td_fill(ahcd, info | ((cnt) ? TD_T_CARRY : toggle), - 0, 0, urb, cnt); + 0, 0, urb_priv); cnt++; } break; @@ -507,21 +355,24 @@ static void td_submit_urb(struct admhcd *ahcd, struct urb *urb) case PIPE_CONTROL: /* fill a TD for the setup */ info = TD_SCC_NOTACCESSED | TD_DP_SETUP | TD_T_DATA0; - td_fill(ahcd, info, urb->setup_dma, 8, urb, cnt++); + td_fill(ahcd, info, urb->setup_dma, 8, urb_priv); + cnt++; if (data_len > 0) { /* fill a TD for the data */ info = TD_SCC_NOTACCESSED | TD_T_DATA1; info |= is_out ? TD_DP_OUT : TD_DP_IN; /* NOTE: mishandles transfers >8K, some >4K */ - td_fill(ahcd, info, data, data_len, urb, cnt++); + td_fill(ahcd, info, data, data_len, urb_priv); + cnt++; } /* fill a TD for the ACK */ info = (is_out || data_len == 0) ? TD_SCC_NOTACCESSED | TD_DP_IN | TD_T_DATA1 : TD_SCC_NOTACCESSED | TD_DP_OUT | TD_T_DATA1; - td_fill(ahcd, info, data, 0, urb, cnt++); + td_fill(ahcd, info, data, 0, urb_priv); + cnt++; break; @@ -538,7 +389,8 @@ static void td_submit_urb(struct admhcd *ahcd, struct urb *urb) frame &= TD_FN_MASK; td_fill(ahcd, info | frame, data + urb->iso_frame_desc[cnt].offset, - urb->iso_frame_desc[cnt].length, urb, cnt); + urb->iso_frame_desc[cnt].length, + urb_priv); } admhcd_to_hcd(ahcd)->self.bandwidth_isoc_reqs++; break; @@ -546,20 +398,22 @@ static void td_submit_urb(struct admhcd *ahcd, struct urb *urb) if (urb_priv->td_cnt != cnt) admhc_err(ahcd, "bad number of tds created for urb %p\n", urb); -} -/*-------------------------------------------------------------------------* - * Done List handling functions - *-------------------------------------------------------------------------*/ + urb_priv->td_idx = 0; +} /* calculate transfer length/status and update the urb * PRECONDITION: irqsafe (only for urb->status locking) */ -static void td_done(struct admhcd *ahcd, struct urb *urb, struct td *td) +static int td_done(struct admhcd *ahcd, struct urb *urb, struct td *td) { u32 info = hc32_to_cpup(ahcd, &td->hwINFO); + u32 dbp = hc32_to_cpup(ahcd, &td->hwDBP); + u32 cbl = TD_BL_GET(hc32_to_cpup(ahcd, &td->hwCBL)); int type = usb_pipetype(urb->pipe); - int cc = TD_CC_NOERROR; + int cc; + + cc = TD_CC_GET(info); /* ISO ... drivers see per-TD length/status */ if (type == PIPE_ISOCHRONOUS) { @@ -576,16 +430,17 @@ static void td_done(struct admhcd *ahcd, struct urb *urb, struct td *td) return; if (usb_pipeout (urb->pipe)) - dlen = urb->iso_frame_desc [td->index].length; + dlen = urb->iso_frame_desc[td->index].length; else { /* short reads are always OK for ISO */ if (cc == TD_DATAUNDERRUN) cc = TD_CC_NOERROR; dlen = tdPSW & 0x3ff; } + urb->actual_length += dlen; - urb->iso_frame_desc [td->index].actual_length = dlen; - urb->iso_frame_desc [td->index].status = cc_to_error [cc]; + urb->iso_frame_desc[td->index].actual_length = dlen; + urb->iso_frame_desc[td->index].status = cc_to_error[cc]; if (cc != TD_CC_NOERROR) admhc_vdbg (ahcd, @@ -597,39 +452,15 @@ static void td_done(struct admhcd *ahcd, struct urb *urb, struct td *td) * might not be reported as errors. 
*/ } else { - u32 bl = TD_BL_GET(hc32_to_cpup(ahcd, &td->hwCBL)); - u32 tdDBP = hc32_to_cpup(ahcd, &td->hwDBP); - - cc = TD_CC_GET(info); - - /* update packet status if needed (short is normally ok) */ - if (cc == TD_CC_DATAUNDERRUN - && !(urb->transfer_flags & URB_SHORT_NOT_OK)) - cc = TD_CC_NOERROR; - - if (cc != TD_CC_NOERROR && cc < TD_CC_HCD0) { - admhc_dump_ed(ahcd, "CC ERROR", td->ed, 1); - spin_lock(&urb->lock); - if (urb->status == -EINPROGRESS) - urb->status = cc_to_error[cc]; - spin_unlock(&urb->lock); - } + admhc_dump_td(ahcd, "td_done", td); /* count all non-empty packets except control SETUP packet */ - if ((type != PIPE_CONTROL || td->index != 0) && tdDBP != 0) { - urb->actual_length += tdDBP - td->data_dma + bl; + if ((type != PIPE_CONTROL || td->index != 0) && dbp != 0) { + urb->actual_length += dbp - td->data_dma + cbl; } - - if (cc != TD_CC_NOERROR && cc < TD_CC_HCD0) - admhc_vdbg(ahcd, - "urb %p td %p (%d) cc %d, len=%d/%d\n", - urb, td, td->index, cc, - urb->actual_length, - urb->transfer_buffer_length); } - list_del(&td->td_list); - + return cc; } /*-------------------------------------------------------------------------*/ @@ -637,6 +468,7 @@ static void td_done(struct admhcd *ahcd, struct urb *urb, struct td *td) static inline struct td * ed_halted(struct admhcd *ahcd, struct td *td, int cc, struct td *rev) { +#if 0 struct urb *urb = td->urb; struct ed *ed = td->ed; struct list_head *tmp = td->td_list.next; @@ -708,195 +540,111 @@ ed_halted(struct admhcd *ahcd, struct td *td, int cc, struct td *rev) } return rev; +#else + return NULL; +#endif } /*-------------------------------------------------------------------------*/ -/* there are some urbs/eds to unlink; called in_irq(), with HCD locked */ -static void -finish_unlinks(struct admhcd *ahcd, u16 tick) +static int ed_next_urb(struct admhcd *ahcd, struct ed *ed) { - struct ed *ed, **last; + struct urb_priv *up; + u32 carry; -rescan_all: - for (last = &ahcd->ed_rm_list, ed = *last; ed != NULL; ed = *last) { - struct list_head *entry, *tmp; - int completed, modified; - __hc32 *prev; + if (ed->state != ED_IDLE) + return 1; - /* only take off EDs that the HC isn't using, accounting for - * frame counter wraps and EDs with partially retired TDs - */ - if (likely(HC_IS_RUNNING(admhcd_to_hcd(ahcd)->state))) { - if (tick_before (tick, ed->tick)) { -skip_ed: - last = &ed->ed_rm_next; - continue; - } + if (ed->urb_active) + return 1; - if (!list_empty (&ed->td_list)) { - struct td *td; - u32 head; + if (list_empty(&ed->urb_pending)) + return 0; - td = list_entry(ed->td_list.next, struct td, - td_list); - head = hc32_to_cpu(ahcd, ed->hwHeadP) & - TD_MASK; - - /* INTR_WDH may need to clean up first */ - if (td->td_dma != head) - goto skip_ed; - } - } - - /* reentrancy: if we drop the schedule lock, someone might - * have modified this list. normally it's just prepending - * entries (which we'd ignore), but paranoia won't hurt. - */ - *last = ed->ed_rm_next; - ed->ed_rm_next = NULL; - modified = 0; - - /* unlink urbs as requested, but rescan the list after - * we call a completion since it might have unlinked - * another (earlier) urb - * - * When we get here, the HC doesn't see this ed. But it - * must not be rescheduled until all completed URBs have - * been given back to the driver. 
- */ -rescan_this: - completed = 0; - prev = &ed->hwHeadP; - list_for_each_safe (entry, tmp, &ed->td_list) { - struct td *td; - struct urb *urb; - struct urb_priv *urb_priv; - __hc32 savebits; - - td = list_entry(entry, struct td, td_list); - urb = td->urb; - urb_priv = td->urb->hcpriv; - - if (urb->status == -EINPROGRESS) { - prev = &td->hwNextTD; - continue; - } - - if ((urb_priv) == NULL) - continue; + up = list_entry(ed->urb_pending.next, struct urb_priv, pending); + list_del(&up->pending); + ed->urb_active = up; + ed->state = ED_OPER; - /* patch pointer hc uses */ - savebits = *prev & ~cpu_to_hc32(ahcd, TD_MASK); - *prev = td->hwNextTD | savebits; +#ifdef ADMHC_VERBOSE_DEBUG + urb_print(ahcd, up->urb, "NEXT", 0); + admhc_dump_ed(ahcd, " ", ed, 0); +#endif - /* HC may have partly processed this TD */ - urb_print(urb, "PARTIAL",1); - td_done(ahcd, urb, td); - urb_priv->td_idx++; + up->td[up->td_cnt-1]->hwNextTD = cpu_to_hc32(ahcd, ed->dummy->td_dma); - /* if URB is done, clean up */ - if (urb_priv->td_idx == urb_priv->td_cnt) { - modified = completed = 1; - finish_urb(ahcd, urb); - } - } - if (completed && !list_empty (&ed->td_list)) - goto rescan_this; - - /* ED's now officially unlinked, hc doesn't see */ - ed->state = ED_IDLE; - ed->hwHeadP &= ~cpu_to_hc32(ahcd, ED_H); - ed->hwNextED = 0; - wmb (); - ed->hwINFO &= ~cpu_to_hc32 (ahcd, ED_SKIP | ED_DEQUEUE); - - /* but if there's work queued, reschedule */ - if (!list_empty (&ed->td_list)) { - if (HC_IS_RUNNING(admhcd_to_hcd(ahcd)->state)) - ed_schedule(ahcd, ed); - } + admhc_dma_lock(ahcd); + carry = hc32_to_cpup(ahcd, &ed->hwHeadP) & ED_C; + ed->hwHeadP = cpu_to_hc32(ahcd, up->td[0]->td_dma | carry); + ed->hwINFO &= ~cpu_to_hc32(ahcd, ED_SKIP); + admhc_dma_unlock(ahcd); - if (modified) - goto rescan_all; - } + return 1; } -/*-------------------------------------------------------------------------*/ +static void ed_update(struct admhcd *ahcd, struct ed *ed, int partial) +{ + struct urb_priv *up; + struct urb *urb; + int cc; -/* - * Process normal completions (error or success) and clean the schedules. - * - * This is the main path for handing urbs back to drivers. The only other - * path is finish_unlinks(), which unlinks URBs using ed_rm_list, instead of - * scanning the (re-reversed) donelist as this does. 
- */ + up = ed->urb_active; + if (!up) + return; -static void ed_update(struct admhcd *ahcd, struct ed *ed) -{ - struct list_head *entry,*tmp; + urb = up->urb; - admhc_dump_ed(ahcd, "ed update", ed, 1); +#ifdef ADMHC_VERBOSE_DEBUG + urb_print(ahcd, urb, "UPDATE", 0); +#endif + admhc_dump_ed(ahcd, "ED-UPDATE", ed, 1); - list_for_each_safe(entry, tmp, &ed->td_list) { - struct td *td = list_entry(entry, struct td, td_list); - struct urb *urb = td->urb; - struct urb_priv *urb_priv = urb->hcpriv; + cc = TD_CC_NOERROR; + for (; up->td_idx < up->td_cnt; up->td_idx++) { + struct td *td = up->td[up->td_idx]; if (hc32_to_cpup(ahcd, &td->hwINFO) & TD_OWN) break; - /* update URB's length and status from TD */ - td_done(ahcd, urb, td); - urb_priv->td_idx++; + cc = td_done(ahcd, urb, td); + if (cc != TD_CC_NOERROR) { + admhc_vdbg(ahcd, + "urb %p td %p (%d) cc %d, len=%d/%d\n", + urb, td, td->index, cc, + urb->actual_length, + urb->transfer_buffer_length); - /* If all this urb's TDs are done, call complete() */ - if (urb_priv->td_idx == urb_priv->td_cnt) - finish_urb(ahcd, urb); + up->td_idx = up->td_cnt; + break; + } + } - /* clean schedule: unlink EDs that are no longer busy */ - if (list_empty(&ed->td_list)) { - if (ed->state == ED_OPER) - start_ed_unlink(ahcd, ed); + if ((up->td_idx != up->td_cnt) && (!partial)) + /* the URB is not completed yet */ + return; - /* ... reenabling halted EDs only after fault cleanup */ - } else if ((ed->hwINFO & cpu_to_hc32 (ahcd, - ED_SKIP | ED_DEQUEUE)) - == cpu_to_hc32 (ahcd, ED_SKIP)) { - td = list_entry(ed->td_list.next, struct td, td_list); -#if 0 - if (!(td->hwINFO & cpu_to_hc32 (ahcd, TD_DONE))) { - ed->hwINFO &= ~cpu_to_hc32 (ahcd, ED_SKIP); - /* ... hc may need waking-up */ - switch (ed->type) { - case PIPE_CONTROL: - admhc_writel (ahcd, OHCI_CLF, - &ahcd->regs->cmdstatus); - break; - case PIPE_BULK: - admhc_writel (ahcd, OHCI_BLF, - &ahcd->regs->cmdstatus); - break; - } - } -#else - if ((td->hwINFO & cpu_to_hc32(ahcd, TD_OWN))) - ed->hwINFO &= ~cpu_to_hc32(ahcd, ED_SKIP); -#endif - } + /* update packet status if needed (short is normally ok) */ + if (cc == TD_CC_DATAUNDERRUN + && !(urb->transfer_flags & URB_SHORT_NOT_OK)) + cc = TD_CC_NOERROR; + if (cc != TD_CC_NOERROR && cc < TD_CC_HCD0) { + spin_lock(&urb->lock); + if (urb->status == -EINPROGRESS) + urb->status = cc_to_error[cc]; + spin_unlock(&urb->lock); } -} -static void ed_halt(struct admhcd *ahcd, struct ed *ed) -{ - admhc_dump_ed(ahcd, "ed_halt", ed, 1); + finish_urb(ahcd, urb); + + ed->urb_active = NULL; + ed->state = ED_IDLE; } /* there are some tds completed; called in_irq(), with HCD locked */ static void admhc_td_complete(struct admhcd *ahcd) { - struct ed *ed; + struct ed *ed; for (ed = ahcd->ed_head; ed; ed = ed->ed_next) { if (ed->state != ED_OPER) @@ -906,10 +654,59 @@ static void admhc_td_complete(struct admhcd *ahcd) continue; if (hc32_to_cpup(ahcd, &ed->hwHeadP) & ED_H) { - ed_halt(ahcd, ed); + /* TODO */ + continue; + } + + ed_update(ahcd, ed, 0); + } +} + +/* there are some urbs/eds to unlink; called in_irq(), with HCD locked */ +static void admhc_finish_unlinks(struct admhcd *ahcd, u16 tick) +{ + struct ed *ed; + + for (ed = ahcd->ed_head; ed; ed = ed->ed_next) { + if (ed->state != ED_UNLINK) continue; + + if (likely(HC_IS_RUNNING(admhcd_to_hcd(ahcd)->state))) + if (tick_before(tick, ed->tick)) + continue; + + /* process partial status */ + ed_update(ahcd, ed, 1); + } +} + +static void admhc_sof_refill(struct admhcd *ahcd) +{ + struct ed *ed; + int disable_dma = 1; + + for (ed = 
ahcd->ed_head; ed; ed = ed->ed_next) { + + if (hc32_to_cpup(ahcd, &ed->hwHeadP) & ED_H) { + ed_update(ahcd, ed, 1); + ed->hwHeadP &= ~cpu_to_hc32 (ahcd, ED_H); } - ed_update(ahcd, ed); + if (ed_next_urb(ahcd, ed)) { + disable_dma = 0; + } else { + struct ed *tmp; + tmp = ed->ed_prev; + ed_deschedule(ahcd, ed); + ed = tmp; + } + } + + if (disable_dma) { + admhc_intr_disable(ahcd, ADMHC_INTR_SOFI); + admhc_dma_disable(ahcd); + } else { + admhc_intr_enable(ahcd, ADMHC_INTR_SOFI); + admhc_dma_enable(ahcd); } } diff --git a/target/linux/adm5120/files/drivers/usb/host/adm5120.h b/target/linux/adm5120/files/drivers/usb/host/adm5120.h index 95616f27a..370722547 100644 --- a/target/linux/adm5120/files/drivers/usb/host/adm5120.h +++ b/target/linux/adm5120/files/drivers/usb/host/adm5120.h @@ -56,7 +56,8 @@ struct ed { dma_addr_t dma; /* addr of ED */ struct td *dummy; /* next TD to activate */ - struct list_head urb_list; /* list of our URBs */ + struct urb_priv *urb_active; /* active URB */ + struct list_head urb_pending; /* pending URBs */ struct list_head ed_list; /* list of all EDs*/ struct list_head rm_list; /* for remove list */ @@ -65,16 +66,15 @@ struct ed { struct ed *ed_next; /* on schedule list */ struct ed *ed_prev; /* for non-interrupt EDs */ struct ed *ed_rm_next; /* on rm list */ - struct ed *ed_soft_list; /* on software int list */ - struct list_head td_list; /* "shadow list" of our TDs */ /* create --> IDLE --> OPER --> ... --> IDLE --> destroy * usually: OPER --> UNLINK --> (IDLE | OPER) --> ... */ - u8 state; /* ED_{IDLE,UNLINK,OPER} */ -#define ED_IDLE 0x00 /* NOT linked to HC */ -#define ED_UNLINK 0x01 /* being unlinked from hc */ -#define ED_OPER 0x02 /* IS linked to hc */ + u8 state; +#define ED_NEW 0x00 /* just allocated */ +#define ED_IDLE 0x01 /* linked into HC, but not running */ +#define ED_OPER 0x02 /* linked into HC and running */ +#define ED_UNLINK 0x03 /* being unlinked from HC */ u8 type; /* PIPE_{BULK,...} */ @@ -115,16 +115,17 @@ struct td { #define TD_T_SHIFT 23 /* data toggle state */ #define TD_T_MASK 0x3 #define TD_T (TD_T_MASK << TD_T_SHIFT) -#define TD_T_DATA0 (0x2 << TD_T_SHIFT) /* DATA0 */ -#define TD_T_DATA1 (0x3 << TD_T_SHIFT) /* DATA1 */ -#define TD_T_CARRY (0x0 << TD_T_SHIFT) /* uses ED_C */ +#define TD_T_DATA0 (0x2 << TD_T_SHIFT) /* DATA0 */ +#define TD_T_DATA1 (0x3 << TD_T_SHIFT) /* DATA1 */ +#define TD_T_CARRY (0x0 << TD_T_SHIFT) /* uses ED_C */ #define TD_T_GET(x) (((x) >> TD_T_SHIFT) & TD_T_MASK) #define TD_DP_SHIFT 21 /* direction/pid */ #define TD_DP_MASK 0x3 #define TD_DP (TD_DP_MASK << TD_DP_SHIFT) -#define TD_DP_SETUP (0x0 << TD_DP_SHIFT) /* SETUP pid */ -#define TD_DP_OUT (0x1 << TD_DP_SHIFT) /* OUT pid */ -#define TD_DP_IN (0x2 << TD_DP_SHIFT) /* IN pid */ +#define TD_DP_SETUP (0x0 << TD_DP_SHIFT) /* SETUP pid */ +#define TD_DP_OUT (0x1 << TD_DP_SHIFT) /* OUT pid */ +#define TD_DP_IN (0x2 << TD_DP_SHIFT) /* IN pid */ +#define TD_DP_GET(x) (((x) >> TD_DP_SHIFT) & TD_DP_MASK) #define TD_ISI_SHIFT 8 /* Interrupt Service Interval */ #define TD_ISI_MASK 0x3f #define TD_ISI_GET(x) (((x) >> TD_ISI_SHIFT) & TD_ISI_MASK) @@ -142,19 +143,12 @@ struct td { /* rest are purely for the driver's use */ __u8 index; - struct ed *ed; - struct td *td_hash; /* dma-->td hashtable */ - struct td *next_dl_td; +/* struct ed *ed;*/ struct urb *urb; dma_addr_t td_dma; /* addr of this TD */ dma_addr_t data_dma; /* addr of data it points to */ - struct list_head td_list; /* "shadow list", TDs on same ED */ - - u32 flags; -#define TD_FLAG_DONE (1 << 17) /* retired to 
done list */ -#define TD_FLAG_ISO (1 << 16) /* copy of ED_ISO */ } __attribute__ ((aligned(TD_ALIGN))); /* c/b/i need 16; only iso needs 32 */ /* @@ -354,6 +348,7 @@ struct admhcd_regs { /* hcd-private per-urb state */ struct urb_priv { struct ed *ed; + struct urb *urb; struct list_head pending; /* URBs on the same ED */ u32 td_cnt; /* # tds in this request */ @@ -374,6 +369,8 @@ struct urb_priv { struct admhcd { spinlock_t lock; + spinlock_t dma_lock; + u32 dma_state; /* * I/O memory used to communicate with the HC (dma-consistent) @@ -384,14 +381,10 @@ struct admhcd { * hcd adds to schedule for a live hc any time, but removals finish * only at the start of the next frame. */ - struct ed *ed_head; struct ed *ed_tails[4]; struct ed *ed_rm_list; /* to be removed */ - struct ed *ed_halt_list; /* halted due to an error */ - struct ed *ed_soft_list; /* for software interrupt processing */ - struct ed *periodic[NUM_INTS]; /* shadow int_table */ #if 0 /* TODO: remove? */ @@ -408,7 +401,6 @@ struct admhcd { struct dma_pool *td_cache; struct dma_pool *ed_cache; struct td *td_hash[TD_HASH_SIZE]; - struct list_head pending; /* * driver state @@ -446,6 +438,7 @@ static inline struct usb_hcd *admhcd_to_hcd(const struct admhcd *ahcd) #define STUB_DEBUG_FILES #endif /* DEBUG */ +#if 0 #define admhc_dbg(ahcd, fmt, args...) \ dev_dbg(admhcd_to_hcd(ahcd)->self.controller , fmt , ## args ) #define admhc_err(ahcd, fmt, args...) \ @@ -460,6 +453,22 @@ static inline struct usb_hcd *admhcd_to_hcd(const struct admhcd *ahcd) #else # define admhc_vdbg(ahcd, fmt, args...) do { } while (0) #endif +#else +#define admhc_dbg(ahcd, fmt, args...) \ + printk(KERN_DEBUG "adm5120-hcd: " fmt , ## args ) +#define admhc_err(ahcd, fmt, args...) \ + printk(KERN_ERR "adm5120-hcd: " fmt , ## args ) +#define ahcd_info(ahcd, fmt, args...) \ + printk(KERN_INFO "adm5120-hcd: " fmt , ## args ) +#define admhc_warn(ahcd, fmt, args...) \ + printk(KERN_WARNING "adm5120-hcd: " fmt , ## args ) + +#ifdef ADMHC_VERBOSE_DEBUG +# define admhc_vdbg admhc_dbg +#else +# define admhc_vdbg(ahcd, fmt, args...) do { } while (0) +#endif +#endif /*-------------------------------------------------------------------------*/ @@ -633,6 +642,15 @@ static inline u16 admhc_frame_no(const struct admhcd *ahcd) return (u16)t; } +static inline u16 admhc_frame_remain(const struct admhcd *ahcd) +{ + u32 t; + + t = admhc_readl(ahcd, &ahcd->regs->fmnumber) >> ADMHC_SFN_FR_SHIFT; + t &= ADMHC_SFN_FR_MASK; + return (u16)t; +} + /*-------------------------------------------------------------------------*/ static inline void admhc_disable(struct admhcd *ahcd) @@ -652,7 +670,7 @@ static inline void periodic_reinit(struct admhcd *ahcd) /* TODO: adjust FSLargestDataPacket value too? 
*/ admhc_writel(ahcd, (fit ^ FIT) | ahcd->fminterval, - &ahcd->regs->fminterval); + &ahcd->regs->fminterval); } static inline u32 admhc_get_rhdesc(struct admhcd *ahcd) @@ -698,17 +716,45 @@ static inline void admhc_intr_ack(struct admhcd *ahcd, u32 ints) static inline void admhc_dma_enable(struct admhcd *ahcd) { - ahcd->host_control = admhc_readl(ahcd, &ahcd->regs->host_control); - if (ahcd->host_control & ADMHC_HC_DMAE) + u32 t; + + t = admhc_readl(ahcd, &ahcd->regs->host_control); + if (t & ADMHC_HC_DMAE) return; - ahcd->host_control |= ADMHC_HC_DMAE; - admhc_writel(ahcd, ahcd->host_control, &ahcd->regs->host_control); + t |= ADMHC_HC_DMAE; + admhc_writel(ahcd, t, &ahcd->regs->host_control); + admhc_dbg(ahcd, "DMA enabled\n"); } static inline void admhc_dma_disable(struct admhcd *ahcd) { - ahcd->host_control = admhc_readl(ahcd, &ahcd->regs->host_control); - ahcd->host_control &= ~ADMHC_HC_DMAE; - admhc_writel(ahcd, ahcd->host_control, &ahcd->regs->host_control); + u32 t; + + t = admhc_readl(ahcd, &ahcd->regs->host_control); + if (!(t & ADMHC_HC_DMAE)) + return; + + t &= ~ADMHC_HC_DMAE; + admhc_writel(ahcd, t, &ahcd->regs->host_control); + admhc_dbg(ahcd, "DMA disabled\n"); +} + +static inline void admhc_dma_lock(struct admhcd *ahcd) +{ + spin_lock(&ahcd->dma_lock); + + ahcd->dma_state = admhc_readl(ahcd, &ahcd->regs->host_control); + admhc_writel(ahcd, 0, &ahcd->regs->hosthead); + admhc_writel(ahcd, ahcd->dma_state & ~ADMHC_HC_DMAE, + &ahcd->regs->host_control); + admhc_dbg(ahcd, "DMA locked\n"); +} + +static inline void admhc_dma_unlock(struct admhcd *ahcd) +{ + admhc_writel(ahcd, (u32)ahcd->ed_head->dma, &ahcd->regs->hosthead); + admhc_writel(ahcd, ahcd->dma_state, &ahcd->regs->host_control); + admhc_dbg(ahcd, "DMA unlocked\n"); + spin_unlock(&ahcd->dma_lock); }
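As a reading aid for the ed->state changes scattered through adm5120-q.c and adm5120.h above, the sketch below walks the ED lifecycle the patch introduces (ED_NEW/ED_IDLE/ED_OPER/ED_UNLINK). It is a stand-alone summary with assumed step descriptions, not part of the driver.

/* Hypothetical stand-alone summary of the ED state machine added by
 * this patch; the transitions and the functions named in the table are
 * taken from adm5120-q.c, everything else is illustrative. */
#include <stdio.h>

enum ed_state { ED_NEW, ED_IDLE, ED_OPER, ED_UNLINK };

static const char *ed_statestring(enum ed_state s)
{
	switch (s) {
	case ED_NEW:	return "NEW";
	case ED_IDLE:	return "IDLE";
	case ED_OPER:	return "OPER";
	case ED_UNLINK:	return "UNLINK";
	}
	return "?STATE";
}

int main(void)
{
	/* the path a single bulk URB typically takes */
	struct { enum ed_state from, to; const char *who; } steps[] = {
		{ ED_NEW,  ED_IDLE, "ed_schedule (URB enqueued, ED linked)" },
		{ ED_IDLE, ED_OPER, "ed_next_urb (SOF refill promotes a URB)" },
		{ ED_OPER, ED_IDLE, "ed_update (last TD retired, URB given back)" },
		{ ED_IDLE, ED_NEW,  "ed_deschedule (pending queue empty)" },
	};
	for (unsigned i = 0; i < sizeof(steps) / sizeof(steps[0]); i++)
		printf("%-6s -> %-6s via %s\n",
		       ed_statestring(steps[i].from),
		       ed_statestring(steps[i].to), steps[i].who);
	return 0;
}

ed_start_deschedule() adds the OPER -> UNLINK path when an active URB is dequeued; admhc_finish_unlinks() later completes it with a partial ed_update() once the frame counter shows the HC is no longer looking at the ED.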