[PATCH] UHCI: use one QH per endpoint, not per URB

This patch (as623) changes the uhci-hcd driver to make it use one QH per
device endpoint, instead of a QH per URB as it does now.  Numerous areas
of the code are affected by this.  For example, the distinction between
"queued" URBs and non-"queued" URBs no longer exists; all URBs belong to
a queue and some just happen to be at the queue's head.
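
For readers skimming the diff below, here is a rough, self-contained sketch of
the new layout.  It is illustrative only -- the real queue management is in
uhci-q.c, whose diff is suppressed at the end, and the names below merely mimic
the driver's structures: every usb_host_endpoint keeps one uhci_qh in
hep->hcpriv, and each submitted URB's urb_priv is appended to that QH's queue.

/* Illustrative user-space model of "one QH per endpoint" queuing.
 * Not kernel code; struct names only echo the driver's. */
#include <stdio.h>

struct list_head { struct list_head *prev, *next; };

static void list_init(struct list_head *h) { h->prev = h->next = h; }
static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev; n->next = h;
	h->prev->next = n; h->prev = n;
}

struct uhci_qh_model {			/* one per endpoint (hep->hcpriv) */
	struct list_head queue;		/* queue of urb_priv_model.node */
	int state;			/* IDLE / UNLINKING / ACTIVE */
};

struct urb_priv_model {
	struct list_head node;		/* node in the endpoint QH's queue */
	int id;
};

struct endpoint_model { struct uhci_qh_model *hcpriv; };

/* Submitting a URB no longer allocates a QH; it reuses the endpoint's QH
 * (creating it only on first use) and appends the URB to its queue. */
static void submit(struct endpoint_model *ep, struct uhci_qh_model *qh_mem,
		   struct urb_priv_model *urbp)
{
	if (!ep->hcpriv) {
		list_init(&qh_mem->queue);
		qh_mem->state = 1;	/* QH_STATE_IDLE */
		ep->hcpriv = qh_mem;
	}
	list_add_tail(&urbp->node, &ep->hcpriv->queue);
	ep->hcpriv->state = 3;		/* QH_STATE_ACTIVE */
}

int main(void)
{
	struct endpoint_model ep = { 0 };
	struct uhci_qh_model qh;
	struct urb_priv_model u1 = { {0, 0}, 1 }, u2 = { {0, 0}, 2 };
	struct list_head *p;
	int n = 0;

	submit(&ep, &qh, &u1);
	submit(&ep, &qh, &u2);		/* same endpoint -> same QH */
	for (p = qh.queue.next; p != &qh.queue; p = p->next)
		n++;
	printf("URBs queued on the endpoint's one QH: %d\n", n);	/* 2 */
	return 0;
}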

Signed-off-by: Alan Stern <stern@rowland.harvard.edu>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
Alan Stern, 2005-12-17 17:58:46 -05:00, committed by Greg Kroah-Hartman
parent 499003e815
commit dccf4a48d4
4 changed files with 688 additions and 865 deletions


@@ -90,13 +90,60 @@ static int uhci_show_td(struct uhci_td *td, char *buf, int len, int space)
 	return out - buf;
 }
 
+static int uhci_show_urbp(struct urb_priv *urbp, char *buf, int len, int space)
+{
+	char *out = buf;
+	struct uhci_td *td;
+	int i, nactive, ninactive;
+
+	if (len < 200)
+		return 0;
+
+	out += sprintf(out, "urb_priv [%p] ", urbp);
+	out += sprintf(out, "urb [%p] ", urbp->urb);
+	out += sprintf(out, "qh [%p] ", urbp->qh);
+	out += sprintf(out, "Dev=%d ", usb_pipedevice(urbp->urb->pipe));
+	out += sprintf(out, "EP=%x(%s) ", usb_pipeendpoint(urbp->urb->pipe),
+			(usb_pipein(urbp->urb->pipe) ? "IN" : "OUT"));
+
+	switch (usb_pipetype(urbp->urb->pipe)) {
+	case PIPE_ISOCHRONOUS: out += sprintf(out, "ISO"); break;
+	case PIPE_INTERRUPT: out += sprintf(out, "INT"); break;
+	case PIPE_BULK: out += sprintf(out, "BLK"); break;
+	case PIPE_CONTROL: out += sprintf(out, "CTL"); break;
+	}
+
+	out += sprintf(out, "%s", (urbp->fsbr ? " FSBR" : ""));
+	out += sprintf(out, "%s", (urbp->fsbr_timeout ? " FSBR_TO" : ""));
+
+	if (urbp->urb->status != -EINPROGRESS)
+		out += sprintf(out, " Status=%d", urbp->urb->status);
+	out += sprintf(out, "\n");
+
+	i = nactive = ninactive = 0;
+	list_for_each_entry(td, &urbp->td_list, list) {
+		if (++i <= 10 || debug > 2) {
+			out += sprintf(out, "%*s%d: ", space + 2, "", i);
+			out += uhci_show_td(td, out, len - (out - buf), 0);
+		} else {
+			if (td_status(td) & TD_CTRL_ACTIVE)
+				++nactive;
+			else
+				++ninactive;
+		}
+	}
+	if (nactive + ninactive > 0)
+		out += sprintf(out, "%*s[skipped %d inactive and %d active "
+				"TDs]\n",
+				space, "", ninactive, nactive);
+
+	return out - buf;
+}
+
 static int uhci_show_qh(struct uhci_qh *qh, char *buf, int len, int space)
 {
 	char *out = buf;
-	struct urb_priv *urbp;
-	struct list_head *head, *tmp;
-	struct uhci_td *td;
-	int i = 0, checked = 0, prevactive = 0;
+	int i, nurbs;
 	__le32 element = qh_element(qh);
 
 	/* Try to make sure there's enough memory */
@@ -118,86 +165,36 @@ static int uhci_show_qh(struct uhci_qh *qh, char *buf, int len, int space)
 	if (!(element & ~(UHCI_PTR_QH | UHCI_PTR_DEPTH)))
 		out += sprintf(out, "%*s Element is NULL (bug?)\n", space, "");
 
-	if (!qh->urbp) {
-		out += sprintf(out, "%*s urbp == NULL\n", space, "");
-		goto out;
-	}
-
-	urbp = qh->urbp;
-
-	head = &urbp->td_list;
-	tmp = head->next;
-
-	td = list_entry(tmp, struct uhci_td, list);
-
-	if (cpu_to_le32(td->dma_handle) != (element & ~UHCI_PTR_BITS))
-		out += sprintf(out, "%*s Element != First TD\n", space, "");
-
-	while (tmp != head) {
-		struct uhci_td *td = list_entry(tmp, struct uhci_td, list);
-
-		tmp = tmp->next;
-
-		out += sprintf(out, "%*s%d: ", space + 2, "", i++);
-		out += uhci_show_td(td, out, len - (out - buf), 0);
-
-		if (i > 10 && !checked && prevactive && tmp != head &&
-				debug <= 2) {
-			struct list_head *ntmp = tmp;
-			struct uhci_td *ntd = td;
-			int active = 1, ni = i;
-
-			checked = 1;
-
-			while (ntmp != head && ntmp->next != head && active) {
-				ntd = list_entry(ntmp, struct uhci_td, list);
-				ntmp = ntmp->next;
-				active = td_status(ntd) & TD_CTRL_ACTIVE;
-				ni++;
-			}
-
-			if (active && ni > i) {
-				out += sprintf(out, "%*s[skipped %d active TDs]\n", space, "", ni - i);
-				tmp = ntmp;
-				td = ntd;
-				i = ni;
-			}
-		}
-
-		prevactive = td_status(td) & TD_CTRL_ACTIVE;
-	}
-
-	if (list_empty(&urbp->queue_list) || urbp->queued)
-		goto out;
-
-	out += sprintf(out, "%*sQueued QHs:\n", -space, "--");
-
-	head = &urbp->queue_list;
-	tmp = head->next;
-
-	while (tmp != head) {
-		struct urb_priv *nurbp = list_entry(tmp, struct urb_priv,
-				queue_list);
-		tmp = tmp->next;
-
-		out += uhci_show_qh(nurbp->qh, out, len - (out - buf), space);
-	}
-
-out:
+	if (list_empty(&qh->queue)) {
+		out += sprintf(out, "%*s queue is empty\n", space, "");
+
+	} else {
+		struct urb_priv *urbp = list_entry(qh->queue.next,
+				struct urb_priv, node);
+		struct uhci_td *td = list_entry(urbp->td_list.next,
+				struct uhci_td, list);
+
+		if (cpu_to_le32(td->dma_handle) != (element & ~UHCI_PTR_BITS))
+			out += sprintf(out, "%*s Element != First TD\n",
+					space, "");
+		i = nurbs = 0;
+		list_for_each_entry(urbp, &qh->queue, node) {
+			if (++i <= 10)
+				out += uhci_show_urbp(urbp, out,
+						len - (out - buf), space + 2);
+			else
+				++nurbs;
+		}
+		if (nurbs > 0)
+			out += sprintf(out, "%*s Skipped %d URBs\n",
+					space, "", nurbs);
+	}
+
 	return out - buf;
 }
-#define show_frame_num()	\
-	if (!shown) {		\
-		shown = 1;	\
-		out += sprintf(out, "- Frame %d\n", i);	\
-	}
 
 #ifdef CONFIG_PROC_FS
 static const char * const qh_names[] = {
+	"skel_unlink_qh", "skel_iso_qh",
 	"skel_int128_qh", "skel_int64_qh",
 	"skel_int32_qh", "skel_int16_qh",
 	"skel_int8_qh", "skel_int4_qh",
@@ -206,12 +203,6 @@ static const char * const qh_names[] = {
 	"skel_bulk_qh", "skel_term_qh"
 };
 
-#define show_qh_name()		\
-	if (!shown) {		\
-		shown = 1;	\
-		out += sprintf(out, "- %s\n", qh_names[i]);	\
-	}
-
 static int uhci_show_sc(int port, unsigned short status, char *buf, int len)
 {
 	char *out = buf;
@@ -321,139 +312,29 @@ static int uhci_show_status(struct uhci_hcd *uhci, char *buf, int len)
 	return out - buf;
 }
-static int uhci_show_urbp(struct uhci_hcd *uhci, struct urb_priv *urbp, char *buf, int len)
-{
-	struct list_head *tmp;
-	char *out = buf;
-	int count = 0;
-
-	if (len < 200)
-		return 0;
-
-	out += sprintf(out, "urb_priv [%p] ", urbp);
-	out += sprintf(out, "urb [%p] ", urbp->urb);
-	out += sprintf(out, "qh [%p] ", urbp->qh);
-	out += sprintf(out, "Dev=%d ", usb_pipedevice(urbp->urb->pipe));
-	out += sprintf(out, "EP=%x(%s) ", usb_pipeendpoint(urbp->urb->pipe), (usb_pipein(urbp->urb->pipe) ? "IN" : "OUT"));
-
-	switch (usb_pipetype(urbp->urb->pipe)) {
-	case PIPE_ISOCHRONOUS: out += sprintf(out, "ISO "); break;
-	case PIPE_INTERRUPT: out += sprintf(out, "INT "); break;
-	case PIPE_BULK: out += sprintf(out, "BLK "); break;
-	case PIPE_CONTROL: out += sprintf(out, "CTL "); break;
-	}
-
-	out += sprintf(out, "%s", (urbp->fsbr ? "FSBR " : ""));
-	out += sprintf(out, "%s", (urbp->fsbr_timeout ? "FSBR_TO " : ""));
-
-	if (urbp->urb->status != -EINPROGRESS)
-		out += sprintf(out, "Status=%d ", urbp->urb->status);
-	//out += sprintf(out, "FSBRtime=%lx ",urbp->fsbrtime);
-
-	count = 0;
-	list_for_each(tmp, &urbp->td_list)
-		count++;
-	out += sprintf(out, "TDs=%d ",count);
-
-	if (urbp->queued)
-		out += sprintf(out, "queued\n");
-	else {
-		count = 0;
-		list_for_each(tmp, &urbp->queue_list)
-			count++;
-		out += sprintf(out, "queued URBs=%d\n", count);
-	}
-
-	return out - buf;
-}
-
-static int uhci_show_lists(struct uhci_hcd *uhci, char *buf, int len)
-{
-	char *out = buf;
-	struct list_head *head, *tmp;
-	int count;
-
-	out += sprintf(out, "Main list URBs:");
-	if (list_empty(&uhci->urb_list))
-		out += sprintf(out, " Empty\n");
-	else {
-		out += sprintf(out, "\n");
-		count = 0;
-		head = &uhci->urb_list;
-		tmp = head->next;
-		while (tmp != head) {
-			struct urb_priv *urbp = list_entry(tmp, struct urb_priv, urb_list);
-
-			out += sprintf(out, " %d: ", ++count);
-			out += uhci_show_urbp(uhci, urbp, out, len - (out - buf));
-			tmp = tmp->next;
-		}
-	}
-
-	out += sprintf(out, "Remove list URBs:");
-	if (list_empty(&uhci->urb_remove_list))
-		out += sprintf(out, " Empty\n");
-	else {
-		out += sprintf(out, "\n");
-		count = 0;
-		head = &uhci->urb_remove_list;
-		tmp = head->next;
-		while (tmp != head) {
-			struct urb_priv *urbp = list_entry(tmp, struct urb_priv, urb_list);
-
-			out += sprintf(out, " %d: ", ++count);
-			out += uhci_show_urbp(uhci, urbp, out, len - (out - buf));
-			tmp = tmp->next;
-		}
-	}
-
-	out += sprintf(out, "Complete list URBs:");
-	if (list_empty(&uhci->complete_list))
-		out += sprintf(out, " Empty\n");
-	else {
-		out += sprintf(out, "\n");
-		count = 0;
-		head = &uhci->complete_list;
-		tmp = head->next;
-		while (tmp != head) {
-			struct urb_priv *urbp = list_entry(tmp, struct urb_priv, urb_list);
-
-			out += sprintf(out, " %d: ", ++count);
-			out += uhci_show_urbp(uhci, urbp, out, len - (out - buf));
-			tmp = tmp->next;
-		}
-	}
-
-	return out - buf;
-}
 static int uhci_sprint_schedule(struct uhci_hcd *uhci, char *buf, int len)
 {
-	unsigned long flags;
 	char *out = buf;
 	int i, j;
 	struct uhci_qh *qh;
 	struct uhci_td *td;
 	struct list_head *tmp, *head;
 
-	spin_lock_irqsave(&uhci->lock, flags);
 	out += uhci_show_root_hub_state(uhci, out, len - (out - buf));
 	out += sprintf(out, "HC status\n");
 	out += uhci_show_status(uhci, out, len - (out - buf));
+	if (debug <= 1)
+		return out - buf;
 
 	out += sprintf(out, "Frame List\n");
 	for (i = 0; i < UHCI_NUMFRAMES; ++i) {
-		int shown = 0;
 		td = uhci->frame_cpu[i];
 		if (!td)
 			continue;
-		if (td->dma_handle != (dma_addr_t)uhci->frame[i]) {
-			show_frame_num();
+		out += sprintf(out, "- Frame %d\n", i); \
+		if (td->dma_handle != (dma_addr_t)uhci->frame[i])
 			out += sprintf(out, " frame list does not match td->dma_handle!\n");
-		}
-		show_frame_num();
 
 		head = &td->fl_list;
 		tmp = head;
@@ -467,14 +348,11 @@ static int uhci_sprint_schedule(struct uhci_hcd *uhci, char *buf, int len)
 	out += sprintf(out, "Skeleton QHs\n");
 
 	for (i = 0; i < UHCI_NUM_SKELQH; ++i) {
-		int shown = 0;
+		int cnt = 0;
 
 		qh = uhci->skelqh[i];
-
-		if (debug > 1) {
-			show_qh_name();
-			out += uhci_show_qh(qh, out, len - (out - buf), 4);
-		}
+		out += sprintf(out, "- %s\n", qh_names[i]); \
+		out += uhci_show_qh(qh, out, len - (out - buf), 4);
 
 		/* Last QH is the Terminating QH, it's different */
 		if (i == UHCI_NUM_SKELQH - 1) {
@@ -487,44 +365,27 @@ static int uhci_sprint_schedule(struct uhci_hcd *uhci, char *buf, int len)
 			continue;
 		}
 
-		j = (i < 7) ? 7 : i+1;		/* Next skeleton */
-		if (list_empty(&qh->list)) {
-			if (i < UHCI_NUM_SKELQH - 1) {
-				if (qh->link !=
-				    (cpu_to_le32(uhci->skelqh[j]->dma_handle) | UHCI_PTR_QH)) {
-					show_qh_name();
-					out += sprintf(out, " skeleton QH not linked to next skeleton QH!\n");
-				}
-			}
-			continue;
-		}
-
-		show_qh_name();
-		head = &qh->list;
+		j = (i < 9) ? 9 : i+1;		/* Next skeleton */
+		head = &qh->node;
 		tmp = head->next;
 
 		while (tmp != head) {
-			qh = list_entry(tmp, struct uhci_qh, list);
+			qh = list_entry(tmp, struct uhci_qh, node);
 			tmp = tmp->next;
-			out += uhci_show_qh(qh, out, len - (out - buf), 4);
+			if (++cnt <= 10)
+				out += uhci_show_qh(qh, out,
+						len - (out - buf), 4);
 		}
+		if ((cnt -= 10) > 0)
+			out += sprintf(out, " Skipped %d QHs\n", cnt);
 
-		if (i < UHCI_NUM_SKELQH - 1) {
+		if (i > 1 && i < UHCI_NUM_SKELQH - 1) {
 			if (qh->link !=
 			    (cpu_to_le32(uhci->skelqh[j]->dma_handle) | UHCI_PTR_QH))
 				out += sprintf(out, " last QH not linked to next skeleton!\n");
 		}
 	}
 
-	if (debug > 2)
-		out += uhci_show_lists(uhci, out, len - (out - buf));
-
-	spin_unlock_irqrestore(&uhci->lock, flags);
-
 	return out - buf;
 }
@@ -541,6 +402,7 @@ static int uhci_debug_open(struct inode *inode, struct file *file)
 	struct uhci_hcd *uhci = inode->u.generic_ip;
 	struct uhci_debug *up;
 	int ret = -ENOMEM;
+	unsigned long flags;
 
 	lock_kernel();
 	up = kmalloc(sizeof(*up), GFP_KERNEL);
@@ -553,7 +415,9 @@ static int uhci_debug_open(struct inode *inode, struct file *file)
 		goto out;
 	}
 
+	spin_lock_irqsave(&uhci->lock, flags);
 	up->size = uhci_sprint_schedule(uhci, up->data, MAX_OUTPUT);
+	spin_unlock_irqrestore(&uhci->lock, flags);
 
 	file->private_data = up;


@@ -54,7 +54,7 @@
 /*
  * Version Information
  */
-#define DRIVER_VERSION "v2.3"
+#define DRIVER_VERSION "v3.0"
 #define DRIVER_AUTHOR "Linus 'Frodo Rabbit' Torvalds, Johannes Erdfelt, \
 Randy Dunlap, Georg Acher, Deti Fliegl, Thomas Sailer, Roman Weissgaerber, \
 Alan Stern"
@@ -489,15 +489,11 @@ static int uhci_start(struct usb_hcd *hcd)
 	uhci->fsbrtimeout = 0;
 
 	spin_lock_init(&uhci->lock);
 
-	INIT_LIST_HEAD(&uhci->qh_remove_list);
 	INIT_LIST_HEAD(&uhci->td_remove_list);
-	INIT_LIST_HEAD(&uhci->urb_remove_list);
 	INIT_LIST_HEAD(&uhci->urb_list);
 	INIT_LIST_HEAD(&uhci->complete_list);
+	INIT_LIST_HEAD(&uhci->idle_qh_list);
 
 	init_waitqueue_head(&uhci->waitqh);
@@ -540,7 +536,7 @@ static int uhci_start(struct usb_hcd *hcd)
 	}
 
 	for (i = 0; i < UHCI_NUM_SKELQH; i++) {
-		uhci->skelqh[i] = uhci_alloc_qh(uhci);
+		uhci->skelqh[i] = uhci_alloc_qh(uhci, NULL, NULL);
 		if (!uhci->skelqh[i]) {
 			dev_err(uhci_dev(uhci), "unable to allocate QH\n");
 			goto err_alloc_skelqh;
@@ -557,13 +553,17 @@ static int uhci_start(struct usb_hcd *hcd)
 	uhci->skel_int16_qh->link =
 	uhci->skel_int8_qh->link =
 	uhci->skel_int4_qh->link =
-	uhci->skel_int2_qh->link =
-			cpu_to_le32(uhci->skel_int1_qh->dma_handle) | UHCI_PTR_QH;
-	uhci->skel_int1_qh->link = cpu_to_le32(uhci->skel_ls_control_qh->dma_handle) | UHCI_PTR_QH;
-	uhci->skel_ls_control_qh->link = cpu_to_le32(uhci->skel_fs_control_qh->dma_handle) | UHCI_PTR_QH;
-	uhci->skel_fs_control_qh->link = cpu_to_le32(uhci->skel_bulk_qh->dma_handle) | UHCI_PTR_QH;
-	uhci->skel_bulk_qh->link = cpu_to_le32(uhci->skel_term_qh->dma_handle) | UHCI_PTR_QH;
+	uhci->skel_int2_qh->link = UHCI_PTR_QH |
+			cpu_to_le32(uhci->skel_int1_qh->dma_handle);
+	uhci->skel_int1_qh->link = UHCI_PTR_QH |
+			cpu_to_le32(uhci->skel_ls_control_qh->dma_handle);
+	uhci->skel_ls_control_qh->link = UHCI_PTR_QH |
+			cpu_to_le32(uhci->skel_fs_control_qh->dma_handle);
+	uhci->skel_fs_control_qh->link = UHCI_PTR_QH |
+			cpu_to_le32(uhci->skel_bulk_qh->dma_handle);
+	uhci->skel_bulk_qh->link = UHCI_PTR_QH |
+			cpu_to_le32(uhci->skel_term_qh->dma_handle);
 
 	/* This dummy TD is to work around a bug in Intel PIIX controllers */
 	uhci_fill_td(uhci->term_td, 0, uhci_explen(0) |
@@ -589,15 +589,15 @@ static int uhci_start(struct usb_hcd *hcd)
 		/*
 		 * ffs (Find First bit Set) does exactly what we need:
-		 * 1,3,5,...  => ffs = 0 => use skel_int2_qh = skelqh[6],
-		 * 2,6,10,... => ffs = 1 => use skel_int4_qh = skelqh[5], etc.
-		 * ffs > 6 => not on any high-period queue, so use
-		 * skel_int1_qh = skelqh[7].
+		 * 1,3,5,...  => ffs = 0 => use skel_int2_qh = skelqh[8],
+		 * 2,6,10,... => ffs = 1 => use skel_int4_qh = skelqh[7], etc.
+		 * ffs >= 7 => not on any high-period queue, so use
+		 * skel_int1_qh = skelqh[9].
 		 * Add UHCI_NUMFRAMES to insure at least one bit is set.
 		 */
-		irq = 6 - (int) __ffs(i + UHCI_NUMFRAMES);
-		if (irq < 0)
-			irq = 7;
+		irq = 8 - (int) __ffs(i + UHCI_NUMFRAMES);
+		if (irq <= 1)
+			irq = 9;
 
 		/* Only place we don't use the frame list routines */
 		uhci->frame[i] = UHCI_PTR_QH |
@@ -767,13 +767,30 @@ static int uhci_resume(struct usb_hcd *hcd)
 }
 #endif
 
-/* Wait until all the URBs for a particular device/endpoint are gone */
+/* Wait until a particular device/endpoint's QH is idle, and free it */
 static void uhci_hcd_endpoint_disable(struct usb_hcd *hcd,
-		struct usb_host_endpoint *ep)
+		struct usb_host_endpoint *hep)
 {
 	struct uhci_hcd *uhci = hcd_to_uhci(hcd);
+	struct uhci_qh *qh;
 
-	wait_event_interruptible(uhci->waitqh, list_empty(&ep->urb_list));
+	spin_lock_irq(&uhci->lock);
+	qh = (struct uhci_qh *) hep->hcpriv;
+	if (qh == NULL)
+		goto done;
+
+	while (qh->state != QH_STATE_IDLE) {
+		++uhci->num_waiting;
+		spin_unlock_irq(&uhci->lock);
+		wait_event_interruptible(uhci->waitqh,
+				qh->state == QH_STATE_IDLE);
+		spin_lock_irq(&uhci->lock);
+		--uhci->num_waiting;
+	}
+
+	uhci_free_qh(uhci, qh);
+done:
+	spin_unlock_irq(&uhci->lock);
 }
 
 static int uhci_hcd_get_frame_number(struct usb_hcd *hcd)
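
The wake-up half of this handshake lives in uhci-q.c, whose diff is suppressed
below.  Conceptually, when the driver finally moves a QH to QH_STATE_IDLE it
checks uhci->num_waiting and wakes uhci->waitqh.  A hedged sketch of that step
follows; it is not the suppressed code, and the helper name is only a guess:

/* Illustrative sketch of the wake-up side; the real code is in the
 * suppressed uhci-q.c diff, and this function name is assumed, not quoted. */
static void sketch_make_qh_idle(struct uhci_hcd *uhci, struct uhci_qh *qh)
{
	list_move(&qh->node, &uhci->idle_qh_list);	/* off the scheduling list */
	qh->state = QH_STATE_IDLE;

	/* uhci_hcd_endpoint_disable() may be sleeping on waitqh (see above) */
	if (uhci->num_waiting)
		wake_up_all(&uhci->waitqh);
}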


@@ -28,8 +28,9 @@
 #define   USBSTS_USBINT		0x0001	/* Interrupt due to IOC */
 #define   USBSTS_ERROR		0x0002	/* Interrupt due to error */
 #define   USBSTS_RD		0x0004	/* Resume Detect */
-#define   USBSTS_HSE		0x0008	/* Host System Error - basically PCI problems */
-#define   USBSTS_HCPE		0x0010	/* Host Controller Process Error - the scripts were buggy */
+#define   USBSTS_HSE		0x0008	/* Host System Error: PCI problems */
+#define   USBSTS_HCPE		0x0010	/* Host Controller Process Error:
+					 * the schedule is buggy */
 #define   USBSTS_HCH		0x0020	/* HC Halted */
 
 /* Interrupt enable register */
@@ -47,7 +48,8 @@
 /* USB port status and control registers */
 #define USBPORTSC1	16
 #define USBPORTSC2	18
-#define   USBPORTSC_CCS		0x0001	/* Current Connect Status ("device present") */
+#define   USBPORTSC_CCS		0x0001	/* Current Connect Status
+					 * ("device present") */
 #define   USBPORTSC_CSC		0x0002	/* Connect Status Change */
 #define   USBPORTSC_PE		0x0004	/* Port Enable */
 #define   USBPORTSC_PEC		0x0008	/* Port Enable Change */
@@ -71,15 +73,16 @@
 #define   USBLEGSUP_RWC		0x8f00	/* the R/WC bits */
 #define   USBLEGSUP_RO		0x5040	/* R/O and reserved bits */
 
-#define UHCI_PTR_BITS		cpu_to_le32(0x000F)
-#define UHCI_PTR_TERM		cpu_to_le32(0x0001)
-#define UHCI_PTR_QH		cpu_to_le32(0x0002)
-#define UHCI_PTR_DEPTH		cpu_to_le32(0x0004)
-#define UHCI_PTR_BREADTH	cpu_to_le32(0x0000)
+#define UHCI_PTR_BITS		__constant_cpu_to_le32(0x000F)
+#define UHCI_PTR_TERM		__constant_cpu_to_le32(0x0001)
+#define UHCI_PTR_QH		__constant_cpu_to_le32(0x0002)
+#define UHCI_PTR_DEPTH		__constant_cpu_to_le32(0x0004)
+#define UHCI_PTR_BREADTH	__constant_cpu_to_le32(0x0000)
 
 #define UHCI_NUMFRAMES		1024	/* in the frame list [array] */
 #define UHCI_MAX_SOF_NUMBER	2047	/* in an SOF packet */
-#define CAN_SCHEDULE_FRAMES	1000	/* how far future frames can be scheduled */
+#define CAN_SCHEDULE_FRAMES	1000	/* how far in the future frames
+					 * can be scheduled */
 
 /*
@@ -87,38 +90,54 @@
  */
 
 /*
- * One role of a QH is to hold a queue of TDs for some endpoint.  Each QH is
- * used with one URB, and qh->element (updated by the HC) is either:
- *   - the next unprocessed TD for the URB, or
- *   - UHCI_PTR_TERM (when there's no more traffic for this endpoint), or
- *   - the QH for the next URB queued to the same endpoint.
+ * One role of a QH is to hold a queue of TDs for some endpoint.  One QH goes
+ * with each endpoint, and qh->element (updated by the HC) is either:
+ *   - the next unprocessed TD in the endpoint's queue, or
+ *   - UHCI_PTR_TERM (when there's no more traffic for this endpoint).
  *
  * The other role of a QH is to serve as a "skeleton" framelist entry, so we
  * can easily splice a QH for some endpoint into the schedule at the right
  * place.  Then qh->element is UHCI_PTR_TERM.
  *
- * In the frame list, qh->link maintains a list of QHs seen by the HC:
+ * In the schedule, qh->link maintains a list of QHs seen by the HC:
  *     skel1 --> ep1-qh --> ep2-qh --> ... --> skel2 --> ...
+ *
+ * qh->node is the software equivalent of qh->link.  The differences
+ * are that the software list is doubly-linked and QHs in the UNLINKING
+ * state are on the software list but not the hardware schedule.
+ *
+ * For bookkeeping purposes we maintain QHs even for Isochronous endpoints,
+ * but they never get added to the hardware schedule.
  */
+#define QH_STATE_IDLE		1	/* QH is not being used */
+#define QH_STATE_UNLINKING	2	/* QH has been removed from the
+					 * schedule but the hardware may
+					 * still be using it */
+#define QH_STATE_ACTIVE		3	/* QH is on the schedule */
+
 struct uhci_qh {
 	/* Hardware fields */
-	__le32 link;			/* Next queue */
-	__le32 element;			/* Queue element pointer */
+	__le32 link;		/* Next QH in the schedule */
+	__le32 element;		/* Queue element (TD) pointer */
 
 	/* Software fields */
 	dma_addr_t dma_handle;
 
-	struct urb_priv *urbp;
+	struct list_head node;		/* Node in the list of QHs */
+	struct usb_host_endpoint *hep;	/* Endpoint information */
+	struct usb_device *udev;
+	struct list_head queue;		/* Queue of urbps for this QH */
+	struct uhci_qh *skel;		/* Skeleton for this QH */
 
-	struct list_head list;
-	struct list_head remove_list;
+	unsigned int unlink_frame;	/* When the QH was unlinked */
+	int state;			/* QH_STATE_xxx; see above */
 } __attribute__((aligned(16)));
 
 /*
  * We need a special accessor for the element pointer because it is
  * subject to asynchronous updates by the controller.
  */
-static __le32 inline qh_element(struct uhci_qh *qh) {
+static inline __le32 qh_element(struct uhci_qh *qh) {
 	__le32 element = qh->element;
 
 	barrier();
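
To make the three QH states above concrete, here is a small self-contained
sketch of the intended lifecycle.  It is illustrative only: the real
transitions are in the suppressed uhci-q.c diff, and the "wait until the frame
counter has moved past unlink_frame" rule is an assumption about how that
field is meant to be used.

/* Sketch of the QH state machine described in the comments above. */
enum sketch_state { S_IDLE = 1, S_UNLINKING = 2, S_ACTIVE = 3 };

struct sketch_qh {
	enum sketch_state state;
	unsigned int unlink_frame;	/* frame counter value at unlink time */
};

/* ACTIVE: linked into the hardware schedule via a skeleton QH */
static void sketch_link(struct sketch_qh *qh)
{
	qh->state = S_ACTIVE;
}

/* UNLINKING: off the hardware chain, but the HC may still hold a cached
 * pointer to it until the frame counter moves on */
static void sketch_unlink(struct sketch_qh *qh, unsigned int cur_frame)
{
	qh->state = S_UNLINKING;
	qh->unlink_frame = cur_frame;
}

/* IDLE: safe to reuse or free; endpoint_disable() waits for this state */
static int sketch_make_idle(struct sketch_qh *qh, unsigned int cur_frame)
{
	if (qh->state == S_UNLINKING && cur_frame != qh->unlink_frame) {
		qh->state = S_IDLE;
		return 1;
	}
	return 0;
}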
@@ -149,11 +168,13 @@ static __le32 inline qh_element(struct uhci_qh *qh) {
 #define TD_CTRL_ACTLEN_MASK	0x7FF	/* actual length, encoded as n - 1 */
 
 #define TD_CTRL_ANY_ERROR	(TD_CTRL_STALLED | TD_CTRL_DBUFERR | \
-				TD_CTRL_BABBLE | TD_CTRL_CRCTIME | TD_CTRL_BITSTUFF)
+				TD_CTRL_BABBLE | TD_CTRL_CRCTIME | \
+				TD_CTRL_BITSTUFF)
 
 #define uhci_maxerr(err)		((err) << TD_CTRL_C_ERR_SHIFT)
 #define uhci_status_bits(ctrl_sts)	((ctrl_sts) & 0xF60000)
-#define uhci_actual_length(ctrl_sts)	(((ctrl_sts) + 1) & TD_CTRL_ACTLEN_MASK) /* 1-based */
+#define uhci_actual_length(ctrl_sts)	(((ctrl_sts) + 1) & \
+					TD_CTRL_ACTLEN_MASK) /* 1-based */
 
 /*
  * for TD <info>: (a.k.a. Token)
@@ -163,7 +184,7 @@ static __le32 inline qh_element(struct uhci_qh *qh) {
 #define TD_TOKEN_TOGGLE_SHIFT	19
 #define TD_TOKEN_TOGGLE		(1 << 19)
 #define TD_TOKEN_EXPLEN_SHIFT	21
-#define TD_TOKEN_EXPLEN_MASK	0x7FF		/* expected length, encoded as n - 1 */
+#define TD_TOKEN_EXPLEN_MASK	0x7FF	/* expected length, encoded as n-1 */
 #define TD_TOKEN_PID_MASK	0xFF
 
 #define uhci_explen(len)	((((len) - 1) & TD_TOKEN_EXPLEN_MASK) << \
@@ -187,7 +208,7 @@ static __le32 inline qh_element(struct uhci_qh *qh) {
  * sw space after the TD entry.
  *
  * td->link points to either another TD (not necessarily for the same urb or
- * even the same endpoint), or nothing (PTR_TERM), or a QH (for queued urbs).
+ * even the same endpoint), or nothing (PTR_TERM), or a QH.
  */
 struct uhci_td {
 	/* Hardware fields */
@@ -210,7 +231,7 @@ struct uhci_td {
  * We need a special accessor for the control/status word because it is
  * subject to asynchronous updates by the controller.
  */
-static u32 inline td_status(struct uhci_td *td) {
+static inline u32 td_status(struct uhci_td *td) {
 	__le32 status = td->status;
 
 	barrier();
@@ -223,17 +244,14 @@ static u32 inline td_status(struct uhci_td *td) {
  */
 
 /*
- * The UHCI driver places Interrupt, Control and Bulk into QHs both
- * to group together TDs for one transfer, and also to facilitate queuing
- * of URBs. To make it easy to insert entries into the schedule, we have
- * a skeleton of QHs for each predefined Interrupt latency, low-speed
- * control, full-speed control and terminating QH (see explanation for
- * the terminating QH below).
+ * The UHCI driver uses QHs with Interrupt, Control and Bulk URBs for
+ * automatic queuing.  To make it easy to insert entries into the schedule,
+ * we have a skeleton of QHs for each predefined Interrupt latency,
+ * low-speed control, full-speed control, bulk, and terminating QH
+ * (see explanation for the terminating QH below).
  *
  * When we want to add a new QH, we add it to the end of the list for the
- * skeleton QH.
- *
- * For instance, the queue can look like this:
+ * skeleton QH.  For instance, the schedule list can look like this:
  *
  * skel int128 QH
  * dev 1 interrupt QH
@@ -256,26 +274,31 @@ static u32 inline td_status(struct uhci_td *td) {
  *    - To loop back to the full-speed control queue for full-speed bandwidth
  *      reclamation.
  *
- * Isochronous transfers are stored before the start of the skeleton
- * schedule and don't use QHs.  While the UHCI spec doesn't forbid the
- * use of QHs for Isochronous, it doesn't use them either.  And the spec
- * says that queues never advance on an error completion status, which
- * makes them totally unsuitable for Isochronous transfers.
+ * There's a special skeleton QH for Isochronous QHs.  It never appears
+ * on the schedule, and Isochronous TDs go on the schedule before the
+ * the skeleton QHs.  The hardware accesses them directly rather than
+ * through their QH, which is used only for bookkeeping purposes.
+ * While the UHCI spec doesn't forbid the use of QHs for Isochronous,
+ * it doesn't use them either.  And the spec says that queues never
+ * advance on an error completion status, which makes them totally
+ * unsuitable for Isochronous transfers.
  */
-#define UHCI_NUM_SKELQH	12
-#define skel_int128_qh		skelqh[0]
-#define skel_int64_qh		skelqh[1]
-#define skel_int32_qh		skelqh[2]
-#define skel_int16_qh		skelqh[3]
-#define skel_int8_qh		skelqh[4]
-#define skel_int4_qh		skelqh[5]
-#define skel_int2_qh		skelqh[6]
-#define skel_int1_qh		skelqh[7]
-#define skel_ls_control_qh	skelqh[8]
-#define skel_fs_control_qh	skelqh[9]
-#define skel_bulk_qh		skelqh[10]
-#define skel_term_qh		skelqh[11]
+#define UHCI_NUM_SKELQH	14
+#define skel_unlink_qh		skelqh[0]
+#define skel_iso_qh		skelqh[1]
+#define skel_int128_qh		skelqh[2]
+#define skel_int64_qh		skelqh[3]
+#define skel_int32_qh		skelqh[4]
+#define skel_int16_qh		skelqh[5]
+#define skel_int8_qh		skelqh[6]
+#define skel_int4_qh		skelqh[7]
+#define skel_int2_qh		skelqh[8]
+#define skel_int1_qh		skelqh[9]
+#define skel_ls_control_qh	skelqh[10]
+#define skel_fs_control_qh	skelqh[11]
+#define skel_bulk_qh		skelqh[12]
+#define skel_term_qh		skelqh[13]
 
 /*
  * Search tree for determining where <interval> fits in the skelqh[]
@@ -293,21 +316,21 @@ static inline int __interval_to_skel(int interval)
 	if (interval < 16) {
 		if (interval < 4) {
 			if (interval < 2)
-				return 7;	/* int1 for 0-1 ms */
-			return 6;	/* int2 for 2-3 ms */
+				return 9;	/* int1 for 0-1 ms */
+			return 8;	/* int2 for 2-3 ms */
 		}
 		if (interval < 8)
-			return 5;	/* int4 for 4-7 ms */
-		return 4;	/* int8 for 8-15 ms */
+			return 7;	/* int4 for 4-7 ms */
+		return 6;	/* int8 for 8-15 ms */
 	}
 	if (interval < 64) {
 		if (interval < 32)
-			return 3;	/* int16 for 16-31 ms */
-		return 2;	/* int32 for 32-63 ms */
+			return 5;	/* int16 for 16-31 ms */
+		return 4;	/* int32 for 32-63 ms */
 	}
 	if (interval < 128)
-		return 1;	/* int64 for 64-127 ms */
-	return 0;	/* int128 for 128-255 ms (Max.) */
+		return 3;	/* int64 for 64-127 ms */
+	return 2;	/* int128 for 128-255 ms (Max.) */
 }
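
A quick self-contained check of the renumbering above; the body mirrors the
new version of __interval_to_skel() shown in the diff, and only the local
wrapper name, the asserts, and main() are added here for illustration.

#include <assert.h>

static int interval_to_skel_check(int interval)
{
	if (interval < 16) {
		if (interval < 4) {
			if (interval < 2)
				return 9;	/* int1 */
			return 8;		/* int2 */
		}
		if (interval < 8)
			return 7;		/* int4 */
		return 6;			/* int8 */
	}
	if (interval < 64) {
		if (interval < 32)
			return 5;		/* int16 */
		return 4;		/* int32 */
	}
	if (interval < 128)
		return 3;		/* int64 */
	return 2;			/* int128 */
}

int main(void)
{
	assert(interval_to_skel_check(1) == 9);		/* skel_int1_qh = skelqh[9] */
	assert(interval_to_skel_check(8) == 6);		/* skel_int8_qh = skelqh[6] */
	assert(interval_to_skel_check(255) == 2);	/* skel_int128_qh = skelqh[2] */
	return 0;
}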
@@ -363,12 +386,12 @@ struct uhci_hcd {
 	spinlock_t lock;
 
 	dma_addr_t frame_dma_handle;	/* Hardware frame list */
 	__le32 *frame;
 	void **frame_cpu;		/* CPU's frame list */
 
 	int fsbr;			/* Full-speed bandwidth reclamation */
 	unsigned long fsbrtimeout;	/* FSBR delay */
 
 	enum uhci_rh_state rh_state;
 	unsigned long auto_stop_time;	/* When to AUTO_STOP */
@@ -392,24 +415,19 @@ struct uhci_hcd {
 	/* Main list of URBs currently controlled by this HC */
 	struct list_head urb_list;
 
-	/* List of QHs that are done, but waiting to be unlinked (race) */
-	struct list_head qh_remove_list;
-	unsigned int qh_remove_age;	/* Age in frames */
-
 	/* List of TDs that are done, but waiting to be freed (race) */
 	struct list_head td_remove_list;
 	unsigned int td_remove_age;	/* Age in frames */
 
-	/* List of asynchronously unlinked URBs */
-	struct list_head urb_remove_list;
-	unsigned int urb_remove_age;	/* Age in frames */
-
 	/* List of URBs awaiting completion callback */
 	struct list_head complete_list;
 
+	struct list_head idle_qh_list;	/* Where the idle QHs live */
+
 	int rh_numports;		/* Number of root-hub ports */
 
 	wait_queue_head_t waitqh;	/* endpoint_disable waiters */
+	int num_waiting;		/* Number of waiters */
 };
 
 /* Convert between a usb_hcd pointer and the corresponding uhci_hcd */
@@ -430,22 +448,19 @@ static inline struct usb_hcd *uhci_to_hcd(struct uhci_hcd *uhci)
  */
 struct urb_priv {
 	struct list_head urb_list;
+	struct list_head node;		/* Node in the QH's urbp list */
 
 	struct urb *urb;
 
 	struct uhci_qh *qh;		/* QH for this URB */
 	struct list_head td_list;
 
-	unsigned fsbr : 1;		/* URB turned on FSBR */
-	unsigned fsbr_timeout : 1;	/* URB timed out on FSBR */
-	unsigned queued : 1;		/* QH was queued (not linked in) */
-	unsigned short_control_packet : 1;	/* If we get a short packet during */
-					/*  a control transfer, retrigger */
-					/*  the status phase */
-
 	unsigned long fsbrtime;		/* In jiffies */
 
-	struct list_head queue_list;
+	unsigned fsbr : 1;		/* URB turned on FSBR */
+	unsigned fsbr_timeout : 1;	/* URB timed out on FSBR */
+	unsigned short_transfer : 1;	/* URB got a short transfer, no
+					 * need to rescan */
 };

File diff suppressed because it is too large