USB: EHCI cpufreq fix

EHCI controllers that don't cache enough microframes can get missed
microframe (MMF) errors when a CPU frequency change occurs between the
start and completion of a split interrupt transaction, because reads from
main memory are delayed by CPU cache snooping during the transition.

This patch adds a cpufreq notifier to the EHCI driver that will
inactivate split interrupt transactions during frequency transitions.
It was tested on Intel ICH7 and Serverworks/Broadcom HT1000 EHCI
controllers.

Signed-off-by: Stuart Hayes <stuart_hayes@dell.com>
Signed-off-by: David Brownell <dbrownell@users.sourceforge.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
commit 196705c9bb (parent ec22559e0b)
Author: Stuart_Hayes@Dell.com
Date:   2007-05-03 08:58:49 -07:00
Committer: Greg Kroah-Hartman

 5 files changed, 214 insertions(+), 0 deletions(-)
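
The patch builds on the kernel's cpufreq transition-notifier chain: a driver
registers a notifier_block and is called back with CPUFREQ_PRECHANGE before and
CPUFREQ_POSTCHANGE after every frequency change. A minimal sketch of that
generic pattern (the my_driver_* names are illustrative only, not part of this
patch):

#include <linux/cpufreq.h>
#include <linux/notifier.h>

/* Illustrative sketch of the generic transition-notifier pattern;
 * not part of the patch below. */
static int my_driver_cpufreq_notifier(struct notifier_block *nb,
				      unsigned long val, void *data)
{
	switch (val) {
	case CPUFREQ_PRECHANGE:
		/* quiesce work that is sensitive to memory latency */
		break;
	case CPUFREQ_POSTCHANGE:
		/* resume normal operation */
		break;
	}
	return 0;
}

static struct notifier_block my_driver_cpufreq_nb = {
	.notifier_call = my_driver_cpufreq_notifier,
};

/* at init:     cpufreq_register_notifier(&my_driver_cpufreq_nb,
 *                                        CPUFREQ_TRANSITION_NOTIFIER);
 * at teardown: cpufreq_unregister_notifier(&my_driver_cpufreq_nb,
 *                                          CPUFREQ_TRANSITION_NOTIFIER);
 */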

@@ -273,6 +273,58 @@ static void ehci_work(struct ehci_hcd *ehci);

/*-------------------------------------------------------------------------*/

#ifdef CONFIG_CPU_FREQ

#include <linux/cpufreq.h>

static void ehci_cpufreq_pause (struct ehci_hcd *ehci)
{
	unsigned long	flags;

	spin_lock_irqsave(&ehci->lock, flags);
	if (!ehci->cpufreq_changing++)
		qh_inactivate_split_intr_qhs(ehci);
	spin_unlock_irqrestore(&ehci->lock, flags);
}

static void ehci_cpufreq_unpause (struct ehci_hcd *ehci)
{
	unsigned long	flags;

	spin_lock_irqsave(&ehci->lock, flags);
	if (!--ehci->cpufreq_changing)
		qh_reactivate_split_intr_qhs(ehci);
	spin_unlock_irqrestore(&ehci->lock, flags);
}

/*
 * ehci_cpufreq_notifier is needed to avoid MMF errors that occur when
 * EHCI controllers that don't cache many uframes get delayed trying to
 * read main memory during CPU frequency transitions.  This can cause
 * split interrupt transactions to not be completed in the required uframe.
 * This has been observed on the Broadcom/ServerWorks HT1000 controller.
 */
static int ehci_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
				 void *data)
{
	struct ehci_hcd *ehci = container_of(nb, struct ehci_hcd,
					     cpufreq_transition);

	switch (val) {
	case CPUFREQ_PRECHANGE:
		ehci_cpufreq_pause(ehci);
		break;
	case CPUFREQ_POSTCHANGE:
		ehci_cpufreq_unpause(ehci);
		break;
	}
	return 0;
}

#endif

/*-------------------------------------------------------------------------*/

static void ehci_watchdog (unsigned long param)
{
	struct ehci_hcd		*ehci = (struct ehci_hcd *) param;
@@ -404,6 +456,10 @@ static void ehci_stop (struct usb_hcd *hcd)
	ehci_writel(ehci, 0, &ehci->regs->intr_enable);
	spin_unlock_irq(&ehci->lock);

#ifdef CONFIG_CPU_FREQ
	cpufreq_unregister_notifier(&ehci->cpufreq_transition,
				    CPUFREQ_TRANSITION_NOTIFIER);
#endif

	/* let companion controllers work when we aren't */
	ehci_writel(ehci, 0, &ehci->regs->configured_flag);
@@ -509,6 +565,17 @@ static int ehci_init(struct usb_hcd *hcd)
	}
	ehci->command = temp;

#ifdef CONFIG_CPU_FREQ
	INIT_LIST_HEAD(&ehci->split_intr_qhs);
	/*
	 * If the EHCI controller caches enough uframes, this probably
	 * isn't needed unless there are so many low/full speed devices
	 * that the controller can't cache them all.
	 */
	ehci->cpufreq_transition.notifier_call = ehci_cpufreq_notifier;
	cpufreq_register_notifier(&ehci->cpufreq_transition,
				  CPUFREQ_TRANSITION_NOTIFIER);
#endif

	return 0;
}

@@ -94,6 +94,9 @@ static struct ehci_qh *ehci_qh_alloc (struct ehci_hcd *ehci, gfp_t flags)
	qh->qh_dma = dma;
	// INIT_LIST_HEAD (&qh->qh_list);
	INIT_LIST_HEAD (&qh->qtd_list);
#ifdef CONFIG_CPU_FREQ
	INIT_LIST_HEAD (&qh->split_intr_qhs);
#endif

	/* dummy td enables safe urb queuing */
	qh->dummy = ehci_qtd_alloc (ehci, flags);

@@ -311,6 +311,10 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
		struct urb	*urb;
		u32		token = 0;

		/* ignore QHs that are currently inactive */
		if (qh->hw_info1 & __constant_cpu_to_le32(QH_INACTIVATE))
			break;

		qtd = list_entry (entry, struct ehci_qtd, qtd_list);
		urb = qtd->urb;

@@ -473,6 +473,111 @@ static int disable_periodic (struct ehci_hcd *ehci)
}

/*-------------------------------------------------------------------------*/

#ifdef CONFIG_CPU_FREQ

/* ignore/inactivate bit in QH hw_info1 */
#define INACTIVATE_BIT __constant_cpu_to_le32(QH_INACTIVATE)

#define HALT_BIT __constant_cpu_to_le32(QTD_STS_HALT)
#define ACTIVE_BIT __constant_cpu_to_le32(QTD_STS_ACTIVE)
#define STATUS_BIT __constant_cpu_to_le32(QTD_STS_STS)

static int safe_to_modify_i (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
	int now; /* current (frame * 8) + uframe */
	int prev_start, next_start; /* uframes from/to split start */
	int start_uframe = ffs(le32_to_cpup (&qh->hw_info2) & QH_SMASK);
	int end_uframe = fls((le32_to_cpup (&qh->hw_info2) & QH_CMASK) >> 8);
	int split_duration = end_uframe - start_uframe;

	now = readl(&ehci->regs->frame_index) % (ehci->periodic_size << 3);

	next_start = ((1024 << 3) + (qh->start << 3) + start_uframe - now) %
		(qh->period << 3);
	prev_start = (qh->period << 3) - next_start;

	/*
	 * Make sure there will be at least one uframe when qh is safe.
	 */
	if ((qh->period << 3) <= (ehci->i_thresh + 2 + split_duration))
		/* never safe */
		return -EINVAL;

	/*
	 * Wait 1 uframe after transaction should have started, to make
	 * sure controller has time to write back overlay, so we can
	 * check QTD_STS_STS to see if transaction is in progress.
	 */
	if ((next_start > ehci->i_thresh) && (prev_start > 1))
		/* safe to set "i" bit if split isn't in progress */
		return (qh->hw_token & STATUS_BIT) ? 0 : 1;
	else
		return 0;
}

/* Set inactivate bit for all the split interrupt QHs. */
static void qh_inactivate_split_intr_qhs (struct ehci_hcd *ehci)
{
	struct ehci_qh *qh;
	int not_done, safe;

	do {
		not_done = 0;
		list_for_each_entry(qh, &ehci->split_intr_qhs,
				split_intr_qhs) {
			if (qh->hw_info1 & INACTIVATE_BIT)
				/* already off */
				continue;
			/*
			 * To avoid setting "I" after the start split happens,
			 * don't set it if the QH might be cached in the
			 * controller.  Some HCs (Broadcom/ServerWorks HT1000)
			 * will stop in the middle of a split transaction when
			 * the "I" bit is set.
			 */
			safe = safe_to_modify_i(ehci, qh);
			if (safe == 0) {
				not_done = 1;
			} else if (safe > 0) {
				qh->was_active = qh->hw_token & ACTIVE_BIT;
				qh->hw_info1 |= INACTIVATE_BIT;
			}
		}
	} while (not_done);
	wmb();
}

static void qh_reactivate_split_intr_qhs (struct ehci_hcd *ehci)
{
	struct ehci_qh *qh;
	u32 token;
	int not_done, safe;

	do {
		not_done = 0;
		list_for_each_entry(qh, &ehci->split_intr_qhs, split_intr_qhs) {
			if (!(qh->hw_info1 & INACTIVATE_BIT)) /* already on */
				continue;
			/*
			 * Don't reactivate if cached, or controller might
			 * overwrite overlay after we modify it!
			 */
			safe = safe_to_modify_i(ehci, qh);
			if (safe == 0) {
				not_done = 1;
			} else if (safe > 0) {
				/* See EHCI 1.0 section 4.15.2.4. */
				token = qh->hw_token;
				qh->hw_token = (token | HALT_BIT) & ~ACTIVE_BIT;
				wmb();
				qh->hw_info1 &= ~INACTIVATE_BIT;
				wmb();
				qh->hw_token = (token & ~HALT_BIT) | qh->was_active;
			}
		}
	} while (not_done);
}
#endif

/* periodic schedule slots have iso tds (normal or split) first, then a
 * sparse tree for active interrupt transfers.
@@ -490,6 +595,17 @@ static int qh_link_periodic (struct ehci_hcd *ehci, struct ehci_qh *qh)
		period, le32_to_cpup (&qh->hw_info2) & (QH_CMASK | QH_SMASK),
		qh, qh->start, qh->usecs, qh->c_usecs);

#ifdef CONFIG_CPU_FREQ
	/*
	 * If low/full speed interrupt QHs are inactive (because of
	 * cpufreq changing processor speeds), start QH with I flag set --
	 * it will automatically be cleared when cpufreq is done.
	 */
	if (ehci->cpufreq_changing)
		if (!(qh->hw_info1 & (cpu_to_le32(1 << 13))))
			qh->hw_info1 |= INACTIVATE_BIT;
#endif

	/* high bandwidth, or otherwise every microframe */
	if (period == 0)
		period = 1;
@@ -538,6 +654,12 @@ static int qh_link_periodic (struct ehci_hcd *ehci, struct ehci_qh *qh)
		? ((qh->usecs + qh->c_usecs) / qh->period)
		: (qh->usecs * 8);

#ifdef CONFIG_CPU_FREQ
	/* add qh to list of low/full speed interrupt QHs, if applicable */
	if (!(qh->hw_info1 & (cpu_to_le32(1 << 13)))) {
		list_add(&qh->split_intr_qhs, &ehci->split_intr_qhs);
	}
#endif

	/* maybe enable periodic schedule processing */
	if (!ehci->periodic_sched++)
		return enable_periodic (ehci);
@@ -557,6 +679,13 @@ static void qh_unlink_periodic (struct ehci_hcd *ehci, struct ehci_qh *qh)
	// THEN
	//   qh->hw_info1 |= __constant_cpu_to_le32 (1 << 7 /* "ignore" */);

#ifdef CONFIG_CPU_FREQ
	/* remove qh from list of low/full speed interrupt QHs */
	if (!(qh->hw_info1 & (cpu_to_le32(1 << 13)))) {
		list_del_init(&qh->split_intr_qhs);
	}
#endif

	/* high bandwidth, or otherwise part of every microframe */
	if ((period = qh->period) == 0)
		period = 1;

@@ -55,6 +55,12 @@ struct ehci_hcd {	/* one per controller */
	__u32			hcs_params;	/* cached register copy */
	spinlock_t		lock;

#ifdef CONFIG_CPU_FREQ
	struct notifier_block	cpufreq_transition;
	int			cpufreq_changing;
	struct list_head	split_intr_qhs;
#endif

	/* async schedule support */
	struct ehci_qh		*async;
	struct ehci_qh		*reclaim;
@@ -395,6 +401,7 @@ struct ehci_qh {
	__le32			hw_next;	/* see EHCI 3.6.1 */
	__le32			hw_info1;	/* see EHCI 3.6.2 */
#define	QH_HEAD		0x00008000
#define	QH_INACTIVATE	0x00000080
	__le32			hw_info2;	/* see EHCI 3.6.2 */
#define	QH_SMASK	0x000000ff
#define	QH_CMASK	0x0000ff00
@@ -437,6 +444,10 @@ struct ehci_qh {
	unsigned short		start;		/* where polling starts */
#define NO_FRAME ((unsigned short)~0)	/* pick new start */
	struct usb_device	*dev;		/* access to TT */
#ifdef CONFIG_CPU_FREQ
	struct list_head	split_intr_qhs;	/* list of split qhs */
	__le32			was_active;	/* active bit before "i" set */
#endif
} __attribute__ ((aligned (32)));
/*-------------------------------------------------------------------------*/