Merge git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/usb-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/usb-2.6: (142 commits)
  USB: Fix sysfs paths in documentation
  USB: skeleton: fix coding style issues.
  USB: O_NONBLOCK in read path of skeleton
  USB: make usb-skeleton honor O_NONBLOCK in write path
  USB: skel_read really sucks royally
  USB: Add hub descriptor update hook for xHCI
  USB: xhci: Support USB hubs.
  USB: xhci: Set multi-TT field for LS/FS devices under hubs.
  USB: xhci: Set route string for all devices.
  USB: xhci: Fix command wait list handling.
  USB: xhci: Change how xHCI commands are handled.
  USB: xhci: Refactor input device context setup.
  USB: xhci: Endpoint representation refactoring.
  USB: gadget: ether needs to select CRC32
  USB: fix USBTMC get_capabilities success handling
  USB: fix missing error check in probing
  USB: usbfs: add USBDEVFS_URB_BULK_CONTINUATION flag
  USB: support for autosuspend in sierra while online
  USB: ehci-dbgp,ehci: Allow dbpg to work with suspend/resume
  USB: ehci-dbgp,documentation: Documentation updates for ehci-dbgp
  ...
Linus Torvalds 2009-09-23 09:25:16 -07:00
commit be90a49ca2
126 changed files with 9590 additions and 2124 deletions

View File

@ -671,7 +671,7 @@ and is between 256 and 4096 characters. It is defined in the file
earlyprintk= [X86,SH,BLACKFIN]
earlyprintk=vga
earlyprintk=serial[,ttySn[,baudrate]]
earlyprintk=dbgp
earlyprintk=dbgp[debugController#]
Append ",keep" to not disable it when the real console
takes over.

View File

@ -16,20 +16,20 @@ Usage:
Authorize a device to connect:
$ echo 1 > /sys/usb/devices/DEVICE/authorized
$ echo 1 > /sys/bus/usb/devices/DEVICE/authorized
Deauthorize a device:
$ echo 0 > /sys/usb/devices/DEVICE/authorized
$ echo 0 > /sys/bus/usb/devices/DEVICE/authorized
Set new devices connected to hostX to be deauthorized by default (ie:
lock down):
$ echo 0 > /sys/bus/devices/usbX/authorized_default
$ echo 0 > /sys/bus/usb/devices/usbX/authorized_default
Remove the lock down:
$ echo 1 > /sys/bus/devices/usbX/authorized_default
$ echo 1 > /sys/bus/usb/devices/usbX/authorized_default
By default, wired USB devices are authorized to connect. Wireless USB
hosts deauthorize by default all new connected
@ -47,7 +47,7 @@ USB port):
boot up
rc.local ->
for host in /sys/bus/devices/usb*
for host in /sys/bus/usb/devices/usb*
do
echo 0 > $host/authorized_default
done

View File

@ -33,7 +33,7 @@ if usbmon is built into the kernel.
Verify that bus sockets are present.
# ls /sys/kernel/debug/usbmon
# ls /sys/kernel/debug/usb/usbmon
0s 0u 1s 1t 1u 2s 2t 2u 3s 3t 3u 4s 4t 4u
#
@ -58,11 +58,11 @@ Bus=03 means it's bus 3.
3. Start 'cat'
# cat /sys/kernel/debug/usbmon/3u > /tmp/1.mon.out
# cat /sys/kernel/debug/usb/usbmon/3u > /tmp/1.mon.out
to listen on a single bus, otherwise, to listen on all buses, type:
# cat /sys/kernel/debug/usbmon/0u > /tmp/1.mon.out
# cat /sys/kernel/debug/usb/usbmon/0u > /tmp/1.mon.out
This process will be reading until killed. Naturally, the output can be
redirected to a desirable location. This is preferred, because it is going
@ -305,7 +305,7 @@ Before the call, hdr, data, and alloc should be filled. Upon return, the area
pointed by hdr contains the next event structure, and the data buffer contains
the data, if any. The event is removed from the kernel buffer.
The MON_IOCX_GET copies 48 bytes, MON_IOCX_GETX copies 64 bytes.
The MON_IOCX_GET copies 48 bytes to hdr area, MON_IOCX_GETX copies 64 bytes.
MON_IOCX_MFETCH, defined as _IOWR(MON_IOC_MAGIC, 7, struct mon_mfetch_arg)
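As a rough illustration of the hdr/data/alloc contract described above, here is a
minimal user-space sketch that pulls one event off bus 0 through the binary
interface. The struct layout and the ioctl encoding are spelled out locally from a
reading of this documentation file and should be treated as assumptions, not as the
authoritative kernel definitions.

/*
 * Hedged sketch: read one usbmon event via MON_IOCX_GET on /dev/usbmon0.
 * The structures and the ioctl magic/number below are assumptions taken
 * from usbmon.txt; verify them against your kernel before relying on this.
 */
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>

struct usbmon_packet {			/* 48-byte event header used by MON_IOCX_GET */
	uint64_t id;			/* URB id */
	unsigned char type;		/* 'S'ubmit, 'C'allback, 'E'rror */
	unsigned char xfer_type;	/* ISO=0, Intr=1, Ctrl=2, Bulk=3 */
	unsigned char epnum;		/* endpoint number, direction in bit 7 */
	unsigned char devnum;
	uint16_t busnum;
	char flag_setup;
	char flag_data;
	int64_t ts_sec;
	int32_t ts_usec;
	int status;
	unsigned int length;		/* requested transfer length */
	unsigned int len_cap;		/* captured data length */
	unsigned char setup[8];
};

struct mon_get_arg {			/* hdr, data and alloc are filled before the call */
	struct usbmon_packet *hdr;
	void *data;
	size_t alloc;			/* size of the data buffer */
};

#define MON_IOC_MAGIC	0x92		/* assumed from usbmon.txt */
#define MON_IOCX_GET	_IOW(MON_IOC_MAGIC, 6, struct mon_get_arg)

int main(void)
{
	struct usbmon_packet hdr;
	unsigned char data[4096];
	struct mon_get_arg get = { &hdr, data, sizeof(data) };
	int fd = open("/dev/usbmon0", O_RDONLY);	/* bus 0 = all buses */

	if (fd < 0 || ioctl(fd, MON_IOCX_GET, &get) < 0) {
		perror("usbmon");
		return 1;
	}
	printf("bus %u dev %u ep %u type %c len_cap %u\n",
	       hdr.busnum, hdr.devnum, hdr.epnum & 0x7f, hdr.type, hdr.len_cap);
	close(fd);
	return 0;
}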

View File

@ -7,7 +7,7 @@ and two USB cables, connected like this:
[host/target] <-------> [USB debug key] <-------> [client/console]
1. There are three specific hardware requirements:
1. There are a number of specific hardware requirements:
a.) Host/target system needs to have USB debug port capability.
@ -42,7 +42,35 @@ and two USB cables, connected like this:
This is a small blue plastic connector with two USB connections,
it draws power from its USB connections.
c.) Thirdly, you need a second client/console system with a regular USB port.
c.) You need a second client/console system with a high speed USB 2.0
port.
d.) The Netchip device must be plugged directly into the physical
debug port on the "host/target" system. You cannot use a USB hub in
between the physical debug port and the Netchip device.
The EHCI debug controller is bound to a specific physical USB
port and the Netchip device will only work as an early printk
device in this port. The EHCI host controllers are electrically
wired such that the EHCI debug controller is hooked up to the
first physical port and there is no way to change this via software.
You can find the physical port through experimentation by trying
each physical port on the system and rebooting. Or you can try
and use lsusb or look at the kernel info messages emitted by the
usb stack when you plug a usb device into various ports on the
"host/target" system.
Some hardware vendors do not expose the usb debug port with a
physical connector; if you find such a device, send a complaint
to the hardware vendor, because there is no reason not to wire
this port into one of the physically accessible ports.
e.) It is also important to note, that many versions of the Netchip
device require the "client/console" system to be plugged into the
right hand side of the device (with the product logo facing up and
readable left to right). The reason is that the 5 volt
power supply is taken from only one side of the device and it
must be the side that does not get rebooted.
2. Software requirements:
@ -56,6 +84,13 @@ and two USB cables, connected like this:
(If you are using Grub, append it to the 'kernel' line in
/etc/grub.conf)
On systems with more than one EHCI debug controller you must
specify the correct EHCI debug controller number. The ordering
comes from the PCI bus enumeration of the EHCI controllers. The
default with no number argument is "0", the first EHCI debug
controller. To use the second EHCI debug controller, you would
use the command line: "earlyprintk=dbgp1"
NOTE: normally earlyprintk console gets turned off once the
regular console is alive - use "earlyprintk=dbgp,keep" to keep
this channel open beyond early bootup. This can be useful for

View File

@ -2107,12 +2107,12 @@ S: Supported
F: arch/powerpc/sysdev/qe_lib/
F: arch/powerpc/include/asm/*qe.h
FREESCALE HIGHSPEED USB DEVICE DRIVER
FREESCALE USB PERIPHERAL DRIVERS
M: Li Yang <leoli@freescale.com>
L: linux-usb@vger.kernel.org
L: linuxppc-dev@ozlabs.org
S: Maintained
F: drivers/usb/gadget/fsl_usb2_udc.c
F: drivers/usb/gadget/fsl*
FREESCALE QUICC ENGINE UCC ETHERNET DRIVER
M: Li Yang <leoli@freescale.com>

View File

@ -160,721 +160,6 @@ static struct console early_serial_console = {
.index = -1,
};
#ifdef CONFIG_EARLY_PRINTK_DBGP
static struct ehci_caps __iomem *ehci_caps;
static struct ehci_regs __iomem *ehci_regs;
static struct ehci_dbg_port __iomem *ehci_debug;
static unsigned int dbgp_endpoint_out;
struct ehci_dev {
u32 bus;
u32 slot;
u32 func;
};
static struct ehci_dev ehci_dev;
#define USB_DEBUG_DEVNUM 127
#define DBGP_DATA_TOGGLE 0x8800
static inline u32 dbgp_pid_update(u32 x, u32 tok)
{
return ((x ^ DBGP_DATA_TOGGLE) & 0xffff00) | (tok & 0xff);
}
static inline u32 dbgp_len_update(u32 x, u32 len)
{
return (x & ~0x0f) | (len & 0x0f);
}
/*
* USB Packet IDs (PIDs)
*/
/* token */
#define USB_PID_OUT 0xe1
#define USB_PID_IN 0x69
#define USB_PID_SOF 0xa5
#define USB_PID_SETUP 0x2d
/* handshake */
#define USB_PID_ACK 0xd2
#define USB_PID_NAK 0x5a
#define USB_PID_STALL 0x1e
#define USB_PID_NYET 0x96
/* data */
#define USB_PID_DATA0 0xc3
#define USB_PID_DATA1 0x4b
#define USB_PID_DATA2 0x87
#define USB_PID_MDATA 0x0f
/* Special */
#define USB_PID_PREAMBLE 0x3c
#define USB_PID_ERR 0x3c
#define USB_PID_SPLIT 0x78
#define USB_PID_PING 0xb4
#define USB_PID_UNDEF_0 0xf0
#define USB_PID_DATA_TOGGLE 0x88
#define DBGP_CLAIM (DBGP_OWNER | DBGP_ENABLED | DBGP_INUSE)
#define PCI_CAP_ID_EHCI_DEBUG 0xa
#define HUB_ROOT_RESET_TIME 50 /* times are in msec */
#define HUB_SHORT_RESET_TIME 10
#define HUB_LONG_RESET_TIME 200
#define HUB_RESET_TIMEOUT 500
#define DBGP_MAX_PACKET 8
static int dbgp_wait_until_complete(void)
{
u32 ctrl;
int loop = 0x100000;
do {
ctrl = readl(&ehci_debug->control);
/* Stop when the transaction is finished */
if (ctrl & DBGP_DONE)
break;
} while (--loop > 0);
if (!loop)
return -1;
/*
* Now that we have observed the completed transaction,
* clear the done bit.
*/
writel(ctrl | DBGP_DONE, &ehci_debug->control);
return (ctrl & DBGP_ERROR) ? -DBGP_ERRCODE(ctrl) : DBGP_LEN(ctrl);
}
static void __init dbgp_mdelay(int ms)
{
int i;
while (ms--) {
for (i = 0; i < 1000; i++)
outb(0x1, 0x80);
}
}
static void dbgp_breath(void)
{
/* Sleep to give the debug port a chance to breathe */
}
static int dbgp_wait_until_done(unsigned ctrl)
{
u32 pids, lpid;
int ret;
int loop = 3;
retry:
writel(ctrl | DBGP_GO, &ehci_debug->control);
ret = dbgp_wait_until_complete();
pids = readl(&ehci_debug->pids);
lpid = DBGP_PID_GET(pids);
if (ret < 0)
return ret;
/*
* If the port is getting full or it has dropped data
* start pacing ourselves, not necessary but it's friendly.
*/
if ((lpid == USB_PID_NAK) || (lpid == USB_PID_NYET))
dbgp_breath();
/* If I get a NACK reissue the transmission */
if (lpid == USB_PID_NAK) {
if (--loop > 0)
goto retry;
}
return ret;
}
static void dbgp_set_data(const void *buf, int size)
{
const unsigned char *bytes = buf;
u32 lo, hi;
int i;
lo = hi = 0;
for (i = 0; i < 4 && i < size; i++)
lo |= bytes[i] << (8*i);
for (; i < 8 && i < size; i++)
hi |= bytes[i] << (8*(i - 4));
writel(lo, &ehci_debug->data03);
writel(hi, &ehci_debug->data47);
}
static void __init dbgp_get_data(void *buf, int size)
{
unsigned char *bytes = buf;
u32 lo, hi;
int i;
lo = readl(&ehci_debug->data03);
hi = readl(&ehci_debug->data47);
for (i = 0; i < 4 && i < size; i++)
bytes[i] = (lo >> (8*i)) & 0xff;
for (; i < 8 && i < size; i++)
bytes[i] = (hi >> (8*(i - 4))) & 0xff;
}
static int dbgp_bulk_write(unsigned devnum, unsigned endpoint,
const char *bytes, int size)
{
u32 pids, addr, ctrl;
int ret;
if (size > DBGP_MAX_PACKET)
return -1;
addr = DBGP_EPADDR(devnum, endpoint);
pids = readl(&ehci_debug->pids);
pids = dbgp_pid_update(pids, USB_PID_OUT);
ctrl = readl(&ehci_debug->control);
ctrl = dbgp_len_update(ctrl, size);
ctrl |= DBGP_OUT;
ctrl |= DBGP_GO;
dbgp_set_data(bytes, size);
writel(addr, &ehci_debug->address);
writel(pids, &ehci_debug->pids);
ret = dbgp_wait_until_done(ctrl);
if (ret < 0)
return ret;
return ret;
}
static int __init dbgp_bulk_read(unsigned devnum, unsigned endpoint, void *data,
int size)
{
u32 pids, addr, ctrl;
int ret;
if (size > DBGP_MAX_PACKET)
return -1;
addr = DBGP_EPADDR(devnum, endpoint);
pids = readl(&ehci_debug->pids);
pids = dbgp_pid_update(pids, USB_PID_IN);
ctrl = readl(&ehci_debug->control);
ctrl = dbgp_len_update(ctrl, size);
ctrl &= ~DBGP_OUT;
ctrl |= DBGP_GO;
writel(addr, &ehci_debug->address);
writel(pids, &ehci_debug->pids);
ret = dbgp_wait_until_done(ctrl);
if (ret < 0)
return ret;
if (size > ret)
size = ret;
dbgp_get_data(data, size);
return ret;
}
static int __init dbgp_control_msg(unsigned devnum, int requesttype,
int request, int value, int index, void *data, int size)
{
u32 pids, addr, ctrl;
struct usb_ctrlrequest req;
int read;
int ret;
read = (requesttype & USB_DIR_IN) != 0;
if (size > (read ? DBGP_MAX_PACKET:0))
return -1;
/* Compute the control message */
req.bRequestType = requesttype;
req.bRequest = request;
req.wValue = cpu_to_le16(value);
req.wIndex = cpu_to_le16(index);
req.wLength = cpu_to_le16(size);
pids = DBGP_PID_SET(USB_PID_DATA0, USB_PID_SETUP);
addr = DBGP_EPADDR(devnum, 0);
ctrl = readl(&ehci_debug->control);
ctrl = dbgp_len_update(ctrl, sizeof(req));
ctrl |= DBGP_OUT;
ctrl |= DBGP_GO;
/* Send the setup message */
dbgp_set_data(&req, sizeof(req));
writel(addr, &ehci_debug->address);
writel(pids, &ehci_debug->pids);
ret = dbgp_wait_until_done(ctrl);
if (ret < 0)
return ret;
/* Read the result */
return dbgp_bulk_read(devnum, 0, data, size);
}
/* Find a PCI capability */
static u32 __init find_cap(u32 num, u32 slot, u32 func, int cap)
{
u8 pos;
int bytes;
if (!(read_pci_config_16(num, slot, func, PCI_STATUS) &
PCI_STATUS_CAP_LIST))
return 0;
pos = read_pci_config_byte(num, slot, func, PCI_CAPABILITY_LIST);
for (bytes = 0; bytes < 48 && pos >= 0x40; bytes++) {
u8 id;
pos &= ~3;
id = read_pci_config_byte(num, slot, func, pos+PCI_CAP_LIST_ID);
if (id == 0xff)
break;
if (id == cap)
return pos;
pos = read_pci_config_byte(num, slot, func,
pos+PCI_CAP_LIST_NEXT);
}
return 0;
}
static u32 __init __find_dbgp(u32 bus, u32 slot, u32 func)
{
u32 class;
class = read_pci_config(bus, slot, func, PCI_CLASS_REVISION);
if ((class >> 8) != PCI_CLASS_SERIAL_USB_EHCI)
return 0;
return find_cap(bus, slot, func, PCI_CAP_ID_EHCI_DEBUG);
}
static u32 __init find_dbgp(int ehci_num, u32 *rbus, u32 *rslot, u32 *rfunc)
{
u32 bus, slot, func;
for (bus = 0; bus < 256; bus++) {
for (slot = 0; slot < 32; slot++) {
for (func = 0; func < 8; func++) {
unsigned cap;
cap = __find_dbgp(bus, slot, func);
if (!cap)
continue;
if (ehci_num-- != 0)
continue;
*rbus = bus;
*rslot = slot;
*rfunc = func;
return cap;
}
}
}
return 0;
}
static int __init ehci_reset_port(int port)
{
u32 portsc;
u32 delay_time, delay;
int loop;
/* Reset the usb debug port */
portsc = readl(&ehci_regs->port_status[port - 1]);
portsc &= ~PORT_PE;
portsc |= PORT_RESET;
writel(portsc, &ehci_regs->port_status[port - 1]);
delay = HUB_ROOT_RESET_TIME;
for (delay_time = 0; delay_time < HUB_RESET_TIMEOUT;
delay_time += delay) {
dbgp_mdelay(delay);
portsc = readl(&ehci_regs->port_status[port - 1]);
if (portsc & PORT_RESET) {
/* force reset to complete */
loop = 2;
writel(portsc & ~(PORT_RWC_BITS | PORT_RESET),
&ehci_regs->port_status[port - 1]);
do {
portsc = readl(&ehci_regs->port_status[port-1]);
} while ((portsc & PORT_RESET) && (--loop > 0));
}
/* Device went away? */
if (!(portsc & PORT_CONNECT))
return -ENOTCONN;
/* bomb out completely if something weird happened */
if ((portsc & PORT_CSC))
return -EINVAL;
/* If we've finished resetting, then break out of the loop */
if (!(portsc & PORT_RESET) && (portsc & PORT_PE))
return 0;
}
return -EBUSY;
}
static int __init ehci_wait_for_port(int port)
{
u32 status;
int ret, reps;
for (reps = 0; reps < 3; reps++) {
dbgp_mdelay(100);
status = readl(&ehci_regs->status);
if (status & STS_PCD) {
ret = ehci_reset_port(port);
if (ret == 0)
return 0;
}
}
return -ENOTCONN;
}
#ifdef DBGP_DEBUG
# define dbgp_printk early_printk
#else
static inline void dbgp_printk(const char *fmt, ...) { }
#endif
typedef void (*set_debug_port_t)(int port);
static void __init default_set_debug_port(int port)
{
}
static set_debug_port_t __initdata set_debug_port = default_set_debug_port;
static void __init nvidia_set_debug_port(int port)
{
u32 dword;
dword = read_pci_config(ehci_dev.bus, ehci_dev.slot, ehci_dev.func,
0x74);
dword &= ~(0x0f<<12);
dword |= ((port & 0x0f)<<12);
write_pci_config(ehci_dev.bus, ehci_dev.slot, ehci_dev.func, 0x74,
dword);
dbgp_printk("set debug port to %d\n", port);
}
static void __init detect_set_debug_port(void)
{
u32 vendorid;
vendorid = read_pci_config(ehci_dev.bus, ehci_dev.slot, ehci_dev.func,
0x00);
if ((vendorid & 0xffff) == 0x10de) {
dbgp_printk("using nvidia set_debug_port\n");
set_debug_port = nvidia_set_debug_port;
}
}
static int __init ehci_setup(void)
{
struct usb_debug_descriptor dbgp_desc;
u32 cmd, ctrl, status, portsc, hcs_params;
u32 debug_port, new_debug_port = 0, n_ports;
u32 devnum;
int ret, i;
int loop;
int port_map_tried;
int playtimes = 3;
try_next_time:
port_map_tried = 0;
try_next_port:
hcs_params = readl(&ehci_caps->hcs_params);
debug_port = HCS_DEBUG_PORT(hcs_params);
n_ports = HCS_N_PORTS(hcs_params);
dbgp_printk("debug_port: %d\n", debug_port);
dbgp_printk("n_ports: %d\n", n_ports);
for (i = 1; i <= n_ports; i++) {
portsc = readl(&ehci_regs->port_status[i-1]);
dbgp_printk("portstatus%d: %08x\n", i, portsc);
}
if (port_map_tried && (new_debug_port != debug_port)) {
if (--playtimes) {
set_debug_port(new_debug_port);
goto try_next_time;
}
return -1;
}
loop = 100000;
/* Reset the EHCI controller */
cmd = readl(&ehci_regs->command);
cmd |= CMD_RESET;
writel(cmd, &ehci_regs->command);
do {
cmd = readl(&ehci_regs->command);
} while ((cmd & CMD_RESET) && (--loop > 0));
if (!loop) {
dbgp_printk("can not reset ehci\n");
return -1;
}
dbgp_printk("ehci reset done\n");
/* Claim ownership, but do not enable yet */
ctrl = readl(&ehci_debug->control);
ctrl |= DBGP_OWNER;
ctrl &= ~(DBGP_ENABLED | DBGP_INUSE);
writel(ctrl, &ehci_debug->control);
/* Start the ehci running */
cmd = readl(&ehci_regs->command);
cmd &= ~(CMD_LRESET | CMD_IAAD | CMD_PSE | CMD_ASE | CMD_RESET);
cmd |= CMD_RUN;
writel(cmd, &ehci_regs->command);
/* Ensure everything is routed to the EHCI */
writel(FLAG_CF, &ehci_regs->configured_flag);
/* Wait until the controller is no longer halted */
loop = 10;
do {
status = readl(&ehci_regs->status);
} while ((status & STS_HALT) && (--loop > 0));
if (!loop) {
dbgp_printk("ehci can be started\n");
return -1;
}
dbgp_printk("ehci started\n");
/* Wait for a device to show up in the debug port */
ret = ehci_wait_for_port(debug_port);
if (ret < 0) {
dbgp_printk("No device found in debug port\n");
goto next_debug_port;
}
dbgp_printk("ehci wait for port done\n");
/* Enable the debug port */
ctrl = readl(&ehci_debug->control);
ctrl |= DBGP_CLAIM;
writel(ctrl, &ehci_debug->control);
ctrl = readl(&ehci_debug->control);
if ((ctrl & DBGP_CLAIM) != DBGP_CLAIM) {
dbgp_printk("No device in debug port\n");
writel(ctrl & ~DBGP_CLAIM, &ehci_debug->control);
goto err;
}
dbgp_printk("debug ported enabled\n");
/* Completely transfer the debug device to the debug controller */
portsc = readl(&ehci_regs->port_status[debug_port - 1]);
portsc &= ~PORT_PE;
writel(portsc, &ehci_regs->port_status[debug_port - 1]);
dbgp_mdelay(100);
/* Find the debug device and make it device number 127 */
for (devnum = 0; devnum <= 127; devnum++) {
ret = dbgp_control_msg(devnum,
USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_DEVICE,
USB_REQ_GET_DESCRIPTOR, (USB_DT_DEBUG << 8), 0,
&dbgp_desc, sizeof(dbgp_desc));
if (ret > 0)
break;
}
if (devnum > 127) {
dbgp_printk("Could not find attached debug device\n");
goto err;
}
if (ret < 0) {
dbgp_printk("Attached device is not a debug device\n");
goto err;
}
dbgp_endpoint_out = dbgp_desc.bDebugOutEndpoint;
/* Move the device to 127 if it isn't already there */
if (devnum != USB_DEBUG_DEVNUM) {
ret = dbgp_control_msg(devnum,
USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE,
USB_REQ_SET_ADDRESS, USB_DEBUG_DEVNUM, 0, NULL, 0);
if (ret < 0) {
dbgp_printk("Could not move attached device to %d\n",
USB_DEBUG_DEVNUM);
goto err;
}
devnum = USB_DEBUG_DEVNUM;
dbgp_printk("debug device renamed to 127\n");
}
/* Enable the debug interface */
ret = dbgp_control_msg(USB_DEBUG_DEVNUM,
USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE,
USB_REQ_SET_FEATURE, USB_DEVICE_DEBUG_MODE, 0, NULL, 0);
if (ret < 0) {
dbgp_printk(" Could not enable the debug device\n");
goto err;
}
dbgp_printk("debug interface enabled\n");
/* Perform a small write to get the even/odd data state in sync
*/
ret = dbgp_bulk_write(USB_DEBUG_DEVNUM, dbgp_endpoint_out, " ", 1);
if (ret < 0) {
dbgp_printk("dbgp_bulk_write failed: %d\n", ret);
goto err;
}
dbgp_printk("small write doned\n");
return 0;
err:
/* Things didn't work so remove my claim */
ctrl = readl(&ehci_debug->control);
ctrl &= ~(DBGP_CLAIM | DBGP_OUT);
writel(ctrl, &ehci_debug->control);
return -1;
next_debug_port:
port_map_tried |= (1<<(debug_port - 1));
new_debug_port = ((debug_port-1+1)%n_ports) + 1;
if (port_map_tried != ((1<<n_ports) - 1)) {
set_debug_port(new_debug_port);
goto try_next_port;
}
if (--playtimes) {
set_debug_port(new_debug_port);
goto try_next_time;
}
return -1;
}
static int __init early_dbgp_init(char *s)
{
u32 debug_port, bar, offset;
u32 bus, slot, func, cap;
void __iomem *ehci_bar;
u32 dbgp_num;
u32 bar_val;
char *e;
int ret;
u8 byte;
if (!early_pci_allowed())
return -1;
dbgp_num = 0;
if (*s)
dbgp_num = simple_strtoul(s, &e, 10);
dbgp_printk("dbgp_num: %d\n", dbgp_num);
cap = find_dbgp(dbgp_num, &bus, &slot, &func);
if (!cap)
return -1;
dbgp_printk("Found EHCI debug port on %02x:%02x.%1x\n", bus, slot,
func);
debug_port = read_pci_config(bus, slot, func, cap);
bar = (debug_port >> 29) & 0x7;
bar = (bar * 4) + 0xc;
offset = (debug_port >> 16) & 0xfff;
dbgp_printk("bar: %02x offset: %03x\n", bar, offset);
if (bar != PCI_BASE_ADDRESS_0) {
dbgp_printk("only debug ports on bar 1 handled.\n");
return -1;
}
bar_val = read_pci_config(bus, slot, func, PCI_BASE_ADDRESS_0);
dbgp_printk("bar_val: %02x offset: %03x\n", bar_val, offset);
if (bar_val & ~PCI_BASE_ADDRESS_MEM_MASK) {
dbgp_printk("only simple 32bit mmio bars supported\n");
return -1;
}
/* double check if the mem space is enabled */
byte = read_pci_config_byte(bus, slot, func, 0x04);
if (!(byte & 0x2)) {
byte |= 0x02;
write_pci_config_byte(bus, slot, func, 0x04, byte);
dbgp_printk("mmio for ehci enabled\n");
}
/*
* FIXME I don't have the bar size so just guess PAGE_SIZE is more
* than enough. 1K is the biggest I have seen.
*/
set_fixmap_nocache(FIX_DBGP_BASE, bar_val & PAGE_MASK);
ehci_bar = (void __iomem *)__fix_to_virt(FIX_DBGP_BASE);
ehci_bar += bar_val & ~PAGE_MASK;
dbgp_printk("ehci_bar: %p\n", ehci_bar);
ehci_caps = ehci_bar;
ehci_regs = ehci_bar + HC_LENGTH(readl(&ehci_caps->hc_capbase));
ehci_debug = ehci_bar + offset;
ehci_dev.bus = bus;
ehci_dev.slot = slot;
ehci_dev.func = func;
detect_set_debug_port();
ret = ehci_setup();
if (ret < 0) {
dbgp_printk("ehci_setup failed\n");
ehci_debug = NULL;
return -1;
}
return 0;
}
static void early_dbgp_write(struct console *con, const char *str, u32 n)
{
int chunk, ret;
if (!ehci_debug)
return;
while (n > 0) {
chunk = n;
if (chunk > DBGP_MAX_PACKET)
chunk = DBGP_MAX_PACKET;
ret = dbgp_bulk_write(USB_DEBUG_DEVNUM,
dbgp_endpoint_out, str, chunk);
str += chunk;
n -= chunk;
}
}
static struct console early_dbgp_console = {
.name = "earlydbg",
.write = early_dbgp_write,
.flags = CON_PRINTBUFFER,
.index = -1,
};
#endif
/* Direct interface for emergencies */
static struct console *early_console = &early_vga_console;
static int __initdata early_console_initialized;
@ -891,10 +176,19 @@ asmlinkage void early_printk(const char *fmt, ...)
va_end(ap);
}
static inline void early_console_register(struct console *con, int keep_early)
{
early_console = con;
if (keep_early)
early_console->flags &= ~CON_BOOT;
else
early_console->flags |= CON_BOOT;
register_console(early_console);
}
static int __init setup_early_printk(char *buf)
{
int keep_early;
int keep;
if (!buf)
return 0;
@ -903,42 +197,34 @@ static int __init setup_early_printk(char *buf)
return 0;
early_console_initialized = 1;
keep_early = (strstr(buf, "keep") != NULL);
keep = (strstr(buf, "keep") != NULL);
if (!strncmp(buf, "serial", 6)) {
early_serial_init(buf + 6);
early_console = &early_serial_console;
} else if (!strncmp(buf, "ttyS", 4)) {
early_serial_init(buf);
early_console = &early_serial_console;
} else if (!strncmp(buf, "vga", 3)
&& boot_params.screen_info.orig_video_isVGA == 1) {
max_xpos = boot_params.screen_info.orig_video_cols;
max_ypos = boot_params.screen_info.orig_video_lines;
current_ypos = boot_params.screen_info.orig_y;
early_console = &early_vga_console;
while (*buf != '\0') {
if (!strncmp(buf, "serial", 6)) {
early_serial_init(buf + 6);
early_console_register(&early_serial_console, keep);
}
if (!strncmp(buf, "ttyS", 4)) {
early_serial_init(buf + 4);
early_console_register(&early_serial_console, keep);
}
if (!strncmp(buf, "vga", 3) &&
boot_params.screen_info.orig_video_isVGA == 1) {
max_xpos = boot_params.screen_info.orig_video_cols;
max_ypos = boot_params.screen_info.orig_video_lines;
current_ypos = boot_params.screen_info.orig_y;
early_console_register(&early_vga_console, keep);
}
#ifdef CONFIG_EARLY_PRINTK_DBGP
} else if (!strncmp(buf, "dbgp", 4)) {
if (early_dbgp_init(buf+4) < 0)
return 0;
early_console = &early_dbgp_console;
/*
* usb subsys will reset ehci controller, so don't keep
* that early console
*/
keep_early = 0;
if (!strncmp(buf, "dbgp", 4) && !early_dbgp_init(buf + 4))
early_console_register(&early_dbgp_console, keep);
#endif
#ifdef CONFIG_HVC_XEN
} else if (!strncmp(buf, "xen", 3)) {
early_console = &xenboot_console;
if (!strncmp(buf, "xen", 3))
early_console_register(&xenboot_console, keep);
#endif
buf++;
}
if (keep_early)
early_console->flags &= ~CON_BOOT;
else
early_console->flags |= CON_BOOT;
register_console(early_console);
return 0;
}

View File

@ -300,20 +300,23 @@ static int eem_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
return 0;
}
crc = get_unaligned_le32(skb2->data
+ len - ETH_FCS_LEN);
skb_trim(skb2, len - ETH_FCS_LEN);
/*
* The bmCRC helps to denote when the CRC field in
* the Ethernet frame contains a calculated CRC:
* bmCRC = 1 : CRC is calculated
* bmCRC = 0 : CRC = 0xDEADBEEF
*/
if (header & BIT(14))
crc2 = ~crc32_le(~0, skb2->data, skb2->len);
else
if (header & BIT(14)) {
crc = get_unaligned_le32(skb2->data
+ len - ETH_FCS_LEN);
crc2 = ~crc32_le(~0, skb2->data, skb2->len
- ETH_FCS_LEN);
} else {
crc = get_unaligned_be32(skb2->data
+ len - ETH_FCS_LEN);
crc2 = 0xdeadbeef;
}
skb_trim(skb2, len - ETH_FCS_LEN);
if (is_last)
return crc == crc2;

View File

@ -39,6 +39,7 @@ config USB_ARCH_HAS_OHCI
default y if ARCH_AT91
default y if ARCH_PNX4008 && I2C
default y if MFD_TC6393XB
default y if ARCH_W90X900
# PPC:
default y if STB03xxx
default y if PPC_MPC52xx
@ -58,6 +59,8 @@ config USB_ARCH_HAS_EHCI
default y if PPC_83xx
default y if SOC_AU1200
default y if ARCH_IXP4XX
default y if ARCH_W90X900
default y if ARCH_AT91SAM9G45
default PCI
# ARM SA1111 chips have a non-PCI based "OHCI-compatible" USB host interface.

View File

@ -16,6 +16,7 @@ obj-$(CONFIG_USB_UHCI_HCD) += host/
obj-$(CONFIG_USB_FHCI_HCD) += host/
obj-$(CONFIG_USB_XHCI_HCD) += host/
obj-$(CONFIG_USB_SL811_HCD) += host/
obj-$(CONFIG_USB_ISP1362_HCD) += host/
obj-$(CONFIG_USB_U132_HCD) += host/
obj-$(CONFIG_USB_R8A66597_HCD) += host/
obj-$(CONFIG_USB_HWA_HCD) += host/
@ -39,6 +40,7 @@ obj-$(CONFIG_USB_MICROTEK) += image/
obj-$(CONFIG_USB_SERIAL) += serial/
obj-$(CONFIG_USB) += misc/
obj-y += early/
obj-$(CONFIG_USB_ATM) += atm/
obj-$(CONFIG_USB_SPEEDTOUCH) += atm/

View File

@ -59,6 +59,7 @@
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/serial.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/module.h>
@ -609,6 +610,7 @@ static int acm_tty_open(struct tty_struct *tty, struct file *filp)
acm->throttle = 0;
tasklet_schedule(&acm->urb_task);
set_bit(ASYNCB_INITIALIZED, &acm->port.flags);
rv = tty_port_block_til_ready(&acm->port, tty, filp);
done:
mutex_unlock(&acm->mutex);

View File

@ -313,8 +313,13 @@ static ssize_t wdm_write
r = usb_autopm_get_interface(desc->intf);
if (r < 0)
goto outnp;
r = wait_event_interruptible(desc->wait, !test_bit(WDM_IN_USE,
&desc->flags));
if (!(file->f_flags & O_NONBLOCK))
r = wait_event_interruptible(desc->wait, !test_bit(WDM_IN_USE,
&desc->flags));
else
if (test_bit(WDM_IN_USE, &desc->flags))
r = -EAGAIN;
if (r < 0)
goto out;
@ -377,7 +382,7 @@ outnl:
static ssize_t wdm_read
(struct file *file, char __user *buffer, size_t count, loff_t *ppos)
{
int rv, cntr;
int rv, cntr = 0;
int i = 0;
struct wdm_device *desc = file->private_data;
@ -389,10 +394,23 @@ static ssize_t wdm_read
if (desc->length == 0) {
desc->read = 0;
retry:
if (test_bit(WDM_DISCONNECTING, &desc->flags)) {
rv = -ENODEV;
goto err;
}
i++;
rv = wait_event_interruptible(desc->wait,
test_bit(WDM_READ, &desc->flags));
if (file->f_flags & O_NONBLOCK) {
if (!test_bit(WDM_READ, &desc->flags)) {
rv = cntr ? cntr : -EAGAIN;
goto err;
}
rv = 0;
} else {
rv = wait_event_interruptible(desc->wait,
test_bit(WDM_READ, &desc->flags));
}
/* may have happened while we slept */
if (test_bit(WDM_DISCONNECTING, &desc->flags)) {
rv = -ENODEV;
goto err;
@ -448,7 +466,7 @@ retry:
err:
mutex_unlock(&desc->rlock);
if (rv < 0)
if (rv < 0 && rv != -EAGAIN)
dev_err(&desc->intf->dev, "wdm_read: exit error\n");
return rv;
}
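The read-side change above only returns -EAGAIN when the descriptor was opened
with O_NONBLOCK and nothing is buffered yet. A minimal user-space sketch of that
usage, assuming a typical /dev/cdc-wdm0 device node:

/*
 * Hedged sketch: non-blocking read from a cdc-wdm management device,
 * polling until the driver has a response buffered.  The device node
 * name is an assumption about a typical system.
 */
#include <stdio.h>
#include <errno.h>
#include <fcntl.h>
#include <poll.h>
#include <unistd.h>

int main(void)
{
	char buf[512];
	int fd = open("/dev/cdc-wdm0", O_RDWR | O_NONBLOCK);
	struct pollfd pfd = { .fd = fd, .events = POLLIN };

	if (fd < 0) {
		perror("open");
		return 1;
	}
	for (;;) {
		ssize_t n = read(fd, buf, sizeof(buf));
		if (n >= 0) {
			printf("got %zd bytes of management data\n", n);
			break;
		}
		if (errno != EAGAIN) {
			perror("read");
			break;
		}
		/* Nothing buffered yet: wait for the driver to flag new data */
		poll(&pfd, 1, 1000);
	}
	close(fd);
	return 0;
}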

View File

@ -57,7 +57,9 @@ MODULE_DEVICE_TABLE(usb, usbtmc_devices);
/*
* This structure is the capabilities for the device
* See section 4.2.1.8 of the USBTMC specification for details.
* See section 4.2.1.8 of the USBTMC specification,
* and section 4.2.2 of the USBTMC usb488 subclass
* specification for details.
*/
struct usbtmc_dev_capabilities {
__u8 interface_capabilities;
@ -86,6 +88,8 @@ struct usbtmc_device_data {
bool TermCharEnabled;
bool auto_abort;
bool zombie; /* fd of disconnected device */
struct usbtmc_dev_capabilities capabilities;
struct kref kref;
struct mutex io_mutex; /* only one i/o function running at a time */
@ -367,13 +371,13 @@ static ssize_t usbtmc_read(struct file *filp, char __user *buf,
{
struct usbtmc_device_data *data;
struct device *dev;
unsigned long int n_characters;
u32 n_characters;
u8 *buffer;
int actual;
int done;
int remaining;
size_t done;
size_t remaining;
int retval;
int this_part;
size_t this_part;
/* Get pointer to private data structure */
data = filp->private_data;
@ -384,6 +388,10 @@ static ssize_t usbtmc_read(struct file *filp, char __user *buf,
return -ENOMEM;
mutex_lock(&data->io_mutex);
if (data->zombie) {
retval = -ENODEV;
goto exit;
}
remaining = count;
done = 0;
@ -401,10 +409,10 @@ static ssize_t usbtmc_read(struct file *filp, char __user *buf,
buffer[1] = data->bTag;
buffer[2] = ~(data->bTag);
buffer[3] = 0; /* Reserved */
buffer[4] = (this_part - 12 - 3) & 255;
buffer[5] = ((this_part - 12 - 3) >> 8) & 255;
buffer[6] = ((this_part - 12 - 3) >> 16) & 255;
buffer[7] = ((this_part - 12 - 3) >> 24) & 255;
buffer[4] = (this_part) & 255;
buffer[5] = ((this_part) >> 8) & 255;
buffer[6] = ((this_part) >> 16) & 255;
buffer[7] = ((this_part) >> 24) & 255;
buffer[8] = data->TermCharEnabled * 2;
/* Use term character? */
buffer[9] = data->TermChar;
@ -455,6 +463,22 @@ static ssize_t usbtmc_read(struct file *filp, char __user *buf,
(buffer[6] << 16) +
(buffer[7] << 24);
/* Ensure the instrument doesn't lie about it */
if(n_characters > actual - 12) {
dev_err(dev, "Device lies about message size: %u > %d\n", n_characters, actual - 12);
n_characters = actual - 12;
}
/* Ensure the instrument doesn't send more back than requested */
if(n_characters > this_part) {
dev_err(dev, "Device returns more than requested: %zu > %zu\n", done + n_characters, done + this_part);
n_characters = this_part;
}
/* Bound amount of data received by amount of data requested */
if (n_characters > this_part)
n_characters = this_part;
/* Copy buffer to user space */
if (copy_to_user(buf + done, &buffer[12], n_characters)) {
/* There must have been an addressing problem */
@ -463,8 +487,11 @@ static ssize_t usbtmc_read(struct file *filp, char __user *buf,
}
done += n_characters;
if (n_characters < USBTMC_SIZE_IOBUFFER)
/* Terminate if end-of-message bit received from device */
if ((buffer[8] & 0x01) && (actual >= n_characters + 12))
remaining = 0;
else
remaining -= n_characters;
}
/* Update file position value */
@ -496,6 +523,10 @@ static ssize_t usbtmc_write(struct file *filp, const char __user *buf,
return -ENOMEM;
mutex_lock(&data->io_mutex);
if (data->zombie) {
retval = -ENODEV;
goto exit;
}
remaining = count;
done = 0;
@ -767,20 +798,21 @@ static int get_capabilities(struct usbtmc_device_data *data)
}
dev_dbg(dev, "GET_CAPABILITIES returned %x\n", buffer[0]);
dev_dbg(dev, "Interface capabilities are %x\n", buffer[4]);
dev_dbg(dev, "Device capabilities are %x\n", buffer[5]);
dev_dbg(dev, "USB488 interface capabilities are %x\n", buffer[14]);
dev_dbg(dev, "USB488 device capabilities are %x\n", buffer[15]);
if (buffer[0] != USBTMC_STATUS_SUCCESS) {
dev_err(dev, "GET_CAPABILITIES returned %x\n", buffer[0]);
rv = -EPERM;
goto err_out;
}
dev_dbg(dev, "Interface capabilities are %x\n", buffer[4]);
dev_dbg(dev, "Device capabilities are %x\n", buffer[5]);
dev_dbg(dev, "USB488 interface capabilities are %x\n", buffer[14]);
dev_dbg(dev, "USB488 device capabilities are %x\n", buffer[15]);
data->capabilities.interface_capabilities = buffer[4];
data->capabilities.device_capabilities = buffer[5];
data->capabilities.usb488_interface_capabilities = buffer[14];
data->capabilities.usb488_device_capabilities = buffer[15];
rv = 0;
err_out:
kfree(buffer);
@ -925,6 +957,10 @@ static long usbtmc_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
data = file->private_data;
mutex_lock(&data->io_mutex);
if (data->zombie) {
retval = -ENODEV;
goto skip_io_on_zombie;
}
switch (cmd) {
case USBTMC_IOCTL_CLEAR_OUT_HALT:
@ -952,6 +988,7 @@ static long usbtmc_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
break;
}
skip_io_on_zombie:
mutex_unlock(&data->io_mutex);
return retval;
}
@ -995,6 +1032,7 @@ static int usbtmc_probe(struct usb_interface *intf,
usb_set_intfdata(intf, data);
kref_init(&data->kref);
mutex_init(&data->io_mutex);
data->zombie = 0;
/* Initialize USBTMC bTag and other fields */
data->bTag = 1;
@ -1065,14 +1103,30 @@ static void usbtmc_disconnect(struct usb_interface *intf)
usb_deregister_dev(intf, &usbtmc_class);
sysfs_remove_group(&intf->dev.kobj, &capability_attr_grp);
sysfs_remove_group(&intf->dev.kobj, &data_attr_grp);
mutex_lock(&data->io_mutex);
data->zombie = 1;
mutex_unlock(&data->io_mutex);
kref_put(&data->kref, usbtmc_delete);
}
static int usbtmc_suspend (struct usb_interface *intf, pm_message_t message)
{
/* this driver does not have pending URBs */
return 0;
}
static int usbtmc_resume (struct usb_interface *intf)
{
return 0;
}
static struct usb_driver usbtmc_driver = {
.name = "usbtmc",
.id_table = usbtmc_devices,
.probe = usbtmc_probe,
.disconnect = usbtmc_disconnect
.disconnect = usbtmc_disconnect,
.suspend = usbtmc_suspend,
.resume = usbtmc_resume,
};
static int __init usbtmc_init(void)
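The zombie checks and the corrected read termination above are exercised through
the driver's character device. A minimal user-space sketch, assuming the usual
/dev/usbtmc0 node and a SCPI-speaking instrument:

/*
 * Hedged sketch: send "*IDN?" to a USBTMC instrument and read the reply.
 * /dev/usbtmc0 and the SCPI query are assumptions about a typical setup.
 */
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	char reply[256];
	const char query[] = "*IDN?\n";
	ssize_t n;
	int fd = open("/dev/usbtmc0", O_RDWR);

	if (fd < 0) {
		perror("open /dev/usbtmc0");
		return 1;
	}
	if (write(fd, query, strlen(query)) < 0) {	/* DEV_DEP_MSG_OUT transfer */
		perror("write");
		return 1;
	}
	n = read(fd, reply, sizeof(reply) - 1);		/* DEV_DEP_MSG_IN request + read */
	if (n < 0) {
		perror("read");		/* -ENODEV here means the device went away (zombie) */
		return 1;
	}
	reply[n] = '\0';
	printf("instrument id: %s", reply);
	close(fd);
	return 0;
}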

View File

@ -105,7 +105,7 @@ static int usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
ep->ss_ep_comp->extralen = i;
buffer += i;
size -= i;
retval = buffer - buffer_start + i;
retval = buffer - buffer_start;
if (num_skipped > 0)
dev_dbg(ddev, "skipped %d descriptor%s after %s\n",
num_skipped, plural(num_skipped),

View File

@ -52,6 +52,7 @@
#include "hcd.h" /* for usbcore internals */
#include "usb.h"
#include "hub.h"
#define USB_MAXBUS 64
#define USB_DEVICE_MAX USB_MAXBUS * 128
@ -73,6 +74,7 @@ struct dev_state {
void __user *disccontext;
unsigned long ifclaimed;
u32 secid;
u32 disabled_bulk_eps;
};
struct async {
@ -87,6 +89,8 @@ struct async {
struct urb *urb;
int status;
u32 secid;
u8 bulk_addr;
u8 bulk_status;
};
static int usbfs_snoop;
@ -99,11 +103,15 @@ MODULE_PARM_DESC(usbfs_snoop, "true to log all usbfs traffic");
dev_info(dev , format , ## arg); \
} while (0)
enum snoop_when {
SUBMIT, COMPLETE
};
#define USB_DEVICE_DEV MKDEV(USB_DEVICE_MAJOR, 0)
#define MAX_USBFS_BUFFER_SIZE 16384
static int connected(struct dev_state *ps)
{
return (!list_empty(&ps->list) &&
@ -300,24 +308,79 @@ static struct async *async_getpending(struct dev_state *ps,
return NULL;
}
static void snoop_urb(struct urb *urb, void __user *userurb)
static void snoop_urb(struct usb_device *udev,
void __user *userurb, int pipe, unsigned length,
int timeout_or_status, enum snoop_when when)
{
unsigned j;
unsigned char *data = urb->transfer_buffer;
static const char *types[] = {"isoc", "int", "ctrl", "bulk"};
static const char *dirs[] = {"out", "in"};
int ep;
const char *t, *d;
if (!usbfs_snoop)
return;
dev_info(&urb->dev->dev, "direction=%s\n",
usb_urb_dir_in(urb) ? "IN" : "OUT");
dev_info(&urb->dev->dev, "userurb=%p\n", userurb);
dev_info(&urb->dev->dev, "transfer_buffer_length=%u\n",
urb->transfer_buffer_length);
dev_info(&urb->dev->dev, "actual_length=%u\n", urb->actual_length);
dev_info(&urb->dev->dev, "data: ");
for (j = 0; j < urb->transfer_buffer_length; ++j)
printk("%02x ", data[j]);
printk("\n");
ep = usb_pipeendpoint(pipe);
t = types[usb_pipetype(pipe)];
d = dirs[!!usb_pipein(pipe)];
if (userurb) { /* Async */
if (when == SUBMIT)
dev_info(&udev->dev, "userurb %p, ep%d %s-%s, "
"length %u\n",
userurb, ep, t, d, length);
else
dev_info(&udev->dev, "userurb %p, ep%d %s-%s, "
"actual_length %u status %d\n",
userurb, ep, t, d, length,
timeout_or_status);
} else {
if (when == SUBMIT)
dev_info(&udev->dev, "ep%d %s-%s, length %u, "
"timeout %d\n",
ep, t, d, length, timeout_or_status);
else
dev_info(&udev->dev, "ep%d %s-%s, actual_length %u, "
"status %d\n",
ep, t, d, length, timeout_or_status);
}
}
#define AS_CONTINUATION 1
#define AS_UNLINK 2
static void cancel_bulk_urbs(struct dev_state *ps, unsigned bulk_addr)
__releases(ps->lock)
__acquires(ps->lock)
{
struct async *as;
/* Mark all the pending URBs that match bulk_addr, up to but not
* including the first one without AS_CONTINUATION. If such an
* URB is encountered then a new transfer has already started so
* the endpoint doesn't need to be disabled; otherwise it does.
*/
list_for_each_entry(as, &ps->async_pending, asynclist) {
if (as->bulk_addr == bulk_addr) {
if (as->bulk_status != AS_CONTINUATION)
goto rescan;
as->bulk_status = AS_UNLINK;
as->bulk_addr = 0;
}
}
ps->disabled_bulk_eps |= (1 << bulk_addr);
/* Now carefully unlink all the marked pending URBs */
rescan:
list_for_each_entry(as, &ps->async_pending, asynclist) {
if (as->bulk_status == AS_UNLINK) {
as->bulk_status = 0; /* Only once */
spin_unlock(&ps->lock); /* Allow completions */
usb_unlink_urb(as->urb);
spin_lock(&ps->lock);
goto rescan;
}
}
}
static void async_completed(struct urb *urb)
@ -346,7 +409,11 @@ static void async_completed(struct urb *urb)
secid = as->secid;
}
snoop(&urb->dev->dev, "urb complete\n");
snoop_urb(urb, as->userurb);
snoop_urb(urb->dev, as->userurb, urb->pipe, urb->actual_length,
as->status, COMPLETE);
if (as->status < 0 && as->bulk_addr && as->status != -ECONNRESET &&
as->status != -ENOENT)
cancel_bulk_urbs(ps, as->bulk_addr);
spin_unlock(&ps->lock);
if (signr)
@ -655,6 +722,7 @@ static int usbdev_release(struct inode *inode, struct file *file)
struct async *as;
usb_lock_device(dev);
usb_hub_release_all_ports(dev, ps);
/* Protect against simultaneous open */
mutex_lock(&usbfs_mutex);
@ -688,7 +756,7 @@ static int proc_control(struct dev_state *ps, void __user *arg)
unsigned int tmo;
unsigned char *tbuf;
unsigned wLength;
int i, j, ret;
int i, pipe, ret;
if (copy_from_user(&ctrl, arg, sizeof(ctrl)))
return -EFAULT;
@ -708,24 +776,17 @@ static int proc_control(struct dev_state *ps, void __user *arg)
free_page((unsigned long)tbuf);
return -EINVAL;
}
snoop(&dev->dev, "control read: bRequest=%02x "
"bRrequestType=%02x wValue=%04x "
"wIndex=%04x wLength=%04x\n",
ctrl.bRequest, ctrl.bRequestType, ctrl.wValue,
ctrl.wIndex, ctrl.wLength);
pipe = usb_rcvctrlpipe(dev, 0);
snoop_urb(dev, NULL, pipe, ctrl.wLength, tmo, SUBMIT);
usb_unlock_device(dev);
i = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), ctrl.bRequest,
i = usb_control_msg(dev, pipe, ctrl.bRequest,
ctrl.bRequestType, ctrl.wValue, ctrl.wIndex,
tbuf, ctrl.wLength, tmo);
usb_lock_device(dev);
snoop_urb(dev, NULL, pipe, max(i, 0), min(i, 0), COMPLETE);
if ((i > 0) && ctrl.wLength) {
if (usbfs_snoop) {
dev_info(&dev->dev, "control read: data ");
for (j = 0; j < i; ++j)
printk("%02x ", (u8)(tbuf)[j]);
printk("\n");
}
if (copy_to_user(ctrl.data, tbuf, i)) {
free_page((unsigned long)tbuf);
return -EFAULT;
@ -738,22 +799,15 @@ static int proc_control(struct dev_state *ps, void __user *arg)
return -EFAULT;
}
}
snoop(&dev->dev, "control write: bRequest=%02x "
"bRrequestType=%02x wValue=%04x "
"wIndex=%04x wLength=%04x\n",
ctrl.bRequest, ctrl.bRequestType, ctrl.wValue,
ctrl.wIndex, ctrl.wLength);
if (usbfs_snoop) {
dev_info(&dev->dev, "control write: data: ");
for (j = 0; j < ctrl.wLength; ++j)
printk("%02x ", (unsigned char)(tbuf)[j]);
printk("\n");
}
pipe = usb_sndctrlpipe(dev, 0);
snoop_urb(dev, NULL, pipe, ctrl.wLength, tmo, SUBMIT);
usb_unlock_device(dev);
i = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), ctrl.bRequest,
ctrl.bRequestType, ctrl.wValue, ctrl.wIndex,
tbuf, ctrl.wLength, tmo);
usb_lock_device(dev);
snoop_urb(dev, NULL, pipe, max(i, 0), min(i, 0), COMPLETE);
}
free_page((unsigned long)tbuf);
if (i < 0 && i != -EPIPE) {
@ -772,7 +826,7 @@ static int proc_bulk(struct dev_state *ps, void __user *arg)
unsigned int tmo, len1, pipe;
int len2;
unsigned char *tbuf;
int i, j, ret;
int i, ret;
if (copy_from_user(&bulk, arg, sizeof(bulk)))
return -EFAULT;
@ -799,18 +853,14 @@ static int proc_bulk(struct dev_state *ps, void __user *arg)
kfree(tbuf);
return -EINVAL;
}
snoop(&dev->dev, "bulk read: len=0x%02x timeout=%04d\n",
bulk.len, bulk.timeout);
snoop_urb(dev, NULL, pipe, len1, tmo, SUBMIT);
usb_unlock_device(dev);
i = usb_bulk_msg(dev, pipe, tbuf, len1, &len2, tmo);
usb_lock_device(dev);
snoop_urb(dev, NULL, pipe, len2, i, COMPLETE);
if (!i && len2) {
if (usbfs_snoop) {
dev_info(&dev->dev, "bulk read: data ");
for (j = 0; j < len2; ++j)
printk("%02x ", (u8)(tbuf)[j]);
printk("\n");
}
if (copy_to_user(bulk.data, tbuf, len2)) {
kfree(tbuf);
return -EFAULT;
@ -823,17 +873,12 @@ static int proc_bulk(struct dev_state *ps, void __user *arg)
return -EFAULT;
}
}
snoop(&dev->dev, "bulk write: len=0x%02x timeout=%04d\n",
bulk.len, bulk.timeout);
if (usbfs_snoop) {
dev_info(&dev->dev, "bulk write: data: ");
for (j = 0; j < len1; ++j)
printk("%02x ", (unsigned char)(tbuf)[j]);
printk("\n");
}
snoop_urb(dev, NULL, pipe, len1, tmo, SUBMIT);
usb_unlock_device(dev);
i = usb_bulk_msg(dev, pipe, tbuf, len1, &len2, tmo);
usb_lock_device(dev);
snoop_urb(dev, NULL, pipe, len2, i, COMPLETE);
}
kfree(tbuf);
if (i < 0)
@ -991,6 +1036,7 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
if (uurb->flags & ~(USBDEVFS_URB_ISO_ASAP |
USBDEVFS_URB_SHORT_NOT_OK |
USBDEVFS_URB_BULK_CONTINUATION |
USBDEVFS_URB_NO_FSBR |
USBDEVFS_URB_ZERO_PACKET |
USBDEVFS_URB_NO_INTERRUPT))
@ -1051,13 +1097,6 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
is_in = 0;
uurb->endpoint &= ~USB_DIR_IN;
}
snoop(&ps->dev->dev, "control urb: bRequest=%02x "
"bRrequestType=%02x wValue=%04x "
"wIndex=%04x wLength=%04x\n",
dr->bRequest, dr->bRequestType,
__le16_to_cpup(&dr->wValue),
__le16_to_cpup(&dr->wIndex),
__le16_to_cpup(&dr->wLength));
break;
case USBDEVFS_URB_TYPE_BULK:
@ -1070,7 +1109,6 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
uurb->number_of_packets = 0;
if (uurb->buffer_length > MAX_USBFS_BUFFER_SIZE)
return -EINVAL;
snoop(&ps->dev->dev, "bulk urb\n");
break;
case USBDEVFS_URB_TYPE_ISO:
@ -1097,12 +1135,12 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
}
totlen += isopkt[u].length;
}
if (totlen > 32768) {
/* 3072 * 64 microframes */
if (totlen > 196608) {
kfree(isopkt);
return -EINVAL;
}
uurb->buffer_length = totlen;
snoop(&ps->dev->dev, "iso urb\n");
break;
case USBDEVFS_URB_TYPE_INTERRUPT:
@ -1111,7 +1149,6 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
return -EINVAL;
if (uurb->buffer_length > MAX_USBFS_BUFFER_SIZE)
return -EINVAL;
snoop(&ps->dev->dev, "interrupt urb\n");
break;
default:
@ -1198,11 +1235,46 @@ static int proc_do_submiturb(struct dev_state *ps, struct usbdevfs_urb *uurb,
return -EFAULT;
}
}
snoop_urb(as->urb, as->userurb);
snoop_urb(ps->dev, as->userurb, as->urb->pipe,
as->urb->transfer_buffer_length, 0, SUBMIT);
async_newpending(as);
if ((ret = usb_submit_urb(as->urb, GFP_KERNEL))) {
if (usb_endpoint_xfer_bulk(&ep->desc)) {
spin_lock_irq(&ps->lock);
/* Not exactly the endpoint address; the direction bit is
* shifted to the 0x10 position so that the value will be
* between 0 and 31.
*/
as->bulk_addr = usb_endpoint_num(&ep->desc) |
((ep->desc.bEndpointAddress & USB_ENDPOINT_DIR_MASK)
>> 3);
/* If this bulk URB is the start of a new transfer, re-enable
* the endpoint. Otherwise mark it as a continuation URB.
*/
if (uurb->flags & USBDEVFS_URB_BULK_CONTINUATION)
as->bulk_status = AS_CONTINUATION;
else
ps->disabled_bulk_eps &= ~(1 << as->bulk_addr);
/* Don't accept continuation URBs if the endpoint is
* disabled because of an earlier error.
*/
if (ps->disabled_bulk_eps & (1 << as->bulk_addr))
ret = -EREMOTEIO;
else
ret = usb_submit_urb(as->urb, GFP_ATOMIC);
spin_unlock_irq(&ps->lock);
} else {
ret = usb_submit_urb(as->urb, GFP_KERNEL);
}
if (ret) {
dev_printk(KERN_DEBUG, &ps->dev->dev,
"usbfs: usb_submit_urb returned %d\n", ret);
snoop_urb(ps->dev, as->userurb, as->urb->pipe,
0, ret, COMPLETE);
async_removepending(as);
free_async(as);
return ret;
@ -1548,6 +1620,29 @@ static int proc_ioctl_compat(struct dev_state *ps, compat_uptr_t arg)
}
#endif
static int proc_claim_port(struct dev_state *ps, void __user *arg)
{
unsigned portnum;
int rc;
if (get_user(portnum, (unsigned __user *) arg))
return -EFAULT;
rc = usb_hub_claim_port(ps->dev, portnum, ps);
if (rc == 0)
snoop(&ps->dev->dev, "port %d claimed by process %d: %s\n",
portnum, task_pid_nr(current), current->comm);
return rc;
}
static int proc_release_port(struct dev_state *ps, void __user *arg)
{
unsigned portnum;
if (get_user(portnum, (unsigned __user *) arg))
return -EFAULT;
return usb_hub_release_port(ps->dev, portnum, ps);
}
/*
* NOTE: All requests here that have interface numbers as parameters
* are assuming that somehow the configuration has been prevented from
@ -1645,7 +1740,7 @@ static int usbdev_ioctl(struct inode *inode, struct file *file,
break;
case USBDEVFS_REAPURBNDELAY32:
snoop(&dev->dev, "%s: REAPURBDELAY32\n", __func__);
snoop(&dev->dev, "%s: REAPURBNDELAY32\n", __func__);
ret = proc_reapurbnonblock_compat(ps, p);
break;
@ -1666,7 +1761,7 @@ static int usbdev_ioctl(struct inode *inode, struct file *file,
break;
case USBDEVFS_REAPURBNDELAY:
snoop(&dev->dev, "%s: REAPURBDELAY\n", __func__);
snoop(&dev->dev, "%s: REAPURBNDELAY\n", __func__);
ret = proc_reapurbnonblock(ps, p);
break;
@ -1689,6 +1784,16 @@ static int usbdev_ioctl(struct inode *inode, struct file *file,
snoop(&dev->dev, "%s: IOCTL\n", __func__);
ret = proc_ioctl_default(ps, p);
break;
case USBDEVFS_CLAIM_PORT:
snoop(&dev->dev, "%s: CLAIM_PORT\n", __func__);
ret = proc_claim_port(ps, p);
break;
case USBDEVFS_RELEASE_PORT:
snoop(&dev->dev, "%s: RELEASE_PORT\n", __func__);
ret = proc_release_port(ps, p);
break;
}
usb_unlock_device(dev);
if (ret >= 0)
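The continuation handling added above lets user space split one logical bulk
transfer across several URBs and have the kernel cancel the trailing pieces if an
earlier one fails or comes up short. A hedged sketch of the submitting side, using
only names from <linux/usbdevice_fs.h>; the file descriptor is assumed to be an
opened usbfs device node with the interface already claimed, and the endpoint
value is an example:

/*
 * Hedged sketch: submit a large bulk transfer as several usbfs URBs,
 * marking every URB after the first with USBDEVFS_URB_BULK_CONTINUATION.
 * The URBs live in a static array so they stay valid until reaped with
 * USBDEVFS_REAPURB; error handling is minimal on purpose.
 */
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/usbdevice_fs.h>

#define CHUNK 16384	/* matches MAX_USBFS_BUFFER_SIZE in the hunk above */

static struct usbdevfs_urb urbs[8];

static int submit_split_bulk(int fd, unsigned char ep, void *buf, size_t len)
{
	size_t off;
	int i;

	for (off = 0, i = 0; off < len && i < 8; off += CHUNK, i++) {
		memset(&urbs[i], 0, sizeof(urbs[i]));
		urbs[i].type = USBDEVFS_URB_TYPE_BULK;
		urbs[i].endpoint = ep;			/* e.g. 0x02 for bulk OUT ep 2 */
		urbs[i].buffer = (char *)buf + off;
		urbs[i].buffer_length = (len - off) < CHUNK ? (int)(len - off) : CHUNK;
		if (i > 0)	/* everything after the first URB continues the transfer */
			urbs[i].flags |= USBDEVFS_URB_BULK_CONTINUATION;
		if (ioctl(fd, USBDEVFS_SUBMITURB, &urbs[i]) < 0) {
			perror("USBDEVFS_SUBMITURB");
			return -1;
		}
	}
	return i;	/* number of URBs submitted; reap them with USBDEVFS_REAPURB */
}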

View File

@ -207,6 +207,9 @@ static int usb_probe_interface(struct device *dev)
intf->needs_binding = 0;
if (usb_device_is_owned(udev))
return -ENODEV;
if (udev->authorized == 0) {
dev_err(&intf->dev, "Device is not authorized for usage\n");
return -ENODEV;
@ -232,28 +235,35 @@ static int usb_probe_interface(struct device *dev)
/* The interface should always appear to be in use
* unless the driver suports autosuspend.
*/
intf->pm_usage_cnt = !(driver->supports_autosuspend);
atomic_set(&intf->pm_usage_cnt, !driver->supports_autosuspend);
/* Carry out a deferred switch to altsetting 0 */
if (intf->needs_altsetting0) {
usb_set_interface(udev, intf->altsetting[0].
error = usb_set_interface(udev, intf->altsetting[0].
desc.bInterfaceNumber, 0);
if (error < 0)
goto err;
intf->needs_altsetting0 = 0;
}
error = driver->probe(intf, id);
if (error) {
mark_quiesced(intf);
intf->needs_remote_wakeup = 0;
intf->condition = USB_INTERFACE_UNBOUND;
usb_cancel_queued_reset(intf);
} else
intf->condition = USB_INTERFACE_BOUND;
if (error)
goto err;
intf->condition = USB_INTERFACE_BOUND;
usb_autosuspend_device(udev);
}
return error;
err:
mark_quiesced(intf);
intf->needs_remote_wakeup = 0;
intf->condition = USB_INTERFACE_UNBOUND;
usb_cancel_queued_reset(intf);
usb_autosuspend_device(udev);
return error;
}
/* called from driver core with dev locked */
@ -262,7 +272,7 @@ static int usb_unbind_interface(struct device *dev)
struct usb_driver *driver = to_usb_driver(dev->driver);
struct usb_interface *intf = to_usb_interface(dev);
struct usb_device *udev;
int error;
int error, r;
intf->condition = USB_INTERFACE_UNBINDING;
@ -290,11 +300,14 @@ static int usb_unbind_interface(struct device *dev)
* Just re-enable it without affecting the endpoint toggles.
*/
usb_enable_interface(udev, intf, false);
} else if (!error && intf->dev.power.status == DPM_ON)
usb_set_interface(udev, intf->altsetting[0].
} else if (!error && intf->dev.power.status == DPM_ON) {
r = usb_set_interface(udev, intf->altsetting[0].
desc.bInterfaceNumber, 0);
else
if (r < 0)
intf->needs_altsetting0 = 1;
} else {
intf->needs_altsetting0 = 1;
}
usb_set_intfdata(intf, NULL);
intf->condition = USB_INTERFACE_UNBOUND;
@ -344,7 +357,7 @@ int usb_driver_claim_interface(struct usb_driver *driver,
usb_pm_lock(udev);
iface->condition = USB_INTERFACE_BOUND;
mark_active(iface);
iface->pm_usage_cnt = !(driver->supports_autosuspend);
atomic_set(&iface->pm_usage_cnt, !driver->supports_autosuspend);
usb_pm_unlock(udev);
/* if interface was already added, bind now; else let
@ -1065,7 +1078,7 @@ static int autosuspend_check(struct usb_device *udev, int reschedule)
intf = udev->actconfig->interface[i];
if (!is_active(intf))
continue;
if (intf->pm_usage_cnt > 0)
if (atomic_read(&intf->pm_usage_cnt) > 0)
return -EBUSY;
if (intf->needs_remote_wakeup &&
!udev->do_remote_wakeup) {
@ -1461,17 +1474,19 @@ static int usb_autopm_do_interface(struct usb_interface *intf,
status = -ENODEV;
else {
udev->auto_pm = 1;
intf->pm_usage_cnt += inc_usage_cnt;
atomic_add(inc_usage_cnt, &intf->pm_usage_cnt);
udev->last_busy = jiffies;
if (inc_usage_cnt >= 0 && intf->pm_usage_cnt > 0) {
if (inc_usage_cnt >= 0 &&
atomic_read(&intf->pm_usage_cnt) > 0) {
if (udev->state == USB_STATE_SUSPENDED)
status = usb_resume_both(udev,
PMSG_AUTO_RESUME);
if (status != 0)
intf->pm_usage_cnt -= inc_usage_cnt;
atomic_sub(inc_usage_cnt, &intf->pm_usage_cnt);
else
udev->last_busy = jiffies;
} else if (inc_usage_cnt <= 0 && intf->pm_usage_cnt <= 0) {
} else if (inc_usage_cnt <= 0 &&
atomic_read(&intf->pm_usage_cnt) <= 0) {
status = usb_suspend_both(udev, PMSG_AUTO_SUSPEND);
}
}
@ -1516,7 +1531,7 @@ void usb_autopm_put_interface(struct usb_interface *intf)
status = usb_autopm_do_interface(intf, -1);
dev_vdbg(&intf->dev, "%s: status %d cnt %d\n",
__func__, status, intf->pm_usage_cnt);
__func__, status, atomic_read(&intf->pm_usage_cnt));
}
EXPORT_SYMBOL_GPL(usb_autopm_put_interface);
@ -1544,10 +1559,10 @@ void usb_autopm_put_interface_async(struct usb_interface *intf)
status = -ENODEV;
} else {
udev->last_busy = jiffies;
--intf->pm_usage_cnt;
atomic_dec(&intf->pm_usage_cnt);
if (udev->autosuspend_disabled || udev->autosuspend_delay < 0)
status = -EPERM;
else if (intf->pm_usage_cnt <= 0 &&
else if (atomic_read(&intf->pm_usage_cnt) <= 0 &&
!timer_pending(&udev->autosuspend.timer)) {
queue_delayed_work(ksuspend_usb_wq, &udev->autosuspend,
round_jiffies_up_relative(
@ -1555,7 +1570,7 @@ void usb_autopm_put_interface_async(struct usb_interface *intf)
}
}
dev_vdbg(&intf->dev, "%s: status %d cnt %d\n",
__func__, status, intf->pm_usage_cnt);
__func__, status, atomic_read(&intf->pm_usage_cnt));
}
EXPORT_SYMBOL_GPL(usb_autopm_put_interface_async);
@ -1599,7 +1614,7 @@ int usb_autopm_get_interface(struct usb_interface *intf)
status = usb_autopm_do_interface(intf, 1);
dev_vdbg(&intf->dev, "%s: status %d cnt %d\n",
__func__, status, intf->pm_usage_cnt);
__func__, status, atomic_read(&intf->pm_usage_cnt));
return status;
}
EXPORT_SYMBOL_GPL(usb_autopm_get_interface);
@ -1627,10 +1642,14 @@ int usb_autopm_get_interface_async(struct usb_interface *intf)
status = -ENODEV;
else if (udev->autoresume_disabled)
status = -EPERM;
else if (++intf->pm_usage_cnt > 0 && udev->state == USB_STATE_SUSPENDED)
queue_work(ksuspend_usb_wq, &udev->autoresume);
else {
atomic_inc(&intf->pm_usage_cnt);
if (atomic_read(&intf->pm_usage_cnt) > 0 &&
udev->state == USB_STATE_SUSPENDED)
queue_work(ksuspend_usb_wq, &udev->autoresume);
}
dev_vdbg(&intf->dev, "%s: status %d cnt %d\n",
__func__, status, intf->pm_usage_cnt);
__func__, status, atomic_read(&intf->pm_usage_cnt));
return status;
}
EXPORT_SYMBOL_GPL(usb_autopm_get_interface_async);
@ -1652,7 +1671,7 @@ int usb_autopm_set_interface(struct usb_interface *intf)
status = usb_autopm_do_interface(intf, 0);
dev_vdbg(&intf->dev, "%s: status %d cnt %d\n",
__func__, status, intf->pm_usage_cnt);
__func__, status, atomic_read(&intf->pm_usage_cnt));
return status;
}
EXPORT_SYMBOL_GPL(usb_autopm_set_interface);

View File

@ -158,7 +158,9 @@ static int generic_probe(struct usb_device *udev)
/* Choose and set the configuration. This registers the interfaces
* with the driver core and lets interface drivers bind to them.
*/
if (udev->authorized == 0)
if (usb_device_is_owned(udev))
; /* Don't configure if the device is owned */
else if (udev->authorized == 0)
dev_err(&udev->dev, "Device is not authorized for usage\n");
else {
c = usb_choose_configuration(udev);

View File

@ -337,72 +337,89 @@ static const u8 ss_rh_config_descriptor[] = {
/*-------------------------------------------------------------------------*/
/*
* helper routine for returning string descriptors in UTF-16LE
* input can actually be ISO-8859-1; ASCII is its 7-bit subset
/**
* ascii2desc() - Helper routine for producing UTF-16LE string descriptors
* @s: Null-terminated ASCII (actually ISO-8859-1) string
* @buf: Buffer for USB string descriptor (header + UTF-16LE)
* @len: Length (in bytes; may be odd) of descriptor buffer.
*
* The return value is the number of bytes filled in: 2 + 2*strlen(s) or
* buflen, whichever is less.
*
* USB String descriptors can contain at most 126 characters; input
* strings longer than that are truncated.
*/
static unsigned ascii2utf(char *s, u8 *utf, int utfmax)
static unsigned
ascii2desc(char const *s, u8 *buf, unsigned len)
{
unsigned retval;
unsigned n, t = 2 + 2*strlen(s);
for (retval = 0; *s && utfmax > 1; utfmax -= 2, retval += 2) {
*utf++ = *s++;
*utf++ = 0;
if (t > 254)
t = 254; /* Longest possible UTF string descriptor */
if (len > t)
len = t;
t += USB_DT_STRING << 8; /* Now t is first 16 bits to store */
n = len;
while (n--) {
*buf++ = t;
if (!n--)
break;
*buf++ = t >> 8;
t = (unsigned char)*s++;
}
if (utfmax > 0) {
*utf = *s;
++retval;
}
return retval;
return len;
}
/*
* rh_string - provides manufacturer, product and serial strings for root hub
* @id: the string ID number (1: serial number, 2: product, 3: vendor)
/**
* rh_string() - provides string descriptors for root hub
* @id: the string ID number (0: langids, 1: serial #, 2: product, 3: vendor)
* @hcd: the host controller for this root hub
* @data: return packet in UTF-16 LE
* @len: length of the return packet
* @data: buffer for output packet
* @len: length of the provided buffer
*
* Produces either a manufacturer, product or serial number string for the
* virtual root hub device.
* Returns the number of bytes filled in: the length of the descriptor or
* of the provided buffer, whichever is less.
*/
static unsigned rh_string(int id, struct usb_hcd *hcd, u8 *data, unsigned len)
static unsigned
rh_string(int id, struct usb_hcd const *hcd, u8 *data, unsigned len)
{
char buf [100];
char buf[100];
char const *s;
static char const langids[4] = {4, USB_DT_STRING, 0x09, 0x04};
// language ids
if (id == 0) {
buf[0] = 4; buf[1] = 3; /* 4 bytes string data */
buf[2] = 0x09; buf[3] = 0x04; /* MSFT-speak for "en-us" */
len = min_t(unsigned, len, 4);
memcpy (data, buf, len);
switch (id) {
case 0:
/* Array of LANGID codes (0x0409 is MSFT-speak for "en-us") */
/* See http://www.usb.org/developers/docs/USB_LANGIDs.pdf */
if (len > 4)
len = 4;
memcpy(data, langids, len);
return len;
// serial number
} else if (id == 1) {
strlcpy (buf, hcd->self.bus_name, sizeof buf);
// product description
} else if (id == 2) {
strlcpy (buf, hcd->product_desc, sizeof buf);
// id 3 == vendor description
} else if (id == 3) {
case 1:
/* Serial number */
s = hcd->self.bus_name;
break;
case 2:
/* Product name */
s = hcd->product_desc;
break;
case 3:
/* Manufacturer */
snprintf (buf, sizeof buf, "%s %s %s", init_utsname()->sysname,
init_utsname()->release, hcd->driver->description);
s = buf;
break;
default:
/* Can't happen; caller guarantees it */
return 0;
}
switch (len) { /* All cases fall through */
default:
len = 2 + ascii2utf (buf, data + 2, len - 2);
case 2:
data [1] = 3; /* type == string */
case 1:
data [0] = 2 * (strlen (buf) + 1);
case 0:
; /* Compiler wants a statement here */
}
return len;
return ascii2desc(s, data, len);
}
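The rewritten helper above packs a standard USB string descriptor: a length byte,
the USB_DT_STRING type byte (0x03), then one UTF-16LE code unit per ASCII
character. Below is a small stand-alone user-space illustration of that same
layout, a sketch rather than the kernel function itself; the "xhci_hcd" sample
string is just a plausible bus name.

#include <stdio.h>
#include <string.h>

#define USB_DT_STRING 0x03	/* standard descriptor type for strings */

/* Build a USB string descriptor for the ASCII string s, truncating to at
 * most len bytes, and return the number of bytes written: bLength,
 * bDescriptorType, then low byte = ASCII char, high byte = 0 per character. */
static unsigned ascii_to_string_desc(const char *s, unsigned char *buf, unsigned len)
{
	unsigned t = 2 + 2 * strlen(s);
	unsigned i;

	if (t > 254)
		t = 254;			/* longest legal string descriptor */
	if (len > t)
		len = t;
	for (i = 0; i < len; i++) {
		if (i == 0)
			buf[i] = t;			/* bLength */
		else if (i == 1)
			buf[i] = USB_DT_STRING;		/* bDescriptorType */
		else if (i % 2 == 0)
			buf[i] = s[(i - 2) / 2];	/* low byte of UTF-16LE unit */
		else
			buf[i] = 0;			/* high byte */
	}
	return len;
}

int main(void)
{
	unsigned char desc[64];
	unsigned i, n = ascii_to_string_desc("xhci_hcd", desc, sizeof(desc));

	for (i = 0; i < n; i++)
		printf("%02x ", desc[i]);
	printf("\n");	/* expect: 12 03 78 00 68 00 63 00 69 00 5f 00 68 00 63 00 64 00 */
	return 0;
}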

View File

@ -267,6 +267,11 @@ struct hc_driver {
void (*reset_bandwidth)(struct usb_hcd *, struct usb_device *);
/* Returns the hardware-chosen device address */
int (*address_device)(struct usb_hcd *, struct usb_device *udev);
/* Notifies the HCD after a hub descriptor is fetched.
* Will block.
*/
int (*update_hub_device)(struct usb_hcd *, struct usb_device *hdev,
struct usb_tt *tt, gfp_t mem_flags);
};
extern int usb_hcd_link_urb_to_ep(struct usb_hcd *hcd, struct urb *urb);

View File

@ -78,6 +78,7 @@ struct usb_hub {
u8 indicator[USB_MAXCHILDREN];
struct delayed_work leds;
struct delayed_work init_work;
void **port_owners;
};
@ -162,8 +163,10 @@ static inline char *portspeed(int portstatus)
}
/* Note that hdev or one of its children must be locked! */
static inline struct usb_hub *hdev_to_hub(struct usb_device *hdev)
static struct usb_hub *hdev_to_hub(struct usb_device *hdev)
{
if (!hdev || !hdev->actconfig)
return NULL;
return usb_get_intfdata(hdev->actconfig->interface[0]);
}
@ -372,7 +375,7 @@ static void kick_khubd(struct usb_hub *hub)
unsigned long flags;
/* Suppress autosuspend until khubd runs */
to_usb_interface(hub->intfdev)->pm_usage_cnt = 1;
atomic_set(&to_usb_interface(hub->intfdev)->pm_usage_cnt, 1);
spin_lock_irqsave(&hub_event_lock, flags);
if (!hub->disconnected && list_empty(&hub->event_list)) {
@ -384,8 +387,10 @@ static void kick_khubd(struct usb_hub *hub)
void usb_kick_khubd(struct usb_device *hdev)
{
/* FIXME: What if hdev isn't bound to the hub driver? */
kick_khubd(hdev_to_hub(hdev));
struct usb_hub *hub = hdev_to_hub(hdev);
if (hub)
kick_khubd(hub);
}
@ -677,7 +682,8 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
msecs_to_jiffies(delay));
/* Suppress autosuspend until init is done */
to_usb_interface(hub->intfdev)->pm_usage_cnt = 1;
atomic_set(&to_usb_interface(hub->intfdev)->
pm_usage_cnt, 1);
return; /* Continues at init2: below */
} else {
hub_power_on(hub, true);
@ -854,25 +860,24 @@ static int hub_post_reset(struct usb_interface *intf)
static int hub_configure(struct usb_hub *hub,
struct usb_endpoint_descriptor *endpoint)
{
struct usb_hcd *hcd;
struct usb_device *hdev = hub->hdev;
struct device *hub_dev = hub->intfdev;
u16 hubstatus, hubchange;
u16 wHubCharacteristics;
unsigned int pipe;
int maxp, ret;
char *message;
char *message = "out of memory";
hub->buffer = usb_buffer_alloc(hdev, sizeof(*hub->buffer), GFP_KERNEL,
&hub->buffer_dma);
if (!hub->buffer) {
message = "can't allocate hub irq buffer";
ret = -ENOMEM;
goto fail;
}
hub->status = kmalloc(sizeof(*hub->status), GFP_KERNEL);
if (!hub->status) {
message = "can't kmalloc hub status buffer";
ret = -ENOMEM;
goto fail;
}
@ -880,7 +885,6 @@ static int hub_configure(struct usb_hub *hub,
hub->descriptor = kmalloc(sizeof(*hub->descriptor), GFP_KERNEL);
if (!hub->descriptor) {
message = "can't kmalloc hub descriptor";
ret = -ENOMEM;
goto fail;
}
@ -904,6 +908,12 @@ static int hub_configure(struct usb_hub *hub,
dev_info (hub_dev, "%d port%s detected\n", hdev->maxchild,
(hdev->maxchild == 1) ? "" : "s");
hub->port_owners = kzalloc(hdev->maxchild * sizeof(void *), GFP_KERNEL);
if (!hub->port_owners) {
ret = -ENOMEM;
goto fail;
}
wHubCharacteristics = le16_to_cpu(hub->descriptor->wHubCharacteristics);
if (wHubCharacteristics & HUB_CHAR_COMPOUND) {
@ -1052,6 +1062,19 @@ static int hub_configure(struct usb_hub *hub,
dev_dbg(hub_dev, "%umA bus power budget for each child\n",
hub->mA_per_port);
/* Update the HCD's internal representation of this hub before khubd
* starts getting port status changes for devices under the hub.
*/
hcd = bus_to_hcd(hdev->bus);
if (hcd->driver->update_hub_device) {
ret = hcd->driver->update_hub_device(hcd, hdev,
&hub->tt, GFP_KERNEL);
if (ret < 0) {
message = "can't update HCD hub info";
goto fail;
}
}
ret = hub_hub_status(hub, &hubstatus, &hubchange);
if (ret < 0) {
message = "can't get hub status";
@ -1082,7 +1105,6 @@ static int hub_configure(struct usb_hub *hub,
hub->urb = usb_alloc_urb(0, GFP_KERNEL);
if (!hub->urb) {
message = "couldn't allocate interrupt urb";
ret = -ENOMEM;
goto fail;
}
@ -1131,11 +1153,13 @@ static void hub_disconnect(struct usb_interface *intf)
hub_quiesce(hub, HUB_DISCONNECT);
usb_set_intfdata (intf, NULL);
hub->hdev->maxchild = 0;
if (hub->hdev->speed == USB_SPEED_HIGH)
highspeed_hubs--;
usb_free_urb(hub->urb);
kfree(hub->port_owners);
kfree(hub->descriptor);
kfree(hub->status);
usb_buffer_free(hub->hdev, sizeof(*hub->buffer), hub->buffer,
@ -1250,6 +1274,79 @@ hub_ioctl(struct usb_interface *intf, unsigned int code, void *user_data)
}
}
/*
* Allow user programs to claim ports on a hub. When a device is attached
* to one of these "claimed" ports, the program will "own" the device.
*/
static int find_port_owner(struct usb_device *hdev, unsigned port1,
void ***ppowner)
{
if (hdev->state == USB_STATE_NOTATTACHED)
return -ENODEV;
if (port1 == 0 || port1 > hdev->maxchild)
return -EINVAL;
/* This assumes that devices not managed by the hub driver
* will always have maxchild equal to 0.
*/
*ppowner = &(hdev_to_hub(hdev)->port_owners[port1 - 1]);
return 0;
}
/* In the following three functions, the caller must hold hdev's lock */
int usb_hub_claim_port(struct usb_device *hdev, unsigned port1, void *owner)
{
int rc;
void **powner;
rc = find_port_owner(hdev, port1, &powner);
if (rc)
return rc;
if (*powner)
return -EBUSY;
*powner = owner;
return rc;
}
int usb_hub_release_port(struct usb_device *hdev, unsigned port1, void *owner)
{
int rc;
void **powner;
rc = find_port_owner(hdev, port1, &powner);
if (rc)
return rc;
if (*powner != owner)
return -ENOENT;
*powner = NULL;
return rc;
}
void usb_hub_release_all_ports(struct usb_device *hdev, void *owner)
{
int n;
void **powner;
n = find_port_owner(hdev, 1, &powner);
if (n == 0) {
for (; n < hdev->maxchild; (++n, ++powner)) {
if (*powner == owner)
*powner = NULL;
}
}
}
/* The caller must hold udev's lock */
bool usb_device_is_owned(struct usb_device *udev)
{
struct usb_hub *hub;
if (udev->state == USB_STATE_NOTATTACHED || !udev->parent)
return false;
hub = hdev_to_hub(udev->parent);
return !!hub->port_owners[udev->portnum - 1];
}
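A hypothetical caller of the new claim/release helpers might look like the sketch below (illustrative only; the example_* name, the owner token, and the choice of port 2 are not from the patch):
    static int example_claim_and_release(struct usb_device *hdev, void *owner)
    {
            int rc;
            usb_lock_device(hdev);          /* the helpers require hdev's lock */
            rc = usb_hub_claim_port(hdev, 2, owner);
            if (rc == 0)
                    rc = usb_hub_release_port(hdev, 2, owner);
            usb_unlock_device(hdev);
            return rc;
    }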
static void recursively_mark_NOTATTACHED(struct usb_device *udev)
{
@ -2849,14 +2946,7 @@ static void hub_port_connect_change(struct usb_hub *hub, int port1,
/* For a suspended device, treat this as a
* remote wakeup event.
*/
if (udev->do_remote_wakeup)
status = remote_wakeup(udev);
/* Otherwise leave it be; devices can't tell the
* difference between suspended and disabled.
*/
else
status = 0;
status = remote_wakeup(udev);
#endif
} else {

View File

@ -459,35 +459,23 @@ int usb_sg_init(struct usb_sg_request *io, struct usb_device *dev,
io->urbs[i]->context = io;
/*
* Some systems need to revert to PIO when DMA is
* temporarily unavailable. For their sakes, both
* transfer_buffer and transfer_dma are set when
* possible. However this can only work on systems
* without:
* Some systems need to revert to PIO when DMA is temporarily
* unavailable. For their sakes, both transfer_buffer and
* transfer_dma are set when possible.
*
* - HIGHMEM, since DMA buffers located in high memory
* are not directly addressable by the CPU for PIO;
*
* - IOMMU, since dma_map_sg() is allowed to use an
* IOMMU to make virtually discontiguous buffers be
* "dma-contiguous" so that PIO and DMA need diferent
* numbers of URBs.
*
* So when HIGHMEM or IOMMU are in use, transfer_buffer
* is NULL to prevent stale pointers and to help spot
* bugs.
* Note that if IOMMU coalescing occurred, we cannot
* trust sg_page anymore, so check if S/G list shrunk.
*/
if (io->nents == io->entries && !PageHighMem(sg_page(sg)))
io->urbs[i]->transfer_buffer = sg_virt(sg);
else
io->urbs[i]->transfer_buffer = NULL;
if (dma) {
io->urbs[i]->transfer_dma = sg_dma_address(sg);
len = sg_dma_len(sg);
#if defined(CONFIG_HIGHMEM) || defined(CONFIG_GART_IOMMU)
io->urbs[i]->transfer_buffer = NULL;
#else
io->urbs[i]->transfer_buffer = sg_virt(sg);
#endif
} else {
/* hc may use _only_ transfer_buffer */
io->urbs[i]->transfer_buffer = sg_virt(sg);
len = sg->length;
}

View File

@ -413,8 +413,13 @@ struct usb_device *usb_alloc_dev(struct usb_device *parent,
} else {
snprintf(dev->devpath, sizeof dev->devpath,
"%s.%d", parent->devpath, port1);
dev->route = parent->route +
(port1 << ((parent->level - 1)*4));
/* Route string assumes hubs have less than 16 ports */
if (port1 < 15)
dev->route = parent->route +
(port1 << ((parent->level - 1)*4));
else
dev->route = parent->route +
(15 << ((parent->level - 1)*4));
}
dev->dev.parent = &parent->dev;
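To illustrate the nibble-per-tier packing used above (standalone sketch; the route_append() helper and the example topology are hypothetical):
    #include <stdio.h>
    /* One 4-bit nibble per hub tier; ports 15 and above share nibble 15,
     * matching the clamp in usb_alloc_dev() above.
     */
    static unsigned route_append(unsigned parent_route, int parent_level, int port1)
    {
            int nibble = (port1 < 15) ? port1 : 15;
            return parent_route + (nibble << ((parent_level - 1) * 4));
    }
    int main(void)
    {
            /* device on port 3 of a tier-1 hub, then a device on port 5
             * of that device (itself a tier-2 hub)
             */
            unsigned d1 = route_append(0, 1, 3);    /* 0x3  */
            unsigned d2 = route_append(d1, 2, 5);   /* 0x53 */
            printf("d1=%#x d2=%#x\n", d1, d2);
            return 0;
    }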
@ -914,11 +919,11 @@ int usb_buffer_map_sg(const struct usb_device *dev, int is_in,
|| !(bus = dev->bus)
|| !(controller = bus->controller)
|| !controller->dma_mask)
return -1;
return -EINVAL;
/* FIXME generic api broken like pci, can't report errors */
return dma_map_sg(controller, sg, nents,
is_in ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
is_in ? DMA_FROM_DEVICE : DMA_TO_DEVICE) ? : -ENOMEM;
}
EXPORT_SYMBOL_GPL(usb_buffer_map_sg);

View File

@ -37,6 +37,13 @@ extern int usb_match_device(struct usb_device *dev,
extern void usb_forced_unbind_intf(struct usb_interface *intf);
extern void usb_rebind_intf(struct usb_interface *intf);
extern int usb_hub_claim_port(struct usb_device *hdev, unsigned port,
void *owner);
extern int usb_hub_release_port(struct usb_device *hdev, unsigned port,
void *owner);
extern void usb_hub_release_all_ports(struct usb_device *hdev, void *owner);
extern bool usb_device_is_owned(struct usb_device *udev);
extern int usb_hub_init(void);
extern void usb_hub_cleanup(void);
extern int usb_major_init(void);

View File

@ -0,0 +1,5 @@
#
# Makefile for early USB devices
#
obj-$(CONFIG_EARLY_PRINTK_DBGP) += ehci-dbgp.o

View File

@ -0,0 +1,996 @@
/*
* Standalone EHCI usb debug driver
*
* Originally written by:
* Eric W. Biederman" <ebiederm@xmission.com> and
* Yinghai Lu <yhlu.kernel@gmail.com>
*
* Changes for early/late printk and HW errata:
* Jason Wessel <jason.wessel@windriver.com>
* Copyright (C) 2009 Wind River Systems, Inc.
*
*/
#include <linux/console.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/pci_regs.h>
#include <linux/pci_ids.h>
#include <linux/usb/ch9.h>
#include <linux/usb/ehci_def.h>
#include <linux/delay.h>
#include <asm/io.h>
#include <asm/pci-direct.h>
#include <asm/fixmap.h>
/* The code here is intended to talk directly to the EHCI debug port
* and does not require that you have any kind of USB host controller
* drivers or USB device drivers compiled into the kernel.
*
* If you make a change to anything in here, the following test cases
* need to pass where a USB debug device works in the following
* configurations.
*
* 1. boot args: earlyprintk=dbgp
* o kernel compiled with # CONFIG_USB_EHCI_HCD is not set
* o kernel compiled with CONFIG_USB_EHCI_HCD=y
* 2. boot args: earlyprintk=dbgp,keep
* o kernel compiled with # CONFIG_USB_EHCI_HCD is not set
* o kernel compiled with CONFIG_USB_EHCI_HCD=y
* 3. boot args: earlyprintk=dbgp console=ttyUSB0
* o kernel has CONFIG_USB_EHCI_HCD=y and
* CONFIG_USB_SERIAL_DEBUG=y
* 4. boot args: earlyprintk=vga,dbgp
* o kernel compiled with # CONFIG_USB_EHCI_HCD is not set
* o kernel compiled with CONFIG_USB_EHCI_HCD=y
*
* For the 4th configuration you can turn on or off the DBGP_DEBUG
* such that you can debug the dbgp device's driver code.
*/
static int dbgp_phys_port = 1;
static struct ehci_caps __iomem *ehci_caps;
static struct ehci_regs __iomem *ehci_regs;
static struct ehci_dbg_port __iomem *ehci_debug;
static int dbgp_not_safe; /* Cannot use debug device during ehci reset */
static unsigned int dbgp_endpoint_out;
struct ehci_dev {
u32 bus;
u32 slot;
u32 func;
};
static struct ehci_dev ehci_dev;
#define USB_DEBUG_DEVNUM 127
#define DBGP_DATA_TOGGLE 0x8800
#ifdef DBGP_DEBUG
#define dbgp_printk printk
static void dbgp_ehci_status(char *str)
{
if (!ehci_debug)
return;
dbgp_printk("dbgp: %s\n", str);
dbgp_printk(" Debug control: %08x", readl(&ehci_debug->control));
dbgp_printk(" ehci cmd : %08x", readl(&ehci_regs->command));
dbgp_printk(" ehci conf flg: %08x\n",
readl(&ehci_regs->configured_flag));
dbgp_printk(" ehci status : %08x", readl(&ehci_regs->status));
dbgp_printk(" ehci portsc : %08x\n",
readl(&ehci_regs->port_status[dbgp_phys_port - 1]));
}
#else
static inline void dbgp_ehci_status(char *str) { }
static inline void dbgp_printk(const char *fmt, ...) { }
#endif
static inline u32 dbgp_pid_update(u32 x, u32 tok)
{
return ((x ^ DBGP_DATA_TOGGLE) & 0xffff00) | (tok & 0xff);
}
static inline u32 dbgp_len_update(u32 x, u32 len)
{
return (x & ~0x0f) | (len & 0x0f);
}
/*
* USB Packet IDs (PIDs)
*/
/* token */
#define USB_PID_OUT 0xe1
#define USB_PID_IN 0x69
#define USB_PID_SOF 0xa5
#define USB_PID_SETUP 0x2d
/* handshake */
#define USB_PID_ACK 0xd2
#define USB_PID_NAK 0x5a
#define USB_PID_STALL 0x1e
#define USB_PID_NYET 0x96
/* data */
#define USB_PID_DATA0 0xc3
#define USB_PID_DATA1 0x4b
#define USB_PID_DATA2 0x87
#define USB_PID_MDATA 0x0f
/* Special */
#define USB_PID_PREAMBLE 0x3c
#define USB_PID_ERR 0x3c
#define USB_PID_SPLIT 0x78
#define USB_PID_PING 0xb4
#define USB_PID_UNDEF_0 0xf0
#define USB_PID_DATA_TOGGLE 0x88
#define DBGP_CLAIM (DBGP_OWNER | DBGP_ENABLED | DBGP_INUSE)
#define PCI_CAP_ID_EHCI_DEBUG 0xa
#define HUB_ROOT_RESET_TIME 50 /* times are in msec */
#define HUB_SHORT_RESET_TIME 10
#define HUB_LONG_RESET_TIME 200
#define HUB_RESET_TIMEOUT 500
#define DBGP_MAX_PACKET 8
#define DBGP_TIMEOUT (250 * 1000)
static int dbgp_wait_until_complete(void)
{
u32 ctrl;
int loop = DBGP_TIMEOUT;
do {
ctrl = readl(&ehci_debug->control);
/* Stop when the transaction is finished */
if (ctrl & DBGP_DONE)
break;
udelay(1);
} while (--loop > 0);
if (!loop)
return -DBGP_TIMEOUT;
/*
* Now that we have observed the completed transaction,
* clear the done bit.
*/
writel(ctrl | DBGP_DONE, &ehci_debug->control);
return (ctrl & DBGP_ERROR) ? -DBGP_ERRCODE(ctrl) : DBGP_LEN(ctrl);
}
static inline void dbgp_mdelay(int ms)
{
int i;
while (ms--) {
for (i = 0; i < 1000; i++)
outb(0x1, 0x80);
}
}
static void dbgp_breath(void)
{
/* Sleep to give the debug port a chance to breathe */
}
static int dbgp_wait_until_done(unsigned ctrl)
{
u32 pids, lpid;
int ret;
int loop = 3;
retry:
writel(ctrl | DBGP_GO, &ehci_debug->control);
ret = dbgp_wait_until_complete();
pids = readl(&ehci_debug->pids);
lpid = DBGP_PID_GET(pids);
if (ret < 0) {
/* A -DBGP_TIMEOUT failure here means the device has
* failed, perhaps because it was unplugged, in which
* case we do not want to hang the system so the dbgp
* will be marked as unsafe to use. EHCI reset is the
* only way to recover if you unplug the dbgp device.
*/
if (ret == -DBGP_TIMEOUT && !dbgp_not_safe)
dbgp_not_safe = 1;
return ret;
}
/*
* If the port is getting full or it has dropped data,
* start pacing ourselves; not necessary, but it's friendly.
*/
if ((lpid == USB_PID_NAK) || (lpid == USB_PID_NYET))
dbgp_breath();
/* If I get a NACK reissue the transmission */
if (lpid == USB_PID_NAK) {
if (--loop > 0)
goto retry;
}
return ret;
}
static inline void dbgp_set_data(const void *buf, int size)
{
const unsigned char *bytes = buf;
u32 lo, hi;
int i;
lo = hi = 0;
for (i = 0; i < 4 && i < size; i++)
lo |= bytes[i] << (8*i);
for (; i < 8 && i < size; i++)
hi |= bytes[i] << (8*(i - 4));
writel(lo, &ehci_debug->data03);
writel(hi, &ehci_debug->data47);
}
static inline void dbgp_get_data(void *buf, int size)
{
unsigned char *bytes = buf;
u32 lo, hi;
int i;
lo = readl(&ehci_debug->data03);
hi = readl(&ehci_debug->data47);
for (i = 0; i < 4 && i < size; i++)
bytes[i] = (lo >> (8*i)) & 0xff;
for (; i < 8 && i < size; i++)
bytes[i] = (hi >> (8*(i - 4))) & 0xff;
}
static int dbgp_out(u32 addr, const char *bytes, int size)
{
u32 pids, ctrl;
pids = readl(&ehci_debug->pids);
pids = dbgp_pid_update(pids, USB_PID_OUT);
ctrl = readl(&ehci_debug->control);
ctrl = dbgp_len_update(ctrl, size);
ctrl |= DBGP_OUT;
ctrl |= DBGP_GO;
dbgp_set_data(bytes, size);
writel(addr, &ehci_debug->address);
writel(pids, &ehci_debug->pids);
return dbgp_wait_until_done(ctrl);
}
static int dbgp_bulk_write(unsigned devnum, unsigned endpoint,
const char *bytes, int size)
{
int ret;
int loops = 5;
u32 addr;
if (size > DBGP_MAX_PACKET)
return -1;
addr = DBGP_EPADDR(devnum, endpoint);
try_again:
if (loops--) {
ret = dbgp_out(addr, bytes, size);
if (ret == -DBGP_ERR_BAD) {
int try_loops = 3;
do {
/* Emit a dummy packet to re-sync communication
* with the debug device */
if (dbgp_out(addr, "12345678", 8) >= 0) {
udelay(2);
goto try_again;
}
} while (try_loops--);
}
}
return ret;
}
static int dbgp_bulk_read(unsigned devnum, unsigned endpoint, void *data,
int size)
{
u32 pids, addr, ctrl;
int ret;
if (size > DBGP_MAX_PACKET)
return -1;
addr = DBGP_EPADDR(devnum, endpoint);
pids = readl(&ehci_debug->pids);
pids = dbgp_pid_update(pids, USB_PID_IN);
ctrl = readl(&ehci_debug->control);
ctrl = dbgp_len_update(ctrl, size);
ctrl &= ~DBGP_OUT;
ctrl |= DBGP_GO;
writel(addr, &ehci_debug->address);
writel(pids, &ehci_debug->pids);
ret = dbgp_wait_until_done(ctrl);
if (ret < 0)
return ret;
if (size > ret)
size = ret;
dbgp_get_data(data, size);
return ret;
}
static int dbgp_control_msg(unsigned devnum, int requesttype,
int request, int value, int index, void *data, int size)
{
u32 pids, addr, ctrl;
struct usb_ctrlrequest req;
int read;
int ret;
read = (requesttype & USB_DIR_IN) != 0;
if (size > (read ? DBGP_MAX_PACKET:0))
return -1;
/* Compute the control message */
req.bRequestType = requesttype;
req.bRequest = request;
req.wValue = cpu_to_le16(value);
req.wIndex = cpu_to_le16(index);
req.wLength = cpu_to_le16(size);
pids = DBGP_PID_SET(USB_PID_DATA0, USB_PID_SETUP);
addr = DBGP_EPADDR(devnum, 0);
ctrl = readl(&ehci_debug->control);
ctrl = dbgp_len_update(ctrl, sizeof(req));
ctrl |= DBGP_OUT;
ctrl |= DBGP_GO;
/* Send the setup message */
dbgp_set_data(&req, sizeof(req));
writel(addr, &ehci_debug->address);
writel(pids, &ehci_debug->pids);
ret = dbgp_wait_until_done(ctrl);
if (ret < 0)
return ret;
/* Read the result */
return dbgp_bulk_read(devnum, 0, data, size);
}
/* Find a PCI capability */
static u32 __init find_cap(u32 num, u32 slot, u32 func, int cap)
{
u8 pos;
int bytes;
if (!(read_pci_config_16(num, slot, func, PCI_STATUS) &
PCI_STATUS_CAP_LIST))
return 0;
pos = read_pci_config_byte(num, slot, func, PCI_CAPABILITY_LIST);
for (bytes = 0; bytes < 48 && pos >= 0x40; bytes++) {
u8 id;
pos &= ~3;
id = read_pci_config_byte(num, slot, func, pos+PCI_CAP_LIST_ID);
if (id == 0xff)
break;
if (id == cap)
return pos;
pos = read_pci_config_byte(num, slot, func,
pos+PCI_CAP_LIST_NEXT);
}
return 0;
}
static u32 __init __find_dbgp(u32 bus, u32 slot, u32 func)
{
u32 class;
class = read_pci_config(bus, slot, func, PCI_CLASS_REVISION);
if ((class >> 8) != PCI_CLASS_SERIAL_USB_EHCI)
return 0;
return find_cap(bus, slot, func, PCI_CAP_ID_EHCI_DEBUG);
}
static u32 __init find_dbgp(int ehci_num, u32 *rbus, u32 *rslot, u32 *rfunc)
{
u32 bus, slot, func;
for (bus = 0; bus < 256; bus++) {
for (slot = 0; slot < 32; slot++) {
for (func = 0; func < 8; func++) {
unsigned cap;
cap = __find_dbgp(bus, slot, func);
if (!cap)
continue;
if (ehci_num-- != 0)
continue;
*rbus = bus;
*rslot = slot;
*rfunc = func;
return cap;
}
}
}
return 0;
}
static int dbgp_ehci_startup(void)
{
u32 ctrl, cmd, status;
int loop;
/* Claim ownership, but do not enable yet */
ctrl = readl(&ehci_debug->control);
ctrl |= DBGP_OWNER;
ctrl &= ~(DBGP_ENABLED | DBGP_INUSE);
writel(ctrl, &ehci_debug->control);
udelay(1);
dbgp_ehci_status("EHCI startup");
/* Start the ehci running */
cmd = readl(&ehci_regs->command);
cmd &= ~(CMD_LRESET | CMD_IAAD | CMD_PSE | CMD_ASE | CMD_RESET);
cmd |= CMD_RUN;
writel(cmd, &ehci_regs->command);
/* Ensure everything is routed to the EHCI */
writel(FLAG_CF, &ehci_regs->configured_flag);
/* Wait until the controller is no longer halted */
loop = 10;
do {
status = readl(&ehci_regs->status);
if (!(status & STS_HALT))
break;
udelay(1);
} while (--loop > 0);
if (!loop) {
dbgp_printk("ehci can not be started\n");
return -ENODEV;
}
dbgp_printk("ehci started\n");
return 0;
}
static int dbgp_ehci_controller_reset(void)
{
int loop = 250 * 1000;
u32 cmd;
/* Reset the EHCI controller */
cmd = readl(&ehci_regs->command);
cmd |= CMD_RESET;
writel(cmd, &ehci_regs->command);
do {
cmd = readl(&ehci_regs->command);
} while ((cmd & CMD_RESET) && (--loop > 0));
if (!loop) {
dbgp_printk("can not reset ehci\n");
return -1;
}
dbgp_ehci_status("ehci reset done");
return 0;
}
static int ehci_wait_for_port(int port);
/* Return 0 on success
* Return -ENODEV for any general failure
* Return -EIO if wait for port fails
*/
int dbgp_external_startup(void)
{
int devnum;
struct usb_debug_descriptor dbgp_desc;
int ret;
u32 ctrl, portsc, cmd;
int dbg_port = dbgp_phys_port;
int tries = 3;
int reset_port_tries = 1;
int try_hard_once = 1;
try_port_reset_again:
ret = dbgp_ehci_startup();
if (ret)
return ret;
/* Wait for a device to show up in the debug port */
ret = ehci_wait_for_port(dbg_port);
if (ret < 0) {
portsc = readl(&ehci_regs->port_status[dbg_port - 1]);
if (!(portsc & PORT_CONNECT) && try_hard_once) {
/* Last ditch effort to try to force enable
* the debug device by using the packet test
* ehci command to try and wake it up. */
try_hard_once = 0;
cmd = readl(&ehci_regs->command);
cmd &= ~CMD_RUN;
writel(cmd, &ehci_regs->command);
portsc = readl(&ehci_regs->port_status[dbg_port - 1]);
portsc |= PORT_TEST_PKT;
writel(portsc, &ehci_regs->port_status[dbg_port - 1]);
dbgp_ehci_status("Trying to force debug port online");
mdelay(50);
dbgp_ehci_controller_reset();
goto try_port_reset_again;
} else if (reset_port_tries--) {
goto try_port_reset_again;
}
dbgp_printk("No device found in debug port\n");
return -EIO;
}
dbgp_ehci_status("wait for port done");
/* Enable the debug port */
ctrl = readl(&ehci_debug->control);
ctrl |= DBGP_CLAIM;
writel(ctrl, &ehci_debug->control);
ctrl = readl(&ehci_debug->control);
if ((ctrl & DBGP_CLAIM) != DBGP_CLAIM) {
dbgp_printk("No device in debug port\n");
writel(ctrl & ~DBGP_CLAIM, &ehci_debug->control);
return -ENODEV;
}
dbgp_ehci_status("debug ported enabled");
/* Completely transfer the debug device to the debug controller */
portsc = readl(&ehci_regs->port_status[dbg_port - 1]);
portsc &= ~PORT_PE;
writel(portsc, &ehci_regs->port_status[dbg_port - 1]);
dbgp_mdelay(100);
try_again:
/* Find the debug device and make it device number 127 */
for (devnum = 0; devnum <= 127; devnum++) {
ret = dbgp_control_msg(devnum,
USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_DEVICE,
USB_REQ_GET_DESCRIPTOR, (USB_DT_DEBUG << 8), 0,
&dbgp_desc, sizeof(dbgp_desc));
if (ret > 0)
break;
}
if (devnum > 127) {
dbgp_printk("Could not find attached debug device\n");
goto err;
}
if (ret < 0) {
dbgp_printk("Attached device is not a debug device\n");
goto err;
}
dbgp_endpoint_out = dbgp_desc.bDebugOutEndpoint;
/* Move the device to 127 if it isn't already there */
if (devnum != USB_DEBUG_DEVNUM) {
ret = dbgp_control_msg(devnum,
USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE,
USB_REQ_SET_ADDRESS, USB_DEBUG_DEVNUM, 0, NULL, 0);
if (ret < 0) {
dbgp_printk("Could not move attached device to %d\n",
USB_DEBUG_DEVNUM);
goto err;
}
devnum = USB_DEBUG_DEVNUM;
dbgp_printk("debug device renamed to 127\n");
}
/* Enable the debug interface */
ret = dbgp_control_msg(USB_DEBUG_DEVNUM,
USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE,
USB_REQ_SET_FEATURE, USB_DEVICE_DEBUG_MODE, 0, NULL, 0);
if (ret < 0) {
dbgp_printk(" Could not enable the debug device\n");
goto err;
}
dbgp_printk("debug interface enabled\n");
/* Perform a small write to get the even/odd data state in sync
*/
ret = dbgp_bulk_write(USB_DEBUG_DEVNUM, dbgp_endpoint_out, " ", 1);
if (ret < 0) {
dbgp_printk("dbgp_bulk_write failed: %d\n", ret);
goto err;
}
dbgp_printk("small write doned\n");
dbgp_not_safe = 0;
return 0;
err:
if (tries--)
goto try_again;
return -ENODEV;
}
EXPORT_SYMBOL_GPL(dbgp_external_startup);
static int __init ehci_reset_port(int port)
{
u32 portsc;
u32 delay_time, delay;
int loop;
dbgp_ehci_status("reset port");
/* Reset the usb debug port */
portsc = readl(&ehci_regs->port_status[port - 1]);
portsc &= ~PORT_PE;
portsc |= PORT_RESET;
writel(portsc, &ehci_regs->port_status[port - 1]);
delay = HUB_ROOT_RESET_TIME;
for (delay_time = 0; delay_time < HUB_RESET_TIMEOUT;
delay_time += delay) {
dbgp_mdelay(delay);
portsc = readl(&ehci_regs->port_status[port - 1]);
if (!(portsc & PORT_RESET))
break;
}
if (portsc & PORT_RESET) {
/* force reset to complete */
loop = 100 * 1000;
writel(portsc & ~(PORT_RWC_BITS | PORT_RESET),
&ehci_regs->port_status[port - 1]);
do {
udelay(1);
portsc = readl(&ehci_regs->port_status[port-1]);
} while ((portsc & PORT_RESET) && (--loop > 0));
}
/* Device went away? */
if (!(portsc & PORT_CONNECT))
return -ENOTCONN;
/* bomb out completely if something weird happened */
if ((portsc & PORT_CSC))
return -EINVAL;
/* If we've finished resetting, then break out of the loop */
if (!(portsc & PORT_RESET) && (portsc & PORT_PE))
return 0;
return -EBUSY;
}
static int ehci_wait_for_port(int port)
{
u32 status;
int ret, reps;
for (reps = 0; reps < 300; reps++) {
status = readl(&ehci_regs->status);
if (status & STS_PCD)
break;
dbgp_mdelay(1);
}
ret = ehci_reset_port(port);
if (ret == 0)
return 0;
return -ENOTCONN;
}
typedef void (*set_debug_port_t)(int port);
static void __init default_set_debug_port(int port)
{
}
static set_debug_port_t __initdata set_debug_port = default_set_debug_port;
static void __init nvidia_set_debug_port(int port)
{
u32 dword;
dword = read_pci_config(ehci_dev.bus, ehci_dev.slot, ehci_dev.func,
0x74);
dword &= ~(0x0f<<12);
dword |= ((port & 0x0f)<<12);
write_pci_config(ehci_dev.bus, ehci_dev.slot, ehci_dev.func, 0x74,
dword);
dbgp_printk("set debug port to %d\n", port);
}
static void __init detect_set_debug_port(void)
{
u32 vendorid;
vendorid = read_pci_config(ehci_dev.bus, ehci_dev.slot, ehci_dev.func,
0x00);
if ((vendorid & 0xffff) == 0x10de) {
dbgp_printk("using nvidia set_debug_port\n");
set_debug_port = nvidia_set_debug_port;
}
}
/* The code in early_ehci_bios_handoff() is derived from the usb pci
* quirk initialization, but altered so as to use the early PCI
* routines. */
#define EHCI_USBLEGSUP_BIOS (1 << 16) /* BIOS semaphore */
#define EHCI_USBLEGCTLSTS 4 /* legacy control/status */
static void __init early_ehci_bios_handoff(void)
{
u32 hcc_params = readl(&ehci_caps->hcc_params);
int offset = (hcc_params >> 8) & 0xff;
u32 cap;
int msec;
if (!offset)
return;
cap = read_pci_config(ehci_dev.bus, ehci_dev.slot,
ehci_dev.func, offset);
dbgp_printk("dbgp: ehci BIOS state %08x\n", cap);
if ((cap & 0xff) == 1 && (cap & EHCI_USBLEGSUP_BIOS)) {
dbgp_printk("dbgp: BIOS handoff\n");
write_pci_config_byte(ehci_dev.bus, ehci_dev.slot,
ehci_dev.func, offset + 3, 1);
}
/* if boot firmware now owns EHCI, spin till it hands it over. */
msec = 1000;
while ((cap & EHCI_USBLEGSUP_BIOS) && (msec > 0)) {
mdelay(10);
msec -= 10;
cap = read_pci_config(ehci_dev.bus, ehci_dev.slot,
ehci_dev.func, offset);
}
if (cap & EHCI_USBLEGSUP_BIOS) {
/* well, possibly buggy BIOS... try to shut it down,
* and hope nothing goes too wrong */
dbgp_printk("dbgp: BIOS handoff failed: %08x\n", cap);
write_pci_config_byte(ehci_dev.bus, ehci_dev.slot,
ehci_dev.func, offset + 2, 0);
}
/* just in case, always disable EHCI SMIs */
write_pci_config_byte(ehci_dev.bus, ehci_dev.slot, ehci_dev.func,
offset + EHCI_USBLEGCTLSTS, 0);
}
static int __init ehci_setup(void)
{
u32 ctrl, portsc, hcs_params;
u32 debug_port, new_debug_port = 0, n_ports;
int ret, i;
int port_map_tried;
int playtimes = 3;
early_ehci_bios_handoff();
try_next_time:
port_map_tried = 0;
try_next_port:
hcs_params = readl(&ehci_caps->hcs_params);
debug_port = HCS_DEBUG_PORT(hcs_params);
dbgp_phys_port = debug_port;
n_ports = HCS_N_PORTS(hcs_params);
dbgp_printk("debug_port: %d\n", debug_port);
dbgp_printk("n_ports: %d\n", n_ports);
dbgp_ehci_status("");
for (i = 1; i <= n_ports; i++) {
portsc = readl(&ehci_regs->port_status[i-1]);
dbgp_printk("portstatus%d: %08x\n", i, portsc);
}
if (port_map_tried && (new_debug_port != debug_port)) {
if (--playtimes) {
set_debug_port(new_debug_port);
goto try_next_time;
}
return -1;
}
/* Only reset the controller if it is not already in the
* configured state */
if (!(readl(&ehci_regs->configured_flag) & FLAG_CF)) {
if (dbgp_ehci_controller_reset() != 0)
return -1;
} else {
dbgp_ehci_status("ehci skip - already configured");
}
ret = dbgp_external_startup();
if (ret == -EIO)
goto next_debug_port;
if (ret < 0) {
/* Things didn't work so remove my claim */
ctrl = readl(&ehci_debug->control);
ctrl &= ~(DBGP_CLAIM | DBGP_OUT);
writel(ctrl, &ehci_debug->control);
return -1;
}
return 0;
next_debug_port:
port_map_tried |= (1<<(debug_port - 1));
new_debug_port = ((debug_port-1+1)%n_ports) + 1;
if (port_map_tried != ((1<<n_ports) - 1)) {
set_debug_port(new_debug_port);
goto try_next_port;
}
if (--playtimes) {
set_debug_port(new_debug_port);
goto try_next_time;
}
return -1;
}
int __init early_dbgp_init(char *s)
{
u32 debug_port, bar, offset;
u32 bus, slot, func, cap;
void __iomem *ehci_bar;
u32 dbgp_num;
u32 bar_val;
char *e;
int ret;
u8 byte;
if (!early_pci_allowed())
return -1;
dbgp_num = 0;
if (*s)
dbgp_num = simple_strtoul(s, &e, 10);
dbgp_printk("dbgp_num: %d\n", dbgp_num);
cap = find_dbgp(dbgp_num, &bus, &slot, &func);
if (!cap)
return -1;
dbgp_printk("Found EHCI debug port on %02x:%02x.%1x\n", bus, slot,
func);
debug_port = read_pci_config(bus, slot, func, cap);
bar = (debug_port >> 29) & 0x7;
bar = (bar * 4) + 0xc;
offset = (debug_port >> 16) & 0xfff;
dbgp_printk("bar: %02x offset: %03x\n", bar, offset);
if (bar != PCI_BASE_ADDRESS_0) {
dbgp_printk("only debug ports on bar 1 handled.\n");
return -1;
}
bar_val = read_pci_config(bus, slot, func, PCI_BASE_ADDRESS_0);
dbgp_printk("bar_val: %02x offset: %03x\n", bar_val, offset);
if (bar_val & ~PCI_BASE_ADDRESS_MEM_MASK) {
dbgp_printk("only simple 32bit mmio bars supported\n");
return -1;
}
/* double check if the mem space is enabled */
byte = read_pci_config_byte(bus, slot, func, 0x04);
if (!(byte & 0x2)) {
byte |= 0x02;
write_pci_config_byte(bus, slot, func, 0x04, byte);
dbgp_printk("mmio for ehci enabled\n");
}
/*
* FIXME I don't have the bar size so just guess PAGE_SIZE is more
* than enough. 1K is the biggest I have seen.
*/
set_fixmap_nocache(FIX_DBGP_BASE, bar_val & PAGE_MASK);
ehci_bar = (void __iomem *)__fix_to_virt(FIX_DBGP_BASE);
ehci_bar += bar_val & ~PAGE_MASK;
dbgp_printk("ehci_bar: %p\n", ehci_bar);
ehci_caps = ehci_bar;
ehci_regs = ehci_bar + HC_LENGTH(readl(&ehci_caps->hc_capbase));
ehci_debug = ehci_bar + offset;
ehci_dev.bus = bus;
ehci_dev.slot = slot;
ehci_dev.func = func;
detect_set_debug_port();
ret = ehci_setup();
if (ret < 0) {
dbgp_printk("ehci_setup failed\n");
ehci_debug = NULL;
return -1;
}
dbgp_ehci_status("early_init_complete");
return 0;
}
static void early_dbgp_write(struct console *con, const char *str, u32 n)
{
int chunk, ret;
char buf[DBGP_MAX_PACKET];
int use_cr = 0;
u32 cmd, ctrl;
int reset_run = 0;
if (!ehci_debug || dbgp_not_safe)
return;
cmd = readl(&ehci_regs->command);
if (unlikely(!(cmd & CMD_RUN))) {
/* If the ehci controller is not in the run state do extended
* checks to see if the acpi or some other initialization also
* reset the ehci debug port */
ctrl = readl(&ehci_debug->control);
if (!(ctrl & DBGP_ENABLED)) {
dbgp_not_safe = 1;
dbgp_external_startup();
} else {
cmd |= CMD_RUN;
writel(cmd, &ehci_regs->command);
reset_run = 1;
}
}
while (n > 0) {
for (chunk = 0; chunk < DBGP_MAX_PACKET && n > 0;
str++, chunk++, n--) {
if (!use_cr && *str == '\n') {
use_cr = 1;
buf[chunk] = '\r';
str--;
n++;
continue;
}
if (use_cr)
use_cr = 0;
buf[chunk] = *str;
}
if (chunk > 0) {
ret = dbgp_bulk_write(USB_DEBUG_DEVNUM,
dbgp_endpoint_out, buf, chunk);
}
}
if (unlikely(reset_run)) {
cmd = readl(&ehci_regs->command);
cmd &= ~CMD_RUN;
writel(cmd, &ehci_regs->command);
}
}
struct console early_dbgp_console = {
.name = "earlydbg",
.write = early_dbgp_write,
.flags = CON_PRINTBUFFER,
.index = -1,
};
int dbgp_reset_prep(void)
{
u32 ctrl;
dbgp_not_safe = 1;
if (!ehci_debug)
return 0;
if (early_dbgp_console.index != -1 &&
!(early_dbgp_console.flags & CON_BOOT))
return 1;
/* This means the console is not initialized, or should be
* shut down so as to allow for reuse of the usb device, which
* means it is time to shut down the usb debug port. */
ctrl = readl(&ehci_debug->control);
if (ctrl & DBGP_ENABLED) {
ctrl &= ~(DBGP_CLAIM);
writel(ctrl, &ehci_debug->control);
}
return 0;
}
EXPORT_SYMBOL_GPL(dbgp_reset_prep);

View File

@ -124,7 +124,7 @@ choice
config USB_GADGET_AT91
boolean "Atmel AT91 USB Device Port"
depends on ARCH_AT91 && !ARCH_AT91SAM9RL && !ARCH_AT91CAP9
depends on ARCH_AT91 && !ARCH_AT91SAM9RL && !ARCH_AT91CAP9 && !ARCH_AT91SAM9G45
select USB_GADGET_SELECTED
help
Many Atmel AT91 processors (such as the AT91RM2000) have a
@ -143,7 +143,7 @@ config USB_AT91
config USB_GADGET_ATMEL_USBA
boolean "Atmel USBA"
select USB_GADGET_DUALSPEED
depends on AVR32 || ARCH_AT91CAP9 || ARCH_AT91SAM9RL
depends on AVR32 || ARCH_AT91CAP9 || ARCH_AT91SAM9RL || ARCH_AT91SAM9G45
help
USBA is the integrated high-speed USB Device controller on
the AT32AP700x, some AT91SAM9 and AT91CAP9 processors from Atmel.
@ -627,9 +627,10 @@ config USB_AUDIO
config USB_ETH
tristate "Ethernet Gadget (with CDC Ethernet support)"
depends on NET
select CRC32
help
This driver implements Ethernet style communication, in either
of two ways:
This driver implements Ethernet style communication, in one of
several ways:
- The "Communication Device Class" (CDC) Ethernet Control Model.
That protocol is often avoided with pure Ethernet adapters, in
@ -639,7 +640,11 @@ config USB_ETH
- On hardware that can't implement that protocol, a simple CDC subset
is used, placing fewer demands on USB.
RNDIS support is a third option, more demanding than that subset.
- CDC Ethernet Emulation Model (EEM) is a newer standard that has
a simpler interface that can be used by more USB hardware.
RNDIS support is an additional option, more demanding than that
subset.
Within the USB device, this gadget driver exposes a network device
"usbX", where X depends on what other networking devices you have.
@ -672,6 +677,22 @@ config USB_ETH_RNDIS
XP, you'll need to download drivers from Microsoft's website; a URL
is given in comments found in that info file.
config USB_ETH_EEM
bool "Ethernet Emulation Model (EEM) support"
depends on USB_ETH
default n
help
CDC EEM is a newer USB standard that is somewhat simpler than CDC ECM
and therefore can be supported by more hardware. Technically ECM and
EEM are designed for different applications. The ECM model extends
the network interface to the target (e.g. a USB cable modem), and the
EEM model is for mobile devices to communicate with hosts using
ethernet over USB. For Linux gadgets, however, the interface with
the host is the same (a usbX device), so the differences are minimal.
If you say "y" here, the Ethernet gadget driver will use the EEM
protocol rather than ECM. If unsure, say "n".
config USB_GADGETFS
tristate "Gadget Filesystem (EXPERIMENTAL)"
depends on EXPERIMENTAL

View File

@ -2378,40 +2378,34 @@ static irqreturn_t udc_data_in_isr(struct udc *dev, int ep_ix)
if (!ep->cancel_transfer && !list_empty(&ep->queue)) {
req = list_entry(ep->queue.next,
struct udc_request, queue);
if (req) {
/*
* length bytes transfered
* check dma done of last desc. in PPBDU mode
*/
if (use_dma_ppb_du) {
td = udc_get_last_dma_desc(req);
if (td) {
dma_done =
AMD_GETBITS(td->status,
UDC_DMA_IN_STS_BS);
/* don't care DMA done */
req->req.actual =
req->req.length;
}
} else {
/* assume all bytes transferred */
/*
* length bytes transfered
* check dma done of last desc. in PPBDU mode
*/
if (use_dma_ppb_du) {
td = udc_get_last_dma_desc(req);
if (td) {
dma_done =
AMD_GETBITS(td->status,
UDC_DMA_IN_STS_BS);
/* don't care DMA done */
req->req.actual = req->req.length;
}
} else {
/* assume all bytes transferred */
req->req.actual = req->req.length;
}
if (req->req.actual == req->req.length) {
/* complete req */
complete_req(ep, req, 0);
req->dma_going = 0;
/* further request available ? */
if (list_empty(&ep->queue)) {
/* disable interrupt */
tmp = readl(
&dev->regs->ep_irqmsk);
tmp |= AMD_BIT(ep->num);
writel(tmp,
&dev->regs->ep_irqmsk);
}
if (req->req.actual == req->req.length) {
/* complete req */
complete_req(ep, req, 0);
req->dma_going = 0;
/* further request available ? */
if (list_empty(&ep->queue)) {
/* disable interrupt */
tmp = readl(&dev->regs->ep_irqmsk);
tmp |= AMD_BIT(ep->num);
writel(tmp, &dev->regs->ep_irqmsk);
}
}
}

View File

@ -1754,7 +1754,6 @@ static int __init at91udc_probe(struct platform_device *pdev)
IRQF_DISABLED, driver_name, udc)) {
DBG("request vbus irq %d failed\n",
udc->board.vbus_pin);
free_irq(udc->udp_irq, udc);
retval = -EBUSY;
goto fail3;
}

View File

@ -106,20 +106,20 @@ static int audio_set_endpoint_req(struct usb_configuration *c,
ctrl->bRequest, w_value, len, ep);
switch (ctrl->bRequest) {
case SET_CUR:
case UAC_SET_CUR:
value = 0;
break;
case SET_MIN:
case UAC_SET_MIN:
break;
case SET_MAX:
case UAC_SET_MAX:
break;
case SET_RES:
case UAC_SET_RES:
break;
case SET_MEM:
case UAC_SET_MEM:
break;
default:
@ -142,13 +142,13 @@ static int audio_get_endpoint_req(struct usb_configuration *c,
ctrl->bRequest, w_value, len, ep);
switch (ctrl->bRequest) {
case GET_CUR:
case GET_MIN:
case GET_MAX:
case GET_RES:
case UAC_GET_CUR:
case UAC_GET_MIN:
case UAC_GET_MAX:
case UAC_GET_RES:
value = 3;
break;
case GET_MEM:
case UAC_GET_MEM:
break;
default:
break;
@ -171,11 +171,11 @@ audio_setup(struct usb_configuration *c, const struct usb_ctrlrequest *ctrl)
* Audio class messages; interface activation uses set_alt().
*/
switch (ctrl->bRequestType) {
case USB_AUDIO_SET_ENDPOINT:
case USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_ENDPOINT:
value = audio_set_endpoint_req(c, ctrl);
break;
case USB_AUDIO_GET_ENDPOINT:
case USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_ENDPOINT:
value = audio_get_endpoint_req(c, ctrl);
break;

View File

@ -602,7 +602,7 @@ static int get_string(struct usb_composite_dev *cdev,
}
}
for (len = 0; s->wData[len] && len <= 126; len++)
for (len = 0; len <= 126 && s->wData[len]; len++)
continue;
if (!len)
return -EINVAL;

View File

@ -1306,11 +1306,6 @@ restart:
setup = *(struct usb_ctrlrequest*) urb->setup_packet;
w_index = le16_to_cpu(setup.wIndex);
w_value = le16_to_cpu(setup.wValue);
if (le16_to_cpu(setup.wLength) !=
urb->transfer_buffer_length) {
status = -EOVERFLOW;
goto return_urb;
}
/* paranoia, in case of stale queued data */
list_for_each_entry (req, &ep->queue, queue) {

View File

@ -61,6 +61,11 @@
* simpler, Microsoft pushes their own approach: RNDIS. The published
* RNDIS specs are ambiguous and appear to be incomplete, and are also
* needlessly complex. They borrow more from CDC ACM than CDC ECM.
*
* While CDC ECM, CDC Subset, and RNDIS are designed to extend the ethernet
* interface to the target, CDC EEM was designed to use ethernet over the USB
* link between the host and target. CDC EEM is implemented as an alternative
* to those other protocols when that communication model is more appropriate.
*/
#define DRIVER_DESC "Ethernet Gadget"
@ -114,6 +119,7 @@ static inline bool has_rndis(void)
#include "f_rndis.c"
#include "rndis.c"
#endif
#include "f_eem.c"
#include "u_ether.c"
/*-------------------------------------------------------------------------*/
@ -150,6 +156,10 @@ static inline bool has_rndis(void)
#define RNDIS_VENDOR_NUM 0x0525 /* NetChip */
#define RNDIS_PRODUCT_NUM 0xa4a2 /* Ethernet/RNDIS Gadget */
/* For EEM gadgets */
#define EEM_VENDOR_NUM 0x0525 /* INVALID - NEEDS TO BE ALLOCATED */
#define EEM_PRODUCT_NUM 0xa4a1 /* INVALID - NEEDS TO BE ALLOCATED */
/*-------------------------------------------------------------------------*/
static struct usb_device_descriptor device_desc = {
@ -246,8 +256,16 @@ static struct usb_configuration rndis_config_driver = {
/*-------------------------------------------------------------------------*/
#ifdef CONFIG_USB_ETH_EEM
static int use_eem = 1;
#else
static int use_eem;
#endif
module_param(use_eem, bool, 0);
MODULE_PARM_DESC(use_eem, "use CDC EEM mode");
/*
* We _always_ have an ECM or CDC Subset configuration.
* We _always_ have an ECM, CDC Subset, or EEM configuration.
*/
static int __init eth_do_config(struct usb_configuration *c)
{
@ -258,7 +276,9 @@ static int __init eth_do_config(struct usb_configuration *c)
c->bmAttributes |= USB_CONFIG_ATT_WAKEUP;
}
if (can_support_ecm(c->cdev->gadget))
if (use_eem)
return eem_bind_config(c);
else if (can_support_ecm(c->cdev->gadget))
return ecm_bind_config(c, hostaddr);
else
return geth_bind_config(c, hostaddr);
@ -286,7 +306,12 @@ static int __init eth_bind(struct usb_composite_dev *cdev)
return status;
/* set up main config label and device descriptor */
if (can_support_ecm(cdev->gadget)) {
if (use_eem) {
/* EEM */
eth_config_driver.label = "CDC Ethernet (EEM)";
device_desc.idVendor = cpu_to_le16(EEM_VENDOR_NUM);
device_desc.idProduct = cpu_to_le16(EEM_PRODUCT_NUM);
} else if (can_support_ecm(cdev->gadget)) {
/* ECM */
eth_config_driver.label = "CDC Ethernet (ECM)";
} else {

View File

@ -28,6 +28,9 @@ static int audio_buf_size = 48000;
module_param(audio_buf_size, int, S_IRUGO);
MODULE_PARM_DESC(audio_buf_size, "Audio buffer size");
static int generic_set_cmd(struct usb_audio_control *con, u8 cmd, int value);
static int generic_get_cmd(struct usb_audio_control *con, u8 cmd);
/*
* DESCRIPTORS ... most are static, but strings and full
* configuration descriptors are built on demand.
@ -50,16 +53,16 @@ static struct usb_interface_descriptor ac_interface_desc __initdata = {
.bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL,
};
DECLARE_USB_AC_HEADER_DESCRIPTOR(2);
DECLARE_UAC_AC_HEADER_DESCRIPTOR(2);
#define USB_DT_AC_HEADER_LENGH USB_DT_AC_HEADER_SIZE(F_AUDIO_NUM_INTERFACES)
#define UAC_DT_AC_HEADER_LENGTH UAC_DT_AC_HEADER_SIZE(F_AUDIO_NUM_INTERFACES)
/* B.3.2 Class-Specific AC Interface Descriptor */
static struct usb_ac_header_descriptor_2 ac_header_desc = {
.bLength = USB_DT_AC_HEADER_LENGH,
static struct uac_ac_header_descriptor_2 ac_header_desc = {
.bLength = UAC_DT_AC_HEADER_LENGTH,
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubtype = HEADER,
.bDescriptorSubtype = UAC_HEADER,
.bcdADC = __constant_cpu_to_le16(0x0100),
.wTotalLength = __constant_cpu_to_le16(USB_DT_AC_HEADER_LENGH),
.wTotalLength = __constant_cpu_to_le16(UAC_DT_AC_HEADER_LENGTH),
.bInCollection = F_AUDIO_NUM_INTERFACES,
.baInterfaceNr = {
[0] = F_AUDIO_AC_INTERFACE,
@ -68,33 +71,33 @@ static struct usb_ac_header_descriptor_2 ac_header_desc = {
};
#define INPUT_TERMINAL_ID 1
static struct usb_input_terminal_descriptor input_terminal_desc = {
.bLength = USB_DT_AC_INPUT_TERMINAL_SIZE,
static struct uac_input_terminal_descriptor input_terminal_desc = {
.bLength = UAC_DT_INPUT_TERMINAL_SIZE,
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubtype = INPUT_TERMINAL,
.bDescriptorSubtype = UAC_INPUT_TERMINAL,
.bTerminalID = INPUT_TERMINAL_ID,
.wTerminalType = USB_AC_TERMINAL_STREAMING,
.wTerminalType = UAC_TERMINAL_STREAMING,
.bAssocTerminal = 0,
.wChannelConfig = 0x3,
};
DECLARE_USB_AC_FEATURE_UNIT_DESCRIPTOR(0);
DECLARE_UAC_FEATURE_UNIT_DESCRIPTOR(0);
#define FEATURE_UNIT_ID 2
static struct usb_ac_feature_unit_descriptor_0 feature_unit_desc = {
.bLength = USB_DT_AC_FEATURE_UNIT_SIZE(0),
static struct uac_feature_unit_descriptor_0 feature_unit_desc = {
.bLength = UAC_DT_FEATURE_UNIT_SIZE(0),
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubtype = FEATURE_UNIT,
.bDescriptorSubtype = UAC_FEATURE_UNIT,
.bUnitID = FEATURE_UNIT_ID,
.bSourceID = INPUT_TERMINAL_ID,
.bControlSize = 2,
.bmaControls[0] = (FU_MUTE | FU_VOLUME),
.bmaControls[0] = (UAC_FU_MUTE | UAC_FU_VOLUME),
};
static struct usb_audio_control mute_control = {
.list = LIST_HEAD_INIT(mute_control.list),
.name = "Mute Control",
.type = MUTE_CONTROL,
.type = UAC_MUTE_CONTROL,
/* Todo: add real Mute control code */
.set = generic_set_cmd,
.get = generic_get_cmd,
@ -103,7 +106,7 @@ static struct usb_audio_control mute_control = {
static struct usb_audio_control volume_control = {
.list = LIST_HEAD_INIT(volume_control.list),
.name = "Volume Control",
.type = VOLUME_CONTROL,
.type = UAC_VOLUME_CONTROL,
/* Todo: add real Volume control code */
.set = generic_set_cmd,
.get = generic_get_cmd,
@ -113,17 +116,17 @@ static struct usb_audio_control_selector feature_unit = {
.list = LIST_HEAD_INIT(feature_unit.list),
.id = FEATURE_UNIT_ID,
.name = "Mute & Volume Control",
.type = FEATURE_UNIT,
.type = UAC_FEATURE_UNIT,
.desc = (struct usb_descriptor_header *)&feature_unit_desc,
};
#define OUTPUT_TERMINAL_ID 3
static struct usb_output_terminal_descriptor output_terminal_desc = {
.bLength = USB_DT_AC_OUTPUT_TERMINAL_SIZE,
static struct uac_output_terminal_descriptor output_terminal_desc = {
.bLength = UAC_DT_OUTPUT_TERMINAL_SIZE,
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubtype = OUTPUT_TERMINAL,
.bDescriptorSubtype = UAC_OUTPUT_TERMINAL,
.bTerminalID = OUTPUT_TERMINAL_ID,
.wTerminalType = USB_AC_OUTPUT_TERMINAL_SPEAKER,
.wTerminalType = UAC_OUTPUT_TERMINAL_SPEAKER,
.bAssocTerminal = FEATURE_UNIT_ID,
.bSourceID = FEATURE_UNIT_ID,
};
@ -148,22 +151,22 @@ static struct usb_interface_descriptor as_interface_alt_1_desc = {
};
/* B.4.2 Class-Specific AS Interface Descriptor */
static struct usb_as_header_descriptor as_header_desc = {
.bLength = USB_DT_AS_HEADER_SIZE,
static struct uac_as_header_descriptor as_header_desc = {
.bLength = UAC_DT_AS_HEADER_SIZE,
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubtype = AS_GENERAL,
.bDescriptorSubtype = UAC_AS_GENERAL,
.bTerminalLink = INPUT_TERMINAL_ID,
.bDelay = 1,
.wFormatTag = USB_AS_AUDIO_FORMAT_TYPE_I_PCM,
.wFormatTag = UAC_FORMAT_TYPE_I_PCM,
};
DECLARE_USB_AS_FORMAT_TYPE_I_DISCRETE_DESC(1);
DECLARE_UAC_FORMAT_TYPE_I_DISCRETE_DESC(1);
static struct usb_as_formate_type_i_discrete_descriptor_1 as_type_i_desc = {
.bLength = USB_AS_FORMAT_TYPE_I_DISCRETE_DESC_SIZE(1),
static struct uac_format_type_i_discrete_descriptor_1 as_type_i_desc = {
.bLength = UAC_FORMAT_TYPE_I_DISCRETE_DESC_SIZE(1),
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubtype = FORMAT_TYPE,
.bFormatType = USB_AS_FORMAT_TYPE_I,
.bDescriptorSubtype = UAC_FORMAT_TYPE,
.bFormatType = UAC_FORMAT_TYPE_I,
.bSubframeSize = 2,
.bBitResolution = 16,
.bSamFreqType = 1,
@ -174,17 +177,17 @@ static struct usb_endpoint_descriptor as_out_ep_desc __initdata = {
.bLength = USB_DT_ENDPOINT_AUDIO_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_OUT,
.bmAttributes = USB_AS_ENDPOINT_ADAPTIVE
.bmAttributes = USB_ENDPOINT_SYNC_ADAPTIVE
| USB_ENDPOINT_XFER_ISOC,
.wMaxPacketSize = __constant_cpu_to_le16(OUT_EP_MAX_PACKET_SIZE),
.bInterval = 4,
};
/* Class-specific AS ISO OUT Endpoint Descriptor */
static struct usb_as_iso_endpoint_descriptor as_iso_out_desc __initdata = {
.bLength = USB_AS_ISO_ENDPOINT_DESC_SIZE,
static struct uac_iso_endpoint_descriptor as_iso_out_desc __initdata = {
.bLength = UAC_ISO_ENDPOINT_DESC_SIZE,
.bDescriptorType = USB_DT_CS_ENDPOINT,
.bDescriptorSubtype = EP_GENERAL,
.bDescriptorSubtype = UAC_EP_GENERAL,
.bmAttributes = 1,
.bLockDelayUnits = 1,
.wLockDelay = __constant_cpu_to_le16(1),
@ -456,11 +459,11 @@ f_audio_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
* Audio class messages; interface activation uses set_alt().
*/
switch (ctrl->bRequestType) {
case USB_AUDIO_SET_INTF:
case USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE:
value = audio_set_intf_req(f, ctrl);
break;
case USB_AUDIO_GET_INTF:
case USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE:
value = audio_get_intf_req(f, ctrl);
break;
@ -632,6 +635,18 @@ f_audio_unbind(struct usb_configuration *c, struct usb_function *f)
/*-------------------------------------------------------------------------*/
static int generic_set_cmd(struct usb_audio_control *con, u8 cmd, int value)
{
con->data[cmd] = value;
return 0;
}
static int generic_get_cmd(struct usb_audio_control *con, u8 cmd)
{
return con->data[cmd];
}
/* Todo: add more control selectors dynamically */
int __init control_selector_init(struct f_audio *audio)
{
@ -642,10 +657,10 @@ int __init control_selector_init(struct f_audio *audio)
list_add(&mute_control.list, &feature_unit.control);
list_add(&volume_control.list, &feature_unit.control);
volume_control.data[_CUR] = 0xffc0;
volume_control.data[_MIN] = 0xe3a0;
volume_control.data[_MAX] = 0xfff0;
volume_control.data[_RES] = 0x0030;
volume_control.data[UAC__CUR] = 0xffc0;
volume_control.data[UAC__MIN] = 0xe3a0;
volume_control.data[UAC__MAX] = 0xfff0;
volume_control.data[UAC__RES] = 0x0030;
return 0;
}

drivers/usb/gadget/f_eem.c (new file, 562 lines)
View File

@ -0,0 +1,562 @@
/*
* f_eem.c -- USB CDC Ethernet (EEM) link function driver
*
* Copyright (C) 2003-2005,2008 David Brownell
* Copyright (C) 2008 Nokia Corporation
* Copyright (C) 2009 EF Johnson Technologies
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/crc32.h>
#include "u_ether.h"
#define EEM_HLEN 2
/*
* This function is a "CDC Ethernet Emulation Model" (CDC EEM)
* Ethernet link.
*/
struct eem_ep_descs {
struct usb_endpoint_descriptor *in;
struct usb_endpoint_descriptor *out;
};
struct f_eem {
struct gether port;
u8 ctrl_id;
struct eem_ep_descs fs;
struct eem_ep_descs hs;
};
static inline struct f_eem *func_to_eem(struct usb_function *f)
{
return container_of(f, struct f_eem, port.func);
}
/*-------------------------------------------------------------------------*/
/* interface descriptor: */
static struct usb_interface_descriptor eem_intf __initdata = {
.bLength = sizeof eem_intf,
.bDescriptorType = USB_DT_INTERFACE,
/* .bInterfaceNumber = DYNAMIC */
.bNumEndpoints = 2,
.bInterfaceClass = USB_CLASS_COMM,
.bInterfaceSubClass = USB_CDC_SUBCLASS_EEM,
.bInterfaceProtocol = USB_CDC_PROTO_EEM,
/* .iInterface = DYNAMIC */
};
/* full speed support: */
static struct usb_endpoint_descriptor eem_fs_in_desc __initdata = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_IN,
.bmAttributes = USB_ENDPOINT_XFER_BULK,
};
static struct usb_endpoint_descriptor eem_fs_out_desc __initdata = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_OUT,
.bmAttributes = USB_ENDPOINT_XFER_BULK,
};
static struct usb_descriptor_header *eem_fs_function[] __initdata = {
/* CDC EEM control descriptors */
(struct usb_descriptor_header *) &eem_intf,
(struct usb_descriptor_header *) &eem_fs_in_desc,
(struct usb_descriptor_header *) &eem_fs_out_desc,
NULL,
};
/* high speed support: */
static struct usb_endpoint_descriptor eem_hs_in_desc __initdata = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_IN,
.bmAttributes = USB_ENDPOINT_XFER_BULK,
.wMaxPacketSize = cpu_to_le16(512),
};
static struct usb_endpoint_descriptor eem_hs_out_desc __initdata = {
.bLength = USB_DT_ENDPOINT_SIZE,
.bDescriptorType = USB_DT_ENDPOINT,
.bEndpointAddress = USB_DIR_OUT,
.bmAttributes = USB_ENDPOINT_XFER_BULK,
.wMaxPacketSize = cpu_to_le16(512),
};
static struct usb_descriptor_header *eem_hs_function[] __initdata = {
/* CDC EEM control descriptors */
(struct usb_descriptor_header *) &eem_intf,
(struct usb_descriptor_header *) &eem_hs_in_desc,
(struct usb_descriptor_header *) &eem_hs_out_desc,
NULL,
};
/* string descriptors: */
static struct usb_string eem_string_defs[] = {
[0].s = "CDC Ethernet Emulation Model (EEM)",
{ } /* end of list */
};
static struct usb_gadget_strings eem_string_table = {
.language = 0x0409, /* en-us */
.strings = eem_string_defs,
};
static struct usb_gadget_strings *eem_strings[] = {
&eem_string_table,
NULL,
};
/*-------------------------------------------------------------------------*/
static int eem_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
{
struct usb_composite_dev *cdev = f->config->cdev;
int value = -EOPNOTSUPP;
u16 w_index = le16_to_cpu(ctrl->wIndex);
u16 w_value = le16_to_cpu(ctrl->wValue);
u16 w_length = le16_to_cpu(ctrl->wLength);
DBG(cdev, "invalid control req%02x.%02x v%04x i%04x l%d\n",
ctrl->bRequestType, ctrl->bRequest,
w_value, w_index, w_length);
/* device either stalls (value < 0) or reports success */
return value;
}
static int eem_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
{
struct f_eem *eem = func_to_eem(f);
struct usb_composite_dev *cdev = f->config->cdev;
struct net_device *net;
/* we know alt == 0, so this is an activation or a reset */
if (alt != 0)
goto fail;
if (intf == eem->ctrl_id) {
if (eem->port.in_ep->driver_data) {
DBG(cdev, "reset eem\n");
gether_disconnect(&eem->port);
}
if (!eem->port.in) {
DBG(cdev, "init eem\n");
eem->port.in = ep_choose(cdev->gadget,
eem->hs.in, eem->fs.in);
eem->port.out = ep_choose(cdev->gadget,
eem->hs.out, eem->fs.out);
}
/* zlps should not occur because zero-length EEM packets
* will be inserted in those cases where they would occur
*/
eem->port.is_zlp_ok = 1;
eem->port.cdc_filter = DEFAULT_FILTER;
DBG(cdev, "activate eem\n");
net = gether_connect(&eem->port);
if (IS_ERR(net))
return PTR_ERR(net);
} else
goto fail;
return 0;
fail:
return -EINVAL;
}
static void eem_disable(struct usb_function *f)
{
struct f_eem *eem = func_to_eem(f);
struct usb_composite_dev *cdev = f->config->cdev;
DBG(cdev, "eem deactivated\n");
if (eem->port.in_ep->driver_data)
gether_disconnect(&eem->port);
}
/*-------------------------------------------------------------------------*/
/* EEM function driver setup/binding */
static int __init
eem_bind(struct usb_configuration *c, struct usb_function *f)
{
struct usb_composite_dev *cdev = c->cdev;
struct f_eem *eem = func_to_eem(f);
int status;
struct usb_ep *ep;
/* allocate instance-specific interface IDs */
status = usb_interface_id(c, f);
if (status < 0)
goto fail;
eem->ctrl_id = status;
eem_intf.bInterfaceNumber = status;
status = -ENODEV;
/* allocate instance-specific endpoints */
ep = usb_ep_autoconfig(cdev->gadget, &eem_fs_in_desc);
if (!ep)
goto fail;
eem->port.in_ep = ep;
ep->driver_data = cdev; /* claim */
ep = usb_ep_autoconfig(cdev->gadget, &eem_fs_out_desc);
if (!ep)
goto fail;
eem->port.out_ep = ep;
ep->driver_data = cdev; /* claim */
status = -ENOMEM;
/* copy descriptors, and track endpoint copies */
f->descriptors = usb_copy_descriptors(eem_fs_function);
if (!f->descriptors)
goto fail;
eem->fs.in = usb_find_endpoint(eem_fs_function,
f->descriptors, &eem_fs_in_desc);
eem->fs.out = usb_find_endpoint(eem_fs_function,
f->descriptors, &eem_fs_out_desc);
/* support all relevant hardware speeds... we expect that when
* hardware is dual speed, all bulk-capable endpoints work at
* both speeds
*/
if (gadget_is_dualspeed(c->cdev->gadget)) {
eem_hs_in_desc.bEndpointAddress =
eem_fs_in_desc.bEndpointAddress;
eem_hs_out_desc.bEndpointAddress =
eem_fs_out_desc.bEndpointAddress;
/* copy descriptors, and track endpoint copies */
f->hs_descriptors = usb_copy_descriptors(eem_hs_function);
if (!f->hs_descriptors)
goto fail;
eem->hs.in = usb_find_endpoint(eem_hs_function,
f->hs_descriptors, &eem_hs_in_desc);
eem->hs.out = usb_find_endpoint(eem_hs_function,
f->hs_descriptors, &eem_hs_out_desc);
}
DBG(cdev, "CDC Ethernet (EEM): %s speed IN/%s OUT/%s\n",
gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
eem->port.in_ep->name, eem->port.out_ep->name);
return 0;
fail:
if (f->descriptors)
usb_free_descriptors(f->descriptors);
/* we might as well release our claims on endpoints */
if (eem->port.out)
eem->port.out_ep->driver_data = NULL;
if (eem->port.in)
eem->port.in_ep->driver_data = NULL;
ERROR(cdev, "%s: can't bind, err %d\n", f->name, status);
return status;
}
static void
eem_unbind(struct usb_configuration *c, struct usb_function *f)
{
struct f_eem *eem = func_to_eem(f);
DBG(c->cdev, "eem unbind\n");
if (gadget_is_dualspeed(c->cdev->gadget))
usb_free_descriptors(f->hs_descriptors);
usb_free_descriptors(f->descriptors);
kfree(eem);
}
static void eem_cmd_complete(struct usb_ep *ep, struct usb_request *req)
{
}
/*
* Add the EEM header and ethernet checksum.
* We currently do not attempt to put multiple ethernet frames
* into a single USB transfer
*/
static struct sk_buff *eem_wrap(struct gether *port, struct sk_buff *skb)
{
struct sk_buff *skb2 = NULL;
struct usb_ep *in = port->in_ep;
int padlen = 0;
u16 len = skb->len;
if (!skb_cloned(skb)) {
int headroom = skb_headroom(skb);
int tailroom = skb_tailroom(skb);
/* When (len + EEM_HLEN + ETH_FCS_LEN) % in->maxpacket) is 0,
* stick two bytes of zero-length EEM packet on the end.
*/
if (((len + EEM_HLEN + ETH_FCS_LEN) % in->maxpacket) == 0)
padlen += 2;
if ((tailroom >= (ETH_FCS_LEN + padlen)) &&
(headroom >= EEM_HLEN))
goto done;
}
skb2 = skb_copy_expand(skb, EEM_HLEN, ETH_FCS_LEN + padlen, GFP_ATOMIC);
dev_kfree_skb_any(skb);
skb = skb2;
if (!skb)
return skb;
done:
/* use the "no CRC" option */
put_unaligned_be32(0xdeadbeef, skb_put(skb, 4));
/* EEM packet header format:
* b0..13: length of ethernet frame
* b14: bmCRC (0 == sentinel CRC)
* b15: bmType (0 == data)
*/
len = skb->len;
put_unaligned_le16((len & 0x3FFF) | BIT(14), skb_push(skb, 2));
/* add a zero-length EEM packet, if needed */
if (padlen)
put_unaligned_le16(0, skb_put(skb, 2));
return skb;
}
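/* Illustrative, standalone sketch (not part of this commit): packing the
 * 16-bit EEM data-packet header described in the comments of eem_wrap()
 * above.  Only the bit layout is shown; the skb handling stays as above.
 */
#include <stdint.h>
#include <stdio.h>

#define EEM_SENTINEL_CRC	0xdeadbeefU	/* the "no CRC" marker used above */

static uint16_t eem_data_hdr(uint16_t frame_len, int crc_calculated)
{
	/* b0..13 = frame length, b14 = bmCRC, b15 = 0 (bmType: data) */
	return (uint16_t)((frame_len & 0x3FFF) |
			(crc_calculated ? (1u << 14) : 0));
}

int main(void)
{
	printf("header for a 1514-byte frame, sentinel CRC: 0x%04x\n",
		eem_data_hdr(1514, 0));
	return 0;
}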
/*
* Remove the EEM header. Note that there can be many EEM packets in a single
* USB transfer, so we need to break them out and handle them independently.
*/
static int eem_unwrap(struct gether *port,
struct sk_buff *skb,
struct sk_buff_head *list)
{
struct usb_composite_dev *cdev = port->func.config->cdev;
int status = 0;
do {
struct sk_buff *skb2;
u16 header;
u16 len = 0;
if (skb->len < EEM_HLEN) {
status = -EINVAL;
DBG(cdev, "invalid EEM header\n");
goto error;
}
/* remove the EEM header */
header = get_unaligned_le16(skb->data);
skb_pull(skb, EEM_HLEN);
/* EEM packet header format:
* b0..14: EEM type dependent (data or command)
* b15: bmType (0 == data, 1 == command)
*/
if (header & BIT(15)) {
struct usb_request *req = cdev->req;
u16 bmEEMCmd;
/* EEM command packet format:
* b0..10: bmEEMCmdParam
* b11..13: bmEEMCmd
* b14: reserved (must be zero)
* b15: bmType (1 == command)
*/
if (header & BIT(14))
continue;
bmEEMCmd = (header >> 11) & 0x7;
switch (bmEEMCmd) {
case 0: /* echo */
len = header & 0x7FF;
if (skb->len < len) {
status = -EOVERFLOW;
goto error;
}
skb2 = skb_clone(skb, GFP_ATOMIC);
if (unlikely(!skb2)) {
DBG(cdev, "EEM echo response error\n");
goto next;
}
skb_trim(skb2, len);
put_unaligned_le16(BIT(15) | BIT(11) | len,
skb_push(skb2, 2));
skb_copy_bits(skb, 0, req->buf, skb->len);
req->length = skb->len;
req->complete = eem_cmd_complete;
req->zero = 1;
if (usb_ep_queue(port->in_ep, req, GFP_ATOMIC))
DBG(cdev, "echo response queue fail\n");
break;
case 1: /* echo response */
case 2: /* suspend hint */
case 3: /* response hint */
case 4: /* response complete hint */
case 5: /* tickle */
default: /* reserved */
continue;
}
} else {
u32 crc, crc2;
struct sk_buff *skb3;
/* check for zero-length EEM packet */
if (header == 0)
continue;
/* EEM data packet format:
* b0..13: length of ethernet frame
* b14: bmCRC (0 == sentinel, 1 == calculated)
* b15: bmType (0 == data)
*/
len = header & 0x3FFF;
if ((skb->len < len)
|| (len < (ETH_HLEN + ETH_FCS_LEN))) {
status = -EINVAL;
goto error;
}
/* validate CRC */
crc = get_unaligned_le32(skb->data + len - ETH_FCS_LEN);
if (header & BIT(14)) {
crc = get_unaligned_le32(skb->data + len
- ETH_FCS_LEN);
crc2 = ~crc32_le(~0,
skb->data,
skb->len - ETH_FCS_LEN);
} else {
crc = get_unaligned_be32(skb->data + len
- ETH_FCS_LEN);
crc2 = 0xdeadbeef;
}
if (crc != crc2) {
DBG(cdev, "invalid EEM CRC\n");
goto next;
}
skb2 = skb_clone(skb, GFP_ATOMIC);
if (unlikely(!skb2)) {
DBG(cdev, "unable to unframe EEM packet\n");
continue;
}
skb_trim(skb2, len - ETH_FCS_LEN);
skb3 = skb_copy_expand(skb2,
NET_IP_ALIGN,
0,
GFP_ATOMIC);
if (unlikely(!skb3)) {
DBG(cdev, "unable to realign EEM packet\n");
dev_kfree_skb_any(skb2);
continue;
}
dev_kfree_skb_any(skb2);
skb_queue_tail(list, skb3);
}
next:
skb_pull(skb, len);
} while (skb->len);
error:
dev_kfree_skb_any(skb);
return status;
}
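/* Illustrative, standalone sketch (not part of this commit): decoding the
 * 16-bit EEM header the same way eem_unwrap() above does, separating data
 * packets from command packets and extracting bmEEMCmd/bmEEMCmdParam.
 */
#include <stdint.h>
#include <stdio.h>

static void eem_decode_hdr(uint16_t header)
{
	if (header & (1u << 15)) {		/* bmType == 1: command */
		if (header & (1u << 14)) {	/* reserved bit must be zero */
			printf("reserved bit set, packet ignored\n");
			return;
		}
		printf("command %u, param 0x%03x\n",
			(header >> 11) & 0x7,	/* 0 == echo ... 5 == tickle */
			header & 0x7FF);
	} else {				/* bmType == 0: data */
		printf("data, frame length %u, bmCRC %u\n",
			header & 0x3FFF, (header >> 14) & 1);
	}
}

int main(void)
{
	eem_decode_hdr(0x05ee);		/* data frame, sentinel CRC */
	eem_decode_hdr(0x8010);		/* echo command, 16-byte payload */
	return 0;
}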
/**
* eem_bind_config - add CDC Ethernet (EEM) network link to a configuration
* @c: the configuration to support the network link
* Context: single threaded during gadget setup
*
* Returns zero on success, else negative errno.
*
* Caller must have called @gether_setup(). Caller is also responsible
* for calling @gether_cleanup() before module unload.
*/
int __init eem_bind_config(struct usb_configuration *c)
{
struct f_eem *eem;
int status;
/* maybe allocate device-global string IDs */
if (eem_string_defs[0].id == 0) {
/* control interface label */
status = usb_string_id(c->cdev);
if (status < 0)
return status;
eem_string_defs[0].id = status;
eem_intf.iInterface = status;
}
/* allocate and initialize one new instance */
eem = kzalloc(sizeof *eem, GFP_KERNEL);
if (!eem)
return -ENOMEM;
eem->port.cdc_filter = DEFAULT_FILTER;
eem->port.func.name = "cdc_eem";
eem->port.func.strings = eem_strings;
/* descriptors are per-instance copies */
eem->port.func.bind = eem_bind;
eem->port.func.unbind = eem_unbind;
eem->port.func.set_alt = eem_set_alt;
eem->port.func.setup = eem_setup;
eem->port.func.disable = eem_disable;
eem->port.wrap = eem_wrap;
eem->port.unwrap = eem_unwrap;
eem->port.header_len = EEM_HLEN;
status = usb_add_function(c, &eem->port.func);
if (status)
kfree(eem);
return status;
}
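/* Illustrative only (not part of this commit): the call order implied by
 * the kerneldoc above, inside a hypothetical composite gadget driver.
 * Names prefixed my_* are made up; gether_setup()/gether_cleanup() and
 * eem_bind_config() are the interfaces declared in u_ether.h.
 */
static u8 my_hostaddr[ETH_ALEN];

static int __init my_eem_config_bind(struct usb_configuration *c)
{
	return eem_bind_config(c);	/* adds the EEM function to config c */
}

static int __init my_composite_bind(struct usb_composite_dev *cdev)
{
	int status;

	status = gether_setup(cdev->gadget, my_hostaddr);	/* before any config */
	if (status < 0)
		return status;
	/* ... usb_add_config() a configuration whose ->bind is my_eem_config_bind ... */
	return 0;
}

static void my_composite_cleanup(void)
{
	gether_cleanup();		/* after the functions are gone */
}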

View File

@ -286,12 +286,17 @@ static struct usb_gadget_strings *rndis_strings[] = {
/*-------------------------------------------------------------------------*/
static struct sk_buff *rndis_add_header(struct sk_buff *skb)
static struct sk_buff *rndis_add_header(struct gether *port,
struct sk_buff *skb)
{
skb = skb_realloc_headroom(skb, sizeof(struct rndis_packet_msg_type));
if (skb)
rndis_add_hdr(skb);
return skb;
struct sk_buff *skb2;
skb2 = skb_realloc_headroom(skb, sizeof(struct rndis_packet_msg_type));
if (skb2)
rndis_add_hdr(skb2);
dev_kfree_skb_any(skb);
return skb2;
}
static void rndis_response_available(void *_rndis)

View File

@ -2750,6 +2750,10 @@ static int __devexit qe_udc_remove(struct of_device *ofdev)
/*-------------------------------------------------------------------------*/
static struct of_device_id __devinitdata qe_udc_match[] = {
{
.compatible = "fsl,mpc8323-qe-usb",
.data = (void *)PORT_QE,
},
{
.compatible = "fsl,mpc8360-qe-usb",
.data = (void *)PORT_QE,

View File

@ -191,7 +191,7 @@ module_param(qlen, uint, S_IRUGO);
#define GMIDI_MS_INTERFACE 1
#define GMIDI_NUM_INTERFACES 2
DECLARE_USB_AC_HEADER_DESCRIPTOR(1);
DECLARE_UAC_AC_HEADER_DESCRIPTOR(1);
DECLARE_USB_MIDI_OUT_JACK_DESCRIPTOR(1);
DECLARE_USB_MS_ENDPOINT_DESCRIPTOR(1);
@ -237,12 +237,12 @@ static const struct usb_interface_descriptor ac_interface_desc = {
};
/* B.3.2 Class-Specific AC Interface Descriptor */
static const struct usb_ac_header_descriptor_1 ac_header_desc = {
.bLength = USB_DT_AC_HEADER_SIZE(1),
static const struct uac_ac_header_descriptor_1 ac_header_desc = {
.bLength = UAC_DT_AC_HEADER_SIZE(1),
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubtype = USB_MS_HEADER,
.bcdADC = cpu_to_le16(0x0100),
.wTotalLength = cpu_to_le16(USB_DT_AC_HEADER_SIZE(1)),
.wTotalLength = cpu_to_le16(UAC_DT_AC_HEADER_SIZE(1)),
.bInCollection = 1,
.baInterfaceNr = {
[0] = GMIDI_MS_INTERFACE,

View File

@ -56,6 +56,7 @@
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb/otg.h>
/*
* This driver is PXA25x only. Grab the right register definitions.
@ -1008,15 +1009,27 @@ static int pxa25x_udc_pullup(struct usb_gadget *_gadget, int is_active)
return 0;
}
/* boards may consume current from VBUS, up to 100-500mA based on config.
* the 500uA suspend ceiling means that exclusively vbus-powered PXA designs
* violate USB specs.
*/
static int pxa25x_udc_vbus_draw(struct usb_gadget *_gadget, unsigned mA)
{
struct pxa25x_udc *udc;
udc = container_of(_gadget, struct pxa25x_udc, gadget);
if (udc->transceiver)
return otg_set_power(udc->transceiver, mA);
return -EOPNOTSUPP;
}
static const struct usb_gadget_ops pxa25x_udc_ops = {
.get_frame = pxa25x_udc_get_frame,
.wakeup = pxa25x_udc_wakeup,
.vbus_session = pxa25x_udc_vbus_session,
.pullup = pxa25x_udc_pullup,
// .vbus_draw ... boards may consume current from VBUS, up to
// 100-500mA based on config. the 500uA suspend ceiling means
// that exclusively vbus-powered PXA designs violate USB specs.
.vbus_draw = pxa25x_udc_vbus_draw,
};
/*-------------------------------------------------------------------------*/
@ -1303,9 +1316,23 @@ fail:
* for set_configuration as well as eventual disconnect.
*/
DMSG("registered gadget driver '%s'\n", driver->driver.name);
/* connect to bus through transceiver */
if (dev->transceiver) {
retval = otg_set_peripheral(dev->transceiver, &dev->gadget);
if (retval) {
DMSG("can't bind to transceiver\n");
if (driver->unbind)
driver->unbind(&dev->gadget);
goto bind_fail;
}
}
pullup(dev);
dump_state(dev);
return 0;
bind_fail:
return retval;
}
EXPORT_SYMBOL(usb_gadget_register_driver);
@ -1351,6 +1378,9 @@ int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
stop_activity(dev, driver);
local_irq_enable();
if (dev->transceiver)
(void) otg_set_peripheral(dev->transceiver, NULL);
driver->unbind(&dev->gadget);
dev->gadget.dev.driver = NULL;
dev->driver = NULL;
@ -2162,6 +2192,8 @@ static int __init pxa25x_udc_probe(struct platform_device *pdev)
dev->dev = &pdev->dev;
dev->mach = pdev->dev.platform_data;
dev->transceiver = otg_get_transceiver();
if (gpio_is_valid(dev->mach->gpio_vbus)) {
if ((retval = gpio_request(dev->mach->gpio_vbus,
"pxa25x_udc GPIO VBUS"))) {
@ -2264,6 +2296,10 @@ lubbock_fail0:
if (gpio_is_valid(dev->mach->gpio_vbus))
gpio_free(dev->mach->gpio_vbus);
err_gpio_vbus:
if (dev->transceiver) {
otg_put_transceiver(dev->transceiver);
dev->transceiver = NULL;
}
clk_put(dev->clk);
err_clk:
return retval;
@ -2305,6 +2341,11 @@ static int __exit pxa25x_udc_remove(struct platform_device *pdev)
clk_put(dev->clk);
if (dev->transceiver) {
otg_put_transceiver(dev->transceiver);
dev->transceiver = NULL;
}
platform_set_drvdata(pdev, NULL);
the_controller = NULL;
return 0;

View File

@ -128,6 +128,7 @@ struct pxa25x_udc {
struct device *dev;
struct clk *clk;
struct pxa2xx_udc_mach_info *mach;
struct otg_transceiver *transceiver;
u64 dma_mask;
struct pxa25x_ep ep [PXA_UDC_NUM_ENDPOINTS];

View File

@ -1022,22 +1022,29 @@ static rndis_resp_t *rndis_add_response (int configNr, u32 length)
return r;
}
int rndis_rm_hdr(struct sk_buff *skb)
int rndis_rm_hdr(struct gether *port,
struct sk_buff *skb,
struct sk_buff_head *list)
{
/* tmp points to a struct rndis_packet_msg_type */
__le32 *tmp = (void *) skb->data;
/* MessageType, MessageLength */
if (cpu_to_le32(REMOTE_NDIS_PACKET_MSG)
!= get_unaligned(tmp++))
!= get_unaligned(tmp++)) {
dev_kfree_skb_any(skb);
return -EINVAL;
}
tmp++;
/* DataOffset, DataLength */
if (!skb_pull(skb, get_unaligned_le32(tmp++) + 8))
if (!skb_pull(skb, get_unaligned_le32(tmp++) + 8)) {
dev_kfree_skb_any(skb);
return -EOVERFLOW;
}
skb_trim(skb, get_unaligned_le32(tmp++));
skb_queue_tail(list, skb);
return 0;
}
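/* Illustrative, standalone sketch (not part of this commit): where the
 * Ethernet payload sits inside a REMOTE_NDIS_PACKET_MSG, mirroring the
 * skb_pull(skb, DataOffset + 8) above.  Only the first four header fields
 * are shown; all of them are little-endian on the wire.
 */
#include <stdint.h>
#include <stdio.h>

struct rndis_pkt_hdr {
	uint32_t msg_type;	/* REMOTE_NDIS_PACKET_MSG (0x00000001 per spec) */
	uint32_t msg_len;	/* whole message, header + payload */
	uint32_t data_offset;	/* counted from the start of this field */
	uint32_t data_len;	/* payload length */
};

static uint32_t rndis_payload_offset(const struct rndis_pkt_hdr *h)
{
	return 8 + h->data_offset;	/* DataOffset field starts at byte 8 */
}

int main(void)
{
	struct rndis_pkt_hdr h = { 0x00000001, 0x061a, 0x24, 0x05ee };

	printf("payload starts at byte %u of the message\n",
		rndis_payload_offset(&h));
	return 0;
}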

View File

@ -251,7 +251,8 @@ int rndis_set_param_vendor (u8 configNr, u32 vendorID,
const char *vendorDescr);
int rndis_set_param_medium (u8 configNr, u32 medium, u32 speed);
void rndis_add_hdr (struct sk_buff *skb);
int rndis_rm_hdr (struct sk_buff *skb);
int rndis_rm_hdr(struct gether *port, struct sk_buff *skb,
struct sk_buff_head *list);
u8 *rndis_get_next_response (int configNr, u32 *length);
void rndis_free_response (int configNr, u8 *buf);

View File

@ -2392,7 +2392,7 @@ static int s3c_hsotg_corereset(struct s3c_hsotg *hsotg)
grstctl = readl(hsotg->regs + S3C_GRSTCTL);
} while (!(grstctl & S3C_GRSTCTL_CSftRst) && timeout-- > 0);
if (!grstctl & S3C_GRSTCTL_CSftRst) {
if (!(grstctl & S3C_GRSTCTL_CSftRst)) {
dev_err(hsotg->dev, "Failed to get CSftRst asserted\n");
return -EINVAL;
}
@ -2514,8 +2514,8 @@ int usb_gadget_register_driver(struct usb_gadget_driver *driver)
* DMA mode we may need this. */
writel(S3C_DOEPMSK_SetupMsk | S3C_DOEPMSK_AHBErrMsk |
S3C_DOEPMSK_EPDisbldMsk |
using_dma(hsotg) ? (S3C_DIEPMSK_XferComplMsk |
S3C_DIEPMSK_TimeOUTMsk) : 0,
(using_dma(hsotg) ? (S3C_DIEPMSK_XferComplMsk |
S3C_DIEPMSK_TimeOUTMsk) : 0),
hsotg->regs + S3C_DOEPMSK);
writel(0, hsotg->regs + S3C_DAINTMSK);

View File

@ -1703,8 +1703,7 @@ int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
dprintk(DEBUG_NORMAL,"usb_gadget_register_driver() '%s'\n",
driver->driver.name);
if (driver->disconnect)
driver->disconnect(&udc->gadget);
driver->unbind(&udc->gadget);
device_del(&udc->gadget.dev);
udc->driver = NULL;

View File

@ -253,11 +253,13 @@ static int gaudio_open_snd_dev(struct gaudio *card)
snd->filp = filp_open(fn_cap, O_RDONLY, 0);
if (IS_ERR(snd->filp)) {
ERROR(card, "No such PCM capture device: %s\n", fn_cap);
snd->filp = NULL;
snd->substream = NULL;
snd->card = NULL;
} else {
pcm_file = snd->filp->private_data;
snd->substream = pcm_file->substream;
snd->card = card;
}
pcm_file = snd->filp->private_data;
snd->substream = pcm_file->substream;
snd->card = card;
return 0;
}

View File

@ -37,8 +37,9 @@
* one (!) network link through the USB gadget stack, normally "usb0".
*
* The control and data models are handled by the function driver which
* connects to this code; such as CDC Ethernet, "CDC Subset", or RNDIS.
* That includes all descriptor and endpoint management.
* connects to this code; such as CDC Ethernet (ECM or EEM),
* "CDC Subset", or RNDIS. That includes all descriptor and endpoint
* management.
*
* Link level addressing is handled by this component using module
* parameters; if no such parameters are provided, random link level
@ -68,9 +69,13 @@ struct eth_dev {
struct list_head tx_reqs, rx_reqs;
atomic_t tx_qlen;
struct sk_buff_head rx_frames;
unsigned header_len;
struct sk_buff *(*wrap)(struct sk_buff *skb);
int (*unwrap)(struct sk_buff *skb);
struct sk_buff *(*wrap)(struct gether *, struct sk_buff *skb);
int (*unwrap)(struct gether *,
struct sk_buff *skb,
struct sk_buff_head *list);
struct work_struct work;
@ -269,7 +274,7 @@ enomem:
static void rx_complete(struct usb_ep *ep, struct usb_request *req)
{
struct sk_buff *skb = req->context;
struct sk_buff *skb = req->context, *skb2;
struct eth_dev *dev = ep->driver_data;
int status = req->status;
@ -278,26 +283,47 @@ static void rx_complete(struct usb_ep *ep, struct usb_request *req)
/* normal completion */
case 0:
skb_put(skb, req->actual);
if (dev->unwrap)
status = dev->unwrap(skb);
if (status < 0
|| ETH_HLEN > skb->len
|| skb->len > ETH_FRAME_LEN) {
dev->net->stats.rx_errors++;
dev->net->stats.rx_length_errors++;
DBG(dev, "rx length %d\n", skb->len);
break;
if (dev->unwrap) {
unsigned long flags;
spin_lock_irqsave(&dev->lock, flags);
if (dev->port_usb) {
status = dev->unwrap(dev->port_usb,
skb,
&dev->rx_frames);
} else {
dev_kfree_skb_any(skb);
status = -ENOTCONN;
}
spin_unlock_irqrestore(&dev->lock, flags);
} else {
skb_queue_tail(&dev->rx_frames, skb);
}
skb->protocol = eth_type_trans(skb, dev->net);
dev->net->stats.rx_packets++;
dev->net->stats.rx_bytes += skb->len;
/* no buffer copies needed, unless hardware can't
* use skb buffers.
*/
status = netif_rx(skb);
skb = NULL;
skb2 = skb_dequeue(&dev->rx_frames);
while (skb2) {
if (status < 0
|| ETH_HLEN > skb2->len
|| skb2->len > ETH_FRAME_LEN) {
dev->net->stats.rx_errors++;
dev->net->stats.rx_length_errors++;
DBG(dev, "rx length %d\n", skb2->len);
dev_kfree_skb_any(skb2);
goto next_frame;
}
skb2->protocol = eth_type_trans(skb2, dev->net);
dev->net->stats.rx_packets++;
dev->net->stats.rx_bytes += skb2->len;
/* no buffer copies needed, unless hardware can't
* use skb buffers.
*/
status = netif_rx(skb2);
next_frame:
skb2 = skb_dequeue(&dev->rx_frames);
}
break;
/* software-driven interface shutdown */
@ -537,14 +563,15 @@ static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
* or there's not enough space for extra headers we need
*/
if (dev->wrap) {
struct sk_buff *skb_new;
unsigned long flags;
skb_new = dev->wrap(skb);
if (!skb_new)
spin_lock_irqsave(&dev->lock, flags);
if (dev->port_usb)
skb = dev->wrap(dev->port_usb, skb);
spin_unlock_irqrestore(&dev->lock, flags);
if (!skb)
goto drop;
dev_kfree_skb_any(skb);
skb = skb_new;
length = skb->len;
}
req->buf = skb->data;
@ -578,9 +605,9 @@ static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
}
if (retval) {
dev_kfree_skb_any(skb);
drop:
dev->net->stats.tx_dropped++;
dev_kfree_skb_any(skb);
spin_lock_irqsave(&dev->req_lock, flags);
if (list_empty(&dev->tx_reqs))
netif_start_queue(net);
@ -753,6 +780,8 @@ int __init gether_setup(struct usb_gadget *g, u8 ethaddr[ETH_ALEN])
INIT_LIST_HEAD(&dev->tx_reqs);
INIT_LIST_HEAD(&dev->rx_reqs);
skb_queue_head_init(&dev->rx_frames);
/* network device setup */
dev->net = net;
strcpy(net->name, "usb%d");

View File

@ -60,12 +60,13 @@ struct gether {
u16 cdc_filter;
/* hooks for added framing, as needed for RNDIS and EEM.
* we currently don't support multiple frames per SKB.
*/
/* hooks for added framing, as needed for RNDIS and EEM. */
u32 header_len;
struct sk_buff *(*wrap)(struct sk_buff *skb);
int (*unwrap)(struct sk_buff *skb);
struct sk_buff *(*wrap)(struct gether *port,
struct sk_buff *skb);
int (*unwrap)(struct gether *port,
struct sk_buff *skb,
struct sk_buff_head *list);
/* called on network open/close */
void (*open)(struct gether *);
@ -109,6 +110,7 @@ static inline bool can_support_ecm(struct usb_gadget *gadget)
/* each configuration may bind one instance of an ethernet link */
int geth_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN]);
int ecm_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN]);
int eem_bind_config(struct usb_configuration *c);
#ifdef CONFIG_USB_ETH_RNDIS

View File

@ -1114,7 +1114,6 @@ int __init gserial_setup(struct usb_gadget *g, unsigned count)
/* export the driver ... */
status = tty_register_driver(gs_tty_driver);
if (status) {
put_tty_driver(gs_tty_driver);
pr_err("%s: cannot register, err %d\n",
__func__, status);
goto fail;

View File

@ -113,6 +113,12 @@ config USB_EHCI_HCD_PPC_OF
Enables support for the USB controller present on the PowerPC
OpenFirmware platform bus.
config USB_W90X900_EHCI
bool "W90X900(W90P910) EHCI support"
depends on USB_EHCI_HCD && ARCH_W90X900
---help---
Enables support for the W90X900 USB controller
config USB_OXU210HP_HCD
tristate "OXU210HP HCD support"
depends on USB
@ -153,6 +159,18 @@ config USB_ISP1760_HCD
To compile this driver as a module, choose M here: the
module will be called isp1760.
config USB_ISP1362_HCD
tristate "ISP1362 HCD support"
depends on USB
default N
---help---
Supports the Philips ISP1362 chip as a host controller.
This driver does not support isochronous transfers.
To compile this driver as a module, choose M here: the
module will be called isp1362-hcd.
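# Illustrative .config fragment (not part of this commit): enabling the two
# new host controller drivers added above.  W90X900 support additionally
# depends on ARCH_W90X900 being selected for the target platform.
CONFIG_USB_EHCI_HCD=y
CONFIG_USB_W90X900_EHCI=y
CONFIG_USB_ISP1362_HCD=m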
config USB_OHCI_HCD
tristate "OHCI HCD support"
depends on USB && USB_ARCH_HAS_OHCI

View File

@ -21,6 +21,7 @@ obj-$(CONFIG_PCI) += pci-quirks.o
obj-$(CONFIG_USB_EHCI_HCD) += ehci-hcd.o
obj-$(CONFIG_USB_OXU210HP_HCD) += oxu210hp-hcd.o
obj-$(CONFIG_USB_ISP116X_HCD) += isp116x-hcd.o
obj-$(CONFIG_USB_ISP1362_HCD) += isp1362-hcd.o
obj-$(CONFIG_USB_OHCI_HCD) += ohci-hcd.o
obj-$(CONFIG_USB_UHCI_HCD) += uhci-hcd.o
obj-$(CONFIG_USB_FHCI_HCD) += fhci.o

View File

@ -0,0 +1,230 @@
/*
* Driver for EHCI UHP on Atmel chips
*
* Copyright (C) 2009 Atmel Corporation,
* Nicolas Ferre <nicolas.ferre@atmel.com>
*
* Based on various ehci-*.c drivers
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive for
* more details.
*/
#include <linux/clk.h>
#include <linux/platform_device.h>
/* interface and function clocks */
static struct clk *iclk, *fclk;
static int clocked;
/*-------------------------------------------------------------------------*/
static void atmel_start_clock(void)
{
clk_enable(iclk);
clk_enable(fclk);
clocked = 1;
}
static void atmel_stop_clock(void)
{
clk_disable(fclk);
clk_disable(iclk);
clocked = 0;
}
static void atmel_start_ehci(struct platform_device *pdev)
{
dev_dbg(&pdev->dev, "start\n");
atmel_start_clock();
}
static void atmel_stop_ehci(struct platform_device *pdev)
{
dev_dbg(&pdev->dev, "stop\n");
atmel_stop_clock();
}
/*-------------------------------------------------------------------------*/
static int ehci_atmel_setup(struct usb_hcd *hcd)
{
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
int retval = 0;
/* registers start at offset 0x0 */
ehci->caps = hcd->regs;
ehci->regs = hcd->regs +
HC_LENGTH(ehci_readl(ehci, &ehci->caps->hc_capbase));
dbg_hcs_params(ehci, "reset");
dbg_hcc_params(ehci, "reset");
/* cache this readonly data; minimize chip reads */
ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params);
retval = ehci_halt(ehci);
if (retval)
return retval;
/* data structure init */
retval = ehci_init(hcd);
if (retval)
return retval;
ehci->sbrn = 0x20;
ehci_reset(ehci);
ehci_port_power(ehci, 0);
return retval;
}
static const struct hc_driver ehci_atmel_hc_driver = {
.description = hcd_name,
.product_desc = "Atmel EHCI UHP HS",
.hcd_priv_size = sizeof(struct ehci_hcd),
/* generic hardware linkage */
.irq = ehci_irq,
.flags = HCD_MEMORY | HCD_USB2,
/* basic lifecycle operations */
.reset = ehci_atmel_setup,
.start = ehci_run,
.stop = ehci_stop,
.shutdown = ehci_shutdown,
/* managing i/o requests and associated device resources */
.urb_enqueue = ehci_urb_enqueue,
.urb_dequeue = ehci_urb_dequeue,
.endpoint_disable = ehci_endpoint_disable,
/* scheduling support */
.get_frame_number = ehci_get_frame,
/* root hub support */
.hub_status_data = ehci_hub_status_data,
.hub_control = ehci_hub_control,
.bus_suspend = ehci_bus_suspend,
.bus_resume = ehci_bus_resume,
.relinquish_port = ehci_relinquish_port,
.port_handed_over = ehci_port_handed_over,
};
static int __init ehci_atmel_drv_probe(struct platform_device *pdev)
{
struct usb_hcd *hcd;
const struct hc_driver *driver = &ehci_atmel_hc_driver;
struct resource *res;
int irq;
int retval;
if (usb_disabled())
return -ENODEV;
pr_debug("Initializing Atmel-SoC USB Host Controller\n");
irq = platform_get_irq(pdev, 0);
if (irq <= 0) {
dev_err(&pdev->dev,
"Found HC with no IRQ. Check %s setup!\n",
dev_name(&pdev->dev));
retval = -ENODEV;
goto fail_create_hcd;
}
hcd = usb_create_hcd(driver, &pdev->dev, dev_name(&pdev->dev));
if (!hcd) {
retval = -ENOMEM;
goto fail_create_hcd;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
dev_err(&pdev->dev,
"Found HC with no register addr. Check %s setup!\n",
dev_name(&pdev->dev));
retval = -ENODEV;
goto fail_request_resource;
}
hcd->rsrc_start = res->start;
hcd->rsrc_len = res->end - res->start + 1;
if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len,
driver->description)) {
dev_dbg(&pdev->dev, "controller already in use\n");
retval = -EBUSY;
goto fail_request_resource;
}
hcd->regs = ioremap_nocache(hcd->rsrc_start, hcd->rsrc_len);
if (hcd->regs == NULL) {
dev_dbg(&pdev->dev, "error mapping memory\n");
retval = -EFAULT;
goto fail_ioremap;
}
iclk = clk_get(&pdev->dev, "ehci_clk");
if (IS_ERR(iclk)) {
dev_err(&pdev->dev, "Error getting interface clock\n");
retval = -ENOENT;
goto fail_get_iclk;
}
fclk = clk_get(&pdev->dev, "uhpck");
if (IS_ERR(fclk)) {
dev_err(&pdev->dev, "Error getting function clock\n");
retval = -ENOENT;
goto fail_get_fclk;
}
atmel_start_ehci(pdev);
retval = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (retval)
goto fail_add_hcd;
return retval;
fail_add_hcd:
atmel_stop_ehci(pdev);
clk_put(fclk);
fail_get_fclk:
clk_put(iclk);
fail_get_iclk:
iounmap(hcd->regs);
fail_ioremap:
release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
fail_request_resource:
usb_put_hcd(hcd);
fail_create_hcd:
dev_err(&pdev->dev, "init %s fail, %d\n",
dev_name(&pdev->dev), retval);
return retval;
}
static int __exit ehci_atmel_drv_remove(struct platform_device *pdev)
{
struct usb_hcd *hcd = platform_get_drvdata(pdev);
ehci_shutdown(hcd);
usb_remove_hcd(hcd);
iounmap(hcd->regs);
release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
usb_put_hcd(hcd);
atmel_stop_ehci(pdev);
clk_put(fclk);
clk_put(iclk);
fclk = iclk = NULL;
return 0;
}
static struct platform_driver ehci_atmel_driver = {
.probe = ehci_atmel_drv_probe,
.remove = __exit_p(ehci_atmel_drv_remove),
.shutdown = usb_hcd_platform_shutdown,
.driver.name = "atmel-ehci",
};
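/* Illustrative only (not part of this commit): the board-side platform
 * device this driver would bind against.  The name must match .driver.name
 * above; the MEM/IRQ values are placeholders, and a real board would also
 * set a DMA mask and provide "ehci_clk"/"uhpck" clock lookups.
 */
#include <linux/platform_device.h>

static struct resource my_ehci_resources[] = {
	[0] = {
		.start	= 0x00800000,		/* placeholder UHP base */
		.end	= 0x00800000 + 0xfff,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= 22,			/* placeholder IRQ */
		.end	= 22,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device my_ehci_device = {
	.name		= "atmel-ehci",
	.id		= -1,
	.resource	= my_ehci_resources,
	.num_resources	= ARRAY_SIZE(my_ehci_resources),
};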

View File

@ -199,10 +199,9 @@ static int ehci_hcd_au1xxx_drv_remove(struct platform_device *pdev)
}
#ifdef CONFIG_PM
static int ehci_hcd_au1xxx_drv_suspend(struct platform_device *pdev,
pm_message_t message)
static int ehci_hcd_au1xxx_drv_suspend(struct device *dev)
{
struct usb_hcd *hcd = platform_get_drvdata(pdev);
struct usb_hcd *hcd = dev_get_drvdata(dev);
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
unsigned long flags;
int rc;
@ -229,12 +228,6 @@ static int ehci_hcd_au1xxx_drv_suspend(struct platform_device *pdev,
ehci_writel(ehci, 0, &ehci->regs->intr_enable);
(void)ehci_readl(ehci, &ehci->regs->intr_enable);
/* make sure snapshot being resumed re-enumerates everything */
if (message.event == PM_EVENT_PRETHAW) {
ehci_halt(ehci);
ehci_reset(ehci);
}
clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
au1xxx_stop_ehc();
@ -248,10 +241,9 @@ bail:
return rc;
}
static int ehci_hcd_au1xxx_drv_resume(struct platform_device *pdev)
static int ehci_hcd_au1xxx_drv_resume(struct device *dev)
{
struct usb_hcd *hcd = platform_get_drvdata(pdev);
struct usb_hcd *hcd = dev_get_drvdata(dev);
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
au1xxx_start_ehc();
@ -305,20 +297,25 @@ static int ehci_hcd_au1xxx_drv_resume(struct platform_device *pdev)
return 0;
}
static struct dev_pm_ops au1xxx_ehci_pmops = {
.suspend = ehci_hcd_au1xxx_drv_suspend,
.resume = ehci_hcd_au1xxx_drv_resume,
};
#define AU1XXX_EHCI_PMOPS &au1xxx_ehci_pmops
#else
#define ehci_hcd_au1xxx_drv_suspend NULL
#define ehci_hcd_au1xxx_drv_resume NULL
#define AU1XXX_EHCI_PMOPS NULL
#endif
static struct platform_driver ehci_hcd_au1xxx_driver = {
.probe = ehci_hcd_au1xxx_drv_probe,
.remove = ehci_hcd_au1xxx_drv_remove,
.shutdown = usb_hcd_platform_shutdown,
.suspend = ehci_hcd_au1xxx_drv_suspend,
.resume = ehci_hcd_au1xxx_drv_resume,
.driver = {
.name = "au1xxx-ehci",
.owner = THIS_MODULE,
.pm = AU1XXX_EHCI_PMOPS,
}
};

View File

@ -134,10 +134,11 @@ dbg_qtd (const char *label, struct ehci_hcd *ehci, struct ehci_qtd *qtd)
static void __maybe_unused
dbg_qh (const char *label, struct ehci_hcd *ehci, struct ehci_qh *qh)
{
struct ehci_qh_hw *hw = qh->hw;
ehci_dbg (ehci, "%s qh %p n%08x info %x %x qtd %x\n", label,
qh, qh->hw_next, qh->hw_info1, qh->hw_info2,
qh->hw_current);
dbg_qtd ("overlay", ehci, (struct ehci_qtd *) &qh->hw_qtd_next);
qh, hw->hw_next, hw->hw_info1, hw->hw_info2, hw->hw_current);
dbg_qtd("overlay", ehci, (struct ehci_qtd *) &hw->hw_qtd_next);
}
static void __maybe_unused
@ -400,31 +401,32 @@ static void qh_lines (
char *next = *nextp;
char mark;
__le32 list_end = EHCI_LIST_END(ehci);
struct ehci_qh_hw *hw = qh->hw;
if (qh->hw_qtd_next == list_end) /* NEC does this */
if (hw->hw_qtd_next == list_end) /* NEC does this */
mark = '@';
else
mark = token_mark(ehci, qh->hw_token);
mark = token_mark(ehci, hw->hw_token);
if (mark == '/') { /* qh_alt_next controls qh advance? */
if ((qh->hw_alt_next & QTD_MASK(ehci))
== ehci->async->hw_alt_next)
if ((hw->hw_alt_next & QTD_MASK(ehci))
== ehci->async->hw->hw_alt_next)
mark = '#'; /* blocked */
else if (qh->hw_alt_next == list_end)
else if (hw->hw_alt_next == list_end)
mark = '.'; /* use hw_qtd_next */
/* else alt_next points to some other qtd */
}
scratch = hc32_to_cpup(ehci, &qh->hw_info1);
hw_curr = (mark == '*') ? hc32_to_cpup(ehci, &qh->hw_current) : 0;
scratch = hc32_to_cpup(ehci, &hw->hw_info1);
hw_curr = (mark == '*') ? hc32_to_cpup(ehci, &hw->hw_current) : 0;
temp = scnprintf (next, size,
"qh/%p dev%d %cs ep%d %08x %08x (%08x%c %s nak%d)",
qh, scratch & 0x007f,
speed_char (scratch),
(scratch >> 8) & 0x000f,
scratch, hc32_to_cpup(ehci, &qh->hw_info2),
hc32_to_cpup(ehci, &qh->hw_token), mark,
(cpu_to_hc32(ehci, QTD_TOGGLE) & qh->hw_token)
scratch, hc32_to_cpup(ehci, &hw->hw_info2),
hc32_to_cpup(ehci, &hw->hw_token), mark,
(cpu_to_hc32(ehci, QTD_TOGGLE) & hw->hw_token)
? "data1" : "data0",
(hc32_to_cpup(ehci, &qh->hw_alt_next) >> 1) & 0x0f);
(hc32_to_cpup(ehci, &hw->hw_alt_next) >> 1) & 0x0f);
size -= temp;
next += temp;
@ -435,10 +437,10 @@ static void qh_lines (
mark = ' ';
if (hw_curr == td->qtd_dma)
mark = '*';
else if (qh->hw_qtd_next == cpu_to_hc32(ehci, td->qtd_dma))
else if (hw->hw_qtd_next == cpu_to_hc32(ehci, td->qtd_dma))
mark = '+';
else if (QTD_LENGTH (scratch)) {
if (td->hw_alt_next == ehci->async->hw_alt_next)
if (td->hw_alt_next == ehci->async->hw->hw_alt_next)
mark = '#';
else if (td->hw_alt_next != list_end)
mark = '/';
@ -550,12 +552,15 @@ static ssize_t fill_periodic_buffer(struct debug_buffer *buf)
next += temp;
do {
struct ehci_qh_hw *hw;
switch (hc32_to_cpu(ehci, tag)) {
case Q_TYPE_QH:
hw = p.qh->hw;
temp = scnprintf (next, size, " qh%d-%04x/%p",
p.qh->period,
hc32_to_cpup(ehci,
&p.qh->hw_info2)
&hw->hw_info2)
/* uframe masks */
& (QH_CMASK | QH_SMASK),
p.qh);
@ -576,7 +581,7 @@ static ssize_t fill_periodic_buffer(struct debug_buffer *buf)
/* show more info the first time around */
if (temp == seen_count) {
u32 scratch = hc32_to_cpup(ehci,
&p.qh->hw_info1);
&hw->hw_info1);
struct ehci_qtd *qtd;
char *type = "";
@ -609,7 +614,7 @@ static ssize_t fill_periodic_buffer(struct debug_buffer *buf)
} else
temp = 0;
if (p.qh) {
tag = Q_NEXT_TYPE(ehci, p.qh->hw_next);
tag = Q_NEXT_TYPE(ehci, hw->hw_next);
p = p.qh->qh_next;
}
break;
@ -879,8 +884,7 @@ static int debug_close(struct inode *inode, struct file *file)
struct debug_buffer *buf = file->private_data;
if (buf) {
if (buf->output_buf)
vfree(buf->output_buf);
vfree(buf->output_buf);
kfree(buf);
}

View File

@ -30,7 +30,6 @@
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/usb.h>
#include <linux/moduleparam.h>
#include <linux/dma-mapping.h>
@ -127,6 +126,8 @@ timer_action(struct ehci_hcd *ehci, enum ehci_timer_action action)
switch (action) {
case TIMER_IO_WATCHDOG:
if (!ehci->need_io_watchdog)
return;
t = EHCI_IO_JIFFIES;
break;
case TIMER_ASYNC_OFF:
@ -239,6 +240,11 @@ static int ehci_reset (struct ehci_hcd *ehci)
int retval;
u32 command = ehci_readl(ehci, &ehci->regs->command);
/* If the EHCI debug controller is active, special care must be
* taken before and after a host controller reset */
if (ehci->debug && !dbgp_reset_prep())
ehci->debug = NULL;
command |= CMD_RESET;
dbg_cmd (ehci, "reset", command);
ehci_writel(ehci, command, &ehci->regs->command);
@ -247,12 +253,21 @@ static int ehci_reset (struct ehci_hcd *ehci)
retval = handshake (ehci, &ehci->regs->command,
CMD_RESET, 0, 250 * 1000);
if (ehci->has_hostpc) {
ehci_writel(ehci, USBMODE_EX_HC | USBMODE_EX_VBPS,
(u32 __iomem *)(((u8 *)ehci->regs) + USBMODE_EX));
ehci_writel(ehci, TXFIFO_DEFAULT,
(u32 __iomem *)(((u8 *)ehci->regs) + TXFILLTUNING));
}
if (retval)
return retval;
if (ehci_is_TDI(ehci))
tdi_reset (ehci);
if (ehci->debug)
dbgp_external_startup();
return retval;
}
@ -505,9 +520,14 @@ static int ehci_init(struct usb_hcd *hcd)
u32 temp;
int retval;
u32 hcc_params;
struct ehci_qh_hw *hw;
spin_lock_init(&ehci->lock);
/*
* keep the I/O watchdog by default; well-behaved HCDs can turn it off later
*/
ehci->need_io_watchdog = 1;
init_timer(&ehci->watchdog);
ehci->watchdog.function = ehci_watchdog;
ehci->watchdog.data = (unsigned long) ehci;
@ -544,12 +564,13 @@ static int ehci_init(struct usb_hcd *hcd)
* from automatically advancing to the next td after short reads.
*/
ehci->async->qh_next.qh = NULL;
ehci->async->hw_next = QH_NEXT(ehci, ehci->async->qh_dma);
ehci->async->hw_info1 = cpu_to_hc32(ehci, QH_HEAD);
ehci->async->hw_token = cpu_to_hc32(ehci, QTD_STS_HALT);
ehci->async->hw_qtd_next = EHCI_LIST_END(ehci);
hw = ehci->async->hw;
hw->hw_next = QH_NEXT(ehci, ehci->async->qh_dma);
hw->hw_info1 = cpu_to_hc32(ehci, QH_HEAD);
hw->hw_token = cpu_to_hc32(ehci, QTD_STS_HALT);
hw->hw_qtd_next = EHCI_LIST_END(ehci);
ehci->async->qh_state = QH_STATE_LINKED;
ehci->async->hw_alt_next = QTD_NEXT(ehci, ehci->async->dummy->qtd_dma);
hw->hw_alt_next = QTD_NEXT(ehci, ehci->async->dummy->qtd_dma);
/* clear interrupt enables, set irq latency */
if (log2_irq_thresh < 0 || log2_irq_thresh > 6)
@ -850,12 +871,18 @@ static void unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
if (!HC_IS_RUNNING(ehci_to_hcd(ehci)->state) && ehci->reclaim)
end_unlink_async(ehci);
/* if it's not linked then there's nothing to do */
if (qh->qh_state != QH_STATE_LINKED)
;
/* If the QH isn't linked then there's nothing we can do
* unless we were called during a giveback, in which case
* qh_completions() has to deal with it.
*/
if (qh->qh_state != QH_STATE_LINKED) {
if (qh->qh_state == QH_STATE_COMPLETING)
qh->needs_rescan = 1;
return;
}
/* defer till later if busy */
else if (ehci->reclaim) {
if (ehci->reclaim) {
struct ehci_qh *last;
for (last = ehci->reclaim;
@ -915,8 +942,9 @@ static int ehci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
break;
switch (qh->qh_state) {
case QH_STATE_LINKED:
case QH_STATE_COMPLETING:
intr_deschedule (ehci, qh);
/* FALL THROUGH */
break;
case QH_STATE_IDLE:
qh_completions (ehci, qh);
break;
@ -925,23 +953,6 @@ static int ehci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
qh, qh->qh_state);
goto done;
}
/* reschedule QH iff another request is queued */
if (!list_empty (&qh->qtd_list)
&& HC_IS_RUNNING (hcd->state)) {
rc = qh_schedule(ehci, qh);
/* An error here likely indicates handshake failure
* or no space left in the schedule. Neither fault
* should happen often ...
*
* FIXME kill the now-dysfunctional queued urbs
*/
if (rc != 0)
ehci_err(ehci,
"can't reschedule qh %p, err %d",
qh, rc);
}
break;
case PIPE_ISOCHRONOUS:
@ -979,7 +990,7 @@ rescan:
/* endpoints can be iso streams. for now, we don't
* accelerate iso completions ... so spin a while.
*/
if (qh->hw_info1 == 0) {
if (qh->hw->hw_info1 == 0) {
ehci_vdbg (ehci, "iso delay\n");
goto idle_timeout;
}
@ -988,6 +999,7 @@ rescan:
qh->qh_state = QH_STATE_IDLE;
switch (qh->qh_state) {
case QH_STATE_LINKED:
case QH_STATE_COMPLETING:
for (tmp = ehci->async->qh_next.qh;
tmp && tmp != qh;
tmp = tmp->qh_next.qh)
@ -1052,18 +1064,17 @@ ehci_endpoint_reset(struct usb_hcd *hcd, struct usb_host_endpoint *ep)
usb_settoggle(qh->dev, epnum, is_out, 0);
if (!list_empty(&qh->qtd_list)) {
WARN_ONCE(1, "clear_halt for a busy endpoint\n");
} else if (qh->qh_state == QH_STATE_LINKED) {
} else if (qh->qh_state == QH_STATE_LINKED ||
qh->qh_state == QH_STATE_COMPLETING) {
/* The toggle value in the QH can't be updated
* while the QH is active. Unlink it now;
* re-linking will call qh_refresh().
*/
if (eptype == USB_ENDPOINT_XFER_BULK) {
if (eptype == USB_ENDPOINT_XFER_BULK)
unlink_async(ehci, qh);
} else {
else
intr_deschedule(ehci, qh);
(void) qh_schedule(ehci, qh);
}
}
}
spin_unlock_irqrestore(&ehci->lock, flags);
@ -1117,6 +1128,16 @@ MODULE_LICENSE ("GPL");
#define PLATFORM_DRIVER ixp4xx_ehci_driver
#endif
#ifdef CONFIG_USB_W90X900_EHCI
#include "ehci-w90x900.c"
#define PLATFORM_DRIVER ehci_hcd_w90x900_driver
#endif
#ifdef CONFIG_ARCH_AT91
#include "ehci-atmel.c"
#define PLATFORM_DRIVER ehci_atmel_driver
#endif
#if !defined(PCI_DRIVER) && !defined(PLATFORM_DRIVER) && \
!defined(PS3_SYSTEM_BUS_DRIVER) && !defined(OF_PLATFORM_DRIVER)
#error "missing bus glue for ehci-hcd"

View File

@ -111,6 +111,7 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
struct ehci_hcd *ehci = hcd_to_ehci (hcd);
int port;
int mask;
u32 __iomem *hostpc_reg = NULL;
ehci_dbg(ehci, "suspend root hub\n");
@ -142,6 +143,9 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
u32 t1 = ehci_readl(ehci, reg) & ~PORT_RWC_BITS;
u32 t2 = t1;
if (ehci->has_hostpc)
hostpc_reg = (u32 __iomem *)((u8 *)ehci->regs
+ HOSTPC0 + 4 * (port & 0xff));
/* keep track of which ports we suspend */
if (t1 & PORT_OWNER)
set_bit(port, &ehci->owned_ports);
@ -151,15 +155,37 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
}
/* enable remote wakeup on all ports */
if (hcd->self.root_hub->do_remote_wakeup)
t2 |= PORT_WAKE_BITS;
else
if (hcd->self.root_hub->do_remote_wakeup) {
/* only enable the appropriate wake bits; otherwise the
 * hardware cannot enter PHY low-power mode.  If a race
 * happens here (a connection change while the bits are
 * being set), port change detection will eventually fix it.
*/
if (t1 & PORT_CONNECT) {
t2 |= PORT_WKOC_E | PORT_WKDISC_E;
t2 &= ~PORT_WKCONN_E;
} else {
t2 |= PORT_WKOC_E | PORT_WKCONN_E;
t2 &= ~PORT_WKDISC_E;
}
} else
t2 &= ~PORT_WAKE_BITS;
if (t1 != t2) {
ehci_vdbg (ehci, "port %d, %08x -> %08x\n",
port + 1, t1, t2);
ehci_writel(ehci, t2, reg);
if (hostpc_reg) {
u32 t3;
msleep(5);	/* give the HCD 5 ms to enter low-power mode */
t3 = ehci_readl(ehci, hostpc_reg);
ehci_writel(ehci, t3 | HOSTPC_PHCD, hostpc_reg);
t3 = ehci_readl(ehci, hostpc_reg);
ehci_dbg(ehci, "Port%d phy low pwr mode %s\n",
port, (t3 & HOSTPC_PHCD) ?
"succeeded" : "failed");
}
}
}
@ -183,6 +209,11 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
ehci->next_statechange = jiffies + msecs_to_jiffies(10);
spin_unlock_irq (&ehci->lock);
/* ehci_work() may have re-enabled the watchdog timer, which we do not
* want, and so we must delete any pending watchdog timer events.
*/
del_timer_sync(&ehci->watchdog);
return 0;
}
@ -204,6 +235,13 @@ static int ehci_bus_resume (struct usb_hcd *hcd)
return -ESHUTDOWN;
}
if (unlikely(ehci->debug)) {
if (ehci->debug && !dbgp_reset_prep())
ehci->debug = NULL;
else
dbgp_external_startup();
}
/* Ideally we've got a real resume here, and no port's power
* was lost. (For PCI, that means Vaux was maintained.) But we
* could instead be restoring a swsusp snapshot -- so that BIOS was
@ -563,7 +601,8 @@ static int ehci_hub_control (
int ports = HCS_N_PORTS (ehci->hcs_params);
u32 __iomem *status_reg = &ehci->regs->port_status[
(wIndex & 0xff) - 1];
u32 temp, status;
u32 __iomem *hostpc_reg = NULL;
u32 temp, temp1, status;
unsigned long flags;
int retval = 0;
unsigned selector;
@ -575,6 +614,9 @@ static int ehci_hub_control (
* power, "this is the one", etc. EHCI spec supports this.
*/
if (ehci->has_hostpc)
hostpc_reg = (u32 __iomem *)((u8 *)ehci->regs
+ HOSTPC0 + 4 * ((wIndex & 0xff) - 1));
spin_lock_irqsave (&ehci->lock, flags);
switch (typeReq) {
case ClearHubFeature:
@ -773,7 +815,11 @@ static int ehci_hub_control (
if (temp & PORT_CONNECT) {
status |= 1 << USB_PORT_FEAT_CONNECTION;
// status may be from integrated TT
status |= ehci_port_speed(ehci, temp);
if (ehci->has_hostpc) {
temp1 = ehci_readl(ehci, hostpc_reg);
status |= ehci_port_speed(ehci, temp1);
} else
status |= ehci_port_speed(ehci, temp);
}
if (temp & PORT_PE)
status |= 1 << USB_PORT_FEAT_ENABLE;
@ -816,6 +862,15 @@ static int ehci_hub_control (
case SetPortFeature:
selector = wIndex >> 8;
wIndex &= 0xff;
if (unlikely(ehci->debug)) {
/* If the debug port is active, any port
 * feature request should be denied */
if (wIndex == HCS_DEBUG_PORT(ehci->hcs_params) &&
(readl(&ehci->debug->control) & DBGP_ENABLED)) {
retval = -ENODEV;
goto error_exit;
}
}
if (!wIndex || wIndex > ports)
goto error;
wIndex--;
@ -832,6 +887,24 @@ static int ehci_hub_control (
|| (temp & PORT_RESET) != 0)
goto error;
ehci_writel(ehci, temp | PORT_SUSPEND, status_reg);
/* After the above check the port must be connected.
 * Set the appropriate bits so the PHY can enter low-power
 * mode if we have the hostpc feature.
*/
if (hostpc_reg) {
temp &= ~PORT_WKCONN_E;
temp |= (PORT_WKDISC_E | PORT_WKOC_E);
ehci_writel(ehci, temp | PORT_SUSPEND,
status_reg);
msleep(5);	/* give the HCD 5 ms to enter low-power mode */
temp1 = ehci_readl(ehci, hostpc_reg);
ehci_writel(ehci, temp1 | HOSTPC_PHCD,
hostpc_reg);
temp1 = ehci_readl(ehci, hostpc_reg);
ehci_dbg(ehci, "Port%d phy low pwr mode %s\n",
wIndex, (temp1 & HOSTPC_PHCD) ?
"succeeded" : "failed");
}
set_bit(wIndex, &ehci->suspended_ports);
break;
case USB_PORT_FEAT_POWER:
@ -894,6 +967,7 @@ error:
/* "stall" on error */
retval = -EPIPE;
}
error_exit:
spin_unlock_irqrestore (&ehci->lock, flags);
return retval;
}

View File

@ -75,7 +75,8 @@ static void qh_destroy(struct ehci_qh *qh)
}
if (qh->dummy)
ehci_qtd_free (ehci, qh->dummy);
dma_pool_free (ehci->qh_pool, qh, qh->qh_dma);
dma_pool_free(ehci->qh_pool, qh->hw, qh->qh_dma);
kfree(qh);
}
static struct ehci_qh *ehci_qh_alloc (struct ehci_hcd *ehci, gfp_t flags)
@ -83,12 +84,14 @@ static struct ehci_qh *ehci_qh_alloc (struct ehci_hcd *ehci, gfp_t flags)
struct ehci_qh *qh;
dma_addr_t dma;
qh = (struct ehci_qh *)
dma_pool_alloc (ehci->qh_pool, flags, &dma);
qh = kzalloc(sizeof *qh, GFP_ATOMIC);
if (!qh)
return qh;
memset (qh, 0, sizeof *qh);
goto done;
qh->hw = (struct ehci_qh_hw *)
dma_pool_alloc(ehci->qh_pool, flags, &dma);
if (!qh->hw)
goto fail;
memset(qh->hw, 0, sizeof *qh->hw);
qh->refcount = 1;
qh->ehci = ehci;
qh->qh_dma = dma;
@ -99,10 +102,15 @@ static struct ehci_qh *ehci_qh_alloc (struct ehci_hcd *ehci, gfp_t flags)
qh->dummy = ehci_qtd_alloc (ehci, flags);
if (qh->dummy == NULL) {
ehci_dbg (ehci, "no dummy td\n");
dma_pool_free (ehci->qh_pool, qh, qh->qh_dma);
qh = NULL;
goto fail1;
}
done:
return qh;
fail1:
dma_pool_free(ehci->qh_pool, qh->hw, qh->qh_dma);
fail:
kfree(qh);
return NULL;
}
/* to share a qh (cpu threads, or hc) */
@ -180,7 +188,7 @@ static int ehci_mem_init (struct ehci_hcd *ehci, gfp_t flags)
/* QHs for control/bulk/intr transfers */
ehci->qh_pool = dma_pool_create ("ehci_qh",
ehci_to_hcd(ehci)->self.controller,
sizeof (struct ehci_qh),
sizeof(struct ehci_qh_hw),
32 /* byte alignment (for hw parts) */,
4096 /* can't cross 4K */);
if (!ehci->qh_pool) {

View File

@ -27,28 +27,8 @@
/* called after powerup, by probe or system-pm "wakeup" */
static int ehci_pci_reinit(struct ehci_hcd *ehci, struct pci_dev *pdev)
{
u32 temp;
int retval;
/* optional debug port, normally in the first BAR */
temp = pci_find_capability(pdev, 0x0a);
if (temp) {
pci_read_config_dword(pdev, temp, &temp);
temp >>= 16;
if ((temp & (3 << 13)) == (1 << 13)) {
temp &= 0x1fff;
ehci->debug = ehci_to_hcd(ehci)->regs + temp;
temp = ehci_readl(ehci, &ehci->debug->control);
ehci_info(ehci, "debug port %d%s\n",
HCS_DEBUG_PORT(ehci->hcs_params),
(temp & DBGP_ENABLED)
? " IN USE"
: "");
if (!(temp & DBGP_ENABLED))
ehci->debug = NULL;
}
}
/* we expect static quirk code to handle the "extended capabilities"
* (currently just BIOS handoff) allowed starting with EHCI 0.96
*/
@ -129,6 +109,9 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
return retval;
switch (pdev->vendor) {
case PCI_VENDOR_ID_INTEL:
ehci->need_io_watchdog = 0;
break;
case PCI_VENDOR_ID_TDI:
if (pdev->device == PCI_DEVICE_ID_TDI_EHCI) {
hcd->has_tt = 1;
@ -192,6 +175,25 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
break;
}
/* optional debug port, normally in the first BAR */
temp = pci_find_capability(pdev, 0x0a);
if (temp) {
pci_read_config_dword(pdev, temp, &temp);
temp >>= 16;
if ((temp & (3 << 13)) == (1 << 13)) {
temp &= 0x1fff;
ehci->debug = ehci_to_hcd(ehci)->regs + temp;
temp = ehci_readl(ehci, &ehci->debug->control);
ehci_info(ehci, "debug port %d%s\n",
HCS_DEBUG_PORT(ehci->hcs_params),
(temp & DBGP_ENABLED)
? " IN USE"
: "");
if (!(temp & DBGP_ENABLED))
ehci->debug = NULL;
}
}
ehci_reset(ehci);
/* at least the Genesys GL880S needs fixup here */

View File

@ -87,31 +87,33 @@ qtd_fill(struct ehci_hcd *ehci, struct ehci_qtd *qtd, dma_addr_t buf,
static inline void
qh_update (struct ehci_hcd *ehci, struct ehci_qh *qh, struct ehci_qtd *qtd)
{
struct ehci_qh_hw *hw = qh->hw;
/* writes to an active overlay are unsafe */
BUG_ON(qh->qh_state != QH_STATE_IDLE);
qh->hw_qtd_next = QTD_NEXT(ehci, qtd->qtd_dma);
qh->hw_alt_next = EHCI_LIST_END(ehci);
hw->hw_qtd_next = QTD_NEXT(ehci, qtd->qtd_dma);
hw->hw_alt_next = EHCI_LIST_END(ehci);
/* Except for control endpoints, we make hardware maintain data
* toggle (like OHCI) ... here (re)initialize the toggle in the QH,
* and set the pseudo-toggle in udev. Only usb_clear_halt() will
* ever clear it.
*/
if (!(qh->hw_info1 & cpu_to_hc32(ehci, 1 << 14))) {
if (!(hw->hw_info1 & cpu_to_hc32(ehci, 1 << 14))) {
unsigned is_out, epnum;
is_out = !(qtd->hw_token & cpu_to_hc32(ehci, 1 << 8));
epnum = (hc32_to_cpup(ehci, &qh->hw_info1) >> 8) & 0x0f;
epnum = (hc32_to_cpup(ehci, &hw->hw_info1) >> 8) & 0x0f;
if (unlikely (!usb_gettoggle (qh->dev, epnum, is_out))) {
qh->hw_token &= ~cpu_to_hc32(ehci, QTD_TOGGLE);
hw->hw_token &= ~cpu_to_hc32(ehci, QTD_TOGGLE);
usb_settoggle (qh->dev, epnum, is_out, 1);
}
}
/* HC must see latest qtd and qh data before we clear ACTIVE+HALT */
wmb ();
qh->hw_token &= cpu_to_hc32(ehci, QTD_TOGGLE | QTD_STS_PING);
hw->hw_token &= cpu_to_hc32(ehci, QTD_TOGGLE | QTD_STS_PING);
}
/* if it weren't for a common silicon quirk (writing the dummy into the qh
@ -129,7 +131,7 @@ qh_refresh (struct ehci_hcd *ehci, struct ehci_qh *qh)
qtd = list_entry (qh->qtd_list.next,
struct ehci_qtd, qtd_list);
/* first qtd may already be partially processed */
if (cpu_to_hc32(ehci, qtd->qtd_dma) == qh->hw_current)
if (cpu_to_hc32(ehci, qtd->qtd_dma) == qh->hw->hw_current)
qtd = NULL;
}
@ -260,7 +262,7 @@ __acquires(ehci->lock)
struct ehci_qh *qh = (struct ehci_qh *) urb->hcpriv;
/* S-mask in a QH means it's an interrupt urb */
if ((qh->hw_info2 & cpu_to_hc32(ehci, QH_SMASK)) != 0) {
if ((qh->hw->hw_info2 & cpu_to_hc32(ehci, QH_SMASK)) != 0) {
/* ... update hc-wide periodic stats (for usbfs) */
ehci_to_hcd(ehci)->self.bandwidth_int_reqs--;
@ -297,7 +299,6 @@ __acquires(ehci->lock)
static void start_unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh);
static void unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh);
static void intr_deschedule (struct ehci_hcd *ehci, struct ehci_qh *qh);
static int qh_schedule (struct ehci_hcd *ehci, struct ehci_qh *qh);
/*
@ -308,13 +309,14 @@ static int qh_schedule (struct ehci_hcd *ehci, struct ehci_qh *qh);
static unsigned
qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
struct ehci_qtd *last = NULL, *end = qh->dummy;
struct ehci_qtd *last, *end = qh->dummy;
struct list_head *entry, *tmp;
int last_status = -EINPROGRESS;
int last_status;
int stopped;
unsigned count = 0;
u8 state;
__le32 halt = HALT_BIT(ehci);
const __le32 halt = HALT_BIT(ehci);
struct ehci_qh_hw *hw = qh->hw;
if (unlikely (list_empty (&qh->qtd_list)))
return count;
@ -324,11 +326,20 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
* they add urbs to this qh's queue or mark them for unlinking.
*
* NOTE: unlinking expects to be done in queue order.
*
* It's a bug for qh->qh_state to be anything other than
* QH_STATE_IDLE, unless our caller is scan_async() or
* scan_periodic().
*/
state = qh->qh_state;
qh->qh_state = QH_STATE_COMPLETING;
stopped = (state == QH_STATE_IDLE);
rescan:
last = NULL;
last_status = -EINPROGRESS;
qh->needs_rescan = 0;
/* remove de-activated QTDs from front of queue.
* after faults (including short reads), cleanup this urb
* then let the queue advance.
@ -392,7 +403,8 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
qtd->hw_token = cpu_to_hc32(ehci,
token);
wmb();
qh->hw_token = cpu_to_hc32(ehci, token);
hw->hw_token = cpu_to_hc32(ehci,
token);
goto retry_xacterr;
}
stopped = 1;
@ -435,8 +447,8 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
/* qh unlinked; token in overlay may be most current */
if (state == QH_STATE_IDLE
&& cpu_to_hc32(ehci, qtd->qtd_dma)
== qh->hw_current) {
token = hc32_to_cpu(ehci, qh->hw_token);
== hw->hw_current) {
token = hc32_to_cpu(ehci, hw->hw_token);
/* An unlink may leave an incomplete
* async transaction in the TT buffer.
@ -449,9 +461,9 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
* patch the qh later and so that completions can't
* activate it while we "know" it's stopped.
*/
if ((halt & qh->hw_token) == 0) {
if ((halt & hw->hw_token) == 0) {
halt:
qh->hw_token |= halt;
hw->hw_token |= halt;
wmb ();
}
}
@ -503,6 +515,21 @@ halt:
ehci_qtd_free (ehci, last);
}
/* Do we need to rescan for URBs dequeued during a giveback? */
if (unlikely(qh->needs_rescan)) {
/* If the QH is already unlinked, do the rescan now. */
if (state == QH_STATE_IDLE)
goto rescan;
/* Otherwise we have to wait until the QH is fully unlinked.
* Our caller will start an unlink if qh->needs_rescan is
* set. But if an unlink has already started, nothing needs
* to be done.
*/
if (state != QH_STATE_LINKED)
qh->needs_rescan = 0;
}
/* restore original state; caller must unlink or relink */
qh->qh_state = state;
@ -510,7 +537,7 @@ halt:
* it after fault cleanup, or recovering from silicon wrongly
* overlaying the dummy qtd (which reduces DMA chatter).
*/
if (stopped != 0 || qh->hw_qtd_next == EHCI_LIST_END(ehci)) {
if (stopped != 0 || hw->hw_qtd_next == EHCI_LIST_END(ehci)) {
switch (state) {
case QH_STATE_IDLE:
qh_refresh(ehci, qh);
@ -527,12 +554,9 @@ halt:
* That should be rare for interrupt transfers,
* except maybe high bandwidth ...
*/
if ((cpu_to_hc32(ehci, QH_SMASK)
& qh->hw_info2) != 0) {
intr_deschedule (ehci, qh);
(void) qh_schedule (ehci, qh);
} else
unlink_async (ehci, qh);
/* Tell the caller to start an unlink */
qh->needs_rescan = 1;
break;
/* otherwise, unlink already started */
}
@ -649,7 +673,7 @@ qh_urb_transaction (
* (this will usually be overridden later.)
*/
if (is_input)
qtd->hw_alt_next = ehci->async->hw_alt_next;
qtd->hw_alt_next = ehci->async->hw->hw_alt_next;
/* qh makes control packets use qtd toggle; maybe switch it */
if ((maxpacket & (this_qtd_len + (maxpacket - 1))) == 0)
@ -744,6 +768,7 @@ qh_make (
int is_input, type;
int maxp = 0;
struct usb_tt *tt = urb->dev->tt;
struct ehci_qh_hw *hw;
if (!qh)
return qh;
@ -890,8 +915,9 @@ done:
/* init as live, toggle clear, advance to dummy */
qh->qh_state = QH_STATE_IDLE;
qh->hw_info1 = cpu_to_hc32(ehci, info1);
qh->hw_info2 = cpu_to_hc32(ehci, info2);
hw = qh->hw;
hw->hw_info1 = cpu_to_hc32(ehci, info1);
hw->hw_info2 = cpu_to_hc32(ehci, info2);
usb_settoggle (urb->dev, usb_pipeendpoint (urb->pipe), !is_input, 1);
qh_refresh (ehci, qh);
return qh;
@ -910,6 +936,8 @@ static void qh_link_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
if (unlikely(qh->clearing_tt))
return;
WARN_ON(qh->qh_state != QH_STATE_IDLE);
/* (re)start the async schedule? */
head = ehci->async;
timer_action_done (ehci, TIMER_ASYNC_OFF);
@ -928,16 +956,15 @@ static void qh_link_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
}
/* clear halt and/or toggle; and maybe recover from silicon quirk */
if (qh->qh_state == QH_STATE_IDLE)
qh_refresh (ehci, qh);
qh_refresh(ehci, qh);
/* splice right after start */
qh->qh_next = head->qh_next;
qh->hw_next = head->hw_next;
qh->hw->hw_next = head->hw->hw_next;
wmb ();
head->qh_next.qh = qh;
head->hw_next = dma;
head->hw->hw_next = dma;
qh_get(qh);
qh->xacterrs = 0;
@ -984,7 +1011,7 @@ static struct ehci_qh *qh_append_tds (
/* usb_reset_device() briefly reverts to address 0 */
if (usb_pipedevice (urb->pipe) == 0)
qh->hw_info1 &= ~qh_addr_mask;
qh->hw->hw_info1 &= ~qh_addr_mask;
}
/* just one way to queue requests: swap with the dummy qtd.
@ -1169,7 +1196,7 @@ static void start_unlink_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
while (prev->qh_next.qh != qh)
prev = prev->qh_next.qh;
prev->hw_next = qh->hw_next;
prev->hw->hw_next = qh->hw->hw_next;
prev->qh_next = qh->qh_next;
wmb ();
@ -1214,6 +1241,8 @@ rescan:
qh = qh_get (qh);
qh->stamp = ehci->stamp;
temp = qh_completions (ehci, qh);
if (qh->needs_rescan)
unlink_async(ehci, qh);
qh_put (qh);
if (temp != 0) {
goto rescan;

View File

@ -60,6 +60,20 @@ periodic_next_shadow(struct ehci_hcd *ehci, union ehci_shadow *periodic,
}
}
static __hc32 *
shadow_next_periodic(struct ehci_hcd *ehci, union ehci_shadow *periodic,
__hc32 tag)
{
switch (hc32_to_cpu(ehci, tag)) {
/* our ehci_shadow.qh is actually the software part */
case Q_TYPE_QH:
return &periodic->qh->hw->hw_next;
/* others are hw parts */
default:
return periodic->hw_next;
}
}
/* caller must hold ehci->lock */
static void periodic_unlink (struct ehci_hcd *ehci, unsigned frame, void *ptr)
{
@ -71,7 +85,8 @@ static void periodic_unlink (struct ehci_hcd *ehci, unsigned frame, void *ptr)
while (here.ptr && here.ptr != ptr) {
prev_p = periodic_next_shadow(ehci, prev_p,
Q_NEXT_TYPE(ehci, *hw_p));
hw_p = here.hw_next;
hw_p = shadow_next_periodic(ehci, &here,
Q_NEXT_TYPE(ehci, *hw_p));
here = *prev_p;
}
/* an interrupt entry (at list end) could have been shared */
@ -83,7 +98,7 @@ static void periodic_unlink (struct ehci_hcd *ehci, unsigned frame, void *ptr)
*/
*prev_p = *periodic_next_shadow(ehci, &here,
Q_NEXT_TYPE(ehci, *hw_p));
*hw_p = *here.hw_next;
*hw_p = *shadow_next_periodic(ehci, &here, Q_NEXT_TYPE(ehci, *hw_p));
}
/* how many of the uframe's 125 usecs are allocated? */
@ -93,18 +108,20 @@ periodic_usecs (struct ehci_hcd *ehci, unsigned frame, unsigned uframe)
__hc32 *hw_p = &ehci->periodic [frame];
union ehci_shadow *q = &ehci->pshadow [frame];
unsigned usecs = 0;
struct ehci_qh_hw *hw;
while (q->ptr) {
switch (hc32_to_cpu(ehci, Q_NEXT_TYPE(ehci, *hw_p))) {
case Q_TYPE_QH:
hw = q->qh->hw;
/* is it in the S-mask? */
if (q->qh->hw_info2 & cpu_to_hc32(ehci, 1 << uframe))
if (hw->hw_info2 & cpu_to_hc32(ehci, 1 << uframe))
usecs += q->qh->usecs;
/* ... or C-mask? */
if (q->qh->hw_info2 & cpu_to_hc32(ehci,
if (hw->hw_info2 & cpu_to_hc32(ehci,
1 << (8 + uframe)))
usecs += q->qh->c_usecs;
hw_p = &q->qh->hw_next;
hw_p = &hw->hw_next;
q = &q->qh->qh_next;
break;
// case Q_TYPE_FSTN:
@ -237,10 +254,10 @@ periodic_tt_usecs (
continue;
case Q_TYPE_QH:
if (same_tt(dev, q->qh->dev)) {
uf = tt_start_uframe(ehci, q->qh->hw_info2);
uf = tt_start_uframe(ehci, q->qh->hw->hw_info2);
tt_usecs[uf] += q->qh->tt_usecs;
}
hw_p = &q->qh->hw_next;
hw_p = &q->qh->hw->hw_next;
q = &q->qh->qh_next;
continue;
case Q_TYPE_SITD:
@ -375,6 +392,7 @@ static int tt_no_collision (
for (; frame < ehci->periodic_size; frame += period) {
union ehci_shadow here;
__hc32 type;
struct ehci_qh_hw *hw;
here = ehci->pshadow [frame];
type = Q_NEXT_TYPE(ehci, ehci->periodic [frame]);
@ -385,17 +403,18 @@ static int tt_no_collision (
here = here.itd->itd_next;
continue;
case Q_TYPE_QH:
hw = here.qh->hw;
if (same_tt (dev, here.qh->dev)) {
u32 mask;
mask = hc32_to_cpu(ehci,
here.qh->hw_info2);
hw->hw_info2);
/* "knows" no gap is needed */
mask |= mask >> 8;
if (mask & uf_mask)
break;
}
type = Q_NEXT_TYPE(ehci, here.qh->hw_next);
type = Q_NEXT_TYPE(ehci, hw->hw_next);
here = here.qh->qh_next;
continue;
case Q_TYPE_SITD:
@ -498,7 +517,8 @@ static int qh_link_periodic (struct ehci_hcd *ehci, struct ehci_qh *qh)
dev_dbg (&qh->dev->dev,
"link qh%d-%04x/%p start %d [%d/%d us]\n",
period, hc32_to_cpup(ehci, &qh->hw_info2) & (QH_CMASK | QH_SMASK),
period, hc32_to_cpup(ehci, &qh->hw->hw_info2)
& (QH_CMASK | QH_SMASK),
qh, qh->start, qh->usecs, qh->c_usecs);
/* high bandwidth, or otherwise every microframe */
@ -517,7 +537,7 @@ static int qh_link_periodic (struct ehci_hcd *ehci, struct ehci_qh *qh)
if (type == cpu_to_hc32(ehci, Q_TYPE_QH))
break;
prev = periodic_next_shadow(ehci, prev, type);
hw_p = &here.qh->hw_next;
hw_p = shadow_next_periodic(ehci, &here, type);
here = *prev;
}
@ -528,14 +548,14 @@ static int qh_link_periodic (struct ehci_hcd *ehci, struct ehci_qh *qh)
if (qh->period > here.qh->period)
break;
prev = &here.qh->qh_next;
hw_p = &here.qh->hw_next;
hw_p = &here.qh->hw->hw_next;
here = *prev;
}
/* link in this qh, unless some earlier pass did that */
if (qh != here.qh) {
qh->qh_next = here;
if (here.qh)
qh->hw_next = *hw_p;
qh->hw->hw_next = *hw_p;
wmb ();
prev->qh = qh;
*hw_p = QH_NEXT (ehci, qh->qh_dma);
@ -581,7 +601,7 @@ static int qh_unlink_periodic(struct ehci_hcd *ehci, struct ehci_qh *qh)
dev_dbg (&qh->dev->dev,
"unlink qh%d-%04x/%p start %d [%d/%d us]\n",
qh->period,
hc32_to_cpup(ehci, &qh->hw_info2) & (QH_CMASK | QH_SMASK),
hc32_to_cpup(ehci, &qh->hw->hw_info2) & (QH_CMASK | QH_SMASK),
qh, qh->start, qh->usecs, qh->c_usecs);
/* qh->qh_next still "live" to HC */
@ -595,7 +615,19 @@ static int qh_unlink_periodic(struct ehci_hcd *ehci, struct ehci_qh *qh)
static void intr_deschedule (struct ehci_hcd *ehci, struct ehci_qh *qh)
{
unsigned wait;
unsigned wait;
struct ehci_qh_hw *hw = qh->hw;
int rc;
/* If the QH isn't linked then there's nothing we can do
* unless we were called during a giveback, in which case
* qh_completions() has to deal with it.
*/
if (qh->qh_state != QH_STATE_LINKED) {
if (qh->qh_state == QH_STATE_COMPLETING)
qh->needs_rescan = 1;
return;
}
qh_unlink_periodic (ehci, qh);
@ -606,15 +638,33 @@ static void intr_deschedule (struct ehci_hcd *ehci, struct ehci_qh *qh)
*/
if (list_empty (&qh->qtd_list)
|| (cpu_to_hc32(ehci, QH_CMASK)
& qh->hw_info2) != 0)
& hw->hw_info2) != 0)
wait = 2;
else
wait = 55; /* worst case: 3 * 1024 */
udelay (wait);
qh->qh_state = QH_STATE_IDLE;
qh->hw_next = EHCI_LIST_END(ehci);
hw->hw_next = EHCI_LIST_END(ehci);
wmb ();
qh_completions(ehci, qh);
/* reschedule QH iff another request is queued */
if (!list_empty(&qh->qtd_list) &&
HC_IS_RUNNING(ehci_to_hcd(ehci)->state)) {
rc = qh_schedule(ehci, qh);
/* An error here likely indicates handshake failure
* or no space left in the schedule. Neither fault
* should happen often ...
*
* FIXME kill the now-dysfunctional queued urbs
*/
if (rc != 0)
ehci_err(ehci, "can't reschedule qh %p, err %d\n",
qh, rc);
}
}
/*-------------------------------------------------------------------------*/
@ -739,14 +789,15 @@ static int qh_schedule(struct ehci_hcd *ehci, struct ehci_qh *qh)
unsigned uframe;
__hc32 c_mask;
unsigned frame; /* 0..(qh->period - 1), or NO_FRAME */
struct ehci_qh_hw *hw = qh->hw;
qh_refresh(ehci, qh);
qh->hw_next = EHCI_LIST_END(ehci);
hw->hw_next = EHCI_LIST_END(ehci);
frame = qh->start;
/* reuse the previous schedule slots, if we can */
if (frame < qh->period) {
uframe = ffs(hc32_to_cpup(ehci, &qh->hw_info2) & QH_SMASK);
uframe = ffs(hc32_to_cpup(ehci, &hw->hw_info2) & QH_SMASK);
status = check_intr_schedule (ehci, frame, --uframe,
qh, &c_mask);
} else {
@ -784,11 +835,11 @@ static int qh_schedule(struct ehci_hcd *ehci, struct ehci_qh *qh)
qh->start = frame;
/* reset S-frame and (maybe) C-frame masks */
qh->hw_info2 &= cpu_to_hc32(ehci, ~(QH_CMASK | QH_SMASK));
qh->hw_info2 |= qh->period
hw->hw_info2 &= cpu_to_hc32(ehci, ~(QH_CMASK | QH_SMASK));
hw->hw_info2 |= qh->period
? cpu_to_hc32(ehci, 1 << uframe)
: cpu_to_hc32(ehci, QH_SMASK);
qh->hw_info2 |= c_mask;
hw->hw_info2 |= c_mask;
} else
ehci_dbg (ehci, "reused qh %p schedule\n", qh);
@ -2188,10 +2239,11 @@ restart:
case Q_TYPE_QH:
/* handle any completions */
temp.qh = qh_get (q.qh);
type = Q_NEXT_TYPE(ehci, q.qh->hw_next);
type = Q_NEXT_TYPE(ehci, q.qh->hw->hw_next);
q = q.qh->qh_next;
modified = qh_completions (ehci, temp.qh);
if (unlikely (list_empty (&temp.qh->qtd_list)))
if (unlikely(list_empty(&temp.qh->qtd_list) ||
temp.qh->needs_rescan))
intr_deschedule (ehci, temp.qh);
qh_put (temp.qh);
break;
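The tt_no_collision() hunk near the top of this file leans on a compact bit trick: hw_info2 keeps the interrupt S-mask in bits 7:0 and the split-completion C-mask in bits 15:8, so "mask |= mask >> 8" folds both onto the low byte, and a single AND against the per-microframe mask (uf_mask) reports traffic in either the start-split or the complete-split microframes. A minimal standalone sketch of that check, using plain integer types and a hypothetical helper name instead of the driver's __hc32 plumbing:

#include <stdint.h>
#include <stdbool.h>

/* hw_info2 layout (EHCI 3.6.2): bits 7:0 hold the interrupt S-mask,
 * bits 15:8 hold the split-completion C-mask.
 */
static bool qh_touches_uframe(uint32_t hw_info2, uint8_t uf_mask)
{
        uint32_t mask = hw_info2 & 0xffff;

        mask |= mask >> 8;              /* fold the C-mask down onto the S-mask */
        return (mask & uf_mask) != 0;
}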


@ -0,0 +1,181 @@
/*
* linux/driver/usb/host/ehci-w90x900.c
*
* Copyright (c) 2008 Nuvoton technology corporation.
*
* Wan ZongShun <mcuos.com@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
*/
#include <linux/platform_device.h>
/* enable phy0 and phy1 for w90p910 */
#define ENPHY (0x01<<8)
#define PHY0_CTR (0xA4)
#define PHY1_CTR (0xA8)
static int __devinit usb_w90x900_probe(const struct hc_driver *driver,
struct platform_device *pdev)
{
struct usb_hcd *hcd;
struct ehci_hcd *ehci;
struct resource *res;
int retval = 0, irq;
unsigned long val;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
retval = -ENXIO;
goto err1;
}
hcd = usb_create_hcd(driver, &pdev->dev, "w90x900 EHCI");
if (!hcd) {
retval = -ENOMEM;
goto err1;
}
hcd->rsrc_start = res->start;
hcd->rsrc_len = res->end - res->start + 1;
if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
retval = -EBUSY;
goto err2;
}
hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
if (hcd->regs == NULL) {
retval = -EFAULT;
goto err3;
}
ehci = hcd_to_ehci(hcd);
ehci->caps = hcd->regs;
ehci->regs = hcd->regs +
HC_LENGTH(ehci_readl(ehci, &ehci->caps->hc_capbase));
/* enable PHY 0,1,the regs only apply to w90p910
* 0xA4,0xA8 were offsets of PHY0 and PHY1 controller of
* w90p910 IC relative to ehci->regs.
*/
val = __raw_readl(ehci->regs+PHY0_CTR);
val |= ENPHY;
__raw_writel(val, ehci->regs+PHY0_CTR);
val = __raw_readl(ehci->regs+PHY1_CTR);
val |= ENPHY;
__raw_writel(val, ehci->regs+PHY1_CTR);
ehci->hcs_params = ehci_readl(ehci, &ehci->caps->hcs_params);
ehci->sbrn = 0x20;
irq = platform_get_irq(pdev, 0);
if (irq < 0)
goto err4;
retval = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (retval != 0)
goto err4;
ehci_writel(ehci, 1, &ehci->regs->configured_flag);
return retval;
err4:
iounmap(hcd->regs);
err3:
release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
err2:
usb_put_hcd(hcd);
err1:
return retval;
}
static
void usb_w90x900_remove(struct usb_hcd *hcd, struct platform_device *pdev)
{
usb_remove_hcd(hcd);
iounmap(hcd->regs);
release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
usb_put_hcd(hcd);
}
static const struct hc_driver ehci_w90x900_hc_driver = {
.description = hcd_name,
.product_desc = "Nuvoton w90x900 EHCI Host Controller",
.hcd_priv_size = sizeof(struct ehci_hcd),
/*
* generic hardware linkage
*/
.irq = ehci_irq,
.flags = HCD_USB2|HCD_MEMORY,
/*
* basic lifecycle operations
*/
.reset = ehci_init,
.start = ehci_run,
.stop = ehci_stop,
.shutdown = ehci_shutdown,
/*
* managing i/o requests and associated device resources
*/
.urb_enqueue = ehci_urb_enqueue,
.urb_dequeue = ehci_urb_dequeue,
.endpoint_disable = ehci_endpoint_disable,
/*
* scheduling support
*/
.get_frame_number = ehci_get_frame,
/*
* root hub support
*/
.hub_status_data = ehci_hub_status_data,
.hub_control = ehci_hub_control,
#ifdef CONFIG_PM
.bus_suspend = ehci_bus_suspend,
.bus_resume = ehci_bus_resume,
#endif
.relinquish_port = ehci_relinquish_port,
.port_handed_over = ehci_port_handed_over,
};
static int __devinit ehci_w90x900_probe(struct platform_device *pdev)
{
if (usb_disabled())
return -ENODEV;
return usb_w90x900_probe(&ehci_w90x900_hc_driver, pdev);
}
static int __devexit ehci_w90x900_remove(struct platform_device *pdev)
{
struct usb_hcd *hcd = platform_get_drvdata(pdev);
usb_w90x900_remove(hcd, pdev);
return 0;
}
static struct platform_driver ehci_hcd_w90x900_driver = {
.probe = ehci_w90x900_probe,
.remove = __devexit_p(ehci_w90x900_remove),
.driver = {
.name = "w90x900-ehci",
.owner = THIS_MODULE,
},
};
MODULE_AUTHOR("Wan ZongShun <mcuos.com@gmail.com>");
MODULE_DESCRIPTION("w90p910 usb ehci driver!");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:w90p910-ehci");
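The glue driver above is plain platform-bus code: it expects one memory resource and the first IRQ of the platform device, and it binds by the driver name "w90x900-ehci". A hypothetical board-side registration sketch; the base address and IRQ number below are placeholders, not real w90p910 values:

static struct resource w90x900_ehci_resources[] = {
        [0] = {
                .start  = 0xb0005000,                   /* placeholder base address */
                .end    = 0xb0005000 + 0x1000 - 1,
                .flags  = IORESOURCE_MEM,
        },
        [1] = {
                .start  = 23,                           /* placeholder IRQ number */
                .end    = 23,
                .flags  = IORESOURCE_IRQ,
        },
};

static struct platform_device w90x900_ehci_device = {
        .name           = "w90x900-ehci",
        .id             = -1,
        .num_resources  = ARRAY_SIZE(w90x900_ehci_resources),
        .resource       = w90x900_ehci_resources,
};

/* board init code would then call platform_device_register(&w90x900_ehci_device); */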


@ -126,6 +126,7 @@ struct ehci_hcd { /* one per controller */
unsigned big_endian_mmio:1;
unsigned big_endian_desc:1;
unsigned has_amcc_usb23:1;
unsigned need_io_watchdog:1;
/* required for usb32 quirk */
#define OHCI_CTRL_HCFS (3 << 6)
@ -135,6 +136,7 @@ struct ehci_hcd { /* one per controller */
#define OHCI_HCCTRL_OFFSET 0x4
#define OHCI_HCCTRL_LEN 0x4
__hc32 *ohci_hcctrl_reg;
unsigned has_hostpc:1;
u8 sbrn; /* packed release number */
@ -298,8 +300,8 @@ union ehci_shadow {
* These appear in both the async and (for interrupt) periodic schedules.
*/
struct ehci_qh {
/* first part defined by EHCI spec */
/* first part defined by EHCI spec */
struct ehci_qh_hw {
__hc32 hw_next; /* see EHCI 3.6.1 */
__hc32 hw_info1; /* see EHCI 3.6.2 */
#define QH_HEAD 0x00008000
@ -317,7 +319,10 @@ struct ehci_qh {
__hc32 hw_token;
__hc32 hw_buf [5];
__hc32 hw_buf_hi [5];
} __attribute__ ((aligned(32)));
struct ehci_qh {
struct ehci_qh_hw *hw;
/* the rest is HCD-private */
dma_addr_t qh_dma; /* address of qh */
union ehci_shadow qh_next; /* ptr to qh; or periodic */
@ -336,6 +341,7 @@ struct ehci_qh {
u32 refcount;
unsigned stamp;
u8 needs_rescan; /* Dequeue during giveback */
u8 qh_state;
#define QH_STATE_LINKED 1 /* HC sees this */
#define QH_STATE_UNLINK 2 /* HC may still see this */
@ -357,7 +363,7 @@ struct ehci_qh {
struct usb_device *dev; /* access to TT */
unsigned clearing_tt:1; /* Clear-TT-Buf in progress */
} __attribute__ ((aligned (32)));
};
/*-------------------------------------------------------------------------*/
@ -544,7 +550,7 @@ static inline unsigned int
ehci_port_speed(struct ehci_hcd *ehci, unsigned int portsc)
{
if (ehci_is_TDI(ehci)) {
switch ((portsc>>26)&3) {
switch ((portsc >> (ehci->has_hostpc ? 25 : 26)) & 3) {
case 0:
return 0;
case 1:

File diff suppressed because it is too large

drivers/usb/host/isp1362.h (1079 lines): file diff suppressed because it is too large


@ -386,6 +386,10 @@ static int isp1760_hc_setup(struct usb_hcd *hcd)
hwmode |= HW_DACK_POL_HIGH;
if (priv->devflags & ISP1760_FLAG_DREQ_POL_HIGH)
hwmode |= HW_DREQ_POL_HIGH;
if (priv->devflags & ISP1760_FLAG_INTR_POL_HIGH)
hwmode |= HW_INTR_HIGH_ACT;
if (priv->devflags & ISP1760_FLAG_INTR_EDGE_TRIG)
hwmode |= HW_INTR_EDGE_TRIG;
/*
* We have to set this first in case we're in 16-bit mode.


@ -142,6 +142,8 @@ typedef void (packet_enqueue)(struct usb_hcd *hcd, struct isp1760_qh *qh,
#define ISP1760_FLAG_DACK_POL_HIGH 0x00000010 /* DACK active high */
#define ISP1760_FLAG_DREQ_POL_HIGH 0x00000020 /* DREQ active high */
#define ISP1760_FLAG_ISP1761 0x00000040 /* Chip is ISP1761 */
#define ISP1760_FLAG_INTR_POL_HIGH 0x00000080 /* Interrupt polarity active high */
#define ISP1760_FLAG_INTR_EDGE_TRIG 0x00000100 /* Interrupt edge triggered */
/* chip memory management */
struct memory_chunk {


@ -3,6 +3,7 @@
* Currently there is support for
* - OpenFirmware
* - PCI
* - PDEV (generic platform device centralized driver model)
*
* (c) 2007 Sebastian Siewior <bigeasy@linutronix.de>
*
@ -11,6 +12,7 @@
#include <linux/usb.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/usb/isp1760.h>
#include "../core/hcd.h"
#include "isp1760-hcd.h"
@ -308,6 +310,8 @@ static int __devinit isp1760_plat_probe(struct platform_device *pdev)
struct resource *mem_res;
struct resource *irq_res;
resource_size_t mem_size;
struct isp1760_platform_data *priv = pdev->dev.platform_data;
unsigned int devflags = 0;
unsigned long irqflags = IRQF_SHARED | IRQF_DISABLED;
mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@ -330,8 +334,23 @@ static int __devinit isp1760_plat_probe(struct platform_device *pdev)
}
irqflags |= irq_res->flags & IRQF_TRIGGER_MASK;
if (priv) {
if (priv->is_isp1761)
devflags |= ISP1760_FLAG_ISP1761;
if (priv->bus_width_16)
devflags |= ISP1760_FLAG_BUS_WIDTH_16;
if (priv->port1_otg)
devflags |= ISP1760_FLAG_OTG_EN;
if (priv->analog_oc)
devflags |= ISP1760_FLAG_ANALOG_OC;
if (priv->dack_polarity_high)
devflags |= ISP1760_FLAG_DACK_POL_HIGH;
if (priv->dreq_polarity_high)
devflags |= ISP1760_FLAG_DREQ_POL_HIGH;
}
hcd = isp1760_register(mem_res->start, mem_size, irq_res->start,
irqflags, &pdev->dev, dev_name(&pdev->dev), 0);
irqflags, &pdev->dev, dev_name(&pdev->dev), devflags);
if (IS_ERR(hcd)) {
pr_warning("isp1760: Failed to register the HCD device\n");
ret = -ENODEV;
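The hunk above maps the fields of struct isp1760_platform_data onto the driver's internal ISP1760_FLAG_* bits, so a board using the generic platform-device path can describe its wiring without bus-specific probe code. A hedged board-side sketch that uses only the field names visible in this hunk; the platform-device name and the omitted resources are assumptions, not taken from this diff:

/* hypothetical board data: an ISP1761 on a 16-bit bus with active-high
 * DACK and DREQ; the remaining flags keep their default (zero) values.
 */
static struct isp1760_platform_data board_isp1760_pdata = {
        .is_isp1761             = 1,
        .bus_width_16           = 1,
        .dack_polarity_high     = 1,
        .dreq_polarity_high     = 1,
};

static struct platform_device board_isp1760_device = {
        .name   = "isp1760",    /* assumed platform-driver name, check isp1760-if.c */
        .id     = -1,
        .dev    = {
                .platform_data  = &board_isp1760_pdata,
        },
        /* MEM and IRQ resources omitted for brevity */
};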


@ -148,7 +148,7 @@ static int usb_hcd_at91_probe(const struct hc_driver *driver,
at91_start_hc(pdev);
ohci_hcd_init(hcd_to_ohci(hcd));
retval = usb_add_hcd(hcd, pdev->resource[1].start, IRQF_DISABLED);
retval = usb_add_hcd(hcd, pdev->resource[1].start, IRQF_SHARED);
if (retval == 0)
return retval;


@ -248,10 +248,9 @@ static int ohci_hcd_au1xxx_drv_remove(struct platform_device *pdev)
}
#ifdef CONFIG_PM
static int ohci_hcd_au1xxx_drv_suspend(struct platform_device *pdev,
pm_message_t message)
static int ohci_hcd_au1xxx_drv_suspend(struct device *dev)
{
struct usb_hcd *hcd = platform_get_drvdata(pdev);
struct usb_hcd *hcd = dev_get_drvdata(dev);
struct ohci_hcd *ohci = hcd_to_ohci(hcd);
unsigned long flags;
int rc;
@ -274,10 +273,6 @@ static int ohci_hcd_au1xxx_drv_suspend(struct platform_device *pdev,
ohci_writel(ohci, OHCI_INTR_MIE, &ohci->regs->intrdisable);
(void)ohci_readl(ohci, &ohci->regs->intrdisable);
/* make sure snapshot being resumed re-enumerates everything */
if (message.event == PM_EVENT_PRETHAW)
ohci_usb_reset(ohci);
clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
au1xxx_stop_ohc();
@ -287,9 +282,9 @@ bail:
return rc;
}
static int ohci_hcd_au1xxx_drv_resume(struct platform_device *pdev)
static int ohci_hcd_au1xxx_drv_resume(struct device *dev)
{
struct usb_hcd *hcd = platform_get_drvdata(pdev);
struct usb_hcd *hcd = dev_get_drvdata(dev);
au1xxx_start_ohc();
@ -298,20 +293,26 @@ static int ohci_hcd_au1xxx_drv_resume(struct platform_device *pdev)
return 0;
}
static struct dev_pm_ops au1xxx_ohci_pmops = {
.suspend = ohci_hcd_au1xxx_drv_suspend,
.resume = ohci_hcd_au1xxx_drv_resume,
};
#define AU1XXX_OHCI_PMOPS &au1xxx_ohci_pmops
#else
#define ohci_hcd_au1xxx_drv_suspend NULL
#define ohci_hcd_au1xxx_drv_resume NULL
#define AU1XXX_OHCI_PMOPS NULL
#endif
static struct platform_driver ohci_hcd_au1xxx_driver = {
.probe = ohci_hcd_au1xxx_drv_probe,
.remove = ohci_hcd_au1xxx_drv_remove,
.shutdown = usb_hcd_platform_shutdown,
.suspend = ohci_hcd_au1xxx_drv_suspend,
.resume = ohci_hcd_au1xxx_drv_resume,
.driver = {
.name = "au1xxx-ohci",
.owner = THIS_MODULE,
.pm = AU1XXX_OHCI_PMOPS,
},
};


@ -188,7 +188,6 @@ static int ohci_hcd_ep93xx_drv_resume(struct platform_device *pdev)
{
struct usb_hcd *hcd = platform_get_drvdata(pdev);
struct ohci_hcd *ohci = hcd_to_ohci(hcd);
int status;
if (time_before(jiffies, ohci->next_statechange))
msleep(5);


@ -34,7 +34,6 @@
#include <linux/usb/otg.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/reboot.h>
#include <linux/workqueue.h>
#include <linux/debugfs.h>


@ -177,9 +177,13 @@ static inline void pxa27x_setup_hc(struct pxa27x_ohci *ohci,
if (inf->flags & NO_OC_PROTECTION)
uhcrhda |= UHCRHDA_NOCP;
else
uhcrhda &= ~UHCRHDA_NOCP;
if (inf->flags & OC_MODE_PERPORT)
uhcrhda |= UHCRHDA_OCPM;
else
uhcrhda &= ~UHCRHDA_OCPM;
if (inf->power_on_delay) {
uhcrhda &= ~UHCRHDA_POTPGT(0xff);


@ -33,7 +33,6 @@
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/usb.h>
#include <linux/moduleparam.h>
#include <linux/dma-mapping.h>


@ -475,4 +475,4 @@ static void __devinit quirk_usb_early_handoff(struct pci_dev *pdev)
else if (pdev->class == PCI_CLASS_SERIAL_USB_XHCI)
quirk_usb_handoff_xhci(pdev);
}
DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, quirk_usb_early_handoff);
DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, quirk_usb_early_handoff);


@ -719,8 +719,12 @@ retry:
/* port status seems weird until after reset, so
* force the reset and make khubd clean up later.
*/
sl811->port1 |= (1 << USB_PORT_FEAT_C_CONNECTION)
| (1 << USB_PORT_FEAT_CONNECTION);
if (sl811->stat_insrmv & 1)
sl811->port1 |= 1 << USB_PORT_FEAT_CONNECTION;
else
sl811->port1 &= ~(1 << USB_PORT_FEAT_CONNECTION);
sl811->port1 |= 1 << USB_PORT_FEAT_C_CONNECTION;
} else if (irqstat & SL11H_INTMASK_RD) {
if (sl811->port1 & (1 << USB_PORT_FEAT_SUSPEND)) {


@ -1422,7 +1422,6 @@ static int uhci_urb_enqueue(struct usb_hcd *hcd,
goto err_submit_failed;
/* Add this URB to the QH */
urbp->qh = qh;
list_add_tail(&urbp->node, &qh->queue);
/* If the new URB is the first and only one on this QH then either


@ -227,11 +227,21 @@ void scan_async_work(struct work_struct *work)
/*
* Now that the ASL is updated, complete the removal of any
* removed qsets.
*
* If the qset was to be reset, do so and reinsert it into the
* ASL if it has pending transfers.
*/
spin_lock_irq(&whc->lock);
list_for_each_entry_safe(qset, t, &whc->async_removed_list, list_node) {
qset_remove_complete(whc, qset);
if (qset->reset) {
qset_reset(whc, qset);
if (!list_empty(&qset->stds)) {
asl_qset_insert_begin(whc, qset);
queue_work(whc->workqueue, &whc->async_work);
}
}
}
spin_unlock_irq(&whc->lock);
@ -267,7 +277,7 @@ int asl_urb_enqueue(struct whc *whc, struct urb *urb, gfp_t mem_flags)
else
err = qset_add_urb(whc, qset, urb, GFP_ATOMIC);
if (!err) {
if (!qset->in_sw_list)
if (!qset->in_sw_list && !qset->remove)
asl_qset_insert_begin(whc, qset);
} else
usb_hcd_unlink_urb_from_ep(&whc->wusbhc.usb_hcd, urb);


@ -192,19 +192,23 @@ static void whc_endpoint_reset(struct usb_hcd *usb_hcd,
struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
struct whc *whc = wusbhc_to_whc(wusbhc);
struct whc_qset *qset;
unsigned long flags;
spin_lock_irqsave(&whc->lock, flags);
qset = ep->hcpriv;
if (qset) {
qset->remove = 1;
qset->reset = 1;
if (usb_endpoint_xfer_bulk(&ep->desc)
|| usb_endpoint_xfer_control(&ep->desc))
queue_work(whc->workqueue, &whc->async_work);
else
queue_work(whc->workqueue, &whc->periodic_work);
qset_reset(whc, qset);
}
spin_unlock_irqrestore(&whc->lock, flags);
}


@ -255,11 +255,21 @@ void scan_periodic_work(struct work_struct *work)
/*
* Now that the PZL is updated, complete the removal of any
* removed qsets.
*
* If the qset was to be reset, do so and reinsert it into the
* PZL if it has pending transfers.
*/
spin_lock_irq(&whc->lock);
list_for_each_entry_safe(qset, t, &whc->periodic_removed_list, list_node) {
qset_remove_complete(whc, qset);
if (qset->reset) {
qset_reset(whc, qset);
if (!list_empty(&qset->stds)) {
qset_insert_in_sw_list(whc, qset);
queue_work(whc->workqueue, &whc->periodic_work);
}
}
}
spin_unlock_irq(&whc->lock);
@ -295,7 +305,7 @@ int pzl_urb_enqueue(struct whc *whc, struct urb *urb, gfp_t mem_flags)
else
err = qset_add_urb(whc, qset, urb, GFP_ATOMIC);
if (!err) {
if (!qset->in_sw_list)
if (!qset->in_sw_list && !qset->remove)
qset_insert_in_sw_list(whc, qset);
} else
usb_hcd_unlink_urb_from_ep(&whc->wusbhc.usb_hcd, urb);


@ -103,7 +103,6 @@ static void qset_fill_qh(struct whc_qset *qset, struct urb *urb)
void qset_clear(struct whc *whc, struct whc_qset *qset)
{
qset->td_start = qset->td_end = qset->ntds = 0;
qset->remove = 0;
qset->qh.link = cpu_to_le32(QH_LINK_NTDS(8) | QH_LINK_T);
qset->qh.status = qset->qh.status & QH_STATUS_SEQ_MASK;
@ -125,7 +124,7 @@ void qset_clear(struct whc *whc, struct whc_qset *qset)
*/
void qset_reset(struct whc *whc, struct whc_qset *qset)
{
wait_for_completion(&qset->remove_complete);
qset->reset = 0;
qset->qh.status &= ~QH_STATUS_SEQ_MASK;
qset->qh.cur_window = cpu_to_le32((1 << qset->max_burst) - 1);
@ -156,6 +155,7 @@ struct whc_qset *get_qset(struct whc *whc, struct urb *urb,
void qset_remove_complete(struct whc *whc, struct whc_qset *qset)
{
qset->remove = 0;
list_del_init(&qset->list_node);
complete(&qset->remove_complete);
}


@ -264,6 +264,7 @@ struct whc_qset {
unsigned in_sw_list:1;
unsigned in_hw_list:1;
unsigned remove:1;
unsigned reset:1;
struct urb *pause_after_urb;
struct completion remove_complete;
int max_burst;


@ -413,7 +413,8 @@ void xhci_dbg_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx)
int i;
struct xhci_slot_ctx *slot_ctx = xhci_get_slot_ctx(xhci, ctx);
dma_addr_t dma = ctx->dma + ((unsigned long)slot_ctx - (unsigned long)ctx);
dma_addr_t dma = ctx->dma +
((unsigned long)slot_ctx - (unsigned long)ctx->bytes);
int csz = HCC_64BYTE_CONTEXT(xhci->hcc_params);
xhci_dbg(xhci, "Slot Context:\n");
@ -459,7 +460,7 @@ void xhci_dbg_ep_ctx(struct xhci_hcd *xhci,
for (i = 0; i < last_ep_ctx; ++i) {
struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, ctx, i);
dma_addr_t dma = ctx->dma +
((unsigned long)ep_ctx - (unsigned long)ctx);
((unsigned long)ep_ctx - (unsigned long)ctx->bytes);
xhci_dbg(xhci, "Endpoint %02d Context:\n", i);
xhci_dbg(xhci, "@%p (virt) @%08llx (dma) %#08x - ep_info\n",


@ -22,12 +22,18 @@
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include "xhci.h"
#define DRIVER_AUTHOR "Sarah Sharp"
#define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver"
/* Some 0.95 hardware can't handle the chain bit on a Link TRB being cleared */
static int link_quirk;
module_param(link_quirk, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(link_quirk, "Don't clear the chain bit on a link TRB");
/* TODO: copied from ehci-hcd.c - can this be refactored? */
/*
* handshake - spin reading hc until handshake completes or fails
@ -214,6 +220,12 @@ int xhci_init(struct usb_hcd *hcd)
xhci_dbg(xhci, "xhci_init\n");
spin_lock_init(&xhci->lock);
if (link_quirk) {
xhci_dbg(xhci, "QUIRK: Not clearing Link TRB chain bits.\n");
xhci->quirks |= XHCI_LINK_TRB_QUIRK;
} else {
xhci_dbg(xhci, "xHCI doesn't need link TRB QUIRK\n");
}
retval = xhci_mem_init(xhci, GFP_KERNEL);
xhci_dbg(xhci, "Finished xhci_init\n");
@ -339,13 +351,14 @@ void xhci_event_ring_work(unsigned long arg)
xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
xhci_dbg_cmd_ptrs(xhci);
for (i = 0; i < MAX_HC_SLOTS; ++i) {
if (xhci->devs[i]) {
for (j = 0; j < 31; ++j) {
if (xhci->devs[i]->ep_rings[j]) {
xhci_dbg(xhci, "Dev %d endpoint ring %d:\n", i, j);
xhci_debug_segment(xhci, xhci->devs[i]->ep_rings[j]->deq_seg);
}
}
if (!xhci->devs[i])
continue;
for (j = 0; j < 31; ++j) {
struct xhci_ring *ring = xhci->devs[i]->eps[j].ring;
if (!ring)
continue;
xhci_dbg(xhci, "Dev %d endpoint ring %d:\n", i, j);
xhci_debug_segment(xhci, ring->deq_seg);
}
}
@ -555,13 +568,22 @@ unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc)
return 1 << (xhci_get_endpoint_index(desc) + 1);
}
/* Find the flag for this endpoint (for use in the control context). Use the
* endpoint index to create a bitmask. The slot context is bit 0, endpoint 0 is
* bit 1, etc.
*/
unsigned int xhci_get_endpoint_flag_from_index(unsigned int ep_index)
{
return 1 << (ep_index + 1);
}
/* Compute the last valid endpoint context index. Basically, this is the
* endpoint index plus one. For slot contexts with more than one valid endpoint,
* we find the most significant bit set in the added contexts flags.
* e.g. ep 1 IN (with epnum 0x81) => added_ctxs = 0b1000
* fls(0b1000) = 4, but the endpoint context index is 3, so subtract one.
*/
static inline unsigned int xhci_last_valid_endpoint(u32 added_ctxs)
unsigned int xhci_last_valid_endpoint(u32 added_ctxs)
{
return fls(added_ctxs) - 1;
}
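Both helpers above encode the same layout from opposite directions: the slot context occupies bit 0 of the add/drop flag word, endpoint index N occupies bit N + 1, and fls(flags) - 1 recovers the last valid context entry. A short worked example of that bit arithmetic, restating the ep 1 IN case from the comment:

/* ep 1 IN (bEndpointAddress 0x81) has driver endpoint index 2, so:
 *
 *      xhci_get_endpoint_flag_from_index(2) == 1 << (2 + 1) == 0x8 (0b1000)
 *      xhci_last_valid_endpoint(0x8)        == fls(0x8) - 1  == 4 - 1 == 3
 *
 * i.e. the last valid device context entry is 3: the slot context is entry 0
 * and endpoint index N lives in entry N + 1.
 */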
@ -589,6 +611,71 @@ int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
return 1;
}
static int xhci_configure_endpoint(struct xhci_hcd *xhci,
struct usb_device *udev, struct xhci_command *command,
bool ctx_change, bool must_succeed);
/*
* Full speed devices may have a max packet size greater than 8 bytes, but the
* USB core doesn't know that until it reads the first 8 bytes of the
* descriptor. If the usb_device's max packet size changes after that point,
* we need to issue an evaluate context command and wait on it.
*/
static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
unsigned int ep_index, struct urb *urb)
{
struct xhci_container_ctx *in_ctx;
struct xhci_container_ctx *out_ctx;
struct xhci_input_control_ctx *ctrl_ctx;
struct xhci_ep_ctx *ep_ctx;
int max_packet_size;
int hw_max_packet_size;
int ret = 0;
out_ctx = xhci->devs[slot_id]->out_ctx;
ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
hw_max_packet_size = MAX_PACKET_DECODED(ep_ctx->ep_info2);
max_packet_size = urb->dev->ep0.desc.wMaxPacketSize;
if (hw_max_packet_size != max_packet_size) {
xhci_dbg(xhci, "Max Packet Size for ep 0 changed.\n");
xhci_dbg(xhci, "Max packet size in usb_device = %d\n",
max_packet_size);
xhci_dbg(xhci, "Max packet size in xHCI HW = %d\n",
hw_max_packet_size);
xhci_dbg(xhci, "Issuing evaluate context command.\n");
/* Set up the modified control endpoint 0 */
xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
xhci->devs[slot_id]->out_ctx, ep_index);
in_ctx = xhci->devs[slot_id]->in_ctx;
ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
ep_ctx->ep_info2 &= ~MAX_PACKET_MASK;
ep_ctx->ep_info2 |= MAX_PACKET(max_packet_size);
/* Set up the input context flags for the command */
/* FIXME: This won't work if a non-default control endpoint
* changes max packet sizes.
*/
ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
ctrl_ctx->add_flags = EP0_FLAG;
ctrl_ctx->drop_flags = 0;
xhci_dbg(xhci, "Slot %d input context\n", slot_id);
xhci_dbg_ctx(xhci, in_ctx, ep_index);
xhci_dbg(xhci, "Slot %d output context\n", slot_id);
xhci_dbg_ctx(xhci, out_ctx, ep_index);
ret = xhci_configure_endpoint(xhci, urb->dev, NULL,
true, false);
/* Clean up the input context for later use by bandwidth
* functions.
*/
ctrl_ctx->add_flags = SLOT_FLAG;
}
return ret;
}
/*
* non-error returns are a promise to giveback() the urb later
* we drop ownership so next owner (or urb unlink) can get it
@ -600,13 +687,13 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
int ret = 0;
unsigned int slot_id, ep_index;
if (!urb || xhci_check_args(hcd, urb->dev, urb->ep, true, __func__) <= 0)
return -EINVAL;
slot_id = urb->dev->slot_id;
ep_index = xhci_get_endpoint_index(&urb->ep->desc);
spin_lock_irqsave(&xhci->lock, flags);
if (!xhci->devs || !xhci->devs[slot_id]) {
if (!in_interrupt())
dev_warn(&urb->dev->dev, "WARN: urb submitted for dev with no Slot ID\n");
@ -619,19 +706,38 @@ int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
ret = -ESHUTDOWN;
goto exit;
}
if (usb_endpoint_xfer_control(&urb->ep->desc))
if (usb_endpoint_xfer_control(&urb->ep->desc)) {
/* Check to see if the max packet size for the default control
* endpoint changed during FS device enumeration
*/
if (urb->dev->speed == USB_SPEED_FULL) {
ret = xhci_check_maxpacket(xhci, slot_id,
ep_index, urb);
if (ret < 0)
return ret;
}
/* We have a spinlock and interrupts disabled, so we must pass
* atomic context to this function, which may allocate memory.
*/
spin_lock_irqsave(&xhci->lock, flags);
ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb,
slot_id, ep_index);
else if (usb_endpoint_xfer_bulk(&urb->ep->desc))
spin_unlock_irqrestore(&xhci->lock, flags);
} else if (usb_endpoint_xfer_bulk(&urb->ep->desc)) {
spin_lock_irqsave(&xhci->lock, flags);
ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb,
slot_id, ep_index);
else
spin_unlock_irqrestore(&xhci->lock, flags);
} else if (usb_endpoint_xfer_int(&urb->ep->desc)) {
spin_lock_irqsave(&xhci->lock, flags);
ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb,
slot_id, ep_index);
spin_unlock_irqrestore(&xhci->lock, flags);
} else {
ret = -EINVAL;
}
exit:
spin_unlock_irqrestore(&xhci->lock, flags);
return ret;
}
@ -674,6 +780,7 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
struct xhci_td *td;
unsigned int ep_index;
struct xhci_ring *ep_ring;
struct xhci_virt_ep *ep;
xhci = hcd_to_xhci(hcd);
spin_lock_irqsave(&xhci->lock, flags);
@ -686,17 +793,18 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
xhci_dbg(xhci, "Event ring:\n");
xhci_debug_ring(xhci, xhci->event_ring);
ep_index = xhci_get_endpoint_index(&urb->ep->desc);
ep_ring = xhci->devs[urb->dev->slot_id]->ep_rings[ep_index];
ep = &xhci->devs[urb->dev->slot_id]->eps[ep_index];
ep_ring = ep->ring;
xhci_dbg(xhci, "Endpoint ring:\n");
xhci_debug_ring(xhci, ep_ring);
td = (struct xhci_td *) urb->hcpriv;
ep_ring->cancels_pending++;
list_add_tail(&td->cancelled_td_list, &ep_ring->cancelled_td_list);
ep->cancels_pending++;
list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list);
/* Queue a stop endpoint command, but only if this is
* the first cancellation to be handled.
*/
if (ep_ring->cancels_pending == 1) {
if (ep->cancels_pending == 1) {
xhci_queue_stop_endpoint(xhci, urb->dev->slot_id, ep_index);
xhci_ring_cmd_db(xhci);
}
@ -930,6 +1038,141 @@ static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *vir
}
}
static int xhci_configure_endpoint_result(struct xhci_hcd *xhci,
struct usb_device *udev, int *cmd_status)
{
int ret;
switch (*cmd_status) {
case COMP_ENOMEM:
dev_warn(&udev->dev, "Not enough host controller resources "
"for new device state.\n");
ret = -ENOMEM;
/* FIXME: can we allocate more resources for the HC? */
break;
case COMP_BW_ERR:
dev_warn(&udev->dev, "Not enough bandwidth "
"for new device state.\n");
ret = -ENOSPC;
/* FIXME: can we go back to the old state? */
break;
case COMP_TRB_ERR:
/* the HCD set up something wrong */
dev_warn(&udev->dev, "ERROR: Endpoint drop flag = 0, "
"add flag = 1, "
"and endpoint is not disabled.\n");
ret = -EINVAL;
break;
case COMP_SUCCESS:
dev_dbg(&udev->dev, "Successful Endpoint Configure command\n");
ret = 0;
break;
default:
xhci_err(xhci, "ERROR: unexpected command completion "
"code 0x%x.\n", *cmd_status);
ret = -EINVAL;
break;
}
return ret;
}
static int xhci_evaluate_context_result(struct xhci_hcd *xhci,
struct usb_device *udev, int *cmd_status)
{
int ret;
struct xhci_virt_device *virt_dev = xhci->devs[udev->slot_id];
switch (*cmd_status) {
case COMP_EINVAL:
dev_warn(&udev->dev, "WARN: xHCI driver setup invalid evaluate "
"context command.\n");
ret = -EINVAL;
break;
case COMP_EBADSLT:
dev_warn(&udev->dev, "WARN: slot not enabled for "
"evaluate context command.\n");
case COMP_CTX_STATE:
dev_warn(&udev->dev, "WARN: invalid context state for "
"evaluate context command.\n");
xhci_dbg_ctx(xhci, virt_dev->out_ctx, 1);
ret = -EINVAL;
break;
case COMP_SUCCESS:
dev_dbg(&udev->dev, "Successful evaluate context command\n");
ret = 0;
break;
default:
xhci_err(xhci, "ERROR: unexpected command completion "
"code 0x%x.\n", *cmd_status);
ret = -EINVAL;
break;
}
return ret;
}
/* Issue a configure endpoint command or evaluate context command
* and wait for it to finish.
*/
static int xhci_configure_endpoint(struct xhci_hcd *xhci,
struct usb_device *udev,
struct xhci_command *command,
bool ctx_change, bool must_succeed)
{
int ret;
int timeleft;
unsigned long flags;
struct xhci_container_ctx *in_ctx;
struct completion *cmd_completion;
int *cmd_status;
struct xhci_virt_device *virt_dev;
spin_lock_irqsave(&xhci->lock, flags);
virt_dev = xhci->devs[udev->slot_id];
if (command) {
in_ctx = command->in_ctx;
cmd_completion = command->completion;
cmd_status = &command->status;
command->command_trb = xhci->cmd_ring->enqueue;
list_add_tail(&command->cmd_list, &virt_dev->cmd_list);
} else {
in_ctx = virt_dev->in_ctx;
cmd_completion = &virt_dev->cmd_completion;
cmd_status = &virt_dev->cmd_status;
}
if (!ctx_change)
ret = xhci_queue_configure_endpoint(xhci, in_ctx->dma,
udev->slot_id, must_succeed);
else
ret = xhci_queue_evaluate_context(xhci, in_ctx->dma,
udev->slot_id);
if (ret < 0) {
spin_unlock_irqrestore(&xhci->lock, flags);
xhci_dbg(xhci, "FIXME allocate a new ring segment\n");
return -ENOMEM;
}
xhci_ring_cmd_db(xhci);
spin_unlock_irqrestore(&xhci->lock, flags);
/* Wait for the configure endpoint command to complete */
timeleft = wait_for_completion_interruptible_timeout(
cmd_completion,
USB_CTRL_SET_TIMEOUT);
if (timeleft <= 0) {
xhci_warn(xhci, "%s while waiting for %s command\n",
timeleft == 0 ? "Timeout" : "Signal",
ctx_change == 0 ?
"configure endpoint" :
"evaluate context");
/* FIXME cancel the configure endpoint command */
return -ETIME;
}
if (!ctx_change)
return xhci_configure_endpoint_result(xhci, udev, cmd_status);
return xhci_evaluate_context_result(xhci, udev, cmd_status);
}
/* Called after one or more calls to xhci_add_endpoint() or
* xhci_drop_endpoint(). If this call fails, the USB core is expected
* to call xhci_reset_bandwidth().
@ -944,8 +1187,6 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
{
int i;
int ret = 0;
int timeleft;
unsigned long flags;
struct xhci_hcd *xhci;
struct xhci_virt_device *virt_dev;
struct xhci_input_control_ctx *ctrl_ctx;
@ -975,56 +1216,8 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
xhci_dbg_ctx(xhci, virt_dev->in_ctx,
LAST_CTX_TO_EP_NUM(slot_ctx->dev_info));
spin_lock_irqsave(&xhci->lock, flags);
ret = xhci_queue_configure_endpoint(xhci, virt_dev->in_ctx->dma,
udev->slot_id);
if (ret < 0) {
spin_unlock_irqrestore(&xhci->lock, flags);
xhci_dbg(xhci, "FIXME allocate a new ring segment\n");
return -ENOMEM;
}
xhci_ring_cmd_db(xhci);
spin_unlock_irqrestore(&xhci->lock, flags);
/* Wait for the configure endpoint command to complete */
timeleft = wait_for_completion_interruptible_timeout(
&virt_dev->cmd_completion,
USB_CTRL_SET_TIMEOUT);
if (timeleft <= 0) {
xhci_warn(xhci, "%s while waiting for configure endpoint command\n",
timeleft == 0 ? "Timeout" : "Signal");
/* FIXME cancel the configure endpoint command */
return -ETIME;
}
switch (virt_dev->cmd_status) {
case COMP_ENOMEM:
dev_warn(&udev->dev, "Not enough host controller resources "
"for new device state.\n");
ret = -ENOMEM;
/* FIXME: can we allocate more resources for the HC? */
break;
case COMP_BW_ERR:
dev_warn(&udev->dev, "Not enough bandwidth "
"for new device state.\n");
ret = -ENOSPC;
/* FIXME: can we go back to the old state? */
break;
case COMP_TRB_ERR:
/* the HCD set up something wrong */
dev_warn(&udev->dev, "ERROR: Endpoint drop flag = 0, add flag = 1, "
"and endpoint is not disabled.\n");
ret = -EINVAL;
break;
case COMP_SUCCESS:
dev_dbg(&udev->dev, "Successful Endpoint Configure command\n");
break;
default:
xhci_err(xhci, "ERROR: unexpected command completion "
"code 0x%x.\n", virt_dev->cmd_status);
ret = -EINVAL;
break;
}
ret = xhci_configure_endpoint(xhci, udev, NULL,
false, false);
if (ret) {
/* Callee should call reset_bandwidth() */
return ret;
@ -1037,10 +1230,10 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
xhci_zero_in_ctx(xhci, virt_dev);
/* Free any old rings */
for (i = 1; i < 31; ++i) {
if (virt_dev->new_ep_rings[i]) {
xhci_ring_free(xhci, virt_dev->ep_rings[i]);
virt_dev->ep_rings[i] = virt_dev->new_ep_rings[i];
virt_dev->new_ep_rings[i] = NULL;
if (virt_dev->eps[i].new_ring) {
xhci_ring_free(xhci, virt_dev->eps[i].ring);
virt_dev->eps[i].ring = virt_dev->eps[i].new_ring;
virt_dev->eps[i].new_ring = NULL;
}
}
@ -1067,14 +1260,93 @@ void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
virt_dev = xhci->devs[udev->slot_id];
/* Free any rings allocated for added endpoints */
for (i = 0; i < 31; ++i) {
if (virt_dev->new_ep_rings[i]) {
xhci_ring_free(xhci, virt_dev->new_ep_rings[i]);
virt_dev->new_ep_rings[i] = NULL;
if (virt_dev->eps[i].new_ring) {
xhci_ring_free(xhci, virt_dev->eps[i].new_ring);
virt_dev->eps[i].new_ring = NULL;
}
}
xhci_zero_in_ctx(xhci, virt_dev);
}
static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci,
struct xhci_container_ctx *in_ctx,
struct xhci_container_ctx *out_ctx,
u32 add_flags, u32 drop_flags)
{
struct xhci_input_control_ctx *ctrl_ctx;
ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
ctrl_ctx->add_flags = add_flags;
ctrl_ctx->drop_flags = drop_flags;
xhci_slot_copy(xhci, in_ctx, out_ctx);
ctrl_ctx->add_flags |= SLOT_FLAG;
xhci_dbg(xhci, "Input Context:\n");
xhci_dbg_ctx(xhci, in_ctx, xhci_last_valid_endpoint(add_flags));
}
void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci,
unsigned int slot_id, unsigned int ep_index,
struct xhci_dequeue_state *deq_state)
{
struct xhci_container_ctx *in_ctx;
struct xhci_ep_ctx *ep_ctx;
u32 added_ctxs;
dma_addr_t addr;
xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
xhci->devs[slot_id]->out_ctx, ep_index);
in_ctx = xhci->devs[slot_id]->in_ctx;
ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
addr = xhci_trb_virt_to_dma(deq_state->new_deq_seg,
deq_state->new_deq_ptr);
if (addr == 0) {
xhci_warn(xhci, "WARN Cannot submit config ep after "
"reset ep command\n");
xhci_warn(xhci, "WARN deq seg = %p, deq ptr = %p\n",
deq_state->new_deq_seg,
deq_state->new_deq_ptr);
return;
}
ep_ctx->deq = addr | deq_state->new_cycle_state;
added_ctxs = xhci_get_endpoint_flag_from_index(ep_index);
xhci_setup_input_ctx_for_config_ep(xhci, xhci->devs[slot_id]->in_ctx,
xhci->devs[slot_id]->out_ctx, added_ctxs, added_ctxs);
}
void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
struct usb_device *udev, unsigned int ep_index)
{
struct xhci_dequeue_state deq_state;
struct xhci_virt_ep *ep;
xhci_dbg(xhci, "Cleaning up stalled endpoint ring\n");
ep = &xhci->devs[udev->slot_id]->eps[ep_index];
/* We need to move the HW's dequeue pointer past this TD,
* or it will attempt to resend it on the next doorbell ring.
*/
xhci_find_new_dequeue_state(xhci, udev->slot_id,
ep_index, ep->stopped_td,
&deq_state);
/* HW with the reset endpoint quirk will use the saved dequeue state to
* issue a configure endpoint command later.
*/
if (!(xhci->quirks & XHCI_RESET_EP_QUIRK)) {
xhci_dbg(xhci, "Queueing new dequeue state\n");
xhci_queue_new_dequeue_state(xhci, udev->slot_id,
ep_index, &deq_state);
} else {
/* Better hope no one uses the input context between now and the
* reset endpoint completion!
*/
xhci_dbg(xhci, "Setting up input context for "
"configure endpoint command\n");
xhci_setup_input_ctx_for_quirk(xhci, udev->slot_id,
ep_index, &deq_state);
}
}
/* Deal with stalled endpoints. The core should have sent the control message
* to clear the halt condition. However, we need to make the xHCI hardware
* reset its sequence number, since a device will expect a sequence number of
@ -1089,8 +1361,7 @@ void xhci_endpoint_reset(struct usb_hcd *hcd,
unsigned int ep_index;
unsigned long flags;
int ret;
struct xhci_dequeue_state deq_state;
struct xhci_ring *ep_ring;
struct xhci_virt_ep *virt_ep;
xhci = hcd_to_xhci(hcd);
udev = (struct usb_device *) ep->hcpriv;
@ -1100,12 +1371,16 @@ void xhci_endpoint_reset(struct usb_hcd *hcd,
if (!ep->hcpriv)
return;
ep_index = xhci_get_endpoint_index(&ep->desc);
ep_ring = xhci->devs[udev->slot_id]->ep_rings[ep_index];
if (!ep_ring->stopped_td) {
virt_ep = &xhci->devs[udev->slot_id]->eps[ep_index];
if (!virt_ep->stopped_td) {
xhci_dbg(xhci, "Endpoint 0x%x not halted, refusing to reset.\n",
ep->desc.bEndpointAddress);
return;
}
if (usb_endpoint_xfer_control(&ep->desc)) {
xhci_dbg(xhci, "Control endpoint stall already handled.\n");
return;
}
xhci_dbg(xhci, "Queueing reset endpoint command\n");
spin_lock_irqsave(&xhci->lock, flags);
@ -1116,17 +1391,8 @@ void xhci_endpoint_reset(struct usb_hcd *hcd,
* command. Better hope that last command worked!
*/
if (!ret) {
xhci_dbg(xhci, "Cleaning up stalled endpoint ring\n");
/* We need to move the HW's dequeue pointer past this TD,
* or it will attempt to resend it on the next doorbell ring.
*/
xhci_find_new_dequeue_state(xhci, udev->slot_id,
ep_index, ep_ring->stopped_td, &deq_state);
xhci_dbg(xhci, "Queueing new dequeue state\n");
xhci_queue_new_dequeue_state(xhci, ep_ring,
udev->slot_id,
ep_index, &deq_state);
kfree(ep_ring->stopped_td);
xhci_cleanup_stalled_ring(xhci, udev, ep_index);
kfree(virt_ep->stopped_td);
xhci_ring_cmd_db(xhci);
}
spin_unlock_irqrestore(&xhci->lock, flags);
@ -1328,6 +1594,88 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
return 0;
}
/* Once a hub descriptor is fetched for a device, we need to update the xHC's
* internal data structures for the device.
*/
int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
struct usb_tt *tt, gfp_t mem_flags)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
struct xhci_virt_device *vdev;
struct xhci_command *config_cmd;
struct xhci_input_control_ctx *ctrl_ctx;
struct xhci_slot_ctx *slot_ctx;
unsigned long flags;
unsigned think_time;
int ret;
/* Ignore root hubs */
if (!hdev->parent)
return 0;
vdev = xhci->devs[hdev->slot_id];
if (!vdev) {
xhci_warn(xhci, "Cannot update hub desc for unknown device.\n");
return -EINVAL;
}
config_cmd = xhci_alloc_command(xhci, true, mem_flags);
if (!config_cmd) {
xhci_dbg(xhci, "Could not allocate xHCI command structure.\n");
return -ENOMEM;
}
spin_lock_irqsave(&xhci->lock, flags);
xhci_slot_copy(xhci, config_cmd->in_ctx, vdev->out_ctx);
ctrl_ctx = xhci_get_input_control_ctx(xhci, config_cmd->in_ctx);
ctrl_ctx->add_flags |= SLOT_FLAG;
slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx);
slot_ctx->dev_info |= DEV_HUB;
if (tt->multi)
slot_ctx->dev_info |= DEV_MTT;
if (xhci->hci_version > 0x95) {
xhci_dbg(xhci, "xHCI version %x needs hub "
"TT think time and number of ports\n",
(unsigned int) xhci->hci_version);
slot_ctx->dev_info2 |= XHCI_MAX_PORTS(hdev->maxchild);
/* Set TT think time - convert from ns to FS bit times.
* 0 = 8 FS bit times, 1 = 16 FS bit times,
* 2 = 24 FS bit times, 3 = 32 FS bit times.
*/
think_time = tt->think_time;
if (think_time != 0)
think_time = (think_time / 666) - 1;
slot_ctx->tt_info |= TT_THINK_TIME(think_time);
} else {
xhci_dbg(xhci, "xHCI version %x doesn't need hub "
"TT think time or number of ports\n",
(unsigned int) xhci->hci_version);
}
slot_ctx->dev_state = 0;
spin_unlock_irqrestore(&xhci->lock, flags);
xhci_dbg(xhci, "Set up %s for hub device.\n",
(xhci->hci_version > 0x95) ?
"configure endpoint" : "evaluate context");
xhci_dbg(xhci, "Slot %u Input Context:\n", hdev->slot_id);
xhci_dbg_ctx(xhci, config_cmd->in_ctx, 0);
/* Issue and wait for the configure endpoint or
* evaluate context command.
*/
if (xhci->hci_version > 0x95)
ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
false, false);
else
ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
true, false);
xhci_dbg(xhci, "Slot %u Output Context:\n", hdev->slot_id);
xhci_dbg_ctx(xhci, vdev->out_ctx, 0);
xhci_free_command(xhci, config_cmd);
return ret;
}
int xhci_get_frame(struct usb_hcd *hcd)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
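The TT think-time setup in xhci_update_hub_device() above squeezes the hub's think time into a two-bit field counted in units of 8 FS bit times, while the USB core's struct usb_tt stores it in nanoseconds (about 666 ns per 8 FS bit times). A quick worked check of the think_time / 666 - 1 conversion used there:

/* tt->think_time (ns)  ->  value passed to TT_THINK_TIME()
 *
 *       666  ->   666 / 666 - 1 == 0    (8 FS bit times)
 *      1332  ->  1332 / 666 - 1 == 1    (16 FS bit times)
 *      1998  ->  1998 / 666 - 1 == 2    (24 FS bit times)
 *      2664  ->  2664 / 666 - 1 == 3    (32 FS bit times)
 *
 * and a think_time of 0 is left alone by the guard above it.
 */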


@ -94,6 +94,9 @@ static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev,
val = prev->trbs[TRBS_PER_SEGMENT-1].link.control;
val &= ~TRB_TYPE_BITMASK;
val |= TRB_TYPE(TRB_LINK);
/* Always set the chain bit with 0.95 hardware */
if (xhci_link_trb_quirk(xhci))
val |= TRB_CHAIN;
prev->trbs[TRBS_PER_SEGMENT-1].link.control = val;
}
xhci_dbg(xhci, "Linking segment 0x%llx to segment 0x%llx (DMA)\n",
@ -141,7 +144,6 @@ static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci,
return 0;
INIT_LIST_HEAD(&ring->td_list);
INIT_LIST_HEAD(&ring->cancelled_td_list);
if (num_segs == 0)
return ring;
@ -262,8 +264,8 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
return;
for (i = 0; i < 31; ++i)
if (dev->ep_rings[i])
xhci_ring_free(xhci, dev->ep_rings[i]);
if (dev->eps[i].ring)
xhci_ring_free(xhci, dev->eps[i].ring);
if (dev->in_ctx)
xhci_free_container_ctx(xhci, dev->in_ctx);
@ -278,6 +280,7 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
struct usb_device *udev, gfp_t flags)
{
struct xhci_virt_device *dev;
int i;
/* Slot ID 0 is reserved */
if (slot_id == 0 || xhci->devs[slot_id]) {
@ -306,12 +309,17 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
xhci_dbg(xhci, "Slot %d input ctx = 0x%llx (dma)\n", slot_id,
(unsigned long long)dev->in_ctx->dma);
/* Initialize the cancellation list for each endpoint */
for (i = 0; i < 31; i++)
INIT_LIST_HEAD(&dev->eps[i].cancelled_td_list);
/* Allocate endpoint 0 ring */
dev->ep_rings[0] = xhci_ring_alloc(xhci, 1, true, flags);
if (!dev->ep_rings[0])
dev->eps[0].ring = xhci_ring_alloc(xhci, 1, true, flags);
if (!dev->eps[0].ring)
goto fail;
init_completion(&dev->cmd_completion);
INIT_LIST_HEAD(&dev->cmd_list);
/* Point to output device context in dcbaa. */
xhci->dcbaa->dev_context_ptrs[slot_id] = dev->out_ctx->dma;
@ -352,9 +360,9 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
/* 3) Only the control endpoint is valid - one endpoint context */
slot_ctx->dev_info |= LAST_CTX(1);
slot_ctx->dev_info |= (u32) udev->route;
switch (udev->speed) {
case USB_SPEED_SUPER:
slot_ctx->dev_info |= (u32) udev->route;
slot_ctx->dev_info |= (u32) SLOT_SPEED_SS;
break;
case USB_SPEED_HIGH:
@ -382,14 +390,12 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
xhci_dbg(xhci, "Set root hub portnum to %d\n", top_dev->portnum);
/* Is this a LS/FS device under a HS hub? */
/*
* FIXME: I don't think this is right, where does the TT info for the
* roothub or parent hub come from?
*/
if ((udev->speed == USB_SPEED_LOW || udev->speed == USB_SPEED_FULL) &&
udev->tt) {
slot_ctx->tt_info = udev->tt->hub->slot_id;
slot_ctx->tt_info |= udev->ttport << 8;
if (udev->tt->multi)
slot_ctx->dev_info |= DEV_MTT;
}
xhci_dbg(xhci, "udev->tt = %p\n", udev->tt);
xhci_dbg(xhci, "udev->ttport = 0x%x\n", udev->ttport);
@ -398,22 +404,35 @@ int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *ud
/* Step 5 */
ep0_ctx->ep_info2 = EP_TYPE(CTRL_EP);
/*
* See section 4.3 bullet 6:
* The default Max Packet size for ep0 is "8 bytes for a USB2
* LS/FS/HS device or 512 bytes for a USB3 SS device"
* XXX: Not sure about wireless USB devices.
*/
if (udev->speed == USB_SPEED_SUPER)
switch (udev->speed) {
case USB_SPEED_SUPER:
ep0_ctx->ep_info2 |= MAX_PACKET(512);
else
break;
case USB_SPEED_HIGH:
/* USB core guesses at a 64-byte max packet first for FS devices */
case USB_SPEED_FULL:
ep0_ctx->ep_info2 |= MAX_PACKET(64);
break;
case USB_SPEED_LOW:
ep0_ctx->ep_info2 |= MAX_PACKET(8);
break;
case USB_SPEED_VARIABLE:
xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n");
return -EINVAL;
break;
default:
/* New speed? */
BUG();
}
/* EP 0 can handle "burst" sizes of 1, so Max Burst Size field is 0 */
ep0_ctx->ep_info2 |= MAX_BURST(0);
ep0_ctx->ep_info2 |= ERROR_COUNT(3);
ep0_ctx->deq =
dev->ep_rings[0]->first_seg->dma;
ep0_ctx->deq |= dev->ep_rings[0]->cycle_state;
dev->eps[0].ring->first_seg->dma;
ep0_ctx->deq |= dev->eps[0].ring->cycle_state;
/* Steps 7 and 8 were done in xhci_alloc_virt_device() */
@ -523,10 +542,11 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
/* Set up the endpoint ring */
virt_dev->new_ep_rings[ep_index] = xhci_ring_alloc(xhci, 1, true, mem_flags);
if (!virt_dev->new_ep_rings[ep_index])
virt_dev->eps[ep_index].new_ring =
xhci_ring_alloc(xhci, 1, true, mem_flags);
if (!virt_dev->eps[ep_index].new_ring)
return -ENOMEM;
ep_ring = virt_dev->new_ep_rings[ep_index];
ep_ring = virt_dev->eps[ep_index].new_ring;
ep_ctx->deq = ep_ring->first_seg->dma | ep_ring->cycle_state;
ep_ctx->ep_info = xhci_get_endpoint_interval(udev, ep);
@ -598,6 +618,48 @@ void xhci_endpoint_zero(struct xhci_hcd *xhci,
*/
}
/* Copy output xhci_ep_ctx to the input xhci_ep_ctx copy.
* Useful when you want to change one particular aspect of the endpoint and then
* issue a configure endpoint command.
*/
void xhci_endpoint_copy(struct xhci_hcd *xhci,
struct xhci_container_ctx *in_ctx,
struct xhci_container_ctx *out_ctx,
unsigned int ep_index)
{
struct xhci_ep_ctx *out_ep_ctx;
struct xhci_ep_ctx *in_ep_ctx;
out_ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
in_ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
in_ep_ctx->ep_info = out_ep_ctx->ep_info;
in_ep_ctx->ep_info2 = out_ep_ctx->ep_info2;
in_ep_ctx->deq = out_ep_ctx->deq;
in_ep_ctx->tx_info = out_ep_ctx->tx_info;
}
/* Copy output xhci_slot_ctx to the input xhci_slot_ctx.
* Useful when you want to change one particular aspect of the endpoint and then
* issue a configure endpoint command. Only the context entries field matters,
* but we'll copy the whole thing anyway.
*/
void xhci_slot_copy(struct xhci_hcd *xhci,
struct xhci_container_ctx *in_ctx,
struct xhci_container_ctx *out_ctx)
{
struct xhci_slot_ctx *in_slot_ctx;
struct xhci_slot_ctx *out_slot_ctx;
in_slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
out_slot_ctx = xhci_get_slot_ctx(xhci, out_ctx);
in_slot_ctx->dev_info = out_slot_ctx->dev_info;
in_slot_ctx->dev_info2 = out_slot_ctx->dev_info2;
in_slot_ctx->tt_info = out_slot_ctx->tt_info;
in_slot_ctx->dev_state = out_slot_ctx->dev_state;
}
/* Set up the scratchpad buffer array and scratchpad buffers, if needed. */
static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags)
{
@ -695,6 +757,44 @@ static void scratchpad_free(struct xhci_hcd *xhci)
xhci->scratchpad = NULL;
}
struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci,
bool allocate_completion, gfp_t mem_flags)
{
struct xhci_command *command;
command = kzalloc(sizeof(*command), mem_flags);
if (!command)
return NULL;
command->in_ctx =
xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT, mem_flags);
if (!command->in_ctx)
return NULL;
if (allocate_completion) {
command->completion =
kzalloc(sizeof(struct completion), mem_flags);
if (!command->completion) {
xhci_free_container_ctx(xhci, command->in_ctx);
return NULL;
}
init_completion(command->completion);
}
command->status = 0;
INIT_LIST_HEAD(&command->cmd_list);
return command;
}
void xhci_free_command(struct xhci_hcd *xhci,
struct xhci_command *command)
{
xhci_free_container_ctx(xhci,
command->in_ctx);
kfree(command->completion);
kfree(command);
}
void xhci_mem_cleanup(struct xhci_hcd *xhci)
{
struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
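xhci_alloc_command() and xhci_free_command() above, together with the per-device cmd_list, let a caller issue a command and sleep until handle_cmd_in_cmd_wait_list() matches the completed TRB and signals it. A rough sketch of that lifecycle, pieced together from the hunks in this merge; locals such as xhci and virt_dev are assumed in scope, and error handling plus the command-specific queueing call are omitted:

struct xhci_command *cmd;
unsigned long flags;

cmd = xhci_alloc_command(xhci, true, GFP_KERNEL);  /* true: also allocate a completion */
if (!cmd)
        return -ENOMEM;

spin_lock_irqsave(&xhci->lock, flags);
cmd->command_trb = xhci->cmd_ring->enqueue;        /* TRB the event handler will match */
list_add_tail(&cmd->cmd_list, &virt_dev->cmd_list);
/* ... queue the actual command TRB, e.g. a configure endpoint command ... */
xhci_ring_cmd_db(xhci);
spin_unlock_irqrestore(&xhci->lock, flags);

wait_for_completion_interruptible_timeout(cmd->completion, USB_CTRL_SET_TIMEOUT);
/* cmd->status now holds the completion code copied by handle_cmd_in_cmd_wait_list() */
xhci_free_command(xhci, cmd);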


@ -24,6 +24,10 @@
#include "xhci.h"
/* Device for a quirk */
#define PCI_VENDOR_ID_FRESCO_LOGIC 0x1b73
#define PCI_DEVICE_ID_FRESCO_LOGIC_PDK 0x1000
static const char hcd_name[] = "xhci_hcd";
/* called after powerup, by probe or system-pm "wakeup" */
@ -59,9 +63,20 @@ static int xhci_pci_setup(struct usb_hcd *hcd)
xhci->hcs_params1 = xhci_readl(xhci, &xhci->cap_regs->hcs_params1);
xhci->hcs_params2 = xhci_readl(xhci, &xhci->cap_regs->hcs_params2);
xhci->hcs_params3 = xhci_readl(xhci, &xhci->cap_regs->hcs_params3);
xhci->hcc_params = xhci_readl(xhci, &xhci->cap_regs->hc_capbase);
xhci->hci_version = HC_VERSION(xhci->hcc_params);
xhci->hcc_params = xhci_readl(xhci, &xhci->cap_regs->hcc_params);
xhci_print_registers(xhci);
/* Look for vendor-specific quirks */
if (pdev->vendor == PCI_VENDOR_ID_FRESCO_LOGIC &&
pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_PDK &&
pdev->revision == 0x0) {
xhci->quirks |= XHCI_RESET_EP_QUIRK;
xhci_dbg(xhci, "QUIRK: Fresco Logic xHC needs configure"
" endpoint cmd after reset endpoint\n");
}
/* Make sure the HC is halted. */
retval = xhci_halt(xhci);
if (retval)
@ -121,6 +136,7 @@ static const struct hc_driver xhci_pci_hc_driver = {
.check_bandwidth = xhci_check_bandwidth,
.reset_bandwidth = xhci_reset_bandwidth,
.address_device = xhci_address_device,
.update_hub_device = xhci_update_hub_device,
/*
* scheduling support


@ -172,8 +172,9 @@ static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer
* have their chain bit cleared (so that each Link TRB is a separate TD).
*
* Section 6.4.4.1 of the 0.95 spec says link TRBs cannot have the chain bit
* set, but other sections talk about dealing with the chain bit set.
* Assume section 6.4.4.1 is wrong, and the chain bit can be set in a Link TRB.
* set, but other sections talk about dealing with the chain bit set. This was
* fixed in the 0.96 specification errata, but we have to assume that all 0.95
* xHCI hardware can't handle the chain bit being cleared on a link TRB.
*/
static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer)
{
@ -191,8 +192,14 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer
while (last_trb(xhci, ring, ring->enq_seg, next)) {
if (!consumer) {
if (ring != xhci->event_ring) {
next->link.control &= ~TRB_CHAIN;
next->link.control |= chain;
/* If we're not dealing with 0.95 hardware,
* carry over the chain bit of the previous TRB
* (which may mean the chain bit is cleared).
*/
if (!xhci_link_trb_quirk(xhci)) {
next->link.control &= ~TRB_CHAIN;
next->link.control |= chain;
}
/* Give this link TRB to the hardware */
wmb();
if (next->link.control & TRB_CYCLE)
@ -289,16 +296,18 @@ static void ring_ep_doorbell(struct xhci_hcd *xhci,
unsigned int slot_id,
unsigned int ep_index)
{
struct xhci_ring *ep_ring;
struct xhci_virt_ep *ep;
unsigned int ep_state;
u32 field;
__u32 __iomem *db_addr = &xhci->dba->doorbell[slot_id];
ep_ring = xhci->devs[slot_id]->ep_rings[ep_index];
ep = &xhci->devs[slot_id]->eps[ep_index];
ep_state = ep->ep_state;
/* Don't ring the doorbell for this endpoint if there are pending
* cancellations because we don't want to interrupt processing.
*/
if (!ep_ring->cancels_pending && !(ep_ring->state & SET_DEQ_PENDING)
&& !(ep_ring->state & EP_HALTED)) {
if (!ep->cancels_pending && !(ep_state & SET_DEQ_PENDING)
&& !(ep_state & EP_HALTED)) {
field = xhci_readl(xhci, db_addr) & DB_MASK;
xhci_writel(xhci, field | EPI_TO_DB(ep_index), db_addr);
/* Flush PCI posted writes - FIXME Matthew Wilcox says this
@ -354,7 +363,7 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
struct xhci_td *cur_td, struct xhci_dequeue_state *state)
{
struct xhci_virt_device *dev = xhci->devs[slot_id];
struct xhci_ring *ep_ring = dev->ep_rings[ep_index];
struct xhci_ring *ep_ring = dev->eps[ep_index].ring;
struct xhci_generic_trb *trb;
struct xhci_ep_ctx *ep_ctx;
dma_addr_t addr;
@ -362,7 +371,7 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
state->new_cycle_state = 0;
xhci_dbg(xhci, "Finding segment containing stopped TRB.\n");
state->new_deq_seg = find_trb_seg(cur_td->start_seg,
ep_ring->stopped_trb,
dev->eps[ep_index].stopped_trb,
&state->new_cycle_state);
if (!state->new_deq_seg)
BUG();
@ -442,9 +451,11 @@ static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
union xhci_trb *deq_ptr, u32 cycle_state);
void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
struct xhci_ring *ep_ring, unsigned int slot_id,
unsigned int ep_index, struct xhci_dequeue_state *deq_state)
unsigned int slot_id, unsigned int ep_index,
struct xhci_dequeue_state *deq_state)
{
struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
xhci_dbg(xhci, "Set TR Deq Ptr cmd, new deq seg = %p (0x%llx dma), "
"new deq ptr = %p (0x%llx dma), new cycle = %u\n",
deq_state->new_deq_seg,
@ -461,8 +472,7 @@ void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
* if the ring is running, and ringing the doorbell starts the
* ring running.
*/
ep_ring->state |= SET_DEQ_PENDING;
xhci_ring_cmd_db(xhci);
ep->ep_state |= SET_DEQ_PENDING;
}
/*
@ -481,6 +491,7 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
unsigned int slot_id;
unsigned int ep_index;
struct xhci_ring *ep_ring;
struct xhci_virt_ep *ep;
struct list_head *entry;
struct xhci_td *cur_td = 0;
struct xhci_td *last_unlinked_td;
@ -493,9 +504,10 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
memset(&deq_state, 0, sizeof(deq_state));
slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]);
ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]);
ep_ring = xhci->devs[slot_id]->ep_rings[ep_index];
ep = &xhci->devs[slot_id]->eps[ep_index];
ep_ring = ep->ring;
if (list_empty(&ep_ring->cancelled_td_list))
if (list_empty(&ep->cancelled_td_list))
return;
/* Fix up the ep ring first, so HW stops executing cancelled TDs.
@ -503,7 +515,7 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
* it. We're also in the event handler, so we can't get re-interrupted
* if another Stop Endpoint command completes
*/
list_for_each(entry, &ep_ring->cancelled_td_list) {
list_for_each(entry, &ep->cancelled_td_list) {
cur_td = list_entry(entry, struct xhci_td, cancelled_td_list);
xhci_dbg(xhci, "Cancelling TD starting at %p, 0x%llx (dma).\n",
cur_td->first_trb,
@ -512,7 +524,7 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
* If we stopped on the TD we need to cancel, then we have to
* move the xHC endpoint ring dequeue pointer past this TD.
*/
if (cur_td == ep_ring->stopped_td)
if (cur_td == ep->stopped_td)
xhci_find_new_dequeue_state(xhci, slot_id, ep_index, cur_td,
&deq_state);
else
@ -523,14 +535,15 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
* the cancelled TD list for URB completion later.
*/
list_del(&cur_td->td_list);
ep_ring->cancels_pending--;
ep->cancels_pending--;
}
last_unlinked_td = cur_td;
/* If necessary, queue a Set Transfer Ring Dequeue Pointer command */
if (deq_state.new_deq_ptr && deq_state.new_deq_seg) {
xhci_queue_new_dequeue_state(xhci, ep_ring,
xhci_queue_new_dequeue_state(xhci,
slot_id, ep_index, &deq_state);
xhci_ring_cmd_db(xhci);
} else {
/* Otherwise just ring the doorbell to restart the ring */
ring_ep_doorbell(xhci, slot_id, ep_index);
@ -543,7 +556,7 @@ static void handle_stopped_endpoint(struct xhci_hcd *xhci,
* So stop when we've completed the URB for the last TD we unlinked.
*/
do {
cur_td = list_entry(ep_ring->cancelled_td_list.next,
cur_td = list_entry(ep->cancelled_td_list.next,
struct xhci_td, cancelled_td_list);
list_del(&cur_td->cancelled_td_list);
@ -590,7 +603,7 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci,
slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]);
ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]);
dev = xhci->devs[slot_id];
ep_ring = dev->ep_rings[ep_index];
ep_ring = dev->eps[ep_index].ring;
ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
slot_ctx = xhci_get_slot_ctx(xhci, dev->out_ctx);
@ -634,7 +647,7 @@ static void handle_set_deq_completion(struct xhci_hcd *xhci,
ep_ctx->deq);
}
ep_ring->state &= ~SET_DEQ_PENDING;
dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING;
ring_ep_doorbell(xhci, slot_id, ep_index);
}
@ -644,18 +657,60 @@ static void handle_reset_ep_completion(struct xhci_hcd *xhci,
{
int slot_id;
unsigned int ep_index;
struct xhci_ring *ep_ring;
slot_id = TRB_TO_SLOT_ID(trb->generic.field[3]);
ep_index = TRB_TO_EP_INDEX(trb->generic.field[3]);
ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
/* This command will only fail if the endpoint wasn't halted,
* but we don't care.
*/
xhci_dbg(xhci, "Ignoring reset ep completion code of %u\n",
(unsigned int) GET_COMP_CODE(event->status));
/* Clear our internal halted state and restart the ring */
xhci->devs[slot_id]->ep_rings[ep_index]->state &= ~EP_HALTED;
ring_ep_doorbell(xhci, slot_id, ep_index);
/* HW with the reset endpoint quirk needs to have a configure endpoint
* command complete before the endpoint can be used. Queue that here
* because the HW can't handle two commands being queued in a row.
*/
if (xhci->quirks & XHCI_RESET_EP_QUIRK) {
xhci_dbg(xhci, "Queueing configure endpoint command\n");
xhci_queue_configure_endpoint(xhci,
xhci->devs[slot_id]->in_ctx->dma, slot_id,
false);
xhci_ring_cmd_db(xhci);
} else {
/* Clear our internal halted state and restart the ring */
xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_HALTED;
ring_ep_doorbell(xhci, slot_id, ep_index);
}
}
/* Check to see if a command in the device's command queue matches this one.
* Signal the completion or free the command, and return 1. Return 0 if the
* completed command isn't at the head of the command list.
*/
static int handle_cmd_in_cmd_wait_list(struct xhci_hcd *xhci,
struct xhci_virt_device *virt_dev,
struct xhci_event_cmd *event)
{
struct xhci_command *command;
if (list_empty(&virt_dev->cmd_list))
return 0;
command = list_entry(virt_dev->cmd_list.next,
struct xhci_command, cmd_list);
if (xhci->cmd_ring->dequeue != command->command_trb)
return 0;
command->status =
GET_COMP_CODE(event->status);
list_del(&command->cmd_list);
if (command->completion)
complete(command->completion);
else
xhci_free_command(xhci, command);
return 1;
}
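/*
 * A minimal standalone model of the matching rule above (not from the
 * patch): only the command at the head of the device's wait list may be
 * completed, and only if the command ring's dequeue pointer is sitting
 * on the TRB that command occupied.  The types here are illustrative
 * stand-ins for xhci_command and the cmd_list handling.
 */
struct example_cmd {
	void *command_trb;		/* TRB this command was queued on */
	struct example_cmd *next;	/* next waiter, oldest first */
};
static int example_cmd_matches(struct example_cmd *wait_list_head,
			       void *ring_dequeue)
{
	return wait_list_head && wait_list_head->command_trb == ring_dequeue;
}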
static void handle_cmd_completion(struct xhci_hcd *xhci,
@ -664,6 +719,11 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
int slot_id = TRB_TO_SLOT_ID(event->flags);
u64 cmd_dma;
dma_addr_t cmd_dequeue_dma;
struct xhci_input_control_ctx *ctrl_ctx;
struct xhci_virt_device *virt_dev;
unsigned int ep_index;
struct xhci_ring *ep_ring;
unsigned int ep_state;
cmd_dma = event->cmd_trb;
cmd_dequeue_dma = xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
@ -691,6 +751,47 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
xhci_free_virt_device(xhci, slot_id);
break;
case TRB_TYPE(TRB_CONFIG_EP):
virt_dev = xhci->devs[slot_id];
if (handle_cmd_in_cmd_wait_list(xhci, virt_dev, event))
break;
/*
* Configure endpoint commands can come from the USB core
* configuration or alt setting changes, or because the HW
* needed an extra configure endpoint command after a reset
* endpoint command. In the latter case, the xHCI driver is
* not waiting on the configure endpoint command.
*/
ctrl_ctx = xhci_get_input_control_ctx(xhci,
virt_dev->in_ctx);
/* Input ctx add_flags are the endpoint index plus one */
ep_index = xhci_last_valid_endpoint(ctrl_ctx->add_flags) - 1;
ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
if (!ep_ring) {
/* This must have been an initial configure endpoint */
xhci->devs[slot_id]->cmd_status =
GET_COMP_CODE(event->status);
complete(&xhci->devs[slot_id]->cmd_completion);
break;
}
ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
xhci_dbg(xhci, "Completed config ep cmd - last ep index = %d, "
"state = %d\n", ep_index, ep_state);
if (xhci->quirks & XHCI_RESET_EP_QUIRK &&
ep_state & EP_HALTED) {
/* Clear our internal halted state and restart ring */
xhci->devs[slot_id]->eps[ep_index].ep_state &=
~EP_HALTED;
ring_ep_doorbell(xhci, slot_id, ep_index);
} else {
xhci->devs[slot_id]->cmd_status =
GET_COMP_CODE(event->status);
complete(&xhci->devs[slot_id]->cmd_completion);
}
break;
case TRB_TYPE(TRB_EVAL_CONTEXT):
virt_dev = xhci->devs[slot_id];
if (handle_cmd_in_cmd_wait_list(xhci, virt_dev, event))
break;
xhci->devs[slot_id]->cmd_status = GET_COMP_CODE(event->status);
complete(&xhci->devs[slot_id]->cmd_completion);
break;
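/*
 * A minimal standalone sketch of the add_flags decoding used in the
 * TRB_CONFIG_EP case above: the input control context sets bit
 * (ep_index + 1) for each added endpoint, so the position of the highest
 * set bit, minus one, is the last endpoint index the command touched.
 * example_last_valid_ep() is a hypothetical stand-in for
 * xhci_last_valid_endpoint(); only the bit layout is taken from the patch.
 */
static unsigned int example_last_valid_ep(u32 added_ctxs)
{
	unsigned int last = 0;
	while (added_ctxs >>= 1)
		last++;
	return last;
}
/* Adding endpoint index 2 sets add_flags bit 3 (0x8), so
 * example_last_valid_ep(0x8) == 3 and ep_index == 3 - 1 == 2.
 */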
@ -805,7 +906,9 @@ static int handle_tx_event(struct xhci_hcd *xhci,
struct xhci_transfer_event *event)
{
struct xhci_virt_device *xdev;
struct xhci_virt_ep *ep;
struct xhci_ring *ep_ring;
unsigned int slot_id;
int ep_index;
struct xhci_td *td = 0;
dma_addr_t event_dma;
@ -814,9 +917,11 @@ static int handle_tx_event(struct xhci_hcd *xhci,
struct urb *urb = 0;
int status = -EINPROGRESS;
struct xhci_ep_ctx *ep_ctx;
u32 trb_comp_code;
xhci_dbg(xhci, "In %s\n", __func__);
xdev = xhci->devs[TRB_TO_SLOT_ID(event->flags)];
slot_id = TRB_TO_SLOT_ID(event->flags);
xdev = xhci->devs[slot_id];
if (!xdev) {
xhci_err(xhci, "ERROR Transfer event pointed to bad slot\n");
return -ENODEV;
@ -825,7 +930,8 @@ static int handle_tx_event(struct xhci_hcd *xhci,
/* Endpoint ID is 1 based, our index is zero based */
ep_index = TRB_TO_EP_ID(event->flags) - 1;
xhci_dbg(xhci, "%s - ep index = %d\n", __func__, ep_index);
ep_ring = xdev->ep_rings[ep_index];
ep = &xdev->eps[ep_index];
ep_ring = ep->ring;
ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
if (!ep_ring || (ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_DISABLED) {
xhci_err(xhci, "ERROR Transfer event pointed to disabled endpoint\n");
@ -870,7 +976,8 @@ static int handle_tx_event(struct xhci_hcd *xhci,
(unsigned int) event->flags);
/* Look for common error cases */
switch (GET_COMP_CODE(event->transfer_len)) {
trb_comp_code = GET_COMP_CODE(event->transfer_len);
switch (trb_comp_code) {
/* Skip codes that require special handling depending on
* transfer type
*/
@ -885,7 +992,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
break;
case COMP_STALL:
xhci_warn(xhci, "WARN: Stalled endpoint\n");
ep_ring->state |= EP_HALTED;
ep->ep_state |= EP_HALTED;
status = -EPIPE;
break;
case COMP_TRB_ERR:
@ -913,7 +1020,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
/* Was this a control transfer? */
if (usb_endpoint_xfer_control(&td->urb->ep->desc)) {
xhci_debug_trb(xhci, xhci->event_ring->dequeue);
switch (GET_COMP_CODE(event->transfer_len)) {
switch (trb_comp_code) {
case COMP_SUCCESS:
if (event_trb == ep_ring->dequeue) {
xhci_warn(xhci, "WARN: Success on ctrl setup TRB without IOC set??\n");
@ -928,8 +1035,37 @@ static int handle_tx_event(struct xhci_hcd *xhci,
break;
case COMP_SHORT_TX:
xhci_warn(xhci, "WARN: short transfer on control ep\n");
status = -EREMOTEIO;
if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
status = -EREMOTEIO;
else
status = 0;
break;
case COMP_BABBLE:
/* The 0.95 spec says a babbling control endpoint
* is not halted. The 0.96 spec says it is. Some HW
* claims to be 0.95 compliant, but it halts the control
* endpoint anyway. Check if a babble halted the
* endpoint.
*/
if (ep_ctx->ep_info != EP_STATE_HALTED)
break;
/* else fall through */
case COMP_STALL:
/* Did we transfer part of the data (middle) phase? */
if (event_trb != ep_ring->dequeue &&
event_trb != td->last_trb)
td->urb->actual_length =
td->urb->transfer_buffer_length
- TRB_LEN(event->transfer_len);
else
td->urb->actual_length = 0;
ep->stopped_td = td;
ep->stopped_trb = event_trb;
xhci_queue_reset_ep(xhci, slot_id, ep_index);
xhci_cleanup_stalled_ring(xhci, td->urb->dev, ep_index);
xhci_ring_cmd_db(xhci);
goto td_cleanup;
default:
/* Others already handled above */
break;
@ -943,7 +1079,10 @@ static int handle_tx_event(struct xhci_hcd *xhci,
if (event_trb == td->last_trb) {
if (td->urb->actual_length != 0) {
/* Don't overwrite a previously set error code */
if (status == -EINPROGRESS || status == 0)
if ((status == -EINPROGRESS ||
status == 0) &&
(td->urb->transfer_flags
& URB_SHORT_NOT_OK))
/* Did we already see a short data stage? */
status = -EREMOTEIO;
} else {
@ -952,7 +1091,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
}
} else {
/* Maybe the event was for the data stage? */
if (GET_COMP_CODE(event->transfer_len) != COMP_STOP_INVAL) {
if (trb_comp_code != COMP_STOP_INVAL) {
/* We didn't stop on a link TRB in the middle */
td->urb->actual_length =
td->urb->transfer_buffer_length -
@ -964,7 +1103,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
}
}
} else {
switch (GET_COMP_CODE(event->transfer_len)) {
switch (trb_comp_code) {
case COMP_SUCCESS:
/* Double check that the HW transferred everything. */
if (event_trb != td->last_trb) {
@ -975,7 +1114,12 @@ static int handle_tx_event(struct xhci_hcd *xhci,
else
status = 0;
} else {
xhci_dbg(xhci, "Successful bulk transfer!\n");
if (usb_endpoint_xfer_bulk(&td->urb->ep->desc))
xhci_dbg(xhci, "Successful bulk "
"transfer!\n");
else
xhci_dbg(xhci, "Successful interrupt "
"transfer!\n");
status = 0;
}
break;
@ -1001,11 +1145,17 @@ static int handle_tx_event(struct xhci_hcd *xhci,
td->urb->actual_length =
td->urb->transfer_buffer_length -
TRB_LEN(event->transfer_len);
if (td->urb->actual_length < 0) {
if (td->urb->transfer_buffer_length <
td->urb->actual_length) {
xhci_warn(xhci, "HC gave bad length "
"of %d bytes left\n",
TRB_LEN(event->transfer_len));
td->urb->actual_length = 0;
if (td->urb->transfer_flags &
URB_SHORT_NOT_OK)
status = -EREMOTEIO;
else
status = 0;
}
/* Don't overwrite a previously set error code */
if (status == -EINPROGRESS) {
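/*
 * A minimal standalone sketch of the short-transfer bookkeeping above,
 * assuming (as the code implies) that the event's length field reports
 * the bytes left untransferred: e.g. a 512-byte bulk request that
 * completes with 128 bytes left over has actually moved 384 bytes, and
 * that only counts as an error if the driver set URB_SHORT_NOT_OK.
 */
static int example_short_tx(unsigned int buf_len, unsigned int bytes_left,
			    int short_not_ok, unsigned int *actual_len)
{
	*actual_len = buf_len - bytes_left;
	return short_not_ok ? -EREMOTEIO : 0;
}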
@ -1041,30 +1191,31 @@ static int handle_tx_event(struct xhci_hcd *xhci,
/* If the ring didn't stop on a Link or No-op TRB, add
* in the actual bytes transferred from the Normal TRB
*/
if (GET_COMP_CODE(event->transfer_len) != COMP_STOP_INVAL)
if (trb_comp_code != COMP_STOP_INVAL)
td->urb->actual_length +=
TRB_LEN(cur_trb->generic.field[2]) -
TRB_LEN(event->transfer_len);
}
}
if (GET_COMP_CODE(event->transfer_len) == COMP_STOP_INVAL ||
GET_COMP_CODE(event->transfer_len) == COMP_STOP) {
if (trb_comp_code == COMP_STOP_INVAL ||
trb_comp_code == COMP_STOP) {
/* The Endpoint Stop Command completion will take care of any
* stopped TDs. A stopped TD may be restarted, so don't update
* the ring dequeue pointer or take this TD off any lists yet.
*/
ep_ring->stopped_td = td;
ep_ring->stopped_trb = event_trb;
ep->stopped_td = td;
ep->stopped_trb = event_trb;
} else {
if (GET_COMP_CODE(event->transfer_len) == COMP_STALL) {
if (trb_comp_code == COMP_STALL ||
trb_comp_code == COMP_BABBLE) {
/* The transfer is completed from the driver's
* perspective, but we need to issue a set dequeue
* command for this stalled endpoint to move the dequeue
* pointer past the TD. We can't do that here because
* the halt condition must be cleared first.
*/
ep_ring->stopped_td = td;
ep_ring->stopped_trb = event_trb;
ep->stopped_td = td;
ep->stopped_trb = event_trb;
} else {
/* Update ring dequeue pointer */
while (ep_ring->dequeue != td->last_trb)
@ -1072,16 +1223,41 @@ static int handle_tx_event(struct xhci_hcd *xhci,
inc_deq(xhci, ep_ring, false);
}
td_cleanup:
/* Clean up the endpoint's TD list */
urb = td->urb;
/* Do one last check of the actual transfer length.
* If the host controller said we transferred more data than
* the buffer length, urb->actual_length will be a very big
* number (since it's unsigned). Play it safe and say we didn't
* transfer anything.
*/
if (urb->actual_length > urb->transfer_buffer_length) {
xhci_warn(xhci, "URB transfer length is wrong, "
"xHC issue? req. len = %u, "
"act. len = %u\n",
urb->transfer_buffer_length,
urb->actual_length);
urb->actual_length = 0;
if (td->urb->transfer_flags & URB_SHORT_NOT_OK)
status = -EREMOTEIO;
else
status = 0;
}
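/*
 * Worked example for the guard above: with a 512-byte buffer and a bogus
 * completion claiming 600 bytes transferred, 512 - 600 computed in
 * unsigned 32-bit arithmetic wraps to 4294967208, which exceeds
 * transfer_buffer_length, so actual_length is clamped back to 0 here.
 */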
list_del(&td->td_list);
/* Was this TD slated to be cancelled but completed anyway? */
if (!list_empty(&td->cancelled_td_list)) {
list_del(&td->cancelled_td_list);
ep_ring->cancels_pending--;
ep->cancels_pending--;
}
/* Leave the TD around for the reset endpoint function to use */
if (GET_COMP_CODE(event->transfer_len) != COMP_STALL) {
/* Leave the TD around for the reset endpoint function to use
* (but only if it's not a control endpoint, since we already
* queued the Set TR dequeue pointer command for stalled
* control endpoints).
*/
if (usb_endpoint_xfer_control(&urb->ep->desc) ||
(trb_comp_code != COMP_STALL &&
trb_comp_code != COMP_BABBLE)) {
kfree(td);
}
urb->hcpriv = NULL;
@ -1094,7 +1270,7 @@ cleanup:
if (urb) {
usb_hcd_unlink_urb_from_ep(xhci_to_hcd(xhci), urb);
xhci_dbg(xhci, "Giveback URB %p, len = %d, status = %d\n",
urb, td->urb->actual_length, status);
urb, urb->actual_length, status);
spin_unlock(&xhci->lock);
usb_hcd_giveback_urb(xhci_to_hcd(xhci), urb, status);
spin_lock(&xhci->lock);
@ -1235,7 +1411,7 @@ static int prepare_transfer(struct xhci_hcd *xhci,
{
int ret;
struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
ret = prepare_ring(xhci, xdev->ep_rings[ep_index],
ret = prepare_ring(xhci, xdev->eps[ep_index].ring,
ep_ctx->ep_info & EP_STATE_MASK,
num_trbs, mem_flags);
if (ret)
@ -1255,9 +1431,9 @@ static int prepare_transfer(struct xhci_hcd *xhci,
(*td)->urb = urb;
urb->hcpriv = (void *) (*td);
/* Add this TD to the tail of the endpoint ring's TD list */
list_add_tail(&(*td)->td_list, &xdev->ep_rings[ep_index]->td_list);
(*td)->start_seg = xdev->ep_rings[ep_index]->enq_seg;
(*td)->first_trb = xdev->ep_rings[ep_index]->enqueue;
list_add_tail(&(*td)->td_list, &xdev->eps[ep_index].ring->td_list);
(*td)->start_seg = xdev->eps[ep_index].ring->enq_seg;
(*td)->first_trb = xdev->eps[ep_index].ring->enqueue;
return 0;
}
@ -1335,6 +1511,47 @@ static void giveback_first_trb(struct xhci_hcd *xhci, int slot_id,
ring_ep_doorbell(xhci, slot_id, ep_index);
}
/*
* xHCI uses normal TRBs for both bulk and interrupt. When the interrupt
* endpoint is to be serviced, the xHC will consume (at most) one TD. A TD
* (comprised of sg list entries) can take several service intervals to
* transmit.
*/
int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
struct urb *urb, int slot_id, unsigned int ep_index)
{
struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci,
xhci->devs[slot_id]->out_ctx, ep_index);
int xhci_interval;
int ep_interval;
xhci_interval = EP_INTERVAL_TO_UFRAMES(ep_ctx->ep_info);
ep_interval = urb->interval;
/* Convert to microframes */
if (urb->dev->speed == USB_SPEED_LOW ||
urb->dev->speed == USB_SPEED_FULL)
ep_interval *= 8;
/* FIXME change this to a warning and a suggestion to use the new API
* to set the polling interval (once the API is added).
*/
if (xhci_interval != ep_interval) {
if (!printk_ratelimit())
dev_dbg(&urb->dev->dev, "Driver uses different interval"
" (%d microframe%s) than xHCI "
"(%d microframe%s)\n",
ep_interval,
ep_interval == 1 ? "" : "s",
xhci_interval,
xhci_interval == 1 ? "" : "s");
urb->interval = xhci_interval;
/* Convert back to frames for LS/FS devices */
if (urb->dev->speed == USB_SPEED_LOW ||
urb->dev->speed == USB_SPEED_FULL)
urb->interval /= 8;
}
return xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb, slot_id, ep_index);
}
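/*
 * A minimal standalone illustration of the interval comparison above,
 * using only the EP_INTERVAL_TO_UFRAMES() decoding from xhci.h: the
 * endpoint context stores the interval as a power-of-two exponent in
 * 125 us microframes, while low/full-speed drivers express urb->interval
 * in 1 ms frames, hence the multiply/divide by 8.
 */
static int example_intervals_agree(u32 ep_info, unsigned int fs_frames)
{
	/* 1 << exponent, exactly what EP_INTERVAL_TO_UFRAMES() computes */
	unsigned int hc_uframes = 1 << ((ep_info >> 16) & 0xff);
	return hc_uframes == fs_frames * 8;
}
/* Example: an exponent of 5 decodes to 32 microframes, and a full-speed
 * URB asking for interval = 4 frames is also 4 * 8 = 32 microframes, so
 * urb->interval is left alone.
 */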
static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
struct urb *urb, int slot_id, unsigned int ep_index)
{
@ -1350,7 +1567,7 @@ static int queue_bulk_sg_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
struct xhci_generic_trb *start_trb;
int start_cycle;
ep_ring = xhci->devs[slot_id]->ep_rings[ep_index];
ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
num_trbs = count_sg_trbs_needed(xhci, urb);
num_sgs = urb->num_sgs;
@ -1483,7 +1700,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
if (urb->sg)
return queue_bulk_sg_tx(xhci, mem_flags, urb, slot_id, ep_index);
ep_ring = xhci->devs[slot_id]->ep_rings[ep_index];
ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
num_trbs = 0;
/* How much data is (potentially) left before the 64KB boundary? */
@ -1594,7 +1811,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
u32 field, length_field;
struct xhci_td *td;
ep_ring = xhci->devs[slot_id]->ep_rings[ep_index];
ep_ring = xhci->devs[slot_id]->eps[ep_index].ring;
/*
* Need to copy setup packet into setup TRB, so we can't use the setup
@ -1677,12 +1894,27 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
/**** Command Ring Operations ****/
/* Generic function for queueing a command TRB on the command ring */
static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2, u32 field3, u32 field4)
/* Generic function for queueing a command TRB on the command ring.
* Check to make sure there's room on the command ring for one command TRB.
* Also check that there's room reserved for commands that must not fail.
* If this is a command that must not fail, meaning command_must_succeed = TRUE,
* then only check for the number of reserved spots.
* Don't decrement xhci->cmd_ring_reserved_trbs after we've queued the TRB
* because the command event handler may want to resubmit a failed command.
*/
static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2,
u32 field3, u32 field4, bool command_must_succeed)
{
if (!room_on_ring(xhci, xhci->cmd_ring, 1)) {
int reserved_trbs = xhci->cmd_ring_reserved_trbs;
if (!command_must_succeed)
reserved_trbs++;
if (!room_on_ring(xhci, xhci->cmd_ring, reserved_trbs)) {
if (!in_interrupt())
xhci_err(xhci, "ERR: No room for command on command ring\n");
if (command_must_succeed)
xhci_err(xhci, "ERR: Reserved TRB counting for "
"unfailable commands failed.\n");
return -ENOMEM;
}
queue_trb(xhci, xhci->cmd_ring, false, field1, field2, field3,
@ -1693,7 +1925,7 @@ static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2, u32 fiel
/* Queue a no-op command on the command ring */
static int queue_cmd_noop(struct xhci_hcd *xhci)
{
return queue_command(xhci, 0, 0, 0, TRB_TYPE(TRB_CMD_NOOP));
return queue_command(xhci, 0, 0, 0, TRB_TYPE(TRB_CMD_NOOP), false);
}
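/*
 * A minimal standalone sketch of the reservation rule enforced in
 * queue_command() above.  free_trbs stands in for whatever
 * room_on_ring() derives from the ring state: an ordinary command must
 * leave every reserved slot untouched, while a "must succeed" command is
 * allowed to consume one of the reserved slots.
 */
static int example_have_room(unsigned int free_trbs,
		unsigned int reserved_trbs, bool command_must_succeed)
{
	unsigned int needed = reserved_trbs;
	if (!command_must_succeed)
		needed++;	/* this command plus the untouched reserve */
	return free_trbs >= needed;
}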
/*
@ -1712,7 +1944,7 @@ void *xhci_setup_one_noop(struct xhci_hcd *xhci)
int xhci_queue_slot_control(struct xhci_hcd *xhci, u32 trb_type, u32 slot_id)
{
return queue_command(xhci, 0, 0, 0,
TRB_TYPE(trb_type) | SLOT_ID_FOR_TRB(slot_id));
TRB_TYPE(trb_type) | SLOT_ID_FOR_TRB(slot_id), false);
}
/* Queue an address device command TRB */
@ -1721,16 +1953,28 @@ int xhci_queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
{
return queue_command(xhci, lower_32_bits(in_ctx_ptr),
upper_32_bits(in_ctx_ptr), 0,
TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id));
TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id),
false);
}
/* Queue a configure endpoint command TRB */
int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
u32 slot_id, bool command_must_succeed)
{
return queue_command(xhci, lower_32_bits(in_ctx_ptr),
upper_32_bits(in_ctx_ptr), 0,
TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(slot_id),
command_must_succeed);
}
/* Queue an evaluate context command TRB */
int xhci_queue_evaluate_context(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
u32 slot_id)
{
return queue_command(xhci, lower_32_bits(in_ctx_ptr),
upper_32_bits(in_ctx_ptr), 0,
TRB_TYPE(TRB_CONFIG_EP) | SLOT_ID_FOR_TRB(slot_id));
TRB_TYPE(TRB_EVAL_CONTEXT) | SLOT_ID_FOR_TRB(slot_id),
false);
}
int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, int slot_id,
@ -1741,7 +1985,7 @@ int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, int slot_id,
u32 type = TRB_TYPE(TRB_STOP_RING);
return queue_command(xhci, 0, 0, 0,
trb_slot_id | trb_ep_index | type);
trb_slot_id | trb_ep_index | type, false);
}
/* Set Transfer Ring Dequeue Pointer command.
@ -1765,7 +2009,7 @@ static int queue_set_tr_deq(struct xhci_hcd *xhci, int slot_id,
}
return queue_command(xhci, lower_32_bits(addr) | cycle_state,
upper_32_bits(addr), 0,
trb_slot_id | trb_ep_index | type);
trb_slot_id | trb_ep_index | type, false);
}
int xhci_queue_reset_ep(struct xhci_hcd *xhci, int slot_id,
@ -1775,5 +2019,6 @@ int xhci_queue_reset_ep(struct xhci_hcd *xhci, int slot_id,
u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
u32 type = TRB_TYPE(TRB_RESET_EP);
return queue_command(xhci, 0, 0, 0, trb_slot_id | trb_ep_index | type);
return queue_command(xhci, 0, 0, 0, trb_slot_id | trb_ep_index | type,
false);
}


@ -509,6 +509,8 @@ struct xhci_slot_ctx {
#define MAX_EXIT (0xffff)
/* Root hub port number that is needed to access the USB device */
#define ROOT_HUB_PORT(p) (((p) & 0xff) << 16)
/* Maximum number of ports under a hub device */
#define XHCI_MAX_PORTS(p) (((p) & 0xff) << 24)
/* tt_info bitmasks */
/*
@ -522,6 +524,7 @@ struct xhci_slot_ctx {
* '0' if the device is not low or full speed.
*/
#define TT_PORT (0xff << 8)
#define TT_THINK_TIME(p) (((p) & 0x3) << 16)
/* dev_state bitmasks */
/* USB device address - assigned by the HC */
@ -581,6 +584,7 @@ struct xhci_ep_ctx {
/* bit 15 is Linear Stream Array */
/* Interval - period between requests to an endpoint - 125u increments. */
#define EP_INTERVAL(p) ((p & 0xff) << 16)
#define EP_INTERVAL_TO_UFRAMES(p) (1 << (((p) >> 16) & 0xff))
/* ep_info2 bitmasks */
/*
@ -589,6 +593,7 @@ struct xhci_ep_ctx {
*/
#define FORCE_EVENT (0x1)
#define ERROR_COUNT(p) (((p) & 0x3) << 1)
#define CTX_TO_EP_TYPE(p) (((p) >> 3) & 0x7)
#define EP_TYPE(p) ((p) << 3)
#define ISOC_OUT_EP 1
#define BULK_OUT_EP 2
@ -601,6 +606,8 @@ struct xhci_ep_ctx {
/* bit 7 is Host Initiate Disable - for disabling stream selection */
#define MAX_BURST(p) (((p)&0xff) << 8)
#define MAX_PACKET(p) (((p)&0xffff) << 16)
#define MAX_PACKET_MASK (0xffff << 16)
#define MAX_PACKET_DECODED(p) (((p) >> 16) & 0xffff)
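/* Worked round trip for the ep_info2 packing above (values chosen as an
 * example): with
 *
 *	info2 = EP_TYPE(BULK_OUT_EP) | MAX_PACKET(512)
 *
 * CTX_TO_EP_TYPE(info2) gives back BULK_OUT_EP (2), and both
 * MAX_PACKET_DECODED(info2) and (info2 & MAX_PACKET_MASK) >> 16
 * recover the 512-byte wMaxPacketSize.
 */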
/**
@ -616,11 +623,44 @@ struct xhci_input_control_ctx {
u32 rsvd2[6];
};
/* Represents everything that is needed to issue a command on the command ring.
* It's useful to pre-allocate these for commands that cannot fail due to
* out-of-memory errors, like freeing streams.
*/
struct xhci_command {
/* Input context for changing device state */
struct xhci_container_ctx *in_ctx;
u32 status;
/* If completion is null, no one is waiting on this command
* and the structure can be freed after the command completes.
*/
struct completion *completion;
union xhci_trb *command_trb;
struct list_head cmd_list;
};
/* drop context bitmasks */
#define DROP_EP(x) (0x1 << x)
/* add context bitmasks */
#define ADD_EP(x) (0x1 << x)
struct xhci_virt_ep {
struct xhci_ring *ring;
/* Temporary storage in case the configure endpoint command fails and we
* have to restore the device state to the previous state
*/
struct xhci_ring *new_ring;
unsigned int ep_state;
#define SET_DEQ_PENDING (1 << 0)
#define EP_HALTED (1 << 1)
/* ---- Related to URB cancellation ---- */
struct list_head cancelled_td_list;
unsigned int cancels_pending;
/* The TRB that was last reported in a stopped endpoint ring */
union xhci_trb *stopped_trb;
struct xhci_td *stopped_td;
};
struct xhci_virt_device {
/*
* Commands to the hardware are passed an "input context" that
@ -633,16 +673,11 @@ struct xhci_virt_device {
struct xhci_container_ctx *out_ctx;
/* Used for addressing devices and configuration changes */
struct xhci_container_ctx *in_ctx;
/* FIXME when stream support is added */
struct xhci_ring *ep_rings[31];
/* Temporary storage in case the configure endpoint command fails and we
* have to restore the device state to the previous state
*/
struct xhci_ring *new_ep_rings[31];
struct xhci_virt_ep eps[31];
struct completion cmd_completion;
/* Status of the last command issued for this device */
u32 cmd_status;
struct list_head cmd_list;
};
@ -905,6 +940,8 @@ union xhci_trb {
* It must also be greater than 16.
*/
#define TRBS_PER_SEGMENT 64
/* Allow two commands + a link TRB, along with any reserved command TRBs */
#define MAX_RSVD_CMD_TRBS (TRBS_PER_SEGMENT - 3)
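/* Worked example: with TRBS_PER_SEGMENT = 64 this caps reservations at
 * 61 TRBs, leaving room on the segment for two queued commands plus its
 * link TRB.
 */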
#define SEGMENT_SIZE (TRBS_PER_SEGMENT*16)
/* TRB buffer pointers can't cross 64KB boundaries */
#define TRB_MAX_BUFF_SHIFT 16
@ -926,6 +963,12 @@ struct xhci_td {
union xhci_trb *last_trb;
};
struct xhci_dequeue_state {
struct xhci_segment *new_deq_seg;
union xhci_trb *new_deq_ptr;
int new_cycle_state;
};
struct xhci_ring {
struct xhci_segment *first_seg;
union xhci_trb *enqueue;
@ -935,15 +978,6 @@ struct xhci_ring {
struct xhci_segment *deq_seg;
unsigned int deq_updates;
struct list_head td_list;
/* ---- Related to URB cancellation ---- */
struct list_head cancelled_td_list;
unsigned int cancels_pending;
unsigned int state;
#define SET_DEQ_PENDING (1 << 0)
#define EP_HALTED (1 << 1)
/* The TRB that was last reported in a stopped endpoint ring */
union xhci_trb *stopped_trb;
struct xhci_td *stopped_td;
/*
* Write the cycle state into the TRB cycle field to give ownership of
* the TRB to the host controller (if we are the producer), or to check
@ -952,12 +986,6 @@ struct xhci_ring {
u32 cycle_state;
};
struct xhci_dequeue_state {
struct xhci_segment *new_deq_seg;
union xhci_trb *new_deq_ptr;
int new_cycle_state;
};
struct xhci_erst_entry {
/* 64-bit event ring segment address */
u64 seg_addr;
@ -1034,6 +1062,7 @@ struct xhci_hcd {
/* data structures */
struct xhci_device_context_array *dcbaa;
struct xhci_ring *cmd_ring;
unsigned int cmd_ring_reserved_trbs;
struct xhci_ring *event_ring;
struct xhci_erst erst;
/* Scratchpad */
@ -1058,6 +1087,9 @@ struct xhci_hcd {
int noops_submitted;
int noops_handled;
int error_bitmask;
unsigned int quirks;
#define XHCI_LINK_TRB_QUIRK (1 << 0)
#define XHCI_RESET_EP_QUIRK (1 << 1)
};
/* For testing purposes */
@ -1136,6 +1168,13 @@ static inline void xhci_write_64(struct xhci_hcd *xhci,
writel(val_hi, ptr + 1);
}
static inline int xhci_link_trb_quirk(struct xhci_hcd *xhci)
{
u32 temp = xhci_readl(xhci, &xhci->cap_regs->hc_capbase);
return ((HC_VERSION(temp) == 0x95) &&
(xhci->quirks & XHCI_LINK_TRB_QUIRK));
}
/* xHCI debugging */
void xhci_print_ir_set(struct xhci_hcd *xhci, struct xhci_intr_reg *ir_set, int set_num);
void xhci_print_registers(struct xhci_hcd *xhci);
@ -1158,11 +1197,24 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id, struct usb_device
int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev);
unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc);
unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc);
unsigned int xhci_get_endpoint_flag_from_index(unsigned int ep_index);
unsigned int xhci_last_valid_endpoint(u32 added_ctxs);
void xhci_endpoint_zero(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev, struct usb_host_endpoint *ep);
void xhci_endpoint_copy(struct xhci_hcd *xhci,
struct xhci_container_ctx *in_ctx,
struct xhci_container_ctx *out_ctx,
unsigned int ep_index);
void xhci_slot_copy(struct xhci_hcd *xhci,
struct xhci_container_ctx *in_ctx,
struct xhci_container_ctx *out_ctx);
int xhci_endpoint_init(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev,
struct usb_device *udev, struct usb_host_endpoint *ep,
gfp_t mem_flags);
void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring);
struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci,
bool allocate_completion, gfp_t mem_flags);
void xhci_free_command(struct xhci_hcd *xhci,
struct xhci_command *command);
#ifdef CONFIG_PCI
/* xHCI PCI glue */
@ -1182,6 +1234,8 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd);
int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev);
void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev);
int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev);
int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
struct usb_tt *tt, gfp_t mem_flags);
int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags);
int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status);
int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev, struct usb_host_endpoint *ep);
@ -1205,7 +1259,11 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags, struct urb *urb,
int slot_id, unsigned int ep_index);
int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, struct urb *urb,
int slot_id, unsigned int ep_index);
int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags, struct urb *urb,
int slot_id, unsigned int ep_index);
int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
u32 slot_id, bool command_must_succeed);
int xhci_queue_evaluate_context(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
u32 slot_id);
int xhci_queue_reset_ep(struct xhci_hcd *xhci, int slot_id,
unsigned int ep_index);
@ -1213,8 +1271,13 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
unsigned int slot_id, unsigned int ep_index,
struct xhci_td *cur_td, struct xhci_dequeue_state *state);
void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
struct xhci_ring *ep_ring, unsigned int slot_id,
unsigned int ep_index, struct xhci_dequeue_state *deq_state);
unsigned int slot_id, unsigned int ep_index,
struct xhci_dequeue_state *deq_state);
void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
struct usb_device *udev, unsigned int ep_index);
void xhci_queue_config_ep_quirk(struct xhci_hcd *xhci,
unsigned int slot_id, unsigned int ep_index,
struct xhci_dequeue_state *deq_state);
/* xHCI roothub code */
int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex,


@ -653,33 +653,6 @@ static struct scsi_host_template mts_scsi_host_template = {
.max_sectors= 256, /* 128 K */
};
struct vendor_product
{
char* name;
enum
{
mts_sup_unknown=0,
mts_sup_alpha,
mts_sup_full
}
support_status;
} ;
/* These are taken from the msmUSB.inf file on the Windows driver CD */
static const struct vendor_product mts_supported_products[] =
{
{ "Phantom 336CX", mts_sup_unknown},
{ "Phantom 336CX", mts_sup_unknown},
{ "Scanmaker X6", mts_sup_alpha},
{ "Phantom C6", mts_sup_unknown},
{ "Phantom 336CX", mts_sup_unknown},
{ "ScanMaker V6USL", mts_sup_unknown},
{ "ScanMaker V6USL", mts_sup_unknown},
{ "Scanmaker V6UL", mts_sup_unknown},
{ "Scanmaker V6UPL", mts_sup_alpha},
};
/* The entries of microtek_table must correspond, line-by-line to
the entries of mts_supported_products[]. */
@ -711,7 +684,6 @@ static int mts_usb_probe(struct usb_interface *intf,
int err_retval = -ENOMEM;
struct mts_desc * new_desc;
struct vendor_product const* p;
struct usb_device *dev = interface_to_usbdev (intf);
/* the current altsetting on the interface we're probing */
@ -726,15 +698,6 @@ static int mts_usb_probe(struct usb_interface *intf,
MTS_DEBUG_GOT_HERE();
p = &mts_supported_products[id - mts_usb_ids];
MTS_DEBUG_GOT_HERE();
MTS_DEBUG( "found model %s\n", p->name );
if ( p->support_status != mts_sup_full )
MTS_MESSAGE( "model %s is not known to be fully supported, reports welcome!\n",
p->name );
/* the current altsetting on the interface we're probing */
altsetting = intf->cur_altsetting;


@ -96,6 +96,8 @@ static int idmouse_probe(struct usb_interface *interface,
const struct usb_device_id *id);
static void idmouse_disconnect(struct usb_interface *interface);
static int idmouse_suspend(struct usb_interface *intf, pm_message_t message);
static int idmouse_resume(struct usb_interface *intf);
/* file operation pointers */
static const struct file_operations idmouse_fops = {
@ -117,7 +119,11 @@ static struct usb_driver idmouse_driver = {
.name = DRIVER_SHORT,
.probe = idmouse_probe,
.disconnect = idmouse_disconnect,
.suspend = idmouse_suspend,
.resume = idmouse_resume,
.reset_resume = idmouse_resume,
.id_table = idmouse_table,
.supports_autosuspend = 1,
};
static int idmouse_create_image(struct usb_idmouse *dev)
@ -197,6 +203,17 @@ reset:
return result;
}
/* PM operations are nops as this driver does IO only during open() */
static int idmouse_suspend(struct usb_interface *intf, pm_message_t message)
{
return 0;
}
static int idmouse_resume(struct usb_interface *intf)
{
return 0;
}
static inline void idmouse_delete(struct usb_idmouse *dev)
{
kfree(dev->bulk_in_buffer);
@ -235,9 +252,13 @@ static int idmouse_open(struct inode *inode, struct file *file)
} else {
/* create a new image and check for success */
result = usb_autopm_get_interface(interface);
if (result)
goto error;
result = idmouse_create_image (dev);
if (result)
goto error;
usb_autopm_put_interface(interface);
/* increment our usage count for the driver */
++dev->open;


@ -412,6 +412,9 @@ static unsigned int ld_usb_poll(struct file *file, poll_table *wait)
dev = file->private_data;
if (!dev->intf)
return POLLERR | POLLHUP;
poll_wait(file, &dev->read_wait, wait);
poll_wait(file, &dev->write_wait, wait);
@ -767,6 +770,9 @@ static void ld_usb_disconnect(struct usb_interface *intf)
ld_usb_delete(dev);
} else {
dev->intf = NULL;
/* wake up pollers */
wake_up_interruptible_all(&dev->read_wait);
wake_up_interruptible_all(&dev->write_wait);
mutex_unlock(&dev->mutex);
}


@ -552,6 +552,9 @@ static unsigned int tower_poll (struct file *file, poll_table *wait)
dev = file->private_data;
if (!dev->udev)
return POLLERR | POLLHUP;
poll_wait(file, &dev->read_wait, wait);
poll_wait(file, &dev->write_wait, wait);
@ -1025,6 +1028,9 @@ static void tower_disconnect (struct usb_interface *interface)
tower_delete (dev);
} else {
dev->udev = NULL;
/* wake up pollers */
wake_up_interruptible_all(&dev->read_wait);
wake_up_interruptible_all(&dev->write_wait);
mutex_unlock(&dev->lock);
}


@ -79,14 +79,12 @@ sisusb_free_buffers(struct sisusb_usb_data *sisusb)
for (i = 0; i < NUMOBUFS; i++) {
if (sisusb->obuf[i]) {
usb_buffer_free(sisusb->sisusb_dev, sisusb->obufsize,
sisusb->obuf[i], sisusb->transfer_dma_out[i]);
kfree(sisusb->obuf[i]);
sisusb->obuf[i] = NULL;
}
}
if (sisusb->ibuf) {
usb_buffer_free(sisusb->sisusb_dev, sisusb->ibufsize,
sisusb->ibuf, sisusb->transfer_dma_in);
kfree(sisusb->ibuf);
sisusb->ibuf = NULL;
}
}
@ -230,8 +228,7 @@ sisusb_bulk_completeout(struct urb *urb)
static int
sisusb_bulkout_msg(struct sisusb_usb_data *sisusb, int index, unsigned int pipe, void *data,
int len, int *actual_length, int timeout, unsigned int tflags,
dma_addr_t transfer_dma)
int len, int *actual_length, int timeout, unsigned int tflags)
{
struct urb *urb = sisusb->sisurbout[index];
int retval, byteswritten = 0;
@ -245,9 +242,6 @@ sisusb_bulkout_msg(struct sisusb_usb_data *sisusb, int index, unsigned int pipe,
urb->transfer_flags |= tflags;
urb->actual_length = 0;
if ((urb->transfer_dma = transfer_dma))
urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
/* Set up context */
sisusb->urbout_context[index].actual_length = (timeout) ?
NULL : actual_length;
@ -297,8 +291,8 @@ sisusb_bulk_completein(struct urb *urb)
}
static int
sisusb_bulkin_msg(struct sisusb_usb_data *sisusb, unsigned int pipe, void *data, int len,
int *actual_length, int timeout, unsigned int tflags, dma_addr_t transfer_dma)
sisusb_bulkin_msg(struct sisusb_usb_data *sisusb, unsigned int pipe, void *data,
int len, int *actual_length, int timeout, unsigned int tflags)
{
struct urb *urb = sisusb->sisurbin;
int retval, readbytes = 0;
@ -311,9 +305,6 @@ sisusb_bulkin_msg(struct sisusb_usb_data *sisusb, unsigned int pipe, void *data,
urb->transfer_flags |= tflags;
urb->actual_length = 0;
if ((urb->transfer_dma = transfer_dma))
urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
sisusb->completein = 0;
retval = usb_submit_urb(urb, GFP_ATOMIC);
if (retval == 0) {
@ -422,8 +413,7 @@ static int sisusb_send_bulk_msg(struct sisusb_usb_data *sisusb, int ep, int len,
thispass,
&transferred_len,
async ? 0 : 5 * HZ,
tflags,
sisusb->transfer_dma_out[index]);
tflags);
if (result == -ETIMEDOUT) {
@ -432,29 +422,16 @@ static int sisusb_send_bulk_msg(struct sisusb_usb_data *sisusb, int ep, int len,
return -ETIME;
continue;
}
} else if ((result == 0) && !async && transferred_len) {
if ((result == 0) && !async && transferred_len) {
thispass -= transferred_len;
if (thispass) {
if (sisusb->transfer_dma_out) {
/* If DMA, copy remaining
* to beginning of buffer
*/
memcpy(buffer,
buffer + transferred_len,
thispass);
} else {
/* If not DMA, simply increase
* the pointer
*/
buffer += transferred_len;
}
}
buffer += transferred_len;
} else
break;
};
}
if (result)
return result;
@ -530,8 +507,7 @@ static int sisusb_recv_bulk_msg(struct sisusb_usb_data *sisusb, int ep, int len,
thispass,
&transferred_len,
5 * HZ,
tflags,
sisusb->transfer_dma_in);
tflags);
if (transferred_len)
thispass = transferred_len;
@ -3132,8 +3108,7 @@ static int sisusb_probe(struct usb_interface *intf,
/* Allocate buffers */
sisusb->ibufsize = SISUSB_IBUF_SIZE;
if (!(sisusb->ibuf = usb_buffer_alloc(dev, SISUSB_IBUF_SIZE,
GFP_KERNEL, &sisusb->transfer_dma_in))) {
if (!(sisusb->ibuf = kmalloc(SISUSB_IBUF_SIZE, GFP_KERNEL))) {
dev_err(&sisusb->sisusb_dev->dev, "Failed to allocate memory for input buffer");
retval = -ENOMEM;
goto error_2;
@ -3142,9 +3117,7 @@ static int sisusb_probe(struct usb_interface *intf,
sisusb->numobufs = 0;
sisusb->obufsize = SISUSB_OBUF_SIZE;
for (i = 0; i < NUMOBUFS; i++) {
if (!(sisusb->obuf[i] = usb_buffer_alloc(dev, SISUSB_OBUF_SIZE,
GFP_KERNEL,
&sisusb->transfer_dma_out[i]))) {
if (!(sisusb->obuf[i] = kmalloc(SISUSB_OBUF_SIZE, GFP_KERNEL))) {
if (i == 0) {
dev_err(&sisusb->sisusb_dev->dev, "Failed to allocate memory for output buffer\n");
retval = -ENOMEM;


@ -123,8 +123,6 @@ struct sisusb_usb_data {
int numobufs; /* number of obufs = number of out urbs */
char *obuf[NUMOBUFS], *ibuf; /* transfer buffers */
int obufsize, ibufsize;
dma_addr_t transfer_dma_out[NUMOBUFS];
dma_addr_t transfer_dma_in;
struct urb *sisurbout[NUMOBUFS];
struct urb *sisurbin;
unsigned char urbstatus[NUMOBUFS];


@ -38,6 +38,7 @@ static char *display_textmodes[] = {"raw", "hex", "ascii", NULL};
struct usb_sevsegdev {
struct usb_device *udev;
struct usb_interface *intf;
u8 powered;
u8 mode_msb;
@ -46,6 +47,8 @@ struct usb_sevsegdev {
u8 textmode;
u8 text[MAXLEN];
u16 textlength;
u8 shadow_power; /* for PM */
};
/* sysfs_streq can't replace this completely
@ -65,6 +68,12 @@ static void update_display_powered(struct usb_sevsegdev *mydev)
{
int rc;
if (!mydev->shadow_power && mydev->powered) {
rc = usb_autopm_get_interface(mydev->intf);
if (rc < 0)
return;
}
rc = usb_control_msg(mydev->udev,
usb_sndctrlpipe(mydev->udev, 0),
0x12,
@ -76,12 +85,18 @@ static void update_display_powered(struct usb_sevsegdev *mydev)
2000);
if (rc < 0)
dev_dbg(&mydev->udev->dev, "power retval = %d\n", rc);
if (mydev->shadow_power && !mydev->powered)
usb_autopm_put_interface(mydev->intf);
}
static void update_display_mode(struct usb_sevsegdev *mydev)
{
int rc;
if(mydev->shadow_power != 1)
return;
rc = usb_control_msg(mydev->udev,
usb_sndctrlpipe(mydev->udev, 0),
0x12,
@ -96,14 +111,17 @@ static void update_display_mode(struct usb_sevsegdev *mydev)
dev_dbg(&mydev->udev->dev, "mode retval = %d\n", rc);
}
static void update_display_visual(struct usb_sevsegdev *mydev)
static void update_display_visual(struct usb_sevsegdev *mydev, gfp_t mf)
{
int rc;
int i;
unsigned char *buffer;
u8 decimals = 0;
buffer = kzalloc(MAXLEN, GFP_KERNEL);
if(mydev->shadow_power != 1)
return;
buffer = kzalloc(MAXLEN, mf);
if (!buffer) {
dev_err(&mydev->udev->dev, "out of memory\n");
return;
@ -163,7 +181,7 @@ static ssize_t set_attr_##name(struct device *dev, \
struct usb_sevsegdev *mydev = usb_get_intfdata(intf); \
\
mydev->name = simple_strtoul(buf, NULL, 10); \
update_fcn(mydev); \
update_fcn(mydev); \
\
return count; \
} \
@ -194,7 +212,7 @@ static ssize_t set_attr_text(struct device *dev,
if (end > 0)
memcpy(mydev->text, buf, end);
update_display_visual(mydev);
update_display_visual(mydev, GFP_KERNEL);
return count;
}
@ -242,7 +260,7 @@ static ssize_t set_attr_decimals(struct device *dev,
if (buf[i] == '1')
mydev->decimals[end-1-i] = 1;
update_display_visual(mydev);
update_display_visual(mydev, GFP_KERNEL);
return count;
}
@ -286,7 +304,7 @@ static ssize_t set_attr_textmode(struct device *dev,
for (i = 0; display_textmodes[i]; i++) {
if (sysfs_streq(display_textmodes[i], buf)) {
mydev->textmode = i;
update_display_visual(mydev);
update_display_visual(mydev, GFP_KERNEL);
return count;
}
}
@ -330,6 +348,7 @@ static int sevseg_probe(struct usb_interface *interface,
}
mydev->udev = usb_get_dev(udev);
mydev->intf = interface;
usb_set_intfdata(interface, mydev);
/*set defaults */
@ -364,11 +383,49 @@ static void sevseg_disconnect(struct usb_interface *interface)
dev_info(&interface->dev, "USB 7 Segment now disconnected\n");
}
static int sevseg_suspend(struct usb_interface *intf, pm_message_t message)
{
struct usb_sevsegdev *mydev;
mydev = usb_get_intfdata(intf);
mydev->shadow_power = 0;
return 0;
}
static int sevseg_resume(struct usb_interface *intf)
{
struct usb_sevsegdev *mydev;
mydev = usb_get_intfdata(intf);
mydev->shadow_power = 1;
update_display_mode(mydev);
update_display_visual(mydev, GFP_NOIO);
return 0;
}
static int sevseg_reset_resume(struct usb_interface *intf)
{
struct usb_sevsegdev *mydev;
mydev = usb_get_intfdata(intf);
mydev->shadow_power = 1;
update_display_mode(mydev);
update_display_visual(mydev, GFP_NOIO);
return 0;
}
static struct usb_driver sevseg_driver = {
.name = "usbsevseg",
.probe = sevseg_probe,
.disconnect = sevseg_disconnect,
.suspend = sevseg_suspend,
.resume = sevseg_resume,
.reset_resume = sevseg_reset_resume,
.id_table = id_table,
.supports_autosuspend = 1,
};
static int __init usb_sevseg_init(void)


@ -5,11 +5,9 @@
config USB_MON
tristate "USB Monitor"
depends on USB
default y if USB=y
default m if USB=m
help
If you select this option, a component which captures the USB traffic
between peripheral-specific drivers and HC drivers will be built.
For more information, see <file:Documentation/usb/usbmon.txt>.
If unsure, say Y (if allowed), otherwise M.
If unsure, say Y, if allowed, otherwise M.


@ -2,6 +2,6 @@
# Makefile for USB monitor
#
usbmon-objs := mon_main.o mon_stat.o mon_text.o mon_bin.o mon_dma.o
usbmon-objs := mon_main.o mon_stat.o mon_text.o mon_bin.o
obj-$(CONFIG_USB_MON) += usbmon.o


@ -220,9 +220,8 @@ static void mon_free_buff(struct mon_pgmap *map, int npages);
/*
* This is a "chunked memcpy". It does not manipulate any counters.
* But it returns the new offset for repeated application.
*/
unsigned int mon_copy_to_buff(const struct mon_reader_bin *this,
static void mon_copy_to_buff(const struct mon_reader_bin *this,
unsigned int off, const unsigned char *from, unsigned int length)
{
unsigned int step_len;
@ -247,7 +246,6 @@ unsigned int mon_copy_to_buff(const struct mon_reader_bin *this,
from += step_len;
length -= step_len;
}
return off;
}
/*
@ -400,15 +398,8 @@ static char mon_bin_get_data(const struct mon_reader_bin *rp,
unsigned int offset, struct urb *urb, unsigned int length)
{
if (urb->dev->bus->uses_dma &&
(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)) {
mon_dmapeek_vec(rp, offset, urb->transfer_dma, length);
return 0;
}
if (urb->transfer_buffer == NULL)
return 'Z';
mon_copy_to_buff(rp, offset, urb->transfer_buffer, length);
return 0;
}
@ -635,7 +626,6 @@ static int mon_bin_open(struct inode *inode, struct file *file)
spin_lock_init(&rp->b_lock);
init_waitqueue_head(&rp->b_wait);
mutex_init(&rp->fetch_lock);
rp->b_size = BUFF_DFL;
size = sizeof(struct mon_pgmap) * (rp->b_size/CHUNK_SIZE);


@ -1,95 +0,0 @@
/*
* The USB Monitor, inspired by Dave Harding's USBMon.
*
* mon_dma.c: Library which snoops on DMA areas.
*
* Copyright (C) 2005 Pete Zaitcev (zaitcev@redhat.com)
*/
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/highmem.h>
#include <asm/page.h>
#include <linux/usb.h> /* Only needed for declarations in usb_mon.h */
#include "usb_mon.h"
/*
* PC-compatibles, are, fortunately, sufficiently cache-coherent for this.
*/
#if defined(__i386__) || defined(__x86_64__) /* CONFIG_ARCH_I386 doesn't exist */
#define MON_HAS_UNMAP 1
#define phys_to_page(phys) pfn_to_page((phys) >> PAGE_SHIFT)
char mon_dmapeek(unsigned char *dst, dma_addr_t dma_addr, int len)
{
struct page *pg;
unsigned long flags;
unsigned char *map;
unsigned char *ptr;
/*
* On i386, a DMA handle is the "physical" address of a page.
* In other words, the bus address is equal to physical address.
* There is no IOMMU.
*/
pg = phys_to_page(dma_addr);
/*
* We are called from hardware IRQs in case of callbacks.
* But we can be called from softirq or process context in case
* of submissions. In such case, we need to protect KM_IRQ0.
*/
local_irq_save(flags);
map = kmap_atomic(pg, KM_IRQ0);
ptr = map + (dma_addr & (PAGE_SIZE-1));
memcpy(dst, ptr, len);
kunmap_atomic(map, KM_IRQ0);
local_irq_restore(flags);
return 0;
}
void mon_dmapeek_vec(const struct mon_reader_bin *rp,
unsigned int offset, dma_addr_t dma_addr, unsigned int length)
{
unsigned long flags;
unsigned int step_len;
struct page *pg;
unsigned char *map;
unsigned long page_off, page_len;
local_irq_save(flags);
while (length) {
/* compute number of bytes we are going to copy in this page */
step_len = length;
page_off = dma_addr & (PAGE_SIZE-1);
page_len = PAGE_SIZE - page_off;
if (page_len < step_len)
step_len = page_len;
/* copy data and advance pointers */
pg = phys_to_page(dma_addr);
map = kmap_atomic(pg, KM_IRQ0);
offset = mon_copy_to_buff(rp, offset, map + page_off, step_len);
kunmap_atomic(map, KM_IRQ0);
dma_addr += step_len;
length -= step_len;
}
local_irq_restore(flags);
}
#endif /* __i386__ */
#ifndef MON_HAS_UNMAP
char mon_dmapeek(unsigned char *dst, dma_addr_t dma_addr, int len)
{
return 'D';
}
void mon_dmapeek_vec(const struct mon_reader_bin *rp,
unsigned int offset, dma_addr_t dma_addr, unsigned int length)
{
;
}
#endif /* MON_HAS_UNMAP */


@ -361,7 +361,6 @@ static int __init mon_init(void)
}
// MOD_INC_USE_COUNT(which_module?);
mutex_lock(&usb_bus_list_lock);
list_for_each_entry (ubus, &usb_bus_list, bus_list) {
mon_bus_init(ubus);


@ -150,20 +150,6 @@ static inline char mon_text_get_data(struct mon_event_text *ep, struct urb *urb,
return '>';
}
/*
* The check to see if it's safe to poke at data has an enormous
* number of corner cases, but it seems that the following is
* more or less safe.
*
* We do not even try to look at transfer_buffer, because it can
* contain non-NULL garbage in case the upper level promised to
* set DMA for the HCD.
*/
if (urb->dev->bus->uses_dma &&
(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)) {
return mon_dmapeek(ep->data, urb->transfer_dma, len);
}
if (urb->transfer_buffer == NULL)
return 'Z'; /* '0' would be not as pretty. */


@ -64,20 +64,6 @@ void mon_text_exit(void);
int __init mon_bin_init(void);
void mon_bin_exit(void);
/*
* DMA interface.
*
* XXX The vectored side needs a serious re-thinking. Abstracting vectors,
* like in Paolo's original patch, produces a double pkmap. We need an idea.
*/
extern char mon_dmapeek(unsigned char *dst, dma_addr_t dma_addr, int len);
struct mon_reader_bin;
extern void mon_dmapeek_vec(const struct mon_reader_bin *rp,
unsigned int offset, dma_addr_t dma_addr, unsigned int len);
extern unsigned int mon_copy_to_buff(const struct mon_reader_bin *rp,
unsigned int offset, const unsigned char *from, unsigned int len);
/*
*/
extern struct mutex mon_lock;


@ -1850,6 +1850,10 @@ static void musb_free(struct musb *musb)
dma_controller_destroy(c);
}
#ifdef CONFIG_USB_MUSB_OTG
put_device(musb->xceiv->dev);
#endif
musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
musb_platform_exit(musb);
musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
@ -1859,10 +1863,6 @@ static void musb_free(struct musb *musb)
clk_put(musb->clock);
}
#ifdef CONFIG_USB_MUSB_OTG
put_device(musb->xceiv->dev);
#endif
#ifdef CONFIG_USB_MUSB_HDRC_HCD
usb_put_hcd(musb_to_hcd(musb));
#else


@ -117,24 +117,7 @@ static void enable_vbus_draw(struct isp1301 *isp, unsigned mA)
pr_debug(" VBUS %d mA error %d\n", mA, status);
}
static void enable_vbus_source(struct isp1301 *isp)
{
/* this board won't supply more than 8mA vbus power.
* some boards can switch a 100ma "unit load" (or more).
*/
}
/* products will deliver OTG messages with LEDs, GUI, etc */
static inline void notresponding(struct isp1301 *isp)
{
printk(KERN_NOTICE "OTG device not responding.\n");
}
#endif
#if defined(CONFIG_MACH_OMAP_H4)
#else
static void enable_vbus_draw(struct isp1301 *isp, unsigned mA)
{
@ -144,6 +127,8 @@ static void enable_vbus_draw(struct isp1301 *isp, unsigned mA)
*/
}
#endif
static void enable_vbus_source(struct isp1301 *isp)
{
/* this board won't supply more than 8mA vbus power.
@ -159,8 +144,6 @@ static inline void notresponding(struct isp1301 *isp)
}
#endif
/*-------------------------------------------------------------------------*/
static struct i2c_driver isp1301_driver;

Some files were not shown because too many files have changed in this diff Show More