Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6

commit e990b37b90
Author: David S. Miller
Date:   2011-06-04 13:38:31 -07:00
101 changed files with 916 additions and 469 deletions

@@ -999,7 +999,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
With this option on every unmap_single operation will
result in a hardware IOTLB flush operation as opposed
to batching them for performance.
sp_off [Default Off]
By default, super page will be supported if Intel IOMMU
has the capability. With this option, super page will
not be supported.
intremap= [X86-64, Intel-IOMMU]
Format: { on (default) | off | nosid }
on enable Interrupt Remapping (default)
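A usage sketch (an illustration, not text from the patch): with both options documented above, a boot command line that disables Intel IOMMU superpage support while keeping interrupt remapping enabled would be:

	intel_iommu=sp_off intremap=on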

@@ -249,6 +249,29 @@ static int slot_cn7_get_cd(struct platform_device *pdev)
{
return !gpio_get_value(GPIO_PORT41);
}
/* MERAM */
static struct sh_mobile_meram_info meram_info = {
.addr_mode = SH_MOBILE_MERAM_MODE1,
};
static struct resource meram_resources[] = {
[0] = {
.name = "MERAM",
.start = 0xe8000000,
.end = 0xe81fffff,
.flags = IORESOURCE_MEM,
},
};
static struct platform_device meram_device = {
.name = "sh_mobile_meram",
.id = 0,
.num_resources = ARRAY_SIZE(meram_resources),
.resource = meram_resources,
.dev = {
.platform_data = &meram_info,
},
};
/* SH_MMCIF */
static struct resource sh_mmcif_resources[] = {
@@ -447,13 +470,29 @@ const static struct fb_videomode ap4evb_lcdc_modes[] = {
#endif
},
};
static struct sh_mobile_meram_cfg lcd_meram_cfg = {
.icb[0] = {
.marker_icb = 28,
.cache_icb = 24,
.meram_offset = 0x0,
.meram_size = 0x40,
},
.icb[1] = {
.marker_icb = 29,
.cache_icb = 25,
.meram_offset = 0x40,
.meram_size = 0x40,
},
};
static struct sh_mobile_lcdc_info lcdc_info = {
.meram_dev = &meram_info,
.ch[0] = {
.chan = LCDC_CHAN_MAINLCD,
.bpp = 16,
.lcd_cfg = ap4evb_lcdc_modes,
.num_cfg = ARRAY_SIZE(ap4evb_lcdc_modes),
.meram_cfg = &lcd_meram_cfg,
}
};
@@ -724,15 +763,31 @@ static struct platform_device fsi_device = {
static struct platform_device fsi_ak4643_device = {
.name = "sh_fsi2_a_ak4643",
};
static struct sh_mobile_meram_cfg hdmi_meram_cfg = {
.icb[0] = {
.marker_icb = 30,
.cache_icb = 26,
.meram_offset = 0x80,
.meram_size = 0x100,
},
.icb[1] = {
.marker_icb = 31,
.cache_icb = 27,
.meram_offset = 0x180,
.meram_size = 0x100,
},
};
static struct sh_mobile_lcdc_info sh_mobile_lcdc1_info = {
.clock_source = LCDC_CLK_EXTERNAL,
.meram_dev = &meram_info,
.ch[0] = {
.chan = LCDC_CHAN_MAINLCD,
.bpp = 16,
.interface_type = RGB24,
.clock_divider = 1,
.flags = LCDC_FLAGS_DWPOL,
.meram_cfg = &hdmi_meram_cfg,
}
};
@@ -961,6 +1016,7 @@ static struct platform_device *ap4evb_devices[] __initdata = {
&csi2_device,
&ceu_device,
&ap4evb_camera,
&meram_device,
};
static void __init hdmi_init_pm_clock(void)

@@ -39,6 +39,7 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/physmap.h>
#include <linux/pm_runtime.h>
#include <linux/smsc911x.h>
#include <linux/sh_intc.h>
#include <linux/tca6416_keypad.h>
@@ -314,6 +315,30 @@ static struct platform_device smc911x_device = {
},
};
/* MERAM */
static struct sh_mobile_meram_info mackerel_meram_info = {
.addr_mode = SH_MOBILE_MERAM_MODE1,
};
static struct resource meram_resources[] = {
[0] = {
.name = "MERAM",
.start = 0xe8000000,
.end = 0xe81fffff,
.flags = IORESOURCE_MEM,
},
};
static struct platform_device meram_device = {
.name = "sh_mobile_meram",
.id = 0,
.num_resources = ARRAY_SIZE(meram_resources),
.resource = meram_resources,
.dev = {
.platform_data = &mackerel_meram_info,
},
};
/* LCDC */
static struct fb_videomode mackerel_lcdc_modes[] = {
{
@@ -342,7 +367,23 @@ static int mackerel_get_brightness(void *board_data)
return gpio_get_value(GPIO_PORT31);
}
static struct sh_mobile_meram_cfg lcd_meram_cfg = {
.icb[0] = {
.marker_icb = 28,
.cache_icb = 24,
.meram_offset = 0x0,
.meram_size = 0x40,
},
.icb[1] = {
.marker_icb = 29,
.cache_icb = 25,
.meram_offset = 0x40,
.meram_size = 0x40,
},
};
static struct sh_mobile_lcdc_info lcdc_info = {
.meram_dev = &mackerel_meram_info,
.clock_source = LCDC_CLK_BUS,
.ch[0] = {
.chan = LCDC_CHAN_MAINLCD,
@@ -362,6 +403,7 @@ static struct sh_mobile_lcdc_info lcdc_info = {
.name = "sh_mobile_lcdc_bl",
.max_brightness = 1,
},
.meram_cfg = &lcd_meram_cfg,
}
};
@@ -388,8 +430,23 @@ static struct platform_device lcdc_device = {
},
};
static struct sh_mobile_meram_cfg hdmi_meram_cfg = {
.icb[0] = {
.marker_icb = 30,
.cache_icb = 26,
.meram_offset = 0x80,
.meram_size = 0x100,
},
.icb[1] = {
.marker_icb = 31,
.cache_icb = 27,
.meram_offset = 0x180,
.meram_size = 0x100,
},
};
/* HDMI */
static struct sh_mobile_lcdc_info hdmi_lcdc_info = {
.meram_dev = &mackerel_meram_info,
.clock_source = LCDC_CLK_EXTERNAL,
.ch[0] = {
.chan = LCDC_CHAN_MAINLCD,
@@ -397,6 +454,7 @@ static struct sh_mobile_lcdc_info hdmi_lcdc_info = {
.interface_type = RGB24,
.clock_divider = 1,
.flags = LCDC_FLAGS_DWPOL,
.meram_cfg = &hdmi_meram_cfg,
}
};
@@ -856,6 +914,17 @@ static int slot_cn7_get_cd(struct platform_device *pdev)
}
/* SDHI0 */
static irqreturn_t mackerel_sdhi0_gpio_cd(int irq, void *arg)
{
struct device *dev = arg;
struct sh_mobile_sdhi_info *info = dev->platform_data;
struct tmio_mmc_data *pdata = info->pdata;
tmio_mmc_cd_wakeup(pdata);
return IRQ_HANDLED;
}
static struct sh_mobile_sdhi_info sdhi0_info = {
.dma_slave_tx = SHDMA_SLAVE_SDHI0_TX,
.dma_slave_rx = SHDMA_SLAVE_SDHI0_RX,
@@ -1150,6 +1219,7 @@ static struct platform_device *mackerel_devices[] __initdata = {
&mackerel_camera,
&hdmi_lcdc_device,
&hdmi_device,
&meram_device,
};
/* Keypad Initialization */
@@ -1238,6 +1308,7 @@ static void __init mackerel_init(void)
{
u32 srcr4;
struct clk *clk;
int ret;
sh7372_pinmux_init();
@@ -1343,6 +1414,13 @@ static void __init mackerel_init(void)
gpio_request(GPIO_FN_SDHID0_1, NULL);
gpio_request(GPIO_FN_SDHID0_0, NULL);
ret = request_irq(evt2irq(0x3340), mackerel_sdhi0_gpio_cd,
IRQF_TRIGGER_FALLING, "sdhi0 cd", &sdhi0_device.dev);
if (!ret)
sdhi0_info.tmio_flags |= TMIO_MMC_HAS_COLD_CD;
else
pr_err("Cannot get IRQ #%d: %d\n", evt2irq(0x3340), ret);
#if !defined(CONFIG_MMC_SH_MMCIF) && !defined(CONFIG_MMC_SH_MMCIF_MODULE)
/* enable SDHI1 */
gpio_request(GPIO_FN_SDHICMD1, NULL);

@@ -509,6 +509,7 @@ enum { MSTP001,
MSTP118, MSTP117, MSTP116, MSTP113,
MSTP106, MSTP101, MSTP100,
MSTP223,
MSTP218, MSTP217, MSTP216,
MSTP207, MSTP206, MSTP204, MSTP203, MSTP202, MSTP201, MSTP200,
MSTP329, MSTP328, MSTP323, MSTP322, MSTP314, MSTP313, MSTP312,
MSTP423, MSTP415, MSTP413, MSTP411, MSTP410, MSTP406, MSTP403,
@@ -534,6 +535,9 @@ static struct clk mstp_clks[MSTP_NR] = {
[MSTP101] = MSTP(&div4_clks[DIV4_M1], SMSTPCR1, 1, 0), /* VPU */
[MSTP100] = MSTP(&div4_clks[DIV4_B], SMSTPCR1, 0, 0), /* LCDC0 */
[MSTP223] = MSTP(&div6_clks[DIV6_SPU], SMSTPCR2, 23, 0), /* SPU2 */
[MSTP218] = MSTP(&div4_clks[DIV4_HP], SMSTPCR2, 18, 0), /* DMAC1 */
[MSTP217] = MSTP(&div4_clks[DIV4_HP], SMSTPCR2, 17, 0), /* DMAC2 */
[MSTP216] = MSTP(&div4_clks[DIV4_HP], SMSTPCR2, 16, 0), /* DMAC3 */
[MSTP207] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 7, 0), /* SCIFA5 */
[MSTP206] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 6, 0), /* SCIFB */
[MSTP204] = MSTP(&div6_clks[DIV6_SUB], SMSTPCR2, 4, 0), /* SCIFA0 */
@@ -626,6 +630,9 @@ static struct clk_lookup lookups[] = {
CLKDEV_DEV_ID("sh_mobile_lcdc_fb.0", &mstp_clks[MSTP100]), /* LCDC0 */
CLKDEV_DEV_ID("uio_pdrv_genirq.6", &mstp_clks[MSTP223]), /* SPU2DSP0 */
CLKDEV_DEV_ID("uio_pdrv_genirq.7", &mstp_clks[MSTP223]), /* SPU2DSP1 */
CLKDEV_DEV_ID("sh-dma-engine.0", &mstp_clks[MSTP218]), /* DMAC1 */
CLKDEV_DEV_ID("sh-dma-engine.1", &mstp_clks[MSTP217]), /* DMAC2 */
CLKDEV_DEV_ID("sh-dma-engine.2", &mstp_clks[MSTP216]), /* DMAC3 */
CLKDEV_DEV_ID("sh-sci.5", &mstp_clks[MSTP207]), /* SCIFA5 */
CLKDEV_DEV_ID("sh-sci.6", &mstp_clks[MSTP206]), /* SCIFB */
CLKDEV_DEV_ID("sh-sci.0", &mstp_clks[MSTP204]), /* SCIFA0 */

@@ -25,7 +25,7 @@
ENTRY(_strncpy)
CC = R2 == 0;
if CC JUMP 4f;
if CC JUMP 6f;
P2 = R2 ; /* size */
P0 = R0 ; /* dst*/

@@ -161,7 +161,7 @@ config ARCH_HAS_CPU_IDLE_WAIT
config NO_IOPORT
def_bool !PCI
depends on !SH_CAYMAN && !SH_SH4202_MICRODEV
depends on !SH_CAYMAN && !SH_SH4202_MICRODEV && !SH_SHMIN
config IO_TRAPPED
bool

@@ -359,37 +359,31 @@ static struct soc_camera_link camera_link = {
.priv = &camera_info,
};
static void dummy_release(struct device *dev)
{
}
static struct platform_device *camera_device;
static struct platform_device camera_device = {
.name = "soc_camera_platform",
.dev = {
.platform_data = &camera_info,
.release = dummy_release,
},
};
static void ap325rxa_camera_release(struct device *dev)
{
soc_camera_platform_release(&camera_device);
}
static int ap325rxa_camera_add(struct soc_camera_link *icl,
struct device *dev)
{
if (icl != &camera_link || camera_probe() <= 0)
return -ENODEV;
int ret = soc_camera_platform_add(icl, dev, &camera_device, &camera_link,
ap325rxa_camera_release, 0);
if (ret < 0)
return ret;
camera_info.dev = dev;
ret = camera_probe();
if (ret < 0)
soc_camera_platform_del(icl, camera_device, &camera_link);
return platform_device_register(&camera_device);
return ret;
}
static void ap325rxa_camera_del(struct soc_camera_link *icl)
{
if (icl != &camera_link)
return;
platform_device_unregister(&camera_device);
memset(&camera_device.dev.kobj, 0,
sizeof(camera_device.dev.kobj));
soc_camera_platform_del(icl, camera_device, &camera_link);
}
#endif /* CONFIG_I2C */

@@ -885,6 +885,9 @@ static struct platform_device sh_mmcif_device = {
},
.num_resources = ARRAY_SIZE(sh_mmcif_resources),
.resource = sh_mmcif_resources,
.archdata = {
.hwblk_id = HWBLK_MMC,
},
};
#endif

@@ -18,6 +18,7 @@
#include <asm/pgtable-2level.h>
#endif
#include <asm/page.h>
#include <asm/mmu.h>
#ifndef __ASSEMBLY__
#include <asm/addrspace.h>

@@ -41,7 +41,9 @@
#define user_mode(regs) (((regs)->sr & 0x40000000)==0)
#define kernel_stack_pointer(_regs) ((unsigned long)(_regs)->regs[15])
#define GET_USP(regs) ((regs)->regs[15])
#define GET_FP(regs) ((regs)->regs[14])
#define GET_USP(regs) ((regs)->regs[15])
extern void show_regs(struct pt_regs *);
@@ -131,7 +133,7 @@ extern void ptrace_triggered(struct perf_event *bp, int nmi,
static inline unsigned long profile_pc(struct pt_regs *regs)
{
unsigned long pc = instruction_pointer(regs);
unsigned long pc = regs->pc;
if (virt_addr_uncached(pc))
return CAC_ADDR(pc);

@@ -9,6 +9,7 @@
#include <linux/pagemap.h>
#ifdef CONFIG_MMU
#include <linux/swap.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>

@@ -236,6 +236,7 @@ enum {
};
enum {
SHDMA_SLAVE_INVALID,
SHDMA_SLAVE_SCIF0_TX,
SHDMA_SLAVE_SCIF0_RX,
SHDMA_SLAVE_SCIF1_TX,

@@ -285,6 +285,7 @@ enum {
};
enum {
SHDMA_SLAVE_INVALID,
SHDMA_SLAVE_SCIF0_TX,
SHDMA_SLAVE_SCIF0_RX,
SHDMA_SLAVE_SCIF1_TX,

@@ -252,6 +252,7 @@ enum {
};
enum {
SHDMA_SLAVE_INVALID,
SHDMA_SLAVE_SDHI_TX,
SHDMA_SLAVE_SDHI_RX,
SHDMA_SLAVE_MMCIF_TX,
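The same one-line addition lands in all three sh_dma slave-ID enums above: SHDMA_SLAVE_INVALID takes value 0, so a zero-initialized slave configuration can no longer alias the first real slave ID. That rationale is inferred rather than stated in the diff; a minimal sketch of the pattern:

	enum example_slave_id {
		EXAMPLE_SLAVE_INVALID,	/* = 0: a zeroed config is detectably invalid */
		EXAMPLE_SLAVE_TX,	/* real slave IDs start at 1 */
		EXAMPLE_SLAVE_RX,
	};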

@@ -21,6 +21,7 @@
#include <linux/fs.h>
#include <linux/ftrace.h>
#include <linux/hw_breakpoint.h>
#include <linux/prefetch.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/system.h>

@@ -82,7 +82,7 @@ void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
void *addr;
addr = __in_29bit_mode() ?
(void *)P1SEGADDR((unsigned long)vaddr) : vaddr;
(void *)CAC_ADDR((unsigned long)vaddr) : vaddr;
switch (direction) {
case DMA_FROM_DEVICE: /* invalidate only */

@@ -21,7 +21,7 @@ static void cfq_dtor(struct io_context *ioc)
if (!hlist_empty(&ioc->cic_list)) {
struct cfq_io_context *cic;
cic = list_entry(ioc->cic_list.first, struct cfq_io_context,
cic = hlist_entry(ioc->cic_list.first, struct cfq_io_context,
cic_list);
cic->dtor(ioc);
}
@@ -57,7 +57,7 @@ static void cfq_exit(struct io_context *ioc)
if (!hlist_empty(&ioc->cic_list)) {
struct cfq_io_context *cic;
cic = list_entry(ioc->cic_list.first, struct cfq_io_context,
cic = hlist_entry(ioc->cic_list.first, struct cfq_io_context,
cic_list);
cic->exit(ioc);
}

@@ -185,7 +185,7 @@ struct cfq_group {
int nr_cfqq;
/*
* Per group busy queus average. Useful for workload slice calc. We
* Per group busy queues average. Useful for workload slice calc. We
* create the array for each prio class but at run time it is used
* only for RT and BE class and slot for IDLE class remains unused.
* This is primarily done to avoid confusion and a gcc warning.
@@ -369,16 +369,16 @@ CFQ_CFQQ_FNS(wait_busy);
#define cfq_log_cfqq(cfqd, cfqq, fmt, args...) \
blk_add_trace_msg((cfqd)->queue, "cfq%d%c %s " fmt, (cfqq)->pid, \
cfq_cfqq_sync((cfqq)) ? 'S' : 'A', \
blkg_path(&(cfqq)->cfqg->blkg), ##args);
blkg_path(&(cfqq)->cfqg->blkg), ##args)
#define cfq_log_cfqg(cfqd, cfqg, fmt, args...) \
blk_add_trace_msg((cfqd)->queue, "%s " fmt, \
blkg_path(&(cfqg)->blkg), ##args); \
blkg_path(&(cfqg)->blkg), ##args) \
#else
#define cfq_log_cfqq(cfqd, cfqq, fmt, args...) \
blk_add_trace_msg((cfqd)->queue, "cfq%d " fmt, (cfqq)->pid, ##args)
#define cfq_log_cfqg(cfqd, cfqg, fmt, args...) do {} while (0);
#define cfq_log_cfqg(cfqd, cfqg, fmt, args...) do {} while (0)
#endif
#define cfq_log(cfqd, fmt, args...) \
blk_add_trace_msg((cfqd)->queue, "cfq " fmt, ##args)
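The semicolons dropped from these macros matter: a statement-like macro that supplies its own trailing ';' expands to an extra empty statement, which breaks if/else chains. A standalone illustration (not cfq code):

	#define LOG_BROKEN(msg)	do { } while (0);	/* trailing ';' breaks if/else */
	#define LOG_FIXED(msg)	do { } while (0)	/* caller supplies the ';' */

	void f(int x)
	{
		/* With LOG_BROKEN in place of LOG_FIXED this fails to compile:
		 * the macro's own ';' closes the if-branch early and orphans
		 * the else. */
		if (x)
			LOG_FIXED("set");
		else
			LOG_FIXED("clear");
	}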
@@ -3786,9 +3786,6 @@ new_queue:
return 0;
queue_fail:
if (cic)
put_io_context(cic->ioc);
cfq_schedule_dispatch(cfqd);
spin_unlock_irqrestore(q->queue_lock, flags);
cfq_log(cfqd, "set_request fail");

@@ -192,7 +192,8 @@ static int sock_xmit(struct nbd_device *lo, int send, void *buf, int size,
if (lo->xmit_timeout)
del_timer_sync(&ti);
} else
result = kernel_recvmsg(sock, &msg, &iov, 1, size, 0);
result = kernel_recvmsg(sock, &msg, &iov, 1, size,
msg.msg_flags);
if (signal_pending(current)) {
siginfo_t info;
@@ -753,9 +754,26 @@ static int __init nbd_init(void)
return -ENOMEM;
part_shift = 0;
if (max_part > 0)
if (max_part > 0) {
part_shift = fls(max_part);
/*
* Adjust max_part according to part_shift as it is exported
* to user space so that user can know the max number of
* partition kernel should be able to manage.
*
* Note that -1 is required because partition 0 is reserved
* for the whole disk.
*/
max_part = (1UL << part_shift) - 1;
}
if ((1UL << part_shift) > DISK_MAX_PARTS)
return -EINVAL;
if (nbds_max > 1UL << (MINORBITS - part_shift))
return -EINVAL;
for (i = 0; i < nbds_max; i++) {
struct gendisk *disk = alloc_disk(1 << part_shift);
if (!disk)
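A worked example of the part_shift adjustment above (a standalone sketch with a hypothetical module-parameter value; fls_sketch() stands in for the kernel's fls()):

	#include <stdio.h>

	/* Kernel-style fls(): position of the highest set bit, 1-based; 0 for 0. */
	static int fls_sketch(unsigned long x)
	{
		int r = 0;

		while (x) {
			x >>= 1;
			r++;
		}
		return r;
	}

	int main(void)
	{
		unsigned long max_part = 4;	/* e.g. "modprobe nbd max_part=4" */
		int part_shift = 0;

		if (max_part > 0) {
			part_shift = fls_sketch(max_part);
			/* -1 because partition 0 is the whole disk. */
			max_part = (1UL << part_shift) - 1;
		}
		/* Prints part_shift=3 max_part=7: a request for 4 rounds up to
		 * 7 addressable partitions per device. */
		printf("part_shift=%d max_part=%lu\n", part_shift, max_part);
		return 0;
	}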

@@ -321,7 +321,6 @@ static void pcd_init_units(void)
strcpy(disk->disk_name, cd->name); /* umm... */
disk->fops = &pcd_bdops;
disk->flags = GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE;
disk->events = DISK_EVENT_MEDIA_CHANGE;
}
}

@@ -809,11 +809,13 @@ static int __init xen_blkif_init(void)
failed_init:
kfree(blkbk->pending_reqs);
kfree(blkbk->pending_grant_handles);
for (i = 0; i < mmap_pages; i++) {
if (blkbk->pending_pages[i])
__free_page(blkbk->pending_pages[i]);
if (blkbk->pending_pages) {
for (i = 0; i < mmap_pages; i++) {
if (blkbk->pending_pages[i])
__free_page(blkbk->pending_pages[i]);
}
kfree(blkbk->pending_pages);
}
kfree(blkbk->pending_pages);
kfree(blkbk);
blkbk = NULL;
return rc;

@@ -357,14 +357,13 @@ static int xen_vbd_create(struct xen_blkif *blkif, blkif_vdev_t handle,
}
vbd->bdev = bdev;
vbd->size = vbd_sz(vbd);
if (vbd->bdev->bd_disk == NULL) {
DPRINTK("xen_vbd_create: device %08x doesn't exist.\n",
vbd->pdevice);
xen_vbd_free(vbd);
return -ENOENT;
}
vbd->size = vbd_sz(vbd);
if (vbd->bdev->bd_disk->flags & GENHD_FL_CD || cdrom)
vbd->type |= VDISK_CDROM;

@@ -355,29 +355,24 @@ static void hci_uart_tty_wakeup(struct tty_struct *tty)
* flags pointer to flags for data
* count count of received data in bytes
*
* Return Value: Number of bytes received
* Return Value: None
*/
static unsigned int hci_uart_tty_receive(struct tty_struct *tty,
const u8 *data, char *flags, int count)
static void hci_uart_tty_receive(struct tty_struct *tty, const u8 *data, char *flags, int count)
{
struct hci_uart *hu = (void *)tty->disc_data;
int received;
if (!hu || tty != hu->tty)
return -ENODEV;
return;
if (!test_bit(HCI_UART_PROTO_SET, &hu->flags))
return -EINVAL;
return;
spin_lock(&hu->rx_lock);
received = hu->proto->recv(hu, (void *) data, count);
if (received > 0)
hu->hdev->stat.byte_rx += received;
hu->proto->recv(hu, (void *) data, count);
hu->hdev->stat.byte_rx += count;
spin_unlock(&hu->rx_lock);
tty_unthrottle(tty);
return received;
}
static int hci_uart_register_dev(struct hci_uart *hu)
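The receive_buf() conversion above is repeated across the tty-based drivers below: the line-discipline callback now returns void, so per-call byte counts and error codes are no longer reported upward. A minimal sketch of the new callback shape (hypothetical driver names, not code from this commit):

	static void example_ldisc_receive(struct tty_struct *tty, const u8 *data,
					  char *flags, int count)
	{
		struct example_priv *priv = tty->disc_data;	/* hypothetical */

		if (!priv)
			return;		/* errors can only be swallowed now */

		example_feed_rx(priv, data, count);		/* hypothetical */
		tty_unthrottle(tty);
	}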

@@ -627,7 +627,6 @@ static int viocd_probe(struct vio_dev *vdev, const struct vio_device_id *id)
gendisk->fops = &viocd_fops;
gendisk->flags = GENHD_FL_CD | GENHD_FL_REMOVABLE |
GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE;
gendisk->events = DISK_EVENT_MEDIA_CHANGE;
set_capacity(gendisk, 0);
gendisk->private_data = d;
d->viocd_disk = gendisk;

@@ -24,7 +24,6 @@
#include <linux/ioport.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/pm_runtime.h>
#include <linux/irq.h>
#include <linux/err.h>
#include <linux/clocksource.h>
@@ -153,12 +152,10 @@ static int sh_cmt_enable(struct sh_cmt_priv *p, unsigned long *rate)
{
int ret;
/* wake up device and enable clock */
pm_runtime_get_sync(&p->pdev->dev);
/* enable clock */
ret = clk_enable(p->clk);
if (ret) {
dev_err(&p->pdev->dev, "cannot enable clock\n");
pm_runtime_put_sync(&p->pdev->dev);
return ret;
}
@@ -190,9 +187,8 @@ static void sh_cmt_disable(struct sh_cmt_priv *p)
/* disable interrupts in CMT block */
sh_cmt_write(p, CMCSR, 0);
/* stop clock and mark device as idle */
/* stop clock */
clk_disable(p->clk);
pm_runtime_put_sync(&p->pdev->dev);
}
/* private flags */
@@ -664,7 +660,6 @@ static int __devinit sh_cmt_probe(struct platform_device *pdev)
if (p) {
dev_info(&pdev->dev, "kept as earlytimer\n");
pm_runtime_enable(&pdev->dev);
return 0;
}
@@ -679,9 +674,6 @@ static int __devinit sh_cmt_probe(struct platform_device *pdev)
kfree(p);
platform_set_drvdata(pdev, NULL);
}
if (!is_early_platform_device(pdev))
pm_runtime_enable(&pdev->dev);
return ret;
}

@@ -25,7 +25,6 @@
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/pm_runtime.h>
#include <linux/irq.h>
#include <linux/err.h>
#include <linux/clocksource.h>
@@ -110,12 +109,10 @@ static int sh_tmu_enable(struct sh_tmu_priv *p)
{
int ret;
/* wake up device and enable clock */
pm_runtime_get_sync(&p->pdev->dev);
/* enable clock */
ret = clk_enable(p->clk);
if (ret) {
dev_err(&p->pdev->dev, "cannot enable clock\n");
pm_runtime_put_sync(&p->pdev->dev);
return ret;
}
@@ -144,9 +141,8 @@ static void sh_tmu_disable(struct sh_tmu_priv *p)
/* disable interrupts in TMU block */
sh_tmu_write(p, TCR, 0x0000);
/* stop clock and mark device as idle */
/* stop clock */
clk_disable(p->clk);
pm_runtime_put_sync(&p->pdev->dev);
}
static void sh_tmu_set_next(struct sh_tmu_priv *p, unsigned long delta,
@@ -415,7 +411,6 @@ static int __devinit sh_tmu_probe(struct platform_device *pdev)
if (p) {
dev_info(&pdev->dev, "kept as earlytimer\n");
pm_runtime_enable(&pdev->dev);
return 0;
}
@@ -430,9 +425,6 @@ static int __devinit sh_tmu_probe(struct platform_device *pdev)
kfree(p);
platform_set_drvdata(pdev, NULL);
}
if (!is_early_platform_device(pdev))
pm_runtime_enable(&pdev->dev);
return ret;
}

@@ -343,7 +343,7 @@ static int sh_dmae_alloc_chan_resources(struct dma_chan *chan)
dmae_set_dmars(sh_chan, cfg->mid_rid);
dmae_set_chcr(sh_chan, cfg->chcr);
} else if ((sh_dmae_readl(sh_chan, CHCR) & 0xf00) != 0x400) {
} else {
dmae_init(sh_chan);
}
@@ -1144,6 +1144,8 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
/* platform data */
shdev->pdata = pdata;
platform_set_drvdata(pdev, shdev);
pm_runtime_enable(&pdev->dev);
pm_runtime_get_sync(&pdev->dev);
@@ -1256,7 +1258,6 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
pm_runtime_put(&pdev->dev);
platform_set_drvdata(pdev, shdev);
dma_async_device_register(&shdev->common);
return err;
@@ -1278,6 +1279,8 @@ rst_err:
if (dmars)
iounmap(shdev->dmars);
platform_set_drvdata(pdev, NULL);
emapdmars:
iounmap(shdev->chan_reg);
synchronize_rcu();
@@ -1316,6 +1319,8 @@ static int __exit sh_dmae_remove(struct platform_device *pdev)
iounmap(shdev->dmars);
iounmap(shdev->chan_reg);
platform_set_drvdata(pdev, NULL);
synchronize_rcu();
kfree(shdev);

@@ -1782,7 +1782,6 @@ static int ide_cd_probe(ide_drive_t *drive)
ide_cd_read_toc(drive, &sense);
g->fops = &idecd_ops;
g->flags |= GENHD_FL_REMOVABLE | GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE;
g->events = DISK_EVENT_MEDIA_CHANGE;
add_disk(g);
return 0;

@@ -120,21 +120,17 @@ static void serport_ldisc_close(struct tty_struct *tty)
* 'interrupt' routine.
*/
static unsigned int serport_ldisc_receive(struct tty_struct *tty,
const unsigned char *cp, char *fp, int count)
static void serport_ldisc_receive(struct tty_struct *tty, const unsigned char *cp, char *fp, int count)
{
struct serport *serport = (struct serport*) tty->disc_data;
unsigned long flags;
unsigned int ch_flags;
int ret = 0;
int i;
spin_lock_irqsave(&serport->lock, flags);
if (!test_bit(SERPORT_ACTIVE, &serport->flags)) {
ret = -EINVAL;
if (!test_bit(SERPORT_ACTIVE, &serport->flags))
goto out;
}
for (i = 0; i < count; i++) {
switch (fp[i]) {
@@ -156,8 +152,6 @@ static unsigned int serport_ldisc_receive(struct tty_struct *tty,
out:
spin_unlock_irqrestore(&serport->lock, flags);
return ret == 0 ? count : ret;
}
/*

@@ -674,7 +674,7 @@ gigaset_tty_ioctl(struct tty_struct *tty, struct file *file,
* cflags buffer containing error flags for received characters (ignored)
* count number of received characters
*/
static unsigned int
static void
gigaset_tty_receive(struct tty_struct *tty, const unsigned char *buf,
char *cflags, int count)
{
@@ -683,12 +683,12 @@ gigaset_tty_receive(struct tty_struct *tty, const unsigned char *buf,
struct inbuf_t *inbuf;
if (!cs)
return -ENODEV;
return;
inbuf = cs->inbuf;
if (!inbuf) {
dev_err(cs->dev, "%s: no inbuf\n", __func__);
cs_put(cs);
return -EINVAL;
return;
}
tail = inbuf->tail;
@@ -725,8 +725,6 @@ gigaset_tty_receive(struct tty_struct *tty, const unsigned char *buf,
gig_dbg(DEBUG_INTR, "%s-->BH", __func__);
gigaset_schedule_event(cs);
cs_put(cs);
return count;
}
/*

@@ -304,7 +304,10 @@ static int check_and_rewind_pc(char *put_str, char *arg)
return 1;
}
/* Readjust the instruction pointer if needed */
instruction_pointer_set(&kgdbts_regs, ip + offset);
ip += offset;
#ifdef GDB_ADJUSTS_BREAK_OFFSET
instruction_pointer_set(&kgdbts_regs, ip);
#endif
return 0;
}

@@ -747,8 +747,8 @@ static void st_tty_close(struct tty_struct *tty)
pr_debug("%s: done ", __func__);
}
static unsigned int st_tty_receive(struct tty_struct *tty,
const unsigned char *data, char *tty_flags, int count)
static void st_tty_receive(struct tty_struct *tty, const unsigned char *data,
char *tty_flags, int count)
{
#ifdef VERBOSE
print_hex_dump(KERN_DEBUG, ">in>", DUMP_PREFIX_NONE,
@@ -761,8 +761,6 @@ static unsigned int st_tty_receive(struct tty_struct *tty,
*/
st_recv(tty->disc_data, data, count);
pr_debug("done %s", __func__);
return count;
}
/* wake-up function called in from the TTY layer

@@ -167,8 +167,8 @@ static inline void debugfs_tx(struct ser_device *ser, const u8 *data, int size)
#endif
static unsigned int ldisc_receive(struct tty_struct *tty,
const u8 *data, char *flags, int count)
static void ldisc_receive(struct tty_struct *tty, const u8 *data,
char *flags, int count)
{
struct sk_buff *skb = NULL;
struct ser_device *ser;
@@ -215,8 +215,6 @@ static unsigned int ldisc_receive(struct tty_struct *tty,
} else
++ser->dev->stats.rx_dropped;
update_tty_status(ser);
return count;
}
static int handle_tx(struct ser_device *ser)

@@ -923,7 +923,7 @@ static int __devinit flexcan_probe(struct platform_device *pdev)
mem_size = resource_size(mem);
if (!request_mem_region(mem->start, mem_size, pdev->name)) {
err = -EBUSY;
goto failed_req;
goto failed_get;
}
base = ioremap(mem->start, mem_size);
@@ -977,9 +977,8 @@ static int __devinit flexcan_probe(struct platform_device *pdev)
iounmap(base);
failed_map:
release_mem_region(mem->start, mem_size);
failed_req:
clk_put(clk);
failed_get:
clk_put(clk);
failed_clock:
return err;
}

@@ -425,17 +425,16 @@ static void slc_setup(struct net_device *dev)
* in parallel
*/
static unsigned int slcan_receive_buf(struct tty_struct *tty,
static void slcan_receive_buf(struct tty_struct *tty,
const unsigned char *cp, char *fp, int count)
{
struct slcan *sl = (struct slcan *) tty->disc_data;
int bytes = count;
if (!sl || sl->magic != SLCAN_MAGIC || !netif_running(sl->dev))
return -ENODEV;
return;
/* Read the characters out of the buffer */
while (bytes--) {
while (count--) {
if (fp && *fp++) {
if (!test_and_set_bit(SLF_ERROR, &sl->flags))
sl->dev->stats.rx_errors++;
@@ -444,8 +443,6 @@ static unsigned int slcan_receive_buf(struct tty_struct *tty,
}
slcan_unesc(sl, *cp++);
}
return count;
}
/************************************

@@ -1781,8 +1781,8 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
ndev = alloc_etherdev(sizeof(struct emac_priv));
if (!ndev) {
dev_err(&pdev->dev, "error allocating net_device\n");
clk_put(emac_clk);
return -ENOMEM;
rc = -ENOMEM;
goto free_clk;
}
platform_set_drvdata(pdev, ndev);
@@ -1796,7 +1796,8 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev)
pdata = pdev->dev.platform_data;
if (!pdata) {
dev_err(&pdev->dev, "no platform data\n");
return -ENODEV;
rc = -ENODEV;
goto probe_quit;
}
/* MAC addr and PHY mask , RMII enable info from platform_data */
@@ -1929,8 +1930,9 @@ no_dma:
iounmap(priv->remap_addr);
probe_quit:
clk_put(emac_clk);
free_netdev(ndev);
free_clk:
clk_put(emac_clk);
return rc;
}

@@ -1157,9 +1157,6 @@ dm9000_open(struct net_device *dev)
irqflags |= IRQF_SHARED;
if (request_irq(dev->irq, dm9000_interrupt, irqflags, dev->name, dev))
return -EAGAIN;
/* GPIO0 on pre-activate PHY, Reg 1F is not set by reset */
iow(db, DM9000_GPR, 0); /* REG_1F bit0 activate phyxcer */
mdelay(1); /* delay needs by DM9000B */
@@ -1168,6 +1165,9 @@ dm9000_open(struct net_device *dev)
dm9000_reset(db);
dm9000_init_dm9000(dev);
if (request_irq(dev->irq, dm9000_interrupt, irqflags, dev->name, dev))
return -EAGAIN;
/* Init driver variable */
db->dbug_cnt = 0;

@@ -456,7 +456,7 @@ out:
* a block of 6pack data has been received, which can now be decapsulated
* and sent on to some IP layer for further processing.
*/
static unsigned int sixpack_receive_buf(struct tty_struct *tty,
static void sixpack_receive_buf(struct tty_struct *tty,
const unsigned char *cp, char *fp, int count)
{
struct sixpack *sp;
@@ -464,11 +464,11 @@ static unsigned int sixpack_receive_buf(struct tty_struct *tty,
int count1;
if (!count)
return 0;
return;
sp = sp_get(tty);
if (!sp)
return -ENODEV;
return;
memcpy(buf, cp, count < sizeof(buf) ? count : sizeof(buf));
@@ -487,8 +487,6 @@ static unsigned int sixpack_receive_buf(struct tty_struct *tty,
sp_put(sp);
tty_unthrottle(tty);
return count1;
}
/*

@@ -923,14 +923,13 @@ static long mkiss_compat_ioctl(struct tty_struct *tty, struct file *file,
* a block of data has been received, which can now be decapsulated
* and sent on to the AX.25 layer for further processing.
*/
static unsigned int mkiss_receive_buf(struct tty_struct *tty,
const unsigned char *cp, char *fp, int count)
static void mkiss_receive_buf(struct tty_struct *tty, const unsigned char *cp,
char *fp, int count)
{
struct mkiss *ax = mkiss_get(tty);
int bytes = count;
if (!ax)
return -ENODEV;
return;
/*
* Argh! mtu change time! - costs us the packet part received
@@ -940,7 +939,7 @@ static unsigned int mkiss_receive_buf(struct tty_struct *tty,
ax_changedmtu(ax);
/* Read the characters out of the buffer */
while (bytes--) {
while (count--) {
if (fp != NULL && *fp++) {
if (!test_and_set_bit(AXF_ERROR, &ax->flags))
ax->dev->stats.rx_errors++;
@@ -953,8 +952,6 @@ static unsigned int mkiss_receive_buf(struct tty_struct *tty,
mkiss_put(ax);
tty_unthrottle(tty);
return count;
}
/*

@@ -216,23 +216,23 @@ static int irtty_do_write(struct sir_dev *dev, const unsigned char *ptr, size_t
* usbserial: urb-complete-interrupt / softint
*/
static unsigned int irtty_receive_buf(struct tty_struct *tty,
const unsigned char *cp, char *fp, int count)
static void irtty_receive_buf(struct tty_struct *tty, const unsigned char *cp,
char *fp, int count)
{
struct sir_dev *dev;
struct sirtty_cb *priv = tty->disc_data;
int i;
IRDA_ASSERT(priv != NULL, return -ENODEV;);
IRDA_ASSERT(priv->magic == IRTTY_MAGIC, return -EINVAL;);
IRDA_ASSERT(priv != NULL, return;);
IRDA_ASSERT(priv->magic == IRTTY_MAGIC, return;);
if (unlikely(count==0)) /* yes, this happens */
return 0;
return;
dev = priv->dev;
if (!dev) {
IRDA_WARNING("%s(), not ready yet!\n", __func__);
return -ENODEV;
return;
}
for (i = 0; i < count; i++) {
@@ -242,13 +242,11 @@ static unsigned int irtty_receive_buf(struct tty_struct *tty,
if (fp && *fp++) {
IRDA_DEBUG(0, "Framing or parity error!\n");
sirdev_receive(dev, NULL, 0); /* notify sir_dev (updating stats) */
return -EINVAL;
return;
}
}
sirdev_receive(dev, cp, count);
return count;
}
/*

@@ -340,7 +340,7 @@ ppp_asynctty_poll(struct tty_struct *tty, struct file *file, poll_table *wait)
}
/* May sleep, don't call from interrupt level or with interrupts disabled */
static unsigned int
static void
ppp_asynctty_receive(struct tty_struct *tty, const unsigned char *buf,
char *cflags, int count)
{
@@ -348,7 +348,7 @@ ppp_asynctty_receive(struct tty_struct *tty, const unsigned char *buf,
unsigned long flags;
if (!ap)
return -ENODEV;
return;
spin_lock_irqsave(&ap->recv_lock, flags);
ppp_async_input(ap, buf, cflags, count);
spin_unlock_irqrestore(&ap->recv_lock, flags);
@@ -356,8 +356,6 @@ ppp_asynctty_receive(struct tty_struct *tty, const unsigned char *buf,
tasklet_schedule(&ap->tsk);
ap_put(ap);
tty_unthrottle(tty);
return count;
}
static void

@@ -381,7 +381,7 @@ ppp_sync_poll(struct tty_struct *tty, struct file *file, poll_table *wait)
}
/* May sleep, don't call from interrupt level or with interrupts disabled */
static unsigned int
static void
ppp_sync_receive(struct tty_struct *tty, const unsigned char *buf,
char *cflags, int count)
{
@@ -389,7 +389,7 @@ ppp_sync_receive(struct tty_struct *tty, const unsigned char *buf,
unsigned long flags;
if (!ap)
return -ENODEV;
return;
spin_lock_irqsave(&ap->recv_lock, flags);
ppp_sync_input(ap, buf, cflags, count);
spin_unlock_irqrestore(&ap->recv_lock, flags);
@@ -397,8 +397,6 @@ ppp_sync_receive(struct tty_struct *tty, const unsigned char *buf,
tasklet_schedule(&ap->tsk);
sp_put(ap);
tty_unthrottle(tty);
return count;
}
static void

@@ -670,17 +670,16 @@ static void sl_setup(struct net_device *dev)
* in parallel
*/
static unsigned int slip_receive_buf(struct tty_struct *tty,
const unsigned char *cp, char *fp, int count)
static void slip_receive_buf(struct tty_struct *tty, const unsigned char *cp,
char *fp, int count)
{
struct slip *sl = tty->disc_data;
int bytes = count;
if (!sl || sl->magic != SLIP_MAGIC || !netif_running(sl->dev))
return -ENODEV;
return;
/* Read the characters out of the buffer */
while (bytes--) {
while (count--) {
if (fp && *fp++) {
if (!test_and_set_bit(SLF_ERROR, &sl->flags))
sl->dev->stats.rx_errors++;
@@ -694,8 +693,6 @@ static unsigned int slip_receive_buf(struct tty_struct *tty,
#endif
slip_unesc(sl, *cp++);
}
return count;
}
/************************************

@@ -5774,7 +5774,7 @@ static void tg3_skb_error_unmap(struct tg3_napi *tnapi,
dma_unmap_addr(txb, mapping),
skb_headlen(skb),
PCI_DMA_TODEVICE);
for (i = 0; i <= last; i++) {
for (i = 0; i < last; i++) {
skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
entry = NEXT_TX(entry);

@@ -54,7 +54,7 @@
#include <linux/usb/usbnet.h>
#include <linux/usb/cdc.h>
#define DRIVER_VERSION "24-May-2011"
#define DRIVER_VERSION "01-June-2011"
/* CDC NCM subclass 3.2.1 */
#define USB_CDC_NCM_NDP16_LENGTH_MIN 0x10
@@ -1234,6 +1234,7 @@ static struct usb_driver cdc_ncm_driver = {
.disconnect = cdc_ncm_disconnect,
.suspend = usbnet_suspend,
.resume = usbnet_resume,
.reset_resume = usbnet_resume,
.supports_autosuspend = 1,
};

@@ -517,18 +517,17 @@ static int x25_asy_close(struct net_device *dev)
* and sent on to some IP layer for further processing.
*/
static unsigned int x25_asy_receive_buf(struct tty_struct *tty,
static void x25_asy_receive_buf(struct tty_struct *tty,
const unsigned char *cp, char *fp, int count)
{
struct x25_asy *sl = tty->disc_data;
int bytes = count;
if (!sl || sl->magic != X25_ASY_MAGIC || !netif_running(sl->dev))
return;
/* Read the characters out of the buffer */
while (bytes--) {
while (count--) {
if (fp && *fp++) {
if (!test_and_set_bit(SLF_ERROR, &sl->flags))
sl->dev->stats.rx_errors++;
@@ -537,8 +536,6 @@ static unsigned int x25_asy_receive_buf(struct tty_struct *tty,
}
x25_asy_unesc(sl, *cp++);
}
return count;
}
/*

@@ -4645,10 +4645,16 @@ static void ar9003_hw_set_power_per_rate_table(struct ath_hw *ah,
case 1:
break;
case 2:
scaledPower -= REDUCE_SCALED_POWER_BY_TWO_CHAIN;
if (scaledPower > REDUCE_SCALED_POWER_BY_TWO_CHAIN)
scaledPower -= REDUCE_SCALED_POWER_BY_TWO_CHAIN;
else
scaledPower = 0;
break;
case 3:
scaledPower -= REDUCE_SCALED_POWER_BY_THREE_CHAIN;
if (scaledPower > REDUCE_SCALED_POWER_BY_THREE_CHAIN)
scaledPower -= REDUCE_SCALED_POWER_BY_THREE_CHAIN;
else
scaledPower = 0;
break;
}

@@ -524,10 +524,16 @@ static void ath9k_hw_set_ar9287_power_per_rate_table(struct ath_hw *ah,
case 1:
break;
case 2:
scaledPower -= REDUCE_SCALED_POWER_BY_TWO_CHAIN;
if (scaledPower > REDUCE_SCALED_POWER_BY_TWO_CHAIN)
scaledPower -= REDUCE_SCALED_POWER_BY_TWO_CHAIN;
else
scaledPower = 0;
break;
case 3:
scaledPower -= REDUCE_SCALED_POWER_BY_THREE_CHAIN;
if (scaledPower > REDUCE_SCALED_POWER_BY_THREE_CHAIN)
scaledPower -= REDUCE_SCALED_POWER_BY_THREE_CHAIN;
else
scaledPower = 0;
break;
}
scaledPower = max((u16)0, scaledPower);
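The new guards exist because scaledPower is a u16 (note the max((u16)0, scaledPower) in the context line above): an unguarded subtraction wraps around rather than clamping at zero. A standalone illustration (not driver code):

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint16_t scaled = 5;	/* power already close to zero */
		uint16_t reduce = 10;	/* e.g. the two-chain reduction */

		uint16_t wrapped = scaled - reduce;	/* wraps to 65531 */
		uint16_t clamped = scaled > reduce ? scaled - reduce : 0;

		printf("wrapped=%u clamped=%u\n", wrapped, clamped);
		return 0;
	}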

@@ -1543,7 +1543,7 @@ static void iwl4965_temperature_calib(struct iwl_priv *priv)
s32 temp;
temp = iwl4965_hw_get_temperature(priv);
if (temp < 0)
if (IWL_TX_POWER_TEMPERATURE_OUT_OF_RANGE(temp))
return;
if (priv->temperature != temp) {

@@ -603,19 +603,27 @@ struct iwl_cfg iwl6050_2abg_cfg = {
IWL_DEVICE_6050,
};
#define IWL_DEVICE_6150 \
.fw_name_pre = IWL6050_FW_PRE, \
.ucode_api_max = IWL6050_UCODE_API_MAX, \
.ucode_api_min = IWL6050_UCODE_API_MIN, \
.ops = &iwl6150_ops, \
.eeprom_ver = EEPROM_6150_EEPROM_VERSION, \
.eeprom_calib_ver = EEPROM_6150_TX_POWER_VERSION, \
.base_params = &iwl6050_base_params, \
.need_dc_calib = true, \
.led_mode = IWL_LED_BLINK, \
.internal_wimax_coex = true
struct iwl_cfg iwl6150_bgn_cfg = {
.name = "Intel(R) Centrino(R) Wireless-N + WiMAX 6150 BGN",
.fw_name_pre = IWL6050_FW_PRE,
.ucode_api_max = IWL6050_UCODE_API_MAX,
.ucode_api_min = IWL6050_UCODE_API_MIN,
.eeprom_ver = EEPROM_6150_EEPROM_VERSION,
.eeprom_calib_ver = EEPROM_6150_TX_POWER_VERSION,
.ops = &iwl6150_ops,
.base_params = &iwl6050_base_params,
IWL_DEVICE_6150,
.ht_params = &iwl6000_ht_params,
.need_dc_calib = true,
.led_mode = IWL_LED_RF_STATE,
.internal_wimax_coex = true,
};
struct iwl_cfg iwl6150_bg_cfg = {
.name = "Intel(R) Centrino(R) Wireless-N + WiMAX 6150 BG",
IWL_DEVICE_6150,
};
struct iwl_cfg iwl6000_3agn_cfg = {

@@ -3831,11 +3831,11 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
/* 6150 WiFi/WiMax Series */
{IWL_PCI_DEVICE(0x0885, 0x1305, iwl6150_bgn_cfg)},
{IWL_PCI_DEVICE(0x0885, 0x1306, iwl6150_bgn_cfg)},
{IWL_PCI_DEVICE(0x0885, 0x1307, iwl6150_bg_cfg)},
{IWL_PCI_DEVICE(0x0885, 0x1325, iwl6150_bgn_cfg)},
{IWL_PCI_DEVICE(0x0885, 0x1326, iwl6150_bgn_cfg)},
{IWL_PCI_DEVICE(0x0885, 0x1327, iwl6150_bg_cfg)},
{IWL_PCI_DEVICE(0x0886, 0x1315, iwl6150_bgn_cfg)},
{IWL_PCI_DEVICE(0x0886, 0x1316, iwl6150_bgn_cfg)},
{IWL_PCI_DEVICE(0x0886, 0x1317, iwl6150_bg_cfg)},
/* 1000 Series WiFi */
{IWL_PCI_DEVICE(0x0083, 0x1205, iwl1000_bgn_cfg)},

@@ -89,6 +89,7 @@ extern struct iwl_cfg iwl6000_3agn_cfg;
extern struct iwl_cfg iwl6050_2agn_cfg;
extern struct iwl_cfg iwl6050_2abg_cfg;
extern struct iwl_cfg iwl6150_bgn_cfg;
extern struct iwl_cfg iwl6150_bg_cfg;
extern struct iwl_cfg iwl1000_bgn_cfg;
extern struct iwl_cfg iwl1000_bg_cfg;
extern struct iwl_cfg iwl100_bgn_cfg;

@@ -994,6 +994,8 @@ static void lbs_submit_command(struct lbs_private *priv,
cmd = cmdnode->cmdbuf;
spin_lock_irqsave(&priv->driver_lock, flags);
priv->seqnum++;
cmd->seqnum = cpu_to_le16(priv->seqnum);
priv->cur_cmd = cmdnode;
spin_unlock_irqrestore(&priv->driver_lock, flags);
@@ -1621,11 +1623,9 @@ struct cmd_ctrl_node *__lbs_cmd_async(struct lbs_private *priv,
/* Copy the incoming command to the buffer */
memcpy(cmdnode->cmdbuf, in_cmd, in_cmd_size);
/* Set sequence number, clean result, move to buffer */
priv->seqnum++;
/* Set command, clean result, move to buffer */
cmdnode->cmdbuf->command = cpu_to_le16(command);
cmdnode->cmdbuf->size = cpu_to_le16(in_cmd_size);
cmdnode->cmdbuf->seqnum = cpu_to_le16(priv->seqnum);
cmdnode->cmdbuf->result = 0;
lbs_deb_host("PREP_CMD: command 0x%04x\n", command);

@@ -1157,6 +1157,9 @@ struct conf_sched_scan_settings {
/* time to wait on the channel for passive scans (in TUs) */
u32 dwell_time_passive;
/* time to wait on the channel for DFS scans (in TUs) */
u32 dwell_time_dfs;
/* number of probe requests to send on each channel in active scans */
u8 num_probe_reqs;

@@ -311,6 +311,7 @@ static struct conf_drv_settings default_conf = {
.min_dwell_time_active = 8,
.max_dwell_time_active = 30,
.dwell_time_passive = 100,
.dwell_time_dfs = 150,
.num_probe_reqs = 2,
.rssi_threshold = -90,
.snr_threshold = 0,

@@ -331,16 +331,22 @@ wl1271_scan_get_sched_scan_channels(struct wl1271 *wl,
struct conf_sched_scan_settings *c = &wl->conf.sched_scan;
int i, j;
u32 flags;
bool force_passive = !req->n_ssids;
for (i = 0, j = start;
i < req->n_channels && j < MAX_CHANNELS_ALL_BANDS;
i++) {
flags = req->channels[i]->flags;
if (!(flags & IEEE80211_CHAN_DISABLED) &&
((flags & IEEE80211_CHAN_PASSIVE_SCAN) == passive) &&
((flags & IEEE80211_CHAN_RADAR) == radar) &&
(req->channels[i]->band == band)) {
if (force_passive)
flags |= IEEE80211_CHAN_PASSIVE_SCAN;
if ((req->channels[i]->band == band) &&
!(flags & IEEE80211_CHAN_DISABLED) &&
(!!(flags & IEEE80211_CHAN_RADAR) == radar) &&
/* if radar is set, we ignore the passive flag */
(radar ||
!!(flags & IEEE80211_CHAN_PASSIVE_SCAN) == passive)) {
wl1271_debug(DEBUG_SCAN, "band %d, center_freq %d ",
req->channels[i]->band,
req->channels[i]->center_freq);
@@ -350,7 +356,12 @@ wl1271_scan_get_sched_scan_channels(struct wl1271 *wl,
wl1271_debug(DEBUG_SCAN, "max_power %d",
req->channels[i]->max_power);
if (flags & IEEE80211_CHAN_PASSIVE_SCAN) {
if (flags & IEEE80211_CHAN_RADAR) {
channels[j].flags |= SCAN_CHANNEL_FLAGS_DFS;
channels[j].passive_duration =
cpu_to_le16(c->dwell_time_dfs);
}
else if (flags & IEEE80211_CHAN_PASSIVE_SCAN) {
channels[j].passive_duration =
cpu_to_le16(c->dwell_time_passive);
} else {
@@ -359,7 +370,7 @@ wl1271_scan_get_sched_scan_channels(struct wl1271 *wl,
channels[j].max_duration =
cpu_to_le16(c->max_dwell_time_active);
}
channels[j].tx_power_att = req->channels[j]->max_power;
channels[j].tx_power_att = req->channels[i]->max_power;
channels[j].channel = req->channels[i]->hw_value;
j++;
@@ -386,7 +397,11 @@ wl1271_scan_sched_scan_channels(struct wl1271 *wl,
wl1271_scan_get_sched_scan_channels(wl, req, cfg->channels,
IEEE80211_BAND_2GHZ,
false, false, idx);
idx += cfg->active[0];
/*
* 5GHz channels always start at position 14, not immediately
* after the last 2.4GHz channel
*/
idx = 14;
cfg->passive[1] =
wl1271_scan_get_sched_scan_channels(wl, req, cfg->channels,
@@ -394,22 +409,23 @@ wl1271_scan_sched_scan_channels(struct wl1271 *wl,
false, true, idx);
idx += cfg->passive[1];
cfg->active[1] =
wl1271_scan_get_sched_scan_channels(wl, req, cfg->channels,
IEEE80211_BAND_5GHZ,
false, false, 14);
idx += cfg->active[1];
cfg->dfs =
wl1271_scan_get_sched_scan_channels(wl, req, cfg->channels,
IEEE80211_BAND_5GHZ,
true, false, idx);
true, true, idx);
idx += cfg->dfs;
cfg->active[1] =
wl1271_scan_get_sched_scan_channels(wl, req, cfg->channels,
IEEE80211_BAND_5GHZ,
false, false, idx);
idx += cfg->active[1];
wl1271_debug(DEBUG_SCAN, " 2.4GHz: active %d passive %d",
cfg->active[0], cfg->passive[0]);
wl1271_debug(DEBUG_SCAN, " 5GHz: active %d passive %d",
cfg->active[1], cfg->passive[1]);
wl1271_debug(DEBUG_SCAN, " DFS: %d", cfg->dfs);
return idx;
}
@@ -421,6 +437,7 @@ int wl1271_scan_sched_scan_config(struct wl1271 *wl,
struct wl1271_cmd_sched_scan_config *cfg = NULL;
struct conf_sched_scan_settings *c = &wl->conf.sched_scan;
int i, total_channels, ret;
bool force_passive = !req->n_ssids;
wl1271_debug(DEBUG_CMD, "cmd sched_scan scan config");
@@ -444,7 +461,7 @@ int wl1271_scan_sched_scan_config(struct wl1271 *wl,
for (i = 0; i < SCAN_MAX_CYCLE_INTERVALS; i++)
cfg->intervals[i] = cpu_to_le32(req->interval);
if (req->ssids[0].ssid_len && req->ssids[0].ssid) {
if (!force_passive && req->ssids[0].ssid_len && req->ssids[0].ssid) {
cfg->filter_type = SCAN_SSID_FILTER_SPECIFIC;
cfg->ssid_len = req->ssids[0].ssid_len;
memcpy(cfg->ssid, req->ssids[0].ssid,
@@ -461,7 +478,7 @@ int wl1271_scan_sched_scan_config(struct wl1271 *wl,
goto out;
}
if (cfg->active[0]) {
if (!force_passive && cfg->active[0]) {
ret = wl1271_cmd_build_probe_req(wl, req->ssids[0].ssid,
req->ssids[0].ssid_len,
ies->ie[IEEE80211_BAND_2GHZ],
@@ -473,7 +490,7 @@ int wl1271_scan_sched_scan_config(struct wl1271 *wl,
}
}
if (cfg->active[1]) {
if (!force_passive && cfg->active[1]) {
ret = wl1271_cmd_build_probe_req(wl, req->ssids[0].ssid,
req->ssids[0].ssid_len,
ies->ie[IEEE80211_BAND_5GHZ],

@@ -137,6 +137,9 @@ enum {
SCAN_BSS_TYPE_ANY,
};
#define SCAN_CHANNEL_FLAGS_DFS BIT(0)
#define SCAN_CHANNEL_FLAGS_DFS_ENABLED BIT(1)
struct conn_scan_ch_params {
__le16 min_duration;
__le16 max_duration;

@@ -1533,6 +1533,31 @@ static void __exit usb_exit(void)
module_init(usb_init);
module_exit(usb_exit);
static int zd_ep_regs_out_msg(struct usb_device *udev, void *data, int len,
int *actual_length, int timeout)
{
/* In USB 2.0 mode EP_REGS_OUT endpoint is interrupt type. However in
* USB 1.1 mode endpoint is bulk. Select correct type URB by endpoint
* descriptor.
*/
struct usb_host_endpoint *ep;
unsigned int pipe;
pipe = usb_sndintpipe(udev, EP_REGS_OUT);
ep = usb_pipe_endpoint(udev, pipe);
if (!ep)
return -EINVAL;
if (usb_endpoint_xfer_int(&ep->desc)) {
return usb_interrupt_msg(udev, pipe, data, len,
actual_length, timeout);
} else {
pipe = usb_sndbulkpipe(udev, EP_REGS_OUT);
return usb_bulk_msg(udev, pipe, data, len, actual_length,
timeout);
}
}
static int usb_int_regs_length(unsigned int count)
{
return sizeof(struct usb_int_regs) + count * sizeof(struct reg_data);
@@ -1648,15 +1673,14 @@ int zd_usb_ioread16v(struct zd_usb *usb, u16 *values,
udev = zd_usb_to_usbdev(usb);
prepare_read_regs_int(usb);
r = usb_interrupt_msg(udev, usb_sndintpipe(udev, EP_REGS_OUT),
req, req_len, &actual_req_len, 50 /* ms */);
r = zd_ep_regs_out_msg(udev, req, req_len, &actual_req_len, 50 /*ms*/);
if (r) {
dev_dbg_f(zd_usb_dev(usb),
"error in usb_interrupt_msg(). Error number %d\n", r);
"error in zd_ep_regs_out_msg(). Error number %d\n", r);
goto error;
}
if (req_len != actual_req_len) {
dev_dbg_f(zd_usb_dev(usb), "error in usb_interrupt_msg()\n"
dev_dbg_f(zd_usb_dev(usb), "error in zd_ep_regs_out_msg()\n"
" req_len %d != actual_req_len %d\n",
req_len, actual_req_len);
r = -EIO;
@@ -1818,9 +1842,17 @@ int zd_usb_iowrite16v_async(struct zd_usb *usb, const struct zd_ioreq16 *ioreqs,
rw->value = cpu_to_le16(ioreqs[i].value);
}
usb_fill_int_urb(urb, udev, usb_sndintpipe(udev, EP_REGS_OUT),
req, req_len, iowrite16v_urb_complete, usb,
ep->desc.bInterval);
/* In USB 2.0 mode endpoint is interrupt type. However in USB 1.1 mode
* endpoint is bulk. Select correct type URB by endpoint descriptor.
*/
if (usb_endpoint_xfer_int(&ep->desc))
usb_fill_int_urb(urb, udev, usb_sndintpipe(udev, EP_REGS_OUT),
req, req_len, iowrite16v_urb_complete, usb,
ep->desc.bInterval);
else
usb_fill_bulk_urb(urb, udev, usb_sndbulkpipe(udev, EP_REGS_OUT),
req, req_len, iowrite16v_urb_complete, usb);
urb->transfer_flags |= URB_FREE_BUFFER;
/* Submit previous URB */
@@ -1924,15 +1956,14 @@ int zd_usb_rfwrite(struct zd_usb *usb, u32 value, u8 bits)
}
udev = zd_usb_to_usbdev(usb);
r = usb_interrupt_msg(udev, usb_sndintpipe(udev, EP_REGS_OUT),
req, req_len, &actual_req_len, 50 /* ms */);
r = zd_ep_regs_out_msg(udev, req, req_len, &actual_req_len, 50 /*ms*/);
if (r) {
dev_dbg_f(zd_usb_dev(usb),
"error in usb_interrupt_msg(). Error number %d\n", r);
"error in zd_ep_regs_out_msg(). Error number %d\n", r);
goto out;
}
if (req_len != actual_req_len) {
dev_dbg_f(zd_usb_dev(usb), "error in usb_interrupt_msg()"
dev_dbg_f(zd_usb_dev(usb), "error in zd_ep_regs_out_msg()"
" req_len %d != actual_req_len %d\n",
req_len, actual_req_len);
r = -EIO;

@@ -698,12 +698,7 @@ int __init detect_intel_iommu(void)
{
#ifdef CONFIG_INTR_REMAP
struct acpi_table_dmar *dmar;
/*
* for now we will disable dma-remapping when interrupt
* remapping is enabled.
* When support for queued invalidation for IOTLB invalidation
* is added, we will not need this any more.
*/
dmar = (struct acpi_table_dmar *) dmar_tbl;
if (ret && cpu_has_x2apic && dmar->flags & 0x1)
printk(KERN_INFO

@@ -47,6 +47,8 @@
#define ROOT_SIZE VTD_PAGE_SIZE
#define CONTEXT_SIZE VTD_PAGE_SIZE
#define IS_BRIDGE_HOST_DEVICE(pdev) \
((pdev->class >> 8) == PCI_CLASS_BRIDGE_HOST)
#define IS_GFX_DEVICE(pdev) ((pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
#define IS_ISA_DEVICE(pdev) ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
#define IS_AZALIA(pdev) ((pdev)->vendor == 0x8086 && (pdev)->device == 0x3a3e)
@@ -116,6 +118,11 @@ static inline unsigned long align_to_level(unsigned long pfn, int level)
return (pfn + level_size(level) - 1) & level_mask(level);
}
static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
{
return 1 << ((lvl - 1) * LEVEL_STRIDE);
}
/* VT-d pages must always be _smaller_ than MM pages. Otherwise things
are never going to work. */
static inline unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
@@ -142,6 +149,12 @@ static struct intel_iommu **g_iommus;
static void __init check_tylersburg_isoch(void);
static int rwbf_quirk;
/*
* set to 1 to panic kernel if can't successfully enable VT-d
* (used when kernel is launched w/ TXT)
*/
static int force_on = 0;
/*
* 0: Present
* 1-11: Reserved
@@ -338,6 +351,9 @@ struct dmar_domain {
int iommu_coherency;/* indicate coherency of iommu access */
int iommu_snooping; /* indicate snooping control feature*/
int iommu_count; /* reference count of iommu */
int iommu_superpage;/* Level of superpages supported:
0 == 4KiB (no superpages), 1 == 2MiB,
2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
spinlock_t iommu_lock; /* protect iommu set in domain */
u64 max_addr; /* maximum mapped address */
};
@@ -387,6 +403,7 @@ int dmar_disabled = 1;
static int dmar_map_gfx = 1;
static int dmar_forcedac;
static int intel_iommu_strict;
static int intel_iommu_superpage = 1;
#define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
static DEFINE_SPINLOCK(device_domain_lock);
@@ -417,6 +434,10 @@ static int __init intel_iommu_setup(char *str)
printk(KERN_INFO
"Intel-IOMMU: disable batched IOTLB flush\n");
intel_iommu_strict = 1;
} else if (!strncmp(str, "sp_off", 6)) {
printk(KERN_INFO
"Intel-IOMMU: disable supported super page\n");
intel_iommu_superpage = 0;
}
str += strcspn(str, ",");
@@ -555,11 +576,32 @@ static void domain_update_iommu_snooping(struct dmar_domain *domain)
}
}
static void domain_update_iommu_superpage(struct dmar_domain *domain)
{
int i, mask = 0xf;
if (!intel_iommu_superpage) {
domain->iommu_superpage = 0;
return;
}
domain->iommu_superpage = 4; /* 1TiB */
for_each_set_bit(i, &domain->iommu_bmp, g_num_of_iommus) {
mask |= cap_super_page_val(g_iommus[i]->cap);
if (!mask) {
break;
}
}
domain->iommu_superpage = fls(mask);
}
/* Some capabilities may be different across iommus */
static void domain_update_iommu_cap(struct dmar_domain *domain)
{
domain_update_iommu_coherency(domain);
domain_update_iommu_snooping(domain);
domain_update_iommu_superpage(domain);
}
static struct intel_iommu *device_to_iommu(int segment, u8 bus, u8 devfn)
@@ -689,23 +731,31 @@ out:
}
static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
unsigned long pfn)
unsigned long pfn, int large_level)
{
int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
struct dma_pte *parent, *pte = NULL;
int level = agaw_to_level(domain->agaw);
int offset;
int offset, target_level;
BUG_ON(!domain->pgd);
BUG_ON(addr_width < BITS_PER_LONG && pfn >> addr_width);
parent = domain->pgd;
/* Search pte */
if (!large_level)
target_level = 1;
else
target_level = large_level;
while (level > 0) {
void *tmp_page;
offset = pfn_level_offset(pfn, level);
pte = &parent[offset];
if (level == 1)
if (!large_level && (pte->val & DMA_PTE_LARGE_PAGE))
break;
if (level == target_level)
break;
if (!dma_pte_present(pte)) {
@@ -733,10 +783,11 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
return pte;
}
/* return address's pte at specific level */
static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
unsigned long pfn,
int level)
int level, int *large_page)
{
struct dma_pte *parent, *pte = NULL;
int total = agaw_to_level(domain->agaw);
@@ -749,8 +800,16 @@ static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
if (level == total)
return pte;
if (!dma_pte_present(pte))
if (!dma_pte_present(pte)) {
*large_page = total;
break;
}
if (pte->val & DMA_PTE_LARGE_PAGE) {
*large_page = total;
return pte;
}
parent = phys_to_virt(dma_pte_addr(pte));
total--;
}
@@ -763,6 +822,7 @@ static void dma_pte_clear_range(struct dmar_domain *domain,
unsigned long last_pfn)
{
int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
unsigned int large_page = 1;
struct dma_pte *first_pte, *pte;
BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
@@ -771,14 +831,15 @@ static void dma_pte_clear_range(struct dmar_domain *domain,
/* we don't need lock here; nobody else touches the iova range */
do {
first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1);
large_page = 1;
first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1, &large_page);
if (!pte) {
start_pfn = align_to_level(start_pfn + 1, 2);
start_pfn = align_to_level(start_pfn + 1, large_page + 1);
continue;
}
do {
do {
dma_clear_pte(pte);
start_pfn++;
start_pfn += lvl_to_nr_pages(large_page);
pte++;
} while (start_pfn <= last_pfn && !first_pte_in_page(pte));
@@ -798,6 +859,7 @@ static void dma_pte_free_pagetable(struct dmar_domain *domain,
int total = agaw_to_level(domain->agaw);
int level;
unsigned long tmp;
int large_page = 2;
BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
@@ -813,7 +875,10 @@ static void dma_pte_free_pagetable(struct dmar_domain *domain,
return;
do {
first_pte = pte = dma_pfn_level_pte(domain, tmp, level);
large_page = level;
first_pte = pte = dma_pfn_level_pte(domain, tmp, level, &large_page);
if (large_page > level)
level = large_page + 1;
if (!pte) {
tmp = align_to_level(tmp + 1, level + 1);
continue;
@@ -1397,6 +1462,7 @@ static int domain_init(struct dmar_domain *domain, int guest_width)
else
domain->iommu_snooping = 0;
domain->iommu_superpage = fls(cap_super_page_val(iommu->cap));
domain->iommu_count = 1;
domain->nid = iommu->node;
@@ -1417,6 +1483,10 @@ static void domain_exit(struct dmar_domain *domain)
if (!domain)
return;
/* Flush any lazy unmaps that may reference this domain */
if (!intel_iommu_strict)
flush_unmaps_timeout(0);
domain_remove_dev_info(domain);
/* destroy iovas */
put_iova_domain(&domain->iovad);
@@ -1648,6 +1718,34 @@ static inline unsigned long aligned_nrpages(unsigned long host_addr,
return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
}
/* Return largest possible superpage level for a given mapping */
static inline int hardware_largepage_caps(struct dmar_domain *domain,
unsigned long iov_pfn,
unsigned long phy_pfn,
unsigned long pages)
{
int support, level = 1;
unsigned long pfnmerge;
support = domain->iommu_superpage;
/* To use a large page, the virtual *and* physical addresses
must be aligned to 2MiB/1GiB/etc. Lower bits set in either
of them will mean we have to use smaller pages. So just
merge them and check both at once. */
pfnmerge = iov_pfn | phy_pfn;
while (support && !(pfnmerge & ~VTD_STRIDE_MASK)) {
pages >>= VTD_STRIDE_SHIFT;
if (!pages)
break;
pfnmerge >>= VTD_STRIDE_SHIFT;
level++;
support--;
}
return level;
}
static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
struct scatterlist *sg, unsigned long phys_pfn,
unsigned long nr_pages, int prot)
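A worked example of the pfn-merging trick in hardware_largepage_caps() above (a standalone sketch: the 9-bit stride per page-table level mirrors the VTD_STRIDE_SHIFT the patch relies on, but the constant names here are local assumptions):

	#include <stdio.h>

	#define STRIDE_SHIFT	9	/* 512 PTEs per level, as on VT-d */
	#define STRIDE_MASK	((1UL << STRIDE_SHIFT) - 1)

	/* ORing the virtual and physical pfns tests both alignments at once:
	 * a low bit set in either rules out the larger page size. */
	static int largepage_level(unsigned long iov_pfn, unsigned long phy_pfn,
				   unsigned long pages, int support)
	{
		unsigned long pfnmerge = iov_pfn | phy_pfn;
		int level = 1;

		while (support && !(pfnmerge & STRIDE_MASK)) {
			pages >>= STRIDE_SHIFT;
			if (!pages)
				break;
			pfnmerge >>= STRIDE_SHIFT;
			level++;
			support--;
		}
		return level;
	}

	int main(void)
	{
		/* Both pfns 512-page (2MiB) aligned and a 1024-page run:
		 * prints 2, i.e. one step above 4KiB, so 2MiB pages fit. */
		printf("%d\n", largepage_level(0x200, 0x145600, 1024, 3));
		return 0;
	}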
@@ -1656,6 +1754,8 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
phys_addr_t uninitialized_var(pteval);
int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
unsigned long sg_res;
unsigned int largepage_lvl = 0;
unsigned long lvl_pages = 0;
BUG_ON(addr_width < BITS_PER_LONG && (iov_pfn + nr_pages - 1) >> addr_width);
@@ -1671,7 +1771,7 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
}
while (nr_pages--) {
while (nr_pages > 0) {
uint64_t tmp;
if (!sg_res) {
@@ -1679,11 +1779,21 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
sg->dma_address = ((dma_addr_t)iov_pfn << VTD_PAGE_SHIFT) + sg->offset;
sg->dma_length = sg->length;
pteval = page_to_phys(sg_page(sg)) | prot;
phys_pfn = pteval >> VTD_PAGE_SHIFT;
}
if (!pte) {
first_pte = pte = pfn_to_dma_pte(domain, iov_pfn);
largepage_lvl = hardware_largepage_caps(domain, iov_pfn, phys_pfn, sg_res);
first_pte = pte = pfn_to_dma_pte(domain, iov_pfn, largepage_lvl);
if (!pte)
return -ENOMEM;
/* It is large page*/
if (largepage_lvl > 1)
pteval |= DMA_PTE_LARGE_PAGE;
else
pteval &= ~(uint64_t)DMA_PTE_LARGE_PAGE;
}
/* We don't need lock here, nobody else
* touches the iova range
@@ -1699,16 +1809,38 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
}
WARN_ON(1);
}
lvl_pages = lvl_to_nr_pages(largepage_lvl);
BUG_ON(nr_pages < lvl_pages);
BUG_ON(sg_res < lvl_pages);
nr_pages -= lvl_pages;
iov_pfn += lvl_pages;
phys_pfn += lvl_pages;
pteval += lvl_pages * VTD_PAGE_SIZE;
sg_res -= lvl_pages;
/* If the next PTE would be the first in a new page, then we
need to flush the cache on the entries we've just written.
And then we'll need to recalculate 'pte', so clear it and
let it get set again in the if (!pte) block above.
If we're done (!nr_pages) we need to flush the cache too.
Also if we've been setting superpages, we may need to
recalculate 'pte' and switch back to smaller pages for the
end of the mapping, if the trailing size is not enough to
use another superpage (i.e. sg_res < lvl_pages). */
pte++;
if (!nr_pages || first_pte_in_page(pte)) {
if (!nr_pages || first_pte_in_page(pte) ||
(largepage_lvl > 1 && sg_res < lvl_pages)) {
domain_flush_cache(domain, first_pte,
(void *)pte - (void *)first_pte);
pte = NULL;
}
iov_pfn++;
pteval += VTD_PAGE_SIZE;
sg_res--;
if (!sg_res)
if (!sg_res && nr_pages)
sg = sg_next(sg);
}
return 0;
@@ -2016,7 +2148,7 @@ static inline int iommu_prepare_rmrr_dev(struct dmar_rmrr_unit *rmrr,
if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
return 0;
return iommu_prepare_identity_map(pdev, rmrr->base_address,
rmrr->end_address + 1);
rmrr->end_address);
}
#ifdef CONFIG_DMAR_FLOPPY_WA
@@ -2030,7 +2162,7 @@ static inline void iommu_prepare_isa(void)
return;
printk(KERN_INFO "IOMMU: Prepare 0-16MiB unity mapping for LPC\n");
ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024);
ret = iommu_prepare_identity_map(pdev, 0, 16*1024*1024 - 1);
if (ret)
printk(KERN_ERR "IOMMU: Failed to create 0-16MiB identity map; "
@@ -2106,10 +2238,10 @@ static int identity_mapping(struct pci_dev *pdev)
if (likely(!iommu_identity_mapping))
return 0;
info = pdev->dev.archdata.iommu;
if (info && info != DUMMY_DEVICE_DOMAIN_INFO)
return (info->domain == si_domain);
list_for_each_entry(info, &si_domain->devices, link)
if (info->dev == pdev)
return 1;
return 0;
}
@@ -2187,8 +2319,19 @@ static int iommu_should_identity_map(struct pci_dev *pdev, int startup)
* Assume that they will -- if they turn out not to be, then we can
* take them out of the 1:1 domain later.
*/
if (!startup)
return pdev->dma_mask > DMA_BIT_MASK(32);
if (!startup) {
/*
* If the device's dma_mask is less than the system's memory
* size then this is not a candidate for identity mapping.
*/
u64 dma_mask = pdev->dma_mask;
if (pdev->dev.coherent_dma_mask &&
pdev->dev.coherent_dma_mask < dma_mask)
dma_mask = pdev->dev.coherent_dma_mask;
return dma_mask >= dma_get_required_mask(&pdev->dev);
}
return 1;
}
@ -2203,6 +2346,9 @@ static int __init iommu_prepare_static_identity_mapping(int hw)
return -EFAULT;
for_each_pci_dev(pdev) {
/* Skip Host/PCI Bridge devices */
if (IS_BRIDGE_HOST_DEVICE(pdev))
continue;
if (iommu_should_identity_map(pdev, 1)) {
printk(KERN_INFO "IOMMU: %s identity mapping for device %s\n",
hw ? "hardware" : "software", pci_name(pdev));
@ -2218,7 +2364,7 @@ static int __init iommu_prepare_static_identity_mapping(int hw)
return 0;
}
static int __init init_dmars(int force_on)
static int __init init_dmars(void)
{
struct dmar_drhd_unit *drhd;
struct dmar_rmrr_unit *rmrr;
@ -2592,8 +2738,7 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
iommu = domain_get_iommu(domain);
size = aligned_nrpages(paddr, size);
iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size),
pdev->dma_mask);
iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size), dma_mask);
if (!iova)
goto error;
@ -3118,7 +3263,17 @@ static int init_iommu_hw(void)
if (iommu->qi)
dmar_reenable_qi(iommu);
for_each_active_iommu(iommu, drhd) {
for_each_iommu(iommu, drhd) {
if (drhd->ignored) {
/*
* we always have to disable PMRs or DMA may fail on
* this device
*/
if (force_on)
iommu_disable_protect_mem_regions(iommu);
continue;
}
iommu_flush_write_buffer(iommu);
iommu_set_root_entry(iommu);
@ -3127,7 +3282,8 @@ static int init_iommu_hw(void)
DMA_CCMD_GLOBAL_INVL);
iommu->flush.flush_iotlb(iommu, 0, 0, 0,
DMA_TLB_GLOBAL_FLUSH);
iommu_enable_translation(iommu);
if (iommu_enable_translation(iommu))
return 1;
iommu_disable_protect_mem_regions(iommu);
}
@ -3194,7 +3350,10 @@ static void iommu_resume(void)
unsigned long flag;
if (init_iommu_hw()) {
WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
if (force_on)
panic("tboot: IOMMU setup failed, DMAR can not resume!\n");
else
WARN(1, "IOMMU setup failed, DMAR can not resume!\n");
return;
}
@ -3271,7 +3430,6 @@ static struct notifier_block device_nb = {
int __init intel_iommu_init(void)
{
int ret = 0;
int force_on = 0;
/* VT-d is required for a TXT/tboot launch, so enforce that */
force_on = tboot_force_iommu();
@ -3309,7 +3467,7 @@ int __init intel_iommu_init(void)
init_no_remapping_devices();
ret = init_dmars(force_on);
ret = init_dmars();
if (ret) {
if (force_on)
panic("tboot: Failed to initialize DMARs\n");
@ -3380,8 +3538,8 @@ static void domain_remove_one_dev_info(struct dmar_domain *domain,
spin_lock_irqsave(&device_domain_lock, flags);
list_for_each_safe(entry, tmp, &domain->devices) {
info = list_entry(entry, struct device_domain_info, link);
/* No need to compare PCI domain; it has to be the same */
if (info->bus == pdev->bus->number &&
if (info->segment == pci_domain_nr(pdev->bus) &&
info->bus == pdev->bus->number &&
info->devfn == pdev->devfn) {
list_del(&info->link);
list_del(&info->global);
@ -3419,10 +3577,13 @@ static void domain_remove_one_dev_info(struct dmar_domain *domain,
domain_update_iommu_cap(domain);
spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags);
spin_lock_irqsave(&iommu->lock, tmp_flags);
clear_bit(domain->id, iommu->domain_ids);
iommu->domains[domain->id] = NULL;
spin_unlock_irqrestore(&iommu->lock, tmp_flags);
if (!(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) &&
!(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)) {
spin_lock_irqsave(&iommu->lock, tmp_flags);
clear_bit(domain->id, iommu->domain_ids);
iommu->domains[domain->id] = NULL;
spin_unlock_irqrestore(&iommu->lock, tmp_flags);
}
}
spin_unlock_irqrestore(&device_domain_lock, flags);
@ -3505,6 +3666,7 @@ static int md_domain_init(struct dmar_domain *domain, int guest_width)
domain->iommu_count = 0;
domain->iommu_coherency = 0;
domain->iommu_snooping = 0;
domain->iommu_superpage = 0;
domain->max_addr = 0;
domain->nid = -1;
@ -3720,7 +3882,7 @@ static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
struct dma_pte *pte;
u64 phys = 0;
pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT);
pte = pfn_to_dma_pte(dmar_domain, iova >> VTD_PAGE_SHIFT, 0);
if (pte)
phys = dma_pte_addr(pte);

View File

@ -63,8 +63,16 @@ __cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
curr = iovad->cached32_node;
cached_iova = container_of(curr, struct iova, node);
if (free->pfn_lo >= cached_iova->pfn_lo)
iovad->cached32_node = rb_next(&free->node);
if (free->pfn_lo >= cached_iova->pfn_lo) {
struct rb_node *node = rb_next(&free->node);
struct iova *iova = container_of(node, struct iova, node);
/* only cache if it's below 32bit pfn */
if (node && iova->pfn_lo < iovad->dma_32bit_pfn)
iovad->cached32_node = node;
else
iovad->cached32_node = NULL;
}
}
/* Computes the padding size required, to make the

View File

@ -297,7 +297,7 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
kfree(sdev);
goto out;
}
blk_get_queue(sdev->request_queue);
sdev->request_queue->queuedata = sdev;
scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);

View File

@ -322,6 +322,7 @@ static void scsi_device_dev_release_usercontext(struct work_struct *work)
kfree(evt);
}
blk_put_queue(sdev->request_queue);
/* NULL queue means the device can't be used */
sdev->request_queue = NULL;

View File

@ -2128,8 +2128,8 @@ static void gsmld_detach_gsm(struct tty_struct *tty, struct gsm_mux *gsm)
gsm->tty = NULL;
}
static unsigned int gsmld_receive_buf(struct tty_struct *tty,
const unsigned char *cp, char *fp, int count)
static void gsmld_receive_buf(struct tty_struct *tty, const unsigned char *cp,
char *fp, int count)
{
struct gsm_mux *gsm = tty->disc_data;
const unsigned char *dp;
@ -2162,8 +2162,6 @@ static unsigned int gsmld_receive_buf(struct tty_struct *tty,
}
/* FASYNC if needed ? */
/* If clogged call tty_throttle(tty); */
return count;
}
/**

View File

@ -188,8 +188,8 @@ static unsigned int n_hdlc_tty_poll(struct tty_struct *tty, struct file *filp,
poll_table *wait);
static int n_hdlc_tty_open(struct tty_struct *tty);
static void n_hdlc_tty_close(struct tty_struct *tty);
static unsigned int n_hdlc_tty_receive(struct tty_struct *tty,
const __u8 *cp, char *fp, int count);
static void n_hdlc_tty_receive(struct tty_struct *tty, const __u8 *cp,
char *fp, int count);
static void n_hdlc_tty_wakeup(struct tty_struct *tty);
#define bset(p,b) ((p)[(b) >> 5] |= (1 << ((b) & 0x1f)))
@ -509,8 +509,8 @@ static void n_hdlc_tty_wakeup(struct tty_struct *tty)
* Called by tty low level driver when receive data is available. Data is
* interpreted as one HDLC frame.
*/
static unsigned int n_hdlc_tty_receive(struct tty_struct *tty,
const __u8 *data, char *flags, int count)
static void n_hdlc_tty_receive(struct tty_struct *tty, const __u8 *data,
char *flags, int count)
{
register struct n_hdlc *n_hdlc = tty2n_hdlc (tty);
register struct n_hdlc_buf *buf;
@ -521,20 +521,20 @@ static unsigned int n_hdlc_tty_receive(struct tty_struct *tty,
/* This can happen if stuff comes in on the backup tty */
if (!n_hdlc || tty != n_hdlc->tty)
return -ENODEV;
return;
/* verify line is using HDLC discipline */
if (n_hdlc->magic != HDLC_MAGIC) {
printk("%s(%d) line not using HDLC discipline\n",
__FILE__,__LINE__);
return -EINVAL;
return;
}
if ( count>maxframe ) {
if (debuglevel >= DEBUG_LEVEL_INFO)
printk("%s(%d) rx count>maxframesize, data discarded\n",
__FILE__,__LINE__);
return -EINVAL;
return;
}
/* get a free HDLC buffer */
@ -550,7 +550,7 @@ static unsigned int n_hdlc_tty_receive(struct tty_struct *tty,
if (debuglevel >= DEBUG_LEVEL_INFO)
printk("%s(%d) no more rx buffers, data discarded\n",
__FILE__,__LINE__);
return -EINVAL;
return;
}
/* copy received data to HDLC buffer */
@ -565,8 +565,6 @@ static unsigned int n_hdlc_tty_receive(struct tty_struct *tty,
if (n_hdlc->tty->fasync != NULL)
kill_fasync (&n_hdlc->tty->fasync, SIGIO, POLL_IN);
return count;
} /* end of n_hdlc_tty_receive() */
/**

View File

@ -139,8 +139,8 @@ static int r3964_ioctl(struct tty_struct *tty, struct file *file,
static void r3964_set_termios(struct tty_struct *tty, struct ktermios *old);
static unsigned int r3964_poll(struct tty_struct *tty, struct file *file,
struct poll_table_struct *wait);
static unsigned int r3964_receive_buf(struct tty_struct *tty,
const unsigned char *cp, char *fp, int count);
static void r3964_receive_buf(struct tty_struct *tty, const unsigned char *cp,
char *fp, int count);
static struct tty_ldisc_ops tty_ldisc_N_R3964 = {
.owner = THIS_MODULE,
@ -1239,8 +1239,8 @@ static unsigned int r3964_poll(struct tty_struct *tty, struct file *file,
return result;
}
static unsigned int r3964_receive_buf(struct tty_struct *tty,
const unsigned char *cp, char *fp, int count)
static void r3964_receive_buf(struct tty_struct *tty, const unsigned char *cp,
char *fp, int count)
{
struct r3964_info *pInfo = tty->disc_data;
const unsigned char *p;
@ -1257,8 +1257,6 @@ static unsigned int r3964_receive_buf(struct tty_struct *tty,
}
}
return count;
}
MODULE_LICENSE("GPL");

View File

@ -81,6 +81,38 @@ static inline int tty_put_user(struct tty_struct *tty, unsigned char x,
return put_user(x, ptr);
}
/**
* n_tty_set_room - receive space
* @tty: terminal
*
* Called by the driver to find out how much data it is
* permitted to feed to the line discipline without any being lost
* and thus to manage flow control. Not serialized. Answers for the
* "instant".
*/
static void n_tty_set_room(struct tty_struct *tty)
{
/* tty->read_cnt is not read locked ? */
int left = N_TTY_BUF_SIZE - tty->read_cnt - 1;
int old_left;
/*
* If we are doing input canonicalization, and there are no
* pending newlines, let characters through without limit, so
* that erase characters will be handled. Other excess
* characters will be beeped.
*/
if (left <= 0)
left = tty->icanon && !tty->canon_data;
old_left = tty->receive_room;
tty->receive_room = left;
/* Did this open up the receive buffer? We may need to flip */
if (left && !old_left)
schedule_work(&tty->buf.work);
}
static void put_tty_queue_nolock(unsigned char c, struct tty_struct *tty)
{
if (tty->read_cnt < N_TTY_BUF_SIZE) {
@ -152,6 +184,7 @@ static void reset_buffer_flags(struct tty_struct *tty)
tty->canon_head = tty->canon_data = tty->erasing = 0;
memset(&tty->read_flags, 0, sizeof tty->read_flags);
n_tty_set_room(tty);
check_unthrottle(tty);
}
@ -1327,19 +1360,17 @@ static void n_tty_write_wakeup(struct tty_struct *tty)
* calls one at a time and in order (or using flush_to_ldisc)
*/
static unsigned int n_tty_receive_buf(struct tty_struct *tty,
const unsigned char *cp, char *fp, int count)
static void n_tty_receive_buf(struct tty_struct *tty, const unsigned char *cp,
char *fp, int count)
{
const unsigned char *p;
char *f, flags = TTY_NORMAL;
int i;
char buf[64];
unsigned long cpuflags;
int left;
int ret = 0;
if (!tty->read_buf)
return 0;
return;
if (tty->real_raw) {
spin_lock_irqsave(&tty->read_lock, cpuflags);
@ -1349,7 +1380,6 @@ static unsigned int n_tty_receive_buf(struct tty_struct *tty,
memcpy(tty->read_buf + tty->read_head, cp, i);
tty->read_head = (tty->read_head + i) & (N_TTY_BUF_SIZE-1);
tty->read_cnt += i;
ret += i;
cp += i;
count -= i;
@ -1359,10 +1389,8 @@ static unsigned int n_tty_receive_buf(struct tty_struct *tty,
memcpy(tty->read_buf + tty->read_head, cp, i);
tty->read_head = (tty->read_head + i) & (N_TTY_BUF_SIZE-1);
tty->read_cnt += i;
ret += i;
spin_unlock_irqrestore(&tty->read_lock, cpuflags);
} else {
ret = count;
for (i = count, p = cp, f = fp; i; i--, p++) {
if (f)
flags = *f++;
@ -1390,6 +1418,8 @@ static unsigned int n_tty_receive_buf(struct tty_struct *tty,
tty->ops->flush_chars(tty);
}
n_tty_set_room(tty);
if ((!tty->icanon && (tty->read_cnt >= tty->minimum_to_wake)) ||
L_EXTPROC(tty)) {
kill_fasync(&tty->fasync, SIGIO, POLL_IN);
@ -1402,12 +1432,8 @@ static unsigned int n_tty_receive_buf(struct tty_struct *tty,
* mode. We don't want to throttle the driver if we're in
* canonical mode and don't have a newline yet!
*/
left = N_TTY_BUF_SIZE - tty->read_cnt - 1;
if (left < TTY_THRESHOLD_THROTTLE)
if (tty->receive_room < TTY_THRESHOLD_THROTTLE)
tty_throttle(tty);
return ret;
}
int is_ignored(int sig)
@ -1451,6 +1477,7 @@ static void n_tty_set_termios(struct tty_struct *tty, struct ktermios *old)
if (test_bit(TTY_HW_COOK_IN, &tty->flags)) {
tty->raw = 1;
tty->real_raw = 1;
n_tty_set_room(tty);
return;
}
if (I_ISTRIP(tty) || I_IUCLC(tty) || I_IGNCR(tty) ||
@ -1503,6 +1530,7 @@ static void n_tty_set_termios(struct tty_struct *tty, struct ktermios *old)
else
tty->real_raw = 0;
}
n_tty_set_room(tty);
/* The termios change make the tty ready for I/O */
wake_up_interruptible(&tty->write_wait);
wake_up_interruptible(&tty->read_wait);
@ -1784,6 +1812,8 @@ do_it_again:
retval = -ERESTARTSYS;
break;
}
/* FIXME: does n_tty_set_room need locking ? */
n_tty_set_room(tty);
timeout = schedule_timeout(timeout);
continue;
}
@ -1855,8 +1885,10 @@ do_it_again:
* longer than TTY_THRESHOLD_UNTHROTTLE in canonical mode,
* we won't get any more characters.
*/
if (n_tty_chars_in_buffer(tty) <= TTY_THRESHOLD_UNTHROTTLE)
if (n_tty_chars_in_buffer(tty) <= TTY_THRESHOLD_UNTHROTTLE) {
n_tty_set_room(tty);
check_unthrottle(tty);
}
if (b - buf >= minimum)
break;
@ -1878,6 +1910,7 @@ do_it_again:
} else if (test_and_clear_bit(TTY_PUSH, &tty->flags))
goto do_it_again;
n_tty_set_room(tty);
return retval;
}

View File

@ -416,7 +416,6 @@ static void flush_to_ldisc(struct work_struct *work)
struct tty_buffer *head, *tail = tty->buf.tail;
int seen_tail = 0;
while ((head = tty->buf.head) != NULL) {
int copied;
int count;
char *char_buf;
unsigned char *flag_buf;
@ -443,19 +442,17 @@ static void flush_to_ldisc(struct work_struct *work)
line discipline as we want to empty the queue */
if (test_bit(TTY_FLUSHPENDING, &tty->flags))
break;
if (!tty->receive_room || seen_tail)
break;
if (count > tty->receive_room)
count = tty->receive_room;
char_buf = head->char_buf_ptr + head->read;
flag_buf = head->flag_buf_ptr + head->read;
head->read += count;
spin_unlock_irqrestore(&tty->buf.lock, flags);
copied = disc->ops->receive_buf(tty, char_buf,
disc->ops->receive_buf(tty, char_buf,
flag_buf, count);
spin_lock_irqsave(&tty->buf.lock, flags);
head->read += copied;
if (copied == 0 || seen_tail) {
schedule_work(&tty->buf.work);
break;
}
}
clear_bit(TTY_FLUSHING, &tty->flags);
}

View File

@ -332,7 +332,8 @@ int paste_selection(struct tty_struct *tty)
continue;
}
count = sel_buffer_lth - pasted;
count = tty->ldisc->ops->receive_buf(tty, sel_buffer + pasted,
count = min(count, tty->receive_room);
tty->ldisc->ops->receive_buf(tty, sel_buffer + pasted,
NULL, count);
pasted += count;
}

View File

@ -1272,8 +1272,8 @@ int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder)
* individual writeable reference is too fragile given the
* way @mode is used in blkdev_get/put().
*/
if ((disk->flags & GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE) &&
!res && (mode & FMODE_WRITE) && !bdev->bd_write_holder) {
if (!res && (mode & FMODE_WRITE) && !bdev->bd_write_holder &&
(disk->flags & GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE)) {
bdev->bd_write_holder = true;
disk_block_events(disk);
}

View File

@ -255,13 +255,7 @@ ssize_t part_discard_alignment_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct hd_struct *p = dev_to_part(dev);
struct gendisk *disk = dev_to_disk(dev);
unsigned int alignment = 0;
if (disk->queue)
alignment = queue_limit_discard_alignment(&disk->queue->limits,
p->start_sect);
return sprintf(buf, "%u\n", alignment);
return sprintf(buf, "%u\n", p->discard_alignment);
}
ssize_t part_stat_show(struct device *dev,
@ -455,6 +449,8 @@ struct hd_struct *add_partition(struct gendisk *disk, int partno,
p->start_sect = start;
p->alignment_offset =
queue_limit_alignment_offset(&disk->queue->limits, start);
p->discard_alignment =
queue_limit_discard_alignment(&disk->queue->limits, start);
p->nr_sects = len;
p->partno = partno;
p->policy = get_disk_ro(disk);

View File

@ -581,6 +581,7 @@ int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len)
ubifs_assert(wbuf->size % c->min_io_size == 0);
ubifs_assert(mutex_is_locked(&wbuf->io_mutex));
ubifs_assert(!c->ro_media && !c->ro_mount);
ubifs_assert(!c->space_fixup);
if (c->leb_size - wbuf->offs >= c->max_write_size)
ubifs_assert(!((wbuf->offs + wbuf->size) % c->max_write_size));
@ -759,6 +760,7 @@ int ubifs_write_node(struct ubifs_info *c, void *buf, int len, int lnum,
ubifs_assert(lnum >= 0 && lnum < c->leb_cnt && offs >= 0);
ubifs_assert(offs % c->min_io_size == 0 && offs < c->leb_size);
ubifs_assert(!c->ro_media && !c->ro_mount);
ubifs_assert(!c->space_fixup);
if (c->ro_error)
return -EROFS;

View File

@ -669,6 +669,7 @@ out_free:
out_release:
release_head(c, BASEHD);
kfree(dent);
out_ro:
ubifs_ro_mode(c, err);
if (last_reference)

View File

@ -674,7 +674,7 @@ static int kill_orphans(struct ubifs_info *c)
if (IS_ERR(sleb)) {
if (PTR_ERR(sleb) == -EUCLEAN)
sleb = ubifs_recover_leb(c, lnum, 0,
c->sbuf, 0);
c->sbuf, -1);
if (IS_ERR(sleb)) {
err = PTR_ERR(sleb);
break;

View File

@ -564,19 +564,15 @@ static int fix_unclean_leb(struct ubifs_info *c, struct ubifs_scan_leb *sleb,
}
/**
* drop_last_node - drop the last node or group of nodes.
* drop_last_group - drop the last group of nodes.
* @sleb: scanned LEB information
* @offs: offset of dropped nodes is returned here
* @grouped: non-zero if whole group of nodes have to be dropped
*
* This is a helper function for 'ubifs_recover_leb()' which drops the last
* node of the scanned LEB or the last group of nodes if @grouped is not zero.
* This function returns %1 if a node was dropped and %0 otherwise.
* group of nodes of the scanned LEB.
*/
static int drop_last_node(struct ubifs_scan_leb *sleb, int *offs, int grouped)
static void drop_last_group(struct ubifs_scan_leb *sleb, int *offs)
{
int dropped = 0;
while (!list_empty(&sleb->nodes)) {
struct ubifs_scan_node *snod;
struct ubifs_ch *ch;
@ -585,17 +581,40 @@ static int drop_last_node(struct ubifs_scan_leb *sleb, int *offs, int grouped)
list);
ch = snod->node;
if (ch->group_type != UBIFS_IN_NODE_GROUP)
return dropped;
dbg_rcvry("dropping node at %d:%d", sleb->lnum, snod->offs);
break;
dbg_rcvry("dropping grouped node at %d:%d",
sleb->lnum, snod->offs);
*offs = snod->offs;
list_del(&snod->list);
kfree(snod);
sleb->nodes_cnt -= 1;
}
}
/**
* drop_last_node - drop the last node.
* @sleb: scanned LEB information
* @offs: offset of dropped nodes is returned here
* @grouped: non-zero if whole group of nodes have to be dropped
*
* This is a helper function for 'ubifs_recover_leb()' which drops the last
* node of the scanned LEB.
*/
static void drop_last_node(struct ubifs_scan_leb *sleb, int *offs)
{
struct ubifs_scan_node *snod;
if (!list_empty(&sleb->nodes)) {
snod = list_entry(sleb->nodes.prev, struct ubifs_scan_node,
list);
dbg_rcvry("dropping last node at %d:%d", sleb->lnum, snod->offs);
*offs = snod->offs;
list_del(&snod->list);
kfree(snod);
sleb->nodes_cnt -= 1;
dropped = 1;
if (!grouped)
break;
}
return dropped;
}
/**
@ -604,7 +623,8 @@ static int drop_last_node(struct ubifs_scan_leb *sleb, int *offs, int grouped)
* @lnum: LEB number
* @offs: offset
* @sbuf: LEB-sized buffer to use
* @grouped: nodes may be grouped for recovery
* @jhead: journal head number this LEB belongs to (%-1 if the LEB does not
* belong to any journal head)
*
* This function does a scan of a LEB, but caters for errors that might have
* been caused by the unclean unmount from which we are attempting to recover.
@ -612,13 +632,14 @@ static int drop_last_node(struct ubifs_scan_leb *sleb, int *offs, int grouped)
* found, and a negative error code in case of failure.
*/
struct ubifs_scan_leb *ubifs_recover_leb(struct ubifs_info *c, int lnum,
int offs, void *sbuf, int grouped)
int offs, void *sbuf, int jhead)
{
int ret = 0, err, len = c->leb_size - offs, start = offs, min_io_unit;
int grouped = jhead == -1 ? 0 : c->jheads[jhead].grouped;
struct ubifs_scan_leb *sleb;
void *buf = sbuf + offs;
dbg_rcvry("%d:%d", lnum, offs);
dbg_rcvry("%d:%d, jhead %d, grouped %d", lnum, offs, jhead, grouped);
sleb = ubifs_start_scan(c, lnum, offs, sbuf);
if (IS_ERR(sleb))
@ -635,7 +656,7 @@ struct ubifs_scan_leb *ubifs_recover_leb(struct ubifs_info *c, int lnum,
* Scan quietly until there is an error from which we cannot
* recover
*/
ret = ubifs_scan_a_node(c, buf, len, lnum, offs, 0);
ret = ubifs_scan_a_node(c, buf, len, lnum, offs, 1);
if (ret == SCANNED_A_NODE) {
/* A valid node, and not a padding node */
struct ubifs_ch *ch = buf;
@ -695,59 +716,62 @@ struct ubifs_scan_leb *ubifs_recover_leb(struct ubifs_info *c, int lnum,
* If nodes are grouped, always drop the incomplete group at
* the end.
*/
drop_last_node(sleb, &offs, 1);
drop_last_group(sleb, &offs);
/*
* While we are in the middle of the same min. I/O unit keep dropping
* nodes. So basically, what we want is to make sure that the last min.
* I/O unit where we saw the corruption is dropped completely with all
* the uncorrupted node which may possibly sit there.
*
* In other words, let's name the min. I/O unit where the corruption
* starts B, and the previous min. I/O unit A. The below code tries to
* deal with a situation when half of B contains valid nodes or the end
* of a valid node, and the second half of B contains corrupted data or
* garbage. This means that UBIFS had been writing to B just before the
* power cut happened. I do not know how realistic is this scenario
* that half of the min. I/O unit had been written successfully and the
* other half not, but this is possible in our 'failure mode emulation'
* infrastructure at least.
*
* So what is the problem, why we need to drop those nodes? Why can't
* we just clean-up the second half of B by putting a padding node
* there? We can, and this works fine with one exception which was
* reproduced with power cut emulation testing and happens extremely
* rarely. The description follows, but it is worth noting that that is
* only about the GC head, so we could do this trick only if the bud
* belongs to the GC head, but it does not seem to be worth an
* additional "if" statement.
*
* So, imagine the file-system is full, we run GC which is moving valid
* nodes from LEB X to LEB Y (obviously, LEB Y is the current GC head
* LEB). The @c->gc_lnum is -1, which means that GC will retain LEB X
* and will try to continue. Imagine that LEB X is currently the
* dirtiest LEB, and the amount of used space in LEB Y is exactly the
* same as amount of free space in LEB X.
*
* And a power cut happens when nodes are moved from LEB X to LEB Y. We
* are here trying to recover LEB Y which is the GC head LEB. We find
* the min. I/O unit B as described above. Then we clean-up LEB Y by
* padding min. I/O unit. And later 'ubifs_rcvry_gc_commit()' function
* fails, because it cannot find a dirty LEB which could be GC'd into
* LEB Y! Even LEB X does not match because the amount of valid nodes
* there does not fit the free space in LEB Y any more! And this is
* because of the padding node which we added to LEB Y. The
* user-visible effect of this which I once observed and analysed is
* that we cannot mount the file-system with -ENOSPC error.
*
* So obviously, to make sure that situation does not happen we should
* free min. I/O unit B in LEB Y completely and the last used min. I/O
* unit in LEB Y should be A. This is basically what the below code
* tries to do.
*/
while (min_io_unit == round_down(offs, c->min_io_size) &&
min_io_unit != offs &&
drop_last_node(sleb, &offs, grouped));
if (jhead == GCHD) {
/*
* If this LEB belongs to the GC head then while we are in the
* middle of the same min. I/O unit keep dropping nodes. So
* basically, what we want is to make sure that the last min.
* I/O unit where we saw the corruption is dropped completely
* with all the uncorrupted nodes which may possibly sit there.
*
* In other words, let's name the min. I/O unit where the
* corruption starts B, and the previous min. I/O unit A. The
* below code tries to deal with a situation when half of B
* contains valid nodes or the end of a valid node, and the
* second half of B contains corrupted data or garbage. This
* means that UBIFS had been writing to B just before the power
* cut happened. I do not know how realistic is this scenario
* that half of the min. I/O unit had been written successfully
* and the other half not, but this is possible in our 'failure
* mode emulation' infrastructure at least.
*
* So what is the problem, why we need to drop those nodes? Why
* can't we just clean-up the second half of B by putting a
* padding node there? We can, and this works fine with one
* exception which was reproduced with power cut emulation
* testing and happens extremely rarely.
*
* Imagine the file-system is full, we run GC which starts
* moving valid nodes from LEB X to LEB Y (obviously, LEB Y is
* the current GC head LEB). The @c->gc_lnum is -1, which means
* that GC will retain LEB X and will try to continue. Imagine
* that LEB X is currently the dirtiest LEB, and the amount of
* used space in LEB Y is exactly the same as amount of free
* space in LEB X.
*
* And a power cut happens when nodes are moved from LEB X to
* LEB Y. We are here trying to recover LEB Y which is the GC
* head LEB. We find the min. I/O unit B as described above.
* Then we clean-up LEB Y by padding min. I/O unit. And later
* 'ubifs_rcvry_gc_commit()' function fails, because it cannot
* find a dirty LEB which could be GC'd into LEB Y! Even LEB X
* does not match because the amount of valid nodes there does
* not fit the free space in LEB Y any more! And this is
* because of the padding node which we added to LEB Y. The
* user-visible effect of this which I once observed and
* analysed is that we cannot mount the file-system with
* -ENOSPC error.
*
* So obviously, to make sure that situation does not happen we
* should free min. I/O unit B in LEB Y completely and the last
* used min. I/O unit in LEB Y should be A. This is basically
* what the below code tries to do.
*/
while (offs > min_io_unit)
drop_last_node(sleb, &offs);
}
buf = sbuf + offs;
len = c->leb_size - offs;
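To make the round_down() reasoning above concrete, a worked example with assumed numbers (illustrative values, not from the patch):

/*
 * Assume c->min_io_size = 2048 and the corruption is first seen at
 * offs = 5000.  Then min_io_unit = round_down(5000, 2048) = 4096, i.e.
 * unit B is [4096, 6144) and unit A is [2048, 4096).  The GC-head loop
 * above keeps calling drop_last_node() until offs <= 4096, so B ends up
 * completely unused and the last written min. I/O unit is A, as required.
 */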
@ -881,7 +905,7 @@ struct ubifs_scan_leb *ubifs_recover_log_leb(struct ubifs_info *c, int lnum,
}
ubifs_scan_destroy(sleb);
}
return ubifs_recover_leb(c, lnum, offs, sbuf, 0);
return ubifs_recover_leb(c, lnum, offs, sbuf, -1);
}
/**

View File

@ -557,8 +557,7 @@ static int replay_bud(struct ubifs_info *c, struct bud_entry *b)
* these LEBs could possibly be written to at the power cut
* time.
*/
sleb = ubifs_recover_leb(c, lnum, offs, c->sbuf,
b->bud->jhead != GCHD);
sleb = ubifs_recover_leb(c, lnum, offs, c->sbuf, b->bud->jhead);
else
sleb = ubifs_scan(c, lnum, offs, c->sbuf, 0);
if (IS_ERR(sleb))

View File

@ -284,7 +284,11 @@ int ubifs_shrinker(struct shrinker *shrink, struct shrink_control *sc)
long clean_zn_cnt = atomic_long_read(&ubifs_clean_zn_cnt);
if (nr == 0)
return clean_zn_cnt;
/*
* Due to the way UBIFS updates the clean znode counter it may
* temporarily be negative.
*/
return clean_zn_cnt >= 0 ? clean_zn_cnt : 1;
if (!clean_zn_cnt) {
/*

View File

@ -811,15 +811,18 @@ static int alloc_wbufs(struct ubifs_info *c)
c->jheads[i].wbuf.sync_callback = &bud_wbuf_callback;
c->jheads[i].wbuf.jhead = i;
c->jheads[i].grouped = 1;
}
c->jheads[BASEHD].wbuf.dtype = UBI_SHORTTERM;
/*
* Garbage Collector head likely contains long-term data and
* does not need to be synchronized by timer.
* does not need to be synchronized by timer. Also GC head nodes are
* not grouped.
*/
c->jheads[GCHD].wbuf.dtype = UBI_LONGTERM;
c->jheads[GCHD].wbuf.no_timer = 1;
c->jheads[GCHD].grouped = 0;
return 0;
}
@ -1284,12 +1287,25 @@ static int mount_ubifs(struct ubifs_info *c)
if ((c->mst_node->flags & cpu_to_le32(UBIFS_MST_DIRTY)) != 0) {
ubifs_msg("recovery needed");
c->need_recovery = 1;
if (!c->ro_mount) {
err = ubifs_recover_inl_heads(c, c->sbuf);
if (err)
goto out_master;
}
} else if (!c->ro_mount) {
}
if (c->need_recovery && !c->ro_mount) {
err = ubifs_recover_inl_heads(c, c->sbuf);
if (err)
goto out_master;
}
err = ubifs_lpt_init(c, 1, !c->ro_mount);
if (err)
goto out_master;
if (!c->ro_mount && c->space_fixup) {
err = ubifs_fixup_free_space(c);
if (err)
goto out_master;
}
if (!c->ro_mount) {
/*
* Set the "dirty" flag so that if we reboot uncleanly we
* will notice this immediately on the next mount.
@ -1297,13 +1313,9 @@ static int mount_ubifs(struct ubifs_info *c)
c->mst_node->flags |= cpu_to_le32(UBIFS_MST_DIRTY);
err = ubifs_write_master(c);
if (err)
goto out_master;
goto out_lpt;
}
err = ubifs_lpt_init(c, 1, !c->ro_mount);
if (err)
goto out_lpt;
err = dbg_check_idx_size(c, c->bi.old_idx_sz);
if (err)
goto out_lpt;
@ -1396,12 +1408,6 @@ static int mount_ubifs(struct ubifs_info *c)
} else
ubifs_assert(c->lst.taken_empty_lebs > 0);
if (!c->ro_mount && c->space_fixup) {
err = ubifs_fixup_free_space(c);
if (err)
goto out_infos;
}
err = dbg_check_filesystem(c);
if (err)
goto out_infos;

View File

@ -2876,12 +2876,13 @@ static void tnc_destroy_cnext(struct ubifs_info *c)
*/
void ubifs_tnc_close(struct ubifs_info *c)
{
long clean_freed;
tnc_destroy_cnext(c);
if (c->zroot.znode) {
clean_freed = ubifs_destroy_tnc_subtree(c->zroot.znode);
atomic_long_sub(clean_freed, &ubifs_clean_zn_cnt);
long n;
ubifs_destroy_tnc_subtree(c->zroot.znode);
n = atomic_long_read(&c->clean_zn_cnt);
atomic_long_sub(n, &ubifs_clean_zn_cnt);
}
kfree(c->gap_lebs);
kfree(c->ilebs);

View File

@ -722,12 +722,14 @@ struct ubifs_bud {
* struct ubifs_jhead - journal head.
* @wbuf: head's write-buffer
* @buds_list: list of bud LEBs belonging to this journal head
* @grouped: non-zero if UBIFS groups nodes when writing to this journal head
*
* Note, the @buds list is protected by the @c->buds_lock.
*/
struct ubifs_jhead {
struct ubifs_wbuf wbuf;
struct list_head buds_list;
unsigned int grouped:1;
};
/**
@ -1742,7 +1744,7 @@ struct inode *ubifs_iget(struct super_block *sb, unsigned long inum);
int ubifs_recover_master_node(struct ubifs_info *c);
int ubifs_write_rcvrd_mst_node(struct ubifs_info *c);
struct ubifs_scan_leb *ubifs_recover_leb(struct ubifs_info *c, int lnum,
int offs, void *sbuf, int grouped);
int offs, void *sbuf, int jhead);
struct ubifs_scan_leb *ubifs_recover_log_leb(struct ubifs_info *c, int lnum,
int offs, void *sbuf);
int ubifs_recover_inl_heads(const struct ubifs_info *c, void *sbuf);

View File

@ -683,9 +683,11 @@ __SC_COMP(__NR_clock_adjtime, sys_clock_adjtime, compat_sys_clock_adjtime)
__SYSCALL(__NR_syncfs, sys_syncfs)
#define __NR_setns 268
__SYSCALL(__NR_setns, sys_setns)
#define __NR_sendmmsg 269
__SC_COMP(__NR_sendmmsg, sys_sendmmsg, compat_sys_sendmmsg)
#undef __NR_syscalls
#define __NR_syscalls 269
#define __NR_syscalls 270
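Since no libc wrapper existed when this number was assigned, a user-space caller would invoke the new system call directly. A hypothetical sketch: the number 269 is the generic-table value from the hunk above (architecture-specific tables assign different values), and struct mmsghdr is mirrored locally on the assumption that the libc headers do not yet provide it:

#define _GNU_SOURCE
#include <string.h>
#include <sys/socket.h>
#include <sys/syscall.h>
#include <sys/uio.h>
#include <unistd.h>

struct my_mmsghdr {			/* mirrors the kernel's struct mmsghdr */
	struct msghdr msg_hdr;
	unsigned int msg_len;		/* bytes sent, filled in by the kernel */
};

static int send_two(int fd)
{
	struct iovec iov[2] = {
		{ .iov_base = "one", .iov_len = 3 },
		{ .iov_base = "two", .iov_len = 3 },
	};
	struct my_mmsghdr msgs[2];

	memset(msgs, 0, sizeof(msgs));
	msgs[0].msg_hdr.msg_iov = &iov[0];
	msgs[0].msg_hdr.msg_iovlen = 1;
	msgs[1].msg_hdr.msg_iov = &iov[1];
	msgs[1].msg_hdr.msg_iovlen = 1;

	/* returns the number of messages sent, or -1 on error */
	return syscall(269 /* __NR_sendmmsg, generic table */, fd, msgs, 2, 0);
}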
/*
* All syscalls below here should go away really,

View File

@ -1282,8 +1282,8 @@ queue_max_integrity_segments(struct request_queue *q)
#define blk_get_integrity(a) (0)
#define blk_integrity_compare(a, b) (0)
#define blk_integrity_register(a, b) (0)
#define blk_integrity_unregister(a) do { } while (0);
#define blk_queue_max_integrity_segments(a, b) do { } while (0);
#define blk_integrity_unregister(a) do { } while (0)
#define blk_queue_max_integrity_segments(a, b) do { } while (0)
#define queue_max_integrity_segments(a) (0)
#define blk_integrity_merge_rq(a, b, c) (0)
#define blk_integrity_merge_bio(a, b, c) (0)
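The dropped trailing semicolons are not cosmetic: "do { } while (0);" expands to a statement plus an empty statement, which breaks an if/else wrapped around the stub. A contrived sketch of the failure mode (caller names hypothetical, not from the patch):

/* With the old definition this expanded to "do { } while (0);;" -
 * the stray ';' is an empty statement that terminates the if-body,
 * leaving the 'else' below unmatched, so code like this failed to
 * build whenever CONFIG_BLK_DEV_INTEGRITY was off. */
if (have_integrity)
	blk_integrity_unregister(disk);
else
	pr_warn("no integrity profile\n");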

View File

@ -9,8 +9,12 @@
#define VTD_PAGE_MASK (((u64)-1) << VTD_PAGE_SHIFT)
#define VTD_PAGE_ALIGN(addr) (((addr) + VTD_PAGE_SIZE - 1) & VTD_PAGE_MASK)
#define VTD_STRIDE_SHIFT (9)
#define VTD_STRIDE_MASK (((u64)-1) << VTD_STRIDE_SHIFT)
#define DMA_PTE_READ (1)
#define DMA_PTE_WRITE (2)
#define DMA_PTE_LARGE_PAGE (1 << 7)
#define DMA_PTE_SNP (1 << 11)
#define CONTEXT_TT_MULTI_LEVEL 0

View File

@ -100,6 +100,7 @@ struct hd_struct {
sector_t start_sect;
sector_t nr_sects;
sector_t alignment_offset;
unsigned int discard_alignment;
struct device __dev;
struct kobject *holder_dir;
int policy, partno;

View File

@ -1003,8 +1003,12 @@ struct ieee80211_ht_info {
#define WLAN_CAPABILITY_ESS (1<<0)
#define WLAN_CAPABILITY_IBSS (1<<1)
/* A mesh STA sets the ESS and IBSS capability bits to zero */
#define WLAN_CAPABILITY_IS_MBSS(cap) \
/*
* A mesh STA sets the ESS and IBSS capability bits to zero.
* However, this holds true for p2p probe responses (in the p2p_find
* phase) as well.
*/
#define WLAN_CAPABILITY_IS_STA_BSS(cap) \
(!((cap) & (WLAN_CAPABILITY_ESS | WLAN_CAPABILITY_IBSS)))
#define WLAN_CAPABILITY_CF_POLLABLE (1<<2)

View File

@ -70,6 +70,7 @@ struct tpacket_auxdata {
#define TP_STATUS_COPY 0x2
#define TP_STATUS_LOSING 0x4
#define TP_STATUS_CSUMNOTREADY 0x8
#define TP_STATUS_VLAN_VALID 0x10 /* auxdata has valid tp_vlan_tci */
/* Tx ring - header status */
#define TP_STATUS_AVAILABLE 0x0

View File

@ -19,6 +19,7 @@
#include <linux/mtd/partitions.h>
struct map_info;
struct platform_device;
struct physmap_flash_data {
unsigned int width;

View File

@ -76,7 +76,7 @@
* tty device. It is solely the responsibility of the line
* discipline to handle poll requests.
*
* unsigned int (*receive_buf)(struct tty_struct *, const unsigned char *cp,
* void (*receive_buf)(struct tty_struct *, const unsigned char *cp,
* char *fp, int count);
*
* This function is called by the low-level tty driver to send
@ -84,8 +84,7 @@
* processing. <cp> is a pointer to the buffer of input
* character received by the device. <fp> is a pointer to a
* pointer of flag bytes which indicate whether a character was
* received with a parity error, etc. Returns the amount of bytes
* received.
* received with a parity error, etc.
*
* void (*write_wakeup)(struct tty_struct *);
*
@ -141,8 +140,8 @@ struct tty_ldisc_ops {
/*
* The following routines are called from below.
*/
unsigned int (*receive_buf)(struct tty_struct *,
const unsigned char *cp, char *fp, int count);
void (*receive_buf)(struct tty_struct *, const unsigned char *cp,
char *fp, int count);
void (*write_wakeup)(struct tty_struct *);
void (*dcd_change)(struct tty_struct *, unsigned int,
struct pps_event_time *);
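Under the new contract the return value is gone: the driver clamps count to tty->receive_room before calling in, and the discipline must consume everything it is handed, expressing back-pressure by throttling instead of a short return. A minimal conforming callback, sketched with hypothetical demo_* helpers:

static void demo_receive_buf(struct tty_struct *tty, const unsigned char *cp,
			     char *fp, int count)
{
	/* count never exceeds tty->receive_room, so all of it must be
	 * accepted; there is no way to report a partial consume. */
	demo_buffer_input(tty->disc_data, cp, fp, count);

	/* Back-pressure is now expressed by throttling the driver. */
	if (demo_space_left(tty->disc_data) < DEMO_THRESHOLD)
		tty_throttle(tty);
}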

View File

@ -12,22 +12,24 @@
TRACE_EVENT(net_dev_xmit,
TP_PROTO(struct sk_buff *skb,
int rc),
int rc,
struct net_device *dev,
unsigned int skb_len),
TP_ARGS(skb, rc),
TP_ARGS(skb, rc, dev, skb_len),
TP_STRUCT__entry(
__field( void *, skbaddr )
__field( unsigned int, len )
__field( int, rc )
__string( name, skb->dev->name )
__string( name, dev->name )
),
TP_fast_assign(
__entry->skbaddr = skb;
__entry->len = skb->len;
__entry->len = skb_len;
__entry->rc = rc;
__assign_str(name, skb->dev->name);
__assign_str(name, dev->name);
),
TP_printk("dev=%s skbaddr=%p len=%u rc=%d",

View File

@ -697,7 +697,7 @@ config DEBUG_BUGVERBOSE
bool "Verbose BUG() reporting (adds 70K)" if DEBUG_KERNEL && EXPERT
depends on BUG
depends on ARM || AVR32 || M32R || M68K || SPARC32 || SPARC64 || \
FRV || SUPERH || GENERIC_BUG || BLACKFIN || MN10300
FRV || SUPERH || GENERIC_BUG || BLACKFIN || MN10300 || TILE
default y
help
Say Y here to make BUG() panics output the file name and line number

View File

@ -2247,10 +2247,6 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
if (should_fail_alloc_page(gfp_mask, order))
return NULL;
#ifndef CONFIG_ZONE_DMA
if (WARN_ON_ONCE(gfp_mask & __GFP_DMA))
return NULL;
#endif
/*
* Check the zones suitable for the gfp_mask contain at least one

View File

@ -167,7 +167,7 @@ static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb,
u64_stats_update_begin(&stats->syncp);
stats->tx_packets++;
stats->tx_bytes += len;
u64_stats_update_begin(&stats->syncp);
u64_stats_update_end(&stats->syncp);
} else {
this_cpu_inc(vlan_dev_info(dev)->vlan_pcpu_stats->tx_dropped);
}

View File

@ -906,7 +906,7 @@ static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr
if (c->psm == psm) {
/* Exact match. */
if (!bacmp(&bt_sk(sk)->src, src)) {
read_unlock_bh(&chan_list_lock);
read_unlock(&chan_list_lock);
return c;
}

View File

@ -139,17 +139,14 @@ static void close_work(struct work_struct *work)
struct chnl_net *dev = NULL;
struct list_head *list_node;
struct list_head *_tmp;
/* May be called with or without RTNL lock held */
int islocked = rtnl_is_locked();
if (!islocked)
rtnl_lock();
rtnl_lock();
list_for_each_safe(list_node, _tmp, &chnl_net_list) {
dev = list_entry(list_node, struct chnl_net, list_field);
if (dev->state == CAIF_SHUTDOWN)
dev_close(dev->netdev);
}
if (!islocked)
rtnl_unlock();
rtnl_unlock();
}
static DECLARE_WORK(close_worker, close_work);

View File

@ -2096,6 +2096,7 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
{
const struct net_device_ops *ops = dev->netdev_ops;
int rc = NETDEV_TX_OK;
unsigned int skb_len;
if (likely(!skb->next)) {
u32 features;
@ -2146,8 +2147,9 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
}
}
skb_len = skb->len;
rc = ops->ndo_start_xmit(skb, dev);
trace_net_dev_xmit(skb, rc);
trace_net_dev_xmit(skb, rc, dev, skb_len);
if (rc == NETDEV_TX_OK)
txq_trans_update(txq);
return rc;
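The motivation for the new tracepoint arguments is only implicit in this hunk: the driver's ndo_start_xmit() may free the skb before returning, so the tracepoint can no longer dereference it. In comment form:

/* skb_len and dev are snapshotted while the skb is still owned by
 * this function; after ndo_start_xmit() the skb may already have been
 * freed, so trace_net_dev_xmit() records it only as an opaque address
 * and never dereferences it. */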
@ -2167,8 +2169,9 @@ gso:
if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
skb_dst_drop(nskb);
skb_len = nskb->len;
rc = ops->ndo_start_xmit(nskb, dev);
trace_net_dev_xmit(nskb, rc);
trace_net_dev_xmit(nskb, rc, dev, skb_len);
if (unlikely(rc != NETDEV_TX_OK)) {
if (rc & ~NETDEV_TX_MASK)
goto out_kfree_gso_skb;

View File

@ -465,6 +465,9 @@ int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
if (addr_len < sizeof(struct sockaddr_in))
goto out;
if (addr->sin_family != AF_INET)
goto out;
chk_addr_ret = inet_addr_type(sock_net(sk), addr->sin_addr.s_addr);
/* Not specified by any standard per-se, however it breaks too

View File

@ -798,7 +798,12 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
getnstimeofday(&ts);
h.h2->tp_sec = ts.tv_sec;
h.h2->tp_nsec = ts.tv_nsec;
h.h2->tp_vlan_tci = vlan_tx_tag_get(skb);
if (vlan_tx_tag_present(skb)) {
h.h2->tp_vlan_tci = vlan_tx_tag_get(skb);
status |= TP_STATUS_VLAN_VALID;
} else {
h.h2->tp_vlan_tci = 0;
}
hdrlen = sizeof(*h.h2);
break;
default:
@ -1725,8 +1730,12 @@ static int packet_recvmsg(struct kiocb *iocb, struct socket *sock,
aux.tp_snaplen = skb->len;
aux.tp_mac = 0;
aux.tp_net = skb_network_offset(skb);
aux.tp_vlan_tci = vlan_tx_tag_get(skb);
if (vlan_tx_tag_present(skb)) {
aux.tp_vlan_tci = vlan_tx_tag_get(skb);
aux.tp_status |= TP_STATUS_VLAN_VALID;
} else {
aux.tp_vlan_tci = 0;
}
put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux);
}
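With TP_STATUS_VLAN_VALID, user space can finally tell an untagged frame from one tagged with tci 0. A hypothetical consumer of the auxdata (control-message plumbing omitted):

#include <stdio.h>
#include <linux/if_packet.h>

/* aux points at the struct tpacket_auxdata delivered via the
 * PACKET_AUXDATA control message. */
static void show_vlan(const struct tpacket_auxdata *aux)
{
	if (aux->tp_status & TP_STATUS_VLAN_VALID)
		printf("VLAN tag, tci=%u\n", aux->tp_vlan_tci);
	else
		printf("untagged (tp_vlan_tci is 0 but meaningless)\n");
}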

View File

@ -267,13 +267,35 @@ static bool is_bss(struct cfg80211_bss *a,
return memcmp(ssidie + 2, ssid, ssid_len) == 0;
}
static bool is_mesh_bss(struct cfg80211_bss *a)
{
const u8 *ie;
if (!WLAN_CAPABILITY_IS_STA_BSS(a->capability))
return false;
ie = cfg80211_find_ie(WLAN_EID_MESH_ID,
a->information_elements,
a->len_information_elements);
if (!ie)
return false;
ie = cfg80211_find_ie(WLAN_EID_MESH_CONFIG,
a->information_elements,
a->len_information_elements);
if (!ie)
return false;
return true;
}
static bool is_mesh(struct cfg80211_bss *a,
const u8 *meshid, size_t meshidlen,
const u8 *meshcfg)
{
const u8 *ie;
if (!WLAN_CAPABILITY_IS_MBSS(a->capability))
if (!WLAN_CAPABILITY_IS_STA_BSS(a->capability))
return false;
ie = cfg80211_find_ie(WLAN_EID_MESH_ID,
@ -311,7 +333,7 @@ static int cmp_bss(struct cfg80211_bss *a,
if (a->channel != b->channel)
return b->channel->center_freq - a->channel->center_freq;
if (WLAN_CAPABILITY_IS_MBSS(a->capability | b->capability)) {
if (is_mesh_bss(a) && is_mesh_bss(b)) {
r = cmp_ies(WLAN_EID_MESH_ID,
a->information_elements,
a->len_information_elements,
@ -457,7 +479,6 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev,
struct cfg80211_internal_bss *res)
{
struct cfg80211_internal_bss *found = NULL;
const u8 *meshid, *meshcfg;
/*
* The reference to "res" is donated to this function.
@ -470,22 +491,6 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev,
res->ts = jiffies;
if (WLAN_CAPABILITY_IS_MBSS(res->pub.capability)) {
/* must be mesh, verify */
meshid = cfg80211_find_ie(WLAN_EID_MESH_ID,
res->pub.information_elements,
res->pub.len_information_elements);
meshcfg = cfg80211_find_ie(WLAN_EID_MESH_CONFIG,
res->pub.information_elements,
res->pub.len_information_elements);
if (!meshid || !meshcfg ||
meshcfg[1] != sizeof(struct ieee80211_meshconf_ie)) {
/* bogus mesh */
kref_put(&res->ref, bss_release);
return NULL;
}
}
spin_lock_bh(&dev->bss_lock);
found = rb_find_bss(dev, res);

View File

@ -593,7 +593,8 @@ static int apparmor_setprocattr(struct task_struct *task, char *name,
sa.aad.op = OP_SETPROCATTR;
sa.aad.info = name;
sa.aad.error = -EINVAL;
return aa_audit(AUDIT_APPARMOR_DENIED, NULL, GFP_KERNEL,
return aa_audit(AUDIT_APPARMOR_DENIED,
__aa_current_profile(), GFP_KERNEL,
&sa, NULL);
}
} else if (strcmp(name, "exec") == 0) {

View File

@ -262,14 +262,14 @@ static int v253_hangup(struct tty_struct *tty)
}
/* Line discipline .receive_buf() */
static unsigned int v253_receive(struct tty_struct *tty,
const unsigned char *cp, char *fp, int count)
static void v253_receive(struct tty_struct *tty,
const unsigned char *cp, char *fp, int count)
{
struct snd_soc_codec *codec = tty->disc_data;
struct cx20442_priv *cx20442;
if (!codec)
return count;
return;
cx20442 = snd_soc_codec_get_drvdata(codec);
@ -281,8 +281,6 @@ static unsigned int v253_receive(struct tty_struct *tty,
codec->hw_write = (hw_write_t)tty->ops->write;
codec->card->pop_time = 1;
}
return count;
}
/* Line discipline .write_wakeup() */

Some files were not shown because too many files have changed in this diff.