Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net

Conflicts:
	Documentation/devicetree/bindings/net/micrel-ks8851.txt
	net/core/netpoll.c

The net/core/netpoll.c conflict is a bug fix in 'net' applied to
code that has been completely removed in 'net-next'.

In micrel-ks8851.txt we simply have overlapping changes.

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 04f58c8854
Author: David S. Miller <davem@davemloft.net>
Date:   2014-03-25 20:29:20 -04:00

129 changed files with 973 additions and 786 deletions


@ -4,3 +4,6 @@ Required properties:
- compatible = "micrel,ks8851-ml" of parallel interface
- reg : 2 physical address and size of registers for data and command
- interrupts : interrupt connection
Optional properties:
- vdd-supply: supply for Ethernet mac


@ -226,9 +226,9 @@ Ring setup:
void *rx_ring, *tx_ring;
/* Configure ring parameters */
if (setsockopt(fd, NETLINK_RX_RING, &req, sizeof(req)) < 0)
if (setsockopt(fd, SOL_NETLINK, NETLINK_RX_RING, &req, sizeof(req)) < 0)
exit(1);
if (setsockopt(fd, NETLINK_TX_RING, &req, sizeof(req)) < 0)
if (setsockopt(fd, SOL_NETLINK, NETLINK_TX_RING, &req, sizeof(req)) < 0)
exit(1);
/* Calculate size of each individual ring */
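For context, a minimal sketch of the corrected ring setup, assuming the
memory-mapped netlink API of this era (struct nl_mmap_req from
<linux/netlink.h>, since removed from later mainline kernels); the point of
the fix is that NETLINK_RX_RING and NETLINK_TX_RING are socket options at
the SOL_NETLINK level, which the old example omitted. The geometry values
are illustrative only.

#include <stdlib.h>
#include <string.h>
#include <sys/socket.h>		/* SOL_NETLINK on glibc */
#include <linux/netlink.h>

static void setup_rings(int fd)
{
	struct nl_mmap_req req;

	memset(&req, 0, sizeof(req));
	req.nm_block_size = 4096;	/* illustrative geometry */
	req.nm_block_nr   = 64;
	req.nm_frame_size = 2048;
	req.nm_frame_nr   = 128;	/* block_nr * block_size / frame_size */

	/* Both options live at the SOL_NETLINK level: */
	if (setsockopt(fd, SOL_NETLINK, NETLINK_RX_RING, &req, sizeof(req)) < 0)
		exit(1);
	if (setsockopt(fd, SOL_NETLINK, NETLINK_TX_RING, &req, sizeof(req)) < 0)
		exit(1);
}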


@ -918,11 +918,11 @@ F: arch/arm/include/asm/hardware/dec21285.h
F: arch/arm/mach-footbridge/
ARM/FREESCALE IMX / MXC ARM ARCHITECTURE
M: Shawn Guo <shawn.guo@linaro.org>
M: Shawn Guo <shawn.guo@freescale.com>
M: Sascha Hauer <kernel@pengutronix.de>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
T: git git://git.linaro.org/people/shawnguo/linux-2.6.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/shawnguo/linux.git
F: arch/arm/mach-imx/
F: arch/arm/boot/dts/imx*
F: arch/arm/configs/imx*_defconfig
@ -4558,6 +4558,7 @@ M: Greg Rose <gregory.v.rose@intel.com>
M: Alex Duyck <alexander.h.duyck@intel.com>
M: John Ronciak <john.ronciak@intel.com>
M: Mitch Williams <mitch.a.williams@intel.com>
M: Linux NICS <linux.nics@intel.com>
L: e1000-devel@lists.sourceforge.net
W: http://www.intel.com/support/feedback.htm
W: http://e1000.sourceforge.net/


@ -1,7 +1,7 @@
VERSION = 3
PATCHLEVEL = 14
SUBLEVEL = 0
EXTRAVERSION = -rc6
EXTRAVERSION = -rc8
NAME = Shuffling Zombie Juror
# *DOCUMENTATION*


@ -8,8 +8,8 @@
*/
#include "sama5d3.dtsi"
#include "sama5d3_can.dtsi"
#include "sama5d3_emac.dtsi"
#include "sama5d3_gmac.dtsi"
#include "sama5d3_emac.dtsi"
#include "sama5d3_lcd.dtsi"
#include "sama5d3_mci2.dtsi"
#include "sama5d3_tcb1.dtsi"


@ -1776,12 +1776,12 @@ endchoice
config FORCE_MAX_ZONEORDER
int "Maximum zone order"
range 14 64 if HUGETLB_PAGE && PAGE_SIZE_64KB
default "14" if HUGETLB_PAGE && PAGE_SIZE_64KB
range 13 64 if HUGETLB_PAGE && PAGE_SIZE_32KB
default "13" if HUGETLB_PAGE && PAGE_SIZE_32KB
range 12 64 if HUGETLB_PAGE && PAGE_SIZE_16KB
default "12" if HUGETLB_PAGE && PAGE_SIZE_16KB
range 14 64 if MIPS_HUGE_TLB_SUPPORT && PAGE_SIZE_64KB
default "14" if MIPS_HUGE_TLB_SUPPORT && PAGE_SIZE_64KB
range 13 64 if MIPS_HUGE_TLB_SUPPORT && PAGE_SIZE_32KB
default "13" if MIPS_HUGE_TLB_SUPPORT && PAGE_SIZE_32KB
range 12 64 if MIPS_HUGE_TLB_SUPPORT && PAGE_SIZE_16KB
default "12" if MIPS_HUGE_TLB_SUPPORT && PAGE_SIZE_16KB
range 11 64
default "11"
help
@ -2353,9 +2353,8 @@ config SECCOMP
If unsure, say Y. Only embedded should say N here.
config MIPS_O32_FP64_SUPPORT
bool "Support for O32 binaries using 64-bit FP"
bool "Support for O32 binaries using 64-bit FP (EXPERIMENTAL)"
depends on 32BIT || MIPS32_O32
default y
help
When this is enabled, the kernel will support use of 64-bit floating
point registers with binaries using the O32 ABI along with the
@ -2367,7 +2366,14 @@ config MIPS_O32_FP64_SUPPORT
of your kernel & potentially improve FP emulation performance by
saying N here.
If unsure, say Y.
Although binutils currently supports use of this flag the details
concerning its effect upon the O32 ABI in userland are still being
worked on. In order to avoid userland becoming dependent upon current
behaviour before the details have been finalised, this option should
be considered experimental and only enabled by those working upon
said details.
If unsure, say N.
config USE_OF
bool


@ -53,10 +53,8 @@ void __init prom_init(void)
prom_init_cmdline();
memsize_str = prom_getenv("memsize");
if (!memsize_str)
if (!memsize_str || kstrtoul(memsize_str, 0, &memsize))
memsize = 0x04000000;
else
strict_strtoul(memsize_str, 0, &memsize);
add_memory_region(0, memsize, BOOT_MEM_RAM);
}
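The change folds the parse into the NULL check: previously the
strict_strtoul() return value was ignored, so a malformed "memsize" left the
variable uninitialized, whereas a failing kstrtoul() now takes the same
64 MB fallback as a missing variable. A userspace analogue of the fixed
pattern (parse_memsize() is a name invented here for illustration):

#include <stdlib.h>

static unsigned long parse_memsize(const char *s)
{
	char *end;
	unsigned long v;

	/* Missing *or* malformed input takes the same fallback path,
	 * mirroring "!memsize_str || kstrtoul(...)" above. */
	if (!s)
		return 0x04000000;	/* 64 MB default */
	v = strtoul(s, &end, 0);
	if (end == s || *end != '\0')
		return 0x04000000;
	return v;
}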


@ -52,10 +52,8 @@ void __init prom_init(void)
prom_init_cmdline();
memsize_str = prom_getenv("memsize");
if (!memsize_str)
if (!memsize_str || kstrtoul(memsize_str, 0, &memsize))
memsize = 0x04000000;
else
strict_strtoul(memsize_str, 0, &memsize);
add_memory_region(0, memsize, BOOT_MEM_RAM);
}


@ -1,3 +1,4 @@
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/string.h>
#include <bcm47xx_board.h>


@ -196,7 +196,7 @@ int bcm47xx_nvram_gpio_pin(const char *name)
char nvram_var[10];
char buf[30];
for (i = 0; i < 16; i++) {
for (i = 0; i < 32; i++) {
err = snprintf(nvram_var, sizeof(nvram_var), "gpio%i", i);
if (err <= 0)
continue;


@ -975,10 +975,6 @@ static int octeon_irq_ciu_xlat(struct irq_domain *d,
if (ciu > 1 || bit > 63)
return -EINVAL;
/* These are the GPIO lines */
if (ciu == 0 && bit >= 16 && bit < 32)
return -EINVAL;
*out_hwirq = (ciu << 6) | bit;
*out_type = 0;
@ -1007,6 +1003,10 @@ static int octeon_irq_ciu_map(struct irq_domain *d,
if (!octeon_irq_virq_in_range(virq))
return -EINVAL;
/* Don't map irq if it is reserved for GPIO. */
if (line == 0 && bit >= 16 && bit < 32)
return 0;
if (line > 1 || octeon_irq_ciu_to_irq[line][bit] != 0)
return -EINVAL;
@ -1525,10 +1525,6 @@ static int octeon_irq_ciu2_xlat(struct irq_domain *d,
ciu = intspec[0];
bit = intspec[1];
/* Line 7 are the GPIO lines */
if (ciu > 6 || bit > 63)
return -EINVAL;
*out_hwirq = (ciu << 6) | bit;
*out_type = 0;
@ -1570,8 +1566,14 @@ static int octeon_irq_ciu2_map(struct irq_domain *d,
if (!octeon_irq_virq_in_range(virq))
return -EINVAL;
/* Line 7 are the GPIO lines */
if (line > 6 || octeon_irq_ciu_to_irq[line][bit] != 0)
/*
* Don't map irq if it is reserved for GPIO.
* (Line 7 are the GPIO lines.)
*/
if (line == 7)
return 0;
if (line > 7 || octeon_irq_ciu_to_irq[line][bit] != 0)
return -EINVAL;
if (octeon_irq_ciu2_is_edge(line, bit))


@ -9,6 +9,7 @@
#define _ASM_ASMMACRO_H
#include <asm/hazards.h>
#include <asm/asm-offsets.h>
#ifdef CONFIG_32BIT
#include <asm/asmmacro-32.h>
@ -54,11 +55,21 @@
.endm
.macro local_irq_disable reg=t0
#ifdef CONFIG_PREEMPT
lw \reg, TI_PRE_COUNT($28)
addi \reg, \reg, 1
sw \reg, TI_PRE_COUNT($28)
#endif
mfc0 \reg, CP0_STATUS
ori \reg, \reg, 1
xori \reg, \reg, 1
mtc0 \reg, CP0_STATUS
irq_disable_hazard
#ifdef CONFIG_PREEMPT
lw \reg, TI_PRE_COUNT($28)
addi \reg, \reg, -1
sw \reg, TI_PRE_COUNT($28)
#endif
.endm
#endif /* CONFIG_MIPS_MT_SMTC */
@ -106,7 +117,7 @@
.endm
.macro fpu_save_double thread status tmp
#if defined(CONFIG_MIPS64) || defined(CONFIG_CPU_MIPS32_R2)
#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2)
sll \tmp, \status, 5
bgez \tmp, 10f
fpu_save_16odd \thread
@ -159,7 +170,7 @@
.endm
.macro fpu_restore_double thread status tmp
#if defined(CONFIG_MIPS64) || defined(CONFIG_CPU_MIPS32_R2)
#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2)
sll \tmp, \status, 5
bgez \tmp, 10f # 16 register mode?


@ -57,7 +57,7 @@ static inline int __enable_fpu(enum fpu_mode mode)
return 0;
case FPU_64BIT:
#if !(defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_MIPS64))
#if !(defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_64BIT))
/* we only have a 32-bit FPU */
return SIGFPE;
#endif


@ -22,12 +22,12 @@ extern void _mcount(void);
#define safe_load(load, src, dst, error) \
do { \
asm volatile ( \
"1: " load " %[" STR(dst) "], 0(%[" STR(src) "])\n"\
" li %[" STR(error) "], 0\n" \
"1: " load " %[tmp_dst], 0(%[tmp_src])\n" \
" li %[tmp_err], 0\n" \
"2:\n" \
\
".section .fixup, \"ax\"\n" \
"3: li %[" STR(error) "], 1\n" \
"3: li %[tmp_err], 1\n" \
" j 2b\n" \
".previous\n" \
\
@ -35,8 +35,8 @@ do { \
STR(PTR) "\t1b, 3b\n\t" \
".previous\n" \
\
: [dst] "=&r" (dst), [error] "=r" (error)\
: [src] "r" (src) \
: [tmp_dst] "=&r" (dst), [tmp_err] "=r" (error)\
: [tmp_src] "r" (src) \
: "memory" \
); \
} while (0)
@ -44,12 +44,12 @@ do { \
#define safe_store(store, src, dst, error) \
do { \
asm volatile ( \
"1: " store " %[" STR(src) "], 0(%[" STR(dst) "])\n"\
" li %[" STR(error) "], 0\n" \
"1: " store " %[tmp_src], 0(%[tmp_dst])\n"\
" li %[tmp_err], 0\n" \
"2:\n" \
\
".section .fixup, \"ax\"\n" \
"3: li %[" STR(error) "], 1\n" \
"3: li %[tmp_err], 1\n" \
" j 2b\n" \
".previous\n" \
\
@ -57,8 +57,8 @@ do { \
STR(PTR) "\t1b, 3b\n\t" \
".previous\n" \
\
: [error] "=r" (error) \
: [dst] "r" (dst), [src] "r" (src)\
: [tmp_err] "=r" (error) \
: [tmp_dst] "r" (dst), [tmp_src] "r" (src)\
: "memory" \
); \
} while (0)
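The rename to fixed tmp_* operand names is, presumably, because the old
macros built asm operand names by stringifying their arguments: that only
works when the caller passes a plain identifier, and an expression argument
produces an invalid operand name. A contrived sketch of the difference
(MIPS asm, illustrative only):

/* Stringified argument as operand name: breaks for expressions. */
#define BAD_LOAD(src, dst)					\
	asm("lw %[" #dst "], 0(%[" #src "])"			\
	    : [dst] "=r" (dst) : [src] "r" (src))

/* BAD_LOAD(p, obj->field) would emit the operand name
 * "[obj->field]", which the compiler rejects.  With fixed
 * internal names the expression appears only in the constraint
 * list, where any lvalue is legal: */
#define GOOD_LOAD(src, dst)					\
	asm("lw %[tmp_dst], 0(%[tmp_src])"			\
	    : [tmp_dst] "=r" (dst) : [tmp_src] "r" (src))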


@ -13,6 +13,7 @@
#ifndef __ASM_MIPS_SYSCALL_H
#define __ASM_MIPS_SYSCALL_H
#include <linux/compiler.h>
#include <linux/audit.h>
#include <linux/elf-em.h>
#include <linux/kernel.h>
@ -39,14 +40,14 @@ static inline unsigned long mips_get_syscall_arg(unsigned long *arg,
#ifdef CONFIG_32BIT
case 4: case 5: case 6: case 7:
return get_user(*arg, (int *)usp + 4 * n);
return get_user(*arg, (int *)usp + n);
#endif
#ifdef CONFIG_64BIT
case 4: case 5: case 6: case 7:
#ifdef CONFIG_MIPS32_O32
if (test_thread_flag(TIF_32BIT_REGS))
return get_user(*arg, (int *)usp + 4 * n);
return get_user(*arg, (int *)usp + n);
else
#endif
*arg = regs->regs[4 + n];
@ -57,6 +58,8 @@ static inline unsigned long mips_get_syscall_arg(unsigned long *arg,
default:
BUG();
}
unreachable();
}
static inline long syscall_get_return_value(struct task_struct *task,
@ -83,11 +86,10 @@ static inline void syscall_get_arguments(struct task_struct *task,
unsigned int i, unsigned int n,
unsigned long *args)
{
unsigned long arg;
int ret;
while (n--)
ret |= mips_get_syscall_arg(&arg, task, regs, i++);
ret |= mips_get_syscall_arg(args++, task, regs, i++);
/*
* No way to communicate an error because this is a void function.

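The "usp + 4 * n" change is worth spelling out: usp is an int *, so pointer
arithmetic already scales by sizeof(int), and the extra factor of four
indexed 16 bytes per argument slot instead of 4. A self-contained
illustration:

#include <assert.h>
#include <stddef.h>

int main(void)
{
	int stack[8];
	int *usp = stack;
	unsigned n = 3;

	/* Pointer arithmetic scales by the element size, so usp + n
	 * is already argument slot n; usp + 4 * n would be slot 4n. */
	assert((char *)(usp + n) - (char *)usp == (ptrdiff_t)(n * sizeof(int)));
	return 0;
}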

@ -163,8 +163,8 @@ enum cop1_sdw_func {
*/
enum cop1x_func {
lwxc1_op = 0x00, ldxc1_op = 0x01,
pfetch_op = 0x07, swxc1_op = 0x08,
sdxc1_op = 0x09, madd_s_op = 0x20,
swxc1_op = 0x08, sdxc1_op = 0x09,
pfetch_op = 0x0f, madd_s_op = 0x20,
madd_d_op = 0x21, madd_e_op = 0x22,
msub_s_op = 0x28, msub_d_op = 0x29,
msub_e_op = 0x2a, nmadd_s_op = 0x30,


@ -111,11 +111,10 @@ static int ftrace_modify_code_2(unsigned long ip, unsigned int new_code1,
safe_store_code(new_code1, ip, faulted);
if (unlikely(faulted))
return -EFAULT;
ip += 4;
safe_store_code(new_code2, ip, faulted);
safe_store_code(new_code2, ip + 4, faulted);
if (unlikely(faulted))
return -EFAULT;
flush_icache_range(ip, ip + 8); /* original ip + 12 */
flush_icache_range(ip, ip + 8);
return 0;
}
#endif


@ -35,9 +35,9 @@
LEAF(_save_fp_context)
cfc1 t1, fcr31
#if defined(CONFIG_64BIT) || defined(CONFIG_MIPS32_R2)
#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2)
.set push
#ifdef CONFIG_MIPS32_R2
#ifdef CONFIG_CPU_MIPS32_R2
.set mips64r2
mfc0 t0, CP0_STATUS
sll t0, t0, 5
@ -146,11 +146,11 @@ LEAF(_save_fp_context32)
* - cp1 status/control register
*/
LEAF(_restore_fp_context)
EX lw t0, SC_FPC_CSR(a0)
EX lw t1, SC_FPC_CSR(a0)
#if defined(CONFIG_64BIT) || defined(CONFIG_MIPS32_R2)
#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2)
.set push
#ifdef CONFIG_MIPS32_R2
#ifdef CONFIG_CPU_MIPS32_R2
.set mips64r2
mfc0 t0, CP0_STATUS
sll t0, t0, 5
@ -191,7 +191,7 @@ LEAF(_restore_fp_context)
EX ldc1 $f26, SC_FPREGS+208(a0)
EX ldc1 $f28, SC_FPREGS+224(a0)
EX ldc1 $f30, SC_FPREGS+240(a0)
ctc1 t0, fcr31
ctc1 t1, fcr31
jr ra
li v0, 0 # success
END(_restore_fp_context)
@ -199,7 +199,7 @@ LEAF(_restore_fp_context)
#ifdef CONFIG_MIPS32_COMPAT
LEAF(_restore_fp_context32)
/* Restore an o32 sigcontext. */
EX lw t0, SC32_FPC_CSR(a0)
EX lw t1, SC32_FPC_CSR(a0)
mfc0 t0, CP0_STATUS
sll t0, t0, 5
@ -239,7 +239,7 @@ LEAF(_restore_fp_context32)
EX ldc1 $f26, SC32_FPREGS+208(a0)
EX ldc1 $f28, SC32_FPREGS+224(a0)
EX ldc1 $f30, SC32_FPREGS+240(a0)
ctc1 t0, fcr31
ctc1 t1, fcr31
jr ra
li v0, 0 # success
END(_restore_fp_context32)


@ -112,5 +112,8 @@ void __exit rtlx_module_exit(void)
for (i = 0; i < RTLX_CHANNELS; i++)
device_destroy(mt_class, MKDEV(major, i));
unregister_chrdev(major, RTLX_MODULE_NAME);
aprp_hook = NULL;
}


@ -144,5 +144,8 @@ void __exit rtlx_module_exit(void)
for (i = 0; i < RTLX_CHANNELS; i++)
device_destroy(mt_class, MKDEV(major, i));
unregister_chrdev(major, RTLX_MODULE_NAME);
aprp_hook = NULL;
}


@ -1538,10 +1538,10 @@ static int fpux_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
break;
}
case 0x7: /* 7 */
if (MIPSInst_FUNC(ir) != pfetch_op) {
case 0x3:
if (MIPSInst_FUNC(ir) != pfetch_op)
return SIGILL;
}
/* ignore prefx operation */
break;


@ -72,7 +72,7 @@ int amon_cpu_start(int cpu,
return 0;
}
#ifdef CONFIG_MIPS_VPE_LOADER
#ifdef CONFIG_MIPS_VPE_LOADER_CMP
int vpe_run(struct vpe *v)
{
struct vpe_notifications *n;


@ -119,7 +119,7 @@ static void malta_hw0_irqdispatch(void)
do_IRQ(MALTA_INT_BASE + irq);
#ifdef MIPS_VPE_APSP_API
#ifdef CONFIG_MIPS_VPE_APSP_API_MT
if (aprp_hook)
aprp_hook();
#endif
@ -310,7 +310,7 @@ static void ipi_call_dispatch(void)
static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
{
#ifdef MIPS_VPE_APSP_API
#ifdef CONFIG_MIPS_VPE_APSP_API_CMP
if (aprp_hook)
aprp_hook();
#endif


@ -150,6 +150,7 @@ msi_irq_allocated:
msg.address_lo =
((128ul << 20) + CVMX_PCI_MSI_RCV) & 0xffffffff;
msg.address_hi = ((128ul << 20) + CVMX_PCI_MSI_RCV) >> 32;
break;
case OCTEON_DMA_BAR_TYPE_BIG:
/* When using big bar, Bar 0 is based at 0 */
msg.address_lo = (0 + CVMX_PCI_MSI_RCV) & 0xffffffff;


@ -32,17 +32,6 @@ void copy_page_asm(void *to, void *from);
void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
struct page *pg);
/* #define CONFIG_PARISC_TMPALIAS */
#ifdef CONFIG_PARISC_TMPALIAS
void clear_user_highpage(struct page *page, unsigned long vaddr);
#define clear_user_highpage clear_user_highpage
struct vm_area_struct;
void copy_user_highpage(struct page *to, struct page *from,
unsigned long vaddr, struct vm_area_struct *vma);
#define __HAVE_ARCH_COPY_USER_HIGHPAGE
#endif
/*
* These are used to make use of C type-checking..
*/


@ -191,8 +191,4 @@ static __inline__ int arch_write_can_lock(arch_rwlock_t *rw)
#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
#define arch_spin_relax(lock) cpu_relax()
#define arch_read_relax(lock) cpu_relax()
#define arch_write_relax(lock) cpu_relax()
#endif /* __ASM_SPINLOCK_H */


@ -828,13 +828,13 @@
#define __NR_finit_module (__NR_Linux + 333)
#define __NR_sched_setattr (__NR_Linux + 334)
#define __NR_sched_getattr (__NR_Linux + 335)
#define __NR_utimes (__NR_Linux + 336)
#define __NR_Linux_syscalls (__NR_sched_getattr + 1)
#define __NR_Linux_syscalls (__NR_utimes + 1)
#define __IGNORE_select /* newselect */
#define __IGNORE_fadvise64 /* fadvise64_64 */
#define __IGNORE_utimes /* utime */
#define HPUX_GATEWAY_ADDR 0xC0000004


@ -581,67 +581,3 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long
__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
}
}
#ifdef CONFIG_PARISC_TMPALIAS
void clear_user_highpage(struct page *page, unsigned long vaddr)
{
void *vto;
unsigned long flags;
/* Clear using TMPALIAS region. The page doesn't need to
be flushed but the kernel mapping needs to be purged. */
vto = kmap_atomic(page);
/* The PA-RISC 2.0 Architecture book states on page F-6:
"Before a write-capable translation is enabled, *all*
non-equivalently-aliased translations must be removed
from the page table and purged from the TLB. (Note
that the caches are not required to be flushed at this
time.) Before any non-equivalent aliased translation
is re-enabled, the virtual address range for the writeable
page (the entire page) must be flushed from the cache,
and the write-capable translation removed from the page
table and purged from the TLB." */
purge_kernel_dcache_page_asm((unsigned long)vto);
purge_tlb_start(flags);
pdtlb_kernel(vto);
purge_tlb_end(flags);
preempt_disable();
clear_user_page_asm(vto, vaddr);
preempt_enable();
pagefault_enable(); /* kunmap_atomic(addr, KM_USER0); */
}
void copy_user_highpage(struct page *to, struct page *from,
unsigned long vaddr, struct vm_area_struct *vma)
{
void *vfrom, *vto;
unsigned long flags;
/* Copy using TMPALIAS region. This has the advantage
that the `from' page doesn't need to be flushed. However,
the `to' page must be flushed in copy_user_page_asm since
it can be used to bring in executable code. */
vfrom = kmap_atomic(from);
vto = kmap_atomic(to);
purge_kernel_dcache_page_asm((unsigned long)vto);
purge_tlb_start(flags);
pdtlb_kernel(vto);
pdtlb_kernel(vfrom);
purge_tlb_end(flags);
preempt_disable();
copy_user_page_asm(vto, vfrom, vaddr);
flush_dcache_page_asm(__pa(vto), vaddr);
preempt_enable();
pagefault_enable(); /* kunmap_atomic(addr, KM_USER1); */
pagefault_enable(); /* kunmap_atomic(addr, KM_USER0); */
}
#endif /* CONFIG_PARISC_TMPALIAS */


@ -431,6 +431,7 @@
ENTRY_SAME(finit_module)
ENTRY_SAME(sched_setattr)
ENTRY_SAME(sched_getattr) /* 335 */
ENTRY_COMP(utimes)
/* Nothing yet */


@ -1504,73 +1504,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
1: addi r8,r8,16
.endr
/* Save DEC */
mfspr r5,SPRN_DEC
mftb r6
extsw r5,r5
add r5,r5,r6
std r5,VCPU_DEC_EXPIRES(r9)
BEGIN_FTR_SECTION
b 8f
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
/* Turn on TM so we can access TFHAR/TFIAR/TEXASR */
mfmsr r8
li r0, 1
rldimi r8, r0, MSR_TM_LG, 63-MSR_TM_LG
mtmsrd r8
/* Save POWER8-specific registers */
mfspr r5, SPRN_IAMR
mfspr r6, SPRN_PSPB
mfspr r7, SPRN_FSCR
std r5, VCPU_IAMR(r9)
stw r6, VCPU_PSPB(r9)
std r7, VCPU_FSCR(r9)
mfspr r5, SPRN_IC
mfspr r6, SPRN_VTB
mfspr r7, SPRN_TAR
std r5, VCPU_IC(r9)
std r6, VCPU_VTB(r9)
std r7, VCPU_TAR(r9)
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
mfspr r5, SPRN_TFHAR
mfspr r6, SPRN_TFIAR
mfspr r7, SPRN_TEXASR
std r5, VCPU_TFHAR(r9)
std r6, VCPU_TFIAR(r9)
std r7, VCPU_TEXASR(r9)
#endif
mfspr r8, SPRN_EBBHR
std r8, VCPU_EBBHR(r9)
mfspr r5, SPRN_EBBRR
mfspr r6, SPRN_BESCR
mfspr r7, SPRN_CSIGR
mfspr r8, SPRN_TACR
std r5, VCPU_EBBRR(r9)
std r6, VCPU_BESCR(r9)
std r7, VCPU_CSIGR(r9)
std r8, VCPU_TACR(r9)
mfspr r5, SPRN_TCSCR
mfspr r6, SPRN_ACOP
mfspr r7, SPRN_PID
mfspr r8, SPRN_WORT
std r5, VCPU_TCSCR(r9)
std r6, VCPU_ACOP(r9)
stw r7, VCPU_GUEST_PID(r9)
std r8, VCPU_WORT(r9)
8:
/* Save and reset AMR and UAMOR before turning on the MMU */
BEGIN_FTR_SECTION
mfspr r5,SPRN_AMR
mfspr r6,SPRN_UAMOR
std r5,VCPU_AMR(r9)
std r6,VCPU_UAMOR(r9)
li r6,0
mtspr SPRN_AMR,r6
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
/* Unset guest mode */
li r0, KVM_GUEST_MODE_NONE
stb r0, HSTATE_IN_GUEST(r13)
@ -2203,7 +2136,7 @@ BEGIN_FTR_SECTION
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
mfspr r6,SPRN_VRSAVE
stw r6,VCPU_VRSAVE(r3)
stw r6,VCPU_VRSAVE(r31)
mtlr r30
mtmsrd r5
isync
@ -2240,7 +2173,7 @@ BEGIN_FTR_SECTION
bl .load_vr_state
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
lwz r7,VCPU_VRSAVE(r4)
lwz r7,VCPU_VRSAVE(r31)
mtspr SPRN_VRSAVE,r7
mtlr r30
mr r4,r31


@ -58,9 +58,12 @@ void arch_cpu_idle(void)
{
if (tlb_type != hypervisor) {
touch_nmi_watchdog();
local_irq_enable();
} else {
unsigned long pstate;
local_irq_enable();
/* The sun4v sleeping code requires that we have PSTATE.IE cleared over
* the cpu sleep hypervisor call.
*/
@ -82,7 +85,6 @@ void arch_cpu_idle(void)
: "=&r" (pstate)
: "i" (PSTATE_IE));
}
local_irq_enable();
}
#ifdef CONFIG_HOTPLUG_CPU


@ -189,7 +189,8 @@ linux_sparc_syscall32:
mov %i0, %l5 ! IEU1
5: call %l7 ! CTI Group brk forced
srl %i5, 0, %o5 ! IEU1
ba,a,pt %xcc, 3f
ba,pt %xcc, 3f
sra %o0, 0, %o0
/* Linux native system calls enter here... */
.align 32
@ -217,7 +218,6 @@ linux_sparc_syscall:
3: stx %o0, [%sp + PTREGS_OFF + PT_V9_I0]
ret_sys_call:
ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %g3
sra %o0, 0, %o0
mov %ulo(TSTATE_XCARRY | TSTATE_ICARRY), %g2
sllx %g2, 32, %g2


@ -273,7 +273,7 @@ void __init pgtable_cache_init(void)
prom_halt();
}
for (i = 0; i < 8; i++) {
for (i = 0; i < ARRAY_SIZE(tsb_cache_names); i++) {
unsigned long size = 8192 << i;
const char *name = tsb_cache_names[i];


@ -18,7 +18,6 @@
#include <linux/pci_ids.h>
#include <linux/pci.h>
#include <linux/bitops.h>
#include <linux/ioport.h>
#include <linux/suspend.h>
#include <asm/e820.h>
#include <asm/io.h>
@ -54,18 +53,6 @@ int fallback_aper_force __initdata;
int fix_aperture __initdata = 1;
static struct resource gart_resource = {
.name = "GART",
.flags = IORESOURCE_MEM,
};
static void __init insert_aperture_resource(u32 aper_base, u32 aper_size)
{
gart_resource.start = aper_base;
gart_resource.end = aper_base + aper_size - 1;
insert_resource(&iomem_resource, &gart_resource);
}
/* This code runs before the PCI subsystem is initialized, so just
access the northbridge directly. */
@ -96,7 +83,6 @@ static u32 __init allocate_aperture(void)
memblock_reserve(addr, aper_size);
printk(KERN_INFO "Mapping aperture over %d KB of RAM @ %lx\n",
aper_size >> 10, addr);
insert_aperture_resource((u32)addr, aper_size);
register_nosave_region(addr >> PAGE_SHIFT,
(addr+aper_size) >> PAGE_SHIFT);
@ -444,12 +430,8 @@ int __init gart_iommu_hole_init(void)
out:
if (!fix && !fallback_aper_force) {
if (last_aper_base) {
unsigned long n = (32 * 1024 * 1024) << last_aper_order;
insert_aperture_resource((u32)last_aper_base, n);
if (last_aper_base)
return 1;
}
return 0;
}


@ -3334,6 +3334,8 @@ static int __init uncore_type_init(struct intel_uncore_type *type)
if (!pmus)
return -ENOMEM;
type->pmus = pmus;
type->unconstrainted = (struct event_constraint)
__EVENT_CONSTRAINT(0, (1ULL << type->num_counters) - 1,
0, type->num_counters, 0, 0);
@ -3369,7 +3371,6 @@ static int __init uncore_type_init(struct intel_uncore_type *type)
}
type->pmu_group = &uncore_pmu_attr_group;
type->pmus = pmus;
return 0;
fail:
uncore_type_exit(type);


@ -693,20 +693,11 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
if (!uninit_q)
return NULL;
uninit_q->flush_rq = kzalloc(sizeof(struct request), GFP_KERNEL);
if (!uninit_q->flush_rq)
goto out_cleanup_queue;
q = blk_init_allocated_queue(uninit_q, rfn, lock);
if (!q)
goto out_free_flush_rq;
return q;
blk_cleanup_queue(uninit_q);
out_free_flush_rq:
kfree(uninit_q->flush_rq);
out_cleanup_queue:
blk_cleanup_queue(uninit_q);
return NULL;
return q;
}
EXPORT_SYMBOL(blk_init_queue_node);
@ -717,9 +708,13 @@ blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
if (!q)
return NULL;
if (blk_init_rl(&q->root_rl, q, GFP_KERNEL))
q->flush_rq = kzalloc(sizeof(struct request), GFP_KERNEL);
if (!q->flush_rq)
return NULL;
if (blk_init_rl(&q->root_rl, q, GFP_KERNEL))
goto fail;
q->request_fn = rfn;
q->prep_rq_fn = NULL;
q->unprep_rq_fn = NULL;
@ -742,12 +737,16 @@ blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
/* init elevator */
if (elevator_init(q, NULL)) {
mutex_unlock(&q->sysfs_lock);
return NULL;
goto fail;
}
mutex_unlock(&q->sysfs_lock);
return q;
fail:
kfree(q->flush_rq);
return NULL;
}
EXPORT_SYMBOL(blk_init_allocated_queue);
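Net effect of the blk-core hunk: flush_rq is now allocated inside
blk_init_allocated_queue(), so every later failure (request-list init,
elevator init) unwinds through one fail label that frees it, instead of the
caller juggling two cleanup paths. A generic sketch of that unwind idiom
(names invented here):

#include <stdlib.h>

struct queue { void *flush_rq; };

static int queue_init(struct queue *q)
{
	void *rl;

	q->flush_rq = calloc(1, 64);	/* stand-in for the flush request */
	if (!q->flush_rq)
		return -1;

	rl = calloc(1, 32);		/* stand-in for blk_init_rl() */
	if (!rl)
		goto fail;		/* later failures free flush_rq */

	free(rl);
	return 0;
fail:
	free(q->flush_rq);
	q->flush_rq = NULL;
	return -1;
}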


@ -140,14 +140,17 @@ static void mq_flush_run(struct work_struct *work)
blk_mq_insert_request(rq, false, true, false);
}
static bool blk_flush_queue_rq(struct request *rq)
static bool blk_flush_queue_rq(struct request *rq, bool add_front)
{
if (rq->q->mq_ops) {
INIT_WORK(&rq->mq_flush_work, mq_flush_run);
kblockd_schedule_work(rq->q, &rq->mq_flush_work);
return false;
} else {
list_add_tail(&rq->queuelist, &rq->q->queue_head);
if (add_front)
list_add(&rq->queuelist, &rq->q->queue_head);
else
list_add_tail(&rq->queuelist, &rq->q->queue_head);
return true;
}
}
@ -193,7 +196,7 @@ static bool blk_flush_complete_seq(struct request *rq, unsigned int seq,
case REQ_FSEQ_DATA:
list_move_tail(&rq->flush.list, &q->flush_data_in_flight);
queued = blk_flush_queue_rq(rq);
queued = blk_flush_queue_rq(rq, true);
break;
case REQ_FSEQ_DONE:
@ -326,7 +329,7 @@ static bool blk_kick_flush(struct request_queue *q)
q->flush_rq->rq_disk = first_rq->rq_disk;
q->flush_rq->end_io = flush_end_io;
return blk_flush_queue_rq(q->flush_rq);
return blk_flush_queue_rq(q->flush_rq, false);
}
static void flush_data_end_io(struct request *rq, int error)


@ -4498,7 +4498,7 @@ static int mtip_pci_probe(struct pci_dev *pdev,
}
dev_info(&pdev->dev, "NUMA node %d (closest: %d,%d, probe on %d:%d)\n",
my_node, pcibus_to_node(pdev->bus), dev_to_node(&pdev->dev),
cpu_to_node(smp_processor_id()), smp_processor_id());
cpu_to_node(raw_smp_processor_id()), raw_smp_processor_id());
dd = kzalloc_node(sizeof(struct driver_data), GFP_KERNEL, my_node);
if (dd == NULL) {

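The smp_processor_id() to raw_smp_processor_id() switch matters because
probe runs in preemptible context: with CONFIG_DEBUG_PREEMPT the former
triggers a "using smp_processor_id() in preemptible" warning, since the
task can migrate before the value is used. Here the CPU number only feeds a
log line, so the raw variant (which skips the debug check, at the cost of a
possibly stale answer) is appropriate. A kernel-style sketch with an
invented helper name:

#include <linux/smp.h>
#include <linux/topology.h>

/* Informational CPU/node lookup from preemptible context.  The
 * result may be stale by the time it is printed, which is fine
 * for a log message. */
static int probe_time_node(void)
{
	return cpu_to_node(raw_smp_processor_id());
}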

@ -54,7 +54,7 @@ static inline void pit_irq_acknowledge(void)
static u64 pit_read_sched_clock(void)
{
return __raw_readl(clksrc_base + PITCVAL);
return ~__raw_readl(clksrc_base + PITCVAL);
}
static int __init pit_clocksource_init(unsigned long rate)

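The one-character PIT fix deserves a note: the Vybrid PIT counts down from
its load value, while sched_clock must increase monotonically, so the
driver returns the bitwise complement of the counter; as the hardware value
falls, ~value rises. A self-contained demonstration:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t before = 1000, after = 999;	/* hardware counts down */

	/* The complement of a decreasing counter increases. */
	assert((uint32_t)~after > (uint32_t)~before);
	return 0;
}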

@ -468,8 +468,8 @@ void drm_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver)
} else {
list_for_each_entry_safe(dev, tmp, &driver->legacy_dev_list,
legacy_dev_list) {
drm_put_dev(dev);
list_del(&dev->legacy_dev_list);
drm_put_dev(dev);
}
}
DRM_INFO("Module unloaded\n");
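Reordering list_del() before drm_put_dev() fixes a use-after-free:
drm_put_dev() can free the device, after which touching its
legacy_dev_list links is invalid even under list_for_each_entry_safe().
The general rule, unlink before free, in miniature:

#include <stdlib.h>

struct node { struct node *next; };

static void destroy_all(struct node **head)
{
	struct node *n = *head, *next;

	while (n) {
		next = n->next;	/* detach bookkeeping first ... */
		free(n);	/* ... only then release the node */
		n = next;
	}
	*head = NULL;
}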


@ -172,20 +172,24 @@ static int exynos_drm_open(struct drm_device *dev, struct drm_file *file)
ret = exynos_drm_subdrv_open(dev, file);
if (ret)
goto out;
goto err_file_priv_free;
anon_filp = anon_inode_getfile("exynos_gem", &exynos_drm_gem_fops,
NULL, 0);
if (IS_ERR(anon_filp)) {
ret = PTR_ERR(anon_filp);
goto out;
goto err_subdrv_close;
}
anon_filp->f_mode = FMODE_READ | FMODE_WRITE;
file_priv->anon_filp = anon_filp;
return ret;
out:
err_subdrv_close:
exynos_drm_subdrv_close(dev, file);
err_file_priv_free:
kfree(file_priv);
file->driver_priv = NULL;
return ret;


@ -214,6 +214,13 @@ int i915_gem_init_stolen(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
int bios_reserved = 0;
#ifdef CONFIG_INTEL_IOMMU
if (intel_iommu_gfx_mapped) {
DRM_INFO("DMAR active, disabling use of stolen memory\n");
return 0;
}
#endif
if (dev_priv->gtt.stolen_size == 0)
return 0;


@ -618,33 +618,25 @@ static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
/* raw reads, only for fast reads of display block, no need for forcewake etc. */
#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
#define __raw_i915_read16(dev_priv__, reg__) readw((dev_priv__)->regs + (reg__))
static bool ilk_pipe_in_vblank_locked(struct drm_device *dev, enum pipe pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t status;
int reg;
if (INTEL_INFO(dev)->gen < 7) {
status = pipe == PIPE_A ?
DE_PIPEA_VBLANK :
DE_PIPEB_VBLANK;
if (INTEL_INFO(dev)->gen >= 8) {
status = GEN8_PIPE_VBLANK;
reg = GEN8_DE_PIPE_ISR(pipe);
} else if (INTEL_INFO(dev)->gen >= 7) {
status = DE_PIPE_VBLANK_IVB(pipe);
reg = DEISR;
} else {
switch (pipe) {
default:
case PIPE_A:
status = DE_PIPEA_VBLANK_IVB;
break;
case PIPE_B:
status = DE_PIPEB_VBLANK_IVB;
break;
case PIPE_C:
status = DE_PIPEC_VBLANK_IVB;
break;
}
status = DE_PIPE_VBLANK(pipe);
reg = DEISR;
}
return __raw_i915_read32(dev_priv, DEISR) & status;
return __raw_i915_read32(dev_priv, reg) & status;
}
static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
@ -702,7 +694,28 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
else
position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
if (HAS_PCH_SPLIT(dev)) {
if (HAS_DDI(dev)) {
/*
* On HSW HDMI outputs there seems to be a 2 line
* difference, whereas eDP has the normal 1 line
* difference that earlier platforms have. External
* DP is unknown. For now just check for the 2 line
* difference case on all output types on HSW+.
*
* This might misinterpret the scanline counter being
* one line too far along on eDP, but that's less
* dangerous than the alternative since that would lead
* the vblank timestamp code astray when it sees a
* scanline count before vblank_start during a vblank
* interrupt.
*/
in_vbl = ilk_pipe_in_vblank_locked(dev, pipe);
if ((in_vbl && (position == vbl_start - 2 ||
position == vbl_start - 1)) ||
(!in_vbl && (position == vbl_end - 2 ||
position == vbl_end - 1)))
position = (position + 2) % vtotal;
} else if (HAS_PCH_SPLIT(dev)) {
/*
* The scanline counter increments at the leading edge
* of hsync, ie. it completely misses the active portion
@ -2769,10 +2782,9 @@ static void ibx_irq_postinstall(struct drm_device *dev)
return;
if (HAS_PCH_IBX(dev)) {
mask = SDE_GMBUS | SDE_AUX_MASK | SDE_TRANSB_FIFO_UNDER |
SDE_TRANSA_FIFO_UNDER | SDE_POISON;
mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
} else {
mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT | SDE_ERROR_CPT;
mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
I915_WRITE(SERR_INT, I915_READ(SERR_INT));
}
@ -2832,20 +2844,19 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
DE_PLANEB_FLIP_DONE_IVB |
DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB |
DE_ERR_INT_IVB);
DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB);
extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
DE_PIPEA_VBLANK_IVB);
DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB);
I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT));
} else {
display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
DE_AUX_CHANNEL_A |
DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
DE_POISON);
extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT;
extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN;
}
dev_priv->irq_mask = ~display_mask;
@ -2961,9 +2972,9 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
struct drm_device *dev = dev_priv->dev;
uint32_t de_pipe_masked = GEN8_PIPE_FLIP_DONE |
GEN8_PIPE_CDCLK_CRC_DONE |
GEN8_PIPE_FIFO_UNDERRUN |
GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
uint32_t de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK;
uint32_t de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
GEN8_PIPE_FIFO_UNDERRUN;
int pipe;
dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;


@ -1244,6 +1244,7 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder)
if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
ironlake_edp_panel_vdd_on(intel_dp);
ironlake_edp_panel_off(intel_dp);
}


@ -1249,17 +1249,24 @@ void ironlake_edp_panel_off(struct intel_dp *intel_dp)
DRM_DEBUG_KMS("Turn eDP power off\n");
WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n");
pp = ironlake_get_pp_control(intel_dp);
/* We need to switch off panel power _and_ force vdd, for otherwise some
* panels get very unhappy and cease to work. */
pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_BLC_ENABLE);
pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE);
pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
I915_WRITE(pp_ctrl_reg, pp);
POSTING_READ(pp_ctrl_reg);
intel_dp->want_panel_vdd = false;
ironlake_wait_panel_off(intel_dp);
/* We got a reference when we enabled the VDD. */
intel_runtime_pm_put(dev_priv);
}
void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
@ -1639,7 +1646,7 @@ static void intel_edp_psr_enable_source(struct intel_dp *intel_dp)
val |= EDP_PSR_LINK_DISABLE;
I915_WRITE(EDP_PSR_CTL(dev), val |
IS_BROADWELL(dev) ? 0 : link_entry_time |
(IS_BROADWELL(dev) ? 0 : link_entry_time) |
max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT |
idle_frames << EDP_PSR_IDLE_FRAME_SHIFT |
EDP_PSR_ENABLE);
@ -1784,6 +1791,7 @@ static void intel_disable_dp(struct intel_encoder *encoder)
/* Make sure the panel is off before trying to change the mode. But also
* ensure that we have vdd while we switch off the panel. */
ironlake_edp_panel_vdd_on(intel_dp);
ironlake_edp_backlight_off(intel_dp);
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
ironlake_edp_panel_off(intel_dp);


@ -43,6 +43,7 @@
#define G25_REV_MIN 0x22
#define G27_REV_MAJ 0x12
#define G27_REV_MIN 0x38
#define G27_2_REV_MIN 0x39
#define to_hid_device(pdev) container_of(pdev, struct hid_device, dev)
@ -130,6 +131,7 @@ static const struct lg4ff_usb_revision lg4ff_revs[] = {
{DFP_REV_MAJ, DFP_REV_MIN, &native_dfp}, /* Driving Force Pro */
{G25_REV_MAJ, G25_REV_MIN, &native_g25}, /* G25 */
{G27_REV_MAJ, G27_REV_MIN, &native_g27}, /* G27 */
{G27_REV_MAJ, G27_2_REV_MIN, &native_g27}, /* G27 v2 */
};
/* Recalculates X axis value accordingly to currently selected range */


@ -42,6 +42,7 @@
#define DUALSHOCK4_CONTROLLER_BT BIT(6)
#define SONY_LED_SUPPORT (SIXAXIS_CONTROLLER_USB | BUZZ_CONTROLLER | DUALSHOCK4_CONTROLLER_USB)
#define SONY_FF_SUPPORT (SIXAXIS_CONTROLLER_USB | DUALSHOCK4_CONTROLLER_USB)
#define MAX_LEDS 4
@ -499,6 +500,7 @@ struct sony_sc {
__u8 right;
#endif
__u8 worker_initialized;
__u8 led_state[MAX_LEDS];
__u8 led_count;
};
@ -993,22 +995,11 @@ static int sony_init_ff(struct hid_device *hdev)
return input_ff_create_memless(input_dev, NULL, sony_play_effect);
}
static void sony_destroy_ff(struct hid_device *hdev)
{
struct sony_sc *sc = hid_get_drvdata(hdev);
cancel_work_sync(&sc->state_worker);
}
#else
static int sony_init_ff(struct hid_device *hdev)
{
return 0;
}
static void sony_destroy_ff(struct hid_device *hdev)
{
}
#endif
static int sony_set_output_report(struct sony_sc *sc, int req_id, int req_size)
@ -1077,6 +1068,8 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
if (sc->quirks & SIXAXIS_CONTROLLER_USB) {
hdev->hid_output_raw_report = sixaxis_usb_output_raw_report;
ret = sixaxis_set_operational_usb(hdev);
sc->worker_initialized = 1;
INIT_WORK(&sc->state_worker, sixaxis_state_worker);
}
else if (sc->quirks & SIXAXIS_CONTROLLER_BT)
@ -1087,6 +1080,7 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
if (ret < 0)
goto err_stop;
sc->worker_initialized = 1;
INIT_WORK(&sc->state_worker, dualshock4_state_worker);
} else {
ret = 0;
@ -1101,9 +1095,11 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
goto err_stop;
}
ret = sony_init_ff(hdev);
if (ret < 0)
goto err_stop;
if (sc->quirks & SONY_FF_SUPPORT) {
ret = sony_init_ff(hdev);
if (ret < 0)
goto err_stop;
}
return 0;
err_stop:
@ -1120,7 +1116,8 @@ static void sony_remove(struct hid_device *hdev)
if (sc->quirks & SONY_LED_SUPPORT)
sony_leds_remove(hdev);
sony_destroy_ff(hdev);
if (sc->worker_initialized)
cancel_work_sync(&sc->state_worker);
hid_hw_stop(hdev);
}


@ -320,13 +320,13 @@ static void drop_ref(struct hidraw *hidraw, int exists_bit)
hid_hw_close(hidraw->hid);
wake_up_interruptible(&hidraw->wait);
}
device_destroy(hidraw_class,
MKDEV(hidraw_major, hidraw->minor));
} else {
--hidraw->open;
}
if (!hidraw->open) {
if (!hidraw->exist) {
device_destroy(hidraw_class,
MKDEV(hidraw_major, hidraw->minor));
hidraw_table[hidraw->minor] = NULL;
kfree(hidraw);
} else {


@ -16,16 +16,6 @@ config CAPI_TRACE
This will increase the size of the kernelcapi module by 20 KB.
If unsure, say Y.
config ISDN_CAPI_MIDDLEWARE
bool "CAPI2.0 Middleware support"
depends on TTY
help
This option will enhance the capabilities of the /dev/capi20
interface. It will provide a means of moving a data connection,
established via the usual /dev/capi20 interface to a special tty
device. If you want to use pppd with pppdcapiplugin to dial up to
your ISP, say Y here.
config ISDN_CAPI_CAPI20
tristate "CAPI2.0 /dev/capi support"
help
@ -34,6 +24,16 @@ config ISDN_CAPI_CAPI20
standardized libcapi20 to access this functionality. You should say
Y/M here.
config ISDN_CAPI_MIDDLEWARE
bool "CAPI2.0 Middleware support"
depends on ISDN_CAPI_CAPI20 && TTY
help
This option will enhance the capabilities of the /dev/capi20
interface. It will provide a means of moving a data connection,
established via the usual /dev/capi20 interface to a special tty
device. If you want to use pppd with pppdcapiplugin to dial up to
your ISP, say Y here.
config ISDN_CAPI_CAPIDRV
tristate "CAPI2.0 capidrv interface support"
depends on ISDN_I4L


@ -1248,19 +1248,13 @@ static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
* shared register for the high 32 bits, so only a single, aligned,
* 4 GB physical address range can be used for descriptors.
*/
if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
!dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
dev_dbg(&pdev->dev, "DMA to 64-BIT addresses\n");
} else {
err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
err = dma_set_coherent_mask(&pdev->dev,
DMA_BIT_MASK(32));
if (err) {
dev_err(&pdev->dev,
"No usable DMA config, aborting\n");
goto out_pci_disable;
}
dev_err(&pdev->dev, "No usable DMA config, aborting\n");
goto out_pci_disable;
}
}
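The alx hunk is a straight conversion to dma_set_mask_and_coherent(), which
sets the streaming and coherent masks in one call. The resulting probe-time
pattern, sketched with an invented helper name:

#include <linux/dma-mapping.h>
#include <linux/pci.h>

static int set_dma_caps(struct pci_dev *pdev)
{
	/* Prefer 64-bit addressing, fall back to 32-bit, else abort. */
	int err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));

	if (err) {
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (err)
			dev_err(&pdev->dev, "No usable DMA config\n");
	}
	return err;
}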


@ -2436,7 +2436,7 @@ err_reset:
err_register:
err_sw_init:
err_eeprom:
iounmap(adapter->hw.hw_addr);
pci_iounmap(pdev, adapter->hw.hw_addr);
err_init_netdev:
err_ioremap:
free_netdev(netdev);
@ -2474,7 +2474,7 @@ static void atl1e_remove(struct pci_dev *pdev)
unregister_netdev(netdev);
atl1e_free_ring_resources(adapter);
atl1e_force_ps(&adapter->hw);
iounmap(adapter->hw.hw_addr);
pci_iounmap(pdev, adapter->hw.hw_addr);
pci_release_regions(pdev);
free_netdev(netdev);
pci_disable_device(pdev);


@ -1,6 +1,6 @@
/* cnic.c: Broadcom CNIC core network driver.
*
* Copyright (c) 2006-2013 Broadcom Corporation
* Copyright (c) 2006-2014 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@ -342,7 +342,7 @@ static int cnic_send_nlmsg(struct cnic_local *cp, u32 type,
while (retry < 3) {
rc = 0;
rcu_read_lock();
ulp_ops = rcu_dereference(cnic_ulp_tbl[CNIC_ULP_ISCSI]);
ulp_ops = rcu_dereference(cp->ulp_ops[CNIC_ULP_ISCSI]);
if (ulp_ops)
rc = ulp_ops->iscsi_nl_send_msg(
cp->ulp_handle[CNIC_ULP_ISCSI],
@ -726,7 +726,7 @@ static void cnic_free_dma(struct cnic_dev *dev, struct cnic_dma *dma)
for (i = 0; i < dma->num_pages; i++) {
if (dma->pg_arr[i]) {
dma_free_coherent(&dev->pcidev->dev, BNX2_PAGE_SIZE,
dma_free_coherent(&dev->pcidev->dev, CNIC_PAGE_SIZE,
dma->pg_arr[i], dma->pg_map_arr[i]);
dma->pg_arr[i] = NULL;
}
@ -785,7 +785,7 @@ static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma,
for (i = 0; i < pages; i++) {
dma->pg_arr[i] = dma_alloc_coherent(&dev->pcidev->dev,
BNX2_PAGE_SIZE,
CNIC_PAGE_SIZE,
&dma->pg_map_arr[i],
GFP_ATOMIC);
if (dma->pg_arr[i] == NULL)
@ -794,8 +794,8 @@ static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma,
if (!use_pg_tbl)
return 0;
dma->pgtbl_size = ((pages * 8) + BNX2_PAGE_SIZE - 1) &
~(BNX2_PAGE_SIZE - 1);
dma->pgtbl_size = ((pages * 8) + CNIC_PAGE_SIZE - 1) &
~(CNIC_PAGE_SIZE - 1);
dma->pgtbl = dma_alloc_coherent(&dev->pcidev->dev, dma->pgtbl_size,
&dma->pgtbl_map, GFP_ATOMIC);
if (dma->pgtbl == NULL)
@ -900,8 +900,8 @@ static int cnic_alloc_context(struct cnic_dev *dev)
if (BNX2_CHIP(cp) == BNX2_CHIP_5709) {
int i, k, arr_size;
cp->ctx_blk_size = BNX2_PAGE_SIZE;
cp->cids_per_blk = BNX2_PAGE_SIZE / 128;
cp->ctx_blk_size = CNIC_PAGE_SIZE;
cp->cids_per_blk = CNIC_PAGE_SIZE / 128;
arr_size = BNX2_MAX_CID / cp->cids_per_blk *
sizeof(struct cnic_ctx);
cp->ctx_arr = kzalloc(arr_size, GFP_KERNEL);
@ -933,7 +933,7 @@ static int cnic_alloc_context(struct cnic_dev *dev)
for (i = 0; i < cp->ctx_blks; i++) {
cp->ctx_arr[i].ctx =
dma_alloc_coherent(&dev->pcidev->dev,
BNX2_PAGE_SIZE,
CNIC_PAGE_SIZE,
&cp->ctx_arr[i].mapping,
GFP_KERNEL);
if (cp->ctx_arr[i].ctx == NULL)
@ -1013,7 +1013,7 @@ static int __cnic_alloc_uio_rings(struct cnic_uio_dev *udev, int pages)
if (udev->l2_ring)
return 0;
udev->l2_ring_size = pages * BNX2_PAGE_SIZE;
udev->l2_ring_size = pages * CNIC_PAGE_SIZE;
udev->l2_ring = dma_alloc_coherent(&udev->pdev->dev, udev->l2_ring_size,
&udev->l2_ring_map,
GFP_KERNEL | __GFP_COMP);
@ -1021,7 +1021,7 @@ static int __cnic_alloc_uio_rings(struct cnic_uio_dev *udev, int pages)
return -ENOMEM;
udev->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size;
udev->l2_buf_size = PAGE_ALIGN(udev->l2_buf_size);
udev->l2_buf_size = CNIC_PAGE_ALIGN(udev->l2_buf_size);
udev->l2_buf = dma_alloc_coherent(&udev->pdev->dev, udev->l2_buf_size,
&udev->l2_buf_map,
GFP_KERNEL | __GFP_COMP);
@ -1102,7 +1102,7 @@ static int cnic_init_uio(struct cnic_dev *dev)
uinfo->mem[0].size = MB_GET_CID_ADDR(TX_TSS_CID +
TX_MAX_TSS_RINGS + 1);
uinfo->mem[1].addr = (unsigned long) cp->status_blk.gen &
PAGE_MASK;
CNIC_PAGE_MASK;
if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE * 9;
else
@ -1113,7 +1113,7 @@ static int cnic_init_uio(struct cnic_dev *dev)
uinfo->mem[0].size = pci_resource_len(dev->pcidev, 0);
uinfo->mem[1].addr = (unsigned long) cp->bnx2x_def_status_blk &
PAGE_MASK;
CNIC_PAGE_MASK;
uinfo->mem[1].size = sizeof(*cp->bnx2x_def_status_blk);
uinfo->name = "bnx2x_cnic";
@ -1267,14 +1267,14 @@ static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
for (i = MAX_ISCSI_TBL_SZ; i < cp->max_cid_space; i++)
cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_FCOE;
pages = PAGE_ALIGN(cp->max_cid_space * CNIC_KWQ16_DATA_SIZE) /
PAGE_SIZE;
pages = CNIC_PAGE_ALIGN(cp->max_cid_space * CNIC_KWQ16_DATA_SIZE) /
CNIC_PAGE_SIZE;
ret = cnic_alloc_dma(dev, kwq_16_dma, pages, 0);
if (ret)
return -ENOMEM;
n = PAGE_SIZE / CNIC_KWQ16_DATA_SIZE;
n = CNIC_PAGE_SIZE / CNIC_KWQ16_DATA_SIZE;
for (i = 0, j = 0; i < cp->max_cid_space; i++) {
long off = CNIC_KWQ16_DATA_SIZE * (i % n);
@ -1296,7 +1296,7 @@ static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
goto error;
}
pages = PAGE_ALIGN(BNX2X_ISCSI_GLB_BUF_SIZE) / PAGE_SIZE;
pages = CNIC_PAGE_ALIGN(BNX2X_ISCSI_GLB_BUF_SIZE) / CNIC_PAGE_SIZE;
ret = cnic_alloc_dma(dev, &cp->gbl_buf_info, pages, 0);
if (ret)
goto error;
@ -1466,8 +1466,8 @@ static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe)
cp->r2tq_size = cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS *
BNX2X_ISCSI_R2TQE_SIZE;
cp->hq_size = cp->num_ccells * BNX2X_ISCSI_HQ_BD_SIZE;
pages = PAGE_ALIGN(cp->hq_size) / PAGE_SIZE;
hq_bds = pages * (PAGE_SIZE / BNX2X_ISCSI_HQ_BD_SIZE);
pages = CNIC_PAGE_ALIGN(cp->hq_size) / CNIC_PAGE_SIZE;
hq_bds = pages * (CNIC_PAGE_SIZE / BNX2X_ISCSI_HQ_BD_SIZE);
cp->num_cqs = req1->num_cqs;
if (!dev->max_iscsi_conn)
@ -1477,9 +1477,9 @@ static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe)
CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_RQ_SIZE_OFFSET(pfid),
req1->rq_num_wqes);
CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
PAGE_SIZE);
CNIC_PAGE_SIZE);
CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS);
CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
req1->num_tasks_per_conn);
@ -1489,9 +1489,9 @@ static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe)
USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfid),
req1->rq_buffer_size);
CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
PAGE_SIZE);
CNIC_PAGE_SIZE);
CNIC_WR8(dev, BAR_USTRORM_INTMEM +
USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS);
CNIC_WR16(dev, BAR_USTRORM_INTMEM +
USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
req1->num_tasks_per_conn);
@ -1504,9 +1504,9 @@ static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe)
/* init Xstorm RAM */
CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
PAGE_SIZE);
CNIC_PAGE_SIZE);
CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS);
CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
req1->num_tasks_per_conn);
@ -1519,9 +1519,9 @@ static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe)
/* init Cstorm RAM */
CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
PAGE_SIZE);
CNIC_PAGE_SIZE);
CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS);
CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
req1->num_tasks_per_conn);
@ -1623,18 +1623,18 @@ static int cnic_alloc_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
}
ctx->cid = cid;
pages = PAGE_ALIGN(cp->task_array_size) / PAGE_SIZE;
pages = CNIC_PAGE_ALIGN(cp->task_array_size) / CNIC_PAGE_SIZE;
ret = cnic_alloc_dma(dev, &iscsi->task_array_info, pages, 1);
if (ret)
goto error;
pages = PAGE_ALIGN(cp->r2tq_size) / PAGE_SIZE;
pages = CNIC_PAGE_ALIGN(cp->r2tq_size) / CNIC_PAGE_SIZE;
ret = cnic_alloc_dma(dev, &iscsi->r2tq_info, pages, 1);
if (ret)
goto error;
pages = PAGE_ALIGN(cp->hq_size) / PAGE_SIZE;
pages = CNIC_PAGE_ALIGN(cp->hq_size) / CNIC_PAGE_SIZE;
ret = cnic_alloc_dma(dev, &iscsi->hq_info, pages, 1);
if (ret)
goto error;
@ -1760,7 +1760,7 @@ static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[],
ictx->tstorm_st_context.iscsi.hdr_bytes_2_fetch = ISCSI_HEADER_SIZE;
/* TSTORM requires the base address of RQ DB & not PTE */
ictx->tstorm_st_context.iscsi.rq_db_phy_addr.lo =
req2->rq_page_table_addr_lo & PAGE_MASK;
req2->rq_page_table_addr_lo & CNIC_PAGE_MASK;
ictx->tstorm_st_context.iscsi.rq_db_phy_addr.hi =
req2->rq_page_table_addr_hi;
ictx->tstorm_st_context.iscsi.iscsi_conn_id = req1->iscsi_conn_id;
@ -1842,7 +1842,7 @@ static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[],
/* CSTORM and USTORM initialization is different, CSTORM requires
* CQ DB base & not PTE addr */
ictx->cstorm_st_context.cq_db_base.lo =
req1->cq_page_table_addr_lo & PAGE_MASK;
req1->cq_page_table_addr_lo & CNIC_PAGE_MASK;
ictx->cstorm_st_context.cq_db_base.hi = req1->cq_page_table_addr_hi;
ictx->cstorm_st_context.iscsi_conn_id = req1->iscsi_conn_id;
ictx->cstorm_st_context.cq_proc_en_bit_map = (1 << cp->num_cqs) - 1;
@ -2911,7 +2911,7 @@ static int cnic_l2_completion(struct cnic_local *cp)
u16 hw_cons, sw_cons;
struct cnic_uio_dev *udev = cp->udev;
union eth_rx_cqe *cqe, *cqe_ring = (union eth_rx_cqe *)
(udev->l2_ring + (2 * BNX2_PAGE_SIZE));
(udev->l2_ring + (2 * CNIC_PAGE_SIZE));
u32 cmd;
int comp = 0;
@ -3244,7 +3244,8 @@ static int cnic_copy_ulp_stats(struct cnic_dev *dev, int ulp_type)
int rc;
mutex_lock(&cnic_lock);
ulp_ops = cnic_ulp_tbl_prot(ulp_type);
ulp_ops = rcu_dereference_protected(cp->ulp_ops[ulp_type],
lockdep_is_held(&cnic_lock));
if (ulp_ops && ulp_ops->cnic_get_stats)
rc = ulp_ops->cnic_get_stats(cp->ulp_handle[ulp_type]);
else
@ -4384,7 +4385,7 @@ static int cnic_setup_5709_context(struct cnic_dev *dev, int valid)
u32 idx = cp->ctx_arr[i].cid / cp->cids_per_blk;
u32 val;
memset(cp->ctx_arr[i].ctx, 0, BNX2_PAGE_SIZE);
memset(cp->ctx_arr[i].ctx, 0, CNIC_PAGE_SIZE);
CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA0,
(cp->ctx_arr[i].mapping & 0xffffffff) | valid_bit);
@ -4628,7 +4629,7 @@ static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev)
val = BNX2_L2CTX_L2_STATUSB_NUM(sb_id);
cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_HOST_BDIDX, val);
rxbd = udev->l2_ring + BNX2_PAGE_SIZE;
rxbd = udev->l2_ring + CNIC_PAGE_SIZE;
for (i = 0; i < BNX2_MAX_RX_DESC_CNT; i++, rxbd++) {
dma_addr_t buf_map;
int n = (i % cp->l2_rx_ring_size) + 1;
@ -4639,11 +4640,11 @@ static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev)
rxbd->rx_bd_haddr_hi = (u64) buf_map >> 32;
rxbd->rx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
}
val = (u64) (ring_map + BNX2_PAGE_SIZE) >> 32;
val = (u64) (ring_map + CNIC_PAGE_SIZE) >> 32;
cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
rxbd->rx_bd_haddr_hi = val;
val = (u64) (ring_map + BNX2_PAGE_SIZE) & 0xffffffff;
val = (u64) (ring_map + CNIC_PAGE_SIZE) & 0xffffffff;
cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
rxbd->rx_bd_haddr_lo = val;
@ -4709,10 +4710,10 @@ static int cnic_start_bnx2_hw(struct cnic_dev *dev)
val = CNIC_RD(dev, BNX2_MQ_CONFIG);
val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
if (BNX2_PAGE_BITS > 12)
if (CNIC_PAGE_BITS > 12)
val |= (12 - 8) << 4;
else
val |= (BNX2_PAGE_BITS - 8) << 4;
val |= (CNIC_PAGE_BITS - 8) << 4;
CNIC_WR(dev, BNX2_MQ_CONFIG, val);
@ -4742,13 +4743,13 @@ static int cnic_start_bnx2_hw(struct cnic_dev *dev)
/* Initialize the kernel work queue context. */
val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
(BNX2_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
(CNIC_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_TYPE, val);
val = (BNX2_PAGE_SIZE / sizeof(struct kwqe) - 1) << 16;
val = (CNIC_PAGE_SIZE / sizeof(struct kwqe) - 1) << 16;
cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);
val = ((BNX2_PAGE_SIZE / sizeof(struct kwqe)) << 16) | KWQ_PAGE_CNT;
val = ((CNIC_PAGE_SIZE / sizeof(struct kwqe)) << 16) | KWQ_PAGE_CNT;
cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);
val = (u32) ((u64) cp->kwq_info.pgtbl_map >> 32);
@ -4768,13 +4769,13 @@ static int cnic_start_bnx2_hw(struct cnic_dev *dev)
/* Initialize the kernel complete queue context. */
val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
(BNX2_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
(CNIC_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_TYPE, val);
val = (BNX2_PAGE_SIZE / sizeof(struct kcqe) - 1) << 16;
val = (CNIC_PAGE_SIZE / sizeof(struct kcqe) - 1) << 16;
cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);
val = ((BNX2_PAGE_SIZE / sizeof(struct kcqe)) << 16) | KCQ_PAGE_CNT;
val = ((CNIC_PAGE_SIZE / sizeof(struct kcqe)) << 16) | KCQ_PAGE_CNT;
cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);
val = (u32) ((u64) cp->kcq1.dma.pgtbl_map >> 32);
@ -4918,7 +4919,7 @@ static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev,
u32 cli = cp->ethdev->iscsi_l2_client_id;
u32 val;
memset(txbd, 0, BNX2_PAGE_SIZE);
memset(txbd, 0, CNIC_PAGE_SIZE);
buf_map = udev->l2_buf_map;
for (i = 0; i < BNX2_MAX_TX_DESC_CNT; i += 3, txbd += 3) {
@ -4978,9 +4979,9 @@ static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev,
struct bnx2x *bp = netdev_priv(dev->netdev);
struct cnic_uio_dev *udev = cp->udev;
struct eth_rx_bd *rxbd = (struct eth_rx_bd *) (udev->l2_ring +
BNX2_PAGE_SIZE);
CNIC_PAGE_SIZE);
struct eth_rx_cqe_next_page *rxcqe = (struct eth_rx_cqe_next_page *)
(udev->l2_ring + (2 * BNX2_PAGE_SIZE));
(udev->l2_ring + (2 * CNIC_PAGE_SIZE));
struct host_sp_status_block *sb = cp->bnx2x_def_status_blk;
int i;
u32 cli = cp->ethdev->iscsi_l2_client_id;
@ -5004,20 +5005,20 @@ static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev,
rxbd->addr_lo = cpu_to_le32(buf_map & 0xffffffff);
}
val = (u64) (ring_map + BNX2_PAGE_SIZE) >> 32;
val = (u64) (ring_map + CNIC_PAGE_SIZE) >> 32;
rxbd->addr_hi = cpu_to_le32(val);
data->rx.bd_page_base.hi = cpu_to_le32(val);
val = (u64) (ring_map + BNX2_PAGE_SIZE) & 0xffffffff;
val = (u64) (ring_map + CNIC_PAGE_SIZE) & 0xffffffff;
rxbd->addr_lo = cpu_to_le32(val);
data->rx.bd_page_base.lo = cpu_to_le32(val);
rxcqe += BNX2X_MAX_RCQ_DESC_CNT;
val = (u64) (ring_map + (2 * BNX2_PAGE_SIZE)) >> 32;
val = (u64) (ring_map + (2 * CNIC_PAGE_SIZE)) >> 32;
rxcqe->addr_hi = cpu_to_le32(val);
data->rx.cqe_page_base.hi = cpu_to_le32(val);
val = (u64) (ring_map + (2 * BNX2_PAGE_SIZE)) & 0xffffffff;
val = (u64) (ring_map + (2 * CNIC_PAGE_SIZE)) & 0xffffffff;
rxcqe->addr_lo = cpu_to_le32(val);
data->rx.cqe_page_base.lo = cpu_to_le32(val);
@ -5265,8 +5266,8 @@ static void cnic_shutdown_rings(struct cnic_dev *dev)
msleep(10);
}
clear_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
rx_ring = udev->l2_ring + BNX2_PAGE_SIZE;
memset(rx_ring, 0, BNX2_PAGE_SIZE);
rx_ring = udev->l2_ring + CNIC_PAGE_SIZE;
memset(rx_ring, 0, CNIC_PAGE_SIZE);
}
static int cnic_register_netdev(struct cnic_dev *dev)


@ -1,6 +1,6 @@
/* cnic.h: Broadcom CNIC core network driver.
*
* Copyright (c) 2006-2013 Broadcom Corporation
* Copyright (c) 2006-2014 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by


@ -1,7 +1,7 @@
/* cnic.c: Broadcom CNIC core network driver.
*
* Copyright (c) 2006-2013 Broadcom Corporation
* Copyright (c) 2006-2014 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by


@ -1,6 +1,6 @@
/* cnic_if.h: Broadcom CNIC core network driver.
*
* Copyright (c) 2006-2013 Broadcom Corporation
* Copyright (c) 2006-2014 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@ -14,8 +14,8 @@
#include "bnx2x/bnx2x_mfw_req.h"
#define CNIC_MODULE_VERSION "2.5.19"
#define CNIC_MODULE_RELDATE "December 19, 2013"
#define CNIC_MODULE_VERSION "2.5.20"
#define CNIC_MODULE_RELDATE "March 14, 2014"
#define CNIC_ULP_RDMA 0
#define CNIC_ULP_ISCSI 1
@ -24,6 +24,16 @@
#define MAX_CNIC_ULP_TYPE_EXT 3
#define MAX_CNIC_ULP_TYPE 4
/* Use CPU native page size up to 16K for cnic ring sizes. */
#if (PAGE_SHIFT > 14)
#define CNIC_PAGE_BITS 14
#else
#define CNIC_PAGE_BITS PAGE_SHIFT
#endif
#define CNIC_PAGE_SIZE (1 << (CNIC_PAGE_BITS))
#define CNIC_PAGE_ALIGN(addr) ALIGN(addr, CNIC_PAGE_SIZE)
#define CNIC_PAGE_MASK (~((CNIC_PAGE_SIZE) - 1))
struct kwqe {
u32 kwqe_op_flag;

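The new CNIC_PAGE_* macros decouple ring sizing from the CPU page size: on
4 KB and 16 KB pages they match PAGE_SIZE, but on 64 KB-page hosts they cap
at 16 KB, presumably the largest unit the firmware rings accept. The cap in
isolation:

#include <assert.h>

static unsigned int cnic_page_bits(unsigned int page_shift)
{
	/* Mirror of the #if above: use the native size up to 16 KB. */
	return page_shift > 14 ? 14 : page_shift;
}

int main(void)
{
	assert(cnic_page_bits(12) == 12);	/* 4 KB pages: unchanged  */
	assert(cnic_page_bits(14) == 14);	/* 16 KB pages: unchanged */
	assert(cnic_page_bits(16) == 14);	/* 64 KB pages: capped    */
	return 0;
}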

@ -23,6 +23,7 @@
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/eeprom_93cx6.h>
#include <linux/regulator/consumer.h>
#include <linux/spi/spi.h>
@ -83,6 +84,7 @@ union ks8851_tx_hdr {
* @rc_rxqcr: Cached copy of KS_RXQCR.
* @eeprom_size: Companion eeprom size in Bytes, 0 if no eeprom
* @eeprom: 93CX6 EEPROM state for accessing on-board EEPROM.
* @vdd_reg: Optional regulator supplying the chip
*
* The @lock ensures that the chip is protected when certain operations are
* in progress. When the read or write packet transfer is in progress, most
@ -130,6 +132,7 @@ struct ks8851_net {
struct spi_transfer spi_xfer2[2];
struct eeprom_93cx6 eeprom;
struct regulator *vdd_reg;
};
static int msg_enable;
@ -1414,6 +1417,21 @@ static int ks8851_probe(struct spi_device *spi)
ks->spidev = spi;
ks->tx_space = 6144;
ks->vdd_reg = regulator_get_optional(&spi->dev, "vdd");
if (IS_ERR(ks->vdd_reg)) {
ret = PTR_ERR(ks->vdd_reg);
if (ret == -EPROBE_DEFER)
goto err_reg;
} else {
ret = regulator_enable(ks->vdd_reg);
if (ret) {
dev_err(&spi->dev, "regulator enable fail: %d\n",
ret);
goto err_reg_en;
}
}
mutex_init(&ks->lock);
spin_lock_init(&ks->statelock);
@ -1508,8 +1526,14 @@ static int ks8851_probe(struct spi_device *spi)
err_netdev:
free_irq(ndev->irq, ks);
err_id:
err_irq:
err_id:
if (!IS_ERR(ks->vdd_reg))
regulator_disable(ks->vdd_reg);
err_reg_en:
if (!IS_ERR(ks->vdd_reg))
regulator_put(ks->vdd_reg);
err_reg:
free_netdev(ndev);
return ret;
}
@ -1523,6 +1547,10 @@ static int ks8851_remove(struct spi_device *spi)
unregister_netdev(priv->netdev);
free_irq(spi->irq, priv);
if (!IS_ERR(priv->vdd_reg)) {
regulator_disable(priv->vdd_reg);
regulator_put(priv->vdd_reg);
}
free_netdev(priv->netdev);
return 0;


@ -2217,10 +2217,6 @@ static int cpsw_probe(struct platform_device *pdev)
goto clean_ale_ret;
}
if (cpts_register(&pdev->dev, priv->cpts,
data->cpts_clock_mult, data->cpts_clock_shift))
dev_err(priv->dev, "error registering cpts device\n");
cpsw_notice(priv, probe, "initialized device (regs %pa, irq %d)\n",
&ss_res->start, ndev->irq);


@ -355,7 +355,7 @@ int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr)
int i;
spin_lock_irqsave(&ctlr->lock, flags);
if (ctlr->state != CPDMA_STATE_ACTIVE) {
if (ctlr->state == CPDMA_STATE_TEARDOWN) {
spin_unlock_irqrestore(&ctlr->lock, flags);
return -EINVAL;
}
@ -891,7 +891,7 @@ int cpdma_chan_stop(struct cpdma_chan *chan)
unsigned timeout;
spin_lock_irqsave(&chan->lock, flags);
if (chan->state != CPDMA_STATE_ACTIVE) {
if (chan->state == CPDMA_STATE_TEARDOWN) {
spin_unlock_irqrestore(&chan->lock, flags);
return -EINVAL;
}


@ -1532,9 +1532,9 @@ static int emac_dev_open(struct net_device *ndev)
struct device *emac_dev = &ndev->dev;
u32 cnt;
struct resource *res;
int ret;
int q, m, ret;
int res_num = 0, irq_num = 0;
int i = 0;
int k = 0;
struct emac_priv *priv = netdev_priv(ndev);
pm_runtime_get(&priv->pdev->dev);
@ -1564,15 +1564,24 @@ static int emac_dev_open(struct net_device *ndev)
}
/* Request IRQ */
while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ,
res_num))) {
for (irq_num = res->start; irq_num <= res->end; irq_num++) {
dev_err(emac_dev, "Request IRQ %d\n", irq_num);
if (request_irq(irq_num, emac_irq, 0, ndev->name,
ndev)) {
dev_err(emac_dev,
"DaVinci EMAC: request_irq() failed\n");
ret = -EBUSY;
while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, k))) {
for (i = res->start; i <= res->end; i++) {
if (devm_request_irq(&priv->pdev->dev, i, emac_irq,
0, ndev->name, ndev))
goto rollback;
}
}
k++;
res_num++;
}
/* prepare counters for rollback in case of an error */
res_num--;
irq_num--;
/* Start/Enable EMAC hardware */
emac_hw_enable(priv);
@ -1639,11 +1648,23 @@ static int emac_dev_open(struct net_device *ndev)
return 0;
rollback:
dev_err(emac_dev, "DaVinci EMAC: devm_request_irq() failed");
ret = -EBUSY;
err:
emac_int_disable(priv);
napi_disable(&priv->napi);
rollback:
for (q = res_num; q >= 0; q--) {
res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, q);
/* at the first iteration, irq_num is already set to the
* right value
*/
if (q != res_num)
irq_num = res->end;
for (m = irq_num; m >= res->start; m--)
free_irq(m, ndev);
}
cpdma_ctlr_stop(priv->dma);
pm_runtime_put(&priv->pdev->dev);
return ret;
}
@ -1659,6 +1680,9 @@ err:
*/
static int emac_dev_stop(struct net_device *ndev)
{
struct resource *res;
int i = 0;
int irq_num;
struct emac_priv *priv = netdev_priv(ndev);
struct device *emac_dev = &ndev->dev;
@ -1674,6 +1698,13 @@ static int emac_dev_stop(struct net_device *ndev)
if (priv->phydev)
phy_disconnect(priv->phydev);
/* Free IRQ */
while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, i))) {
for (irq_num = res->start; irq_num <= res->end; irq_num++)
free_irq(irq_num, priv->ndev);
i++;
}
if (netif_msg_drv(priv))
dev_notice(emac_dev, "DaVinci EMAC: %s stopped\n", ndev->name);
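
The rollback loop above walks the IRQ resources backwards from the failure point, freeing in reverse order of acquisition. A userspace model of just that loop, with made-up resource ranges and a stub standing in for free_irq(); irq_num holds the last IRQ that was successfully requested:

#include <stdio.h>

struct res { int start, end; };

static void free_irq_stub(int irq) { printf("free_irq(%d)\n", irq); }

int main(void)
{
    struct res irq_res[] = { {10, 11}, {20, 22}, {30, 30} };
    int res_num = 1, irq_num = 21;  /* assume the request for 22 failed */
    int q, m;

    for (q = res_num; q >= 0; q--) {
        /* on the first pass irq_num is already the failure point */
        if (q != res_num)
            irq_num = irq_res[q].end;
        for (m = irq_num; m >= irq_res[q].start; m--)
            free_irq_stub(m);       /* prints 21, 20, 11, 10 */
    }
    return 0;
}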

View File

@ -923,7 +923,7 @@ static int rhine_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc) {
dev_err(&pdev->dev,
"32-bit PCI DMA addresses not supported by the card!?\n");
goto err_out;
goto err_out_pci_disable;
}
/* sanity check */
@ -931,7 +931,7 @@ static int rhine_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
(pci_resource_len(pdev, 1) < io_size)) {
rc = -EIO;
dev_err(&pdev->dev, "Insufficient PCI resources, aborting\n");
goto err_out;
goto err_out_pci_disable;
}
pioaddr = pci_resource_start(pdev, 0);
@ -942,7 +942,7 @@ static int rhine_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev = alloc_etherdev(sizeof(struct rhine_private));
if (!dev) {
rc = -ENOMEM;
goto err_out;
goto err_out_pci_disable;
}
SET_NETDEV_DEV(dev, &pdev->dev);
@ -1084,6 +1084,8 @@ err_out_free_res:
pci_release_regions(pdev);
err_out_free_netdev:
free_netdev(dev);
err_out_pci_disable:
pci_disable_device(pdev);
err_out:
return rc;
}
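
The new err_out_pci_disable label exists so that every failure after pci_enable_device() also disables the device on the way out. The idiom in miniature, with stubbed steps (assume the second step fails):

#include <stdio.h>

static int  enable_dev(void)  { puts("enable");  return 0; }
static void disable_dev(void) { puts("disable"); }
static int  map_regs(void)    { puts("map");     return -1; } /* fails */

int main(void)
{
    int rc = enable_dev();
    if (rc)
        goto err_out;

    rc = map_regs();
    if (rc)
        goto err_out_disable;   /* new label: undo enable_dev() too */

    return 0;

err_out_disable:
    disable_dev();
err_out:
    return 1;
}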

View File

@ -684,10 +684,9 @@ EXPORT_SYMBOL(phy_detach);
int phy_suspend(struct phy_device *phydev)
{
struct phy_driver *phydrv = to_phy_driver(phydev->dev.driver);
struct ethtool_wolinfo wol;
struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
/* If the device has WOL enabled, we cannot suspend the PHY */
wol.cmd = ETHTOOL_GWOL;
phy_ethtool_get_wol(phydev, &wol);
if (wol.wolopts)
return -EBUSY;
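
The one-line change is not cosmetic: a designated initializer zeroes every member that is not named, so wol.wolopts is well defined even if the get_wol callback fills in nothing. A quick demonstration of that C guarantee:

#include <stdio.h>

struct wolinfo {
    unsigned cmd;
    unsigned supported;
    unsigned wolopts;
};

int main(void)
{
    struct wolinfo wol = { .cmd = 5 };  /* unnamed members become 0 */

    printf("cmd=%u supported=%u wolopts=%u\n",
           wol.cmd, wol.supported, wol.wolopts);
    return 0;
}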

View File

@ -68,7 +68,6 @@ static struct usb_driver cdc_ncm_driver;
static int cdc_ncm_setup(struct usbnet *dev)
{
struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
struct usb_cdc_ncm_ntb_parameters ncm_parm;
u32 val;
u8 flags;
u8 iface_no;
@ -83,22 +82,22 @@ static int cdc_ncm_setup(struct usbnet *dev)
err = usbnet_read_cmd(dev, USB_CDC_GET_NTB_PARAMETERS,
USB_TYPE_CLASS | USB_DIR_IN
|USB_RECIP_INTERFACE,
0, iface_no, &ncm_parm,
sizeof(ncm_parm));
0, iface_no, &ctx->ncm_parm,
sizeof(ctx->ncm_parm));
if (err < 0) {
dev_err(&dev->intf->dev, "failed GET_NTB_PARAMETERS\n");
return err; /* GET_NTB_PARAMETERS is required */
}
/* read correct set of parameters according to device mode */
ctx->rx_max = le32_to_cpu(ncm_parm.dwNtbInMaxSize);
ctx->tx_max = le32_to_cpu(ncm_parm.dwNtbOutMaxSize);
ctx->tx_remainder = le16_to_cpu(ncm_parm.wNdpOutPayloadRemainder);
ctx->tx_modulus = le16_to_cpu(ncm_parm.wNdpOutDivisor);
ctx->tx_ndp_modulus = le16_to_cpu(ncm_parm.wNdpOutAlignment);
ctx->rx_max = le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize);
ctx->tx_max = le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize);
ctx->tx_remainder = le16_to_cpu(ctx->ncm_parm.wNdpOutPayloadRemainder);
ctx->tx_modulus = le16_to_cpu(ctx->ncm_parm.wNdpOutDivisor);
ctx->tx_ndp_modulus = le16_to_cpu(ctx->ncm_parm.wNdpOutAlignment);
/* devices prior to NCM Errata shall set this field to zero */
ctx->tx_max_datagrams = le16_to_cpu(ncm_parm.wNtbOutMaxDatagrams);
ntb_fmt_supported = le16_to_cpu(ncm_parm.bmNtbFormatsSupported);
ctx->tx_max_datagrams = le16_to_cpu(ctx->ncm_parm.wNtbOutMaxDatagrams);
ntb_fmt_supported = le16_to_cpu(ctx->ncm_parm.bmNtbFormatsSupported);
/* there are some minor differences in NCM and MBIM defaults */
if (cdc_ncm_comm_intf_is_mbim(ctx->control->cur_altsetting)) {
@ -147,7 +146,7 @@ static int cdc_ncm_setup(struct usbnet *dev)
}
/* inform device about NTB input size changes */
if (ctx->rx_max != le32_to_cpu(ncm_parm.dwNtbInMaxSize)) {
if (ctx->rx_max != le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize)) {
__le32 dwNtbInMaxSize = cpu_to_le32(ctx->rx_max);
err = usbnet_write_cmd(dev, USB_CDC_SET_NTB_INPUT_SIZE,
@ -163,14 +162,6 @@ static int cdc_ncm_setup(struct usbnet *dev)
dev_dbg(&dev->intf->dev, "Using default maximum transmit length=%d\n",
CDC_NCM_NTB_MAX_SIZE_TX);
ctx->tx_max = CDC_NCM_NTB_MAX_SIZE_TX;
/* Adding a pad byte here simplifies the handling in
* cdc_ncm_fill_tx_frame, by making tx_max always
* represent the real skb max size.
*/
if (ctx->tx_max % usb_maxpacket(dev->udev, dev->out, 1) == 0)
ctx->tx_max++;
}
/*
@ -456,6 +447,10 @@ advance:
goto error2;
}
/* initialize data interface */
if (cdc_ncm_setup(dev))
goto error2;
/* configure data interface */
temp = usb_set_interface(dev->udev, iface_no, data_altsetting);
if (temp) {
@ -470,12 +465,6 @@ advance:
goto error2;
}
/* initialize data interface */
if (cdc_ncm_setup(dev)) {
dev_dbg(&intf->dev, "cdc_ncm_setup() failed\n");
goto error2;
}
usb_set_intfdata(ctx->data, dev);
usb_set_intfdata(ctx->control, dev);
@ -492,6 +481,15 @@ advance:
dev->hard_mtu = ctx->tx_max;
dev->rx_urb_size = ctx->rx_max;
/* cdc_ncm_setup will override dwNtbOutMaxSize if it is
* outside the sane range. Adding a pad byte here if necessary
* simplifies the handling in cdc_ncm_fill_tx_frame, making
* tx_max always represent the real skb max size.
*/
if (ctx->tx_max != le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize) &&
ctx->tx_max % usb_maxpacket(dev->udev, dev->out, 1) == 0)
ctx->tx_max++;
return 0;
error2:

View File

@ -1315,6 +1315,9 @@ static int arp_reduce(struct net_device *dev, struct sk_buff *skb)
neigh_release(n);
if (reply == NULL)
goto out;
skb_reset_mac_header(reply);
__skb_pull(reply, skb_network_offset(reply));
reply->ip_summed = CHECKSUM_UNNECESSARY;
@ -1336,15 +1339,103 @@ out:
}
#if IS_ENABLED(CONFIG_IPV6)
static struct sk_buff *vxlan_na_create(struct sk_buff *request,
struct neighbour *n, bool isrouter)
{
struct net_device *dev = request->dev;
struct sk_buff *reply;
struct nd_msg *ns, *na;
struct ipv6hdr *pip6;
u8 *daddr;
int na_olen = 8; /* opt hdr + ETH_ALEN for target */
int ns_olen;
int i, len;
if (dev == NULL)
return NULL;
len = LL_RESERVED_SPACE(dev) + sizeof(struct ipv6hdr) +
sizeof(*na) + na_olen + dev->needed_tailroom;
reply = alloc_skb(len, GFP_ATOMIC);
if (reply == NULL)
return NULL;
reply->protocol = htons(ETH_P_IPV6);
reply->dev = dev;
skb_reserve(reply, LL_RESERVED_SPACE(request->dev));
skb_push(reply, sizeof(struct ethhdr));
skb_set_mac_header(reply, 0);
ns = (struct nd_msg *)skb_transport_header(request);
daddr = eth_hdr(request)->h_source;
ns_olen = request->len - skb_transport_offset(request) - sizeof(*ns);
for (i = 0; i < ns_olen-1; i += (ns->opt[i+1]<<3)) {
if (ns->opt[i] == ND_OPT_SOURCE_LL_ADDR) {
daddr = ns->opt + i + sizeof(struct nd_opt_hdr);
break;
}
}
/* Ethernet header */
ether_addr_copy(eth_hdr(reply)->h_dest, daddr);
ether_addr_copy(eth_hdr(reply)->h_source, n->ha);
eth_hdr(reply)->h_proto = htons(ETH_P_IPV6);
reply->protocol = htons(ETH_P_IPV6);
skb_pull(reply, sizeof(struct ethhdr));
skb_set_network_header(reply, 0);
skb_put(reply, sizeof(struct ipv6hdr));
/* IPv6 header */
pip6 = ipv6_hdr(reply);
memset(pip6, 0, sizeof(struct ipv6hdr));
pip6->version = 6;
pip6->priority = ipv6_hdr(request)->priority;
pip6->nexthdr = IPPROTO_ICMPV6;
pip6->hop_limit = 255;
pip6->daddr = ipv6_hdr(request)->saddr;
pip6->saddr = *(struct in6_addr *)n->primary_key;
skb_pull(reply, sizeof(struct ipv6hdr));
skb_set_transport_header(reply, 0);
na = (struct nd_msg *)skb_put(reply, sizeof(*na) + na_olen);
/* Neighbor Advertisement */
memset(na, 0, sizeof(*na)+na_olen);
na->icmph.icmp6_type = NDISC_NEIGHBOUR_ADVERTISEMENT;
na->icmph.icmp6_router = isrouter;
na->icmph.icmp6_override = 1;
na->icmph.icmp6_solicited = 1;
na->target = ns->target;
ether_addr_copy(&na->opt[2], n->ha);
na->opt[0] = ND_OPT_TARGET_LL_ADDR;
na->opt[1] = na_olen >> 3;
na->icmph.icmp6_cksum = csum_ipv6_magic(&pip6->saddr,
&pip6->daddr, sizeof(*na)+na_olen, IPPROTO_ICMPV6,
csum_partial(na, sizeof(*na)+na_olen, 0));
pip6->payload_len = htons(sizeof(*na)+na_olen);
skb_push(reply, sizeof(struct ipv6hdr));
reply->ip_summed = CHECKSUM_UNNECESSARY;
return reply;
}
static int neigh_reduce(struct net_device *dev, struct sk_buff *skb)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
struct neighbour *n;
union vxlan_addr ipa;
struct nd_msg *msg;
const struct ipv6hdr *iphdr;
const struct in6_addr *saddr, *daddr;
struct nd_msg *msg;
struct inet6_dev *in6_dev = NULL;
struct neighbour *n;
struct inet6_dev *in6_dev;
in6_dev = __in6_dev_get(dev);
if (!in6_dev)
@ -1357,19 +1448,20 @@ static int neigh_reduce(struct net_device *dev, struct sk_buff *skb)
saddr = &iphdr->saddr;
daddr = &iphdr->daddr;
if (ipv6_addr_loopback(daddr) ||
ipv6_addr_is_multicast(daddr))
goto out;
msg = (struct nd_msg *)skb_transport_header(skb);
if (msg->icmph.icmp6_code != 0 ||
msg->icmph.icmp6_type != NDISC_NEIGHBOUR_SOLICITATION)
goto out;
n = neigh_lookup(ipv6_stub->nd_tbl, daddr, dev);
if (ipv6_addr_loopback(daddr) ||
ipv6_addr_is_multicast(&msg->target))
goto out;
n = neigh_lookup(ipv6_stub->nd_tbl, &msg->target, dev);
if (n) {
struct vxlan_fdb *f;
struct sk_buff *reply;
if (!(n->nud_state & NUD_CONNECTED)) {
neigh_release(n);
@ -1383,13 +1475,23 @@ static int neigh_reduce(struct net_device *dev, struct sk_buff *skb)
goto out;
}
ipv6_stub->ndisc_send_na(dev, n, saddr, &msg->target,
!!in6_dev->cnf.forwarding,
true, false, false);
reply = vxlan_na_create(skb, n,
!!(f ? f->flags & NTF_ROUTER : 0));
neigh_release(n);
if (reply == NULL)
goto out;
if (netif_rx_ni(reply) == NET_RX_DROP)
dev->stats.rx_dropped++;
} else if (vxlan->flags & VXLAN_F_L3MISS) {
ipa.sin6.sin6_addr = *daddr;
ipa.sa.sa_family = AF_INET6;
union vxlan_addr ipa = {
.sin6.sin6_addr = msg->target,
.sa.sa_family = AF_INET6,
};
vxlan_ip_miss(dev, &ipa);
}
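
One detail worth calling out in vxlan_na_create(): neighbour discovery options are counted in 8-octet units (RFC 4861), which is why na_olen is 8 (a 2-byte option header plus a 6-byte MAC) and the length byte is written as na_olen >> 3. Checking the arithmetic:

#include <stdio.h>

#define ETH_ALEN 6

int main(void)
{
    int na_olen = 2 + ETH_ALEN;        /* option header + target MAC */

    printf("option bytes = %d, length field = %d unit(s)\n",
           na_olen, na_olen >> 3);     /* 8 bytes -> 1 unit */
    return 0;
}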

View File

@ -1547,6 +1547,7 @@ bool ath9k_hw_check_alive(struct ath_hw *ah)
if (reg != last_val)
return true;
udelay(1);
last_val = reg;
if ((reg & 0x7E7FFFEF) == 0x00702400)
continue;
@ -1559,8 +1560,6 @@ bool ath9k_hw_check_alive(struct ath_hw *ah)
default:
return true;
}
udelay(1);
} while (count-- > 0);
return false;

View File

@ -2065,7 +2065,7 @@ static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
ATH_TXBUF_RESET(bf);
if (tid) {
if (tid && ieee80211_is_data_present(hdr->frame_control)) {
fragno = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
seqno = tid->seq_next;
hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT);
@ -2188,7 +2188,7 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
txq->stopped = true;
}
if (txctl->an)
if (txctl->an && ieee80211_is_data_present(hdr->frame_control))
tid = ath_get_skb_tid(sc, txctl->an, skb);
if (info->flags & IEEE80211_TX_CTL_PS_RESPONSE) {

View File

@ -2140,8 +2140,10 @@ static int brcmf_sdio_txpkt_prep_sg(struct brcmf_sdio *bus,
if (pkt_pad == NULL)
return -ENOMEM;
ret = brcmf_sdio_txpkt_hdalign(bus, pkt_pad);
if (unlikely(ret < 0))
if (unlikely(ret < 0)) {
kfree_skb(pkt_pad);
return ret;
}
memcpy(pkt_pad->data,
pkt->data + pkt->len - tail_chop,
tail_chop);

View File

@ -5460,14 +5460,15 @@ static void rt2800_init_bbp_53xx(struct rt2x00_dev *rt2x00dev)
rt2800_bbp_write(rt2x00dev, 68, 0x0b);
rt2800_bbp_write(rt2x00dev, 69, 0x0d);
rt2800_bbp_write(rt2x00dev, 70, 0x06);
rt2800_bbp_write(rt2x00dev, 69, 0x12);
rt2800_bbp_write(rt2x00dev, 73, 0x13);
rt2800_bbp_write(rt2x00dev, 75, 0x46);
rt2800_bbp_write(rt2x00dev, 76, 0x28);
rt2800_bbp_write(rt2x00dev, 77, 0x59);
rt2800_bbp_write(rt2x00dev, 70, 0x0a);
rt2800_bbp_write(rt2x00dev, 79, 0x13);
rt2800_bbp_write(rt2x00dev, 80, 0x05);
rt2800_bbp_write(rt2x00dev, 81, 0x33);
@ -5510,7 +5511,6 @@ static void rt2800_init_bbp_53xx(struct rt2x00_dev *rt2x00dev)
if (rt2x00_rt(rt2x00dev, RT5392)) {
rt2800_bbp_write(rt2x00dev, 134, 0xd0);
rt2800_bbp_write(rt2x00dev, 135, 0xf6);
rt2800_bbp_write(rt2x00dev, 148, 0x84);
}
rt2800_disable_unused_dac_adc(rt2x00dev);

View File

@ -325,7 +325,7 @@ static int beiscsi_eh_device_reset(struct scsi_cmnd *sc)
if (!abrt_task->sc || abrt_task->state == ISCSI_TASK_FREE)
continue;
if (abrt_task->sc->device->lun != abrt_task->sc->device->lun)
if (sc->device->lun != abrt_task->sc->device->lun)
continue;
/* Invalidate WRB Posted for this Task */

View File

@ -594,13 +594,13 @@ static void bnx2fc_free_mp_resc(struct bnx2fc_cmd *io_req)
mp_req->mp_resp_bd = NULL;
}
if (mp_req->req_buf) {
dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
mp_req->req_buf,
mp_req->req_buf_dma);
mp_req->req_buf = NULL;
}
if (mp_req->resp_buf) {
dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
mp_req->resp_buf,
mp_req->resp_buf_dma);
mp_req->resp_buf = NULL;
@ -622,7 +622,7 @@ int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req)
mp_req->req_len = sizeof(struct fcp_cmnd);
io_req->data_xfer_len = mp_req->req_len;
mp_req->req_buf = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
mp_req->req_buf = dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
&mp_req->req_buf_dma,
GFP_ATOMIC);
if (!mp_req->req_buf) {
@ -631,7 +631,7 @@ int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req)
return FAILED;
}
mp_req->resp_buf = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
mp_req->resp_buf = dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
&mp_req->resp_buf_dma,
GFP_ATOMIC);
if (!mp_req->resp_buf) {
@ -639,8 +639,8 @@ int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req)
bnx2fc_free_mp_resc(io_req);
return FAILED;
}
memset(mp_req->req_buf, 0, PAGE_SIZE);
memset(mp_req->resp_buf, 0, PAGE_SIZE);
memset(mp_req->req_buf, 0, CNIC_PAGE_SIZE);
memset(mp_req->resp_buf, 0, CNIC_PAGE_SIZE);
/* Allocate and map mp_req_bd and mp_resp_bd */
sz = sizeof(struct fcoe_bd_ctx);
@ -665,7 +665,7 @@ int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req)
mp_req_bd = mp_req->mp_req_bd;
mp_req_bd->buf_addr_lo = (u32)addr & 0xffffffff;
mp_req_bd->buf_addr_hi = (u32)((u64)addr >> 32);
mp_req_bd->buf_len = PAGE_SIZE;
mp_req_bd->buf_len = CNIC_PAGE_SIZE;
mp_req_bd->flags = 0;
/*
@ -677,7 +677,7 @@ int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req)
addr = mp_req->resp_buf_dma;
mp_resp_bd->buf_addr_lo = (u32)addr & 0xffffffff;
mp_resp_bd->buf_addr_hi = (u32)((u64)addr >> 32);
mp_resp_bd->buf_len = PAGE_SIZE;
mp_resp_bd->buf_len = CNIC_PAGE_SIZE;
mp_resp_bd->flags = 0;
return SUCCESS;

View File

@ -673,7 +673,8 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
/* Allocate and map SQ */
tgt->sq_mem_size = tgt->max_sqes * BNX2FC_SQ_WQE_SIZE;
tgt->sq_mem_size = (tgt->sq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
tgt->sq_mem_size = (tgt->sq_mem_size + (CNIC_PAGE_SIZE - 1)) &
CNIC_PAGE_MASK;
tgt->sq = dma_alloc_coherent(&hba->pcidev->dev, tgt->sq_mem_size,
&tgt->sq_dma, GFP_KERNEL);
@ -686,7 +687,8 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
/* Allocate and map CQ */
tgt->cq_mem_size = tgt->max_cqes * BNX2FC_CQ_WQE_SIZE;
tgt->cq_mem_size = (tgt->cq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
tgt->cq_mem_size = (tgt->cq_mem_size + (CNIC_PAGE_SIZE - 1)) &
CNIC_PAGE_MASK;
tgt->cq = dma_alloc_coherent(&hba->pcidev->dev, tgt->cq_mem_size,
&tgt->cq_dma, GFP_KERNEL);
@ -699,7 +701,8 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
/* Allocate and map RQ and RQ PBL */
tgt->rq_mem_size = tgt->max_rqes * BNX2FC_RQ_WQE_SIZE;
tgt->rq_mem_size = (tgt->rq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
tgt->rq_mem_size = (tgt->rq_mem_size + (CNIC_PAGE_SIZE - 1)) &
CNIC_PAGE_MASK;
tgt->rq = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_mem_size,
&tgt->rq_dma, GFP_KERNEL);
@ -710,8 +713,9 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
}
memset(tgt->rq, 0, tgt->rq_mem_size);
tgt->rq_pbl_size = (tgt->rq_mem_size / PAGE_SIZE) * sizeof(void *);
tgt->rq_pbl_size = (tgt->rq_pbl_size + (PAGE_SIZE - 1)) & PAGE_MASK;
tgt->rq_pbl_size = (tgt->rq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *);
tgt->rq_pbl_size = (tgt->rq_pbl_size + (CNIC_PAGE_SIZE - 1)) &
CNIC_PAGE_MASK;
tgt->rq_pbl = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_pbl_size,
&tgt->rq_pbl_dma, GFP_KERNEL);
@ -722,7 +726,7 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
}
memset(tgt->rq_pbl, 0, tgt->rq_pbl_size);
num_pages = tgt->rq_mem_size / PAGE_SIZE;
num_pages = tgt->rq_mem_size / CNIC_PAGE_SIZE;
page = tgt->rq_dma;
pbl = (u32 *)tgt->rq_pbl;
@ -731,13 +735,13 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
pbl++;
*pbl = (u32)((u64)page >> 32);
pbl++;
page += PAGE_SIZE;
page += CNIC_PAGE_SIZE;
}
/* Allocate and map XFERQ */
tgt->xferq_mem_size = tgt->max_sqes * BNX2FC_XFERQ_WQE_SIZE;
tgt->xferq_mem_size = (tgt->xferq_mem_size + (PAGE_SIZE - 1)) &
PAGE_MASK;
tgt->xferq_mem_size = (tgt->xferq_mem_size + (CNIC_PAGE_SIZE - 1)) &
CNIC_PAGE_MASK;
tgt->xferq = dma_alloc_coherent(&hba->pcidev->dev, tgt->xferq_mem_size,
&tgt->xferq_dma, GFP_KERNEL);
@ -750,8 +754,8 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
/* Allocate and map CONFQ & CONFQ PBL */
tgt->confq_mem_size = tgt->max_sqes * BNX2FC_CONFQ_WQE_SIZE;
tgt->confq_mem_size = (tgt->confq_mem_size + (PAGE_SIZE - 1)) &
PAGE_MASK;
tgt->confq_mem_size = (tgt->confq_mem_size + (CNIC_PAGE_SIZE - 1)) &
CNIC_PAGE_MASK;
tgt->confq = dma_alloc_coherent(&hba->pcidev->dev, tgt->confq_mem_size,
&tgt->confq_dma, GFP_KERNEL);
@ -763,9 +767,9 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
memset(tgt->confq, 0, tgt->confq_mem_size);
tgt->confq_pbl_size =
(tgt->confq_mem_size / PAGE_SIZE) * sizeof(void *);
(tgt->confq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *);
tgt->confq_pbl_size =
(tgt->confq_pbl_size + (PAGE_SIZE - 1)) & PAGE_MASK;
(tgt->confq_pbl_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK;
tgt->confq_pbl = dma_alloc_coherent(&hba->pcidev->dev,
tgt->confq_pbl_size,
@ -777,7 +781,7 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
}
memset(tgt->confq_pbl, 0, tgt->confq_pbl_size);
num_pages = tgt->confq_mem_size / PAGE_SIZE;
num_pages = tgt->confq_mem_size / CNIC_PAGE_SIZE;
page = tgt->confq_dma;
pbl = (u32 *)tgt->confq_pbl;
@ -786,7 +790,7 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
pbl++;
*pbl = (u32)((u64)page >> 32);
pbl++;
page += PAGE_SIZE;
page += CNIC_PAGE_SIZE;
}
/* Allocate and map ConnDB */
@ -805,8 +809,8 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
/* Allocate and map LCQ */
tgt->lcq_mem_size = (tgt->max_sqes + 8) * BNX2FC_SQ_WQE_SIZE;
tgt->lcq_mem_size = (tgt->lcq_mem_size + (PAGE_SIZE - 1)) &
PAGE_MASK;
tgt->lcq_mem_size = (tgt->lcq_mem_size + (CNIC_PAGE_SIZE - 1)) &
CNIC_PAGE_MASK;
tgt->lcq = dma_alloc_coherent(&hba->pcidev->dev, tgt->lcq_mem_size,
&tgt->lcq_dma, GFP_KERNEL);

View File

@ -61,7 +61,7 @@ static void bnx2i_adjust_qp_size(struct bnx2i_hba *hba)
* yield integral num of page buffers
*/
/* adjust SQ */
num_elements_per_pg = PAGE_SIZE / BNX2I_SQ_WQE_SIZE;
num_elements_per_pg = CNIC_PAGE_SIZE / BNX2I_SQ_WQE_SIZE;
if (hba->max_sqes < num_elements_per_pg)
hba->max_sqes = num_elements_per_pg;
else if (hba->max_sqes % num_elements_per_pg)
@ -69,7 +69,7 @@ static void bnx2i_adjust_qp_size(struct bnx2i_hba *hba)
~(num_elements_per_pg - 1);
/* adjust CQ */
num_elements_per_pg = PAGE_SIZE / BNX2I_CQE_SIZE;
num_elements_per_pg = CNIC_PAGE_SIZE / BNX2I_CQE_SIZE;
if (hba->max_cqes < num_elements_per_pg)
hba->max_cqes = num_elements_per_pg;
else if (hba->max_cqes % num_elements_per_pg)
@ -77,7 +77,7 @@ static void bnx2i_adjust_qp_size(struct bnx2i_hba *hba)
~(num_elements_per_pg - 1);
/* adjust RQ */
num_elements_per_pg = PAGE_SIZE / BNX2I_RQ_WQE_SIZE;
num_elements_per_pg = CNIC_PAGE_SIZE / BNX2I_RQ_WQE_SIZE;
if (hba->max_rqes < num_elements_per_pg)
hba->max_rqes = num_elements_per_pg;
else if (hba->max_rqes % num_elements_per_pg)
@ -959,7 +959,7 @@ static void setup_qp_page_tables(struct bnx2i_endpoint *ep)
/* SQ page table */
memset(ep->qp.sq_pgtbl_virt, 0, ep->qp.sq_pgtbl_size);
num_pages = ep->qp.sq_mem_size / PAGE_SIZE;
num_pages = ep->qp.sq_mem_size / CNIC_PAGE_SIZE;
page = ep->qp.sq_phys;
if (cnic_dev_10g)
@ -973,7 +973,7 @@ static void setup_qp_page_tables(struct bnx2i_endpoint *ep)
ptbl++;
*ptbl = (u32) ((u64) page >> 32);
ptbl++;
page += PAGE_SIZE;
page += CNIC_PAGE_SIZE;
} else {
/* PTE is written in big endian format for
* 5706/5708/5709 devices */
@ -981,13 +981,13 @@ static void setup_qp_page_tables(struct bnx2i_endpoint *ep)
ptbl++;
*ptbl = (u32) page;
ptbl++;
page += PAGE_SIZE;
page += CNIC_PAGE_SIZE;
}
}
/* RQ page table */
memset(ep->qp.rq_pgtbl_virt, 0, ep->qp.rq_pgtbl_size);
num_pages = ep->qp.rq_mem_size / PAGE_SIZE;
num_pages = ep->qp.rq_mem_size / CNIC_PAGE_SIZE;
page = ep->qp.rq_phys;
if (cnic_dev_10g)
@ -1001,7 +1001,7 @@ static void setup_qp_page_tables(struct bnx2i_endpoint *ep)
ptbl++;
*ptbl = (u32) ((u64) page >> 32);
ptbl++;
page += PAGE_SIZE;
page += CNIC_PAGE_SIZE;
} else {
/* PTE is written in big endian format for
* 5706/5708/5709 devices */
@ -1009,13 +1009,13 @@ static void setup_qp_page_tables(struct bnx2i_endpoint *ep)
ptbl++;
*ptbl = (u32) page;
ptbl++;
page += PAGE_SIZE;
page += CNIC_PAGE_SIZE;
}
}
/* CQ page table */
memset(ep->qp.cq_pgtbl_virt, 0, ep->qp.cq_pgtbl_size);
num_pages = ep->qp.cq_mem_size / PAGE_SIZE;
num_pages = ep->qp.cq_mem_size / CNIC_PAGE_SIZE;
page = ep->qp.cq_phys;
if (cnic_dev_10g)
@ -1029,7 +1029,7 @@ static void setup_qp_page_tables(struct bnx2i_endpoint *ep)
ptbl++;
*ptbl = (u32) ((u64) page >> 32);
ptbl++;
page += PAGE_SIZE;
page += CNIC_PAGE_SIZE;
} else {
/* PTE is written in big endian format for
* 5706/5708/5709 devices */
@ -1037,7 +1037,7 @@ static void setup_qp_page_tables(struct bnx2i_endpoint *ep)
ptbl++;
*ptbl = (u32) page;
ptbl++;
page += PAGE_SIZE;
page += CNIC_PAGE_SIZE;
}
}
}
@ -1064,11 +1064,11 @@ int bnx2i_alloc_qp_resc(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
/* Allocate page table memory for SQ which is page aligned */
ep->qp.sq_mem_size = hba->max_sqes * BNX2I_SQ_WQE_SIZE;
ep->qp.sq_mem_size =
(ep->qp.sq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
(ep->qp.sq_mem_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK;
ep->qp.sq_pgtbl_size =
(ep->qp.sq_mem_size / PAGE_SIZE) * sizeof(void *);
(ep->qp.sq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *);
ep->qp.sq_pgtbl_size =
(ep->qp.sq_pgtbl_size + (PAGE_SIZE - 1)) & PAGE_MASK;
(ep->qp.sq_pgtbl_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK;
ep->qp.sq_pgtbl_virt =
dma_alloc_coherent(&hba->pcidev->dev, ep->qp.sq_pgtbl_size,
@ -1101,11 +1101,11 @@ int bnx2i_alloc_qp_resc(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
/* Allocate page table memory for CQ which is page aligned */
ep->qp.cq_mem_size = hba->max_cqes * BNX2I_CQE_SIZE;
ep->qp.cq_mem_size =
(ep->qp.cq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
(ep->qp.cq_mem_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK;
ep->qp.cq_pgtbl_size =
(ep->qp.cq_mem_size / PAGE_SIZE) * sizeof(void *);
(ep->qp.cq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *);
ep->qp.cq_pgtbl_size =
(ep->qp.cq_pgtbl_size + (PAGE_SIZE - 1)) & PAGE_MASK;
(ep->qp.cq_pgtbl_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK;
ep->qp.cq_pgtbl_virt =
dma_alloc_coherent(&hba->pcidev->dev, ep->qp.cq_pgtbl_size,
@ -1144,11 +1144,11 @@ int bnx2i_alloc_qp_resc(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
/* Allocate page table memory for RQ which is page aligned */
ep->qp.rq_mem_size = hba->max_rqes * BNX2I_RQ_WQE_SIZE;
ep->qp.rq_mem_size =
(ep->qp.rq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
(ep->qp.rq_mem_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK;
ep->qp.rq_pgtbl_size =
(ep->qp.rq_mem_size / PAGE_SIZE) * sizeof(void *);
(ep->qp.rq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *);
ep->qp.rq_pgtbl_size =
(ep->qp.rq_pgtbl_size + (PAGE_SIZE - 1)) & PAGE_MASK;
(ep->qp.rq_pgtbl_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK;
ep->qp.rq_pgtbl_virt =
dma_alloc_coherent(&hba->pcidev->dev, ep->qp.rq_pgtbl_size,
@ -1270,7 +1270,7 @@ int bnx2i_send_fw_iscsi_init_msg(struct bnx2i_hba *hba)
bnx2i_adjust_qp_size(hba);
iscsi_init.flags =
ISCSI_PAGE_SIZE_4K << ISCSI_KWQE_INIT1_PAGE_SIZE_SHIFT;
(CNIC_PAGE_BITS - 8) << ISCSI_KWQE_INIT1_PAGE_SIZE_SHIFT;
if (en_tcp_dack)
iscsi_init.flags |= ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE;
iscsi_init.reserved0 = 0;
@ -1288,15 +1288,15 @@ int bnx2i_send_fw_iscsi_init_msg(struct bnx2i_hba *hba)
((hba->num_ccell & 0xFFFF) | (hba->max_sqes << 16));
iscsi_init.num_ccells_per_conn = hba->num_ccell;
iscsi_init.num_tasks_per_conn = hba->max_sqes;
iscsi_init.sq_wqes_per_page = PAGE_SIZE / BNX2I_SQ_WQE_SIZE;
iscsi_init.sq_wqes_per_page = CNIC_PAGE_SIZE / BNX2I_SQ_WQE_SIZE;
iscsi_init.sq_num_wqes = hba->max_sqes;
iscsi_init.cq_log_wqes_per_page =
(u8) bnx2i_power_of2(PAGE_SIZE / BNX2I_CQE_SIZE);
(u8) bnx2i_power_of2(CNIC_PAGE_SIZE / BNX2I_CQE_SIZE);
iscsi_init.cq_num_wqes = hba->max_cqes;
iscsi_init.cq_num_pages = (hba->max_cqes * BNX2I_CQE_SIZE +
(PAGE_SIZE - 1)) / PAGE_SIZE;
(CNIC_PAGE_SIZE - 1)) / CNIC_PAGE_SIZE;
iscsi_init.sq_num_pages = (hba->max_sqes * BNX2I_SQ_WQE_SIZE +
(PAGE_SIZE - 1)) / PAGE_SIZE;
(CNIC_PAGE_SIZE - 1)) / CNIC_PAGE_SIZE;
iscsi_init.rq_buffer_size = BNX2I_RQ_WQE_SIZE;
iscsi_init.rq_num_wqes = hba->max_rqes;

View File

@ -525,7 +525,7 @@ static int bnx2i_setup_mp_bdt(struct bnx2i_hba *hba)
struct iscsi_bd *mp_bdt;
u64 addr;
hba->mp_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
hba->mp_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
&hba->mp_bd_dma, GFP_KERNEL);
if (!hba->mp_bd_tbl) {
printk(KERN_ERR "unable to allocate Middle Path BDT\n");
@ -533,11 +533,12 @@ static int bnx2i_setup_mp_bdt(struct bnx2i_hba *hba)
goto out;
}
hba->dummy_buffer = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
hba->dummy_buffer = dma_alloc_coherent(&hba->pcidev->dev,
CNIC_PAGE_SIZE,
&hba->dummy_buf_dma, GFP_KERNEL);
if (!hba->dummy_buffer) {
printk(KERN_ERR "unable to alloc Middle Path Dummy Buffer\n");
dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
hba->mp_bd_tbl, hba->mp_bd_dma);
hba->mp_bd_tbl = NULL;
rc = -1;
@ -548,7 +549,7 @@ static int bnx2i_setup_mp_bdt(struct bnx2i_hba *hba)
addr = (unsigned long) hba->dummy_buf_dma;
mp_bdt->buffer_addr_lo = addr & 0xffffffff;
mp_bdt->buffer_addr_hi = addr >> 32;
mp_bdt->buffer_length = PAGE_SIZE;
mp_bdt->buffer_length = CNIC_PAGE_SIZE;
mp_bdt->flags = ISCSI_BD_LAST_IN_BD_CHAIN |
ISCSI_BD_FIRST_IN_BD_CHAIN;
out:
@ -565,12 +566,12 @@ out:
static void bnx2i_free_mp_bdt(struct bnx2i_hba *hba)
{
if (hba->mp_bd_tbl) {
dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
hba->mp_bd_tbl, hba->mp_bd_dma);
hba->mp_bd_tbl = NULL;
}
if (hba->dummy_buffer) {
dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
hba->dummy_buffer, hba->dummy_buf_dma);
hba->dummy_buffer = NULL;
}
@ -934,14 +935,14 @@ static void bnx2i_conn_free_login_resources(struct bnx2i_hba *hba,
struct bnx2i_conn *bnx2i_conn)
{
if (bnx2i_conn->gen_pdu.resp_bd_tbl) {
dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
bnx2i_conn->gen_pdu.resp_bd_tbl,
bnx2i_conn->gen_pdu.resp_bd_dma);
bnx2i_conn->gen_pdu.resp_bd_tbl = NULL;
}
if (bnx2i_conn->gen_pdu.req_bd_tbl) {
dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
bnx2i_conn->gen_pdu.req_bd_tbl,
bnx2i_conn->gen_pdu.req_bd_dma);
bnx2i_conn->gen_pdu.req_bd_tbl = NULL;
@ -998,13 +999,13 @@ static int bnx2i_conn_alloc_login_resources(struct bnx2i_hba *hba,
bnx2i_conn->gen_pdu.resp_wr_ptr = bnx2i_conn->gen_pdu.resp_buf;
bnx2i_conn->gen_pdu.req_bd_tbl =
dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
&bnx2i_conn->gen_pdu.req_bd_dma, GFP_KERNEL);
if (bnx2i_conn->gen_pdu.req_bd_tbl == NULL)
goto login_req_bd_tbl_failure;
bnx2i_conn->gen_pdu.resp_bd_tbl =
dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
&bnx2i_conn->gen_pdu.resp_bd_dma,
GFP_KERNEL);
if (bnx2i_conn->gen_pdu.resp_bd_tbl == NULL)
@ -1013,7 +1014,7 @@ static int bnx2i_conn_alloc_login_resources(struct bnx2i_hba *hba,
return 0;
login_resp_bd_tbl_failure:
dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
bnx2i_conn->gen_pdu.req_bd_tbl,
bnx2i_conn->gen_pdu.req_bd_dma);
bnx2i_conn->gen_pdu.req_bd_tbl = NULL;

View File

@ -311,9 +311,8 @@ static inline struct Scsi_Host *to_shost(struct isci_host *ihost)
}
#define for_each_isci_host(id, ihost, pdev) \
for (id = 0, ihost = to_pci_info(pdev)->hosts[id]; \
id < ARRAY_SIZE(to_pci_info(pdev)->hosts) && ihost; \
ihost = to_pci_info(pdev)->hosts[++id])
for (id = 0; id < SCI_MAX_CONTROLLERS && \
(ihost = to_pci_info(pdev)->hosts[id]); id++)
static inline void wait_for_start(struct isci_host *ihost)
{

View File

@ -615,13 +615,6 @@ static void sci_apc_agent_link_up(struct isci_host *ihost,
SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION);
} else {
/* the phy is already the part of the port */
u32 port_state = iport->sm.current_state_id;
/* if the PORT'S state is resetting then the link up is from
* port hard reset in this case, we need to tell the port
* that link up is received
*/
BUG_ON(port_state != SCI_PORT_RESETTING);
port_agent->phy_ready_mask |= 1 << phy_index;
sci_port_link_up(iport, iphy);
}

View File

@ -801,7 +801,7 @@ int isci_task_I_T_nexus_reset(struct domain_device *dev)
/* XXX: need to cleanup any ireqs targeting this
* domain_device
*/
ret = TMF_RESP_FUNC_COMPLETE;
ret = -ENODEV;
goto out;
}

View File

@ -2996,8 +2996,7 @@ struct qla_hw_data {
IS_QLA82XX(ha) || IS_QLA83XX(ha) || \
IS_QLA8044(ha))
#define IS_MSIX_NACK_CAPABLE(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha))
#define IS_NOPOLLING_TYPE(ha) ((IS_QLA25XX(ha) || IS_QLA81XX(ha) || \
IS_QLA83XX(ha)) && (ha)->flags.msix_enabled)
#define IS_NOPOLLING_TYPE(ha) (IS_QLA81XX(ha) && (ha)->flags.msix_enabled)
#define IS_FAC_REQUIRED(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha))
#define IS_NOCACHE_VPD_TYPE(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha))
#define IS_ALOGIO_CAPABLE(ha) (IS_QLA23XX(ha) || IS_FWI2_CAPABLE(ha))

View File

@ -2880,6 +2880,7 @@ static int
qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
{
#define MIN_MSIX_COUNT 2
#define ATIO_VECTOR 2
int i, ret;
struct msix_entry *entries;
struct qla_msix_entry *qentry;
@ -2936,34 +2937,47 @@ msix_failed:
}
/* Enable MSI-X vectors for the base queue */
for (i = 0; i < ha->msix_count; i++) {
for (i = 0; i < 2; i++) {
qentry = &ha->msix_entries[i];
if (QLA_TGT_MODE_ENABLED() && IS_ATIO_MSIX_CAPABLE(ha)) {
ret = request_irq(qentry->vector,
qla83xx_msix_entries[i].handler,
0, qla83xx_msix_entries[i].name, rsp);
} else if (IS_P3P_TYPE(ha)) {
if (IS_P3P_TYPE(ha))
ret = request_irq(qentry->vector,
qla82xx_msix_entries[i].handler,
0, qla82xx_msix_entries[i].name, rsp);
} else {
else
ret = request_irq(qentry->vector,
msix_entries[i].handler,
0, msix_entries[i].name, rsp);
}
if (ret) {
ql_log(ql_log_fatal, vha, 0x00cb,
"MSI-X: unable to register handler -- %x/%d.\n",
qentry->vector, ret);
qla24xx_disable_msix(ha);
ha->mqenable = 0;
goto msix_out;
}
if (ret)
goto msix_register_fail;
qentry->have_irq = 1;
qentry->rsp = rsp;
rsp->msix = qentry;
}
/*
* If target mode is enable, also request the vector for the ATIO
* queue.
*/
if (QLA_TGT_MODE_ENABLED() && IS_ATIO_MSIX_CAPABLE(ha)) {
qentry = &ha->msix_entries[ATIO_VECTOR];
ret = request_irq(qentry->vector,
qla83xx_msix_entries[ATIO_VECTOR].handler,
0, qla83xx_msix_entries[ATIO_VECTOR].name, rsp);
qentry->have_irq = 1;
qentry->rsp = rsp;
rsp->msix = qentry;
}
msix_register_fail:
if (ret) {
ql_log(ql_log_fatal, vha, 0x00cb,
"MSI-X: unable to register handler -- %x/%d.\n",
qentry->vector, ret);
qla24xx_disable_msix(ha);
ha->mqenable = 0;
goto msix_out;
}
/* Enable MSI-X vector for response queue update for queue 0 */
if (IS_QLA83XX(ha)) {
if (ha->msixbase && ha->mqiobase &&

View File

@ -1419,6 +1419,9 @@ static void storvsc_device_destroy(struct scsi_device *sdevice)
{
struct stor_mem_pools *memp = sdevice->hostdata;
if (!memp)
return;
mempool_destroy(memp->request_mempool);
kmem_cache_destroy(memp->request_pool);
kfree(memp);

View File

@ -433,13 +433,10 @@ static void sunhv_console_write_paged(struct console *con, const char *s, unsign
unsigned long flags;
int locked = 1;
local_irq_save(flags);
if (port->sysrq) {
locked = 0;
} else if (oops_in_progress) {
locked = spin_trylock(&port->lock);
} else
spin_lock(&port->lock);
if (port->sysrq || oops_in_progress)
locked = spin_trylock_irqsave(&port->lock, flags);
else
spin_lock_irqsave(&port->lock, flags);
while (n > 0) {
unsigned long ra = __pa(con_write_page);
@ -470,8 +467,7 @@ static void sunhv_console_write_paged(struct console *con, const char *s, unsign
}
if (locked)
spin_unlock(&port->lock);
local_irq_restore(flags);
spin_unlock_irqrestore(&port->lock, flags);
}
static inline void sunhv_console_putchar(struct uart_port *port, char c)
@ -492,7 +488,10 @@ static void sunhv_console_write_bychar(struct console *con, const char *s, unsig
unsigned long flags;
int i, locked = 1;
local_irq_save(flags);
if (port->sysrq || oops_in_progress)
locked = spin_trylock_irqsave(&port->lock, flags);
else
spin_lock_irqsave(&port->lock, flags);
if (port->sysrq) {
locked = 0;
} else if (oops_in_progress) {
@ -507,8 +506,7 @@ static void sunhv_console_write_bychar(struct console *con, const char *s, unsig
}
if (locked)
spin_unlock(&port->lock);
local_irq_restore(flags);
spin_unlock_irqrestore(&port->lock, flags);
}
static struct console sunhv_console = {
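
The same locking idiom is applied to all four Sun console drivers in this series (sunhv, sunsab, sunsu, sunzilog below): take the port lock with irqsave normally, but merely trylock when sysrq handling or an oops may already hold it, and only unlock what was actually taken. The shape of the pattern, with a toy lock standing in for the spinlock:

#include <stdio.h>

static int lock_held;                    /* toy spinlock state */

static int  spin_trylock(void) { return lock_held ? 0 : (lock_held = 1); }
static void spin_lock(void)    { lock_held = 1; }
static void spin_unlock(void)  { lock_held = 0; }

static void console_write(int sysrq, int oops_in_progress)
{
    int locked = 1;

    if (sysrq || oops_in_progress)
        locked = spin_trylock();   /* owner may be us: don't deadlock */
    else
        spin_lock();

    puts("emit characters");

    if (locked)
        spin_unlock();
}

int main(void)
{
    lock_held = 1;                 /* simulate a write during an oops */
    console_write(0, 1);
    return 0;
}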

View File

@ -844,20 +844,16 @@ static void sunsab_console_write(struct console *con, const char *s, unsigned n)
unsigned long flags;
int locked = 1;
local_irq_save(flags);
if (up->port.sysrq) {
locked = 0;
} else if (oops_in_progress) {
locked = spin_trylock(&up->port.lock);
} else
spin_lock(&up->port.lock);
if (up->port.sysrq || oops_in_progress)
locked = spin_trylock_irqsave(&up->port.lock, flags);
else
spin_lock_irqsave(&up->port.lock, flags);
uart_console_write(&up->port, s, n, sunsab_console_putchar);
sunsab_tec_wait(up);
if (locked)
spin_unlock(&up->port.lock);
local_irq_restore(flags);
spin_unlock_irqrestore(&up->port.lock, flags);
}
static int sunsab_console_setup(struct console *con, char *options)

View File

@ -1295,13 +1295,10 @@ static void sunsu_console_write(struct console *co, const char *s,
unsigned int ier;
int locked = 1;
local_irq_save(flags);
if (up->port.sysrq) {
locked = 0;
} else if (oops_in_progress) {
locked = spin_trylock(&up->port.lock);
} else
spin_lock(&up->port.lock);
if (up->port.sysrq || oops_in_progress)
locked = spin_trylock_irqsave(&up->port.lock, flags);
else
spin_lock_irqsave(&up->port.lock, flags);
/*
* First save the UER then disable the interrupts
@ -1319,8 +1316,7 @@ static void sunsu_console_write(struct console *co, const char *s,
serial_out(up, UART_IER, ier);
if (locked)
spin_unlock(&up->port.lock);
local_irq_restore(flags);
spin_unlock_irqrestore(&up->port.lock, flags);
}
/*

View File

@ -1195,20 +1195,16 @@ sunzilog_console_write(struct console *con, const char *s, unsigned int count)
unsigned long flags;
int locked = 1;
local_irq_save(flags);
if (up->port.sysrq) {
locked = 0;
} else if (oops_in_progress) {
locked = spin_trylock(&up->port.lock);
} else
spin_lock(&up->port.lock);
if (up->port.sysrq || oops_in_progress)
locked = spin_trylock_irqsave(&up->port.lock, flags);
else
spin_lock_irqsave(&up->port.lock, flags);
uart_console_write(&up->port, s, count, sunzilog_putchar);
udelay(2);
if (locked)
spin_unlock(&up->port.lock);
local_irq_restore(flags);
spin_unlock_irqrestore(&up->port.lock, flags);
}
static int __init sunzilog_console_setup(struct console *con, char *options)

View File

@ -2833,9 +2833,9 @@ static int prepend_name(char **buffer, int *buflen, struct qstr *name)
u32 dlen = ACCESS_ONCE(name->len);
char *p;
if (*buflen < dlen + 1)
return -ENAMETOOLONG;
*buflen -= dlen + 1;
if (*buflen < 0)
return -ENAMETOOLONG;
p = *buffer -= dlen + 1;
*p++ = '/';
while (dlen--) {
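
The reordering in prepend_name() is a signedness fix: *buflen is an int while dlen is u32, so the old comparison promoted a negative *buflen to a huge unsigned value and the overflow test never fired. Subtracting first and testing against zero keeps everything signed. The pitfall in isolation:

#include <stdio.h>

int main(void)
{
    int buflen = -1;           /* already overflowed on a previous pass */
    unsigned int dlen = 3;

    if (buflen < dlen + 1)     /* old test: -1 promotes to 0xffffffff */
        puts("old test fires");
    else
        puts("old test misses the overflow");

    buflen -= dlen + 1;        /* new order: subtract, then signed test */
    if (buflen < 0)
        puts("new test fires");
    return 0;
}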

View File

@ -713,27 +713,16 @@ unsigned long __fdget_raw(unsigned int fd)
unsigned long __fdget_pos(unsigned int fd)
{
struct files_struct *files = current->files;
struct file *file;
unsigned long v;
unsigned long v = __fdget(fd);
struct file *file = (struct file *)(v & ~3);
if (atomic_read(&files->count) == 1) {
file = __fcheck_files(files, fd);
v = 0;
} else {
file = __fget(fd, 0);
v = FDPUT_FPUT;
}
if (!file)
return 0;
if (file->f_mode & FMODE_ATOMIC_POS) {
if (file && (file->f_mode & FMODE_ATOMIC_POS)) {
if (file_count(file) > 1) {
v |= FDPUT_POS_UNLOCK;
mutex_lock(&file->f_pos_lock);
}
}
return v | (unsigned long)file;
return v;
}
/*
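
Background for the __fdget_pos() rewrite: __fdget() returns the struct file pointer with the FDPUT_* flags packed into its two low bits, which works because the pointee is at least 4-byte aligned. Hence the (v & ~3) unpack above. The trick in miniature (flag values as in include/linux/file.h):

#include <stdio.h>
#include <stdint.h>

#define FDPUT_FPUT       1UL
#define FDPUT_POS_UNLOCK 2UL

int main(void)
{
    static long object;                  /* stands in for struct file */
    uintptr_t v = (uintptr_t)&object | FDPUT_FPUT;

    long *file = (long *)(v & ~3UL);     /* strip the flag bits */

    printf("fput needed: %lu, pointer intact: %d\n",
           (unsigned long)(v & FDPUT_FPUT), file == &object);
    return 0;
}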

View File

@ -1109,7 +1109,7 @@ static bool __follow_mount_rcu(struct nameidata *nd, struct path *path,
return false;
if (!d_mountpoint(path->dentry))
break;
return true;
mounted = __lookup_mnt(path->mnt, path->dentry);
if (!mounted)
@ -1125,20 +1125,7 @@ static bool __follow_mount_rcu(struct nameidata *nd, struct path *path,
*/
*inode = path->dentry->d_inode;
}
return true;
}
static void follow_mount_rcu(struct nameidata *nd)
{
while (d_mountpoint(nd->path.dentry)) {
struct mount *mounted;
mounted = __lookup_mnt(nd->path.mnt, nd->path.dentry);
if (!mounted)
break;
nd->path.mnt = &mounted->mnt;
nd->path.dentry = mounted->mnt.mnt_root;
nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq);
}
return read_seqretry(&mount_lock, nd->m_seq);
}
static int follow_dotdot_rcu(struct nameidata *nd)
@ -1166,7 +1153,17 @@ static int follow_dotdot_rcu(struct nameidata *nd)
break;
nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq);
}
follow_mount_rcu(nd);
while (d_mountpoint(nd->path.dentry)) {
struct mount *mounted;
mounted = __lookup_mnt(nd->path.mnt, nd->path.dentry);
if (!mounted)
break;
nd->path.mnt = &mounted->mnt;
nd->path.dentry = mounted->mnt.mnt_root;
nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq);
if (!read_seqretry(&mount_lock, nd->m_seq))
goto failed;
}
nd->inode = nd->path.dentry->d_inode;
return 0;

View File

@ -449,6 +449,7 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap,
fh_lock(fhp);
host_err = notify_change(dentry, iap, NULL);
fh_unlock(fhp);
err = nfserrno(host_err);
out_put_write_access:
if (size_change)

View File

@ -307,7 +307,7 @@ SYSCALL_DEFINE5(llseek, unsigned int, fd, unsigned long, offset_high,
unsigned int, whence)
{
int retval;
struct fd f = fdget(fd);
struct fd f = fdget_pos(fd);
loff_t offset;
if (!f.file)
@ -327,7 +327,7 @@ SYSCALL_DEFINE5(llseek, unsigned int, fd, unsigned long, offset_high,
retval = 0;
}
out_putf:
fdput(f);
fdput_pos(f);
return retval;
}
#endif

View File

@ -495,10 +495,6 @@ enum {
FILTER_TRACE_FN,
};
#define EVENT_STORAGE_SIZE 128
extern struct mutex event_storage_mutex;
extern char event_storage[EVENT_STORAGE_SIZE];
extern int trace_event_raw_init(struct ftrace_event_call *call);
extern int trace_define_field(struct ftrace_event_call *call, const char *type,
const char *name, int offset, int size,

View File

@ -250,8 +250,7 @@ struct rmap_walk_control {
int (*rmap_one)(struct page *page, struct vm_area_struct *vma,
unsigned long addr, void *arg);
int (*done)(struct page *page);
int (*file_nonlinear)(struct page *, struct address_space *,
struct vm_area_struct *vma);
int (*file_nonlinear)(struct page *, struct address_space *, void *arg);
struct anon_vma *(*anon_lock)(struct page *page);
bool (*invalid_vma)(struct vm_area_struct *vma, void *arg);
};

View File

@ -1040,6 +1040,7 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
* Allocate a security structure to the xp->security field; the security
* field is initialized to NULL when the xfrm_policy is allocated.
* Return 0 if operation was successful (memory to allocate, legal context)
* @gfp is to specify the context for the allocation
* @xfrm_policy_clone_security:
* @old_ctx contains an existing xfrm_sec_ctx.
* @new_ctxp contains a new xfrm_sec_ctx being cloned from old.
@ -1683,7 +1684,7 @@ struct security_operations {
#ifdef CONFIG_SECURITY_NETWORK_XFRM
int (*xfrm_policy_alloc_security) (struct xfrm_sec_ctx **ctxp,
struct xfrm_user_sec_ctx *sec_ctx);
struct xfrm_user_sec_ctx *sec_ctx, gfp_t gfp);
int (*xfrm_policy_clone_security) (struct xfrm_sec_ctx *old_ctx, struct xfrm_sec_ctx **new_ctx);
void (*xfrm_policy_free_security) (struct xfrm_sec_ctx *ctx);
int (*xfrm_policy_delete_security) (struct xfrm_sec_ctx *ctx);
@ -2859,7 +2860,8 @@ static inline void security_skb_owned_by(struct sk_buff *skb, struct sock *sk)
#ifdef CONFIG_SECURITY_NETWORK_XFRM
int security_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp, struct xfrm_user_sec_ctx *sec_ctx);
int security_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp,
struct xfrm_user_sec_ctx *sec_ctx, gfp_t gfp);
int security_xfrm_policy_clone(struct xfrm_sec_ctx *old_ctx, struct xfrm_sec_ctx **new_ctxp);
void security_xfrm_policy_free(struct xfrm_sec_ctx *ctx);
int security_xfrm_policy_delete(struct xfrm_sec_ctx *ctx);
@ -2877,7 +2879,9 @@ void security_skb_classify_flow(struct sk_buff *skb, struct flowi *fl);
#else /* CONFIG_SECURITY_NETWORK_XFRM */
static inline int security_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp, struct xfrm_user_sec_ctx *sec_ctx)
static inline int security_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp,
struct xfrm_user_sec_ctx *sec_ctx,
gfp_t gfp)
{
return 0;
}

View File

@ -88,6 +88,7 @@
#define cdc_ncm_data_intf_is_mbim(x) ((x)->desc.bInterfaceProtocol == USB_CDC_MBIM_PROTO_NTB)
struct cdc_ncm_ctx {
struct usb_cdc_ncm_ntb_parameters ncm_parm;
struct hrtimer tx_timer;
struct tasklet_struct bh;

View File

@ -480,20 +480,21 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
struct ip_options *opt);
#ifdef CONFIG_SYN_COOKIES
/* Syncookies use a monotonic timer which increments every 64 seconds.
/* Syncookies use a monotonic timer which increments every 60 seconds.
* This counter is used both as a hash input and partially encoded into
* the cookie value. A cookie is only validated further if the delta
* between the current counter value and the encoded one is less than this,
* i.e. a sent cookie is valid only at most for 128 seconds (or less if
* i.e. a sent cookie is valid only at most for 2*60 seconds (or less if
* the counter advances immediately after a cookie is generated).
*/
#define MAX_SYNCOOKIE_AGE 2
static inline u32 tcp_cookie_time(void)
{
struct timespec now;
getnstimeofday(&now);
return now.tv_sec >> 6; /* 64 seconds granularity */
u64 val = get_jiffies_64();
do_div(val, 60 * HZ);
return val;
}
u32 __cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th,
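
The replacement counter is worth spelling out: 64-bit jiffies divided down to one tick per 60 seconds, with the delta compared against MAX_SYNCOOKIE_AGE when the cookie comes back. A userspace rendering (HZ assumed to be 100 here):

#include <stdio.h>
#include <stdint.h>

#define HZ 100                          /* assumed config, demo only */
#define MAX_SYNCOOKIE_AGE 2

static uint32_t cookie_time(uint64_t jiffies64)
{
    return (uint32_t)(jiffies64 / (60 * HZ));   /* 60 s granularity */
}

int main(void)
{
    uint64_t now = 1234567;             /* arbitrary jiffies value */
    uint32_t sent = cookie_time(now);

    /* valid while fewer than MAX_SYNCOOKIE_AGE ticks have elapsed */
    printf("after 30 s:  %s\n",
           cookie_time(now +  30 * HZ) - sent < MAX_SYNCOOKIE_AGE
           ? "valid" : "stale");
    printf("after 150 s: %s\n",
           cookie_time(now + 150 * HZ) - sent < MAX_SYNCOOKIE_AGE
           ? "valid" : "stale");
    return 0;
}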

View File

@ -310,15 +310,12 @@ static struct trace_event_functions ftrace_event_type_funcs_##call = { \
#undef __array
#define __array(type, item, len) \
do { \
mutex_lock(&event_storage_mutex); \
char *type_str = #type"["__stringify(len)"]"; \
BUILD_BUG_ON(len > MAX_FILTER_STR_VAL); \
snprintf(event_storage, sizeof(event_storage), \
"%s[%d]", #type, len); \
ret = trace_define_field(event_call, event_storage, #item, \
ret = trace_define_field(event_call, type_str, #item, \
offsetof(typeof(field), item), \
sizeof(field.item), \
is_signed_type(type), FILTER_OTHER); \
mutex_unlock(&event_storage_mutex); \
if (ret) \
return ret; \
} while (0);
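
The macro change removes the shared event_storage buffer (and its mutex) entirely: the "type[len]" string can be assembled by the preprocessor through stringification and adjacent-literal concatenation, so no runtime buffer is needed. The mechanism on its own:

#include <stdio.h>

#define __stringify_1(x) #x
#define __stringify(x)   __stringify_1(x)

/* expands to adjacent literals: "u32" "[" "8" "]" -> "u32[8]" */
#define TYPE_STR(type, len) #type "[" __stringify(len) "]"

int main(void)
{
    const char *type_str = TYPE_STR(u32, 8);

    puts(type_str);
    return 0;
}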

View File

@ -901,6 +901,8 @@ long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp, int msgfl
return -EINVAL;
if (msgflg & MSG_COPY) {
if ((msgflg & MSG_EXCEPT) || !(msgflg & IPC_NOWAIT))
return -EINVAL;
copy = prepare_copy(buf, min_t(size_t, bufsz, ns->msg_ctlmax));
if (IS_ERR(copy))
return PTR_ERR(copy);

View File

@ -4112,17 +4112,17 @@ static int create_css(struct cgroup *cgrp, struct cgroup_subsys *ss)
err = percpu_ref_init(&css->refcnt, css_release);
if (err)
goto err_free;
goto err_free_css;
init_css(css, ss, cgrp);
err = cgroup_populate_dir(cgrp, 1 << ss->subsys_id);
if (err)
goto err_free;
goto err_free_percpu_ref;
err = online_css(css);
if (err)
goto err_free;
goto err_clear_dir;
dget(cgrp->dentry);
css_get(css->parent);
@ -4138,8 +4138,11 @@ static int create_css(struct cgroup *cgrp, struct cgroup_subsys *ss)
return 0;
err_free:
err_clear_dir:
cgroup_clear_dir(css->cgroup, 1 << css->ss->subsys_id);
err_free_percpu_ref:
percpu_ref_cancel_init(&css->refcnt);
err_free_css:
ss->css_free(css);
return err;
}

View File

@ -234,6 +234,7 @@ static const struct futex_q futex_q_init = {
* waiting on a futex.
*/
struct futex_hash_bucket {
atomic_t waiters;
spinlock_t lock;
struct plist_head chain;
} ____cacheline_aligned_in_smp;
@ -253,22 +254,37 @@ static inline void futex_get_mm(union futex_key *key)
smp_mb__after_atomic_inc();
}
static inline bool hb_waiters_pending(struct futex_hash_bucket *hb)
/*
* Reflects a new waiter being added to the waitqueue.
*/
static inline void hb_waiters_inc(struct futex_hash_bucket *hb)
{
#ifdef CONFIG_SMP
atomic_inc(&hb->waiters);
/*
* Tasks trying to enter the critical region are most likely
* potential waiters that will be added to the plist. Ensure
* that wakers won't miss to-be-slept tasks in the window between
* the wait call and the actual plist_add.
* Full barrier (A), see the ordering comment above.
*/
if (spin_is_locked(&hb->lock))
return true;
smp_rmb(); /* Make sure we check the lock state first */
smp_mb__after_atomic_inc();
#endif
}
return !plist_head_empty(&hb->chain);
/*
* Reflects a waiter being removed from the waitqueue by wakeup
* paths.
*/
static inline void hb_waiters_dec(struct futex_hash_bucket *hb)
{
#ifdef CONFIG_SMP
atomic_dec(&hb->waiters);
#endif
}
static inline int hb_waiters_pending(struct futex_hash_bucket *hb)
{
#ifdef CONFIG_SMP
return atomic_read(&hb->waiters);
#else
return true;
return 1;
#endif
}
@ -954,6 +970,7 @@ static void __unqueue_futex(struct futex_q *q)
hb = container_of(q->lock_ptr, struct futex_hash_bucket, lock);
plist_del(&q->list, &hb->chain);
hb_waiters_dec(hb);
}
/*
@ -1257,7 +1274,9 @@ void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1,
*/
if (likely(&hb1->chain != &hb2->chain)) {
plist_del(&q->list, &hb1->chain);
hb_waiters_dec(hb1);
plist_add(&q->list, &hb2->chain);
hb_waiters_inc(hb2);
q->lock_ptr = &hb2->lock;
}
get_futex_key_refs(key2);
@ -1600,6 +1619,17 @@ static inline struct futex_hash_bucket *queue_lock(struct futex_q *q)
struct futex_hash_bucket *hb;
hb = hash_futex(&q->key);
/*
* Increment the counter before taking the lock so that
* a potential waker won't miss a to-be-slept task that is
* waiting for the spinlock. This is safe as all queue_lock()
* users end up calling queue_me(). Similarly, for housekeeping,
* decrement the counter at queue_unlock() when some error has
* occurred and we don't end up adding the task to the list.
*/
hb_waiters_inc(hb);
q->lock_ptr = &hb->lock;
spin_lock(&hb->lock); /* implies MB (A) */
@ -1611,6 +1641,7 @@ queue_unlock(struct futex_hash_bucket *hb)
__releases(&hb->lock)
{
spin_unlock(&hb->lock);
hb_waiters_dec(hb);
}
/**
@ -2342,6 +2373,7 @@ int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
* Unqueue the futex_q and determine which it was.
*/
plist_del(&q->list, &hb->chain);
hb_waiters_dec(hb);
/* Handle spurious wakeups gracefully */
ret = -EWOULDBLOCK;
@ -2875,6 +2907,7 @@ static int __init futex_init(void)
futex_cmpxchg_enabled = 1;
for (i = 0; i < futex_hashsize; i++) {
atomic_set(&futex_queues[i].waiters, 0);
plist_head_init(&futex_queues[i].chain);
spin_lock_init(&futex_queues[i].lock);
}
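
The key invariant in this patch: hb_waiters_inc() happens before taking hb->lock, so a waker that reads a zero count can safely skip the bucket without locking, and it cannot miss a waiter that is merely spinning on the lock. A compressed C11 sketch of that protocol (the lock and plist are elided as comments):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int waiters;

static void queue_lock(void)
{
    atomic_fetch_add(&waiters, 1);   /* visible before we hold the lock */
    /* ... spin_lock(&hb->lock); plist_add(); ... */
}

static void unqueue(void)
{
    atomic_fetch_sub(&waiters, 1);
}

static int futex_wake(void)
{
    if (atomic_load(&waiters) == 0)
        return 0;                    /* fast path: no lock taken at all */
    /* ... spin_lock(&hb->lock); walk the plist; wake tasks ... */
    return 1;
}

int main(void)
{
    printf("wake, empty bucket -> woke %d\n", futex_wake());
    queue_lock();
    printf("wake, one waiter   -> woke %d\n", futex_wake());
    unqueue();
    return 0;
}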

View File

@ -301,14 +301,14 @@ u64 sched_clock_cpu(int cpu)
if (unlikely(!sched_clock_running))
return 0ull;
preempt_disable();
preempt_disable_notrace();
scd = cpu_sdc(cpu);
if (cpu != smp_processor_id())
clock = sched_clock_remote(scd);
else
clock = sched_clock_local(scd);
preempt_enable();
preempt_enable_notrace();
return clock;
}

View File

@ -3338,6 +3338,15 @@ recheck:
return -EPERM;
}
/*
* Can't set/change SCHED_DEADLINE policy at all for now
* (safest behavior); in the future we would like to allow
* unprivileged DL tasks to increase their relative deadline
* or reduce their runtime (both ways reducing utilization)
*/
if (dl_policy(policy))
return -EPERM;
/*
* Treat SCHED_IDLE as nice 20. Only allow a switch to
* SCHED_NORMAL if the RLIMIT_NICE would normally permit it.

View File

@ -293,7 +293,7 @@ int stop_two_cpus(unsigned int cpu1, unsigned int cpu2, cpu_stop_fn_t fn, void *
*/
smp_call_function_single(min(cpu1, cpu2),
&irq_cpu_stop_queue_work,
&call_args, 0);
&call_args, 1);
lg_local_unlock(&stop_cpus_lock);
preempt_enable();

View File

@ -27,12 +27,6 @@
DEFINE_MUTEX(event_mutex);
DEFINE_MUTEX(event_storage_mutex);
EXPORT_SYMBOL_GPL(event_storage_mutex);
char event_storage[EVENT_STORAGE_SIZE];
EXPORT_SYMBOL_GPL(event_storage);
LIST_HEAD(ftrace_events);
static LIST_HEAD(ftrace_common_fields);

Some files were not shown because too many files have changed in this diff