Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6

Felix Blyakher 2009-02-18 15:35:05 -06:00
commit 01234f3c87
158 changed files with 9968 additions and 9622 deletions

View File

@ -93,7 +93,7 @@ the PCI Express Port Bus driver from loading a service driver.
int pcie_port_service_register(struct pcie_port_service_driver *new)
This API replaces the Linux Driver Model's pci_module_init API. A
This API replaces the Linux Driver Model's pci_register_driver API. A
service driver should always call pcie_port_service_register at
module init. Note that after a service driver is loaded, calls
such as pci_enable_device(dev) and pci_set_master(dev) are no longer
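For context, a minimal sketch of what such a registration looks like from the service driver side (the struct fields shown are illustrative; a real driver also fills in its probe/remove callbacks and the port/service type):

static struct pcie_port_service_driver my_service = {
	.name = "my_service",	/* hypothetical name */
	/* .probe, .remove and the port/service type fields omitted */
};

static int __init my_service_init(void)
{
	/* register with the Port Bus driver rather than calling
	 * pci_register_driver() directly */
	return pcie_port_service_register(&my_service);
}
module_init(my_service_init);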

View File

@ -137,7 +137,7 @@ static void cn_test_timer_func(unsigned long __data)
memcpy(m + 1, data, m->len);
cn_netlink_send(m, 0, gfp_any());
cn_netlink_send(m, 0, GFP_ATOMIC);
kfree(m);
}
@ -160,10 +160,8 @@ static int cn_test_init(void)
goto err_out;
}
init_timer(&cn_test_timer);
cn_test_timer.function = cn_test_timer_func;
setup_timer(&cn_test_timer, cn_test_timer_func, 0);
cn_test_timer.expires = jiffies + HZ;
cn_test_timer.data = 0;
add_timer(&cn_test_timer);
return 0;
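The change above is a straight substitution: setup_timer() is shorthand for the open-coded three-step initialization it replaces. A minimal sketch of the equivalence, using the names from this file:

/* old form: three separate steps */
init_timer(&cn_test_timer);
cn_test_timer.function = cn_test_timer_func;
cn_test_timer.data = 0;

/* new form: one call covering all three */
setup_timer(&cn_test_timer, cn_test_timer_func, 0);

/* arming the timer is identical either way */
cn_test_timer.expires = jiffies + HZ;
add_timer(&cn_test_timer);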

View File

@ -78,12 +78,10 @@ to view your kernel log and look for "mmiotrace has lost events" warning. If
events were lost, the trace is incomplete. You should enlarge the buffers and
try again. Buffers are enlarged by first seeing how large the current buffers
are:
$ cat /debug/tracing/trace_entries
$ cat /debug/tracing/buffer_size_kb
gives you a number. Approximately double this number and write it back, for
instance:
$ echo 0 > /debug/tracing/tracing_enabled
$ echo 128000 > /debug/tracing/trace_entries
$ echo 1 > /debug/tracing/tracing_enabled
$ echo 128000 > /debug/tracing/buffer_size_kb
Then start again from the top.
If you are doing a trace for a driver project, e.g. Nouveau, you should also

View File

@ -1905,10 +1905,10 @@ W: http://gigaset307x.sourceforge.net/
S: Maintained
HARD DRIVE ACTIVE PROTECTION SYSTEM (HDAPS) DRIVER
P: Robert Love
M: rlove@rlove.org
M: linux-kernel@vger.kernel.org
W: http://www.kernel.org/pub/linux/kernel/people/rml/hdaps/
P: Frank Seidel
M: frank@f-seidel.de
L: lm-sensors@lm-sensors.org
W: http://www.kernel.org/pub/linux/kernel/people/fseidel/hdaps/
S: Maintained
GSPCA FINEPIX SUBDRIVER
@ -4925,11 +4925,11 @@ L: zd1211-devs@lists.sourceforge.net (subscribers-only)
S: Maintained
ZR36067 VIDEO FOR LINUX DRIVER
P: Ronald Bultje
M: rbultje@ronald.bitfreak.net
L: mjpeg-users@lists.sourceforge.net
L: linux-media@vger.kernel.org
W: http://mjpeg.sourceforge.net/driver-zoran/
S: Maintained
T: Mercurial http://linuxtv.org/hg/v4l-dvb
S: Odd Fixes
ZS DECSTATION Z85C30 SERIAL DRIVER
P: Maciej W. Rozycki

View File

@ -1,7 +1,7 @@
VERSION = 2
PATCHLEVEL = 6
SUBLEVEL = 29
EXTRAVERSION = -rc4
EXTRAVERSION = -rc5
NAME = Erotic Pickled Herring
# *DOCUMENTATION*
@ -389,6 +389,7 @@ PHONY += outputmakefile
# output directory.
outputmakefile:
ifneq ($(KBUILD_SRC),)
$(Q)ln -fsn $(srctree) source
$(Q)$(CONFIG_SHELL) $(srctree)/scripts/mkmakefile \
$(srctree) $(objtree) $(VERSION) $(PATCHLEVEL)
endif
@ -946,7 +947,6 @@ ifneq ($(KBUILD_SRC),)
mkdir -p include2; \
ln -fsn $(srctree)/include/asm-$(SRCARCH) include2/asm; \
fi
ln -fsn $(srctree) source
endif
# prepare2 creates a makefile if using a separate output directory

View File

@ -93,8 +93,8 @@ common_shutdown_1(void *generic_ptr)
if (cpuid != boot_cpuid) {
flags |= 0x00040000UL; /* "remain halted" */
*pflags = flags;
cpu_clear(cpuid, cpu_present_map);
cpu_clear(cpuid, cpu_possible_map);
set_cpu_present(cpuid, false);
set_cpu_possible(cpuid, false);
halt();
}
#endif
@ -120,8 +120,8 @@ common_shutdown_1(void *generic_ptr)
#ifdef CONFIG_SMP
/* Wait for the secondaries to halt. */
cpu_clear(boot_cpuid, cpu_present_map);
cpu_clear(boot_cpuid, cpu_possible_map);
set_cpu_present(boot_cpuid, false);
set_cpu_possible(boot_cpuid, false);
while (cpus_weight(cpu_present_map))
barrier();
#endif

View File

@ -120,12 +120,12 @@ void __cpuinit
smp_callin(void)
{
int cpuid = hard_smp_processor_id();
cpumask_t mask = cpu_online_map;
if (cpu_test_and_set(cpuid, mask)) {
if (cpu_online(cpuid)) {
printk("??, cpu 0x%x already present??\n", cpuid);
BUG();
}
set_cpu_online(cpuid, true);
/* Turn on machine checks. */
wrmces(7);
@ -436,8 +436,8 @@ setup_smp(void)
((char *)cpubase + i*hwrpb->processor_size);
if ((cpu->flags & 0x1cc) == 0x1cc) {
smp_num_probed++;
cpu_set(i, cpu_possible_map);
cpu_set(i, cpu_present_map);
set_cpu_possible(i, true);
set_cpu_present(i, true);
cpu->pal_revision = boot_cpu_palrev;
}
@ -470,8 +470,8 @@ smp_prepare_cpus(unsigned int max_cpus)
/* Nothing to do on a UP box, or when told not to. */
if (smp_num_probed == 1 || max_cpus == 0) {
cpu_possible_map = cpumask_of_cpu(boot_cpuid);
cpu_present_map = cpumask_of_cpu(boot_cpuid);
init_cpu_possible(cpumask_of(boot_cpuid));
init_cpu_present(cpumask_of(boot_cpuid));
printk(KERN_INFO "SMP mode deactivated.\n");
return;
}
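Both alpha files above follow the tree-wide cpumask accessor conversion; a sketch of the old-to-new mapping applied in these hunks (assembled from the diffs, not an exhaustive list):

/*
 * cpu_clear(cpu, cpu_present_map)        -> set_cpu_present(cpu, false)
 * cpu_clear(cpu, cpu_possible_map)       -> set_cpu_possible(cpu, false)
 * cpu_set(cpu, cpu_present_map)          -> set_cpu_present(cpu, true)
 * cpu_set(cpu, cpu_possible_map)         -> set_cpu_possible(cpu, true)
 * cpu_test_and_set(cpu, cpu_online_map)  -> cpu_online(cpu) test plus
 *                                           set_cpu_online(cpu, true)
 * cpu_possible_map = cpumask_of_cpu(cpu) -> init_cpu_possible(cpumask_of(cpu))
 */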

View File

@ -25,6 +25,10 @@
#include <linux/ioctl.h>
/* Select x86 specific features in <linux/kvm.h> */
#define __KVM_HAVE_IOAPIC
#define __KVM_HAVE_DEVICE_ASSIGNMENT
/* Architectural interrupt line count. */
#define KVM_NR_INTERRUPTS 256

View File

@ -1337,6 +1337,10 @@ static void kvm_release_vm_pages(struct kvm *kvm)
}
}
void kvm_arch_sync_events(struct kvm *kvm)
{
}
void kvm_arch_destroy_vm(struct kvm *kvm)
{
kvm_iommu_unmap_guest(kvm);

View File

@ -455,13 +455,18 @@ fpswa_ret_t vmm_fp_emulate(int fp_fault, void *bundle, unsigned long *ipsr,
if (!vmm_fpswa_interface)
return (fpswa_ret_t) {-1, 0, 0, 0};
/*
* Just let the fpswa driver use hardware fp registers.
* No fp register is valid in memory.
*/
memset(&fp_state, 0, sizeof(fp_state_t));
/*
* compute fp_state. only FP registers f6 - f11 are used by the
* vmm, so set those bits in the mask and set the low volatile
* pointer to point to these registers.
*/
fp_state.bitmask_low64 = 0xfc0; /* bit6..bit11 */
fp_state.fp_state_low_volatile = (fp_state_low_volatile_t *) &regs->f6;
/*
* unsigned long (*EFI_FPSWA) (
* unsigned long trap_type,
* void *Bundle,
@ -545,10 +550,6 @@ void reflect_interruption(u64 ifa, u64 isr, u64 iim,
status = vmm_handle_fpu_swa(0, regs, isr);
if (!status)
return ;
else if (-EAGAIN == status) {
vcpu_decrement_iip(vcpu);
return ;
}
break;
}

View File

@ -60,7 +60,7 @@
/* It should be preserving the high 48 bits and then specifically */
/* preserving _PAGE_SECONDARY | _PAGE_GROUP_IX */
#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | \
_PAGE_HPTEFLAGS)
_PAGE_HPTEFLAGS | _PAGE_SPECIAL)
/* Bits to mask out from a PMD to get to the PTE page */
#define PMD_MASKED_BITS 0

View File

@ -114,7 +114,7 @@ static inline struct subpage_prot_table *pgd_subpage_prot(pgd_t *pgd)
* pgprot changes
*/
#define _PAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_HPTEFLAGS | _PAGE_DIRTY | \
_PAGE_ACCESSED)
_PAGE_ACCESSED | _PAGE_SPECIAL)
/* Bits to mask out from a PMD to get to the PTE page */
#define PMD_MASKED_BITS 0x1ff

View File

@ -429,7 +429,8 @@ extern int icache_44x_need_flush;
#define PMD_PAGE_SIZE(pmd) bad_call_to_PMD_PAGE_SIZE()
#endif
#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | \
_PAGE_SPECIAL)
#define PAGE_PROT_BITS (_PAGE_GUARDED | _PAGE_COHERENT | _PAGE_NO_CACHE | \

View File

@ -646,11 +646,16 @@ static int emulate_vsx(unsigned char __user *addr, unsigned int reg,
unsigned int areg, struct pt_regs *regs,
unsigned int flags, unsigned int length)
{
char *ptr = (char *) &current->thread.TS_FPR(reg);
char *ptr;
int ret = 0;
flush_vsx_to_thread(current);
if (reg < 32)
ptr = (char *) &current->thread.TS_FPR(reg);
else
ptr = (char *) &current->thread.vr[reg - 32];
if (flags & ST)
ret = __copy_to_user(addr, ptr, length);
else {

View File

@ -125,6 +125,10 @@ static void kvmppc_free_vcpus(struct kvm *kvm)
}
}
void kvm_arch_sync_events(struct kvm *kvm)
{
}
void kvm_arch_destroy_vm(struct kvm *kvm)
{
kvmppc_free_vcpus(kvm);

View File

@ -19,6 +19,7 @@
#include <linux/notifier.h>
#include <linux/lmb.h>
#include <linux/of.h>
#include <linux/pfn.h>
#include <asm/sparsemem.h>
#include <asm/prom.h>
#include <asm/system.h>
@ -882,7 +883,7 @@ static void mark_reserved_regions_for_nid(int nid)
unsigned long physbase = lmb.reserved.region[i].base;
unsigned long size = lmb.reserved.region[i].size;
unsigned long start_pfn = physbase >> PAGE_SHIFT;
unsigned long end_pfn = ((physbase + size) >> PAGE_SHIFT);
unsigned long end_pfn = PFN_UP(physbase + size);
struct node_active_region node_ar;
unsigned long node_end_pfn = node->node_start_pfn +
node->node_spanned_pages;
@ -908,7 +909,7 @@ static void mark_reserved_regions_for_nid(int nid)
*/
if (end_pfn > node_ar.end_pfn)
reserve_size = (node_ar.end_pfn << PAGE_SHIFT)
- (start_pfn << PAGE_SHIFT);
- physbase;
/*
* Only worry about *this* node, others may not
* yet have valid NODE_DATA().

View File

@ -328,7 +328,7 @@ static int __init ps3_mm_add_memory(void)
return result;
}
core_initcall(ps3_mm_add_memory);
device_initcall(ps3_mm_add_memory);
/*============================================================================*/
/* dma routines */

View File

@ -212,6 +212,10 @@ static void kvm_free_vcpus(struct kvm *kvm)
}
}
void kvm_arch_sync_events(struct kvm *kvm)
{
}
void kvm_arch_destroy_vm(struct kvm *kvm)
{
kvm_free_vcpus(kvm);

View File

@ -174,28 +174,8 @@ config IOMMU_LEAK
Add a simple leak tracer to the IOMMU code. This is useful when you
are debugging a buggy device driver that leaks IOMMU mappings.
config MMIOTRACE
bool "Memory mapped IO tracing"
depends on DEBUG_KERNEL && PCI
select TRACING
help
Mmiotrace traces Memory Mapped I/O access and is meant for
debugging and reverse engineering. It is called from the ioremap
implementation and works via page faults. Tracing is disabled by
default and can be enabled at run-time.
See Documentation/tracers/mmiotrace.txt.
If you are not helping to develop drivers, say N.
config MMIOTRACE_TEST
tristate "Test module for mmiotrace"
depends on MMIOTRACE && m
help
This is a dumb module for testing mmiotrace. It is very dangerous
as it will write garbage to IO memory starting at a given address.
However, it should be safe to use on e.g. unused portion of VRAM.
Say N, unless you absolutely know what you are doing.
config HAVE_MMIOTRACE_SUPPORT
def_bool y
#
# IO delay types:

View File

@ -9,6 +9,13 @@
#include <linux/types.h>
#include <linux/ioctl.h>
/* Select x86 specific features in <linux/kvm.h> */
#define __KVM_HAVE_PIT
#define __KVM_HAVE_IOAPIC
#define __KVM_HAVE_DEVICE_ASSIGNMENT
#define __KVM_HAVE_MSI
#define __KVM_HAVE_USER_NMI
/* Architectural interrupt line count. */
#define KVM_NR_INTERRUPTS 256

View File

@ -57,7 +57,6 @@ typedef struct { pgdval_t pgd; } pgd_t;
typedef struct { pgprotval_t pgprot; } pgprot_t;
extern int page_is_ram(unsigned long pagenr);
extern int pagerange_is_ram(unsigned long start, unsigned long end);
extern int devmem_is_allowed(unsigned long pagenr);
extern void map_devmem(unsigned long pfn, unsigned long size,
pgprot_t vma_prot);

View File

@ -1352,14 +1352,7 @@ static inline void arch_leave_lazy_cpu_mode(void)
PVOP_VCALL0(pv_cpu_ops.lazy_mode.leave);
}
static inline void arch_flush_lazy_cpu_mode(void)
{
if (unlikely(paravirt_get_lazy_mode() == PARAVIRT_LAZY_CPU)) {
arch_leave_lazy_cpu_mode();
arch_enter_lazy_cpu_mode();
}
}
void arch_flush_lazy_cpu_mode(void);
#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
static inline void arch_enter_lazy_mmu_mode(void)
@ -1372,13 +1365,7 @@ static inline void arch_leave_lazy_mmu_mode(void)
PVOP_VCALL0(pv_mmu_ops.lazy_mode.leave);
}
static inline void arch_flush_lazy_mmu_mode(void)
{
if (unlikely(paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU)) {
arch_leave_lazy_mmu_mode();
arch_enter_lazy_mmu_mode();
}
}
void arch_flush_lazy_mmu_mode(void);
static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
unsigned long phys, pgprot_t flags)

View File

@ -1157,8 +1157,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
data->cpu = pol->cpu;
data->currpstate = HW_PSTATE_INVALID;
rc = powernow_k8_cpu_init_acpi(data);
if (rc) {
if (powernow_k8_cpu_init_acpi(data)) {
/*
* Use the PSB BIOS structure. This is only available on
* an UP version, and is deprecated by AMD.
@ -1176,17 +1175,20 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
"ACPI maintainers and complain to your BIOS "
"vendor.\n");
#endif
goto err_out;
kfree(data);
return -ENODEV;
}
if (pol->cpu != 0) {
printk(KERN_ERR FW_BUG PFX "No ACPI _PSS objects for "
"CPU other than CPU0. Complain to your BIOS "
"vendor.\n");
goto err_out;
kfree(data);
return -ENODEV;
}
rc = find_psb_table(data);
if (rc) {
goto err_out;
kfree(data);
return -ENODEV;
}
/* Take a crude guess here.
* That guess was in microseconds, so multiply with 1000 */

View File

@ -269,6 +269,8 @@ static void hpet_set_mode(enum clock_event_mode mode,
now = hpet_readl(HPET_COUNTER);
cmp = now + (unsigned long) delta;
cfg = hpet_readl(HPET_Tn_CFG(timer));
/* Make sure we use edge triggered interrupts */
cfg &= ~HPET_TN_LEVEL;
cfg |= HPET_TN_ENABLE | HPET_TN_PERIODIC |
HPET_TN_SETVAL | HPET_TN_32BIT;
hpet_writel(cfg, HPET_Tn_CFG(timer));

View File

@ -203,7 +203,7 @@ static void __init platform_detect(void)
static void __init platform_detect(void)
{
/* stopgap until OFW support is added to the kernel */
olpc_platform_info.boardrev = 0xc2;
olpc_platform_info.boardrev = olpc_board(0xc2);
}
#endif

View File

@ -268,6 +268,32 @@ enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
return __get_cpu_var(paravirt_lazy_mode);
}
void arch_flush_lazy_mmu_mode(void)
{
preempt_disable();
if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
WARN_ON(preempt_count() == 1);
arch_leave_lazy_mmu_mode();
arch_enter_lazy_mmu_mode();
}
preempt_enable();
}
void arch_flush_lazy_cpu_mode(void)
{
preempt_disable();
if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_CPU) {
WARN_ON(preempt_count() == 1);
arch_leave_lazy_cpu_mode();
arch_enter_lazy_cpu_mode();
}
preempt_enable();
}
struct pv_info pv_info = {
.name = "bare hardware",
.paravirt_enabled = 0,

View File

@ -810,12 +810,16 @@ static void ptrace_bts_untrace(struct task_struct *child)
static void ptrace_bts_detach(struct task_struct *child)
{
if (unlikely(child->bts)) {
ds_release_bts(child->bts);
child->bts = NULL;
ptrace_bts_free_buffer(child);
}
/*
* Ptrace_detach() races with ptrace_untrace() in case
* the child dies and is reaped by another thread.
*
* We only do the memory accounting at this point and
* leave the buffer deallocation and the bts tracer
* release to ptrace_bts_untrace() which will be called
* later on with tasklist_lock held.
*/
release_locked_buffer(child->bts_buffer, child->bts_size);
}
#else
static inline void ptrace_bts_fork(struct task_struct *tsk) {}

View File

@ -99,6 +99,12 @@ static inline void preempt_conditional_sti(struct pt_regs *regs)
local_irq_enable();
}
static inline void conditional_cli(struct pt_regs *regs)
{
if (regs->flags & X86_EFLAGS_IF)
local_irq_disable();
}
static inline void preempt_conditional_cli(struct pt_regs *regs)
{
if (regs->flags & X86_EFLAGS_IF)
@ -626,8 +632,10 @@ clear_dr7:
#ifdef CONFIG_X86_32
debug_vm86:
/* reenable preemption: handle_vm86_trap() might sleep */
dec_preempt_count();
handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, 1);
preempt_conditional_cli(regs);
conditional_cli(regs);
return;
#endif

View File

@ -207,7 +207,7 @@ static int __pit_timer_fn(struct kvm_kpit_state *ps)
hrtimer_add_expires_ns(&pt->timer, pt->period);
pt->scheduled = hrtimer_get_expires_ns(&pt->timer);
if (pt->period)
ps->channels[0].count_load_time = hrtimer_get_expires(&pt->timer);
ps->channels[0].count_load_time = ktime_get();
return (pt->period == 0 ? 0 : 1);
}

View File

@ -87,13 +87,6 @@ void kvm_inject_pending_timer_irqs(struct kvm_vcpu *vcpu)
}
EXPORT_SYMBOL_GPL(kvm_inject_pending_timer_irqs);
void kvm_timer_intr_post(struct kvm_vcpu *vcpu, int vec)
{
kvm_apic_timer_intr_post(vcpu, vec);
/* TODO: PIT, RTC etc. */
}
EXPORT_SYMBOL_GPL(kvm_timer_intr_post);
void __kvm_migrate_timers(struct kvm_vcpu *vcpu)
{
__kvm_migrate_apic_timer(vcpu);

View File

@ -89,7 +89,6 @@ static inline int irqchip_in_kernel(struct kvm *kvm)
void kvm_pic_reset(struct kvm_kpic_state *s);
void kvm_timer_intr_post(struct kvm_vcpu *vcpu, int vec);
void kvm_inject_pending_timer_irqs(struct kvm_vcpu *vcpu);
void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu);
void kvm_apic_nmi_wd_deliver(struct kvm_vcpu *vcpu);

View File

@ -35,6 +35,12 @@
#include "kvm_cache_regs.h"
#include "irq.h"
#ifndef CONFIG_X86_64
#define mod_64(x, y) ((x) - (y) * div64_u64(x, y))
#else
#define mod_64(x, y) ((x) % (y))
#endif
#define PRId64 "d"
#define PRIx64 "llx"
#define PRIu64 "u"
@ -511,52 +517,22 @@ static void apic_send_ipi(struct kvm_lapic *apic)
static u32 apic_get_tmcct(struct kvm_lapic *apic)
{
u64 counter_passed;
ktime_t passed, now;
ktime_t remaining;
s64 ns;
u32 tmcct;
ASSERT(apic != NULL);
now = apic->timer.dev.base->get_time();
tmcct = apic_get_reg(apic, APIC_TMICT);
/* if initial count is 0, current count should also be 0 */
if (tmcct == 0)
if (apic_get_reg(apic, APIC_TMICT) == 0)
return 0;
if (unlikely(ktime_to_ns(now) <=
ktime_to_ns(apic->timer.last_update))) {
/* Wrap around */
passed = ktime_add(( {
(ktime_t) {
.tv64 = KTIME_MAX -
(apic->timer.last_update).tv64}; }
), now);
apic_debug("time elapsed\n");
} else
passed = ktime_sub(now, apic->timer.last_update);
remaining = hrtimer_expires_remaining(&apic->timer.dev);
if (ktime_to_ns(remaining) < 0)
remaining = ktime_set(0, 0);
counter_passed = div64_u64(ktime_to_ns(passed),
(APIC_BUS_CYCLE_NS * apic->timer.divide_count));
if (counter_passed > tmcct) {
if (unlikely(!apic_lvtt_period(apic))) {
/* one-shot timers stick at 0 until reset */
tmcct = 0;
} else {
/*
* periodic timers reset to APIC_TMICT when they
* hit 0. The while loop simulates this happening N
* times. (counter_passed %= tmcct) would also work,
* but might be slower or not work on 32-bit??
*/
while (counter_passed > tmcct)
counter_passed -= tmcct;
tmcct -= counter_passed;
}
} else {
tmcct -= counter_passed;
}
ns = mod_64(ktime_to_ns(remaining), apic->timer.period);
tmcct = div64_u64(ns, (APIC_BUS_CYCLE_NS * apic->timer.divide_count));
return tmcct;
}
@ -653,8 +629,6 @@ static void start_apic_timer(struct kvm_lapic *apic)
{
ktime_t now = apic->timer.dev.base->get_time();
apic->timer.last_update = now;
apic->timer.period = apic_get_reg(apic, APIC_TMICT) *
APIC_BUS_CYCLE_NS * apic->timer.divide_count;
atomic_set(&apic->timer.pending, 0);
@ -1110,16 +1084,6 @@ void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu)
}
}
void kvm_apic_timer_intr_post(struct kvm_vcpu *vcpu, int vec)
{
struct kvm_lapic *apic = vcpu->arch.apic;
if (apic && apic_lvt_vector(apic, APIC_LVTT) == vec)
apic->timer.last_update = ktime_add_ns(
apic->timer.last_update,
apic->timer.period);
}
int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu)
{
int vector = kvm_apic_has_interrupt(vcpu);
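The rewritten apic_get_tmcct() above leans on the mod_64() macro from the top of this diff, which emulates a 64-bit modulo on 32-bit builds where no native u64 '%' exists. A stand-alone userspace sketch of that identity (div64_u64() here is a local stand-in for the kernel helper):

#include <stdio.h>
#include <stdint.h>

/* userspace stand-in for the kernel's div64_u64() */
static uint64_t div64_u64(uint64_t dividend, uint64_t divisor)
{
	return dividend / divisor;
}

/* the 32-bit branch of mod_64() above */
#define mod_64(x, y) ((x) - (y) * div64_u64((x), (y)))

int main(void)
{
	uint64_t remaining_ns = 2750, period_ns = 1000;

	/* 2750 ns left on a 1000 ns periodic timer means 750 ns remain
	 * in the current period */
	printf("%llu\n", (unsigned long long)mod_64(remaining_ns, period_ns));
	return 0;
}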

View File

@ -12,7 +12,6 @@ struct kvm_lapic {
atomic_t pending;
s64 period; /* unit: ns */
u32 divide_count;
ktime_t last_update;
struct hrtimer dev;
} timer;
struct kvm_vcpu *vcpu;
@ -42,7 +41,6 @@ void kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data);
void kvm_apic_post_state_restore(struct kvm_vcpu *vcpu);
int kvm_lapic_enabled(struct kvm_vcpu *vcpu);
int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu);
void kvm_apic_timer_intr_post(struct kvm_vcpu *vcpu, int vec);
void kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr);
void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu);

View File

@ -1698,8 +1698,13 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
if (largepage)
spte |= PT_PAGE_SIZE_MASK;
if (mt_mask) {
mt_mask = get_memory_type(vcpu, gfn) <<
kvm_x86_ops->get_mt_mask_shift();
if (!kvm_is_mmio_pfn(pfn)) {
mt_mask = get_memory_type(vcpu, gfn) <<
kvm_x86_ops->get_mt_mask_shift();
mt_mask |= VMX_EPT_IGMT_BIT;
} else
mt_mask = MTRR_TYPE_UNCACHABLE <<
kvm_x86_ops->get_mt_mask_shift();
spte |= mt_mask;
}

View File

@ -1600,7 +1600,6 @@ static void svm_intr_assist(struct kvm_vcpu *vcpu)
/* Okay, we can deliver the interrupt: grab it and update PIC state. */
intr_vector = kvm_cpu_get_interrupt(vcpu);
svm_inject_irq(svm, intr_vector);
kvm_timer_intr_post(vcpu, intr_vector);
out:
update_cr8_intercept(vcpu);
}

View File

@ -903,6 +903,7 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
data = vmcs_readl(GUEST_SYSENTER_ESP);
break;
default:
vmx_load_host_state(to_vmx(vcpu));
msr = find_msr_entry(to_vmx(vcpu), msr_index);
if (msr) {
data = msr->data;
@ -3285,7 +3286,6 @@ static void vmx_intr_assist(struct kvm_vcpu *vcpu)
}
if (vcpu->arch.interrupt.pending) {
vmx_inject_irq(vcpu, vcpu->arch.interrupt.nr);
kvm_timer_intr_post(vcpu, vcpu->arch.interrupt.nr);
if (kvm_cpu_has_interrupt(vcpu))
enable_irq_window(vcpu);
}
@ -3687,8 +3687,7 @@ static int __init vmx_init(void)
if (vm_need_ept()) {
bypass_guest_pf = 0;
kvm_mmu_set_base_ptes(VMX_EPT_READABLE_MASK |
VMX_EPT_WRITABLE_MASK |
VMX_EPT_IGMT_BIT);
VMX_EPT_WRITABLE_MASK);
kvm_mmu_set_mask_ptes(0ull, 0ull, 0ull, 0ull,
VMX_EPT_EXECUTABLE_MASK,
VMX_EPT_DEFAULT_MT << VMX_EPT_MT_EPTE_SHIFT);

View File

@ -967,7 +967,6 @@ int kvm_dev_ioctl_check_extension(long ext)
case KVM_CAP_MMU_SHADOW_CACHE_CONTROL:
case KVM_CAP_SET_TSS_ADDR:
case KVM_CAP_EXT_CPUID:
case KVM_CAP_CLOCKSOURCE:
case KVM_CAP_PIT:
case KVM_CAP_NOP_IO_DELAY:
case KVM_CAP_MP_STATE:
@ -992,6 +991,9 @@ int kvm_dev_ioctl_check_extension(long ext)
case KVM_CAP_IOMMU:
r = iommu_found();
break;
case KVM_CAP_CLOCKSOURCE:
r = boot_cpu_has(X86_FEATURE_CONSTANT_TSC);
break;
default:
r = 0;
break;
@ -4127,9 +4129,13 @@ static void kvm_free_vcpus(struct kvm *kvm)
}
void kvm_arch_destroy_vm(struct kvm *kvm)
void kvm_arch_sync_events(struct kvm *kvm)
{
kvm_free_all_assigned_devices(kvm);
}
void kvm_arch_destroy_vm(struct kvm *kvm)
{
kvm_iommu_unmap_guest(kvm);
kvm_free_pit(kvm);
kfree(kvm->arch.vpic);

View File

@ -134,25 +134,6 @@ int page_is_ram(unsigned long pagenr)
return 0;
}
int pagerange_is_ram(unsigned long start, unsigned long end)
{
int ram_page = 0, not_rampage = 0;
unsigned long page_nr;
for (page_nr = (start >> PAGE_SHIFT); page_nr < (end >> PAGE_SHIFT);
++page_nr) {
if (page_is_ram(page_nr))
ram_page = 1;
else
not_rampage = 1;
if (ram_page == not_rampage)
return -1;
}
return ram_page;
}
/*
* Fix up the linear direct mapping of the kernel to avoid cache attribute
* conflicts.

View File

@ -575,7 +575,6 @@ static int __change_page_attr(struct cpa_data *cpa, int primary)
address = cpa->vaddr[cpa->curpage];
else
address = *cpa->vaddr;
repeat:
kpte = lookup_address(address, &level);
if (!kpte)
@ -812,6 +811,13 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,
vm_unmap_aliases();
/*
* If we're called with lazy mmu updates enabled, the
* in-memory pte state may be stale. Flush pending updates to
* bring them up to date.
*/
arch_flush_lazy_mmu_mode();
cpa.vaddr = addr;
cpa.numpages = numpages;
cpa.mask_set = mask_set;
@ -854,6 +860,13 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,
} else
cpa_flush_all(cache);
/*
* If we've been called with lazy mmu updates enabled, then
* make sure that everything gets flushed out before we
* return.
*/
arch_flush_lazy_mmu_mode();
out:
return ret;
}

View File

@ -211,6 +211,33 @@ chk_conflict(struct memtype *new, struct memtype *entry, unsigned long *type)
static struct memtype *cached_entry;
static u64 cached_start;
static int pat_pagerange_is_ram(unsigned long start, unsigned long end)
{
int ram_page = 0, not_rampage = 0;
unsigned long page_nr;
for (page_nr = (start >> PAGE_SHIFT); page_nr < (end >> PAGE_SHIFT);
++page_nr) {
/*
* For legacy reasons, the physical address range in the legacy ISA
* region is tracked as non-RAM. This allows users of
* /dev/mem to map portions of the legacy ISA region, even when
* some of those portions are listed (or not even listed) with
* different e820 types (RAM/reserved/...)
*/
if (page_nr >= (ISA_END_ADDRESS >> PAGE_SHIFT) &&
page_is_ram(page_nr))
ram_page = 1;
else
not_rampage = 1;
if (ram_page == not_rampage)
return -1;
}
return ram_page;
}
/*
* For RAM pages, mark the pages as non WB memory type using
* PageNonWB (PG_arch_1). We allow only one set_memory_uc() or
@ -336,20 +363,12 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
if (new_type)
*new_type = actual_type;
/*
* For legacy reasons, some parts of the physical address range in the
* legacy 1MB region is treated as non-RAM (even when listed as RAM in
* the e820 tables). So we will track the memory attributes of this
* legacy 1MB region using the linear memtype_list always.
*/
if (end >= ISA_END_ADDRESS) {
is_range_ram = pagerange_is_ram(start, end);
if (is_range_ram == 1)
return reserve_ram_pages_type(start, end, req_type,
new_type);
else if (is_range_ram < 0)
return -EINVAL;
}
is_range_ram = pat_pagerange_is_ram(start, end);
if (is_range_ram == 1)
return reserve_ram_pages_type(start, end, req_type,
new_type);
else if (is_range_ram < 0)
return -EINVAL;
new = kmalloc(sizeof(struct memtype), GFP_KERNEL);
if (!new)
@ -446,19 +465,11 @@ int free_memtype(u64 start, u64 end)
if (is_ISA_range(start, end - 1))
return 0;
/*
* For legacy reasons, some parts of the physical address range in the
* legacy 1MB region is treated as non-RAM (even when listed as RAM in
* the e820 tables). So we will track the memory attributes of this
* legacy 1MB region using the linear memtype_list always.
*/
if (end >= ISA_END_ADDRESS) {
is_range_ram = pagerange_is_ram(start, end);
if (is_range_ram == 1)
return free_ram_pages_type(start, end);
else if (is_range_ram < 0)
return -EINVAL;
}
is_range_ram = pat_pagerange_is_ram(start, end);
if (is_range_ram == 1)
return free_ram_pages_type(start, end);
else if (is_range_ram < 0)
return -EINVAL;
spin_lock(&memtype_lock);
list_for_each_entry(entry, &memtype_list, nd) {
@ -626,17 +637,13 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
unsigned long flags;
unsigned long want_flags = (pgprot_val(*vma_prot) & _PAGE_CACHE_MASK);
is_ram = pagerange_is_ram(paddr, paddr + size);
is_ram = pat_pagerange_is_ram(paddr, paddr + size);
if (is_ram != 0) {
/*
* For mapping RAM pages, drivers need to call
* set_memory_[uc|wc|wb] directly, for reserve and free, before
* setting up the PTE.
*/
WARN_ON_ONCE(1);
return 0;
}
/*
* reserve_pfn_range() doesn't support RAM pages.
*/
if (is_ram != 0)
return -EINVAL;
ret = reserve_memtype(paddr, paddr + size, want_flags, &flags);
if (ret)
@ -693,7 +700,7 @@ static void free_pfn_range(u64 paddr, unsigned long size)
{
int is_ram;
is_ram = pagerange_is_ram(paddr, paddr + size);
is_ram = pat_pagerange_is_ram(paddr, paddr + size);
if (is_ram == 0)
free_memtype(paddr, paddr + size);
}

View File

@ -45,7 +45,13 @@ struct priv {
static inline void setbit128_bbe(void *b, int bit)
{
__set_bit(bit ^ 0x78, b);
__set_bit(bit ^ (0x80 -
#ifdef __BIG_ENDIAN
BITS_PER_LONG
#else
BITS_PER_BYTE
#endif
), b);
}
static int setkey(struct crypto_tfm *parent, const u8 *key,
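The constant works mechanically: the low three bits of the index select the bit within a byte and the next four select the byte, so on little endian 0x80 - BITS_PER_BYTE = 0x78 flips all four byte-select bits (byte b becomes byte 15 - b), while on a 64-bit big endian build 0x80 - BITS_PER_LONG = 0x40 flips only the word-select bit and swaps the two native longs. A small userspace illustration of the little endian case:

#include <stdio.h>

int main(void)
{
	unsigned int bit;

	/* XOR with 0x78 inverts index bits 3..6 (the byte select) and
	 * leaves bits 0..2 (the bit within the byte) untouched */
	for (bit = 0; bit < 128; bit += 8)
		printf("bbe bit %3u -> native byte %2u\n",
		       bit, (bit ^ 0x78) / 8);
	return 0;
}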

View File

@ -773,18 +773,32 @@ unsigned int ata_sff_data_xfer32(struct ata_device *dev, unsigned char *buf,
else
iowrite32_rep(data_addr, buf, words);
/* Transfer trailing bytes, if any */
if (unlikely(slop)) {
__le32 pad;
unsigned char pad[4];
/* Point buf to the tail of buffer */
buf += buflen - slop;
/*
* Use io*_rep() accessors here as well to avoid pointlessly
* swapping bytes to and fro on the big endian machines...
*/
if (rw == READ) {
pad = cpu_to_le32(ioread32(ap->ioaddr.data_addr));
memcpy(buf + buflen - slop, &pad, slop);
if (slop < 3)
ioread16_rep(data_addr, pad, 1);
else
ioread32_rep(data_addr, pad, 1);
memcpy(buf, pad, slop);
} else {
memcpy(&pad, buf + buflen - slop, slop);
iowrite32(le32_to_cpu(pad), ap->ioaddr.data_addr);
memcpy(pad, buf, slop);
if (slop < 3)
iowrite16_rep(data_addr, pad, 1);
else
iowrite32_rep(data_addr, pad, 1);
}
words++;
}
return words << 2;
return (buflen + 1) & ~1;
}
EXPORT_SYMBOL_GPL(ata_sff_data_xfer32);
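A worked example of the new accounting (slop here is buflen & 3, as in the surrounding function): for buflen = 6 the word loop moves 4 bytes, the trailer now uses a single 16-bit transfer for the 2 slop bytes instead of an over-long 32-bit one, and the function reports (6 + 1) & ~1 = 6 bytes rather than the old words << 2 = 8.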

View File

@ -110,7 +110,8 @@ static const struct via_isa_bridge {
{ "vt8237s", PCI_DEVICE_ID_VIA_8237S, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST },
{ "vt8251", PCI_DEVICE_ID_VIA_8251, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST },
{ "cx700", PCI_DEVICE_ID_VIA_CX700, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST | VIA_SATA_PATA },
{ "vt6410", PCI_DEVICE_ID_VIA_6410, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST | VIA_NO_ENABLES},
{ "vt6410", PCI_DEVICE_ID_VIA_6410, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST | VIA_NO_ENABLES },
{ "vt6415", PCI_DEVICE_ID_VIA_6415, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST | VIA_NO_ENABLES },
{ "vt8237a", PCI_DEVICE_ID_VIA_8237A, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST },
{ "vt8237", PCI_DEVICE_ID_VIA_8237, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST },
{ "vt8235", PCI_DEVICE_ID_VIA_8235, 0x00, 0x2f, VIA_UDMA_133 | VIA_BAD_AST },
@ -593,6 +594,7 @@ static int via_reinit_one(struct pci_dev *pdev)
#endif
static const struct pci_device_id via[] = {
{ PCI_VDEVICE(VIA, 0x0415), },
{ PCI_VDEVICE(VIA, 0x0571), },
{ PCI_VDEVICE(VIA, 0x0581), },
{ PCI_VDEVICE(VIA, 0x1571), },

View File

@ -421,19 +421,21 @@ static struct ata_port_operations nv_generic_ops = {
.hardreset = ATA_OP_NULL,
};
/* OSDL bz3352 reports that nf2/3 controllers can't determine device
* signature reliably. Also, the following thread reports detection
* failure on cold boot with the standard debouncing timing.
/* nf2 is rife with hardreset-related problems.
*
* kernel bz#3352 reports nf2/3 controllers can't determine device
* signature reliably. The following thread reports detection failure
* on cold boot with the standard debouncing timing.
*
* http://thread.gmane.org/gmane.linux.ide/34098
*
* Debounce with hotplug timing and request follow-up SRST.
* And bz#12176 reports that hardreset simply doesn't work on nf2.
* Give up on it and just don't do hardreset.
*/
static struct ata_port_operations nv_nf2_ops = {
.inherits = &nv_common_ops,
.inherits = &nv_generic_ops,
.freeze = nv_nf2_freeze,
.thaw = nv_nf2_thaw,
.hardreset = nv_noclassify_hardreset,
};
/* For initial probing after boot and hot plugging, hardreset mostly

View File

@ -2519,8 +2519,8 @@ fore200e_load_and_start_fw(struct fore200e* fore200e)
return err;
sprintf(buf, "%s%s", fore200e->bus->proc_name, FW_EXT);
if (request_firmware(&firmware, buf, device) == 1) {
printk(FORE200E "missing %s firmware image\n", fore200e->bus->model_name);
if ((err = request_firmware(&firmware, buf, device)) < 0) {
printk(FORE200E "problem loading firmware image %s\n", fore200e->bus->model_name);
return err;
}

View File

@ -1300,7 +1300,13 @@ static const struct hid_device_id hid_blacklist[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SUNPLUS, USB_DEVICE_ID_SUNPLUS_WDESKTOP) },
{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb300) },
{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb304) },
{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb651) },
{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb654) },
{ HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED, USB_DEVICE_ID_TOPSEED_CYBERLINK) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0005) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0030) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, 0x030c) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_BT) },
@ -1605,6 +1611,7 @@ static const struct hid_device_id hid_ignore_list[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_PANJIT, 0x0002) },
{ HID_USB_DEVICE(USB_VENDOR_ID_PANJIT, 0x0003) },
{ HID_USB_DEVICE(USB_VENDOR_ID_PANJIT, 0x0004) },
{ HID_USB_DEVICE(USB_VENDOR_ID_POWERCOM, USB_DEVICE_ID_POWERCOM_UPS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SOUNDGRAPH, USB_DEVICE_ID_SOUNDGRAPH_IMON_LCD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SOUNDGRAPH, USB_DEVICE_ID_SOUNDGRAPH_IMON_LCD2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SOUNDGRAPH, USB_DEVICE_ID_SOUNDGRAPH_IMON_LCD3) },
@ -1612,10 +1619,6 @@ static const struct hid_device_id hid_ignore_list[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_SOUNDGRAPH, USB_DEVICE_ID_SOUNDGRAPH_IMON_LCD5) },
{ HID_USB_DEVICE(USB_VENDOR_ID_TENX, USB_DEVICE_ID_TENX_IBUDDY1) },
{ HID_USB_DEVICE(USB_VENDOR_ID_TENX, USB_DEVICE_ID_TENX_IBUDDY2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb300) },
{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb304) },
{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb651) },
{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb654) },
{ HID_USB_DEVICE(USB_VENDOR_ID_VERNIER, USB_DEVICE_ID_VERNIER_LABPRO) },
{ HID_USB_DEVICE(USB_VENDOR_ID_VERNIER, USB_DEVICE_ID_VERNIER_GOTEMP) },
{ HID_USB_DEVICE(USB_VENDOR_ID_VERNIER, USB_DEVICE_ID_VERNIER_SKIP) },
@ -1626,8 +1629,6 @@ static const struct hid_device_id hid_ignore_list[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_1_PHIDGETSERVO_20) },
{ HID_USB_DEVICE(USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_8_8_4_IF_KIT) },
{ HID_USB_DEVICE(USB_VENDOR_ID_YEALINK, USB_DEVICE_ID_YEALINK_P1K_P4K_B2K) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0005) },
{ HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0030) },
{ }
};

View File

@ -348,6 +348,9 @@
#define USB_VENDOR_ID_PLAYDOTCOM 0x0b43
#define USB_DEVICE_ID_PLAYDOTCOM_EMS_USBII 0x0003
#define USB_VENDOR_ID_POWERCOM 0x0d9f
#define USB_DEVICE_ID_POWERCOM_UPS 0x0002
#define USB_VENDOR_ID_SAITEK 0x06a3
#define USB_DEVICE_ID_SAITEK_RUMBLEPAD 0xff17

View File

@ -267,8 +267,10 @@ static long hidraw_ioctl(struct file *file, unsigned int cmd,
default:
{
struct hid_device *hid = dev->hid;
if (_IOC_TYPE(cmd) != 'H' || _IOC_DIR(cmd) != _IOC_READ)
return -EINVAL;
if (_IOC_TYPE(cmd) != 'H' || _IOC_DIR(cmd) != _IOC_READ) {
ret = -EINVAL;
break;
}
if (_IOC_NR(cmd) == _IOC_NR(HIDIOCGRAWNAME(0))) {
int len;
@ -277,8 +279,9 @@ static long hidraw_ioctl(struct file *file, unsigned int cmd,
len = strlen(hid->name) + 1;
if (len > _IOC_SIZE(cmd))
len = _IOC_SIZE(cmd);
return copy_to_user(user_arg, hid->name, len) ?
ret = copy_to_user(user_arg, hid->name, len) ?
-EFAULT : len;
break;
}
if (_IOC_NR(cmd) == _IOC_NR(HIDIOCGRAWPHYS(0))) {
@ -288,12 +291,13 @@ static long hidraw_ioctl(struct file *file, unsigned int cmd,
len = strlen(hid->phys) + 1;
if (len > _IOC_SIZE(cmd))
len = _IOC_SIZE(cmd);
return copy_to_user(user_arg, hid->phys, len) ?
ret = copy_to_user(user_arg, hid->phys, len) ?
-EFAULT : len;
break;
}
}
ret = -ENOTTY;
ret = -ENOTTY;
}
unlock_kernel();
return ret;

View File

@ -1872,7 +1872,7 @@ static int __init f71882fg_find(int sioaddr, unsigned short *address,
devid = superio_inw(sioaddr, SIO_REG_MANID);
if (devid != SIO_FINTEK_ID) {
printk(KERN_INFO DRVNAME ": Not a Fintek device\n");
pr_debug(DRVNAME ": Not a Fintek device\n");
goto exit;
}
@ -1932,7 +1932,7 @@ static int __init f71882fg_device_add(unsigned short address,
res.name = f71882fg_pdev->name;
err = acpi_check_resource_conflict(&res);
if (err)
return err;
goto exit_device_put;
err = platform_device_add_resources(f71882fg_pdev, &res, 1);
if (err) {

View File

@ -1262,7 +1262,7 @@ static int __init vt1211_device_add(unsigned short address)
res.name = pdev->name;
err = acpi_check_resource_conflict(&res);
if (err)
goto EXIT;
goto EXIT_DEV_PUT;
err = platform_device_add_resources(pdev, &res, 1);
if (err) {

View File

@ -1548,7 +1548,7 @@ static int __init sensors_w83627ehf_init(void)
err = acpi_check_resource_conflict(&res);
if (err)
goto exit;
goto exit_device_put;
err = platform_device_add_resources(pdev, &res, 1);
if (err) {

View File

@ -318,7 +318,6 @@ static int simple_std_setup(struct dvb_frontend *fe,
u8 *config, u8 *cb)
{
struct tuner_simple_priv *priv = fe->tuner_priv;
u8 tuneraddr;
int rc;
/* tv norm specific stuff for multi-norm tuners */
@ -387,6 +386,7 @@ static int simple_std_setup(struct dvb_frontend *fe,
case TUNER_PHILIPS_TUV1236D:
{
struct tuner_i2c_props i2c = priv->i2c_props;
/* 0x40 -> ATSC antenna input 1 */
/* 0x48 -> ATSC antenna input 2 */
/* 0x00 -> NTSC antenna input 1 */
@ -398,17 +398,15 @@ static int simple_std_setup(struct dvb_frontend *fe,
buffer[1] = 0x04;
}
/* set to the correct mode (analog or digital) */
tuneraddr = priv->i2c_props.addr;
priv->i2c_props.addr = 0x0a;
rc = tuner_i2c_xfer_send(&priv->i2c_props, &buffer[0], 2);
i2c.addr = 0x0a;
rc = tuner_i2c_xfer_send(&i2c, &buffer[0], 2);
if (2 != rc)
tuner_warn("i2c i/o error: rc == %d "
"(should be 2)\n", rc);
rc = tuner_i2c_xfer_send(&priv->i2c_props, &buffer[2], 2);
rc = tuner_i2c_xfer_send(&i2c, &buffer[2], 2);
if (2 != rc)
tuner_warn("i2c i/o error: rc == %d "
"(should be 2)\n", rc);
priv->i2c_props.addr = tuneraddr;
break;
}
}

View File

@ -364,16 +364,15 @@ static int dvb_dmxdev_section_callback(const u8 *buffer1, size_t buffer1_len,
enum dmx_success success)
{
struct dmxdev_filter *dmxdevfilter = filter->priv;
unsigned long flags;
int ret;
if (dmxdevfilter->buffer.error) {
wake_up(&dmxdevfilter->buffer.queue);
return 0;
}
spin_lock_irqsave(&dmxdevfilter->dev->lock, flags);
spin_lock(&dmxdevfilter->dev->lock);
if (dmxdevfilter->state != DMXDEV_STATE_GO) {
spin_unlock_irqrestore(&dmxdevfilter->dev->lock, flags);
spin_unlock(&dmxdevfilter->dev->lock);
return 0;
}
del_timer(&dmxdevfilter->timer);
@ -392,7 +391,7 @@ static int dvb_dmxdev_section_callback(const u8 *buffer1, size_t buffer1_len,
}
if (dmxdevfilter->params.sec.flags & DMX_ONESHOT)
dmxdevfilter->state = DMXDEV_STATE_DONE;
spin_unlock_irqrestore(&dmxdevfilter->dev->lock, flags);
spin_unlock(&dmxdevfilter->dev->lock);
wake_up(&dmxdevfilter->buffer.queue);
return 0;
}
@ -404,12 +403,11 @@ static int dvb_dmxdev_ts_callback(const u8 *buffer1, size_t buffer1_len,
{
struct dmxdev_filter *dmxdevfilter = feed->priv;
struct dvb_ringbuffer *buffer;
unsigned long flags;
int ret;
spin_lock_irqsave(&dmxdevfilter->dev->lock, flags);
spin_lock(&dmxdevfilter->dev->lock);
if (dmxdevfilter->params.pes.output == DMX_OUT_DECODER) {
spin_unlock_irqrestore(&dmxdevfilter->dev->lock, flags);
spin_unlock(&dmxdevfilter->dev->lock);
return 0;
}
@ -419,7 +417,7 @@ static int dvb_dmxdev_ts_callback(const u8 *buffer1, size_t buffer1_len,
else
buffer = &dmxdevfilter->dev->dvr_buffer;
if (buffer->error) {
spin_unlock_irqrestore(&dmxdevfilter->dev->lock, flags);
spin_unlock(&dmxdevfilter->dev->lock);
wake_up(&buffer->queue);
return 0;
}
@ -430,7 +428,7 @@ static int dvb_dmxdev_ts_callback(const u8 *buffer1, size_t buffer1_len,
dvb_ringbuffer_flush(buffer);
buffer->error = ret;
}
spin_unlock_irqrestore(&dmxdevfilter->dev->lock, flags);
spin_unlock(&dmxdevfilter->dev->lock);
wake_up(&buffer->queue);
return 0;
}

View File

@ -399,9 +399,7 @@ static void dvb_dmx_swfilter_packet(struct dvb_demux *demux, const u8 *buf)
void dvb_dmx_swfilter_packets(struct dvb_demux *demux, const u8 *buf,
size_t count)
{
unsigned long flags;
spin_lock_irqsave(&demux->lock, flags);
spin_lock(&demux->lock);
while (count--) {
if (buf[0] == 0x47)
@ -409,17 +407,16 @@ void dvb_dmx_swfilter_packets(struct dvb_demux *demux, const u8 *buf,
buf += 188;
}
spin_unlock_irqrestore(&demux->lock, flags);
spin_unlock(&demux->lock);
}
EXPORT_SYMBOL(dvb_dmx_swfilter_packets);
void dvb_dmx_swfilter(struct dvb_demux *demux, const u8 *buf, size_t count)
{
unsigned long flags;
int p = 0, i, j;
spin_lock_irqsave(&demux->lock, flags);
spin_lock(&demux->lock);
if (demux->tsbufp) {
i = demux->tsbufp;
@ -452,18 +449,17 @@ void dvb_dmx_swfilter(struct dvb_demux *demux, const u8 *buf, size_t count)
}
bailout:
spin_unlock_irqrestore(&demux->lock, flags);
spin_unlock(&demux->lock);
}
EXPORT_SYMBOL(dvb_dmx_swfilter);
void dvb_dmx_swfilter_204(struct dvb_demux *demux, const u8 *buf, size_t count)
{
unsigned long flags;
int p = 0, i, j;
u8 tmppack[188];
spin_lock_irqsave(&demux->lock, flags);
spin_lock(&demux->lock);
if (demux->tsbufp) {
i = demux->tsbufp;
@ -504,7 +500,7 @@ void dvb_dmx_swfilter_204(struct dvb_demux *demux, const u8 *buf, size_t count)
}
bailout:
spin_unlock_irqrestore(&demux->lock, flags);
spin_unlock(&demux->lock);
}
EXPORT_SYMBOL(dvb_dmx_swfilter_204);

View File

@ -98,11 +98,16 @@
* - blacklisted KWorld radio in hid-core.c and hid-ids.h
* 2008-12-03 Mark Lord <mlord@pobox.com>
* - add support for DealExtreme USB Radio
* 2009-01-31 Bob Ross <pigiron@gmx.com>
* - correction of stereo detection/setting
* - correction of signal strength indicator scaling
* 2009-01-31 Rick Bronson <rick@efn.org>
* Tobias Lorenz <tobias.lorenz@gmx.net>
* - add LED status output
*
* ToDo:
* - add firmware download/update support
* - RDS support: interrupt mode, instead of polling
* - add LED status output (check if that's not already done in firmware)
*/
@ -881,6 +886,30 @@ static int si470x_rds_on(struct si470x_device *radio)
/**************************************************************************
* General Driver Functions - LED_REPORT
**************************************************************************/
/*
* si470x_set_led_state - sets the led state
*/
static int si470x_set_led_state(struct si470x_device *radio,
unsigned char led_state)
{
unsigned char buf[LED_REPORT_SIZE];
int retval;
buf[0] = LED_REPORT;
buf[1] = LED_COMMAND;
buf[2] = led_state;
retval = si470x_set_report(radio, (void *) &buf, sizeof(buf));
return (retval < 0) ? -EINVAL : 0;
}
/**************************************************************************
* RDS Driver Functions
**************************************************************************/
@ -1385,20 +1414,22 @@ static int si470x_vidioc_g_tuner(struct file *file, void *priv,
};
/* stereo indicator == stereo (instead of mono) */
if ((radio->registers[STATUSRSSI] & STATUSRSSI_ST) == 1)
tuner->rxsubchans = V4L2_TUNER_SUB_MONO | V4L2_TUNER_SUB_STEREO;
else
if ((radio->registers[STATUSRSSI] & STATUSRSSI_ST) == 0)
tuner->rxsubchans = V4L2_TUNER_SUB_MONO;
else
tuner->rxsubchans = V4L2_TUNER_SUB_MONO | V4L2_TUNER_SUB_STEREO;
/* mono/stereo selector */
if ((radio->registers[POWERCFG] & POWERCFG_MONO) == 1)
tuner->audmode = V4L2_TUNER_MODE_MONO;
else
if ((radio->registers[POWERCFG] & POWERCFG_MONO) == 0)
tuner->audmode = V4L2_TUNER_MODE_STEREO;
else
tuner->audmode = V4L2_TUNER_MODE_MONO;
/* min is worst, max is best; signal:0..0xffff; rssi: 0..0xff */
tuner->signal = (radio->registers[STATUSRSSI] & STATUSRSSI_RSSI)
* 0x0101;
/* measured in units of dBµV in 1 dB increments (max at ~75 dBµV) */
tuner->signal = (radio->registers[STATUSRSSI] & STATUSRSSI_RSSI);
/* the ideal factor is 0xffff/75 = 873.8 */
tuner->signal = (tuner->signal * 873) + (8 * tuner->signal / 10);
/* automatic frequency control: -1: freq too low, 1: freq too high */
/* AFCRL only indicates that the freq. differs, not whether it is too low/high */
@ -1632,6 +1663,9 @@ static int si470x_usb_driver_probe(struct usb_interface *intf,
/* set initial frequency */
si470x_set_freq(radio, 87.5 * FREQ_MUL); /* available in all regions */
/* set led to connect state */
si470x_set_led_state(radio, BLINK_GREEN_LED);
/* rds buffer allocation */
radio->buf_size = rds_buf * 3;
radio->buffer = kmalloc(radio->buf_size, GFP_KERNEL);
@ -1715,6 +1749,9 @@ static void si470x_usb_driver_disconnect(struct usb_interface *intf)
cancel_delayed_work_sync(&radio->work);
usb_set_intfdata(intf, NULL);
if (radio->users == 0) {
/* set led to disconnect state */
si470x_set_led_state(radio, BLINK_ORANGE_LED);
video_unregister_device(radio->videodev);
kfree(radio->buffer);
kfree(radio);

View File

@ -422,6 +422,7 @@ static void destroy_urbs(struct gspca_dev *gspca_dev)
if (urb == NULL)
break;
BUG_ON(!gspca_dev->dev);
gspca_dev->urb[i] = NULL;
if (!gspca_dev->present)
usb_kill_urb(urb);
@ -1950,8 +1951,12 @@ void gspca_disconnect(struct usb_interface *intf)
{
struct gspca_dev *gspca_dev = usb_get_intfdata(intf);
mutex_lock(&gspca_dev->usb_lock);
gspca_dev->present = 0;
mutex_unlock(&gspca_dev->usb_lock);
destroy_urbs(gspca_dev);
gspca_dev->dev = NULL;
usb_set_intfdata(intf, NULL);
/* release the device */

View File

@ -393,7 +393,7 @@ static int ivtv_g_fmt_sliced_vbi_cap(struct file *file, void *fh, struct v4l2_fo
return 0;
}
v4l2_subdev_call(itv->sd_video, video, s_fmt, fmt);
v4l2_subdev_call(itv->sd_video, video, g_fmt, fmt);
vbifmt->service_set = ivtv_get_service_set(vbifmt);
return 0;
}
@ -1748,6 +1748,18 @@ static long ivtv_default(struct file *file, void *fh, int cmd, void *arg)
break;
}
case IVTV_IOC_DMA_FRAME:
case VIDEO_GET_PTS:
case VIDEO_GET_FRAME_COUNT:
case VIDEO_GET_EVENT:
case VIDEO_PLAY:
case VIDEO_STOP:
case VIDEO_FREEZE:
case VIDEO_CONTINUE:
case VIDEO_COMMAND:
case VIDEO_TRY_COMMAND:
return ivtv_decoder_ioctls(file, cmd, (void *)arg);
default:
return -EINVAL;
}
@ -1790,18 +1802,6 @@ static long ivtv_serialized_ioctl(struct ivtv *itv, struct file *filp,
ivtv_vapi(itv, CX2341X_DEC_SET_AUDIO_MODE, 2, itv->audio_bilingual_mode, itv->audio_stereo_mode);
return 0;
case IVTV_IOC_DMA_FRAME:
case VIDEO_GET_PTS:
case VIDEO_GET_FRAME_COUNT:
case VIDEO_GET_EVENT:
case VIDEO_PLAY:
case VIDEO_STOP:
case VIDEO_FREEZE:
case VIDEO_CONTINUE:
case VIDEO_COMMAND:
case VIDEO_TRY_COMMAND:
return ivtv_decoder_ioctls(filp, cmd, (void *)arg);
default:
break;
}

View File

@ -286,7 +286,7 @@ static int __init egpio_probe(struct platform_device *pdev)
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
goto fail;
ei->base_addr = ioremap_nocache(res->start, res->end - res->start);
ei->base_addr = ioremap_nocache(res->start, resource_size(res));
if (!ei->base_addr)
goto fail;
pr_debug("EGPIO phys=%08x virt=%p\n", (u32)res->start, ei->base_addr);
@ -307,7 +307,7 @@ static int __init egpio_probe(struct platform_device *pdev)
ei->nchips = pdata->num_chips;
ei->chip = kzalloc(sizeof(struct egpio_chip) * ei->nchips, GFP_KERNEL);
if (!ei) {
if (!ei->chip) {
ret = -ENOMEM;
goto fail;
}

View File

@ -678,6 +678,7 @@ static int __devexit pcf50633_remove(struct i2c_client *client)
static struct i2c_device_id pcf50633_id_table[] = {
{"pcf50633", 0x73},
{/* end of list */}
};
static struct i2c_driver pcf50633_driver = {

View File

@ -1050,7 +1050,7 @@ static int __devinit sm501_gpio_register_chip(struct sm501_devdata *sm,
return gpiochip_add(gchip);
}
static int sm501_register_gpio(struct sm501_devdata *sm)
static int __devinit sm501_register_gpio(struct sm501_devdata *sm)
{
struct sm501_gpio *gpio = &sm->gpio;
resource_size_t iobase = sm->io_res->start + SM501_GPIO;
@ -1321,7 +1321,7 @@ static unsigned int sm501_mem_local[] = {
* Common init code for an SM501
*/
static int sm501_init_dev(struct sm501_devdata *sm)
static int __devinit sm501_init_dev(struct sm501_devdata *sm)
{
struct sm501_initdata *idata;
struct sm501_platdata *pdata;
@ -1397,7 +1397,7 @@ static int sm501_init_dev(struct sm501_devdata *sm)
return 0;
}
static int sm501_plat_probe(struct platform_device *dev)
static int __devinit sm501_plat_probe(struct platform_device *dev)
{
struct sm501_devdata *sm;
int ret;
@ -1586,8 +1586,8 @@ static struct sm501_platdata sm501_pci_platdata = {
.gpio_base = -1,
};
static int sm501_pci_probe(struct pci_dev *dev,
const struct pci_device_id *id)
static int __devinit sm501_pci_probe(struct pci_dev *dev,
const struct pci_device_id *id)
{
struct sm501_devdata *sm;
int err;
@ -1693,7 +1693,7 @@ static void sm501_dev_remove(struct sm501_devdata *sm)
sm501_gpio_remove(sm);
}
static void sm501_pci_remove(struct pci_dev *dev)
static void __devexit sm501_pci_remove(struct pci_dev *dev)
{
struct sm501_devdata *sm = pci_get_drvdata(dev);
@ -1727,16 +1727,16 @@ static struct pci_device_id sm501_pci_tbl[] = {
MODULE_DEVICE_TABLE(pci, sm501_pci_tbl);
static struct pci_driver sm501_pci_drv = {
static struct pci_driver sm501_pci_driver = {
.name = "sm501",
.id_table = sm501_pci_tbl,
.probe = sm501_pci_probe,
.remove = sm501_pci_remove,
.remove = __devexit_p(sm501_pci_remove),
};
MODULE_ALIAS("platform:sm501");
static struct platform_driver sm501_plat_drv = {
static struct platform_driver sm501_plat_driver = {
.driver = {
.name = "sm501",
.owner = THIS_MODULE,
@ -1749,14 +1749,14 @@ static struct platform_driver sm501_plat_drv = {
static int __init sm501_base_init(void)
{
platform_driver_register(&sm501_plat_drv);
return pci_register_driver(&sm501_pci_drv);
platform_driver_register(&sm501_plat_driver);
return pci_register_driver(&sm501_pci_driver);
}
static void __exit sm501_base_exit(void)
{
platform_driver_unregister(&sm501_plat_drv);
pci_unregister_driver(&sm501_pci_drv);
platform_driver_unregister(&sm501_plat_driver);
pci_unregister_driver(&sm501_pci_driver);
}
module_init(sm501_base_init);

View File

@ -38,7 +38,7 @@
#include <linux/i2c.h>
#include <linux/i2c/twl4030.h>
#ifdef CONFIG_ARM
#if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3)
#include <mach/cpu.h>
#endif

View File

@ -1111,7 +1111,7 @@ int wm8350_read_auxadc(struct wm8350 *wm8350, int channel, int scale, int vref)
do {
schedule_timeout_interruptible(1);
reg = wm8350_reg_read(wm8350, WM8350_DIGITISER_CONTROL_1);
} while (tries-- && (reg & WM8350_AUXADC_POLL));
} while (--tries && (reg & WM8350_AUXADC_POLL));
if (!tries)
dev_err(wm8350->dev, "adc chn %d read timeout\n", channel);
@ -1297,14 +1297,29 @@ static void wm8350_client_dev_register(struct wm8350 *wm8350,
int wm8350_device_init(struct wm8350 *wm8350, int irq,
struct wm8350_platform_data *pdata)
{
int ret = -EINVAL;
int ret;
u16 id1, id2, mask_rev;
u16 cust_id, mode, chip_rev;
/* get WM8350 revision and config mode */
wm8350->read_dev(wm8350, WM8350_RESET_ID, sizeof(id1), &id1);
wm8350->read_dev(wm8350, WM8350_ID, sizeof(id2), &id2);
wm8350->read_dev(wm8350, WM8350_REVISION, sizeof(mask_rev), &mask_rev);
ret = wm8350->read_dev(wm8350, WM8350_RESET_ID, sizeof(id1), &id1);
if (ret != 0) {
dev_err(wm8350->dev, "Failed to read ID: %d\n", ret);
goto err;
}
ret = wm8350->read_dev(wm8350, WM8350_ID, sizeof(id2), &id2);
if (ret != 0) {
dev_err(wm8350->dev, "Failed to read ID: %d\n", ret);
goto err;
}
ret = wm8350->read_dev(wm8350, WM8350_REVISION, sizeof(mask_rev),
&mask_rev);
if (ret != 0) {
dev_err(wm8350->dev, "Failed to read revision: %d\n", ret);
goto err;
}
id1 = be16_to_cpu(id1);
id2 = be16_to_cpu(id2);
@ -1404,14 +1419,12 @@ int wm8350_device_init(struct wm8350 *wm8350, int irq,
return ret;
}
if (pdata && pdata->init) {
ret = pdata->init(wm8350);
if (ret != 0) {
dev_err(wm8350->dev, "Platform init() failed: %d\n",
ret);
goto err;
}
}
wm8350_reg_write(wm8350, WM8350_SYSTEM_INTERRUPTS_MASK, 0xFFFF);
wm8350_reg_write(wm8350, WM8350_INT_STATUS_1_MASK, 0xFFFF);
wm8350_reg_write(wm8350, WM8350_INT_STATUS_2_MASK, 0xFFFF);
wm8350_reg_write(wm8350, WM8350_UNDER_VOLTAGE_INT_STATUS_MASK, 0xFFFF);
wm8350_reg_write(wm8350, WM8350_GPIO_INT_STATUS_MASK, 0xFFFF);
wm8350_reg_write(wm8350, WM8350_COMPARATOR_INT_STATUS_MASK, 0xFFFF);
mutex_init(&wm8350->auxadc_mutex);
mutex_init(&wm8350->irq_mutex);
@ -1430,6 +1443,15 @@ int wm8350_device_init(struct wm8350 *wm8350, int irq,
}
wm8350->chip_irq = irq;
if (pdata && pdata->init) {
ret = pdata->init(wm8350);
if (ret != 0) {
dev_err(wm8350->dev, "Platform init() failed: %d\n",
ret);
goto err;
}
}
wm8350_reg_write(wm8350, WM8350_SYSTEM_INTERRUPTS_MASK, 0x0);
wm8350_client_dev_register(wm8350, "wm8350-codec",

View File

@ -3188,7 +3188,7 @@ const struct wm8350_reg_access wm8350_reg_io_map[] = {
{ 0x7CFF, 0x0C00, 0x7FFF }, /* R1 - ID */
{ 0x0000, 0x0000, 0x0000 }, /* R2 */
{ 0xBE3B, 0xBE3B, 0x8000 }, /* R3 - System Control 1 */
{ 0xFCF7, 0xFCF7, 0xF800 }, /* R4 - System Control 2 */
{ 0xFEF7, 0xFEF7, 0xF800 }, /* R4 - System Control 2 */
{ 0x80FF, 0x80FF, 0x8000 }, /* R5 - System Hibernate */
{ 0xFB0E, 0xFB0E, 0x0000 }, /* R6 - Interface Control */
{ 0x0000, 0x0000, 0x0000 }, /* R7 */

View File

@ -493,21 +493,27 @@ static bool receive_pcb(struct net_device *dev, pcb_struct * pcb)
}
/* read the data */
spin_lock_irqsave(&adapter->lock, flags);
i = 0;
do {
j = 0;
while (((stat = get_status(dev->base_addr)) & ACRF) == 0 && j++ < 20000);
pcb->data.raw[i++] = inb_command(dev->base_addr);
if (i > MAX_PCB_DATA)
INVALID_PCB_MSG(i);
} while ((stat & ASF_PCB_MASK) != ASF_PCB_END && j < 20000);
for (i = 0; i < MAX_PCB_DATA; i++) {
for (j = 0; j < 20000; j++) {
stat = get_status(dev->base_addr);
if (stat & ACRF)
break;
}
pcb->data.raw[i] = inb_command(dev->base_addr);
if ((stat & ASF_PCB_MASK) == ASF_PCB_END || j >= 20000)
break;
}
spin_unlock_irqrestore(&adapter->lock, flags);
if (i >= MAX_PCB_DATA) {
INVALID_PCB_MSG(i);
return false;
}
if (j >= 20000) {
TIMEOUT_MSG(__LINE__);
return false;
}
/* woops, the last "data" byte was really the length! */
total_length = pcb->data.raw[--i];
/* the last "data" byte was really the length! */
total_length = pcb->data.raw[i];
/* safety check total length vs data length */
if (total_length != (pcb->length + 2)) {

View File

@ -1,6 +1,6 @@
/* bnx2.c: Broadcom NX2 network driver.
*
* Copyright (c) 2004-2008 Broadcom Corporation
* Copyright (c) 2004-2009 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@ -57,8 +57,8 @@
#define DRV_MODULE_NAME "bnx2"
#define PFX DRV_MODULE_NAME ": "
#define DRV_MODULE_VERSION "1.9.0"
#define DRV_MODULE_RELDATE "Dec 16, 2008"
#define DRV_MODULE_VERSION "1.9.2"
#define DRV_MODULE_RELDATE "Feb 11, 2009"
#define RUN_AT(x) (jiffies + (x))
@ -2910,18 +2910,8 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
rx_hdr = (struct l2_fhdr *) skb->data;
len = rx_hdr->l2_fhdr_pkt_len;
status = rx_hdr->l2_fhdr_status;
if ((status = rx_hdr->l2_fhdr_status) &
(L2_FHDR_ERRORS_BAD_CRC |
L2_FHDR_ERRORS_PHY_DECODE |
L2_FHDR_ERRORS_ALIGNMENT |
L2_FHDR_ERRORS_TOO_SHORT |
L2_FHDR_ERRORS_GIANT_FRAME)) {
bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
sw_ring_prod);
goto next_rx;
}
hdr_len = 0;
if (status & L2_FHDR_STATUS_SPLIT) {
hdr_len = rx_hdr->l2_fhdr_ip_xsum;
@ -2931,6 +2921,24 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
pg_ring_used = 1;
}
if (unlikely(status & (L2_FHDR_ERRORS_BAD_CRC |
L2_FHDR_ERRORS_PHY_DECODE |
L2_FHDR_ERRORS_ALIGNMENT |
L2_FHDR_ERRORS_TOO_SHORT |
L2_FHDR_ERRORS_GIANT_FRAME))) {
bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
sw_ring_prod);
if (pg_ring_used) {
int pages;
pages = PAGE_ALIGN(len - hdr_len) >> PAGE_SHIFT;
bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
}
goto next_rx;
}
len -= 4;
if (len <= bp->rx_copy_thresh) {

View File

@ -1,6 +1,6 @@
/* bnx2.h: Broadcom NX2 network driver.
*
* Copyright (c) 2004-2007 Broadcom Corporation
* Copyright (c) 2004-2009 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -585,7 +585,7 @@ static int mcs_speed_change(struct mcs_cb *mcs)
mcs_get_reg(mcs, MCS_RESV_REG, &rval);
} while(cnt++ < 100 && (rval & MCS_IRINTX));
if(cnt >= 100) {
if (cnt > 100) {
IRDA_ERROR("unable to change speed\n");
ret = -EIO;
goto error;

View File

@ -201,9 +201,9 @@ static int nx_set_dma_mask(struct netxen_adapter *adapter, uint8_t revision_id)
adapter->pci_using_dac = 1;
return 0;
}
set_32_bit_mask:
#endif /* CONFIG_IA64 */
set_32_bit_mask:
err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
if (!err)
err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
@ -372,67 +372,6 @@ static void netxen_set_port_mode(struct netxen_adapter *adapter)
}
}
#define PCI_CAP_ID_GEN 0x10
static void netxen_pcie_strap_init(struct netxen_adapter *adapter)
{
u32 pdevfuncsave;
u32 c8c9value = 0;
u32 chicken = 0;
u32 control = 0;
int i, pos;
struct pci_dev *pdev;
pdev = adapter->pdev;
adapter->hw_read_wx(adapter,
NETXEN_PCIE_REG(PCIE_CHICKEN3), &chicken, 4);
/* clear chicken3.25:24 */
chicken &= 0xFCFFFFFF;
/*
* if gen1 and B0, set F1020 - if gen 2, do nothing
* if gen2 set to F1000
*/
pos = pci_find_capability(pdev, PCI_CAP_ID_GEN);
if (pos == 0xC0) {
pci_read_config_dword(pdev, pos + 0x10, &control);
if ((control & 0x000F0000) != 0x00020000) {
/* set chicken3.24 if gen1 */
chicken |= 0x01000000;
}
printk(KERN_INFO "%s Gen2 strapping detected\n",
netxen_nic_driver_name);
c8c9value = 0xF1000;
} else {
/* set chicken3.24 if gen1 */
chicken |= 0x01000000;
printk(KERN_INFO "%s Gen1 strapping detected\n",
netxen_nic_driver_name);
if (adapter->ahw.revision_id == NX_P3_B0)
c8c9value = 0xF1020;
else
c8c9value = 0;
}
adapter->hw_write_wx(adapter,
NETXEN_PCIE_REG(PCIE_CHICKEN3), &chicken, 4);
if (!c8c9value)
return;
pdevfuncsave = pdev->devfn;
if (pdevfuncsave & 0x07)
return;
for (i = 0; i < 8; i++) {
pci_read_config_dword(pdev, pos + 8, &control);
pci_read_config_dword(pdev, pos + 8, &control);
pci_write_config_dword(pdev, pos + 8, c8c9value);
pdev->devfn++;
}
pdev->devfn = pdevfuncsave;
}
static void netxen_set_msix_bit(struct pci_dev *pdev, int enable)
{
u32 control;
@ -812,9 +751,6 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
}
netxen_load_firmware(adapter);
if (NX_IS_REVISION_P3(revision_id))
netxen_pcie_strap_init(adapter);
if (NX_IS_REVISION_P2(revision_id)) {
/* Initialize multicast addr pool owners */

View File

@@ -125,6 +125,8 @@ static int __devinit mdio_gpio_bus_init(struct device *dev,
if (gpio_request(bitbang->mdio, "mdio"))
goto out_free_mdc;
gpio_direction_output(bitbang->mdc, 0);
dev_set_drvdata(dev, new_bus);
ret = mdiobus_register(new_bus);

View File

@@ -898,6 +898,7 @@ static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
lbq_desc->index);
lbq_desc->p.lbq_page = alloc_page(GFP_ATOMIC);
if (lbq_desc->p.lbq_page == NULL) {
rx_ring->lbq_clean_idx = clean_idx;
QPRINTK(qdev, RX_STATUS, ERR,
"Couldn't get a page.\n");
return;
@@ -907,6 +908,9 @@ static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
0, PAGE_SIZE,
PCI_DMA_FROMDEVICE);
if (pci_dma_mapping_error(qdev->pdev, map)) {
rx_ring->lbq_clean_idx = clean_idx;
put_page(lbq_desc->p.lbq_page);
lbq_desc->p.lbq_page = NULL;
QPRINTK(qdev, RX_STATUS, ERR,
"PCI mapping failed.\n");
return;
@@ -968,6 +972,8 @@ static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
if (pci_dma_mapping_error(qdev->pdev, map)) {
QPRINTK(qdev, IFUP, ERR, "PCI mapping failed.\n");
rx_ring->sbq_clean_idx = clean_idx;
dev_kfree_skb_any(sbq_desc->p.skb);
sbq_desc->p.skb = NULL;
return;
}
pci_unmap_addr_set(sbq_desc, mapaddr, map);
@@ -1449,12 +1455,12 @@ static void ql_process_mac_rx_intr(struct ql_adapter *qdev,
if (qdev->vlgrp && (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V)) {
QPRINTK(qdev, RX_STATUS, DEBUG,
"Passing a VLAN packet upstream.\n");
vlan_hwaccel_rx(skb, qdev->vlgrp,
vlan_hwaccel_receive_skb(skb, qdev->vlgrp,
le16_to_cpu(ib_mac_rsp->vlan_id));
} else {
QPRINTK(qdev, RX_STATUS, DEBUG,
"Passing a normal packet upstream.\n");
netif_rx(skb);
netif_receive_skb(skb);
}
}
@@ -1511,6 +1517,11 @@ void ql_queue_asic_error(struct ql_adapter *qdev)
netif_stop_queue(qdev->ndev);
netif_carrier_off(qdev->ndev);
ql_disable_interrupts(qdev);
/* Clear adapter up bit to signal the recovery
* process that it shouldn't kill the reset worker
* thread
*/
clear_bit(QL_ADAPTER_UP, &qdev->flags);
queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
}
@@ -1927,10 +1938,6 @@ static int qlge_send(struct sk_buff *skb, struct net_device *ndev)
tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
mac_iocb_ptr = tx_ring_desc->queue_entry;
memset((void *)mac_iocb_ptr, 0, sizeof(mac_iocb_ptr));
if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) != NETDEV_TX_OK) {
QPRINTK(qdev, TX_QUEUED, ERR, "Could not map the segments.\n");
return NETDEV_TX_BUSY;
}
mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
mac_iocb_ptr->tid = tx_ring_desc->index;
@@ -1956,6 +1963,12 @@ static int qlge_send(struct sk_buff *skb, struct net_device *ndev)
ql_hw_csum_setup(skb,
(struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
}
if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
NETDEV_TX_OK) {
QPRINTK(qdev, TX_QUEUED, ERR,
"Could not map the segments.\n");
return NETDEV_TX_BUSY;
}
QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
tx_ring->prod_idx++;
if (tx_ring->prod_idx == tx_ring->wq_len)
@@ -2873,8 +2886,8 @@ static int ql_start_rss(struct ql_adapter *qdev)
/*
* Fill out the Indirection Table.
*/
for (i = 0; i < 32; i++)
hash_id[i] = i & 1;
for (i = 0; i < 256; i++)
hash_id[i] = i & (qdev->rss_ring_count - 1);
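/* A worked example (assuming qdev->rss_ring_count is a power of two,
 * which the mask form requires): with rss_ring_count == 4, i & 3 fills
 * the 256 entries 0,1,2,3,0,1,2,3,... spreading flows over all four
 * rings, where the old i & 1 over 32 entries only ever hit rings 0-1. */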
/*
* Random values for the IPv6 and IPv4 Hash Keys.
@@ -3100,7 +3113,11 @@ static int ql_adapter_down(struct ql_adapter *qdev)
netif_stop_queue(ndev);
netif_carrier_off(ndev);
cancel_delayed_work_sync(&qdev->asic_reset_work);
/* Don't kill the reset worker thread if we
* are in the process of recovery.
*/
if (test_bit(QL_ADAPTER_UP, &qdev->flags))
cancel_delayed_work_sync(&qdev->asic_reset_work);
cancel_delayed_work_sync(&qdev->mpi_reset_work);
cancel_delayed_work_sync(&qdev->mpi_work);
@@ -3501,7 +3518,7 @@ static int qlge_set_mac_address(struct net_device *ndev, void *p)
static void qlge_tx_timeout(struct net_device *ndev)
{
struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev);
queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
ql_queue_asic_error(qdev);
}
static void ql_asic_reset_work(struct work_struct *work)

View File

@@ -428,7 +428,7 @@ static int lance_open( struct net_device *dev )
while (--i > 0)
if (DREG & CSR0_IDON)
break;
if (i < 0 || (DREG & CSR0_ERR)) {
if (i <= 0 || (DREG & CSR0_ERR)) {
DPRINTK( 2, ( "lance_open(): opening %s failed, i=%d, csr0=%04x\n",
dev->name, i, DREG ));
DREG = CSR0_STOP;

View File

@@ -2543,25 +2543,36 @@ static struct quattro * __devinit quattro_sbus_find(struct of_device *child)
}
/* After all quattro cards have been probed, we call these functions
* to register the IRQ handlers.
* to register the IRQ handlers for the cards that have been
* successfully probed and skip the cards that failed to initialize
*/
static void __init quattro_sbus_register_irqs(void)
static int __init quattro_sbus_register_irqs(void)
{
struct quattro *qp;
for (qp = qfe_sbus_list; qp != NULL; qp = qp->next) {
struct of_device *op = qp->quattro_dev;
int err;
int err, qfe_slot, skip = 0;
for (qfe_slot = 0; qfe_slot < 4; qfe_slot++) {
if (!qp->happy_meals[qfe_slot])
skip = 1;
}
if (skip)
continue;
err = request_irq(op->irqs[0],
quattro_sbus_interrupt,
IRQF_SHARED, "Quattro",
qp);
if (err != 0) {
printk(KERN_ERR "Quattro: Fatal IRQ registery error %d.\n", err);
panic("QFE request irq");
printk(KERN_ERR "Quattro HME: IRQ registration "
"error %d.\n", err);
return err;
}
}
return 0;
}
static void quattro_sbus_free_irqs(void)
@@ -2570,6 +2581,14 @@ static void quattro_sbus_free_irqs(void)
for (qp = qfe_sbus_list; qp != NULL; qp = qp->next) {
struct of_device *op = qp->quattro_dev;
int qfe_slot, skip = 0;
for (qfe_slot = 0; qfe_slot < 4; qfe_slot++) {
if (!qp->happy_meals[qfe_slot])
skip = 1;
}
if (skip)
continue;
free_irq(op->irqs[0], qp);
}
@@ -2828,6 +2847,9 @@ err_out_iounmap:
if (hp->tcvregs)
of_iounmap(&op->resource[4], hp->tcvregs, TCVR_REG_SIZE);
if (qp)
qp->happy_meals[qfe_slot] = NULL;
err_out_free_netdev:
free_netdev(dev);
@@ -3285,7 +3307,7 @@ static int __init happy_meal_sbus_init(void)
err = of_register_driver(&hme_sbus_driver, &of_bus_type);
if (!err)
quattro_sbus_register_irqs();
err = quattro_sbus_register_irqs();
return err;
}

View File

@@ -852,7 +852,7 @@ static int tg3_bmcr_reset(struct tg3 *tp)
}
udelay(10);
}
if (limit <= 0)
if (limit < 0)
return -EBUSY;
return 0;
@@ -1603,7 +1603,7 @@ static int tg3_wait_macro_done(struct tg3 *tp)
break;
}
}
if (limit <= 0)
if (limit < 0)
return -EBUSY;
return 0;
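/* These two tg3 hunks and the mcs_speed_change()/lance_open() fixes above
 * are the same off-by-one family: the post-loop test must match the loop
 * form. A minimal sketch, assuming a post-decrement countdown as in the
 * tg3 loops:
 *
 *	int limit = 5000;
 *	while (limit--) {
 *		if (ready())		// hypothetical exit condition
 *			break;
 *		udelay(10);
 *	}
 *	// break on the final pass leaves limit == 0; exhaustion leaves
 *	// limit == -1, so only limit < 0 means timeout.
 *
 * With while (--i > 0), as in lance_open(), exhaustion exits with i == 0,
 * so there the timeout test must be i <= 0 instead.
 */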

View File

@@ -1098,6 +1098,42 @@ ath5k_hw_to_driver_rix(struct ath5k_softc *sc, int hw_rix)
* Buffers setup *
\***************/
static
struct sk_buff *ath5k_rx_skb_alloc(struct ath5k_softc *sc, dma_addr_t *skb_addr)
{
struct sk_buff *skb;
unsigned int off;
/*
* Allocate buffer with headroom_needed space for the
* fake physical layer header at the start.
*/
skb = dev_alloc_skb(sc->rxbufsize + sc->cachelsz - 1);
if (!skb) {
ATH5K_ERR(sc, "can't alloc skbuff of size %u\n",
sc->rxbufsize + sc->cachelsz - 1);
return NULL;
}
/*
* Cache-line-align. This is important (for the
* 5210 at least) as not doing so causes bogus data
* in rx'd frames.
*/
off = ((unsigned long)skb->data) % sc->cachelsz;
if (off != 0)
skb_reserve(skb, sc->cachelsz - off);
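/* Worked example of the alignment above, assuming sc->cachelsz == 32:
 * if skb->data % 32 == 20, skb_reserve(skb, 32 - 20) advances the data
 * pointer 12 bytes to the next cache-line boundary; when off == 0 the
 * buffer is already aligned and nothing is reserved. */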
*skb_addr = pci_map_single(sc->pdev,
skb->data, sc->rxbufsize, PCI_DMA_FROMDEVICE);
if (unlikely(pci_dma_mapping_error(sc->pdev, *skb_addr))) {
ATH5K_ERR(sc, "%s: DMA mapping failed\n", __func__);
dev_kfree_skb(skb);
return NULL;
}
return skb;
}
static int
ath5k_rxbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
{
@@ -1105,37 +1141,11 @@ ath5k_rxbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
struct sk_buff *skb = bf->skb;
struct ath5k_desc *ds;
if (likely(skb == NULL)) {
unsigned int off;
/*
* Allocate buffer with headroom_needed space for the
* fake physical layer header at the start.
*/
skb = dev_alloc_skb(sc->rxbufsize + sc->cachelsz - 1);
if (unlikely(skb == NULL)) {
ATH5K_ERR(sc, "can't alloc skbuff of size %u\n",
sc->rxbufsize + sc->cachelsz - 1);
if (!skb) {
skb = ath5k_rx_skb_alloc(sc, &bf->skbaddr);
if (!skb)
return -ENOMEM;
}
/*
* Cache-line-align. This is important (for the
* 5210 at least) as not doing so causes bogus data
* in rx'd frames.
*/
off = ((unsigned long)skb->data) % sc->cachelsz;
if (off != 0)
skb_reserve(skb, sc->cachelsz - off);
bf->skb = skb;
bf->skbaddr = pci_map_single(sc->pdev,
skb->data, sc->rxbufsize, PCI_DMA_FROMDEVICE);
if (unlikely(pci_dma_mapping_error(sc->pdev, bf->skbaddr))) {
ATH5K_ERR(sc, "%s: DMA mapping failed\n", __func__);
dev_kfree_skb(skb);
bf->skb = NULL;
return -ENOMEM;
}
}
/*
@@ -1664,7 +1674,8 @@ ath5k_tasklet_rx(unsigned long data)
{
struct ieee80211_rx_status rxs = {};
struct ath5k_rx_status rs = {};
struct sk_buff *skb;
struct sk_buff *skb, *next_skb;
dma_addr_t next_skb_addr;
struct ath5k_softc *sc = (void *)data;
struct ath5k_buf *bf, *bf_last;
struct ath5k_desc *ds;
@@ -1749,10 +1760,17 @@ ath5k_tasklet_rx(unsigned long data)
goto next;
}
accept:
next_skb = ath5k_rx_skb_alloc(sc, &next_skb_addr);
/*
* If we can't replace bf->skb with a new skb under memory
* pressure, just skip this packet
*/
if (!next_skb)
goto next;
pci_unmap_single(sc->pdev, bf->skbaddr, sc->rxbufsize,
PCI_DMA_FROMDEVICE);
bf->skb = NULL;
skb_put(skb, rs.rs_datalen);
/* The MAC header is padded to have 32-bit boundary if the
@@ -1825,6 +1843,9 @@ accept:
ath5k_check_ibss_tsf(sc, skb, &rxs);
__ieee80211_rx(sc->hw, skb, &rxs);
bf->skb = next_skb;
bf->skbaddr = next_skb_addr;
next:
list_move_tail(&bf->list, &sc->rxbuf);
} while (ath5k_rxbuf_setup(sc, bf) == 0);

View File

@@ -4042,7 +4042,19 @@ static int iwl_pci_suspend(struct pci_dev *pdev, pm_message_t state)
priv->is_open = 1;
}
pci_save_state(pdev);
/* pci driver assumes state will be saved in this function.
* pci state is saved and device disabled when interface is
* stopped, so at this time pci device will always be disabled -
* whether interface was started or not. saving pci state now will
* cause saved state be that of a disabled device, which will cause
* problems during resume in that we will end up with a disabled device.
*
* indicate that the current saved state (from when interface was
* stopped) is valid. if interface was never up at time of suspend
* then the saved state will still be valid as it was saved during
* .probe. */
pdev->state_saved = true;
pci_set_power_state(pdev, PCI_D3hot);
return 0;
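/* A condensed sketch of the suspend pattern this hunk establishes
 * (hypothetical foo_ names; state_saved is the real struct pci_dev
 * field used above):
 *
 *	static int foo_pci_suspend(struct pci_dev *pdev, pm_message_t state)
 *	{
 *		// PCI state was already saved by pci_save_state() when
 *		// the interface was stopped, or during .probe
 *		pdev->state_saved = true;	// mark that copy as valid
 *		pci_set_power_state(pdev, PCI_D3hot);
 *		return 0;
 *	}
 */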
@@ -4053,7 +4065,6 @@ static int iwl_pci_resume(struct pci_dev *pdev)
struct iwl_priv *priv = pci_get_drvdata(pdev);
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
if (priv->is_open)
iwl_mac_start(priv->hw);

View File

@@ -8143,7 +8143,19 @@ static int iwl3945_pci_suspend(struct pci_dev *pdev, pm_message_t state)
priv->is_open = 1;
}
pci_save_state(pdev);
/* pci driver assumes state will be saved in this function.
* pci state is saved and device disabled when interface is
* stopped, so at this time pci device will always be disabled -
* whether interface was started or not. saving pci state now will
* cause saved state be that of a disabled device, which will cause
* problems during resume in that we will end up with a disabled device.
*
* indicate that the current saved state (from when interface was
* stopped) is valid. if interface was never up at time of suspend
* then the saved state will still be valid as it was saved during
* .probe. */
pdev->state_saved = true;
pci_set_power_state(pdev, PCI_D3hot);
return 0;
@@ -8154,7 +8166,6 @@ static int iwl3945_pci_resume(struct pci_dev *pdev)
struct iwl3945_priv *priv = pci_get_drvdata(pdev);
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
if (priv->is_open)
iwl3945_mac_start(priv->hw);

View File

@@ -86,6 +86,7 @@ int zd_rf_init_hw(struct zd_rf *rf, u8 type)
case AL7230B_RF:
r = zd_rf_init_al7230b(rf);
break;
case MAXIM_NEW_RF:
case UW2453_RF:
r = zd_rf_init_uw2453(rf);
break;

View File

@@ -37,6 +37,7 @@
static struct usb_device_id usb_ids[] = {
/* ZD1211 */
{ USB_DEVICE(0x0ace, 0x1211), .driver_info = DEVICE_ZD1211 },
{ USB_DEVICE(0x0ace, 0xa211), .driver_info = DEVICE_ZD1211 },
{ USB_DEVICE(0x07b8, 0x6001), .driver_info = DEVICE_ZD1211 },
{ USB_DEVICE(0x126f, 0xa006), .driver_info = DEVICE_ZD1211 },
{ USB_DEVICE(0x6891, 0xa727), .driver_info = DEVICE_ZD1211 },

View File

@@ -61,6 +61,8 @@
/* global iommu list, set NULL for ignored DMAR units */
static struct intel_iommu **g_iommus;
static int rwbf_quirk;
/*
* 0: Present
* 1-11: Reserved
@@ -785,7 +787,7 @@ static void iommu_flush_write_buffer(struct intel_iommu *iommu)
u32 val;
unsigned long flag;
if (!cap_rwbf(iommu->cap))
if (!rwbf_quirk && !cap_rwbf(iommu->cap))
return;
val = iommu->gcmd | DMA_GCMD_WBF;
@@ -3137,3 +3139,15 @@ static struct iommu_ops intel_iommu_ops = {
.unmap = intel_iommu_unmap_range,
.iova_to_phys = intel_iommu_iova_to_phys,
};
static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)
{
/*
* Mobile 4 Series Chipset neglects to set RWBF capability,
* but needs it:
*/
printk(KERN_INFO "DMAR: Forcing write-buffer flush capability\n");
rwbf_quirk = 1;
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x2a40, quirk_iommu_rwbf);

View File

@@ -103,14 +103,12 @@ static void msix_set_enable(struct pci_dev *dev, int enable)
}
}
/*
* Essentially, this is ((1 << (1 << x)) - 1), but without the
* undefinedness of a << 32.
*/
static inline __attribute_const__ u32 msi_mask(unsigned x)
{
static const u32 mask[] = { 1, 2, 4, 0xf, 0xff, 0xffff, 0xffffffff };
return mask[x];
/* Don't shift by >= width of type */
if (x >= 5)
return 0xffffffff;
return (1 << (1 << x)) - 1;
}
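/* Sample values of the new helper, assuming x is the MSI multiple-message
 * log2 encoding (1 << x vectors, one mask bit per vector):
 *
 *	msi_mask(0) == 0x00000001	//  1 vector
 *	msi_mask(3) == 0x000000ff	//  8 vectors
 *	msi_mask(4) == 0x0000ffff	// 16 vectors
 *	msi_mask(5) == 0xffffffff	// 32 vectors, via the guard:
 *					// (1 << 32) - 1 would be undefined
 */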
static void msix_flush_writes(struct irq_desc *desc)

View File

@@ -1540,16 +1540,21 @@ void pci_release_region(struct pci_dev *pdev, int bar)
}
/**
* pci_request_region - Reserved PCI I/O and memory resource
* __pci_request_region - Reserved PCI I/O and memory resource
* @pdev: PCI device whose resources are to be reserved
* @bar: BAR to be reserved
* @res_name: Name to be associated with resource.
* @exclusive: whether the region access is exclusive or not
*
* Mark the PCI region associated with PCI device @pdev BR @bar as
* being reserved by owner @res_name. Do not access any
* address inside the PCI regions unless this call returns
* successfully.
*
* If @exclusive is set, then the region is marked so that userspace
* is explicitly not allowed to map the resource via /dev/mem or
* sysfs MMIO access.
*
* Returns 0 on success, or %EBUSY on error. A warning
* message is also printed on failure.
*/
@@ -1588,12 +1593,12 @@ err_out:
}
/**
* pci_request_region - Reserved PCI I/O and memory resource
* pci_request_region - Reserve PCI I/O and memory resource
* @pdev: PCI device whose resources are to be reserved
* @bar: BAR to be reserved
* @res_name: Name to be associated with resource.
* @res_name: Name to be associated with resource
*
* Mark the PCI region associated with PCI device @pdev BR @bar as
* Mark the PCI region associated with PCI device @pdev BAR @bar as
* being reserved by owner @res_name. Do not access any
* address inside the PCI regions unless this call returns
* successfully.

View File

@@ -16,21 +16,21 @@ extern int pci_mmap_fits(struct pci_dev *pdev, int resno,
#endif
/**
* Firmware PM callbacks
* struct pci_platform_pm_ops - Firmware PM callbacks
*
* @is_manageable - returns 'true' if given device is power manageable by the
* platform firmware
* @is_manageable: returns 'true' if given device is power manageable by the
* platform firmware
*
* @set_state - invokes the platform firmware to set the device's power state
* @set_state: invokes the platform firmware to set the device's power state
*
* @choose_state - returns PCI power state of given device preferred by the
* platform; to be used during system-wide transitions from a
* sleeping state to the working state and vice versa
* @choose_state: returns PCI power state of given device preferred by the
* platform; to be used during system-wide transitions from a
* sleeping state to the working state and vice versa
*
* @can_wakeup - returns 'true' if given device is capable of waking up the
* system from a sleeping state
* @can_wakeup: returns 'true' if given device is capable of waking up the
* system from a sleeping state
*
* @sleep_wake - enables/disables the system wake up capability of given device
* @sleep_wake: enables/disables the system wake up capability of given device
*
* If given platform is generally capable of power managing PCI devices, all of
* these callbacks are mandatory.
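For reference, a minimal kernel-doc sketch in the "@member:" form the hunk
above converts to (hypothetical struct, not from the tree):

/**
 * struct foo_ops - operations a platform must provide
 * @bar: returns 'true' if the platform supports bar
 */
struct foo_ops {
	bool (*bar)(void);
};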

View File

@@ -55,6 +55,7 @@ void pci_disable_rom(struct pci_dev *pdev)
/**
* pci_get_rom_size - obtain the actual size of the ROM image
* @pdev: target PCI device
* @rom: kernel virtual pointer to image of ROM
* @size: size of PCI window
* return: size of actual ROM image

View File

@@ -297,19 +297,6 @@ int usb_hcd_pci_suspend(struct pci_dev *dev, pm_message_t message)
}
EXPORT_SYMBOL_GPL(usb_hcd_pci_suspend);
/**
* usb_hcd_pci_resume_early - resume a PCI-based HCD before IRQs are enabled
* @dev: USB Host Controller being resumed
*
* Store this function in the HCD's struct pci_driver as .resume_early.
*/
int usb_hcd_pci_resume_early(struct pci_dev *dev)
{
pci_restore_state(dev);
return 0;
}
EXPORT_SYMBOL_GPL(usb_hcd_pci_resume_early);
/**
* usb_hcd_pci_resume - power management resume of a PCI-based HCD
* @dev: USB Host Controller being resumed
@@ -333,6 +320,8 @@ int usb_hcd_pci_resume(struct pci_dev *dev)
}
#endif
pci_restore_state(dev);
hcd = pci_get_drvdata(dev);
if (hcd->state != HC_STATE_SUSPENDED) {
dev_dbg(hcd->self.controller,

View File

@@ -257,7 +257,6 @@ extern void usb_hcd_pci_remove(struct pci_dev *dev);
#ifdef CONFIG_PM
extern int usb_hcd_pci_suspend(struct pci_dev *dev, pm_message_t msg);
extern int usb_hcd_pci_resume_early(struct pci_dev *dev);
extern int usb_hcd_pci_resume(struct pci_dev *dev);
#endif /* CONFIG_PM */

View File

@@ -432,7 +432,6 @@ static struct pci_driver ehci_pci_driver = {
#ifdef CONFIG_PM
.suspend = usb_hcd_pci_suspend,
.resume_early = usb_hcd_pci_resume_early,
.resume = usb_hcd_pci_resume,
#endif
.shutdown = usb_hcd_pci_shutdown,

View File

@@ -487,7 +487,6 @@ static struct pci_driver ohci_pci_driver = {
#ifdef CONFIG_PM
.suspend = usb_hcd_pci_suspend,
.resume_early = usb_hcd_pci_resume_early,
.resume = usb_hcd_pci_resume,
#endif

View File

@@ -942,7 +942,6 @@ static struct pci_driver uhci_pci_driver = {
#ifdef CONFIG_PM
.suspend = usb_hcd_pci_suspend,
.resume_early = usb_hcd_pci_resume_early,
.resume = usb_hcd_pci_resume,
#endif /* PM */
};

View File

@@ -227,13 +227,13 @@ void scan_async_work(struct work_struct *work)
* Now that the ASL is updated, complete the removal of any
* removed qsets.
*/
spin_lock(&whc->lock);
spin_lock_irq(&whc->lock);
list_for_each_entry_safe(qset, t, &whc->async_removed_list, list_node) {
qset_remove_complete(whc, qset);
}
spin_unlock(&whc->lock);
spin_unlock_irq(&whc->lock);
}
/**

View File

@@ -255,13 +255,13 @@ void scan_periodic_work(struct work_struct *work)
* Now that the PZL is updated, complete the removal of any
* removed qsets.
*/
spin_lock(&whc->lock);
spin_lock_irq(&whc->lock);
list_for_each_entry_safe(qset, t, &whc->periodic_removed_list, list_node) {
qset_remove_complete(whc, qset);
}
spin_unlock(&whc->lock);
spin_unlock_irq(&whc->lock);
}
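/* A minimal sketch of the deadlock the spin_lock_irq() conversion in the
 * two hunks above avoids, assuming whc->lock is also taken from hard-IRQ
 * context (hypothetical handler name):
 *
 *	spin_lock(&whc->lock);		// process context, IRQs still on
 *	// IRQ fires on this CPU; whc_irq() -> spin_lock(&whc->lock)
 *	// spins forever, and the holder below can never run again to
 *	// release the lock
 *	qset_remove_complete(whc, qset);
 *	spin_unlock(&whc->lock);
 *
 * Disabling local interrupts first closes that window.
 */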
/**

View File

@@ -406,7 +406,7 @@ config ITCO_WDT
---help---
Hardware driver for the intel TCO timer based watchdog devices.
These drivers are included in the Intel 82801 I/O Controller
Hub family (from ICH0 up to ICH8) and in the Intel 6300ESB
Hub family (from ICH0 up to ICH10) and in the Intel 63xxESB
controller hub.
The TCO (Total Cost of Ownership) timer is a watchdog timer

View File

@@ -1,7 +1,7 @@
/*
* intel TCO vendor specific watchdog driver support
*
* (c) Copyright 2006-2008 Wim Van Sebroeck <wim@iguana.be>.
* (c) Copyright 2006-2009 Wim Van Sebroeck <wim@iguana.be>.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -19,7 +19,7 @@
/* Module and version information */
#define DRV_NAME "iTCO_vendor_support"
#define DRV_VERSION "1.02"
#define DRV_VERSION "1.03"
#define PFX DRV_NAME ": "
/* Includes */
@@ -77,6 +77,26 @@ MODULE_PARM_DESC(vendorsupport, "iTCO vendor specific support mode, default=0 (n
* 20.6 seconds.
*/
static void supermicro_old_pre_start(unsigned long acpibase)
{
unsigned long val32;
/* Bit 13: TCO_EN -> 0 = Disables TCO logic generating an SMI# */
val32 = inl(SMI_EN);
val32 &= 0xffffdfff; /* Turn off SMI clearing watchdog */
outl(val32, SMI_EN); /* Needed to activate watchdog */
}
static void supermicro_old_pre_stop(unsigned long acpibase)
{
unsigned long val32;
/* Bit 13: TCO_EN -> 1 = Enables the TCO logic to generate SMI# */
val32 = inl(SMI_EN);
val32 |= 0x00002000; /* Turn on SMI clearing watchdog */
outl(val32, SMI_EN); /* Needed to deactivate watchdog */
}
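/* Both helpers touch only bit 13 of SMI_EN: 0xffffdfff is ~0x00002000,
 * so the &= in supermicro_old_pre_start() clears TCO_EN and the
 * |= 0x00002000 in supermicro_old_pre_stop() sets it again, leaving
 * every other SMI enable bit untouched. */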
static void supermicro_old_pre_keepalive(unsigned long acpibase)
{
/* Reload TCO Timer (done in iTCO_wdt_keepalive) + */
@@ -228,14 +248,18 @@ static void supermicro_new_pre_set_heartbeat(unsigned int heartbeat)
void iTCO_vendor_pre_start(unsigned long acpibase,
unsigned int heartbeat)
{
if (vendorsupport == SUPERMICRO_NEW_BOARD)
if (vendorsupport == SUPERMICRO_OLD_BOARD)
supermicro_old_pre_start(acpibase);
else if (vendorsupport == SUPERMICRO_NEW_BOARD)
supermicro_new_pre_start(heartbeat);
}
EXPORT_SYMBOL(iTCO_vendor_pre_start);
void iTCO_vendor_pre_stop(unsigned long acpibase)
{
if (vendorsupport == SUPERMICRO_NEW_BOARD)
if (vendorsupport == SUPERMICRO_OLD_BOARD)
supermicro_old_pre_stop(acpibase);
else if (vendorsupport == SUPERMICRO_NEW_BOARD)
supermicro_new_pre_stop();
}
EXPORT_SYMBOL(iTCO_vendor_pre_stop);

View File

@@ -1,7 +1,7 @@
/*
* intel TCO Watchdog Driver (Used in i82801 and i6300ESB chipsets)
* intel TCO Watchdog Driver (Used in i82801 and i63xxESB chipsets)
*
* (c) Copyright 2006-2008 Wim Van Sebroeck <wim@iguana.be>.
* (c) Copyright 2006-2009 Wim Van Sebroeck <wim@iguana.be>.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -63,7 +63,7 @@
/* Module and version information */
#define DRV_NAME "iTCO_wdt"
#define DRV_VERSION "1.04"
#define DRV_VERSION "1.05"
#define PFX DRV_NAME ": "
/* Includes */
@@ -236,16 +236,16 @@ MODULE_DEVICE_TABLE(pci, iTCO_wdt_pci_tbl);
/* Address definitions for the TCO */
/* TCO base address */
#define TCOBASE iTCO_wdt_private.ACPIBASE + 0x60
#define TCOBASE iTCO_wdt_private.ACPIBASE + 0x60
/* SMI Control and Enable Register */
#define SMI_EN iTCO_wdt_private.ACPIBASE + 0x30
#define SMI_EN iTCO_wdt_private.ACPIBASE + 0x30
#define TCO_RLD TCOBASE + 0x00 /* TCO Timer Reload and Curr. Value */
#define TCOv1_TMR TCOBASE + 0x01 /* TCOv1 Timer Initial Value */
#define TCO_DAT_IN TCOBASE + 0x02 /* TCO Data In Register */
#define TCO_DAT_OUT TCOBASE + 0x03 /* TCO Data Out Register */
#define TCO1_STS TCOBASE + 0x04 /* TCO1 Status Register */
#define TCO2_STS TCOBASE + 0x06 /* TCO2 Status Register */
#define TCO_DAT_IN TCOBASE + 0x02 /* TCO Data In Register */
#define TCO_DAT_OUT TCOBASE + 0x03 /* TCO Data Out Register */
#define TCO1_STS TCOBASE + 0x04 /* TCO1 Status Register */
#define TCO2_STS TCOBASE + 0x06 /* TCO2 Status Register */
#define TCO1_CNT TCOBASE + 0x08 /* TCO1 Control Register */
#define TCO2_CNT TCOBASE + 0x0a /* TCO2 Control Register */
#define TCOv2_TMR TCOBASE + 0x12 /* TCOv2 Timer Initial Value */
@@ -338,7 +338,6 @@ static int iTCO_wdt_unset_NO_REBOOT_bit(void)
static int iTCO_wdt_start(void)
{
unsigned int val;
unsigned long val32;
spin_lock(&iTCO_wdt_private.io_lock);
@@ -351,11 +350,6 @@ static int iTCO_wdt_start(void)
return -EIO;
}
/* Bit 13: TCO_EN -> 0 = Disables TCO logic generating an SMI# */
val32 = inl(SMI_EN);
val32 &= 0xffffdfff; /* Turn off SMI clearing watchdog */
outl(val32, SMI_EN);
/* Force the timer to its reload value by writing to the TCO_RLD
register */
if (iTCO_wdt_private.iTCO_version == 2)
@@ -378,7 +372,6 @@ static int iTCO_wdt_start(void)
static int iTCO_wdt_stop(void)
{
unsigned int val;
unsigned long val32;
spin_lock(&iTCO_wdt_private.io_lock);
@@ -390,11 +383,6 @@ static int iTCO_wdt_stop(void)
outw(val, TCO1_CNT);
val = inw(TCO1_CNT);
/* Bit 13: TCO_EN -> 1 = Enables the TCO logic to generate SMI# */
val32 = inl(SMI_EN);
val32 |= 0x00002000;
outl(val32, SMI_EN);
/* Set the NO_REBOOT bit to prevent later reboots, just for sure */
iTCO_wdt_set_NO_REBOOT_bit();
@@ -649,6 +637,7 @@ static int __devinit iTCO_wdt_init(struct pci_dev *pdev,
int ret;
u32 base_address;
unsigned long RCBA;
unsigned long val32;
/*
* Find the ACPI/PM base I/O address which is the base
@@ -695,6 +684,10 @@ static int __devinit iTCO_wdt_init(struct pci_dev *pdev,
ret = -EIO;
goto out;
}
/* Bit 13: TCO_EN -> 0 = Disables TCO logic generating an SMI# */
val32 = inl(SMI_EN);
val32 &= 0xffffdfff; /* Turn off SMI clearing watchdog */
outl(val32, SMI_EN);
/* The TCO I/O registers reside in a 32-byte range pointed to
by the TCOBASE value */

View File

@@ -38,19 +38,12 @@ static int balance_node_right(struct btrfs_trans_handle *trans,
static int del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
struct btrfs_path *path, int level, int slot);
inline void btrfs_init_path(struct btrfs_path *p)
{
memset(p, 0, sizeof(*p));
}
struct btrfs_path *btrfs_alloc_path(void)
{
struct btrfs_path *path;
path = kmem_cache_alloc(btrfs_path_cachep, GFP_NOFS);
if (path) {
btrfs_init_path(path);
path = kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
if (path)
path->reada = 1;
}
return path;
}
@@ -69,14 +62,38 @@ noinline void btrfs_set_path_blocking(struct btrfs_path *p)
/*
* reset all the locked nodes in the patch to spinning locks.
*
* held is used to keep lockdep happy, when lockdep is enabled
* we set held to a blocking lock before we go around and
* retake all the spinlocks in the path. You can safely use NULL
* for held
*/
noinline void btrfs_clear_path_blocking(struct btrfs_path *p)
noinline void btrfs_clear_path_blocking(struct btrfs_path *p,
struct extent_buffer *held)
{
int i;
for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
#ifdef CONFIG_DEBUG_LOCK_ALLOC
/* lockdep really cares that we take all of these spinlocks
* in the right order. If any of the locks in the path are not
* currently blocking, it is going to complain. So, make really
* really sure by forcing the path to blocking before we clear
* the path blocking.
*/
if (held)
btrfs_set_lock_blocking(held);
btrfs_set_path_blocking(p);
#endif
for (i = BTRFS_MAX_LEVEL - 1; i >= 0; i--) {
if (p->nodes[i] && p->locks[i])
btrfs_clear_lock_blocking(p->nodes[i]);
}
#ifdef CONFIG_DEBUG_LOCK_ALLOC
if (held)
btrfs_clear_lock_blocking(held);
#endif
}
/* this also releases the path */
@@ -286,7 +303,7 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
trans->transid, level, &ins);
BUG_ON(ret);
cow = btrfs_init_new_buffer(trans, root, prealloc_dest,
buf->len);
buf->len, level);
} else {
cow = btrfs_alloc_free_block(trans, root, buf->len,
parent_start,
@@ -917,9 +934,9 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
/* promote the child to a root */
child = read_node_slot(root, mid, 0);
BUG_ON(!child);
btrfs_tree_lock(child);
btrfs_set_lock_blocking(child);
BUG_ON(!child);
ret = btrfs_cow_block(trans, root, child, mid, 0, &child, 0);
BUG_ON(ret);
@@ -1566,7 +1583,7 @@ cow_done:
if (!p->skip_locking)
p->locks[level] = 1;
btrfs_clear_path_blocking(p);
btrfs_clear_path_blocking(p, NULL);
/*
* we have a lock on b and as long as we aren't changing
@@ -1605,7 +1622,7 @@ cow_done:
btrfs_set_path_blocking(p);
sret = split_node(trans, root, p, level);
btrfs_clear_path_blocking(p);
btrfs_clear_path_blocking(p, NULL);
BUG_ON(sret > 0);
if (sret) {
@@ -1625,7 +1642,7 @@ cow_done:
btrfs_set_path_blocking(p);
sret = balance_level(trans, root, p, level);
btrfs_clear_path_blocking(p);
btrfs_clear_path_blocking(p, NULL);
if (sret) {
ret = sret;
@@ -1688,13 +1705,13 @@ cow_done:
if (!p->skip_locking) {
int lret;
btrfs_clear_path_blocking(p);
btrfs_clear_path_blocking(p, NULL);
lret = btrfs_try_spin_lock(b);
if (!lret) {
btrfs_set_path_blocking(p);
btrfs_tree_lock(b);
btrfs_clear_path_blocking(p);
btrfs_clear_path_blocking(p, b);
}
}
} else {
@@ -1706,7 +1723,7 @@ cow_done:
btrfs_set_path_blocking(p);
sret = split_leaf(trans, root, key,
p, ins_len, ret == 0);
btrfs_clear_path_blocking(p);
btrfs_clear_path_blocking(p, NULL);
BUG_ON(sret > 0);
if (sret) {
@@ -3926,7 +3943,6 @@ find_next_key:
btrfs_release_path(root, path);
goto again;
} else {
btrfs_clear_path_blocking(path);
goto out;
}
}
@@ -3946,7 +3962,7 @@ find_next_key:
path->locks[level - 1] = 1;
path->nodes[level - 1] = cur;
unlock_up(path, level, 1);
btrfs_clear_path_blocking(path);
btrfs_clear_path_blocking(path, NULL);
}
out:
if (ret == 0)

View File

@@ -43,11 +43,7 @@ struct btrfs_ordered_sum;
#define BTRFS_ACL_NOT_CACHED ((void *)-1)
#ifdef CONFIG_LOCKDEP
# define BTRFS_MAX_LEVEL 7
#else
# define BTRFS_MAX_LEVEL 8
#endif
#define BTRFS_MAX_LEVEL 8
/* holds pointers to all of the tree roots */
#define BTRFS_ROOT_TREE_OBJECTID 1ULL
@@ -1715,7 +1711,8 @@ struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
u64 empty_size);
struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
u64 bytenr, u32 blocksize);
u64 bytenr, u32 blocksize,
int level);
int btrfs_alloc_extent(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
u64 num_bytes, u64 parent, u64 min_bytes,
@@ -1834,9 +1831,7 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans,
void btrfs_release_path(struct btrfs_root *root, struct btrfs_path *p);
struct btrfs_path *btrfs_alloc_path(void);
void btrfs_free_path(struct btrfs_path *p);
void btrfs_init_path(struct btrfs_path *p);
void btrfs_set_path_blocking(struct btrfs_path *p);
void btrfs_clear_path_blocking(struct btrfs_path *p);
void btrfs_unlock_up_safe(struct btrfs_path *p, int level);
int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,

View File

@@ -75,6 +75,40 @@ struct async_submit_bio {
struct btrfs_work work;
};
/* These are used to set the lockdep class on the extent buffer locks.
* The class is set by the readpage_end_io_hook after the buffer has
* passed csum validation but before the pages are unlocked.
*
* The lockdep class is also set by btrfs_init_new_buffer on freshly
* allocated blocks.
*
* The class is based on the level in the tree block, which allows lockdep
* to know that lower nodes nest inside the locks of higher nodes.
*
* We also add a check to make sure the highest level of the tree is
* the same as our lockdep setup here. If BTRFS_MAX_LEVEL changes, this
* code needs update as well.
*/
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# if BTRFS_MAX_LEVEL != 8
# error
# endif
static struct lock_class_key btrfs_eb_class[BTRFS_MAX_LEVEL + 1];
static const char *btrfs_eb_name[BTRFS_MAX_LEVEL + 1] = {
/* leaf */
"btrfs-extent-00",
"btrfs-extent-01",
"btrfs-extent-02",
"btrfs-extent-03",
"btrfs-extent-04",
"btrfs-extent-05",
"btrfs-extent-06",
"btrfs-extent-07",
/* highest possible level */
"btrfs-extent-08",
};
#endif
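/* A two-level sketch of the technique (hypothetical names): a distinct
 * lockdep class per tree level turns parent -> child acquisition into
 * ordered nesting rather than apparent recursion on a single class:
 *
 *	static struct lock_class_key level_key[2];
 *
 *	lockdep_set_class_and_name(&parent_eb->lock, &level_key[1], "lvl-1");
 *	lockdep_set_class_and_name(&child_eb->lock, &level_key[0], "lvl-0");
 *	btrfs_tree_lock(parent_eb);
 *	btrfs_tree_lock(child_eb);	// different class: no false report
 */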
/*
* extents on the btree inode are pretty simple, there's one extent
* that covers the entire device
@@ -347,6 +381,15 @@ static int check_tree_block_fsid(struct btrfs_root *root,
return ret;
}
#ifdef CONFIG_DEBUG_LOCK_ALLOC
void btrfs_set_buffer_lockdep_class(struct extent_buffer *eb, int level)
{
lockdep_set_class_and_name(&eb->lock,
&btrfs_eb_class[level],
btrfs_eb_name[level]);
}
#endif
static int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end,
struct extent_state *state)
{
@@ -392,6 +435,8 @@ static int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end,
}
found_level = btrfs_header_level(eb);
btrfs_set_buffer_lockdep_class(eb, found_level);
ret = csum_tree_block(root, eb, 1);
if (ret)
ret = -EIO;
@@ -1777,7 +1822,6 @@ struct btrfs_root *open_ctree(struct super_block *sb,
ret = find_and_setup_root(tree_root, fs_info,
BTRFS_DEV_TREE_OBJECTID, dev_root);
dev_root->track_dirty = 1;
if (ret)
goto fail_extent_root;

View File

@@ -101,4 +101,14 @@ int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans,
int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
struct btrfs_root *root);
int btree_lock_page_hook(struct page *page);
#ifdef CONFIG_DEBUG_LOCK_ALLOC
void btrfs_set_buffer_lockdep_class(struct extent_buffer *eb, int level);
#else
static inline void btrfs_set_buffer_lockdep_class(struct extent_buffer *eb,
int level)
{
}
#endif
#endif

View File

@@ -1323,8 +1323,25 @@ int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
int btrfs_extent_post_op(struct btrfs_trans_handle *trans,
struct btrfs_root *root)
{
finish_current_insert(trans, root->fs_info->extent_root, 1);
del_pending_extents(trans, root->fs_info->extent_root, 1);
u64 start;
u64 end;
int ret;
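/* find_first_extent_bit() returns 0 when it finds a matching range, so
 * "!ret -> continue" below means more pending work was queued while we
 * processed; loop until both the pending_del and extent_ins trees come
 * up empty. */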
while(1) {
finish_current_insert(trans, root->fs_info->extent_root, 1);
del_pending_extents(trans, root->fs_info->extent_root, 1);
/* is there more work to do? */
ret = find_first_extent_bit(&root->fs_info->pending_del,
0, &start, &end, EXTENT_WRITEBACK);
if (!ret)
continue;
ret = find_first_extent_bit(&root->fs_info->extent_ins,
0, &start, &end, EXTENT_WRITEBACK);
if (!ret)
continue;
break;
}
return 0;
}
@@ -2211,13 +2228,12 @@ static int finish_current_insert(struct btrfs_trans_handle *trans,
u64 end;
u64 priv;
u64 search = 0;
u64 skipped = 0;
struct btrfs_fs_info *info = extent_root->fs_info;
struct btrfs_path *path;
struct pending_extent_op *extent_op, *tmp;
struct list_head insert_list, update_list;
int ret;
int num_inserts = 0, max_inserts;
int num_inserts = 0, max_inserts, restart = 0;
path = btrfs_alloc_path();
INIT_LIST_HEAD(&insert_list);
@@ -2233,19 +2249,19 @@ again:
ret = find_first_extent_bit(&info->extent_ins, search, &start,
&end, EXTENT_WRITEBACK);
if (ret) {
if (skipped && all && !num_inserts &&
if (restart && !num_inserts &&
list_empty(&update_list)) {
skipped = 0;
restart = 0;
search = 0;
continue;
}
mutex_unlock(&info->extent_ins_mutex);
break;
}
ret = try_lock_extent(&info->extent_ins, start, end, GFP_NOFS);
if (!ret) {
skipped = 1;
if (all)
restart = 1;
search = end + 1;
if (need_resched()) {
mutex_unlock(&info->extent_ins_mutex);
@@ -2264,7 +2280,7 @@ again:
list_add_tail(&extent_op->list, &insert_list);
search = end + 1;
if (num_inserts == max_inserts) {
mutex_unlock(&info->extent_ins_mutex);
restart = 1;
break;
}
} else if (extent_op->type == PENDING_BACKREF_UPDATE) {
@@ -2280,7 +2296,6 @@ again:
* somebody marked this thing for deletion then just unlock it and be
* done, the free_extents will handle it
*/
mutex_lock(&info->extent_ins_mutex);
list_for_each_entry_safe(extent_op, tmp, &update_list, list) {
clear_extent_bits(&info->extent_ins, extent_op->bytenr,
extent_op->bytenr + extent_op->num_bytes - 1,
@@ -2302,6 +2317,10 @@ again:
if (!list_empty(&update_list)) {
ret = update_backrefs(trans, extent_root, path, &update_list);
BUG_ON(ret);
/* we may have COW'ed new blocks, so lets start over */
if (all)
restart = 1;
}
/*
@@ -2309,9 +2328,9 @@ again:
* need to make sure everything is cleaned then reset everything and
* go back to the beginning
*/
if (!num_inserts && all && skipped) {
if (!num_inserts && restart) {
search = 0;
skipped = 0;
restart = 0;
INIT_LIST_HEAD(&update_list);
INIT_LIST_HEAD(&insert_list);
goto again;
@@ -2368,27 +2387,19 @@ again:
BUG_ON(ret);
/*
* if we broke out of the loop in order to insert stuff because we hit
* the maximum number of inserts at a time we can handle, then loop
* back and pick up where we left off
* if restart is set for whatever reason we need to go back and start
* searching through the pending list again.
*
* We just inserted some extents, which could have resulted in new
* blocks being allocated, which would result in new blocks needing
* updates, so if all is set we _must_ restart to get the updated
* blocks.
*/
if (num_inserts == max_inserts) {
INIT_LIST_HEAD(&insert_list);
INIT_LIST_HEAD(&update_list);
num_inserts = 0;
goto again;
}
/*
* again, if we need to make absolutely sure there are no more pending
* extent operations left and we know that we skipped some, go back to
* the beginning and do it all again
*/
if (all && skipped) {
if (restart || all) {
INIT_LIST_HEAD(&insert_list);
INIT_LIST_HEAD(&update_list);
search = 0;
skipped = 0;
restart = 0;
num_inserts = 0;
goto again;
}
@@ -2709,6 +2720,8 @@ again:
goto again;
}
if (!err)
finish_current_insert(trans, extent_root, 0);
return err;
}
@@ -2859,7 +2872,8 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
if (data & BTRFS_BLOCK_GROUP_METADATA) {
last_ptr = &root->fs_info->last_alloc;
empty_cluster = 64 * 1024;
if (!btrfs_test_opt(root, SSD))
empty_cluster = 64 * 1024;
}
if ((data & BTRFS_BLOCK_GROUP_DATA) && btrfs_test_opt(root, SSD))
@@ -3402,7 +3416,8 @@ int btrfs_alloc_extent(struct btrfs_trans_handle *trans,
struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
u64 bytenr, u32 blocksize)
u64 bytenr, u32 blocksize,
int level)
{
struct extent_buffer *buf;
@@ -3410,6 +3425,7 @@ struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans,
if (!buf)
return ERR_PTR(-ENOMEM);
btrfs_set_header_generation(buf, trans->transid);
btrfs_set_buffer_lockdep_class(buf, level);
btrfs_tree_lock(buf);
clean_tree_block(trans, root, buf);
@@ -3453,7 +3469,8 @@ struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
return ERR_PTR(ret);
}
buf = btrfs_init_new_buffer(trans, root, ins.objectid, blocksize);
buf = btrfs_init_new_buffer(trans, root, ins.objectid,
blocksize, level);
return buf;
}
@@ -5641,7 +5658,9 @@ static noinline int relocate_one_extent(struct btrfs_root *extent_root,
prev_block = block_start;
}
mutex_lock(&extent_root->fs_info->trans_mutex);
btrfs_record_root_in_trans(found_root);
mutex_unlock(&extent_root->fs_info->trans_mutex);
if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
/*
* try to update data extent references while

Some files were not shown because too many files have changed in this diff