Merge branch 'gic-fasteoi' of git://linux-arm.org/linux-2.6-wd into devel-stable

Russell King 2011-05-11 18:45:21 +01:00
commit 254c44ea82
142 changed files with 1951 additions and 1585 deletions


@ -66,10 +66,10 @@ trick is to ensure that any needed memory allocations are done before
entering atomic context, using:
int flex_array_prealloc(struct flex_array *array, unsigned int start,
unsigned int end, gfp_t flags);
unsigned int nr_elements, gfp_t flags);
This function will ensure that memory for the elements indexed in the range
defined by start and end has been allocated. Thereafter, a
defined by start and nr_elements has been allocated. Thereafter, a
flex_array_put() call on an element in that range is guaranteed not to
block.
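For context, a minimal sketch of how a caller uses the renamed parameter, assuming the standard flex_array API (flex_array_prealloc() together with flex_array_put()); the helper function itself is hypothetical:

#include <linux/flex_array.h>

/* Hypothetical caller: preallocate while sleeping is still allowed so
 * that a later flex_array_put() in atomic context cannot block. */
static int example_store_first(struct flex_array *fa, void *elem)
{
	int ret;

	/* The second argument is the starting index; the third is now a
	 * count of elements, not an end index: back elements 0..15. */
	ret = flex_array_prealloc(fa, 0, 16, GFP_KERNEL);
	if (ret)
		return ret;

	/* Guaranteed not to block for indices inside the preallocated
	 * range, even with GFP_ATOMIC. */
	return flex_array_put(fa, 0, elem, GFP_ATOMIC);
}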


@ -1,7 +1,7 @@
VERSION = 2
PATCHLEVEL = 6
SUBLEVEL = 39
EXTRAVERSION = -rc5
EXTRAVERSION = -rc7
NAME = Flesh-Eating Bats with Fangs
# *DOCUMENTATION*


@ -49,7 +49,7 @@ struct gic_chip_data {
* Default make them NULL.
*/
struct irq_chip gic_arch_extn = {
.irq_ack = NULL,
.irq_eoi = NULL,
.irq_mask = NULL,
.irq_unmask = NULL,
.irq_retrigger = NULL,
@ -84,21 +84,12 @@ static inline unsigned int gic_irq(struct irq_data *d)
/*
* Routines to acknowledge, disable and enable interrupts
*/
static void gic_ack_irq(struct irq_data *d)
{
spin_lock(&irq_controller_lock);
if (gic_arch_extn.irq_ack)
gic_arch_extn.irq_ack(d);
writel(gic_irq(d), gic_cpu_base(d) + GIC_CPU_EOI);
spin_unlock(&irq_controller_lock);
}
static void gic_mask_irq(struct irq_data *d)
{
u32 mask = 1 << (d->irq % 32);
spin_lock(&irq_controller_lock);
writel(mask, gic_dist_base(d) + GIC_DIST_ENABLE_CLEAR + (gic_irq(d) / 32) * 4);
writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_CLEAR + (gic_irq(d) / 32) * 4);
if (gic_arch_extn.irq_mask)
gic_arch_extn.irq_mask(d);
spin_unlock(&irq_controller_lock);
@ -111,10 +102,21 @@ static void gic_unmask_irq(struct irq_data *d)
spin_lock(&irq_controller_lock);
if (gic_arch_extn.irq_unmask)
gic_arch_extn.irq_unmask(d);
writel(mask, gic_dist_base(d) + GIC_DIST_ENABLE_SET + (gic_irq(d) / 32) * 4);
writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_SET + (gic_irq(d) / 32) * 4);
spin_unlock(&irq_controller_lock);
}
static void gic_eoi_irq(struct irq_data *d)
{
if (gic_arch_extn.irq_eoi) {
spin_lock(&irq_controller_lock);
gic_arch_extn.irq_eoi(d);
spin_unlock(&irq_controller_lock);
}
writel_relaxed(gic_irq(d), gic_cpu_base(d) + GIC_CPU_EOI);
}
static int gic_set_type(struct irq_data *d, unsigned int type)
{
void __iomem *base = gic_dist_base(d);
@ -138,7 +140,7 @@ static int gic_set_type(struct irq_data *d, unsigned int type)
if (gic_arch_extn.irq_set_type)
gic_arch_extn.irq_set_type(d, type);
val = readl(base + GIC_DIST_CONFIG + confoff);
val = readl_relaxed(base + GIC_DIST_CONFIG + confoff);
if (type == IRQ_TYPE_LEVEL_HIGH)
val &= ~confmask;
else if (type == IRQ_TYPE_EDGE_RISING)
@ -148,15 +150,15 @@ static int gic_set_type(struct irq_data *d, unsigned int type)
* As recommended by the spec, disable the interrupt before changing
* the configuration
*/
if (readl(base + GIC_DIST_ENABLE_SET + enableoff) & enablemask) {
writel(enablemask, base + GIC_DIST_ENABLE_CLEAR + enableoff);
if (readl_relaxed(base + GIC_DIST_ENABLE_SET + enableoff) & enablemask) {
writel_relaxed(enablemask, base + GIC_DIST_ENABLE_CLEAR + enableoff);
enabled = true;
}
writel(val, base + GIC_DIST_CONFIG + confoff);
writel_relaxed(val, base + GIC_DIST_CONFIG + confoff);
if (enabled)
writel(enablemask, base + GIC_DIST_ENABLE_SET + enableoff);
writel_relaxed(enablemask, base + GIC_DIST_ENABLE_SET + enableoff);
spin_unlock(&irq_controller_lock);
@ -188,8 +190,8 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
spin_lock(&irq_controller_lock);
d->node = cpu;
val = readl(reg) & ~mask;
writel(val | bit, reg);
val = readl_relaxed(reg) & ~mask;
writel_relaxed(val | bit, reg);
spin_unlock(&irq_controller_lock);
return 0;
@ -218,11 +220,10 @@ static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
unsigned int cascade_irq, gic_irq;
unsigned long status;
/* primary controller ack'ing */
chip->irq_ack(&desc->irq_data);
chained_irq_enter(chip, desc);
spin_lock(&irq_controller_lock);
status = readl(chip_data->cpu_base + GIC_CPU_INTACK);
status = readl_relaxed(chip_data->cpu_base + GIC_CPU_INTACK);
spin_unlock(&irq_controller_lock);
gic_irq = (status & 0x3ff);
@ -236,15 +237,14 @@ static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
generic_handle_irq(cascade_irq);
out:
/* primary controller unmasking */
chip->irq_unmask(&desc->irq_data);
chained_irq_exit(chip, desc);
}
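The chained_irq_enter()/chained_irq_exit() helpers adopted here pick whichever ack/mask/eoi callbacks the parent chip actually provides, so the cascade handler works unchanged whether the primary controller runs in level or fasteoi flow. A minimal sketch of the pattern, with a hypothetical demux helper:

#include <linux/irq.h>
#include <asm/mach/irq.h>

extern unsigned int example_pending_child_irq(void);	/* hypothetical demux */

static void example_cascade_handler(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);

	/* Masks and/or acks the parent interrupt as the chip requires */
	chained_irq_enter(chip, desc);

	generic_handle_irq(example_pending_child_irq());

	/* EOIs the parent if the chip has irq_eoi, otherwise unmasks it */
	chained_irq_exit(chip, desc);
}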
static struct irq_chip gic_chip = {
.name = "GIC",
.irq_ack = gic_ack_irq,
.irq_mask = gic_mask_irq,
.irq_unmask = gic_unmask_irq,
.irq_eoi = gic_eoi_irq,
.irq_set_type = gic_set_type,
.irq_retrigger = gic_retrigger,
#ifdef CONFIG_SMP
@ -272,13 +272,13 @@ static void __init gic_dist_init(struct gic_chip_data *gic,
cpumask |= cpumask << 8;
cpumask |= cpumask << 16;
writel(0, base + GIC_DIST_CTRL);
writel_relaxed(0, base + GIC_DIST_CTRL);
/*
* Find out how many interrupts are supported.
* The GIC only supports up to 1020 interrupt sources.
*/
gic_irqs = readl(base + GIC_DIST_CTR) & 0x1f;
gic_irqs = readl_relaxed(base + GIC_DIST_CTR) & 0x1f;
gic_irqs = (gic_irqs + 1) * 32;
if (gic_irqs > 1020)
gic_irqs = 1020;
@ -287,26 +287,26 @@ static void __init gic_dist_init(struct gic_chip_data *gic,
* Set all global interrupts to be level triggered, active low.
*/
for (i = 32; i < gic_irqs; i += 16)
writel(0, base + GIC_DIST_CONFIG + i * 4 / 16);
writel_relaxed(0, base + GIC_DIST_CONFIG + i * 4 / 16);
/*
* Set all global interrupts to this CPU only.
*/
for (i = 32; i < gic_irqs; i += 4)
writel(cpumask, base + GIC_DIST_TARGET + i * 4 / 4);
writel_relaxed(cpumask, base + GIC_DIST_TARGET + i * 4 / 4);
/*
* Set priority on all global interrupts.
*/
for (i = 32; i < gic_irqs; i += 4)
writel(0xa0a0a0a0, base + GIC_DIST_PRI + i * 4 / 4);
writel_relaxed(0xa0a0a0a0, base + GIC_DIST_PRI + i * 4 / 4);
/*
* Disable all interrupts. Leave the PPI and SGIs alone
* as these enables are banked registers.
*/
for (i = 32; i < gic_irqs; i += 32)
writel(0xffffffff, base + GIC_DIST_ENABLE_CLEAR + i * 4 / 32);
writel_relaxed(0xffffffff, base + GIC_DIST_ENABLE_CLEAR + i * 4 / 32);
/*
* Limit number of interrupts registered to the platform maximum
@ -319,12 +319,12 @@ static void __init gic_dist_init(struct gic_chip_data *gic,
* Setup the Linux IRQ subsystem.
*/
for (i = irq_start; i < irq_limit; i++) {
irq_set_chip_and_handler(i, &gic_chip, handle_level_irq);
irq_set_chip_and_handler(i, &gic_chip, handle_fasteoi_irq);
irq_set_chip_data(i, gic);
set_irq_flags(i, IRQF_VALID | IRQF_PROBE);
}
writel(1, base + GIC_DIST_CTRL);
writel_relaxed(1, base + GIC_DIST_CTRL);
}
static void __cpuinit gic_cpu_init(struct gic_chip_data *gic)
@ -337,17 +337,17 @@ static void __cpuinit gic_cpu_init(struct gic_chip_data *gic)
* Deal with the banked PPI and SGI interrupts - disable all
* PPI interrupts, ensure all SGI interrupts are enabled.
*/
writel(0xffff0000, dist_base + GIC_DIST_ENABLE_CLEAR);
writel(0x0000ffff, dist_base + GIC_DIST_ENABLE_SET);
writel_relaxed(0xffff0000, dist_base + GIC_DIST_ENABLE_CLEAR);
writel_relaxed(0x0000ffff, dist_base + GIC_DIST_ENABLE_SET);
/*
* Set priority on PPI and SGI interrupts
*/
for (i = 0; i < 32; i += 4)
writel(0xa0a0a0a0, dist_base + GIC_DIST_PRI + i * 4 / 4);
writel_relaxed(0xa0a0a0a0, dist_base + GIC_DIST_PRI + i * 4 / 4);
writel(0xf0, base + GIC_CPU_PRIMASK);
writel(1, base + GIC_CPU_CTRL);
writel_relaxed(0xf0, base + GIC_CPU_PRIMASK);
writel_relaxed(1, base + GIC_CPU_CTRL);
}
void __init gic_init(unsigned int gic_nr, unsigned int irq_start,
@ -391,7 +391,13 @@ void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
{
unsigned long map = *cpus_addr(*mask);
/*
* Ensure that stores to Normal memory are visible to the
* other CPUs before issuing the IPI.
*/
dsb();
/* this always happens on GIC0 */
writel(map << 16 | irq, gic_data[0].dist_base + GIC_DIST_SOFTINT);
writel_relaxed(map << 16 | irq, gic_data[0].dist_base + GIC_DIST_SOFTINT);
}
#endif
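With the switch to fasteoi, a platform that must mirror GIC operations into its own interrupt controller populates gic_arch_extn (now carrying irq_eoi rather than irq_ack) before calling gic_init(), which is exactly what the Tegra rework later in this merge does. A stripped-down sketch; the callbacks and base addresses below are placeholders:

#include <linux/irq.h>
#include <asm/hardware/gic.h>

static void my_eoi(struct irq_data *d)    { /* platform-side EOI */ }
static void my_mask(struct irq_data *d)   { /* platform-side mask */ }
static void my_unmask(struct irq_data *d) { /* platform-side unmask */ }

void __init my_platform_init_irq(void)
{
	void __iomem *dist_base = NULL, *cpu_base = NULL;	/* placeholders */

	gic_arch_extn.irq_eoi = my_eoi;
	gic_arch_extn.irq_mask = my_mask;
	gic_arch_extn.irq_unmask = my_unmask;

	gic_init(0, 29, dist_base, cpu_base);
}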


@ -0,0 +1,48 @@
CONFIG_EXPERIMENTAL=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_EMBEDDED=y
# CONFIG_HOTPLUG is not set
# CONFIG_ELF_CORE is not set
# CONFIG_FUTEX is not set
# CONFIG_TIMERFD is not set
# CONFIG_VM_EVENT_COUNTERS is not set
# CONFIG_COMPAT_BRK is not set
CONFIG_SLAB=y
# CONFIG_LBDAF is not set
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_DEADLINE is not set
# CONFIG_IOSCHED_CFQ is not set
# CONFIG_MMU is not set
CONFIG_ARCH_AT91=y
CONFIG_ARCH_AT91X40=y
CONFIG_MACH_AT91EB01=y
CONFIG_AT91_EARLY_USART0=y
CONFIG_CPU_ARM7TDMI=y
CONFIG_SET_MEM_PARAM=y
CONFIG_DRAM_BASE=0x01000000
CONFIG_DRAM_SIZE=0x00400000
CONFIG_FLASH_MEM_BASE=0x01400000
CONFIG_PROCESSOR_ID=0x14000040
CONFIG_ZBOOT_ROM_TEXT=0x0
CONFIG_ZBOOT_ROM_BSS=0x0
CONFIG_BINFMT_FLAT=y
# CONFIG_SUSPEND is not set
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
CONFIG_MTD_PARTITIONS=y
CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_RAM=y
CONFIG_MTD_ROM=y
CONFIG_BLK_DEV_RAM=y
# CONFIG_INPUT is not set
# CONFIG_SERIO is not set
# CONFIG_VT is not set
# CONFIG_DEVKMEM is not set
# CONFIG_HW_RANDOM is not set
# CONFIG_HWMON is not set
# CONFIG_USB_SUPPORT is not set
CONFIG_EXT2_FS=y
# CONFIG_DNOTIFY is not set
CONFIG_ROMFS_FS=y
# CONFIG_ENABLE_MUST_CHECK is not set


@ -767,12 +767,20 @@ long arch_ptrace(struct task_struct *child, long request,
#ifdef CONFIG_HAVE_HW_BREAKPOINT
case PTRACE_GETHBPREGS:
if (ptrace_get_breakpoints(child) < 0)
return -ESRCH;
ret = ptrace_gethbpregs(child, addr,
(unsigned long __user *)data);
ptrace_put_breakpoints(child);
break;
case PTRACE_SETHBPREGS:
if (ptrace_get_breakpoints(child) < 0)
return -ESRCH;
ret = ptrace_sethbpregs(child, addr,
(unsigned long __user *)data);
ptrace_put_breakpoints(child);
break;
#endif
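Every architecture touched by this series follows the same refcount bracket: pin the child's breakpoint state, perform the operation, then release it. Condensed, with a hypothetical operation in the middle:

	if (ptrace_get_breakpoints(child) < 0)
		return -ESRCH;	/* breakpoint state already being torn down */

	ret = do_hbp_operation(child, addr, data);	/* hypothetical op */

	ptrace_put_breakpoints(child);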


@ -83,6 +83,7 @@ config ARCH_AT91CAP9
select CPU_ARM926T
select GENERIC_CLOCKEVENTS
select HAVE_FB_ATMEL
select HAVE_NET_MACB
config ARCH_AT572D940HF
bool "AT572D940HF"


@ -30,6 +30,11 @@
#include <mach/board.h>
#include "generic.h"
static void __init at91eb01_init_irq(void)
{
at91x40_init_interrupts(NULL);
}
static void __init at91eb01_map_io(void)
{
at91x40_initialize(40000000);
@ -38,7 +43,7 @@ static void __init at91eb01_map_io(void)
MACHINE_START(AT91EB01, "Atmel AT91 EB01")
/* Maintainer: Greg Ungerer <gerg@snapgear.com> */
.timer = &at91x40_timer,
.init_irq = at91x40_init_interrupts,
.init_irq = at91eb01_init_irq,
.map_io = at91eb01_map_io,
MACHINE_END


@ -27,6 +27,7 @@
#define ARCH_ID_AT91SAM9G45 0x819b05a0
#define ARCH_ID_AT91SAM9G45MRL 0x819b05a2 /* aka 9G45-ES2 & non ES lots */
#define ARCH_ID_AT91SAM9G45ES 0x819b05a1 /* 9G45-ES (Engineering Sample) */
#define ARCH_ID_AT91SAM9X5 0x819a05a0
#define ARCH_ID_AT91CAP9 0x039A03A0
#define ARCH_ID_AT91SAM9XE128 0x329973a0
@ -55,6 +56,12 @@ static inline unsigned long at91_cpu_fully_identify(void)
#define ARCH_EXID_AT91SAM9G46 0x00000003
#define ARCH_EXID_AT91SAM9G45 0x00000004
#define ARCH_EXID_AT91SAM9G15 0x00000000
#define ARCH_EXID_AT91SAM9G35 0x00000001
#define ARCH_EXID_AT91SAM9X35 0x00000002
#define ARCH_EXID_AT91SAM9G25 0x00000003
#define ARCH_EXID_AT91SAM9X25 0x00000004
static inline unsigned long at91_exid_identify(void)
{
return at91_sys_read(AT91_DBGU_EXID);
@ -143,6 +150,27 @@ static inline unsigned long at91cap9_rev_identify(void)
#define cpu_is_at91sam9m11() (0)
#endif
#ifdef CONFIG_ARCH_AT91SAM9X5
#define cpu_is_at91sam9x5() (at91_cpu_identify() == ARCH_ID_AT91SAM9X5)
#define cpu_is_at91sam9g15() (cpu_is_at91sam9x5() && \
(at91_exid_identify() == ARCH_EXID_AT91SAM9G15))
#define cpu_is_at91sam9g35() (cpu_is_at91sam9x5() && \
(at91_exid_identify() == ARCH_EXID_AT91SAM9G35))
#define cpu_is_at91sam9x35() (cpu_is_at91sam9x5() && \
(at91_exid_identify() == ARCH_EXID_AT91SAM9X35))
#define cpu_is_at91sam9g25() (cpu_is_at91sam9x5() && \
(at91_exid_identify() == ARCH_EXID_AT91SAM9G25))
#define cpu_is_at91sam9x25() (cpu_is_at91sam9x5() && \
(at91_exid_identify() == ARCH_EXID_AT91SAM9X25))
#else
#define cpu_is_at91sam9x5() (0)
#define cpu_is_at91sam9g15() (0)
#define cpu_is_at91sam9g35() (0)
#define cpu_is_at91sam9x35() (0)
#define cpu_is_at91sam9g25() (0)
#define cpu_is_at91sam9x25() (0)
#endif
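Board or driver code can then test for the new parts in the usual way; an illustrative fragment:

	if (cpu_is_at91sam9x5()) {
		/* common 9x5-family setup */
		if (cpu_is_at91sam9g25())
			; /* 9G25-specific quirk */
	}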
#ifdef CONFIG_ARCH_AT91CAP9
#define cpu_is_at91cap9() (at91_cpu_identify() == ARCH_ID_AT91CAP9)
#define cpu_is_at91cap9_revB() (at91cap9_rev_identify() == ARCH_REVISION_CAP9_B)


@ -59,8 +59,7 @@ static void combiner_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
unsigned int cascade_irq, combiner_irq;
unsigned long status;
/* primary controller ack'ing */
chip->irq_ack(&desc->irq_data);
chained_irq_enter(chip, desc);
spin_lock(&irq_controller_lock);
status = __raw_readl(chip_data->base + COMBINER_INT_STATUS);
@ -79,8 +78,7 @@ static void combiner_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
generic_handle_irq(cascade_irq);
out:
/* primary controller unmasking */
chip->irq_unmask(&desc->irq_data);
chained_irq_exit(chip, desc);
}
static struct irq_chip combiner_chip = {


@ -27,6 +27,9 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <asm/mach/irq.h>
#include <mach/msm_iomap.h>
#include "gpiomux.h"
@ -309,8 +312,10 @@ static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int flow_type)
*/
static void msm_summary_irq_handler(unsigned int irq, struct irq_desc *desc)
{
struct irq_data *data = irq_desc_get_irq_data(desc);
unsigned long i;
struct irq_chip *chip = irq_desc_get_chip(desc);
chained_irq_enter(chip, desc);
for (i = find_first_bit(msm_gpio.enabled_irqs, NR_GPIO_IRQS);
i < NR_GPIO_IRQS;
@ -319,7 +324,8 @@ static void msm_summary_irq_handler(unsigned int irq, struct irq_desc *desc)
generic_handle_irq(msm_gpio_to_irq(&msm_gpio.gpio_chip,
i));
}
data->chip->irq_ack(data);
chained_irq_exit(chip, desc);
}
static int msm_gpio_irq_set_wake(struct irq_data *d, unsigned int on)


@ -1,7 +1,7 @@
obj-y += common.o
obj-y += devices.o
obj-y += io.o
obj-y += irq.o legacy_irq.o
obj-y += irq.o
obj-y += clock.o
obj-y += timer.o
obj-y += gpio.o


@ -24,6 +24,8 @@
#include <linux/io.h>
#include <linux/gpio.h>
#include <asm/mach/irq.h>
#include <mach/iomap.h>
#include <mach/suspend.h>
@ -221,8 +223,9 @@ static void tegra_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
int port;
int pin;
int unmasked = 0;
struct irq_chip *chip = irq_desc_get_chip(desc);
desc->irq_data.chip->irq_ack(&desc->irq_data);
chained_irq_enter(chip, desc);
bank = irq_get_handler_data(irq);
@ -241,7 +244,7 @@ static void tegra_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
*/
if (lvl & (0x100 << pin)) {
unmasked = 1;
desc->irq_data.chip->irq_unmask(&desc->irq_data);
chained_irq_exit(chip, desc);
}
generic_handle_irq(gpio_to_irq(gpio + pin));
@ -249,7 +252,7 @@ static void tegra_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
}
if (!unmasked)
desc->irq_data.chip->irq_unmask(&desc->irq_data);
chained_irq_exit(chip, desc);
}


@ -1,35 +0,0 @@
/*
* arch/arm/mach-tegra/include/mach/legacy_irq.h
*
* Copyright (C) 2010 Google, Inc.
* Author: Colin Cross <ccross@android.com>
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#ifndef _ARCH_ARM_MACH_TEGRA_LEGARY_IRQ_H
#define _ARCH_ARM_MACH_TEGRA_LEGARY_IRQ_H
void tegra_legacy_mask_irq(unsigned int irq);
void tegra_legacy_unmask_irq(unsigned int irq);
void tegra_legacy_select_fiq(unsigned int irq, bool fiq);
void tegra_legacy_force_irq_set(unsigned int irq);
void tegra_legacy_force_irq_clr(unsigned int irq);
int tegra_legacy_force_irq_status(unsigned int irq);
void tegra_legacy_select_fiq(unsigned int irq, bool fiq);
unsigned long tegra_legacy_vfiq(int nr);
unsigned long tegra_legacy_class(int nr);
int tegra_legacy_irq_set_wake(int irq, int enable);
void tegra_legacy_irq_set_lp1_wake_mask(void);
void tegra_legacy_irq_restore_mask(void);
void tegra_init_legacy_irq(void);
#endif


@ -1,8 +1,8 @@
/*
* Copyright (C) 2010 Google, Inc.
* Copyright (C) 2011 Google, Inc.
*
* Author:
* Colin Cross <ccross@google.com>
* Colin Cross <ccross@android.com>
*
* Copyright (C) 2010, NVIDIA Corporation
*
@ -18,8 +18,6 @@
*/
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/io.h>
@ -27,134 +25,110 @@
#include <asm/hardware/gic.h>
#include <mach/iomap.h>
#include <mach/legacy_irq.h>
#include <mach/suspend.h>
#include "board.h"
#define PMC_CTRL 0x0
#define PMC_CTRL_LATCH_WAKEUPS (1 << 5)
#define PMC_WAKE_MASK 0xc
#define PMC_WAKE_LEVEL 0x10
#define PMC_WAKE_STATUS 0x14
#define PMC_SW_WAKE_STATUS 0x18
#define PMC_DPD_SAMPLE 0x20
#define INT_SYS_NR (INT_GPIO_BASE - INT_PRI_BASE)
#define INT_SYS_SZ (INT_SEC_BASE - INT_PRI_BASE)
#define PPI_NR ((INT_SYS_NR+INT_SYS_SZ-1)/INT_SYS_SZ)
static void __iomem *pmc = IO_ADDRESS(TEGRA_PMC_BASE);
#define ICTLR_CPU_IEP_VFIQ 0x08
#define ICTLR_CPU_IEP_FIR 0x14
#define ICTLR_CPU_IEP_FIR_SET 0x18
#define ICTLR_CPU_IEP_FIR_CLR 0x1c
static u32 tegra_lp0_wake_enb;
static u32 tegra_lp0_wake_level;
static u32 tegra_lp0_wake_level_any;
#define ICTLR_CPU_IER 0x20
#define ICTLR_CPU_IER_SET 0x24
#define ICTLR_CPU_IER_CLR 0x28
#define ICTLR_CPU_IEP_CLASS 0x2C
static void (*tegra_gic_mask_irq)(struct irq_data *d);
static void (*tegra_gic_unmask_irq)(struct irq_data *d);
static void (*tegra_gic_ack_irq)(struct irq_data *d);
#define ICTLR_COP_IER 0x30
#define ICTLR_COP_IER_SET 0x34
#define ICTLR_COP_IER_CLR 0x38
#define ICTLR_COP_IEP_CLASS 0x3c
/* ensures that sufficient time is passed for a register write to
* serialize into the 32KHz domain */
static void pmc_32kwritel(u32 val, unsigned long offs)
#define NUM_ICTLRS 4
#define FIRST_LEGACY_IRQ 32
static void __iomem *ictlr_reg_base[] = {
IO_ADDRESS(TEGRA_PRIMARY_ICTLR_BASE),
IO_ADDRESS(TEGRA_SECONDARY_ICTLR_BASE),
IO_ADDRESS(TEGRA_TERTIARY_ICTLR_BASE),
IO_ADDRESS(TEGRA_QUATERNARY_ICTLR_BASE),
};
static inline void tegra_irq_write_mask(unsigned int irq, unsigned long reg)
{
writel(val, pmc + offs);
udelay(130);
}
void __iomem *base;
u32 mask;
int tegra_set_lp1_wake(int irq, int enable)
{
return tegra_legacy_irq_set_wake(irq, enable);
}
BUG_ON(irq < FIRST_LEGACY_IRQ ||
irq >= FIRST_LEGACY_IRQ + NUM_ICTLRS * 32);
void tegra_set_lp0_wake_pads(u32 wake_enb, u32 wake_level, u32 wake_any)
{
u32 temp;
u32 status;
u32 lvl;
base = ictlr_reg_base[(irq - FIRST_LEGACY_IRQ) / 32];
mask = BIT((irq - FIRST_LEGACY_IRQ) % 32);
wake_level &= wake_enb;
wake_any &= wake_enb;
wake_level |= (tegra_lp0_wake_level & tegra_lp0_wake_enb);
wake_any |= (tegra_lp0_wake_level_any & tegra_lp0_wake_enb);
wake_enb |= tegra_lp0_wake_enb;
pmc_32kwritel(0, PMC_SW_WAKE_STATUS);
temp = readl(pmc + PMC_CTRL);
temp |= PMC_CTRL_LATCH_WAKEUPS;
pmc_32kwritel(temp, PMC_CTRL);
temp &= ~PMC_CTRL_LATCH_WAKEUPS;
pmc_32kwritel(temp, PMC_CTRL);
status = readl(pmc + PMC_SW_WAKE_STATUS);
lvl = readl(pmc + PMC_WAKE_LEVEL);
/* flip the wakeup trigger for any-edge triggered pads
* which are currently asserting as wakeups */
lvl ^= status;
lvl &= wake_any;
wake_level |= lvl;
writel(wake_level, pmc + PMC_WAKE_LEVEL);
/* Enable DPD sample to trigger sampling pads data and direction
* in which pad will be driven during lp0 mode*/
writel(0x1, pmc + PMC_DPD_SAMPLE);
writel(wake_enb, pmc + PMC_WAKE_MASK);
__raw_writel(mask, base + reg);
}
static void tegra_mask(struct irq_data *d)
{
tegra_gic_mask_irq(d);
tegra_legacy_mask_irq(d->irq);
if (d->irq < FIRST_LEGACY_IRQ)
return;
tegra_irq_write_mask(d->irq, ICTLR_CPU_IER_CLR);
}
static void tegra_unmask(struct irq_data *d)
{
tegra_gic_unmask_irq(d);
tegra_legacy_unmask_irq(d->irq);
if (d->irq < FIRST_LEGACY_IRQ)
return;
tegra_irq_write_mask(d->irq, ICTLR_CPU_IER_SET);
}
static void tegra_ack(struct irq_data *d)
{
tegra_legacy_force_irq_clr(d->irq);
tegra_gic_ack_irq(d);
if (d->irq < FIRST_LEGACY_IRQ)
return;
tegra_irq_write_mask(d->irq, ICTLR_CPU_IEP_FIR_CLR);
}
static void tegra_eoi(struct irq_data *d)
{
if (d->irq < FIRST_LEGACY_IRQ)
return;
tegra_irq_write_mask(d->irq, ICTLR_CPU_IEP_FIR_CLR);
}
static int tegra_retrigger(struct irq_data *d)
{
tegra_legacy_force_irq_set(d->irq);
if (d->irq < FIRST_LEGACY_IRQ)
return 0;
tegra_irq_write_mask(d->irq, ICTLR_CPU_IEP_FIR_SET);
return 1;
}
static struct irq_chip tegra_irq = {
.name = "PPI",
.irq_ack = tegra_ack,
.irq_mask = tegra_mask,
.irq_unmask = tegra_unmask,
.irq_retrigger = tegra_retrigger,
};
void __init tegra_init_irq(void)
{
struct irq_chip *gic;
unsigned int i;
int irq;
int i;
tegra_init_legacy_irq();
for (i = 0; i < NUM_ICTLRS; i++) {
void __iomem *ictlr = ictlr_reg_base[i];
writel(~0, ictlr + ICTLR_CPU_IER_CLR);
writel(0, ictlr + ICTLR_CPU_IEP_CLASS);
}
gic_arch_extn.irq_ack = tegra_ack;
gic_arch_extn.irq_eoi = tegra_eoi;
gic_arch_extn.irq_mask = tegra_mask;
gic_arch_extn.irq_unmask = tegra_unmask;
gic_arch_extn.irq_retrigger = tegra_retrigger;
gic_init(0, 29, IO_ADDRESS(TEGRA_ARM_INT_DIST_BASE),
IO_ADDRESS(TEGRA_ARM_PERIF_BASE + 0x100));
gic = irq_get_chip(29);
tegra_gic_unmask_irq = gic->irq_unmask;
tegra_gic_mask_irq = gic->irq_mask;
tegra_gic_ack_irq = gic->irq_ack;
#ifdef CONFIG_SMP
tegra_irq.irq_set_affinity = gic->irq_set_affinity;
#endif
for (i = 0; i < INT_MAIN_NR; i++) {
irq = INT_PRI_BASE + i;
irq_set_chip_and_handler(irq, &tegra_irq, handle_level_irq);
set_irq_flags(irq, IRQF_VALID);
}
}


@ -1,215 +0,0 @@
/*
* arch/arm/mach-tegra/legacy_irq.c
*
* Copyright (C) 2010 Google, Inc.
* Author: Colin Cross <ccross@android.com>
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/io.h>
#include <linux/kernel.h>
#include <mach/iomap.h>
#include <mach/irqs.h>
#include <mach/legacy_irq.h>
#define INT_SYS_NR (INT_GPIO_BASE - INT_PRI_BASE)
#define INT_SYS_SZ (INT_SEC_BASE - INT_PRI_BASE)
#define PPI_NR ((INT_SYS_NR+INT_SYS_SZ-1)/INT_SYS_SZ)
#define ICTLR_CPU_IEP_VFIQ 0x08
#define ICTLR_CPU_IEP_FIR 0x14
#define ICTLR_CPU_IEP_FIR_SET 0x18
#define ICTLR_CPU_IEP_FIR_CLR 0x1c
#define ICTLR_CPU_IER 0x20
#define ICTLR_CPU_IER_SET 0x24
#define ICTLR_CPU_IER_CLR 0x28
#define ICTLR_CPU_IEP_CLASS 0x2C
#define ICTLR_COP_IER 0x30
#define ICTLR_COP_IER_SET 0x34
#define ICTLR_COP_IER_CLR 0x38
#define ICTLR_COP_IEP_CLASS 0x3c
#define NUM_ICTLRS 4
static void __iomem *ictlr_reg_base[] = {
IO_ADDRESS(TEGRA_PRIMARY_ICTLR_BASE),
IO_ADDRESS(TEGRA_SECONDARY_ICTLR_BASE),
IO_ADDRESS(TEGRA_TERTIARY_ICTLR_BASE),
IO_ADDRESS(TEGRA_QUATERNARY_ICTLR_BASE),
};
static u32 tegra_legacy_wake_mask[4];
static u32 tegra_legacy_saved_mask[4];
/* When going into deep sleep, the CPU is powered down, taking the GIC with it
In order to wake, the wake interrupts need to be enabled in the legacy
interrupt controller. */
void tegra_legacy_unmask_irq(unsigned int irq)
{
void __iomem *base;
pr_debug("%s: %d\n", __func__, irq);
irq -= 32;
base = ictlr_reg_base[irq>>5];
writel(1 << (irq & 31), base + ICTLR_CPU_IER_SET);
}
void tegra_legacy_mask_irq(unsigned int irq)
{
void __iomem *base;
pr_debug("%s: %d\n", __func__, irq);
irq -= 32;
base = ictlr_reg_base[irq>>5];
writel(1 << (irq & 31), base + ICTLR_CPU_IER_CLR);
}
void tegra_legacy_force_irq_set(unsigned int irq)
{
void __iomem *base;
pr_debug("%s: %d\n", __func__, irq);
irq -= 32;
base = ictlr_reg_base[irq>>5];
writel(1 << (irq & 31), base + ICTLR_CPU_IEP_FIR_SET);
}
void tegra_legacy_force_irq_clr(unsigned int irq)
{
void __iomem *base;
pr_debug("%s: %d\n", __func__, irq);
irq -= 32;
base = ictlr_reg_base[irq>>5];
writel(1 << (irq & 31), base + ICTLR_CPU_IEP_FIR_CLR);
}
int tegra_legacy_force_irq_status(unsigned int irq)
{
void __iomem *base;
pr_debug("%s: %d\n", __func__, irq);
irq -= 32;
base = ictlr_reg_base[irq>>5];
return !!(readl(base + ICTLR_CPU_IEP_FIR) & (1 << (irq & 31)));
}
void tegra_legacy_select_fiq(unsigned int irq, bool fiq)
{
void __iomem *base;
pr_debug("%s: %d\n", __func__, irq);
irq -= 32;
base = ictlr_reg_base[irq>>5];
writel(fiq << (irq & 31), base + ICTLR_CPU_IEP_CLASS);
}
unsigned long tegra_legacy_vfiq(int nr)
{
void __iomem *base;
base = ictlr_reg_base[nr];
return readl(base + ICTLR_CPU_IEP_VFIQ);
}
unsigned long tegra_legacy_class(int nr)
{
void __iomem *base;
base = ictlr_reg_base[nr];
return readl(base + ICTLR_CPU_IEP_CLASS);
}
int tegra_legacy_irq_set_wake(int irq, int enable)
{
irq -= 32;
if (enable)
tegra_legacy_wake_mask[irq >> 5] |= 1 << (irq & 31);
else
tegra_legacy_wake_mask[irq >> 5] &= ~(1 << (irq & 31));
return 0;
}
void tegra_legacy_irq_set_lp1_wake_mask(void)
{
void __iomem *base;
int i;
for (i = 0; i < NUM_ICTLRS; i++) {
base = ictlr_reg_base[i];
tegra_legacy_saved_mask[i] = readl(base + ICTLR_CPU_IER);
writel(tegra_legacy_wake_mask[i], base + ICTLR_CPU_IER);
}
}
void tegra_legacy_irq_restore_mask(void)
{
void __iomem *base;
int i;
for (i = 0; i < NUM_ICTLRS; i++) {
base = ictlr_reg_base[i];
writel(tegra_legacy_saved_mask[i], base + ICTLR_CPU_IER);
}
}
void tegra_init_legacy_irq(void)
{
int i;
for (i = 0; i < NUM_ICTLRS; i++) {
void __iomem *ictlr = ictlr_reg_base[i];
writel(~0, ictlr + ICTLR_CPU_IER_CLR);
writel(0, ictlr + ICTLR_CPU_IEP_CLASS);
}
}
#ifdef CONFIG_PM
static u32 cop_ier[NUM_ICTLRS];
static u32 cpu_ier[NUM_ICTLRS];
static u32 cpu_iep[NUM_ICTLRS];
void tegra_irq_suspend(void)
{
unsigned long flags;
int i;
local_irq_save(flags);
for (i = 0; i < NUM_ICTLRS; i++) {
void __iomem *ictlr = ictlr_reg_base[i];
cpu_ier[i] = readl(ictlr + ICTLR_CPU_IER);
cpu_iep[i] = readl(ictlr + ICTLR_CPU_IEP_CLASS);
cop_ier[i] = readl(ictlr + ICTLR_COP_IER);
writel(~0, ictlr + ICTLR_COP_IER_CLR);
}
local_irq_restore(flags);
}
void tegra_irq_resume(void)
{
unsigned long flags;
int i;
local_irq_save(flags);
for (i = 0; i < NUM_ICTLRS; i++) {
void __iomem *ictlr = ictlr_reg_base[i];
writel(cpu_iep[i], ictlr + ICTLR_CPU_IEP_CLASS);
writel(~0ul, ictlr + ICTLR_CPU_IER_CLR);
writel(cpu_ier[i], ictlr + ICTLR_CPU_IER_SET);
writel(0, ictlr + ICTLR_COP_IEP_CLASS);
writel(~0ul, ictlr + ICTLR_COP_IER_CLR);
writel(cop_ier[i], ictlr + ICTLR_COP_IER_SET);
}
local_irq_restore(flags);
}
#endif


@ -23,6 +23,8 @@
#include <linux/irq.h>
#include <linux/slab.h>
#include <asm/mach/irq.h>
#include <plat/pincfg.h>
#include <mach/hardware.h>
#include <mach/gpio.h>
@ -681,13 +683,7 @@ static void __nmk_gpio_irq_handler(unsigned int irq, struct irq_desc *desc,
struct irq_chip *host_chip = irq_get_chip(irq);
unsigned int first_irq;
if (host_chip->irq_mask_ack)
host_chip->irq_mask_ack(&desc->irq_data);
else {
host_chip->irq_mask(&desc->irq_data);
if (host_chip->irq_ack)
host_chip->irq_ack(&desc->irq_data);
}
chained_irq_enter(host_chip, desc);
nmk_chip = irq_get_handler_data(irq);
first_irq = NOMADIK_GPIO_TO_IRQ(nmk_chip->chip.base);
@ -698,7 +694,7 @@ static void __nmk_gpio_irq_handler(unsigned int irq, struct irq_desc *desc,
status &= ~BIT(bit);
}
host_chip->irq_unmask(&desc->irq_data);
chained_irq_exit(host_chip, desc);
}
static void nmk_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)


@ -1137,8 +1137,9 @@ static void gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
struct gpio_bank *bank;
u32 retrigger = 0;
int unmasked = 0;
struct irq_chip *chip = irq_desc_get_chip(desc);
desc->irq_data.chip->irq_ack(&desc->irq_data);
chained_irq_enter(chip, desc);
bank = irq_get_handler_data(irq);
#ifdef CONFIG_ARCH_OMAP1
@ -1195,7 +1196,7 @@ static void gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
configured, we could unmask GPIO bank interrupt immediately */
if (!level_mask && !unmasked) {
unmasked = 1;
desc->irq_data.chip->irq_unmask(&desc->irq_data);
chained_irq_exit(chip, desc);
}
isr |= retrigger;
@ -1231,7 +1232,7 @@ static void gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
interrupt */
exit:
if (!unmasked)
desc->irq_data.chip->irq_unmask(&desc->irq_data);
chained_irq_exit(chip, desc);
}
static void gpio_irq_shutdown(struct irq_data *d)


@ -933,12 +933,16 @@ int ptrace_set_debugreg(struct task_struct *task, unsigned long addr,
if (data && !(data & DABR_TRANSLATION))
return -EIO;
#ifdef CONFIG_HAVE_HW_BREAKPOINT
if (ptrace_get_breakpoints(task) < 0)
return -ESRCH;
bp = thread->ptrace_bps[0];
if ((!data) || !(data & (DABR_DATA_WRITE | DABR_DATA_READ))) {
if (bp) {
unregister_hw_breakpoint(bp);
thread->ptrace_bps[0] = NULL;
}
ptrace_put_breakpoints(task);
return 0;
}
if (bp) {
@ -948,9 +952,12 @@ int ptrace_set_debugreg(struct task_struct *task, unsigned long addr,
(DABR_DATA_WRITE | DABR_DATA_READ),
&attr.bp_type);
ret = modify_user_hw_breakpoint(bp, &attr);
if (ret)
if (ret) {
ptrace_put_breakpoints(task);
return ret;
}
thread->ptrace_bps[0] = bp;
ptrace_put_breakpoints(task);
thread->dabr = data;
return 0;
}
@ -965,9 +972,12 @@ int ptrace_set_debugreg(struct task_struct *task, unsigned long addr,
ptrace_triggered, task);
if (IS_ERR(bp)) {
thread->ptrace_bps[0] = NULL;
ptrace_put_breakpoints(task);
return PTR_ERR(bp);
}
ptrace_put_breakpoints(task);
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
/* Move contents to the DABR register */


@ -117,7 +117,11 @@ void user_enable_single_step(struct task_struct *child)
set_tsk_thread_flag(child, TIF_SINGLESTEP);
if (ptrace_get_breakpoints(child) < 0)
return;
set_single_step(child, pc);
ptrace_put_breakpoints(child);
}
void user_disable_single_step(struct task_struct *child)


@ -698,7 +698,7 @@ cpu_dev_register(amd_cpu_dev);
*/
const int amd_erratum_400[] =
AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0xf, 0x41, 0x2, 0xff, 0xf),
AMD_OSVW_ERRATUM(1, AMD_MODEL_RANGE(0x0f, 0x4, 0x2, 0xff, 0xf),
AMD_MODEL_RANGE(0x10, 0x2, 0x1, 0xff, 0xf));
EXPORT_SYMBOL_GPL(amd_erratum_400);


@ -184,26 +184,23 @@ static __initconst const u64 snb_hw_cache_event_ids
},
},
[ C(LL ) ] = {
/*
* TBD: Need Off-core Response Performance Monitoring support
*/
[ C(OP_READ) ] = {
/* OFFCORE_RESPONSE_0.ANY_DATA.LOCAL_CACHE */
/* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
[ C(RESULT_ACCESS) ] = 0x01b7,
/* OFFCORE_RESPONSE_1.ANY_DATA.ANY_LLC_MISS */
[ C(RESULT_MISS) ] = 0x01bb,
/* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
[ C(RESULT_MISS) ] = 0x01b7,
},
[ C(OP_WRITE) ] = {
/* OFFCORE_RESPONSE_0.ANY_RFO.LOCAL_CACHE */
/* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
[ C(RESULT_ACCESS) ] = 0x01b7,
/* OFFCORE_RESPONSE_1.ANY_RFO.ANY_LLC_MISS */
[ C(RESULT_MISS) ] = 0x01bb,
/* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
[ C(RESULT_MISS) ] = 0x01b7,
},
[ C(OP_PREFETCH) ] = {
/* OFFCORE_RESPONSE_0.PREFETCH.LOCAL_CACHE */
/* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
[ C(RESULT_ACCESS) ] = 0x01b7,
/* OFFCORE_RESPONSE_1.PREFETCH.ANY_LLC_MISS */
[ C(RESULT_MISS) ] = 0x01bb,
/* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
[ C(RESULT_MISS) ] = 0x01b7,
},
},
[ C(DTLB) ] = {
@ -285,26 +282,26 @@ static __initconst const u64 westmere_hw_cache_event_ids
},
[ C(LL ) ] = {
[ C(OP_READ) ] = {
/* OFFCORE_RESPONSE_0.ANY_DATA.LOCAL_CACHE */
/* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
[ C(RESULT_ACCESS) ] = 0x01b7,
/* OFFCORE_RESPONSE_1.ANY_DATA.ANY_LLC_MISS */
[ C(RESULT_MISS) ] = 0x01bb,
/* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
[ C(RESULT_MISS) ] = 0x01b7,
},
/*
* Use RFO, not WRITEBACK, because a write miss would typically occur
* on RFO.
*/
[ C(OP_WRITE) ] = {
/* OFFCORE_RESPONSE_1.ANY_RFO.LOCAL_CACHE */
[ C(RESULT_ACCESS) ] = 0x01bb,
/* OFFCORE_RESPONSE_0.ANY_RFO.ANY_LLC_MISS */
/* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
[ C(RESULT_ACCESS) ] = 0x01b7,
/* OFFCORE_RESPONSE.ANY_RFO.ANY_LLC_MISS */
[ C(RESULT_MISS) ] = 0x01b7,
},
[ C(OP_PREFETCH) ] = {
/* OFFCORE_RESPONSE_0.PREFETCH.LOCAL_CACHE */
/* OFFCORE_RESPONSE.PREFETCH.LOCAL_CACHE */
[ C(RESULT_ACCESS) ] = 0x01b7,
/* OFFCORE_RESPONSE_1.PREFETCH.ANY_LLC_MISS */
[ C(RESULT_MISS) ] = 0x01bb,
/* OFFCORE_RESPONSE.PREFETCH.ANY_LLC_MISS */
[ C(RESULT_MISS) ] = 0x01b7,
},
},
[ C(DTLB) ] = {
@ -352,16 +349,36 @@ static __initconst const u64 westmere_hw_cache_event_ids
};
/*
* OFFCORE_RESPONSE MSR bits (subset), See IA32 SDM Vol 3 30.6.1.3
* Nehalem/Westmere MSR_OFFCORE_RESPONSE bits;
* See IA32 SDM Vol 3B 30.6.1.3
*/
#define DMND_DATA_RD (1 << 0)
#define DMND_RFO (1 << 1)
#define DMND_WB (1 << 3)
#define PF_DATA_RD (1 << 4)
#define PF_DATA_RFO (1 << 5)
#define RESP_UNCORE_HIT (1 << 8)
#define RESP_MISS (0xf600) /* non uncore hit */
#define NHM_DMND_DATA_RD (1 << 0)
#define NHM_DMND_RFO (1 << 1)
#define NHM_DMND_IFETCH (1 << 2)
#define NHM_DMND_WB (1 << 3)
#define NHM_PF_DATA_RD (1 << 4)
#define NHM_PF_DATA_RFO (1 << 5)
#define NHM_PF_IFETCH (1 << 6)
#define NHM_OFFCORE_OTHER (1 << 7)
#define NHM_UNCORE_HIT (1 << 8)
#define NHM_OTHER_CORE_HIT_SNP (1 << 9)
#define NHM_OTHER_CORE_HITM (1 << 10)
/* reserved */
#define NHM_REMOTE_CACHE_FWD (1 << 12)
#define NHM_REMOTE_DRAM (1 << 13)
#define NHM_LOCAL_DRAM (1 << 14)
#define NHM_NON_DRAM (1 << 15)
#define NHM_ALL_DRAM (NHM_REMOTE_DRAM|NHM_LOCAL_DRAM)
#define NHM_DMND_READ (NHM_DMND_DATA_RD)
#define NHM_DMND_WRITE (NHM_DMND_RFO|NHM_DMND_WB)
#define NHM_DMND_PREFETCH (NHM_PF_DATA_RD|NHM_PF_DATA_RFO)
#define NHM_L3_HIT (NHM_UNCORE_HIT|NHM_OTHER_CORE_HIT_SNP|NHM_OTHER_CORE_HITM)
#define NHM_L3_MISS (NHM_NON_DRAM|NHM_ALL_DRAM|NHM_REMOTE_CACHE_FWD)
#define NHM_L3_ACCESS (NHM_L3_HIT|NHM_L3_MISS)
static __initconst const u64 nehalem_hw_cache_extra_regs
[PERF_COUNT_HW_CACHE_MAX]
@ -370,16 +387,16 @@ static __initconst const u64 nehalem_hw_cache_extra_regs
{
[ C(LL ) ] = {
[ C(OP_READ) ] = {
[ C(RESULT_ACCESS) ] = DMND_DATA_RD|RESP_UNCORE_HIT,
[ C(RESULT_MISS) ] = DMND_DATA_RD|RESP_MISS,
[ C(RESULT_ACCESS) ] = NHM_DMND_READ|NHM_L3_ACCESS,
[ C(RESULT_MISS) ] = NHM_DMND_READ|NHM_L3_MISS,
},
[ C(OP_WRITE) ] = {
[ C(RESULT_ACCESS) ] = DMND_RFO|DMND_WB|RESP_UNCORE_HIT,
[ C(RESULT_MISS) ] = DMND_RFO|DMND_WB|RESP_MISS,
[ C(RESULT_ACCESS) ] = NHM_DMND_WRITE|NHM_L3_ACCESS,
[ C(RESULT_MISS) ] = NHM_DMND_WRITE|NHM_L3_MISS,
},
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = PF_DATA_RD|PF_DATA_RFO|RESP_UNCORE_HIT,
[ C(RESULT_MISS) ] = PF_DATA_RD|PF_DATA_RFO|RESP_MISS,
[ C(RESULT_ACCESS) ] = NHM_DMND_PREFETCH|NHM_L3_ACCESS,
[ C(RESULT_MISS) ] = NHM_DMND_PREFETCH|NHM_L3_MISS,
},
}
};
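The composed masks are what gets programmed into the offcore-response MSR, while the architectural event code stays 0x01b7 for both the access and the miss entries. As a quick check against the defines above, the demand-read miss entry expands to:

	NHM_DMND_READ | NHM_L3_MISS
		= (1 << 0) | ((1 << 12) | (1 << 13) | (1 << 14) | (1 << 15))
		= 0x0001 | 0xf000
		= 0xf001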


@ -608,6 +608,9 @@ static int ptrace_write_dr7(struct task_struct *tsk, unsigned long data)
unsigned len, type;
struct perf_event *bp;
if (ptrace_get_breakpoints(tsk) < 0)
return -ESRCH;
data &= ~DR_CONTROL_RESERVED;
old_dr7 = ptrace_get_dr7(thread->ptrace_bps);
restore:
@ -655,6 +658,9 @@ restore:
}
goto restore;
}
ptrace_put_breakpoints(tsk);
return ((orig_ret < 0) ? orig_ret : rc);
}
@ -668,10 +674,17 @@ static unsigned long ptrace_get_debugreg(struct task_struct *tsk, int n)
if (n < HBP_NUM) {
struct perf_event *bp;
if (ptrace_get_breakpoints(tsk) < 0)
return -ESRCH;
bp = thread->ptrace_bps[n];
if (!bp)
return 0;
val = bp->hw.info.address;
val = 0;
else
val = bp->hw.info.address;
ptrace_put_breakpoints(tsk);
} else if (n == 6) {
val = thread->debugreg6;
} else if (n == 7) {
@ -686,6 +699,10 @@ static int ptrace_set_breakpoint_addr(struct task_struct *tsk, int nr,
struct perf_event *bp;
struct thread_struct *t = &tsk->thread;
struct perf_event_attr attr;
int err = 0;
if (ptrace_get_breakpoints(tsk) < 0)
return -ESRCH;
if (!t->ptrace_bps[nr]) {
ptrace_breakpoint_init(&attr);
@ -709,24 +726,23 @@ static int ptrace_set_breakpoint_addr(struct task_struct *tsk, int nr,
* writing for the user. And anyway this is the previous
* behaviour.
*/
if (IS_ERR(bp))
return PTR_ERR(bp);
if (IS_ERR(bp)) {
err = PTR_ERR(bp);
goto put;
}
t->ptrace_bps[nr] = bp;
} else {
int err;
bp = t->ptrace_bps[nr];
attr = bp->attr;
attr.bp_addr = addr;
err = modify_user_hw_breakpoint(bp, &attr);
if (err)
return err;
}
return 0;
put:
ptrace_put_breakpoints(tsk);
return err;
}
/*


@ -21,26 +21,26 @@ r_base = .
/* Get our own relocated address */
call 1f
1: popl %ebx
subl $1b, %ebx
subl $(1b - r_base), %ebx
/* Compute the equivalent real-mode segment */
movl %ebx, %ecx
shrl $4, %ecx
/* Patch post-real-mode segment jump */
movw dispatch_table(%ebx,%eax,2),%ax
movw %ax, 101f(%ebx)
movw %cx, 102f(%ebx)
movw (dispatch_table - r_base)(%ebx,%eax,2),%ax
movw %ax, (101f - r_base)(%ebx)
movw %cx, (102f - r_base)(%ebx)
/* Set up the IDT for real mode. */
lidtl machine_real_restart_idt(%ebx)
lidtl (machine_real_restart_idt - r_base)(%ebx)
/*
* Set up a GDT from which we can load segment descriptors for real
* mode. The GDT is not used in real mode; it is just needed here to
* prepare the descriptors.
*/
lgdtl machine_real_restart_gdt(%ebx)
lgdtl (machine_real_restart_gdt - r_base)(%ebx)
/*
* Load the data segment registers with 16-bit compatible values


@ -306,7 +306,7 @@ int __init numa_cleanup_meminfo(struct numa_meminfo *mi)
bi->end = min(bi->end, high);
/* and there's no empty block */
if (bi->start == bi->end) {
if (bi->start >= bi->end) {
numa_remove_memblk_from(i--, mi);
continue;
}


@ -1463,6 +1463,119 @@ static int xen_pgd_alloc(struct mm_struct *mm)
return ret;
}
#ifdef CONFIG_X86_64
static __initdata u64 __last_pgt_set_rw = 0;
static __initdata u64 __pgt_buf_start = 0;
static __initdata u64 __pgt_buf_end = 0;
static __initdata u64 __pgt_buf_top = 0;
/*
* As a consequence of the commit:
*
* commit 4b239f458c229de044d6905c2b0f9fe16ed9e01e
* Author: Yinghai Lu <yinghai@kernel.org>
* Date: Fri Dec 17 16:58:28 2010 -0800
*
* x86-64, mm: Put early page table high
*
* at some point init_memory_mapping is going to reach the pagetable pages
* area and map those pages too (mapping them as normal memory that falls
* in the range of addresses passed to init_memory_mapping as argument).
* Some of those pages are already pagetable pages (they are in the range
* pgt_buf_start-pgt_buf_end) therefore they are going to be mapped RO and
* everything is fine.
* Some of these pages are not pagetable pages yet (they fall in the range
* pgt_buf_end-pgt_buf_top; for example the page at pgt_buf_end) so they
* are going to be mapped RW. When these pages become pagetable pages and
* are hooked into the pagetable, xen will find that the guest has already
* a RW mapping of them somewhere and fail the operation.
* The reason Xen requires pagetables to be RO is that the hypervisor needs
* to verify that the pagetables are valid before using them. The validation
* operations are called "pinning".
*
* In order to fix the issue we mark all the pages in the entire range
* pgt_buf_start-pgt_buf_top as RO, however when the pagetable allocation
* is completed only the range pgt_buf_start-pgt_buf_end is reserved by
* init_memory_mapping. Hence the kernel is going to crash as soon as one
* of the pages in the range pgt_buf_end-pgt_buf_top is reused (b/c those
* ranges are RO).
*
* For this reason, 'mark_rw_past_pgt' is introduced which is called _after_
* the init_memory_mapping has completed (in a perfect world we would
* call this function from init_memory_mapping, but lets ignore that).
*
* Because we are called _after_ init_memory_mapping the pgt_buf_[start,
* end,top] have all changed to new values (b/c init_memory_mapping
* is called and setting up another new page-table). Hence, the first time
* we enter this function, we save away the pgt_buf_start value and update
* the pgt_buf_[end,top].
*
* When we detect that the "old" pgt_buf_start through pgt_buf_end
* PFNs have been reserved (so memblock_x86_reserve_range has been called),
* we immediately set out to RW the "old" pgt_buf_end through pgt_buf_top.
*
* And then we update those "old" pgt_buf_[end|top] with the new ones
* so that we can redo this on the next pagetable.
*/
static __init void mark_rw_past_pgt(void) {
if (pgt_buf_end > pgt_buf_start) {
u64 addr, size;
/* Save it away. */
if (!__pgt_buf_start) {
__pgt_buf_start = pgt_buf_start;
__pgt_buf_end = pgt_buf_end;
__pgt_buf_top = pgt_buf_top;
return;
}
/* If we get the range that starts at __pgt_buf_end that means
* the range is reserved, and that in 'init_memory_mapping'
* the 'memblock_x86_reserve_range' has been called with the
* outdated __pgt_buf_start, __pgt_buf_end (the "new"
* pgt_buf_[start|end|top] refer now to a new pagetable.
* Note: we are called _after_ the pgt_buf_[..] have been
* updated.*/
addr = memblock_x86_find_in_range_size(PFN_PHYS(__pgt_buf_start),
&size, PAGE_SIZE);
/* Still not reserved, meaning 'memblock_x86_reserve_range'
* hasn't been called yet. Update the _end and _top.*/
if (addr == PFN_PHYS(__pgt_buf_start)) {
__pgt_buf_end = pgt_buf_end;
__pgt_buf_top = pgt_buf_top;
return;
}
/* OK, the area is reserved, meaning it is time for us to
* set RW for the old end->top PFNs. */
/* ..unless we had already done this. */
if (__pgt_buf_end == __last_pgt_set_rw)
return;
addr = PFN_PHYS(__pgt_buf_end);
/* set as RW the rest */
printk(KERN_DEBUG "xen: setting RW the range %llx - %llx\n",
PFN_PHYS(__pgt_buf_end), PFN_PHYS(__pgt_buf_top));
while (addr < PFN_PHYS(__pgt_buf_top)) {
make_lowmem_page_readwrite(__va(addr));
addr += PAGE_SIZE;
}
/* And update everything so that we are ready for the next
* pagetable (the one created for regions past 4GB) */
__last_pgt_set_rw = __pgt_buf_end;
__pgt_buf_start = pgt_buf_start;
__pgt_buf_end = pgt_buf_end;
__pgt_buf_top = pgt_buf_top;
}
return;
}
#else
static __init void mark_rw_past_pgt(void) { }
#endif
static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
#ifdef CONFIG_X86_64
@ -1488,6 +1601,14 @@ static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte)
{
unsigned long pfn = pte_pfn(pte);
/*
* A bit of optimization. We do not need to call the workaround
* when xen_set_pte_init is called with a PTE with 0 as PFN.
* That is b/c the pagetable at that point are just being populated
* with empty values and we can save some cycles by not calling
* the 'memblock' code.*/
if (pfn)
mark_rw_past_pgt();
/*
* If the new pfn is within the range of the newly allocated
* kernel pagetable, and it isn't being mapped into an
@ -1495,7 +1616,7 @@ static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte)
* it is RO.
*/
if (((!is_early_ioremap_ptep(ptep) &&
pfn >= pgt_buf_start && pfn < pgt_buf_end)) ||
pfn >= pgt_buf_start && pfn < pgt_buf_top)) ||
(is_early_ioremap_ptep(ptep) && pfn != (pgt_buf_end - 1)))
pte = pte_wrprotect(pte);
@ -1997,6 +2118,8 @@ __init void xen_ident_map_ISA(void)
static __init void xen_post_allocator_init(void)
{
mark_rw_past_pgt();
#ifdef CONFIG_XEN_DEBUG
pv_mmu_ops.make_pte = PV_CALLEE_SAVE(xen_make_pte_debug);
#endif


@ -777,9 +777,9 @@ static int rbd_do_request(struct request *rq,
ops,
false,
GFP_NOIO, pages, bio);
if (IS_ERR(req)) {
if (!req) {
up_read(&header->snap_rwsem);
ret = PTR_ERR(req);
ret = -ENOMEM;
goto done_pages;
}


@ -2199,7 +2199,6 @@ static int ohci_set_config_rom(struct fw_card *card,
{
struct fw_ohci *ohci;
unsigned long flags;
int ret = -EBUSY;
__be32 *next_config_rom;
dma_addr_t uninitialized_var(next_config_rom_bus);
@ -2240,22 +2239,37 @@ static int ohci_set_config_rom(struct fw_card *card,
spin_lock_irqsave(&ohci->lock, flags);
/*
* If there is not an already pending config_rom update,
* push our new allocation into the ohci->next_config_rom
* and then mark the local variable as null so that we
* won't deallocate the new buffer.
*
* OTOH, if there is a pending config_rom update, just
* use that buffer with the new config_rom data, and
* let this routine free the unused DMA allocation.
*/
if (ohci->next_config_rom == NULL) {
ohci->next_config_rom = next_config_rom;
ohci->next_config_rom_bus = next_config_rom_bus;
copy_config_rom(ohci->next_config_rom, config_rom, length);
ohci->next_header = config_rom[0];
ohci->next_config_rom[0] = 0;
reg_write(ohci, OHCI1394_ConfigROMmap,
ohci->next_config_rom_bus);
ret = 0;
next_config_rom = NULL;
}
copy_config_rom(ohci->next_config_rom, config_rom, length);
ohci->next_header = config_rom[0];
ohci->next_config_rom[0] = 0;
reg_write(ohci, OHCI1394_ConfigROMmap, ohci->next_config_rom_bus);
spin_unlock_irqrestore(&ohci->lock, flags);
/* If we didn't use the DMA allocation, delete it. */
if (next_config_rom != NULL)
dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
next_config_rom, next_config_rom_bus);
/*
* Now initiate a bus reset to have the changes take
* effect. We clean up the old config rom memory and DMA
@ -2263,13 +2277,10 @@ static int ohci_set_config_rom(struct fw_card *card,
* controller could need to access it before the bus reset
* takes effect.
*/
if (ret == 0)
fw_schedule_bus_reset(&ohci->card, true, true);
else
dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
next_config_rom, next_config_rom_bus);
return ret;
fw_schedule_bus_reset(&ohci->card, true, true);
return 0;
}
static void ohci_send_request(struct fw_card *card, struct fw_packet *packet)


@ -932,11 +932,34 @@ EXPORT_SYMBOL(drm_vblank_put);
void drm_vblank_off(struct drm_device *dev, int crtc)
{
struct drm_pending_vblank_event *e, *t;
struct timeval now;
unsigned long irqflags;
unsigned int seq;
spin_lock_irqsave(&dev->vbl_lock, irqflags);
vblank_disable_and_save(dev, crtc);
DRM_WAKEUP(&dev->vbl_queue[crtc]);
/* Send any queued vblank events, lest the natives grow disquiet */
seq = drm_vblank_count_and_time(dev, crtc, &now);
list_for_each_entry_safe(e, t, &dev->vblank_event_list, base.link) {
if (e->pipe != crtc)
continue;
DRM_DEBUG("Sending premature vblank event on disable: \
wanted %d, current %d\n",
e->event.sequence, seq);
e->event.sequence = seq;
e->event.tv_sec = now.tv_sec;
e->event.tv_usec = now.tv_usec;
drm_vblank_put(dev, e->pipe);
list_move_tail(&e->base.link, &e->base.file_priv->event_list);
wake_up_interruptible(&e->base.file_priv->event_wait);
trace_drm_vblank_event_delivered(e->base.pid, e->pipe,
e->event.sequence);
}
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
}
EXPORT_SYMBOL(drm_vblank_off);


@ -431,7 +431,7 @@ EXPORT_SYMBOL(drm_mm_search_free_in_range);
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
{
list_replace(&old->node_list, &new->node_list);
list_replace(&old->node_list, &new->hole_stack);
list_replace(&old->hole_stack, &new->hole_stack);
new->hole_follows = old->hole_follows;
new->mm = old->mm;
new->start = old->start;
@ -699,8 +699,8 @@ int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
entry->size);
total_used += entry->size;
if (entry->hole_follows) {
hole_start = drm_mm_hole_node_start(&mm->head_node);
hole_end = drm_mm_hole_node_end(&mm->head_node);
hole_start = drm_mm_hole_node_start(entry);
hole_end = drm_mm_hole_node_end(entry);
hole_size = hole_end - hole_start;
seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n",
hole_start, hole_end, hole_size);


@ -5154,8 +5154,6 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
I915_WRITE(DSPCNTR(plane), dspcntr);
POSTING_READ(DSPCNTR(plane));
if (!HAS_PCH_SPLIT(dev))
intel_enable_plane(dev_priv, plane, pipe);
ret = intel_pipe_set_base(crtc, x, y, old_fb);
@ -5605,9 +5603,9 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
intel_clock_t clock;
if ((dpll & DISPLAY_RATE_SELECT_FPA1) == 0)
fp = FP0(pipe);
fp = I915_READ(FP0(pipe));
else
fp = FP1(pipe);
fp = I915_READ(FP1(pipe));
clock.m1 = (fp & FP_M1_DIV_MASK) >> FP_M1_DIV_SHIFT;
if (IS_PINEVIEW(dev)) {
@ -6579,8 +6577,10 @@ intel_user_framebuffer_create(struct drm_device *dev,
return ERR_PTR(-ENOENT);
intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
if (!intel_fb)
if (!intel_fb) {
drm_gem_object_unreference_unlocked(&obj->base);
return ERR_PTR(-ENOMEM);
}
ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj);
if (ret) {


@ -1470,7 +1470,8 @@ intel_dp_link_down(struct intel_dp *intel_dp)
if (!HAS_PCH_CPT(dev) &&
I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.base.crtc);
struct drm_crtc *crtc = intel_dp->base.base.crtc;
/* Hardware workaround: leaving our transcoder select
* set to transcoder B while it's off will prevent the
* corresponding HDMI output on transcoder A.
@ -1485,7 +1486,19 @@ intel_dp_link_down(struct intel_dp *intel_dp)
/* Changes to enable or select take place the vblank
* after being written.
*/
intel_wait_for_vblank(dev, intel_crtc->pipe);
if (crtc == NULL) {
/* We can arrive here never having been attached
* to a CRTC, for instance, due to inheriting
* random state from the BIOS.
*
* If the pipe is not running, play safe and
* wait for the clocks to stabilise before
* continuing.
*/
POSTING_READ(intel_dp->output_reg);
msleep(50);
} else
intel_wait_for_vblank(dev, to_intel_crtc(crtc)->pipe);
}
I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);


@ -539,6 +539,9 @@ static int intel_lid_notify(struct notifier_block *nb, unsigned long val,
struct drm_device *dev = dev_priv->dev;
struct drm_connector *connector = dev_priv->int_lvds_connector;
if (dev->switch_power_state != DRM_SWITCH_POWER_ON)
return NOTIFY_OK;
/*
* check and update the status of LVDS connector after receiving
* the LID notification event.


@ -152,8 +152,6 @@ nouveau_mem_vram_fini(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
nouveau_bo_ref(NULL, &dev_priv->vga_ram);
ttm_bo_device_release(&dev_priv->ttm.bdev);
nouveau_ttm_global_release(dev_priv);


@ -768,6 +768,11 @@ static void nouveau_card_takedown(struct drm_device *dev)
engine->mc.takedown(dev);
engine->display.late_takedown(dev);
if (dev_priv->vga_ram) {
nouveau_bo_unpin(dev_priv->vga_ram);
nouveau_bo_ref(NULL, &dev_priv->vga_ram);
}
mutex_lock(&dev->struct_mutex);
ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_VRAM);
ttm_bo_clean_mm(&dev_priv->ttm.bdev, TTM_PL_TT);


@ -862,9 +862,15 @@ int evergreen_pcie_gart_enable(struct radeon_device *rdev)
SYSTEM_ACCESS_MODE_NOT_IN_SYS |
SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU |
EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5);
WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
if (rdev->flags & RADEON_IS_IGP) {
WREG32(FUS_MC_VM_MD_L1_TLB0_CNTL, tmp);
WREG32(FUS_MC_VM_MD_L1_TLB1_CNTL, tmp);
WREG32(FUS_MC_VM_MD_L1_TLB2_CNTL, tmp);
} else {
WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
}
WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
@ -2923,11 +2929,6 @@ static int evergreen_startup(struct radeon_device *rdev)
rdev->asic->copy = NULL;
dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
}
/* XXX: ontario has problems blitting to gart at the moment */
if (rdev->family == CHIP_PALM) {
rdev->asic->copy = NULL;
radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
}
/* allocate wb buffer */
r = radeon_wb_init(rdev);


@ -221,6 +221,11 @@
#define MC_VM_MD_L1_TLB0_CNTL 0x2654
#define MC_VM_MD_L1_TLB1_CNTL 0x2658
#define MC_VM_MD_L1_TLB2_CNTL 0x265C
#define FUS_MC_VM_MD_L1_TLB0_CNTL 0x265C
#define FUS_MC_VM_MD_L1_TLB1_CNTL 0x2660
#define FUS_MC_VM_MD_L1_TLB2_CNTL 0x2664
#define MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR 0x203C
#define MC_VM_SYSTEM_APERTURE_HIGH_ADDR 0x2038
#define MC_VM_SYSTEM_APERTURE_LOW_ADDR 0x2034


@ -431,7 +431,7 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
}
}
/* Acer laptop (Acer TravelMate 5730G) has an HDMI port
/* Acer laptop (Acer TravelMate 5730/5730G) has an HDMI port
* on the laptop and a DVI port on the docking station and
* both share the same encoder, hpd pin, and ddc line.
* So while the bios table is technically correct,
@ -440,7 +440,7 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
* with different crtcs which isn't possible on the hardware
* side and leaves no crtcs for LVDS or VGA.
*/
if ((dev->pdev->device == 0x95c4) &&
if (((dev->pdev->device == 0x95c4) || (dev->pdev->device == 0x9591)) &&
(dev->pdev->subsystem_vendor == 0x1025) &&
(dev->pdev->subsystem_device == 0x013c)) {
if ((*connector_type == DRM_MODE_CONNECTOR_DVII) &&
@ -1599,9 +1599,10 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct
memcpy((u8 *)edid, (u8 *)&fake_edid_record->ucFakeEDIDString[0],
fake_edid_record->ucFakeEDIDLength);
if (drm_edid_is_valid(edid))
if (drm_edid_is_valid(edid)) {
rdev->mode_info.bios_hardcoded_edid = edid;
else
rdev->mode_info.bios_hardcoded_edid_size = edid_size;
} else
kfree(edid);
}
}


@ -15,6 +15,9 @@
#define ATPX_VERSION 0
#define ATPX_GPU_PWR 2
#define ATPX_MUX_SELECT 3
#define ATPX_I2C_MUX_SELECT 4
#define ATPX_SWITCH_START 5
#define ATPX_SWITCH_END 6
#define ATPX_INTEGRATED 0
#define ATPX_DISCRETE 1
@ -149,13 +152,35 @@ static int radeon_atpx_switch_mux(acpi_handle handle, int mux_id)
return radeon_atpx_execute(handle, ATPX_MUX_SELECT, mux_id);
}
static int radeon_atpx_switch_i2c_mux(acpi_handle handle, int mux_id)
{
return radeon_atpx_execute(handle, ATPX_I2C_MUX_SELECT, mux_id);
}
static int radeon_atpx_switch_start(acpi_handle handle, int gpu_id)
{
return radeon_atpx_execute(handle, ATPX_SWITCH_START, gpu_id);
}
static int radeon_atpx_switch_end(acpi_handle handle, int gpu_id)
{
return radeon_atpx_execute(handle, ATPX_SWITCH_END, gpu_id);
}
static int radeon_atpx_switchto(enum vga_switcheroo_client_id id)
{
int gpu_id;
if (id == VGA_SWITCHEROO_IGD)
radeon_atpx_switch_mux(radeon_atpx_priv.atpx_handle, 0);
gpu_id = ATPX_INTEGRATED;
else
radeon_atpx_switch_mux(radeon_atpx_priv.atpx_handle, 1);
gpu_id = ATPX_DISCRETE;
radeon_atpx_switch_start(radeon_atpx_priv.atpx_handle, gpu_id);
radeon_atpx_switch_mux(radeon_atpx_priv.atpx_handle, gpu_id);
radeon_atpx_switch_i2c_mux(radeon_atpx_priv.atpx_handle, gpu_id);
radeon_atpx_switch_end(radeon_atpx_priv.atpx_handle, gpu_id);
return 0;
}


@ -167,9 +167,6 @@ int radeon_crtc_cursor_set(struct drm_crtc *crtc,
return -EINVAL;
}
radeon_crtc->cursor_width = width;
radeon_crtc->cursor_height = height;
obj = drm_gem_object_lookup(crtc->dev, file_priv, handle);
if (!obj) {
DRM_ERROR("Cannot find cursor object %x for crtc %d\n", handle, radeon_crtc->crtc_id);
@ -180,6 +177,9 @@ int radeon_crtc_cursor_set(struct drm_crtc *crtc,
if (ret)
goto fail;
radeon_crtc->cursor_width = width;
radeon_crtc->cursor_height = height;
radeon_lock_cursor(crtc, true);
/* XXX only 27 bit offset for legacy cursor */
radeon_set_cursor(crtc, obj, gpu_addr);


@ -234,6 +234,9 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
return -EINVAL;
}
break;
case RADEON_INFO_FUSION_GART_WORKING:
value = 1;
break;
default:
DRM_DEBUG_KMS("Invalid request %d\n", info->request);
return -EINVAL;


@ -68,8 +68,23 @@ struct wm831x_ts {
unsigned int pd_irq;
bool pressure;
bool pen_down;
struct work_struct pd_data_work;
};
static void wm831x_pd_data_work(struct work_struct *work)
{
struct wm831x_ts *wm831x_ts =
container_of(work, struct wm831x_ts, pd_data_work);
if (wm831x_ts->pen_down) {
enable_irq(wm831x_ts->data_irq);
dev_dbg(wm831x_ts->wm831x->dev, "IRQ PD->DATA done\n");
} else {
enable_irq(wm831x_ts->pd_irq);
dev_dbg(wm831x_ts->wm831x->dev, "IRQ DATA->PD done\n");
}
}
static irqreturn_t wm831x_ts_data_irq(int irq, void *irq_data)
{
struct wm831x_ts *wm831x_ts = irq_data;
@ -110,6 +125,9 @@ static irqreturn_t wm831x_ts_data_irq(int irq, void *irq_data)
}
if (!wm831x_ts->pen_down) {
/* Switch from data to pen down */
dev_dbg(wm831x->dev, "IRQ DATA->PD\n");
disable_irq_nosync(wm831x_ts->data_irq);
/* Don't need data any more */
@ -128,6 +146,10 @@ static irqreturn_t wm831x_ts_data_irq(int irq, void *irq_data)
ABS_PRESSURE, 0);
input_report_key(wm831x_ts->input_dev, BTN_TOUCH, 0);
schedule_work(&wm831x_ts->pd_data_work);
} else {
input_report_key(wm831x_ts->input_dev, BTN_TOUCH, 1);
}
input_sync(wm831x_ts->input_dev);
@ -141,6 +163,11 @@ static irqreturn_t wm831x_ts_pen_down_irq(int irq, void *irq_data)
struct wm831x *wm831x = wm831x_ts->wm831x;
int ena = 0;
if (wm831x_ts->pen_down)
return IRQ_HANDLED;
disable_irq_nosync(wm831x_ts->pd_irq);
/* Start collecting data */
if (wm831x_ts->pressure)
ena |= WM831X_TCH_Z_ENA;
@ -149,14 +176,14 @@ static irqreturn_t wm831x_ts_pen_down_irq(int irq, void *irq_data)
WM831X_TCH_X_ENA | WM831X_TCH_Y_ENA | WM831X_TCH_Z_ENA,
WM831X_TCH_X_ENA | WM831X_TCH_Y_ENA | ena);
input_report_key(wm831x_ts->input_dev, BTN_TOUCH, 1);
input_sync(wm831x_ts->input_dev);
wm831x_set_bits(wm831x, WM831X_INTERRUPT_STATUS_1,
WM831X_TCHPD_EINT, WM831X_TCHPD_EINT);
wm831x_ts->pen_down = true;
enable_irq(wm831x_ts->data_irq);
/* Switch from pen down to data */
dev_dbg(wm831x->dev, "IRQ PD->DATA\n");
schedule_work(&wm831x_ts->pd_data_work);
return IRQ_HANDLED;
}
@ -182,13 +209,28 @@ static void wm831x_ts_input_close(struct input_dev *idev)
struct wm831x_ts *wm831x_ts = input_get_drvdata(idev);
struct wm831x *wm831x = wm831x_ts->wm831x;
/* Shut the controller down, disabling all other functionality too */
wm831x_set_bits(wm831x, WM831X_TOUCH_CONTROL_1,
WM831X_TCH_ENA | WM831X_TCH_CVT_ENA |
WM831X_TCH_X_ENA | WM831X_TCH_Y_ENA |
WM831X_TCH_Z_ENA, 0);
WM831X_TCH_ENA | WM831X_TCH_X_ENA |
WM831X_TCH_Y_ENA | WM831X_TCH_Z_ENA, 0);
if (wm831x_ts->pen_down)
/* Make sure any pending IRQs are done; the above will prevent
* new ones from firing.
*/
synchronize_irq(wm831x_ts->data_irq);
synchronize_irq(wm831x_ts->pd_irq);
/* Make sure the IRQ completion work is quiesced */
flush_work_sync(&wm831x_ts->pd_data_work);
/* If we ended up with the pen down then make sure we revert
* to the pen detection state for the next time we start up.
*/
if (wm831x_ts->pen_down) {
disable_irq(wm831x_ts->data_irq);
enable_irq(wm831x_ts->pd_irq);
wm831x_ts->pen_down = false;
}
}
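
The close path above follows the standard teardown order for a driver mixing IRQs and deferred work: stop the hardware so nothing new fires, wait out in-flight handlers, then flush the work they may have queued. A minimal generic sketch (all names hypothetical, not the wm831x code itself):

/* Hypothetical teardown sketch for an IRQ-plus-work driver. */
static void example_quiesce(struct example_dev *dev)
{
	example_hw_stop(dev);		/* no new interrupts after this */
	synchronize_irq(dev->data_irq);	/* wait for running handlers */
	synchronize_irq(dev->pd_irq);
	flush_work_sync(&dev->work);	/* wait for deferred completion work */
	/* only now is it safe to rebalance enable_irq/disable_irq state */
}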
static __devinit int wm831x_ts_probe(struct platform_device *pdev)
@ -198,7 +240,7 @@ static __devinit int wm831x_ts_probe(struct platform_device *pdev)
struct wm831x_pdata *core_pdata = dev_get_platdata(pdev->dev.parent);
struct wm831x_touch_pdata *pdata = NULL;
struct input_dev *input_dev;
int error;
int error, irqf;
if (core_pdata)
pdata = core_pdata->touch;
@ -212,6 +254,7 @@ static __devinit int wm831x_ts_probe(struct platform_device *pdev)
wm831x_ts->wm831x = wm831x;
wm831x_ts->input_dev = input_dev;
INIT_WORK(&wm831x_ts->pd_data_work, wm831x_pd_data_work);
/*
* If we have a direct IRQ use it, otherwise use the interrupt
@ -270,9 +313,14 @@ static __devinit int wm831x_ts_probe(struct platform_device *pdev)
wm831x_set_bits(wm831x, WM831X_TOUCH_CONTROL_1,
WM831X_TCH_RATE_MASK, 6);
if (pdata && pdata->data_irqf)
irqf = pdata->data_irqf;
else
irqf = IRQF_TRIGGER_HIGH;
error = request_threaded_irq(wm831x_ts->data_irq,
NULL, wm831x_ts_data_irq,
IRQF_ONESHOT,
irqf | IRQF_ONESHOT,
"Touchscreen data", wm831x_ts);
if (error) {
dev_err(&pdev->dev, "Failed to request data IRQ %d: %d\n",
@ -281,9 +329,14 @@ static __devinit int wm831x_ts_probe(struct platform_device *pdev)
}
disable_irq(wm831x_ts->data_irq);
if (pdata && pdata->pd_irqf)
irqf = pdata->pd_irqf;
else
irqf = IRQF_TRIGGER_HIGH;
error = request_threaded_irq(wm831x_ts->pd_irq,
NULL, wm831x_ts_pen_down_irq,
IRQF_ONESHOT,
irqf | IRQF_ONESHOT,
"Touchscreen pen down", wm831x_ts);
if (error) {
dev_err(&pdev->dev, "Failed to request pen down IRQ %d: %d\n",

View File

@ -356,6 +356,8 @@ config DVB_USB_LME2510
select DVB_TDA826X if !DVB_FE_CUSTOMISE
select DVB_STV0288 if !DVB_FE_CUSTOMISE
select DVB_IX2505V if !DVB_FE_CUSTOMISE
select DVB_STV0299 if !DVB_FE_CUSTOMISE
select DVB_PLL if !DVB_FE_CUSTOMISE
help
Say Y here to support the LME DM04/QQBOX DVB-S USB2.0.

View File

@ -1520,6 +1520,7 @@ static int init_channel(struct ngene_channel *chan)
if (dev->ci.en && (io & NGENE_IO_TSOUT)) {
dvb_ca_en50221_init(adapter, dev->ci.en, 0, 1);
set_transfer(chan, 1);
chan->dev->channel[2].DataFormatFlags = DF_SWAP32;
set_transfer(&chan->dev->channel[2], 1);
dvb_register_device(adapter, &chan->ci_dev,
&ngene_dvbdev_ci, (void *) chan,

View File

@ -376,7 +376,7 @@ static int __devinit saa7706h_probe(struct i2c_client *client,
v4l_info(client, "chip found @ 0x%02x (%s)\n",
client->addr << 1, client->adapter->name);
state = kmalloc(sizeof(struct saa7706h_state), GFP_KERNEL);
state = kzalloc(sizeof(struct saa7706h_state), GFP_KERNEL);
if (state == NULL)
return -ENOMEM;
sd = &state->sd;
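
The same kmalloc-to-kzalloc substitution recurs in the tef6862, m52790, tda9840, tea6415c, tea6420, upd64031a and upd64083 hunks below. What it buys, as a minimal sketch:

	/* kzalloc(n, f) behaves like kmalloc(n, f) + memset(p, 0, n), so
	 * every field of the subdev state starts out zeroed rather than
	 * holding whatever the allocator left behind.
	 */
	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (state == NULL)
		return -ENOMEM;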

View File

@ -176,7 +176,7 @@ static int __devinit tef6862_probe(struct i2c_client *client,
v4l_info(client, "chip found @ 0x%02x (%s)\n",
client->addr << 1, client->adapter->name);
state = kmalloc(sizeof(struct tef6862_state), GFP_KERNEL);
state = kzalloc(sizeof(struct tef6862_state), GFP_KERNEL);
if (state == NULL)
return -ENOMEM;
state->freq = TEF6862_LO_FREQ;

View File

@ -46,7 +46,7 @@
#define MOD_AUTHOR "Jarod Wilson <jarod@wilsonet.com>"
#define MOD_DESC "Driver for SoundGraph iMON MultiMedia IR/Display"
#define MOD_NAME "imon"
#define MOD_VERSION "0.9.2"
#define MOD_VERSION "0.9.3"
#define DISPLAY_MINOR_BASE 144
#define DEVICE_NAME "lcd%d"
@ -460,8 +460,9 @@ static int display_close(struct inode *inode, struct file *file)
}
/**
* Sends a packet to the device -- this function must be called
* with ictx->lock held.
* Sends a packet to the device -- this function must be called with
* ictx->lock held, or its unlock/lock sequence while waiting for tx
* to complete can lead to a deadlock.
*/
static int send_packet(struct imon_context *ictx)
{
@ -991,12 +992,21 @@ static void imon_touch_display_timeout(unsigned long data)
* the iMON remotes, and those used by the Windows MCE remotes (which is
* really just RC-6), but only one or the other at a time, as the signals
* are decoded onboard the receiver.
*
* This function gets called two different ways, one way is from
* rc_register_device, for initial protocol selection/setup, and the other is
* via a userspace-initiated protocol change request, either by direct sysfs
* prodding or by something like ir-keytable. In the rc_register_device case,
* the imon context lock is already held, but when initiated from userspace,
* it is not, so we must acquire it prior to calling send_packet, which
* requires that the lock is held.
*/
static int imon_ir_change_protocol(struct rc_dev *rc, u64 rc_type)
{
int retval;
struct imon_context *ictx = rc->priv;
struct device *dev = ictx->dev;
bool unlock = false;
unsigned char ir_proto_packet[] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x86 };
@ -1029,6 +1039,11 @@ static int imon_ir_change_protocol(struct rc_dev *rc, u64 rc_type)
memcpy(ictx->usb_tx_buf, &ir_proto_packet, sizeof(ir_proto_packet));
if (!mutex_is_locked(&ictx->lock)) {
unlock = true;
mutex_lock(&ictx->lock);
}
retval = send_packet(ictx);
if (retval)
goto out;
@ -1037,6 +1052,9 @@ static int imon_ir_change_protocol(struct rc_dev *rc, u64 rc_type)
ictx->pad_mouse = false;
out:
if (unlock)
mutex_unlock(&ictx->lock);
return retval;
}
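
One caveat on the conditional-lock pattern described in the comment above: mutex_is_locked() only reports that some task holds the mutex, not that the current task does, so the test is safe here only because rc_register_device is the single other caller and it already holds ictx->lock. Condensed into a sketch:

	bool unlock = false;

	/* register path already holds ictx->lock; the sysfs/ir-keytable
	 * path does not, so take it only in that case */
	if (!mutex_is_locked(&ictx->lock)) {
		unlock = true;
		mutex_lock(&ictx->lock);
	}

	retval = send_packet(ictx);	/* must run with ictx->lock held */

	if (unlock)
		mutex_unlock(&ictx->lock);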
@ -2134,6 +2152,7 @@ static struct imon_context *imon_init_intf0(struct usb_interface *intf)
goto rdev_setup_failed;
}
mutex_unlock(&ictx->lock);
return ictx;
rdev_setup_failed:
@ -2205,6 +2224,7 @@ static struct imon_context *imon_init_intf1(struct usb_interface *intf,
goto urb_submit_failed;
}
mutex_unlock(&ictx->lock);
return ictx;
urb_submit_failed:
@ -2299,6 +2319,8 @@ static int __devinit imon_probe(struct usb_interface *interface,
usb_set_intfdata(interface, ictx);
if (ifnum == 0) {
mutex_lock(&ictx->lock);
if (product == 0xffdc && ictx->rf_device) {
sysfs_err = sysfs_create_group(&interface->dev.kobj,
&imon_rf_attr_group);
@ -2309,13 +2331,14 @@ static int __devinit imon_probe(struct usb_interface *interface,
if (ictx->display_supported)
imon_init_display(ictx, interface);
mutex_unlock(&ictx->lock);
}
dev_info(dev, "iMON device (%04x:%04x, intf%d) on "
"usb<%d:%d> initialized\n", vendor, product, ifnum,
usbdev->bus->busnum, usbdev->devnum);
mutex_unlock(&ictx->lock);
mutex_unlock(&driver_lock);
return 0;

View File

@ -36,6 +36,7 @@
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/input.h>
#include <linux/bitops.h>

View File

@ -220,6 +220,8 @@ static struct usb_device_id mceusb_dev_table[] = {
{ USB_DEVICE(VENDOR_PHILIPS, 0x206c) },
/* Philips/Spinel plus IR transceiver for ASUS */
{ USB_DEVICE(VENDOR_PHILIPS, 0x2088) },
/* Philips IR transceiver (Dell branded) */
{ USB_DEVICE(VENDOR_PHILIPS, 0x2093) },
/* Realtek MCE IR Receiver and card reader */
{ USB_DEVICE(VENDOR_REALTEK, 0x0161),
.driver_info = MULTIFUNCTION },

View File

@ -707,7 +707,8 @@ static void ir_close(struct input_dev *idev)
{
struct rc_dev *rdev = input_get_drvdata(idev);
rdev->close(rdev);
if (rdev)
rdev->close(rdev);
}
/* class for /sys/class/rc */
@ -733,6 +734,7 @@ static struct {
{ RC_TYPE_SONY, "sony" },
{ RC_TYPE_RC5_SZ, "rc-5-sz" },
{ RC_TYPE_LIRC, "lirc" },
{ RC_TYPE_OTHER, "other" },
};
#define PROTO_NONE "none"

View File

@ -174,7 +174,7 @@ static int m52790_probe(struct i2c_client *client,
v4l_info(client, "chip found @ 0x%x (%s)\n",
client->addr << 1, client->adapter->name);
state = kmalloc(sizeof(struct m52790_state), GFP_KERNEL);
state = kzalloc(sizeof(struct m52790_state), GFP_KERNEL);
if (state == NULL)
return -ENOMEM;

View File

@ -171,7 +171,7 @@ static int tda9840_probe(struct i2c_client *client,
v4l_info(client, "chip found @ 0x%x (%s)\n",
client->addr << 1, client->adapter->name);
sd = kmalloc(sizeof(struct v4l2_subdev), GFP_KERNEL);
sd = kzalloc(sizeof(struct v4l2_subdev), GFP_KERNEL);
if (sd == NULL)
return -ENOMEM;
v4l2_i2c_subdev_init(sd, client, &tda9840_ops);

View File

@ -152,7 +152,7 @@ static int tea6415c_probe(struct i2c_client *client,
v4l_info(client, "chip found @ 0x%x (%s)\n",
client->addr << 1, client->adapter->name);
sd = kmalloc(sizeof(struct v4l2_subdev), GFP_KERNEL);
sd = kzalloc(sizeof(struct v4l2_subdev), GFP_KERNEL);
if (sd == NULL)
return -ENOMEM;
v4l2_i2c_subdev_init(sd, client, &tea6415c_ops);

View File

@ -125,7 +125,7 @@ static int tea6420_probe(struct i2c_client *client,
v4l_info(client, "chip found @ 0x%x (%s)\n",
client->addr << 1, client->adapter->name);
sd = kmalloc(sizeof(struct v4l2_subdev), GFP_KERNEL);
sd = kzalloc(sizeof(struct v4l2_subdev), GFP_KERNEL);
if (sd == NULL)
return -ENOMEM;
v4l2_i2c_subdev_init(sd, client, &tea6420_ops);

View File

@ -230,7 +230,7 @@ static int upd64031a_probe(struct i2c_client *client,
v4l_info(client, "chip found @ 0x%x (%s)\n",
client->addr << 1, client->adapter->name);
state = kmalloc(sizeof(struct upd64031a_state), GFP_KERNEL);
state = kzalloc(sizeof(struct upd64031a_state), GFP_KERNEL);
if (state == NULL)
return -ENOMEM;
sd = &state->sd;

View File

@ -202,7 +202,7 @@ static int upd64083_probe(struct i2c_client *client,
v4l_info(client, "chip found @ 0x%x (%s)\n",
client->addr << 1, client->adapter->name);
state = kmalloc(sizeof(struct upd64083_state), GFP_KERNEL);
state = kzalloc(sizeof(struct upd64083_state), GFP_KERNEL);
if (state == NULL)
return -ENOMEM;
sd = &state->sd;

View File

@ -25,7 +25,6 @@
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/gpio.h>
#include <linux/regulator/consumer.h>
#include <plat/usb.h>
#define USBHS_DRIVER_NAME "usbhs-omap"
@ -700,8 +699,7 @@ static int usbhs_enable(struct device *dev)
dev_dbg(dev, "starting TI HSUSB Controller\n");
if (!pdata) {
dev_dbg(dev, "missing platform_data\n");
ret = -ENODEV;
goto end_enable;
return -ENODEV;
}
spin_lock_irqsave(&omap->lock, flags);
@ -915,7 +913,8 @@ static int usbhs_enable(struct device *dev)
end_count:
omap->count++;
goto end_enable;
spin_unlock_irqrestore(&omap->lock, flags);
return 0;
err_tll:
if (pdata->ehci_data->phy_reset) {
@ -931,8 +930,6 @@ err_tll:
clk_disable(omap->usbhost_fs_fck);
clk_disable(omap->usbhost_hs_fck);
clk_disable(omap->usbhost_ick);
end_enable:
spin_unlock_irqrestore(&omap->lock, flags);
return ret;
}

View File

@ -284,6 +284,7 @@ int mmc_add_card(struct mmc_card *card)
type = "SD-combo";
if (mmc_card_blockaddr(card))
type = "SDHC-combo";
break;
default:
type = "?";
break;

View File

@ -94,7 +94,7 @@ static void mmc_host_clk_gate_delayed(struct mmc_host *host)
spin_unlock_irqrestore(&host->clk_lock, flags);
return;
}
mutex_lock(&host->clk_gate_mutex);
mmc_claim_host(host);
spin_lock_irqsave(&host->clk_lock, flags);
if (!host->clk_requests) {
spin_unlock_irqrestore(&host->clk_lock, flags);
@ -104,7 +104,7 @@ static void mmc_host_clk_gate_delayed(struct mmc_host *host)
pr_debug("%s: gated MCI clock\n", mmc_hostname(host));
}
spin_unlock_irqrestore(&host->clk_lock, flags);
mutex_unlock(&host->clk_gate_mutex);
mmc_release_host(host);
}
/*
@ -130,7 +130,7 @@ void mmc_host_clk_ungate(struct mmc_host *host)
{
unsigned long flags;
mutex_lock(&host->clk_gate_mutex);
mmc_claim_host(host);
spin_lock_irqsave(&host->clk_lock, flags);
if (host->clk_gated) {
spin_unlock_irqrestore(&host->clk_lock, flags);
@ -140,7 +140,7 @@ void mmc_host_clk_ungate(struct mmc_host *host)
}
host->clk_requests++;
spin_unlock_irqrestore(&host->clk_lock, flags);
mutex_unlock(&host->clk_gate_mutex);
mmc_release_host(host);
}
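
The switch from clk_gate_mutex to mmc_claim_host()/mmc_release_host() in the two functions above serializes clock gating against normal request processing instead of a dedicated lock, which is why the mutex_init() below can go away. The resulting critical-section shape, sketched:

	mmc_claim_host(host);	/* was: mutex_lock(&host->clk_gate_mutex) */
	spin_lock_irqsave(&host->clk_lock, flags);
	/* ... inspect and update host->clk_gated / host->clk_requests ... */
	spin_unlock_irqrestore(&host->clk_lock, flags);
	mmc_release_host(host);	/* was: mutex_unlock(&host->clk_gate_mutex) */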
/**
@ -215,7 +215,6 @@ static inline void mmc_host_clk_init(struct mmc_host *host)
host->clk_gated = false;
INIT_WORK(&host->clk_gate_work, mmc_host_clk_gate_work);
spin_lock_init(&host->clk_lock);
mutex_init(&host->clk_gate_mutex);
}
/**

View File

@ -832,7 +832,7 @@ static irqreturn_t mmc_omap_irq(int irq, void *dev_id)
return IRQ_HANDLED;
}
if (end_command)
if (end_command && host->cmd)
mmc_omap_cmd_done(host, host->cmd);
if (host->data != NULL) {
if (transfer_error)

View File

@ -957,6 +957,7 @@ static struct sdhci_pci_slot * __devinit sdhci_pci_probe_slot(
host->ioaddr = pci_ioremap_bar(pdev, bar);
if (!host->ioaddr) {
dev_err(&pdev->dev, "failed to remap registers\n");
ret = -ENOMEM;
goto release;
}

View File

@ -1334,6 +1334,13 @@ static void sdhci_tasklet_finish(unsigned long param)
host = (struct sdhci_host*)param;
/*
* If this tasklet gets rescheduled while running, it will
* be run again afterwards but without any active request.
*/
if (!host->mrq)
return;
spin_lock_irqsave(&host->lock, flags);
del_timer(&host->timer);
@ -1345,7 +1352,7 @@ static void sdhci_tasklet_finish(unsigned long param)
* upon error conditions.
*/
if (!(host->flags & SDHCI_DEVICE_DEAD) &&
(mrq->cmd->error ||
((mrq->cmd && mrq->cmd->error) ||
(mrq->data && (mrq->data->error ||
(mrq->data->stop && mrq->data->stop->error))) ||
(host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST))) {
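
This file gains two defensive checks: the early return guards against the tasklet being rescheduled and re-run with no request pending, and the added mrq->cmd test avoids dereferencing a command that was never issued. Condensed into a sketch:

	bool needs_reset;

	if (!host->mrq)		/* tasklet respun with nothing to do */
		return;

	needs_reset = !(host->flags & SDHCI_DEVICE_DEAD) &&
		      ((mrq->cmd && mrq->cmd->error) ||	/* NULL-safe */
		       (mrq->data && mrq->data->error) ||
		       (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST));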

View File

@ -728,15 +728,15 @@ static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
tmio_mmc_set_clock(host, ios->clock);
/* Power sequence - OFF -> UP -> ON */
if (ios->power_mode == MMC_POWER_OFF || !ios->clock) {
if (ios->power_mode == MMC_POWER_UP) {
/* power up SD bus */
if (host->set_pwr)
host->set_pwr(host->pdev, 1);
} else if (ios->power_mode == MMC_POWER_OFF || !ios->clock) {
/* power down SD bus */
if (ios->power_mode == MMC_POWER_OFF && host->set_pwr)
host->set_pwr(host->pdev, 0);
tmio_mmc_clk_stop(host);
} else if (ios->power_mode == MMC_POWER_UP) {
/* power up SD bus */
if (host->set_pwr)
host->set_pwr(host->pdev, 1);
} else {
/* start bus clock */
tmio_mmc_clk_start(host);
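
Reading the interleaved lines above old-then-new, the reordered handler comes out as the following sketch; checking MMC_POWER_UP first guarantees the bus is powered before the clock branches run:

	/* Power sequence - OFF -> UP -> ON */
	if (ios->power_mode == MMC_POWER_UP) {
		/* power up SD bus */
		if (host->set_pwr)
			host->set_pwr(host->pdev, 1);
	} else if (ios->power_mode == MMC_POWER_OFF || !ios->clock) {
		/* power down SD bus */
		if (ios->power_mode == MMC_POWER_OFF && host->set_pwr)
			host->set_pwr(host->pdev, 0);
		tmio_mmc_clk_stop(host);
	} else {
		/* start bus clock */
		tmio_mmc_clk_start(host);
	}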

View File

@ -585,8 +585,9 @@ static bool eeepc_wlan_rfkill_blocked(struct eeepc_laptop *eeepc)
return true;
}
static void eeepc_rfkill_hotplug(struct eeepc_laptop *eeepc)
static void eeepc_rfkill_hotplug(struct eeepc_laptop *eeepc, acpi_handle handle)
{
struct pci_dev *port;
struct pci_dev *dev;
struct pci_bus *bus;
bool blocked = eeepc_wlan_rfkill_blocked(eeepc);
@ -599,9 +600,16 @@ static void eeepc_rfkill_hotplug(struct eeepc_laptop *eeepc)
mutex_lock(&eeepc->hotplug_lock);
if (eeepc->hotplug_slot) {
bus = pci_find_bus(0, 1);
port = acpi_get_pci_dev(handle);
if (!port) {
pr_warning("Unable to find port\n");
goto out_unlock;
}
bus = port->subordinate;
if (!bus) {
pr_warning("Unable to find PCI bus 1?\n");
pr_warning("Unable to find PCI bus?\n");
goto out_unlock;
}
@ -609,6 +617,7 @@ static void eeepc_rfkill_hotplug(struct eeepc_laptop *eeepc)
pr_err("Unable to read PCI config space?\n");
goto out_unlock;
}
absent = (l == 0xffffffff);
if (blocked != absent) {
@ -647,6 +656,17 @@ out_unlock:
mutex_unlock(&eeepc->hotplug_lock);
}
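
The hotplug rework above stops hard-coding PCI bus 1 and instead resolves the bus from the ACPI notification handle. In sketch form (acpi_get_pci_dev() returns a referenced device, which the complete function presumably drops with pci_dev_put(); that line is outside the quoted hunks):

	struct pci_dev *port;
	struct pci_bus *bus;

	port = acpi_get_pci_dev(handle);	/* ACPI node -> root-port pci_dev */
	if (!port)
		return;
	bus = port->subordinate;		/* bus behind that root port */
	/* ... probe config space on 'bus' as before ... */
	pci_dev_put(port);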
static void eeepc_rfkill_hotplug_update(struct eeepc_laptop *eeepc, char *node)
{
acpi_status status = AE_OK;
acpi_handle handle;
status = acpi_get_handle(NULL, node, &handle);
if (ACPI_SUCCESS(status))
eeepc_rfkill_hotplug(eeepc, handle);
}
static void eeepc_rfkill_notify(acpi_handle handle, u32 event, void *data)
{
struct eeepc_laptop *eeepc = data;
@ -654,7 +674,7 @@ static void eeepc_rfkill_notify(acpi_handle handle, u32 event, void *data)
if (event != ACPI_NOTIFY_BUS_CHECK)
return;
eeepc_rfkill_hotplug(eeepc);
eeepc_rfkill_hotplug(eeepc, handle);
}
static int eeepc_register_rfkill_notifier(struct eeepc_laptop *eeepc,
@ -672,6 +692,11 @@ static int eeepc_register_rfkill_notifier(struct eeepc_laptop *eeepc,
eeepc);
if (ACPI_FAILURE(status))
pr_warning("Failed to register notify on %s\n", node);
/*
* Refresh pci hotplug in case the rfkill state was
* changed during setup.
*/
eeepc_rfkill_hotplug(eeepc, handle);
} else
return -ENODEV;
@ -693,6 +718,12 @@ static void eeepc_unregister_rfkill_notifier(struct eeepc_laptop *eeepc,
if (ACPI_FAILURE(status))
pr_err("Error removing rfkill notify handler %s\n",
node);
/*
* Refresh pci hotplug in case the rfkill
* state was changed after
* eeepc_unregister_rfkill_notifier()
*/
eeepc_rfkill_hotplug(eeepc, handle);
}
}
@ -816,11 +847,7 @@ static void eeepc_rfkill_exit(struct eeepc_laptop *eeepc)
rfkill_destroy(eeepc->wlan_rfkill);
eeepc->wlan_rfkill = NULL;
}
/*
* Refresh pci hotplug in case the rfkill state was changed after
* eeepc_unregister_rfkill_notifier()
*/
eeepc_rfkill_hotplug(eeepc);
if (eeepc->hotplug_slot)
pci_hp_deregister(eeepc->hotplug_slot);
@ -889,11 +916,6 @@ static int eeepc_rfkill_init(struct eeepc_laptop *eeepc)
eeepc_register_rfkill_notifier(eeepc, "\\_SB.PCI0.P0P5");
eeepc_register_rfkill_notifier(eeepc, "\\_SB.PCI0.P0P6");
eeepc_register_rfkill_notifier(eeepc, "\\_SB.PCI0.P0P7");
/*
* Refresh pci hotplug in case the rfkill state was changed during
* setup.
*/
eeepc_rfkill_hotplug(eeepc);
exit:
if (result && result != -ENODEV)
@ -928,8 +950,11 @@ static int eeepc_hotk_restore(struct device *device)
struct eeepc_laptop *eeepc = dev_get_drvdata(device);
/* Refresh both wlan rfkill state and pci hotplug */
if (eeepc->wlan_rfkill)
eeepc_rfkill_hotplug(eeepc);
if (eeepc->wlan_rfkill) {
eeepc_rfkill_hotplug_update(eeepc, "\\_SB.PCI0.P0P5");
eeepc_rfkill_hotplug_update(eeepc, "\\_SB.PCI0.P0P6");
eeepc_rfkill_hotplug_update(eeepc, "\\_SB.PCI0.P0P7");
}
if (eeepc->bluetooth_rfkill)
rfkill_set_sw_state(eeepc->bluetooth_rfkill,

View File

@ -934,6 +934,14 @@ static ssize_t sony_nc_sysfs_store(struct device *dev,
/*
* Backlight device
*/
struct sony_backlight_props {
struct backlight_device *dev;
int handle;
u8 offset;
u8 maxlvl;
};
static struct sony_backlight_props sony_bl_props;
static int sony_backlight_update_status(struct backlight_device *bd)
{
return acpi_callsetfunc(sony_nc_acpi_handle, "SBRT",
@ -954,21 +962,26 @@ static int sony_nc_get_brightness_ng(struct backlight_device *bd)
{
int result;
int *handle = (int *)bl_get_data(bd);
struct sony_backlight_props *sdev =
(struct sony_backlight_props *)bl_get_data(bd);
sony_call_snc_handle(*handle, 0x0200, &result);
sony_call_snc_handle(sdev->handle, 0x0200, &result);
return result & 0xff;
return (result & 0xff) - sdev->offset;
}
static int sony_nc_update_status_ng(struct backlight_device *bd)
{
int value, result;
int *handle = (int *)bl_get_data(bd);
struct sony_backlight_props *sdev =
(struct sony_backlight_props *)bl_get_data(bd);
value = bd->props.brightness;
sony_call_snc_handle(*handle, 0x0100 | (value << 16), &result);
value = bd->props.brightness + sdev->offset;
if (sony_call_snc_handle(sdev->handle, 0x0100 | (value << 16), &result))
return -EIO;
return sony_nc_get_brightness_ng(bd);
return value;
}
static const struct backlight_ops sony_backlight_ops = {
@ -981,8 +994,6 @@ static const struct backlight_ops sony_backlight_ng_ops = {
.update_status = sony_nc_update_status_ng,
.get_brightness = sony_nc_get_brightness_ng,
};
static int backlight_ng_handle;
static struct backlight_device *sony_backlight_device;
/*
* New SNC-only Vaios event mapping to driver known keys
@ -1549,6 +1560,75 @@ static void sony_nc_kbd_backlight_resume(void)
&ignore);
}
static void sony_nc_backlight_ng_read_limits(int handle,
struct sony_backlight_props *props)
{
int offset;
acpi_status status;
u8 brlvl, i;
u8 min = 0xff, max = 0x00;
struct acpi_object_list params;
union acpi_object in_obj;
union acpi_object *lvl_enum;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
props->handle = handle;
props->offset = 0;
props->maxlvl = 0xff;
offset = sony_find_snc_handle(handle);
if (offset < 0)
return;
/* Try to read the boundaries from the ACPI tables; if that fails, the
* defaults above should be reasonable.
*/
params.count = 1;
params.pointer = &in_obj;
in_obj.type = ACPI_TYPE_INTEGER;
in_obj.integer.value = offset;
status = acpi_evaluate_object(sony_nc_acpi_handle, "SN06", &params,
&buffer);
if (ACPI_FAILURE(status))
return;
lvl_enum = (union acpi_object *) buffer.pointer;
if (!lvl_enum) {
pr_err("No SN06 return object.");
return;
}
if (lvl_enum->type != ACPI_TYPE_BUFFER) {
pr_err("Invalid SN06 return object 0x%.2x\n",
lvl_enum->type);
goto out_invalid;
}
/* The buffer lists the available brightness levels; entries 0 to 8 of
* the array hold brightness values, while the other entries are used by
* ALS control.
*/
for (i = 0; i < 9 && i < lvl_enum->buffer.length; i++) {
brlvl = *(lvl_enum->buffer.pointer + i);
dprintk("Brightness level: %d\n", brlvl);
if (!brlvl)
break;
if (brlvl > max)
max = brlvl;
if (brlvl < min)
min = brlvl;
}
props->offset = min;
props->maxlvl = max;
dprintk("Brightness levels: min=%d max=%d\n", props->offset,
props->maxlvl);
out_invalid:
kfree(buffer.pointer);
return;
}
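
A worked example of how these limits feed the backlight registration below, assuming a hypothetical SN06 level table:

	/* Suppose SN06 returns { 2, 3, 4, 5, 6, 7, 8, 9, 0, ... }: the scan
	 * stops at the 0, giving min = 2 and max = 9, so
	 *   props->offset  = 2
	 *   props->maxlvl  = 9
	 *   max_brightness = 9 - 2 = 7     (userspace sees 0..7)
	 * get_brightness then reports (raw & 0xff) - 2, and update_status
	 * writes brightness + 2 back through the SNC handle.
	 */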
static void sony_nc_backlight_setup(void)
{
acpi_handle unused;
@ -1557,14 +1637,14 @@ static void sony_nc_backlight_setup(void)
struct backlight_properties props;
if (sony_find_snc_handle(0x12f) != -1) {
backlight_ng_handle = 0x12f;
ops = &sony_backlight_ng_ops;
max_brightness = 0xff;
sony_nc_backlight_ng_read_limits(0x12f, &sony_bl_props);
max_brightness = sony_bl_props.maxlvl - sony_bl_props.offset;
} else if (sony_find_snc_handle(0x137) != -1) {
backlight_ng_handle = 0x137;
ops = &sony_backlight_ng_ops;
max_brightness = 0xff;
sony_nc_backlight_ng_read_limits(0x137, &sony_bl_props);
max_brightness = sony_bl_props.maxlvl - sony_bl_props.offset;
} else if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "GBRT",
&unused))) {
@ -1577,22 +1657,22 @@ static void sony_nc_backlight_setup(void)
memset(&props, 0, sizeof(struct backlight_properties));
props.type = BACKLIGHT_PLATFORM;
props.max_brightness = max_brightness;
sony_backlight_device = backlight_device_register("sony", NULL,
&backlight_ng_handle,
ops, &props);
sony_bl_props.dev = backlight_device_register("sony", NULL,
&sony_bl_props,
ops, &props);
if (IS_ERR(sony_backlight_device)) {
pr_warning(DRV_PFX "unable to register backlight device\n");
sony_backlight_device = NULL;
if (IS_ERR(sony_bl_props.dev)) {
pr_warn(DRV_PFX "unable to register backlight device\n");
sony_bl_props.dev = NULL;
} else
sony_backlight_device->props.brightness =
ops->get_brightness(sony_backlight_device);
sony_bl_props.dev->props.brightness =
ops->get_brightness(sony_bl_props.dev);
}
static void sony_nc_backlight_cleanup(void)
{
if (sony_backlight_device)
backlight_device_unregister(sony_backlight_device);
if (sony_bl_props.dev)
backlight_device_unregister(sony_bl_props.dev);
}
static int sony_nc_add(struct acpi_device *device)
@ -2590,7 +2670,7 @@ static long sonypi_misc_ioctl(struct file *fp, unsigned int cmd,
mutex_lock(&spic_dev.lock);
switch (cmd) {
case SONYPI_IOCGBRT:
if (sony_backlight_device == NULL) {
if (sony_bl_props.dev == NULL) {
ret = -EIO;
break;
}
@ -2603,7 +2683,7 @@ static long sonypi_misc_ioctl(struct file *fp, unsigned int cmd,
ret = -EFAULT;
break;
case SONYPI_IOCSBRT:
if (sony_backlight_device == NULL) {
if (sony_bl_props.dev == NULL) {
ret = -EIO;
break;
}
@ -2617,8 +2697,8 @@ static long sonypi_misc_ioctl(struct file *fp, unsigned int cmd,
break;
}
/* sync the backlight device status */
sony_backlight_device->props.brightness =
sony_backlight_get_brightness(sony_backlight_device);
sony_bl_props.dev->props.brightness =
sony_backlight_get_brightness(sony_bl_props.dev);
break;
case SONYPI_IOCGBAT1CAP:
if (ec_read16(SONYPI_BAT1_FULL, &val16)) {

View File

@ -128,7 +128,8 @@ enum {
};
/* ACPI HIDs */
#define TPACPI_ACPI_HKEY_HID "IBM0068"
#define TPACPI_ACPI_IBM_HKEY_HID "IBM0068"
#define TPACPI_ACPI_LENOVO_HKEY_HID "LEN0068"
#define TPACPI_ACPI_EC_HID "PNP0C09"
/* Input IDs */
@ -3879,7 +3880,8 @@ errexit:
}
static const struct acpi_device_id ibm_htk_device_ids[] = {
{TPACPI_ACPI_HKEY_HID, 0},
{TPACPI_ACPI_IBM_HKEY_HID, 0},
{TPACPI_ACPI_LENOVO_HKEY_HID, 0},
{"", 0},
};

View File

@ -400,10 +400,15 @@ static inline int scsi_host_is_busy(struct Scsi_Host *shost)
static void scsi_run_queue(struct request_queue *q)
{
struct scsi_device *sdev = q->queuedata;
struct Scsi_Host *shost = sdev->host;
struct Scsi_Host *shost;
LIST_HEAD(starved_list);
unsigned long flags;
/* if the device is dead, sdev will be NULL, so no queue to run */
if (!sdev)
return;
shost = sdev->host;
if (scsi_target(sdev)->single_lun)
scsi_single_lun_run(sdev);

View File

@ -2288,7 +2288,3 @@ err_dev:
free_netdev(dev);
return NULL;
}
EXPORT_SYMBOL(init_ft1000_card);
EXPORT_SYMBOL(stop_ft1000_card);
EXPORT_SYMBOL(flarion_ft1000_cnt);

View File

@ -214,6 +214,3 @@ void ft1000CleanupProc(struct net_device *dev)
remove_proc_entry(FT1000_PROC, init_net.proc_net);
unregister_netdevice_notifier(&ft1000_netdev_notifier);
}
EXPORT_SYMBOL(ft1000InitProc);
EXPORT_SYMBOL(ft1000CleanupProc);

View File

@ -1,6 +1,6 @@
config DRM_PSB
tristate "Intel GMA500 KMS Framebuffer"
depends on DRM && PCI
depends on DRM && PCI && X86
select FB_CFB_COPYAREA
select FB_CFB_FILLRECT
select FB_CFB_IMAGEBLIT

View File

@ -28,6 +28,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/file.h>
#include <asm/mrst.h>
#include <sound/pcm.h>

View File

@ -29,6 +29,7 @@
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/file.h>
#include "intel_sst.h"
#include "intelmid_snd_control.h"

View File

@ -12,6 +12,7 @@
*/
#include <linux/cs5535.h>
#include <linux/gpio.h>
#include <linux/delay.h>
#include <asm/olpc.h>
#include "olpc_dcon.h"

View File

@ -28,7 +28,7 @@
#define RTSX_STOR "rts_pstor: "
#if CONFIG_RTS_PSTOR_DEBUG
#ifdef CONFIG_RTS_PSTOR_DEBUG
#define RTSX_DEBUGP(x...) printk(KERN_DEBUG RTSX_STOR x)
#define RTSX_DEBUGPN(x...) printk(KERN_DEBUG x)
#define RTSX_DEBUGPX(x...) printk(x)

View File

@ -23,6 +23,7 @@
#include <linux/blkdev.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include "rtsx.h"
#include "rtsx_transport.h"

View File

@ -24,6 +24,7 @@
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/workqueue.h>
#include <linux/vmalloc.h>
#include "rtsx.h"
#include "rtsx_transport.h"
@ -1311,11 +1312,11 @@ void rtsx_polling_func(struct rtsx_chip *chip)
#ifdef SUPPORT_OCP
if (CHECK_LUN_MODE(chip, SD_MS_2LUN)) {
#if CONFIG_RTS_PSTOR_DEBUG
#ifdef CONFIG_RTS_PSTOR_DEBUG
if (chip->ocp_stat & (SD_OC_NOW | SD_OC_EVER | MS_OC_NOW | MS_OC_EVER)) {
RTSX_DEBUGP("Over current, OCPSTAT is 0x%x\n", chip->ocp_stat);
}
#endif
#endif
if (chip->ocp_stat & (SD_OC_NOW | SD_OC_EVER)) {
if (chip->card_exist & SD_CARD) {

View File

@ -23,6 +23,7 @@
#include <linux/blkdev.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include "rtsx.h"
#include "rtsx_transport.h"

View File

@ -909,7 +909,7 @@ static int sd_change_phase(struct rtsx_chip *chip, u8 sample_point, u8 tune_dir)
RTSX_WRITE_REG(chip, SD_VPCLK0_CTL, PHASE_NOT_RESET, PHASE_NOT_RESET);
RTSX_WRITE_REG(chip, CLK_CTL, CHANGE_CLK, 0);
} else {
#if CONFIG_RTS_PSTOR_DEBUG
#ifdef CONFIG_RTS_PSTOR_DEBUG
rtsx_read_register(chip, SD_VP_CTL, &val);
RTSX_DEBUGP("SD_VP_CTL: 0x%x\n", val);
rtsx_read_register(chip, SD_DCMPS_CTL, &val);
@ -958,7 +958,7 @@ static int sd_change_phase(struct rtsx_chip *chip, u8 sample_point, u8 tune_dir)
return STATUS_SUCCESS;
Fail:
#if CONFIG_RTS_PSTOR_DEBUG
#ifdef CONFIG_RTS_PSTOR_DEBUG
rtsx_read_register(chip, SD_VP_CTL, &val);
RTSX_DEBUGP("SD_VP_CTL: 0x%x\n", val);
rtsx_read_register(chip, SD_DCMPS_CTL, &val);

View File

@ -82,7 +82,7 @@ do { \
#define TRACE_GOTO(chip, label) goto label
#endif
#if CONFIG_RTS_PSTOR_DEBUG
#ifdef CONFIG_RTS_PSTOR_DEBUG
static inline void rtsx_dump(u8 *buf, int buf_len)
{
int i;

View File

@ -23,6 +23,7 @@
#include <linux/blkdev.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include "rtsx.h"
#include "rtsx_transport.h"

View File

@ -2,6 +2,7 @@ config SOLO6X10
tristate "Softlogic 6x10 MPEG codec cards"
depends on PCI && VIDEO_DEV && SND && I2C
select VIDEOBUF_DMA_SG
select SND_PCM
---help---
This driver supports the Softlogic based MPEG-4 and h.264 codec
cards.

View File

@ -876,8 +876,10 @@ static void vhci_shutdown_connection(struct usbip_device *ud)
}
/* kill threads related to this sdev, if v.c. exists */
kthread_stop(vdev->ud.tcp_rx);
kthread_stop(vdev->ud.tcp_tx);
if (vdev->ud.tcp_rx)
kthread_stop(vdev->ud.tcp_rx);
if (vdev->ud.tcp_tx)
kthread_stop(vdev->ud.tcp_tx);
usbip_uinfo("stop threads\n");
@ -949,9 +951,6 @@ static void vhci_device_init(struct vhci_device *vdev)
{
memset(vdev, 0, sizeof(*vdev));
vdev->ud.tcp_rx = kthread_create(vhci_rx_loop, &vdev->ud, "vhci_rx");
vdev->ud.tcp_tx = kthread_create(vhci_tx_loop, &vdev->ud, "vhci_tx");
vdev->ud.side = USBIP_VHCI;
vdev->ud.status = VDEV_ST_NULL;
/* vdev->ud.lock = SPIN_LOCK_UNLOCKED; */
@ -1139,7 +1138,7 @@ static int vhci_hcd_probe(struct platform_device *pdev)
usbip_uerr("create hcd failed\n");
return -ENOMEM;
}
hcd->has_tt = 1;
/* this is private data for vhci_hcd */
the_controller = hcd_to_vhci(hcd);

View File

@ -21,6 +21,7 @@
#include "vhci.h"
#include <linux/in.h>
#include <linux/kthread.h>
/* TODO: refine locking? */
@ -220,13 +221,13 @@ static ssize_t store_attach(struct device *dev, struct device_attribute *attr,
vdev->ud.tcp_socket = socket;
vdev->ud.status = VDEV_ST_NOTASSIGNED;
wake_up_process(vdev->ud.tcp_rx);
wake_up_process(vdev->ud.tcp_tx);
spin_unlock(&vdev->ud.lock);
spin_unlock(&the_controller->lock);
/* end the lock */
vdev->ud.tcp_rx = kthread_run(vhci_rx_loop, &vdev->ud, "vhci_rx");
vdev->ud.tcp_tx = kthread_run(vhci_tx_loop, &vdev->ud, "vhci_tx");
rh_port_connect(rhport, speed);
return count;
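
The thread lifecycle also changes shape here: instead of creating the rx/tx kthreads at device-init time and waking them at attach, they are now created and started together at attach time. Since kthread_run() is kthread_create() plus wake_up_process(), the NULL checks added to vhci_shutdown_connection() above cover the window in which no threads were ever started. Sketched:

	/* old: at init */
	vdev->ud.tcp_rx = kthread_create(vhci_rx_loop, &vdev->ud, "vhci_rx");
	/* ... later, at attach */
	wake_up_process(vdev->ud.tcp_rx);

	/* new: only once a connection exists */
	vdev->ud.tcp_rx = kthread_run(vhci_rx_loop, &vdev->ud, "vhci_rx");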

View File

@ -273,7 +273,7 @@ exit:
}
int prism2_set_default_key(struct wiphy *wiphy, struct net_device *dev,
u8 key_index)
u8 key_index, bool unicast, bool multicast)
{
wlandevice_t *wlandev = dev->ml_priv;

View File

@ -40,6 +40,7 @@
#include <linux/slab.h>
#include <linux/usb/ulpi.h>
#include <plat/usb.h>
#include <linux/regulator/consumer.h>
/* EHCI Register Set */
#define EHCI_INSNREG04 (0xA0)
@ -118,6 +119,8 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev)
struct ehci_hcd *omap_ehci;
int ret = -ENODEV;
int irq;
int i;
char supply[7];
if (usb_disabled())
return -ENODEV;
@ -158,6 +161,23 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev)
hcd->rsrc_len = resource_size(res);
hcd->regs = regs;
/* get ehci regulator and enable */
for (i = 0 ; i < OMAP3_HS_USB_PORTS ; i++) {
if (pdata->port_mode[i] != OMAP_EHCI_PORT_MODE_PHY) {
pdata->regulator[i] = NULL;
continue;
}
snprintf(supply, sizeof(supply), "hsusb%d", i);
pdata->regulator[i] = regulator_get(dev, supply);
if (IS_ERR(pdata->regulator[i])) {
pdata->regulator[i] = NULL;
dev_dbg(dev,
"failed to get ehci port%d regulator\n", i);
} else {
regulator_enable(pdata->regulator[i]);
}
}
ret = omap_usbhs_enable(dev);
if (ret) {
dev_err(dev, "failed to start usbhs with err %d\n", ret);

View File

@ -1633,6 +1633,7 @@ static int isp1760_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
ints[i].qh = NULL;
ints[i].qtd = NULL;
urb->status = status;
isp1760_urb_done(hcd, urb);
if (qtd)
pe(hcd, qh, qtd);

View File

@ -777,7 +777,7 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
if (t1 != t2)
xhci_writel(xhci, t2, port_array[port_index]);
if (DEV_HIGHSPEED(t1)) {
if (hcd->speed != HCD_USB3) {
/* enable remote wake up for USB 2.0 */
u32 __iomem *addr;
u32 tmp;
@ -866,6 +866,21 @@ int xhci_bus_resume(struct usb_hcd *hcd)
temp |= PORT_LINK_STROBE | XDEV_U0;
xhci_writel(xhci, temp, port_array[port_index]);
}
/* wait for the port to enter U0 and report port link
* state change.
*/
spin_unlock_irqrestore(&xhci->lock, flags);
msleep(20);
spin_lock_irqsave(&xhci->lock, flags);
/* Clear PLC */
temp = xhci_readl(xhci, port_array[port_index]);
if (temp & PORT_PLC) {
temp = xhci_port_state_to_neutral(temp);
temp |= PORT_PLC;
xhci_writel(xhci, temp, port_array[port_index]);
}
slot_id = xhci_find_slot_id_by_port(hcd,
xhci, port_index + 1);
if (slot_id)
@ -873,7 +888,7 @@ int xhci_bus_resume(struct usb_hcd *hcd)
} else
xhci_writel(xhci, temp, port_array[port_index]);
if (DEV_HIGHSPEED(temp)) {
if (hcd->speed != HCD_USB3) {
/* disable remote wake up for USB 2.0 */
u32 __iomem *addr;
u32 tmp;
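
The PLC handling added in the resume path above follows the usual pattern for write-1-to-clear status bits: xhci_port_state_to_neutral() masks the port status down to its plain read/write bits, so the read-modify-write acknowledges only the change bit we intend to clear. As a sketch:

	temp = xhci_readl(xhci, port_array[port_index]);
	if (temp & PORT_PLC) {				/* link state changed */
		temp = xhci_port_state_to_neutral(temp);/* drop other RW1C bits */
		temp |= PORT_PLC;			/* write 1 to clear PLC */
		xhci_writel(xhci, temp, port_array[port_index]);
	}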

View File

@ -1887,11 +1887,9 @@ int usb_gadget_probe_driver(struct usb_gadget_driver *driver,
otg_set_vbus(musb->xceiv, 1);
hcd->self.uses_pio_for_control = 1;
if (musb->xceiv->last_event == USB_EVENT_NONE)
pm_runtime_put(musb->controller);
}
if (musb->xceiv->last_event == USB_EVENT_NONE)
pm_runtime_put(musb->controller);
return 0;

View File

@ -270,7 +270,7 @@ static int musb_otg_notifications(struct notifier_block *nb,
DBG(4, "VBUS Disconnect\n");
#ifdef CONFIG_USB_GADGET_MUSB_HDRC
if (is_otg_enabled(musb))
if (is_otg_enabled(musb) || is_peripheral_enabled(musb))
if (musb->gadget_driver)
#endif
{

View File

@ -775,6 +775,13 @@ get_more_pages:
ci->i_truncate_seq,
ci->i_truncate_size,
&inode->i_mtime, true, 1, 0);
if (!req) {
rc = -ENOMEM;
unlock_page(page);
break;
}
max_pages = req->r_num_pages;
alloc_page_vec(fsc, req);

View File

@ -1331,10 +1331,11 @@ static void ceph_flush_snaps(struct ceph_inode_info *ci)
}
/*
* Mark caps dirty. If inode is newly dirty, add to the global dirty
* list.
* Mark caps dirty. If inode is newly dirty, return the dirty flags.
* Caller is then responsible for calling __mark_inode_dirty with the
* returned flags value.
*/
void __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask)
int __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask)
{
struct ceph_mds_client *mdsc =
ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc;
@ -1357,7 +1358,7 @@ void __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask)
list_add(&ci->i_dirty_item, &mdsc->cap_dirty);
spin_unlock(&mdsc->cap_dirty_lock);
if (ci->i_flushing_caps == 0) {
igrab(inode);
ihold(inode);
dirty |= I_DIRTY_SYNC;
}
}
@ -1365,9 +1366,8 @@ void __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask)
if (((was | ci->i_flushing_caps) & CEPH_CAP_FILE_BUFFER) &&
(mask & CEPH_CAP_FILE_BUFFER))
dirty |= I_DIRTY_DATASYNC;
if (dirty)
__mark_inode_dirty(inode, dirty);
__cap_delay_requeue(mdsc, ci);
return dirty;
}
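
The new contract moves __mark_inode_dirty() out from under inode->i_lock: the function now just computes and returns the I_DIRTY_* flags, and every caller applies them after dropping the spinlock. The caller-side pattern, as the fs/ceph hunks below show:

	int dirty;

	spin_lock(&inode->i_lock);
	dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR);
	spin_unlock(&inode->i_lock);
	if (dirty)
		__mark_inode_dirty(inode, dirty);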
/*
@ -1991,7 +1991,7 @@ static void __take_cap_refs(struct ceph_inode_info *ci, int got)
ci->i_wr_ref++;
if (got & CEPH_CAP_FILE_BUFFER) {
if (ci->i_wrbuffer_ref == 0)
igrab(&ci->vfs_inode);
ihold(&ci->vfs_inode);
ci->i_wrbuffer_ref++;
dout("__take_cap_refs %p wrbuffer %d -> %d (?)\n",
&ci->vfs_inode, ci->i_wrbuffer_ref-1, ci->i_wrbuffer_ref);

View File

@ -734,9 +734,12 @@ retry_snap:
}
}
if (ret >= 0) {
int dirty;
spin_lock(&inode->i_lock);
__ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR);
dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_FILE_WR);
spin_unlock(&inode->i_lock);
if (dirty)
__mark_inode_dirty(inode, dirty);
}
out:

View File

@ -1567,6 +1567,7 @@ int ceph_setattr(struct dentry *dentry, struct iattr *attr)
int release = 0, dirtied = 0;
int mask = 0;
int err = 0;
int inode_dirty_flags = 0;
if (ceph_snap(inode) != CEPH_NOSNAP)
return -EROFS;
@ -1725,13 +1726,16 @@ int ceph_setattr(struct dentry *dentry, struct iattr *attr)
dout("setattr %p ATTR_FILE ... hrm!\n", inode);
if (dirtied) {
__ceph_mark_dirty_caps(ci, dirtied);
inode_dirty_flags = __ceph_mark_dirty_caps(ci, dirtied);
inode->i_ctime = CURRENT_TIME;
}
release &= issued;
spin_unlock(&inode->i_lock);
if (inode_dirty_flags)
__mark_inode_dirty(inode, inode_dirty_flags);
if (mask) {
req->r_inode = igrab(inode);
req->r_inode_drop = release;

View File

@ -506,7 +506,7 @@ static inline int __ceph_caps_dirty(struct ceph_inode_info *ci)
{
return ci->i_dirty_caps | ci->i_flushing_caps;
}
extern void __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask);
extern int __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask);
extern int ceph_caps_revoking(struct ceph_inode_info *ci, int mask);
extern int __ceph_caps_used(struct ceph_inode_info *ci);

View File

@ -703,6 +703,7 @@ int ceph_setxattr(struct dentry *dentry, const char *name,
struct ceph_inode_xattr *xattr = NULL;
int issued;
int required_blob_size;
int dirty;
if (ceph_snap(inode) != CEPH_NOSNAP)
return -EROFS;
@ -763,11 +764,12 @@ retry:
dout("setxattr %p issued %s\n", inode, ceph_cap_string(issued));
err = __set_xattr(ci, newname, name_len, newval,
val_len, 1, 1, 1, &xattr);
__ceph_mark_dirty_caps(ci, CEPH_CAP_XATTR_EXCL);
dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_XATTR_EXCL);
ci->i_xattrs.dirty = true;
inode->i_ctime = CURRENT_TIME;
spin_unlock(&inode->i_lock);
if (dirty)
__mark_inode_dirty(inode, dirty);
return err;
do_sync:
@ -810,6 +812,7 @@ int ceph_removexattr(struct dentry *dentry, const char *name)
struct ceph_vxattr_cb *vxattrs = ceph_inode_vxattrs(inode);
int issued;
int err;
int dirty;
if (ceph_snap(inode) != CEPH_NOSNAP)
return -EROFS;
@ -833,12 +836,13 @@ int ceph_removexattr(struct dentry *dentry, const char *name)
goto do_sync;
err = __remove_xattr_by_name(ceph_inode(inode), name);
__ceph_mark_dirty_caps(ci, CEPH_CAP_XATTR_EXCL);
dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_XATTR_EXCL);
ci->i_xattrs.dirty = true;
inode->i_ctime = CURRENT_TIME;
spin_unlock(&inode->i_lock);
if (dirty)
__mark_inode_dirty(inode, dirty);
return err;
do_sync:
spin_unlock(&inode->i_lock);

View File

@ -274,7 +274,8 @@ static int coalesce_t2(struct smb_hdr *psecond, struct smb_hdr *pTargetSMB)
char *data_area_of_target;
char *data_area_of_buf2;
int remaining;
__u16 byte_count, total_data_size, total_in_buf, total_in_buf2;
unsigned int byte_count, total_in_buf;
__u16 total_data_size, total_in_buf2;
total_data_size = get_unaligned_le16(&pSMBt->t2_rsp.TotalDataCount);
@ -287,7 +288,7 @@ static int coalesce_t2(struct smb_hdr *psecond, struct smb_hdr *pTargetSMB)
remaining = total_data_size - total_in_buf;
if (remaining < 0)
return -EINVAL;
return -EPROTO;
if (remaining == 0) /* nothing to do, ignore */
return 0;
@ -308,20 +309,29 @@ static int coalesce_t2(struct smb_hdr *psecond, struct smb_hdr *pTargetSMB)
data_area_of_target += total_in_buf;
/* copy second buffer into end of first buffer */
memcpy(data_area_of_target, data_area_of_buf2, total_in_buf2);
total_in_buf += total_in_buf2;
/* is the result too big for the field? */
if (total_in_buf > USHRT_MAX)
return -EPROTO;
put_unaligned_le16(total_in_buf, &pSMBt->t2_rsp.DataCount);
/* fix up the BCC */
byte_count = get_bcc_le(pTargetSMB);
byte_count += total_in_buf2;
/* is the result too big for the field? */
if (byte_count > USHRT_MAX)
return -EPROTO;
put_bcc_le(byte_count, pTargetSMB);
byte_count = pTargetSMB->smb_buf_length;
byte_count += total_in_buf2;
/* BB also add check that we are not beyond maximum buffer size */
/* don't allow buffer to overflow */
if (byte_count > CIFSMaxBufSize)
return -ENOBUFS;
pTargetSMB->smb_buf_length = byte_count;
memcpy(data_area_of_target, data_area_of_buf2, total_in_buf2);
if (remaining == total_in_buf2) {
cFYI(1, "found the last secondary response");
return 0; /* we are done */
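
The added checks in coalesce_t2() follow a standard overflow-hardening pattern: accumulate in a type wider than the on-the-wire field, validate the sum, and only then narrow it back. Sketched:

	unsigned int total = total_in_buf + total_in_buf2; /* wider than __u16 */

	if (total > USHRT_MAX)		/* would not fit back in the field */
		return -EPROTO;
	put_unaligned_le16(total, &pSMBt->t2_rsp.DataCount); /* safe to narrow */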
@ -607,59 +617,63 @@ incomplete_rcv:
list_for_each_safe(tmp, tmp2, &server->pending_mid_q) {
mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
if ((mid_entry->mid == smb_buffer->Mid) &&
(mid_entry->midState == MID_REQUEST_SUBMITTED) &&
(mid_entry->command == smb_buffer->Command)) {
if (length == 0 &&
check2ndT2(smb_buffer, server->maxBuf) > 0) {
/* We have a multipart transact2 resp */
isMultiRsp = true;
if (mid_entry->resp_buf) {
/* merge response - fix up 1st*/
if (coalesce_t2(smb_buffer,
mid_entry->resp_buf)) {
mid_entry->multiRsp =
true;
break;
} else {
/* all parts received */
mid_entry->multiEnd =
true;
goto multi_t2_fnd;
}
if (mid_entry->mid != smb_buffer->Mid ||
mid_entry->midState != MID_REQUEST_SUBMITTED ||
mid_entry->command != smb_buffer->Command) {
mid_entry = NULL;
continue;
}
if (length == 0 &&
check2ndT2(smb_buffer, server->maxBuf) > 0) {
/* We have a multipart transact2 resp */
isMultiRsp = true;
if (mid_entry->resp_buf) {
/* merge response - fix up 1st*/
length = coalesce_t2(smb_buffer,
mid_entry->resp_buf);
if (length > 0) {
length = 0;
mid_entry->multiRsp = true;
break;
} else {
if (!isLargeBuf) {
cERROR(1, "1st trans2 resp needs bigbuf");
/* BB maybe we can fix this up, switch
to already allocated large buffer? */
} else {
/* Have first buffer */
mid_entry->resp_buf =
smb_buffer;
mid_entry->largeBuf =
true;
bigbuf = NULL;
}
/* all parts received or
* packet is malformed
*/
mid_entry->multiEnd = true;
goto multi_t2_fnd;
}
} else {
if (!isLargeBuf) {
/*
* FIXME: switch to already
* allocated largebuf?
*/
cERROR(1, "1st trans2 resp "
"needs bigbuf");
} else {
/* Have first buffer */
mid_entry->resp_buf =
smb_buffer;
mid_entry->largeBuf = true;
bigbuf = NULL;
}
break;
}
mid_entry->resp_buf = smb_buffer;
mid_entry->largeBuf = isLargeBuf;
multi_t2_fnd:
if (length == 0)
mid_entry->midState =
MID_RESPONSE_RECEIVED;
else
mid_entry->midState =
MID_RESPONSE_MALFORMED;
#ifdef CONFIG_CIFS_STATS2
mid_entry->when_received = jiffies;
#endif
list_del_init(&mid_entry->qhead);
mid_entry->callback(mid_entry);
break;
}
mid_entry = NULL;
mid_entry->resp_buf = smb_buffer;
mid_entry->largeBuf = isLargeBuf;
multi_t2_fnd:
if (length == 0)
mid_entry->midState = MID_RESPONSE_RECEIVED;
else
mid_entry->midState = MID_RESPONSE_MALFORMED;
#ifdef CONFIG_CIFS_STATS2
mid_entry->when_received = jiffies;
#endif
list_del_init(&mid_entry->qhead);
mid_entry->callback(mid_entry);
break;
}
spin_unlock(&GlobalMid_Lock);

View File

@ -276,7 +276,7 @@ static void ascii_ssetup_strings(char **pbcc_area, struct cifsSesInfo *ses,
}
static void
decode_unicode_ssetup(char **pbcc_area, __u16 bleft, struct cifsSesInfo *ses,
decode_unicode_ssetup(char **pbcc_area, int bleft, struct cifsSesInfo *ses,
const struct nls_table *nls_cp)
{
int len;
@ -284,19 +284,6 @@ decode_unicode_ssetup(char **pbcc_area, __u16 bleft, struct cifsSesInfo *ses,
cFYI(1, "bleft %d", bleft);
/*
* Windows servers do not always double null terminate their final
* Unicode string. Check to see if there are an uneven number of bytes
* left. If so, then add an extra NULL pad byte to the end of the
* response.
*
* See section 2.7.2 in "Implementing CIFS" for details
*/
if (bleft % 2) {
data[bleft] = 0;
++bleft;
}
kfree(ses->serverOS);
ses->serverOS = cifs_strndup_from_ucs(data, bleft, true, nls_cp);
cFYI(1, "serverOS=%s", ses->serverOS);
@ -929,7 +916,9 @@ ssetup_ntlmssp_authenticate:
}
/* BB check if Unicode and decode strings */
if (smb_buf->Flags2 & SMBFLG2_UNICODE) {
if (bytes_remaining == 0) {
/* no string area to decode, do nothing */
} else if (smb_buf->Flags2 & SMBFLG2_UNICODE) {
/* unicode string area must be word-aligned */
if (((unsigned long) bcc_ptr - (unsigned long) smb_buf) % 2) {
++bcc_ptr;

View File

@ -1,7 +1,6 @@
config HPFS_FS
tristate "OS/2 HPFS file system support"
depends on BLOCK
depends on BROKEN || !PREEMPT
help
OS/2 is IBM's operating system for PCs, the same as Warp, and HPFS
is the file system used for organizing files on OS/2 hard disk

View File

@ -8,8 +8,6 @@
#include "hpfs_fn.h"
static int hpfs_alloc_if_possible_nolock(struct super_block *s, secno sec);
/*
* Check if a sector is allocated in bitmap
* This is really slow. Turned on only if chk==2
@ -18,9 +16,9 @@ static int hpfs_alloc_if_possible_nolock(struct super_block *s, secno sec);
static int chk_if_allocated(struct super_block *s, secno sec, char *msg)
{
struct quad_buffer_head qbh;
unsigned *bmp;
u32 *bmp;
if (!(bmp = hpfs_map_bitmap(s, sec >> 14, &qbh, "chk"))) goto fail;
if ((bmp[(sec & 0x3fff) >> 5] >> (sec & 0x1f)) & 1) {
if ((le32_to_cpu(bmp[(sec & 0x3fff) >> 5]) >> (sec & 0x1f)) & 1) {
hpfs_error(s, "sector '%s' - %08x not allocated in bitmap", msg, sec);
goto fail1;
}
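
For reference, the bitmap arithmetic used throughout these hpfs hunks decomposes a sector number into bitmap / word / bit, with a cleared bit meaning the sector is allocated. A worked example:

	/* Each bitmap covers 2^14 sectors.  For sec = 0x12345:
	 *   bitmap index = sec >> 14           = 4
	 *   word in map  = (sec & 0x3fff) >> 5 = 0x11a
	 *   bit in word  = sec & 0x1f          = 5
	 * Allocation clears the bit (bmp[w] &= ~mask) and freeing sets it
	 * (bmp[w] |= mask), so chk_if_allocated() flags an error when the
	 * bit reads back as 1.
	 */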
@ -28,7 +26,7 @@ static int chk_if_allocated(struct super_block *s, secno sec, char *msg)
if (sec >= hpfs_sb(s)->sb_dirband_start && sec < hpfs_sb(s)->sb_dirband_start + hpfs_sb(s)->sb_dirband_size) {
unsigned ssec = (sec - hpfs_sb(s)->sb_dirband_start) / 4;
if (!(bmp = hpfs_map_dnode_bitmap(s, &qbh))) goto fail;
if ((bmp[ssec >> 5] >> (ssec & 0x1f)) & 1) {
if ((le32_to_cpu(bmp[ssec >> 5]) >> (ssec & 0x1f)) & 1) {
hpfs_error(s, "sector '%s' - %08x not allocated in directory bitmap", msg, sec);
goto fail1;
}
@ -75,7 +73,6 @@ static secno alloc_in_bmp(struct super_block *s, secno near, unsigned n, unsigne
hpfs_error(s, "Bad allocation size: %d", n);
return 0;
}
lock_super(s);
if (bs != ~0x3fff) {
if (!(bmp = hpfs_map_bitmap(s, near >> 14, &qbh, "aib"))) goto uls;
} else {
@ -85,10 +82,6 @@ static secno alloc_in_bmp(struct super_block *s, secno near, unsigned n, unsigne
ret = bs + nr;
goto rt;
}
/*if (!tstbits(bmp, nr + n, n + forward)) {
ret = bs + nr + n;
goto rt;
}*/
q = nr + n; b = 0;
while ((a = tstbits(bmp, q, n + forward)) != 0) {
q += a;
@ -105,14 +98,14 @@ static secno alloc_in_bmp(struct super_block *s, secno near, unsigned n, unsigne
goto rt;
}
nr >>= 5;
/*for (i = nr + 1; i != nr; i++, i &= 0x1ff) {*/
/*for (i = nr + 1; i != nr; i++, i &= 0x1ff) */
i = nr;
do {
if (!bmp[i]) goto cont;
if (n + forward >= 0x3f && bmp[i] != -1) goto cont;
if (!le32_to_cpu(bmp[i])) goto cont;
if (n + forward >= 0x3f && le32_to_cpu(bmp[i]) != 0xffffffff) goto cont;
q = i<<5;
if (i > 0) {
unsigned k = bmp[i-1];
unsigned k = le32_to_cpu(bmp[i-1]);
while (k & 0x80000000) {
q--; k <<= 1;
}
@ -132,18 +125,17 @@ static secno alloc_in_bmp(struct super_block *s, secno near, unsigned n, unsigne
} while (i != nr);
rt:
if (ret) {
if (hpfs_sb(s)->sb_chk && ((ret >> 14) != (bs >> 14) || (bmp[(ret & 0x3fff) >> 5] | ~(((1 << n) - 1) << (ret & 0x1f))) != 0xffffffff)) {
if (hpfs_sb(s)->sb_chk && ((ret >> 14) != (bs >> 14) || (le32_to_cpu(bmp[(ret & 0x3fff) >> 5]) | ~(((1 << n) - 1) << (ret & 0x1f))) != 0xffffffff)) {
hpfs_error(s, "Allocation doesn't work! Wanted %d, allocated at %08x", n, ret);
ret = 0;
goto b;
}
bmp[(ret & 0x3fff) >> 5] &= ~(((1 << n) - 1) << (ret & 0x1f));
bmp[(ret & 0x3fff) >> 5] &= cpu_to_le32(~(((1 << n) - 1) << (ret & 0x1f)));
hpfs_mark_4buffers_dirty(&qbh);
}
b:
hpfs_brelse4(&qbh);
uls:
unlock_super(s);
return ret;
}
@ -155,7 +147,7 @@ static secno alloc_in_bmp(struct super_block *s, secno near, unsigned n, unsigne
* sectors
*/
secno hpfs_alloc_sector(struct super_block *s, secno near, unsigned n, int forward, int lock)
secno hpfs_alloc_sector(struct super_block *s, secno near, unsigned n, int forward)
{
secno sec;
int i;
@ -167,7 +159,6 @@ secno hpfs_alloc_sector(struct super_block *s, secno near, unsigned n, int forwa
forward = -forward;
f_p = 1;
}
if (lock) hpfs_lock_creation(s);
n_bmps = (sbi->sb_fs_size + 0x4000 - 1) >> 14;
if (near && near < sbi->sb_fs_size) {
if ((sec = alloc_in_bmp(s, near, n, f_p ? forward : forward/4))) goto ret;
@ -214,18 +205,17 @@ secno hpfs_alloc_sector(struct super_block *s, secno near, unsigned n, int forwa
ret:
if (sec && f_p) {
for (i = 0; i < forward; i++) {
if (!hpfs_alloc_if_possible_nolock(s, sec + i + 1)) {
if (!hpfs_alloc_if_possible(s, sec + i + 1)) {
hpfs_error(s, "Prealloc doesn't work! Wanted %d, allocated at %08x, can't allocate %d", forward, sec, i);
sec = 0;
break;
}
}
}
if (lock) hpfs_unlock_creation(s);
return sec;
}
static secno alloc_in_dirband(struct super_block *s, secno near, int lock)
static secno alloc_in_dirband(struct super_block *s, secno near)
{
unsigned nr = near;
secno sec;
@ -236,49 +226,35 @@ static secno alloc_in_dirband(struct super_block *s, secno near, int lock)
nr = sbi->sb_dirband_start + sbi->sb_dirband_size - 4;
nr -= sbi->sb_dirband_start;
nr >>= 2;
if (lock) hpfs_lock_creation(s);
sec = alloc_in_bmp(s, (~0x3fff) | nr, 1, 0);
if (lock) hpfs_unlock_creation(s);
if (!sec) return 0;
return ((sec & 0x3fff) << 2) + sbi->sb_dirband_start;
}
/* Alloc sector if it's free */
static int hpfs_alloc_if_possible_nolock(struct super_block *s, secno sec)
int hpfs_alloc_if_possible(struct super_block *s, secno sec)
{
struct quad_buffer_head qbh;
unsigned *bmp;
lock_super(s);
u32 *bmp;
if (!(bmp = hpfs_map_bitmap(s, sec >> 14, &qbh, "aip"))) goto end;
if (bmp[(sec & 0x3fff) >> 5] & (1 << (sec & 0x1f))) {
bmp[(sec & 0x3fff) >> 5] &= ~(1 << (sec & 0x1f));
if (le32_to_cpu(bmp[(sec & 0x3fff) >> 5]) & (1 << (sec & 0x1f))) {
bmp[(sec & 0x3fff) >> 5] &= cpu_to_le32(~(1 << (sec & 0x1f)));
hpfs_mark_4buffers_dirty(&qbh);
hpfs_brelse4(&qbh);
unlock_super(s);
return 1;
}
hpfs_brelse4(&qbh);
end:
unlock_super(s);
return 0;
}
int hpfs_alloc_if_possible(struct super_block *s, secno sec)
{
int r;
hpfs_lock_creation(s);
r = hpfs_alloc_if_possible_nolock(s, sec);
hpfs_unlock_creation(s);
return r;
}
/* Free sectors in bitmaps */
void hpfs_free_sectors(struct super_block *s, secno sec, unsigned n)
{
struct quad_buffer_head qbh;
unsigned *bmp;
u32 *bmp;
struct hpfs_sb_info *sbi = hpfs_sb(s);
/*printk("2 - ");*/
if (!n) return;
@ -286,26 +262,22 @@ void hpfs_free_sectors(struct super_block *s, secno sec, unsigned n)
hpfs_error(s, "Trying to free reserved sector %08x", sec);
return;
}
lock_super(s);
sbi->sb_max_fwd_alloc += n > 0xffff ? 0xffff : n;
if (sbi->sb_max_fwd_alloc > 0xffffff) sbi->sb_max_fwd_alloc = 0xffffff;
new_map:
if (!(bmp = hpfs_map_bitmap(s, sec >> 14, &qbh, "free"))) {
unlock_super(s);
return;
}
new_tst:
if ((bmp[(sec & 0x3fff) >> 5] >> (sec & 0x1f) & 1)) {
if ((le32_to_cpu(bmp[(sec & 0x3fff) >> 5]) >> (sec & 0x1f) & 1)) {
hpfs_error(s, "sector %08x not allocated", sec);
hpfs_brelse4(&qbh);
unlock_super(s);
return;
}
bmp[(sec & 0x3fff) >> 5] |= 1 << (sec & 0x1f);
bmp[(sec & 0x3fff) >> 5] |= cpu_to_le32(1 << (sec & 0x1f));
if (!--n) {
hpfs_mark_4buffers_dirty(&qbh);
hpfs_brelse4(&qbh);
unlock_super(s);
return;
}
if (!(++sec & 0x3fff)) {
@ -327,13 +299,13 @@ int hpfs_check_free_dnodes(struct super_block *s, int n)
int n_bmps = (hpfs_sb(s)->sb_fs_size + 0x4000 - 1) >> 14;
int b = hpfs_sb(s)->sb_c_bitmap & 0x0fffffff;
int i, j;
unsigned *bmp;
u32 *bmp;
struct quad_buffer_head qbh;
if ((bmp = hpfs_map_dnode_bitmap(s, &qbh))) {
for (j = 0; j < 512; j++) {
unsigned k;
if (!bmp[j]) continue;
for (k = bmp[j]; k; k >>= 1) if (k & 1) if (!--n) {
if (!le32_to_cpu(bmp[j])) continue;
for (k = le32_to_cpu(bmp[j]); k; k >>= 1) if (k & 1) if (!--n) {
hpfs_brelse4(&qbh);
return 0;
}
@ -352,10 +324,10 @@ int hpfs_check_free_dnodes(struct super_block *s, int n)
chk_bmp:
if (bmp) {
for (j = 0; j < 512; j++) {
unsigned k;
if (!bmp[j]) continue;
u32 k;
if (!le32_to_cpu(bmp[j])) continue;
for (k = 0xf; k; k <<= 4)
if ((bmp[j] & k) == k) {
if ((le32_to_cpu(bmp[j]) & k) == k) {
if (!--n) {
hpfs_brelse4(&qbh);
return 0;
@ -379,44 +351,40 @@ void hpfs_free_dnode(struct super_block *s, dnode_secno dno)
hpfs_free_sectors(s, dno, 4);
} else {
struct quad_buffer_head qbh;
unsigned *bmp;
u32 *bmp;
unsigned ssec = (dno - hpfs_sb(s)->sb_dirband_start) / 4;
lock_super(s);
if (!(bmp = hpfs_map_dnode_bitmap(s, &qbh))) {
unlock_super(s);
return;
}
bmp[ssec >> 5] |= 1 << (ssec & 0x1f);
bmp[ssec >> 5] |= cpu_to_le32(1 << (ssec & 0x1f));
hpfs_mark_4buffers_dirty(&qbh);
hpfs_brelse4(&qbh);
unlock_super(s);
}
}
struct dnode *hpfs_alloc_dnode(struct super_block *s, secno near,
dnode_secno *dno, struct quad_buffer_head *qbh,
int lock)
dnode_secno *dno, struct quad_buffer_head *qbh)
{
struct dnode *d;
if (hpfs_count_one_bitmap(s, hpfs_sb(s)->sb_dmap) > FREE_DNODES_ADD) {
if (!(*dno = alloc_in_dirband(s, near, lock)))
if (!(*dno = hpfs_alloc_sector(s, near, 4, 0, lock))) return NULL;
if (!(*dno = alloc_in_dirband(s, near)))
if (!(*dno = hpfs_alloc_sector(s, near, 4, 0))) return NULL;
} else {
if (!(*dno = hpfs_alloc_sector(s, near, 4, 0, lock)))
if (!(*dno = alloc_in_dirband(s, near, lock))) return NULL;
if (!(*dno = hpfs_alloc_sector(s, near, 4, 0)))
if (!(*dno = alloc_in_dirband(s, near))) return NULL;
}
if (!(d = hpfs_get_4sectors(s, *dno, qbh))) {
hpfs_free_dnode(s, *dno);
return NULL;
}
memset(d, 0, 2048);
d->magic = DNODE_MAGIC;
d->first_free = 52;
d->magic = cpu_to_le32(DNODE_MAGIC);
d->first_free = cpu_to_le32(52);
d->dirent[0] = 32;
d->dirent[2] = 8;
d->dirent[30] = 1;
d->dirent[31] = 255;
d->self = *dno;
d->self = cpu_to_le32(*dno);
return d;
}
@ -424,16 +392,16 @@ struct fnode *hpfs_alloc_fnode(struct super_block *s, secno near, fnode_secno *f
struct buffer_head **bh)
{
struct fnode *f;
if (!(*fno = hpfs_alloc_sector(s, near, 1, FNODE_ALLOC_FWD, 1))) return NULL;
if (!(*fno = hpfs_alloc_sector(s, near, 1, FNODE_ALLOC_FWD))) return NULL;
if (!(f = hpfs_get_sector(s, *fno, bh))) {
hpfs_free_sectors(s, *fno, 1);
return NULL;
}
memset(f, 0, 512);
f->magic = FNODE_MAGIC;
f->ea_offs = 0xc4;
f->magic = cpu_to_le32(FNODE_MAGIC);
f->ea_offs = cpu_to_le16(0xc4);
f->btree.n_free_nodes = 8;
f->btree.first_free = 8;
f->btree.first_free = cpu_to_le16(8);
return f;
}
@ -441,16 +409,16 @@ struct anode *hpfs_alloc_anode(struct super_block *s, secno near, anode_secno *a
struct buffer_head **bh)
{
struct anode *a;
if (!(*ano = hpfs_alloc_sector(s, near, 1, ANODE_ALLOC_FWD, 1))) return NULL;
if (!(*ano = hpfs_alloc_sector(s, near, 1, ANODE_ALLOC_FWD))) return NULL;
if (!(a = hpfs_get_sector(s, *ano, bh))) {
hpfs_free_sectors(s, *ano, 1);
return NULL;
}
memset(a, 0, 512);
a->magic = ANODE_MAGIC;
a->self = *ano;
a->magic = cpu_to_le32(ANODE_MAGIC);
a->self = cpu_to_le32(*ano);
a->btree.n_free_nodes = 40;
a->btree.n_used_nodes = 0;
a->btree.first_free = 8;
a->btree.first_free = cpu_to_le16(8);
return a;
}

View File

@ -22,8 +22,8 @@ secno hpfs_bplus_lookup(struct super_block *s, struct inode *inode,
if (hpfs_sb(s)->sb_chk) if (hpfs_stop_cycles(s, a, &c1, &c2, "hpfs_bplus_lookup")) return -1;
if (btree->internal) {
for (i = 0; i < btree->n_used_nodes; i++)
if (btree->u.internal[i].file_secno > sec) {
a = btree->u.internal[i].down;
if (le32_to_cpu(btree->u.internal[i].file_secno) > sec) {
a = le32_to_cpu(btree->u.internal[i].down);
brelse(bh);
if (!(anode = hpfs_map_anode(s, a, &bh))) return -1;
btree = &anode->btree;
@ -34,18 +34,18 @@ secno hpfs_bplus_lookup(struct super_block *s, struct inode *inode,
return -1;
}
for (i = 0; i < btree->n_used_nodes; i++)
if (btree->u.external[i].file_secno <= sec &&
btree->u.external[i].file_secno + btree->u.external[i].length > sec) {
a = btree->u.external[i].disk_secno + sec - btree->u.external[i].file_secno;
if (le32_to_cpu(btree->u.external[i].file_secno) <= sec &&
le32_to_cpu(btree->u.external[i].file_secno) + le32_to_cpu(btree->u.external[i].length) > sec) {
a = le32_to_cpu(btree->u.external[i].disk_secno) + sec - le32_to_cpu(btree->u.external[i].file_secno);
if (hpfs_sb(s)->sb_chk) if (hpfs_chk_sectors(s, a, 1, "data")) {
brelse(bh);
return -1;
}
if (inode) {
struct hpfs_inode_info *hpfs_inode = hpfs_i(inode);
hpfs_inode->i_file_sec = btree->u.external[i].file_secno;
hpfs_inode->i_disk_sec = btree->u.external[i].disk_secno;
hpfs_inode->i_n_secs = btree->u.external[i].length;
hpfs_inode->i_file_sec = le32_to_cpu(btree->u.external[i].file_secno);
hpfs_inode->i_disk_sec = le32_to_cpu(btree->u.external[i].disk_secno);
hpfs_inode->i_n_secs = le32_to_cpu(btree->u.external[i].length);
}
brelse(bh);
return a;
@ -83,8 +83,8 @@ secno hpfs_add_sector_to_btree(struct super_block *s, secno node, int fnod, unsi
return -1;
}
if (btree->internal) {
a = btree->u.internal[n].down;
btree->u.internal[n].file_secno = -1;
a = le32_to_cpu(btree->u.internal[n].down);
btree->u.internal[n].file_secno = cpu_to_le32(-1);
mark_buffer_dirty(bh);
brelse(bh);
if (hpfs_sb(s)->sb_chk)
@ -94,15 +94,15 @@ secno hpfs_add_sector_to_btree(struct super_block *s, secno node, int fnod, unsi
goto go_down;
}
if (n >= 0) {
if (btree->u.external[n].file_secno + btree->u.external[n].length != fsecno) {
if (le32_to_cpu(btree->u.external[n].file_secno) + le32_to_cpu(btree->u.external[n].length) != fsecno) {
hpfs_error(s, "allocated size %08x, trying to add sector %08x, %cnode %08x",
btree->u.external[n].file_secno + btree->u.external[n].length, fsecno,
le32_to_cpu(btree->u.external[n].file_secno) + le32_to_cpu(btree->u.external[n].length), fsecno,
fnod?'f':'a', node);
brelse(bh);
return -1;
}
if (hpfs_alloc_if_possible(s, se = btree->u.external[n].disk_secno + btree->u.external[n].length)) {
btree->u.external[n].length++;
if (hpfs_alloc_if_possible(s, se = le32_to_cpu(btree->u.external[n].disk_secno) + le32_to_cpu(btree->u.external[n].length))) {
btree->u.external[n].length = cpu_to_le32(le32_to_cpu(btree->u.external[n].length) + 1);
mark_buffer_dirty(bh);
brelse(bh);
return se;
@ -115,20 +115,20 @@ secno hpfs_add_sector_to_btree(struct super_block *s, secno node, int fnod, unsi
}
se = !fnod ? node : (node + 16384) & ~16383;
}
if (!(se = hpfs_alloc_sector(s, se, 1, fsecno*ALLOC_M>ALLOC_FWD_MAX ? ALLOC_FWD_MAX : fsecno*ALLOC_M<ALLOC_FWD_MIN ? ALLOC_FWD_MIN : fsecno*ALLOC_M, 1))) {
if (!(se = hpfs_alloc_sector(s, se, 1, fsecno*ALLOC_M>ALLOC_FWD_MAX ? ALLOC_FWD_MAX : fsecno*ALLOC_M<ALLOC_FWD_MIN ? ALLOC_FWD_MIN : fsecno*ALLOC_M))) {
brelse(bh);
return -1;
}
fs = n < 0 ? 0 : btree->u.external[n].file_secno + btree->u.external[n].length;
fs = n < 0 ? 0 : le32_to_cpu(btree->u.external[n].file_secno) + le32_to_cpu(btree->u.external[n].length);
if (!btree->n_free_nodes) {
up = a != node ? anode->up : -1;
up = a != node ? le32_to_cpu(anode->up) : -1;
if (!(anode = hpfs_alloc_anode(s, a, &na, &bh1))) {
brelse(bh);
hpfs_free_sectors(s, se, 1);
return -1;
}
if (a == node && fnod) {
anode->up = node;
anode->up = cpu_to_le32(node);
anode->btree.fnode_parent = 1;
anode->btree.n_used_nodes = btree->n_used_nodes;
anode->btree.first_free = btree->first_free;
@ -137,9 +137,9 @@ secno hpfs_add_sector_to_btree(struct super_block *s, secno node, int fnod, unsi
btree->internal = 1;
btree->n_free_nodes = 11;
btree->n_used_nodes = 1;
btree->first_free = (char *)&(btree->u.internal[1]) - (char *)btree;
btree->u.internal[0].file_secno = -1;
btree->u.internal[0].down = na;
btree->first_free = cpu_to_le16((char *)&(btree->u.internal[1]) - (char *)btree);
btree->u.internal[0].file_secno = cpu_to_le32(-1);
btree->u.internal[0].down = cpu_to_le32(na);
mark_buffer_dirty(bh);
} else if (!(ranode = hpfs_alloc_anode(s, /*a*/0, &ra, &bh2))) {
brelse(bh);
@ -153,15 +153,15 @@ secno hpfs_add_sector_to_btree(struct super_block *s, secno node, int fnod, unsi
btree = &anode->btree;
}
btree->n_free_nodes--; n = btree->n_used_nodes++;
btree->first_free += 12;
btree->u.external[n].disk_secno = se;
btree->u.external[n].file_secno = fs;
btree->u.external[n].length = 1;
btree->first_free = cpu_to_le16(le16_to_cpu(btree->first_free) + 12);
btree->u.external[n].disk_secno = cpu_to_le32(se);
btree->u.external[n].file_secno = cpu_to_le32(fs);
btree->u.external[n].length = cpu_to_le32(1);
mark_buffer_dirty(bh);
brelse(bh);
if ((a == node && fnod) || na == -1) return se;
c2 = 0;
while (up != -1) {
while (up != (anode_secno)-1) {
struct anode *new_anode;
if (hpfs_sb(s)->sb_chk)
if (hpfs_stop_cycles(s, up, &c1, &c2, "hpfs_add_sector_to_btree #2")) return -1;
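
Editor's note: the loop guard rewritten above compares an unsigned sector number against an all-ones "no parent" sentinel; the explicit (anode_secno)-1 cast keeps the comparison in the unsigned domain and avoids sign-compare warnings. A tiny sketch, assuming a 32-bit secno typedef:

#include <assert.h>
#include <stdint.h>

typedef uint32_t anode_secno;	/* width assumed to match the kernel's secno */

int main(void)
{
	anode_secno up = (anode_secno)-1;	/* "no parent" sentinel */

	assert(up == UINT32_MAX);		/* all-ones bit pattern */
	assert(up == (anode_secno)-1);		/* explicit, warning-free test */
	return 0;
}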
@ -174,47 +174,47 @@ secno hpfs_add_sector_to_btree(struct super_block *s, secno node, int fnod, unsi
}
if (btree->n_free_nodes) {
btree->n_free_nodes--; n = btree->n_used_nodes++;
btree->first_free += 8;
btree->u.internal[n].file_secno = -1;
btree->u.internal[n].down = na;
btree->u.internal[n-1].file_secno = fs;
btree->first_free = cpu_to_le16(le16_to_cpu(btree->first_free) + 8);
btree->u.internal[n].file_secno = cpu_to_le32(-1);
btree->u.internal[n].down = cpu_to_le32(na);
btree->u.internal[n-1].file_secno = cpu_to_le32(fs);
mark_buffer_dirty(bh);
brelse(bh);
brelse(bh2);
hpfs_free_sectors(s, ra, 1);
if ((anode = hpfs_map_anode(s, na, &bh))) {
anode->up = up;
anode->up = cpu_to_le32(up);
anode->btree.fnode_parent = up == node && fnod;
mark_buffer_dirty(bh);
brelse(bh);
}
return se;
}
up = up != node ? anode->up : -1;
btree->u.internal[btree->n_used_nodes - 1].file_secno = /*fs*/-1;
up = up != node ? le32_to_cpu(anode->up) : -1;
btree->u.internal[btree->n_used_nodes - 1].file_secno = cpu_to_le32(/*fs*/-1);
mark_buffer_dirty(bh);
brelse(bh);
a = na;
if ((new_anode = hpfs_alloc_anode(s, a, &na, &bh))) {
anode = new_anode;
/*anode->up = up != -1 ? up : ra;*/
/*anode->up = cpu_to_le32(up != -1 ? up : ra);*/
anode->btree.internal = 1;
anode->btree.n_used_nodes = 1;
anode->btree.n_free_nodes = 59;
anode->btree.first_free = 16;
anode->btree.u.internal[0].down = a;
anode->btree.u.internal[0].file_secno = -1;
anode->btree.first_free = cpu_to_le16(16);
anode->btree.u.internal[0].down = cpu_to_le32(a);
anode->btree.u.internal[0].file_secno = cpu_to_le32(-1);
mark_buffer_dirty(bh);
brelse(bh);
if ((anode = hpfs_map_anode(s, a, &bh))) {
anode->up = na;
anode->up = cpu_to_le32(na);
mark_buffer_dirty(bh);
brelse(bh);
}
} else na = a;
}
if ((anode = hpfs_map_anode(s, na, &bh))) {
anode->up = node;
anode->up = cpu_to_le32(node);
if (fnod) anode->btree.fnode_parent = 1;
mark_buffer_dirty(bh);
brelse(bh);
@ -232,14 +232,14 @@ secno hpfs_add_sector_to_btree(struct super_block *s, secno node, int fnod, unsi
}
btree = &fnode->btree;
}
ranode->up = node;
memcpy(&ranode->btree, btree, btree->first_free);
ranode->up = cpu_to_le32(node);
memcpy(&ranode->btree, btree, le16_to_cpu(btree->first_free));
if (fnod) ranode->btree.fnode_parent = 1;
ranode->btree.n_free_nodes = (ranode->btree.internal ? 60 : 40) - ranode->btree.n_used_nodes;
if (ranode->btree.internal) for (n = 0; n < ranode->btree.n_used_nodes; n++) {
struct anode *unode;
if ((unode = hpfs_map_anode(s, ranode->u.internal[n].down, &bh1))) {
unode->up = ra;
if ((unode = hpfs_map_anode(s, le32_to_cpu(ranode->u.internal[n].down), &bh1))) {
unode->up = cpu_to_le32(ra);
unode->btree.fnode_parent = 0;
mark_buffer_dirty(bh1);
brelse(bh1);
@ -248,11 +248,11 @@ secno hpfs_add_sector_to_btree(struct super_block *s, secno node, int fnod, unsi
btree->internal = 1;
btree->n_free_nodes = fnod ? 10 : 58;
btree->n_used_nodes = 2;
btree->first_free = (char *)&btree->u.internal[2] - (char *)btree;
btree->u.internal[0].file_secno = fs;
btree->u.internal[0].down = ra;
btree->u.internal[1].file_secno = -1;
btree->u.internal[1].down = na;
btree->first_free = cpu_to_le16((char *)&btree->u.internal[2] - (char *)btree);
btree->u.internal[0].file_secno = cpu_to_le32(fs);
btree->u.internal[0].down = cpu_to_le32(ra);
btree->u.internal[1].file_secno = cpu_to_le32(-1);
btree->u.internal[1].down = cpu_to_le32(na);
mark_buffer_dirty(bh);
brelse(bh);
mark_buffer_dirty(bh2);
@ -279,7 +279,7 @@ void hpfs_remove_btree(struct super_block *s, struct bplus_header *btree)
go_down:
d2 = 0;
while (btree1->internal) {
ano = btree1->u.internal[pos].down;
ano = le32_to_cpu(btree1->u.internal[pos].down);
if (level) brelse(bh);
if (hpfs_sb(s)->sb_chk)
if (hpfs_stop_cycles(s, ano, &d1, &d2, "hpfs_remove_btree #1"))
@ -290,7 +290,7 @@ void hpfs_remove_btree(struct super_block *s, struct bplus_header *btree)
pos = 0;
}
for (i = 0; i < btree1->n_used_nodes; i++)
hpfs_free_sectors(s, btree1->u.external[i].disk_secno, btree1->u.external[i].length);
hpfs_free_sectors(s, le32_to_cpu(btree1->u.external[i].disk_secno), le32_to_cpu(btree1->u.external[i].length));
go_up:
if (!level) return;
brelse(bh);
@ -298,13 +298,13 @@ void hpfs_remove_btree(struct super_block *s, struct bplus_header *btree)
if (hpfs_stop_cycles(s, ano, &c1, &c2, "hpfs_remove_btree #2")) return;
hpfs_free_sectors(s, ano, 1);
oano = ano;
ano = anode->up;
ano = le32_to_cpu(anode->up);
if (--level) {
if (!(anode = hpfs_map_anode(s, ano, &bh))) return;
btree1 = &anode->btree;
} else btree1 = btree;
for (i = 0; i < btree1->n_used_nodes; i++) {
if (btree1->u.internal[i].down == oano) {
if (le32_to_cpu(btree1->u.internal[i].down) == oano) {
if ((pos = i + 1) < btree1->n_used_nodes)
goto go_down;
else
@ -411,7 +411,7 @@ void hpfs_truncate_btree(struct super_block *s, secno f, int fno, unsigned secs)
if (fno) {
btree->n_free_nodes = 8;
btree->n_used_nodes = 0;
btree->first_free = 8;
btree->first_free = cpu_to_le16(8);
btree->internal = 0;
mark_buffer_dirty(bh);
} else hpfs_free_sectors(s, f, 1);
@ -421,22 +421,22 @@ void hpfs_truncate_btree(struct super_block *s, secno f, int fno, unsigned secs)
while (btree->internal) {
nodes = btree->n_used_nodes + btree->n_free_nodes;
for (i = 0; i < btree->n_used_nodes; i++)
if (btree->u.internal[i].file_secno >= secs) goto f;
if (le32_to_cpu(btree->u.internal[i].file_secno) >= secs) goto f;
brelse(bh);
hpfs_error(s, "internal btree %08x doesn't end with -1", node);
return;
f:
for (j = i + 1; j < btree->n_used_nodes; j++)
hpfs_ea_remove(s, btree->u.internal[j].down, 1, 0);
hpfs_ea_remove(s, le32_to_cpu(btree->u.internal[j].down), 1, 0);
btree->n_used_nodes = i + 1;
btree->n_free_nodes = nodes - btree->n_used_nodes;
btree->first_free = 8 + 8 * btree->n_used_nodes;
btree->first_free = cpu_to_le16(8 + 8 * btree->n_used_nodes);
mark_buffer_dirty(bh);
if (btree->u.internal[i].file_secno == secs) {
if (btree->u.internal[i].file_secno == cpu_to_le32(secs)) {
brelse(bh);
return;
}
node = btree->u.internal[i].down;
node = le32_to_cpu(btree->u.internal[i].down);
brelse(bh);
if (hpfs_sb(s)->sb_chk)
if (hpfs_stop_cycles(s, node, &c1, &c2, "hpfs_truncate_btree"))
@ -446,25 +446,25 @@ void hpfs_truncate_btree(struct super_block *s, secno f, int fno, unsigned secs)
}
nodes = btree->n_used_nodes + btree->n_free_nodes;
for (i = 0; i < btree->n_used_nodes; i++)
if (btree->u.external[i].file_secno + btree->u.external[i].length >= secs) goto ff;
if (le32_to_cpu(btree->u.external[i].file_secno) + le32_to_cpu(btree->u.external[i].length) >= secs) goto ff;
brelse(bh);
return;
ff:
if (secs <= btree->u.external[i].file_secno) {
if (secs <= le32_to_cpu(btree->u.external[i].file_secno)) {
hpfs_error(s, "there is an allocation error in file %08x, sector %08x", f, secs);
if (i) i--;
}
else if (btree->u.external[i].file_secno + btree->u.external[i].length > secs) {
hpfs_free_sectors(s, btree->u.external[i].disk_secno + secs -
btree->u.external[i].file_secno, btree->u.external[i].length
- secs + btree->u.external[i].file_secno); /* I hope gcc optimizes this :-) */
btree->u.external[i].length = secs - btree->u.external[i].file_secno;
else if (le32_to_cpu(btree->u.external[i].file_secno) + le32_to_cpu(btree->u.external[i].length) > secs) {
hpfs_free_sectors(s, le32_to_cpu(btree->u.external[i].disk_secno) + secs -
le32_to_cpu(btree->u.external[i].file_secno), le32_to_cpu(btree->u.external[i].length)
- secs + le32_to_cpu(btree->u.external[i].file_secno)); /* I hope gcc optimizes this :-) */
btree->u.external[i].length = cpu_to_le32(secs - le32_to_cpu(btree->u.external[i].file_secno));
}
for (j = i + 1; j < btree->n_used_nodes; j++)
hpfs_free_sectors(s, btree->u.external[j].disk_secno, btree->u.external[j].length);
hpfs_free_sectors(s, le32_to_cpu(btree->u.external[j].disk_secno), le32_to_cpu(btree->u.external[j].length));
btree->n_used_nodes = i + 1;
btree->n_free_nodes = nodes - btree->n_used_nodes;
btree->first_free = 8 + 12 * btree->n_used_nodes;
btree->first_free = cpu_to_le16(8 + 12 * btree->n_used_nodes);
mark_buffer_dirty(bh);
brelse(bh);
}
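
Editor's note: the first_free updates in the truncate path above encode the B+ header layout: an 8-byte header followed by 8-byte internal entries or 12-byte external (extent) entries, so the first free offset is 8 + n * entry_size, matching the literals 8 + 8*n and 8 + 12*n in the diff. A sketch with assumed struct shapes mirroring the on-disk format:

#include <assert.h>
#include <stdint.h>

struct internal_entry { uint32_t file_secno, down; };		   /* 8 bytes */
struct external_entry { uint32_t file_secno, length, disk_secno; }; /* 12 bytes */

static uint16_t first_free_internal(unsigned n_used)
{
	return (uint16_t)(8 + 8 * n_used);
}

static uint16_t first_free_external(unsigned n_used)
{
	return (uint16_t)(8 + 12 * n_used);
}

int main(void)
{
	assert(sizeof(struct internal_entry) == 8);
	assert(sizeof(struct external_entry) == 12);
	/* One used internal entry gives 16, as set on the new anode above. */
	assert(first_free_internal(1) == 16);
	assert(first_free_external(2) == 32);
	return 0;
}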
@ -480,12 +480,12 @@ void hpfs_remove_fnode(struct super_block *s, fnode_secno fno)
struct extended_attribute *ea_end;
if (!(fnode = hpfs_map_fnode(s, fno, &bh))) return;
if (!fnode->dirflag) hpfs_remove_btree(s, &fnode->btree);
else hpfs_remove_dtree(s, fnode->u.external[0].disk_secno);
else hpfs_remove_dtree(s, le32_to_cpu(fnode->u.external[0].disk_secno));
ea_end = fnode_end_ea(fnode);
for (ea = fnode_ea(fnode); ea < ea_end; ea = next_ea(ea))
if (ea->indirect)
hpfs_ea_remove(s, ea_sec(ea), ea->anode, ea_len(ea));
hpfs_ea_ext_remove(s, fnode->ea_secno, fnode->ea_anode, fnode->ea_size_l);
hpfs_ea_ext_remove(s, le32_to_cpu(fnode->ea_secno), fnode->ea_anode, le32_to_cpu(fnode->ea_size_l));
brelse(bh);
hpfs_free_sectors(s, fno, 1);
}

View File

@ -9,22 +9,6 @@
#include <linux/slab.h>
#include "hpfs_fn.h"
void hpfs_lock_creation(struct super_block *s)
{
#ifdef DEBUG_LOCKS
printk("lock creation\n");
#endif
mutex_lock(&hpfs_sb(s)->hpfs_creation_de);
}
void hpfs_unlock_creation(struct super_block *s)
{
#ifdef DEBUG_LOCKS
printk("unlock creation\n");
#endif
mutex_unlock(&hpfs_sb(s)->hpfs_creation_de);
}
/* Map a sector into a buffer and return pointers to it and to the buffer. */
void *hpfs_map_sector(struct super_block *s, unsigned secno, struct buffer_head **bhp,
@ -32,6 +16,8 @@ void *hpfs_map_sector(struct super_block *s, unsigned secno, struct buffer_head
{
struct buffer_head *bh;
hpfs_lock_assert(s);
cond_resched();
*bhp = bh = sb_bread(s, secno);
@ -50,6 +36,8 @@ void *hpfs_get_sector(struct super_block *s, unsigned secno, struct buffer_head
struct buffer_head *bh;
/*return hpfs_map_sector(s, secno, bhp, 0);*/
hpfs_lock_assert(s);
cond_resched();
if ((*bhp = bh = sb_getblk(s, secno)) != NULL) {
@ -70,6 +58,8 @@ void *hpfs_map_4sectors(struct super_block *s, unsigned secno, struct quad_buffe
struct buffer_head *bh;
char *data;
hpfs_lock_assert(s);
cond_resched();
if (secno & 3) {
@ -125,6 +115,8 @@ void *hpfs_get_4sectors(struct super_block *s, unsigned secno,
{
cond_resched();
hpfs_lock_assert(s);
if (secno & 3) {
printk("HPFS: hpfs_get_4sectors: unaligned read\n");
return NULL;
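
Editor's note: the buffer.c hunks drop the per-creation lock pair in favor of a single filesystem lock, asserted via hpfs_lock_assert() on every mapping path, with cond_resched() before potentially slow block I/O. The userspace analog below sketches that assert-held pattern; the pthread helper is an approximation (valid as written for a single-threaded check against a non-recursive mutex), not the kernel's implementation.

#include <assert.h>
#include <pthread.h>
#include <sched.h>

static pthread_mutex_t fs_lock = PTHREAD_MUTEX_INITIALIZER;

static void lock_assert_held(pthread_mutex_t *m)
{
	/* If trylock succeeds, the caller did NOT hold the lock: a bug. */
	if (pthread_mutex_trylock(m) == 0) {
		pthread_mutex_unlock(m);
		assert(!"caller must hold the filesystem lock");
	}
}

static void map_sector(void)
{
	lock_assert_held(&fs_lock);
	sched_yield();	/* stand-in for cond_resched() before block I/O */
	/* ... sb_bread()-like work would go here ... */
}

int main(void)
{
	pthread_mutex_lock(&fs_lock);
	map_sector();	/* fine: lock held across the mapping */
	pthread_mutex_unlock(&fs_lock);
	return 0;
}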

Some files were not shown because too many files have changed in this diff