ASoC: Fixes for v3.15

A smattering of device-specific fixes; nothing stands out here except
 for the multiplatform fixes for Samsung and the device IDs being added
 by Stephen Warren - there are no real code changes from those, and they
 make device enumeration with DT more robust.
 -----BEGIN PGP SIGNATURE-----
 Version: GnuPG v1
 
 iQIcBAABAgAGBQJTRVqxAAoJELSic+t+oim9Sy8P/06CSOkwoY0ofxkRiusqFUCY
 S5lKwRCQrVkMKL7Begjv7L5Dr7xDdjGGl8P36BqysCUUm/hiRHIOwcUtEYdWwmon
 XgUt+vdjz9qVoVilqfPjF7PDGqqDwspOJljaC/9dIylx6mXYKcpdkFzloiTlBC+x
 hZb34YEc+BdYB0BHH7jYvkUV3U4UOeJbOYbmXY940y6aFngnP9dIRwEDlyw2QFbO
 Rpcc+7qmMezALJcKAHBYwv45/08EMZ3OwaZ+4LhUXeeBdP54ZQMJNZiSii4gCAcr
 hRe6ox9ixDxLQ4x09Gv8XfTRL5BozrjtiK+9hxMi1tAtfej5W0YJ9bccmka1yXWw
 /z3LmqARGJPUoAfhVA/ip5l3rWJT1QJzL008yBgsPNZIbm7Fo3meCYwvrI9igOW9
 qpovx4YZoo2edIxMHZAySBnS0WfOJYfwPmZ1wWbS1e+JBu/rIOk8AMQRzNphQ2W+
 YJvIIjajX52vvDoXynaZCAfYfivBYzcuoZllgjyhxNWRj+HC8Wer1Prmv9g5bgHU
 mWW8g2HnkbYh+TP0oFUANLQrGXRbaVok3kQe2oSoUbFkqovhllI7d9za4+xUjEXo
 qZkFaTcn/VjH1Gi2uzZiCWQpy41bLXda0JvRUF9UhWPeKG8OEMN5Lc/aistIiCuh
 Z0crLkK3DmM7Ih0dxm3l
 =Z3Yl
 -----END PGP SIGNATURE-----

Merge tag 'asoc-v3.15-5' of git://git.kernel.org/pub/scm/linux/kernel/git/broonie/sound into for-linus

ASoC: Fixes for v3.15

A smattering of device-specific fixes; nothing stands out here except
for the multiplatform fixes for Samsung and the device IDs being added
by Stephen Warren - there are no real code changes from those, and they
make device enumeration with DT more robust.
commit 50487c3a4a
Author: Takashi Iwai
Date:   2014-04-09 17:50:02 +02:00

85 changed files with 956 additions and 479 deletions


@ -20,15 +20,6 @@ Required properties:
have.
- interrupt-parent: The phandle for the interrupt controller that
services interrupts for this device.
- fsl,mode: The operating mode for the SSI interface.
"i2s-slave" - I2S mode, SSI is clock slave
"i2s-master" - I2S mode, SSI is clock master
"lj-slave" - left-justified mode, SSI is clock slave
"lj-master" - l.j. mode, SSI is clock master
"rj-slave" - right-justified mode, SSI is clock slave
"rj-master" - r.j., SSI is clock master
"ac97-slave" - AC97 mode, SSI is clock slave
"ac97-master" - AC97 mode, SSI is clock master
- fsl,playback-dma: Phandle to a node for the DMA channel to use for
playback of audio. This is typically dictated by SOC
design. See the notes below.
@ -47,6 +38,9 @@ Required properties:
be connected together, and SRFS and STFS be connected
together. This would still allow different sample sizes,
but not different sample rates.
- clocks: "ipg" - Required clock for the SSI unit
"baud" - Required clock for SSI master mode. Otherwise this
clock is not used
The ac97 link bindings are also required if ac97 is used. See
Documentation/devicetree/bindings/sound/soc-ac97link.txt for the necessary
@ -64,6 +58,15 @@ Optional properties:
Documentation/devicetree/bindings/dma/dma.txt.
- dma-names: Two dmas have to be defined, "tx" and "rx", if fsl,imx-fiq
is not defined.
- fsl,mode: The operating mode for the SSI interface.
"i2s-slave" - I2S mode, SSI is clock slave
"i2s-master" - I2S mode, SSI is clock master
"lj-slave" - left-justified mode, SSI is clock slave
"lj-master" - l.j. mode, SSI is clock master
"rj-slave" - right-justified mode, SSI is clock slave
"rj-master" - r.j., SSI is clock master
"ac97-slave" - AC97 mode, SSI is clock slave
"ac97-master" - AC97 mode, SSI is clock master
Child 'codec' node required properties:
- compatible: Compatible list, contains the name of the codec
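
The hunk above moves "fsl,mode" from the required to the optional
properties. As a rough sketch (not code from this series), a driver
could probe the property with the standard of_property_read_string()
helper; the function name and the AC97 check here are illustrative:

#include <linux/of.h>
#include <linux/string.h>

/* Sketch only: report whether an SSI node asks for AC97 mode.
 * Returns false when the (now optional) property is absent. */
static bool ssi_node_wants_ac97(struct device_node *np)
{
	const char *mode;

	if (of_property_read_string(np, "fsl,mode", &mode))
		return false;	/* property missing: caller picks a default */
	return strcmp(mode, "ac97-slave") == 0 ||
	       strcmp(mode, "ac97-master") == 0;
}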


@ -1832,8 +1832,8 @@ F: net/bluetooth/
F: include/net/bluetooth/
BONDING DRIVER
M: Jay Vosburgh <fubar@us.ibm.com>
M: Veaceslav Falico <vfalico@redhat.com>
M: Jay Vosburgh <j.vosburgh@gmail.com>
M: Veaceslav Falico <vfalico@gmail.com>
M: Andy Gospodarek <andy@greyhouse.net>
L: netdev@vger.kernel.org
W: http://sourceforge.net/projects/bonding/
@ -2808,9 +2808,9 @@ S: Supported
F: drivers/acpi/dock.c
DOCUMENTATION
M: Rob Landley <rob@landley.net>
M: Randy Dunlap <rdunlap@infradead.org>
L: linux-doc@vger.kernel.org
T: TBD
T: quilt http://www.infradead.org/~rdunlap/Doc/patches/
S: Maintained
F: Documentation/
@ -6013,6 +6013,7 @@ F: include/uapi/linux/net.h
F: include/uapi/linux/netdevice.h
F: tools/net/
F: tools/testing/selftests/net/
F: lib/random32.c
NETWORKING [IPv4/IPv6]
M: "David S. Miller" <davem@davemloft.net>


@ -1,7 +1,7 @@
VERSION = 3
PATCHLEVEL = 14
SUBLEVEL = 0
EXTRAVERSION = -rc8
EXTRAVERSION =
NAME = Shuffling Zombie Juror
# *DOCUMENTATION*


@ -445,20 +445,10 @@ static inline int pte_same(pte_t a, pte_t b)
return a.pte == b.pte;
}
static inline int pteval_present(pteval_t pteval)
{
/*
* Yes Linus, _PAGE_PROTNONE == _PAGE_NUMA. Expressing it this
* way clearly states that the intent is that protnone and numa
* hinting ptes are considered present for the purposes of
* pagetable operations like zapping, protection changes, gup etc.
*/
return pteval & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_NUMA);
}
static inline int pte_present(pte_t a)
{
return pteval_present(pte_flags(a));
return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE |
_PAGE_NUMA);
}
#define pte_accessible pte_accessible


@ -119,9 +119,10 @@ static inline void setup_node_to_cpumask_map(void) { }
extern const struct cpumask *cpu_coregroup_mask(int cpu);
#ifdef ENABLE_TOPO_DEFINES
#define topology_physical_package_id(cpu) (cpu_data(cpu).phys_proc_id)
#define topology_core_id(cpu) (cpu_data(cpu).cpu_core_id)
#ifdef ENABLE_TOPO_DEFINES
#define topology_core_cpumask(cpu) (per_cpu(cpu_core_map, cpu))
#define topology_thread_cpumask(cpu) (per_cpu(cpu_sibling_map, cpu))
#endif


@ -365,7 +365,7 @@ void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
/* Assume pteval_t is equivalent to all the other *val_t types. */
static pteval_t pte_mfn_to_pfn(pteval_t val)
{
if (pteval_present(val)) {
if (val & _PAGE_PRESENT) {
unsigned long mfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
unsigned long pfn = mfn_to_pfn(mfn);
@ -381,7 +381,7 @@ static pteval_t pte_mfn_to_pfn(pteval_t val)
static pteval_t pte_pfn_to_mfn(pteval_t val)
{
if (pteval_present(val)) {
if (val & _PAGE_PRESENT) {
unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
pteval_t flags = val & PTE_FLAGS_MASK;
unsigned long mfn;


@ -2109,7 +2109,6 @@ static void rbd_img_obj_callback(struct rbd_obj_request *obj_request)
rbd_assert(img_request->obj_request_count > 0);
rbd_assert(which != BAD_WHICH);
rbd_assert(which < img_request->obj_request_count);
rbd_assert(which >= img_request->next_completion);
spin_lock_irq(&img_request->completion_lock);
if (which != img_request->next_completion)


@ -842,7 +842,7 @@ void i915_gem_suspend_gtt_mappings(struct drm_device *dev)
dev_priv->gtt.base.clear_range(&dev_priv->gtt.base,
dev_priv->gtt.base.start / PAGE_SIZE,
dev_priv->gtt.base.total / PAGE_SIZE,
false);
true);
}
void i915_gem_restore_gtt_mappings(struct drm_device *dev)


@ -866,13 +866,16 @@ static int nouveau_pmops_runtime_suspend(struct device *dev)
struct drm_device *drm_dev = pci_get_drvdata(pdev);
int ret;
if (nouveau_runtime_pm == 0)
return -EINVAL;
if (nouveau_runtime_pm == 0) {
pm_runtime_forbid(dev);
return -EBUSY;
}
/* are we optimus enabled? */
if (nouveau_runtime_pm == -1 && !nouveau_is_optimus() && !nouveau_is_v1_dsm()) {
DRM_DEBUG_DRIVER("failing to power off - not optimus\n");
return -EINVAL;
pm_runtime_forbid(dev);
return -EBUSY;
}
nv_debug_level(SILENT);
@ -923,12 +926,15 @@ static int nouveau_pmops_runtime_idle(struct device *dev)
struct nouveau_drm *drm = nouveau_drm(drm_dev);
struct drm_crtc *crtc;
if (nouveau_runtime_pm == 0)
if (nouveau_runtime_pm == 0) {
pm_runtime_forbid(dev);
return -EBUSY;
}
/* are we optimus enabled? */
if (nouveau_runtime_pm == -1 && !nouveau_is_optimus() && !nouveau_is_v1_dsm()) {
DRM_DEBUG_DRIVER("failing to power off - not optimus\n");
pm_runtime_forbid(dev);
return -EBUSY;
}


@ -403,11 +403,15 @@ static int radeon_pmops_runtime_suspend(struct device *dev)
struct drm_device *drm_dev = pci_get_drvdata(pdev);
int ret;
if (radeon_runtime_pm == 0)
return -EINVAL;
if (radeon_runtime_pm == 0) {
pm_runtime_forbid(dev);
return -EBUSY;
}
if (radeon_runtime_pm == -1 && !radeon_is_px())
return -EINVAL;
if (radeon_runtime_pm == -1 && !radeon_is_px()) {
pm_runtime_forbid(dev);
return -EBUSY;
}
drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
drm_kms_helper_poll_disable(drm_dev);
@ -456,12 +460,15 @@ static int radeon_pmops_runtime_idle(struct device *dev)
struct drm_device *drm_dev = pci_get_drvdata(pdev);
struct drm_crtc *crtc;
if (radeon_runtime_pm == 0)
if (radeon_runtime_pm == 0) {
pm_runtime_forbid(dev);
return -EBUSY;
}
/* are we PX enabled? */
if (radeon_runtime_pm == -1 && !radeon_is_px()) {
DRM_DEBUG_DRIVER("failing to power off - not px\n");
pm_runtime_forbid(dev);
return -EBUSY;
}
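
Both the nouveau and radeon hunks above apply the same idiom; here is
a minimal sketch of it, assuming standard runtime-PM semantics (the
function name and the runtime_pm variable stand in for the drivers'
module options): pm_runtime_forbid() raises the usage count so the PM
core stops attempting runtime suspend, while -EBUSY rejects the current
request without putting the device into an error state, which the old
-EINVAL return did.

#include <linux/pm_runtime.h>

static int runtime_pm = -1;	/* stand-in for the module parameter */

static int foo_runtime_suspend(struct device *dev)
{
	if (runtime_pm == 0) {
		pm_runtime_forbid(dev);	/* stop further suspend attempts */
		return -EBUSY;		/* back off, not an error state */
	}
	/* the real suspend work would follow here */
	return 0;
}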


@ -177,8 +177,10 @@ void udl_gem_free_object(struct drm_gem_object *gem_obj)
if (obj->vmapping)
udl_gem_vunmap(obj);
if (gem_obj->import_attach)
if (gem_obj->import_attach) {
drm_prime_gem_destroy(gem_obj, obj->sg);
put_device(gem_obj->dev->dev);
}
if (obj->pages)
udl_gem_put_pages(obj);
@ -256,9 +258,12 @@ struct drm_gem_object *udl_gem_prime_import(struct drm_device *dev,
int ret;
/* need to attach */
get_device(dev->dev);
attach = dma_buf_attach(dma_buf, dev->dev);
if (IS_ERR(attach))
if (IS_ERR(attach)) {
put_device(dev->dev);
return ERR_CAST(attach);
}
get_dma_buf(dma_buf);
@ -282,6 +287,6 @@ fail_unmap:
fail_detach:
dma_buf_detach(dma_buf, attach);
dma_buf_put(dma_buf);
put_device(dev->dev);
return ERR_PTR(ret);
}


@ -39,7 +39,9 @@
#include <linux/i2c.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <sysdev/fsl_soc.h>
#include <asm/cpm.h>


@ -954,11 +954,13 @@ static long evdev_do_ioctl(struct file *file, unsigned int cmd,
return -EFAULT;
error = input_ff_upload(dev, &effect, file);
if (error)
return error;
if (put_user(effect.id, &(((struct ff_effect __user *)p)->id)))
return -EFAULT;
return error;
return 0;
}
/* Multi-number variable-length handlers */


@ -76,8 +76,18 @@ static int adp5588_gpio_get_value(struct gpio_chip *chip, unsigned off)
struct adp5588_kpad *kpad = container_of(chip, struct adp5588_kpad, gc);
unsigned int bank = ADP5588_BANK(kpad->gpiomap[off]);
unsigned int bit = ADP5588_BIT(kpad->gpiomap[off]);
int val;
return !!(adp5588_read(kpad->client, GPIO_DAT_STAT1 + bank) & bit);
mutex_lock(&kpad->gpio_lock);
if (kpad->dir[bank] & bit)
val = kpad->dat_out[bank];
else
val = adp5588_read(kpad->client, GPIO_DAT_STAT1 + bank);
mutex_unlock(&kpad->gpio_lock);
return !!(val & bit);
}
static void adp5588_gpio_set_value(struct gpio_chip *chip,


@ -27,29 +27,32 @@ struct da9052_onkey {
static void da9052_onkey_query(struct da9052_onkey *onkey)
{
int key_stat;
int ret;
key_stat = da9052_reg_read(onkey->da9052, DA9052_EVENT_B_REG);
if (key_stat < 0) {
ret = da9052_reg_read(onkey->da9052, DA9052_STATUS_A_REG);
if (ret < 0) {
dev_err(onkey->da9052->dev,
"Failed to read onkey event %d\n", key_stat);
"Failed to read onkey event err=%d\n", ret);
} else {
/*
* Since interrupt for deassertion of ONKEY pin is not
* generated, onkey event state determines the onkey
* button state.
*/
key_stat &= DA9052_EVENTB_ENONKEY;
input_report_key(onkey->input, KEY_POWER, key_stat);
input_sync(onkey->input);
}
bool pressed = !(ret & DA9052_STATUSA_NONKEY);
/*
* Interrupt is generated only when the ONKEY pin is asserted.
* Hence the deassertion of the pin is simulated through work queue.
*/
if (key_stat)
schedule_delayed_work(&onkey->work, msecs_to_jiffies(50));
input_report_key(onkey->input, KEY_POWER, pressed);
input_sync(onkey->input);
/*
* Interrupt is generated only when the ONKEY pin
* is asserted. Hence the deassertion of the pin
* is simulated through work queue.
*/
if (pressed)
schedule_delayed_work(&onkey->work,
msecs_to_jiffies(50));
}
}
static void da9052_onkey_work(struct work_struct *work)
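
Because only the assertion edge raises an interrupt, release has to be
detected by polling, as the comment above explains. A condensed sketch
of that pattern, assuming the standard workqueue API and that the
driver embeds a struct delayed_work named "work" (as the
schedule_delayed_work() calls above imply):

#include <linux/workqueue.h>

static void onkey_poll(struct work_struct *work)
{
	struct da9052_onkey *onkey =
		container_of(work, struct da9052_onkey, work.work);

	/* re-read STATUS_A; while still pressed, da9052_onkey_query()
	 * re-arms this work, otherwise the KEY_POWER release is sent */
	da9052_onkey_query(onkey);
}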


@ -409,7 +409,6 @@ static int cypress_set_input_params(struct input_dev *input,
__clear_bit(REL_X, input->relbit);
__clear_bit(REL_Y, input->relbit);
__set_bit(INPUT_PROP_BUTTONPAD, input->propbit);
__set_bit(EV_KEY, input->evbit);
__set_bit(BTN_LEFT, input->keybit);
__set_bit(BTN_RIGHT, input->keybit);


@ -265,11 +265,22 @@ static int synaptics_identify(struct psmouse *psmouse)
* Read touchpad resolution and maximum reported coordinates
* Resolution is left zero if touchpad does not support the query
*/
static const int *quirk_min_max;
static int synaptics_resolution(struct psmouse *psmouse)
{
struct synaptics_data *priv = psmouse->private;
unsigned char resp[3];
if (quirk_min_max) {
priv->x_min = quirk_min_max[0];
priv->x_max = quirk_min_max[1];
priv->y_min = quirk_min_max[2];
priv->y_max = quirk_min_max[3];
return 0;
}
if (SYN_ID_MAJOR(priv->identity) < 4)
return 0;
@ -1485,10 +1496,54 @@ static const struct dmi_system_id olpc_dmi_table[] __initconst = {
{ }
};
static const struct dmi_system_id min_max_dmi_table[] __initconst = {
#if defined(CONFIG_DMI)
{
/* Lenovo ThinkPad Helix */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad Helix"),
},
.driver_data = (int []){1024, 5052, 2258, 4832},
},
{
/* Lenovo ThinkPad X240 */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X240"),
},
.driver_data = (int []){1232, 5710, 1156, 4696},
},
{
/* Lenovo ThinkPad T440s */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T440"),
},
.driver_data = (int []){1024, 5112, 2024, 4832},
},
{
/* Lenovo ThinkPad T540p */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T540"),
},
.driver_data = (int []){1024, 5056, 2058, 4832},
},
#endif
{ }
};
void __init synaptics_module_init(void)
{
const struct dmi_system_id *min_max_dmi;
impaired_toshiba_kbc = dmi_check_system(toshiba_dmi_table);
broken_olpc_ec = dmi_check_system(olpc_dmi_table);
min_max_dmi = dmi_first_match(min_max_dmi_table);
if (min_max_dmi)
quirk_min_max = min_max_dmi->driver_data;
}
static int __synaptics_init(struct psmouse *psmouse, bool absolute_mode)


@ -67,7 +67,6 @@ struct mousedev {
struct device dev;
struct cdev cdev;
bool exist;
bool is_mixdev;
struct list_head mixdev_node;
bool opened_by_mixdev;
@ -77,6 +76,9 @@ struct mousedev {
int old_x[4], old_y[4];
int frac_dx, frac_dy;
unsigned long touch;
int (*open_device)(struct mousedev *mousedev);
void (*close_device)(struct mousedev *mousedev);
};
enum mousedev_emul {
@ -116,9 +118,6 @@ static unsigned char mousedev_imex_seq[] = { 0xf3, 200, 0xf3, 200, 0xf3, 80 };
static struct mousedev *mousedev_mix;
static LIST_HEAD(mousedev_mix_list);
static void mixdev_open_devices(void);
static void mixdev_close_devices(void);
#define fx(i) (mousedev->old_x[(mousedev->pkt_count - (i)) & 03])
#define fy(i) (mousedev->old_y[(mousedev->pkt_count - (i)) & 03])
@ -428,9 +427,7 @@ static int mousedev_open_device(struct mousedev *mousedev)
if (retval)
return retval;
if (mousedev->is_mixdev)
mixdev_open_devices();
else if (!mousedev->exist)
if (!mousedev->exist)
retval = -ENODEV;
else if (!mousedev->open++) {
retval = input_open_device(&mousedev->handle);
@ -446,9 +443,7 @@ static void mousedev_close_device(struct mousedev *mousedev)
{
mutex_lock(&mousedev->mutex);
if (mousedev->is_mixdev)
mixdev_close_devices();
else if (mousedev->exist && !--mousedev->open)
if (mousedev->exist && !--mousedev->open)
input_close_device(&mousedev->handle);
mutex_unlock(&mousedev->mutex);
@ -459,21 +454,29 @@ static void mousedev_close_device(struct mousedev *mousedev)
* stream. Note that this function is called with mousedev_mix->mutex
* held.
*/
static void mixdev_open_devices(void)
static int mixdev_open_devices(struct mousedev *mixdev)
{
struct mousedev *mousedev;
int error;
if (mousedev_mix->open++)
return;
error = mutex_lock_interruptible(&mixdev->mutex);
if (error)
return error;
list_for_each_entry(mousedev, &mousedev_mix_list, mixdev_node) {
if (!mousedev->opened_by_mixdev) {
if (mousedev_open_device(mousedev))
continue;
if (!mixdev->open++) {
struct mousedev *mousedev;
mousedev->opened_by_mixdev = true;
list_for_each_entry(mousedev, &mousedev_mix_list, mixdev_node) {
if (!mousedev->opened_by_mixdev) {
if (mousedev_open_device(mousedev))
continue;
mousedev->opened_by_mixdev = true;
}
}
}
mutex_unlock(&mixdev->mutex);
return 0;
}
/*
@ -481,19 +484,22 @@ static void mixdev_open_devices(void)
* device. Note that this function is called with mousedev_mix->mutex
* held.
*/
static void mixdev_close_devices(void)
static void mixdev_close_devices(struct mousedev *mixdev)
{
struct mousedev *mousedev;
mutex_lock(&mixdev->mutex);
if (--mousedev_mix->open)
return;
if (!--mixdev->open) {
struct mousedev *mousedev;
list_for_each_entry(mousedev, &mousedev_mix_list, mixdev_node) {
if (mousedev->opened_by_mixdev) {
mousedev->opened_by_mixdev = false;
mousedev_close_device(mousedev);
list_for_each_entry(mousedev, &mousedev_mix_list, mixdev_node) {
if (mousedev->opened_by_mixdev) {
mousedev->opened_by_mixdev = false;
mousedev_close_device(mousedev);
}
}
}
mutex_unlock(&mixdev->mutex);
}
@ -522,7 +528,7 @@ static int mousedev_release(struct inode *inode, struct file *file)
mousedev_detach_client(mousedev, client);
kfree(client);
mousedev_close_device(mousedev);
mousedev->close_device(mousedev);
return 0;
}
@ -550,7 +556,7 @@ static int mousedev_open(struct inode *inode, struct file *file)
client->mousedev = mousedev;
mousedev_attach_client(mousedev, client);
error = mousedev_open_device(mousedev);
error = mousedev->open_device(mousedev);
if (error)
goto err_free_client;
@ -861,16 +867,21 @@ static struct mousedev *mousedev_create(struct input_dev *dev,
if (mixdev) {
dev_set_name(&mousedev->dev, "mice");
mousedev->open_device = mixdev_open_devices;
mousedev->close_device = mixdev_close_devices;
} else {
int dev_no = minor;
/* Normalize device number if it falls into legacy range */
if (dev_no < MOUSEDEV_MINOR_BASE + MOUSEDEV_MINORS)
dev_no -= MOUSEDEV_MINOR_BASE;
dev_set_name(&mousedev->dev, "mouse%d", dev_no);
mousedev->open_device = mousedev_open_device;
mousedev->close_device = mousedev_close_device;
}
mousedev->exist = true;
mousedev->is_mixdev = mixdev;
mousedev->handle.dev = input_get_device(dev);
mousedev->handle.name = dev_name(&mousedev->dev);
mousedev->handle.handler = handler;
@ -919,7 +930,7 @@ static void mousedev_destroy(struct mousedev *mousedev)
device_del(&mousedev->dev);
mousedev_cleanup(mousedev);
input_free_minor(MINOR(mousedev->dev.devt));
if (!mousedev->is_mixdev)
if (mousedev != mousedev_mix)
input_unregister_handle(&mousedev->handle);
put_device(&mousedev->dev);
}


@ -17649,8 +17649,6 @@ static int tg3_init_one(struct pci_dev *pdev,
tg3_init_bufmgr_config(tp);
features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
/* 5700 B0 chips do not support checksumming correctly due
* to hardware bugs.
*/
@ -17682,7 +17680,8 @@ static int tg3_init_one(struct pci_dev *pdev,
features |= NETIF_F_TSO_ECN;
}
dev->features |= features;
dev->features |= features | NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX;
dev->vlan_features |= features;
/*


@ -22,6 +22,7 @@
#include <linux/interrupt.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
@ -88,8 +89,9 @@
#define MVNETA_TX_IN_PRGRS BIT(1)
#define MVNETA_TX_FIFO_EMPTY BIT(8)
#define MVNETA_RX_MIN_FRAME_SIZE 0x247c
#define MVNETA_SGMII_SERDES_CFG 0x24A0
#define MVNETA_SERDES_CFG 0x24A0
#define MVNETA_SGMII_SERDES_PROTO 0x0cc7
#define MVNETA_RGMII_SERDES_PROTO 0x0667
#define MVNETA_TYPE_PRIO 0x24bc
#define MVNETA_FORCE_UNI BIT(21)
#define MVNETA_TXQ_CMD_1 0x24e4
@ -161,7 +163,7 @@
#define MVNETA_GMAC_MAX_RX_SIZE_MASK 0x7ffc
#define MVNETA_GMAC0_PORT_ENABLE BIT(0)
#define MVNETA_GMAC_CTRL_2 0x2c08
#define MVNETA_GMAC2_PSC_ENABLE BIT(3)
#define MVNETA_GMAC2_PCS_ENABLE BIT(3)
#define MVNETA_GMAC2_PORT_RGMII BIT(4)
#define MVNETA_GMAC2_PORT_RESET BIT(6)
#define MVNETA_GMAC_STATUS 0x2c10
@ -710,35 +712,6 @@ static void mvneta_rxq_bm_disable(struct mvneta_port *pp,
mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val);
}
/* Sets the RGMII Enable bit (RGMIIEn) in port MAC control register */
static void mvneta_gmac_rgmii_set(struct mvneta_port *pp, int enable)
{
u32 val;
val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
if (enable)
val |= MVNETA_GMAC2_PORT_RGMII;
else
val &= ~MVNETA_GMAC2_PORT_RGMII;
mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
}
/* Config SGMII port */
static void mvneta_port_sgmii_config(struct mvneta_port *pp)
{
u32 val;
val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
val |= MVNETA_GMAC2_PSC_ENABLE;
mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
mvreg_write(pp, MVNETA_SGMII_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO);
}
/* Start the Ethernet port RX and TX activity */
static void mvneta_port_up(struct mvneta_port *pp)
{
@ -2756,12 +2729,15 @@ static void mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
mvreg_write(pp, MVNETA_UNIT_INTR_CAUSE, 0);
if (phy_mode == PHY_INTERFACE_MODE_SGMII)
mvneta_port_sgmii_config(pp);
mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_SGMII_SERDES_PROTO);
else
mvreg_write(pp, MVNETA_SERDES_CFG, MVNETA_RGMII_SERDES_PROTO);
mvneta_gmac_rgmii_set(pp, 1);
val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
val |= MVNETA_GMAC2_PCS_ENABLE | MVNETA_GMAC2_PORT_RGMII;
/* Cancel Port Reset */
val = mvreg_read(pp, MVNETA_GMAC_CTRL_2);
val &= ~MVNETA_GMAC2_PORT_RESET;
mvreg_write(pp, MVNETA_GMAC_CTRL_2, val);
@ -2774,6 +2750,7 @@ static void mvneta_port_power_up(struct mvneta_port *pp, int phy_mode)
static int mvneta_probe(struct platform_device *pdev)
{
const struct mbus_dram_target_info *dram_target_info;
struct resource *res;
struct device_node *dn = pdev->dev.of_node;
struct device_node *phy_node;
u32 phy_addr;
@ -2838,9 +2815,15 @@ static int mvneta_probe(struct platform_device *pdev)
clk_prepare_enable(pp->clk);
pp->base = of_iomap(dn, 0);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
err = -ENODEV;
goto err_clk;
}
pp->base = devm_ioremap_resource(&pdev->dev, res);
if (pp->base == NULL) {
err = -ENOMEM;
err = PTR_ERR(pp->base);
goto err_clk;
}
@ -2848,7 +2831,7 @@ static int mvneta_probe(struct platform_device *pdev)
pp->stats = alloc_percpu(struct mvneta_pcpu_stats);
if (!pp->stats) {
err = -ENOMEM;
goto err_unmap;
goto err_clk;
}
for_each_possible_cpu(cpu) {
@ -2913,8 +2896,6 @@ err_deinit:
mvneta_deinit(pp);
err_free_stats:
free_percpu(pp->stats);
err_unmap:
iounmap(pp->base);
err_clk:
clk_disable_unprepare(pp->clk);
err_free_irq:
@ -2934,7 +2915,6 @@ static int mvneta_remove(struct platform_device *pdev)
mvneta_deinit(pp);
clk_disable_unprepare(pp->clk);
free_percpu(pp->stats);
iounmap(pp->base);
irq_dispose_mapping(dev->irq);
free_netdev(dev);


@ -2681,7 +2681,11 @@ static pci_ers_result_t mlx4_pci_err_detected(struct pci_dev *pdev,
static pci_ers_result_t mlx4_pci_slot_reset(struct pci_dev *pdev)
{
int ret = __mlx4_init_one(pdev, 0);
const struct pci_device_id *id;
int ret;
id = pci_match_id(mlx4_pci_table, pdev);
ret = __mlx4_init_one(pdev, id->driver_data);
return ret ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
}


@ -4765,7 +4765,9 @@ static int qlge_probe(struct pci_dev *pdev,
ndev->features = ndev->hw_features;
ndev->vlan_features = ndev->hw_features;
/* vlan gets same features (except vlan filter) */
ndev->vlan_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
ndev->vlan_features &= ~(NETIF_F_HW_VLAN_CTAG_FILTER |
NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_CTAG_RX);
if (test_bit(QL_DMA64, &qdev->flags))
ndev->features |= NETIF_F_HIGHDMA;


@ -180,7 +180,8 @@ static void ifb_setup(struct net_device *dev)
dev->tx_queue_len = TX_Q_LIMIT;
dev->features |= IFB_FEATURES;
dev->vlan_features |= IFB_FEATURES;
dev->vlan_features |= IFB_FEATURES & ~(NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_STAG_TX);
dev->flags |= IFF_NOARP;
dev->flags &= ~IFF_MULTICAST;


@ -752,14 +752,12 @@ EXPORT_SYMBOL_GPL(usbnet_unlink_rx_urbs);
// precondition: never called in_interrupt
static void usbnet_terminate_urbs(struct usbnet *dev)
{
DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
DECLARE_WAITQUEUE(wait, current);
int temp;
/* ensure there are no more active urbs */
add_wait_queue(&unlink_wakeup, &wait);
add_wait_queue(&dev->wait, &wait);
set_current_state(TASK_UNINTERRUPTIBLE);
dev->wait = &unlink_wakeup;
temp = unlink_urbs(dev, &dev->txq) +
unlink_urbs(dev, &dev->rxq);
@ -773,15 +771,14 @@ static void usbnet_terminate_urbs(struct usbnet *dev)
"waited for %d urb completions\n", temp);
}
set_current_state(TASK_RUNNING);
dev->wait = NULL;
remove_wait_queue(&unlink_wakeup, &wait);
remove_wait_queue(&dev->wait, &wait);
}
int usbnet_stop (struct net_device *net)
{
struct usbnet *dev = netdev_priv(net);
struct driver_info *info = dev->driver_info;
int retval;
int retval, pm;
clear_bit(EVENT_DEV_OPEN, &dev->flags);
netif_stop_queue (net);
@ -791,6 +788,8 @@ int usbnet_stop (struct net_device *net)
net->stats.rx_packets, net->stats.tx_packets,
net->stats.rx_errors, net->stats.tx_errors);
/* to not race resume */
pm = usb_autopm_get_interface(dev->intf);
/* allow minidriver to stop correctly (wireless devices to turn off
* radio etc) */
if (info->stop) {
@ -817,6 +816,9 @@ int usbnet_stop (struct net_device *net)
dev->flags = 0;
del_timer_sync (&dev->delay);
tasklet_kill (&dev->bh);
if (!pm)
usb_autopm_put_interface(dev->intf);
if (info->manage_power &&
!test_and_clear_bit(EVENT_NO_RUNTIME_PM, &dev->flags))
info->manage_power(dev, 0);
@ -1437,11 +1439,12 @@ static void usbnet_bh (unsigned long param)
/* restart RX again after disabling due to high error rate */
clear_bit(EVENT_RX_KILL, &dev->flags);
// waiting for all pending urbs to complete?
if (dev->wait) {
if ((dev->txq.qlen + dev->rxq.qlen + dev->done.qlen) == 0) {
wake_up (dev->wait);
}
/* waiting for all pending urbs to complete?
* only then can we forgo submitting anew
*/
if (waitqueue_active(&dev->wait)) {
if (dev->txq.qlen + dev->rxq.qlen + dev->done.qlen == 0)
wake_up_all(&dev->wait);
// or are we maybe short a few urbs?
} else if (netif_running (dev->net) &&
@ -1580,6 +1583,7 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
dev->driver_name = name;
dev->msg_enable = netif_msg_init (msg_level, NETIF_MSG_DRV
| NETIF_MSG_PROBE | NETIF_MSG_LINK);
init_waitqueue_head(&dev->wait);
skb_queue_head_init (&dev->rxq);
skb_queue_head_init (&dev->txq);
skb_queue_head_init (&dev->done);
@ -1791,9 +1795,10 @@ int usbnet_resume (struct usb_interface *intf)
spin_unlock_irq(&dev->txq.lock);
if (test_bit(EVENT_DEV_OPEN, &dev->flags)) {
/* handle remote wakeup ASAP */
if (!dev->wait &&
netif_device_present(dev->net) &&
/* handle remote wakeup ASAP
* we cannot race against stop
*/
if (netif_device_present(dev->net) &&
!timer_pending(&dev->delay) &&
!test_bit(EVENT_RX_HALT, &dev->flags))
rx_alloc_submit(dev, GFP_NOIO);
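
The change above replaces an on-stack waitqueue plus a racy dev->wait
pointer with a waitqueue embedded in struct usbnet. Stripped to its
shape - with pending_urbs() invented here as a placeholder for the
three-queue check - the pattern is:

DECLARE_WAITQUEUE(wait, current);

/* waiter (usbnet_terminate_urbs): sleep until all URB queues drain */
add_wait_queue(&dev->wait, &wait);
set_current_state(TASK_UNINTERRUPTIBLE);
while (pending_urbs(dev))	/* placeholder: txq + rxq + done != 0 */
	schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
set_current_state(TASK_RUNNING);
remove_wait_queue(&dev->wait, &wait);

/* completion side (usbnet_bh): wake only when someone is waiting */
if (waitqueue_active(&dev->wait) &&
    dev->txq.qlen + dev->rxq.qlen + dev->done.qlen == 0)
	wake_up_all(&dev->wait);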


@ -286,7 +286,10 @@ static void veth_setup(struct net_device *dev)
dev->features |= NETIF_F_LLTX;
dev->features |= VETH_FEATURES;
dev->vlan_features = dev->features &
~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX);
~(NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_STAG_TX |
NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_VLAN_STAG_RX);
dev->destructor = veth_dev_free;
dev->hw_features = VETH_FEATURES;


@ -671,8 +671,7 @@ static bool try_fill_recv(struct receive_queue *rq, gfp_t gfp)
if (err)
break;
} while (rq->vq->num_free);
if (unlikely(!virtqueue_kick(rq->vq)))
return false;
virtqueue_kick(rq->vq);
return !oom;
}
@ -877,7 +876,7 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
err = xmit_skb(sq, skb);
/* This should not happen! */
if (unlikely(err) || unlikely(!virtqueue_kick(sq->vq))) {
if (unlikely(err)) {
dev->stats.tx_fifo_errors++;
if (net_ratelimit())
dev_warn(&dev->dev,
@ -886,6 +885,7 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
kfree_skb(skb);
return NETDEV_TX_OK;
}
virtqueue_kick(sq->vq);
/* Don't wait up for transmitted skbs to be freed. */
skb_orphan(skb);


@ -505,9 +505,13 @@ static int get_rx_bufs(struct vhost_virtqueue *vq,
r = -ENOBUFS;
goto err;
}
d = vhost_get_vq_desc(vq->dev, vq, vq->iov + seg,
r = vhost_get_vq_desc(vq->dev, vq, vq->iov + seg,
ARRAY_SIZE(vq->iov) - seg, &out,
&in, log, log_num);
if (unlikely(r < 0))
goto err;
d = r;
if (d == vq->num) {
r = 0;
goto err;
@ -532,6 +536,12 @@ static int get_rx_bufs(struct vhost_virtqueue *vq,
*iovcount = seg;
if (unlikely(log))
*log_num = nlogs;
/* Detect overrun */
if (unlikely(datalen > 0)) {
r = UIO_MAXIOV + 1;
goto err;
}
return headcount;
err:
vhost_discard_vq_desc(vq, headcount);
@ -587,6 +597,14 @@ static void handle_rx(struct vhost_net *net)
/* On error, stop handling until the next kick. */
if (unlikely(headcount < 0))
break;
/* On overrun, truncate and discard */
if (unlikely(headcount > UIO_MAXIOV)) {
msg.msg_iovlen = 1;
err = sock->ops->recvmsg(NULL, sock, &msg,
1, MSG_DONTWAIT | MSG_TRUNC);
pr_debug("Discarded rx packet: len %zd\n", sock_len);
continue;
}
/* OK, now we need to know about added descriptors. */
if (!headcount) {
if (unlikely(vhost_enable_notify(&net->dev, vq))) {


@ -399,12 +399,26 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
state = BP_EAGAIN;
break;
}
pfn = page_to_pfn(page);
frame_list[i] = pfn_to_mfn(pfn);
scrub_page(page);
frame_list[i] = page_to_pfn(page);
}
/*
* Ensure that ballooned highmem pages don't have kmaps.
*
* Do this before changing the p2m as kmap_flush_unused()
* reads PTEs to obtain pages (and hence needs the original
* p2m entry).
*/
kmap_flush_unused();
/* Update direct mapping, invalidate P2M, and add to balloon. */
for (i = 0; i < nr_pages; i++) {
pfn = frame_list[i];
frame_list[i] = pfn_to_mfn(pfn);
page = pfn_to_page(pfn);
#ifdef CONFIG_XEN_HAVE_PVMMU
/*
* Ballooned out frames are effectively replaced with
@ -429,11 +443,9 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
}
#endif
balloon_append(pfn_to_page(pfn));
balloon_append(page);
}
/* Ensure that ballooned highmem pages don't have kmaps. */
kmap_flush_unused();
flush_tlb_all();
set_xen_guest_handle(reservation.extent_start, frame_list);


@ -41,19 +41,8 @@ static const struct dentry_operations anon_inodefs_dentry_operations = {
static struct dentry *anon_inodefs_mount(struct file_system_type *fs_type,
int flags, const char *dev_name, void *data)
{
struct dentry *root;
root = mount_pseudo(fs_type, "anon_inode:", NULL,
return mount_pseudo(fs_type, "anon_inode:", NULL,
&anon_inodefs_dentry_operations, ANON_INODE_FS_MAGIC);
if (!IS_ERR(root)) {
struct super_block *s = root->d_sb;
anon_inode_inode = alloc_anon_inode(s);
if (IS_ERR(anon_inode_inode)) {
dput(root);
deactivate_locked_super(s);
root = ERR_CAST(anon_inode_inode);
}
}
return root;
}
static struct file_system_type anon_inode_fs_type = {
@ -175,22 +164,15 @@ EXPORT_SYMBOL_GPL(anon_inode_getfd);
static int __init anon_inode_init(void)
{
int error;
error = register_filesystem(&anon_inode_fs_type);
if (error)
goto err_exit;
anon_inode_mnt = kern_mount(&anon_inode_fs_type);
if (IS_ERR(anon_inode_mnt)) {
error = PTR_ERR(anon_inode_mnt);
goto err_unregister_filesystem;
}
return 0;
if (IS_ERR(anon_inode_mnt))
panic("anon_inode_init() kernel mount failed (%ld)\n", PTR_ERR(anon_inode_mnt));
err_unregister_filesystem:
unregister_filesystem(&anon_inode_fs_type);
err_exit:
panic(KERN_ERR "anon_inode_init() failed (%d)\n", error);
anon_inode_inode = alloc_anon_inode(anon_inode_mnt->mnt_sb);
if (IS_ERR(anon_inode_inode))
panic("anon_inode_init() inode allocation failed (%ld)\n", PTR_ERR(anon_inode_inode));
return 0;
}
fs_initcall(anon_inode_init);


@ -2833,9 +2833,9 @@ static int prepend_name(char **buffer, int *buflen, struct qstr *name)
u32 dlen = ACCESS_ONCE(name->len);
char *p;
if (*buflen < dlen + 1)
return -ENAMETOOLONG;
*buflen -= dlen + 1;
if (*buflen < 0)
return -ENAMETOOLONG;
p = *buffer -= dlen + 1;
*p++ = '/';
while (dlen--) {


@ -38,6 +38,7 @@
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/aio.h>
#include <linux/bitops.h>
#include "ext4_jbd2.h"
#include "xattr.h"
@ -3921,18 +3922,20 @@ int ext4_get_inode_loc(struct inode *inode, struct ext4_iloc *iloc)
void ext4_set_inode_flags(struct inode *inode)
{
unsigned int flags = EXT4_I(inode)->i_flags;
unsigned int new_fl = 0;
inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
if (flags & EXT4_SYNC_FL)
inode->i_flags |= S_SYNC;
new_fl |= S_SYNC;
if (flags & EXT4_APPEND_FL)
inode->i_flags |= S_APPEND;
new_fl |= S_APPEND;
if (flags & EXT4_IMMUTABLE_FL)
inode->i_flags |= S_IMMUTABLE;
new_fl |= S_IMMUTABLE;
if (flags & EXT4_NOATIME_FL)
inode->i_flags |= S_NOATIME;
new_fl |= S_NOATIME;
if (flags & EXT4_DIRSYNC_FL)
inode->i_flags |= S_DIRSYNC;
new_fl |= S_DIRSYNC;
set_mask_bits(&inode->i_flags,
S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC, new_fl);
}
/* Propagate flags from i_flags to EXT4_I(inode)->i_flags */
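
The point of this rewrite is that the old code cleared and re-set bits
in inode->i_flags with a plain read-modify-write, racing against
concurrent updates of unrelated i_flags bits. set_mask_bits() (added
to bitops.h later in this diff) performs the whole replacement in one
cmpxchg() retry loop. A toy illustration of its semantics, with
made-up initial flags:

unsigned long flags = S_SYNC | S_IMMUTABLE | S_NOATIME;

/* atomically clear every bit in the mask, then set the new bits;
 * bits outside the mask (here S_IMMUTABLE) are never disturbed */
set_mask_bits(&flags,
	      S_SYNC | S_APPEND | S_NOATIME | S_DIRSYNC,
	      S_APPEND);
/* flags == (S_IMMUTABLE | S_APPEND) */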


@ -713,27 +713,16 @@ unsigned long __fdget_raw(unsigned int fd)
unsigned long __fdget_pos(unsigned int fd)
{
struct files_struct *files = current->files;
struct file *file;
unsigned long v;
unsigned long v = __fdget(fd);
struct file *file = (struct file *)(v & ~3);
if (atomic_read(&files->count) == 1) {
file = __fcheck_files(files, fd);
v = 0;
} else {
file = __fget(fd, 0);
v = FDPUT_FPUT;
}
if (!file)
return 0;
if (file->f_mode & FMODE_ATOMIC_POS) {
if (file && (file->f_mode & FMODE_ATOMIC_POS)) {
if (file_count(file) > 1) {
v |= FDPUT_POS_UNLOCK;
mutex_lock(&file->f_pos_lock);
}
}
return v | (unsigned long)file;
return v;
}
/*
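
__fdget_pos() packs its result into a single word: the struct file
pointer plus flag bits in the two low bits, which pointer alignment
guarantees are zero. A sketch of how a caller such as fdget_pos()
unpacks it (field names follow fs/file.c):

struct fd f;
unsigned long v = __fdget_pos(fd);

f.file  = (struct file *)(v & ~3);  /* mask off the two tag bits */
f.flags = v & 3;                    /* FDPUT_FPUT | FDPUT_POS_UNLOCK */

/* later, fdput_pos(f) drops f_pos_lock if FDPUT_POS_UNLOCK is set,
 * then releases the file reference if FDPUT_FPUT is set */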


@ -19,13 +19,13 @@ struct mnt_pcp {
};
struct mountpoint {
struct list_head m_hash;
struct hlist_node m_hash;
struct dentry *m_dentry;
int m_count;
};
struct mount {
struct list_head mnt_hash;
struct hlist_node mnt_hash;
struct mount *mnt_parent;
struct dentry *mnt_mountpoint;
struct vfsmount mnt;


@ -1109,7 +1109,7 @@ static bool __follow_mount_rcu(struct nameidata *nd, struct path *path,
return false;
if (!d_mountpoint(path->dentry))
break;
return true;
mounted = __lookup_mnt(path->mnt, path->dentry);
if (!mounted)
@ -1125,20 +1125,7 @@ static bool __follow_mount_rcu(struct nameidata *nd, struct path *path,
*/
*inode = path->dentry->d_inode;
}
return true;
}
static void follow_mount_rcu(struct nameidata *nd)
{
while (d_mountpoint(nd->path.dentry)) {
struct mount *mounted;
mounted = __lookup_mnt(nd->path.mnt, nd->path.dentry);
if (!mounted)
break;
nd->path.mnt = &mounted->mnt;
nd->path.dentry = mounted->mnt.mnt_root;
nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq);
}
return read_seqretry(&mount_lock, nd->m_seq);
}
static int follow_dotdot_rcu(struct nameidata *nd)
@ -1166,7 +1153,17 @@ static int follow_dotdot_rcu(struct nameidata *nd)
break;
nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq);
}
follow_mount_rcu(nd);
while (d_mountpoint(nd->path.dentry)) {
struct mount *mounted;
mounted = __lookup_mnt(nd->path.mnt, nd->path.dentry);
if (!mounted)
break;
nd->path.mnt = &mounted->mnt;
nd->path.dentry = mounted->mnt.mnt_root;
nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq);
if (!read_seqretry(&mount_lock, nd->m_seq))
goto failed;
}
nd->inode = nd->path.dentry->d_inode;
return 0;


@ -23,11 +23,34 @@
#include <linux/uaccess.h>
#include <linux/proc_ns.h>
#include <linux/magic.h>
#include <linux/bootmem.h>
#include "pnode.h"
#include "internal.h"
#define HASH_SHIFT ilog2(PAGE_SIZE / sizeof(struct list_head))
#define HASH_SIZE (1UL << HASH_SHIFT)
static unsigned int m_hash_mask __read_mostly;
static unsigned int m_hash_shift __read_mostly;
static unsigned int mp_hash_mask __read_mostly;
static unsigned int mp_hash_shift __read_mostly;
static __initdata unsigned long mhash_entries;
static int __init set_mhash_entries(char *str)
{
if (!str)
return 0;
mhash_entries = simple_strtoul(str, &str, 0);
return 1;
}
__setup("mhash_entries=", set_mhash_entries);
static __initdata unsigned long mphash_entries;
static int __init set_mphash_entries(char *str)
{
if (!str)
return 0;
mphash_entries = simple_strtoul(str, &str, 0);
return 1;
}
__setup("mphash_entries=", set_mphash_entries);
static int event;
static DEFINE_IDA(mnt_id_ida);
@ -36,8 +59,8 @@ static DEFINE_SPINLOCK(mnt_id_lock);
static int mnt_id_start = 0;
static int mnt_group_start = 1;
static struct list_head *mount_hashtable __read_mostly;
static struct list_head *mountpoint_hashtable __read_mostly;
static struct hlist_head *mount_hashtable __read_mostly;
static struct hlist_head *mountpoint_hashtable __read_mostly;
static struct kmem_cache *mnt_cache __read_mostly;
static DECLARE_RWSEM(namespace_sem);
@ -55,12 +78,19 @@ EXPORT_SYMBOL_GPL(fs_kobj);
*/
__cacheline_aligned_in_smp DEFINE_SEQLOCK(mount_lock);
static inline unsigned long hash(struct vfsmount *mnt, struct dentry *dentry)
static inline struct hlist_head *m_hash(struct vfsmount *mnt, struct dentry *dentry)
{
unsigned long tmp = ((unsigned long)mnt / L1_CACHE_BYTES);
tmp += ((unsigned long)dentry / L1_CACHE_BYTES);
tmp = tmp + (tmp >> HASH_SHIFT);
return tmp & (HASH_SIZE - 1);
tmp = tmp + (tmp >> m_hash_shift);
return &mount_hashtable[tmp & m_hash_mask];
}
static inline struct hlist_head *mp_hash(struct dentry *dentry)
{
unsigned long tmp = ((unsigned long)dentry / L1_CACHE_BYTES);
tmp = tmp + (tmp >> mp_hash_shift);
return &mountpoint_hashtable[tmp & mp_hash_mask];
}
/*
@ -187,7 +217,7 @@ static struct mount *alloc_vfsmnt(const char *name)
mnt->mnt_writers = 0;
#endif
INIT_LIST_HEAD(&mnt->mnt_hash);
INIT_HLIST_NODE(&mnt->mnt_hash);
INIT_LIST_HEAD(&mnt->mnt_child);
INIT_LIST_HEAD(&mnt->mnt_mounts);
INIT_LIST_HEAD(&mnt->mnt_list);
@ -575,10 +605,10 @@ bool legitimize_mnt(struct vfsmount *bastard, unsigned seq)
*/
struct mount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry)
{
struct list_head *head = mount_hashtable + hash(mnt, dentry);
struct hlist_head *head = m_hash(mnt, dentry);
struct mount *p;
list_for_each_entry_rcu(p, head, mnt_hash)
hlist_for_each_entry_rcu(p, head, mnt_hash)
if (&p->mnt_parent->mnt == mnt && p->mnt_mountpoint == dentry)
return p;
return NULL;
@ -590,13 +620,17 @@ struct mount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry)
*/
struct mount *__lookup_mnt_last(struct vfsmount *mnt, struct dentry *dentry)
{
struct list_head *head = mount_hashtable + hash(mnt, dentry);
struct mount *p;
list_for_each_entry_reverse(p, head, mnt_hash)
if (&p->mnt_parent->mnt == mnt && p->mnt_mountpoint == dentry)
return p;
return NULL;
struct mount *p, *res;
res = p = __lookup_mnt(mnt, dentry);
if (!p)
goto out;
hlist_for_each_entry_continue(p, mnt_hash) {
if (&p->mnt_parent->mnt != mnt || p->mnt_mountpoint != dentry)
break;
res = p;
}
out:
return res;
}
/*
@ -633,11 +667,11 @@ struct vfsmount *lookup_mnt(struct path *path)
static struct mountpoint *new_mountpoint(struct dentry *dentry)
{
struct list_head *chain = mountpoint_hashtable + hash(NULL, dentry);
struct hlist_head *chain = mp_hash(dentry);
struct mountpoint *mp;
int ret;
list_for_each_entry(mp, chain, m_hash) {
hlist_for_each_entry(mp, chain, m_hash) {
if (mp->m_dentry == dentry) {
/* might be worth a WARN_ON() */
if (d_unlinked(dentry))
@ -659,7 +693,7 @@ static struct mountpoint *new_mountpoint(struct dentry *dentry)
mp->m_dentry = dentry;
mp->m_count = 1;
list_add(&mp->m_hash, chain);
hlist_add_head(&mp->m_hash, chain);
return mp;
}
@ -670,7 +704,7 @@ static void put_mountpoint(struct mountpoint *mp)
spin_lock(&dentry->d_lock);
dentry->d_flags &= ~DCACHE_MOUNTED;
spin_unlock(&dentry->d_lock);
list_del(&mp->m_hash);
hlist_del(&mp->m_hash);
kfree(mp);
}
}
@ -712,7 +746,7 @@ static void detach_mnt(struct mount *mnt, struct path *old_path)
mnt->mnt_parent = mnt;
mnt->mnt_mountpoint = mnt->mnt.mnt_root;
list_del_init(&mnt->mnt_child);
list_del_init(&mnt->mnt_hash);
hlist_del_init_rcu(&mnt->mnt_hash);
put_mountpoint(mnt->mnt_mp);
mnt->mnt_mp = NULL;
}
@ -739,15 +773,14 @@ static void attach_mnt(struct mount *mnt,
struct mountpoint *mp)
{
mnt_set_mountpoint(parent, mp, mnt);
list_add_tail(&mnt->mnt_hash, mount_hashtable +
hash(&parent->mnt, mp->m_dentry));
hlist_add_head_rcu(&mnt->mnt_hash, m_hash(&parent->mnt, mp->m_dentry));
list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
}
/*
* vfsmount lock must be held for write
*/
static void commit_tree(struct mount *mnt)
static void commit_tree(struct mount *mnt, struct mount *shadows)
{
struct mount *parent = mnt->mnt_parent;
struct mount *m;
@ -762,8 +795,11 @@ static void commit_tree(struct mount *mnt)
list_splice(&head, n->list.prev);
list_add_tail(&mnt->mnt_hash, mount_hashtable +
hash(&parent->mnt, mnt->mnt_mountpoint));
if (shadows)
hlist_add_after_rcu(&shadows->mnt_hash, &mnt->mnt_hash);
else
hlist_add_head_rcu(&mnt->mnt_hash,
m_hash(&parent->mnt, mnt->mnt_mountpoint));
list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);
touch_mnt_namespace(n);
}
@ -1153,26 +1189,28 @@ int may_umount(struct vfsmount *mnt)
EXPORT_SYMBOL(may_umount);
static LIST_HEAD(unmounted); /* protected by namespace_sem */
static HLIST_HEAD(unmounted); /* protected by namespace_sem */
static void namespace_unlock(void)
{
struct mount *mnt;
LIST_HEAD(head);
struct hlist_head head = unmounted;
if (likely(list_empty(&unmounted))) {
if (likely(hlist_empty(&head))) {
up_write(&namespace_sem);
return;
}
list_splice_init(&unmounted, &head);
head.first->pprev = &head.first;
INIT_HLIST_HEAD(&unmounted);
up_write(&namespace_sem);
synchronize_rcu();
while (!list_empty(&head)) {
mnt = list_first_entry(&head, struct mount, mnt_hash);
list_del_init(&mnt->mnt_hash);
while (!hlist_empty(&head)) {
mnt = hlist_entry(head.first, struct mount, mnt_hash);
hlist_del_init(&mnt->mnt_hash);
if (mnt->mnt_ex_mountpoint.mnt)
path_put(&mnt->mnt_ex_mountpoint);
mntput(&mnt->mnt);
@ -1193,16 +1231,19 @@ static inline void namespace_lock(void)
*/
void umount_tree(struct mount *mnt, int how)
{
LIST_HEAD(tmp_list);
HLIST_HEAD(tmp_list);
struct mount *p;
struct mount *last = NULL;
for (p = mnt; p; p = next_mnt(p, mnt))
list_move(&p->mnt_hash, &tmp_list);
for (p = mnt; p; p = next_mnt(p, mnt)) {
hlist_del_init_rcu(&p->mnt_hash);
hlist_add_head(&p->mnt_hash, &tmp_list);
}
if (how)
propagate_umount(&tmp_list);
list_for_each_entry(p, &tmp_list, mnt_hash) {
hlist_for_each_entry(p, &tmp_list, mnt_hash) {
list_del_init(&p->mnt_expire);
list_del_init(&p->mnt_list);
__touch_mnt_namespace(p->mnt_ns);
@ -1220,8 +1261,13 @@ void umount_tree(struct mount *mnt, int how)
p->mnt_mp = NULL;
}
change_mnt_propagation(p, MS_PRIVATE);
last = p;
}
if (last) {
last->mnt_hash.next = unmounted.first;
unmounted.first = tmp_list.first;
unmounted.first->pprev = &unmounted.first;
}
list_splice(&tmp_list, &unmounted);
}
static void shrink_submounts(struct mount *mnt);
@ -1605,24 +1651,23 @@ static int attach_recursive_mnt(struct mount *source_mnt,
struct mountpoint *dest_mp,
struct path *parent_path)
{
LIST_HEAD(tree_list);
HLIST_HEAD(tree_list);
struct mount *child, *p;
struct hlist_node *n;
int err;
if (IS_MNT_SHARED(dest_mnt)) {
err = invent_group_ids(source_mnt, true);
if (err)
goto out;
}
err = propagate_mnt(dest_mnt, dest_mp, source_mnt, &tree_list);
if (err)
goto out_cleanup_ids;
lock_mount_hash();
if (IS_MNT_SHARED(dest_mnt)) {
err = propagate_mnt(dest_mnt, dest_mp, source_mnt, &tree_list);
if (err)
goto out_cleanup_ids;
lock_mount_hash();
for (p = source_mnt; p; p = next_mnt(p, source_mnt))
set_mnt_shared(p);
} else {
lock_mount_hash();
}
if (parent_path) {
detach_mnt(source_mnt, parent_path);
@ -1630,20 +1675,22 @@ static int attach_recursive_mnt(struct mount *source_mnt,
touch_mnt_namespace(source_mnt->mnt_ns);
} else {
mnt_set_mountpoint(dest_mnt, dest_mp, source_mnt);
commit_tree(source_mnt);
commit_tree(source_mnt, NULL);
}
list_for_each_entry_safe(child, p, &tree_list, mnt_hash) {
list_del_init(&child->mnt_hash);
commit_tree(child);
hlist_for_each_entry_safe(child, n, &tree_list, mnt_hash) {
struct mount *q;
hlist_del_init(&child->mnt_hash);
q = __lookup_mnt_last(&child->mnt_parent->mnt,
child->mnt_mountpoint);
commit_tree(child, q);
}
unlock_mount_hash();
return 0;
out_cleanup_ids:
if (IS_MNT_SHARED(dest_mnt))
cleanup_group_ids(source_mnt, NULL);
cleanup_group_ids(source_mnt, NULL);
out:
return err;
}
@ -2777,18 +2824,24 @@ void __init mnt_init(void)
mnt_cache = kmem_cache_create("mnt_cache", sizeof(struct mount),
0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
mount_hashtable = (struct list_head *)__get_free_page(GFP_ATOMIC);
mountpoint_hashtable = (struct list_head *)__get_free_page(GFP_ATOMIC);
mount_hashtable = alloc_large_system_hash("Mount-cache",
sizeof(struct hlist_head),
mhash_entries, 19,
0,
&m_hash_shift, &m_hash_mask, 0, 0);
mountpoint_hashtable = alloc_large_system_hash("Mountpoint-cache",
sizeof(struct hlist_head),
mphash_entries, 19,
0,
&mp_hash_shift, &mp_hash_mask, 0, 0);
if (!mount_hashtable || !mountpoint_hashtable)
panic("Failed to allocate mount hash table\n");
printk(KERN_INFO "Mount-cache hash table entries: %lu\n", HASH_SIZE);
for (u = 0; u < HASH_SIZE; u++)
INIT_LIST_HEAD(&mount_hashtable[u]);
for (u = 0; u < HASH_SIZE; u++)
INIT_LIST_HEAD(&mountpoint_hashtable[u]);
for (u = 0; u <= m_hash_mask; u++)
INIT_HLIST_HEAD(&mount_hashtable[u]);
for (u = 0; u <= mp_hash_mask; u++)
INIT_HLIST_HEAD(&mountpoint_hashtable[u]);
kernfs_init();
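
The two __setup() hooks added above make the hash sizes boot-time
tunable instead of fixed at one page. For example (values illustrative;
alloc_large_system_hash() rounds and clamps them), the kernel command
line could request larger tables with:

	mhash_entries=131072 mphash_entries=16384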


@ -449,6 +449,7 @@ nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap,
fh_lock(fhp);
host_err = notify_change(dentry, iap, NULL);
fh_unlock(fhp);
err = nfserrno(host_err);
out_put_write_access:
if (size_change)


@ -346,7 +346,9 @@ int ocfs2_cluster_connect(const char *stack_name,
strlcpy(new_conn->cc_name, group, GROUP_NAME_MAX + 1);
new_conn->cc_namelen = grouplen;
strlcpy(new_conn->cc_cluster_name, cluster_name, CLUSTER_NAME_MAX + 1);
if (cluster_name_len)
strlcpy(new_conn->cc_cluster_name, cluster_name,
CLUSTER_NAME_MAX + 1);
new_conn->cc_cluster_name_len = cluster_name_len;
new_conn->cc_recovery_handler = recovery_handler;
new_conn->cc_recovery_data = recovery_data;


@ -220,14 +220,14 @@ static struct mount *get_source(struct mount *dest,
* @tree_list : list of heads of trees to be attached.
*/
int propagate_mnt(struct mount *dest_mnt, struct mountpoint *dest_mp,
struct mount *source_mnt, struct list_head *tree_list)
struct mount *source_mnt, struct hlist_head *tree_list)
{
struct user_namespace *user_ns = current->nsproxy->mnt_ns->user_ns;
struct mount *m, *child;
int ret = 0;
struct mount *prev_dest_mnt = dest_mnt;
struct mount *prev_src_mnt = source_mnt;
LIST_HEAD(tmp_list);
HLIST_HEAD(tmp_list);
for (m = propagation_next(dest_mnt, dest_mnt); m;
m = propagation_next(m, dest_mnt)) {
@ -246,27 +246,29 @@ int propagate_mnt(struct mount *dest_mnt, struct mountpoint *dest_mp,
child = copy_tree(source, source->mnt.mnt_root, type);
if (IS_ERR(child)) {
ret = PTR_ERR(child);
list_splice(tree_list, tmp_list.prev);
tmp_list = *tree_list;
tmp_list.first->pprev = &tmp_list.first;
INIT_HLIST_HEAD(tree_list);
goto out;
}
if (is_subdir(dest_mp->m_dentry, m->mnt.mnt_root)) {
mnt_set_mountpoint(m, dest_mp, child);
list_add_tail(&child->mnt_hash, tree_list);
hlist_add_head(&child->mnt_hash, tree_list);
} else {
/*
* This can happen if the parent mount was bind mounted
* on some subdirectory of a shared/slave mount.
*/
list_add_tail(&child->mnt_hash, &tmp_list);
hlist_add_head(&child->mnt_hash, &tmp_list);
}
prev_dest_mnt = m;
prev_src_mnt = child;
}
out:
lock_mount_hash();
while (!list_empty(&tmp_list)) {
child = list_first_entry(&tmp_list, struct mount, mnt_hash);
while (!hlist_empty(&tmp_list)) {
child = hlist_entry(tmp_list.first, struct mount, mnt_hash);
umount_tree(child, 0);
}
unlock_mount_hash();
@ -338,8 +340,10 @@ static void __propagate_umount(struct mount *mnt)
* umount the child only if the child has no
* other children
*/
if (child && list_empty(&child->mnt_mounts))
list_move_tail(&child->mnt_hash, &mnt->mnt_hash);
if (child && list_empty(&child->mnt_mounts)) {
hlist_del_init_rcu(&child->mnt_hash);
hlist_add_before_rcu(&child->mnt_hash, &mnt->mnt_hash);
}
}
}
@ -350,11 +354,11 @@ static void __propagate_umount(struct mount *mnt)
*
* vfsmount lock must be held for write
*/
int propagate_umount(struct list_head *list)
int propagate_umount(struct hlist_head *list)
{
struct mount *mnt;
list_for_each_entry(mnt, list, mnt_hash)
hlist_for_each_entry(mnt, list, mnt_hash)
__propagate_umount(mnt);
return 0;
}


@ -36,8 +36,8 @@ static inline void set_mnt_shared(struct mount *mnt)
void change_mnt_propagation(struct mount *, int);
int propagate_mnt(struct mount *, struct mountpoint *, struct mount *,
struct list_head *);
int propagate_umount(struct list_head *);
struct hlist_head *);
int propagate_umount(struct hlist_head *);
int propagate_mount_busy(struct mount *, int);
void mnt_release_group_id(struct mount *);
int get_dominating_id(struct mount *mnt, const struct path *root);


@ -307,7 +307,7 @@ SYSCALL_DEFINE5(llseek, unsigned int, fd, unsigned long, offset_high,
unsigned int, whence)
{
int retval;
struct fd f = fdget(fd);
struct fd f = fdget_pos(fd);
loff_t offset;
if (!f.file)
@ -327,7 +327,7 @@ SYSCALL_DEFINE5(llseek, unsigned int, fd, unsigned long, offset_high,
retval = 0;
}
out_putf:
fdput(f);
fdput_pos(f);
return retval;
}
#endif


@ -196,6 +196,21 @@ static inline unsigned long __ffs64(u64 word)
#ifdef __KERNEL__
#ifndef set_mask_bits
#define set_mask_bits(ptr, _mask, _bits) \
({ \
const typeof(*ptr) mask = (_mask), bits = (_bits); \
typeof(*ptr) old, new; \
\
do { \
old = ACCESS_ONCE(*ptr); \
new = (old & ~mask) | bits; \
} while (cmpxchg(ptr, old, new) != old); \
\
new; \
})
#endif
#ifndef find_last_bit
/**
* find_last_bit - find the last set bit in a memory region


@ -163,4 +163,11 @@ enum {
/* changeable features with no special hardware requirements */
#define NETIF_F_SOFT_FEATURES (NETIF_F_GSO | NETIF_F_GRO)
#define NETIF_F_VLAN_FEATURES (NETIF_F_HW_VLAN_CTAG_FILTER | \
NETIF_F_HW_VLAN_CTAG_RX | \
NETIF_F_HW_VLAN_CTAG_TX | \
NETIF_F_HW_VLAN_STAG_FILTER | \
NETIF_F_HW_VLAN_STAG_RX | \
NETIF_F_HW_VLAN_STAG_TX)
#endif /* _LINUX_NETDEV_FEATURES_H */
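
NETIF_F_VLAN_FEATURES collects every VLAN-related offload flag, so a
driver that must not inherit VLAN offloads onto its vlan_features (as
qlge, ifb, and veth do piecemeal above) could, hypothetically, mask
them in one step:

/* hypothetical one-step form of the per-driver masking above */
dev->vlan_features = dev->features & ~NETIF_F_VLAN_FEATURES;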


@ -3014,7 +3014,7 @@ struct sk_buff *skb_gso_segment(struct sk_buff *skb, netdev_features_t features)
{
return __skb_gso_segment(skb, features, true);
}
__be16 skb_network_protocol(struct sk_buff *skb);
__be16 skb_network_protocol(struct sk_buff *skb, int *depth);
static inline bool can_checksum_protocol(netdev_features_t features,
__be16 protocol)


@ -2451,8 +2451,8 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
unsigned int flags);
void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
unsigned int skb_zerocopy_headlen(const struct sk_buff *from);
void skb_zerocopy(struct sk_buff *to, const struct sk_buff *from,
int len, int hlen);
int skb_zerocopy(struct sk_buff *to, struct sk_buff *from,
int len, int hlen);
void skb_split(struct sk_buff *skb, struct sk_buff *skb1, const u32 len);
int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen);
void skb_scrub_packet(struct sk_buff *skb, bool xnet);


@ -30,7 +30,7 @@ struct usbnet {
struct driver_info *driver_info;
const char *driver_name;
void *driver_priv;
wait_queue_head_t *wait;
wait_queue_head_t wait;
struct mutex phy_mutex;
unsigned char suspend_count;
unsigned char pkt_cnt, pkt_err;


@ -31,8 +31,10 @@
#define IF_PREFIX_AUTOCONF 0x02
enum {
INET6_IFADDR_STATE_PREDAD,
INET6_IFADDR_STATE_DAD,
INET6_IFADDR_STATE_POSTDAD,
INET6_IFADDR_STATE_ERRDAD,
INET6_IFADDR_STATE_UP,
INET6_IFADDR_STATE_DEAD,
};
@ -58,7 +60,7 @@ struct inet6_ifaddr {
unsigned long cstamp; /* created timestamp */
unsigned long tstamp; /* updated timestamp */
struct timer_list dad_timer;
struct delayed_work dad_work;
struct inet6_dev *idev;
struct rt6_info *rt;


@ -608,9 +608,19 @@ static int audit_netlink_ok(struct sk_buff *skb, u16 msg_type)
int err = 0;
/* Only support the initial namespaces for now. */
/*
* We return ECONNREFUSED because it tricks userspace into thinking
* that audit was not configured into the kernel. Lots of users
* configure their PAM stack (because that's what the distro does)
* to reject login if unable to send messages to audit. If we return
* ECONNREFUSED the PAM stack thinks the kernel does not have audit
* configured in and will let login proceed. If we return EPERM
* userspace will reject all logins. This should be removed when we
* support non init namespaces!!
*/
if ((current_user_ns() != &init_user_ns) ||
(task_active_pid_ns(current) != &init_pid_ns))
return -EPERM;
return -ECONNREFUSED;
switch (msg_type) {
case AUDIT_LIST:


@ -1435,7 +1435,8 @@ void update_wall_time(void)
out:
raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
if (clock_set)
clock_was_set();
/* Have to call the _delayed version, since we are in irq context */
clock_was_set_delayed();
}
/**


@ -1600,15 +1600,31 @@ void trace_buffer_unlock_commit(struct ring_buffer *buffer,
}
EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit);
static struct ring_buffer *temp_buffer;
struct ring_buffer_event *
trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
struct ftrace_event_file *ftrace_file,
int type, unsigned long len,
unsigned long flags, int pc)
{
struct ring_buffer_event *entry;
*current_rb = ftrace_file->tr->trace_buffer.buffer;
return trace_buffer_lock_reserve(*current_rb,
entry = trace_buffer_lock_reserve(*current_rb,
type, len, flags, pc);
/*
* If tracing is off, but we have triggers enabled
* we still need to look at the event data. Use the temp_buffer
* to store the trace event for the trigger to use. It's recursion
* safe and will not be recorded anywhere.
*/
if (!entry && ftrace_file->flags & FTRACE_EVENT_FL_TRIGGER_COND) {
*current_rb = temp_buffer;
entry = trace_buffer_lock_reserve(*current_rb,
type, len, flags, pc);
}
return entry;
}
EXPORT_SYMBOL_GPL(trace_event_buffer_lock_reserve);
@ -6494,11 +6510,16 @@ __init static int tracer_alloc_buffers(void)
raw_spin_lock_init(&global_trace.start_lock);
/* Used for event triggers */
temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
if (!temp_buffer)
goto out_free_cpumask;
/* TODO: make the number of buffers hot pluggable with CPUS */
if (allocate_trace_buffers(&global_trace, ring_buf_size) < 0) {
printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
WARN_ON(1);
goto out_free_cpumask;
goto out_free_temp_buffer;
}
if (global_trace.buffer_disabled)
@ -6540,6 +6561,8 @@ __init static int tracer_alloc_buffers(void)
return 0;
out_free_temp_buffer:
ring_buffer_free(temp_buffer);
out_free_cpumask:
free_percpu(global_trace.trace_buffer.data);
#ifdef CONFIG_TRACER_MAX_TRACE


@ -244,8 +244,19 @@ static void __prandom_reseed(bool late)
static bool latch = false;
static DEFINE_SPINLOCK(lock);
/* Asking for random bytes might result in bytes getting
* moved into the nonblocking pool and thus marking it
* as initialized. In this case we would double back into
* this function and attempt to do a late reseed.
* Ignore the pointless attempt to reseed again if we're
* already waiting for bytes when the nonblocking pool
* got initialized.
*/
/* only allow initial seeding (late == false) once */
spin_lock_irqsave(&lock, flags);
if (!spin_trylock_irqsave(&lock, flags))
return;
if (latch && !late)
goto out;
latch = true;
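
The switch from spin_lock_irqsave() to spin_trylock_irqsave() is a
reentrancy guard, per the new comment: if the seeding path re-enters
itself (the request for random bytes can initialize the nonblocking
pool, which triggers a late reseed), the nested call simply returns
instead of deadlocking on the lock it already holds. The shape of the
guard, in isolation:

static DEFINE_SPINLOCK(lock);
unsigned long flags;

if (!spin_trylock_irqsave(&lock, flags))
	return;			/* nested reseed attempt: drop it */
/* the seeding work runs here, under the lock */
spin_unlock_irqrestore(&lock, flags);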


@ -307,9 +307,11 @@ static void vlan_sync_address(struct net_device *dev,
static void vlan_transfer_features(struct net_device *dev,
struct net_device *vlandev)
{
struct vlan_dev_priv *vlan = vlan_dev_priv(vlandev);
vlandev->gso_max_size = dev->gso_max_size;
if (dev->features & NETIF_F_HW_VLAN_CTAG_TX)
if (vlan_hw_offload_capable(dev->features, vlan->vlan_proto))
vlandev->hard_header_len = dev->hard_header_len;
else
vlandev->hard_header_len = dev->hard_header_len + VLAN_HLEN;


@ -578,6 +578,9 @@ static int vlan_dev_init(struct net_device *dev)
dev->features |= real_dev->vlan_features | NETIF_F_LLTX;
dev->gso_max_size = real_dev->gso_max_size;
if (dev->features & NETIF_F_VLAN_FEATURES)
netdev_warn(real_dev, "VLAN features are set incorrectly. Q-in-Q configurations may not work correctly.\n");
/* ipv6 shared card related stuff */
dev->dev_id = real_dev->dev_id;
@ -592,7 +595,8 @@ static int vlan_dev_init(struct net_device *dev)
#endif
dev->needed_headroom = real_dev->needed_headroom;
if (real_dev->features & NETIF_F_HW_VLAN_CTAG_TX) {
if (vlan_hw_offload_capable(real_dev->features,
vlan_dev_priv(dev)->vlan_proto)) {
dev->header_ops = &vlan_passthru_header_ops;
dev->hard_header_len = real_dev->hard_header_len;
} else {


@ -49,14 +49,14 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
brstats->tx_bytes += skb->len;
u64_stats_update_end(&brstats->syncp);
if (!br_allowed_ingress(br, br_get_vlan_info(br), skb, &vid))
goto out;
BR_INPUT_SKB_CB(skb)->brdev = dev;
skb_reset_mac_header(skb);
skb_pull(skb, ETH_HLEN);
if (!br_allowed_ingress(br, br_get_vlan_info(br), skb, &vid))
goto out;
if (is_broadcast_ether_addr(dest))
br_flood_deliver(br, skb, false);
else if (is_multicast_ether_addr(dest)) {


@ -29,6 +29,7 @@ static int br_pass_frame_up(struct sk_buff *skb)
struct net_device *indev, *brdev = BR_INPUT_SKB_CB(skb)->brdev;
struct net_bridge *br = netdev_priv(brdev);
struct pcpu_sw_netstats *brstats = this_cpu_ptr(br->stats);
struct net_port_vlans *pv;
u64_stats_update_begin(&brstats->syncp);
brstats->rx_packets++;
@ -39,18 +40,18 @@ static int br_pass_frame_up(struct sk_buff *skb)
* packet is allowed except in promisc mode when someone
* may be running packet capture.
*/
pv = br_get_vlan_info(br);
if (!(brdev->flags & IFF_PROMISC) &&
!br_allowed_egress(br, br_get_vlan_info(br), skb)) {
!br_allowed_egress(br, pv, skb)) {
kfree_skb(skb);
return NET_RX_DROP;
}
skb = br_handle_vlan(br, br_get_vlan_info(br), skb);
if (!skb)
return NET_RX_DROP;
indev = skb->dev;
skb->dev = brdev;
skb = br_handle_vlan(br, pv, skb);
if (!skb)
return NET_RX_DROP;
return NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_IN, skb, indev, NULL,
netif_receive_skb);


@ -119,22 +119,6 @@ static void __vlan_flush(struct net_port_vlans *v)
kfree_rcu(v, rcu);
}
/* Strip the tag from the packet. Will return skb with tci set 0. */
static struct sk_buff *br_vlan_untag(struct sk_buff *skb)
{
if (skb->protocol != htons(ETH_P_8021Q)) {
skb->vlan_tci = 0;
return skb;
}
skb->vlan_tci = 0;
skb = vlan_untag(skb);
if (skb)
skb->vlan_tci = 0;
return skb;
}
struct sk_buff *br_handle_vlan(struct net_bridge *br,
const struct net_port_vlans *pv,
struct sk_buff *skb)
@ -144,13 +128,27 @@ struct sk_buff *br_handle_vlan(struct net_bridge *br,
if (!br->vlan_enabled)
goto out;
/* Vlan filter table must be configured at this point. The
* only exception is when the bridge is set in promisc mode and the
* packet is destined for the bridge device. In this case
* pass the packet as is.
*/
if (!pv) {
if ((br->dev->flags & IFF_PROMISC) && skb->dev == br->dev) {
goto out;
} else {
kfree_skb(skb);
return NULL;
}
}
/* At this point, we know that the frame was filtered and contains
* a valid vlan id. If the vlan id is set in the untagged bitmap,
* send untagged; otherwise, send tagged.
*/
br_vlan_get_tag(skb, &vid);
if (test_bit(vid, pv->untagged_bitmap))
skb = br_vlan_untag(skb);
skb->vlan_tci = 0;
out:
return skb;
@ -174,6 +172,18 @@ bool br_allowed_ingress(struct net_bridge *br, struct net_port_vlans *v,
if (!v)
return false;
/* If vlan tx offload is disabled on bridge device and frame was
* sent from vlan device on the bridge device, it does not have
* HW accelerated vlan tag.
*/
if (unlikely(!vlan_tx_tag_present(skb) &&
(skb->protocol == htons(ETH_P_8021Q) ||
skb->protocol == htons(ETH_P_8021AD)))) {
skb = vlan_untag(skb);
if (unlikely(!skb))
return false;
}
err = br_vlan_get_tag(skb, vid);
if (!*vid) {
u16 pvid = br_get_pvid(v);
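
The new ingress hunk covers the case the old code missed: with VLAN tx
offload disabled on the bridge device, a frame from an upper vlan device
still carries its tag inline in the payload rather than in skb->vlan_tci, so
it must be software-untagged before the VID check. A toy model of that
normalization (simplified, hypothetical structure):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ETH_P_8021Q 0x8100

struct pkt {
        bool     hw_tag;      /* tag already lifted into metadata? */
        uint16_t hw_tci;
        uint16_t proto;       /* ethertype as seen on the wire */
        uint16_t inline_tci;  /* tag still embedded in the frame */
};

static int get_vid(struct pkt *p)
{
        if (!p->hw_tag && p->proto == ETH_P_8021Q) {
                /* software untag: move the inline tag into metadata */
                p->hw_tag = true;
                p->hw_tci = p->inline_tci;
        }
        return p->hw_tag ? (p->hw_tci & 0x0fff) : -1;
}

int main(void)
{
        struct pkt sw = { .proto = ETH_P_8021Q, .inline_tci = 100 };
        struct pkt hw = { .hw_tag = true, .hw_tci = 200, .proto = 0x0800 };

        printf("inline-tagged vid=%d, hw-tagged vid=%d\n",
               get_vid(&sw), get_vid(&hw));
        return 0;
}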


@ -2286,7 +2286,7 @@ out:
}
EXPORT_SYMBOL(skb_checksum_help);
__be16 skb_network_protocol(struct sk_buff *skb)
__be16 skb_network_protocol(struct sk_buff *skb, int *depth)
{
__be16 type = skb->protocol;
int vlan_depth = ETH_HLEN;
@ -2313,6 +2313,8 @@ __be16 skb_network_protocol(struct sk_buff *skb)
vlan_depth += VLAN_HLEN;
}
*depth = vlan_depth;
return type;
}
@ -2326,12 +2328,13 @@ struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
{
struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
struct packet_offload *ptype;
__be16 type = skb_network_protocol(skb);
int vlan_depth = skb->mac_len;
__be16 type = skb_network_protocol(skb, &vlan_depth);
if (unlikely(!type))
return ERR_PTR(-EINVAL);
__skb_pull(skb, skb->mac_len);
__skb_pull(skb, vlan_depth);
rcu_read_lock();
list_for_each_entry_rcu(ptype, &offload_base, list) {
@ -2498,8 +2501,10 @@ static netdev_features_t harmonize_features(struct sk_buff *skb,
const struct net_device *dev,
netdev_features_t features)
{
int tmp;
if (skb->ip_summed != CHECKSUM_NONE &&
!can_checksum_protocol(features, skb_network_protocol(skb))) {
!can_checksum_protocol(features, skb_network_protocol(skb, &tmp))) {
features &= ~NETIF_F_ALL_CSUM;
} else if (illegal_highdma(dev, skb)) {
features &= ~NETIF_F_SG;
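
The out-parameter matters because GSO was pulling skb->mac_len
unconditionally, which is wrong once one or more VLAN headers sit between the
Ethernet header and the network header. A standalone sketch of the depth
computation (fixed-offset parsing, no bounds checks -- illustration only):

#include <stdint.h>
#include <stdio.h>

#define ETH_HLEN     14
#define VLAN_HLEN    4
#define ETH_P_8021Q  0x8100
#define ETH_P_8021AD 0x88A8

/* Walk nested VLAN tags; report the inner protocol and how many bytes
 * precede it -- the role of the new *depth parameter. */
static uint16_t network_protocol(const uint8_t *frame, int *depth)
{
        uint16_t type = (uint16_t)((frame[12] << 8) | frame[13]);
        int off = ETH_HLEN;

        while (type == ETH_P_8021Q || type == ETH_P_8021AD) {
                type = (uint16_t)((frame[off + 2] << 8) | frame[off + 3]);
                off += VLAN_HLEN;
        }
        *depth = off;
        return type;
}

int main(void)
{
        /* one 802.1Q tag (ethertype 0x8100), inner protocol IPv4 (0x0800) */
        uint8_t frame[22] = { [12] = 0x81, [13] = 0x00,
                              [16] = 0x08, [17] = 0x00 };
        int depth;
        uint16_t proto = network_protocol(frame, &depth);

        printf("proto=0x%04x depth=%d\n", proto, depth);  /* 0x0800, 18 */
        return 0;
}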


@ -2127,25 +2127,31 @@ EXPORT_SYMBOL_GPL(skb_zerocopy_headlen);
*
* The `hlen` as calculated by skb_zerocopy_headlen() specifies the
* headroom in the `to` buffer.
*
* Return value:
* 0: everything is OK
* -ENOMEM: couldn't orphan frags of @from due to lack of memory
* -EFAULT: skb_copy_bits() found some problem with skb geometry
*/
void
skb_zerocopy(struct sk_buff *to, const struct sk_buff *from, int len, int hlen)
int
skb_zerocopy(struct sk_buff *to, struct sk_buff *from, int len, int hlen)
{
int i, j = 0;
int plen = 0; /* length of skb->head fragment */
int ret;
struct page *page;
unsigned int offset;
BUG_ON(!from->head_frag && !hlen);
/* don't bother with small payloads */
if (len <= skb_tailroom(to)) {
skb_copy_bits(from, 0, skb_put(to, len), len);
return;
}
if (len <= skb_tailroom(to))
return skb_copy_bits(from, 0, skb_put(to, len), len);
if (hlen) {
skb_copy_bits(from, 0, skb_put(to, hlen), hlen);
ret = skb_copy_bits(from, 0, skb_put(to, hlen), hlen);
if (unlikely(ret))
return ret;
len -= hlen;
} else {
plen = min_t(int, skb_headlen(from), len);
@ -2163,6 +2169,11 @@ skb_zerocopy(struct sk_buff *to, const struct sk_buff *from, int len, int hlen)
to->len += len + plen;
to->data_len += len + plen;
if (unlikely(skb_orphan_frags(from, GFP_ATOMIC))) {
skb_tx_error(from);
return -ENOMEM;
}
for (i = 0; i < skb_shinfo(from)->nr_frags; i++) {
if (!len)
break;
@ -2173,6 +2184,8 @@ skb_zerocopy(struct sk_buff *to, const struct sk_buff *from, int len, int hlen)
j++;
}
skb_shinfo(to)->nr_frags = j;
return 0;
}
EXPORT_SYMBOL_GPL(skb_zerocopy);
@ -2866,8 +2879,9 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb,
int err = -ENOMEM;
int i = 0;
int pos;
int dummy;
proto = skb_network_protocol(head_skb);
proto = skb_network_protocol(head_skb, &dummy);
if (unlikely(!proto))
return ERR_PTR(-EINVAL);


@ -182,6 +182,14 @@ static int gre_cisco_rcv(struct sk_buff *skb)
int i;
bool csum_err = false;
#ifdef CONFIG_NET_IPGRE_BROADCAST
if (ipv4_is_multicast(ip_hdr(skb)->daddr)) {
/* Looped back packet, drop it! */
if (rt_is_output_route(skb_rtable(skb)))
goto drop;
}
#endif
if (parse_gre_header(skb, &tpi, &csum_err) < 0)
goto drop;


@ -416,9 +416,6 @@ int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
#ifdef CONFIG_NET_IPGRE_BROADCAST
if (ipv4_is_multicast(iph->daddr)) {
/* Looped back packet, drop it! */
if (rt_is_output_route(skb_rtable(skb)))
goto drop;
tunnel->dev->stats.multicast++;
skb->pkt_type = PACKET_BROADCAST;
}


@ -108,6 +108,7 @@ int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto)
nf_reset(skb);
secpath_reset(skb);
skb_clear_hash_if_not_l4(skb);
skb_dst_drop(skb);
skb->vlan_tci = 0;
skb_set_queue_mapping(skb, 0);
skb->pkt_type = PACKET_HOST;


@ -2628,7 +2628,7 @@ static void get_timewait4_sock(const struct inet_timewait_sock *tw,
{
__be32 dest, src;
__u16 destp, srcp;
long delta = tw->tw_ttd - jiffies;
s32 delta = tw->tw_ttd - inet_tw_time_stamp();
dest = tw->tw_daddr;
src = tw->tw_rcv_saddr;
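
The type change encodes the usual wraparound rule: two 32-bit timestamps must
be subtracted in 32 bits and the result read as signed, otherwise mixing a
32-bit stamp with a wider clock produces garbage near the wrap point. A
self-contained illustration of the shape of the bug (not the kernel's actual
types):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t ttd   = 0x00000010u;   /* 32-bit deadline, just past wrap */
        uint64_t clock = 0xfffffff0u;   /* wider clock, just before wrap */

        /* mixed-width subtraction: a huge bogus value */
        int64_t wrong = (int64_t)(ttd - clock);
        /* 32-bit subtraction read as signed: the intended +32 ticks */
        int32_t right = (int32_t)(ttd - (uint32_t)clock);

        printf("wrong=%lld right=%d\n", (long long)wrong, right);
        return 0;
}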


@ -133,10 +133,12 @@ static int ipv6_count_addresses(struct inet6_dev *idev);
static struct hlist_head inet6_addr_lst[IN6_ADDR_HSIZE];
static DEFINE_SPINLOCK(addrconf_hash_lock);
static void addrconf_verify(unsigned long);
static void addrconf_verify(void);
static void addrconf_verify_rtnl(void);
static void addrconf_verify_work(struct work_struct *);
static DEFINE_TIMER(addr_chk_timer, addrconf_verify, 0, 0);
static DEFINE_SPINLOCK(addrconf_verify_lock);
static struct workqueue_struct *addrconf_wq;
static DECLARE_DELAYED_WORK(addr_chk_work, addrconf_verify_work);
static void addrconf_join_anycast(struct inet6_ifaddr *ifp);
static void addrconf_leave_anycast(struct inet6_ifaddr *ifp);
@ -151,7 +153,7 @@ static struct rt6_info *addrconf_get_prefix_route(const struct in6_addr *pfx,
u32 flags, u32 noflags);
static void addrconf_dad_start(struct inet6_ifaddr *ifp);
static void addrconf_dad_timer(unsigned long data);
static void addrconf_dad_work(struct work_struct *w);
static void addrconf_dad_completed(struct inet6_ifaddr *ifp);
static void addrconf_dad_run(struct inet6_dev *idev);
static void addrconf_rs_timer(unsigned long data);
@ -247,9 +249,9 @@ static void addrconf_del_rs_timer(struct inet6_dev *idev)
__in6_dev_put(idev);
}
static void addrconf_del_dad_timer(struct inet6_ifaddr *ifp)
static void addrconf_del_dad_work(struct inet6_ifaddr *ifp)
{
if (del_timer(&ifp->dad_timer))
if (cancel_delayed_work(&ifp->dad_work))
__in6_ifa_put(ifp);
}
@ -261,12 +263,12 @@ static void addrconf_mod_rs_timer(struct inet6_dev *idev,
mod_timer(&idev->rs_timer, jiffies + when);
}
static void addrconf_mod_dad_timer(struct inet6_ifaddr *ifp,
unsigned long when)
static void addrconf_mod_dad_work(struct inet6_ifaddr *ifp,
unsigned long delay)
{
if (!timer_pending(&ifp->dad_timer))
if (!delayed_work_pending(&ifp->dad_work))
in6_ifa_hold(ifp);
mod_timer(&ifp->dad_timer, jiffies + when);
mod_delayed_work(addrconf_wq, &ifp->dad_work, delay);
}
static int snmp6_alloc_dev(struct inet6_dev *idev)
@ -751,8 +753,9 @@ void inet6_ifa_finish_destroy(struct inet6_ifaddr *ifp)
in6_dev_put(ifp->idev);
if (del_timer(&ifp->dad_timer))
pr_notice("Timer is still running, when freeing ifa=%p\n", ifp);
if (cancel_delayed_work(&ifp->dad_work))
pr_notice("delayed DAD work was pending while freeing ifa=%p\n",
ifp);
if (ifp->state != INET6_IFADDR_STATE_DEAD) {
pr_warn("Freeing alive inet6 address %p\n", ifp);
@ -849,8 +852,7 @@ ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr,
spin_lock_init(&ifa->lock);
spin_lock_init(&ifa->state_lock);
setup_timer(&ifa->dad_timer, addrconf_dad_timer,
(unsigned long)ifa);
INIT_DELAYED_WORK(&ifa->dad_work, addrconf_dad_work);
INIT_HLIST_NODE(&ifa->addr_lst);
ifa->scope = scope;
ifa->prefix_len = pfxlen;
@ -990,6 +992,8 @@ static void ipv6_del_addr(struct inet6_ifaddr *ifp)
enum cleanup_prefix_rt_t action = CLEANUP_PREFIX_RT_NOP;
unsigned long expires;
ASSERT_RTNL();
spin_lock_bh(&ifp->state_lock);
state = ifp->state;
ifp->state = INET6_IFADDR_STATE_DEAD;
@ -1021,7 +1025,7 @@ static void ipv6_del_addr(struct inet6_ifaddr *ifp)
write_unlock_bh(&ifp->idev->lock);
addrconf_del_dad_timer(ifp);
addrconf_del_dad_work(ifp);
ipv6_ifa_notify(RTM_DELADDR, ifp);
@ -1604,7 +1608,7 @@ static void addrconf_dad_stop(struct inet6_ifaddr *ifp, int dad_failed)
{
if (ifp->flags&IFA_F_PERMANENT) {
spin_lock_bh(&ifp->lock);
addrconf_del_dad_timer(ifp);
addrconf_del_dad_work(ifp);
ifp->flags |= IFA_F_TENTATIVE;
if (dad_failed)
ifp->flags |= IFA_F_DADFAILED;
@ -1625,20 +1629,21 @@ static void addrconf_dad_stop(struct inet6_ifaddr *ifp, int dad_failed)
spin_unlock_bh(&ifp->lock);
}
ipv6_del_addr(ifp);
} else
} else {
ipv6_del_addr(ifp);
}
}
static int addrconf_dad_end(struct inet6_ifaddr *ifp)
{
int err = -ENOENT;
spin_lock(&ifp->state_lock);
spin_lock_bh(&ifp->state_lock);
if (ifp->state == INET6_IFADDR_STATE_DAD) {
ifp->state = INET6_IFADDR_STATE_POSTDAD;
err = 0;
}
spin_unlock(&ifp->state_lock);
spin_unlock_bh(&ifp->state_lock);
return err;
}
@ -1671,7 +1676,12 @@ void addrconf_dad_failure(struct inet6_ifaddr *ifp)
}
}
addrconf_dad_stop(ifp, 1);
spin_lock_bh(&ifp->state_lock);
/* transition from _POSTDAD to _ERRDAD */
ifp->state = INET6_IFADDR_STATE_ERRDAD;
spin_unlock_bh(&ifp->state_lock);
addrconf_mod_dad_work(ifp, 0);
}
/* Join to solicited addr multicast group. */
@ -1680,6 +1690,8 @@ void addrconf_join_solict(struct net_device *dev, const struct in6_addr *addr)
{
struct in6_addr maddr;
ASSERT_RTNL();
if (dev->flags&(IFF_LOOPBACK|IFF_NOARP))
return;
@ -1691,6 +1703,8 @@ void addrconf_leave_solict(struct inet6_dev *idev, const struct in6_addr *addr)
{
struct in6_addr maddr;
ASSERT_RTNL();
if (idev->dev->flags&(IFF_LOOPBACK|IFF_NOARP))
return;
@ -1701,6 +1715,9 @@ void addrconf_leave_solict(struct inet6_dev *idev, const struct in6_addr *addr)
static void addrconf_join_anycast(struct inet6_ifaddr *ifp)
{
struct in6_addr addr;
ASSERT_RTNL();
if (ifp->prefix_len >= 127) /* RFC 6164 */
return;
ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len);
@ -1712,6 +1729,9 @@ static void addrconf_join_anycast(struct inet6_ifaddr *ifp)
static void addrconf_leave_anycast(struct inet6_ifaddr *ifp)
{
struct in6_addr addr;
ASSERT_RTNL();
if (ifp->prefix_len >= 127) /* RFC 6164 */
return;
ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len);
@ -2271,11 +2291,13 @@ ok:
return;
}
ifp->flags |= IFA_F_MANAGETEMPADDR;
update_lft = 0;
create = 1;
spin_lock_bh(&ifp->lock);
ifp->flags |= IFA_F_MANAGETEMPADDR;
ifp->cstamp = jiffies;
ifp->tokenized = tokenized;
spin_unlock_bh(&ifp->lock);
addrconf_dad_start(ifp);
}
@ -2326,7 +2348,7 @@ ok:
create, now);
in6_ifa_put(ifp);
addrconf_verify(0);
addrconf_verify();
}
}
inet6_prefix_notify(RTM_NEWPREFIX, in6_dev, pinfo);
@ -2475,7 +2497,7 @@ static int inet6_addr_add(struct net *net, int ifindex,
manage_tempaddrs(idev, ifp, valid_lft, prefered_lft,
true, jiffies);
in6_ifa_put(ifp);
addrconf_verify(0);
addrconf_verify_rtnl();
return 0;
}
@ -3011,7 +3033,7 @@ static int addrconf_ifdown(struct net_device *dev, int how)
hlist_for_each_entry_rcu(ifa, h, addr_lst) {
if (ifa->idev == idev) {
hlist_del_init_rcu(&ifa->addr_lst);
addrconf_del_dad_timer(ifa);
addrconf_del_dad_work(ifa);
goto restart;
}
}
@ -3049,7 +3071,7 @@ static int addrconf_ifdown(struct net_device *dev, int how)
while (!list_empty(&idev->addr_list)) {
ifa = list_first_entry(&idev->addr_list,
struct inet6_ifaddr, if_list);
addrconf_del_dad_timer(ifa);
addrconf_del_dad_work(ifa);
list_del(&ifa->if_list);
@ -3148,10 +3170,10 @@ static void addrconf_dad_kick(struct inet6_ifaddr *ifp)
rand_num = prandom_u32() % (idev->cnf.rtr_solicit_delay ? : 1);
ifp->dad_probes = idev->cnf.dad_transmits;
addrconf_mod_dad_timer(ifp, rand_num);
addrconf_mod_dad_work(ifp, rand_num);
}
static void addrconf_dad_start(struct inet6_ifaddr *ifp)
static void addrconf_dad_begin(struct inet6_ifaddr *ifp)
{
struct inet6_dev *idev = ifp->idev;
struct net_device *dev = idev->dev;
@ -3203,25 +3225,68 @@ out:
read_unlock_bh(&idev->lock);
}
static void addrconf_dad_timer(unsigned long data)
static void addrconf_dad_start(struct inet6_ifaddr *ifp)
{
struct inet6_ifaddr *ifp = (struct inet6_ifaddr *) data;
bool begin_dad = false;
spin_lock_bh(&ifp->state_lock);
if (ifp->state != INET6_IFADDR_STATE_DEAD) {
ifp->state = INET6_IFADDR_STATE_PREDAD;
begin_dad = true;
}
spin_unlock_bh(&ifp->state_lock);
if (begin_dad)
addrconf_mod_dad_work(ifp, 0);
}
static void addrconf_dad_work(struct work_struct *w)
{
struct inet6_ifaddr *ifp = container_of(to_delayed_work(w),
struct inet6_ifaddr,
dad_work);
struct inet6_dev *idev = ifp->idev;
struct in6_addr mcaddr;
enum {
DAD_PROCESS,
DAD_BEGIN,
DAD_ABORT,
} action = DAD_PROCESS;
rtnl_lock();
spin_lock_bh(&ifp->state_lock);
if (ifp->state == INET6_IFADDR_STATE_PREDAD) {
action = DAD_BEGIN;
ifp->state = INET6_IFADDR_STATE_DAD;
} else if (ifp->state == INET6_IFADDR_STATE_ERRDAD) {
action = DAD_ABORT;
ifp->state = INET6_IFADDR_STATE_POSTDAD;
}
spin_unlock_bh(&ifp->state_lock);
if (action == DAD_BEGIN) {
addrconf_dad_begin(ifp);
goto out;
} else if (action == DAD_ABORT) {
addrconf_dad_stop(ifp, 1);
goto out;
}
if (!ifp->dad_probes && addrconf_dad_end(ifp))
goto out;
write_lock(&idev->lock);
write_lock_bh(&idev->lock);
if (idev->dead || !(idev->if_flags & IF_READY)) {
write_unlock(&idev->lock);
write_unlock_bh(&idev->lock);
goto out;
}
spin_lock(&ifp->lock);
if (ifp->state == INET6_IFADDR_STATE_DEAD) {
spin_unlock(&ifp->lock);
write_unlock(&idev->lock);
write_unlock_bh(&idev->lock);
goto out;
}
@ -3232,7 +3297,7 @@ static void addrconf_dad_timer(unsigned long data)
ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC|IFA_F_DADFAILED);
spin_unlock(&ifp->lock);
write_unlock(&idev->lock);
write_unlock_bh(&idev->lock);
addrconf_dad_completed(ifp);
@ -3240,16 +3305,17 @@ static void addrconf_dad_timer(unsigned long data)
}
ifp->dad_probes--;
addrconf_mod_dad_timer(ifp,
NEIGH_VAR(ifp->idev->nd_parms, RETRANS_TIME));
addrconf_mod_dad_work(ifp,
NEIGH_VAR(ifp->idev->nd_parms, RETRANS_TIME));
spin_unlock(&ifp->lock);
write_unlock(&idev->lock);
write_unlock_bh(&idev->lock);
/* send a neighbour solicitation for our addr */
addrconf_addr_solict_mult(&ifp->addr, &mcaddr);
ndisc_send_ns(ifp->idev->dev, NULL, &ifp->addr, &mcaddr, &in6addr_any);
out:
in6_ifa_put(ifp);
rtnl_unlock();
}
/* ifp->idev must be at least read locked */
@ -3276,7 +3342,7 @@ static void addrconf_dad_completed(struct inet6_ifaddr *ifp)
struct in6_addr lladdr;
bool send_rs, send_mld;
addrconf_del_dad_timer(ifp);
addrconf_del_dad_work(ifp);
/*
* Configure the address for reception. Now it is valid.
@ -3517,23 +3583,23 @@ int ipv6_chk_home_addr(struct net *net, const struct in6_addr *addr)
* Periodic address status verification
*/
static void addrconf_verify(unsigned long foo)
static void addrconf_verify_rtnl(void)
{
unsigned long now, next, next_sec, next_sched;
struct inet6_ifaddr *ifp;
int i;
ASSERT_RTNL();
rcu_read_lock_bh();
spin_lock(&addrconf_verify_lock);
now = jiffies;
next = round_jiffies_up(now + ADDR_CHECK_FREQUENCY);
del_timer(&addr_chk_timer);
cancel_delayed_work(&addr_chk_work);
for (i = 0; i < IN6_ADDR_HSIZE; i++) {
restart:
hlist_for_each_entry_rcu_bh(ifp,
&inet6_addr_lst[i], addr_lst) {
hlist_for_each_entry_rcu_bh(ifp, &inet6_addr_lst[i], addr_lst) {
unsigned long age;
/* When setting preferred_lft to a value not zero or
@ -3628,13 +3694,22 @@ restart:
ADBG(KERN_DEBUG "now = %lu, schedule = %lu, rounded schedule = %lu => %lu\n",
now, next, next_sec, next_sched);
addr_chk_timer.expires = next_sched;
add_timer(&addr_chk_timer);
spin_unlock(&addrconf_verify_lock);
mod_delayed_work(addrconf_wq, &addr_chk_work, next_sched - now);
rcu_read_unlock_bh();
}
static void addrconf_verify_work(struct work_struct *w)
{
rtnl_lock();
addrconf_verify_rtnl();
rtnl_unlock();
}
static void addrconf_verify(void)
{
mod_delayed_work(addrconf_wq, &addr_chk_work, 0);
}
static struct in6_addr *extract_addr(struct nlattr *addr, struct nlattr *local,
struct in6_addr **peer_pfx)
{
@ -3691,6 +3766,8 @@ static int inet6_addr_modify(struct inet6_ifaddr *ifp, u32 ifa_flags,
bool was_managetempaddr;
bool had_prefixroute;
ASSERT_RTNL();
if (!valid_lft || (prefered_lft > valid_lft))
return -EINVAL;
@ -3756,7 +3833,7 @@ static int inet6_addr_modify(struct inet6_ifaddr *ifp, u32 ifa_flags,
!was_managetempaddr, jiffies);
}
addrconf_verify(0);
addrconf_verify_rtnl();
return 0;
}
@ -4386,6 +4463,8 @@ static int inet6_set_iftoken(struct inet6_dev *idev, struct in6_addr *token)
bool update_rs = false;
struct in6_addr ll_addr;
ASSERT_RTNL();
if (token == NULL)
return -EINVAL;
if (ipv6_addr_any(token))
@ -4434,7 +4513,7 @@ static int inet6_set_iftoken(struct inet6_dev *idev, struct in6_addr *token)
}
write_unlock_bh(&idev->lock);
addrconf_verify(0);
addrconf_verify_rtnl();
return 0;
}
@ -4636,6 +4715,9 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
{
struct net *net = dev_net(ifp->idev->dev);
if (event)
ASSERT_RTNL();
inet6_ifa_notify(event ? : RTM_NEWADDR, ifp);
switch (event) {
@ -5244,6 +5326,12 @@ int __init addrconf_init(void)
if (err < 0)
goto out_addrlabel;
addrconf_wq = create_workqueue("ipv6_addrconf");
if (!addrconf_wq) {
err = -ENOMEM;
goto out_nowq;
}
/* The addrconf netdev notifier requires that loopback_dev
* has its ipv6 private information allocated and set up
* before it can bring up and give link-local addresses
@ -5274,7 +5362,7 @@ int __init addrconf_init(void)
register_netdevice_notifier(&ipv6_dev_notf);
addrconf_verify(0);
addrconf_verify();
rtnl_af_register(&inet6_ops);
@ -5302,6 +5390,8 @@ errout:
rtnl_af_unregister(&inet6_ops);
unregister_netdevice_notifier(&ipv6_dev_notf);
errlo:
destroy_workqueue(addrconf_wq);
out_nowq:
unregister_pernet_subsys(&addrconf_ops);
out_addrlabel:
ipv6_addr_label_cleanup();
@ -5337,7 +5427,8 @@ void addrconf_cleanup(void)
for (i = 0; i < IN6_ADDR_HSIZE; i++)
WARN_ON(!hlist_empty(&inet6_addr_lst[i]));
spin_unlock_bh(&addrconf_hash_lock);
del_timer(&addr_chk_timer);
cancel_delayed_work(&addr_chk_work);
rtnl_unlock();
destroy_workqueue(addrconf_wq);
}
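
The theme of this whole conversion: timer callbacks run in atomic context and
may not sleep, so they could never take the RTNL lock that address
configuration really needs; moving DAD and periodic verification onto a
dedicated workqueue puts them in process context where rtnl_lock() is legal.
A toy model of the timer-to-delayed-work shape (pthreads stand in for the
workqueue):

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

/* stands in for the RTNL mutex */
static pthread_mutex_t rtnl = PTHREAD_MUTEX_INITIALIZER;

struct dwork {
        unsigned delay_ms;
        void (*fn)(void);
};

static void dad_work(void)
{
        pthread_mutex_lock(&rtnl);   /* fine in process context,
                                      * impossible in a timer callback */
        puts("DAD step runs under rtnl");
        pthread_mutex_unlock(&rtnl);
}

static void *worker(void *arg)
{
        struct dwork *w = arg;

        usleep(w->delay_ms * 1000);  /* the "delayed" part */
        w->fn();
        return NULL;
}

int main(void)
{
        struct dwork w = { .delay_ms = 10, .fn = dad_work };
        pthread_t t;

        pthread_create(&t, NULL, worker, &w);  /* ~ mod_delayed_work() */
        pthread_join(&t, NULL);
        return 0;
}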


@ -354,13 +354,16 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
skb = nfnetlink_alloc_skb(net, size, queue->peer_portid,
GFP_ATOMIC);
if (!skb)
if (!skb) {
skb_tx_error(entskb);
return NULL;
}
nlh = nlmsg_put(skb, 0, 0,
NFNL_SUBSYS_QUEUE << 8 | NFQNL_MSG_PACKET,
sizeof(struct nfgenmsg), 0);
if (!nlh) {
skb_tx_error(entskb);
kfree_skb(skb);
return NULL;
}
@ -488,13 +491,15 @@ nfqnl_build_packet_message(struct net *net, struct nfqnl_instance *queue,
nla->nla_type = NFQA_PAYLOAD;
nla->nla_len = nla_attr_size(data_len);
skb_zerocopy(skb, entskb, data_len, hlen);
if (skb_zerocopy(skb, entskb, data_len, hlen))
goto nla_put_failure;
}
nlh->nlmsg_len = skb->len;
return skb;
nla_put_failure:
skb_tx_error(entskb);
kfree_skb(skb);
net_err_ratelimited("nf_queue: error creating packet message\n");
return NULL;


@ -464,7 +464,9 @@ static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb,
}
nla->nla_len = nla_attr_size(skb->len);
skb_zerocopy(user_skb, skb, skb->len, hlen);
err = skb_zerocopy(user_skb, skb, skb->len, hlen);
if (err)
goto out;
/* Pad OVS_PACKET_ATTR_PACKET if linear copy was performed */
if (!(dp->user_features & OVS_DP_F_UNALIGNED)) {
@ -478,6 +480,8 @@ static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb,
err = genlmsg_unicast(ovs_dp_get_net(dp), user_skb, upcall_info->portid);
out:
if (err)
skb_tx_error(skb);
kfree_skb(nskb);
return err;
}


@ -103,30 +103,24 @@ static void stats_read(struct flow_stats *stats,
void ovs_flow_stats_get(struct sw_flow *flow, struct ovs_flow_stats *ovs_stats,
unsigned long *used, __be16 *tcp_flags)
{
int cpu, cur_cpu;
int cpu;
*used = 0;
*tcp_flags = 0;
memset(ovs_stats, 0, sizeof(*ovs_stats));
local_bh_disable();
if (!flow->stats.is_percpu) {
stats_read(flow->stats.stat, ovs_stats, used, tcp_flags);
} else {
cur_cpu = get_cpu();
for_each_possible_cpu(cpu) {
struct flow_stats *stats;
if (cpu == cur_cpu)
local_bh_disable();
stats = per_cpu_ptr(flow->stats.cpu_stats, cpu);
stats_read(stats, ovs_stats, used, tcp_flags);
if (cpu == cur_cpu)
local_bh_enable();
}
put_cpu();
}
local_bh_enable();
}
static void stats_reset(struct flow_stats *stats)
@ -141,25 +135,17 @@ static void stats_reset(struct flow_stats *stats)
void ovs_flow_stats_clear(struct sw_flow *flow)
{
int cpu, cur_cpu;
int cpu;
local_bh_disable();
if (!flow->stats.is_percpu) {
stats_reset(flow->stats.stat);
} else {
cur_cpu = get_cpu();
for_each_possible_cpu(cpu) {
if (cpu == cur_cpu)
local_bh_disable();
stats_reset(per_cpu_ptr(flow->stats.cpu_stats, cpu));
if (cpu == cur_cpu)
local_bh_enable();
}
put_cpu();
}
local_bh_enable();
}
static int check_header(struct sk_buff *skb, int len)


@ -1787,8 +1787,11 @@ static int unix_dgram_recvmsg(struct kiocb *iocb, struct socket *sock,
goto out;
err = mutex_lock_interruptible(&u->readlock);
if (err) {
err = sock_intr_errno(sock_rcvtimeo(sk, noblock));
if (unlikely(err)) {
/* recvmsg() in non-blocking mode is supposed to return -EAGAIN;
* sk_rcvtimeo is not honored by mutex_lock_interruptible()
*/
err = noblock ? -EAGAIN : -ERESTARTSYS;
goto out;
}
@ -1913,6 +1916,7 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
struct unix_sock *u = unix_sk(sk);
DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, msg->msg_name);
int copied = 0;
int noblock = flags & MSG_DONTWAIT;
int check_creds = 0;
int target;
int err = 0;
@ -1928,7 +1932,7 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
goto out;
target = sock_rcvlowat(sk, flags&MSG_WAITALL, size);
timeo = sock_rcvtimeo(sk, flags&MSG_DONTWAIT);
timeo = sock_rcvtimeo(sk, noblock);
/* Lock the socket to prevent queue disordering
* while we sleep in memcpy_tomsg
@ -1940,8 +1944,11 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
}
err = mutex_lock_interruptible(&u->readlock);
if (err) {
err = sock_intr_errno(timeo);
if (unlikely(err)) {
/* recvmsg() in non-blocking mode is supposed to return -EAGAIN;
* sk_rcvtimeo is not honored by mutex_lock_interruptible()
*/
err = noblock ? -EAGAIN : -ERESTARTSYS;
goto out;
}
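
Both hunks encode the same contract: when the readlock wait is interrupted, a
non-blocking recvmsg() must report -EAGAIN rather than an error derived from
a timeout the lock never honored. Distilled (ERESTARTSYS is kernel-internal,
so it is defined by hand here):

#include <errno.h>
#include <stdio.h>

#define ERESTARTSYS 512   /* kernel-internal value, not a userspace errno */

/* toy stand-in for mutex_lock_interruptible(): pretend a signal arrived */
static int lock_interruptible(void)
{
        return -EINTR;
}

static int recv_like(int noblock)
{
        if (lock_interruptible()) {
                /* non-blocking mode must see -EAGAIN; the lock wait does
                 * not honor sk_rcvtimeo */
                return noblock ? -EAGAIN : -ERESTARTSYS;
        }
        return 0;
}

int main(void)
{
        printf("noblock=1 -> %d, noblock=0 -> %d\n",
               recv_like(1), recv_like(0));
        return 0;
}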


@ -902,7 +902,6 @@ static int alc5623_probe(struct snd_soc_codec *codec)
{
struct alc5623_priv *alc5623 = snd_soc_codec_get_drvdata(codec);
struct snd_soc_dapm_context *dapm = &codec->dapm;
int ret;
alc5623_reset(codec);
@ -961,7 +960,7 @@ static int alc5623_probe(struct snd_soc_codec *codec)
return -EINVAL;
}
return ret;
return 0;
}
/* power down chip */


@ -1061,7 +1061,6 @@ static int alc5632_resume(struct snd_soc_codec *codec)
static int alc5632_probe(struct snd_soc_codec *codec)
{
struct alc5632_priv *alc5632 = snd_soc_codec_get_drvdata(codec);
int ret;
/* power on device */
alc5632_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
@ -1075,7 +1074,7 @@ static int alc5632_probe(struct snd_soc_codec *codec)
return -EINVAL;
}
return ret;
return 0;
}
/* power down chip */
@ -1191,11 +1190,18 @@ static const struct i2c_device_id alc5632_i2c_table[] = {
};
MODULE_DEVICE_TABLE(i2c, alc5632_i2c_table);
static const struct of_device_id alc5632_of_match[] = {
{ .compatible = "realtek,alc5632", },
{ }
};
MODULE_DEVICE_TABLE(of, alc5632_of_match);
/* i2c codec control layer */
static struct i2c_driver alc5632_i2c_driver = {
.driver = {
.name = "alc5632",
.owner = THIS_MODULE,
.of_match_table = of_match_ptr(alc5632_of_match),
},
.probe = alc5632_i2c_probe,
.remove = alc5632_i2c_remove,


@ -1259,7 +1259,7 @@ static int cs42l52_i2c_probe(struct i2c_client *i2c_client,
}
dev_info(&i2c_client->dev, "Cirrus Logic CS42L52, Revision: %02X\n",
reg & 0xFF);
reg & CS42L52_CHIP_REV_MASK);
/* Set Platform Data */
if (cs42l52->pdata.mica_diff_cfg)


@ -37,7 +37,7 @@
#define CS42L52_CHIP_REV_A0 0x00
#define CS42L52_CHIP_REV_A1 0x01
#define CS42L52_CHIP_REV_B0 0x02
#define CS42L52_CHIP_REV_MASK 0x03
#define CS42L52_CHIP_REV_MASK 0x07
#define CS42L52_PWRCTL1 0x02
#define CS42L52_PWRCTL1_PDN_ALL 0x9F


@ -495,17 +495,16 @@ int cs42xx8_probe(struct device *dev, struct regmap *regmap)
regcache_cache_bypass(cs42xx8->regmap, true);
/* Validate the chip ID */
regmap_read(cs42xx8->regmap, CS42XX8_CHIPID, &val);
if (val < 0) {
dev_err(dev, "failed to get device ID: %x", val);
ret = -EINVAL;
ret = regmap_read(cs42xx8->regmap, CS42XX8_CHIPID, &val);
if (ret < 0) {
dev_err(dev, "failed to get device ID, ret = %d", ret);
goto err_enable;
}
/* The top four bits of the chip ID should be 0000 */
if ((val & CS42XX8_CHIPID_CHIP_ID_MASK) != 0x00) {
if (((val & CS42XX8_CHIPID_CHIP_ID_MASK) >> 4) != 0x00) {
dev_err(dev, "unmatched chip ID: %d\n",
val & CS42XX8_CHIPID_CHIP_ID_MASK);
(val & CS42XX8_CHIPID_CHIP_ID_MASK) >> 4);
ret = -EINVAL;
goto err_enable;
}


@ -1571,7 +1571,8 @@ static int da732x_i2c_probe(struct i2c_client *i2c,
}
dev_info(&i2c->dev, "Revision: %d.%d\n",
(reg & DA732X_ID_MAJOR_MASK), (reg & DA732X_ID_MINOR_MASK));
(reg & DA732X_ID_MAJOR_MASK) >> 4,
(reg & DA732X_ID_MINOR_MASK));
ret = snd_soc_register_codec(&i2c->dev, &soc_codec_dev_da732x,
da732x_dai, ARRAY_SIZE(da732x_dai));
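
Same shape as the cs42xx8 chip-ID fix above: masking a packed register field
leaves it in place, so the major revision must also be shifted down before
printing. A quick check with a made-up register value:

#include <stdio.h>

#define ID_MAJOR_MASK 0xF0
#define ID_MINOR_MASK 0x0F

int main(void)
{
        unsigned reg = 0x4A;   /* hypothetical: major 4, minor 10 */

        printf("unshifted: %u.%u\n",
               reg & ID_MAJOR_MASK, reg & ID_MINOR_MASK);        /* 64.10 */
        printf("shifted:   %u.%u\n",
               (reg & ID_MAJOR_MASK) >> 4, reg & ID_MINOR_MASK); /* 4.10 */
        return 0;
}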


@ -2399,11 +2399,18 @@ static const struct i2c_device_id max98090_i2c_id[] = {
};
MODULE_DEVICE_TABLE(i2c, max98090_i2c_id);
static const struct of_device_id max98090_of_match[] = {
{ .compatible = "maxim,max98090", },
{ }
};
MODULE_DEVICE_TABLE(of, max98090_of_match);
static struct i2c_driver max98090_i2c_driver = {
.driver = {
.name = "max98090",
.owner = THIS_MODULE,
.pm = &max98090_pm,
.of_match_table = of_match_ptr(max98090_of_match),
},
.probe = max98090_i2c_probe,
.remove = max98090_i2c_remove,


@ -2074,6 +2074,14 @@ static const struct i2c_device_id rt5640_i2c_id[] = {
};
MODULE_DEVICE_TABLE(i2c, rt5640_i2c_id);
#if defined(CONFIG_OF)
static const struct of_device_id rt5640_of_match[] = {
{ .compatible = "realtek,rt5640", },
{},
};
MODULE_DEVICE_TABLE(of, rt5640_of_match);
#endif
#ifdef CONFIG_ACPI
static struct acpi_device_id rt5640_acpi_match[] = {
{ "INT33CA", 0 },
@ -2203,6 +2211,7 @@ static struct i2c_driver rt5640_i2c_driver = {
.name = "rt5640",
.owner = THIS_MODULE,
.acpi_match_table = ACPI_PTR(rt5640_acpi_match),
.of_match_table = of_match_ptr(rt5640_of_match),
},
.probe = rt5640_i2c_probe,
.remove = rt5640_i2c_remove,


@ -43,9 +43,16 @@ static const struct i2c_device_id tlv320aic23_id[] = {
MODULE_DEVICE_TABLE(i2c, tlv320aic23_id);
static const struct of_device_id tlv320aic23_of_match[] = {
{ .compatible = "ti,tlv320aic23", },
{ }
};
MODULE_DEVICE_TABLE(of, tlv320aic23_of_match);
static struct i2c_driver tlv320aic23_i2c_driver = {
.driver = {
.name = "tlv320aic23-codec",
.of_match_table = of_match_ptr(tlv320aic23_of_match),
},
.probe = tlv320aic23_i2c_probe,
.remove = __exit_p(tlv320aic23_i2c_remove),


@ -336,7 +336,7 @@ static int davinci_mcasp_set_dai_fmt(struct snd_soc_dai *cpu_dai,
mcasp_clr_bits(mcasp, DAVINCI_MCASP_ACLKXCTL_REG, ACLKXPOL);
mcasp_clr_bits(mcasp, DAVINCI_MCASP_TXFMCTL_REG, FSXPOL);
mcasp_set_bits(mcasp, DAVINCI_MCASP_ACLKRCTL_REG, ACLKRPOL);
mcasp_clr_bits(mcasp, DAVINCI_MCASP_ACLKRCTL_REG, ACLKRPOL);
mcasp_clr_bits(mcasp, DAVINCI_MCASP_RXFMCTL_REG, FSRPOL);
break;
@ -344,7 +344,7 @@ static int davinci_mcasp_set_dai_fmt(struct snd_soc_dai *cpu_dai,
mcasp_set_bits(mcasp, DAVINCI_MCASP_ACLKXCTL_REG, ACLKXPOL);
mcasp_set_bits(mcasp, DAVINCI_MCASP_TXFMCTL_REG, FSXPOL);
mcasp_clr_bits(mcasp, DAVINCI_MCASP_ACLKRCTL_REG, ACLKRPOL);
mcasp_set_bits(mcasp, DAVINCI_MCASP_ACLKRCTL_REG, ACLKRPOL);
mcasp_set_bits(mcasp, DAVINCI_MCASP_RXFMCTL_REG, FSRPOL);
break;
@ -352,7 +352,7 @@ static int davinci_mcasp_set_dai_fmt(struct snd_soc_dai *cpu_dai,
mcasp_clr_bits(mcasp, DAVINCI_MCASP_ACLKXCTL_REG, ACLKXPOL);
mcasp_set_bits(mcasp, DAVINCI_MCASP_TXFMCTL_REG, FSXPOL);
mcasp_set_bits(mcasp, DAVINCI_MCASP_ACLKRCTL_REG, ACLKRPOL);
mcasp_clr_bits(mcasp, DAVINCI_MCASP_ACLKRCTL_REG, ACLKRPOL);
mcasp_set_bits(mcasp, DAVINCI_MCASP_RXFMCTL_REG, FSRPOL);
break;


@ -23,6 +23,71 @@
#include "fsl_sai.h"
#define FSL_SAI_FLAGS (FSL_SAI_CSR_SEIE |\
FSL_SAI_CSR_FEIE)
static irqreturn_t fsl_sai_isr(int irq, void *devid)
{
struct fsl_sai *sai = (struct fsl_sai *)devid;
struct device *dev = &sai->pdev->dev;
u32 xcsr, mask;
/* Only handle the interrupt sources we enabled */
mask = (FSL_SAI_FLAGS >> FSL_SAI_CSR_xIE_SHIFT) << FSL_SAI_CSR_xF_SHIFT;
/* Tx IRQ */
regmap_read(sai->regmap, FSL_SAI_TCSR, &xcsr);
xcsr &= mask;
if (xcsr & FSL_SAI_CSR_WSF)
dev_dbg(dev, "isr: Start of Tx word detected\n");
if (xcsr & FSL_SAI_CSR_SEF)
dev_warn(dev, "isr: Tx Frame sync error detected\n");
if (xcsr & FSL_SAI_CSR_FEF) {
dev_warn(dev, "isr: Transmit underrun detected\n");
/* FIFO reset for safety */
xcsr |= FSL_SAI_CSR_FR;
}
if (xcsr & FSL_SAI_CSR_FWF)
dev_dbg(dev, "isr: Enabled transmit FIFO is empty\n");
if (xcsr & FSL_SAI_CSR_FRF)
dev_dbg(dev, "isr: Transmit FIFO watermark has been reached\n");
regmap_update_bits(sai->regmap, FSL_SAI_TCSR,
FSL_SAI_CSR_xF_W_MASK | FSL_SAI_CSR_FR, xcsr);
/* Rx IRQ */
regmap_read(sai->regmap, FSL_SAI_RCSR, &xcsr);
xcsr &= mask;
if (xcsr & FSL_SAI_CSR_WSF)
dev_dbg(dev, "isr: Start of Rx word detected\n");
if (xcsr & FSL_SAI_CSR_SEF)
dev_warn(dev, "isr: Rx Frame sync error detected\n");
if (xcsr & FSL_SAI_CSR_FEF) {
dev_warn(dev, "isr: Receive overflow detected\n");
/* FIFO reset for safety */
xcsr |= FSL_SAI_CSR_FR;
}
if (xcsr & FSL_SAI_CSR_FWF)
dev_dbg(dev, "isr: Enabled receive FIFO is full\n");
if (xcsr & FSL_SAI_CSR_FRF)
dev_dbg(dev, "isr: Receive FIFO watermark has been reached\n");
regmap_update_bits(sai->regmap, FSL_SAI_RCSR,
FSL_SAI_CSR_xF_W_MASK | FSL_SAI_CSR_FR, xcsr);
return IRQ_HANDLED;
}
static int fsl_sai_set_dai_sysclk_tr(struct snd_soc_dai *cpu_dai,
int clk_id, unsigned int freq, int fsl_dir)
{
@ -114,7 +179,7 @@ static int fsl_sai_set_dai_fmt_tr(struct snd_soc_dai *cpu_dai,
* that is, together with the last bit of the previous
* data word.
*/
val_cr2 &= ~FSL_SAI_CR2_BCP;
val_cr2 |= FSL_SAI_CR2_BCP;
val_cr4 |= FSL_SAI_CR4_FSE | FSL_SAI_CR4_FSP;
break;
case SND_SOC_DAIFMT_LEFT_J:
@ -122,7 +187,7 @@ static int fsl_sai_set_dai_fmt_tr(struct snd_soc_dai *cpu_dai,
* Frame high, one word length for frame sync,
* frame sync asserts with the first bit of the frame.
*/
val_cr2 &= ~FSL_SAI_CR2_BCP;
val_cr2 |= FSL_SAI_CR2_BCP;
val_cr4 &= ~(FSL_SAI_CR4_FSE | FSL_SAI_CR4_FSP);
break;
case SND_SOC_DAIFMT_DSP_A:
@ -132,7 +197,7 @@ static int fsl_sai_set_dai_fmt_tr(struct snd_soc_dai *cpu_dai,
* that is, together with the last bit of the previous
* data word.
*/
val_cr2 &= ~FSL_SAI_CR2_BCP;
val_cr2 |= FSL_SAI_CR2_BCP;
val_cr4 &= ~FSL_SAI_CR4_FSP;
val_cr4 |= FSL_SAI_CR4_FSE;
sai->is_dsp_mode = true;
@ -142,7 +207,7 @@ static int fsl_sai_set_dai_fmt_tr(struct snd_soc_dai *cpu_dai,
* Frame high, one bit for frame sync,
* frame sync asserts with the first bit of the frame.
*/
val_cr2 &= ~FSL_SAI_CR2_BCP;
val_cr2 |= FSL_SAI_CR2_BCP;
val_cr4 &= ~(FSL_SAI_CR4_FSE | FSL_SAI_CR4_FSP);
sai->is_dsp_mode = true;
break;
@ -373,8 +438,8 @@ static int fsl_sai_dai_probe(struct snd_soc_dai *cpu_dai)
{
struct fsl_sai *sai = dev_get_drvdata(cpu_dai->dev);
regmap_update_bits(sai->regmap, FSL_SAI_TCSR, 0xffffffff, 0x0);
regmap_update_bits(sai->regmap, FSL_SAI_RCSR, 0xffffffff, 0x0);
regmap_update_bits(sai->regmap, FSL_SAI_TCSR, 0xffffffff, FSL_SAI_FLAGS);
regmap_update_bits(sai->regmap, FSL_SAI_RCSR, 0xffffffff, FSL_SAI_FLAGS);
regmap_update_bits(sai->regmap, FSL_SAI_TCR1, FSL_SAI_CR1_RFW_MASK,
FSL_SAI_MAXBURST_TX * 2);
regmap_update_bits(sai->regmap, FSL_SAI_RCR1, FSL_SAI_CR1_RFW_MASK,
@ -490,12 +555,14 @@ static int fsl_sai_probe(struct platform_device *pdev)
struct fsl_sai *sai;
struct resource *res;
void __iomem *base;
int ret;
int irq, ret;
sai = devm_kzalloc(&pdev->dev, sizeof(*sai), GFP_KERNEL);
if (!sai)
return -ENOMEM;
sai->pdev = pdev;
sai->big_endian_regs = of_property_read_bool(np, "big-endian-regs");
if (sai->big_endian_regs)
fsl_sai_regmap_config.val_format_endian = REGMAP_ENDIAN_BIG;
@ -514,6 +581,18 @@ static int fsl_sai_probe(struct platform_device *pdev)
return PTR_ERR(sai->regmap);
}
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
dev_err(&pdev->dev, "no irq for node %s\n", np->full_name);
return irq;
}
ret = devm_request_irq(&pdev->dev, irq, fsl_sai_isr, 0, np->name, sai);
if (ret) {
dev_err(&pdev->dev, "failed to claim irq %u\n", irq);
return ret;
}
sai->dma_params_rx.addr = res->start + FSL_SAI_RDR;
sai->dma_params_tx.addr = res->start + FSL_SAI_TDR;
sai->dma_params_rx.maxburst = FSL_SAI_MAXBURST_RX;


@ -37,7 +37,21 @@
/* SAI Transmit/Receive Control Register */
#define FSL_SAI_CSR_TERE BIT(31)
#define FSL_SAI_CSR_FR BIT(25)
#define FSL_SAI_CSR_xF_SHIFT 16
#define FSL_SAI_CSR_xF_W_SHIFT 18
#define FSL_SAI_CSR_xF_MASK (0x1f << FSL_SAI_CSR_xF_SHIFT)
#define FSL_SAI_CSR_xF_W_MASK (0x7 << FSL_SAI_CSR_xF_W_SHIFT)
#define FSL_SAI_CSR_WSF BIT(20)
#define FSL_SAI_CSR_SEF BIT(19)
#define FSL_SAI_CSR_FEF BIT(18)
#define FSL_SAI_CSR_FWF BIT(17)
#define FSL_SAI_CSR_FRF BIT(16)
#define FSL_SAI_CSR_xIE_SHIFT 8
#define FSL_SAI_CSR_WSIE BIT(12)
#define FSL_SAI_CSR_SEIE BIT(11)
#define FSL_SAI_CSR_FEIE BIT(10)
#define FSL_SAI_CSR_FWIE BIT(9)
#define FSL_SAI_CSR_FRIE BIT(8)
#define FSL_SAI_CSR_FRDE BIT(0)
@ -99,6 +113,7 @@
#define FSL_SAI_MAXBURST_RX 6
struct fsl_sai {
struct platform_device *pdev;
struct regmap *regmap;
bool big_endian_regs;
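
The mask computation in the new ISR leans on the layout defined here: the
interrupt-enable bits sit at xIE_SHIFT (8) and the matching status flags at
xF_SHIFT (16), so shifting the enable mask down and back up selects exactly
the status bits whose interrupts were requested. A quick standalone check:

#include <stdio.h>

#define xIE_SHIFT 8
#define xF_SHIFT  16
#define SEIE (1u << 11)   /* sync error irq enable */
#define FEIE (1u << 10)   /* FIFO error irq enable */
#define FLAGS (SEIE | FEIE)

int main(void)
{
        unsigned mask = (FLAGS >> xIE_SHIFT) << xF_SHIFT;

        printf("mask = 0x%08x\n", mask);  /* 0x000c0000 == SEF | FEF */
        return 0;
}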


@ -39,15 +39,15 @@ struct s3c_ac97_info {
};
static struct s3c_ac97_info s3c_ac97;
static struct s3c2410_dma_client s3c_dma_client_out = {
static struct s3c_dma_client s3c_dma_client_out = {
.name = "AC97 PCMOut"
};
static struct s3c2410_dma_client s3c_dma_client_in = {
static struct s3c_dma_client s3c_dma_client_in = {
.name = "AC97 PCMIn"
};
static struct s3c2410_dma_client s3c_dma_client_micin = {
static struct s3c_dma_client s3c_dma_client_micin = {
.name = "AC97 MicIn"
};


@ -14,8 +14,12 @@
#include <sound/dmaengine_pcm.h>
struct s3c_dma_client {
char *name;
};
struct s3c_dma_params {
struct s3c2410_dma_client *client; /* stream identifier */
struct s3c_dma_client *client; /* stream identifier */
int channel; /* Channel ID */
dma_addr_t dma_addr;
int dma_size; /* Size of the DMA transfer */


@ -1211,10 +1211,10 @@ static int samsung_i2s_probe(struct platform_device *pdev)
pri_dai->dma_playback.dma_addr = regs_base + I2STXD;
pri_dai->dma_capture.dma_addr = regs_base + I2SRXD;
pri_dai->dma_playback.client =
(struct s3c2410_dma_client *)&pri_dai->dma_playback;
(struct s3c_dma_client *)&pri_dai->dma_playback;
pri_dai->dma_playback.ch_name = "tx";
pri_dai->dma_capture.client =
(struct s3c2410_dma_client *)&pri_dai->dma_capture;
(struct s3c_dma_client *)&pri_dai->dma_capture;
pri_dai->dma_capture.ch_name = "rx";
pri_dai->dma_playback.dma_size = 4;
pri_dai->dma_capture.dma_size = 4;
@ -1233,7 +1233,7 @@ static int samsung_i2s_probe(struct platform_device *pdev)
}
sec_dai->dma_playback.dma_addr = regs_base + I2STXDS;
sec_dai->dma_playback.client =
(struct s3c2410_dma_client *)&sec_dai->dma_playback;
(struct s3c_dma_client *)&sec_dai->dma_playback;
sec_dai->dma_playback.ch_name = "tx-sec";
if (!np) {


@ -20,7 +20,6 @@
#include <sound/pcm_params.h>
#include <linux/platform_data/asoc-s3c.h>
#include <mach/dma.h>
#include "dma.h"
#include "pcm.h"
@ -132,11 +131,11 @@ struct s3c_pcm_info {
struct s3c_dma_params *dma_capture;
};
static struct s3c2410_dma_client s3c_pcm_dma_client_out = {
static struct s3c_dma_client s3c_pcm_dma_client_out = {
.name = "PCM Stereo out"
};
static struct s3c2410_dma_client s3c_pcm_dma_client_in = {
static struct s3c_dma_client s3c_pcm_dma_client_in = {
.name = "PCM Stereo in"
};


@ -33,11 +33,11 @@
#include "regs-i2s-v2.h"
#include "s3c2412-i2s.h"
static struct s3c2410_dma_client s3c2412_dma_client_out = {
static struct s3c_dma_client s3c2412_dma_client_out = {
.name = "I2S PCM Stereo out"
};
static struct s3c2410_dma_client s3c2412_dma_client_in = {
static struct s3c_dma_client s3c2412_dma_client_in = {
.name = "I2S PCM Stereo in"
};


@ -31,11 +31,11 @@
#include "dma.h"
#include "s3c24xx-i2s.h"
static struct s3c2410_dma_client s3c24xx_dma_client_out = {
static struct s3c_dma_client s3c24xx_dma_client_out = {
.name = "I2S PCM Stereo out"
};
static struct s3c2410_dma_client s3c24xx_dma_client_in = {
static struct s3c_dma_client s3c24xx_dma_client_in = {
.name = "I2S PCM Stereo in"
};


@ -18,7 +18,6 @@
#include <sound/pcm_params.h>
#include <linux/platform_data/asoc-s3c.h>
#include <mach/dma.h>
#include "dma.h"
#include "spdif.h"
@ -94,7 +93,7 @@ struct samsung_spdif_info {
struct s3c_dma_params *dma_playback;
};
static struct s3c2410_dma_client spdif_dma_client_out = {
static struct s3c_dma_client spdif_dma_client_out = {
.name = "S/PDIF Stereo out",
};