Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/torvalds/linux-2.6

Conflicts:
	drivers/video/sh_mobile_lcdcfb.c

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
Paul Mundt 2011-01-06 18:27:34 +09:00
commit 1928e87bcf
246 changed files with 2243 additions and 1510 deletions

View File

@ -516,6 +516,7 @@ int main(int argc, char *argv[])
default:
fprintf(stderr, "Unknown nla_type %d\n",
na->nla_type);
case TASKSTATS_TYPE_NULL:
break;
}
na = (struct nlattr *) (GENLMSG_DATA(&msg) + len);
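For context, a minimal sketch (not part of this patch) of how such a run of
netlink attributes is typically walked while skipping types the caller does
not handle; the names walk_attrs/payload/payload_len are illustrative, and the
fragment assumes <stdio.h>, <linux/netlink.h> and <linux/taskstats.h>:

static void walk_attrs(void *payload, int payload_len)
{
	struct nlattr *na = payload;
	int rem = payload_len;

	while (rem >= (int) sizeof(*na) &&
	       na->nla_len >= sizeof(*na) && na->nla_len <= rem) {
		switch (na->nla_type) {
		case TASKSTATS_TYPE_NULL:
			break;			/* padding attribute, nothing to do */
		default:
			fprintf(stderr, "Unknown nla_type %d\n", na->nla_type);
			break;
		}
		rem -= NLA_ALIGN(na->nla_len);
		na = (struct nlattr *) ((char *) na + NLA_ALIGN(na->nla_len));
	}
}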

View File

@ -18,7 +18,6 @@ prototypes:
char *(*d_dname)(struct dentry *dentry, char *buffer, int buflen);
locking rules:
none have BKL
dcache_lock rename_lock ->d_lock may block
d_revalidate: no no no yes
d_hash no no no yes
@ -42,18 +41,23 @@ ata *);
int (*rename) (struct inode *, struct dentry *,
struct inode *, struct dentry *);
int (*readlink) (struct dentry *, char __user *,int);
int (*follow_link) (struct dentry *, struct nameidata *);
void * (*follow_link) (struct dentry *, struct nameidata *);
void (*put_link) (struct dentry *, struct nameidata *, void *);
void (*truncate) (struct inode *);
int (*permission) (struct inode *, int, struct nameidata *);
int (*check_acl)(struct inode *, int);
int (*setattr) (struct dentry *, struct iattr *);
int (*getattr) (struct vfsmount *, struct dentry *, struct kstat *);
int (*setxattr) (struct dentry *, const char *,const void *,size_t,int);
ssize_t (*getxattr) (struct dentry *, const char *, void *, size_t);
ssize_t (*listxattr) (struct dentry *, char *, size_t);
int (*removexattr) (struct dentry *, const char *);
void (*truncate_range)(struct inode *, loff_t, loff_t);
long (*fallocate)(struct inode *inode, int mode, loff_t offset, loff_t len);
int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start, u64 len);
locking rules:
all may block, none have BKL
all may block
i_mutex(inode)
lookup: yes
create: yes
@ -66,19 +70,24 @@ rmdir: yes (both) (see below)
rename: yes (all) (see below)
readlink: no
follow_link: no
put_link: no
truncate: yes (see below)
setattr: yes
permission: no
check_acl: no
getattr: no
setxattr: yes
getxattr: no
listxattr: no
removexattr: yes
truncate_range: yes
fallocate: no
fiemap: no
Additionally, ->rmdir(), ->unlink() and ->rename() have ->i_mutex on
victim.
cross-directory ->rename() has (per-superblock) ->s_vfs_rename_sem.
->truncate() is never called directly - it's a callback, not a
method. It's called by vmtruncate() - library function normally used by
method. It's called by vmtruncate() - deprecated library function used by
->setattr(). Locking information above applies to that call (i.e. is
inherited from ->setattr() - vmtruncate() is used when ATTR_SIZE had been
passed).
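As a rough illustration of that call chain (a sketch under the 2.6.37-era API,
not taken from this commit; "myfs" is a made-up filesystem), ->setattr() hands
size changes to vmtruncate(), which in turn invokes ->truncate() under the
i_mutex locking described above:

static int myfs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	int error;

	error = inode_change_ok(inode, attr);
	if (error)
		return error;

	if (attr->ia_valid & ATTR_SIZE) {
		error = vmtruncate(inode, attr->ia_size);	/* ends up in ->truncate() */
		if (error)
			return error;
	}

	setattr_copy(inode, attr);
	mark_inode_dirty(inode);
	return 0;
}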
@ -91,7 +100,7 @@ prototypes:
struct inode *(*alloc_inode)(struct super_block *sb);
void (*destroy_inode)(struct inode *);
void (*dirty_inode) (struct inode *);
int (*write_inode) (struct inode *, int);
int (*write_inode) (struct inode *, struct writeback_control *wbc);
int (*drop_inode) (struct inode *);
void (*evict_inode) (struct inode *);
void (*put_super) (struct super_block *);
@ -105,10 +114,10 @@ prototypes:
int (*show_options)(struct seq_file *, struct vfsmount *);
ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t);
ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t);
int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
locking rules:
All may block [not true, see below]
None have BKL
s_umount
alloc_inode:
destroy_inode:
@ -127,6 +136,7 @@ umount_begin: no
show_options: no (namespace_sem)
quota_read: no (see below)
quota_write: no (see below)
bdev_try_to_free_page: no (see below)
->statfs() has s_umount (shared) when called by ustat(2) (native or
compat), but that's an accident of bad API; s_umount is used to pin
@ -139,19 +149,25 @@ be the only ones operating on the quota file by the quota code (via
dqio_sem) (unless an admin really wants to screw up something and
writes to quota files with quotas on). For other details about locking
see also dquot_operations section.
->bdev_try_to_free_page is called from the ->releasepage handler of
the block device inode. See there for more details.
--------------------------- file_system_type ---------------------------
prototypes:
int (*get_sb) (struct file_system_type *, int,
const char *, void *, struct vfsmount *);
struct dentry *(*mount) (struct file_system_type *, int,
const char *, void *);
void (*kill_sb) (struct super_block *);
locking rules:
may block BKL
get_sb yes no
kill_sb yes no
may block
get_sb yes
mount yes
kill_sb yes
->get_sb() returns error or 0 with locked superblock attached to the vfsmount
(exclusive on ->s_umount).
->mount() returns ERR_PTR or the root dentry.
->kill_sb() takes a write-locked superblock, does all shutdown work on it,
unlocks and drops the reference.
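A minimal sketch of the new ->mount() style (illustrative only, not from this
commit; "myfs" and myfs_fill_super are made up): a block-based filesystem
typically just wraps mount_bdev() and pairs it with kill_block_super():

static struct dentry *myfs_mount(struct file_system_type *fs_type,
				 int flags, const char *dev_name, void *data)
{
	return mount_bdev(fs_type, flags, dev_name, data, myfs_fill_super);
}

static struct file_system_type myfs_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "myfs",
	.mount		= myfs_mount,
	.kill_sb	= kill_block_super,
};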
@ -176,27 +192,35 @@ prototypes:
void (*freepage)(struct page *);
int (*direct_IO)(int, struct kiocb *, const struct iovec *iov,
loff_t offset, unsigned long nr_segs);
int (*launder_page) (struct page *);
int (*get_xip_mem)(struct address_space *, pgoff_t, int, void **,
unsigned long *);
int (*migratepage)(struct address_space *, struct page *, struct page *);
int (*launder_page)(struct page *);
int (*is_partially_uptodate)(struct page *, read_descriptor_t *, unsigned long);
int (*error_remove_page)(struct address_space *, struct page *);
locking rules:
All except set_page_dirty and freepage may block
BKL PageLocked(page) i_mutex
writepage: no yes, unlocks (see below)
readpage: no yes, unlocks
sync_page: no maybe
writepages: no
set_page_dirty no no
readpages: no
write_begin: no locks the page yes
write_end: no yes, unlocks yes
perform_write: no n/a yes
bmap: no
invalidatepage: no yes
releasepage: no yes
freepage: no yes
direct_IO: no
launder_page: no yes
PageLocked(page) i_mutex
writepage: yes, unlocks (see below)
readpage: yes, unlocks
sync_page: maybe
writepages:
set_page_dirty no
readpages:
write_begin: locks the page yes
write_end: yes, unlocks yes
bmap:
invalidatepage: yes
releasepage: yes
freepage: yes
direct_IO:
get_xip_mem: maybe
migratepage: yes (both)
launder_page: yes
is_partially_uptodate: yes
error_remove_page: yes
->write_begin(), ->write_end(), ->sync_page() and ->readpage()
may be called from the request handler (/dev/loop).
@ -276,9 +300,8 @@ under spinlock (it cannot block) and is sometimes called with the page
not locked.
->bmap() is currently used by legacy ioctl() (FIBMAP) provided by some
filesystems and by the swapper. The latter will eventually go away. All
instances do not actually need the BKL. Please, keep it that way and don't
breed new callers.
filesystems and by the swapper. The latter will eventually go away. Please,
keep it that way and don't breed new callers.
->invalidatepage() is called when the filesystem must attempt to drop
some or all of the buffers from the page when it is being truncated. It
@ -299,47 +322,37 @@ cleaned, or an error value if not. Note that in order to prevent the page
getting mapped back in and redirtied, it needs to be kept locked
across the entire operation.
Note: currently almost all instances of address_space methods are
using BKL for internal serialization and that's one of the worst sources
of contention. Normally they are calling library functions (in fs/buffer.c)
and pass foo_get_block() as a callback (on local block-based filesystems,
indeed). BKL is not needed for library stuff and is usually taken by
foo_get_block(). It's an overkill, since block bitmaps can be protected by
internal fs locking and real critical areas are much smaller than the areas
filesystems protect now.
----------------------- file_lock_operations ------------------------------
prototypes:
void (*fl_insert)(struct file_lock *); /* lock insertion callback */
void (*fl_remove)(struct file_lock *); /* lock removal callback */
void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
void (*fl_release_private)(struct file_lock *);
locking rules:
BKL may block
fl_insert: yes no
fl_remove: yes no
fl_copy_lock: yes no
fl_release_private: yes yes
file_lock_lock may block
fl_copy_lock: yes no
fl_release_private: maybe no
----------------------- lock_manager_operations ---------------------------
prototypes:
int (*fl_compare_owner)(struct file_lock *, struct file_lock *);
void (*fl_notify)(struct file_lock *); /* unblock callback */
int (*fl_grant)(struct file_lock *, struct file_lock *, int);
void (*fl_release_private)(struct file_lock *);
void (*fl_break)(struct file_lock *); /* break_lease callback */
int (*fl_mylease)(struct file_lock *, struct file_lock *);
int (*fl_change)(struct file_lock **, int);
locking rules:
BKL may block
fl_compare_owner: yes no
fl_notify: yes no
fl_release_private: yes yes
fl_break: yes no
file_lock_lock may block
fl_compare_owner: yes no
fl_notify: yes no
fl_grant: no no
fl_release_private: maybe no
fl_break: yes no
fl_mylease: yes no
fl_change yes no
Currently only NFSD and NLM provide instances of this class. None of
them block. If you have out-of-tree instances - please, show up. Locking
in that area will change.
--------------------------- buffer_head -----------------------------------
prototypes:
void (*b_end_io)(struct buffer_head *bh, int uptodate);
@ -364,17 +377,17 @@ prototypes:
void (*swap_slot_free_notify) (struct block_device *, unsigned long);
locking rules:
BKL bd_mutex
open: no yes
release: no yes
ioctl: no no
compat_ioctl: no no
direct_access: no no
media_changed: no no
unlock_native_capacity: no no
revalidate_disk: no no
getgeo: no no
swap_slot_free_notify: no no (see below)
bd_mutex
open: yes
release: yes
ioctl: no
compat_ioctl: no
direct_access: no
media_changed: no
unlock_native_capacity: no
revalidate_disk: no
getgeo: no
swap_slot_free_notify: no (see below)
media_changed, unlock_native_capacity and revalidate_disk are called only from
check_disk_change().
@ -413,34 +426,21 @@ prototypes:
unsigned long (*get_unmapped_area)(struct file *, unsigned long,
unsigned long, unsigned long, unsigned long);
int (*check_flags)(int);
int (*flock) (struct file *, int, struct file_lock *);
ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *,
size_t, unsigned int);
ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *,
size_t, unsigned int);
int (*setlease)(struct file *, long, struct file_lock **);
};
locking rules:
All may block.
BKL
llseek: no (see below)
read: no
aio_read: no
write: no
aio_write: no
readdir: no
poll: no
unlocked_ioctl: no
compat_ioctl: no
mmap: no
open: no
flush: no
release: no
fsync: no (see below)
aio_fsync: no
fasync: no
lock: yes
readv: no
writev: no
sendfile: no
sendpage: no
get_unmapped_area: no
check_flags: no
All may block except for ->setlease.
No VFS locks held on entry except for ->fsync and ->setlease.
->fsync() has i_mutex on inode.
->setlease has the file_list_lock held and must not sleep.
->llseek() locking has moved from llseek to the individual llseek
implementations. If your fs is not using generic_file_llseek, you
@ -450,17 +450,10 @@ mutex or just to use i_size_read() instead.
Note: this does not protect the file->f_pos against concurrent modifications
since this is something the userspace has to take care about.
Note: ext2_release() was *the* source of contention on fs-intensive
loads and dropping BKL on ->release() helps to get rid of that (we still
grab BKL for cases when we close a file that had been opened r/w, but that
can and should be done using the internal locking with smaller critical areas).
Current worst offender is ext2_get_block()...
->fasync() is called without BKL protection, and is responsible for
maintaining the FASYNC bit in filp->f_flags. Most instances call
fasync_helper(), which does that maintenance, so it's not normally
something one needs to worry about. Return values > 0 will be mapped to
zero in the VFS layer.
->fasync() is responsible for maintaining the FASYNC bit in filp->f_flags.
Most instances call fasync_helper(), which does that maintenance, so it's
not normally something one needs to worry about. Return values > 0 will be
mapped to zero in the VFS layer.
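A sketch of the usual pattern (illustrative, not from this commit; "mydev" and
its async_queue field are made up): the driver's ->fasync() simply delegates to
fasync_helper(), and readers are later signalled with kill_fasync():

static int mydev_fasync(int fd, struct file *filp, int on)
{
	struct mydev *dev = filp->private_data;

	return fasync_helper(fd, filp, on, &dev->async_queue);
}

/* ...and wherever new data becomes available: */
kill_fasync(&dev->async_queue, SIGIO, POLL_IN);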
->readdir() and ->ioctl() on directories must be changed. Ideally we would
move ->readdir() to inode_operations and use a separate method for directory
@ -471,8 +464,6 @@ components. And there are other reasons why the current interface is a mess...
->read on directories probably must go away - we should just enforce -EISDIR
in sys_read() and friends.
->fsync() has i_mutex on inode.
--------------------------- dquot_operations -------------------------------
prototypes:
int (*write_dquot) (struct dquot *);
@ -507,12 +498,12 @@ prototypes:
int (*access)(struct vm_area_struct *, unsigned long, void*, int, int);
locking rules:
BKL mmap_sem PageLocked(page)
open: no yes
close: no yes
fault: no yes can return with page locked
page_mkwrite: no yes can return with page locked
access: no yes
mmap_sem PageLocked(page)
open: yes
close: yes
fault: yes can return with page locked
page_mkwrite: yes can return with page locked
access: yes
->fault() is called when a previously not present pte is about
to be faulted in. The filesystem must find and return the page associated
@ -539,6 +530,3 @@ VM_IO | VM_PFNMAP VMAs.
(if you break something or notice that it is broken and do not fix it yourself
- at least put it here)
ipc/shm.c::shm_delete() - may need BKL.
->read() and ->write() in many drivers are (probably) missing BKL.

View File

@ -1759,7 +1759,7 @@ and is between 256 and 4096 characters. It is defined in the file
nousb [USB] Disable the USB subsystem
nowatchdog [KNL] Disable the lockup detector.
nowatchdog [KNL] Disable the lockup detector (NMI watchdog).
nowb [ARM]

View File

@ -1044,9 +1044,9 @@ Details:
/**
* queuecommand - queue scsi command, invoke 'done' on completion
* queuecommand - queue scsi command, invoke scp->scsi_done on completion
* @shost: pointer to the scsi host object
* @scp: pointer to scsi command object
* @done: function pointer to be invoked on completion
*
* Returns 0 on success.
*
@ -1074,42 +1074,45 @@ Details:
*
* Other types of errors that are detected immediately may be
* flagged by setting scp->result to an appropriate value,
* invoking the 'done' callback, and then returning 0 from this
* function. If the command is not performed immediately (and the
* LLD is starting (or will start) the given command) then this
* function should place 0 in scp->result and return 0.
* invoking the scp->scsi_done callback, and then returning 0
* from this function. If the command is not performed
* immediately (and the LLD is starting (or will start) the given
* command) then this function should place 0 in scp->result and
* return 0.
*
* Command ownership. If the driver returns zero, it owns the
* command and must take responsibility for ensuring the 'done'
* callback is executed. Note: the driver may call done before
* returning zero, but after it has called done, it may not
* return any value other than zero. If the driver makes a
* non-zero return, it must not execute the command's done
* callback at any time.
* command and must take responsibility for ensuring the
* scp->scsi_done callback is executed. Note: the driver may
* call scp->scsi_done before returning zero, but after it has
* called scp->scsi_done, it may not return any value other than
* zero. If the driver makes a non-zero return, it must not
* execute the command's scsi_done callback at any time.
*
* Locks: struct Scsi_Host::host_lock held on entry (with "irqsave")
* and is expected to be held on return.
* Locks: up to and including 2.6.36, struct Scsi_Host::host_lock
* held on entry (with "irqsave") and is expected to be
* held on return. From 2.6.37 onwards, queuecommand is
* called without any locks held.
*
* Calling context: in interrupt (soft irq) or process context
*
* Notes: This function should be relatively fast. Normally it will
* not wait for IO to complete. Hence the 'done' callback is invoked
* (often directly from an interrupt service routine) some time after
* this function has returned. In some cases (e.g. pseudo adapter
* drivers that manufacture the response to a SCSI INQUIRY)
* the 'done' callback may be invoked before this function returns.
* If the 'done' callback is not invoked within a certain period
* the SCSI mid level will commence error processing.
* If a status of CHECK CONDITION is placed in "result" when the
* 'done' callback is invoked, then the LLD driver should
* perform autosense and fill in the struct scsi_cmnd::sense_buffer
* Notes: This function should be relatively fast. Normally it
* will not wait for IO to complete. Hence the scp->scsi_done
* callback is invoked (often directly from an interrupt service
* routine) some time after this function has returned. In some
* cases (e.g. pseudo adapter drivers that manufacture the
* response to a SCSI INQUIRY) the scp->scsi_done callback may be
* invoked before this function returns. If the scp->scsi_done
* callback is not invoked within a certain period the SCSI mid
* level will commence error processing. If a status of CHECK
* CONDITION is placed in "result" when the scp->scsi_done
* callback is invoked, then the LLD driver should perform
* autosense and fill in the struct scsi_cmnd::sense_buffer
* array. The scsi_cmnd::sense_buffer array is zeroed prior to
* the mid level queuing a command to an LLD.
*
* Defined in: LLD
**/
int queuecommand(struct scsi_cmnd * scp,
void (*done)(struct scsi_cmnd *))
int queuecommand(struct Scsi_Host *shost, struct scsi_cmnd * scp)
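A hedged sketch of the new-style (2.6.37+) prototype in use; "mydrv" and its
helpers are hypothetical and not part of this document. No host_lock is held on
entry, errors are flagged via scp->result plus scp->scsi_done(), and the
function still returns 0 in both cases:

static int mydrv_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scp)
{
	struct mydrv_host *mh = shost_priv(shost);	/* hypothetical LLD data */

	if (!mh->device_ready) {
		scp->result = DID_NO_CONNECT << 16;	/* report the error... */
		scp->scsi_done(scp);			/* ...complete it now... */
		return 0;				/* ...and still return 0 */
	}

	mydrv_start_io(mh, scp);	/* hypothetical: completes asynchronously,
					 * calling scp->scsi_done() later */
	return 0;			/* the LLD now owns the command */
}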
/**

View File

@ -373,9 +373,18 @@ EVENT_PROCESS:
print " $regex_lru_isolate/o\n";
next;
}
my $isolate_mode = $1;
my $nr_scanned = $4;
my $nr_contig_dirty = $7;
$perprocesspid{$process_pid}->{HIGH_NR_SCANNED} += $nr_scanned;
# To more closely match vmstat scanning statistics, only count isolate_both
# and isolate_inactive as scanning. isolate_active is rotation
# isolate_inactive == 0
# isolate_active == 1
# isolate_both == 2
if ($isolate_mode != 1) {
$perprocesspid{$process_pid}->{HIGH_NR_SCANNED} += $nr_scanned;
}
$perprocesspid{$process_pid}->{HIGH_NR_CONTIG_DIRTY} += $nr_contig_dirty;
} elsif ($tracepoint eq "mm_vmscan_lru_shrink_inactive") {
$details = $5;

View File

@ -405,7 +405,7 @@ S: Supported
F: drivers/usb/gadget/amd5536udc.*
AMD GEODE PROCESSOR/CHIPSET SUPPORT
P: Jordan Crouse
P: Andres Salomon <dilinger@queued.net>
L: linux-geode@lists.infradead.org (moderated for non-subscribers)
W: http://www.amd.com/us-en/ConnectivitySolutions/TechnicalResources/0,,50_2334_2452_11363,00.html
S: Supported
@ -792,11 +792,14 @@ S: Maintained
ARM/NOMADIK ARCHITECTURE
M: Alessandro Rubini <rubini@unipv.it>
M: Linus Walleij <linus.walleij@stericsson.com>
M: STEricsson <STEricsson_nomadik_linux@list.st.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: arch/arm/mach-nomadik/
F: arch/arm/plat-nomadik/
F: drivers/i2c/busses/i2c-nomadik.c
T: git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-stericsson.git
ARM/OPENMOKO NEO FREERUNNER (GTA02) MACHINE SUPPORT
M: Nelson Castillo <arhuaco@freaks-unidos.net>
@ -998,12 +1001,24 @@ F: drivers/i2c/busses/i2c-stu300.c
F: drivers/rtc/rtc-coh901331.c
F: drivers/watchdog/coh901327_wdt.c
F: drivers/dma/coh901318*
F: drivers/mfd/ab3100*
F: drivers/rtc/rtc-ab3100.c
F: drivers/rtc/rtc-coh901331.c
T: git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-stericsson.git
ARM/U8500 ARM ARCHITECTURE
ARM/Ux500 ARM ARCHITECTURE
M: Srinidhi Kasagar <srinidhi.kasagar@stericsson.com>
M: Linus Walleij <linus.walleij@stericsson.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: arch/arm/mach-ux500/
F: drivers/dma/ste_dma40*
F: drivers/mfd/ab3550*
F: drivers/mfd/abx500*
F: drivers/mfd/ab8500*
F: drivers/mfd/stmpe*
F: drivers/rtc/rtc-ab8500.c
T: git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-stericsson.git
ARM/VFP SUPPORT
M: Russell King <linux@arm.linux.org.uk>
@ -4590,7 +4605,7 @@ F: drivers/pcmcia/
F: include/pcmcia/
PCNET32 NETWORK DRIVER
M: Don Fry <pcnet32@verizon.net>
M: Don Fry <pcnet32@frontier.com>
L: netdev@vger.kernel.org
S: Maintained
F: drivers/net/pcnet32.c

View File

@ -1,7 +1,7 @@
VERSION = 2
PATCHLEVEL = 6
SUBLEVEL = 37
EXTRAVERSION = -rc7
EXTRAVERSION =
NAME = Flesh-Eating Bats with Fangs
# *DOCUMENTATION*

View File

@ -352,3 +352,4 @@ struct pci_bus * __init it8152_pci_scan_bus(int nr, struct pci_sys_data *sys)
return pci_scan_bus(nr, &it8152_ops, sys);
}
EXPORT_SYMBOL(dma_set_coherent_mask);

View File

@ -76,6 +76,7 @@ extern unsigned long it8152_base_address;
IT8152_PD_IRQ(0) Audio controller (ACR)
*/
#define IT8152_IRQ(x) (IRQ_BOARD_START + (x))
#define IT8152_LAST_IRQ (IRQ_BOARD_START + 40)
/* IRQ-sources in 3 groups - local devices, LPC (serial), and external PCI */
#define IT8152_LD_IRQ_COUNT 9

View File

@ -25,9 +25,6 @@ extern void *kmap_high(struct page *page);
extern void *kmap_high_get(struct page *page);
extern void kunmap_high(struct page *page);
extern void *kmap_high_l1_vipt(struct page *page, pte_t *saved_pte);
extern void kunmap_high_l1_vipt(struct page *page, pte_t saved_pte);
/*
* The following functions are already defined by <linux/highmem.h>
* when CONFIG_HIGHMEM is not set.

View File

@ -13,9 +13,6 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/* DO NOT EDIT!! - this file automatically generated
* from .s file by awk -f s2h.awk
*/
/* Size definitions
* Copyright (C) ARM Limited 1998. All rights reserved.
*/
@ -25,6 +22,9 @@
/* handy sizes */
#define SZ_16 0x00000010
#define SZ_32 0x00000020
#define SZ_64 0x00000040
#define SZ_128 0x00000080
#define SZ_256 0x00000100
#define SZ_512 0x00000200

View File

@ -150,6 +150,7 @@ extern unsigned int user_debug;
#define rmb() dmb()
#define wmb() mb()
#else
#include <asm/memory.h>
#define mb() do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
#define rmb() do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
#define wmb() do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)

View File

@ -29,6 +29,9 @@ ret_fast_syscall:
ldr r1, [tsk, #TI_FLAGS]
tst r1, #_TIF_WORK_MASK
bne fast_work_pending
#if defined(CONFIG_IRQSOFF_TRACER)
asm_trace_hardirqs_on
#endif
/* perform architecture specific actions before user return */
arch_ret_to_user r1, lr
@ -65,6 +68,9 @@ ret_slow_syscall:
tst r1, #_TIF_WORK_MASK
bne work_pending
no_work_pending:
#if defined(CONFIG_IRQSOFF_TRACER)
asm_trace_hardirqs_on
#endif
/* perform architecture specific actions before user return */
arch_ret_to_user r1, lr

View File

@ -310,7 +310,6 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
* All kernel threads share the same mm context; grab a
* reference and switch to it.
*/
atomic_inc(&mm->mm_users);
atomic_inc(&mm->mm_count);
current->active_mm = mm;
cpumask_set_cpu(cpu, mm_cpumask(mm));

View File

@ -74,6 +74,8 @@
#define AT91_MCI_TRTYP_BLOCK (0 << 19)
#define AT91_MCI_TRTYP_MULTIPLE (1 << 19)
#define AT91_MCI_TRTYP_STREAM (2 << 19)
#define AT91_MCI_TRTYP_SDIO_BYTE (4 << 19)
#define AT91_MCI_TRTYP_SDIO_BLOCK (5 << 19)
#define AT91_MCI_BLKR 0x18 /* Block Register */
#define AT91_MCI_BLKR_BCNT(n) ((0xffff & (n)) << 0) /* Block count */

View File

@ -513,4 +513,4 @@ int dma_set_coherent_mask(struct device *dev, u64 mask)
EXPORT_SYMBOL(ixp4xx_pci_read);
EXPORT_SYMBOL(ixp4xx_pci_write);
EXPORT_SYMBOL(dma_set_coherent_mask);

View File

@ -540,6 +540,7 @@ config MACH_ICONTROL
config ARCH_PXA_ESERIES
bool "PXA based Toshiba e-series PDAs"
select PXA25x
select FB_W100
config MACH_E330
bool "Toshiba e330"

View File

@ -353,8 +353,8 @@ resume_turn_on_mmu:
@ Let us ensure we jump to resume_after_mmu only when the mcr above
@ actually took effect. They call it the "cpwait" operation.
mrc p15, 0, r1, c2, c0, 0 @ queue a dependency on CP15
sub pc, r2, r1, lsr #32 @ jump to virtual addr
mrc p15, 0, r0, c2, c0, 0 @ queue a dependency on CP15
sub pc, r2, r0, lsr #32 @ jump to virtual addr
nop
nop
nop

View File

@ -13,13 +13,9 @@
*/
#include <linux/init.h>
#include <linux/highmem.h>
#include <asm/cacheflush.h>
#include <asm/kmap_types.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <plat/cache-feroceon-l2.h>
#include "mm.h"
/*
* Low-level cache maintenance operations.
@ -39,27 +35,30 @@
* between which we don't want to be preempted.
*/
static inline unsigned long l2_start_va(unsigned long paddr)
static inline unsigned long l2_get_va(unsigned long paddr)
{
#ifdef CONFIG_HIGHMEM
/*
* Let's do our own fixmap stuff in a minimal way here.
* Because range ops can't be done on physical addresses,
* we simply install a virtual mapping for it only for the
* TLB lookup to occur, hence no need to flush the untouched
* memory mapping. This is protected with the disabling of
* interrupts by the caller.
* memory mapping afterwards (note: a cache flush may happen
* in some circumstances depending on the path taken in kunmap_atomic).
*/
unsigned long idx = KM_L2_CACHE + KM_TYPE_NR * smp_processor_id();
unsigned long vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
set_pte_ext(TOP_PTE(vaddr), pfn_pte(paddr >> PAGE_SHIFT, PAGE_KERNEL), 0);
local_flush_tlb_kernel_page(vaddr);
return vaddr + (paddr & ~PAGE_MASK);
void *vaddr = kmap_atomic_pfn(paddr >> PAGE_SHIFT);
return (unsigned long)vaddr + (paddr & ~PAGE_MASK);
#else
return __phys_to_virt(paddr);
#endif
}
static inline void l2_put_va(unsigned long vaddr)
{
#ifdef CONFIG_HIGHMEM
kunmap_atomic((void *)vaddr);
#endif
}
static inline void l2_clean_pa(unsigned long addr)
{
__asm__("mcr p15, 1, %0, c15, c9, 3" : : "r" (addr));
@ -76,13 +75,14 @@ static inline void l2_clean_pa_range(unsigned long start, unsigned long end)
*/
BUG_ON((start ^ end) >> PAGE_SHIFT);
raw_local_irq_save(flags);
va_start = l2_start_va(start);
va_start = l2_get_va(start);
va_end = va_start + (end - start);
raw_local_irq_save(flags);
__asm__("mcr p15, 1, %0, c15, c9, 4\n\t"
"mcr p15, 1, %1, c15, c9, 5"
: : "r" (va_start), "r" (va_end));
raw_local_irq_restore(flags);
l2_put_va(va_start);
}
static inline void l2_clean_inv_pa(unsigned long addr)
@ -106,13 +106,14 @@ static inline void l2_inv_pa_range(unsigned long start, unsigned long end)
*/
BUG_ON((start ^ end) >> PAGE_SHIFT);
raw_local_irq_save(flags);
va_start = l2_start_va(start);
va_start = l2_get_va(start);
va_end = va_start + (end - start);
raw_local_irq_save(flags);
__asm__("mcr p15, 1, %0, c15, c11, 4\n\t"
"mcr p15, 1, %1, c15, c11, 5"
: : "r" (va_start), "r" (va_end));
raw_local_irq_restore(flags);
l2_put_va(va_start);
}
static inline void l2_inv_all(void)

View File

@ -17,14 +17,10 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/init.h>
#include <linux/highmem.h>
#include <asm/system.h>
#include <asm/cputype.h>
#include <asm/cacheflush.h>
#include <asm/kmap_types.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include "mm.h"
#define CR_L2 (1 << 26)
@ -71,16 +67,15 @@ static inline void xsc3_l2_inv_all(void)
dsb();
}
static inline void l2_unmap_va(unsigned long va)
{
#ifdef CONFIG_HIGHMEM
#define l2_map_save_flags(x) raw_local_save_flags(x)
#define l2_map_restore_flags(x) raw_local_irq_restore(x)
#else
#define l2_map_save_flags(x) ((x) = 0)
#define l2_map_restore_flags(x) ((void)(x))
if (va != -1)
kunmap_atomic((void *)va);
#endif
}
static inline unsigned long l2_map_va(unsigned long pa, unsigned long prev_va,
unsigned long flags)
static inline unsigned long l2_map_va(unsigned long pa, unsigned long prev_va)
{
#ifdef CONFIG_HIGHMEM
unsigned long va = prev_va & PAGE_MASK;
@ -89,17 +84,10 @@ static inline unsigned long l2_map_va(unsigned long pa, unsigned long prev_va,
/*
* Switching to a new page. Because cache ops are
* using virtual addresses only, we must put a mapping
* in place for it. We also enable interrupts for a
* short while and disable them again to protect this
* mapping.
* in place for it.
*/
unsigned long idx;
raw_local_irq_restore(flags);
idx = KM_L2_CACHE + KM_TYPE_NR * smp_processor_id();
va = __fix_to_virt(FIX_KMAP_BEGIN + idx);
raw_local_irq_restore(flags | PSR_I_BIT);
set_pte_ext(TOP_PTE(va), pfn_pte(pa >> PAGE_SHIFT, PAGE_KERNEL), 0);
local_flush_tlb_kernel_page(va);
l2_unmap_va(prev_va);
va = (unsigned long)kmap_atomic_pfn(pa >> PAGE_SHIFT);
}
return va + (pa_offset >> (32 - PAGE_SHIFT));
#else
@ -109,7 +97,7 @@ static inline unsigned long l2_map_va(unsigned long pa, unsigned long prev_va,
static void xsc3_l2_inv_range(unsigned long start, unsigned long end)
{
unsigned long vaddr, flags;
unsigned long vaddr;
if (start == 0 && end == -1ul) {
xsc3_l2_inv_all();
@ -117,13 +105,12 @@ static void xsc3_l2_inv_range(unsigned long start, unsigned long end)
}
vaddr = -1; /* to force the first mapping */
l2_map_save_flags(flags);
/*
* Clean and invalidate partial first cache line.
*/
if (start & (CACHE_LINE_SIZE - 1)) {
vaddr = l2_map_va(start & ~(CACHE_LINE_SIZE - 1), vaddr, flags);
vaddr = l2_map_va(start & ~(CACHE_LINE_SIZE - 1), vaddr);
xsc3_l2_clean_mva(vaddr);
xsc3_l2_inv_mva(vaddr);
start = (start | (CACHE_LINE_SIZE - 1)) + 1;
@ -133,7 +120,7 @@ static void xsc3_l2_inv_range(unsigned long start, unsigned long end)
* Invalidate all full cache lines between 'start' and 'end'.
*/
while (start < (end & ~(CACHE_LINE_SIZE - 1))) {
vaddr = l2_map_va(start, vaddr, flags);
vaddr = l2_map_va(start, vaddr);
xsc3_l2_inv_mva(vaddr);
start += CACHE_LINE_SIZE;
}
@ -142,31 +129,30 @@ static void xsc3_l2_inv_range(unsigned long start, unsigned long end)
* Clean and invalidate partial last cache line.
*/
if (start < end) {
vaddr = l2_map_va(start, vaddr, flags);
vaddr = l2_map_va(start, vaddr);
xsc3_l2_clean_mva(vaddr);
xsc3_l2_inv_mva(vaddr);
}
l2_map_restore_flags(flags);
l2_unmap_va(vaddr);
dsb();
}
static void xsc3_l2_clean_range(unsigned long start, unsigned long end)
{
unsigned long vaddr, flags;
unsigned long vaddr;
vaddr = -1; /* to force the first mapping */
l2_map_save_flags(flags);
start &= ~(CACHE_LINE_SIZE - 1);
while (start < end) {
vaddr = l2_map_va(start, vaddr, flags);
vaddr = l2_map_va(start, vaddr);
xsc3_l2_clean_mva(vaddr);
start += CACHE_LINE_SIZE;
}
l2_map_restore_flags(flags);
l2_unmap_va(vaddr);
dsb();
}
@ -193,7 +179,7 @@ static inline void xsc3_l2_flush_all(void)
static void xsc3_l2_flush_range(unsigned long start, unsigned long end)
{
unsigned long vaddr, flags;
unsigned long vaddr;
if (start == 0 && end == -1ul) {
xsc3_l2_flush_all();
@ -201,17 +187,16 @@ static void xsc3_l2_flush_range(unsigned long start, unsigned long end)
}
vaddr = -1; /* to force the first mapping */
l2_map_save_flags(flags);
start &= ~(CACHE_LINE_SIZE - 1);
while (start < end) {
vaddr = l2_map_va(start, vaddr, flags);
vaddr = l2_map_va(start, vaddr);
xsc3_l2_clean_mva(vaddr);
xsc3_l2_inv_mva(vaddr);
start += CACHE_LINE_SIZE;
}
l2_map_restore_flags(flags);
l2_unmap_va(vaddr);
dsb();
}

View File

@ -17,6 +17,7 @@
#include <linux/init.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/highmem.h>
#include <asm/memory.h>
#include <asm/highmem.h>
@ -480,10 +481,10 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset,
op(vaddr, len, dir);
kunmap_high(page);
} else if (cache_is_vipt()) {
pte_t saved_pte;
vaddr = kmap_high_l1_vipt(page, &saved_pte);
/* unmapped pages might still be cached */
vaddr = kmap_atomic(page);
op(vaddr + offset, len, dir);
kunmap_high_l1_vipt(page, saved_pte);
kunmap_atomic(vaddr);
}
} else {
vaddr = page_address(page) + offset;

View File

@ -10,6 +10,7 @@
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <asm/cacheflush.h>
#include <asm/cachetype.h>
@ -180,10 +181,10 @@ void __flush_dcache_page(struct address_space *mapping, struct page *page)
__cpuc_flush_dcache_area(addr, PAGE_SIZE);
kunmap_high(page);
} else if (cache_is_vipt()) {
pte_t saved_pte;
addr = kmap_high_l1_vipt(page, &saved_pte);
/* unmapped pages might still be cached */
addr = kmap_atomic(page);
__cpuc_flush_dcache_area(addr, PAGE_SIZE);
kunmap_high_l1_vipt(page, saved_pte);
kunmap_atomic(addr);
}
}

View File

@ -140,90 +140,3 @@ struct page *kmap_atomic_to_page(const void *ptr)
pte = TOP_PTE(vaddr);
return pte_page(*pte);
}
#ifdef CONFIG_CPU_CACHE_VIPT
#include <linux/percpu.h>
/*
* The VIVT cache of a highmem page is always flushed before the page
* is unmapped. Hence unmapped highmem pages need no cache maintenance
* in that case.
*
* However unmapped pages may still be cached with a VIPT cache, and
* it is not possible to perform cache maintenance on them using physical
* addresses unfortunately. So we have no choice but to set up a temporary
* virtual mapping for that purpose.
*
* Yet this VIPT cache maintenance may be triggered from DMA support
* functions which are possibly called from interrupt context. As we don't
* want to keep interrupt disabled all the time when such maintenance is
* taking place, we therefore allow for some reentrancy by preserving and
* restoring the previous fixmap entry before the interrupted context is
* resumed. If the reentrancy depth is 0 then there is no need to restore
* the previous fixmap, and leaving the current one in place allow it to
* be reused the next time without a TLB flush (common with DMA).
*/
static DEFINE_PER_CPU(int, kmap_high_l1_vipt_depth);
void *kmap_high_l1_vipt(struct page *page, pte_t *saved_pte)
{
unsigned int idx, cpu;
int *depth;
unsigned long vaddr, flags;
pte_t pte, *ptep;
if (!in_interrupt())
preempt_disable();
cpu = smp_processor_id();
depth = &per_cpu(kmap_high_l1_vipt_depth, cpu);
idx = KM_L1_CACHE + KM_TYPE_NR * cpu;
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
ptep = TOP_PTE(vaddr);
pte = mk_pte(page, kmap_prot);
raw_local_irq_save(flags);
(*depth)++;
if (pte_val(*ptep) == pte_val(pte)) {
*saved_pte = pte;
} else {
*saved_pte = *ptep;
set_pte_ext(ptep, pte, 0);
local_flush_tlb_kernel_page(vaddr);
}
raw_local_irq_restore(flags);
return (void *)vaddr;
}
void kunmap_high_l1_vipt(struct page *page, pte_t saved_pte)
{
unsigned int idx, cpu = smp_processor_id();
int *depth = &per_cpu(kmap_high_l1_vipt_depth, cpu);
unsigned long vaddr, flags;
pte_t pte, *ptep;
idx = KM_L1_CACHE + KM_TYPE_NR * cpu;
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
ptep = TOP_PTE(vaddr);
pte = mk_pte(page, kmap_prot);
BUG_ON(pte_val(*ptep) != pte_val(pte));
BUG_ON(*depth <= 0);
raw_local_irq_save(flags);
(*depth)--;
if (*depth != 0 && pte_val(pte) != pte_val(saved_pte)) {
set_pte_ext(ptep, saved_pte, 0);
local_flush_tlb_kernel_page(vaddr);
}
raw_local_irq_restore(flags);
if (!in_interrupt())
preempt_enable();
}
#endif /* CONFIG_CPU_CACHE_VIPT */

View File

@ -459,7 +459,7 @@ void migrate_irqs(void)
tmp = CROSS_GxICR(irq, new);
x &= GxICR_LEVEL | GxICR_ENABLE;
if (GxICR(irq) & GxICR_REQUEST) {
if (GxICR(irq) & GxICR_REQUEST)
x |= GxICR_REQUEST | GxICR_DETECT;
CROSS_GxICR(irq, new) = x;
tmp = CROSS_GxICR(irq, new);

View File

@ -63,6 +63,7 @@
#include <linux/of_gpio.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/watchdog.h>
#include <linux/miscdevice.h>
#include <linux/uaccess.h>

View File

@ -140,7 +140,7 @@ void __init init_se7206_IRQ(void)
make_se7206_irq(IRQ1_IRQ); /* ATA */
make_se7206_irq(IRQ3_IRQ); /* SLOT / PCM */
__raw_writew(__raw_readw(INTC_ICR1) | 0x000b, INTC_ICR); /* ICR1 */
__raw_writew(__raw_readw(INTC_ICR1) | 0x000b, INTC_ICR1); /* ICR1 */
/* FPGA System register setup*/
__raw_writew(0x0000,INTSTS0); /* Clear INTSTS0 */

View File

@ -34,7 +34,7 @@ static const int pfc_divisors[]={1,2,3,4,6,8,12};
static void master_clk_init(struct clk *clk)
{
return 10000000 * PLL2 * pll1rate[(__raw_readw(FREQCR) >> 8) & 0x0007];
clk->rate = 10000000 * PLL2 * pll1rate[(__raw_readw(FREQCR) >> 8) & 0x0007];
}
static struct clk_ops sh7201_master_clk_ops = {

View File

@ -81,8 +81,7 @@ static void shoc_clk_init(struct clk *clk)
for (i = 0; i < ARRAY_SIZE(frqcr3_divisors); i++) {
int divisor = frqcr3_divisors[i];
if (clk->ops->set_rate(clk, clk->parent->rate /
divisor, 0) == 0)
if (clk->ops->set_rate(clk, clk->parent->rate / divisor) == 0)
break;
}

View File

@ -364,8 +364,7 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
/* For performance reasons, reuse mc area when possible */
if (!mc || mc_size > curr_mc_size) {
if (mc)
vfree(mc);
vfree(mc);
mc = vmalloc(mc_size);
if (!mc)
break;
@ -374,13 +373,11 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
if (get_ucode_data(mc, ucode_ptr, mc_size) ||
microcode_sanity_check(mc) < 0) {
vfree(mc);
break;
}
if (get_matching_microcode(&uci->cpu_sig, mc, new_rev)) {
if (new_mc)
vfree(new_mc);
vfree(new_mc);
new_rev = mc_header.rev;
new_mc = mc;
mc = NULL; /* trigger new vmalloc */
@ -390,12 +387,10 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
leftover -= mc_size;
}
if (mc)
vfree(mc);
vfree(mc);
if (leftover) {
if (new_mc)
vfree(new_mc);
vfree(new_mc);
state = UCODE_ERROR;
goto out;
}
@ -405,8 +400,7 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
goto out;
}
if (uci->mc)
vfree(uci->mc);
vfree(uci->mc);
uci->mc = (struct microcode_intel *)new_mc;
pr_debug("CPU%d found a matching microcode update with version 0x%x (current=0x%x)\n",

View File

@ -501,7 +501,18 @@ static inline unsigned long long get_total_mem(void)
return total << PAGE_SHIFT;
}
#define DEFAULT_BZIMAGE_ADDR_MAX 0x37FFFFFF
/*
* Keep the crash kernel below this limit. On 32 bits earlier kernels
* would limit the kernel to the low 512 MiB due to mapping restrictions.
* On 64 bits, kexec-tools currently limits us to 896 MiB; increase this
* limit once kexec-tools are fixed.
*/
#ifdef CONFIG_X86_32
# define CRASH_KERNEL_ADDR_MAX (512 << 20)
#else
# define CRASH_KERNEL_ADDR_MAX (896 << 20)
#endif
static void __init reserve_crashkernel(void)
{
unsigned long long total_mem;
@ -520,10 +531,10 @@ static void __init reserve_crashkernel(void)
const unsigned long long alignment = 16<<20; /* 16M */
/*
* kexec want bzImage is below DEFAULT_BZIMAGE_ADDR_MAX
* kexec want bzImage is below CRASH_KERNEL_ADDR_MAX
*/
crash_base = memblock_find_in_range(alignment,
DEFAULT_BZIMAGE_ADDR_MAX, crash_size, alignment);
CRASH_KERNEL_ADDR_MAX, crash_size, alignment);
if (crash_base == MEMBLOCK_ERROR) {
pr_info("crashkernel reservation failed - No suitable area found.\n");

View File

@ -575,6 +575,8 @@ struct kvm_pic *kvm_create_pic(struct kvm *kvm)
s->pics[1].elcr_mask = 0xde;
s->pics[0].pics_state = s;
s->pics[1].pics_state = s;
s->pics[0].isr_ack = 0xff;
s->pics[1].isr_ack = 0xff;
/*
* Initialize PIO device

View File

@ -2394,7 +2394,8 @@ static int mmu_alloc_direct_roots(struct kvm_vcpu *vcpu)
ASSERT(!VALID_PAGE(root));
spin_lock(&vcpu->kvm->mmu_lock);
kvm_mmu_free_some_pages(vcpu);
sp = kvm_mmu_get_page(vcpu, i << 30, i << 30,
sp = kvm_mmu_get_page(vcpu, i << (30 - PAGE_SHIFT),
i << 30,
PT32_ROOT_LEVEL, 1, ACC_ALL,
NULL);
root = __pa(sp->spt);

View File

@ -630,21 +630,29 @@ static int __init_ibs_nmi(void)
return 0;
}
/* initialize the APIC for the IBS interrupts if available */
/*
* check and reserve APIC extended interrupt LVT offset for IBS if
* available
*
* init_ibs() performs implicitly cpu-local operations, so pin this
* thread to its current CPU
*/
static void init_ibs(void)
{
preempt_disable();
ibs_caps = get_ibs_caps();
if (!ibs_caps)
return;
goto out;
if (__init_ibs_nmi()) {
if (__init_ibs_nmi() < 0)
ibs_caps = 0;
return;
}
else
printk(KERN_INFO "oprofile: AMD IBS detected (0x%08x)\n", ibs_caps);
printk(KERN_INFO "oprofile: AMD IBS detected (0x%08x)\n",
(unsigned)ibs_caps);
out:
preempt_enable();
}
static int (*create_arch_files)(struct super_block *sb, struct dentry *root);

View File

@ -408,6 +408,9 @@ acpi_ev_match_gpe_method(acpi_handle obj_handle,
return_ACPI_STATUS(AE_OK);
}
/* Disable the GPE in case it's been enabled already. */
(void)acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_DISABLE);
/*
* Add the GPE information from above to the gpe_event_info block for
* use during dispatch of this GPE.

View File

@ -130,8 +130,6 @@ struct acpi_battery {
unsigned long flags;
};
static int acpi_battery_update(struct acpi_battery *battery);
#define to_acpi_battery(x) container_of(x, struct acpi_battery, bat);
inline int acpi_battery_present(struct acpi_battery *battery)
@ -186,9 +184,6 @@ static int acpi_battery_get_property(struct power_supply *psy,
int ret = 0;
struct acpi_battery *battery = to_acpi_battery(psy);
if (acpi_battery_update(battery))
return -ENODEV;
if (acpi_battery_present(battery)) {
/* run battery update only if it is present */
acpi_battery_get_state(battery);

View File

@ -705,54 +705,85 @@ static int acpi_bus_get_perf_flags(struct acpi_device *device)
}
static acpi_status
acpi_bus_extract_wakeup_device_power_package(struct acpi_device *device,
union acpi_object *package)
acpi_bus_extract_wakeup_device_power_package(acpi_handle handle,
struct acpi_device_wakeup *wakeup)
{
int i = 0;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *package = NULL;
union acpi_object *element = NULL;
acpi_status status;
int i = 0;
if (!device || !package || (package->package.count < 2))
if (!wakeup)
return AE_BAD_PARAMETER;
/* _PRW */
status = acpi_evaluate_object(handle, "_PRW", NULL, &buffer);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PRW"));
return status;
}
package = (union acpi_object *)buffer.pointer;
if (!package || (package->package.count < 2)) {
status = AE_BAD_DATA;
goto out;
}
element = &(package->package.elements[0]);
if (!element)
return AE_BAD_PARAMETER;
if (!element) {
status = AE_BAD_DATA;
goto out;
}
if (element->type == ACPI_TYPE_PACKAGE) {
if ((element->package.count < 2) ||
(element->package.elements[0].type !=
ACPI_TYPE_LOCAL_REFERENCE)
|| (element->package.elements[1].type != ACPI_TYPE_INTEGER))
return AE_BAD_DATA;
device->wakeup.gpe_device =
|| (element->package.elements[1].type != ACPI_TYPE_INTEGER)) {
status = AE_BAD_DATA;
goto out;
}
wakeup->gpe_device =
element->package.elements[0].reference.handle;
device->wakeup.gpe_number =
wakeup->gpe_number =
(u32) element->package.elements[1].integer.value;
} else if (element->type == ACPI_TYPE_INTEGER) {
device->wakeup.gpe_number = element->integer.value;
} else
return AE_BAD_DATA;
wakeup->gpe_device = NULL;
wakeup->gpe_number = element->integer.value;
} else {
status = AE_BAD_DATA;
goto out;
}
element = &(package->package.elements[1]);
if (element->type != ACPI_TYPE_INTEGER) {
return AE_BAD_DATA;
status = AE_BAD_DATA;
goto out;
}
device->wakeup.sleep_state = element->integer.value;
wakeup->sleep_state = element->integer.value;
if ((package->package.count - 2) > ACPI_MAX_HANDLES) {
return AE_NO_MEMORY;
status = AE_NO_MEMORY;
goto out;
}
device->wakeup.resources.count = package->package.count - 2;
for (i = 0; i < device->wakeup.resources.count; i++) {
wakeup->resources.count = package->package.count - 2;
for (i = 0; i < wakeup->resources.count; i++) {
element = &(package->package.elements[i + 2]);
if (element->type != ACPI_TYPE_LOCAL_REFERENCE)
return AE_BAD_DATA;
if (element->type != ACPI_TYPE_LOCAL_REFERENCE) {
status = AE_BAD_DATA;
goto out;
}
device->wakeup.resources.handles[i] = element->reference.handle;
wakeup->resources.handles[i] = element->reference.handle;
}
acpi_gpe_can_wake(device->wakeup.gpe_device, device->wakeup.gpe_number);
acpi_gpe_can_wake(wakeup->gpe_device, wakeup->gpe_number);
return AE_OK;
out:
kfree(buffer.pointer);
return status;
}
static void acpi_bus_set_run_wake_flags(struct acpi_device *device)
@ -787,26 +818,15 @@ static void acpi_bus_set_run_wake_flags(struct acpi_device *device)
static int acpi_bus_get_wakeup_device_flags(struct acpi_device *device)
{
acpi_status status = 0;
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *package = NULL;
int psw_error;
/* _PRW */
status = acpi_evaluate_object(device->handle, "_PRW", NULL, &buffer);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PRW"));
goto end;
}
package = (union acpi_object *)buffer.pointer;
status = acpi_bus_extract_wakeup_device_power_package(device, package);
status = acpi_bus_extract_wakeup_device_power_package(device->handle,
&device->wakeup);
if (ACPI_FAILURE(status)) {
ACPI_EXCEPTION((AE_INFO, status, "Extracting _PRW package"));
goto end;
}
kfree(buffer.pointer);
device->wakeup.flags.valid = 1;
device->wakeup.prepare_count = 0;
acpi_bus_set_run_wake_flags(device);
@ -1351,6 +1371,7 @@ static acpi_status acpi_bus_check_add(acpi_handle handle, u32 lvl,
struct acpi_bus_ops *ops = context;
int type;
unsigned long long sta;
struct acpi_device_wakeup wakeup;
struct acpi_device *device;
acpi_status status;
int result;
@ -1360,8 +1381,10 @@ static acpi_status acpi_bus_check_add(acpi_handle handle, u32 lvl,
return AE_OK;
if (!(sta & ACPI_STA_DEVICE_PRESENT) &&
!(sta & ACPI_STA_DEVICE_FUNCTIONING))
!(sta & ACPI_STA_DEVICE_FUNCTIONING)) {
acpi_bus_extract_wakeup_device_power_package(handle, &wakeup);
return AE_CTRL_DEPTH;
}
/*
* We may already have an acpi_device from a previous enumeration. If

View File

@ -128,16 +128,6 @@ config PDC_ADMA
If unsure, say N.
config PATA_MPC52xx
tristate "Freescale MPC52xx SoC internal IDE"
depends on PPC_MPC52xx && PPC_BESTCOMM
select PPC_BESTCOMM_ATA
help
This option enables support for integrated IDE controller
of the Freescale MPC52xx SoC.
If unsure, say N.
config PATA_OCTEON_CF
tristate "OCTEON Boot Bus Compact Flash support"
depends on CPU_CAVIUM_OCTEON
@ -366,7 +356,7 @@ config PATA_CS5535
config PATA_CS5536
tristate "CS5536 PATA support"
depends on PCI && X86 && !X86_64
depends on PCI
help
This option enables support for the AMD CS5536
companion chip used with the Geode LX processor family.
@ -491,6 +481,16 @@ config PATA_MARVELL
If unsure, say N.
config PATA_MPC52xx
tristate "Freescale MPC52xx SoC internal IDE"
depends on PPC_MPC52xx && PPC_BESTCOMM
select PPC_BESTCOMM_ATA
help
This option enables support for integrated IDE controller
of the Freescale MPC52xx SoC.
If unsure, say N.
config PATA_NETCELL
tristate "NETCELL Revolution RAID support"
depends on PCI

View File

@ -11,7 +11,6 @@ obj-$(CONFIG_SATA_DWC) += sata_dwc_460ex.o
# SFF w/ custom DMA
obj-$(CONFIG_PDC_ADMA) += pdc_adma.o
obj-$(CONFIG_PATA_MPC52xx) += pata_mpc52xx.o
obj-$(CONFIG_PATA_OCTEON_CF) += pata_octeon_cf.o
obj-$(CONFIG_SATA_QSTOR) += sata_qstor.o
obj-$(CONFIG_SATA_SX4) += sata_sx4.o
@ -52,6 +51,7 @@ obj-$(CONFIG_PATA_IT821X) += pata_it821x.o
obj-$(CONFIG_PATA_JMICRON) += pata_jmicron.o
obj-$(CONFIG_PATA_MACIO) += pata_macio.o
obj-$(CONFIG_PATA_MARVELL) += pata_marvell.o
obj-$(CONFIG_PATA_MPC52xx) += pata_mpc52xx.o
obj-$(CONFIG_PATA_NETCELL) += pata_netcell.o
obj-$(CONFIG_PATA_NINJA32) += pata_ninja32.o
obj-$(CONFIG_PATA_NS87415) += pata_ns87415.o

View File

@ -4807,9 +4807,6 @@ static void ata_verify_xfer(struct ata_queued_cmd *qc)
{
struct ata_device *dev = qc->dev;
if (ata_tag_internal(qc->tag))
return;
if (ata_is_nodata(qc->tf.protocol))
return;
@ -4858,14 +4855,23 @@ void ata_qc_complete(struct ata_queued_cmd *qc)
if (unlikely(qc->err_mask))
qc->flags |= ATA_QCFLAG_FAILED;
if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
/* always fill result TF for failed qc */
/*
* Finish internal commands without any further processing
* and always with the result TF filled.
*/
if (unlikely(ata_tag_internal(qc->tag))) {
fill_result_tf(qc);
__ata_qc_complete(qc);
return;
}
if (!ata_tag_internal(qc->tag))
ata_qc_schedule_eh(qc);
else
__ata_qc_complete(qc);
/*
* Non-internal qc has failed. Fill the result TF and
* summon EH.
*/
if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
fill_result_tf(qc);
ata_qc_schedule_eh(qc);
return;
}

View File

@ -3275,6 +3275,7 @@ static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
struct ata_port *ap = ata_is_host_link(link) ? link->ap : NULL;
struct ata_eh_context *ehc = &link->eh_context;
struct ata_device *dev, *link_dev = NULL, *lpm_dev = NULL;
enum ata_lpm_policy old_policy = link->lpm_policy;
unsigned int hints = ATA_LPM_EMPTY | ATA_LPM_HIPM;
unsigned int err_mask;
int rc;
@ -3338,6 +3339,14 @@ static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
goto fail;
}
/*
* Low level driver acked the transition. Issue DIPM command
* with the new policy set.
*/
link->lpm_policy = policy;
if (ap && ap->slave_link)
ap->slave_link->lpm_policy = policy;
/* host config updated, enable DIPM if transitioning to MIN_POWER */
ata_for_each_dev(dev, link, ENABLED) {
if (policy == ATA_LPM_MIN_POWER && ata_id_has_dipm(dev->id)) {
@ -3353,12 +3362,14 @@ static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
}
}
link->lpm_policy = policy;
if (ap && ap->slave_link)
ap->slave_link->lpm_policy = policy;
return 0;
fail:
/* restore the old policy */
link->lpm_policy = old_policy;
if (ap && ap->slave_link)
ap->slave_link->lpm_policy = old_policy;
/* if no device or only one more chance is left, disable LPM */
if (!dev || ehc->tries[dev->devno] <= 2) {
ata_link_printk(link, KERN_WARNING,

View File

@ -1532,11 +1532,10 @@ static unsigned int __ata_sff_port_intr(struct ata_port *ap,
if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
return ata_sff_idle_irq(ap);
break;
case HSM_ST:
case HSM_ST_LAST:
break;
default:
case HSM_ST_IDLE:
return ata_sff_idle_irq(ap);
default:
break;
}
/* check main status, clearing INTRQ if needed */

View File

@ -37,10 +37,22 @@
#include <linux/delay.h>
#include <linux/libata.h>
#include <scsi/scsi_host.h>
#ifdef CONFIG_X86_32
#include <asm/msr.h>
static int use_msr;
module_param_named(msr, use_msr, int, 0644);
MODULE_PARM_DESC(msr, "Force using MSR to configure IDE function (Default: 0)");
#else
#undef rdmsr /* avoid accidental MSR usage on, e.g. x86-64 */
#undef wrmsr
#define rdmsr(x, y, z) do { } while (0)
#define wrmsr(x, y, z) do { } while (0)
#define use_msr 0
#endif
#define DRV_NAME "pata_cs5536"
#define DRV_VERSION "0.0.7"
#define DRV_VERSION "0.0.8"
enum {
CFG = 0,
@ -75,8 +87,6 @@ enum {
IDE_ETC_NODMA = 0x03,
};
static int use_msr;
static const u32 msr_reg[4] = {
MSR_IDE_CFG, MSR_IDE_DTC, MSR_IDE_CAST, MSR_IDE_ETC,
};
@ -88,7 +98,7 @@ static const u8 pci_reg[4] = {
static inline int cs5536_read(struct pci_dev *pdev, int reg, u32 *val)
{
if (unlikely(use_msr)) {
u32 dummy;
u32 dummy __maybe_unused;
rdmsr(msr_reg[reg], *val, dummy);
return 0;
@ -294,8 +304,6 @@ MODULE_DESCRIPTION("low-level driver for the CS5536 IDE controller");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, cs5536);
MODULE_VERSION(DRV_VERSION);
module_param_named(msr, use_msr, int, 0644);
MODULE_PARM_DESC(msr, "Force using MSR to configure IDE function (Default: 0)");
module_init(cs5536_init);
module_exit(cs5536_exit);

View File

@ -392,7 +392,10 @@ static int atmtcp_attach(struct atm_vcc *vcc,int itf)
atm_dev_put(dev);
return -EMEDIUMTYPE;
}
if (PRIV(dev)->vcc) return -EBUSY;
if (PRIV(dev)->vcc) {
atm_dev_put(dev);
return -EBUSY;
}
}
else {
int error;

View File

@ -311,8 +311,10 @@ static void hci_uart_tty_close(struct tty_struct *tty)
if (test_and_clear_bit(HCI_UART_PROTO_SET, &hu->flags)) {
hu->proto->close(hu);
hci_unregister_dev(hdev);
hci_free_dev(hdev);
if (hdev) {
hci_unregister_dev(hdev);
hci_free_dev(hdev);
}
}
}
}

View File

@ -1192,12 +1192,19 @@ static void i9xx_chipset_flush(void)
writel(1, intel_private.i9xx_flush_page);
}
static void i965_write_entry(dma_addr_t addr, unsigned int entry,
static void i965_write_entry(dma_addr_t addr,
unsigned int entry,
unsigned int flags)
{
u32 pte_flags;
pte_flags = I810_PTE_VALID;
if (flags == AGP_USER_CACHED_MEMORY)
pte_flags |= I830_PTE_SYSTEM_CACHED;
/* Shift high bits down */
addr |= (addr >> 28) & 0xf0;
writel(addr | I810_PTE_VALID, intel_private.gtt + entry);
writel(addr | pte_flags, intel_private.gtt + entry);
}
static bool gen6_check_flags(unsigned int flags)

View File

@ -29,7 +29,6 @@
#include <linux/ramoops.h>
#define RAMOOPS_KERNMSG_HDR "===="
#define RAMOOPS_HEADER_SIZE (5 + sizeof(struct timeval))
#define RECORD_SIZE 4096
@ -65,8 +64,8 @@ static void ramoops_do_dump(struct kmsg_dumper *dumper,
struct ramoops_context, dump);
unsigned long s1_start, s2_start;
unsigned long l1_cpy, l2_cpy;
int res;
char *buf;
int res, hdr_size;
char *buf, *buf_orig;
struct timeval timestamp;
/* Only dump oopses if dump_oops is set */
@ -74,6 +73,8 @@ static void ramoops_do_dump(struct kmsg_dumper *dumper,
return;
buf = (char *)(cxt->virt_addr + (cxt->count * RECORD_SIZE));
buf_orig = buf;
memset(buf, '\0', RECORD_SIZE);
res = sprintf(buf, "%s", RAMOOPS_KERNMSG_HDR);
buf += res;
@ -81,8 +82,9 @@ static void ramoops_do_dump(struct kmsg_dumper *dumper,
res = sprintf(buf, "%lu.%lu\n", (long)timestamp.tv_sec, (long)timestamp.tv_usec);
buf += res;
l2_cpy = min(l2, (unsigned long)(RECORD_SIZE - RAMOOPS_HEADER_SIZE));
l1_cpy = min(l1, (unsigned long)(RECORD_SIZE - RAMOOPS_HEADER_SIZE) - l2_cpy);
hdr_size = buf - buf_orig;
l2_cpy = min(l2, (unsigned long)(RECORD_SIZE - hdr_size));
l1_cpy = min(l1, (unsigned long)(RECORD_SIZE - hdr_size) - l2_cpy);
s2_start = l2 - l2_cpy;
s1_start = l1 - l1_cpy;

View File

@ -449,7 +449,7 @@ mv_xor_slot_cleanup(struct mv_xor_chan *mv_chan)
static void mv_xor_tasklet(unsigned long data)
{
struct mv_xor_chan *chan = (struct mv_xor_chan *) data;
__mv_xor_slot_cleanup(chan);
mv_xor_slot_cleanup(chan);
}
static struct mv_xor_desc_slot *

View File

@ -56,15 +56,26 @@ static struct cs5535_gpio_chip {
* registers, see include/linux/cs5535.h.
*/
static void errata_outl(u32 val, unsigned long addr)
static void errata_outl(struct cs5535_gpio_chip *chip, u32 val,
unsigned int reg)
{
unsigned long addr = chip->base + 0x80 + reg;
/*
* According to the CS5536 errata (#36), after suspend
* a write to the high bank GPIO register will clear all
* non-selected bits; the recommended workaround is a
* read-modify-write operation.
*
* Don't apply this errata to the edge status GPIOs, as writing
* to their lower bits will clear them.
*/
val |= inl(addr);
if (reg != GPIO_POSITIVE_EDGE_STS && reg != GPIO_NEGATIVE_EDGE_STS) {
if (val & 0xffff)
val |= (inl(addr) & 0xffff); /* ignore the high bits */
else
val |= (inl(addr) ^ (val >> 16));
}
outl(val, addr);
}
@ -76,7 +87,7 @@ static void __cs5535_gpio_set(struct cs5535_gpio_chip *chip, unsigned offset,
outl(1 << offset, chip->base + reg);
else
/* high bank register */
errata_outl(1 << (offset - 16), chip->base + 0x80 + reg);
errata_outl(chip, 1 << (offset - 16), reg);
}
void cs5535_gpio_set(unsigned offset, unsigned int reg)
@ -98,7 +109,7 @@ static void __cs5535_gpio_clear(struct cs5535_gpio_chip *chip, unsigned offset,
outl(1 << (offset + 16), chip->base + reg);
else
/* high bank register */
errata_outl(1 << offset, chip->base + 0x80 + reg);
errata_outl(chip, 1 << offset, reg);
}
void cs5535_gpio_clear(unsigned offset, unsigned int reg)

View File

@ -1281,6 +1281,9 @@ int gpio_request_one(unsigned gpio, unsigned long flags, const char *label)
err = gpio_direction_output(gpio,
(flags & GPIOF_INIT_HIGH) ? 1 : 0);
if (err)
gpio_free(gpio);
return err;
}
EXPORT_SYMBOL_GPL(gpio_request_one);
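A brief usage sketch of the helper being fixed here (the GPIO number and label
are made up): gpio_request_one() combines request and direction setup, and with
the fix above it no longer leaks the GPIO when the direction call fails:

	int err;

	err = gpio_request_one(42, GPIOF_OUT_INIT_LOW, "my-led");
	if (err)
		return err;	/* GPIO 42 is not left requested on failure */

	gpio_set_value(42, 1);
	/* ...later... */
	gpio_free(42);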

View File

@ -135,7 +135,7 @@ static int __devinit rdc321x_gpio_probe(struct platform_device *pdev)
struct rdc321x_gpio *rdc321x_gpio_dev;
struct rdc321x_gpio_pdata *pdata;
pdata = pdev->dev.platform_data;
pdata = platform_get_drvdata(pdev);
if (!pdata) {
dev_err(&pdev->dev, "no platform data supplied\n");
return -ENODEV;

View File

@ -241,7 +241,7 @@ void drm_helper_disable_unused_functions(struct drm_device *dev)
}
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
if (encoder->crtc && !drm_helper_encoder_in_use(encoder)) {
if (!drm_helper_encoder_in_use(encoder)) {
drm_encoder_disable(encoder);
/* disconnect encoder from any connector */
encoder->crtc = NULL;
@ -874,7 +874,10 @@ static void output_poll_execute(struct work_struct *work)
continue;
connector->status = connector->funcs->detect(connector, false);
DRM_DEBUG_KMS("connector status updated to %d\n", connector->status);
DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %d to %d\n",
connector->base.id,
drm_get_connector_name(connector),
old_status, connector->status);
if (old_status != connector->status)
changed = true;
}

View File

@ -242,7 +242,7 @@ fail:
static enum drm_connector_status ch7017_detect(struct intel_dvo_device *dvo)
{
return connector_status_unknown;
return connector_status_connected;
}
static enum drm_mode_status ch7017_mode_valid(struct intel_dvo_device *dvo,

View File

@ -34,6 +34,7 @@
#include "i915_drm.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "../../../platform/x86/intel_ips.h"
#include <linux/pci.h>
#include <linux/vgaarb.h>
#include <linux/acpi.h>
@ -1870,6 +1871,26 @@ out_unlock:
}
EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
/**
* Tells the intel_ips driver that the i915 driver is now loaded, if
* IPS got loaded first.
*
* This awkward dance is so that neither module has to depend on the
* other in order for IPS to do the appropriate communication of
* GPU turbo limits to i915.
*/
static void
ips_ping_for_i915_load(void)
{
void (*link)(void);
link = symbol_get(ips_link_to_i915_driver);
if (link) {
link();
symbol_put(ips_link_to_i915_driver);
}
}
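The other half of this dance, as the intel_ips side might look, is sketched below; only the exported symbol name is taken from the call above, and the body is illustrative, not the real intel_ips implementation:

void ips_link_to_i915_driver(void)
{
	/* note that i915 is now present so GPU turbo limits can be exchanged */
}
EXPORT_SYMBOL_GPL(ips_link_to_i915_driver);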
/**
* i915_driver_load - setup chip and create an initial config
* @dev: DRM device
@ -2075,6 +2096,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
dev_priv->mchdev_lock = &mchdev_lock;
spin_unlock(&mchdev_lock);
ips_ping_for_i915_load();
return 0;
out_workqueue_free:


@ -2471,6 +2471,9 @@
# define MARIUNIT_CLOCK_GATE_DISABLE (1 << 18)
# define SVSMUNIT_CLOCK_GATE_DISABLE (1 << 1)
#define PCH_3DCGDIS1 0x46024
# define VFMUNIT_CLOCK_GATE_DISABLE (1 << 11)
#define FDI_PLL_FREQ_CTL 0x46030
#define FDI_PLL_FREQ_CHANGE_REQUEST (1<<24)
#define FDI_PLL_FREQ_LOCK_LIMIT_MASK 0xfff00
@ -2588,6 +2591,13 @@
#define ILK_DISPLAY_CHICKEN2 0x42004
#define ILK_DPARB_GATE (1<<22)
#define ILK_VSDPFD_FULL (1<<21)
#define ILK_DISPLAY_CHICKEN_FUSES 0x42014
#define ILK_INTERNAL_GRAPHICS_DISABLE (1<<31)
#define ILK_INTERNAL_DISPLAY_DISABLE (1<<30)
#define ILK_DISPLAY_DEBUG_DISABLE (1<<29)
#define ILK_HDCP_DISABLE (1<<25)
#define ILK_eDP_A_DISABLE (1<<24)
#define ILK_DESKTOP (1<<23)
#define ILK_DSPCLK_GATE 0x42020
#define ILK_DPARB_CLK_GATE (1<<5)
/* According to spec this bit 7/8/9 of 0x42020 should be set to enable FBC */


@ -5379,6 +5379,23 @@ static int intel_encoder_clones(struct drm_device *dev, int type_mask)
return index_mask;
}
static bool has_edp_a(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
if (!IS_MOBILE(dev))
return false;
if ((I915_READ(DP_A) & DP_DETECTED) == 0)
return false;
if (IS_GEN5(dev) &&
(I915_READ(ILK_DISPLAY_CHICKEN_FUSES) & ILK_eDP_A_DISABLE))
return false;
return true;
}
static void intel_setup_outputs(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@ -5396,7 +5413,7 @@ static void intel_setup_outputs(struct drm_device *dev)
if (HAS_PCH_SPLIT(dev)) {
dpd_is_edp = intel_dpd_is_edp(dev);
if (IS_MOBILE(dev) && (I915_READ(DP_A) & DP_DETECTED))
if (has_edp_a(dev))
intel_dp_init(dev, DP_A);
if (dpd_is_edp && (I915_READ(PCH_DP_D) & DP_DETECTED))
@ -5825,6 +5842,8 @@ void intel_init_clock_gating(struct drm_device *dev)
I915_WRITE(PCH_3DCGDIS0,
MARIUNIT_CLOCK_GATE_DISABLE |
SVSMUNIT_CLOCK_GATE_DISABLE);
I915_WRITE(PCH_3DCGDIS1,
VFMUNIT_CLOCK_GATE_DISABLE);
}
I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate);


@ -479,6 +479,7 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
uint16_t address = algo_data->address;
uint8_t msg[5];
uint8_t reply[2];
unsigned retry;
int msg_bytes;
int reply_bytes;
int ret;
@ -513,14 +514,33 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
break;
}
for (;;) {
ret = intel_dp_aux_ch(intel_dp,
msg, msg_bytes,
reply, reply_bytes);
for (retry = 0; retry < 5; retry++) {
ret = intel_dp_aux_ch(intel_dp,
msg, msg_bytes,
reply, reply_bytes);
if (ret < 0) {
DRM_DEBUG_KMS("aux_ch failed %d\n", ret);
return ret;
}
switch (reply[0] & AUX_NATIVE_REPLY_MASK) {
case AUX_NATIVE_REPLY_ACK:
/* I2C-over-AUX Reply field is only valid
* when paired with AUX ACK.
*/
break;
case AUX_NATIVE_REPLY_NACK:
DRM_DEBUG_KMS("aux_ch native nack\n");
return -EREMOTEIO;
case AUX_NATIVE_REPLY_DEFER:
udelay(100);
continue;
default:
DRM_ERROR("aux_ch invalid native reply 0x%02x\n",
reply[0]);
return -EREMOTEIO;
}
switch (reply[0] & AUX_I2C_REPLY_MASK) {
case AUX_I2C_REPLY_ACK:
if (mode == MODE_I2C_READ) {
@ -528,17 +548,20 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
}
return reply_bytes - 1;
case AUX_I2C_REPLY_NACK:
DRM_DEBUG_KMS("aux_ch nack\n");
DRM_DEBUG_KMS("aux_i2c nack\n");
return -EREMOTEIO;
case AUX_I2C_REPLY_DEFER:
DRM_DEBUG_KMS("aux_ch defer\n");
DRM_DEBUG_KMS("aux_i2c defer\n");
udelay(100);
break;
default:
DRM_ERROR("aux_ch invalid reply 0x%02x\n", reply[0]);
DRM_ERROR("aux_i2c invalid reply 0x%02x\n", reply[0]);
return -EREMOTEIO;
}
}
DRM_ERROR("too many retries, giving up\n");
return -EREMOTEIO;
}
static int


@ -696,20 +696,17 @@ int intel_wait_ring_buffer(struct drm_device *dev,
drm_i915_private_t *dev_priv = dev->dev_private;
u32 head;
head = intel_read_status_page(ring, 4);
if (head) {
ring->head = head & HEAD_ADDR;
ring->space = ring->head - (ring->tail + 8);
if (ring->space < 0)
ring->space += ring->size;
if (ring->space >= n)
return 0;
}
trace_i915_ring_wait_begin (dev);
end = jiffies + 3 * HZ;
do {
ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
/* If the reported head position has wrapped or hasn't advanced,
* fallback to the slow and accurate path.
*/
head = intel_read_status_page(ring, 4);
if (head < ring->actual_head)
head = I915_READ_HEAD(ring);
ring->actual_head = head;
ring->head = head & HEAD_ADDR;
ring->space = ring->head - (ring->tail + 8);
if (ring->space < 0)
ring->space += ring->size;


@ -30,8 +30,9 @@ struct intel_ring_buffer {
struct drm_device *dev;
struct drm_gem_object *gem_object;
unsigned int head;
unsigned int tail;
u32 actual_head;
u32 head;
u32 tail;
int space;
struct intel_hw_status_page status_page;


@ -1908,9 +1908,12 @@ intel_sdvo_select_i2c_bus(struct drm_i915_private *dev_priv,
speed = mapping->i2c_speed;
}
sdvo->i2c = &dev_priv->gmbus[pin].adapter;
intel_gmbus_set_speed(sdvo->i2c, speed);
intel_gmbus_force_bit(sdvo->i2c, true);
if (pin < GMBUS_NUM_PORTS) {
sdvo->i2c = &dev_priv->gmbus[pin].adapter;
intel_gmbus_set_speed(sdvo->i2c, speed);
intel_gmbus_force_bit(sdvo->i2c, true);
} else
sdvo->i2c = &dev_priv->gmbus[GMBUS_PORT_DPB].adapter;
}
static bool
@ -2037,13 +2040,14 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
SDVO_COLORIMETRY_RGB256);
connector->connector_type = DRM_MODE_CONNECTOR_HDMIA;
intel_sdvo_add_hdmi_properties(intel_sdvo_connector);
intel_sdvo->is_hdmi = true;
}
intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
(1 << INTEL_ANALOG_CLONE_BIT));
intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
if (intel_sdvo->is_hdmi)
intel_sdvo_add_hdmi_properties(intel_sdvo_connector);
return true;
}


@ -253,7 +253,8 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id);
atombios_blank_crtc(crtc, ATOM_ENABLE);
if (radeon_crtc->enabled)
atombios_blank_crtc(crtc, ATOM_ENABLE);
if (ASIC_IS_DCE3(rdev))
atombios_enable_crtc_memreq(crtc, ATOM_DISABLE);
atombios_enable_crtc(crtc, ATOM_DISABLE);
@ -530,7 +531,7 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
dp_clock = dig_connector->dp_clock;
}
}
#if 0 /* doesn't work properly on some laptops */
/* use recommended ref_div for ss */
if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
if (ss_enabled) {
@ -540,7 +541,7 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
}
}
}
#endif
if (ASIC_IS_AVIVO(rdev)) {
/* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */
if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1)


@ -748,6 +748,8 @@ void evergreen_pcie_gart_tlb_flush(struct radeon_device *rdev)
unsigned i;
u32 tmp;
WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
for (i = 0; i < rdev->usec_timeout; i++) {
/* read MC_STATUS */
@ -1922,7 +1924,6 @@ bool evergreen_gpu_is_lockup(struct radeon_device *rdev)
static int evergreen_gpu_soft_reset(struct radeon_device *rdev)
{
struct evergreen_mc_save save;
u32 srbm_reset = 0;
u32 grbm_reset = 0;
dev_info(rdev->dev, "GPU softreset \n");
@ -1961,16 +1962,6 @@ static int evergreen_gpu_soft_reset(struct radeon_device *rdev)
udelay(50);
WREG32(GRBM_SOFT_RESET, 0);
(void)RREG32(GRBM_SOFT_RESET);
/* reset all the system blocks */
srbm_reset = SRBM_SOFT_RESET_ALL_MASK;
dev_info(rdev->dev, " SRBM_SOFT_RESET=0x%08X\n", srbm_reset);
WREG32(SRBM_SOFT_RESET, srbm_reset);
(void)RREG32(SRBM_SOFT_RESET);
udelay(50);
WREG32(SRBM_SOFT_RESET, 0);
(void)RREG32(SRBM_SOFT_RESET);
/* Wait a little for things to settle down */
udelay(50);
dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
@ -1981,10 +1972,6 @@ static int evergreen_gpu_soft_reset(struct radeon_device *rdev)
RREG32(GRBM_STATUS_SE1));
dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
RREG32(SRBM_STATUS));
/* After reset we need to reinit the asic as GPU often endup in an
* incoherent state.
*/
atom_asic_init(rdev->mode_info.atom_context);
evergreen_mc_resume(rdev, &save);
return 0;
}
@ -2596,6 +2583,11 @@ int evergreen_resume(struct radeon_device *rdev)
{
int r;
/* reset the asic, the gfx blocks are often in a bad state
* after the driver is unloaded or after a resume
*/
if (radeon_asic_reset(rdev))
dev_warn(rdev->dev, "GPU reset failed !\n");
/* Do not reset GPU before posting, on rv770 hw unlike on r500 hw,
* posting will perform necessary task to bring back GPU into good
* shape.
@ -2712,6 +2704,11 @@ int evergreen_init(struct radeon_device *rdev)
r = radeon_atombios_init(rdev);
if (r)
return r;
/* reset the asic, the gfx blocks are often in a bad state
* after the driver is unloaded or after a resume
*/
if (radeon_asic_reset(rdev))
dev_warn(rdev->dev, "GPU reset failed !\n");
/* Post card if necessary */
if (!evergreen_card_posted(rdev)) {
if (!rdev->bios) {


@ -174,6 +174,7 @@
#define HDP_NONSURFACE_BASE 0x2C04
#define HDP_NONSURFACE_INFO 0x2C08
#define HDP_NONSURFACE_SIZE 0x2C0C
#define HDP_MEM_COHERENCY_FLUSH_CNTL 0x5480
#define HDP_REG_COHERENCY_FLUSH_CNTL 0x54A0
#define HDP_TILING_CONFIG 0x2F3C


@ -1342,13 +1342,19 @@ bool r600_gpu_is_lockup(struct radeon_device *rdev)
u32 srbm_status;
u32 grbm_status;
u32 grbm_status2;
struct r100_gpu_lockup *lockup;
int r;
if (rdev->family >= CHIP_RV770)
lockup = &rdev->config.rv770.lockup;
else
lockup = &rdev->config.r600.lockup;
srbm_status = RREG32(R_000E50_SRBM_STATUS);
grbm_status = RREG32(R_008010_GRBM_STATUS);
grbm_status2 = RREG32(R_008014_GRBM_STATUS2);
if (!G_008010_GUI_ACTIVE(grbm_status)) {
r100_gpu_lockup_update(&rdev->config.r300.lockup, &rdev->cp);
r100_gpu_lockup_update(lockup, &rdev->cp);
return false;
}
/* force CP activities */
@ -1360,7 +1366,7 @@ bool r600_gpu_is_lockup(struct radeon_device *rdev)
radeon_ring_unlock_commit(rdev);
}
rdev->cp.rptr = RREG32(R600_CP_RB_RPTR);
return r100_gpu_cp_is_lockup(rdev, &rdev->config.r300.lockup, &rdev->cp);
return r100_gpu_cp_is_lockup(rdev, lockup, &rdev->cp);
}
int r600_asic_reset(struct radeon_device *rdev)


@ -315,11 +315,10 @@ static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
if (array_mode == V_0280A0_ARRAY_LINEAR_GENERAL) {
/* the initial DDX does bad things with the CB size occasionally */
/* it rounds up height too far for slice tile max but the BO is smaller */
tmp = (height - 7) * 8 * bpe;
if ((tmp + track->cb_color_bo_offset[i]) > radeon_bo_size(track->cb_color_bo[i])) {
dev_warn(p->dev, "%s offset[%d] %d %d %lu too big\n", __func__, i, track->cb_color_bo_offset[i], tmp, radeon_bo_size(track->cb_color_bo[i]));
return -EINVAL;
}
/* r600c,g also seem to flush at bad times in some apps resulting in
* bogus values here. So for linear just allow anything to avoid breaking
* broken userspace.
*/
} else {
dev_warn(p->dev, "%s offset[%d] %d %d %lu too big\n", __func__, i, track->cb_color_bo_offset[i], tmp, radeon_bo_size(track->cb_color_bo[i]));
return -EINVAL;


@ -910,11 +910,6 @@ int radeon_resume_kms(struct drm_device *dev)
radeon_pm_resume(rdev);
radeon_restore_bios_scratch_regs(rdev);
/* turn on display hw */
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
}
radeon_fbdev_set_suspend(rdev, 0);
release_console_sem();
@ -922,6 +917,10 @@ int radeon_resume_kms(struct drm_device *dev)
radeon_hpd_init(rdev);
/* blat the mode back in */
drm_helper_resume_force_mode(dev);
/* turn on display hw */
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
}
return 0;
}


@ -232,9 +232,28 @@ static struct drm_driver driver_old = {
static struct drm_driver kms_driver;
static void radeon_kick_out_firmware_fb(struct pci_dev *pdev)
{
struct apertures_struct *ap;
bool primary = false;
ap = alloc_apertures(1);
ap->ranges[0].base = pci_resource_start(pdev, 0);
ap->ranges[0].size = pci_resource_len(pdev, 0);
#ifdef CONFIG_X86
primary = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
#endif
remove_conflicting_framebuffers(ap, "radeondrmfb", primary);
kfree(ap);
}
static int __devinit
radeon_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
/* Get rid of things like offb */
radeon_kick_out_firmware_fb(pdev);
return drm_get_pci_dev(pdev, ent, &kms_driver);
}


@ -245,7 +245,7 @@ static int radeonfb_create(struct radeon_fbdev *rfbdev,
goto out_unref;
}
info->apertures->ranges[0].base = rdev->ddev->mode_config.fb_base;
info->apertures->ranges[0].size = rdev->mc.real_vram_size;
info->apertures->ranges[0].size = rdev->mc.aper_size;
info->fix.mmio_start = 0;
info->fix.mmio_len = 0;


@ -234,7 +234,6 @@ static int s3c_hwmon_create_attr(struct device *dev,
attr->index = channel;
attr->dev_attr.attr.name = attrs->in_name;
attr->dev_attr.attr.mode = S_IRUGO;
attr->dev_attr.attr.owner = THIS_MODULE;
attr->dev_attr.show = s3c_hwmon_ch_show;
ret = device_create_file(dev, &attr->dev_attr);
@ -252,7 +251,6 @@ static int s3c_hwmon_create_attr(struct device *dev,
attr->index = channel;
attr->dev_attr.attr.name = attrs->label_name;
attr->dev_attr.attr.mode = S_IRUGO;
attr->dev_attr.attr.owner = THIS_MODULE;
attr->dev_attr.show = s3c_hwmon_label_show;
ret = device_create_file(dev, &attr->dev_attr);


@ -1900,6 +1900,7 @@ static void do_disconnect_req(struct gigaset_capi_ctr *iif,
if (b3skb == NULL) {
dev_err(cs->dev, "%s: out of memory\n", __func__);
send_conf(iif, ap, skb, CAPI_MSGOSRESOURCEERR);
kfree(b3cmsg);
return;
}
capi_cmsg2message(b3cmsg,


@ -267,7 +267,7 @@ void led_blink_set(struct led_classdev *led_cdev,
unsigned long *delay_off)
{
if (led_cdev->blink_set &&
led_cdev->blink_set(led_cdev, delay_on, delay_off))
!led_cdev->blink_set(led_cdev, delay_on, delay_off))
return;
/* blink with 1 Hz as default if nothing specified */


@ -26,8 +26,8 @@ static struct ir_scancode rc6_mce[] = {
{ 0x800f040a, KEY_DELETE },
{ 0x800f040b, KEY_ENTER },
{ 0x800f040c, KEY_POWER },
{ 0x800f040d, KEY_PROG1 }, /* Windows MCE button */
{ 0x800f040c, KEY_POWER }, /* PC Power */
{ 0x800f040d, KEY_PROG1 }, /* Windows MCE button */
{ 0x800f040e, KEY_MUTE },
{ 0x800f040f, KEY_INFO },
@ -56,31 +56,32 @@ static struct ir_scancode rc6_mce[] = {
{ 0x800f0422, KEY_OK },
{ 0x800f0423, KEY_EXIT },
{ 0x800f0424, KEY_DVD },
{ 0x800f0425, KEY_TUNER }, /* LiveTV */
{ 0x800f0426, KEY_EPG }, /* Guide */
{ 0x800f0427, KEY_ZOOM }, /* Aspect */
{ 0x800f0425, KEY_TUNER }, /* LiveTV */
{ 0x800f0426, KEY_EPG }, /* Guide */
{ 0x800f0427, KEY_ZOOM }, /* Aspect */
{ 0x800f043a, KEY_BRIGHTNESSUP },
{ 0x800f0446, KEY_TV },
{ 0x800f0447, KEY_AUDIO }, /* My Music */
{ 0x800f0448, KEY_PVR }, /* RecordedTV */
{ 0x800f0447, KEY_AUDIO }, /* My Music */
{ 0x800f0448, KEY_PVR }, /* RecordedTV */
{ 0x800f0449, KEY_CAMERA },
{ 0x800f044a, KEY_VIDEO },
{ 0x800f044c, KEY_LANGUAGE },
{ 0x800f044d, KEY_TITLE },
{ 0x800f044e, KEY_PRINT }, /* Print - HP OEM version of remote */
{ 0x800f044e, KEY_PRINT }, /* Print - HP OEM version of remote */
{ 0x800f0450, KEY_RADIO },
{ 0x800f045a, KEY_SUBTITLE }, /* Caption/Teletext */
{ 0x800f045a, KEY_SUBTITLE }, /* Caption/Teletext */
{ 0x800f045b, KEY_RED },
{ 0x800f045c, KEY_GREEN },
{ 0x800f045d, KEY_YELLOW },
{ 0x800f045e, KEY_BLUE },
{ 0x800f0465, KEY_POWER2 }, /* TV Power */
{ 0x800f046e, KEY_PLAYPAUSE },
{ 0x800f046f, KEY_MEDIA }, /* Start media application (NEW) */
{ 0x800f046f, KEY_MEDIA }, /* Start media application (NEW) */
{ 0x800f0480, KEY_BRIGHTNESSDOWN },
{ 0x800f0481, KEY_PLAYPAUSE },


@ -522,10 +522,8 @@ unsigned int lirc_dev_fop_poll(struct file *file, poll_table *wait)
dev_dbg(ir->d.dev, LOGHEAD "poll called\n", ir->d.name, ir->d.minor);
if (!ir->attached) {
mutex_unlock(&ir->irctl_lock);
if (!ir->attached)
return POLLERR;
}
poll_wait(file, &ir->buf->wait_poll, wait);
@ -649,18 +647,18 @@ ssize_t lirc_dev_fop_read(struct file *file,
if (!buf)
return -ENOMEM;
if (mutex_lock_interruptible(&ir->irctl_lock))
return -ERESTARTSYS;
if (mutex_lock_interruptible(&ir->irctl_lock)) {
ret = -ERESTARTSYS;
goto out_unlocked;
}
if (!ir->attached) {
mutex_unlock(&ir->irctl_lock);
return -ENODEV;
ret = -ENODEV;
goto out_locked;
}
if (length % ir->chunk_size) {
dev_dbg(ir->d.dev, LOGHEAD "read result = -EINVAL\n",
ir->d.name, ir->d.minor);
mutex_unlock(&ir->irctl_lock);
return -EINVAL;
ret = -EINVAL;
goto out_locked;
}
/*
@ -711,18 +709,23 @@ ssize_t lirc_dev_fop_read(struct file *file,
lirc_buffer_read(ir->buf, buf);
ret = copy_to_user((void *)buffer+written, buf,
ir->buf->chunk_size);
written += ir->buf->chunk_size;
if (!ret)
written += ir->buf->chunk_size;
else
ret = -EFAULT;
}
}
remove_wait_queue(&ir->buf->wait_poll, &wait);
set_current_state(TASK_RUNNING);
out_locked:
mutex_unlock(&ir->irctl_lock);
out_unlocked:
kfree(buf);
dev_dbg(ir->d.dev, LOGHEAD "read result = %s (%d)\n",
ir->d.name, ir->d.minor, ret ? "-EFAULT" : "OK", ret);
ir->d.name, ir->d.minor, ret ? "<fail>" : "<ok>", ret);
return ret ? ret : written;
}


@ -35,10 +35,10 @@
#include <linux/device.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/usb.h>
#include <linux/input.h>
#include <linux/usb.h>
#include <linux/usb/input.h>
#include <media/ir-core.h>
#include <media/ir-common.h>
#define DRIVER_VERSION "1.91"
#define DRIVER_AUTHOR "Jarod Wilson <jarod@wilsonet.com>"
@ -49,6 +49,7 @@
#define USB_BUFLEN 32 /* USB reception buffer length */
#define USB_CTRL_MSG_SZ 2 /* Size of usb ctrl msg on gen1 hw */
#define MCE_G1_INIT_MSGS 40 /* Init messages on gen1 hw to throw out */
#define MS_TO_NS(msec) ((msec) * 1000)
/* MCE constants */
#define MCE_CMDBUF_SIZE 384 /* MCE Command buffer length */
@ -74,6 +75,7 @@
#define MCE_PACKET_LENGTH_MASK 0x1f /* Packet length mask */
/* Sub-commands, which follow MCE_COMMAND_HEADER or MCE_HW_CMD_HEADER */
#define MCE_CMD_SIG_END 0x01 /* End of signal */
#define MCE_CMD_PING 0x03 /* Ping device */
#define MCE_CMD_UNKNOWN 0x04 /* Unknown */
#define MCE_CMD_UNKNOWN2 0x05 /* Unknown */
@ -91,6 +93,7 @@
#define MCE_CMD_G_TXMASK 0x13 /* Set TX port bitmask */
#define MCE_CMD_S_RXSENSOR 0x14 /* Set RX sensor (std/learning) */
#define MCE_CMD_G_RXSENSOR 0x15 /* Get RX sensor (std/learning) */
#define MCE_RSP_PULSE_COUNT 0x15 /* RX pulse count (only if learning) */
#define MCE_CMD_TX_PORTS 0x16 /* Get number of TX ports */
#define MCE_CMD_G_WAKESRC 0x17 /* Get wake source */
#define MCE_CMD_UNKNOWN7 0x18 /* Unknown */
@ -146,14 +149,16 @@ enum mceusb_model_type {
MCE_GEN3,
MCE_GEN2_TX_INV,
POLARIS_EVK,
CX_HYBRID_TV,
};
struct mceusb_model {
u32 mce_gen1:1;
u32 mce_gen2:1;
u32 mce_gen3:1;
u32 tx_mask_inverted:1;
u32 tx_mask_normal:1;
u32 is_polaris:1;
u32 no_tx:1;
const char *rc_map; /* Allow specify a per-board map */
const char *name; /* per-board name */
@ -162,18 +167,18 @@ struct mceusb_model {
static const struct mceusb_model mceusb_model[] = {
[MCE_GEN1] = {
.mce_gen1 = 1,
.tx_mask_inverted = 1,
.tx_mask_normal = 1,
},
[MCE_GEN2] = {
.mce_gen2 = 1,
},
[MCE_GEN2_TX_INV] = {
.mce_gen2 = 1,
.tx_mask_inverted = 1,
.tx_mask_normal = 1,
},
[MCE_GEN3] = {
.mce_gen3 = 1,
.tx_mask_inverted = 1,
.tx_mask_normal = 1,
},
[POLARIS_EVK] = {
.is_polaris = 1,
@ -183,7 +188,12 @@ static const struct mceusb_model mceusb_model[] = {
* to allow testing it
*/
.rc_map = RC_MAP_RC5_HAUPPAUGE_NEW,
.name = "cx231xx MCE IR",
.name = "Conexant Hybrid TV (cx231xx) MCE IR",
},
[CX_HYBRID_TV] = {
.is_polaris = 1,
.no_tx = 1, /* tx isn't wired up at all */
.name = "Conexant Hybrid TV (cx231xx) MCE IR",
},
};
@ -273,6 +283,8 @@ static struct usb_device_id mceusb_dev_table[] = {
{ USB_DEVICE(VENDOR_FORMOSA, 0xe03c) },
/* Formosa Industrial Computing */
{ USB_DEVICE(VENDOR_FORMOSA, 0xe03e) },
/* Fintek eHome Infrared Transceiver (HP branded) */
{ USB_DEVICE(VENDOR_FINTEK, 0x5168) },
/* Fintek eHome Infrared Transceiver */
{ USB_DEVICE(VENDOR_FINTEK, 0x0602) },
/* Fintek eHome Infrared Transceiver (in the AOpen MP45) */
@ -292,9 +304,12 @@ static struct usb_device_id mceusb_dev_table[] = {
{ USB_DEVICE(VENDOR_NORTHSTAR, 0xe004) },
/* TiVo PC IR Receiver */
{ USB_DEVICE(VENDOR_TIVO, 0x2000) },
/* Conexant SDK */
/* Conexant Hybrid TV "Shelby" Polaris SDK */
{ USB_DEVICE(VENDOR_CONEXANT, 0x58a1),
.driver_info = POLARIS_EVK },
/* Conexant Hybrid TV RDU253S Polaris */
{ USB_DEVICE(VENDOR_CONEXANT, 0x58a5),
.driver_info = CX_HYBRID_TV },
/* Terminating entry */
{ }
};
@ -303,7 +318,10 @@ static struct usb_device_id mceusb_dev_table[] = {
struct mceusb_dev {
/* ir-core bits */
struct ir_dev_props *props;
struct ir_raw_event rawir;
/* optional features we can enable */
bool carrier_report_enabled;
bool learning_enabled;
/* core device bits */
struct device *dev;
@ -318,6 +336,8 @@ struct mceusb_dev {
/* buffers and dma */
unsigned char *buf_in;
unsigned int len_in;
dma_addr_t dma_in;
dma_addr_t dma_out;
enum {
CMD_HEADER = 0,
@ -325,15 +345,14 @@ struct mceusb_dev {
CMD_DATA,
PARSE_IRDATA,
} parser_state;
u8 cmd, rem; /* Remaining IR data bytes in packet */
dma_addr_t dma_in;
dma_addr_t dma_out;
u8 cmd, rem; /* Remaining IR data bytes in packet */
struct {
u32 connected:1;
u32 tx_mask_inverted:1;
u32 tx_mask_normal:1;
u32 microsoft_gen1:1;
u32 no_tx:1;
} flags;
/* transmit support */
@ -408,9 +427,10 @@ static int mceusb_cmdsize(u8 cmd, u8 subcmd)
case MCE_CMD_UNKNOWN:
case MCE_CMD_S_CARRIER:
case MCE_CMD_S_TIMEOUT:
case MCE_CMD_G_RXSENSOR:
case MCE_RSP_PULSE_COUNT:
datasize = 2;
break;
case MCE_CMD_SIG_END:
case MCE_CMD_S_TXMASK:
case MCE_CMD_S_RXSENSOR:
datasize = 1;
@ -433,7 +453,7 @@ static void mceusb_dev_printdata(struct mceusb_dev *ir, char *buf,
return;
/* skip meaningless 0xb1 0x60 header bytes on orig receiver */
if (ir->flags.microsoft_gen1 && !out)
if (ir->flags.microsoft_gen1 && !out && !offset)
skip = 2;
if (len <= skip)
@ -491,6 +511,9 @@ static void mceusb_dev_printdata(struct mceusb_dev *ir, char *buf,
break;
case MCE_COMMAND_HEADER:
switch (subcmd) {
case MCE_CMD_SIG_END:
dev_info(dev, "End of signal\n");
break;
case MCE_CMD_PING:
dev_info(dev, "Ping\n");
break;
@ -525,10 +548,11 @@ static void mceusb_dev_printdata(struct mceusb_dev *ir, char *buf,
inout, data1 == 0x02 ? "short" : "long");
break;
case MCE_CMD_G_RXSENSOR:
if (len == 2)
/* aka MCE_RSP_PULSE_COUNT */
if (out)
dev_info(dev, "Get receive sensor\n");
else
dev_info(dev, "Received pulse count is %d\n",
else if (ir->learning_enabled)
dev_info(dev, "RX pulse count: %d\n",
((data1 << 8) | data2));
break;
case MCE_RSP_CMD_INVALID:
@ -724,16 +748,16 @@ out:
return ret ? ret : n;
}
/* Sets active IR outputs -- mce devices typically (all?) have two */
/* Sets active IR outputs -- mce devices typically have two */
static int mceusb_set_tx_mask(void *priv, u32 mask)
{
struct mceusb_dev *ir = priv;
if (ir->flags.tx_mask_inverted)
if (ir->flags.tx_mask_normal)
ir->tx_mask = mask;
else
ir->tx_mask = (mask != MCE_DEFAULT_TX_MASK ?
mask ^ MCE_DEFAULT_TX_MASK : mask) << 1;
else
ir->tx_mask = mask;
return 0;
}
@ -752,7 +776,7 @@ static int mceusb_set_tx_carrier(void *priv, u32 carrier)
if (carrier == 0) {
ir->carrier = carrier;
cmdbuf[2] = 0x01;
cmdbuf[2] = MCE_CMD_SIG_END;
cmdbuf[3] = MCE_IRDATA_TRAILER;
dev_dbg(ir->dev, "%s: disabling carrier "
"modulation\n", __func__);
@ -782,6 +806,34 @@ static int mceusb_set_tx_carrier(void *priv, u32 carrier)
return carrier;
}
/*
* We don't do anything but print debug spew for many of the command bits
* we receive from the hardware, but some of them are useful information
* we want to store so that we can use them.
*/
static void mceusb_handle_command(struct mceusb_dev *ir, int index)
{
u8 hi = ir->buf_in[index + 1] & 0xff;
u8 lo = ir->buf_in[index + 2] & 0xff;
switch (ir->buf_in[index]) {
/* 2-byte return value commands */
case MCE_CMD_S_TIMEOUT:
ir->props->timeout = MS_TO_NS((hi << 8 | lo) / 2);
break;
/* 1-byte return value commands */
case MCE_CMD_S_TXMASK:
ir->tx_mask = hi;
break;
case MCE_CMD_S_RXSENSOR:
ir->learning_enabled = (hi == 0x02);
break;
default:
break;
}
}
static void mceusb_process_ir_data(struct mceusb_dev *ir, int buf_len)
{
DEFINE_IR_RAW_EVENT(rawir);
@ -791,39 +843,30 @@ static void mceusb_process_ir_data(struct mceusb_dev *ir, int buf_len)
if (ir->flags.microsoft_gen1)
i = 2;
/* if there's no data, just return now */
if (buf_len <= i)
return;
for (; i < buf_len; i++) {
switch (ir->parser_state) {
case SUBCMD:
ir->rem = mceusb_cmdsize(ir->cmd, ir->buf_in[i]);
mceusb_dev_printdata(ir, ir->buf_in, i - 1,
ir->rem + 2, false);
mceusb_handle_command(ir, i);
ir->parser_state = CMD_DATA;
break;
case PARSE_IRDATA:
ir->rem--;
rawir.pulse = ((ir->buf_in[i] & MCE_PULSE_BIT) != 0);
rawir.duration = (ir->buf_in[i] & MCE_PULSE_MASK)
* MCE_TIME_UNIT * 1000;
if ((ir->buf_in[i] & MCE_PULSE_MASK) == 0x7f) {
if (ir->rawir.pulse == rawir.pulse) {
ir->rawir.duration += rawir.duration;
} else {
ir->rawir.duration = rawir.duration;
ir->rawir.pulse = rawir.pulse;
}
if (ir->rem)
break;
}
rawir.duration += ir->rawir.duration;
ir->rawir.duration = 0;
ir->rawir.pulse = rawir.pulse;
* MS_TO_NS(MCE_TIME_UNIT);
dev_dbg(ir->dev, "Storing %s with duration %d\n",
rawir.pulse ? "pulse" : "space",
rawir.duration);
ir_raw_event_store(ir->idev, &rawir);
ir_raw_event_store_with_filter(ir->idev, &rawir);
break;
case CMD_DATA:
ir->rem--;
@ -839,17 +882,10 @@ static void mceusb_process_ir_data(struct mceusb_dev *ir, int buf_len)
continue;
}
ir->rem = (ir->cmd & MCE_PACKET_LENGTH_MASK);
mceusb_dev_printdata(ir, ir->buf_in, i, ir->rem + 1, false);
if (ir->rem) {
mceusb_dev_printdata(ir, ir->buf_in,
i, ir->rem + 1, false);
if (ir->rem)
ir->parser_state = PARSE_IRDATA;
break;
}
/*
* a package with len=0 (e. g. 0x80) means end of
* data. We could use it to do the call to
* ir_raw_event_handle(). For now, we don't need to
* use it.
*/
break;
}
@ -984,9 +1020,11 @@ static void mceusb_get_parameters(struct mceusb_dev *ir)
mce_async_out(ir, GET_CARRIER_FREQ, sizeof(GET_CARRIER_FREQ));
mce_sync_in(ir, NULL, maxp);
/* get the transmitter bitmask */
mce_async_out(ir, GET_TX_BITMASK, sizeof(GET_TX_BITMASK));
mce_sync_in(ir, NULL, maxp);
if (!ir->flags.no_tx) {
/* get the transmitter bitmask */
mce_async_out(ir, GET_TX_BITMASK, sizeof(GET_TX_BITMASK));
mce_sync_in(ir, NULL, maxp);
}
/* get receiver timeout value */
mce_async_out(ir, GET_RX_TIMEOUT, sizeof(GET_RX_TIMEOUT));
@ -1035,12 +1073,18 @@ static struct input_dev *mceusb_init_input_dev(struct mceusb_dev *ir)
props->priv = ir;
props->driver_type = RC_DRIVER_IR_RAW;
props->allowed_protos = IR_TYPE_ALL;
props->s_tx_mask = mceusb_set_tx_mask;
props->s_tx_carrier = mceusb_set_tx_carrier;
props->tx_ir = mceusb_tx_ir;
props->timeout = MS_TO_NS(1000);
if (!ir->flags.no_tx) {
props->s_tx_mask = mceusb_set_tx_mask;
props->s_tx_carrier = mceusb_set_tx_carrier;
props->tx_ir = mceusb_tx_ir;
}
ir->props = props;
usb_to_input_id(ir->usbdev, &idev->id);
idev->dev.parent = ir->dev;
if (mceusb_model[ir->model].rc_map)
rc_map = mceusb_model[ir->model].rc_map;
@ -1074,16 +1118,16 @@ static int __devinit mceusb_dev_probe(struct usb_interface *intf,
enum mceusb_model_type model = id->driver_info;
bool is_gen3;
bool is_microsoft_gen1;
bool tx_mask_inverted;
bool tx_mask_normal;
bool is_polaris;
dev_dbg(&intf->dev, ": %s called\n", __func__);
dev_dbg(&intf->dev, "%s called\n", __func__);
idesc = intf->cur_altsetting;
is_gen3 = mceusb_model[model].mce_gen3;
is_microsoft_gen1 = mceusb_model[model].mce_gen1;
tx_mask_inverted = mceusb_model[model].tx_mask_inverted;
tx_mask_normal = mceusb_model[model].tx_mask_normal;
is_polaris = mceusb_model[model].is_polaris;
if (is_polaris) {
@ -1107,7 +1151,7 @@ static int __devinit mceusb_dev_probe(struct usb_interface *intf,
ep_in = ep;
ep_in->bmAttributes = USB_ENDPOINT_XFER_INT;
ep_in->bInterval = 1;
dev_dbg(&intf->dev, ": acceptable inbound endpoint "
dev_dbg(&intf->dev, "acceptable inbound endpoint "
"found\n");
}
@ -1122,12 +1166,12 @@ static int __devinit mceusb_dev_probe(struct usb_interface *intf,
ep_out = ep;
ep_out->bmAttributes = USB_ENDPOINT_XFER_INT;
ep_out->bInterval = 1;
dev_dbg(&intf->dev, ": acceptable outbound endpoint "
dev_dbg(&intf->dev, "acceptable outbound endpoint "
"found\n");
}
}
if (ep_in == NULL) {
dev_dbg(&intf->dev, ": inbound and/or endpoint not found\n");
dev_dbg(&intf->dev, "inbound and/or endpoint not found\n");
return -ENODEV;
}
@ -1150,11 +1194,10 @@ static int __devinit mceusb_dev_probe(struct usb_interface *intf,
ir->dev = &intf->dev;
ir->len_in = maxp;
ir->flags.microsoft_gen1 = is_microsoft_gen1;
ir->flags.tx_mask_inverted = tx_mask_inverted;
ir->flags.tx_mask_normal = tx_mask_normal;
ir->flags.no_tx = mceusb_model[model].no_tx;
ir->model = model;
init_ir_raw_event(&ir->rawir);
/* Saving usb interface data for use by the transmitter routine */
ir->usb_ep_in = ep_in;
ir->usb_ep_out = ep_out;
@ -1191,7 +1234,8 @@ static int __devinit mceusb_dev_probe(struct usb_interface *intf,
mceusb_get_parameters(ir);
mceusb_set_tx_mask(ir, MCE_DEFAULT_TX_MASK);
if (!ir->flags.no_tx)
mceusb_set_tx_mask(ir, MCE_DEFAULT_TX_MASK);
usb_set_intfdata(intf, ir);


@ -603,6 +603,8 @@ static void nvt_process_rx_ir_data(struct nvt_dev *nvt)
count = nvt->pkts;
nvt_dbg_verbose("Processing buffer of len %d", count);
init_ir_raw_event(&rawir);
for (i = 0; i < count; i++) {
nvt->pkts--;
sample = nvt->buf[i];
@ -643,11 +645,15 @@ static void nvt_process_rx_ir_data(struct nvt_dev *nvt)
* indicates end of IR signal, but new data incoming. In both
* cases, it means we're ready to call ir_raw_event_handle
*/
if (sample == BUF_PULSE_BIT || ((sample != BUF_LEN_MASK) &&
(sample & BUF_REPEAT_MASK) == BUF_REPEAT_BYTE))
if ((sample == BUF_PULSE_BIT) && nvt->pkts) {
nvt_dbg("Calling ir_raw_event_handle (signal end)\n");
ir_raw_event_handle(nvt->rdev);
}
}
nvt_dbg("Calling ir_raw_event_handle (buffer empty)\n");
ir_raw_event_handle(nvt->rdev);
if (nvt->pkts) {
nvt_dbg("Odd, pkts should be 0 now... (its %u)", nvt->pkts);
nvt->pkts = 0;


@ -34,8 +34,9 @@
#include <linux/device.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/usb.h>
#include <linux/input.h>
#include <linux/usb.h>
#include <linux/usb/input.h>
#include <media/ir-core.h>
#define DRIVER_VERSION "1.61"
@ -140,7 +141,9 @@ static struct usb_driver streamzap_driver = {
static void sz_push(struct streamzap_ir *sz, struct ir_raw_event rawir)
{
ir_raw_event_store(sz->idev, &rawir);
dev_dbg(sz->dev, "Storing %s with duration %u us\n",
(rawir.pulse ? "pulse" : "space"), rawir.duration);
ir_raw_event_store_with_filter(sz->idev, &rawir);
}
static void sz_push_full_pulse(struct streamzap_ir *sz,
@ -167,7 +170,6 @@ static void sz_push_full_pulse(struct streamzap_ir *sz,
rawir.duration *= 1000;
rawir.duration &= IR_MAX_DURATION;
}
dev_dbg(sz->dev, "ls %u\n", rawir.duration);
sz_push(sz, rawir);
sz->idle = false;
@ -180,7 +182,6 @@ static void sz_push_full_pulse(struct streamzap_ir *sz,
sz->sum += rawir.duration;
rawir.duration *= 1000;
rawir.duration &= IR_MAX_DURATION;
dev_dbg(sz->dev, "p %u\n", rawir.duration);
sz_push(sz, rawir);
}
@ -200,7 +201,6 @@ static void sz_push_full_space(struct streamzap_ir *sz,
rawir.duration += SZ_RESOLUTION / 2;
sz->sum += rawir.duration;
rawir.duration *= 1000;
dev_dbg(sz->dev, "s %u\n", rawir.duration);
sz_push(sz, rawir);
}
@ -221,8 +221,6 @@ static void streamzap_callback(struct urb *urb)
struct streamzap_ir *sz;
unsigned int i;
int len;
static int timeout = (((SZ_TIMEOUT * SZ_RESOLUTION * 1000) &
IR_MAX_DURATION) | 0x03000000);
if (!urb)
return;
@ -246,7 +244,7 @@ static void streamzap_callback(struct urb *urb)
dev_dbg(sz->dev, "%s: received urb, len %d\n", __func__, len);
for (i = 0; i < len; i++) {
dev_dbg(sz->dev, "sz idx %d: %x\n",
dev_dbg(sz->dev, "sz->buf_in[%d]: %x\n",
i, (unsigned char)sz->buf_in[i]);
switch (sz->decoder_state) {
case PulseSpace:
@ -273,7 +271,7 @@ static void streamzap_callback(struct urb *urb)
DEFINE_IR_RAW_EVENT(rawir);
rawir.pulse = false;
rawir.duration = timeout;
rawir.duration = sz->props->timeout;
sz->idle = true;
if (sz->timeout_enabled)
sz_push(sz, rawir);
@ -335,6 +333,9 @@ static struct input_dev *streamzap_init_input_dev(struct streamzap_ir *sz)
sz->props = props;
usb_to_input_id(sz->usbdev, &idev->id);
idev->dev.parent = sz->dev;
ret = ir_input_register(idev, RC_MAP_STREAMZAP, props, DRIVER_NAME);
if (ret < 0) {
dev_err(dev, "remote input device register failed\n");
@ -444,6 +445,8 @@ static int __devinit streamzap_probe(struct usb_interface *intf,
sz->decoder_state = PulseSpace;
/* FIXME: don't yet have a way to set this */
sz->timeout_enabled = true;
sz->props->timeout = (((SZ_TIMEOUT * SZ_RESOLUTION * 1000) &
IR_MAX_DURATION) | 0x03000000);
#if 0
/* not yet supported, depends on patches from maxim */
/* see also: LIRC_GET_REC_RESOLUTION and LIRC_SET_REC_TIMEOUT */


@ -1989,8 +1989,23 @@ static int cx25840_probe(struct i2c_client *client,
v4l2_ctrl_new_std(&state->hdl, &cx25840_ctrl_ops,
V4L2_CID_HUE, -128, 127, 1, 0);
if (!is_cx2583x(state)) {
default_volume = 228 - cx25840_read(client, 0x8d4);
default_volume = ((default_volume / 2) + 23) << 9;
default_volume = cx25840_read(client, 0x8d4);
/*
* Enforce the legacy PVR-350/MSP3400 to PVR-150/CX25843 volume
* scale mapping limits to avoid -ERANGE errors when
* initializing the volume control
*/
if (default_volume > 228) {
/* Bottom out at -96 dB, v4l2 vol range 0x2e00-0x2fff */
default_volume = 228;
cx25840_write(client, 0x8d4, 228);
}
else if (default_volume < 20) {
/* Top out at + 8 dB, v4l2 vol range 0xfe00-0xffff */
default_volume = 20;
cx25840_write(client, 0x8d4, 20);
}
default_volume = (((228 - default_volume) >> 1) + 23) << 9;
state->volume = v4l2_ctrl_new_std(&state->hdl,
&cx25840_audio_ctrl_ops, V4L2_CID_AUDIO_VOLUME,
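For clarity, a quick worked check of the clamp-and-rescale above (pure arithmetic on the two limit values already present in the patch; no new behaviour):

/*
 * reg 0x8d4 == 228 (quietest allowed):
 *   (((228 - 228) >> 1) + 23) << 9 = 23 << 9  = 0x2e00   (about -96 dB)
 * reg 0x8d4 == 20 (loudest allowed):
 *   (((228 - 20) >> 1) + 23) << 9  = 127 << 9 = 0xfe00   (about +8 dB)
 * which matches the v4l2 volume ranges quoted in the comments above.
 */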


@ -40,7 +40,6 @@
#include <sound/control.h>
#include <sound/initval.h>
#include <sound/tlv.h>
#include <media/wm8775.h>
#include "cx88.h"
#include "cx88-reg.h"
@ -587,47 +586,26 @@ static int snd_cx88_volume_put(struct snd_kcontrol *kcontrol,
int left, right, v, b;
int changed = 0;
u32 old;
struct v4l2_control client_ctl;
/* Pass volume & balance onto any WM8775 */
if (value->value.integer.value[0] >= value->value.integer.value[1]) {
v = value->value.integer.value[0] << 10;
b = value->value.integer.value[0] ?
(0x8000 * value->value.integer.value[1]) / value->value.integer.value[0] :
0x8000;
} else {
v = value->value.integer.value[1] << 10;
b = value->value.integer.value[1] ?
0xffff - (0x8000 * value->value.integer.value[0]) / value->value.integer.value[1] :
0x8000;
}
client_ctl.value = v;
client_ctl.id = V4L2_CID_AUDIO_VOLUME;
call_hw(core, WM8775_GID, core, s_ctrl, &client_ctl);
client_ctl.value = b;
client_ctl.id = V4L2_CID_AUDIO_BALANCE;
call_hw(core, WM8775_GID, core, s_ctrl, &client_ctl);
left = value->value.integer.value[0] & 0x3f;
right = value->value.integer.value[1] & 0x3f;
b = right - left;
if (b < 0) {
v = 0x3f - left;
b = (-b) | 0x40;
v = 0x3f - left;
b = (-b) | 0x40;
} else {
v = 0x3f - right;
v = 0x3f - right;
}
/* Do we really know this will always be called with IRQs on? */
spin_lock_irq(&chip->reg_lock);
old = cx_read(AUD_VOL_CTL);
if (v != (old & 0x3f)) {
cx_swrite(SHADOW_AUD_VOL_CTL, AUD_VOL_CTL, (old & ~0x3f) | v);
changed = 1;
cx_write(AUD_VOL_CTL, (old & ~0x3f) | v);
changed = 1;
}
if ((cx_read(AUD_BAL_CTL) & 0x7f) != b) {
cx_write(AUD_BAL_CTL, b);
changed = 1;
if (cx_read(AUD_BAL_CTL) != b) {
cx_write(AUD_BAL_CTL, b);
changed = 1;
}
spin_unlock_irq(&chip->reg_lock);
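A short worked trace of the rewritten volume/balance arithmetic above, using illustrative ALSA values (left = 0x30, right = 0x20); this only follows the code as shown:

/*
 * b = right - left = -0x10   -> negative branch taken
 * v = 0x3f - left  = 0x0f
 * b = 0x10 | 0x40  = 0x50
 * v then ends up in the low 6 bits of AUD_VOL_CTL and b in AUD_BAL_CTL,
 * exactly as the register writes above perform.
 */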
@ -640,7 +618,7 @@ static const struct snd_kcontrol_new snd_cx88_volume = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
SNDRV_CTL_ELEM_ACCESS_TLV_READ,
.name = "Analog-TV Volume",
.name = "Playback Volume",
.info = snd_cx88_volume_info,
.get = snd_cx88_volume_get,
.put = snd_cx88_volume_put,
@ -671,14 +649,7 @@ static int snd_cx88_switch_put(struct snd_kcontrol *kcontrol,
vol = cx_read(AUD_VOL_CTL);
if (value->value.integer.value[0] != !(vol & bit)) {
vol ^= bit;
cx_swrite(SHADOW_AUD_VOL_CTL, AUD_VOL_CTL, vol);
/* Pass mute onto any WM8775 */
if ((1<<6) == bit) {
struct v4l2_control client_ctl;
client_ctl.value = 0 != (vol & bit);
client_ctl.id = V4L2_CID_AUDIO_MUTE;
call_hw(core, WM8775_GID, core, s_ctrl, &client_ctl);
}
cx_write(AUD_VOL_CTL, vol);
ret = 1;
}
spin_unlock_irq(&chip->reg_lock);
@ -687,7 +658,7 @@ static int snd_cx88_switch_put(struct snd_kcontrol *kcontrol,
static const struct snd_kcontrol_new snd_cx88_dac_switch = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Audio-Out Switch",
.name = "Playback Switch",
.info = snd_ctl_boolean_mono_info,
.get = snd_cx88_switch_get,
.put = snd_cx88_switch_put,
@ -696,49 +667,13 @@ static const struct snd_kcontrol_new snd_cx88_dac_switch = {
static const struct snd_kcontrol_new snd_cx88_source_switch = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Analog-TV Switch",
.name = "Capture Switch",
.info = snd_ctl_boolean_mono_info,
.get = snd_cx88_switch_get,
.put = snd_cx88_switch_put,
.private_value = (1<<6),
};
static int snd_cx88_alc_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *value)
{
snd_cx88_card_t *chip = snd_kcontrol_chip(kcontrol);
struct cx88_core *core = chip->core;
struct v4l2_control client_ctl;
client_ctl.id = V4L2_CID_AUDIO_LOUDNESS;
call_hw(core, WM8775_GID, core, g_ctrl, &client_ctl);
value->value.integer.value[0] = client_ctl.value ? 1 : 0;
return 0;
}
static int snd_cx88_alc_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *value)
{
snd_cx88_card_t *chip = snd_kcontrol_chip(kcontrol);
struct cx88_core *core = chip->core;
struct v4l2_control client_ctl;
client_ctl.value = 0 != value->value.integer.value[0];
client_ctl.id = V4L2_CID_AUDIO_LOUDNESS;
call_hw(core, WM8775_GID, core, s_ctrl, &client_ctl);
return 0;
}
static struct snd_kcontrol_new snd_cx88_alc_switch = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Line-In ALC Switch",
.info = snd_ctl_boolean_mono_info,
.get = snd_cx88_alc_get,
.put = snd_cx88_alc_put,
};
/****************************************************************************
Basic Flow for Sound Devices
****************************************************************************/
@ -860,7 +795,6 @@ static int __devinit cx88_audio_initdev(struct pci_dev *pci,
{
struct snd_card *card;
snd_cx88_card_t *chip;
struct v4l2_subdev *sd;
int err;
if (devno >= SNDRV_CARDS)
@ -896,15 +830,6 @@ static int __devinit cx88_audio_initdev(struct pci_dev *pci,
if (err < 0)
goto error;
/* If there's a wm8775 then add a Line-In ALC switch */
list_for_each_entry(sd, &chip->core->v4l2_dev.subdevs, list) {
if (WM8775_GID == sd->grp_id) {
snd_ctl_add(card, snd_ctl_new1(&snd_cx88_alc_switch,
chip));
break;
}
}
strcpy (card->driver, "CX88x");
sprintf(card->shortname, "Conexant CX%x", pci->device);
sprintf(card->longname, "%s at %#llx",


@ -1007,15 +1007,22 @@ static const struct cx88_board cx88_boards[] = {
.radio_type = UNSET,
.tuner_addr = ADDR_UNSET,
.radio_addr = ADDR_UNSET,
.audio_chip = V4L2_IDENT_WM8775,
.input = {{
.type = CX88_VMUX_DVB,
.vmux = 0,
/* 2: Line-In */
.audioroute = 2,
},{
.type = CX88_VMUX_COMPOSITE1,
.vmux = 1,
/* 2: Line-In */
.audioroute = 2,
},{
.type = CX88_VMUX_SVIDEO,
.vmux = 2,
/* 2: Line-In */
.audioroute = 2,
}},
.mpeg = CX88_MPEG_DVB,
},


@ -40,7 +40,6 @@
#include "cx88.h"
#include <media/v4l2-common.h>
#include <media/v4l2-ioctl.h>
#include <media/wm8775.h>
MODULE_DESCRIPTION("v4l2 driver module for cx2388x based TV cards");
MODULE_AUTHOR("Gerd Knorr <kraxel@bytesex.org> [SuSE Labs]");
@ -977,7 +976,6 @@ int cx88_set_control(struct cx88_core *core, struct v4l2_control *ctl)
const struct cx88_ctrl *c = NULL;
u32 value,mask;
int i;
struct v4l2_control client_ctl;
for (i = 0; i < CX8800_CTLS; i++) {
if (cx8800_ctls[i].v.id == ctl->id) {
@ -991,27 +989,6 @@ int cx88_set_control(struct cx88_core *core, struct v4l2_control *ctl)
ctl->value = c->v.minimum;
if (ctl->value > c->v.maximum)
ctl->value = c->v.maximum;
/* Pass changes onto any WM8775 */
client_ctl.id = ctl->id;
switch (ctl->id) {
case V4L2_CID_AUDIO_MUTE:
client_ctl.value = ctl->value;
break;
case V4L2_CID_AUDIO_VOLUME:
client_ctl.value = (ctl->value) ?
(0x90 + ctl->value) << 8 : 0;
break;
case V4L2_CID_AUDIO_BALANCE:
client_ctl.value = ctl->value << 9;
break;
default:
client_ctl.id = 0;
break;
}
if (client_ctl.id)
call_hw(core, WM8775_GID, core, s_ctrl, &client_ctl);
mask=c->mask;
switch (ctl->id) {
case V4L2_CID_AUDIO_BALANCE:
@ -1558,9 +1535,7 @@ static int radio_queryctrl (struct file *file, void *priv,
if (c->id < V4L2_CID_BASE ||
c->id >= V4L2_CID_LASTP1)
return -EINVAL;
if (c->id == V4L2_CID_AUDIO_MUTE ||
c->id == V4L2_CID_AUDIO_VOLUME ||
c->id == V4L2_CID_AUDIO_BALANCE) {
if (c->id == V4L2_CID_AUDIO_MUTE) {
for (i = 0; i < CX8800_CTLS; i++) {
if (cx8800_ctls[i].v.id == c->id)
break;


@ -398,19 +398,17 @@ static inline struct cx88_core *to_core(struct v4l2_device *v4l2_dev)
return container_of(v4l2_dev, struct cx88_core, v4l2_dev);
}
#define call_hw(core, grpid, o, f, args...) \
#define call_all(core, o, f, args...) \
do { \
if (!core->i2c_rc) { \
if (core->gate_ctrl) \
core->gate_ctrl(core, 1); \
v4l2_device_call_all(&core->v4l2_dev, grpid, o, f, ##args); \
v4l2_device_call_all(&core->v4l2_dev, 0, o, f, ##args); \
if (core->gate_ctrl) \
core->gate_ctrl(core, 0); \
} \
} while (0)
#define call_all(core, o, f, args...) call_hw(core, 0, o, f, ##args)
struct cx8800_dev;
struct cx8802_dev;


@ -2377,7 +2377,7 @@ static const struct v4l2_file_operations radio_fops = {
.owner = THIS_MODULE,
.open = em28xx_v4l2_open,
.release = em28xx_v4l2_close,
.ioctl = video_ioctl2,
.unlocked_ioctl = video_ioctl2,
};
static const struct v4l2_ioctl_ops radio_ioctl_ops = {


@ -807,8 +807,6 @@ static int mx2_camera_set_bus_param(struct soc_camera_device *icd,
if (common_flags & SOCAM_PCLK_SAMPLE_RISING)
csicr1 |= CSICR1_REDGE;
if (common_flags & SOCAM_PCLK_SAMPLE_FALLING)
csicr1 |= CSICR1_INV_PCLK;
if (common_flags & SOCAM_VSYNC_ACTIVE_HIGH)
csicr1 |= CSICR1_SOF_POL;
if (common_flags & SOCAM_HSYNC_ACTIVE_HIGH)


@ -522,6 +522,7 @@ static int fimc_cap_streamon(struct file *file, void *priv,
INIT_LIST_HEAD(&fimc->vid_cap.active_buf_q);
fimc->vid_cap.active_buf_cnt = 0;
fimc->vid_cap.frame_count = 0;
fimc->vid_cap.buf_index = fimc_hw_get_frame_index(fimc);
set_bit(ST_CAPT_PEND, &fimc->state);
ret = videobuf_streamon(&fimc->vid_cap.vbq);
@ -652,6 +653,50 @@ static int fimc_cap_s_ctrl(struct file *file, void *priv,
return ret;
}
static int fimc_cap_cropcap(struct file *file, void *fh,
struct v4l2_cropcap *cr)
{
struct fimc_frame *f;
struct fimc_ctx *ctx = fh;
struct fimc_dev *fimc = ctx->fimc_dev;
if (cr->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
return -EINVAL;
if (mutex_lock_interruptible(&fimc->lock))
return -ERESTARTSYS;
f = &ctx->s_frame;
cr->bounds.left = 0;
cr->bounds.top = 0;
cr->bounds.width = f->o_width;
cr->bounds.height = f->o_height;
cr->defrect = cr->bounds;
mutex_unlock(&fimc->lock);
return 0;
}
static int fimc_cap_g_crop(struct file *file, void *fh, struct v4l2_crop *cr)
{
struct fimc_frame *f;
struct fimc_ctx *ctx = file->private_data;
struct fimc_dev *fimc = ctx->fimc_dev;
if (mutex_lock_interruptible(&fimc->lock))
return -ERESTARTSYS;
f = &ctx->s_frame;
cr->c.left = f->offs_h;
cr->c.top = f->offs_v;
cr->c.width = f->width;
cr->c.height = f->height;
mutex_unlock(&fimc->lock);
return 0;
}
static int fimc_cap_s_crop(struct file *file, void *fh,
struct v4l2_crop *cr)
{
@ -716,9 +761,9 @@ static const struct v4l2_ioctl_ops fimc_capture_ioctl_ops = {
.vidioc_g_ctrl = fimc_vidioc_g_ctrl,
.vidioc_s_ctrl = fimc_cap_s_ctrl,
.vidioc_g_crop = fimc_vidioc_g_crop,
.vidioc_g_crop = fimc_cap_g_crop,
.vidioc_s_crop = fimc_cap_s_crop,
.vidioc_cropcap = fimc_vidioc_cropcap,
.vidioc_cropcap = fimc_cap_cropcap,
.vidioc_enum_input = fimc_cap_enum_input,
.vidioc_s_input = fimc_cap_s_input,
@ -785,7 +830,7 @@ int fimc_register_capture_device(struct fimc_dev *fimc)
videobuf_queue_dma_contig_init(&vid_cap->vbq, &fimc_qops,
vid_cap->v4l2_dev.dev, &fimc->irqlock,
V4L2_BUF_TYPE_VIDEO_CAPTURE, V4L2_FIELD_NONE,
sizeof(struct fimc_vid_buffer), (void *)ctx);
sizeof(struct fimc_vid_buffer), (void *)ctx, NULL);
ret = video_register_device(vfd, VFL_TYPE_GRABBER, -1);
if (ret) {


@ -50,8 +50,8 @@ static struct fimc_fmt fimc_formats[] = {
.planes_cnt = 1,
.flags = FMT_FLAGS_M2M,
}, {
.name = "XRGB-8-8-8-8, 24 bpp",
.fourcc = V4L2_PIX_FMT_RGB24,
.name = "XRGB-8-8-8-8, 32 bpp",
.fourcc = V4L2_PIX_FMT_RGB32,
.depth = 32,
.color = S5P_FIMC_RGB888,
.buff_cnt = 1,
@ -983,6 +983,7 @@ int fimc_vidioc_queryctrl(struct file *file, void *priv,
{
struct fimc_ctx *ctx = priv;
struct v4l2_queryctrl *c;
int ret = -EINVAL;
c = get_ctrl(qc->id);
if (c) {
@ -990,10 +991,14 @@ int fimc_vidioc_queryctrl(struct file *file, void *priv,
return 0;
}
if (ctx->state & FIMC_CTX_CAP)
return v4l2_subdev_call(ctx->fimc_dev->vid_cap.sd,
if (ctx->state & FIMC_CTX_CAP) {
if (mutex_lock_interruptible(&ctx->fimc_dev->lock))
return -ERESTARTSYS;
ret = v4l2_subdev_call(ctx->fimc_dev->vid_cap.sd,
core, queryctrl, qc);
return -EINVAL;
mutex_unlock(&ctx->fimc_dev->lock);
}
return ret;
}
int fimc_vidioc_g_ctrl(struct file *file, void *priv,
@ -1115,7 +1120,7 @@ static int fimc_m2m_s_ctrl(struct file *file, void *priv,
return 0;
}
int fimc_vidioc_cropcap(struct file *file, void *fh,
static int fimc_m2m_cropcap(struct file *file, void *fh,
struct v4l2_cropcap *cr)
{
struct fimc_frame *frame;
@ -1139,7 +1144,7 @@ int fimc_vidioc_cropcap(struct file *file, void *fh,
return 0;
}
int fimc_vidioc_g_crop(struct file *file, void *fh, struct v4l2_crop *cr)
static int fimc_m2m_g_crop(struct file *file, void *fh, struct v4l2_crop *cr)
{
struct fimc_frame *frame;
struct fimc_ctx *ctx = file->private_data;
@ -1167,22 +1172,22 @@ int fimc_try_crop(struct fimc_ctx *ctx, struct v4l2_crop *cr)
struct fimc_frame *f;
u32 min_size, halign;
f = (cr->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) ?
&ctx->s_frame : &ctx->d_frame;
if (cr->c.top < 0 || cr->c.left < 0) {
v4l2_err(&fimc->m2m.v4l2_dev,
"doesn't support negative values for top & left\n");
return -EINVAL;
}
f = ctx_get_frame(ctx, cr->type);
if (IS_ERR(f))
return PTR_ERR(f);
if (cr->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
f = (ctx->state & FIMC_CTX_CAP) ? &ctx->s_frame : &ctx->d_frame;
else if (cr->type == V4L2_BUF_TYPE_VIDEO_OUTPUT &&
ctx->state & FIMC_CTX_M2M)
f = &ctx->s_frame;
else
return -EINVAL;
min_size = (cr->type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
? fimc->variant->min_inp_pixsize
: fimc->variant->min_out_pixsize;
min_size = (f == &ctx->s_frame) ?
fimc->variant->min_inp_pixsize : fimc->variant->min_out_pixsize;
if (ctx->state & FIMC_CTX_M2M) {
if (fimc->id == 1 && fimc->variant->pix_hoff)
@ -1233,6 +1238,9 @@ static int fimc_m2m_s_crop(struct file *file, void *fh, struct v4l2_crop *cr)
f = (cr->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) ?
&ctx->s_frame : &ctx->d_frame;
if (mutex_lock_interruptible(&fimc->lock))
return -ERESTARTSYS;
spin_lock_irqsave(&ctx->slock, flags);
if (~ctx->state & (FIMC_SRC_FMT | FIMC_DST_FMT)) {
/* Check to see if scaling ratio is within supported range */
@ -1241,9 +1249,9 @@ static int fimc_m2m_s_crop(struct file *file, void *fh, struct v4l2_crop *cr)
else
ret = fimc_check_scaler_ratio(&cr->c, &ctx->s_frame);
if (ret) {
spin_unlock_irqrestore(&ctx->slock, flags);
v4l2_err(&fimc->m2m.v4l2_dev, "Out of scaler range");
return -EINVAL;
ret = -EINVAL;
goto scr_unlock;
}
}
ctx->state |= FIMC_PARAMS;
@ -1253,7 +1261,9 @@ static int fimc_m2m_s_crop(struct file *file, void *fh, struct v4l2_crop *cr)
f->width = cr->c.width;
f->height = cr->c.height;
scr_unlock:
spin_unlock_irqrestore(&ctx->slock, flags);
mutex_unlock(&fimc->lock);
return 0;
}
@ -1285,9 +1295,9 @@ static const struct v4l2_ioctl_ops fimc_m2m_ioctl_ops = {
.vidioc_g_ctrl = fimc_vidioc_g_ctrl,
.vidioc_s_ctrl = fimc_m2m_s_ctrl,
.vidioc_g_crop = fimc_vidioc_g_crop,
.vidioc_g_crop = fimc_m2m_g_crop,
.vidioc_s_crop = fimc_m2m_s_crop,
.vidioc_cropcap = fimc_vidioc_cropcap
.vidioc_cropcap = fimc_m2m_cropcap
};
@ -1396,7 +1406,7 @@ static const struct v4l2_file_operations fimc_m2m_fops = {
.open = fimc_m2m_open,
.release = fimc_m2m_release,
.poll = fimc_m2m_poll,
.ioctl = video_ioctl2,
.unlocked_ioctl = video_ioctl2,
.mmap = fimc_m2m_mmap,
};
@ -1736,6 +1746,7 @@ static struct samsung_fimc_variant fimc0_variant_s5pv310 = {
.pix_hoff = 1,
.has_inp_rot = 1,
.has_out_rot = 1,
.has_cistatus2 = 1,
.min_inp_pixsize = 16,
.min_out_pixsize = 16,
.hor_offs_align = 1,
@ -1745,6 +1756,7 @@ static struct samsung_fimc_variant fimc0_variant_s5pv310 = {
static struct samsung_fimc_variant fimc2_variant_s5pv310 = {
.pix_hoff = 1,
.has_cistatus2 = 1,
.min_inp_pixsize = 16,
.min_out_pixsize = 16,
.hor_offs_align = 1,


@ -13,13 +13,15 @@
/*#define DEBUG*/
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/videodev2.h>
#include <media/videobuf-core.h>
#include <media/v4l2-device.h>
#include <media/v4l2-mem2mem.h>
#include <media/v4l2-mediabus.h>
#include <media/s3c_fimc.h>
#include <linux/videodev2.h>
#include "regs-fimc.h"
#define err(fmt, args...) \
@ -369,6 +371,7 @@ struct fimc_pix_limit {
* @pix_hoff: indicate whether horizontal offset is in pixels or in bytes
* @has_inp_rot: set if has input rotator
* @has_out_rot: set if has output rotator
* @has_cistatus2: 1 if CISTATUS2 register is present in this IP revision
* @pix_limit: pixel size constraints for the scaler
* @min_inp_pixsize: minimum input pixel size
* @min_out_pixsize: minimum output pixel size
@ -379,6 +382,7 @@ struct samsung_fimc_variant {
unsigned int pix_hoff:1;
unsigned int has_inp_rot:1;
unsigned int has_out_rot:1;
unsigned int has_cistatus2:1;
struct fimc_pix_limit *pix_limit;
u16 min_inp_pixsize;
u16 min_out_pixsize;
@ -554,11 +558,19 @@ static inline struct fimc_frame *ctx_get_frame(struct fimc_ctx *ctx,
return frame;
}
/* Return an index to the buffer actually being written. */
static inline u32 fimc_hw_get_frame_index(struct fimc_dev *dev)
{
u32 reg = readl(dev->regs + S5P_CISTATUS);
return (reg & S5P_CISTATUS_FRAMECNT_MASK) >>
S5P_CISTATUS_FRAMECNT_SHIFT;
u32 reg;
if (dev->variant->has_cistatus2) {
reg = readl(dev->regs + S5P_CISTATUS2) & 0x3F;
return reg > 0 ? --reg : reg;
} else {
reg = readl(dev->regs + S5P_CISTATUS);
return (reg & S5P_CISTATUS_FRAMECNT_MASK) >>
S5P_CISTATUS_FRAMECNT_SHIFT;
}
}
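A worked example of the new CISTATUS2 path (this assumes the register's low bits hold a 1-based index of the frame currently being written, with 0 meaning none yet; that reading is an assumption, not taken from a datasheet):

/*
 * readl(...) & 0x3F == 0  ->  helper returns 0
 * readl(...) & 0x3F == 3  ->  helper returns 2, i.e. a 0-based index
 */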
/* -----------------------------------------------------*/
@ -594,10 +606,6 @@ int fimc_vidioc_g_fmt(struct file *file, void *priv,
struct v4l2_format *f);
int fimc_vidioc_try_fmt(struct file *file, void *priv,
struct v4l2_format *f);
int fimc_vidioc_g_crop(struct file *file, void *fh,
struct v4l2_crop *cr);
int fimc_vidioc_cropcap(struct file *file, void *fh,
struct v4l2_cropcap *cr);
int fimc_vidioc_queryctrl(struct file *file, void *priv,
struct v4l2_queryctrl *qc);
int fimc_vidioc_g_ctrl(struct file *file, void *priv,


@ -165,6 +165,9 @@
#define S5P_CISTATUS_VVALID_A (1 << 15)
#define S5P_CISTATUS_VVALID_B (1 << 14)
/* Indexes to the last and the currently processed buffer. */
#define S5P_CISTATUS2 0x68
/* Image capture control */
#define S5P_CIIMGCPT 0xc0
#define S5P_CIIMGCPT_IMGCPTEN (1 << 31)


@ -1980,7 +1980,7 @@ static int __devinit sh_mobile_ceu_probe(struct platform_device *pdev)
* we complete the completion.
*/
if (!csi2->driver || !csi2->driver->owner) {
if (!csi2->driver) {
complete(&wait.completion);
/* Either too late, or probing failed */
bus_unregister_notifier(&platform_bus_type, &wait.notifier);


@ -405,13 +405,13 @@ static int soc_camera_open(struct file *file)
ret = soc_camera_set_fmt(icd, &f);
if (ret < 0)
goto esfmt;
ici->ops->init_videobuf(&icd->vb_vidq, icd);
}
file->private_data = icd;
dev_dbg(&icd->dev, "camera device open\n");
ici->ops->init_videobuf(&icd->vb_vidq, icd);
mutex_unlock(&icd->video_lock);
return 0;


@ -35,7 +35,6 @@
#include <media/v4l2-device.h>
#include <media/v4l2-chip-ident.h>
#include <media/v4l2-ctrls.h>
#include <media/wm8775.h>
MODULE_DESCRIPTION("wm8775 driver");
MODULE_AUTHOR("Ulf Eklund, Hans Verkuil");
@ -51,16 +50,10 @@ enum {
TOT_REGS
};
#define ALC_HOLD 0x85 /* R17: use zero cross detection, ALC hold time 42.6 ms */
#define ALC_EN 0x100 /* R17: ALC enable */
struct wm8775_state {
struct v4l2_subdev sd;
struct v4l2_ctrl_handler hdl;
struct v4l2_ctrl *mute;
struct v4l2_ctrl *vol;
struct v4l2_ctrl *bal;
struct v4l2_ctrl *loud;
u8 input; /* Last selected input (0-0xf) */
};
@ -92,30 +85,6 @@ static int wm8775_write(struct v4l2_subdev *sd, int reg, u16 val)
return -1;
}
static void wm8775_set_audio(struct v4l2_subdev *sd, int quietly)
{
struct wm8775_state *state = to_state(sd);
u8 vol_l, vol_r;
int muted = 0 != state->mute->val;
u16 volume = (u16)state->vol->val;
u16 balance = (u16)state->bal->val;
/* normalize ( 65535 to 0 -> 255 to 0 (+24dB to -103dB) ) */
vol_l = (min(65536 - balance, 32768) * volume) >> 23;
vol_r = (min(balance, (u16)32768) * volume) >> 23;
/* Mute */
if (muted || quietly)
wm8775_write(sd, R21, 0x0c0 | state->input);
wm8775_write(sd, R14, vol_l | 0x100); /* 0x100= Left channel ADC zero cross enable */
wm8775_write(sd, R15, vol_r | 0x100); /* 0x100= Right channel ADC zero cross enable */
/* Un-mute */
if (!muted)
wm8775_write(sd, R21, state->input);
}
static int wm8775_s_routing(struct v4l2_subdev *sd,
u32 input, u32 output, u32 config)
{
@ -133,26 +102,25 @@ static int wm8775_s_routing(struct v4l2_subdev *sd,
state->input = input;
if (!v4l2_ctrl_g_ctrl(state->mute))
return 0;
if (!v4l2_ctrl_g_ctrl(state->vol))
return 0;
if (!v4l2_ctrl_g_ctrl(state->bal))
return 0;
wm8775_set_audio(sd, 1);
wm8775_write(sd, R21, 0x0c0);
wm8775_write(sd, R14, 0x1d4);
wm8775_write(sd, R15, 0x1d4);
wm8775_write(sd, R21, 0x100 + state->input);
return 0;
}
static int wm8775_s_ctrl(struct v4l2_ctrl *ctrl)
{
struct v4l2_subdev *sd = to_sd(ctrl);
struct wm8775_state *state = to_state(sd);
switch (ctrl->id) {
case V4L2_CID_AUDIO_MUTE:
case V4L2_CID_AUDIO_VOLUME:
case V4L2_CID_AUDIO_BALANCE:
wm8775_set_audio(sd, 0);
return 0;
case V4L2_CID_AUDIO_LOUDNESS:
wm8775_write(sd, R17, (ctrl->val ? ALC_EN : 0) | ALC_HOLD);
wm8775_write(sd, R21, 0x0c0);
wm8775_write(sd, R14, 0x1d4);
wm8775_write(sd, R15, 0x1d4);
if (!ctrl->val)
wm8775_write(sd, R21, 0x100 + state->input);
return 0;
}
return -EINVAL;
@ -176,7 +144,16 @@ static int wm8775_log_status(struct v4l2_subdev *sd)
static int wm8775_s_frequency(struct v4l2_subdev *sd, struct v4l2_frequency *freq)
{
wm8775_set_audio(sd, 0);
struct wm8775_state *state = to_state(sd);
/* If I remove this, then it can happen that I have no
sound the first time I tune from static to a valid channel.
It's difficult to reproduce and is almost certainly related
to the zero cross detect circuit. */
wm8775_write(sd, R21, 0x0c0);
wm8775_write(sd, R14, 0x1d4);
wm8775_write(sd, R15, 0x1d4);
wm8775_write(sd, R21, 0x100 + state->input);
return 0;
}
@ -226,7 +203,6 @@ static int wm8775_probe(struct i2c_client *client,
{
struct wm8775_state *state;
struct v4l2_subdev *sd;
int err;
/* Check if the adapter supports the needed features */
if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
@ -240,21 +216,15 @@ static int wm8775_probe(struct i2c_client *client,
return -ENOMEM;
sd = &state->sd;
v4l2_i2c_subdev_init(sd, client, &wm8775_ops);
sd->grp_id = WM8775_GID; /* subdev group id */
state->input = 2;
v4l2_ctrl_handler_init(&state->hdl, 4);
v4l2_ctrl_handler_init(&state->hdl, 1);
state->mute = v4l2_ctrl_new_std(&state->hdl, &wm8775_ctrl_ops,
V4L2_CID_AUDIO_MUTE, 0, 1, 1, 0);
state->vol = v4l2_ctrl_new_std(&state->hdl, &wm8775_ctrl_ops,
V4L2_CID_AUDIO_VOLUME, 0, 65535, (65535+99)/100, 0xCF00); /* 0dB*/
state->bal = v4l2_ctrl_new_std(&state->hdl, &wm8775_ctrl_ops,
V4L2_CID_AUDIO_BALANCE, 0, 65535, (65535+99)/100, 32768);
state->loud = v4l2_ctrl_new_std(&state->hdl, &wm8775_ctrl_ops,
V4L2_CID_AUDIO_LOUDNESS, 0, 1, 1, 1);
sd->ctrl_handler = &state->hdl;
err = state->hdl.error;
if (err) {
if (state->hdl.error) {
int err = state->hdl.error;
v4l2_ctrl_handler_free(&state->hdl);
kfree(state);
return err;
@ -266,25 +236,29 @@ static int wm8775_probe(struct i2c_client *client,
wm8775_write(sd, R23, 0x000);
/* Disable zero cross detect timeout */
wm8775_write(sd, R7, 0x000);
/* HPF enable, I2S mode, 24-bit */
wm8775_write(sd, R11, 0x022);
/* Left justified, 24-bit mode */
wm8775_write(sd, R11, 0x021);
/* Master mode, clock ratio 256fs */
wm8775_write(sd, R12, 0x102);
/* Powered up */
wm8775_write(sd, R13, 0x000);
/* ALC stereo, ALC target level -5dB FS, ALC max gain +8dB */
wm8775_write(sd, R16, 0x1bb);
/* Set ALC mode and hold time */
wm8775_write(sd, R17, (state->loud->val ? ALC_EN : 0) | ALC_HOLD);
/* ADC gain +2.5dB, enable zero cross */
wm8775_write(sd, R14, 0x1d4);
/* ADC gain +2.5dB, enable zero cross */
wm8775_write(sd, R15, 0x1d4);
/* ALC Stereo, ALC target level -1dB FS max gain +8dB */
wm8775_write(sd, R16, 0x1bf);
/* Enable gain control, use zero cross detection,
ALC hold time 42.6 ms */
wm8775_write(sd, R17, 0x185);
/* ALC gain ramp up delay 34 s, ALC gain ramp down delay 33 ms */
wm8775_write(sd, R18, 0x0a2);
/* Enable noise gate, threshold -72dBfs */
wm8775_write(sd, R19, 0x005);
/* Transient window 4ms, ALC min gain -5dB */
wm8775_write(sd, R20, 0x0fb);
wm8775_set_audio(sd, 1); /* set volume/mute/mux */
/* Transient window 4ms, lower PGA gain limit -1dB */
wm8775_write(sd, R20, 0x07a);
/* LRBOTH = 1, use input 2. */
wm8775_write(sd, R21, 0x102);
return 0;
}


@ -303,7 +303,7 @@ static irqreturn_t ab8500_irq(int irq, void *dev)
continue;
do {
int bit = __ffs(status);
int bit = __ffs(value);
int line = i * 8 + bit;
handle_nested_irq(ab8500->irq_base + line);
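The loop above dispatches one nested interrupt per set bit in the latched value: __ffs() gives the index of the lowest set bit, and that bit is cleared before the next pass (the clearing step falls outside this truncated hunk and is assumed here). A standalone illustration of the walk, with __builtin_ctz() standing in for the kernel's __ffs():

/* Plain C illustration of the lowest-set-bit walk; not driver code. */
#include <stdio.h>

int main(void)
{
	unsigned int value = 0x28;	/* pretend bits 3 and 5 are latched */
	int i = 2;			/* pretend this is status register 2 */

	do {
		int bit = __builtin_ctz(value);	/* 3 on the first pass, 5 on the second */
		int line = i * 8 + bit;		/* 19, then 21 */
		printf("dispatch nested irq for line %d\n", line);
		value &= ~(1U << bit);
	} while (value);
	return 0;
}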


@ -1455,7 +1455,11 @@ int wm831x_device_init(struct wm831x *wm831x, unsigned long id, int irq)
dev_err(wm831x->dev, "Failed to read parent ID: %d\n", ret);
goto err;
}
if (ret != 0x6204) {
switch (ret) {
case 0x6204:
case 0x6246:
break;
default:
dev_err(wm831x->dev, "Device is not a WM831x: ID %x\n", ret);
ret = -EINVAL;
goto err;
@ -1620,7 +1624,7 @@ int wm831x_device_init(struct wm831x *wm831x, unsigned long id, int irq)
case WM8325:
ret = mfd_add_devices(wm831x->dev, -1,
wm8320_devs, ARRAY_SIZE(wm8320_devs),
NULL, 0);
NULL, wm831x->irq_base);
break;
default:


@ -1773,6 +1773,7 @@ int mmc_pm_notify(struct notifier_block *notify_block,
case PM_POST_SUSPEND:
case PM_POST_HIBERNATION:
case PM_POST_RESTORE:
spin_lock_irqsave(&host->lock, flags);
host->rescan_disable = 0;


@ -69,6 +69,7 @@
#include <linux/highmem.h>
#include <linux/mmc/host.h>
#include <linux/mmc/sdio.h>
#include <asm/io.h>
#include <asm/irq.h>
@ -493,10 +494,14 @@ static void at91_mci_send_command(struct at91mci_host *host, struct mmc_command
else if (data->flags & MMC_DATA_WRITE)
cmdr |= AT91_MCI_TRCMD_START;
if (data->flags & MMC_DATA_STREAM)
cmdr |= AT91_MCI_TRTYP_STREAM;
if (data->blocks > 1)
cmdr |= AT91_MCI_TRTYP_MULTIPLE;
if (cmd->opcode == SD_IO_RW_EXTENDED) {
cmdr |= AT91_MCI_TRTYP_SDIO_BLOCK;
} else {
if (data->flags & MMC_DATA_STREAM)
cmdr |= AT91_MCI_TRTYP_STREAM;
if (data->blocks > 1)
cmdr |= AT91_MCI_TRTYP_MULTIPLE;
}
}
else {
block_length = 0;


@ -26,6 +26,7 @@
#include <linux/stat.h>
#include <linux/mmc/host.h>
#include <linux/mmc/sdio.h>
#include <mach/atmel-mci.h>
#include <linux/atmel-mci.h>
@ -532,12 +533,17 @@ static u32 atmci_prepare_command(struct mmc_host *mmc,
data = cmd->data;
if (data) {
cmdr |= MCI_CMDR_START_XFER;
if (data->flags & MMC_DATA_STREAM)
cmdr |= MCI_CMDR_STREAM;
else if (data->blocks > 1)
cmdr |= MCI_CMDR_MULTI_BLOCK;
else
cmdr |= MCI_CMDR_BLOCK;
if (cmd->opcode == SD_IO_RW_EXTENDED) {
cmdr |= MCI_CMDR_SDIO_BLOCK;
} else {
if (data->flags & MMC_DATA_STREAM)
cmdr |= MCI_CMDR_STREAM;
else if (data->blocks > 1)
cmdr |= MCI_CMDR_MULTI_BLOCK;
else
cmdr |= MCI_CMDR_BLOCK;
}
if (data->flags & MMC_DATA_READ)
cmdr |= MCI_CMDR_TRDIR_READ;


@ -702,6 +702,7 @@ static int __devinit atl1c_sw_init(struct atl1c_adapter *adapter)
adapter->wol = 0;
device_set_wakeup_enable(&pdev->dev, false);
adapter->link_speed = SPEED_0;
adapter->link_duplex = FULL_DUPLEX;
adapter->num_rx_queues = AT_DEF_RECEIVE_QUEUE;
@ -2444,8 +2445,9 @@ static int atl1c_close(struct net_device *netdev)
return 0;
}
static int atl1c_suspend(struct pci_dev *pdev, pm_message_t state)
static int atl1c_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct net_device *netdev = pci_get_drvdata(pdev);
struct atl1c_adapter *adapter = netdev_priv(netdev);
struct atl1c_hw *hw = &adapter->hw;
@ -2454,7 +2456,6 @@ static int atl1c_suspend(struct pci_dev *pdev, pm_message_t state)
u32 wol_ctrl_data = 0;
u16 mii_intr_status_data = 0;
u32 wufc = adapter->wol;
int retval = 0;
atl1c_disable_l0s_l1(hw);
if (netif_running(netdev)) {
@ -2462,9 +2463,6 @@ static int atl1c_suspend(struct pci_dev *pdev, pm_message_t state)
atl1c_down(adapter);
}
netif_device_detach(netdev);
retval = pci_save_state(pdev);
if (retval)
return retval;
if (wufc)
if (atl1c_phy_power_saving(hw) != 0)
@ -2525,12 +2523,8 @@ static int atl1c_suspend(struct pci_dev *pdev, pm_message_t state)
AT_WRITE_REG(hw, REG_WOL_CTRL, wol_ctrl_data);
AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl_data);
/* pcie patch */
device_set_wakeup_enable(&pdev->dev, 1);
AT_WRITE_REG(hw, REG_GPHY_CTRL, GPHY_CTRL_DEFAULT |
GPHY_CTRL_EXT_RESET);
pci_prepare_to_sleep(pdev);
} else {
AT_WRITE_REG(hw, REG_GPHY_CTRL, GPHY_CTRL_POWER_SAVING);
master_ctrl_data |= MASTER_CTRL_CLK_SEL_DIS;
@ -2540,25 +2534,17 @@ static int atl1c_suspend(struct pci_dev *pdev, pm_message_t state)
AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl_data);
AT_WRITE_REG(hw, REG_WOL_CTRL, 0);
hw->phy_configured = false; /* re-init PHY when resume */
pci_enable_wake(pdev, pci_choose_state(pdev, state), 0);
}
pci_disable_device(pdev);
pci_set_power_state(pdev, pci_choose_state(pdev, state));
return 0;
}
static int atl1c_resume(struct pci_dev *pdev)
static int atl1c_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct net_device *netdev = pci_get_drvdata(pdev);
struct atl1c_adapter *adapter = netdev_priv(netdev);
pci_set_power_state(pdev, PCI_D0);
pci_restore_state(pdev);
pci_enable_wake(pdev, PCI_D3hot, 0);
pci_enable_wake(pdev, PCI_D3cold, 0);
AT_WRITE_REG(&adapter->hw, REG_WOL_CTRL, 0);
atl1c_reset_pcie(&adapter->hw, ATL1C_PCIE_L0S_L1_DISABLE |
ATL1C_PCIE_PHY_RESET);
@ -2582,7 +2568,12 @@ static int atl1c_resume(struct pci_dev *pdev)
static void atl1c_shutdown(struct pci_dev *pdev)
{
atl1c_suspend(pdev, PMSG_SUSPEND);
struct net_device *netdev = pci_get_drvdata(pdev);
struct atl1c_adapter *adapter = netdev_priv(netdev);
atl1c_suspend(&pdev->dev);
pci_wake_from_d3(pdev, adapter->wol);
pci_set_power_state(pdev, PCI_D3hot);
}
static const struct net_device_ops atl1c_netdev_ops = {
@ -2886,16 +2877,16 @@ static struct pci_error_handlers atl1c_err_handler = {
.resume = atl1c_io_resume,
};
static SIMPLE_DEV_PM_OPS(atl1c_pm_ops, atl1c_suspend, atl1c_resume);
static struct pci_driver atl1c_driver = {
.name = atl1c_driver_name,
.id_table = atl1c_pci_tbl,
.probe = atl1c_probe,
.remove = __devexit_p(atl1c_remove),
/* Power Management Hooks */
.suspend = atl1c_suspend,
.resume = atl1c_resume,
.shutdown = atl1c_shutdown,
.err_handler = &atl1c_err_handler
.err_handler = &atl1c_err_handler,
.driver.pm = &atl1c_pm_ops,
};
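This hunk moves the driver from the legacy PCI .suspend/.resume hooks to dev_pm_ops: with pm ops in place the PCI core takes care of config-space save/restore and power-state changes, which is why the explicit pci_save_state()/pci_set_power_state()/pci_enable_wake() calls drop out of the callbacks above. A generic skeleton of the pattern, using hypothetical mydrv_* names rather than the atl1c code:

/* Generic dev_pm_ops skeleton for a PCI driver (hypothetical names). */
static int mydrv_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	/* Device-specific quiesce only; the PCI core handles
	 * pci_save_state() and the power-state transition. */
	dev_dbg(&pdev->dev, "suspending\n");
	return 0;
}

static int mydrv_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	/* Device-specific re-init only; config space is already restored. */
	dev_dbg(&pdev->dev, "resuming\n");
	return 0;
}

static SIMPLE_DEV_PM_OPS(mydrv_pm_ops, mydrv_suspend, mydrv_resume);

static struct pci_driver mydrv_driver = {
	.name		= "mydrv",
	.driver.pm	= &mydrv_pm_ops,
};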
/*


@ -3504,6 +3504,8 @@ static int atl1_set_ringparam(struct net_device *netdev,
struct atl1_rfd_ring rfd_old, rfd_new;
struct atl1_rrd_ring rrd_old, rrd_new;
struct atl1_ring_header rhdr_old, rhdr_new;
struct atl1_smb smb;
struct atl1_cmb cmb;
int err;
tpd_old = adapter->tpd_ring;
@ -3544,11 +3546,19 @@ static int atl1_set_ringparam(struct net_device *netdev,
adapter->rrd_ring = rrd_old;
adapter->tpd_ring = tpd_old;
adapter->ring_header = rhdr_old;
/*
* Save SMB and CMB, since atl1_free_ring_resources
* will clear them.
*/
smb = adapter->smb;
cmb = adapter->cmb;
atl1_free_ring_resources(adapter);
adapter->rfd_ring = rfd_new;
adapter->rrd_ring = rrd_new;
adapter->tpd_ring = tpd_new;
adapter->ring_header = rhdr_new;
adapter->smb = smb;
adapter->cmb = cmb;
err = atl1_up(adapter);
if (err)


@ -234,7 +234,7 @@ struct be_adapter {
u8 __iomem *db; /* Door Bell */
u8 __iomem *pcicfg; /* PCI config space */
spinlock_t mbox_lock; /* For serializing mbox cmds to BE card */
struct mutex mbox_lock; /* For serializing mbox cmds to BE card */
struct be_dma_mem mbox_mem;
/* Mbox mem is adjusted to align to 16 bytes. The allocated addr
* is stored for freeing purposes */


@ -462,7 +462,8 @@ int be_cmd_fw_init(struct be_adapter *adapter)
u8 *wrb;
int status;
spin_lock(&adapter->mbox_lock);
if (mutex_lock_interruptible(&adapter->mbox_lock))
return -1;
wrb = (u8 *)wrb_from_mbox(adapter);
*wrb++ = 0xFF;
@ -476,7 +477,7 @@ int be_cmd_fw_init(struct be_adapter *adapter)
status = be_mbox_notify_wait(adapter);
spin_unlock(&adapter->mbox_lock);
mutex_unlock(&adapter->mbox_lock);
return status;
}
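Every mailbox command in this file gets the same treatment: the spinlock becomes a mutex, which can be held across the lengthy wait for mailbox completion, and mutex_lock_interruptible() lets a signal abort a contended acquisition (hence the -1 returns above). A minimal sketch of the pattern; be_do_mbox_cmd() is a hypothetical stand-in for the per-command WRB setup and be_mbox_notify_wait() call:

/* Sketch of the mbox_lock pattern used by the commands above. */
static int be_mbox_cmd_example(struct be_adapter *adapter)
{
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;	/* interrupted by a signal before the lock was taken */

	status = be_do_mbox_cmd(adapter);	/* hypothetical helper */

	mutex_unlock(&adapter->mbox_lock);
	return status;
}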
@ -491,7 +492,8 @@ int be_cmd_fw_clean(struct be_adapter *adapter)
if (adapter->eeh_err)
return -EIO;
spin_lock(&adapter->mbox_lock);
if (mutex_lock_interruptible(&adapter->mbox_lock))
return -1;
wrb = (u8 *)wrb_from_mbox(adapter);
*wrb++ = 0xFF;
@ -505,7 +507,7 @@ int be_cmd_fw_clean(struct be_adapter *adapter)
status = be_mbox_notify_wait(adapter);
spin_unlock(&adapter->mbox_lock);
mutex_unlock(&adapter->mbox_lock);
return status;
}
int be_cmd_eq_create(struct be_adapter *adapter,
@ -516,7 +518,8 @@ int be_cmd_eq_create(struct be_adapter *adapter,
struct be_dma_mem *q_mem = &eq->dma_mem;
int status;
spin_lock(&adapter->mbox_lock);
if (mutex_lock_interruptible(&adapter->mbox_lock))
return -1;
wrb = wrb_from_mbox(adapter);
req = embedded_payload(wrb);
@ -546,7 +549,7 @@ int be_cmd_eq_create(struct be_adapter *adapter,
eq->created = true;
}
spin_unlock(&adapter->mbox_lock);
mutex_unlock(&adapter->mbox_lock);
return status;
}
@ -558,7 +561,8 @@ int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
struct be_cmd_req_mac_query *req;
int status;
spin_lock(&adapter->mbox_lock);
if (mutex_lock_interruptible(&adapter->mbox_lock))
return -1;
wrb = wrb_from_mbox(adapter);
req = embedded_payload(wrb);
@ -583,7 +587,7 @@ int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
memcpy(mac_addr, resp->mac.addr, ETH_ALEN);
}
spin_unlock(&adapter->mbox_lock);
mutex_unlock(&adapter->mbox_lock);
return status;
}
@ -667,7 +671,8 @@ int be_cmd_cq_create(struct be_adapter *adapter,
void *ctxt;
int status;
spin_lock(&adapter->mbox_lock);
if (mutex_lock_interruptible(&adapter->mbox_lock))
return -1;
wrb = wrb_from_mbox(adapter);
req = embedded_payload(wrb);
@ -701,7 +706,7 @@ int be_cmd_cq_create(struct be_adapter *adapter,
cq->created = true;
}
spin_unlock(&adapter->mbox_lock);
mutex_unlock(&adapter->mbox_lock);
return status;
}
@ -724,7 +729,8 @@ int be_cmd_mccq_create(struct be_adapter *adapter,
void *ctxt;
int status;
spin_lock(&adapter->mbox_lock);
if (mutex_lock_interruptible(&adapter->mbox_lock))
return -1;
wrb = wrb_from_mbox(adapter);
req = embedded_payload(wrb);
@ -754,7 +760,7 @@ int be_cmd_mccq_create(struct be_adapter *adapter,
mccq->id = le16_to_cpu(resp->id);
mccq->created = true;
}
spin_unlock(&adapter->mbox_lock);
mutex_unlock(&adapter->mbox_lock);
return status;
}
@ -769,7 +775,8 @@ int be_cmd_txq_create(struct be_adapter *adapter,
void *ctxt;
int status;
spin_lock(&adapter->mbox_lock);
if (mutex_lock_interruptible(&adapter->mbox_lock))
return -1;
wrb = wrb_from_mbox(adapter);
req = embedded_payload(wrb);
@ -801,7 +808,7 @@ int be_cmd_txq_create(struct be_adapter *adapter,
txq->created = true;
}
spin_unlock(&adapter->mbox_lock);
mutex_unlock(&adapter->mbox_lock);
return status;
}
@ -816,7 +823,8 @@ int be_cmd_rxq_create(struct be_adapter *adapter,
struct be_dma_mem *q_mem = &rxq->dma_mem;
int status;
spin_lock(&adapter->mbox_lock);
if (mutex_lock_interruptible(&adapter->mbox_lock))
return -1;
wrb = wrb_from_mbox(adapter);
req = embedded_payload(wrb);
@ -843,7 +851,7 @@ int be_cmd_rxq_create(struct be_adapter *adapter,
*rss_id = resp->rss_id;
}
spin_unlock(&adapter->mbox_lock);
mutex_unlock(&adapter->mbox_lock);
return status;
}
@ -862,7 +870,8 @@ int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
if (adapter->eeh_err)
return -EIO;
spin_lock(&adapter->mbox_lock);
if (mutex_lock_interruptible(&adapter->mbox_lock))
return -1;
wrb = wrb_from_mbox(adapter);
req = embedded_payload(wrb);
@ -899,7 +908,7 @@ int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
status = be_mbox_notify_wait(adapter);
spin_unlock(&adapter->mbox_lock);
mutex_unlock(&adapter->mbox_lock);
return status;
}
@ -915,7 +924,8 @@ int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
struct be_cmd_req_if_create *req;
int status;
spin_lock(&adapter->mbox_lock);
if (mutex_lock_interruptible(&adapter->mbox_lock))
return -1;
wrb = wrb_from_mbox(adapter);
req = embedded_payload(wrb);
@ -941,7 +951,7 @@ int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
*pmac_id = le32_to_cpu(resp->pmac_id);
}
spin_unlock(&adapter->mbox_lock);
mutex_unlock(&adapter->mbox_lock);
return status;
}
@ -955,7 +965,8 @@ int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id)
if (adapter->eeh_err)
return -EIO;
spin_lock(&adapter->mbox_lock);
if (mutex_lock_interruptible(&adapter->mbox_lock))
return -1;
wrb = wrb_from_mbox(adapter);
req = embedded_payload(wrb);
@ -970,7 +981,7 @@ int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id)
status = be_mbox_notify_wait(adapter);
spin_unlock(&adapter->mbox_lock);
mutex_unlock(&adapter->mbox_lock);
return status;
}
@ -1060,7 +1071,8 @@ int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver)
struct be_cmd_req_get_fw_version *req;
int status;
spin_lock(&adapter->mbox_lock);
if (mutex_lock_interruptible(&adapter->mbox_lock))
return -1;
wrb = wrb_from_mbox(adapter);
req = embedded_payload(wrb);
@ -1077,7 +1089,7 @@ int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver)
strncpy(fw_ver, resp->firmware_version_string, FW_VER_LEN);
}
spin_unlock(&adapter->mbox_lock);
mutex_unlock(&adapter->mbox_lock);
return status;
}
@ -1322,7 +1334,8 @@ int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num,
struct be_cmd_req_query_fw_cfg *req;
int status;
spin_lock(&adapter->mbox_lock);
if (mutex_lock_interruptible(&adapter->mbox_lock))
return -1;
wrb = wrb_from_mbox(adapter);
req = embedded_payload(wrb);
@ -1341,7 +1354,7 @@ int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num,
*caps = le32_to_cpu(resp->function_caps);
}
spin_unlock(&adapter->mbox_lock);
mutex_unlock(&adapter->mbox_lock);
return status;
}
@ -1352,7 +1365,8 @@ int be_cmd_reset_function(struct be_adapter *adapter)
struct be_cmd_req_hdr *req;
int status;
spin_lock(&adapter->mbox_lock);
if (mutex_lock_interruptible(&adapter->mbox_lock))
return -1;
wrb = wrb_from_mbox(adapter);
req = embedded_payload(wrb);
@ -1365,7 +1379,7 @@ int be_cmd_reset_function(struct be_adapter *adapter)
status = be_mbox_notify_wait(adapter);
spin_unlock(&adapter->mbox_lock);
mutex_unlock(&adapter->mbox_lock);
return status;
}
@ -1376,7 +1390,8 @@ int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable, u16 table_size)
u32 myhash[10];
int status;
spin_lock(&adapter->mbox_lock);
if (mutex_lock_interruptible(&adapter->mbox_lock))
return -1;
wrb = wrb_from_mbox(adapter);
req = embedded_payload(wrb);
@ -1396,7 +1411,7 @@ int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable, u16 table_size)
status = be_mbox_notify_wait(adapter);
spin_unlock(&adapter->mbox_lock);
mutex_unlock(&adapter->mbox_lock);
return status;
}


@ -2677,7 +2677,7 @@ static int be_ctrl_init(struct be_adapter *adapter)
}
memset(mc_cmd_mem->va, 0, mc_cmd_mem->size);
spin_lock_init(&adapter->mbox_lock);
mutex_init(&adapter->mbox_lock);
spin_lock_init(&adapter->mcc_lock);
spin_lock_init(&adapter->mcc_cq_lock);


@ -88,7 +88,12 @@ static void bond_na_send(struct net_device *slave_dev,
}
if (vlan_id) {
skb = vlan_put_tag(skb, vlan_id);
/* The Ethernet header is not present yet, so it is
* too early to insert a VLAN tag. Force use of an
* out-of-line tag here and let dev_hard_start_xmit()
* insert it if the slave hardware can't.
*/
skb = __vlan_hwaccel_put_tag(skb, vlan_id);
if (!skb) {
pr_err("failed to insert VLAN tag\n");
return;
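As the new comment explains, the tag has to be carried out of line: __vlan_hwaccel_put_tag() records the VLAN id in the skb metadata instead of editing packet bytes, and dev_hard_start_xmit() inserts a real 802.1Q header later if the slave NIC cannot offload it. A conceptual sketch of the distinction, assuming the 2.6.37-era two-argument signatures used in this hunk (hypothetical wrapper, not bonding code):

/* Hypothetical wrapper contrasting in-band vs. out-of-line tagging. */
static struct sk_buff *bond_na_tag_skb(struct sk_buff *skb, u16 vlan_id)
{
	/*
	 * vlan_put_tag() may rewrite the packet to insert a 4-byte
	 * 802.1Q header, which assumes the Ethernet header already
	 * exists at skb->data.  It does not exist yet here, so stash
	 * the tag out of line; dev_hard_start_xmit() adds the header
	 * later if the slave lacks hardware tag insertion.
	 */
	return __vlan_hwaccel_put_tag(skb, vlan_id);
}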
