Merge with git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6.git

Adrian Bunk 2006-04-02 10:37:38 +02:00
commit 733f896927
50 changed files with 2762 additions and 458 deletions


@ -77,6 +77,14 @@ config FIQ
config ARCH_MTD_XIP
bool
config VECTORS_BASE
hex
default 0xffff0000 if MMU
default DRAM_BASE if REMAP_VECTORS_TO_RAM
default 0x00000000
help
The base address of exception vectors.
source "init/Kconfig"
menu "System Type"

arch/arm/Kconfig-nommu (new file, 44 lines added)

@ -0,0 +1,44 @@
#
# Kconfig for uClinux (non-paged MM) dependent configurations
# Hyok S. Choi <hyok.choi@samsung.com>
#
config SET_MEM_PARAM
bool "Set flash/sdram size and base addr"
help
Say Y to manually set the base addresses and sizes.
Otherwise, the default values are assigned.
config DRAM_BASE
hex '(S)DRAM Base Address' if SET_MEM_PARAM
default 0x00800000
config DRAM_SIZE
hex '(S)DRAM SIZE' if SET_MEM_PARAM
default 0x00800000
config FLASH_MEM_BASE
hex 'FLASH Base Address' if SET_MEM_PARAM
default 0x00400000
config FLASH_SIZE
hex 'FLASH Size' if SET_MEM_PARAM
default 0x00400000
config REMAP_VECTORS_TO_RAM
bool 'Install vectors to the beginning of RAM' if DRAM_BASE
depends on DRAM_BASE
help
The kernel needs to change the hardware exception vectors.
In nommu mode, the hardware exception vectors are normally
placed at address 0x00000000. However, this region may be
occupied by read-only memory depending on H/W design.
If the region contains read-write memory, say 'n' here.
If your CPU provides a remap facility which allows the exception
vectors to be mapped to writable memory, say 'n' here.
Otherwise, say 'y' here. In this case, the kernel will require
external support to redirect the hardware exception vectors to
the writable versions located at DRAM_BASE.


@ -20,6 +20,11 @@ GZFLAGS :=-9
# Select a platform that is kept up-to-date
KBUILD_DEFCONFIG := versatile_defconfig
# defines the filename extension depending on the memory management type.
ifeq ($(CONFIG_MMU),)
MMUEXT := -nommu
endif
ifeq ($(CONFIG_FRAME_POINTER),y)
CFLAGS +=-fno-omit-frame-pointer -mapcs -mno-sched-prolog
endif
@ -73,7 +78,7 @@ AFLAGS +=$(CFLAGS_ABI) $(arch-y) $(tune-y) -msoft-float
CHECKFLAGS += -D__arm__
#Default value
head-y := arch/arm/kernel/head.o arch/arm/kernel/init_task.o
head-y := arch/arm/kernel/head$(MMUEXT).o arch/arm/kernel/init_task.o
textofs-y := 0x00008000
machine-$(CONFIG_ARCH_RPC) := rpc
@ -133,7 +138,7 @@ else
MACHINE :=
endif
export TEXT_OFFSET GZFLAGS
export TEXT_OFFSET GZFLAGS MMUEXT
# Do we have FASTFPE?
FASTFPE :=arch/arm/fastfpe


@ -2,6 +2,7 @@
* linux/arch/arm/boot/compressed/head.S
*
* Copyright (C) 1996-2002 Russell King
* Copyright (C) 2004 Hyok S. Choi (MPU support)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@ -320,6 +321,62 @@ params: ldr r0, =params_phys
cache_on: mov r3, #8 @ cache_on function
b call_cache_fn
/*
* Initialize the highest priority protection region, PR7
* to cover the whole 32-bit address space, cacheable and bufferable.
*/
__armv4_mpu_cache_on:
mov r0, #0x3f @ 4G, the whole
mcr p15, 0, r0, c6, c7, 0 @ PR7 Area Setting
mcr p15, 0, r0, c6, c7, 1
mov r0, #0x80 @ PR7
mcr p15, 0, r0, c2, c0, 0 @ D-cache on
mcr p15, 0, r0, c2, c0, 1 @ I-cache on
mcr p15, 0, r0, c3, c0, 0 @ write-buffer on
mov r0, #0xc000
mcr p15, 0, r0, c5, c0, 1 @ I-access permission
mcr p15, 0, r0, c5, c0, 0 @ D-access permission
mov r0, #0
mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
mcr p15, 0, r0, c7, c5, 0 @ flush(inval) I-Cache
mcr p15, 0, r0, c7, c6, 0 @ flush(inval) D-Cache
mrc p15, 0, r0, c1, c0, 0 @ read control reg
@ ...I .... ..D. WC.M
orr r0, r0, #0x002d @ .... .... ..1. 11.1
orr r0, r0, #0x1000 @ ...1 .... .... ....
mcr p15, 0, r0, c1, c0, 0 @ write control reg
mov r0, #0
mcr p15, 0, r0, c7, c5, 0 @ flush(inval) I-Cache
mcr p15, 0, r0, c7, c6, 0 @ flush(inval) D-Cache
mov pc, lr
__armv3_mpu_cache_on:
mov r0, #0x3f @ 4G, the whole
mcr p15, 0, r0, c6, c7, 0 @ PR7 Area Setting
mov r0, #0x80 @ PR7
mcr p15, 0, r0, c2, c0, 0 @ cache on
mcr p15, 0, r0, c3, c0, 0 @ write-buffer on
mov r0, #0xc000
mcr p15, 0, r0, c5, c0, 0 @ access permission
mov r0, #0
mcr p15, 0, r0, c7, c0, 0 @ invalidate whole cache v3
mrc p15, 0, r0, c1, c0, 0 @ read control reg
@ .... .... .... WC.M
orr r0, r0, #0x000d @ .... .... .... 11.1
mov r0, #0
mcr p15, 0, r0, c1, c0, 0 @ write control reg
mcr p15, 0, r0, c7, c0, 0 @ invalidate whole cache v3
mov pc, lr
__setup_mmu: sub r3, r4, #16384 @ Page directory size
bic r3, r3, #0xff @ Align the pointer
bic r3, r3, #0x3f00
@ -496,6 +553,18 @@ proc_types:
b __armv4_mmu_cache_off
mov pc, lr
.word 0x41007400 @ ARM74x
.word 0xff00ff00
b __armv3_mpu_cache_on
b __armv3_mpu_cache_off
b __armv3_mpu_cache_flush
.word 0x41009400 @ ARM94x
.word 0xff00ff00
b __armv4_mpu_cache_on
b __armv4_mpu_cache_off
b __armv4_mpu_cache_flush
.word 0x00007000 @ ARM7 IDs
.word 0x0000f000
mov pc, lr
@ -562,6 +631,24 @@ proc_types:
cache_off: mov r3, #12 @ cache_off function
b call_cache_fn
__armv4_mpu_cache_off:
mrc p15, 0, r0, c1, c0
bic r0, r0, #0x000d
mcr p15, 0, r0, c1, c0 @ turn MPU and cache off
mov r0, #0
mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
mcr p15, 0, r0, c7, c6, 0 @ flush D-Cache
mcr p15, 0, r0, c7, c5, 0 @ flush I-Cache
mov pc, lr
__armv3_mpu_cache_off:
mrc p15, 0, r0, c1, c0
bic r0, r0, #0x000d
mcr p15, 0, r0, c1, c0, 0 @ turn MPU and cache off
mov r0, #0
mcr p15, 0, r0, c7, c0, 0 @ invalidate whole cache v3
mov pc, lr
__armv4_mmu_cache_off:
mrc p15, 0, r0, c1, c0
bic r0, r0, #0x000d
@ -601,6 +688,24 @@ cache_clean_flush:
mov r3, #16
b call_cache_fn
__armv4_mpu_cache_flush:
mov r2, #1
mov r3, #0
mcr p15, 0, ip, c7, c6, 0 @ invalidate D cache
mov r1, #7 << 5 @ 8 segments
1: orr r3, r1, #63 << 26 @ 64 entries
2: mcr p15, 0, r3, c7, c14, 2 @ clean & invalidate D index
subs r3, r3, #1 << 26
bcs 2b @ entries 63 to 0
subs r1, r1, #1 << 5
bcs 1b @ segments 7 to 0
teq r2, #0
mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
mcr p15, 0, ip, c7, c10, 4 @ drain WB
mov pc, lr
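For reference, the ARMv4 MPU flush above walks the D-cache by set/way: bits 7:5 hold the segment (8 of them) and bits 31:26 the index within a segment (64 of them). A minimal userspace C sketch of the same iteration, where clean_dcache_line() is a hypothetical stand-in for the "mcr p15, 0, r3, c7, c14, 2" write:

#include <stdio.h>

/* hypothetical stand-in for the clean+invalidate-by-index coprocessor write */
static void clean_dcache_line(unsigned int setway)
{
	printf("clean+invalidate 0x%08x\n", setway);
}

int main(void)
{
	for (int seg = 7; seg >= 0; seg--)		/* 8 segments, bits 7:5 */
		for (int idx = 63; idx >= 0; idx--)	/* 64 entries, bits 31:26 */
			clean_dcache_line(((unsigned int)idx << 26) |
					  ((unsigned int)seg << 5));
	/* the assembly then invalidates the I-cache and drains the write buffer */
	return 0;
}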
__armv6_mmu_cache_flush:
mov r1, #0
mcr p15, 0, r1, c7, c14, 0 @ clean+invalidate D
@ -638,6 +743,7 @@ no_cache_id:
mov pc, lr
__armv3_mmu_cache_flush:
__armv3_mpu_cache_flush:
mov r1, #0
mcr p15, 0, r0, c7, c0, 0 @ invalidate whole cache v3
mov pc, lr


@ -666,7 +666,7 @@ __kuser_helper_start:
*
* #define __kernel_dmb() \
* asm volatile ( "mov r0, #0xffff0fff; mov lr, pc; sub pc, r0, #95" \
* : : : "lr","cc" )
* : : : "r0", "lr","cc" )
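*              ("r0" is listed as clobbered because the call sequence itself
*               loads 0xffff0fff into r0 to form the helper's address)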
*/
__kuser_memory_barrier: @ 0xffff0fa0


@ -0,0 +1,217 @@
/*
* linux/arch/arm/kernel/head-common.S
*
* Copyright (C) 1994-2002 Russell King
* Copyright (c) 2003 ARM Limited
* All Rights Reserved
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/
.type __switch_data, %object
__switch_data:
.long __mmap_switched
.long __data_loc @ r4
.long __data_start @ r5
.long __bss_start @ r6
.long _end @ r7
.long processor_id @ r4
.long __machine_arch_type @ r5
.long cr_alignment @ r6
.long init_thread_union + THREAD_START_SP @ sp
/*
* The following fragment of code is executed with the MMU on (when the kernel is built with MMU support),
* and uses absolute addresses; this is not position independent.
*
* r0 = cp#15 control register
* r1 = machine ID
* r9 = processor ID
*/
.type __mmap_switched, %function
__mmap_switched:
adr r3, __switch_data + 4
ldmia r3!, {r4, r5, r6, r7}
cmp r4, r5 @ Copy data segment if needed
1: cmpne r5, r6
ldrne fp, [r4], #4
strne fp, [r5], #4
bne 1b
mov fp, #0 @ Clear BSS (and zero fp)
1: cmp r6, r7
strcc fp, [r6],#4
bcc 1b
ldmia r3, {r4, r5, r6, sp}
str r9, [r4] @ Save processor ID
str r1, [r5] @ Save machine type
bic r4, r0, #CR_A @ Clear 'A' bit
stmia r6, {r0, r4} @ Save control register values
b start_kernel
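In C terms, __mmap_switched above does roughly the following (a simplified sketch; the extern declarations stand in for linker- and kernel-provided symbols, and CR_A is assumed to be the alignment-check bit of the control register):

#include <string.h>

#define CR_A (1 << 1)	/* alignment-check enable bit in the CP15 control register */

extern char __data_loc[], __data_start[], __bss_start[], _end[];
extern unsigned long processor_id, __machine_arch_type, cr_alignment[2];

void mmap_switched_sketch(unsigned long ctrl, unsigned long machine_id,
			  unsigned long cpu_id)
{
	if (__data_loc != __data_start)	/* copy .data if load and link addresses differ */
		memcpy(__data_start, __data_loc, __bss_start - __data_start);
	memset(__bss_start, 0, _end - __bss_start);	/* clear .bss */

	processor_id = cpu_id;			/* save processor and machine IDs */
	__machine_arch_type = machine_id;
	cr_alignment[0] = ctrl;			/* control register as read */
	cr_alignment[1] = ctrl & ~CR_A;		/* same value with the 'A' bit cleared */
	/* ...then switch to the init task stack and call start_kernel() */
}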
/*
* Exception handling. Something went wrong and we can't proceed. We
* ought to tell the user, but since we don't have any guarantee that
* we're even running on the right architecture, we do virtually nothing.
*
* If CONFIG_DEBUG_LL is set we try to print out something about the error
* and hope for the best (useful if bootloader fails to pass a proper
* machine ID for example).
*/
.type __error_p, %function
__error_p:
#ifdef CONFIG_DEBUG_LL
adr r0, str_p1
bl printascii
b __error
str_p1: .asciz "\nError: unrecognized/unsupported processor variant.\n"
.align
#endif
.type __error_a, %function
__error_a:
#ifdef CONFIG_DEBUG_LL
mov r4, r1 @ preserve machine ID
adr r0, str_a1
bl printascii
mov r0, r4
bl printhex8
adr r0, str_a2
bl printascii
adr r3, 3f
ldmia r3, {r4, r5, r6} @ get machine desc list
sub r4, r3, r4 @ get offset between virt&phys
add r5, r5, r4 @ convert virt addresses to
add r6, r6, r4 @ physical address space
1: ldr r0, [r5, #MACHINFO_TYPE] @ get machine type
bl printhex8
mov r0, #'\t'
bl printch
ldr r0, [r5, #MACHINFO_NAME] @ get machine name
add r0, r0, r4
bl printascii
mov r0, #'\n'
bl printch
add r5, r5, #SIZEOF_MACHINE_DESC @ next machine_desc
cmp r5, r6
blo 1b
adr r0, str_a3
bl printascii
b __error
str_a1: .asciz "\nError: unrecognized/unsupported machine ID (r1 = 0x"
str_a2: .asciz ").\n\nAvailable machine support:\n\nID (hex)\tNAME\n"
str_a3: .asciz "\nPlease check your kernel config and/or bootloader.\n"
.align
#endif
.type __error, %function
__error:
#ifdef CONFIG_ARCH_RPC
/*
* Turn the screen red on an error - RiscPC only.
*/
mov r0, #0x02000000
mov r3, #0x11
orr r3, r3, r3, lsl #8
orr r3, r3, r3, lsl #16
str r3, [r0], #4
str r3, [r0], #4
str r3, [r0], #4
str r3, [r0], #4
#endif
1: mov r0, r0
b 1b
/*
* Read processor ID register (CP#15, CR0), and look up in the linker-built
* supported processor list. Note that we can't use the absolute addresses
* for the __proc_info lists since we aren't running with the MMU on
* (and therefore, we are not in the correct address space). We have to
* calculate the offset.
*
* r9 = cpuid
* Returns:
* r3, r4, r6 corrupted
* r5 = proc_info pointer in physical address space
* r9 = cpuid (preserved)
*/
.type __lookup_processor_type, %function
__lookup_processor_type:
adr r3, 3f
ldmda r3, {r5 - r7}
sub r3, r3, r7 @ get offset between virt&phys
add r5, r5, r3 @ convert virt addresses to
add r6, r6, r3 @ physical address space
1: ldmia r5, {r3, r4} @ value, mask
and r4, r4, r9 @ mask wanted bits
teq r3, r4
beq 2f
add r5, r5, #PROC_INFO_SZ @ sizeof(proc_info_list)
cmp r5, r6
blo 1b
mov r5, #0 @ unknown processor
2: mov pc, lr
/*
* This provides a C-API version of the above function.
*/
ENTRY(lookup_processor_type)
stmfd sp!, {r4 - r7, r9, lr}
mov r9, r0
bl __lookup_processor_type
mov r0, r5
ldmfd sp!, {r4 - r7, r9, pc}
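For illustration, the {value, mask} walk performed by __lookup_processor_type corresponds to the following minimal userspace sketch; the struct and names are simplified stand-ins for struct proc_info_list, and the two table entries are taken from the proc_types additions earlier in this commit:

#include <stdio.h>
#include <stddef.h>

struct proc_info_entry {
	unsigned long cpu_val;
	unsigned long cpu_mask;
	const char *name;
};

static const struct proc_info_entry proc_table[] = {
	{ 0x41007400, 0xff00ff00, "ARM74x" },
	{ 0x41009400, 0xff00ff00, "ARM94x" },
};

static const struct proc_info_entry *lookup_processor(unsigned long cpuid)
{
	for (size_t i = 0; i < sizeof(proc_table) / sizeof(proc_table[0]); i++)
		if ((cpuid & proc_table[i].cpu_mask) == proc_table[i].cpu_val)
			return &proc_table[i];	/* value matches masked CPU ID */
	return NULL;				/* unknown processor */
}

int main(void)
{
	const struct proc_info_entry *p = lookup_processor(0x41069400UL);
	printf("%s\n", p ? p->name : "unknown");	/* prints "ARM94x" */
	return 0;
}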
/*
* Look in include/asm-arm/procinfo.h and arch/arm/kernel/arch.[ch] for
* more information about the __proc_info and __arch_info structures.
*/
.long __proc_info_begin
.long __proc_info_end
3: .long .
.long __arch_info_begin
.long __arch_info_end
/*
* Look up the machine architecture in the linker-built list of architectures.
* Note that we can't use the absolute addresses for the __arch_info
* lists since we aren't running with the MMU on (and therefore, we are
* not in the correct address space). We have to calculate the offset.
*
* r1 = machine architecture number
* Returns:
* r3, r4, r6 corrupted
* r5 = mach_info pointer in physical address space
*/
.type __lookup_machine_type, %function
__lookup_machine_type:
adr r3, 3b
ldmia r3, {r4, r5, r6}
sub r3, r3, r4 @ get offset between virt&phys
add r5, r5, r3 @ convert virt addresses to
add r6, r6, r3 @ physical address space
1: ldr r3, [r5, #MACHINFO_TYPE] @ get machine type
teq r3, r1 @ matches loader number?
beq 2f @ found
add r5, r5, #SIZEOF_MACHINE_DESC @ next machine_desc
cmp r5, r6
blo 1b
mov r5, #0 @ unknown machine
2: mov pc, lr
/*
* This provides a C-API version of the above function.
*/
ENTRY(lookup_machine_type)
stmfd sp!, {r4 - r6, lr}
mov r1, r0
bl __lookup_machine_type
mov r0, r5
ldmfd sp!, {r4 - r6, pc}


@ -0,0 +1,83 @@
/*
* linux/arch/arm/kernel/head-nommu.S
*
* Copyright (C) 1994-2002 Russell King
* Copyright (C) 2003-2006 Hyok S. Choi
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Common kernel startup code (non-paged MM)
* for 32-bit CPUs which have a processor ID register (CP15).
*
*/
#include <linux/config.h>
#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/mach-types.h>
#include <asm/procinfo.h>
#include <asm/ptrace.h>
#include <asm/constants.h>
#include <asm/system.h>
#define PROCINFO_INITFUNC 12
/*
* Kernel startup entry point.
* ---------------------------
*
* This is normally called from the decompressor code. The requirements
* are: MMU = off, D-cache = off, I-cache = don't care, r0 = 0,
* r1 = machine nr.
*
* See linux/arch/arm/tools/mach-types for the complete list of machine
* numbers for r1.
*
*/
__INIT
.type stext, %function
ENTRY(stext)
msr cpsr_c, #PSR_F_BIT | PSR_I_BIT | MODE_SVC @ ensure svc mode
@ and irqs disabled
mrc p15, 0, r9, c0, c0 @ get processor id
bl __lookup_processor_type @ r5=procinfo r9=cpuid
movs r10, r5 @ invalid processor (r5=0)?
beq __error_p @ yes, error 'p'
bl __lookup_machine_type @ r5=machinfo
movs r8, r5 @ invalid machine (r5=0)?
beq __error_a @ yes, error 'a'
ldr r13, __switch_data @ address to jump to after
@ the initialization is done
adr lr, __after_proc_init @ return (PIC) address
add pc, r10, #PROCINFO_INITFUNC
/*
* Set the Control Register and Read the process ID.
*/
.type __after_proc_init, %function
__after_proc_init:
mrc p15, 0, r0, c1, c0, 0 @ read control reg
#ifdef CONFIG_ALIGNMENT_TRAP
orr r0, r0, #CR_A
#else
bic r0, r0, #CR_A
#endif
#ifdef CONFIG_CPU_DCACHE_DISABLE
bic r0, r0, #CR_C
#endif
#ifdef CONFIG_CPU_BPREDICT_DISABLE
bic r0, r0, #CR_Z
#endif
#ifdef CONFIG_CPU_ICACHE_DISABLE
bic r0, r0, #CR_I
#endif
mcr p15, 0, r0, c1, c0, 0 @ write control reg
mov pc, r13 @ clear the BSS and jump
@ to start_kernel
#include "head-common.S"


@ -102,49 +102,6 @@ ENTRY(stext)
adr lr, __enable_mmu @ return (PIC) address
add pc, r10, #PROCINFO_INITFUNC
.type __switch_data, %object
__switch_data:
.long __mmap_switched
.long __data_loc @ r4
.long __data_start @ r5
.long __bss_start @ r6
.long _end @ r7
.long processor_id @ r4
.long __machine_arch_type @ r5
.long cr_alignment @ r6
.long init_thread_union + THREAD_START_SP @ sp
/*
* The following fragment of code is executed with the MMU on, and uses
* absolute addresses; this is not position independent.
*
* r0 = cp#15 control register
* r1 = machine ID
* r9 = processor ID
*/
.type __mmap_switched, %function
__mmap_switched:
adr r3, __switch_data + 4
ldmia r3!, {r4, r5, r6, r7}
cmp r4, r5 @ Copy data segment if needed
1: cmpne r5, r6
ldrne fp, [r4], #4
strne fp, [r5], #4
bne 1b
mov fp, #0 @ Clear BSS (and zero fp)
1: cmp r6, r7
strcc fp, [r6],#4
bcc 1b
ldmia r3, {r4, r5, r6, sp}
str r9, [r4] @ Save processor ID
str r1, [r5] @ Save machine type
bic r4, r0, #CR_A @ Clear 'A' bit
stmia r6, {r0, r4} @ Save control register values
b start_kernel
#if defined(CONFIG_SMP)
.type secondary_startup, #function
ENTRY(secondary_startup)
@ -367,166 +324,4 @@ __create_page_tables:
mov pc, lr
.ltorg
/*
* Exception handling. Something went wrong and we can't proceed. We
* ought to tell the user, but since we don't have any guarantee that
* we're even running on the right architecture, we do virtually nothing.
*
* If CONFIG_DEBUG_LL is set we try to print out something about the error
* and hope for the best (useful if bootloader fails to pass a proper
* machine ID for example).
*/
.type __error_p, %function
__error_p:
#ifdef CONFIG_DEBUG_LL
adr r0, str_p1
bl printascii
b __error
str_p1: .asciz "\nError: unrecognized/unsupported processor variant.\n"
.align
#endif
.type __error_a, %function
__error_a:
#ifdef CONFIG_DEBUG_LL
mov r4, r1 @ preserve machine ID
adr r0, str_a1
bl printascii
mov r0, r4
bl printhex8
adr r0, str_a2
bl printascii
adr r3, 3f
ldmia r3, {r4, r5, r6} @ get machine desc list
sub r4, r3, r4 @ get offset between virt&phys
add r5, r5, r4 @ convert virt addresses to
add r6, r6, r4 @ physical address space
1: ldr r0, [r5, #MACHINFO_TYPE] @ get machine type
bl printhex8
mov r0, #'\t'
bl printch
ldr r0, [r5, #MACHINFO_NAME] @ get machine name
add r0, r0, r4
bl printascii
mov r0, #'\n'
bl printch
add r5, r5, #SIZEOF_MACHINE_DESC @ next machine_desc
cmp r5, r6
blo 1b
adr r0, str_a3
bl printascii
b __error
str_a1: .asciz "\nError: unrecognized/unsupported machine ID (r1 = 0x"
str_a2: .asciz ").\n\nAvailable machine support:\n\nID (hex)\tNAME\n"
str_a3: .asciz "\nPlease check your kernel config and/or bootloader.\n"
.align
#endif
.type __error, %function
__error:
#ifdef CONFIG_ARCH_RPC
/*
* Turn the screen red on an error - RiscPC only.
*/
mov r0, #0x02000000
mov r3, #0x11
orr r3, r3, r3, lsl #8
orr r3, r3, r3, lsl #16
str r3, [r0], #4
str r3, [r0], #4
str r3, [r0], #4
str r3, [r0], #4
#endif
1: mov r0, r0
b 1b
/*
* Read processor ID register (CP#15, CR0), and look up in the linker-built
* supported processor list. Note that we can't use the absolute addresses
* for the __proc_info lists since we aren't running with the MMU on
* (and therefore, we are not in the correct address space). We have to
* calculate the offset.
*
* r9 = cpuid
* Returns:
* r3, r4, r6 corrupted
* r5 = proc_info pointer in physical address space
* r9 = cpuid (preserved)
*/
.type __lookup_processor_type, %function
__lookup_processor_type:
adr r3, 3f
ldmda r3, {r5 - r7}
sub r3, r3, r7 @ get offset between virt&phys
add r5, r5, r3 @ convert virt addresses to
add r6, r6, r3 @ physical address space
1: ldmia r5, {r3, r4} @ value, mask
and r4, r4, r9 @ mask wanted bits
teq r3, r4
beq 2f
add r5, r5, #PROC_INFO_SZ @ sizeof(proc_info_list)
cmp r5, r6
blo 1b
mov r5, #0 @ unknown processor
2: mov pc, lr
/*
* This provides a C-API version of the above function.
*/
ENTRY(lookup_processor_type)
stmfd sp!, {r4 - r7, r9, lr}
mov r9, r0
bl __lookup_processor_type
mov r0, r5
ldmfd sp!, {r4 - r7, r9, pc}
/*
* Look in include/asm-arm/procinfo.h and arch/arm/kernel/arch.[ch] for
* more information about the __proc_info and __arch_info structures.
*/
.long __proc_info_begin
.long __proc_info_end
3: .long .
.long __arch_info_begin
.long __arch_info_end
/*
* Look up the machine architecture in the linker-built list of architectures.
* Note that we can't use the absolute addresses for the __arch_info
* lists since we aren't running with the MMU on (and therefore, we are
* not in the correct address space). We have to calculate the offset.
*
* r1 = machine architecture number
* Returns:
* r3, r4, r6 corrupted
* r5 = mach_info pointer in physical address space
*/
.type __lookup_machine_type, %function
__lookup_machine_type:
adr r3, 3b
ldmia r3, {r4, r5, r6}
sub r3, r3, r4 @ get offset between virt&phys
add r5, r5, r3 @ convert virt addresses to
add r6, r6, r3 @ physical address space
1: ldr r3, [r5, #MACHINFO_TYPE] @ get machine type
teq r3, r1 @ matches loader number?
beq 2f @ found
add r5, r5, #SIZEOF_MACHINE_DESC @ next machine_desc
cmp r5, r6
blo 1b
mov r5, #0 @ unknown machine
2: mov pc, lr
/*
* This provides a C-API version of the above function.
*/
ENTRY(lookup_machine_type)
stmfd sp!, {r4 - r6, lr}
mov r1, r0
bl __lookup_machine_type
mov r0, r5
ldmfd sp!, {r4 - r6, pc}
#include "head-common.S"


@ -7,6 +7,6 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#define KERN_SIGRETURN_CODE 0xffff0500
#define KERN_SIGRETURN_CODE (CONFIG_VECTORS_BASE + 0x00000500)
extern const unsigned long sigreturn_codes[7];
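With the MMU default of CONFIG_VECTORS_BASE = 0xffff0000 (see the Kconfig hunk earlier in this commit), the new definition still evaluates to 0xffff0000 + 0x00000500 = 0xffff0500, i.e. the previous hard-coded address, while !MMU kernels pick up DRAM_BASE or 0x00000000 as configured.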


@ -688,6 +688,7 @@ EXPORT_SYMBOL(abort);
void __init trap_init(void)
{
unsigned long vectors = CONFIG_VECTORS_BASE;
extern char __stubs_start[], __stubs_end[];
extern char __vectors_start[], __vectors_end[];
extern char __kuser_helper_start[], __kuser_helper_end[];
@ -698,9 +699,9 @@ void __init trap_init(void)
* into the vector page, mapped at 0xffff0000, and ensure these
* are visible to the instruction stream.
*/
memcpy((void *)0xffff0000, __vectors_start, __vectors_end - __vectors_start);
memcpy((void *)0xffff0200, __stubs_start, __stubs_end - __stubs_start);
memcpy((void *)0xffff1000 - kuser_sz, __kuser_helper_start, kuser_sz);
memcpy((void *)vectors, __vectors_start, __vectors_end - __vectors_start);
memcpy((void *)vectors + 0x200, __stubs_start, __stubs_end - __stubs_start);
memcpy((void *)vectors + 0x1000 - kuser_sz, __kuser_helper_start, kuser_sz);
/*
* Copy signal return handlers into the vector page, and
@ -709,6 +710,6 @@ void __init trap_init(void)
memcpy((void *)KERN_SIGRETURN_CODE, sigreturn_codes,
sizeof(sigreturn_codes));
flush_icache_range(0xffff0000, 0xffff0000 + PAGE_SIZE);
flush_icache_range(vectors, vectors + PAGE_SIZE);
modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
}


@ -30,6 +30,7 @@
#include <asm/procinfo.h>
#include <asm/hardware.h>
#include <asm/pgtable.h>
#include <asm/pgtable-hwdef.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include "proc-macros.S"


@ -240,7 +240,7 @@ cache_info(char *page)
}
p += sprintf(p,
"%s Cache level %lu:\n"
"\tSize : %lu bytes\n"
"\tSize : %u bytes\n"
"\tAttributes : ",
cache_types[j+cci.pcci_unified], i+1,
cci.pcci_cache_size);
@ -648,9 +648,9 @@ frequency_info(char *page)
if (ia64_pal_freq_ratios(&proc, &bus, &itc) != 0) return 0;
p += sprintf(p,
"Processor/Clock ratio : %ld/%ld\n"
"Bus/Clock ratio : %ld/%ld\n"
"ITC/Clock ratio : %ld/%ld\n",
"Processor/Clock ratio : %d/%d\n"
"Bus/Clock ratio : %d/%d\n"
"ITC/Clock ratio : %d/%d\n",
proc.num, proc.den, bus.num, bus.den, itc.num, itc.den);
return p - page;


@ -188,7 +188,7 @@ ia64_init_itm (void)
itc_freq = (platform_base_freq*itc_ratio.num)/itc_ratio.den;
local_cpu_data->itm_delta = (itc_freq + HZ/2) / HZ;
printk(KERN_DEBUG "CPU %d: base freq=%lu.%03luMHz, ITC ratio=%lu/%lu, "
printk(KERN_DEBUG "CPU %d: base freq=%lu.%03luMHz, ITC ratio=%u/%u, "
"ITC freq=%lu.%03luMHz", smp_processor_id(),
platform_base_freq / 1000000, (platform_base_freq / 1000) % 1000,
itc_ratio.num, itc_ratio.den, itc_freq / 1000000, (itc_freq / 1000) % 1000);


@ -9,6 +9,8 @@
* 2002/08/07 Erich Focht <efocht@ess.nec.de>
* Populate cpu entries in sysfs for non-numa systems as well
* Intel Corporation - Ashok Raj
* 02/27/2006 Zhang, Yanmin
* Populate cpu cache entries in sysfs for cpu cache info
*/
#include <linux/config.h>
@ -19,6 +21,7 @@
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/nodemask.h>
#include <linux/notifier.h>
#include <asm/mmzone.h>
#include <asm/numa.h>
#include <asm/cpu.h>
@ -101,3 +104,367 @@ out:
}
subsys_initcall(topology_init);
/*
* Export cpu cache information through sysfs
*/
/*
* A bunch of string arrays for pretty printing
*/
static const char *cache_types[] = {
"", /* not used */
"Instruction",
"Data",
"Unified" /* unified */
};
static const char *cache_mattrib[]={
"WriteThrough",
"WriteBack",
"", /* reserved */
"" /* reserved */
};
struct cache_info {
pal_cache_config_info_t cci;
cpumask_t shared_cpu_map;
int level;
int type;
struct kobject kobj;
};
struct cpu_cache_info {
struct cache_info *cache_leaves;
int num_cache_leaves;
struct kobject kobj;
};
static struct cpu_cache_info all_cpu_cache_info[NR_CPUS];
#define LEAF_KOBJECT_PTR(x,y) (&all_cpu_cache_info[x].cache_leaves[y])
#ifdef CONFIG_SMP
static void cache_shared_cpu_map_setup( unsigned int cpu,
struct cache_info * this_leaf)
{
pal_cache_shared_info_t csi;
int num_shared, i = 0;
unsigned int j;
if (cpu_data(cpu)->threads_per_core <= 1 &&
cpu_data(cpu)->cores_per_socket <= 1) {
cpu_set(cpu, this_leaf->shared_cpu_map);
return;
}
if (ia64_pal_cache_shared_info(this_leaf->level,
this_leaf->type,
0,
&csi) != PAL_STATUS_SUCCESS)
return;
num_shared = (int) csi.num_shared;
do {
for_each_cpu(j)
if (cpu_data(cpu)->socket_id == cpu_data(j)->socket_id
&& cpu_data(j)->core_id == csi.log1_cid
&& cpu_data(j)->thread_id == csi.log1_tid)
cpu_set(j, this_leaf->shared_cpu_map);
i++;
} while (i < num_shared &&
ia64_pal_cache_shared_info(this_leaf->level,
this_leaf->type,
i,
&csi) == PAL_STATUS_SUCCESS);
}
#else
static void cache_shared_cpu_map_setup(unsigned int cpu,
struct cache_info * this_leaf)
{
cpu_set(cpu, this_leaf->shared_cpu_map);
return;
}
#endif
static ssize_t show_coherency_line_size(struct cache_info *this_leaf,
char *buf)
{
return sprintf(buf, "%u\n", 1 << this_leaf->cci.pcci_line_size);
}
static ssize_t show_ways_of_associativity(struct cache_info *this_leaf,
char *buf)
{
return sprintf(buf, "%u\n", this_leaf->cci.pcci_assoc);
}
static ssize_t show_attributes(struct cache_info *this_leaf, char *buf)
{
return sprintf(buf,
"%s\n",
cache_mattrib[this_leaf->cci.pcci_cache_attr]);
}
static ssize_t show_size(struct cache_info *this_leaf, char *buf)
{
return sprintf(buf, "%uK\n", this_leaf->cci.pcci_cache_size / 1024);
}
static ssize_t show_number_of_sets(struct cache_info *this_leaf, char *buf)
{
unsigned number_of_sets = this_leaf->cci.pcci_cache_size;
number_of_sets /= this_leaf->cci.pcci_assoc;
number_of_sets /= 1 << this_leaf->cci.pcci_line_size;
return sprintf(buf, "%u\n", number_of_sets);
}
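A worked example of the arithmetic above, with illustrative numbers (pcci_line_size holds the log2 of the line size):

#include <stdio.h>

int main(void)
{
	/* e.g. a 256 KiB, 8-way cache with 64-byte lines (log2 == 6) */
	unsigned int size = 256 * 1024, assoc = 8, line_size_log2 = 6;
	unsigned int sets = size / assoc / (1u << line_size_log2);
	printf("%u sets\n", sets);	/* prints "512 sets" */
	return 0;
}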
static ssize_t show_shared_cpu_map(struct cache_info *this_leaf, char *buf)
{
ssize_t len;
cpumask_t shared_cpu_map;
cpus_and(shared_cpu_map, this_leaf->shared_cpu_map, cpu_online_map);
len = cpumask_scnprintf(buf, NR_CPUS+1, shared_cpu_map);
len += sprintf(buf+len, "\n");
return len;
}
static ssize_t show_type(struct cache_info *this_leaf, char *buf)
{
int type = this_leaf->type + this_leaf->cci.pcci_unified;
return sprintf(buf, "%s\n", cache_types[type]);
}
static ssize_t show_level(struct cache_info *this_leaf, char *buf)
{
return sprintf(buf, "%u\n", this_leaf->level);
}
struct cache_attr {
struct attribute attr;
ssize_t (*show)(struct cache_info *, char *);
ssize_t (*store)(struct cache_info *, const char *, size_t count);
};
#ifdef define_one_ro
#undef define_one_ro
#endif
#define define_one_ro(_name) \
static struct cache_attr _name = \
__ATTR(_name, 0444, show_##_name, NULL)
define_one_ro(level);
define_one_ro(type);
define_one_ro(coherency_line_size);
define_one_ro(ways_of_associativity);
define_one_ro(size);
define_one_ro(number_of_sets);
define_one_ro(shared_cpu_map);
define_one_ro(attributes);
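For illustration, each define_one_ro() line above is plain token substitution; define_one_ro(level), for instance, expands to roughly:

static struct cache_attr level =
	__ATTR(level, 0444, show_level, NULL);
/* a read-only sysfs attribute named "level" whose show callback is
   show_level() and which has no store callback */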
static struct attribute * cache_default_attrs[] = {
&type.attr,
&level.attr,
&coherency_line_size.attr,
&ways_of_associativity.attr,
&attributes.attr,
&size.attr,
&number_of_sets.attr,
&shared_cpu_map.attr,
NULL
};
#define to_object(k) container_of(k, struct cache_info, kobj)
#define to_attr(a) container_of(a, struct cache_attr, attr)
static ssize_t cache_show(struct kobject * kobj, struct attribute * attr, char * buf)
{
struct cache_attr *fattr = to_attr(attr);
struct cache_info *this_leaf = to_object(kobj);
ssize_t ret;
ret = fattr->show ? fattr->show(this_leaf, buf) : 0;
return ret;
}
static struct sysfs_ops cache_sysfs_ops = {
.show = cache_show
};
static struct kobj_type cache_ktype = {
.sysfs_ops = &cache_sysfs_ops,
.default_attrs = cache_default_attrs,
};
static struct kobj_type cache_ktype_percpu_entry = {
.sysfs_ops = &cache_sysfs_ops,
};
static void __cpuinit cpu_cache_sysfs_exit(unsigned int cpu)
{
if (all_cpu_cache_info[cpu].cache_leaves) {
kfree(all_cpu_cache_info[cpu].cache_leaves);
all_cpu_cache_info[cpu].cache_leaves = NULL;
}
all_cpu_cache_info[cpu].num_cache_leaves = 0;
memset(&all_cpu_cache_info[cpu].kobj, 0, sizeof(struct kobject));
return;
}
static int __cpuinit cpu_cache_sysfs_init(unsigned int cpu)
{
u64 i, levels, unique_caches;
pal_cache_config_info_t cci;
int j;
s64 status;
struct cache_info *this_cache;
int num_cache_leaves = 0;
if ((status = ia64_pal_cache_summary(&levels, &unique_caches)) != 0) {
printk(KERN_ERR "ia64_pal_cache_summary=%ld\n", status);
return -1;
}
this_cache=kzalloc(sizeof(struct cache_info)*unique_caches,
GFP_KERNEL);
if (this_cache == NULL)
return -ENOMEM;
for (i=0; i < levels; i++) {
for (j=2; j >0 ; j--) {
if ((status=ia64_pal_cache_config_info(i,j, &cci)) !=
PAL_STATUS_SUCCESS)
continue;
this_cache[num_cache_leaves].cci = cci;
this_cache[num_cache_leaves].level = i + 1;
this_cache[num_cache_leaves].type = j;
cache_shared_cpu_map_setup(cpu,
&this_cache[num_cache_leaves]);
num_cache_leaves ++;
}
}
all_cpu_cache_info[cpu].cache_leaves = this_cache;
all_cpu_cache_info[cpu].num_cache_leaves = num_cache_leaves;
memset(&all_cpu_cache_info[cpu].kobj, 0, sizeof(struct kobject));
return 0;
}
/* Add cache interface for CPU device */
static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
{
unsigned int cpu = sys_dev->id;
unsigned long i, j;
struct cache_info *this_object;
int retval = 0;
cpumask_t oldmask;
if (all_cpu_cache_info[cpu].kobj.parent)
return 0;
oldmask = current->cpus_allowed;
retval = set_cpus_allowed(current, cpumask_of_cpu(cpu));
if (unlikely(retval))
return retval;
retval = cpu_cache_sysfs_init(cpu);
set_cpus_allowed(current, oldmask);
if (unlikely(retval < 0))
return retval;
all_cpu_cache_info[cpu].kobj.parent = &sys_dev->kobj;
kobject_set_name(&all_cpu_cache_info[cpu].kobj, "%s", "cache");
all_cpu_cache_info[cpu].kobj.ktype = &cache_ktype_percpu_entry;
retval = kobject_register(&all_cpu_cache_info[cpu].kobj);
for (i = 0; i < all_cpu_cache_info[cpu].num_cache_leaves; i++) {
this_object = LEAF_KOBJECT_PTR(cpu,i);
this_object->kobj.parent = &all_cpu_cache_info[cpu].kobj;
kobject_set_name(&(this_object->kobj), "index%1lu", i);
this_object->kobj.ktype = &cache_ktype;
retval = kobject_register(&(this_object->kobj));
if (unlikely(retval)) {
for (j = 0; j < i; j++) {
kobject_unregister(
&(LEAF_KOBJECT_PTR(cpu,j)->kobj));
}
kobject_unregister(&all_cpu_cache_info[cpu].kobj);
cpu_cache_sysfs_exit(cpu);
break;
}
}
return retval;
}
/* Remove cache interface for CPU device */
static int __cpuinit cache_remove_dev(struct sys_device * sys_dev)
{
unsigned int cpu = sys_dev->id;
unsigned long i;
for (i = 0; i < all_cpu_cache_info[cpu].num_cache_leaves; i++)
kobject_unregister(&(LEAF_KOBJECT_PTR(cpu,i)->kobj));
if (all_cpu_cache_info[cpu].kobj.parent) {
kobject_unregister(&all_cpu_cache_info[cpu].kobj);
memset(&all_cpu_cache_info[cpu].kobj,
0,
sizeof(struct kobject));
}
cpu_cache_sysfs_exit(cpu);
return 0;
}
/*
* When a cpu is hot-plugged, do a check and initiate
* cache kobject if necessary
*/
static int __cpuinit cache_cpu_callback(struct notifier_block *nfb,
unsigned long action, void *hcpu)
{
unsigned int cpu = (unsigned long)hcpu;
struct sys_device *sys_dev;
sys_dev = get_cpu_sysdev(cpu);
switch (action) {
case CPU_ONLINE:
cache_add_dev(sys_dev);
break;
case CPU_DEAD:
cache_remove_dev(sys_dev);
break;
}
return NOTIFY_OK;
}
static struct notifier_block cache_cpu_notifier =
{
.notifier_call = cache_cpu_callback
};
static int __cpuinit cache_sysfs_init(void)
{
int i;
for_each_online_cpu(i) {
cache_cpu_callback(&cache_cpu_notifier, CPU_ONLINE,
(void *)(long)i);
}
register_cpu_notifier(&cache_cpu_notifier);
return 0;
}
device_initcall(cache_sysfs_init);
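The net effect is a per-CPU cache directory in sysfs along these lines (a sketch assuming the usual cpu sysdev location; the number of index directories and their contents depend on the machine):

/sys/devices/system/cpu/cpu0/cache/index0/level
/sys/devices/system/cpu/cpu0/cache/index0/type
/sys/devices/system/cpu/cpu0/cache/index0/size
/sys/devices/system/cpu/cpu0/cache/index0/coherency_line_size
/sys/devices/system/cpu/cpu0/cache/index0/ways_of_associativity
/sys/devices/system/cpu/cpu0/cache/index0/number_of_sets
/sys/devices/system/cpu/cpu0/cache/index0/attributes
/sys/devices/system/cpu/cpu0/cache/index0/shared_cpu_map
/sys/devices/system/cpu/cpu0/cache/index1/...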


@ -60,6 +60,17 @@ config MMC_SDHCI
If unsure, say N.
config MMC_OMAP
tristate "TI OMAP Multimedia Card Interface support"
depends on ARCH_OMAP && MMC
select TPS65010 if MACH_OMAP_H2
help
This selects the TI OMAP Multimedia Card Interface.
If you have an OMAP board with a Multimedia Card slot,
say Y or M here.
If unsure, say N.
config MMC_WBSD
tristate "Winbond W83L51xD SD/MMC Card Interface support"
depends on MMC && ISA_DMA_API


@ -20,5 +20,10 @@ obj-$(CONFIG_MMC_PXA) += pxamci.o
obj-$(CONFIG_MMC_SDHCI) += sdhci.o
obj-$(CONFIG_MMC_WBSD) += wbsd.o
obj-$(CONFIG_MMC_AU1X) += au1xmmc.o
obj-$(CONFIG_MMC_OMAP) += omap.o
mmc_core-y := mmc.o mmc_queue.o mmc_sysfs.o
ifeq ($(CONFIG_MMC_DEBUG),y)
EXTRA_CFLAGS += -DDEBUG
endif


@ -56,12 +56,11 @@
#define DRIVER_NAME "au1xxx-mmc"
/* Set this to enable special debugging macros */
/* #define MMC_DEBUG */
#ifdef MMC_DEBUG
#define DEBUG(fmt, idx, args...) printk("au1xx(%d): DEBUG: " fmt, idx, ##args)
#ifdef DEBUG
#define DBG(fmt, idx, args...) printk("au1xx(%d): DEBUG: " fmt, idx, ##args)
#else
#define DEBUG(fmt, idx, args...)
#define DBG(fmt, idx, args...)
#endif
const struct {
@ -424,18 +423,18 @@ static void au1xmmc_receive_pio(struct au1xmmc_host *host)
break;
if (status & SD_STATUS_RC) {
DEBUG("RX CRC Error [%d + %d].\n", host->id,
DBG("RX CRC Error [%d + %d].\n", host->id,
host->pio.len, count);
break;
}
if (status & SD_STATUS_RO) {
DEBUG("RX Overrun [%d + %d]\n", host->id,
DBG("RX Overrun [%d + %d]\n", host->id,
host->pio.len, count);
break;
}
else if (status & SD_STATUS_RU) {
DEBUG("RX Underrun [%d + %d]\n", host->id,
DBG("RX Underrun [%d + %d]\n", host->id,
host->pio.len, count);
break;
}
@ -721,7 +720,7 @@ static void au1xmmc_set_ios(struct mmc_host* mmc, struct mmc_ios* ios)
{
struct au1xmmc_host *host = mmc_priv(mmc);
DEBUG("set_ios (power=%u, clock=%uHz, vdd=%u, mode=%u)\n",
DBG("set_ios (power=%u, clock=%uHz, vdd=%u, mode=%u)\n",
host->id, ios->power_mode, ios->clock, ios->vdd,
ios->bus_mode);
@ -810,7 +809,7 @@ static irqreturn_t au1xmmc_irq(int irq, void *dev_id, struct pt_regs *regs)
au1xmmc_receive_pio(host);
}
else if (status & 0x203FBC70) {
DEBUG("Unhandled status %8.8x\n", host->id, status);
DBG("Unhandled status %8.8x\n", host->id, status);
handled = 0;
}
@ -839,7 +838,7 @@ static void au1xmmc_poll_event(unsigned long arg)
if (host->mrq != NULL) {
u32 status = au_readl(HOST_STATUS(host));
DEBUG("PENDING - %8.8x\n", host->id, status);
DBG("PENDING - %8.8x\n", host->id, status);
}
mod_timer(&host->timer, jiffies + AU1XMMC_DETECT_TIMEOUT);


@ -27,12 +27,6 @@
#include "mmc.h"
#ifdef CONFIG_MMC_DEBUG
#define DBG(x...) printk(KERN_DEBUG x)
#else
#define DBG(x...) do { } while (0)
#endif
#define CMD_RETRIES 3
/*
@ -77,8 +71,9 @@ void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)
{
struct mmc_command *cmd = mrq->cmd;
int err = mrq->cmd->error;
DBG("MMC: req done (%02x): %d: %08x %08x %08x %08x\n", cmd->opcode,
err, cmd->resp[0], cmd->resp[1], cmd->resp[2], cmd->resp[3]);
pr_debug("MMC: req done (%02x): %d: %08x %08x %08x %08x\n",
cmd->opcode, err, cmd->resp[0], cmd->resp[1],
cmd->resp[2], cmd->resp[3]);
if (err && cmd->retries) {
cmd->retries--;
@ -102,8 +97,8 @@ EXPORT_SYMBOL(mmc_request_done);
void
mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
{
DBG("MMC: starting cmd %02x arg %08x flags %08x\n",
mrq->cmd->opcode, mrq->cmd->arg, mrq->cmd->flags);
pr_debug("MMC: starting cmd %02x arg %08x flags %08x\n",
mrq->cmd->opcode, mrq->cmd->arg, mrq->cmd->flags);
WARN_ON(host->card_busy == NULL);
@ -976,8 +971,8 @@ static unsigned int mmc_calculate_clock(struct mmc_host *host)
if (!mmc_card_dead(card) && max_dtr > card->csd.max_dtr)
max_dtr = card->csd.max_dtr;
DBG("MMC: selected %d.%03dMHz transfer rate\n",
max_dtr / 1000000, (max_dtr / 1000) % 1000);
pr_debug("MMC: selected %d.%03dMHz transfer rate\n",
max_dtr / 1000000, (max_dtr / 1000) % 1000);
return max_dtr;
}


@ -33,12 +33,8 @@
#define DRIVER_NAME "mmci-pl18x"
#ifdef CONFIG_MMC_DEBUG
#define DBG(host,fmt,args...) \
pr_debug("%s: %s: " fmt, mmc_hostname(host->mmc), __func__ , args)
#else
#define DBG(host,fmt,args...) do { } while (0)
#endif
static unsigned int fmax = 515633;

drivers/mmc/omap.c (1226 changed lines)

File diff suppressed because it is too large.

drivers/mmc/omap.h (new file, 55 lines added)

@ -0,0 +1,55 @@
#ifndef DRIVERS_MEDIA_MMC_OMAP_H
#define DRIVERS_MEDIA_MMC_OMAP_H
#define OMAP_MMC_REG_CMD 0x00
#define OMAP_MMC_REG_ARGL 0x04
#define OMAP_MMC_REG_ARGH 0x08
#define OMAP_MMC_REG_CON 0x0c
#define OMAP_MMC_REG_STAT 0x10
#define OMAP_MMC_REG_IE 0x14
#define OMAP_MMC_REG_CTO 0x18
#define OMAP_MMC_REG_DTO 0x1c
#define OMAP_MMC_REG_DATA 0x20
#define OMAP_MMC_REG_BLEN 0x24
#define OMAP_MMC_REG_NBLK 0x28
#define OMAP_MMC_REG_BUF 0x2c
#define OMAP_MMC_REG_SDIO 0x34
#define OMAP_MMC_REG_REV 0x3c
#define OMAP_MMC_REG_RSP0 0x40
#define OMAP_MMC_REG_RSP1 0x44
#define OMAP_MMC_REG_RSP2 0x48
#define OMAP_MMC_REG_RSP3 0x4c
#define OMAP_MMC_REG_RSP4 0x50
#define OMAP_MMC_REG_RSP5 0x54
#define OMAP_MMC_REG_RSP6 0x58
#define OMAP_MMC_REG_RSP7 0x5c
#define OMAP_MMC_REG_IOSR 0x60
#define OMAP_MMC_REG_SYSC 0x64
#define OMAP_MMC_REG_SYSS 0x68
#define OMAP_MMC_STAT_CARD_ERR (1 << 14)
#define OMAP_MMC_STAT_CARD_IRQ (1 << 13)
#define OMAP_MMC_STAT_OCR_BUSY (1 << 12)
#define OMAP_MMC_STAT_A_EMPTY (1 << 11)
#define OMAP_MMC_STAT_A_FULL (1 << 10)
#define OMAP_MMC_STAT_CMD_CRC (1 << 8)
#define OMAP_MMC_STAT_CMD_TOUT (1 << 7)
#define OMAP_MMC_STAT_DATA_CRC (1 << 6)
#define OMAP_MMC_STAT_DATA_TOUT (1 << 5)
#define OMAP_MMC_STAT_END_BUSY (1 << 4)
#define OMAP_MMC_STAT_END_OF_DATA (1 << 3)
#define OMAP_MMC_STAT_CARD_BUSY (1 << 2)
#define OMAP_MMC_STAT_END_OF_CMD (1 << 0)
#define OMAP_MMC_READ(base, reg) __raw_readw((base) + OMAP_MMC_REG_##reg)
#define OMAP_MMC_WRITE(base, reg, val) __raw_writew((val), (base) + OMAP_MMC_REG_##reg)
/*
* Command types
*/
#define OMAP_MMC_CMDTYPE_BC 0
#define OMAP_MMC_CMDTYPE_BCR 1
#define OMAP_MMC_CMDTYPE_AC 2
#define OMAP_MMC_CMDTYPE_ADTC 3
#endif
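A usage sketch (the host structure and its base field are hypothetical): the token-pasting accessors above turn a register name into its offset, so

	u16 stat = OMAP_MMC_READ(host->base, STAT);	/* __raw_readw(host->base + 0x10) */
	OMAP_MMC_WRITE(host->base, IE, 0);		/* __raw_writew(0, host->base + 0x14) */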


@ -37,12 +37,6 @@
#include "pxamci.h"
#ifdef CONFIG_MMC_DEBUG
#define DBG(x...) printk(KERN_DEBUG x)
#else
#define DBG(x...) do { } while (0)
#endif
#define DRIVER_NAME "pxa2xx-mci"
#define NR_SG 1
@ -206,7 +200,7 @@ static void pxamci_start_cmd(struct pxamci_host *host, struct mmc_command *cmd,
static void pxamci_finish_request(struct pxamci_host *host, struct mmc_request *mrq)
{
DBG("PXAMCI: request done\n");
pr_debug("PXAMCI: request done\n");
host->mrq = NULL;
host->cmd = NULL;
host->data = NULL;
@ -252,7 +246,7 @@ static int pxamci_cmd_done(struct pxamci_host *host, unsigned int stat)
if ((cmd->resp[0] & 0x80000000) == 0)
cmd->error = MMC_ERR_BADCRC;
} else {
DBG("ignoring CRC from command %d - *risky*\n",cmd->opcode);
pr_debug("ignoring CRC from command %d - *risky*\n",cmd->opcode);
}
#else
cmd->error = MMC_ERR_BADCRC;
@ -317,12 +311,12 @@ static irqreturn_t pxamci_irq(int irq, void *devid, struct pt_regs *regs)
ireg = readl(host->base + MMC_I_REG);
DBG("PXAMCI: irq %08x\n", ireg);
pr_debug("PXAMCI: irq %08x\n", ireg);
if (ireg) {
unsigned stat = readl(host->base + MMC_STAT);
DBG("PXAMCI: stat %08x\n", stat);
pr_debug("PXAMCI: stat %08x\n", stat);
if (ireg & END_CMD_RES)
handled |= pxamci_cmd_done(host, stat);
@ -376,9 +370,9 @@ static void pxamci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
struct pxamci_host *host = mmc_priv(mmc);
DBG("pxamci_set_ios: clock %u power %u vdd %u.%02u\n",
ios->clock, ios->power_mode, ios->vdd / 100,
ios->vdd % 100);
pr_debug("pxamci_set_ios: clock %u power %u vdd %u.%02u\n",
ios->clock, ios->power_mode, ios->vdd / 100,
ios->vdd % 100);
if (ios->clock) {
unsigned int clk = CLOCKRATE / ios->clock;
@ -405,8 +399,8 @@ static void pxamci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
host->cmdat |= CMDAT_INIT;
}
DBG("pxamci_set_ios: clkrt = %x cmdat = %x\n",
host->clkrt, host->cmdat);
pr_debug("pxamci_set_ios: clkrt = %x cmdat = %x\n",
host->clkrt, host->cmdat);
}
static struct mmc_host_ops pxamci_ops = {


@ -31,12 +31,8 @@
#define BUGMAIL "<sdhci-devel@list.drzeus.cx>"
#ifdef CONFIG_MMC_DEBUG
#define DBG(f, x...) \
printk(KERN_DEBUG DRIVER_NAME " [%s()]: " f, __func__,## x)
#else
#define DBG(f, x...) do { } while (0)
#endif
pr_debug(DRIVER_NAME " [%s()]: " f, __func__,## x)
static const struct pci_device_id pci_ids[] __devinitdata = {
/* handle any SD host controller */


@ -44,15 +44,10 @@
#define DRIVER_NAME "wbsd"
#define DRIVER_VERSION "1.5"
#ifdef CONFIG_MMC_DEBUG
#define DBG(x...) \
printk(KERN_DEBUG DRIVER_NAME ": " x)
pr_debug(DRIVER_NAME ": " x)
#define DBGF(f, x...) \
printk(KERN_DEBUG DRIVER_NAME " [%s()]: " f, __func__ , ##x)
#else
#define DBG(x...) do { } while (0)
#define DBGF(x...) do { } while (0)
#endif
pr_debug(DRIVER_NAME " [%s()]: " f, __func__ , ##x)
/*
* Device resources


@ -63,6 +63,33 @@ config SERIAL_8250_CONSOLE
If unsure, say N.
config SERIAL_8250_GSC
tristate
depends on SERIAL_8250 && GSC
default SERIAL_8250
config SERIAL_8250_PCI
tristate "8250/16550 PCI device support" if EMBEDDED
depends on SERIAL_8250 && PCI
default SERIAL_8250
help
This builds standard PCI serial support. You may be able to
disable this feature if you only need legacy serial support.
Saves about 9K.
config SERIAL_8250_PNP
tristate "8250/16550 PNP device support" if EMBEDDED
depends on SERIAL_8250 && PNP
default SERIAL_8250
help
This builds standard PNP serial support. You may be able to
disable this feature if you only need legacy serial support.
config SERIAL_8250_HP300
tristate
depends on SERIAL_8250 && HP300
default SERIAL_8250
config SERIAL_8250_CS
tristate "8250/16550 PCMCIA device support"
depends on PCMCIA && SERIAL_8250


@ -4,15 +4,13 @@
# $Id: Makefile,v 1.8 2002/07/21 21:32:30 rmk Exp $
#
serial-8250-y :=
serial-8250-$(CONFIG_PNP) += 8250_pnp.o
serial-8250-$(CONFIG_GSC) += 8250_gsc.o
serial-8250-$(CONFIG_PCI) += 8250_pci.o
serial-8250-$(CONFIG_HP300) += 8250_hp300.o
obj-$(CONFIG_SERIAL_CORE) += serial_core.o
obj-$(CONFIG_SERIAL_21285) += 21285.o
obj-$(CONFIG_SERIAL_8250) += 8250.o $(serial-8250-y)
obj-$(CONFIG_SERIAL_8250) += 8250.o
obj-$(CONFIG_SERIAL_8250_PNP) += 8250_pnp.o
obj-$(CONFIG_SERIAL_8250_GSC) += 8250_gsc.o
obj-$(CONFIG_SERIAL_8250_PCI) += 8250_pci.o
obj-$(CONFIG_SERIAL_8250_HP300) += 8250_hp300.o
obj-$(CONFIG_SERIAL_8250_CS) += serial_cs.o
obj-$(CONFIG_SERIAL_8250_ACORN) += 8250_acorn.o
obj-$(CONFIG_SERIAL_8250_CONSOLE) += 8250_early.o


@ -1,3 +1,21 @@
Version 1.42
------------
Fix slow oplock break when mounted to different servers at the same time and
the tids match and we try to find matching fid on wrong server.
Version 1.41
------------
Fix NTLMv2 security (can be enabled in /proc/fs/cifs) so customers can
configure stronger authentication. Fix sfu symlinks so they can
be followed (not just recognized). Fix wraparound of bcc on
read responses when buffer size over 64K and also fix wrap of
max smb buffer size when CIFSMaxBufSize over 64K. Fix oops in
cifs_user_read and cifs_readpages (when EAGAIN on send of smb
on socket is returned over and over). Add POSIX (advisory) byte range
locking support (requires server with newest CIFS UNIX Extensions
to the protocol implemented). Slow down negprot slightly in port 139
RFC1001 case to give session_init time on buggy servers.
Version 1.40
------------
Use fsuid (fsgid) more consistently instead of uid (gid). Improve performance


@ -3,4 +3,4 @@
#
obj-$(CONFIG_CIFS) += cifs.o
cifs-objs := cifsfs.o cifssmb.o cifs_debug.o connect.o dir.o file.o inode.o link.o misc.o netmisc.o smbdes.o smbencrypt.o transport.o asn1.o md4.o md5.o cifs_unicode.o nterr.o xattr.o cifsencrypt.o fcntl.o readdir.o ioctl.o
cifs-objs := cifsfs.o cifssmb.o cifs_debug.o connect.o dir.o file.o inode.o link.o misc.o netmisc.o smbdes.o smbencrypt.o transport.o asn1.o md4.o md5.o cifs_unicode.o nterr.o xattr.o cifsencrypt.o fcntl.o readdir.o ioctl.o ntlmssp.o


@ -422,6 +422,13 @@ A partial list of the supported mount options follows:
nomapchars Do not translate any of these seven characters (default).
nocase Request case insensitive path name matching (case
sensitive is the default if the server supports it).
posixpaths If CIFS Unix extensions are supported, attempt to
negotiate posix path name support which allows certain
characters forbidden in typical CIFS filenames, without
requiring remapping. (default)
noposixpaths If CIFS Unix extensions are supported, do not request
posix path name support (this may cause servers to
reject creating files with certain reserved characters).
nobrl Do not send byte range lock requests to the server.
This is necessary for certain applications that break
with cifs style mandatory byte range locks (and most


@ -1,7 +1,7 @@
/*
* fs/cifs/cifsencrypt.c
*
* Copyright (C) International Business Machines Corp., 2005
* Copyright (C) International Business Machines Corp., 2005,2006
* Author(s): Steve French (sfrench@us.ibm.com)
*
* This library is free software; you can redistribute it and/or modify
@ -36,7 +36,8 @@
extern void mdfour(unsigned char *out, unsigned char *in, int n);
extern void E_md4hash(const unsigned char *passwd, unsigned char *p16);
static int cifs_calculate_signature(const struct smb_hdr * cifs_pdu, const char * key, char * signature)
static int cifs_calculate_signature(const struct smb_hdr * cifs_pdu,
const char * key, char * signature)
{
struct MD5Context context;
@ -56,9 +57,6 @@ int cifs_sign_smb(struct smb_hdr * cifs_pdu, struct TCP_Server_Info * server,
int rc = 0;
char smb_signature[20];
/* BB remember to initialize sequence number elsewhere and initialize mac_signing key elsewhere BB */
/* BB remember to add code to save expected sequence number in midQ entry BB */
if((cifs_pdu == NULL) || (server == NULL))
return -EINVAL;
@ -85,20 +83,33 @@ int cifs_sign_smb(struct smb_hdr * cifs_pdu, struct TCP_Server_Info * server,
static int cifs_calc_signature2(const struct kvec * iov, int n_vec,
const char * key, char * signature)
{
struct MD5Context context;
struct MD5Context context;
int i;
if((iov == NULL) || (signature == NULL))
return -EINVAL;
if((iov == NULL) || (signature == NULL))
return -EINVAL;
MD5Init(&context);
MD5Update(&context,key,CIFS_SESSION_KEY_SIZE+16);
MD5Init(&context);
MD5Update(&context,key,CIFS_SESSION_KEY_SIZE+16);
for(i=0;i<n_vec;i++) {
if(iov[i].iov_base == NULL) {
cERROR(1,("null iovec entry"));
return -EIO;
} else if(iov[i].iov_len == 0)
break; /* bail out if we are sent nothing to sign */
/* The first entry includes a length field (which does not get
signed) that occupies the first 4 bytes before the header */
if(i==0) {
if (iov[0].iov_len <= 8 ) /* cmd field at offset 9 */
break; /* nothing to sign or corrupt header */
MD5Update(&context,iov[0].iov_base+4, iov[0].iov_len-4);
} else
MD5Update(&context,iov[i].iov_base, iov[i].iov_len);
}
/* MD5Update(&context,cifs_pdu->Protocol,cifs_pdu->smb_buf_length); */ /* BB FIXME BB */
MD5Final(signature,&context);
MD5Final(signature,&context);
return -EOPNOTSUPP;
/* return 0; */
return 0;
}
@ -259,4 +270,5 @@ void CalcNTLMv2_response(const struct cifsSesInfo * ses,char * v2_session_respon
/* hmac_md5_update(v2_session_response+16)client thing,8,&context); */ /* BB fix */
hmac_md5_final(v2_session_response,&context);
cifs_dump_mem("v2_sess_rsp: ", v2_session_response, 32); /* BB removeme BB */
}


@ -93,13 +93,10 @@ cifs_read_super(struct super_block *sb, void *data,
int rc = 0;
sb->s_flags |= MS_NODIRATIME; /* and probably even noatime */
sb->s_fs_info = kmalloc(sizeof(struct cifs_sb_info),GFP_KERNEL);
sb->s_fs_info = kzalloc(sizeof(struct cifs_sb_info),GFP_KERNEL);
cifs_sb = CIFS_SB(sb);
if(cifs_sb == NULL)
return -ENOMEM;
else
memset(cifs_sb,0,sizeof(struct cifs_sb_info));
rc = cifs_mount(sb, cifs_sb, data, devname);


@ -99,5 +99,5 @@ extern ssize_t cifs_getxattr(struct dentry *, const char *, void *, size_t);
extern ssize_t cifs_listxattr(struct dentry *, char *, size_t);
extern int cifs_ioctl (struct inode * inode, struct file * filep,
unsigned int command, unsigned long arg);
#define CIFS_VERSION "1.40"
#define CIFS_VERSION "1.42"
#endif /* _CIFSFS_H */


@ -1,7 +1,7 @@
/*
* fs/cifs/cifsglob.h
*
* Copyright (C) International Business Machines Corp., 2002,2005
* Copyright (C) International Business Machines Corp., 2002,2006
* Author(s): Steve French (sfrench@us.ibm.com)
*
* This library is free software; you can redistribute it and/or modify
@ -430,6 +430,15 @@ struct dir_notify_req {
#define CIFS_LARGE_BUFFER 2
#define CIFS_IOVEC 4 /* array of response buffers */
/* Type of session setup needed */
#define CIFS_PLAINTEXT 0
#define CIFS_LANMAN 1
#define CIFS_NTLM 2
#define CIFS_NTLMSSP_NEG 3
#define CIFS_NTLMSSP_AUTH 4
#define CIFS_SPNEGO_INIT 5
#define CIFS_SPNEGO_TARG 6
/*
*****************************************************************
* All constants go here


@ -859,7 +859,10 @@ typedef struct smb_com_lock_req {
LOCKING_ANDX_RANGE Locks[1];
} __attribute__((packed)) LOCK_REQ;
/* lock type */
#define CIFS_RDLCK 0
#define CIFS_WRLCK 1
#define CIFS_UNLCK 2
typedef struct cifs_posix_lock {
__le16 lock_type; /* 0 = Read, 1 = Write, 2 = Unlock */
__le16 lock_flags; /* 1 = Wait (only valid for setlock) */
@ -1786,7 +1789,13 @@ typedef struct {
#define CIFS_UNIX_POSIX_ACL_CAP 0x00000002 /* support getfacl/setfacl */
#define CIFS_UNIX_XATTR_CAP 0x00000004 /* support new namespace */
#define CIFS_UNIX_EXTATTR_CAP 0x00000008 /* support chattr/chflag */
#define CIFS_UNIX_POSIX_PATHNAMES_CAP 0x00000010 /* Use POSIX pathnames on the wire. */
#define CIFS_UNIX_POSIX_PATHNAMES_CAP 0x00000010 /* Allow POSIX path chars */
#ifdef CONFIG_CIFS_POSIX
#define CIFS_UNIX_CAP_MASK 0x0000001b
#else
#define CIFS_UNIX_CAP_MASK 0x00000013
#endif /* CONFIG_CIFS_POSIX */
#define CIFS_POSIX_EXTENSIONS 0x00000010 /* support for new QFSInfo */


@ -1,7 +1,7 @@
/*
* fs/cifs/cifsproto.h
*
* Copyright (c) International Business Machines Corp., 2002,2005
* Copyright (c) International Business Machines Corp., 2002,2006
* Author(s): Steve French (sfrench@us.ibm.com)
*
* This library is free software; you can redistribute it and/or modify
@ -64,6 +64,14 @@ extern int map_smb_to_linux_error(struct smb_hdr *smb);
extern void header_assemble(struct smb_hdr *, char /* command */ ,
const struct cifsTconInfo *, int /* length of
fixed section (word count) in two byte units */);
#ifdef CONFIG_CIFS_EXPERIMENTAL
extern int small_smb_init_no_tc(const int smb_cmd, const int wct,
struct cifsSesInfo *ses,
void ** request_buf);
extern int CIFS_SessSetup(unsigned int xid, struct cifsSesInfo *ses,
const int stage, int * pNTLMv2_flg,
const struct nls_table *nls_cp);
#endif
extern __u16 GetNextMid(struct TCP_Server_Info *server);
extern struct oplock_q_entry * AllocOplockQEntry(struct inode *, u16,
struct cifsTconInfo *);
@ -257,7 +265,10 @@ extern int CIFSSMBLock(const int xid, struct cifsTconInfo *tcon,
const __u64 offset, const __u32 numUnlock,
const __u32 numLock, const __u8 lockType,
const int waitFlag);
extern int CIFSSMBPosixLock(const int xid, struct cifsTconInfo *tcon,
const __u16 smb_file_id, const int get_flag,
const __u64 len, const __u64 offset,
const __u16 lock_type, const int waitFlag);
extern int CIFSSMBTDis(const int xid, struct cifsTconInfo *tcon);
extern int CIFSSMBLogoff(const int xid, struct cifsSesInfo *ses);


@ -1,7 +1,7 @@
/*
* fs/cifs/cifssmb.c
*
* Copyright (C) International Business Machines Corp., 2002,2005
* Copyright (C) International Business Machines Corp., 2002,2006
* Author(s): Steve French (sfrench@us.ibm.com)
*
* Contains the routines for constructing the SMB PDUs themselves
@ -186,7 +186,35 @@ small_smb_init(int smb_command, int wct, struct cifsTconInfo *tcon,
cifs_stats_inc(&tcon->num_smbs_sent);
return rc;
}
}
#ifdef CONFIG_CIFS_EXPERIMENTAL
int
small_smb_init_no_tc(const int smb_command, const int wct,
struct cifsSesInfo *ses, void **request_buf)
{
int rc;
struct smb_hdr * buffer;
rc = small_smb_init(smb_command, wct, NULL, request_buf);
if(rc)
return rc;
buffer = (struct smb_hdr *)*request_buf;
buffer->Mid = GetNextMid(ses->server);
if (ses->capabilities & CAP_UNICODE)
buffer->Flags2 |= SMBFLG2_UNICODE;
if (ses->capabilities & CAP_STATUS32)
buffer->Flags2 |= SMBFLG2_ERR_STATUS;
/* uid, tid can stay at zero as set in header assemble */
/* BB add support for turning on the signing when
this function is used after 1st of session setup requests */
return rc;
}
#endif /* CONFIG_CIFS_EXPERIMENTAL */
/* If the return code is zero, this function must fill in request_buf pointer */
static int
@ -1042,7 +1070,7 @@ CIFSSMBRead(const int xid, struct cifsTconInfo *tcon,
}
}
cifs_small_buf_release(pSMB);
/* cifs_small_buf_release(pSMB); */ /* Freed earlier now in SendReceive2 */
if(*buf) {
if(resp_buf_type == CIFS_SMALL_BUFFER)
cifs_small_buf_release(iov[0].iov_base);
@ -1246,7 +1274,7 @@ CIFSSMBWrite2(const int xid, struct cifsTconInfo *tcon,
*nbytes += le16_to_cpu(pSMBr->Count);
}
cifs_small_buf_release(pSMB);
/* cifs_small_buf_release(pSMB); */ /* Freed earlier now in SendReceive2 */
if(resp_buf_type == CIFS_SMALL_BUFFER)
cifs_small_buf_release(iov[0].iov_base);
else if(resp_buf_type == CIFS_LARGE_BUFFER)
@ -1324,6 +1352,85 @@ CIFSSMBLock(const int xid, struct cifsTconInfo *tcon,
return rc;
}
int
CIFSSMBPosixLock(const int xid, struct cifsTconInfo *tcon,
const __u16 smb_file_id, const int get_flag, const __u64 len,
const __u64 lkoffset, const __u16 lock_type, const int waitFlag)
{
struct smb_com_transaction2_sfi_req *pSMB = NULL;
struct smb_com_transaction2_sfi_rsp *pSMBr = NULL;
char *data_offset;
struct cifs_posix_lock *parm_data;
int rc = 0;
int bytes_returned = 0;
__u16 params, param_offset, offset, byte_count, count;
cFYI(1, ("Posix Lock"));
rc = small_smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB);
if (rc)
return rc;
pSMBr = (struct smb_com_transaction2_sfi_rsp *)pSMB;
params = 6;
pSMB->MaxSetupCount = 0;
pSMB->Reserved = 0;
pSMB->Flags = 0;
pSMB->Timeout = 0;
pSMB->Reserved2 = 0;
param_offset = offsetof(struct smb_com_transaction2_sfi_req, Fid) - 4;
offset = param_offset + params;
data_offset = (char *) (&pSMB->hdr.Protocol) + offset;
count = sizeof(struct cifs_posix_lock);
pSMB->MaxParameterCount = cpu_to_le16(2);
pSMB->MaxDataCount = cpu_to_le16(1000); /* BB find max SMB PDU from sess */
pSMB->SetupCount = 1;
pSMB->Reserved3 = 0;
if(get_flag)
pSMB->SubCommand = cpu_to_le16(TRANS2_QUERY_FILE_INFORMATION);
else
pSMB->SubCommand = cpu_to_le16(TRANS2_SET_FILE_INFORMATION);
byte_count = 3 /* pad */ + params + count;
pSMB->DataCount = cpu_to_le16(count);
pSMB->ParameterCount = cpu_to_le16(params);
pSMB->TotalDataCount = pSMB->DataCount;
pSMB->TotalParameterCount = pSMB->ParameterCount;
pSMB->ParameterOffset = cpu_to_le16(param_offset);
parm_data = (struct cifs_posix_lock *)
(((char *) &pSMB->hdr.Protocol) + offset);
parm_data->lock_type = cpu_to_le16(lock_type);
if(waitFlag)
parm_data->lock_flags = 1;
parm_data->pid = cpu_to_le32(current->tgid);
parm_data->start = lkoffset;
parm_data->length = len; /* normalize negative numbers */
pSMB->DataOffset = cpu_to_le16(offset);
pSMB->Fid = smb_file_id;
pSMB->InformationLevel = cpu_to_le16(SMB_SET_POSIX_LOCK);
pSMB->Reserved4 = 0;
pSMB->hdr.smb_buf_length += byte_count;
pSMB->ByteCount = cpu_to_le16(byte_count);
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
if (rc) {
cFYI(1, ("Send error in Posix Lock = %d", rc));
}
if (pSMB)
cifs_small_buf_release(pSMB);
/* Note: On -EAGAIN error only caller can retry on handle based calls
since file handle passed in no longer valid */
return rc;
}
int
CIFSSMBClose(const int xid, struct cifsTconInfo *tcon, int smb_file_id)
{
@ -2578,7 +2685,7 @@ qsec_out:
cifs_small_buf_release(iov[0].iov_base);
else if(buf_type == CIFS_LARGE_BUFFER)
cifs_buf_release(iov[0].iov_base);
cifs_small_buf_release(pSMB);
/* cifs_small_buf_release(pSMB); */ /* Freed earlier now in SendReceive2 */
return rc;
}
@ -2954,7 +3061,8 @@ findFirstRetry:
pSMB->TotalParameterCount = cpu_to_le16(params);
pSMB->ParameterCount = pSMB->TotalParameterCount;
pSMB->ParameterOffset = cpu_to_le16(
offsetof(struct smb_com_transaction2_ffirst_req, SearchAttributes) - 4);
offsetof(struct smb_com_transaction2_ffirst_req, SearchAttributes)
- 4);
pSMB->DataCount = 0;
pSMB->DataOffset = 0;
pSMB->SetupCount = 1; /* one byte, no need to make endian neutral */
@ -2977,12 +3085,12 @@ findFirstRetry:
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
cifs_stats_inc(&tcon->num_ffirst);
if (rc) {/* BB add logic to retry regular search if Unix search rejected unexpectedly by server */
if (rc) {/* BB add logic to retry regular search if Unix search
rejected unexpectedly by server */
/* BB Add code to handle unsupported level rc */
cFYI(1, ("Error in FindFirst = %d", rc));
if (pSMB)
cifs_buf_release(pSMB);
cifs_buf_release(pSMB);
/* BB eventually could optimize out free and realloc of buf */
/* for this case */
@ -2998,6 +3106,7 @@ findFirstRetry:
psrch_inf->unicode = FALSE;
psrch_inf->ntwrk_buf_start = (char *)pSMBr;
psrch_inf->smallBuf = 0;
psrch_inf->srch_entries_start =
(char *) &pSMBr->hdr.Protocol +
le16_to_cpu(pSMBr->t2.DataOffset);
@ -3118,9 +3227,14 @@ int CIFSFindNext(const int xid, struct cifsTconInfo *tcon,
parms = (T2_FNEXT_RSP_PARMS *)response_data;
response_data = (char *)&pSMBr->hdr.Protocol +
le16_to_cpu(pSMBr->t2.DataOffset);
cifs_buf_release(psrch_inf->ntwrk_buf_start);
if(psrch_inf->smallBuf)
cifs_small_buf_release(
psrch_inf->ntwrk_buf_start);
else
cifs_buf_release(psrch_inf->ntwrk_buf_start);
psrch_inf->srch_entries_start = response_data;
psrch_inf->ntwrk_buf_start = (char *)pSMB;
psrch_inf->smallBuf = 0;
if(parms->EndofSearch)
psrch_inf->endOfSearch = TRUE;
else
@ -3834,6 +3948,7 @@ CIFSSMBSetFSUnixInfo(const int xid, struct cifsTconInfo *tcon, __u64 cap)
cFYI(1, ("In SETFSUnixInfo"));
SETFSUnixRetry:
/* BB switch to small buf init to save memory */
rc = smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB,
(void **) &pSMBr);
if (rc)

View File

@ -1,7 +1,7 @@
/*
* fs/cifs/connect.c
*
* Copyright (C) International Business Machines Corp., 2002,2005
* Copyright (C) International Business Machines Corp., 2002,2006
* Author(s): Steve French (sfrench@us.ibm.com)
*
* This library is free software; you can redistribute it and/or modify
@ -564,7 +564,7 @@ cifs_demultiplex_thread(struct TCP_Server_Info *server)
dump_smb(smb_buffer, length);
if (checkSMB (smb_buffer, smb_buffer->Mid, total_read+4)) {
if (checkSMB(smb_buffer, smb_buffer->Mid, total_read+4)) {
cifs_dump_mem("Bad SMB: ", smb_buffer, 48);
continue;
}
@ -1476,6 +1476,14 @@ ipv4_connect(struct sockaddr_in *psin_server, struct socket **csocket,
rc = smb_send(*csocket, smb_buf, 0x44,
(struct sockaddr *)psin_server);
kfree(ses_init_buf);
msleep(1); /* RFC1001 layer in at least one server
requires very short break before negprot
presumably because not expecting negprot
to follow so fast. This is a simple
solution that works without
complicating the code and causes no
significant slowing down on mount
for everyone else */
}
/* else the negprot may still work without this
even though malloc failed */
@ -1920,27 +1928,34 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb,
cifs_sb->tcon = tcon;
tcon->ses = pSesInfo;
/* do not care if following two calls succeed - informational only */
/* do not care if following two calls succeed - informational */
CIFSSMBQFSDeviceInfo(xid, tcon);
CIFSSMBQFSAttributeInfo(xid, tcon);
if (tcon->ses->capabilities & CAP_UNIX) {
if(!CIFSSMBQFSUnixInfo(xid, tcon)) {
if(!volume_info.no_psx_acl) {
if(CIFS_UNIX_POSIX_ACL_CAP &
le64_to_cpu(tcon->fsUnixInfo.Capability))
cFYI(1,("server negotiated posix acl support"));
sb->s_flags |= MS_POSIXACL;
__u64 cap =
le64_to_cpu(tcon->fsUnixInfo.Capability);
cap &= CIFS_UNIX_CAP_MASK;
if(volume_info.no_psx_acl)
cap &= ~CIFS_UNIX_POSIX_ACL_CAP;
else if(CIFS_UNIX_POSIX_ACL_CAP & cap) {
cFYI(1,("negotiated posix acl support"));
sb->s_flags |= MS_POSIXACL;
}
/* Try and negotiate POSIX pathnames if we can. */
if (volume_info.posix_paths && (CIFS_UNIX_POSIX_PATHNAMES_CAP &
le64_to_cpu(tcon->fsUnixInfo.Capability))) {
if (!CIFSSMBSetFSUnixInfo(xid, tcon, CIFS_UNIX_POSIX_PATHNAMES_CAP)) {
cFYI(1,("negotiated posix pathnames support"));
cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_POSIX_PATHS;
} else {
cFYI(1,("posix pathnames support requested but not supported"));
}
if(volume_info.posix_paths == 0)
cap &= ~CIFS_UNIX_POSIX_PATHNAMES_CAP;
else if(cap & CIFS_UNIX_POSIX_PATHNAMES_CAP) {
cFYI(1,("negotiate posix pathnames"));
cifs_sb->mnt_cifs_flags |=
CIFS_MOUNT_POSIX_PATHS;
}
cFYI(1,("Negotiate caps 0x%x",(int)cap));
if (CIFSSMBSetFSUnixInfo(xid, tcon, cap)) {
cFYI(1,("setting capabilities failed"));
}
}
}
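The negotiation above reads as: start from the server-advertised Unix capabilities, mask to the bits this client understands, drop bits disabled by mount options, then report the result back with CIFSSMBSetFSUnixInfo(). A hypothetical condensation of that logic (a restatement for readability, not additional patch code):

/* Hypothetical helper restating the capability masking done in cifs_mount(). */
static __u64 cifs_unix_caps_wanted(__u64 server_caps, int no_psx_acl,
                                   int posix_paths)
{
        __u64 cap = server_caps & CIFS_UNIX_CAP_MASK;

        if (no_psx_acl)
                cap &= ~CIFS_UNIX_POSIX_ACL_CAP;
        if (posix_paths == 0)
                cap &= ~CIFS_UNIX_POSIX_PATHNAMES_CAP;
        return cap;
}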
@ -2278,6 +2293,8 @@ CIFSSpnegoSessSetup(unsigned int xid, struct cifsSesInfo *ses,
smb_buffer->Mid = GetNextMid(ses->server);
pSMB->req.hdr.Flags2 |= SMBFLG2_EXT_SEC;
pSMB->req.AndXCommand = 0xFF;
if(ses->server->maxBuf > 64*1024)
ses->server->maxBuf = (64*1023);
pSMB->req.MaxBufferSize = cpu_to_le16(ses->server->maxBuf);
pSMB->req.MaxMpxCount = cpu_to_le16(ses->server->maxReq);
@ -2525,7 +2542,7 @@ CIFSNTLMSSPNegotiateSessSetup(unsigned int xid,
__u32 negotiate_flags, capabilities;
__u16 count;
cFYI(1, ("In NTLMSSP sesssetup (negotiate) "));
cFYI(1, ("In NTLMSSP sesssetup (negotiate)"));
if(ses == NULL)
return -EINVAL;
domain = ses->domainName;
@ -2575,7 +2592,8 @@ CIFSNTLMSSPNegotiateSessSetup(unsigned int xid,
SecurityBlob->MessageType = NtLmNegotiate;
negotiate_flags =
NTLMSSP_NEGOTIATE_UNICODE | NTLMSSP_NEGOTIATE_OEM |
NTLMSSP_REQUEST_TARGET | NTLMSSP_NEGOTIATE_NTLM | 0x80000000 |
NTLMSSP_REQUEST_TARGET | NTLMSSP_NEGOTIATE_NTLM |
NTLMSSP_NEGOTIATE_56 |
/* NTLMSSP_NEGOTIATE_ALWAYS_SIGN | */ NTLMSSP_NEGOTIATE_128;
if(sign_CIFS_PDUs)
negotiate_flags |= NTLMSSP_NEGOTIATE_SIGN;
@ -2588,26 +2606,11 @@ CIFSNTLMSSPNegotiateSessSetup(unsigned int xid,
SecurityBlob->WorkstationName.Length = 0;
SecurityBlob->WorkstationName.MaximumLength = 0;
if (domain == NULL) {
SecurityBlob->DomainName.Buffer = 0;
SecurityBlob->DomainName.Length = 0;
SecurityBlob->DomainName.MaximumLength = 0;
} else {
__u16 len;
negotiate_flags |= NTLMSSP_NEGOTIATE_DOMAIN_SUPPLIED;
strncpy(bcc_ptr, domain, 63);
len = strnlen(domain, 64);
SecurityBlob->DomainName.MaximumLength =
cpu_to_le16(len);
SecurityBlob->DomainName.Buffer =
cpu_to_le32((long) &SecurityBlob->
DomainString -
(long) &SecurityBlob->Signature);
bcc_ptr += len;
SecurityBlobLength += len;
SecurityBlob->DomainName.Length =
cpu_to_le16(len);
}
/* Domain not sent on first Sesssetup in NTLMSSP, instead it is sent
along with username on auth request (ie the response to challenge) */
SecurityBlob->DomainName.Buffer = 0;
SecurityBlob->DomainName.Length = 0;
SecurityBlob->DomainName.MaximumLength = 0;
if (ses->capabilities & CAP_UNICODE) {
if ((long) bcc_ptr % 2) {
*bcc_ptr = 0;
@ -2677,7 +2680,7 @@ CIFSNTLMSSPNegotiateSessSetup(unsigned int xid,
SecurityBlob2->MessageType));
} else if (ses) {
ses->Suid = smb_buffer_response->Uid; /* UID left in le format */
cFYI(1, ("UID = %d ", ses->Suid));
cFYI(1, ("UID = %d", ses->Suid));
if ((pSMBr->resp.hdr.WordCount == 3)
|| ((pSMBr->resp.hdr.WordCount == 4)
&& (blob_len <
@ -2685,17 +2688,17 @@ CIFSNTLMSSPNegotiateSessSetup(unsigned int xid,
if (pSMBr->resp.hdr.WordCount == 4) {
bcc_ptr += blob_len;
cFYI(1,
("Security Blob Length %d ",
cFYI(1, ("Security Blob Length %d",
blob_len));
}
cFYI(1, ("NTLMSSP Challenge rcvd "));
cFYI(1, ("NTLMSSP Challenge rcvd"));
memcpy(ses->server->cryptKey,
SecurityBlob2->Challenge,
CIFS_CRYPTO_KEY_SIZE);
if(SecurityBlob2->NegotiateFlags & cpu_to_le32(NTLMSSP_NEGOTIATE_NTLMV2))
if(SecurityBlob2->NegotiateFlags &
cpu_to_le32(NTLMSSP_NEGOTIATE_NTLMV2))
*pNTLMv2_flag = TRUE;
if((SecurityBlob2->NegotiateFlags &
@ -2818,7 +2821,7 @@ CIFSNTLMSSPNegotiateSessSetup(unsigned int xid,
bcc_ptr++;
} else
cFYI(1,
("Variable field of length %d extends beyond end of smb ",
("Variable field of length %d extends beyond end of smb",
len));
}
} else {
@ -2830,7 +2833,7 @@ CIFSNTLMSSPNegotiateSessSetup(unsigned int xid,
}
} else {
cERROR(1,
(" Invalid Word count %d: ",
(" Invalid Word count %d:",
smb_buffer_response->WordCount));
rc = -EIO;
}
@ -3447,7 +3450,7 @@ int cifs_setup_session(unsigned int xid, struct cifsSesInfo *pSesInfo,
if (extended_security
&& (pSesInfo->capabilities & CAP_EXTENDED_SECURITY)
&& (pSesInfo->server->secType == NTLMSSP)) {
cFYI(1, ("New style sesssetup "));
cFYI(1, ("New style sesssetup"));
rc = CIFSSpnegoSessSetup(xid, pSesInfo,
NULL /* security blob */,
0 /* blob length */,
@ -3455,7 +3458,7 @@ int cifs_setup_session(unsigned int xid, struct cifsSesInfo *pSesInfo,
} else if (extended_security
&& (pSesInfo->capabilities & CAP_EXTENDED_SECURITY)
&& (pSesInfo->server->secType == RawNTLMSSP)) {
cFYI(1, ("NTLMSSP sesssetup "));
cFYI(1, ("NTLMSSP sesssetup"));
rc = CIFSNTLMSSPNegotiateSessSetup(xid,
pSesInfo,
&ntlmv2_flag,

View File

@ -48,13 +48,14 @@ build_path_from_dentry(struct dentry *direntry)
struct dentry *temp;
int namelen = 0;
char *full_path;
char dirsep = CIFS_DIR_SEP(CIFS_SB(direntry->d_sb));
char dirsep;
if(direntry == NULL)
return NULL; /* not much we can do if dentry is freed and
we need to reopen the file after it was closed implicitly
when the server crashed */
dirsep = CIFS_DIR_SEP(CIFS_SB(direntry->d_sb));
cifs_bp_rename_retry:
for (temp = direntry; !IS_ROOT(temp);) {
namelen += (1 + temp->d_name.len);
@ -255,12 +256,10 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode,
CIFSSMBClose(xid, pTcon, fileHandle);
} else if(newinode) {
pCifsFile =
kmalloc(sizeof (struct cifsFileInfo), GFP_KERNEL);
kzalloc(sizeof (struct cifsFileInfo), GFP_KERNEL);
if(pCifsFile == NULL)
goto cifs_create_out;
memset((char *)pCifsFile, 0,
sizeof (struct cifsFileInfo));
pCifsFile->netfid = fileHandle;
pCifsFile->pid = current->tgid;
pCifsFile->pInode = newinode;

View File

@ -555,7 +555,10 @@ int cifs_closedir(struct inode *inode, struct file *file)
if (ptmp) {
cFYI(1, ("closedir free smb buf in srch struct"));
pCFileStruct->srch_inf.ntwrk_buf_start = NULL;
cifs_buf_release(ptmp);
if(pCFileStruct->srch_inf.smallBuf)
cifs_small_buf_release(ptmp);
else
cifs_buf_release(ptmp);
}
ptmp = pCFileStruct->search_resume_name;
if (ptmp) {
@ -574,13 +577,14 @@ int cifs_closedir(struct inode *inode, struct file *file)
int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
{
int rc, xid;
__u32 lockType = LOCKING_ANDX_LARGE_FILES;
__u32 numLock = 0;
__u32 numUnlock = 0;
__u64 length;
int wait_flag = FALSE;
struct cifs_sb_info *cifs_sb;
struct cifsTconInfo *pTcon;
__u16 netfid;
__u8 lockType = LOCKING_ANDX_LARGE_FILES;
length = 1 + pfLock->fl_end - pfLock->fl_start;
rc = -EACCES;
@ -592,11 +596,11 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
pfLock->fl_end));
if (pfLock->fl_flags & FL_POSIX)
cFYI(1, ("Posix "));
cFYI(1, ("Posix"));
if (pfLock->fl_flags & FL_FLOCK)
cFYI(1, ("Flock "));
cFYI(1, ("Flock"));
if (pfLock->fl_flags & FL_SLEEP) {
cFYI(1, ("Blocking lock "));
cFYI(1, ("Blocking lock"));
wait_flag = TRUE;
}
if (pfLock->fl_flags & FL_ACCESS)
@ -612,21 +616,23 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
cFYI(1, ("F_WRLCK "));
numLock = 1;
} else if (pfLock->fl_type == F_UNLCK) {
cFYI(1, ("F_UNLCK "));
cFYI(1, ("F_UNLCK"));
numUnlock = 1;
/* Check if unlock includes more than
one lock range */
} else if (pfLock->fl_type == F_RDLCK) {
cFYI(1, ("F_RDLCK "));
cFYI(1, ("F_RDLCK"));
lockType |= LOCKING_ANDX_SHARED_LOCK;
numLock = 1;
} else if (pfLock->fl_type == F_EXLCK) {
cFYI(1, ("F_EXLCK "));
cFYI(1, ("F_EXLCK"));
numLock = 1;
} else if (pfLock->fl_type == F_SHLCK) {
cFYI(1, ("F_SHLCK "));
cFYI(1, ("F_SHLCK"));
lockType |= LOCKING_ANDX_SHARED_LOCK;
numLock = 1;
} else
cFYI(1, ("Unknown type of lock "));
cFYI(1, ("Unknown type of lock"));
cifs_sb = CIFS_SB(file->f_dentry->d_sb);
pTcon = cifs_sb->tcon;
@ -635,27 +641,41 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
FreeXid(xid);
return -EBADF;
}
netfid = ((struct cifsFileInfo *)file->private_data)->netfid;
/* BB add code here to normalize offset and length to
account for negative length which we can not accept over the
wire */
if (IS_GETLK(cmd)) {
rc = CIFSSMBLock(xid, pTcon,
((struct cifsFileInfo *)file->
private_data)->netfid,
length,
pfLock->fl_start, 0, 1, lockType,
0 /* wait flag */ );
if(experimEnabled &&
(cifs_sb->tcon->ses->capabilities & CAP_UNIX) &&
(CIFS_UNIX_FCNTL_CAP &
le64_to_cpu(cifs_sb->tcon->fsUnixInfo.Capability))) {
int posix_lock_type;
if(lockType & LOCKING_ANDX_SHARED_LOCK)
posix_lock_type = CIFS_RDLCK;
else
posix_lock_type = CIFS_WRLCK;
rc = CIFSSMBPosixLock(xid, pTcon, netfid, 1 /* get */,
length, pfLock->fl_start,
posix_lock_type, wait_flag);
FreeXid(xid);
return rc;
}
/* BB we could chain these into one lock request BB */
rc = CIFSSMBLock(xid, pTcon, netfid, length, pfLock->fl_start,
0, 1, lockType, 0 /* wait flag */ );
if (rc == 0) {
rc = CIFSSMBLock(xid, pTcon,
((struct cifsFileInfo *) file->
private_data)->netfid,
length,
rc = CIFSSMBLock(xid, pTcon, netfid, length,
pfLock->fl_start, 1 /* numUnlock */ ,
0 /* numLock */ , lockType,
0 /* wait flag */ );
pfLock->fl_type = F_UNLCK;
if (rc != 0)
cERROR(1, ("Error unlocking previously locked "
"range %d during test of lock ",
rc));
"range %d during test of lock", rc));
rc = 0;
} else {
@ -667,12 +687,30 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock)
FreeXid(xid);
return rc;
}
rc = CIFSSMBLock(xid, pTcon,
((struct cifsFileInfo *) file->private_data)->
netfid, length,
pfLock->fl_start, numUnlock, numLock, lockType,
wait_flag);
if (experimEnabled &&
(cifs_sb->tcon->ses->capabilities & CAP_UNIX) &&
(CIFS_UNIX_FCNTL_CAP &
le64_to_cpu(cifs_sb->tcon->fsUnixInfo.Capability))) {
int posix_lock_type;
if(lockType & LOCKING_ANDX_SHARED_LOCK)
posix_lock_type = CIFS_RDLCK;
else
posix_lock_type = CIFS_WRLCK;
if(numUnlock == 1)
posix_lock_type = CIFS_UNLCK;
else if(numLock == 0) {
/* if no lock or unlock then nothing
to do since we do not know what it is */
FreeXid(xid);
return -EOPNOTSUPP;
}
rc = CIFSSMBPosixLock(xid, pTcon, netfid, 0 /* set */,
length, pfLock->fl_start,
posix_lock_type, wait_flag);
} else
rc = CIFSSMBLock(xid, pTcon, netfid, length, pfLock->fl_start,
numUnlock, numLock, lockType, wait_flag);
if (pfLock->fl_flags & FL_POSIX)
posix_lock_file_wait(file, pfLock);
FreeXid(xid);
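The fcntl-to-CIFS POSIX lock-type mapping occurs in both the get (F_GETLK) and set paths above; condensed into a hypothetical helper for readability (the unsupported no-lock/no-unlock case handled inline above is omitted here):

/* Hypothetical summary of the lock-type mapping used in cifs_lock(). */
static int cifs_posix_lock_type(__u8 smb_lock_type, __u32 num_unlock)
{
        if (num_unlock == 1)
                return CIFS_UNLCK;
        if (smb_lock_type & LOCKING_ANDX_SHARED_LOCK)
                return CIFS_RDLCK;
        return CIFS_WRLCK;
}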

View File

@ -565,11 +565,14 @@ int cifs_unlink(struct inode *inode, struct dentry *direntry)
struct cifsInodeInfo *cifsInode;
FILE_BASIC_INFO *pinfo_buf;
cFYI(1, ("cifs_unlink, inode = 0x%p with ", inode));
cFYI(1, ("cifs_unlink, inode = 0x%p", inode));
xid = GetXid();
cifs_sb = CIFS_SB(inode->i_sb);
if(inode)
cifs_sb = CIFS_SB(inode->i_sb);
else
cifs_sb = CIFS_SB(direntry->d_sb);
pTcon = cifs_sb->tcon;
/* Unlink can be called from rename so we can not grab the sem here
@ -609,9 +612,8 @@ int cifs_unlink(struct inode *inode, struct dentry *direntry)
}
} else if (rc == -EACCES) {
/* try only if r/o attribute set in local lookup data? */
pinfo_buf = kmalloc(sizeof(FILE_BASIC_INFO), GFP_KERNEL);
pinfo_buf = kzalloc(sizeof(FILE_BASIC_INFO), GFP_KERNEL);
if (pinfo_buf) {
memset(pinfo_buf, 0, sizeof(FILE_BASIC_INFO));
/* ATTRS set to normal clears r/o bit */
pinfo_buf->Attributes = cpu_to_le32(ATTR_NORMAL);
if (!(pTcon->ses->flags & CIFS_SES_NT4))
@ -693,9 +695,11 @@ int cifs_unlink(struct inode *inode, struct dentry *direntry)
when needed */
direntry->d_inode->i_ctime = current_fs_time(inode->i_sb);
}
inode->i_ctime = inode->i_mtime = current_fs_time(inode->i_sb);
cifsInode = CIFS_I(inode);
cifsInode->time = 0; /* force revalidate of dir as well */
if(inode) {
inode->i_ctime = inode->i_mtime = current_fs_time(inode->i_sb);
cifsInode = CIFS_I(inode);
cifsInode->time = 0; /* force revalidate of dir as well */
}
kfree(full_path);
FreeXid(xid);
@ -1167,7 +1171,7 @@ int cifs_setattr(struct dentry *direntry, struct iattr *attrs)
nfid, npid, FALSE);
atomic_dec(&open_file->wrtPending);
cFYI(1,("SetFSize for attrs rc = %d", rc));
if(rc == -EINVAL) {
if((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
int bytes_written;
rc = CIFSSMBWrite(xid, pTcon,
nfid, 0, attrs->ia_size,
@ -1189,7 +1193,7 @@ int cifs_setattr(struct dentry *direntry, struct iattr *attrs)
cifs_sb->mnt_cifs_flags &
CIFS_MOUNT_MAP_SPECIAL_CHR);
cFYI(1, ("SetEOF by path (setattrs) rc = %d", rc));
if(rc == -EINVAL) {
if((rc == -EINVAL) || (rc == -EOPNOTSUPP)) {
__u16 netfid;
int oplock = FALSE;

View File

@ -67,7 +67,7 @@ cifs_hardlink(struct dentry *old_file, struct inode *inode,
cifs_sb_target->local_nls,
cifs_sb_target->mnt_cifs_flags &
CIFS_MOUNT_MAP_SPECIAL_CHR);
if(rc == -EIO)
if((rc == -EIO) || (rc == -EINVAL))
rc = -EOPNOTSUPP;
}

View File

@ -72,10 +72,9 @@ sesInfoAlloc(void)
struct cifsSesInfo *ret_buf;
ret_buf =
(struct cifsSesInfo *) kmalloc(sizeof (struct cifsSesInfo),
(struct cifsSesInfo *) kzalloc(sizeof (struct cifsSesInfo),
GFP_KERNEL);
if (ret_buf) {
memset(ret_buf, 0, sizeof (struct cifsSesInfo));
write_lock(&GlobalSMBSeslock);
atomic_inc(&sesInfoAllocCount);
ret_buf->status = CifsNew;
@ -110,10 +109,9 @@ tconInfoAlloc(void)
{
struct cifsTconInfo *ret_buf;
ret_buf =
(struct cifsTconInfo *) kmalloc(sizeof (struct cifsTconInfo),
(struct cifsTconInfo *) kzalloc(sizeof (struct cifsTconInfo),
GFP_KERNEL);
if (ret_buf) {
memset(ret_buf, 0, sizeof (struct cifsTconInfo));
write_lock(&GlobalSMBSeslock);
atomic_inc(&tconInfoAllocCount);
list_add(&ret_buf->cifsConnectionList,
@ -423,9 +421,7 @@ checkSMB(struct smb_hdr *smb, __u16 mid, int length)
{
__u32 len = smb->smb_buf_length;
__u32 clc_len; /* calculated length */
cFYI(0,
("Entering checkSMB with Length: %x, smb_buf_length: %x",
length, len));
cFYI(0, ("checkSMB Length: 0x%x, smb_buf_length: 0x%x", length, len));
if (((unsigned int)length < 2 + sizeof (struct smb_hdr)) ||
(len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4)) {
if ((unsigned int)length < 2 + sizeof (struct smb_hdr)) {
@ -433,29 +429,36 @@ checkSMB(struct smb_hdr *smb, __u16 mid, int length)
sizeof (struct smb_hdr) - 1)
&& (smb->Status.CifsError != 0)) {
smb->WordCount = 0;
return 0; /* some error cases do not return wct and bcc */
/* some error cases do not return wct and bcc */
return 0;
} else {
cERROR(1, ("Length less than smb header size"));
}
}
if (len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4)
cERROR(1,
("smb_buf_length greater than MaxBufSize"));
cERROR(1,
("bad smb detected. Illegal length. mid=%d",
smb->Mid));
cERROR(1, ("smb length greater than MaxBufSize, mid=%d",
smb->Mid));
return 1;
}
if (checkSMBhdr(smb, mid))
return 1;
clc_len = smbCalcSize_LE(smb);
if ((4 + len != clc_len)
|| (4 + len != (unsigned int)length)) {
cERROR(1, ("Calculated size 0x%x vs actual length 0x%x",
clc_len, 4 + len));
cERROR(1, ("bad smb size detected for Mid=%d", smb->Mid));
if(4 + len != (unsigned int)length) {
cERROR(1, ("Length read does not match RFC1001 length %d",len));
return 1;
}
if (4 + len != clc_len) {
/* check if bcc wrapped around for large read responses */
if((len > 64 * 1024) && (len > clc_len)) {
/* check if lengths match mod 64K */
if(((4 + len) & 0xFFFF) == (clc_len & 0xFFFF))
return 0; /* bcc wrapped */
}
cFYI(1, ("Calculated size %d vs length %d mismatch for mid %d",
clc_len, 4 + len, smb->Mid));
/* Windows XP can return a few bytes too much, presumably
an illegal pad, at the end of byte range lock responses
so we allow for that three byte pad, as long as actual
@ -469,8 +472,11 @@ checkSMB(struct smb_hdr *smb, __u16 mid, int length)
wct and bcc to minimum size and drop the t2 parms and data */
if((4+len > clc_len) && (len <= clc_len + 512))
return 0;
else
else {
cERROR(1, ("RFC1001 size %d bigger than SMB for Mid=%d",
len, smb->Mid));
return 1;
}
}
return 0;
}
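A quick worked example of the new wrap-around allowance (numbers chosen for illustration only): a large read response might carry an RFC1001 length of len = 0x11000 (68 KB), while the 16-bit byte count in the SMB header wraps and smbCalcSize_LE() yields clc_len = 0x1004. Because len is greater than 64K, len > clc_len, and ((4 + len) & 0xFFFF) = 0x1004 = (clc_len & 0xFFFF), the frame is now accepted rather than rejected as a size mismatch.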

129
fs/cifs/ntlmssp.c Normal file
View File

@ -0,0 +1,129 @@
/*
* fs/cifs/ntlmssp.c
*
* Copyright (c) International Business Machines Corp., 2006
* Author(s): Steve French (sfrench@us.ibm.com)
*
* This library is free software; you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published
* by the Free Software Foundation; either version 2.1 of the License, or
* (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
* the GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this library; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_unicode.h"
#include "cifs_debug.h"
#include "ntlmssp.h"
#include "nterr.h"
#ifdef CONFIG_CIFS_EXPERIMENTAL
static __u32 cifs_ssetup_hdr(struct cifsSesInfo *ses, SESSION_SETUP_ANDX *pSMB)
{
__u32 capabilities = 0;
/* init fields common to all four types of SessSetup */
/* note that header is initialized to zero in header_assemble */
pSMB->req.AndXCommand = 0xFF;
pSMB->req.MaxBufferSize = cpu_to_le16(ses->server->maxBuf);
pSMB->req.MaxMpxCount = cpu_to_le16(ses->server->maxReq);
/* Now no need to set SMBFLG_CASELESS or obsolete CANONICAL PATH */
/* BB verify whether signing required on neg or just on auth frame
(and NTLM case) */
capabilities = CAP_LARGE_FILES | CAP_NT_SMBS | CAP_LEVEL_II_OPLOCKS |
CAP_LARGE_WRITE_X | CAP_LARGE_READ_X;
if(ses->server->secMode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
pSMB->req.hdr.Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
if (ses->capabilities & CAP_UNICODE) {
pSMB->req.hdr.Flags2 |= SMBFLG2_UNICODE;
capabilities |= CAP_UNICODE;
}
if (ses->capabilities & CAP_STATUS32) {
pSMB->req.hdr.Flags2 |= SMBFLG2_ERR_STATUS;
capabilities |= CAP_STATUS32;
}
if (ses->capabilities & CAP_DFS) {
pSMB->req.hdr.Flags2 |= SMBFLG2_DFS;
capabilities |= CAP_DFS;
}
/* BB check whether to init vcnum BB */
return capabilities;
}
int
CIFS_SessSetup(unsigned int xid, struct cifsSesInfo *ses, const int type,
int * pNTLMv2_flg, const struct nls_table *nls_cp)
{
int rc = 0;
int wct;
struct smb_hdr *smb_buffer;
char *bcc_ptr;
SESSION_SETUP_ANDX *pSMB;
__u32 capabilities;
if(ses == NULL)
return -EINVAL;
cFYI(1,("SStp type: %d",type));
if(type < CIFS_NTLM) {
#ifndef CONFIG_CIFS_WEAK_PW_HASH
/* LANMAN and plaintext are less secure and off by default.
So we make this explicitly be turned on in kconfig (in the
build) and turned on at runtime (changed from the default)
in proc/fs/cifs or via mount parm. Unfortunately this is
needed for old Win (e.g. Win95), some obscure NAS and OS/2 */
return -EOPNOTSUPP;
#endif
wct = 10; /* lanman 2 style sessionsetup */
} else if(type < CIFS_NTLMSSP_NEG)
wct = 13; /* old style NTLM sessionsetup */
else /* same size for negotiate or auth, NTLMSSP or extended security */
wct = 12;
rc = small_smb_init_no_tc(SMB_COM_SESSION_SETUP_ANDX, wct, ses,
(void **)&smb_buffer);
if(rc)
return rc;
pSMB = (SESSION_SETUP_ANDX *)smb_buffer;
capabilities = cifs_ssetup_hdr(ses, pSMB);
bcc_ptr = pByteArea(smb_buffer);
if(type > CIFS_NTLM) {
pSMB->req.hdr.Flags2 |= SMBFLG2_EXT_SEC;
capabilities |= CAP_EXTENDED_SECURITY;
pSMB->req.Capabilities = cpu_to_le32(capabilities);
/* BB set password lengths */
} else if(type < CIFS_NTLM) /* lanman */ {
/* no capabilities flags in old lanman negotiation */
/* pSMB->old_req.PasswordLength = */ /* BB fixme BB */
} else /* type CIFS_NTLM */ {
pSMB->req_no_secext.Capabilities = cpu_to_le32(capabilities);
pSMB->req_no_secext.CaseInsensitivePasswordLength =
cpu_to_le16(CIFS_SESSION_KEY_SIZE);
pSMB->req_no_secext.CaseSensitivePasswordLength =
cpu_to_le16(CIFS_SESSION_KEY_SIZE);
}
/* rc = SendReceive2(xid, ses, iov, num_iovecs, &resp_buf_type, 0); */
/* SMB request buf freed in SendReceive2 */
return rc;
}
#endif /* CONFIG_CIFS_EXPERIMENTAL */
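For orientation, the word-count selection above can be read as a small lookup (a sketch that merely restates the branch logic in CIFS_SessSetup(), not extra patch code):

/* Sketch: restates the wct choice made in CIFS_SessSetup() above. */
static int sess_setup_wct(int type)
{
        if (type < CIFS_NTLM)
                return 10;      /* LANMAN 2 style session setup */
        if (type < CIFS_NTLMSSP_NEG)
                return 13;      /* old style NTLM session setup */
        return 12;              /* NTLMSSP negotiate/auth or extended security */
}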

View File

@ -1,7 +1,7 @@
/*
* fs/cifs/ntlmssp.h
*
* Copyright (c) International Business Machines Corp., 2002
* Copyright (c) International Business Machines Corp., 2002,2006
* Author(s): Steve French (sfrench@us.ibm.com)
*
* This library is free software; you can redistribute it and/or modify

View File

@ -604,7 +604,12 @@ static int find_cifs_entry(const int xid, struct cifsTconInfo *pTcon,
cifsFile->search_resume_name = NULL;
if(cifsFile->srch_inf.ntwrk_buf_start) {
cFYI(1,("freeing SMB ff cache buf on search rewind"));
cifs_buf_release(cifsFile->srch_inf.ntwrk_buf_start);
if(cifsFile->srch_inf.smallBuf)
cifs_small_buf_release(cifsFile->srch_inf.
ntwrk_buf_start);
else
cifs_buf_release(cifsFile->srch_inf.
ntwrk_buf_start);
}
rc = initiate_cifs_search(xid,file);
if(rc) {

View File

@ -309,17 +309,16 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses,
*pRespBufType = CIFS_NO_BUFFER; /* no response buf yet */
if (ses == NULL) {
cERROR(1,("Null smb session"));
return -EIO;
}
if(ses->server == NULL) {
cERROR(1,("Null tcp session"));
if ((ses == NULL) || (ses->server == NULL)) {
cifs_small_buf_release(in_buf);
cERROR(1,("Null session"));
return -EIO;
}
if(ses->server->tcpStatus == CifsExiting)
if(ses->server->tcpStatus == CifsExiting) {
cifs_small_buf_release(in_buf);
return -ENOENT;
}
/* Ensure that we do not send more than 50 overlapping requests
to the same server. We may make this configurable later or
@ -346,6 +345,7 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses,
} else {
if(ses->server->tcpStatus == CifsExiting) {
spin_unlock(&GlobalMid_Lock);
cifs_small_buf_release(in_buf);
return -ENOENT;
}
@ -385,6 +385,7 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses,
midQ = AllocMidQEntry(in_buf, ses);
if (midQ == NULL) {
up(&ses->server->tcpSem);
cifs_small_buf_release(in_buf);
/* If not lock req, update # of requests on wire to server */
if(long_op < 3) {
atomic_dec(&ses->server->inFlight);
@ -408,14 +409,18 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses,
if(rc < 0) {
DeleteMidQEntry(midQ);
up(&ses->server->tcpSem);
cifs_small_buf_release(in_buf);
/* If not lock req, update # of requests on wire to server */
if(long_op < 3) {
atomic_dec(&ses->server->inFlight);
wake_up(&ses->server->request_q);
}
return rc;
} else
} else {
up(&ses->server->tcpSem);
cifs_small_buf_release(in_buf);
}
if (long_op == -1)
goto cifs_no_response_exit2;
else if (long_op == 2) /* writes past end of file can take loong time */
@ -543,6 +548,7 @@ cifs_no_response_exit2:
out_unlock2:
up(&ses->server->tcpSem);
cifs_small_buf_release(in_buf);
/* If not lock req, update # of requests on wire to server */
if(long_op < 3) {
atomic_dec(&ses->server->inFlight);

View File

@ -16,26 +16,21 @@
#define UART_BASE ((volatile u32 *)IXP23XX_UART1_PHYS)
static __inline__ void putc(char c)
static inline void putc(char c)
{
int j;
for (j = 0; j < 0x1000; j++) {
if (UART_BASE[UART_LSR] & UART_LSR_THRE)
break;
barrier();
}
UART_BASE[UART_TX] = c;
}
static void putstr(const char *s)
static inline void flush(void)
{
while (*s) {
putc(*s);
if (*s == '\n')
putc('\r');
s++;
}
}
#define arch_decomp_setup()

View File

@ -485,7 +485,7 @@
#define SACR1_ENLBF (1 << 5) /* Enable Loopback */
#define SACR1_DRPL (1 << 4) /* Disable Replaying Function */
#define SACR1_DREC (1 << 3) /* Disable Recording Function */
#define SACR1_AMSL (1 << 1) /* Specify Alternate Mode */
#define SACR1_AMSL (1 << 0) /* Specify Alternate Mode */
#define SASR0_I2SOFF (1 << 7) /* Controller Status */
#define SASR0_ROR (1 << 6) /* Rx FIFO Overrun */

View File

@ -308,8 +308,6 @@
#define __NR_mq_notify (__NR_SYSCALL_BASE+278)
#define __NR_mq_getsetattr (__NR_SYSCALL_BASE+279)
#define __NR_waitid (__NR_SYSCALL_BASE+280)
#if defined(__ARM_EABI__) /* reserve these for un-muxing socketcall */
#define __NR_socket (__NR_SYSCALL_BASE+281)
#define __NR_bind (__NR_SYSCALL_BASE+282)
#define __NR_connect (__NR_SYSCALL_BASE+283)
@ -327,9 +325,6 @@
#define __NR_getsockopt (__NR_SYSCALL_BASE+295)
#define __NR_sendmsg (__NR_SYSCALL_BASE+296)
#define __NR_recvmsg (__NR_SYSCALL_BASE+297)
#endif
#if defined(__ARM_EABI__) /* reserve these for un-muxing ipc */
#define __NR_semop (__NR_SYSCALL_BASE+298)
#define __NR_semget (__NR_SYSCALL_BASE+299)
#define __NR_semctl (__NR_SYSCALL_BASE+300)
@ -341,16 +336,10 @@
#define __NR_shmdt (__NR_SYSCALL_BASE+306)
#define __NR_shmget (__NR_SYSCALL_BASE+307)
#define __NR_shmctl (__NR_SYSCALL_BASE+308)
#endif
#define __NR_add_key (__NR_SYSCALL_BASE+309)
#define __NR_request_key (__NR_SYSCALL_BASE+310)
#define __NR_keyctl (__NR_SYSCALL_BASE+311)
#if defined(__ARM_EABI__) /* reserved for un-muxing ipc */
#define __NR_semtimedop (__NR_SYSCALL_BASE+312)
#endif
#define __NR_vserver (__NR_SYSCALL_BASE+313)
#define __NR_ioprio_set (__NR_SYSCALL_BASE+314)
#define __NR_ioprio_get (__NR_SYSCALL_BASE+315)
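These socket, ipc and semtimedop numbers are now defined unconditionally rather than only for __ARM_EABI__ builds. Purely as an illustration (user space, glibc syscall(2); not part of the kernel change), a direct call that bypasses the legacy sys_socketcall multiplexer might look like:

#include <unistd.h>
#include <sys/syscall.h>
#include <sys/socket.h>

/* Illustrative only: create a TCP socket via the un-muxed syscall number. */
int make_socket(void)
{
        return syscall(__NR_socket, AF_INET, SOCK_STREAM, 0);
}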

View File

@ -68,6 +68,7 @@
#define PAL_SHUTDOWN 40 /* enter processor shutdown state */
#define PAL_PREFETCH_VISIBILITY 41 /* Make Processor Prefetches Visible */
#define PAL_LOGICAL_TO_PHYSICAL 42 /* returns information on logical to physical processor mapping */
#define PAL_CACHE_SHARED_INFO 43 /* returns information on caches shared by logical processor */
#define PAL_COPY_PAL 256 /* relocate PAL procedures and PAL PMI */
#define PAL_HALT_INFO 257 /* return the low power capabilities of processor */
@ -130,7 +131,7 @@ typedef u64 pal_cache_line_state_t;
#define PAL_CACHE_LINE_STATE_MODIFIED 3 /* Modified */
typedef struct pal_freq_ratio {
u64 den : 32, num : 32; /* numerator & denominator */
u32 den, num; /* numerator & denominator */
} itc_ratio, proc_ratio;
typedef union pal_cache_config_info_1_s {
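The pal_freq_ratio fields change from 32-bit bitfields packed in a u64 to two plain u32 members, but callers keep reading num/den the same way. A hedged sketch of applying the ratio (assumptions: base_freq comes from the firmware base-frequency call, and ia64_pal_freq_ratios() is the existing wrapper in this header that fills all three ratio structures):

/* Sketch: scale a firmware-reported base frequency by the ITC ratio. */
static u64 itc_freq_from_base(u64 base_freq)
{
        struct pal_freq_ratio proc, bus, itc;

        if (ia64_pal_freq_ratios(&proc, &bus, &itc) != PAL_STATUS_SUCCESS)
                return 0;
        return base_freq * itc.num / itc.den;
}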
@ -151,10 +152,10 @@ typedef union pal_cache_config_info_1_s {
typedef union pal_cache_config_info_2_s {
struct {
u64 cache_size : 32, /*cache size in bytes*/
u32 cache_size; /*cache size in bytes*/
alias_boundary : 8, /* 39-32 aliased addr
u32 alias_boundary : 8, /* 39-32 aliased addr
* separation for max
* performance.
*/
@ -1647,6 +1648,33 @@ ia64_pal_logical_to_phys(u64 proc_number, pal_logical_to_physical_t *mapping)
return iprv.status;
}
typedef struct pal_cache_shared_info_s
{
u64 num_shared;
pal_proc_n_log_info1_t ppli1;
pal_proc_n_log_info2_t ppli2;
} pal_cache_shared_info_t;
/* Get information on caches shared by the specified logical processor. */
static inline s64
ia64_pal_cache_shared_info(u64 level,
u64 type,
u64 proc_number,
pal_cache_shared_info_t *info)
{
struct ia64_pal_retval iprv;
PAL_CALL(iprv, PAL_CACHE_SHARED_INFO, level, type, proc_number);
if (iprv.status == PAL_STATUS_SUCCESS) {
info->num_shared = iprv.v0;
info->ppli1.ppli1_data = iprv.v1;
info->ppli2.ppli2_data = iprv.v2;
}
return iprv.status;
}
#endif /* __ASSEMBLY__ */
#endif /* _ASM_IA64_PAL_H */
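A short usage sketch for the new wrapper (hypothetical helper; the exact encodings of level, type and proc_number are taken to follow the PAL documentation and are an assumption here):

/* Sketch: return how many logical processors share the given cache. */
static u64 cache_shared_count(u64 level, u64 type, u64 proc_number)
{
        pal_cache_shared_info_t info;

        if (ia64_pal_cache_shared_info(level, type, proc_number, &info)
            != PAL_STATUS_SUCCESS)
                return 0;
        return info.num_shared;
}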