Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux

Pull s390 updates from Martin Schwidefsky:

 - New entropy generation for the pseudo random number generator.

 - Early boot printk output via sclp to help debug crashes on boot. This
   needs to be enabled with a kernel parameter.

 - Add proper no-execute support with a bit in the page table entry.

 - Bug fixes and cleanups.

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/s390/linux: (65 commits)
  s390/syscall: fix single stepped system calls
  s390/zcrypt: make ap_bus explicitly non-modular
  s390/zcrypt: Removed unneeded debug feature directory creation.
  s390: add missing "do {} while (0)" loop constructs to multiline macros
  s390/mm: add cond_resched call to kernel page table dumper
  s390: get rid of MACHINE_HAS_PFMF and MACHINE_HAS_HPAGE
  s390/mm: make memory_block_size_bytes available for !MEMORY_HOTPLUG
  s390: replace ACCESS_ONCE with READ_ONCE
  s390: Audit and remove any remaining unnecessary uses of module.h
  s390: mm: Audit and remove any unnecessary uses of module.h
  s390: kernel: Audit and remove any unnecessary uses of module.h
  s390/kdump: Use "LINUX" ELF note name instead of "CORE"
  s390: add no-execute support
  s390: report new vector facilities
  s390: use correct input data address for setup_randomness
  s390/sclp: get rid of common response code handling
  s390/sclp: don't add new lines to each printed string
  s390/sclp: make early sclp code readable
  s390/sclp: disable early sclp code as soon as the base sclp driver is active
  s390/sclp: move early printk code to drivers
  ...
Linus Torvalds 2017-02-22 10:20:04 -08:00
commit ff47d8c050
103 changed files with 1252 additions and 1082 deletions


@ -970,9 +970,10 @@
address. The serial port must already be setup
and configured. Options are not yet supported.
earlyprintk= [X86,SH,BLACKFIN,ARM,M68k]
earlyprintk= [X86,SH,BLACKFIN,ARM,M68k,S390]
earlyprintk=vga
earlyprintk=efi
earlyprintk=sclp
earlyprintk=xen
earlyprintk=serial[,ttySn[,baudrate]]
earlyprintk=serial[,0x...[,baudrate]]
@ -1007,6 +1008,8 @@
The xen output can only be used by Xen PV guests.
The sclp output can only be used on s390.
edac_report= [HW,EDAC] Control how to report EDAC event
Format: {"on" | "off" | "force"}
on: enable EDAC to report H/W event. May be overridden
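A quick usage illustration (editorial, not part of the patch; the root= value is only a placeholder): on s390 the new boot console is selected by appending the parameter to the kernel command line, for example

    root=/dev/dasda1 earlyprintk=sclp

A bare "earlyprintk" also works on s390, since sclp is the only early console the architecture provides.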


@ -17,4 +17,7 @@ config S390_PTDUMP
kernel.
If in doubt, say "N"
config EARLY_PRINTK
def_bool y
endmenu


@ -19,7 +19,8 @@ KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
GCOV_PROFILE := n
UBSAN_SANITIZE := n
OBJECTS := $(addprefix $(objtree)/arch/s390/kernel/, head.o sclp.o ebcdic.o als.o)
OBJECTS := $(addprefix $(objtree)/arch/s390/kernel/, head.o ebcdic.o als.o)
OBJECTS += $(objtree)/drivers/s390/char/sclp_early_core.o
OBJECTS += $(obj)/head.o $(obj)/misc.o $(obj)/piggy.o
LDFLAGS_vmlinux := --oformat $(LD_BFD) -e startup -T


@ -66,7 +66,7 @@ static unsigned long free_mem_end_ptr;
static int puts(const char *s)
{
_sclp_print_early(s);
sclp_early_printk(s);
return 0;
}


@ -28,6 +28,7 @@
#include <linux/cpufeature.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/fips.h>
#include <crypto/xts.h>
#include <asm/cpacf.h>
@ -501,6 +502,12 @@ static int xts_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
if (err)
return err;
/* In fips mode only 128 bit or 256 bit keys are valid */
if (fips_enabled && key_len != 32 && key_len != 64) {
tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
return -EINVAL;
}
/* Pick the correct function code based on the key length */
fc = (key_len == 32) ? CPACF_KM_XTS_128 :
(key_len == 64) ? CPACF_KM_XTS_256 : 0;
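A short aside on the two accepted lengths (editorial, not from the patch): XTS uses a double-length key, so key_len = 32 bytes means two 16-byte keys (AES-128-XTS) and key_len = 64 bytes means two 32-byte keys (AES-256-XTS). Those are the only XTS variants approved for FIPS use, which is why a 48-byte AES-192-XTS key is rejected by the check above.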


@ -18,6 +18,7 @@
#include <linux/module.h>
#include <linux/cpufeature.h>
#include <linux/crypto.h>
#include <linux/fips.h>
#include <crypto/algapi.h>
#include <crypto/des.h>
#include <asm/cpacf.h>
@ -221,6 +222,8 @@ static struct crypto_alg cbc_des_alg = {
* same as DES. Implementers MUST reject keys that exhibit this
* property.
*
* In fips mode additionally check that all 3 keys are unique.
*
*/
static int des3_setkey(struct crypto_tfm *tfm, const u8 *key,
unsigned int key_len)
@ -234,6 +237,17 @@ static int des3_setkey(struct crypto_tfm *tfm, const u8 *key,
tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
return -EINVAL;
}
/* in fips mode, ensure k1 != k2 and k2 != k3 and k1 != k3 */
if (fips_enabled &&
!(crypto_memneq(key, &key[DES_KEY_SIZE], DES_KEY_SIZE) &&
crypto_memneq(&key[DES_KEY_SIZE], &key[DES_KEY_SIZE * 2],
DES_KEY_SIZE) &&
crypto_memneq(key, &key[DES_KEY_SIZE * 2], DES_KEY_SIZE))) {
tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY;
return -EINVAL;
}
memcpy(ctx->key, key, key_len);
return 0;
}
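Worked rationale for the new check (editorial): with a 24-byte 3DES key split into K1|K2|K3, encryption is E_K3(D_K2(E_K1(P))). If K1 == K2 the inner pair cancels and the cipher degenerates to single DES under K3; likewise K2 == K3 degenerates to single DES under K1. In FIPS mode any repeated key is therefore rejected as weak.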


@ -110,22 +110,30 @@ static const u8 initial_parm_block[32] __initconst = {
/*** helper functions ***/
/*
* generate_entropy:
* This algorithm produces 64 bytes of entropy data based on 1024
* individual stckf() invocations assuming that each stckf() value
* contributes 0.25 bits of entropy. So the caller gets 256 bit
* entropy per 64 byte or 4 bits entropy per byte.
*/
static int generate_entropy(u8 *ebuf, size_t nbytes)
{
int n, ret = 0;
u8 *pg, *h, hash[32];
u8 *pg, *h, hash[64];
pg = (u8 *) __get_free_page(GFP_KERNEL);
/* allocate 2 pages */
pg = (u8 *) __get_free_pages(GFP_KERNEL, 1);
if (!pg) {
prng_errorflag = PRNG_GEN_ENTROPY_FAILED;
return -ENOMEM;
}
while (nbytes) {
/* fill page with urandom bytes */
get_random_bytes(pg, PAGE_SIZE);
/* exor page with stckf values */
for (n = 0; n < PAGE_SIZE / sizeof(u64); n++) {
/* fill pages with urandom bytes */
get_random_bytes(pg, 2*PAGE_SIZE);
/* exor pages with 1024 stckf values */
for (n = 0; n < 2 * PAGE_SIZE / sizeof(u64); n++) {
u64 *p = ((u64 *)pg) + n;
*p ^= get_tod_clock_fast();
}
@ -134,8 +142,8 @@ static int generate_entropy(u8 *ebuf, size_t nbytes)
h = hash;
else
h = ebuf;
/* generate sha256 from this page */
cpacf_kimd(CPACF_KIMD_SHA_256, h, pg, PAGE_SIZE);
/* hash over the filled pages */
cpacf_kimd(CPACF_KIMD_SHA_512, h, pg, 2*PAGE_SIZE);
if (n < sizeof(hash))
memcpy(ebuf, hash, n);
ret += n;
@ -143,7 +151,7 @@ static int generate_entropy(u8 *ebuf, size_t nbytes)
nbytes -= n;
}
free_page((unsigned long)pg);
free_pages((unsigned long)pg, 1);
return ret;
}
@ -334,7 +342,7 @@ static int __init prng_sha512_selftest(void)
static int __init prng_sha512_instantiate(void)
{
int ret, datalen;
u8 seed[64];
u8 seed[64 + 32 + 16];
pr_debug("prng runs in SHA-512 mode "
"with chunksize=%d and reseed_limit=%u\n",
@ -357,12 +365,12 @@ static int __init prng_sha512_instantiate(void)
if (ret)
goto outfree;
/* generate initial seed bytestring, first 48 bytes of entropy */
ret = generate_entropy(seed, 48);
if (ret != 48)
/* generate initial seed bytestring, with 256 + 128 bits entropy */
ret = generate_entropy(seed, 64 + 32);
if (ret != 64 + 32)
goto outfree;
/* followed by 16 bytes of unique nonce */
get_tod_clock_ext(seed + 48);
get_tod_clock_ext(seed + 64 + 32);
/* initial seed of the ppno drng */
cpacf_ppno(CPACF_PPNO_SHA512_DRNG_SEED,
@ -395,9 +403,9 @@ static void prng_sha512_deinstantiate(void)
static int prng_sha512_reseed(void)
{
int ret;
u8 seed[32];
u8 seed[64];
/* generate 32 bytes of fresh entropy */
/* fetch 256 bits of fresh entropy */
ret = generate_entropy(seed, sizeof(seed));
if (ret != sizeof(seed))
return ret;
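The arithmetic behind the new sizes (editorial, derived from the comment at the top of this file's diff): two 4 KB pages hold 8192 bytes = 1024 u64 slots, each XORed with one stckf() timestamp; at the assumed 0.25 bits of entropy per timestamp that is 256 bits per 64-byte SHA-512 digest, i.e. 4 bits of entropy per output byte. The instantiate path therefore requests 64 + 32 = 96 bytes (384 bits, the "256 + 128 bits" of the comment) plus a 16-byte TOD nonce, and the reseed path requests 64 bytes (256 bits).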


@ -4,9 +4,31 @@
/* Caches aren't brain-dead on the s390. */
#include <asm-generic/cacheflush.h>
int set_memory_ro(unsigned long addr, int numpages);
int set_memory_rw(unsigned long addr, int numpages);
int set_memory_nx(unsigned long addr, int numpages);
int set_memory_x(unsigned long addr, int numpages);
#define SET_MEMORY_RO 1UL
#define SET_MEMORY_RW 2UL
#define SET_MEMORY_NX 4UL
#define SET_MEMORY_X 8UL
int __set_memory(unsigned long addr, int numpages, unsigned long flags);
static inline int set_memory_ro(unsigned long addr, int numpages)
{
return __set_memory(addr, numpages, SET_MEMORY_RO);
}
static inline int set_memory_rw(unsigned long addr, int numpages)
{
return __set_memory(addr, numpages, SET_MEMORY_RW);
}
static inline int set_memory_nx(unsigned long addr, int numpages)
{
return __set_memory(addr, numpages, SET_MEMORY_NX);
}
static inline int set_memory_x(unsigned long addr, int numpages)
{
return __set_memory(addr, numpages, SET_MEMORY_X);
}
#endif /* _S390_CACHEFLUSH_H */
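A minimal usage sketch of the new interface (illustrative only; assumes buf is a page-aligned, mapped kernel buffer; the kprobes instruction-page allocator further down is the in-tree user):

    unsigned long addr = (unsigned long) buf;

    set_memory_x(addr, 1);    /* clear the no-execute bit on one page */
    /* ... run generated code placed in buf ... */
    set_memory_nx(addr, 1);   /* re-protect the page afterwards */

Each wrapper simply forwards its SET_MEMORY_* flag to __set_memory(), so the four existing entry points keep their signatures.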


@ -199,14 +199,15 @@ static inline int ecctr(u64 ctr, u64 *val)
/* Store CPU counter multiple for the MT utilization counter set */
static inline int stcctm5(u64 num, u64 *val)
{
typedef struct { u64 _[num]; } addrtype;
int cc;
asm volatile (
" .insn rsy,0xeb0000000017,%2,5,%1\n"
" ipm %0\n"
" srl %0,28\n"
: "=d" (cc), "=Q" (*(addrtype *) val) : "d" (num) : "cc");
: "=d" (cc)
: "Q" (*val), "d" (num)
: "cc", "memory");
return cc;
}


@ -9,7 +9,7 @@
#include <linux/bug.h>
#define __ctl_load(array, low, high) { \
#define __ctl_load(array, low, high) do { \
typedef struct { char _[sizeof(array)]; } addrtype; \
\
BUILD_BUG_ON(sizeof(addrtype) != (high - low + 1) * sizeof(long));\
@ -18,9 +18,9 @@
: \
: "Q" (*(addrtype *)(&array)), "i" (low), "i" (high) \
: "memory"); \
}
} while (0)
#define __ctl_store(array, low, high) { \
#define __ctl_store(array, low, high) do { \
typedef struct { char _[sizeof(array)]; } addrtype; \
\
BUILD_BUG_ON(sizeof(addrtype) != (high - low + 1) * sizeof(long));\
@ -28,7 +28,7 @@
" stctg %1,%2,%0\n" \
: "=Q" (*(addrtype *)(&array)) \
: "i" (low), "i" (high)); \
}
} while (0)
static inline void __ctl_set_bit(unsigned int cr, unsigned int bit)
{
@ -62,7 +62,9 @@ union ctlreg0 {
unsigned long : 4;
unsigned long afp : 1; /* AFP-register control */
unsigned long vx : 1; /* Vector enablement control */
unsigned long : 17;
unsigned long : 7;
unsigned long sssm : 1; /* Service signal subclass mask */
unsigned long : 9;
};
};
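Why the do { ... } while (0) form matters (editorial aside, standard C practice): a macro that expands to a bare { ... } block breaks when used as the body of an un-braced if/else, because the semicolon written after the macro call terminates the if and orphans the else. A hypothetical example:

    if (restore)
        __ctl_load(cr0, 0, 0);    /* old form expanded to "{ ... };" and  */
    else                          /* the stray ';' ended the if, so this  */
        panic("no control regs"); /* else no longer paired: build error   */

Wrapping the body in do { ... } while (0) makes the macro act as a single statement that still requires the trailing semicolon.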


@ -103,6 +103,8 @@
#define HWCAP_S390_HIGH_GPRS 512
#define HWCAP_S390_TE 1024
#define HWCAP_S390_VXRS 2048
#define HWCAP_S390_VXRS_BCD 4096
#define HWCAP_S390_VXRS_EXT 8192
/* Internal bits, not exposed via elf */
#define HWCAP_INT_SIE 1UL


@ -14,7 +14,7 @@
#define is_hugepage_only_range(mm, addr, len) 0
#define hugetlb_free_pgd_range free_pgd_range
#define hugepages_supported() (MACHINE_HAS_HPAGE)
#define hugepages_supported() (MACHINE_HAS_EDAT1)
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte);


@ -17,7 +17,7 @@
#ifndef ASM_LIVEPATCH_H
#define ASM_LIVEPATCH_H
#include <linux/module.h>
#include <asm/ptrace.h>
static inline int klp_check_compiler_support(void)
{


@ -85,7 +85,7 @@ struct clp_rsp_query_pci {
u32 fid; /* pci function id */
u8 bar_size[PCI_BAR_COUNT];
u16 pchid;
u32 bar[PCI_BAR_COUNT];
__le32 bar[PCI_BAR_COUNT];
u8 pfip[CLP_PFIP_NR_SEGMENTS]; /* pci function internal path */
u32 : 16;
u8 fmb_len;


@ -200,6 +200,7 @@ static inline int is_module_addr(void *addr)
*/
/* Hardware bits in the page table entry */
#define _PAGE_NOEXEC 0x100 /* HW no-execute bit */
#define _PAGE_PROTECT 0x200 /* HW read-only bit */
#define _PAGE_INVALID 0x400 /* HW invalid bit */
#define _PAGE_LARGE 0x800 /* Bit to mark a large pte */
@ -277,6 +278,7 @@ static inline int is_module_addr(void *addr)
/* Bits in the region table entry */
#define _REGION_ENTRY_ORIGIN ~0xfffUL/* region/segment table origin */
#define _REGION_ENTRY_PROTECT 0x200 /* region protection bit */
#define _REGION_ENTRY_NOEXEC 0x100 /* region no-execute bit */
#define _REGION_ENTRY_OFFSET 0xc0 /* region table offset */
#define _REGION_ENTRY_INVALID 0x20 /* invalid region table entry */
#define _REGION_ENTRY_TYPE_MASK 0x0c /* region/segment table type mask */
@ -316,6 +318,7 @@ static inline int is_module_addr(void *addr)
#define _SEGMENT_ENTRY_ORIGIN_LARGE ~0xfffffUL /* large page address */
#define _SEGMENT_ENTRY_ORIGIN ~0x7ffUL/* segment table origin */
#define _SEGMENT_ENTRY_PROTECT 0x200 /* page protection bit */
#define _SEGMENT_ENTRY_NOEXEC 0x100 /* region no-execute bit */
#define _SEGMENT_ENTRY_INVALID 0x20 /* invalid segment table entry */
#define _SEGMENT_ENTRY (0)
@ -385,17 +388,23 @@ static inline int is_module_addr(void *addr)
* Page protection definitions.
*/
#define PAGE_NONE __pgprot(_PAGE_PRESENT | _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_READ __pgprot(_PAGE_PRESENT | _PAGE_READ | \
#define PAGE_RO __pgprot(_PAGE_PRESENT | _PAGE_READ | \
_PAGE_NOEXEC | _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RX __pgprot(_PAGE_PRESENT | _PAGE_READ | \
_PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_WRITE __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
#define PAGE_RW __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
_PAGE_NOEXEC | _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
_PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
_PAGE_YOUNG | _PAGE_DIRTY)
_PAGE_YOUNG | _PAGE_DIRTY | _PAGE_NOEXEC)
#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
_PAGE_YOUNG | _PAGE_DIRTY)
_PAGE_YOUNG | _PAGE_DIRTY | _PAGE_NOEXEC)
#define PAGE_KERNEL_RO __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_YOUNG | \
_PAGE_PROTECT)
_PAGE_PROTECT | _PAGE_NOEXEC)
#define PAGE_KERNEL_EXEC __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
_PAGE_YOUNG | _PAGE_DIRTY)
/*
* On s390 the page table entry has an invalid bit and a read-only bit.
@ -404,43 +413,51 @@ static inline int is_module_addr(void *addr)
*/
/*xwr*/
#define __P000 PAGE_NONE
#define __P001 PAGE_READ
#define __P010 PAGE_READ
#define __P011 PAGE_READ
#define __P100 PAGE_READ
#define __P101 PAGE_READ
#define __P110 PAGE_READ
#define __P111 PAGE_READ
#define __P001 PAGE_RO
#define __P010 PAGE_RO
#define __P011 PAGE_RO
#define __P100 PAGE_RX
#define __P101 PAGE_RX
#define __P110 PAGE_RX
#define __P111 PAGE_RX
#define __S000 PAGE_NONE
#define __S001 PAGE_READ
#define __S010 PAGE_WRITE
#define __S011 PAGE_WRITE
#define __S100 PAGE_READ
#define __S101 PAGE_READ
#define __S110 PAGE_WRITE
#define __S111 PAGE_WRITE
#define __S001 PAGE_RO
#define __S010 PAGE_RW
#define __S011 PAGE_RW
#define __S100 PAGE_RX
#define __S101 PAGE_RX
#define __S110 PAGE_RWX
#define __S111 PAGE_RWX
/*
* Segment entry (large page) protection definitions.
*/
#define SEGMENT_NONE __pgprot(_SEGMENT_ENTRY_INVALID | \
_SEGMENT_ENTRY_PROTECT)
#define SEGMENT_READ __pgprot(_SEGMENT_ENTRY_PROTECT | \
#define SEGMENT_RO __pgprot(_SEGMENT_ENTRY_PROTECT | \
_SEGMENT_ENTRY_READ | \
_SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_RX __pgprot(_SEGMENT_ENTRY_PROTECT | \
_SEGMENT_ENTRY_READ)
#define SEGMENT_WRITE __pgprot(_SEGMENT_ENTRY_READ | \
#define SEGMENT_RW __pgprot(_SEGMENT_ENTRY_READ | \
_SEGMENT_ENTRY_WRITE | \
_SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_RWX __pgprot(_SEGMENT_ENTRY_READ | \
_SEGMENT_ENTRY_WRITE)
#define SEGMENT_KERNEL __pgprot(_SEGMENT_ENTRY | \
_SEGMENT_ENTRY_LARGE | \
_SEGMENT_ENTRY_READ | \
_SEGMENT_ENTRY_WRITE | \
_SEGMENT_ENTRY_YOUNG | \
_SEGMENT_ENTRY_DIRTY)
_SEGMENT_ENTRY_DIRTY | \
_SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_KERNEL_RO __pgprot(_SEGMENT_ENTRY | \
_SEGMENT_ENTRY_LARGE | \
_SEGMENT_ENTRY_READ | \
_SEGMENT_ENTRY_YOUNG | \
_SEGMENT_ENTRY_PROTECT)
_SEGMENT_ENTRY_PROTECT | \
_SEGMENT_ENTRY_NOEXEC)
/*
* Region3 entry (large page) protection definitions.
@ -451,12 +468,14 @@ static inline int is_module_addr(void *addr)
_REGION3_ENTRY_READ | \
_REGION3_ENTRY_WRITE | \
_REGION3_ENTRY_YOUNG | \
_REGION3_ENTRY_DIRTY)
_REGION3_ENTRY_DIRTY | \
_REGION_ENTRY_NOEXEC)
#define REGION3_KERNEL_RO __pgprot(_REGION_ENTRY_TYPE_R3 | \
_REGION3_ENTRY_LARGE | \
_REGION3_ENTRY_READ | \
_REGION3_ENTRY_YOUNG | \
_REGION_ENTRY_PROTECT)
_REGION_ENTRY_PROTECT | \
_REGION_ENTRY_NOEXEC)
static inline int mm_has_pgste(struct mm_struct *mm)
{
@ -801,14 +820,14 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
pte_val(pte) &= _PAGE_CHG_MASK;
pte_val(pte) |= pgprot_val(newprot);
/*
* newprot for PAGE_NONE, PAGE_READ and PAGE_WRITE has the
* invalid bit set, clear it again for readable, young pages
* newprot for PAGE_NONE, PAGE_RO, PAGE_RX, PAGE_RW and PAGE_RWX
* has the invalid bit set, clear it again for readable, young pages
*/
if ((pte_val(pte) & _PAGE_YOUNG) && (pte_val(pte) & _PAGE_READ))
pte_val(pte) &= ~_PAGE_INVALID;
/*
* newprot for PAGE_READ and PAGE_WRITE has the page protection
* bit set, clear it again for writable, dirty pages
* newprot for PAGE_RO, PAGE_RX, PAGE_RW and PAGE_RWX has the page
* protection bit set, clear it again for writable, dirty pages
*/
if ((pte_val(pte) & _PAGE_DIRTY) && (pte_val(pte) & _PAGE_WRITE))
pte_val(pte) &= ~_PAGE_PROTECT;
@ -1029,6 +1048,8 @@ int get_guest_storage_key(struct mm_struct *mm, unsigned long addr,
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t entry)
{
if (!MACHINE_HAS_NX)
pte_val(entry) &= ~_PAGE_NOEXEC;
if (mm_has_pgste(mm))
ptep_set_pte_at(mm, addr, ptep, entry);
else
@ -1173,14 +1194,18 @@ static inline pud_t pud_mkdirty(pud_t pud)
static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot)
{
/*
* pgprot is PAGE_NONE, PAGE_READ, or PAGE_WRITE (see __Pxxx / __Sxxx)
* Convert to segment table entry format.
* pgprot is PAGE_NONE, PAGE_RO, PAGE_RX, PAGE_RW or PAGE_RWX
* (see __Pxxx / __Sxxx). Convert to segment table entry format.
*/
if (pgprot_val(pgprot) == pgprot_val(PAGE_NONE))
return pgprot_val(SEGMENT_NONE);
if (pgprot_val(pgprot) == pgprot_val(PAGE_READ))
return pgprot_val(SEGMENT_READ);
return pgprot_val(SEGMENT_WRITE);
if (pgprot_val(pgprot) == pgprot_val(PAGE_RO))
return pgprot_val(SEGMENT_RO);
if (pgprot_val(pgprot) == pgprot_val(PAGE_RX))
return pgprot_val(SEGMENT_RX);
if (pgprot_val(pgprot) == pgprot_val(PAGE_RW))
return pgprot_val(SEGMENT_RW);
return pgprot_val(SEGMENT_RWX);
}
static inline pmd_t pmd_mkyoung(pmd_t pmd)
@ -1315,6 +1340,8 @@ static inline int pmdp_clear_flush_young(struct vm_area_struct *vma,
static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
pmd_t *pmdp, pmd_t entry)
{
if (!MACHINE_HAS_NX)
pmd_val(entry) &= ~_SEGMENT_ENTRY_NOEXEC;
*pmdp = entry;
}
@ -1389,7 +1416,7 @@ static inline int pmd_trans_huge(pmd_t pmd)
#define has_transparent_hugepage has_transparent_hugepage
static inline int has_transparent_hugepage(void)
{
return MACHINE_HAS_HPAGE ? 1 : 0;
return MACHINE_HAS_EDAT1 ? 1 : 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
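A worked example of the renamed protection tables (editorial): a private mapping requesting read+execute has protection bits xwr = 101, so __P101 now resolves to PAGE_RX, which leaves _PAGE_NOEXEC clear; a read-only private mapping (xwr = 001) resolves to __P001 = PAGE_RO, which keeps _PAGE_NOEXEC set, so on machines with MACHINE_HAS_NX an instruction fetch from it faults. set_pte_at() strips the bit again when the facility is absent, so older hardware sees the previous behaviour unchanged.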


@ -361,12 +361,12 @@ extern void (*s390_base_ext_handler_fn)(void);
extern int memcpy_real(void *, void *, size_t);
extern void memcpy_absolute(void *, void *, size_t);
#define mem_assign_absolute(dest, val) { \
#define mem_assign_absolute(dest, val) do { \
__typeof__(dest) __tmp = (val); \
\
BUILD_BUG_ON(sizeof(__tmp) != sizeof(val)); \
memcpy_absolute(&(dest), &__tmp, sizeof(__tmp)); \
}
} while (0)
#endif /* __ASSEMBLY__ */


@ -101,7 +101,12 @@ struct zpci_report_error_header {
u8 data[0]; /* Subsequent Data passed verbatim to SCLP ET 24 */
} __packed;
int _sclp_get_core_info_early(struct sclp_core_info *info);
int sclp_early_get_core_info(struct sclp_core_info *info);
void sclp_early_get_ipl_info(struct sclp_ipl_info *info);
void sclp_early_detect(void);
void sclp_early_printk(const char *s);
void __sclp_early_printk(const char *s, unsigned int len);
int _sclp_get_core_info(struct sclp_core_info *info);
int sclp_core_configure(u8 core);
int sclp_core_deconfigure(u8 core);
@ -110,20 +115,17 @@ int sclp_sdias_copy(void *dest, int blk_num, int nr_blks);
int sclp_chp_configure(struct chp_id chpid);
int sclp_chp_deconfigure(struct chp_id chpid);
int sclp_chp_read_info(struct sclp_chp_info *info);
void sclp_get_ipl_info(struct sclp_ipl_info *info);
int sclp_pci_configure(u32 fid);
int sclp_pci_deconfigure(u32 fid);
int sclp_pci_report(struct zpci_report_error_header *report, u32 fh, u32 fid);
int memcpy_hsa_kernel(void *dest, unsigned long src, size_t count);
int memcpy_hsa_user(void __user *dest, unsigned long src, size_t count);
void sclp_early_detect(void);
void _sclp_print_early(const char *);
void sclp_ocf_cpc_name_copy(char *dst);
static inline int sclp_get_core_info(struct sclp_core_info *info, int early)
{
if (early)
return _sclp_get_core_info_early(info);
return sclp_early_get_core_info(info);
return _sclp_get_core_info(info);
}


@ -30,6 +30,7 @@
#define MACHINE_FLAG_TLB_LC _BITUL(12)
#define MACHINE_FLAG_VX _BITUL(13)
#define MACHINE_FLAG_CAD _BITUL(14)
#define MACHINE_FLAG_NX _BITUL(15)
#define LPP_MAGIC _BITUL(31)
#define LPP_PFAULT_PID_MASK _AC(0xffffffff, UL)
@ -58,9 +59,6 @@ extern void detect_memory_memblock(void);
#define MACHINE_HAS_DIAG9C (S390_lowcore.machine_flags & MACHINE_FLAG_DIAG9C)
#define MACHINE_HAS_ESOP (S390_lowcore.machine_flags & MACHINE_FLAG_ESOP)
#define MACHINE_HAS_PFMF MACHINE_HAS_EDAT1
#define MACHINE_HAS_HPAGE MACHINE_HAS_EDAT1
#define MACHINE_HAS_IDTE (S390_lowcore.machine_flags & MACHINE_FLAG_IDTE)
#define MACHINE_HAS_DIAG44 (S390_lowcore.machine_flags & MACHINE_FLAG_DIAG44)
#define MACHINE_HAS_EDAT1 (S390_lowcore.machine_flags & MACHINE_FLAG_EDAT1)
@ -71,6 +69,7 @@ extern void detect_memory_memblock(void);
#define MACHINE_HAS_TLB_LC (S390_lowcore.machine_flags & MACHINE_FLAG_TLB_LC)
#define MACHINE_HAS_VX (S390_lowcore.machine_flags & MACHINE_FLAG_VX)
#define MACHINE_HAS_CAD (S390_lowcore.machine_flags & MACHINE_FLAG_CAD)
#define MACHINE_HAS_NX (S390_lowcore.machine_flags & MACHINE_FLAG_NX)
/*
* Console mode. Override with conmode=


@ -63,7 +63,7 @@ static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
static inline int arch_spin_is_locked(arch_spinlock_t *lp)
{
return ACCESS_ONCE(lp->lock) != 0;
return READ_ONCE(lp->lock) != 0;
}
static inline int arch_spin_trylock_once(arch_spinlock_t *lp)


@ -178,14 +178,6 @@ int get_phys_clock(unsigned long long *clock);
void init_cpu_timer(void);
unsigned long long monotonic_clock(void);
void tod_to_timeval(__u64 todval, struct timespec64 *xt);
static inline
void stck_to_timespec64(unsigned long long stck, struct timespec64 *ts)
{
tod_to_timeval(stck - TOD_UNIX_EPOCH, ts);
}
extern u64 sched_clock_base_cc;
/**


@ -38,13 +38,13 @@
#define get_fs() (current->thread.mm_segment)
#define set_fs(x) \
{ \
do { \
unsigned long __pto; \
current->thread.mm_segment = (x); \
__pto = current->thread.mm_segment.ar4 ? \
S390_lowcore.user_asce : S390_lowcore.kernel_asce; \
__ctl_load(__pto, 7, 7); \
}
} while (0)
#define segment_eq(a,b) ((a).ar4 == (b).ar4)
@ -177,7 +177,7 @@ static inline int __put_user_fn(void *x, void __user *ptr, unsigned long size)
(unsigned long *)x,
size, spec);
break;
};
}
return rc;
}
@ -207,7 +207,7 @@ static inline int __get_user_fn(void *x, const void __user *ptr, unsigned long s
(unsigned long __user *)ptr,
size, spec);
break;
};
}
return rc;
}


@ -10,31 +10,25 @@ CFLAGS_REMOVE_ftrace.o = $(CC_FLAGS_FTRACE)
# Do not trace early setup code
CFLAGS_REMOVE_als.o = $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_early.o = $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_sclp.o = $(CC_FLAGS_FTRACE)
endif
GCOV_PROFILE_als.o := n
GCOV_PROFILE_early.o := n
GCOV_PROFILE_sclp.o := n
KCOV_INSTRUMENT_als.o := n
KCOV_INSTRUMENT_early.o := n
KCOV_INSTRUMENT_sclp.o := n
UBSAN_SANITIZE_als.o := n
UBSAN_SANITIZE_early.o := n
UBSAN_SANITIZE_sclp.o := n
#
# Use -march=z900 for sclp.c and als.c to be able to print an error
# Use -march=z900 for als.c to be able to print an error
# message if the kernel is started on a machine which is too old
#
ifneq ($(CC_FLAGS_MARCH),-march=z900)
CFLAGS_REMOVE_als.o += $(CC_FLAGS_MARCH)
CFLAGS_als.o += -march=z900
CFLAGS_REMOVE_sclp.o += $(CC_FLAGS_MARCH)
CFLAGS_sclp.o += -march=z900
AFLAGS_REMOVE_head.o += $(CC_FLAGS_MARCH)
AFLAGS_head.o += -march=z900
endif
@ -61,7 +55,7 @@ CFLAGS_sysinfo.o += -w
obj-y := traps.o time.o process.o base.o early.o setup.o idle.o vtime.o
obj-y += processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o
obj-y += debug.o irq.o ipl.o dis.o diag.o sclp.o vdso.o als.o
obj-y += debug.o irq.o ipl.o dis.o diag.o vdso.o als.o
obj-y += sysinfo.o jump_label.o lgr.o os_info.o machine_kexec.o pgm_check.o
obj-y += runtime_instr.o cache.o fpu.o dumpstack.o
obj-y += entry.o reipl.o relocate_kernel.o
@ -76,7 +70,7 @@ obj-$(CONFIG_AUDIT) += audit.o
compat-obj-$(CONFIG_AUDIT) += compat_audit.o
obj-$(CONFIG_COMPAT) += compat_linux.o compat_signal.o
obj-$(CONFIG_COMPAT) += compat_wrapper.o $(compat-obj-y)
obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
obj-$(CONFIG_STACKTRACE) += stacktrace.o
obj-$(CONFIG_KPROBES) += kprobes.o
obj-$(CONFIG_FUNCTION_TRACER) += mcount.o ftrace.o


@ -41,7 +41,8 @@ static void __init print_machine_type(void)
get_cpu_id(&id);
u16_to_hex(type_str, id.machine);
strcat(mach_str, type_str);
_sclp_print_early(mach_str);
strcat(mach_str, "\n");
sclp_early_printk(mach_str);
}
static void __init u16_to_decimal(char *str, u16 val)
@ -79,7 +80,8 @@ static void __init print_missing_facilities(void)
* z/VM adds a four character prefix.
*/
if (strlen(als_str) > 70) {
_sclp_print_early(als_str);
strcat(als_str, "\n");
sclp_early_printk(als_str);
*als_str = '\0';
}
u16_to_decimal(val_str, i * BITS_PER_LONG + j);
@ -87,13 +89,14 @@ static void __init print_missing_facilities(void)
first = 0;
}
}
_sclp_print_early(als_str);
_sclp_print_early("See Principles of Operations for facility bits");
strcat(als_str, "\n");
sclp_early_printk(als_str);
sclp_early_printk("See Principles of Operations for facility bits\n");
}
static void __init facility_mismatch(void)
{
_sclp_print_early("The Linux kernel requires more recent processor hardware");
sclp_early_printk("The Linux kernel requires more recent processor hardware\n");
print_machine_type();
print_missing_facilities();
disabled_wait(0x8badcccc);


@ -28,7 +28,6 @@
#include <linux/shm.h>
#include <linux/uio.h>
#include <linux/quota.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/personality.h>
#include <linux/stat.h>


@ -9,7 +9,7 @@
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stddef.h>


@ -8,7 +8,8 @@
#include <linux/crash_dump.h>
#include <asm/lowcore.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/bootmem.h>
@ -329,7 +330,11 @@ static void *nt_init_name(void *buf, Elf64_Word type, void *desc, int d_len,
static inline void *nt_init(void *buf, Elf64_Word type, void *desc, int d_len)
{
return nt_init_name(buf, type, desc, d_len, KEXEC_CORE_NOTE_NAME);
const char *note_name = "LINUX";
if (type == NT_PRPSINFO || type == NT_PRSTATUS || type == NT_PRFPREG)
note_name = KEXEC_CORE_NOTE_NAME;
return nt_init_name(buf, type, desc, d_len, note_name);
}
/*


@ -20,7 +20,7 @@
#include <linux/string.h>
#include <linux/sysctl.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
@ -866,7 +866,7 @@ static inline void
debug_finish_entry(debug_info_t * id, debug_entry_t* active, int level,
int exception)
{
active->id.stck = get_tod_clock_fast();
active->id.stck = get_tod_clock_fast() - sched_clock_base_cc;
active->id.fields.cpuid = smp_processor_id();
active->caller = __builtin_return_address(0);
active->id.fields.exception = exception;
@ -1455,23 +1455,24 @@ int
debug_dflt_header_fn(debug_info_t * id, struct debug_view *view,
int area, debug_entry_t * entry, char *out_buf)
{
struct timespec64 time_spec;
unsigned long sec, usec;
char *except_str;
unsigned long caller;
int rc = 0;
unsigned int level;
level = entry->id.fields.level;
stck_to_timespec64(entry->id.stck, &time_spec);
sec = (entry->id.stck >> 12) + (sched_clock_base_cc >> 12);
sec = sec - (TOD_UNIX_EPOCH >> 12);
usec = do_div(sec, USEC_PER_SEC);
if (entry->id.fields.exception)
except_str = "*";
else
except_str = "-";
caller = (unsigned long) entry->caller;
rc += sprintf(out_buf, "%02i %011lld:%06lu %1u %1s %02i %p ",
area, (long long)time_spec.tv_sec,
time_spec.tv_nsec / 1000, level, except_str,
rc += sprintf(out_buf, "%02i %011ld:%06lu %1u %1s %02i %p ",
area, sec, usec, level, except_str,
entry->id.fields.cpuid, (void *)caller);
return rc;
}
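The timestamp conversion in one sentence (editorial; relies on the s390 TOD format, in which bit 51 ticks once per microsecond): entries now store the TOD delta since boot, so the default view adds sched_clock_base_cc back, shifts right by 12 to get microseconds since the TOD epoch, subtracts TOD_UNIX_EPOCH >> 12 to rebase onto the Unix epoch, and lets do_div() split the value into seconds plus a microsecond remainder. This replaces the stck_to_timespec64() helper removed from asm/timex.h above.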


@ -5,7 +5,8 @@
* Author(s): Michael Holzheu <holzheu@de.ibm.com>
*/
#include <linux/module.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>


@ -16,7 +16,7 @@
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/export.h>
#include <linux/kallsyms.h>
#include <linux/reboot.h>
#include <linux/kprobes.h>


@ -354,6 +354,10 @@ static __init void detect_machine_facilities(void)
S390_lowcore.machine_flags |= MACHINE_FLAG_VX;
__ctl_set_bit(0, 17);
}
if (test_facility(130)) {
S390_lowcore.machine_flags |= MACHINE_FLAG_NX;
__ctl_set_bit(0, 20);
}
}
static inline void save_vector_registers(void)
@ -364,6 +368,18 @@ static inline void save_vector_registers(void)
#endif
}
static int __init topology_setup(char *str)
{
bool enabled;
int rc;
rc = kstrtobool(str, &enabled);
if (!rc && !enabled)
S390_lowcore.machine_flags &= ~MACHINE_HAS_TOPOLOGY;
return rc;
}
early_param("topology", topology_setup);
static int __init disable_vector_extension(char *str)
{
S390_lowcore.machine_flags &= ~MACHINE_FLAG_VX;
@ -372,6 +388,21 @@ static int __init disable_vector_extension(char *str)
}
early_param("novx", disable_vector_extension);
static int __init noexec_setup(char *str)
{
bool enabled;
int rc;
rc = kstrtobool(str, &enabled);
if (!rc && !enabled) {
/* Disable no-execute support */
S390_lowcore.machine_flags &= ~MACHINE_FLAG_NX;
__ctl_clear_bit(0, 20);
}
return rc;
}
early_param("noexec", noexec_setup);
static int __init cad_setup(char *str)
{
int val;


@ -0,0 +1,35 @@
/*
* Copyright IBM Corp. 2017
*/
#include <linux/console.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <asm/sclp.h>
static void sclp_early_write(struct console *con, const char *s, unsigned int len)
{
__sclp_early_printk(s, len);
}
static struct console sclp_early_console = {
.name = "earlysclp",
.write = sclp_early_write,
.flags = CON_PRINTBUFFER | CON_BOOT,
.index = -1,
};
static int __init setup_early_printk(char *buf)
{
if (early_console)
return 0;
/* Accept only "earlyprintk" and "earlyprintk=sclp" */
if (buf && strncmp(buf, "sclp", 4))
return 0;
if (!sclp.has_linemode && !sclp.has_vt220)
return 0;
early_console = &sclp_early_console;
register_console(early_console);
return 0;
}
early_param("earlyprintk", setup_early_printk);


@ -8,8 +8,8 @@
* Martin Peschke <peschke@fh-brandenburg.de>
*/
#include <linux/module.h>
#include <asm/types.h>
#include <linux/types.h>
#include <linux/export.h>
#include <asm/ebcdic.h>
/*


@ -103,8 +103,7 @@ _PIF_WORK = (_PIF_PER_TRAP)
CHECK_STACK 1<<STACK_SHIFT,\savearea
aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
j 3f
1: LAST_BREAK %r14
UPDATE_VTIME %r14,%r15,\timer
1: UPDATE_VTIME %r14,%r15,\timer
2: lg %r15,__LC_ASYNC_STACK # load async stack
3: la %r11,STACK_FRAME_OVERHEAD(%r15)
.endm
@ -121,18 +120,6 @@ _PIF_WORK = (_PIF_PER_TRAP)
mvc __LC_LAST_UPDATE_TIMER(8),\enter_timer
.endm
.macro LAST_BREAK scratch
srag \scratch,%r10,23
#ifdef CONFIG_HAVE_MARCH_Z990_FEATURES
jz .+10
stg %r10,__TASK_thread+__THREAD_last_break(%r12)
#else
jz .+14
lghi \scratch,__TASK_thread
stg %r10,__THREAD_last_break(\scratch,%r12)
#endif
.endm
.macro REENABLE_IRQS
stg %r8,__LC_RETURN_PSW
ni __LC_RETURN_PSW,0xbf
@ -278,15 +265,14 @@ ENTRY(system_call)
stpt __LC_SYNC_ENTER_TIMER
.Lsysc_stmg:
stmg %r8,%r15,__LC_SAVE_AREA_SYNC
lg %r10,__LC_LAST_BREAK
lg %r12,__LC_CURRENT
lghi %r13,__TASK_thread
lghi %r14,_PIF_SYSCALL
.Lsysc_per:
lg %r15,__LC_KERNEL_STACK
la %r11,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs
LAST_BREAK %r13
.Lsysc_vtime:
UPDATE_VTIME %r10,%r13,__LC_SYNC_ENTER_TIMER
UPDATE_VTIME %r8,%r9,__LC_SYNC_ENTER_TIMER
stmg %r0,%r7,__PT_R0(%r11)
mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
mvc __PT_PSW(16,%r11),__LC_SVC_OLD_PSW
@ -294,12 +280,7 @@ ENTRY(system_call)
stg %r14,__PT_FLAGS(%r11)
.Lsysc_do_svc:
# load address of system call table
#ifdef CONFIG_HAVE_MARCH_Z990_FEATURES
lg %r10,__TASK_thread+__THREAD_sysc_table(%r12)
#else
lghi %r13,__TASK_thread
lg %r10,__THREAD_sysc_table(%r13,%r12)
#endif
llgh %r8,__PT_INT_CODE+2(%r11)
slag %r8,%r8,2 # shift and test for svc 0
jnz .Lsysc_nr_ok
@ -399,13 +380,11 @@ ENTRY(system_call)
brasl %r14,do_signal
TSTMSK __PT_FLAGS(%r11),_PIF_SYSCALL
jno .Lsysc_return
.Lsysc_do_syscall:
lghi %r13,__TASK_thread
lmg %r2,%r7,__PT_R2(%r11) # load svc arguments
lghi %r8,0 # svc 0 returns -ENOSYS
llgh %r1,__PT_INT_CODE+2(%r11) # load new svc number
cghi %r1,NR_syscalls
jnl .Lsysc_nr_ok # invalid svc number -> do svc 0
slag %r8,%r1,2
j .Lsysc_nr_ok # restart svc
lghi %r1,0 # svc 0 returns -ENOSYS
j .Lsysc_do_svc
#
# _TIF_NOTIFY_RESUME is set, call do_notify_resume
@ -508,8 +487,7 @@ ENTRY(pgm_check_handler)
1: CHECK_STACK STACK_SIZE,__LC_SAVE_AREA_SYNC
aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
j 3f
2: LAST_BREAK %r14
UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER
2: UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER
lg %r15,__LC_KERNEL_STACK
lgr %r14,%r12
aghi %r14,__TASK_thread # pointer to thread_struct
@ -518,6 +496,7 @@ ENTRY(pgm_check_handler)
jz 3f
mvc __THREAD_trap_tdb(256,%r14),0(%r13)
3: la %r11,STACK_FRAME_OVERHEAD(%r15)
stg %r10,__THREAD_last_break(%r14)
stmg %r0,%r7,__PT_R0(%r11)
mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
stmg %r8,%r9,__PT_PSW(%r11)
@ -547,6 +526,8 @@ ENTRY(pgm_check_handler)
LOCKDEP_SYS_EXIT
tm __PT_PSW+1(%r11),0x01 # returning to user ?
jno .Lsysc_restore
TSTMSK __PT_FLAGS(%r11),_PIF_SYSCALL
jo .Lsysc_do_syscall
j .Lsysc_tif
#
@ -564,6 +545,7 @@ ENTRY(pgm_check_handler)
#
.Lpgm_svcper:
mvc __LC_RETURN_PSW(8),__LC_SVC_NEW_PSW
lghi %r13,__TASK_thread
larl %r14,.Lsysc_per
stg %r14,__LC_RETURN_PSW+8
lghi %r14,_PIF_SYSCALL | _PIF_PER_TRAP
@ -576,7 +558,6 @@ ENTRY(io_int_handler)
STCK __LC_INT_CLOCK
stpt __LC_ASYNC_ENTER_TIMER
stmg %r8,%r15,__LC_SAVE_AREA_ASYNC
lg %r10,__LC_LAST_BREAK
lg %r12,__LC_CURRENT
larl %r13,cleanup_critical
lmg %r8,%r9,__LC_IO_OLD_PSW
@ -750,7 +731,6 @@ ENTRY(ext_int_handler)
STCK __LC_INT_CLOCK
stpt __LC_ASYNC_ENTER_TIMER
stmg %r8,%r15,__LC_SAVE_AREA_ASYNC
lg %r10,__LC_LAST_BREAK
lg %r12,__LC_CURRENT
larl %r13,cleanup_critical
lmg %r8,%r9,__LC_EXT_OLD_PSW
@ -893,7 +873,6 @@ ENTRY(mcck_int_handler)
la %r1,4095 # revalidate r1
spt __LC_CPU_TIMER_SAVE_AREA-4095(%r1) # revalidate cpu timer
lmg %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# revalidate gprs
lg %r10,__LC_LAST_BREAK
lg %r12,__LC_CURRENT
larl %r13,cleanup_critical
lmg %r8,%r9,__LC_MCK_OLD_PSW
@ -1088,9 +1067,10 @@ cleanup_critical:
0: # check if base register setup + TIF bit load has been done
clg %r9,BASED(.Lcleanup_system_call_insn+16)
jhe 0f
# set up saved registers r10 and r12
stg %r10,16(%r11) # r10 last break
stg %r12,32(%r11) # r12 task struct pointer
# set up saved register r12 task struct pointer
stg %r12,32(%r11)
# set up saved register r13 __TASK_thread offset
mvc 40(8,%r11),BASED(.Lcleanup_system_call_const)
0: # check if the user time update has been done
clg %r9,BASED(.Lcleanup_system_call_insn+24)
jh 0f
@ -1107,14 +1087,7 @@ cleanup_critical:
stg %r15,__LC_SYSTEM_TIMER
0: # update accounting time stamp
mvc __LC_LAST_UPDATE_TIMER(8),__LC_SYNC_ENTER_TIMER
# do LAST_BREAK
lg %r9,16(%r11)
srag %r9,%r9,23
jz 0f
lgr %r9,%r12
aghi %r9,__TASK_thread
mvc __THREAD_last_break(8,%r9),16(%r11)
0: # set up saved register r11
# set up saved register r11
lg %r15,__LC_KERNEL_STACK
la %r9,STACK_FRAME_OVERHEAD(%r15)
stg %r9,24(%r11) # r11 pt_regs pointer
@ -1136,6 +1109,8 @@ cleanup_critical:
.quad .Lsysc_per
.quad .Lsysc_vtime+36
.quad .Lsysc_vtime+42
.Lcleanup_system_call_const:
.quad __TASK_thread
.Lcleanup_sysc_tif:
larl %r9,.Lsysc_tif


@ -57,8 +57,8 @@ static ssize_t show_idle_count(struct device *dev,
do {
seq = read_seqcount_begin(&idle->seqcount);
idle_count = ACCESS_ONCE(idle->idle_count);
if (ACCESS_ONCE(idle->clock_idle_enter))
idle_count = READ_ONCE(idle->idle_count);
if (READ_ONCE(idle->clock_idle_enter))
idle_count++;
} while (read_seqcount_retry(&idle->seqcount, seq));
return sprintf(buf, "%llu\n", idle_count);
@ -75,9 +75,9 @@ static ssize_t show_idle_time(struct device *dev,
do {
now = get_tod_clock();
seq = read_seqcount_begin(&idle->seqcount);
idle_time = ACCESS_ONCE(idle->idle_time);
idle_enter = ACCESS_ONCE(idle->clock_idle_enter);
idle_exit = ACCESS_ONCE(idle->clock_idle_exit);
idle_time = READ_ONCE(idle->idle_time);
idle_enter = READ_ONCE(idle->clock_idle_enter);
idle_exit = READ_ONCE(idle->clock_idle_exit);
} while (read_seqcount_retry(&idle->seqcount, seq));
idle_time += idle_enter ? ((idle_exit ? : now) - idle_enter) : 0;
return sprintf(buf, "%llu\n", idle_time >> 12);
@ -93,8 +93,8 @@ u64 arch_cpu_idle_time(int cpu)
do {
now = get_tod_clock();
seq = read_seqcount_begin(&idle->seqcount);
idle_enter = ACCESS_ONCE(idle->clock_idle_enter);
idle_exit = ACCESS_ONCE(idle->clock_idle_exit);
idle_enter = READ_ONCE(idle->clock_idle_enter);
idle_exit = READ_ONCE(idle->clock_idle_exit);
} while (read_seqcount_retry(&idle->seqcount, seq));
return cputime_to_nsecs(idle_enter ? ((idle_exit ?: now) - idle_enter) : 0);


@ -8,7 +8,8 @@
*/
#include <linux/types.h>
#include <linux/module.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/reboot.h>
@ -1546,7 +1547,8 @@ static void dump_reipl_run(struct shutdown_trigger *trigger)
unsigned long ipib = (unsigned long) reipl_block_actual;
unsigned int csum;
csum = csum_partial(reipl_block_actual, reipl_block_actual->hdr.len, 0);
csum = (__force unsigned int)
csum_partial(reipl_block_actual, reipl_block_actual->hdr.len, 0);
mem_assign_absolute(S390_lowcore.ipib, ipib);
mem_assign_absolute(S390_lowcore.ipib_checksum, csum);
dump_run(trigger);
@ -1863,7 +1865,7 @@ static int __init s390_ipl_init(void)
{
char str[8] = {0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40};
sclp_get_ipl_info(&sclp_ipl_info);
sclp_early_get_ipl_info(&sclp_ipl_info);
/*
* Fix loadparm: There are systems where the (SCSI) LOADPARM
* returned by read SCP info is invalid (contains EBCDIC blanks)


@ -12,11 +12,12 @@
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/profile.h>
#include <linux/module.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/ftrace.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/irq.h>
#include <asm/irq_regs.h>


@ -4,7 +4,6 @@
* Copyright IBM Corp. 2011
* Author(s): Jan Glauber <jang@linux.vnet.ibm.com>
*/
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/stop_machine.h>
#include <linux/jump_label.h>


@ -45,11 +45,17 @@ DEFINE_INSN_CACHE_OPS(dmainsn);
static void *alloc_dmainsn_page(void)
{
return (void *)__get_free_page(GFP_KERNEL | GFP_DMA);
void *page;
page = (void *) __get_free_page(GFP_KERNEL | GFP_DMA);
if (page)
set_memory_x((unsigned long) page, 1);
return page;
}
static void free_dmainsn_page(void *page)
{
set_memory_nx((unsigned long) page, 1);
free_page((unsigned long)page);
}


@ -45,7 +45,8 @@ void *module_alloc(unsigned long size)
if (PAGE_ALIGN(size) > MODULES_LEN)
return NULL;
return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
GFP_KERNEL, PAGE_KERNEL, 0, NUMA_NO_NODE,
GFP_KERNEL, PAGE_KERNEL_EXEC,
0, NUMA_NO_NODE,
__builtin_return_address(0));
}


@ -13,7 +13,7 @@
#include <linux/errno.h>
#include <linux/hardirq.h>
#include <linux/time.h>
#include <linux/module.h>
#include <linux/export.h>
#include <asm/lowcore.h>
#include <asm/smp.h>
#include <asm/stp.h>


@ -26,7 +26,7 @@ static struct os_info os_info __page_aligned_data;
u32 os_info_csum(struct os_info *os_info)
{
int size = sizeof(*os_info) - offsetof(struct os_info, version_major);
return csum_partial(&os_info->version_major, size, 0);
return (__force u32)csum_partial(&os_info->version_major, size, 0);
}
/*
@ -46,7 +46,7 @@ void os_info_entry_add(int nr, void *ptr, u64 size)
{
os_info.entry[nr].addr = (u64)(unsigned long)ptr;
os_info.entry[nr].size = size;
os_info.entry[nr].csum = csum_partial(ptr, size, 0);
os_info.entry[nr].csum = (__force u32)csum_partial(ptr, size, 0);
os_info.csum = os_info_csum(&os_info);
}
@ -93,7 +93,7 @@ static void os_info_old_alloc(int nr, int align)
msg = "copy failed";
goto fail_free;
}
csum = csum_partial(buf_align, size, 0);
csum = (__force u32)csum_partial(buf_align, size, 0);
if (csum != os_info_old->entry[nr].csum) {
msg = "checksum failed";
goto fail_free;


@ -309,7 +309,7 @@ __init const struct attribute_group **cpumf_cf_event_group(void)
default:
model = NULL;
break;
};
}
if (!model)
goto out;


@ -23,7 +23,7 @@
#include <linux/compat.h>
#include <linux/kprobes.h>
#include <linux/random.h>
#include <linux/module.h>
#include <linux/export.h>
#include <linux/init_task.h>
#include <asm/io.h>
#include <asm/processor.h>


@ -32,7 +32,7 @@ static bool machine_has_cpu_mhz;
void __init cpu_detect_mhz_feature(void)
{
if (test_facility(34) && __ecag(ECAG_CPU_ATTRIBUTE, 0) != -1UL)
machine_has_cpu_mhz = 1;
machine_has_cpu_mhz = true;
}
static void update_cpu_mhz(void *arg)
@ -92,7 +92,7 @@ static void show_cpu_summary(struct seq_file *m, void *v)
{
static const char *hwcap_str[] = {
"esan3", "zarch", "stfle", "msa", "ldisp", "eimm", "dfp",
"edat", "etf3eh", "highgprs", "te", "vx"
"edat", "etf3eh", "highgprs", "te", "vx", "vxd", "vxe"
};
static const char * const int_hwcap_str[] = {
"sie"


@ -1,196 +0,0 @@
/*
* Copyright IBM Corp. 2015
* Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
*/
#include <linux/kernel.h>
#include <asm/ebcdic.h>
#include <asm/irq.h>
#include <asm/lowcore.h>
#include <asm/processor.h>
#include <asm/sclp.h>
#define EVTYP_VT220MSG_MASK 0x00000040
#define EVTYP_MSG_MASK 0x40000000
static char _sclp_work_area[4096] __aligned(PAGE_SIZE) __section(data);
static bool have_vt220 __section(data);
static bool have_linemode __section(data);
static void _sclp_wait_int(void)
{
unsigned long cr0, cr0_new, psw_mask, addr;
psw_t psw_ext_save, psw_wait;
__ctl_store(cr0, 0, 0);
cr0_new = cr0 | 0x200;
__ctl_load(cr0_new, 0, 0);
psw_ext_save = S390_lowcore.external_new_psw;
psw_mask = __extract_psw();
S390_lowcore.external_new_psw.mask = psw_mask;
psw_wait.mask = psw_mask | PSW_MASK_EXT | PSW_MASK_WAIT;
S390_lowcore.ext_int_code = 0;
do {
asm volatile(
" larl %[addr],0f\n"
" stg %[addr],%[psw_wait_addr]\n"
" stg %[addr],%[psw_ext_addr]\n"
" lpswe %[psw_wait]\n"
"0:\n"
: [addr] "=&d" (addr),
[psw_wait_addr] "=Q" (psw_wait.addr),
[psw_ext_addr] "=Q" (S390_lowcore.external_new_psw.addr)
: [psw_wait] "Q" (psw_wait)
: "cc", "memory");
} while (S390_lowcore.ext_int_code != EXT_IRQ_SERVICE_SIG);
__ctl_load(cr0, 0, 0);
S390_lowcore.external_new_psw = psw_ext_save;
}
static int _sclp_servc(unsigned int cmd, char *sccb)
{
unsigned int cc;
do {
asm volatile(
" .insn rre,0xb2200000,%1,%2\n"
" ipm %0\n"
: "=d" (cc) : "d" (cmd), "a" (sccb)
: "cc", "memory");
cc >>= 28;
if (cc == 3)
return -EINVAL;
_sclp_wait_int();
} while (cc != 0);
return (*(unsigned short *)(sccb + 6) == 0x20) ? 0 : -EIO;
}
static int _sclp_setup(int disable)
{
static unsigned char init_sccb[] = {
0x00, 0x1c,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x04,
0x80, 0x00, 0x00, 0x00, 0x40, 0x00, 0x00, 0x40,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
};
unsigned int *masks;
int rc;
memcpy(_sclp_work_area, init_sccb, 28);
masks = (unsigned int *)(_sclp_work_area + 12);
if (disable)
memset(masks, 0, 16);
/* SCLP write mask */
rc = _sclp_servc(0x00780005, _sclp_work_area);
if (rc)
return rc;
have_vt220 = masks[2] & EVTYP_VT220MSG_MASK;
have_linemode = masks[2] & EVTYP_MSG_MASK;
return 0;
}
/* Output multi-line text using SCLP Message interface. */
static void _sclp_print_lm(const char *str)
{
static unsigned char write_head[] = {
/* sccb header */
0x00, 0x52, /* 0 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 2 */
/* evbuf */
0x00, 0x4a, /* 8 */
0x02, 0x00, 0x00, 0x00, /* 10 */
/* mdb */
0x00, 0x44, /* 14 */
0x00, 0x01, /* 16 */
0xd4, 0xc4, 0xc2, 0x40, /* 18 */
0x00, 0x00, 0x00, 0x01, /* 22 */
/* go */
0x00, 0x38, /* 26 */
0x00, 0x01, /* 28 */
0x00, 0x00, 0x00, 0x00, /* 30 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 34 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 42 */
0x00, 0x00, 0x00, 0x00, /* 50 */
0x00, 0x00, /* 54 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 56 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 64 */
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 72 */
0x00, 0x00, /* 80 */
};
static unsigned char write_mto[] = {
/* mto */
0x00, 0x0a, /* 0 */
0x00, 0x04, /* 2 */
0x10, 0x00, /* 4 */
0x00, 0x00, 0x00, 0x00 /* 6 */
};
unsigned char *ptr, ch;
unsigned int count;
memcpy(_sclp_work_area, write_head, sizeof(write_head));
ptr = _sclp_work_area + sizeof(write_head);
do {
memcpy(ptr, write_mto, sizeof(write_mto));
for (count = sizeof(write_mto); (ch = *str++) != 0; count++) {
if (ch == 0x0a)
break;
ptr[count] = _ascebc[ch];
}
/* Update length fields in mto, mdb, evbuf and sccb */
*(unsigned short *) ptr = count;
*(unsigned short *)(_sclp_work_area + 14) += count;
*(unsigned short *)(_sclp_work_area + 8) += count;
*(unsigned short *)(_sclp_work_area + 0) += count;
ptr += count;
} while (ch != 0);
/* SCLP write data */
_sclp_servc(0x00760005, _sclp_work_area);
}
/* Output multi-line text (plus a newline) using SCLP VT220
* interface.
*/
static void _sclp_print_vt220(const char *str)
{
static unsigned char const write_head[] = {
/* sccb header */
0x00, 0x0e,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
/* evbuf header */
0x00, 0x06,
0x1a, 0x00, 0x00, 0x00,
};
size_t len = strlen(str);
if (sizeof(write_head) + len >= sizeof(_sclp_work_area))
len = sizeof(_sclp_work_area) - sizeof(write_head) - 1;
memcpy(_sclp_work_area, write_head, sizeof(write_head));
memcpy(_sclp_work_area + sizeof(write_head), str, len);
_sclp_work_area[sizeof(write_head) + len] = '\n';
/* Update length fields in evbuf and sccb headers */
*(unsigned short *)(_sclp_work_area + 8) += len + 1;
*(unsigned short *)(_sclp_work_area + 0) += len + 1;
/* SCLP write data */
(void)_sclp_servc(0x00760005, _sclp_work_area);
}
/* Output one or more lines of text on the SCLP console (VT220 and /
* or line-mode). All lines get terminated; no need for a trailing LF.
*/
void _sclp_print_early(const char *str)
{
if (_sclp_setup(0) != 0)
return;
if (have_linemode)
_sclp_print_lm(str);
if (have_vt220)
_sclp_print_vt220(str);
_sclp_setup(1);
}


@ -636,6 +636,8 @@ static void __init reserve_crashkernel(void)
static void __init reserve_initrd(void)
{
#ifdef CONFIG_BLK_DEV_INITRD
if (!INITRD_START || !INITRD_SIZE)
return;
initrd_start = INITRD_START;
initrd_end = initrd_start + INITRD_SIZE;
memblock_reserve(INITRD_START, INITRD_SIZE);
@ -747,7 +749,7 @@ static int __init setup_hwcaps(void)
/*
* Huge page support HWCAP_S390_HPAGE is bit 7.
*/
if (MACHINE_HAS_HPAGE)
if (MACHINE_HAS_EDAT1)
elf_hwcap |= HWCAP_S390_HPAGE;
/*
@ -767,8 +769,14 @@ static int __init setup_hwcaps(void)
* can be disabled with the "novx" parameter. Use MACHINE_HAS_VX
* instead of facility bit 129.
*/
if (MACHINE_HAS_VX)
if (MACHINE_HAS_VX) {
elf_hwcap |= HWCAP_S390_VXRS;
if (test_facility(134))
elf_hwcap |= HWCAP_S390_VXRS_EXT;
if (test_facility(135))
elf_hwcap |= HWCAP_S390_VXRS_BCD;
}
get_cpu_id(&cpu_id);
add_device_randomness(&cpu_id, sizeof(cpu_id));
switch (cpu_id.machine) {
@ -820,10 +828,10 @@ static void __init setup_randomness(void)
{
struct sysinfo_3_2_2 *vmms;
vmms = (struct sysinfo_3_2_2 *) alloc_page(GFP_KERNEL);
if (vmms && stsi(vmms, 3, 2, 2) == 0 && vmms->count)
add_device_randomness(&vmms, vmms->count);
free_page((unsigned long) vmms);
vmms = (struct sysinfo_3_2_2 *) memblock_alloc(PAGE_SIZE, PAGE_SIZE);
if (stsi(vmms, 3, 2, 2) == 0 && vmms->count)
add_device_randomness(&vmms->vm, sizeof(vmms->vm[0]) * vmms->count);
memblock_free((unsigned long) vmms, PAGE_SIZE);
}
/*


@ -20,7 +20,7 @@
#include <linux/workqueue.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/err.h>


@ -8,7 +8,7 @@
#include <linux/sched.h>
#include <linux/stacktrace.h>
#include <linux/kallsyms.h>
#include <linux/module.h>
#include <linux/export.h>
static int __save_address(void *data, unsigned long address, int nosched)
{


@ -196,7 +196,7 @@ pgm_check_entry:
larl %r15,init_thread_union
ahi %r15,1<<(PAGE_SHIFT+THREAD_SIZE_ORDER)
larl %r2,.Lpanic_string
larl %r3,_sclp_print_early
larl %r3,sclp_early_printk
lghi %r1,0
sam31
sigp %r1,%r0,SIGP_SET_ARCHITECTURE
@ -273,7 +273,7 @@ restore_registers:
.Ldisabled_wait_31:
.long 0x000a0000,0x00000000
.Lpanic_string:
.asciz "Resume not possible because suspend CPU is no longer available"
.asciz "Resume not possible because suspend CPU is no longer available\n"
.align 8
.Lrestart_diag308_psw:
.long 0x00080000,0x80000000


@ -10,7 +10,7 @@
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <asm/ebcdic.h>
#include <asm/sysinfo.h>


@ -16,7 +16,7 @@
#include <linux/kernel_stat.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/param.h>
@ -110,7 +110,7 @@ unsigned long long monotonic_clock(void)
}
EXPORT_SYMBOL(monotonic_clock);
void tod_to_timeval(__u64 todval, struct timespec64 *xt)
static void tod_to_timeval(__u64 todval, struct timespec64 *xt)
{
unsigned long long sec;
@ -120,7 +120,6 @@ void tod_to_timeval(__u64 todval, struct timespec64 *xt)
todval -= (sec * 1000000) << 12;
xt->tv_nsec = ((todval * 1000) >> 12);
}
EXPORT_SYMBOL(tod_to_timeval);
void clock_comparator_work(void)
{
@ -492,7 +491,7 @@ static void __init stp_reset(void)
pr_warn("The real or virtual hardware system does not provide an STP interface\n");
free_page((unsigned long) stp_page);
stp_page = NULL;
stp_online = 0;
stp_online = false;
}
}


@ -38,7 +38,6 @@ static void set_topology_timer(void);
static void topology_work_fn(struct work_struct *work);
static struct sysinfo_15_1_x *tl_info;
static bool topology_enabled = true;
static DECLARE_WORK(topology_work, topology_work_fn);
/*
@ -59,7 +58,7 @@ static cpumask_t cpu_group_map(struct mask_info *info, unsigned int cpu)
cpumask_t mask;
cpumask_copy(&mask, cpumask_of(cpu));
if (!topology_enabled || !MACHINE_HAS_TOPOLOGY)
if (!MACHINE_HAS_TOPOLOGY)
return mask;
for (; info; info = info->next) {
if (cpumask_test_cpu(cpu, &info->mask))
@ -74,7 +73,7 @@ static cpumask_t cpu_thread_map(unsigned int cpu)
int i;
cpumask_copy(&mask, cpumask_of(cpu));
if (!topology_enabled || !MACHINE_HAS_TOPOLOGY)
if (!MACHINE_HAS_TOPOLOGY)
return mask;
cpu -= cpu % (smp_cpu_mtid + 1);
for (i = 0; i <= smp_cpu_mtid; i++)
@ -428,12 +427,6 @@ static const struct cpumask *cpu_drawer_mask(int cpu)
return &cpu_topology[cpu].drawer_mask;
}
static int __init early_parse_topology(char *p)
{
return kstrtobool(p, &topology_enabled);
}
early_param("topology", early_parse_topology);
static struct sched_domain_topology_level s390_topology[] = {
{ cpu_thread_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
{ cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
@ -461,18 +454,16 @@ static void __init alloc_masks(struct sysinfo_15_1_x *info,
void __init topology_init_early(void)
{
struct sysinfo_15_1_x *info;
int i;
set_sched_topology(s390_topology);
if (!MACHINE_HAS_TOPOLOGY)
goto out;
tl_info = memblock_virt_alloc(sizeof(*tl_info), PAGE_SIZE);
tl_info = memblock_virt_alloc(PAGE_SIZE, PAGE_SIZE);
info = tl_info;
store_topology(info);
pr_info("The CPU configuration topology of the machine is:");
for (i = 0; i < TOPOLOGY_NR_MAG; i++)
printk(KERN_CONT " %d", info->mag[i]);
printk(KERN_CONT " / %d\n", info->mnest);
pr_info("The CPU configuration topology of the machine is: %d %d %d %d %d %d / %d\n",
info->mag[0], info->mag[1], info->mag[2], info->mag[3],
info->mag[4], info->mag[5], info->mnest);
alloc_masks(info, &socket_info, 1);
alloc_masks(info, &book_info, 2);
alloc_masks(info, &drawer_info, 3);


@ -9,7 +9,7 @@
* as published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>


@ -44,6 +44,7 @@ SECTIONS
*(.gnu.warning)
} :text = 0x0700
. = ALIGN(PAGE_SIZE);
_etext = .; /* End of text section */
NOTES :text :note
@ -79,7 +80,13 @@ SECTIONS
. = ALIGN(PAGE_SIZE); /* Init code and data */
__init_begin = .;
INIT_TEXT_SECTION(PAGE_SIZE)
. = ALIGN(PAGE_SIZE);
.init.text : AT(ADDR(.init.text) - LOAD_OFFSET) {
VMLINUX_SYMBOL(_sinittext) = . ;
INIT_TEXT
. = ALIGN(PAGE_SIZE);
VMLINUX_SYMBOL(_einittext) = . ;
}
/*
* .exit.text is discarded at runtime, not link time,


@ -23,6 +23,7 @@
#include <linux/kvm_host.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/timer.h>


@ -311,7 +311,7 @@ static int handle_sske(struct kvm_vcpu *vcpu)
if (rc < 0)
return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
start += PAGE_SIZE;
};
}
if (m3 & (SSKE_MC | SSKE_MR)) {
if (m3 & SSKE_MB) {


@ -899,7 +899,7 @@ static int vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
if (rc || scb_s->icptcode || signal_pending(current) ||
kvm_s390_vcpu_has_irq(vcpu, 0))
break;
};
}
if (rc == -EFAULT) {
/*


@ -9,7 +9,7 @@
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/timex.h>
#include <linux/module.h>
#include <linux/export.h>
#include <linux/irqflags.h>
#include <linux/interrupt.h>
#include <linux/irq.h>


@ -14,31 +14,29 @@ ENTRY(memmove)
ltgr %r4,%r4
lgr %r1,%r2
bzr %r14
aghi %r4,-1
clgr %r2,%r3
jnh .Lmemmove_forward
la %r5,0(%r4,%r3)
la %r5,1(%r4,%r3)
clgr %r2,%r5
jl .Lmemmove_reverse
.Lmemmove_forward:
aghi %r4,-1
srlg %r0,%r4,8
ltgr %r0,%r0
jz .Lmemmove_rest
.Lmemmove_loop:
jz .Lmemmove_forward_remainder
.Lmemmove_forward_loop:
mvc 0(256,%r1),0(%r3)
la %r1,256(%r1)
la %r3,256(%r3)
brctg %r0,.Lmemmove_loop
.Lmemmove_rest:
brctg %r0,.Lmemmove_forward_loop
.Lmemmove_forward_remainder:
larl %r5,.Lmemmove_mvc
ex %r4,0(%r5)
br %r14
.Lmemmove_reverse:
aghi %r4,-1
.Lmemmove_reverse_loop:
ic %r0,0(%r4,%r3)
stc %r0,0(%r4,%r1)
brctg %r4,.Lmemmove_reverse_loop
brctg %r4,.Lmemmove_reverse
ic %r0,0(%r4,%r3)
stc %r0,0(%r4,%r1)
br %r14
@ -70,12 +68,12 @@ ENTRY(memset)
srlg %r3,%r4,8
ltgr %r3,%r3
lgr %r1,%r2
jz .Lmemset_clear_rest
jz .Lmemset_clear_remainder
.Lmemset_clear_loop:
xc 0(256,%r1),0(%r1)
la %r1,256(%r1)
brctg %r3,.Lmemset_clear_loop
.Lmemset_clear_rest:
.Lmemset_clear_remainder:
larl %r3,.Lmemset_xc
ex %r4,0(%r3)
br %r14
@ -87,12 +85,12 @@ ENTRY(memset)
aghi %r4,-2
srlg %r3,%r4,8
ltgr %r3,%r3
jz .Lmemset_fill_rest
jz .Lmemset_fill_remainder
.Lmemset_fill_loop:
mvc 1(256,%r1),0(%r1)
la %r1,256(%r1)
brctg %r3,.Lmemset_fill_loop
.Lmemset_fill_rest:
.Lmemset_fill_remainder:
larl %r3,.Lmemset_mvc
ex %r4,0(%r3)
br %r14
@ -115,7 +113,7 @@ ENTRY(memcpy)
ltgr %r5,%r5
lgr %r1,%r2
jnz .Lmemcpy_loop
.Lmemcpy_rest:
.Lmemcpy_remainder:
larl %r5,.Lmemcpy_mvc
ex %r4,0(%r5)
br %r14
@ -124,7 +122,7 @@ ENTRY(memcpy)
la %r1,256(%r1)
la %r3,256(%r3)
brctg %r5,.Lmemcpy_loop
j .Lmemcpy_rest
j .Lmemcpy_remainder
.Lmemcpy_mvc:
mvc 0(1,%r1),0(%r3)
EXPORT_SYMBOL(memcpy)


@ -6,7 +6,7 @@
*/
#include <linux/types.h>
#include <linux/module.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/smp.h>
@ -133,7 +133,7 @@ int arch_spin_trylock_retry(arch_spinlock_t *lp)
int count;
for (count = spin_retry; count > 0; count--) {
owner = ACCESS_ONCE(lp->lock);
owner = READ_ONCE(lp->lock);
/* Try to get the lock if it is free. */
if (!owner) {
if (_raw_compare_and_swap(&lp->lock, 0, cpu))


@ -9,7 +9,8 @@
#define IN_ARCH_STRING_C 1
#include <linux/types.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/export.h>
/*
* Helper functions to find the end of a string


@ -6,7 +6,7 @@
*/
#include <linux/types.h>
#include <linux/module.h>
#include <linux/export.h>
#include <linux/raid/xor.h>
static void xor_xc_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)


@ -10,6 +10,7 @@
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/sysctl.h>

View File

@ -1,6 +1,6 @@
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
@ -49,8 +49,8 @@ static void print_prot(struct seq_file *m, unsigned int pr, int level)
seq_printf(m, "I\n");
return;
}
seq_printf(m, "%s", pr & _PAGE_PROTECT ? "RO " : "RW ");
seq_putc(m, '\n');
seq_puts(m, (pr & _PAGE_PROTECT) ? "RO " : "RW ");
seq_puts(m, (pr & _PAGE_NOEXEC) ? "NX\n" : "X\n");
}
static void note_page(struct seq_file *m, struct pg_state *st,
@ -117,7 +117,8 @@ static void walk_pte_level(struct seq_file *m, struct pg_state *st,
for (i = 0; i < PTRS_PER_PTE && addr < max_addr; i++) {
st->current_address = addr;
pte = pte_offset_kernel(pmd, addr);
prot = pte_val(*pte) & (_PAGE_PROTECT | _PAGE_INVALID);
prot = pte_val(*pte) &
(_PAGE_PROTECT | _PAGE_INVALID | _PAGE_NOEXEC);
note_page(m, st, prot, 4);
addr += PAGE_SIZE;
}
@ -135,7 +136,9 @@ static void walk_pmd_level(struct seq_file *m, struct pg_state *st,
pmd = pmd_offset(pud, addr);
if (!pmd_none(*pmd)) {
if (pmd_large(*pmd)) {
prot = pmd_val(*pmd) & _SEGMENT_ENTRY_PROTECT;
prot = pmd_val(*pmd) &
(_SEGMENT_ENTRY_PROTECT |
_SEGMENT_ENTRY_NOEXEC);
note_page(m, st, prot, 3);
} else
walk_pte_level(m, st, pmd, addr);
@ -157,7 +160,9 @@ static void walk_pud_level(struct seq_file *m, struct pg_state *st,
pud = pud_offset(pgd, addr);
if (!pud_none(*pud))
if (pud_large(*pud)) {
prot = pud_val(*pud) & _REGION_ENTRY_PROTECT;
prot = pud_val(*pud) &
(_REGION_ENTRY_PROTECT |
_REGION_ENTRY_NOEXEC);
note_page(m, st, prot, 2);
} else
walk_pmd_level(m, st, pud, addr);
@ -183,6 +188,7 @@ static void walk_pgd_level(struct seq_file *m)
else
note_page(m, &st, _PAGE_INVALID, 1);
addr += PGDIR_SIZE;
cond_resched();
}
/* Flush out the last page */
st.current_address = max_addr;

View File

@ -14,7 +14,7 @@
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/export.h>
#include <linux/bootmem.h>
#include <linux/ctype.h>
#include <linux/ioport.h>
@ -154,7 +154,7 @@ dcss_mkname(char *name, char *dcss_name)
if (name[i] == '\0')
break;
dcss_name[i] = toupper(name[i]);
};
}
for (; i < 8; i++)
dcss_name[i] = ' ';
ASCEBC(dcss_name, 8);

View File

@ -311,12 +311,34 @@ static noinline void do_sigbus(struct pt_regs *regs)
force_sig_info(SIGBUS, &si, tsk);
}
static noinline void do_fault_error(struct pt_regs *regs, int fault)
static noinline int signal_return(struct pt_regs *regs)
{
u16 instruction;
int rc;
rc = __get_user(instruction, (u16 __user *) regs->psw.addr);
if (rc)
return rc;
if (instruction == 0x0a77) {
set_pt_regs_flag(regs, PIF_SYSCALL);
regs->int_code = 0x00040077;
return 0;
} else if (instruction == 0x0aad) {
set_pt_regs_flag(regs, PIF_SYSCALL);
regs->int_code = 0x000400ad;
return 0;
}
return -EACCES;
}
static noinline void do_fault_error(struct pt_regs *regs, int access, int fault)
{
int si_code;
switch (fault) {
case VM_FAULT_BADACCESS:
if (access == VM_EXEC && signal_return(regs) == 0)
break;
case VM_FAULT_BADMAP:
/* Bad memory access. Check if it is kernel or user space. */
if (user_mode(regs)) {
@ -324,7 +346,7 @@ static noinline void do_fault_error(struct pt_regs *regs, int fault)
si_code = (fault == VM_FAULT_BADMAP) ?
SEGV_MAPERR : SEGV_ACCERR;
do_sigsegv(regs, si_code);
return;
break;
}
case VM_FAULT_BADCONTEXT:
case VM_FAULT_PFAULT:
@ -525,7 +547,7 @@ out:
void do_protection_exception(struct pt_regs *regs)
{
unsigned long trans_exc_code;
int fault;
int access, fault;
trans_exc_code = regs->int_parm_long;
/*
@ -544,9 +566,17 @@ void do_protection_exception(struct pt_regs *regs)
do_low_address(regs);
return;
}
fault = do_exception(regs, VM_WRITE);
if (unlikely(MACHINE_HAS_NX && (trans_exc_code & 0x80))) {
regs->int_parm_long = (trans_exc_code & ~PAGE_MASK) |
(regs->psw.addr & PAGE_MASK);
access = VM_EXEC;
fault = VM_FAULT_BADACCESS;
} else {
access = VM_WRITE;
fault = do_exception(regs, access);
}
if (unlikely(fault))
do_fault_error(regs, fault);
do_fault_error(regs, access, fault);
}
NOKPROBE_SYMBOL(do_protection_exception);
@ -557,7 +587,7 @@ void do_dat_exception(struct pt_regs *regs)
access = VM_READ | VM_EXEC | VM_WRITE;
fault = do_exception(regs, access);
if (unlikely(fault))
do_fault_error(regs, fault);
do_fault_error(regs, access, fault);
}
NOKPROBE_SYMBOL(do_dat_exception);
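For context on the signal_return() helper introduced above: 0x0a is the s390 svc opcode, and 0x77 (119) and 0xad (173) correspond to the sigreturn and rt_sigreturn system call numbers, so an execute-protection fault that lands on one of these two-byte instructions (the classic signal return trampoline on a now non-executable stack) is redirected into the system call path instead of raising SIGSEGV. A stand-alone sketch of just the opcode check, with a made-up helper name:

/*
 * Illustration only: the instruction patterns matched by signal_return().
 * 0x0a77 is "svc 119" (sigreturn), 0x0aad is "svc 173" (rt_sigreturn).
 */
#include <stdio.h>
#include <stdint.h>

static int is_signal_return_svc(uint16_t insn)
{
	return insn == 0x0a77 || insn == 0x0aad;
}

int main(void)
{
	/* prints "1 0" */
	printf("%d %d\n", is_signal_return_svc(0x0a77), is_signal_return_svc(0x0700));
	return 0;
}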

View File

@ -59,6 +59,8 @@ static inline unsigned long __pte_to_rste(pte_t pte)
rste |= move_set_bit(pte_val(pte), _PAGE_SOFT_DIRTY,
_SEGMENT_ENTRY_SOFT_DIRTY);
#endif
rste |= move_set_bit(pte_val(pte), _PAGE_NOEXEC,
_SEGMENT_ENTRY_NOEXEC);
} else
rste = _SEGMENT_ENTRY_INVALID;
return rste;
@ -113,6 +115,8 @@ static inline pte_t __rste_to_pte(unsigned long rste)
pte_val(pte) |= move_set_bit(rste, _SEGMENT_ENTRY_SOFT_DIRTY,
_PAGE_DIRTY);
#endif
pte_val(pte) |= move_set_bit(rste, _SEGMENT_ENTRY_NOEXEC,
_PAGE_NOEXEC);
} else
pte_val(pte) = _PAGE_INVALID;
return pte;
@ -121,7 +125,11 @@ static inline pte_t __rste_to_pte(unsigned long rste)
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte)
{
unsigned long rste = __pte_to_rste(pte);
unsigned long rste;
rste = __pte_to_rste(pte);
if (!MACHINE_HAS_NX)
rste &= ~_SEGMENT_ENTRY_NOEXEC;
/* Set correct table type for 2G hugepages */
if ((pte_val(*ptep) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)

View File

@ -137,6 +137,9 @@ void __init mem_init(void)
void free_initmem(void)
{
__set_memory((unsigned long) _sinittext,
(_einittext - _sinittext) >> PAGE_SHIFT,
SET_MEMORY_RW | SET_MEMORY_NX);
free_initmem_default(POISON_FREE_INITMEM);
}
@ -148,6 +151,15 @@ void __init free_initrd_mem(unsigned long start, unsigned long end)
}
#endif
unsigned long memory_block_size_bytes(void)
{
/*
* Make sure the memory block size is always greater
* than or equal to the memory increment size.
*/
return max_t(unsigned long, MIN_MEMORY_BLOCK_SIZE, sclp.rzm);
}
#ifdef CONFIG_MEMORY_HOTPLUG
int arch_add_memory(int nid, u64 start, u64 size, bool for_device)
{
@ -191,15 +203,6 @@ int arch_add_memory(int nid, u64 start, u64 size, bool for_device)
return rc;
}
unsigned long memory_block_size_bytes(void)
{
/*
* Make sure the memory block size is always greater
* than or equal to the memory increment size.
*/
return max_t(unsigned long, MIN_MEMORY_BLOCK_SIZE, sclp.rzm);
}
#ifdef CONFIG_MEMORY_HOTREMOVE
int arch_remove_memory(u64 start, u64 size)
{

View File

@ -5,7 +5,6 @@
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/debugfs.h>
@ -19,6 +18,8 @@
static inline void memblock_physmem_add(phys_addr_t start, phys_addr_t size)
{
memblock_dbg("memblock_physmem_add: [%#016llx-%#016llx]\n",
start, start + size - 1);
memblock_add_range(&memblock.memory, start, size, 0, 0);
memblock_add_range(&memblock.physmem, start, size, 0, 0);
}
@ -39,7 +40,8 @@ void __init detect_memory_memblock(void)
memblock_set_bottom_up(true);
do {
size = 0;
type = tprot(addr);
/* assume lowcore is writable */
type = addr ? tprot(addr) : CHUNK_READ_WRITE;
do {
size += rzm;
if (max_physmem_end && addr + size >= max_physmem_end)
@ -55,4 +57,5 @@ void __init detect_memory_memblock(void)
memblock_set_bottom_up(false);
if (!max_physmem_end)
max_physmem_end = memblock_end_of_DRAM();
memblock_dump_all();
}

View File

@ -26,11 +26,11 @@
#include <linux/personality.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/compat.h>
#include <linux/security.h>
#include <asm/pgalloc.h>
#include <asm/elf.h>
static unsigned long stack_maxrandom_size(void)
{

View File

@ -3,7 +3,6 @@
* Author(s): Jan Glauber <jang@linux.vnet.ibm.com>
*/
#include <linux/hugetlb.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <asm/cacheflush.h>
#include <asm/facility.h>
@ -81,24 +80,24 @@ static void pgt_set(unsigned long *old, unsigned long new, unsigned long addr,
}
}
struct cpa {
unsigned int set_ro : 1;
unsigned int clear_ro : 1;
};
static int walk_pte_level(pmd_t *pmdp, unsigned long addr, unsigned long end,
struct cpa cpa)
unsigned long flags)
{
pte_t *ptep, new;
ptep = pte_offset(pmdp, addr);
do {
if (pte_none(*ptep))
new = *ptep;
if (pte_none(new))
return -EINVAL;
if (cpa.set_ro)
new = pte_wrprotect(*ptep);
else if (cpa.clear_ro)
new = pte_mkwrite(pte_mkdirty(*ptep));
if (flags & SET_MEMORY_RO)
new = pte_wrprotect(new);
else if (flags & SET_MEMORY_RW)
new = pte_mkwrite(pte_mkdirty(new));
if ((flags & SET_MEMORY_NX) && MACHINE_HAS_NX)
pte_val(new) |= _PAGE_NOEXEC;
else if (flags & SET_MEMORY_X)
pte_val(new) &= ~_PAGE_NOEXEC;
pgt_set((unsigned long *)ptep, pte_val(new), addr, CRDTE_DTT_PAGE);
ptep++;
addr += PAGE_SIZE;
@ -112,14 +111,17 @@ static int split_pmd_page(pmd_t *pmdp, unsigned long addr)
unsigned long pte_addr, prot;
pte_t *pt_dir, *ptep;
pmd_t new;
int i, ro;
int i, ro, nx;
pt_dir = vmem_pte_alloc();
if (!pt_dir)
return -ENOMEM;
pte_addr = pmd_pfn(*pmdp) << PAGE_SHIFT;
ro = !!(pmd_val(*pmdp) & _SEGMENT_ENTRY_PROTECT);
nx = !!(pmd_val(*pmdp) & _SEGMENT_ENTRY_NOEXEC);
prot = pgprot_val(ro ? PAGE_KERNEL_RO : PAGE_KERNEL);
if (!nx)
prot &= ~_PAGE_NOEXEC;
ptep = pt_dir;
for (i = 0; i < PTRS_PER_PTE; i++) {
pte_val(*ptep) = pte_addr | prot;
@ -133,19 +135,24 @@ static int split_pmd_page(pmd_t *pmdp, unsigned long addr)
return 0;
}
static void modify_pmd_page(pmd_t *pmdp, unsigned long addr, struct cpa cpa)
static void modify_pmd_page(pmd_t *pmdp, unsigned long addr,
unsigned long flags)
{
pmd_t new;
pmd_t new = *pmdp;
if (cpa.set_ro)
new = pmd_wrprotect(*pmdp);
else if (cpa.clear_ro)
new = pmd_mkwrite(pmd_mkdirty(*pmdp));
if (flags & SET_MEMORY_RO)
new = pmd_wrprotect(new);
else if (flags & SET_MEMORY_RW)
new = pmd_mkwrite(pmd_mkdirty(new));
if ((flags & SET_MEMORY_NX) && MACHINE_HAS_NX)
pmd_val(new) |= _SEGMENT_ENTRY_NOEXEC;
else if (flags & SET_MEMORY_X)
pmd_val(new) &= ~_SEGMENT_ENTRY_NOEXEC;
pgt_set((unsigned long *)pmdp, pmd_val(new), addr, CRDTE_DTT_SEGMENT);
}
static int walk_pmd_level(pud_t *pudp, unsigned long addr, unsigned long end,
struct cpa cpa)
unsigned long flags)
{
unsigned long next;
pmd_t *pmdp;
@ -163,9 +170,9 @@ static int walk_pmd_level(pud_t *pudp, unsigned long addr, unsigned long end,
return rc;
continue;
}
modify_pmd_page(pmdp, addr, cpa);
modify_pmd_page(pmdp, addr, flags);
} else {
rc = walk_pte_level(pmdp, addr, next, cpa);
rc = walk_pte_level(pmdp, addr, next, flags);
if (rc)
return rc;
}
@ -181,14 +188,17 @@ static int split_pud_page(pud_t *pudp, unsigned long addr)
unsigned long pmd_addr, prot;
pmd_t *pm_dir, *pmdp;
pud_t new;
int i, ro;
int i, ro, nx;
pm_dir = vmem_pmd_alloc();
if (!pm_dir)
return -ENOMEM;
pmd_addr = pud_pfn(*pudp) << PAGE_SHIFT;
ro = !!(pud_val(*pudp) & _REGION_ENTRY_PROTECT);
nx = !!(pud_val(*pudp) & _REGION_ENTRY_NOEXEC);
prot = pgprot_val(ro ? SEGMENT_KERNEL_RO : SEGMENT_KERNEL);
if (!nx)
prot &= ~_SEGMENT_ENTRY_NOEXEC;
pmdp = pm_dir;
for (i = 0; i < PTRS_PER_PMD; i++) {
pmd_val(*pmdp) = pmd_addr | prot;
@ -202,19 +212,24 @@ static int split_pud_page(pud_t *pudp, unsigned long addr)
return 0;
}
static void modify_pud_page(pud_t *pudp, unsigned long addr, struct cpa cpa)
static void modify_pud_page(pud_t *pudp, unsigned long addr,
unsigned long flags)
{
pud_t new;
pud_t new = *pudp;
if (cpa.set_ro)
new = pud_wrprotect(*pudp);
else if (cpa.clear_ro)
new = pud_mkwrite(pud_mkdirty(*pudp));
if (flags & SET_MEMORY_RO)
new = pud_wrprotect(new);
else if (flags & SET_MEMORY_RW)
new = pud_mkwrite(pud_mkdirty(new));
if ((flags & SET_MEMORY_NX) && MACHINE_HAS_NX)
pud_val(new) |= _REGION_ENTRY_NOEXEC;
else if (flags & SET_MEMORY_X)
pud_val(new) &= ~_REGION_ENTRY_NOEXEC;
pgt_set((unsigned long *)pudp, pud_val(new), addr, CRDTE_DTT_REGION3);
}
static int walk_pud_level(pgd_t *pgd, unsigned long addr, unsigned long end,
struct cpa cpa)
unsigned long flags)
{
unsigned long next;
pud_t *pudp;
@ -232,9 +247,9 @@ static int walk_pud_level(pgd_t *pgd, unsigned long addr, unsigned long end,
break;
continue;
}
modify_pud_page(pudp, addr, cpa);
modify_pud_page(pudp, addr, flags);
} else {
rc = walk_pmd_level(pudp, addr, next, cpa);
rc = walk_pmd_level(pudp, addr, next, flags);
}
pudp++;
addr = next;
@ -246,7 +261,7 @@ static int walk_pud_level(pgd_t *pgd, unsigned long addr, unsigned long end,
static DEFINE_MUTEX(cpa_mutex);
static int change_page_attr(unsigned long addr, unsigned long end,
struct cpa cpa)
unsigned long flags)
{
unsigned long next;
int rc = -EINVAL;
@ -262,7 +277,7 @@ static int change_page_attr(unsigned long addr, unsigned long end,
if (pgd_none(*pgdp))
break;
next = pgd_addr_end(addr, end);
rc = walk_pud_level(pgdp, addr, next, cpa);
rc = walk_pud_level(pgdp, addr, next, flags);
if (rc)
break;
cond_resched();
@ -271,35 +286,10 @@ static int change_page_attr(unsigned long addr, unsigned long end,
return rc;
}
int set_memory_ro(unsigned long addr, int numpages)
int __set_memory(unsigned long addr, int numpages, unsigned long flags)
{
struct cpa cpa = {
.set_ro = 1,
};
addr &= PAGE_MASK;
return change_page_attr(addr, addr + numpages * PAGE_SIZE, cpa);
}
int set_memory_rw(unsigned long addr, int numpages)
{
struct cpa cpa = {
.clear_ro = 1,
};
addr &= PAGE_MASK;
return change_page_attr(addr, addr + numpages * PAGE_SIZE, cpa);
}
/* not possible */
int set_memory_nx(unsigned long addr, int numpages)
{
return 0;
}
int set_memory_x(unsigned long addr, int numpages)
{
return 0;
return change_page_attr(addr, addr + numpages * PAGE_SIZE, flags);
}
#ifdef CONFIG_DEBUG_PAGEALLOC
@ -339,7 +329,7 @@ void __kernel_map_pages(struct page *page, int numpages, int enable)
nr = min(numpages - i, nr);
if (enable) {
for (j = 0; j < nr; j++) {
pte_val(*pte) = address | pgprot_val(PAGE_KERNEL);
pte_val(*pte) &= ~_PAGE_INVALID;
address += PAGE_SIZE;
pte++;
}

View File

@ -275,6 +275,8 @@ void ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
{
pgste_t pgste;
if (!MACHINE_HAS_NX)
pte_val(pte) &= ~_PAGE_NOEXEC;
if (mm_has_pgste(mm)) {
pgste = pgste_get(ptep);
pgste_set_key(ptep, pgste, pte, mm);

View File

@ -6,7 +6,7 @@
#include <linux/bootmem.h>
#include <linux/pfn.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
@ -79,6 +79,7 @@ pte_t __ref *vmem_pte_alloc(void)
*/
static int vmem_add_mem(unsigned long start, unsigned long size)
{
unsigned long pgt_prot, sgt_prot, r3_prot;
unsigned long pages4k, pages1m, pages2g;
unsigned long end = start + size;
unsigned long address = start;
@ -88,6 +89,14 @@ static int vmem_add_mem(unsigned long start, unsigned long size)
pte_t *pt_dir;
int ret = -ENOMEM;
pgt_prot = pgprot_val(PAGE_KERNEL);
sgt_prot = pgprot_val(SEGMENT_KERNEL);
r3_prot = pgprot_val(REGION3_KERNEL);
if (!MACHINE_HAS_NX) {
pgt_prot &= ~_PAGE_NOEXEC;
sgt_prot &= ~_SEGMENT_ENTRY_NOEXEC;
r3_prot &= ~_REGION_ENTRY_NOEXEC;
}
pages4k = pages1m = pages2g = 0;
while (address < end) {
pg_dir = pgd_offset_k(address);
@ -101,7 +110,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size)
if (MACHINE_HAS_EDAT2 && pud_none(*pu_dir) && address &&
!(address & ~PUD_MASK) && (address + PUD_SIZE <= end) &&
!debug_pagealloc_enabled()) {
pud_val(*pu_dir) = address | pgprot_val(REGION3_KERNEL);
pud_val(*pu_dir) = address | r3_prot;
address += PUD_SIZE;
pages2g++;
continue;
@ -116,7 +125,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size)
if (MACHINE_HAS_EDAT1 && pmd_none(*pm_dir) && address &&
!(address & ~PMD_MASK) && (address + PMD_SIZE <= end) &&
!debug_pagealloc_enabled()) {
pmd_val(*pm_dir) = address | pgprot_val(SEGMENT_KERNEL);
pmd_val(*pm_dir) = address | sgt_prot;
address += PMD_SIZE;
pages1m++;
continue;
@ -129,7 +138,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size)
}
pt_dir = pte_offset_kernel(pm_dir, address);
pte_val(*pt_dir) = address | pgprot_val(PAGE_KERNEL);
pte_val(*pt_dir) = address | pgt_prot;
address += PAGE_SIZE;
pages4k++;
}
@ -200,6 +209,7 @@ static void vmem_remove_range(unsigned long start, unsigned long size)
*/
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
unsigned long pgt_prot, sgt_prot;
unsigned long address = start;
pgd_t *pg_dir;
pud_t *pu_dir;
@ -207,6 +217,12 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
pte_t *pt_dir;
int ret = -ENOMEM;
pgt_prot = pgprot_val(PAGE_KERNEL);
sgt_prot = pgprot_val(SEGMENT_KERNEL);
if (!MACHINE_HAS_NX) {
pgt_prot &= ~_PAGE_NOEXEC;
sgt_prot &= ~_SEGMENT_ENTRY_NOEXEC;
}
for (address = start; address < end;) {
pg_dir = pgd_offset_k(address);
if (pgd_none(*pg_dir)) {
@ -238,8 +254,7 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
new_page = vmemmap_alloc_block(PMD_SIZE, node);
if (!new_page)
goto out;
pmd_val(*pm_dir) = __pa(new_page) |
_SEGMENT_ENTRY | _SEGMENT_ENTRY_LARGE;
pmd_val(*pm_dir) = __pa(new_page) | sgt_prot;
address = (address + PMD_SIZE) & PMD_MASK;
continue;
}
@ -259,8 +274,7 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
new_page = vmemmap_alloc_block(PAGE_SIZE, node);
if (!new_page)
goto out;
pte_val(*pt_dir) =
__pa(new_page) | pgprot_val(PAGE_KERNEL);
pte_val(*pt_dir) = __pa(new_page) | pgt_prot;
}
address += PAGE_SIZE;
}
@ -372,13 +386,21 @@ out:
*/
void __init vmem_map_init(void)
{
unsigned long size = _eshared - _stext;
struct memblock_region *reg;
for_each_memblock(memory, reg)
vmem_add_mem(reg->base, reg->size);
set_memory_ro((unsigned long)_stext, size >> PAGE_SHIFT);
pr_info("Write protected kernel read-only data: %luk\n", size >> 10);
__set_memory((unsigned long) _stext,
(_etext - _stext) >> PAGE_SHIFT,
SET_MEMORY_RO | SET_MEMORY_X);
__set_memory((unsigned long) _etext,
(_eshared - _etext) >> PAGE_SHIFT,
SET_MEMORY_RO);
__set_memory((unsigned long) _sinittext,
(_einittext - _sinittext) >> PAGE_SHIFT,
SET_MEMORY_RO | SET_MEMORY_X);
pr_info("Write protected kernel read-only data: %luk\n",
(_eshared - _stext) >> 10);
}
/*

View File

@ -1323,14 +1323,11 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
}
if (bpf_jit_enable > 1) {
bpf_jit_dump(fp->len, jit.size, pass, jit.prg_buf);
if (jit.prg_buf)
print_fn_code(jit.prg_buf, jit.size_prg);
}
if (jit.prg_buf) {
bpf_jit_binary_lock_ro(header);
fp->bpf_func = (void *) jit.prg_buf;
fp->jited = 1;
print_fn_code(jit.prg_buf, jit.size_prg);
}
bpf_jit_binary_lock_ro(header);
fp->bpf_func = (void *) jit.prg_buf;
fp->jited = 1;
free_addrs:
kfree(jit.addrs);
out:

View File

@ -224,8 +224,8 @@ static int zpci_cfg_load(struct zpci_dev *zdev, int offset, u32 *val, u8 len)
rc = zpci_load(&data, req, offset);
if (!rc) {
data = data << ((8 - len) * 8);
data = le64_to_cpu(data);
data = le64_to_cpu((__force __le64) data);
data >>= (8 - len) * 8;
*val = (u32) data;
} else
*val = 0xffffffff;
@ -238,8 +238,8 @@ static int zpci_cfg_store(struct zpci_dev *zdev, int offset, u32 val, u8 len)
u64 data = val;
int rc;
data = cpu_to_le64(data);
data = data >> ((8 - len) * 8);
data <<= (8 - len) * 8;
data = (__force u64) cpu_to_le64(data);
rc = zpci_store(data, req, offset);
return rc;
}

View File

@ -1712,8 +1712,11 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
/* check for attention message */
if (scsw_dstat(&irb->scsw) & DEV_STAT_ATTENTION) {
device = dasd_device_from_cdev_locked(cdev);
device->discipline->check_attention(device, irb->esw.esw1.lpum);
dasd_put_device(device);
if (!IS_ERR(device)) {
device->discipline->check_attention(device,
irb->esw.esw1.lpum);
dasd_put_device(device);
}
}
if (!cqr)
@ -3598,10 +3601,11 @@ int dasd_generic_set_offline(struct ccw_device *cdev)
* empty
*/
/* sync blockdev and partitions */
rc = fsync_bdev(device->block->bdev);
if (rc != 0)
goto interrupted;
if (device->block) {
rc = fsync_bdev(device->block->bdev);
if (rc != 0)
goto interrupted;
}
/* schedule device tasklet and wait for completion */
dasd_schedule_device_bh(device);
rc = wait_event_interruptible(shutdown_waitq,

View File

@ -26,6 +26,7 @@
/* This is ugly... */
#define PRINTK_HEADER "dasd_devmap:"
#define DASD_BUS_ID_SIZE 20
#define DASD_MAX_PARAMS 256
#include "dasd_int.h"
@ -76,7 +77,7 @@ EXPORT_SYMBOL_GPL(dasd_nofcx);
* it is named 'dasd' so that it can be filled directly by insmod with the
* comma separated strings when running as a module.
*/
static char *dasd[256];
static char *dasd[DASD_MAX_PARAMS];
module_param_array(dasd, charp, NULL, S_IRUGO);
/*
@ -104,18 +105,19 @@ dasd_hash_busid(const char *bus_id)
}
#ifndef MODULE
/*
* The parameter parsing functions for builtin-drivers are called
* before kmalloc works. Store the pointers to the parameter strings
* into dasd[] for later processing.
*/
static int __init
dasd_call_setup(char *str)
static int __init dasd_call_setup(char *opt)
{
static int count = 0;
static int i __initdata;
char *tmp;
while (i < DASD_MAX_PARAMS) {
tmp = strsep(&opt, ",");
if (!tmp)
break;
dasd[i++] = tmp;
}
if (count < 256)
dasd[count++] = str;
return 1;
}
@ -127,14 +129,13 @@ __setup ("dasd=", dasd_call_setup);
/*
* Read a device busid/devno from a string.
*/
static int
dasd_busid(char **str, int *id0, int *id1, int *devno)
static int __init dasd_busid(char *str, int *id0, int *id1, int *devno)
{
int val, old_style;
unsigned int val;
char *tok;
/* Interpret ipldev busid */
if (strncmp(DASD_IPLDEV, *str, strlen(DASD_IPLDEV)) == 0) {
if (strncmp(DASD_IPLDEV, str, strlen(DASD_IPLDEV)) == 0) {
if (ipl_info.type != IPL_TYPE_CCW) {
pr_err("The IPL device is not a CCW device\n");
return -EINVAL;
@ -142,63 +143,50 @@ dasd_busid(char **str, int *id0, int *id1, int *devno)
*id0 = 0;
*id1 = ipl_info.data.ccw.dev_id.ssid;
*devno = ipl_info.data.ccw.dev_id.devno;
*str += strlen(DASD_IPLDEV);
return 0;
}
/* check for leading '0x' */
old_style = 0;
if ((*str)[0] == '0' && (*str)[1] == 'x') {
*str += 2;
old_style = 1;
}
if (!isxdigit((*str)[0])) /* We require at least one hex digit */
return -EINVAL;
val = simple_strtoul(*str, str, 16);
if (old_style || (*str)[0] != '.') {
/* Old style 0xXXXX or XXXX */
if (!kstrtouint(str, 16, &val)) {
*id0 = *id1 = 0;
if (val < 0 || val > 0xffff)
return -EINVAL;
*devno = val;
return 0;
}
/* New style x.y.z busid */
if (val < 0 || val > 0xff)
tok = strsep(&str, ".");
if (kstrtouint(tok, 16, &val) || val > 0xff)
return -EINVAL;
*id0 = val;
(*str)++;
if (!isxdigit((*str)[0])) /* We require at least one hex digit */
return -EINVAL;
val = simple_strtoul(*str, str, 16);
if (val < 0 || val > 0xff || (*str)++[0] != '.')
tok = strsep(&str, ".");
if (kstrtouint(tok, 16, &val) || val > 0xff)
return -EINVAL;
*id1 = val;
if (!isxdigit((*str)[0])) /* We require at least one hex digit */
return -EINVAL;
val = simple_strtoul(*str, str, 16);
if (val < 0 || val > 0xffff)
tok = strsep(&str, ".");
if (kstrtouint(tok, 16, &val) || val > 0xffff)
return -EINVAL;
*devno = val;
return 0;
}
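The reworked dasd_busid() above accepts either the new-style css.ssid.devno form ("0.0.1234") or an old-style plain or 0x-prefixed hex devno ("0x4321"), now via kstrtouint()/strsep() instead of simple_strtoul(). Below is a stand-alone user-space sketch of the same grammar using C library calls; busid_parse() is a made-up name, the dispatch on '.' is a simplification, and the IPL-device shortcut is left out:

/*
 * Illustration only, not part of the patch: the busid grammar accepted by
 * dasd_busid(), parsed with strtoul() instead of the kernel helpers.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int busid_parse(const char *str, unsigned int *id0,
		       unsigned int *id1, unsigned int *devno)
{
	unsigned long val;
	char *end;

	if (!strchr(str, '.')) {
		/* old style: "4321" or "0x4321"; css and ssid default to 0 */
		val = strtoul(str, &end, 16);
		if (end == str || *end || val > 0xffff)
			return -1;
		*id0 = *id1 = 0;
		*devno = val;
		return 0;
	}
	/* new style: "css.ssid.devno", all parts in hex */
	val = strtoul(str, &end, 16);
	if (end == str || *end != '.' || val > 0xff)
		return -1;
	*id0 = val;
	str = end + 1;
	val = strtoul(str, &end, 16);
	if (end == str || *end != '.' || val > 0xff)
		return -1;
	*id1 = val;
	str = end + 1;
	val = strtoul(str, &end, 16);
	if (end == str || *end || val > 0xffff)
		return -1;
	*devno = val;
	return 0;
}

int main(void)
{
	unsigned int id0, id1, devno;

	if (!busid_parse("0.0.1234", &id0, &id1, &devno))
		printf("%x.%x.%04x\n", id0, id1, devno);	/* "0.0.1234" */
	return 0;
}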
/*
* Read colon separated list of dasd features. Currently there is
* only one: "ro" for read-only devices. The default feature set
* is empty (value 0).
* Read colon separated list of dasd features.
*/
static int
dasd_feature_list(char *str, char **endp)
static int __init dasd_feature_list(char *str)
{
int features, len, rc;
rc = 0;
if (*str != '(') {
*endp = str;
return DASD_FEATURE_DEFAULT;
}
str++;
features = 0;
rc = 0;
if (!str)
return DASD_FEATURE_DEFAULT;
while (1) {
for (len = 0;
@ -223,15 +211,8 @@ dasd_feature_list(char *str, char **endp)
break;
str++;
}
if (*str != ')') {
pr_warn("A closing parenthesis ')' is missing in the dasd= parameter\n");
rc = -EINVAL;
} else
str++;
*endp = str;
if (rc != 0)
return rc;
return features;
return rc ? : features;
}
/*
@ -240,48 +221,38 @@ dasd_feature_list(char *str, char **endp)
* action and return a pointer to the residual string. If the first element
* could not be matched to any keyword then return an error code.
*/
static char *
dasd_parse_keyword( char *parsestring ) {
static int __init dasd_parse_keyword(char *keyword)
{
int length = strlen(keyword);
char *nextcomma, *residual_str;
int length;
nextcomma = strchr(parsestring,',');
if (nextcomma) {
length = nextcomma - parsestring;
residual_str = nextcomma + 1;
} else {
length = strlen(parsestring);
residual_str = parsestring + length;
}
if (strncmp("autodetect", parsestring, length) == 0) {
if (strncmp("autodetect", keyword, length) == 0) {
dasd_autodetect = 1;
pr_info("The autodetection mode has been activated\n");
return residual_str;
return 0;
}
if (strncmp("probeonly", parsestring, length) == 0) {
if (strncmp("probeonly", keyword, length) == 0) {
dasd_probeonly = 1;
pr_info("The probeonly mode has been activated\n");
return residual_str;
return 0;
}
if (strncmp("nopav", parsestring, length) == 0) {
if (strncmp("nopav", keyword, length) == 0) {
if (MACHINE_IS_VM)
pr_info("'nopav' is not supported on z/VM\n");
else {
dasd_nopav = 1;
pr_info("PAV support has be deactivated\n");
}
return residual_str;
return 0;
}
if (strncmp("nofcx", parsestring, length) == 0) {
if (strncmp("nofcx", keyword, length) == 0) {
dasd_nofcx = 1;
pr_info("High Performance FICON support has been "
"deactivated\n");
return residual_str;
return 0;
}
if (strncmp("fixedbuffers", parsestring, length) == 0) {
if (strncmp("fixedbuffers", keyword, length) == 0) {
if (dasd_page_cache)
return residual_str;
return 0;
dasd_page_cache =
kmem_cache_create("dasd_page_cache", PAGE_SIZE,
PAGE_SIZE, SLAB_CACHE_DMA,
@ -292,107 +263,126 @@ dasd_parse_keyword( char *parsestring ) {
else
DBF_EVENT(DBF_INFO, "%s",
"turning on fixed buffer mode");
return residual_str;
}
return ERR_PTR(-EINVAL);
return 0;
}
return -EINVAL;
}
/*
* Try to interprete the first element on the comma separated parse string
* as a device number or a range of devices. If the interpretation is
* successful, create the matching dasd_devmap entries and return a pointer
* to the residual string.
* Split a string of a device range into its pieces and return the from, to, and
* feature parts separately.
* e.g.:
* 0.0.1234-0.0.5678(ro:erplog) -> from: 0.0.1234 to: 0.0.5678 features: ro:erplog
* 0.0.8765(raw) -> from: 0.0.8765 to: null features: raw
* 0x4321 -> from: 0x4321 to: null features: null
*/
static int __init dasd_evaluate_range_param(char *range, char **from_str,
char **to_str, char **features_str)
{
int rc = 0;
/* Do we have a range or a single device? */
if (strchr(range, '-')) {
*from_str = strsep(&range, "-");
*to_str = strsep(&range, "(");
*features_str = strsep(&range, ")");
} else {
*from_str = strsep(&range, "(");
*features_str = strsep(&range, ")");
}
if (*features_str && !range) {
pr_warn("A closing parenthesis ')' is missing in the dasd= parameter\n");
rc = -EINVAL;
}
return rc;
}
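The comment block above documents the accepted dasd= range syntax, and dasd_evaluate_range_param() merely cuts the string at '-', '(' and ')'. Here is a stand-alone illustration of the same splitting, using the C library strsep() rather than the kernel helper (split_range() is a made-up name; the expected output for the three documented examples is shown in the comments):

/*
 * Illustration only, not part of the patch: strsep()-based splitting of a
 * dasd= device range into its from/to/features parts.
 */
#include <stdio.h>
#include <string.h>

static void split_range(char *range)
{
	char *from, *to = NULL, *features = NULL;

	if (strchr(range, '-')) {		/* "from-to(features)" */
		from = strsep(&range, "-");
		to = strsep(&range, "(");
		features = strsep(&range, ")");
	} else {				/* "from(features)" or "from" */
		from = strsep(&range, "(");
		features = strsep(&range, ")");
	}
	if (features && !range)
		printf("missing ')'\n");
	printf("from: %s to: %s features: %s\n",
	       from, to ? to : "null", features ? features : "null");
}

int main(void)
{
	char a[] = "0.0.1234-0.0.5678(ro:erplog)";
	char b[] = "0.0.8765(raw)";
	char c[] = "0x4321";

	split_range(a);	/* from: 0.0.1234 to: 0.0.5678 features: ro:erplog */
	split_range(b);	/* from: 0.0.8765 to: null features: raw */
	split_range(c);	/* from: 0x4321 to: null features: null */
	return 0;
}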
/*
* Try to interpret the range string as a device number or a range of devices.
* If the interpretation is successful, create the matching dasd_devmap entries.
* If interpretation fails or in case of an error, return an error code.
*/
static char *
dasd_parse_range( char *parsestring ) {
static int __init dasd_parse_range(const char *range)
{
struct dasd_devmap *devmap;
int from, from_id0, from_id1;
int to, to_id0, to_id1;
int features, rc;
char bus_id[DASD_BUS_ID_SIZE+1], *str;
int features;
char bus_id[DASD_BUS_ID_SIZE + 1];
char *features_str = NULL;
char *from_str = NULL;
char *to_str = NULL;
size_t len = strlen(range) + 1;
char tmp[len];
str = parsestring;
rc = dasd_busid(&str, &from_id0, &from_id1, &from);
if (rc == 0) {
to = from;
to_id0 = from_id0;
to_id1 = from_id1;
if (*str == '-') {
str++;
rc = dasd_busid(&str, &to_id0, &to_id1, &to);
strlcpy(tmp, range, len);
if (dasd_evaluate_range_param(tmp, &from_str, &to_str, &features_str))
goto out_err;
if (dasd_busid(from_str, &from_id0, &from_id1, &from))
goto out_err;
to = from;
to_id0 = from_id0;
to_id1 = from_id1;
if (to_str) {
if (dasd_busid(to_str, &to_id0, &to_id1, &to))
goto out_err;
if (from_id0 != to_id0 || from_id1 != to_id1 || from > to) {
pr_err("%s is not a valid device range\n", range);
goto out_err;
}
}
if (rc == 0 &&
(from_id0 != to_id0 || from_id1 != to_id1 || from > to))
rc = -EINVAL;
if (rc) {
pr_err("%s is not a valid device range\n", parsestring);
return ERR_PTR(rc);
}
features = dasd_feature_list(str, &str);
features = dasd_feature_list(features_str);
if (features < 0)
return ERR_PTR(-EINVAL);
goto out_err;
/* each device in dasd= parameter should be set initially online */
features |= DASD_FEATURE_INITIAL_ONLINE;
while (from <= to) {
sprintf(bus_id, "%01x.%01x.%04x",
from_id0, from_id1, from++);
sprintf(bus_id, "%01x.%01x.%04x", from_id0, from_id1, from++);
devmap = dasd_add_busid(bus_id, features);
if (IS_ERR(devmap))
return (char *)devmap;
return PTR_ERR(devmap);
}
if (*str == ',')
return str + 1;
if (*str == '\0')
return str;
pr_warn("The dasd= parameter value %s has an invalid ending\n", str);
return ERR_PTR(-EINVAL);
}
static char *
dasd_parse_next_element( char *parsestring ) {
char * residual_str;
residual_str = dasd_parse_keyword(parsestring);
if (!IS_ERR(residual_str))
return residual_str;
residual_str = dasd_parse_range(parsestring);
return residual_str;
return 0;
out_err:
return -EINVAL;
}
/*
* Parse parameters stored in dasd[]
* The 'dasd=...' parameter allows to specify a comma separated list of
* keywords and device ranges. When the dasd driver is build into the kernel,
* the complete list will be stored as one element of the dasd[] array.
* When the dasd driver is build as a module, then the list is broken into
* it's elements and each dasd[] entry contains one element.
* keywords and device ranges. The parameters in that list will be stored as
* separate elements in dasd[].
*/
int
dasd_parse(void)
int __init dasd_parse(void)
{
int rc, i;
char *parsestring;
char *cur;
rc = 0;
for (i = 0; i < 256; i++) {
if (dasd[i] == NULL)
for (i = 0; i < DASD_MAX_PARAMS; i++) {
cur = dasd[i];
if (!cur)
break;
parsestring = dasd[i];
/* loop over the comma separated list in the parsestring */
while (*parsestring) {
parsestring = dasd_parse_next_element(parsestring);
if(IS_ERR(parsestring)) {
rc = PTR_ERR(parsestring);
break;
}
}
if (rc) {
DBF_EVENT(DBF_ALERT, "%s", "invalid range found");
if (*cur == '\0')
continue;
rc = dasd_parse_keyword(cur);
if (rc)
rc = dasd_parse_range(cur);
if (rc)
break;
}
}
return rc;
}
@ -1528,14 +1518,12 @@ dasd_path_threshold_store(struct device *dev, struct device_attribute *attr,
if (IS_ERR(device))
return -ENODEV;
if ((kstrtoul(buf, 10, &val) != 0) ||
(val > DASD_THRHLD_MAX) || val == 0) {
if (kstrtoul(buf, 10, &val) != 0 || val > DASD_THRHLD_MAX) {
dasd_put_device(device);
return -EINVAL;
}
spin_lock_irqsave(get_ccwdev_lock(to_ccwdev(dev)), flags);
if (val)
device->path_thrhld = val;
device->path_thrhld = val;
spin_unlock_irqrestore(get_ccwdev_lock(to_ccwdev(dev)), flags);
dasd_put_device(device);
return count;

View File

@ -2543,8 +2543,8 @@ dasd_eckd_build_format(struct dasd_device *base,
DASD_ECKD_CCW_WRITE_CKD_MT;
ccw->flags = CCW_FLAG_SLI;
ccw->count = 8;
ccw->cda = (__u32)(addr_t) ect;
ccw++;
ccw->cda = (__u32)(addr_t) ect;
ccw++;
}
}
}

View File

@ -805,7 +805,7 @@ struct dasd_device *dasd_device_from_devindex(int);
void dasd_add_link_to_gendisk(struct gendisk *, struct dasd_device *);
struct dasd_device *dasd_device_from_gendisk(struct gendisk *);
int dasd_parse(void);
int dasd_parse(void) __init;
int dasd_busid_known(const char *);
/* externals in dasd_gendisk.c */

View File

@ -892,7 +892,7 @@ dcssblk_direct_access (struct block_device *bdev, sector_t secnum,
dev_info = bdev->bd_disk->private_data;
if (!dev_info)
return -ENODEV;
dev_sz = dev_info->end - dev_info->start;
dev_sz = dev_info->end - dev_info->start + 1;
offset = secnum * 512;
*kaddr = (void *) dev_info->start + offset;
*pfn = __pfn_to_pfn_t(PFN_DOWN(dev_info->start + offset), PFN_DEV);

View File

@ -2,9 +2,23 @@
# S/390 character devices
#
ifdef CONFIG_FUNCTION_TRACER
# Do not trace early setup code
CFLAGS_REMOVE_sclp_early_core.o = $(CC_FLAGS_FTRACE)
endif
GCOV_PROFILE_sclp_early_core.o := n
KCOV_INSTRUMENT_sclp_early_core.o := n
UBSAN_SANITIZE_sclp_early_core.o := n
ifneq ($(CC_FLAGS_MARCH),-march=z900)
CFLAGS_REMOVE_sclp_early_core.o += $(CC_FLAGS_MARCH)
CFLAGS_sclp_early_core.o += -march=z900
endif
obj-y += ctrlchar.o keyboard.o defkeymap.o sclp.o sclp_rw.o sclp_quiesce.o \
sclp_cmd.o sclp_config.o sclp_cpi_sys.o sclp_ocf.o sclp_ctl.o \
sclp_early.o
sclp_early.o sclp_early_core.o
obj-$(CONFIG_TN3270) += raw3270.o
obj-$(CONFIG_TN3270_CONSOLE) += con3270.o

View File

@ -31,7 +31,7 @@
static struct raw3270_fn con3270_fn;
static bool auto_update = 1;
static bool auto_update = true;
module_param(auto_update, bool, 0);
/*

View File

@ -82,7 +82,7 @@ static LIST_HEAD(raw3270_devices);
static int raw3270_registered;
/* Module parameters */
static bool tubxcorrect = 0;
static bool tubxcorrect;
module_param(tubxcorrect, bool, 0);
/*

View File

@ -94,13 +94,6 @@ static struct timer_list sclp_request_timer;
/* Timer for queued requests. */
static struct timer_list sclp_queue_timer;
/* Internal state: is the driver initialized? */
static volatile enum sclp_init_state_t {
sclp_init_state_uninitialized,
sclp_init_state_initializing,
sclp_init_state_initialized
} sclp_init_state = sclp_init_state_uninitialized;
/* Internal state: is a request active at the sclp? */
static volatile enum sclp_running_state_t {
sclp_running_state_idle,
@ -147,31 +140,6 @@ static void __sclp_make_read_req(void);
static int sclp_init_mask(int calculate);
static int sclp_init(void);
/* Perform service call. Return 0 on success, non-zero otherwise. */
int
sclp_service_call(sclp_cmdw_t command, void *sccb)
{
int cc = 4; /* Initialize for program check handling */
asm volatile(
"0: .insn rre,0xb2200000,%1,%2\n" /* servc %1,%2 */
"1: ipm %0\n"
" srl %0,28\n"
"2:\n"
EX_TABLE(0b, 2b)
EX_TABLE(1b, 2b)
: "+&d" (cc) : "d" (command), "a" (__pa(sccb))
: "cc", "memory");
if (cc == 4)
return -EINVAL;
if (cc == 3)
return -EIO;
if (cc == 2)
return -EBUSY;
return 0;
}
static void
__sclp_queue_read_req(void)
{

View File

@ -204,19 +204,57 @@ void sclp_unregister(struct sclp_register *reg);
int sclp_remove_processed(struct sccb_header *sccb);
int sclp_deactivate(void);
int sclp_reactivate(void);
int sclp_service_call(sclp_cmdw_t command, void *sccb);
int sclp_sync_request(sclp_cmdw_t command, void *sccb);
int sclp_sync_request_timeout(sclp_cmdw_t command, void *sccb, int timeout);
int sclp_sdias_init(void);
void sclp_sdias_exit(void);
enum {
sclp_init_state_uninitialized,
sclp_init_state_initializing,
sclp_init_state_initialized
};
extern int sclp_init_state;
extern int sclp_console_pages;
extern int sclp_console_drop;
extern unsigned long sclp_console_full;
extern char sclp_early_sccb[PAGE_SIZE];
void sclp_early_wait_irq(void);
int sclp_early_cmd(sclp_cmdw_t cmd, void *sccb);
unsigned int sclp_early_con_check_linemode(struct init_sccb *sccb);
int sclp_early_set_event_mask(struct init_sccb *sccb,
unsigned long receive_mask,
unsigned long send_mask);
/* useful inlines */
/* Perform service call. Return 0 on success, non-zero otherwise. */
static inline int sclp_service_call(sclp_cmdw_t command, void *sccb)
{
int cc = 4; /* Initialize for program check handling */
asm volatile(
"0: .insn rre,0xb2200000,%1,%2\n" /* servc %1,%2 */
"1: ipm %0\n"
" srl %0,28\n"
"2:\n"
EX_TABLE(0b, 2b)
EX_TABLE(1b, 2b)
: "+&d" (cc) : "d" (command), "a" ((unsigned long)sccb)
: "cc", "memory");
if (cc == 4)
return -EINVAL;
if (cc == 3)
return -EIO;
if (cc == 2)
return -EBUSY;
return 0;
}
/* VM uses EBCDIC 037, LPAR+native(SE+HMC) use EBCDIC 500 */
/* translate single character from ASCII to EBCDIC */
static inline unsigned char

View File

@ -55,46 +55,23 @@ struct read_info_sccb {
u8 _pad_128[4096 - 128]; /* 128-4095 */
} __packed __aligned(PAGE_SIZE);
static char sccb_early[PAGE_SIZE] __aligned(PAGE_SIZE) __initdata;
static struct sclp_ipl_info sclp_ipl_info;
struct sclp_info sclp;
EXPORT_SYMBOL(sclp);
static int __init sclp_cmd_sync_early(sclp_cmdw_t cmd, void *sccb)
static int __init sclp_early_read_info(struct read_info_sccb *sccb)
{
int rc;
__ctl_set_bit(0, 9);
rc = sclp_service_call(cmd, sccb);
if (rc)
goto out;
__load_psw_mask(PSW_DEFAULT_KEY | PSW_MASK_BASE | PSW_MASK_EA |
PSW_MASK_BA | PSW_MASK_EXT | PSW_MASK_WAIT);
local_irq_disable();
out:
/* Contents of the sccb might have changed. */
barrier();
__ctl_clear_bit(0, 9);
return rc;
}
static int __init sclp_read_info_early(struct read_info_sccb *sccb)
{
int rc, i;
int i;
sclp_cmdw_t commands[] = {SCLP_CMDW_READ_SCP_INFO_FORCED,
SCLP_CMDW_READ_SCP_INFO};
for (i = 0; i < ARRAY_SIZE(commands); i++) {
do {
memset(sccb, 0, sizeof(*sccb));
sccb->header.length = sizeof(*sccb);
sccb->header.function_code = 0x80;
sccb->header.control_mask[2] = 0x80;
rc = sclp_cmd_sync_early(commands[i], sccb);
} while (rc == -EBUSY);
if (rc)
memset(sccb, 0, sizeof(*sccb));
sccb->header.length = sizeof(*sccb);
sccb->header.function_code = 0x80;
sccb->header.control_mask[2] = 0x80;
if (sclp_early_cmd(commands[i], sccb))
break;
if (sccb->header.response_code == 0x10)
return 0;
@ -104,12 +81,12 @@ static int __init sclp_read_info_early(struct read_info_sccb *sccb)
return -EIO;
}
static void __init sclp_facilities_detect(struct read_info_sccb *sccb)
static void __init sclp_early_facilities_detect(struct read_info_sccb *sccb)
{
struct sclp_core_entry *cpue;
u16 boot_cpu_address, cpu;
if (sclp_read_info_early(sccb))
if (sclp_early_read_info(sccb))
return;
sclp.facilities = sccb->facilities;
@ -172,34 +149,43 @@ static void __init sclp_facilities_detect(struct read_info_sccb *sccb)
}
/*
* This function will be called after sclp_facilities_detect(), which gets
* called from early.c code. The sclp_facilities_detect() function retrieves
* This function will be called after sclp_early_facilities_detect(), which gets
* called from early.c code. The sclp_early_facilities_detect() function retrieves
* and saves the IPL information.
*/
void __init sclp_get_ipl_info(struct sclp_ipl_info *info)
void __init sclp_early_get_ipl_info(struct sclp_ipl_info *info)
{
*info = sclp_ipl_info;
}
static int __init sclp_cmd_early(sclp_cmdw_t cmd, void *sccb)
static struct sclp_core_info sclp_early_core_info __initdata;
static int sclp_early_core_info_valid __initdata;
static void __init sclp_early_init_core_info(struct read_cpu_info_sccb *sccb)
{
int rc;
if (!SCLP_HAS_CPU_INFO)
return;
memset(sccb, 0, sizeof(*sccb));
sccb->header.length = sizeof(*sccb);
if (sclp_early_cmd(SCLP_CMDW_READ_CPU_INFO, sccb))
return;
if (sccb->header.response_code != 0x0010)
return;
sclp_fill_core_info(&sclp_early_core_info, sccb);
sclp_early_core_info_valid = 1;
}
do {
rc = sclp_cmd_sync_early(cmd, sccb);
} while (rc == -EBUSY);
if (rc)
return -EIO;
if (((struct sccb_header *) sccb)->response_code != 0x0020)
int __init sclp_early_get_core_info(struct sclp_core_info *info)
{
if (!sclp_early_core_info_valid)
return -EIO;
*info = sclp_early_core_info;
return 0;
}
static void __init sccb_init_eq_size(struct sdias_sccb *sccb)
static long __init sclp_early_hsa_size_init(struct sdias_sccb *sccb)
{
memset(sccb, 0, sizeof(*sccb));
sccb->hdr.length = sizeof(*sccb);
sccb->evbuf.hdr.length = sizeof(struct sdias_evbuf);
sccb->evbuf.hdr.type = EVTYP_SDIAS;
@ -207,106 +193,52 @@ static void __init sccb_init_eq_size(struct sdias_sccb *sccb)
sccb->evbuf.data_id = SDIAS_DI_FCP_DUMP;
sccb->evbuf.event_id = 4712;
sccb->evbuf.dbs = 1;
}
static int __init sclp_set_event_mask(struct init_sccb *sccb,
unsigned long receive_mask,
unsigned long send_mask)
{
memset(sccb, 0, sizeof(*sccb));
sccb->header.length = sizeof(*sccb);
sccb->mask_length = sizeof(sccb_mask_t);
sccb->receive_mask = receive_mask;
sccb->send_mask = send_mask;
return sclp_cmd_early(SCLP_CMDW_WRITE_EVENT_MASK, sccb);
}
static struct sclp_core_info sclp_core_info_early __initdata;
static int sclp_core_info_early_valid __initdata;
static void __init sclp_init_core_info_early(struct read_cpu_info_sccb *sccb)
{
int rc;
if (!SCLP_HAS_CPU_INFO)
return;
memset(sccb, 0, sizeof(*sccb));
sccb->header.length = sizeof(*sccb);
do {
rc = sclp_cmd_sync_early(SCLP_CMDW_READ_CPU_INFO, sccb);
} while (rc == -EBUSY);
if (rc)
return;
if (sccb->header.response_code != 0x0010)
return;
sclp_fill_core_info(&sclp_core_info_early, sccb);
sclp_core_info_early_valid = 1;
}
int __init _sclp_get_core_info_early(struct sclp_core_info *info)
{
if (!sclp_core_info_early_valid)
if (sclp_early_cmd(SCLP_CMDW_WRITE_EVENT_DATA, sccb))
return -EIO;
*info = sclp_core_info_early;
return 0;
}
static long __init sclp_hsa_size_init(struct sdias_sccb *sccb)
{
sccb_init_eq_size(sccb);
if (sclp_cmd_early(SCLP_CMDW_WRITE_EVENT_DATA, sccb))
if (sccb->hdr.response_code != 0x20)
return -EIO;
if (sccb->evbuf.blk_cnt == 0)
return 0;
return (sccb->evbuf.blk_cnt - 1) * PAGE_SIZE;
}
static long __init sclp_hsa_copy_wait(struct sccb_header *sccb)
static long __init sclp_early_hsa_copy_wait(struct sdias_sccb *sccb)
{
memset(sccb, 0, PAGE_SIZE);
sccb->length = PAGE_SIZE;
if (sclp_cmd_early(SCLP_CMDW_READ_EVENT_DATA, sccb))
sccb->hdr.length = PAGE_SIZE;
if (sclp_early_cmd(SCLP_CMDW_READ_EVENT_DATA, sccb))
return -EIO;
if (((struct sdias_sccb *) sccb)->evbuf.blk_cnt == 0)
if ((sccb->hdr.response_code != 0x20) && (sccb->hdr.response_code != 0x220))
return -EIO;
if (sccb->evbuf.blk_cnt == 0)
return 0;
return (((struct sdias_sccb *) sccb)->evbuf.blk_cnt - 1) * PAGE_SIZE;
return (sccb->evbuf.blk_cnt - 1) * PAGE_SIZE;
}
static void __init sclp_hsa_size_detect(void *sccb)
static void __init sclp_early_hsa_size_detect(void *sccb)
{
long size;
unsigned long flags;
long size = -EIO;
/* First try synchronous interface (LPAR) */
if (sclp_set_event_mask(sccb, 0, 0x40000010))
return;
size = sclp_hsa_size_init(sccb);
if (size < 0)
return;
if (size != 0)
raw_local_irq_save(flags);
if (sclp_early_set_event_mask(sccb, EVTYP_SDIAS_MASK, EVTYP_SDIAS_MASK))
goto out;
/* Then try asynchronous interface (z/VM) */
if (sclp_set_event_mask(sccb, 0x00000010, 0x40000010))
return;
size = sclp_hsa_size_init(sccb);
if (size < 0)
return;
size = sclp_hsa_copy_wait(sccb);
if (size < 0)
return;
size = sclp_early_hsa_size_init(sccb);
/* First check for synchronous response (LPAR) */
if (size)
goto out_mask;
if (!(S390_lowcore.ext_params & 1))
sclp_early_wait_irq();
size = sclp_early_hsa_copy_wait(sccb);
out_mask:
sclp_early_set_event_mask(sccb, 0, 0);
out:
sclp.hsa_size = size;
raw_local_irq_restore(flags);
if (size > 0)
sclp.hsa_size = size;
}
static unsigned int __init sclp_con_check_linemode(struct init_sccb *sccb)
{
if (!(sccb->sclp_send_mask & EVTYP_OPCMD_MASK))
return 0;
if (!(sccb->sclp_receive_mask & (EVTYP_MSG_MASK | EVTYP_PMSGCMD_MASK)))
return 0;
return 1;
}
static void __init sclp_console_detect(struct init_sccb *sccb)
static void __init sclp_early_console_detect(struct init_sccb *sccb)
{
if (sccb->header.response_code != 0x20)
return;
@ -314,21 +246,22 @@ static void __init sclp_console_detect(struct init_sccb *sccb)
if (sccb->sclp_send_mask & EVTYP_VT220MSG_MASK)
sclp.has_vt220 = 1;
if (sclp_con_check_linemode(sccb))
if (sclp_early_con_check_linemode(sccb))
sclp.has_linemode = 1;
}
void __init sclp_early_detect(void)
{
void *sccb = &sccb_early;
void *sccb = &sclp_early_sccb;
sclp_facilities_detect(sccb);
sclp_init_core_info_early(sccb);
sclp_hsa_size_detect(sccb);
sclp_early_facilities_detect(sccb);
sclp_early_init_core_info(sccb);
sclp_early_hsa_size_detect(sccb);
/* Turn off SCLP event notifications. Also save remote masks in the
/*
* Turn off SCLP event notifications. Also save remote masks in the
* sccb. These are sufficient to detect sclp console capabilities.
*/
sclp_set_event_mask(sccb, 0, 0);
sclp_console_detect(sccb);
sclp_early_set_event_mask(sccb, 0, 0);
sclp_early_console_detect(sccb);
}

View File

@ -0,0 +1,208 @@
/*
* Copyright IBM Corp. 2015
* Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
*/
#include <linux/kernel.h>
#include <asm/processor.h>
#include <asm/lowcore.h>
#include <asm/ebcdic.h>
#include <asm/irq.h>
#include "sclp.h"
#include "sclp_rw.h"
char sclp_early_sccb[PAGE_SIZE] __aligned(PAGE_SIZE) __section(data);
int sclp_init_state __section(data) = sclp_init_state_uninitialized;
void sclp_early_wait_irq(void)
{
unsigned long psw_mask, addr;
psw_t psw_ext_save, psw_wait;
union ctlreg0 cr0, cr0_new;
__ctl_store(cr0.val, 0, 0);
cr0_new.val = cr0.val & ~CR0_IRQ_SUBCLASS_MASK;
cr0_new.lap = 0;
cr0_new.sssm = 1;
__ctl_load(cr0_new.val, 0, 0);
psw_ext_save = S390_lowcore.external_new_psw;
psw_mask = __extract_psw();
S390_lowcore.external_new_psw.mask = psw_mask;
psw_wait.mask = psw_mask | PSW_MASK_EXT | PSW_MASK_WAIT;
S390_lowcore.ext_int_code = 0;
do {
asm volatile(
" larl %[addr],0f\n"
" stg %[addr],%[psw_wait_addr]\n"
" stg %[addr],%[psw_ext_addr]\n"
" lpswe %[psw_wait]\n"
"0:\n"
: [addr] "=&d" (addr),
[psw_wait_addr] "=Q" (psw_wait.addr),
[psw_ext_addr] "=Q" (S390_lowcore.external_new_psw.addr)
: [psw_wait] "Q" (psw_wait)
: "cc", "memory");
} while (S390_lowcore.ext_int_code != EXT_IRQ_SERVICE_SIG);
S390_lowcore.external_new_psw = psw_ext_save;
__ctl_load(cr0.val, 0, 0);
}
int sclp_early_cmd(sclp_cmdw_t cmd, void *sccb)
{
unsigned long flags;
int rc;
raw_local_irq_save(flags);
rc = sclp_service_call(cmd, sccb);
if (rc)
goto out;
sclp_early_wait_irq();
out:
raw_local_irq_restore(flags);
return rc;
}
struct write_sccb {
struct sccb_header header;
struct msg_buf msg;
} __packed;
/* Output multi-line text using SCLP Message interface. */
static void sclp_early_print_lm(const char *str, unsigned int len)
{
unsigned char *ptr, *end, ch;
unsigned int count, offset;
struct write_sccb *sccb;
struct msg_buf *msg;
struct mdb *mdb;
struct mto *mto;
struct go *go;
sccb = (struct write_sccb *) &sclp_early_sccb;
end = (unsigned char *) sccb + sizeof(sclp_early_sccb) - 1;
memset(sccb, 0, sizeof(*sccb));
ptr = (unsigned char *) &sccb->msg.mdb.mto;
offset = 0;
do {
for (count = sizeof(*mto); offset < len; count++) {
ch = str[offset++];
if ((ch == 0x0a) || (ptr + count > end))
break;
ptr[count] = _ascebc[ch];
}
mto = (struct mto *) ptr;
memset(mto, 0, sizeof(*mto));
mto->length = count;
mto->type = 4;
mto->line_type_flags = LNTPFLGS_ENDTEXT;
ptr += count;
} while ((offset < len) && (ptr + sizeof(*mto) <= end));
len = ptr - (unsigned char *) sccb;
sccb->header.length = len - offsetof(struct write_sccb, header);
msg = &sccb->msg;
msg->header.type = EVTYP_MSG;
msg->header.length = len - offsetof(struct write_sccb, msg.header);
mdb = &msg->mdb;
mdb->header.type = 1;
mdb->header.tag = 0xD4C4C240;
mdb->header.revision_code = 1;
mdb->header.length = len - offsetof(struct write_sccb, msg.mdb.header);
go = &mdb->go;
go->length = sizeof(*go);
go->type = 1;
sclp_early_cmd(SCLP_CMDW_WRITE_EVENT_DATA, sccb);
}
struct vt220_sccb {
struct sccb_header header;
struct {
struct evbuf_header header;
char data[];
} msg;
} __packed;
/* Output multi-line text using SCLP VT220 interface. */
static void sclp_early_print_vt220(const char *str, unsigned int len)
{
struct vt220_sccb *sccb;
sccb = (struct vt220_sccb *) &sclp_early_sccb;
if (sizeof(*sccb) + len >= sizeof(sclp_early_sccb))
len = sizeof(sclp_early_sccb) - sizeof(*sccb);
memset(sccb, 0, sizeof(*sccb));
memcpy(&sccb->msg.data, str, len);
sccb->header.length = sizeof(*sccb) + len;
sccb->msg.header.length = sizeof(sccb->msg) + len;
sccb->msg.header.type = EVTYP_VT220MSG;
sclp_early_cmd(SCLP_CMDW_WRITE_EVENT_DATA, sccb);
}
int sclp_early_set_event_mask(struct init_sccb *sccb,
unsigned long receive_mask,
unsigned long send_mask)
{
memset(sccb, 0, sizeof(*sccb));
sccb->header.length = sizeof(*sccb);
sccb->mask_length = sizeof(sccb_mask_t);
sccb->receive_mask = receive_mask;
sccb->send_mask = send_mask;
if (sclp_early_cmd(SCLP_CMDW_WRITE_EVENT_MASK, sccb))
return -EIO;
if (sccb->header.response_code != 0x20)
return -EIO;
return 0;
}
unsigned int sclp_early_con_check_linemode(struct init_sccb *sccb)
{
if (!(sccb->sclp_send_mask & EVTYP_OPCMD_MASK))
return 0;
if (!(sccb->sclp_receive_mask & (EVTYP_MSG_MASK | EVTYP_PMSGCMD_MASK)))
return 0;
return 1;
}
static int sclp_early_setup(int disable, int *have_linemode, int *have_vt220)
{
unsigned long receive_mask, send_mask;
struct init_sccb *sccb;
int rc;
*have_linemode = *have_vt220 = 0;
sccb = (struct init_sccb *) &sclp_early_sccb;
receive_mask = disable ? 0 : EVTYP_OPCMD_MASK;
send_mask = disable ? 0 : EVTYP_VT220MSG_MASK | EVTYP_MSG_MASK;
rc = sclp_early_set_event_mask(sccb, receive_mask, send_mask);
if (rc)
return rc;
*have_linemode = sclp_early_con_check_linemode(sccb);
*have_vt220 = sccb->send_mask & EVTYP_VT220MSG_MASK;
return rc;
}
/*
* Output one or more lines of text on the SCLP console (VT220 and /
* or line-mode).
*/
void __sclp_early_printk(const char *str, unsigned int len)
{
int have_linemode, have_vt220;
if (sclp_init_state != sclp_init_state_uninitialized)
return;
if (sclp_early_setup(0, &have_linemode, &have_vt220) != 0)
return;
if (have_linemode)
sclp_early_print_lm(str, len);
if (have_vt220)
sclp_early_print_vt220(str, len);
sclp_early_setup(1, &have_linemode, &have_vt220);
}
void sclp_early_printk(const char *str)
{
__sclp_early_printk(str, strlen(str));
}

View File

@ -15,7 +15,6 @@
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/miscdevice.h>
#include <linux/debugfs.h>
#include <linux/memblock.h>
@ -273,7 +272,7 @@ static int __init zcore_reipl_init(void)
rc = memcpy_hsa_kernel(ipl_block, ipib_info.ipib, PAGE_SIZE);
else
rc = memcpy_real(ipl_block, (void *) ipib_info.ipib, PAGE_SIZE);
if (rc || csum_partial(ipl_block, ipl_block->hdr.len, 0) !=
if (rc || (__force u32)csum_partial(ipl_block, ipl_block->hdr.len, 0) !=
ipib_info.checksum) {
TRACE("Checksum does not match\n");
free_page((unsigned long) ipl_block);

View File

@ -444,6 +444,7 @@ int chp_update_desc(struct channel_path *chp)
*/
int chp_new(struct chp_id chpid)
{
struct channel_subsystem *css = css_by_id(chpid.cssid);
struct channel_path *chp;
int ret;
@ -456,7 +457,7 @@ int chp_new(struct chp_id chpid)
/* fill in status, etc. */
chp->chpid = chpid;
chp->state = 1;
chp->dev.parent = &channel_subsystems[chpid.cssid]->device;
chp->dev.parent = &css->device;
chp->dev.groups = chp_attr_groups;
chp->dev.release = chp_release;
mutex_init(&chp->lock);
@ -479,17 +480,17 @@ int chp_new(struct chp_id chpid)
put_device(&chp->dev);
goto out;
}
mutex_lock(&channel_subsystems[chpid.cssid]->mutex);
if (channel_subsystems[chpid.cssid]->cm_enabled) {
mutex_lock(&css->mutex);
if (css->cm_enabled) {
ret = chp_add_cmg_attr(chp);
if (ret) {
device_unregister(&chp->dev);
mutex_unlock(&channel_subsystems[chpid.cssid]->mutex);
mutex_unlock(&css->mutex);
goto out;
}
}
channel_subsystems[chpid.cssid]->chps[chpid.id] = chp;
mutex_unlock(&channel_subsystems[chpid.cssid]->mutex);
css->chps[chpid.id] = chp;
mutex_unlock(&css->mutex);
goto out;
out_free:
kfree(chp);

View File

@ -54,7 +54,7 @@ struct channel_path {
/* Return channel_path struct for given chpid. */
static inline struct channel_path *chpid_to_chp(struct chp_id chpid)
{
return channel_subsystems[chpid.cssid]->chps[chpid.id];
return css_by_id(chpid.cssid)->chps[chpid.id];
}
int chp_get_status(struct chp_id chpid);

View File

@ -1131,6 +1131,52 @@ int chsc_enable_facility(int operation_code)
return ret;
}
int __init chsc_get_cssid(int idx)
{
struct {
struct chsc_header request;
u8 atype;
u32 : 24;
u32 reserved1[6];
struct chsc_header response;
u32 reserved2[3];
struct {
u8 cssid;
u32 : 24;
} list[0];
} __packed *sdcal_area;
int ret;
spin_lock_irq(&chsc_page_lock);
memset(chsc_page, 0, PAGE_SIZE);
sdcal_area = chsc_page;
sdcal_area->request.length = 0x0020;
sdcal_area->request.code = 0x0034;
sdcal_area->atype = 4;
ret = chsc(sdcal_area);
if (ret) {
ret = (ret == 3) ? -ENODEV : -EBUSY;
goto exit;
}
ret = chsc_error_from_response(sdcal_area->response.code);
if (ret) {
CIO_CRW_EVENT(2, "chsc: sdcal failed (rc=%04x)\n",
sdcal_area->response.code);
goto exit;
}
if ((addr_t) &sdcal_area->list[idx] <
(addr_t) &sdcal_area->response + sdcal_area->response.length)
ret = sdcal_area->list[idx].cssid;
else
ret = -ENODEV;
exit:
spin_unlock_irq(&chsc_page_lock);
return ret;
}
struct css_general_char css_general_characteristics;
struct css_chsc_char css_chsc_characteristics;
@ -1216,7 +1262,7 @@ int chsc_sstpi(void *page, void *result, size_t size)
struct chsc_header request;
unsigned int rsvd0[3];
struct chsc_header response;
char data[size];
char data[];
} __attribute__ ((packed)) *rr;
int rc;

View File

@ -242,6 +242,8 @@ int chsc_pnso_brinfo(struct subchannel_id schid,
struct chsc_brinfo_resume_token resume_token,
int cnc);
int __init chsc_get_cssid(int idx);
#ifdef CONFIG_SCM_BUS
int scm_update_information(void);
int scm_process_availability_information(void);

View File

@ -1085,15 +1085,9 @@ static ssize_t cmb_show_avg_utilization(struct device *dev,
data.function_pending_time +
data.device_disconnect_time;
/* shift to avoid long long division */
while (-1ul < (data.elapsed_time | utilization)) {
utilization >>= 8;
data.elapsed_time >>= 8;
}
/* calculate value in 0.1 percent units */
t = (unsigned long) data.elapsed_time / 1000;
u = (unsigned long) utilization / t;
t = data.elapsed_time / 1000;
u = utilization / t;
return sprintf(buf, "%02ld.%01ld%%\n", u/ 10, u - (u/ 10) * 10);
}
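The hunk above drops the manual right-shift loop (a plain 64-bit division is cheap on 64-bit s390) and computes the average utilization with two divisions; since utilization and elapsed_time are in the same time units, utilization / (elapsed_time / 1000) gives the result in 0.1% steps, which the sprintf() then prints as "NN.N%". A tiny worked example of the arithmetic with made-up sample numbers:

/*
 * Illustration only, not from the patch: the 0.1%-unit arithmetic used by
 * cmb_show_avg_utilization(), with sample values.
 */
#include <stdio.h>

int main(void)
{
	unsigned long utilization = 123456;	/* busy time, sample value */
	unsigned long elapsed_time = 1000000;	/* elapsed time, sample value */
	unsigned long t, u;

	t = elapsed_time / 1000;	/* 0.1% of the elapsed time */
	u = utilization / t;		/* utilization in 0.1% units: 123 */
	printf("%02lu.%01lu%%\n", u / 10, u % 10);	/* prints "12.3%" */
	return 0;
}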

View File

@ -36,7 +36,8 @@
int css_init_done = 0;
int max_ssid;
struct channel_subsystem *channel_subsystems[__MAX_CSSID + 1];
#define MAX_CSS_IDX 0
struct channel_subsystem *channel_subsystems[MAX_CSS_IDX + 1];
static struct bus_type css_bus_type;
int
@ -702,7 +703,8 @@ css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
if (css_general_characteristics.mcss) {
css->global_pgid.pgid_high.ext_cssid.version = 0x80;
css->global_pgid.pgid_high.ext_cssid.cssid = css->cssid;
css->global_pgid.pgid_high.ext_cssid.cssid =
(css->cssid < 0) ? 0 : css->cssid;
} else {
css->global_pgid.pgid_high.cpu_addr = stap();
}
@ -712,43 +714,44 @@ css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
css->global_pgid.tod_high = tod_high;
}
static void
channel_subsystem_release(struct device *dev)
static void channel_subsystem_release(struct device *dev)
{
struct channel_subsystem *css;
struct channel_subsystem *css = to_css(dev);
css = to_css(dev);
mutex_destroy(&css->mutex);
if (css->pseudo_subchannel) {
/* Implies that it has been generated but never registered. */
css_subchannel_release(&css->pseudo_subchannel->dev);
css->pseudo_subchannel = NULL;
}
kfree(css);
}
static ssize_t
css_cm_enable_show(struct device *dev, struct device_attribute *attr,
char *buf)
static ssize_t real_cssid_show(struct device *dev, struct device_attribute *a,
char *buf)
{
struct channel_subsystem *css = to_css(dev);
if (css->cssid < 0)
return -EINVAL;
return sprintf(buf, "%x\n", css->cssid);
}
static DEVICE_ATTR_RO(real_cssid);
static ssize_t cm_enable_show(struct device *dev, struct device_attribute *a,
char *buf)
{
struct channel_subsystem *css = to_css(dev);
int ret;
if (!css)
return 0;
mutex_lock(&css->mutex);
ret = sprintf(buf, "%x\n", css->cm_enabled);
mutex_unlock(&css->mutex);
return ret;
}
static ssize_t
css_cm_enable_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
static ssize_t cm_enable_store(struct device *dev, struct device_attribute *a,
const char *buf, size_t count)
{
struct channel_subsystem *css = to_css(dev);
int ret;
unsigned long val;
int ret;
ret = kstrtoul(buf, 16, &val);
if (ret)
@ -767,51 +770,104 @@ css_cm_enable_store(struct device *dev, struct device_attribute *attr,
mutex_unlock(&css->mutex);
return ret < 0 ? ret : count;
}
static DEVICE_ATTR_RW(cm_enable);
static DEVICE_ATTR(cm_enable, 0644, css_cm_enable_show, css_cm_enable_store);
static umode_t cm_enable_mode(struct kobject *kobj, struct attribute *attr,
int index)
{
return css_chsc_characteristics.secm ? attr->mode : 0;
}
static struct attribute *cssdev_attrs[] = {
&dev_attr_real_cssid.attr,
NULL,
};
static struct attribute_group cssdev_attr_group = {
.attrs = cssdev_attrs,
};
static struct attribute *cssdev_cm_attrs[] = {
&dev_attr_cm_enable.attr,
NULL,
};
static struct attribute_group cssdev_cm_attr_group = {
.attrs = cssdev_cm_attrs,
.is_visible = cm_enable_mode,
};
static const struct attribute_group *cssdev_attr_groups[] = {
&cssdev_attr_group,
&cssdev_cm_attr_group,
NULL,
};
static int __init setup_css(int nr)
{
u32 tod_high;
int ret;
struct channel_subsystem *css;
int ret;
css = channel_subsystems[nr];
memset(css, 0, sizeof(struct channel_subsystem));
css->pseudo_subchannel =
kzalloc(sizeof(*css->pseudo_subchannel), GFP_KERNEL);
if (!css->pseudo_subchannel)
css = kzalloc(sizeof(*css), GFP_KERNEL);
if (!css)
return -ENOMEM;
channel_subsystems[nr] = css;
dev_set_name(&css->device, "css%x", nr);
css->device.groups = cssdev_attr_groups;
css->device.release = channel_subsystem_release;
mutex_init(&css->mutex);
css->cssid = chsc_get_cssid(nr);
css_generate_pgid(css, (u32) (get_tod_clock() >> 32));
ret = device_register(&css->device);
if (ret) {
put_device(&css->device);
goto out_err;
}
css->pseudo_subchannel = kzalloc(sizeof(*css->pseudo_subchannel),
GFP_KERNEL);
if (!css->pseudo_subchannel) {
device_unregister(&css->device);
ret = -ENOMEM;
goto out_err;
}
css->pseudo_subchannel->dev.parent = &css->device;
css->pseudo_subchannel->dev.release = css_subchannel_release;
dev_set_name(&css->pseudo_subchannel->dev, "defunct");
mutex_init(&css->pseudo_subchannel->reg_mutex);
ret = css_sch_create_locks(css->pseudo_subchannel);
if (ret) {
kfree(css->pseudo_subchannel);
return ret;
device_unregister(&css->device);
goto out_err;
}
mutex_init(&css->mutex);
css->valid = 1;
css->cssid = nr;
dev_set_name(&css->device, "css%x", nr);
css->device.release = channel_subsystem_release;
tod_high = (u32) (get_tod_clock() >> 32);
css_generate_pgid(css, tod_high);
return 0;
dev_set_name(&css->pseudo_subchannel->dev, "defunct");
ret = device_register(&css->pseudo_subchannel->dev);
if (ret) {
put_device(&css->pseudo_subchannel->dev);
device_unregister(&css->device);
goto out_err;
}
return ret;
out_err:
channel_subsystems[nr] = NULL;
return ret;
}
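The reworked error paths in setup_css() follow the driver-core ownership rule: once device_register() has been called on the embedded struct device, the allocation belongs to the kobject and must be released through put_device()/device_unregister(), so that the release callback does the kfree(), never by freeing the structure directly. A minimal sketch of that idiom with hypothetical names (not code from this patch):

	struct foo {
		struct device dev;
	};

	static void foo_release(struct device *dev)
	{
		kfree(container_of(dev, struct foo, dev));
	}

	static int foo_setup(void)
	{
		struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);
		int ret;

		if (!f)
			return -ENOMEM;
		dev_set_name(&f->dev, "foo0");
		f->dev.release = foo_release;
		ret = device_register(&f->dev);
		if (ret) {
			/* registration failed: drop the reference, foo_release() frees f */
			put_device(&f->dev);
			return ret;
		}
		return 0;
	}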
static int css_reboot_event(struct notifier_block *this,
unsigned long event,
void *ptr)
{
int ret, i;
struct channel_subsystem *css;
int ret;
ret = NOTIFY_DONE;
for (i = 0; i <= __MAX_CSSID; i++) {
struct channel_subsystem *css;
css = channel_subsystems[i];
for_each_css(css) {
mutex_lock(&css->mutex);
if (css->cm_enabled)
if (chsc_secm(css, 0))
@ -835,16 +891,14 @@ static struct notifier_block css_reboot_notifier = {
static int css_power_event(struct notifier_block *this, unsigned long event,
void *ptr)
{
int ret, i;
struct channel_subsystem *css;
int ret;
switch (event) {
case PM_HIBERNATION_PREPARE:
case PM_SUSPEND_PREPARE:
ret = NOTIFY_DONE;
for (i = 0; i <= __MAX_CSSID; i++) {
struct channel_subsystem *css;
css = channel_subsystems[i];
for_each_css(css) {
mutex_lock(&css->mutex);
if (!css->cm_enabled) {
mutex_unlock(&css->mutex);
@ -858,10 +912,7 @@ static int css_power_event(struct notifier_block *this, unsigned long event,
case PM_POST_HIBERNATION:
case PM_POST_SUSPEND:
ret = NOTIFY_DONE;
for (i = 0; i <= __MAX_CSSID; i++) {
struct channel_subsystem *css;
css = channel_subsystems[i];
for_each_css(css) {
mutex_lock(&css->mutex);
if (!css->cm_enabled) {
mutex_unlock(&css->mutex);
@ -916,36 +967,10 @@ static int __init css_bus_init(void)
goto out;
/* Setup css structure. */
for (i = 0; i <= __MAX_CSSID; i++) {
struct channel_subsystem *css;
css = kmalloc(sizeof(struct channel_subsystem), GFP_KERNEL);
if (!css) {
ret = -ENOMEM;
goto out_unregister;
}
channel_subsystems[i] = css;
for (i = 0; i <= MAX_CSS_IDX; i++) {
ret = setup_css(i);
if (ret) {
kfree(channel_subsystems[i]);
if (ret)
goto out_unregister;
}
ret = device_register(&css->device);
if (ret) {
put_device(&css->device);
goto out_unregister;
}
if (css_chsc_characteristics.secm) {
ret = device_create_file(&css->device,
&dev_attr_cm_enable);
if (ret)
goto out_device;
}
ret = device_register(&css->pseudo_subchannel->dev);
if (ret) {
put_device(&css->pseudo_subchannel->dev);
goto out_file;
}
}
ret = register_reboot_notifier(&css_reboot_notifier);
if (ret)
@ -961,23 +986,10 @@ static int __init css_bus_init(void)
isc_register(IO_SCH_ISC);
return 0;
out_file:
if (css_chsc_characteristics.secm)
device_remove_file(&channel_subsystems[i]->device,
&dev_attr_cm_enable);
out_device:
device_unregister(&channel_subsystems[i]->device);
out_unregister:
while (i > 0) {
struct channel_subsystem *css;
i--;
css = channel_subsystems[i];
while (i-- > 0) {
struct channel_subsystem *css = channel_subsystems[i];
device_unregister(&css->pseudo_subchannel->dev);
css->pseudo_subchannel = NULL;
if (css_chsc_characteristics.secm)
device_remove_file(&css->device,
&dev_attr_cm_enable);
device_unregister(&css->device);
}
bus_unregister(&css_bus_type);
@ -993,14 +1005,9 @@ out:
static void __init css_bus_cleanup(void)
{
struct channel_subsystem *css;
int i;
for (i = 0; i <= __MAX_CSSID; i++) {
css = channel_subsystems[i];
for_each_css(css) {
device_unregister(&css->pseudo_subchannel->dev);
css->pseudo_subchannel = NULL;
if (css_chsc_characteristics.secm)
device_remove_file(&css->device, &dev_attr_cm_enable);
device_unregister(&css->device);
}
bus_unregister(&css_bus_type);

View File

@ -113,8 +113,7 @@ extern int for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *);
void css_update_ssd_info(struct subchannel *sch);
struct channel_subsystem {
u8 cssid;
int valid;
int cssid;
struct channel_path *chps[__MAX_CHPID + 1];
struct device device;
struct pgid global_pgid;
@ -130,6 +129,16 @@ struct channel_subsystem {
extern struct channel_subsystem *channel_subsystems[];
/* Dummy helper which needs to change once we support more than one css. */
static inline struct channel_subsystem *css_by_id(u8 cssid)
{
return channel_subsystems[0];
}
/* Dummy iterator which needs to change once we support more than one css. */
#define for_each_css(css) \
for ((css) = channel_subsystems[0]; (css); (css) = NULL)
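A minimal usage sketch of the new iterator (hypothetical caller, not part of the patch); it replaces the open-coded 0..__MAX_CSSID index loops seen in the reboot and power notifiers above:

	static unsigned int count_registered_css(void)
	{
		struct channel_subsystem *css;
		unsigned int n = 0;

		for_each_css(css)
			n++;
		return n;
	}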
/* Helper functions to build lists for the slow path. */
void css_schedule_eval(struct subchannel_id schid);
void css_schedule_eval_all(void);

View File

@ -457,7 +457,7 @@ static inline void inbound_primed(struct qdio_q *q, int count)
{
int new;
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in prim: %02x", count);
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in prim:%1d %02x", q->nr, count);
/* for QEBSM the ACK was already set by EQBS */
if (is_qebsm(q)) {
@ -544,7 +544,8 @@ static int get_inbound_buffer_frontier(struct qdio_q *q)
case SLSB_P_INPUT_ACK:
if (q->irq_ptr->perf_stat_enabled)
q->q_stats.nr_sbal_nop++;
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in nop");
DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in nop:%1d %#02x",
q->nr, q->first_to_check);
break;
default:
WARN_ON_ONCE(1);

View File

@ -147,11 +147,11 @@ static inline void tiqdio_call_inq_handlers(struct qdio_irq *irq)
struct qdio_q *q;
int i;
for_each_input_queue(irq, q, i) {
if (!references_shared_dsci(irq) &&
has_multiple_inq_on_dsci(irq))
xchg(q->irq_ptr->dsci, 0);
if (!references_shared_dsci(irq) &&
has_multiple_inq_on_dsci(irq))
xchg(irq->dsci, 0);
for_each_input_queue(irq, q, i) {
if (q->u.in.queue_start_poll) {
/* skip if polling is enabled or already in work */
if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
@ -161,11 +161,11 @@ static inline void tiqdio_call_inq_handlers(struct qdio_irq *irq)
}
/* avoid dsci clear here, done after processing */
q->u.in.queue_start_poll(q->irq_ptr->cdev, q->nr,
q->irq_ptr->int_parm);
q->u.in.queue_start_poll(irq->cdev, q->nr,
irq->int_parm);
} else {
if (!shared_ind(q->irq_ptr))
xchg(q->irq_ptr->dsci, 0);
if (!shared_ind(irq))
xchg(irq->dsci, 0);
/*
* Call inbound processing but not directly
@ -178,8 +178,7 @@ static inline void tiqdio_call_inq_handlers(struct qdio_irq *irq)
/**
* tiqdio_thinint_handler - thin interrupt handler for qdio
* @alsi: pointer to adapter local summary indicator
* @data: NULL
* @airq: pointer to adapter interrupt descriptor
*/
static void tiqdio_thinint_handler(struct airq_struct *airq)
{

View File

@ -129,7 +129,6 @@ static inline struct ap_queue_status ap_nqap(ap_qid_t qid,
unsigned long long psmid,
void *msg, size_t length)
{
struct msgblock { char _[length]; };
register unsigned long reg0 asm ("0") = qid | 0x40000000UL;
register struct ap_queue_status reg1 asm ("1");
register unsigned long reg2 asm ("2") = (unsigned long) msg;
@ -141,8 +140,8 @@ static inline struct ap_queue_status ap_nqap(ap_qid_t qid,
"0: .long 0xb2ad0042\n" /* NQAP */
" brc 2,0b"
: "+d" (reg0), "=d" (reg1), "+d" (reg2), "+d" (reg3)
: "d" (reg4), "d" (reg5), "m" (*(struct msgblock *) msg)
: "cc");
: "d" (reg4), "d" (reg5)
: "cc", "memory");
return reg1;
}
@ -168,7 +167,6 @@ static inline struct ap_queue_status ap_dqap(ap_qid_t qid,
unsigned long long *psmid,
void *msg, size_t length)
{
struct msgblock { char _[length]; };
register unsigned long reg0 asm("0") = qid | 0x80000000UL;
register struct ap_queue_status reg1 asm ("1");
register unsigned long reg2 asm("2") = 0UL;
@ -182,8 +180,8 @@ static inline struct ap_queue_status ap_dqap(ap_qid_t qid,
"0: .long 0xb2ae0064\n" /* DQAP */
" brc 6,0b\n"
: "+d" (reg0), "=d" (reg1), "+d" (reg2),
"+d" (reg4), "+d" (reg5), "+d" (reg6), "+d" (reg7),
"=m" (*(struct msgblock *) msg) : : "cc");
"+d" (reg4), "+d" (reg5), "+d" (reg6), "+d" (reg7)
: : "cc", "memory");
*psmid = (((unsigned long long) reg6) << 32) + reg7;
return reg1;
}
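Both ap_nqap() and ap_dqap() drop the variable-length struct msgblock cast, which described the message buffer as an explicit memory operand, and instead declare a blanket "memory" clobber, telling the compiler the asm may read or write memory it cannot see through the register operands. Roughly, and kept architecture-neutral with an empty asm body (hypothetical helper, not from this file):

	static inline void asm_touches_buffer(void *msg)
	{
		/*
		 * The "memory" clobber keeps the compiler from caching or
		 * reordering accesses to *msg across the asm statement.
		 */
		asm volatile("" : : "r" (msg) : "memory");
	}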

View File

@ -27,7 +27,7 @@
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/err.h>
@ -54,16 +54,7 @@
#include "ap_debug.h"
/*
* Module description.
*/
MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("Adjunct Processor Bus driver, " \
"Copyright IBM Corp. 2006, 2012");
MODULE_LICENSE("GPL");
MODULE_ALIAS_CRYPTO("z90crypt");
/*
* Module parameter
* Module parameters; note though this file itself isn't modular.
*/
int ap_domain_index = -1; /* Adjunct Processor Domain Index */
static DEFINE_SPINLOCK(ap_domain_lock);
@ -86,7 +77,6 @@ static bool initialised;
/*
* AP bus related debug feature things.
*/
static struct dentry *ap_dbf_root;
debug_info_t *ap_dbf_info;
/*
@ -1148,7 +1138,6 @@ static struct reset_call ap_reset_call = {
int __init ap_debug_init(void)
{
ap_dbf_root = debugfs_create_dir("ap", NULL);
ap_dbf_info = debug_register("ap", 1, 1,
DBF_MAX_SPRINTF_ARGS * sizeof(long));
debug_register_view(ap_dbf_info, &debug_sprintf_view);
@ -1159,7 +1148,6 @@ int __init ap_debug_init(void)
void ap_debug_exit(void)
{
debugfs_remove(ap_dbf_root);
debug_unregister(ap_dbf_info);
}
@ -1270,43 +1258,4 @@ out_free:
kfree(ap_configuration);
return rc;
}
/**
* ap_modules_exit(): The module termination code
*
* Terminates the module.
*/
void ap_module_exit(void)
{
int i;
initialised = false;
ap_reset_domain();
ap_poll_thread_stop();
del_timer_sync(&ap_config_timer);
hrtimer_cancel(&ap_poll_timer);
tasklet_kill(&ap_tasklet);
/* first remove queue devices */
bus_for_each_dev(&ap_bus_type, NULL, NULL,
__ap_queue_devices_unregister);
/* now remove the card devices */
bus_for_each_dev(&ap_bus_type, NULL, NULL,
__ap_card_devices_unregister);
/* remove bus attributes */
for (i = 0; ap_bus_attrs[i]; i++)
bus_remove_file(&ap_bus_type, ap_bus_attrs[i]);
unregister_pm_notifier(&ap_power_notifier);
root_device_unregister(ap_root_device);
bus_unregister(&ap_bus_type);
kfree(ap_configuration);
unregister_reset_call(&ap_reset_call);
if (ap_using_interrupts())
unregister_adapter_interrupt(&ap_airq);
ap_debug_exit();
}
module_init(ap_module_init);
module_exit(ap_module_exit);
device_initcall(ap_module_init);
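This is the usual shape of a built-in-only conversion: the MODULE_* metadata, the exit path and module_exit() disappear, and the init routine is registered with device_initcall() instead of module_init(). Stripped to its skeleton (hypothetical driver name, not this file):

	#include <linux/init.h>

	static int __init foo_bus_init(void)
	{
		/* one-time setup; built-in code is never unloaded, so no exit handler */
		return 0;
	}
	device_initcall(foo_bus_init);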

View File

@ -137,7 +137,7 @@ static const struct attribute_group *ap_card_dev_attr_groups[] = {
NULL
};
struct device_type ap_card_type = {
static struct device_type ap_card_type = {
.name = "ap_card",
.groups = ap_card_dev_attr_groups,
};
