From 4f5e838605c264fcf16c3ff9495bd83da99acc6a Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Tue, 2 Jan 2018 12:15:53 -0800 Subject: [PATCH 01/38] usercopy: Remove pointer from overflow report Using %p was already mostly useless in the usercopy overflow reports, so this removes it entirely to avoid confusion now that %p-hashing is enabled. Fixes: ad67b74d2469d9b8 ("printk: hash addresses printed with %p") Signed-off-by: Kees Cook --- mm/usercopy.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/mm/usercopy.c b/mm/usercopy.c index a9852b24715d..5df1e68d4585 100644 --- a/mm/usercopy.c +++ b/mm/usercopy.c @@ -58,12 +58,11 @@ static noinline int check_stack_object(const void *obj, unsigned long len) return GOOD_STACK; } -static void report_usercopy(const void *ptr, unsigned long len, - bool to_user, const char *type) +static void report_usercopy(unsigned long len, bool to_user, const char *type) { - pr_emerg("kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n", + pr_emerg("kernel memory %s attempt detected %s '%s' (%lu bytes)\n", to_user ? "exposure" : "overwrite", - to_user ? "from" : "to", ptr, type ? : "unknown", len); + to_user ? "from" : "to", type ? : "unknown", len); /* * For greater effect, it would be nice to do do_group_exit(), * but BUG() actually hooks all the lock-breaking and per-arch @@ -261,6 +260,6 @@ void __check_object_size(const void *ptr, unsigned long n, bool to_user) return; report: - report_usercopy(ptr, n, to_user, err); + report_usercopy(n, to_user, err); } EXPORT_SYMBOL(__check_object_size); From b394d468e7d75637e682a9be4a1181b27186c593 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Wed, 10 Jan 2018 14:22:38 -0800 Subject: [PATCH 02/38] usercopy: Enhance and rename report_usercopy() In preparation for refactoring the usercopy checks to pass offset to the hardened usercopy report, this renames report_usercopy() to the more accurate usercopy_abort(), marks it as noreturn because it is, adds a hopefully helpful comment for anyone investigating such reports, makes the function available to the slab allocators, and adds new "detail" and "offset" arguments. Signed-off-by: Kees Cook --- include/linux/uaccess.h | 6 ++++++ mm/usercopy.c | 24 +++++++++++++++++++----- tools/objtool/check.c | 1 + 3 files changed, 26 insertions(+), 5 deletions(-) diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h index 251e655d407f..38b6442dc569 100644 --- a/include/linux/uaccess.h +++ b/include/linux/uaccess.h @@ -273,4 +273,10 @@ extern long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count); #define unsafe_put_user(x, ptr, err) do { if (unlikely(__put_user(x, ptr))) goto err; } while (0) #endif +#ifdef CONFIG_HARDENED_USERCOPY +void __noreturn usercopy_abort(const char *name, const char *detail, + bool to_user, unsigned long offset, + unsigned long len); +#endif + #endif /* __LINUX_UACCESS_H__ */ diff --git a/mm/usercopy.c b/mm/usercopy.c index 5df1e68d4585..8006baa4caac 100644 --- a/mm/usercopy.c +++ b/mm/usercopy.c @@ -58,11 +58,25 @@ static noinline int check_stack_object(const void *obj, unsigned long len) return GOOD_STACK; } -static void report_usercopy(unsigned long len, bool to_user, const char *type) +/* + * If this function is reached, then CONFIG_HARDENED_USERCOPY has found an + * unexpected state during a copy_from_user() or copy_to_user() call. + * There are several checks being performed on the buffer by the + * __check_object_size() function. 
Normal stack buffer usage should never + * trip the checks, and kernel text addressing will always trip the check. + * For cache objects, copies must be within the object size. + */ +void __noreturn usercopy_abort(const char *name, const char *detail, + bool to_user, unsigned long offset, + unsigned long len) { - pr_emerg("kernel memory %s attempt detected %s '%s' (%lu bytes)\n", - to_user ? "exposure" : "overwrite", - to_user ? "from" : "to", type ? : "unknown", len); + pr_emerg("Kernel memory %s attempt detected %s %s%s%s%s (offset %lu, size %lu)!\n", + to_user ? "exposure" : "overwrite", + to_user ? "from" : "to", + name ? : "unknown?!", + detail ? " '" : "", detail ? : "", detail ? "'" : "", + offset, len); + /* * For greater effect, it would be nice to do do_group_exit(), * but BUG() actually hooks all the lock-breaking and per-arch @@ -260,6 +274,6 @@ void __check_object_size(const void *ptr, unsigned long n, bool to_user) return; report: - report_usercopy(n, to_user, err); + usercopy_abort(err, NULL, to_user, 0, n); } EXPORT_SYMBOL(__check_object_size); diff --git a/tools/objtool/check.c b/tools/objtool/check.c index 9b341584eb1b..ae39444896d4 100644 --- a/tools/objtool/check.c +++ b/tools/objtool/check.c @@ -138,6 +138,7 @@ static int __dead_end_function(struct objtool_file *file, struct symbol *func, "__reiserfs_panic", "lbug_with_loc", "fortify_panic", + "usercopy_abort", }; if (func->bind == STB_WEAK) From f4e6e289cb9cf67885b6b18b9d56d2c3e1c714a1 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Wed, 10 Jan 2018 14:48:22 -0800 Subject: [PATCH 03/38] usercopy: Include offset in hardened usercopy report This refactors the hardened usercopy code so that failure reporting can happen within the checking functions instead of at the top level. This simplifies the return value handling and allows more details and offsets to be included in the report. Having the offset can be much more helpful in understanding hardened usercopy bugs. Signed-off-by: Kees Cook --- include/linux/slab.h | 12 ++---- mm/slab.c | 8 ++-- mm/slub.c | 14 ++++--- mm/usercopy.c | 95 +++++++++++++++++++------------------------- 4 files changed, 57 insertions(+), 72 deletions(-) diff --git a/include/linux/slab.h b/include/linux/slab.h index 50697a1d6621..2dbeccdcb76b 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -167,15 +167,11 @@ void kzfree(const void *); size_t ksize(const void *); #ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR -const char *__check_heap_object(const void *ptr, unsigned long n, - struct page *page); +void __check_heap_object(const void *ptr, unsigned long n, struct page *page, + bool to_user); #else -static inline const char *__check_heap_object(const void *ptr, - unsigned long n, - struct page *page) -{ - return NULL; -} +static inline void __check_heap_object(const void *ptr, unsigned long n, + struct page *page, bool to_user) { } #endif /* diff --git a/mm/slab.c b/mm/slab.c index 183e996dde5f..b2beb2cc15e2 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -4397,8 +4397,8 @@ module_init(slab_proc_init); * Returns NULL if check passes, otherwise const char * to name of cache * to indicate an error. */ -const char *__check_heap_object(const void *ptr, unsigned long n, - struct page *page) +void __check_heap_object(const void *ptr, unsigned long n, struct page *page, + bool to_user) { struct kmem_cache *cachep; unsigned int objnr; @@ -4414,9 +4414,9 @@ const char *__check_heap_object(const void *ptr, unsigned long n, /* Allow address range falling entirely within object size. 
*/ if (offset <= cachep->object_size && n <= cachep->object_size - offset) - return NULL; + return; - return cachep->name; + usercopy_abort("SLAB object", cachep->name, to_user, offset, n); } #endif /* CONFIG_HARDENED_USERCOPY */ diff --git a/mm/slub.c b/mm/slub.c index cfd56e5a35fb..bcd22332300a 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -3818,8 +3818,8 @@ EXPORT_SYMBOL(__kmalloc_node); * Returns NULL if check passes, otherwise const char * to name of cache * to indicate an error. */ -const char *__check_heap_object(const void *ptr, unsigned long n, - struct page *page) +void __check_heap_object(const void *ptr, unsigned long n, struct page *page, + bool to_user) { struct kmem_cache *s; unsigned long offset; @@ -3831,7 +3831,8 @@ const char *__check_heap_object(const void *ptr, unsigned long n, /* Reject impossible pointers. */ if (ptr < page_address(page)) - return s->name; + usercopy_abort("SLUB object not in SLUB page?!", NULL, + to_user, 0, n); /* Find offset within object. */ offset = (ptr - page_address(page)) % s->size; @@ -3839,15 +3840,16 @@ const char *__check_heap_object(const void *ptr, unsigned long n, /* Adjust for redzone and reject if within the redzone. */ if (kmem_cache_debug(s) && s->flags & SLAB_RED_ZONE) { if (offset < s->red_left_pad) - return s->name; + usercopy_abort("SLUB object in left red zone", + s->name, to_user, offset, n); offset -= s->red_left_pad; } /* Allow address range falling entirely within object size. */ if (offset <= object_size && n <= object_size - offset) - return NULL; + return; - return s->name; + usercopy_abort("SLUB object", s->name, to_user, offset, n); } #endif /* CONFIG_HARDENED_USERCOPY */ diff --git a/mm/usercopy.c b/mm/usercopy.c index 8006baa4caac..a562dd094ace 100644 --- a/mm/usercopy.c +++ b/mm/usercopy.c @@ -86,10 +86,10 @@ void __noreturn usercopy_abort(const char *name, const char *detail, } /* Returns true if any portion of [ptr,ptr+n) over laps with [low,high). */ -static bool overlaps(const void *ptr, unsigned long n, unsigned long low, - unsigned long high) +static bool overlaps(const unsigned long ptr, unsigned long n, + unsigned long low, unsigned long high) { - unsigned long check_low = (uintptr_t)ptr; + const unsigned long check_low = ptr; unsigned long check_high = check_low + n; /* Does not overlap if entirely above or entirely below. */ @@ -100,15 +100,15 @@ static bool overlaps(const void *ptr, unsigned long n, unsigned long low, } /* Is this address range in the kernel text area? */ -static inline const char *check_kernel_text_object(const void *ptr, - unsigned long n) +static inline void check_kernel_text_object(const unsigned long ptr, + unsigned long n, bool to_user) { unsigned long textlow = (unsigned long)_stext; unsigned long texthigh = (unsigned long)_etext; unsigned long textlow_linear, texthigh_linear; if (overlaps(ptr, n, textlow, texthigh)) - return ""; + usercopy_abort("kernel text", NULL, to_user, ptr - textlow, n); /* * Some architectures have virtual memory mappings with a secondary @@ -121,32 +121,30 @@ static inline const char *check_kernel_text_object(const void *ptr, textlow_linear = (unsigned long)lm_alias(textlow); /* No different mapping: we're done. */ if (textlow_linear == textlow) - return NULL; + return; /* Check the secondary mapping... 
*/ texthigh_linear = (unsigned long)lm_alias(texthigh); if (overlaps(ptr, n, textlow_linear, texthigh_linear)) - return ""; - - return NULL; + usercopy_abort("linear kernel text", NULL, to_user, + ptr - textlow_linear, n); } -static inline const char *check_bogus_address(const void *ptr, unsigned long n) +static inline void check_bogus_address(const unsigned long ptr, unsigned long n, + bool to_user) { /* Reject if object wraps past end of memory. */ - if ((unsigned long)ptr + n < (unsigned long)ptr) - return ""; + if (ptr + n < ptr) + usercopy_abort("wrapped address", NULL, to_user, 0, ptr + n); /* Reject if NULL or ZERO-allocation. */ if (ZERO_OR_NULL_PTR(ptr)) - return ""; - - return NULL; + usercopy_abort("null address", NULL, to_user, ptr, n); } /* Checks for allocs that are marked in some way as spanning multiple pages. */ -static inline const char *check_page_span(const void *ptr, unsigned long n, - struct page *page, bool to_user) +static inline void check_page_span(const void *ptr, unsigned long n, + struct page *page, bool to_user) { #ifdef CONFIG_HARDENED_USERCOPY_PAGESPAN const void *end = ptr + n - 1; @@ -163,28 +161,28 @@ static inline const char *check_page_span(const void *ptr, unsigned long n, if (ptr >= (const void *)__start_rodata && end <= (const void *)__end_rodata) { if (!to_user) - return ""; - return NULL; + usercopy_abort("rodata", NULL, to_user, 0, n); + return; } /* Allow kernel data region (if not marked as Reserved). */ if (ptr >= (const void *)_sdata && end <= (const void *)_edata) - return NULL; + return; /* Allow kernel bss region (if not marked as Reserved). */ if (ptr >= (const void *)__bss_start && end <= (const void *)__bss_stop) - return NULL; + return; /* Is the object wholly within one base page? */ if (likely(((unsigned long)ptr & (unsigned long)PAGE_MASK) == ((unsigned long)end & (unsigned long)PAGE_MASK))) - return NULL; + return; /* Allow if fully inside the same compound (__GFP_COMP) page. */ endpage = virt_to_head_page(end); if (likely(endpage == page)) - return NULL; + return; /* * Reject if range is entirely either Reserved (i.e. special or @@ -194,36 +192,37 @@ static inline const char *check_page_span(const void *ptr, unsigned long n, is_reserved = PageReserved(page); is_cma = is_migrate_cma_page(page); if (!is_reserved && !is_cma) - return ""; + usercopy_abort("spans multiple pages", NULL, to_user, 0, n); for (ptr += PAGE_SIZE; ptr <= end; ptr += PAGE_SIZE) { page = virt_to_head_page(ptr); if (is_reserved && !PageReserved(page)) - return ""; + usercopy_abort("spans Reserved and non-Reserved pages", + NULL, to_user, 0, n); if (is_cma && !is_migrate_cma_page(page)) - return ""; + usercopy_abort("spans CMA and non-CMA pages", NULL, + to_user, 0, n); } #endif - - return NULL; } -static inline const char *check_heap_object(const void *ptr, unsigned long n, - bool to_user) +static inline void check_heap_object(const void *ptr, unsigned long n, + bool to_user) { struct page *page; if (!virt_addr_valid(ptr)) - return NULL; + return; page = virt_to_head_page(ptr); - /* Check slab allocator for flags and size. */ - if (PageSlab(page)) - return __check_heap_object(ptr, n, page); - - /* Verify object does not incorrectly span multiple pages. */ - return check_page_span(ptr, n, page, to_user); + if (PageSlab(page)) { + /* Check slab allocator for flags and size. */ + __check_heap_object(ptr, n, page, to_user); + } else { + /* Verify object does not incorrectly span multiple pages. 
*/ + check_page_span(ptr, n, page, to_user); + } } /* @@ -234,21 +233,15 @@ static inline const char *check_heap_object(const void *ptr, unsigned long n, */ void __check_object_size(const void *ptr, unsigned long n, bool to_user) { - const char *err; - /* Skip all tests if size is zero. */ if (!n) return; /* Check for invalid addresses. */ - err = check_bogus_address(ptr, n); - if (err) - goto report; + check_bogus_address((const unsigned long)ptr, n, to_user); /* Check for bad heap object. */ - err = check_heap_object(ptr, n, to_user); - if (err) - goto report; + check_heap_object(ptr, n, to_user); /* Check for bad stack object. */ switch (check_stack_object(ptr, n)) { @@ -264,16 +257,10 @@ void __check_object_size(const void *ptr, unsigned long n, bool to_user) */ return; default: - err = ""; - goto report; + usercopy_abort("process stack", NULL, to_user, 0, n); } /* Check for object in kernel to avoid text exposure. */ - err = check_kernel_text_object(ptr, n); - if (!err) - return; - -report: - usercopy_abort(err, NULL, to_user, 0, n); + check_kernel_text_object((const unsigned long)ptr, n, to_user); } EXPORT_SYMBOL(__check_object_size); From c75886862480bb243833e94292db9c44b8af9868 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Tue, 2 Jan 2018 12:06:27 -0800 Subject: [PATCH 04/38] lkdtm/usercopy: Adjust test to include an offset to check reporting Instead of doubling the size, push the start position up by 16 bytes to still trigger an overflow. This allows to verify that offset reporting is working correctly. Signed-off-by: Kees Cook --- drivers/misc/lkdtm_usercopy.c | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/drivers/misc/lkdtm_usercopy.c b/drivers/misc/lkdtm_usercopy.c index a64372cc148d..9ebbb031e5e3 100644 --- a/drivers/misc/lkdtm_usercopy.c +++ b/drivers/misc/lkdtm_usercopy.c @@ -119,6 +119,8 @@ static void do_usercopy_heap_size(bool to_user) { unsigned long user_addr; unsigned char *one, *two; + void __user *test_user_addr; + void *test_kern_addr; size_t size = unconst + 1024; one = kmalloc(size, GFP_KERNEL); @@ -139,27 +141,30 @@ static void do_usercopy_heap_size(bool to_user) memset(one, 'A', size); memset(two, 'B', size); + test_user_addr = (void __user *)(user_addr + 16); + test_kern_addr = one + 16; + if (to_user) { pr_info("attempting good copy_to_user of correct size\n"); - if (copy_to_user((void __user *)user_addr, one, size)) { + if (copy_to_user(test_user_addr, test_kern_addr, size / 2)) { pr_warn("copy_to_user failed unexpectedly?!\n"); goto free_user; } pr_info("attempting bad copy_to_user of too large size\n"); - if (copy_to_user((void __user *)user_addr, one, 2 * size)) { + if (copy_to_user(test_user_addr, test_kern_addr, size)) { pr_warn("copy_to_user failed, but lacked Oops\n"); goto free_user; } } else { pr_info("attempting good copy_from_user of correct size\n"); - if (copy_from_user(one, (void __user *)user_addr, size)) { + if (copy_from_user(test_kern_addr, test_user_addr, size / 2)) { pr_warn("copy_from_user failed unexpectedly?!\n"); goto free_user; } pr_info("attempting bad copy_from_user of too large size\n"); - if (copy_from_user(one, (void __user *)user_addr, 2 * size)) { + if (copy_from_user(test_kern_addr, test_user_addr, size)) { pr_warn("copy_from_user failed, but lacked Oops\n"); goto free_user; } From 4229a470175be14e1d2648713be8a5e8e8fbea02 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Wed, 10 Jan 2018 12:53:20 -0800 Subject: [PATCH 05/38] stddef.h: Introduce sizeof_field() The size of fields within a structure 
is needed in a few places in the kernel already, and will be needed for the usercopy whitelisting when declaring whitelist regions within structures. This creates a dedicated macro and redefines offsetofend() to use it. Existing usage, ignoring the 1200+ lustre assert uses: $ git grep -E 'sizeof\(\(\((struct )?[a-zA-Z_]+ \*\)0\)->' | \ grep -v staging/lustre | wc -l 65 Signed-off-by: Kees Cook --- include/linux/stddef.h | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/include/linux/stddef.h b/include/linux/stddef.h index 2181719fd907..998a4ba28eba 100644 --- a/include/linux/stddef.h +++ b/include/linux/stddef.h @@ -19,6 +19,14 @@ enum { #define offsetof(TYPE, MEMBER) ((size_t)&((TYPE *)0)->MEMBER) #endif +/** + * sizeof_field(TYPE, MEMBER) + * + * @TYPE: The structure containing the field of interest + * @MEMBER: The field to return the size of + */ +#define sizeof_field(TYPE, MEMBER) sizeof((((TYPE *)0)->MEMBER)) + /** * offsetofend(TYPE, MEMBER) * @@ -26,6 +34,6 @@ enum { * @MEMBER: The member within the structure to get the end offset of */ #define offsetofend(TYPE, MEMBER) \ - (offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER)) + (offsetof(TYPE, MEMBER) + sizeof_field(TYPE, MEMBER)) #endif From 8eb8284b412906181357c2b0110d879d5af95e52 Mon Sep 17 00:00:00 2001 From: David Windsor Date: Sat, 10 Jun 2017 22:50:28 -0400 Subject: [PATCH 06/38] usercopy: Prepare for usercopy whitelisting This patch prepares the slab allocator to handle caches having annotations (useroffset and usersize) defining usercopy regions. This patch is modified from Brad Spengler/PaX Team's PAX_USERCOPY whitelisting code in the last public patch of grsecurity/PaX based on my understanding of the code. Changes or omissions from the original code are mine and don't reflect the original grsecurity/PaX code. Currently, hardened usercopy performs dynamic bounds checking on slab cache objects. This is good, but still leaves a lot of kernel memory available to be copied to/from userspace in the face of bugs. To further restrict what memory is available for copying, this creates a way to whitelist specific areas of a given slab cache object for copying to/from userspace, allowing much finer granularity of access control. Slab caches that are never exposed to userspace can declare no whitelist for their objects, thereby keeping them unavailable to userspace via dynamic copy operations. (Note, an implicit form of whitelisting is the use of constant sizes in usercopy operations and get_user()/put_user(); these bypass hardened usercopy checks since these sizes cannot change at runtime.) To support this whitelist annotation, usercopy region offset and size members are added to struct kmem_cache. The slab allocator receives a new function, kmem_cache_create_usercopy(), that creates a new cache with a usercopy region defined, suitable for declaring spans of fields within the objects that get copied to/from userspace. In this patch, the default kmem_cache_create() marks the entire allocation as whitelisted, leaving it semantically unchanged. Once all fine-grained whitelists have been added (in subsequent patches), this will be changed to a usersize of 0, making caches created with kmem_cache_create() not copyable to/from userspace. 
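As an illustration of the new API (the struct and cache names below are hypothetical, shown only to demonstrate the calling convention), a cache that keeps a small userspace-visible buffer inside a larger object would whitelist just that field, using sizeof_field() from the previous patch:

	struct example_obj {
		spinlock_t lock;		/* never copied to/from userspace */
		char ubuf[64];			/* the only userspace-visible field */
		unsigned long internal_state;	/* never copied to/from userspace */
	};

	example_cachep = kmem_cache_create_usercopy("example_obj",
				sizeof(struct example_obj), 0,
				SLAB_HWCACHE_ALIGN,
				offsetof(struct example_obj, ubuf),	/* useroffset */
				sizeof_field(struct example_obj, ubuf),	/* usersize */
				NULL);

For the common single-field case, the KMEM_CACHE_USERCOPY() helper added later in this patch wraps exactly this offsetof()/sizeof_field() pattern.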
After the entire usercopy whitelist series is applied, less than 15% of the slab cache memory remains exposed to potential usercopy bugs after a fresh boot: Total Slab Memory: 48074720 Usercopyable Memory: 6367532 13.2% task_struct 0.2% 4480/1630720 RAW 0.3% 300/96000 RAWv6 2.1% 1408/64768 ext4_inode_cache 3.0% 269760/8740224 dentry 11.1% 585984/5273856 mm_struct 29.1% 54912/188448 kmalloc-8 100.0% 24576/24576 kmalloc-16 100.0% 28672/28672 kmalloc-32 100.0% 81920/81920 kmalloc-192 100.0% 96768/96768 kmalloc-128 100.0% 143360/143360 names_cache 100.0% 163840/163840 kmalloc-64 100.0% 167936/167936 kmalloc-256 100.0% 339968/339968 kmalloc-512 100.0% 350720/350720 kmalloc-96 100.0% 455616/455616 kmalloc-8192 100.0% 655360/655360 kmalloc-1024 100.0% 812032/812032 kmalloc-4096 100.0% 819200/819200 kmalloc-2048 100.0% 1310720/1310720 After some kernel build workloads, the percentage (mainly driven by dentry and inode caches expanding) drops under 10%: Total Slab Memory: 95516184 Usercopyable Memory: 8497452 8.8% task_struct 0.2% 4000/1456000 RAW 0.3% 300/96000 RAWv6 2.1% 1408/64768 ext4_inode_cache 3.0% 1217280/39439872 dentry 11.1% 1623200/14608800 mm_struct 29.1% 73216/251264 kmalloc-8 100.0% 24576/24576 kmalloc-16 100.0% 28672/28672 kmalloc-32 100.0% 94208/94208 kmalloc-192 100.0% 96768/96768 kmalloc-128 100.0% 143360/143360 names_cache 100.0% 163840/163840 kmalloc-64 100.0% 245760/245760 kmalloc-256 100.0% 339968/339968 kmalloc-512 100.0% 350720/350720 kmalloc-96 100.0% 563520/563520 kmalloc-8192 100.0% 655360/655360 kmalloc-1024 100.0% 794624/794624 kmalloc-4096 100.0% 819200/819200 kmalloc-2048 100.0% 1257472/1257472 Signed-off-by: David Windsor [kees: adjust commit log, split out a few extra kmalloc hunks] [kees: add field names to function declarations] [kees: convert BUGs to WARNs and fail closed] [kees: add attack surface reduction analysis to commit log] Cc: Pekka Enberg Cc: David Rientjes Cc: Joonsoo Kim Cc: Andrew Morton Cc: linux-mm@kvack.org Cc: linux-xfs@vger.kernel.org Signed-off-by: Kees Cook Acked-by: Christoph Lameter --- include/linux/slab.h | 27 +++++++++++++++++------ include/linux/slab_def.h | 3 +++ include/linux/slub_def.h | 3 +++ mm/slab.c | 2 +- mm/slab.h | 5 ++++- mm/slab_common.c | 46 +++++++++++++++++++++++++++++++++------- mm/slub.c | 11 ++++++++-- 7 files changed, 79 insertions(+), 18 deletions(-) diff --git a/include/linux/slab.h b/include/linux/slab.h index 2dbeccdcb76b..8bf14d9762ec 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -135,9 +135,13 @@ struct mem_cgroup; void __init kmem_cache_init(void); bool slab_is_available(void); -struct kmem_cache *kmem_cache_create(const char *, size_t, size_t, - slab_flags_t, - void (*)(void *)); +struct kmem_cache *kmem_cache_create(const char *name, size_t size, + size_t align, slab_flags_t flags, + void (*ctor)(void *)); +struct kmem_cache *kmem_cache_create_usercopy(const char *name, + size_t size, size_t align, slab_flags_t flags, + size_t useroffset, size_t usersize, + void (*ctor)(void *)); void kmem_cache_destroy(struct kmem_cache *); int kmem_cache_shrink(struct kmem_cache *); @@ -153,9 +157,20 @@ void memcg_destroy_kmem_caches(struct mem_cgroup *); * f.e. add ____cacheline_aligned_in_smp to the struct declaration * then the objects will be properly aligned in SMP configurations. 
*/ -#define KMEM_CACHE(__struct, __flags) kmem_cache_create(#__struct,\ - sizeof(struct __struct), __alignof__(struct __struct),\ - (__flags), NULL) +#define KMEM_CACHE(__struct, __flags) \ + kmem_cache_create(#__struct, sizeof(struct __struct), \ + __alignof__(struct __struct), (__flags), NULL) + +/* + * To whitelist a single field for copying to/from usercopy, use this + * macro instead for KMEM_CACHE() above. + */ +#define KMEM_CACHE_USERCOPY(__struct, __flags, __field) \ + kmem_cache_create_usercopy(#__struct, \ + sizeof(struct __struct), \ + __alignof__(struct __struct), (__flags), \ + offsetof(struct __struct, __field), \ + sizeof_field(struct __struct, __field), NULL) /* * Common kmalloc functions provided by all allocators diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h index 072e46e9e1d5..7385547c04b1 100644 --- a/include/linux/slab_def.h +++ b/include/linux/slab_def.h @@ -85,6 +85,9 @@ struct kmem_cache { unsigned int *random_seq; #endif + size_t useroffset; /* Usercopy region offset */ + size_t usersize; /* Usercopy region size */ + struct kmem_cache_node *node[MAX_NUMNODES]; }; diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h index 0adae162dc8f..8ad99c47b19c 100644 --- a/include/linux/slub_def.h +++ b/include/linux/slub_def.h @@ -135,6 +135,9 @@ struct kmem_cache { struct kasan_cache kasan_info; #endif + size_t useroffset; /* Usercopy region offset */ + size_t usersize; /* Usercopy region size */ + struct kmem_cache_node *node[MAX_NUMNODES]; }; diff --git a/mm/slab.c b/mm/slab.c index b2beb2cc15e2..47acfe54e1ae 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -1281,7 +1281,7 @@ void __init kmem_cache_init(void) create_boot_cache(kmem_cache, "kmem_cache", offsetof(struct kmem_cache, node) + nr_node_ids * sizeof(struct kmem_cache_node *), - SLAB_HWCACHE_ALIGN); + SLAB_HWCACHE_ALIGN, 0, 0); list_add(&kmem_cache->list, &slab_caches); slab_state = PARTIAL; diff --git a/mm/slab.h b/mm/slab.h index ad657ffa44e5..8f3030788e01 100644 --- a/mm/slab.h +++ b/mm/slab.h @@ -22,6 +22,8 @@ struct kmem_cache { unsigned int size; /* The aligned/padded/added on size */ unsigned int align; /* Alignment as calculated */ slab_flags_t flags; /* Active flags on the slab */ + size_t useroffset; /* Usercopy region offset */ + size_t usersize; /* Usercopy region size */ const char *name; /* Slab name for sysfs */ int refcount; /* Use counter */ void (*ctor)(void *); /* Called on object slot creation */ @@ -97,7 +99,8 @@ int __kmem_cache_create(struct kmem_cache *, slab_flags_t flags); extern struct kmem_cache *create_kmalloc_cache(const char *name, size_t size, slab_flags_t flags); extern void create_boot_cache(struct kmem_cache *, const char *name, - size_t size, slab_flags_t flags); + size_t size, slab_flags_t flags, size_t useroffset, + size_t usersize); int slab_unmergeable(struct kmem_cache *s); struct kmem_cache *find_mergeable(size_t size, size_t align, diff --git a/mm/slab_common.c b/mm/slab_common.c index c8cb36774ba1..fc3e66bdce75 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -281,6 +281,9 @@ int slab_unmergeable(struct kmem_cache *s) if (s->ctor) return 1; + if (s->usersize) + return 1; + /* * We may have set a slab to be unmergeable during bootstrap. 
*/ @@ -366,12 +369,16 @@ unsigned long calculate_alignment(slab_flags_t flags, static struct kmem_cache *create_cache(const char *name, size_t object_size, size_t size, size_t align, - slab_flags_t flags, void (*ctor)(void *), + slab_flags_t flags, size_t useroffset, + size_t usersize, void (*ctor)(void *), struct mem_cgroup *memcg, struct kmem_cache *root_cache) { struct kmem_cache *s; int err; + if (WARN_ON(useroffset + usersize > object_size)) + useroffset = usersize = 0; + err = -ENOMEM; s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL); if (!s) @@ -382,6 +389,8 @@ static struct kmem_cache *create_cache(const char *name, s->size = size; s->align = align; s->ctor = ctor; + s->useroffset = useroffset; + s->usersize = usersize; err = init_memcg_params(s, memcg, root_cache); if (err) @@ -406,11 +415,13 @@ out_free_cache: } /* - * kmem_cache_create - Create a cache. + * kmem_cache_create_usercopy - Create a cache. * @name: A string which is used in /proc/slabinfo to identify this cache. * @size: The size of objects to be created in this cache. * @align: The required alignment for the objects. * @flags: SLAB flags + * @useroffset: Usercopy region offset + * @usersize: Usercopy region size * @ctor: A constructor for the objects. * * Returns a ptr to the cache on success, NULL on failure. @@ -430,8 +441,9 @@ out_free_cache: * as davem. */ struct kmem_cache * -kmem_cache_create(const char *name, size_t size, size_t align, - slab_flags_t flags, void (*ctor)(void *)) +kmem_cache_create_usercopy(const char *name, size_t size, size_t align, + slab_flags_t flags, size_t useroffset, size_t usersize, + void (*ctor)(void *)) { struct kmem_cache *s = NULL; const char *cache_name; @@ -462,7 +474,13 @@ kmem_cache_create(const char *name, size_t size, size_t align, */ flags &= CACHE_CREATE_MASK; - s = __kmem_cache_alias(name, size, align, flags, ctor); + /* Fail closed on bad usersize of useroffset values. 
*/ + if (WARN_ON(!usersize && useroffset) || + WARN_ON(size < usersize || size - usersize < useroffset)) + usersize = useroffset = 0; + + if (!usersize) + s = __kmem_cache_alias(name, size, align, flags, ctor); if (s) goto out_unlock; @@ -474,7 +492,7 @@ kmem_cache_create(const char *name, size_t size, size_t align, s = create_cache(cache_name, size, size, calculate_alignment(flags, align, size), - flags, ctor, NULL, NULL); + flags, useroffset, usersize, ctor, NULL, NULL); if (IS_ERR(s)) { err = PTR_ERR(s); kfree_const(cache_name); @@ -500,6 +518,15 @@ out_unlock: } return s; } +EXPORT_SYMBOL(kmem_cache_create_usercopy); + +struct kmem_cache * +kmem_cache_create(const char *name, size_t size, size_t align, + slab_flags_t flags, void (*ctor)(void *)) +{ + return kmem_cache_create_usercopy(name, size, align, flags, 0, size, + ctor); +} EXPORT_SYMBOL(kmem_cache_create); static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work) @@ -612,6 +639,7 @@ void memcg_create_kmem_cache(struct mem_cgroup *memcg, s = create_cache(cache_name, root_cache->object_size, root_cache->size, root_cache->align, root_cache->flags & CACHE_CREATE_MASK, + root_cache->useroffset, root_cache->usersize, root_cache->ctor, memcg, root_cache); /* * If we could not create a memcg cache, do not complain, because @@ -879,13 +907,15 @@ bool slab_is_available(void) #ifndef CONFIG_SLOB /* Create a cache during boot when no slab services are available yet */ void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t size, - slab_flags_t flags) + slab_flags_t flags, size_t useroffset, size_t usersize) { int err; s->name = name; s->size = s->object_size = size; s->align = calculate_alignment(flags, ARCH_KMALLOC_MINALIGN, size); + s->useroffset = useroffset; + s->usersize = usersize; slab_init_memcg_params(s); @@ -906,7 +936,7 @@ struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size, if (!s) panic("Out of memory when creating slab %s\n", name); - create_boot_cache(s, name, size, flags); + create_boot_cache(s, name, size, flags, 0, size); list_add(&s->list, &slab_caches); memcg_link_cache(s); s->refcount = 1; diff --git a/mm/slub.c b/mm/slub.c index bcd22332300a..f40a57164dd6 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -4183,7 +4183,7 @@ void __init kmem_cache_init(void) kmem_cache = &boot_kmem_cache; create_boot_cache(kmem_cache_node, "kmem_cache_node", - sizeof(struct kmem_cache_node), SLAB_HWCACHE_ALIGN); + sizeof(struct kmem_cache_node), SLAB_HWCACHE_ALIGN, 0, 0); register_hotmemory_notifier(&slab_memory_callback_nb); @@ -4193,7 +4193,7 @@ void __init kmem_cache_init(void) create_boot_cache(kmem_cache, "kmem_cache", offsetof(struct kmem_cache, node) + nr_node_ids * sizeof(struct kmem_cache_node *), - SLAB_HWCACHE_ALIGN); + SLAB_HWCACHE_ALIGN, 0, 0); kmem_cache = bootstrap(&boot_kmem_cache); @@ -5063,6 +5063,12 @@ static ssize_t cache_dma_show(struct kmem_cache *s, char *buf) SLAB_ATTR_RO(cache_dma); #endif +static ssize_t usersize_show(struct kmem_cache *s, char *buf) +{ + return sprintf(buf, "%zu\n", s->usersize); +} +SLAB_ATTR_RO(usersize); + static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf) { return sprintf(buf, "%d\n", !!(s->flags & SLAB_TYPESAFE_BY_RCU)); @@ -5437,6 +5443,7 @@ static struct attribute *slab_attrs[] = { #ifdef CONFIG_FAILSLAB &failslab_attr.attr, #endif + &usersize_attr.attr, NULL }; From afcc90f8621e289cd082ba97900e76f01afe778c Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Wed, 10 Jan 2018 15:17:01 -0800 Subject: [PATCH 07/38] usercopy: 
WARN() on slab cache usercopy region violations This patch adds checking of usercopy cache whitelisting, and is modified from Brad Spengler/PaX Team's PAX_USERCOPY whitelisting code in the last public patch of grsecurity/PaX based on my understanding of the code. Changes or omissions from the original code are mine and don't reflect the original grsecurity/PaX code. The SLAB and SLUB allocators are modified to WARN() on all copy operations in which the kernel heap memory being modified falls outside of the cache's defined usercopy region. Based on an earlier patch from David Windsor. Cc: Christoph Lameter Cc: Pekka Enberg Cc: David Rientjes Cc: Joonsoo Kim Cc: Andrew Morton Cc: Laura Abbott Cc: Ingo Molnar Cc: Mark Rutland Cc: linux-mm@kvack.org Cc: linux-xfs@vger.kernel.org Signed-off-by: Kees Cook --- include/linux/uaccess.h | 2 ++ mm/slab.c | 22 +++++++++++++++++++--- mm/slub.c | 23 +++++++++++++++++++---- mm/usercopy.c | 21 ++++++++++++++++++--- 4 files changed, 58 insertions(+), 10 deletions(-) diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h index 38b6442dc569..efe79c1cdd47 100644 --- a/include/linux/uaccess.h +++ b/include/linux/uaccess.h @@ -274,6 +274,8 @@ extern long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count); #endif #ifdef CONFIG_HARDENED_USERCOPY +void usercopy_warn(const char *name, const char *detail, bool to_user, + unsigned long offset, unsigned long len); void __noreturn usercopy_abort(const char *name, const char *detail, bool to_user, unsigned long offset, unsigned long len); diff --git a/mm/slab.c b/mm/slab.c index 47acfe54e1ae..1c02f6e94235 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -4392,7 +4392,9 @@ module_init(slab_proc_init); #ifdef CONFIG_HARDENED_USERCOPY /* - * Rejects objects that are incorrectly sized. + * Rejects incorrectly sized objects and objects that are to be copied + * to/from userspace but do not fall entirely within the containing slab + * cache's usercopy region. * * Returns NULL if check passes, otherwise const char * to name of cache * to indicate an error. @@ -4412,10 +4414,24 @@ void __check_heap_object(const void *ptr, unsigned long n, struct page *page, /* Find offset within object. */ offset = ptr - index_to_obj(cachep, page, objnr) - obj_offset(cachep); - /* Allow address range falling entirely within object size. */ - if (offset <= cachep->object_size && n <= cachep->object_size - offset) + /* Allow address range falling entirely within usercopy region. */ + if (offset >= cachep->useroffset && + offset - cachep->useroffset <= cachep->usersize && + n <= cachep->useroffset - offset + cachep->usersize) return; + /* + * If the copy is still within the allocated object, produce + * a warning instead of rejecting the copy. This is intended + * to be a temporary method to find any missing usercopy + * whitelists. + */ + if (offset <= cachep->object_size && + n <= cachep->object_size - offset) { + usercopy_warn("SLAB object", cachep->name, to_user, offset, n); + return; + } + usercopy_abort("SLAB object", cachep->name, to_user, offset, n); } #endif /* CONFIG_HARDENED_USERCOPY */ diff --git a/mm/slub.c b/mm/slub.c index f40a57164dd6..6d9b1e7d3226 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -3813,7 +3813,9 @@ EXPORT_SYMBOL(__kmalloc_node); #ifdef CONFIG_HARDENED_USERCOPY /* - * Rejects objects that are incorrectly sized. + * Rejects incorrectly sized objects and objects that are to be copied + * to/from userspace but do not fall entirely within the containing slab + * cache's usercopy region. 
* * Returns NULL if check passes, otherwise const char * to name of cache * to indicate an error. @@ -3827,7 +3829,6 @@ void __check_heap_object(const void *ptr, unsigned long n, struct page *page, /* Find object and usable object size. */ s = page->slab_cache; - object_size = slab_ksize(s); /* Reject impossible pointers. */ if (ptr < page_address(page)) @@ -3845,10 +3846,24 @@ void __check_heap_object(const void *ptr, unsigned long n, struct page *page, offset -= s->red_left_pad; } - /* Allow address range falling entirely within object size. */ - if (offset <= object_size && n <= object_size - offset) + /* Allow address range falling entirely within usercopy region. */ + if (offset >= s->useroffset && + offset - s->useroffset <= s->usersize && + n <= s->useroffset - offset + s->usersize) return; + /* + * If the copy is still within the allocated object, produce + * a warning instead of rejecting the copy. This is intended + * to be a temporary method to find any missing usercopy + * whitelists. + */ + object_size = slab_ksize(s); + if (offset <= object_size && n <= object_size - offset) { + usercopy_warn("SLUB object", s->name, to_user, offset, n); + return; + } + usercopy_abort("SLUB object", s->name, to_user, offset, n); } #endif /* CONFIG_HARDENED_USERCOPY */ diff --git a/mm/usercopy.c b/mm/usercopy.c index a562dd094ace..e9e9325f7638 100644 --- a/mm/usercopy.c +++ b/mm/usercopy.c @@ -59,13 +59,28 @@ static noinline int check_stack_object(const void *obj, unsigned long len) } /* - * If this function is reached, then CONFIG_HARDENED_USERCOPY has found an - * unexpected state during a copy_from_user() or copy_to_user() call. + * If these functions are reached, then CONFIG_HARDENED_USERCOPY has found + * an unexpected state during a copy_from_user() or copy_to_user() call. * There are several checks being performed on the buffer by the * __check_object_size() function. Normal stack buffer usage should never * trip the checks, and kernel text addressing will always trip the check. - * For cache objects, copies must be within the object size. + * For cache objects, it is checking that only the whitelisted range of + * bytes for a given cache is being accessed (via the cache's usersize and + * useroffset fields). To adjust a cache whitelist, use the usercopy-aware + * kmem_cache_create_usercopy() function to create the cache (and + * carefully audit the whitelist range). */ +void usercopy_warn(const char *name, const char *detail, bool to_user, + unsigned long offset, unsigned long len) +{ + WARN_ONCE(1, "Bad or missing usercopy whitelist? Kernel memory %s attempt detected %s %s%s%s%s (offset %lu, size %lu)!\n", + to_user ? "exposure" : "overwrite", + to_user ? "from" : "to", + name ? : "unknown?!", + detail ? " '" : "", detail ? : "", detail ? "'" : "", + offset, len); +} + void __noreturn usercopy_abort(const char *name, const char *detail, bool to_user, unsigned long offset, unsigned long len) From 2d891fbc3bb681ba1f826e7ee70dbe38ca7465fe Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Thu, 30 Nov 2017 13:04:32 -0800 Subject: [PATCH 08/38] usercopy: Allow strict enforcement of whitelists This introduces CONFIG_HARDENED_USERCOPY_FALLBACK to control the behavior of hardened usercopy whitelist violations. By default, whitelist violations will continue to WARN() so that any bad or missing usercopy whitelists can be discovered without being too disruptive. 
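Taken together with the allocator checks added in the previous patch, the resulting per-copy logic has the following shape (a condensed sketch of the SLUB version; the SLAB side is analogous):

	/* Copy falls entirely within the whitelisted region: always allowed. */
	if (offset >= s->useroffset &&
	    offset - s->useroffset <= s->usersize &&
	    n <= s->useroffset - offset + s->usersize)
		return;

	/*
	 * Outside the whitelist but still inside the allocated object:
	 * WARN() while usercopy_fallback is enabled, otherwise fall
	 * through and reject the copy.
	 */
	object_size = slab_ksize(s);
	if (usercopy_fallback &&
	    offset <= object_size && n <= object_size - offset) {
		usercopy_warn("SLUB object", s->name, to_user, offset, n);
		return;
	}

	usercopy_abort("SLUB object", s->name, to_user, offset, n);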
If this config is disabled at build time or a system is booted with "slab_common.usercopy_fallback=0", usercopy whitelists will BUG() instead of WARN(). This is useful for admins that want to use usercopy whitelists immediately. Suggested-by: Matthew Garrett Signed-off-by: Kees Cook --- include/linux/slab.h | 2 ++ mm/slab.c | 3 ++- mm/slab_common.c | 8 ++++++++ mm/slub.c | 3 ++- security/Kconfig | 14 ++++++++++++++ 5 files changed, 28 insertions(+), 2 deletions(-) diff --git a/include/linux/slab.h b/include/linux/slab.h index 8bf14d9762ec..231abc8976c5 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -135,6 +135,8 @@ struct mem_cgroup; void __init kmem_cache_init(void); bool slab_is_available(void); +extern bool usercopy_fallback; + struct kmem_cache *kmem_cache_create(const char *name, size_t size, size_t align, slab_flags_t flags, void (*ctor)(void *)); diff --git a/mm/slab.c b/mm/slab.c index 1c02f6e94235..b9b0df620bb9 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -4426,7 +4426,8 @@ void __check_heap_object(const void *ptr, unsigned long n, struct page *page, * to be a temporary method to find any missing usercopy * whitelists. */ - if (offset <= cachep->object_size && + if (usercopy_fallback && + offset <= cachep->object_size && n <= cachep->object_size - offset) { usercopy_warn("SLAB object", cachep->name, to_user, offset, n); return; diff --git a/mm/slab_common.c b/mm/slab_common.c index fc3e66bdce75..a51f65408637 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -31,6 +31,14 @@ LIST_HEAD(slab_caches); DEFINE_MUTEX(slab_mutex); struct kmem_cache *kmem_cache; +#ifdef CONFIG_HARDENED_USERCOPY +bool usercopy_fallback __ro_after_init = + IS_ENABLED(CONFIG_HARDENED_USERCOPY_FALLBACK); +module_param(usercopy_fallback, bool, 0400); +MODULE_PARM_DESC(usercopy_fallback, + "WARN instead of reject usercopy whitelist violations"); +#endif + static LIST_HEAD(slab_caches_to_rcu_destroy); static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work); static DECLARE_WORK(slab_caches_to_rcu_destroy_work, diff --git a/mm/slub.c b/mm/slub.c index 6d9b1e7d3226..862d835b3042 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -3859,7 +3859,8 @@ void __check_heap_object(const void *ptr, unsigned long n, struct page *page, * whitelists. */ object_size = slab_ksize(s); - if (offset <= object_size && n <= object_size - offset) { + if (usercopy_fallback && + offset <= object_size && n <= object_size - offset) { usercopy_warn("SLUB object", s->name, to_user, offset, n); return; } diff --git a/security/Kconfig b/security/Kconfig index e8e449444e65..ae457b018da5 100644 --- a/security/Kconfig +++ b/security/Kconfig @@ -152,6 +152,20 @@ config HARDENED_USERCOPY or are part of the kernel text. This kills entire classes of heap overflow exploits and similar kernel memory exposures. +config HARDENED_USERCOPY_FALLBACK + bool "Allow usercopy whitelist violations to fallback to object size" + depends on HARDENED_USERCOPY + default y + help + This is a temporary option that allows missing usercopy whitelists + to be discovered via a WARN() to the kernel log, instead of + rejecting the copy, falling back to non-whitelisted hardened + usercopy that checks the slab allocation size instead of the + whitelist size. This option will be removed once it seems like + all missing usercopy whitelists have been identified and fixed. + Booting with "slab_common.usercopy_fallback=Y/N" can change + this setting. 
+ config HARDENED_USERCOPY_PAGESPAN bool "Refuse to copy allocations that span multiple pages" depends on HARDENED_USERCOPY From 6c0c21adc742a255f0203ef76ef0ed6c8c3b0630 Mon Sep 17 00:00:00 2001 From: David Windsor Date: Sat, 10 Jun 2017 22:50:47 -0400 Subject: [PATCH 09/38] usercopy: Mark kmalloc caches as usercopy caches Mark the kmalloc slab caches as entirely whitelisted. These caches are frequently used to fulfill kernel allocations that contain data to be copied to/from userspace. Internal-only uses are also common, but are scattered in the kernel. For now, mark all the kmalloc caches as whitelisted. This patch is modified from Brad Spengler/PaX Team's PAX_USERCOPY whitelisting code in the last public patch of grsecurity/PaX based on my understanding of the code. Changes or omissions from the original code are mine and don't reflect the original grsecurity/PaX code. Signed-off-by: David Windsor [kees: merged in moved kmalloc hunks, adjust commit log] Cc: Pekka Enberg Cc: David Rientjes Cc: Joonsoo Kim Cc: Andrew Morton Cc: linux-mm@kvack.org Cc: linux-xfs@vger.kernel.org Signed-off-by: Kees Cook Acked-by: Christoph Lameter --- mm/slab.c | 3 ++- mm/slab.h | 3 ++- mm/slab_common.c | 10 ++++++---- 3 files changed, 10 insertions(+), 6 deletions(-) diff --git a/mm/slab.c b/mm/slab.c index b9b0df620bb9..dd367fe17a4e 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -1291,7 +1291,8 @@ void __init kmem_cache_init(void) */ kmalloc_caches[INDEX_NODE] = create_kmalloc_cache( kmalloc_info[INDEX_NODE].name, - kmalloc_size(INDEX_NODE), ARCH_KMALLOC_FLAGS); + kmalloc_size(INDEX_NODE), ARCH_KMALLOC_FLAGS, + 0, kmalloc_size(INDEX_NODE)); slab_state = PARTIAL_NODE; setup_kmalloc_cache_index_table(); diff --git a/mm/slab.h b/mm/slab.h index 8f3030788e01..1f013f7795c6 100644 --- a/mm/slab.h +++ b/mm/slab.h @@ -97,7 +97,8 @@ struct kmem_cache *kmalloc_slab(size_t, gfp_t); int __kmem_cache_create(struct kmem_cache *, slab_flags_t flags); extern struct kmem_cache *create_kmalloc_cache(const char *name, size_t size, - slab_flags_t flags); + slab_flags_t flags, size_t useroffset, + size_t usersize); extern void create_boot_cache(struct kmem_cache *, const char *name, size_t size, slab_flags_t flags, size_t useroffset, size_t usersize); diff --git a/mm/slab_common.c b/mm/slab_common.c index a51f65408637..8ac2a6320a6c 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -937,14 +937,15 @@ void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t siz } struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size, - slab_flags_t flags) + slab_flags_t flags, size_t useroffset, + size_t usersize) { struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT); if (!s) panic("Out of memory when creating slab %s\n", name); - create_boot_cache(s, name, size, flags, 0, size); + create_boot_cache(s, name, size, flags, useroffset, usersize); list_add(&s->list, &slab_caches); memcg_link_cache(s); s->refcount = 1; @@ -1098,7 +1099,8 @@ void __init setup_kmalloc_cache_index_table(void) static void __init new_kmalloc_cache(int idx, slab_flags_t flags) { kmalloc_caches[idx] = create_kmalloc_cache(kmalloc_info[idx].name, - kmalloc_info[idx].size, flags); + kmalloc_info[idx].size, flags, 0, + kmalloc_info[idx].size); } /* @@ -1139,7 +1141,7 @@ void __init create_kmalloc_caches(slab_flags_t flags) BUG_ON(!n); kmalloc_dma_caches[i] = create_kmalloc_cache(n, - size, SLAB_CACHE_DMA | flags); + size, SLAB_CACHE_DMA | flags, 0, 0); } } #endif From 80344266c19b82a27e018083e9dd501048a6a06a Mon Sep 17 
00:00:00 2001 From: David Windsor Date: Sat, 10 Jun 2017 22:50:44 -0400 Subject: [PATCH 10/38] dcache: Define usercopy region in dentry_cache slab cache When a dentry name is short enough, it can be stored directly in the dentry itself (instead in a separate kmalloc allocation). These dentry short names, stored in struct dentry.d_iname and therefore contained in the dentry_cache slab cache, need to be coped to userspace. cache object allocation: fs/dcache.c: __d_alloc(...): ... dentry = kmem_cache_alloc(dentry_cache, ...); ... dentry->d_name.name = dentry->d_iname; example usage trace: filldir+0xb0/0x140 dcache_readdir+0x82/0x170 iterate_dir+0x142/0x1b0 SyS_getdents+0xb5/0x160 fs/readdir.c: (called via ctx.actor by dir_emit) filldir(..., const char *name, ...): ... copy_to_user(..., name, namlen) fs/libfs.c: dcache_readdir(...): ... next = next_positive(dentry, p, 1) ... dir_emit(..., next->d_name.name, ...) In support of usercopy hardening, this patch defines a region in the dentry_cache slab cache in which userspace copy operations are allowed. This region is known as the slab cache's usercopy region. Slab caches can now check that each dynamic copy operation involving cache-managed memory falls entirely within the slab's usercopy region. This patch is modified from Brad Spengler/PaX Team's PAX_USERCOPY whitelisting code in the last public patch of grsecurity/PaX based on my understanding of the code. Changes or omissions from the original code are mine and don't reflect the original grsecurity/PaX code. Signed-off-by: David Windsor [kees: adjust hunks for kmalloc-specific things moved later] [kees: adjust commit log, provide usage trace] Cc: Alexander Viro Cc: linux-fsdevel@vger.kernel.org Signed-off-by: Kees Cook --- fs/dcache.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/fs/dcache.c b/fs/dcache.c index 5c7df1df81ff..92ad7a2168e1 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -3601,8 +3601,9 @@ static void __init dcache_init(void) * but it is probably not worth it because of the cache nature * of the dcache. */ - dentry_cache = KMEM_CACHE(dentry, - SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD|SLAB_ACCOUNT); + dentry_cache = KMEM_CACHE_USERCOPY(dentry, + SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|SLAB_MEM_SPREAD|SLAB_ACCOUNT, + d_iname); /* Hash may have been set up in dcache_init_early */ if (!hashdist) From 6a9b88204cb00da2140afda27848b5d17a7402a3 Mon Sep 17 00:00:00 2001 From: David Windsor Date: Sat, 10 Jun 2017 22:50:30 -0400 Subject: [PATCH 11/38] vfs: Define usercopy region in names_cache slab caches VFS pathnames are stored in the names_cache slab cache, either inline or across an entire allocation entry (when approaching PATH_MAX). These are copied to/from userspace, so they must be entirely whitelisted. cache object allocation: include/linux/fs.h: #define __getname() kmem_cache_alloc(names_cachep, GFP_KERNEL) example usage trace: strncpy_from_user+0x4d/0x170 getname_flags+0x6f/0x1f0 user_path_at_empty+0x23/0x40 do_mount+0x69/0xda0 SyS_mount+0x83/0xd0 fs/namei.c: getname_flags(...): ... result = __getname(); ... kname = (char *)result->iname; result->name = kname; len = strncpy_from_user(kname, filename, EMBEDDED_NAME_MAX); ... if (unlikely(len == EMBEDDED_NAME_MAX)) { const size_t size = offsetof(struct filename, iname[1]); kname = (char *)result; result = kzalloc(size, GFP_KERNEL); ... 
result->name = kname; len = strncpy_from_user(kname, filename, PATH_MAX); In support of usercopy hardening, this patch defines the entire cache object in the names_cache slab cache as whitelisted, since it may entirely hold name strings to be copied to/from userspace. This patch is verbatim from Brad Spengler/PaX Team's PAX_USERCOPY whitelisting code in the last public patch of grsecurity/PaX based on my understanding of the code. Changes or omissions from the original code are mine and don't reflect the original grsecurity/PaX code. Signed-off-by: David Windsor [kees: adjust commit log, add usage trace] Cc: Alexander Viro Cc: linux-fsdevel@vger.kernel.org Signed-off-by: Kees Cook --- fs/dcache.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/fs/dcache.c b/fs/dcache.c index 92ad7a2168e1..9d7ee2de682c 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -3640,8 +3640,8 @@ void __init vfs_caches_init_early(void) void __init vfs_caches_init(void) { - names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0, - SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); + names_cachep = kmem_cache_create_usercopy("names_cache", PATH_MAX, 0, + SLAB_HWCACHE_ALIGN|SLAB_PANIC, 0, PATH_MAX, NULL); dcache_init(); inode_init(); From 6391af6f5829e8767c6d5e777194c9ecdd5d7ead Mon Sep 17 00:00:00 2001 From: David Windsor Date: Sat, 10 Jun 2017 22:50:31 -0400 Subject: [PATCH 12/38] vfs: Copy struct mount.mnt_id to userspace using put_user() The mnt_id field can be copied with put_user(), so there is no need to use copy_to_user(). In both cases, hardened usercopy is being bypassed since the size is constant, and not open to runtime manipulation. This patch is verbatim from Brad Spengler/PaX Team's PAX_USERCOPY whitelisting code in the last public patch of grsecurity/PaX based on my understanding of the code. Changes or omissions from the original code are mine and don't reflect the original grsecurity/PaX code. Signed-off-by: David Windsor [kees: adjust commit log] Cc: Alexander Viro Cc: linux-fsdevel@vger.kernel.org Signed-off-by: Kees Cook --- fs/fhandle.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/fs/fhandle.c b/fs/fhandle.c index 0ace128f5d23..0ee727485615 100644 --- a/fs/fhandle.c +++ b/fs/fhandle.c @@ -69,8 +69,7 @@ static long do_sys_name_to_handle(struct path *path, } else retval = 0; /* copy the mount id */ - if (copy_to_user(mnt_id, &real_mount(path->mnt)->mnt_id, - sizeof(*mnt_id)) || + if (put_user(real_mount(path->mnt)->mnt_id, mnt_id) || copy_to_user(ufh, handle, sizeof(struct file_handle) + handle_bytes)) retval = -EFAULT; From f8dd7c70869565085542cffd14bf93705492ce14 Mon Sep 17 00:00:00 2001 From: David Windsor Date: Sat, 10 Jun 2017 22:50:36 -0400 Subject: [PATCH 13/38] ext4: Define usercopy region in ext4_inode_cache slab cache The ext4 symlink pathnames, stored in struct ext4_inode_info.i_data and therefore contained in the ext4_inode_cache slab cache, need to be copied to/from userspace. cache object allocation: fs/ext4/super.c: ext4_alloc_inode(...): struct ext4_inode_info *ei; ... ei = kmem_cache_alloc(ext4_inode_cachep, GFP_NOFS); ... return &ei->vfs_inode; include/trace/events/ext4.h: #define EXT4_I(inode) \ (container_of(inode, struct ext4_inode_info, vfs_inode)) fs/ext4/namei.c: ext4_symlink(...): ... inode->i_link = (char *)&EXT4_I(inode)->i_data; example usage trace: readlink_copy+0x43/0x70 vfs_readlink+0x62/0x110 SyS_readlinkat+0x100/0x130 fs/namei.c: readlink_copy(..., link): ... 
copy_to_user(..., link, len) (inlined into vfs_readlink) generic_readlink(dentry, ...): struct inode *inode = d_inode(dentry); const char *link = inode->i_link; ... readlink_copy(..., link); In support of usercopy hardening, this patch defines a region in the ext4_inode_cache slab cache in which userspace copy operations are allowed. This region is known as the slab cache's usercopy region. Slab caches can now check that each dynamically sized copy operation involving cache-managed memory falls entirely within the slab's usercopy region. This patch is modified from Brad Spengler/PaX Team's PAX_USERCOPY whitelisting code in the last public patch of grsecurity/PaX based on my understanding of the code. Changes or omissions from the original code are mine and don't reflect the original grsecurity/PaX code. Signed-off-by: David Windsor [kees: adjust commit log, provide usage trace] Cc: "Theodore Ts'o" Cc: Andreas Dilger Cc: linux-ext4@vger.kernel.org Signed-off-by: Kees Cook --- fs/ext4/super.c | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 7c46693a14d7..57a8fa451d3e 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -1036,11 +1036,13 @@ static void init_once(void *foo) static int __init init_inodecache(void) { - ext4_inode_cachep = kmem_cache_create("ext4_inode_cache", - sizeof(struct ext4_inode_info), - 0, (SLAB_RECLAIM_ACCOUNT| - SLAB_MEM_SPREAD|SLAB_ACCOUNT), - init_once); + ext4_inode_cachep = kmem_cache_create_usercopy("ext4_inode_cache", + sizeof(struct ext4_inode_info), 0, + (SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD| + SLAB_ACCOUNT), + offsetof(struct ext4_inode_info, i_data), + sizeof_field(struct ext4_inode_info, i_data), + init_once); if (ext4_inode_cachep == NULL) return -ENOMEM; return 0; From 85212d4e04c345ccebb92b3afb4c978a0db56c56 Mon Sep 17 00:00:00 2001 From: David Windsor Date: Sat, 10 Jun 2017 22:50:35 -0400 Subject: [PATCH 14/38] ext2: Define usercopy region in ext2_inode_cache slab cache The ext2 symlink pathnames, stored in struct ext2_inode_info.i_data and therefore contained in the ext2_inode_cache slab cache, need to be copied to/from userspace. cache object allocation: fs/ext2/super.c: ext2_alloc_inode(...): struct ext2_inode_info *ei; ... ei = kmem_cache_alloc(ext2_inode_cachep, GFP_NOFS); ... return &ei->vfs_inode; fs/ext2/ext2.h: EXT2_I(struct inode *inode): return container_of(inode, struct ext2_inode_info, vfs_inode); fs/ext2/namei.c: ext2_symlink(...): ... inode->i_link = (char *)&EXT2_I(inode)->i_data; example usage trace: readlink_copy+0x43/0x70 vfs_readlink+0x62/0x110 SyS_readlinkat+0x100/0x130 fs/namei.c: readlink_copy(..., link): ... copy_to_user(..., link, len); (inlined into vfs_readlink) generic_readlink(dentry, ...): struct inode *inode = d_inode(dentry); const char *link = inode->i_link; ... readlink_copy(..., link); In support of usercopy hardening, this patch defines a region in the ext2_inode_cache slab cache in which userspace copy operations are allowed. This region is known as the slab cache's usercopy region. Slab caches can now check that each dynamically sized copy operation involving cache-managed memory falls entirely within the slab's usercopy region. This patch is modified from Brad Spengler/PaX Team's PAX_USERCOPY whitelisting code in the last public patch of grsecurity/PaX based on my understanding of the code. Changes or omissions from the original code are mine and don't reflect the original grsecurity/PaX code. 
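To make the effect concrete (hypothetical caller code, not part of this patch; ubuf and len are illustrative), a dynamically sized copy that stays within i_data still passes, while one sourced from the rest of the inode object now trips the whitelist check:

	struct ext2_inode_info *ei = EXT2_I(inode);
	size_t len = strlen(inode->i_link) + 1;	/* runtime size, so the check applies */

	/* Allowed: source range falls entirely within the whitelisted i_data. */
	if (copy_to_user(ubuf, ei->i_data, len))
		return -EFAULT;

	/* Trips the check: source starts before i_data, outside the region. */
	if (copy_to_user(ubuf, ei, len))
		return -EFAULT;

With the fallback option from earlier in the series enabled, the second copy only WARN()s; with strict enforcement it is rejected outright.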
Signed-off-by: David Windsor [kees: adjust commit log, provide usage trace] Cc: Jan Kara Cc: linux-ext4@vger.kernel.org Signed-off-by: Kees Cook Acked-by: Jan Kara --- fs/ext2/super.c | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/fs/ext2/super.c b/fs/ext2/super.c index 7646818ab266..50b8946c3d1a 100644 --- a/fs/ext2/super.c +++ b/fs/ext2/super.c @@ -220,11 +220,13 @@ static void init_once(void *foo) static int __init init_inodecache(void) { - ext2_inode_cachep = kmem_cache_create("ext2_inode_cache", - sizeof(struct ext2_inode_info), - 0, (SLAB_RECLAIM_ACCOUNT| - SLAB_MEM_SPREAD|SLAB_ACCOUNT), - init_once); + ext2_inode_cachep = kmem_cache_create_usercopy("ext2_inode_cache", + sizeof(struct ext2_inode_info), 0, + (SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD| + SLAB_ACCOUNT), + offsetof(struct ext2_inode_info, i_data), + sizeof_field(struct ext2_inode_info, i_data), + init_once); if (ext2_inode_cachep == NULL) return -ENOMEM; return 0; From 8d2704d382a9fd14f65dcdd2a3459067162fd4e6 Mon Sep 17 00:00:00 2001 From: David Windsor Date: Sat, 10 Jun 2017 22:50:38 -0400 Subject: [PATCH 15/38] jfs: Define usercopy region in jfs_ip slab cache The jfs symlink pathnames, stored in struct jfs_inode_info.i_inline and therefore contained in the jfs_ip slab cache, need to be copied to/from userspace. cache object allocation: fs/jfs/super.c: jfs_alloc_inode(...): ... jfs_inode = kmem_cache_alloc(jfs_inode_cachep, GFP_NOFS); ... return &jfs_inode->vfs_inode; fs/jfs/jfs_incore.h: JFS_IP(struct inode *inode): return container_of(inode, struct jfs_inode_info, vfs_inode); fs/jfs/inode.c: jfs_iget(...): ... inode->i_link = JFS_IP(inode)->i_inline; example usage trace: readlink_copy+0x43/0x70 vfs_readlink+0x62/0x110 SyS_readlinkat+0x100/0x130 fs/namei.c: readlink_copy(..., link): ... copy_to_user(..., link, len); (inlined in vfs_readlink) generic_readlink(dentry, ...): struct inode *inode = d_inode(dentry); const char *link = inode->i_link; ... readlink_copy(..., link); In support of usercopy hardening, this patch defines a region in the jfs_ip slab cache in which userspace copy operations are allowed. This region is known as the slab cache's usercopy region. Slab caches can now check that each dynamically sized copy operation involving cache-managed memory falls entirely within the slab's usercopy region. This patch is modified from Brad Spengler/PaX Team's PAX_USERCOPY whitelisting code in the last public patch of grsecurity/PaX based on my understanding of the code. Changes or omissions from the original code are mine and don't reflect the original grsecurity/PaX code. 
Signed-off-by: David Windsor [kees: adjust commit log, provide usage trace] Cc: Dave Kleikamp Cc: jfs-discussion@lists.sourceforge.net Signed-off-by: Kees Cook Acked-by: Dave Kleikamp --- fs/jfs/super.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/fs/jfs/super.c b/fs/jfs/super.c index 90373aebfdca..1b9264fd54b6 100644 --- a/fs/jfs/super.c +++ b/fs/jfs/super.c @@ -965,9 +965,11 @@ static int __init init_jfs_fs(void) int rc; jfs_inode_cachep = - kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0, - SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_ACCOUNT, - init_once); + kmem_cache_create_usercopy("jfs_ip", sizeof(struct jfs_inode_info), + 0, SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_ACCOUNT, + offsetof(struct jfs_inode_info, i_inline), + sizeof_field(struct jfs_inode_info, i_inline), + init_once); if (jfs_inode_cachep == NULL) return -ENOMEM; From 0fc256d3ad76f2552de0db781854506bb623715a Mon Sep 17 00:00:00 2001 From: David Windsor Date: Sat, 10 Jun 2017 22:50:32 -0400 Subject: [PATCH 16/38] befs: Define usercopy region in befs_inode_cache slab cache befs symlink pathnames, stored in struct befs_inode_info.i_data.symlink and therefore contained in the befs_inode_cache slab cache, need to be copied to/from userspace. cache object allocation: fs/befs/linuxvfs.c: befs_alloc_inode(...): ... bi = kmem_cache_alloc(befs_inode_cachep, GFP_KERNEL); ... return &bi->vfs_inode; befs_iget(...): ... strlcpy(befs_ino->i_data.symlink, raw_inode->data.symlink, BEFS_SYMLINK_LEN); ... inode->i_link = befs_ino->i_data.symlink; example usage trace: readlink_copy+0x43/0x70 vfs_readlink+0x62/0x110 SyS_readlinkat+0x100/0x130 fs/namei.c: readlink_copy(..., link): ... copy_to_user(..., link, len); (inlined in vfs_readlink) generic_readlink(dentry, ...): struct inode *inode = d_inode(dentry); const char *link = inode->i_link; ... readlink_copy(..., link); In support of usercopy hardening, this patch defines a region in the befs_inode_cache slab cache in which userspace copy operations are allowed. This region is known as the slab cache's usercopy region. Slab caches can now check that each dynamically sized copy operation involving cache-managed memory falls entirely within the slab's usercopy region. This patch is modified from Brad Spengler/PaX Team's PAX_USERCOPY whitelisting code in the last public patch of grsecurity/PaX based on my understanding of the code. Changes or omissions from the original code are mine and don't reflect the original grsecurity/PaX code. 
Signed-off-by: David Windsor [kees: adjust commit log, provide usage trace] Cc: Luis de Bethencourt Cc: Salah Triki Signed-off-by: Kees Cook Acked-by: Luis de Bethencourt --- fs/befs/linuxvfs.c | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c index ee236231cafa..af2832aaeec5 100644 --- a/fs/befs/linuxvfs.c +++ b/fs/befs/linuxvfs.c @@ -444,11 +444,15 @@ unacquire_none: static int __init befs_init_inodecache(void) { - befs_inode_cachep = kmem_cache_create("befs_inode_cache", - sizeof (struct befs_inode_info), - 0, (SLAB_RECLAIM_ACCOUNT| - SLAB_MEM_SPREAD|SLAB_ACCOUNT), - init_once); + befs_inode_cachep = kmem_cache_create_usercopy("befs_inode_cache", + sizeof(struct befs_inode_info), 0, + (SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD| + SLAB_ACCOUNT), + offsetof(struct befs_inode_info, + i_data.symlink), + sizeof_field(struct befs_inode_info, + i_data.symlink), + init_once); if (befs_inode_cachep == NULL) return -ENOMEM; From 2b06a9e336170d7e3c807c21d5cd85cbc92fff4e Mon Sep 17 00:00:00 2001 From: David Windsor Date: Sat, 10 Jun 2017 22:50:34 -0400 Subject: [PATCH 17/38] exofs: Define usercopy region in exofs_inode_cache slab cache The exofs short symlink names, stored in struct exofs_i_info.i_data and therefore contained in the exofs_inode_cache slab cache, need to be copied to/from userspace. cache object allocation: fs/exofs/super.c: exofs_alloc_inode(...): ... oi = kmem_cache_alloc(exofs_inode_cachep, GFP_KERNEL); ... return &oi->vfs_inode; fs/exofs/namei.c: exofs_symlink(...): ... inode->i_link = (char *)oi->i_data; example usage trace: readlink_copy+0x43/0x70 vfs_readlink+0x62/0x110 SyS_readlinkat+0x100/0x130 fs/namei.c: readlink_copy(..., link): ... copy_to_user(..., link, len); (inlined in vfs_readlink) generic_readlink(dentry, ...): struct inode *inode = d_inode(dentry); const char *link = inode->i_link; ... readlink_copy(..., link); In support of usercopy hardening, this patch defines a region in the exofs_inode_cache slab cache in which userspace copy operations are allowed. This region is known as the slab cache's usercopy region. Slab caches can now check that each dynamically sized copy operation involving cache-managed memory falls entirely within the slab's usercopy region. This patch is modified from Brad Spengler/PaX Team's PAX_USERCOPY whitelisting code in the last public patch of grsecurity/PaX based on my understanding of the code. Changes or omissions from the original code are mine and don't reflect the original grsecurity/PaX code. 
Signed-off-by: David Windsor [kees: adjust commit log, provide usage trace] Cc: Boaz Harrosh Signed-off-by: Kees Cook --- fs/exofs/super.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/fs/exofs/super.c b/fs/exofs/super.c index 819624cfc8da..e5c532875bb7 100644 --- a/fs/exofs/super.c +++ b/fs/exofs/super.c @@ -192,10 +192,13 @@ static void exofs_init_once(void *foo) */ static int init_inodecache(void) { - exofs_inode_cachep = kmem_cache_create("exofs_inode_cache", + exofs_inode_cachep = kmem_cache_create_usercopy("exofs_inode_cache", sizeof(struct exofs_i_info), 0, SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD | - SLAB_ACCOUNT, exofs_init_once); + SLAB_ACCOUNT, + offsetof(struct exofs_i_info, i_data), + sizeof_field(struct exofs_i_info, i_data), + exofs_init_once); if (exofs_inode_cachep == NULL) return -ENOMEM; return 0; From 6b330623e56900db8db3634b782093289c8ad175 Mon Sep 17 00:00:00 2001 From: David Windsor Date: Sat, 10 Jun 2017 22:50:39 -0400 Subject: [PATCH 18/38] orangefs: Define usercopy region in orangefs_inode_cache slab cache orangefs symlink pathnames, stored in struct orangefs_inode_s.link_target and therefore contained in the orangefs_inode_cache, need to be copied to/from userspace. cache object allocation: fs/orangefs/super.c: orangefs_alloc_inode(...): ... orangefs_inode = kmem_cache_alloc(orangefs_inode_cache, ...); ... return &orangefs_inode->vfs_inode; fs/orangefs/orangefs-utils.c: exofs_symlink(...): ... inode->i_link = orangefs_inode->link_target; example usage trace: readlink_copy+0x43/0x70 vfs_readlink+0x62/0x110 SyS_readlinkat+0x100/0x130 fs/namei.c: readlink_copy(..., link): ... copy_to_user(..., link, len); (inlined in vfs_readlink) generic_readlink(dentry, ...): struct inode *inode = d_inode(dentry); const char *link = inode->i_link; ... readlink_copy(..., link); In support of usercopy hardening, this patch defines a region in the orangefs_inode_cache slab cache in which userspace copy operations are allowed. This region is known as the slab cache's usercopy region. Slab caches can now check that each dynamically sized copy operation involving cache-managed memory falls entirely within the slab's usercopy region. This patch is modified from Brad Spengler/PaX Team's PAX_USERCOPY whitelisting code in the last public patch of grsecurity/PaX based on my understanding of the code. Changes or omissions from the original code are mine and don't reflect the original grsecurity/PaX code. 
Signed-off-by: David Windsor [kees: adjust commit log, provide usage trace] Cc: Mike Marshall Signed-off-by: Kees Cook --- fs/orangefs/super.c | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/fs/orangefs/super.c b/fs/orangefs/super.c index 36f1390b5ed7..62d49e53061c 100644 --- a/fs/orangefs/super.c +++ b/fs/orangefs/super.c @@ -610,11 +610,16 @@ void orangefs_kill_sb(struct super_block *sb) int orangefs_inode_cache_initialize(void) { - orangefs_inode_cache = kmem_cache_create("orangefs_inode_cache", - sizeof(struct orangefs_inode_s), - 0, - ORANGEFS_CACHE_CREATE_FLAGS, - orangefs_inode_cache_ctor); + orangefs_inode_cache = kmem_cache_create_usercopy( + "orangefs_inode_cache", + sizeof(struct orangefs_inode_s), + 0, + ORANGEFS_CACHE_CREATE_FLAGS, + offsetof(struct orangefs_inode_s, + link_target), + sizeof_field(struct orangefs_inode_s, + link_target), + orangefs_inode_cache_ctor); if (!orangefs_inode_cache) { gossip_err("Cannot create orangefs_inode_cache\n"); From df5f3cfc52fec828af92444bf02ad8fd4e4c59e3 Mon Sep 17 00:00:00 2001 From: David Windsor Date: Sat, 10 Jun 2017 22:50:40 -0400 Subject: [PATCH 19/38] ufs: Define usercopy region in ufs_inode_cache slab cache The ufs symlink pathnames, stored in struct ufs_inode_info.i_u1.i_symlink and therefore contained in the ufs_inode_cache slab cache, need to be copied to/from userspace. cache object allocation: fs/ufs/super.c: ufs_alloc_inode(...): ... ei = kmem_cache_alloc(ufs_inode_cachep, GFP_NOFS); ... return &ei->vfs_inode; fs/ufs/ufs.h: UFS_I(struct inode *inode): return container_of(inode, struct ufs_inode_info, vfs_inode); fs/ufs/namei.c: ufs_symlink(...): ... inode->i_link = (char *)UFS_I(inode)->i_u1.i_symlink; example usage trace: readlink_copy+0x43/0x70 vfs_readlink+0x62/0x110 SyS_readlinkat+0x100/0x130 fs/namei.c: readlink_copy(..., link): ... copy_to_user(..., link, len); (inlined in vfs_readlink) generic_readlink(dentry, ...): struct inode *inode = d_inode(dentry); const char *link = inode->i_link; ... readlink_copy(..., link); In support of usercopy hardening, this patch defines a region in the ufs_inode_cache slab cache in which userspace copy operations are allowed. This region is known as the slab cache's usercopy region. Slab caches can now check that each dynamically sized copy operation involving cache-managed memory falls entirely within the slab's usercopy region. This patch is modified from Brad Spengler/PaX Team's PAX_USERCOPY whitelisting code in the last public patch of grsecurity/PaX based on my understanding of the code. Changes or omissions from the original code are mine and don't reflect the original grsecurity/PaX code. 
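One detail worth noting: offsetof() and sizeof_field() both accept a nested member designator, which is how the window can land on a union member such as i_u1.i_symlink. A small sketch with illustrative names (not ufs's real definitions):

    union example_u1 {
        __u32  u1_blocks[12];
        __u8   u1_symlink[60];
    };

    struct example_inode_info {
        unsigned long    flags;
        union example_u1 i_u1;
    };

    /* Both resolve at compile time to the symlink sub-field of the union. */
    static const size_t example_useroffset =
        offsetof(struct example_inode_info, i_u1.u1_symlink);
    static const size_t example_usersize =
        sizeof_field(struct example_inode_info, i_u1.u1_symlink);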
Signed-off-by: David Windsor [kees: adjust commit log, provide usage trace] Cc: Evgeniy Dushistov Signed-off-by: Kees Cook --- fs/ufs/super.c | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/fs/ufs/super.c b/fs/ufs/super.c index 4d497e9c6883..652a77702aec 100644 --- a/fs/ufs/super.c +++ b/fs/ufs/super.c @@ -1466,11 +1466,14 @@ static void init_once(void *foo) static int __init init_inodecache(void) { - ufs_inode_cachep = kmem_cache_create("ufs_inode_cache", - sizeof(struct ufs_inode_info), - 0, (SLAB_RECLAIM_ACCOUNT| - SLAB_MEM_SPREAD|SLAB_ACCOUNT), - init_once); + ufs_inode_cachep = kmem_cache_create_usercopy("ufs_inode_cache", + sizeof(struct ufs_inode_info), 0, + (SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD| + SLAB_ACCOUNT), + offsetof(struct ufs_inode_info, i_u1.i_symlink), + sizeof_field(struct ufs_inode_info, + i_u1.i_symlink), + init_once); if (ufs_inode_cachep == NULL) return -ENOMEM; return 0; From e9a0561b7c8ef964078fa340fc1983f1f6d30544 Mon Sep 17 00:00:00 2001 From: David Windsor Date: Sat, 10 Jun 2017 22:50:37 -0400 Subject: [PATCH 20/38] vxfs: Define usercopy region in vxfs_inode slab cache vxfs symlink pathnames, stored in struct vxfs_inode_info field vii_immed.vi_immed and therefore contained in the vxfs_inode slab cache, need to be copied to/from userspace. cache object allocation: fs/freevxfs/vxfs_super.c: vxfs_alloc_inode(...): ... vi = kmem_cache_alloc(vxfs_inode_cachep, GFP_KERNEL); ... return &vi->vfs_inode; fs/freevxfs/vxfs_inode.c: cxfs_iget(...): ... inode->i_link = vip->vii_immed.vi_immed; example usage trace: readlink_copy+0x43/0x70 vfs_readlink+0x62/0x110 SyS_readlinkat+0x100/0x130 fs/namei.c: readlink_copy(..., link): ... copy_to_user(..., link, len); (inlined in vfs_readlink) generic_readlink(dentry, ...): struct inode *inode = d_inode(dentry); const char *link = inode->i_link; ... readlink_copy(..., link); In support of usercopy hardening, this patch defines a region in the vxfs_inode slab cache in which userspace copy operations are allowed. This region is known as the slab cache's usercopy region. Slab caches can now check that each dynamically sized copy operation involving cache-managed memory falls entirely within the slab's usercopy region. This patch is modified from Brad Spengler/PaX Team's PAX_USERCOPY whitelisting code in the last public patch of grsecurity/PaX based on my understanding of the code. Changes or omissions from the original code are mine and don't reflect the original grsecurity/PaX code. 
Signed-off-by: David Windsor [kees: adjust commit log, provide usage trace] Cc: Christoph Hellwig Signed-off-by: Kees Cook --- fs/freevxfs/vxfs_super.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/fs/freevxfs/vxfs_super.c b/fs/freevxfs/vxfs_super.c index f989efa051a0..48b24bb50d02 100644 --- a/fs/freevxfs/vxfs_super.c +++ b/fs/freevxfs/vxfs_super.c @@ -332,9 +332,13 @@ vxfs_init(void) { int rv; - vxfs_inode_cachep = kmem_cache_create("vxfs_inode", + vxfs_inode_cachep = kmem_cache_create_usercopy("vxfs_inode", sizeof(struct vxfs_inode_info), 0, - SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, NULL); + SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, + offsetof(struct vxfs_inode_info, vii_immed.vi_immed), + sizeof_field(struct vxfs_inode_info, + vii_immed.vi_immed), + NULL); if (!vxfs_inode_cachep) return -ENOMEM; rv = register_filesystem(&vxfs_fs_type); From de046449045a329bae5c9256e55b58a685a22532 Mon Sep 17 00:00:00 2001 From: David Windsor Date: Sat, 10 Jun 2017 22:50:33 -0400 Subject: [PATCH 21/38] cifs: Define usercopy region in cifs_request slab cache CIFS request buffers, stored in the cifs_request slab cache, need to be copied to/from userspace. cache object allocation: fs/cifs/cifsfs.c: cifs_init_request_bufs(): ... cifs_req_poolp = mempool_create_slab_pool(cifs_min_rcv, cifs_req_cachep); fs/cifs/misc.c: cifs_buf_get(): ... ret_buf = mempool_alloc(cifs_req_poolp, GFP_NOFS); ... return ret_buf; In support of usercopy hardening, this patch defines a region in the cifs_request slab cache in which userspace copy operations are allowed. This region is known as the slab cache's usercopy region. Slab caches can now check that each dynamically sized copy operation involving cache-managed memory falls entirely within the slab's usercopy region. This patch is verbatim from Brad Spengler/PaX Team's PAX_USERCOPY whitelisting code in the last public patch of grsecurity/PaX based on my understanding of the code. Changes or omissions from the original code are mine and don't reflect the original grsecurity/PaX code. 
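Unlike the inode caches in the preceding patches, these request buffers are copied to/from userspace in their entirety, so the whitelist window simply spans the whole object: useroffset 0 and usersize equal to the object size. A minimal sketch of that variant (cache name and size are illustrative):

    static struct kmem_cache *example_req_cachep;

    static int example_init_request_bufs(size_t bufsize)
    {
        example_req_cachep = kmem_cache_create_usercopy("example_request",
                bufsize, 0, SLAB_HWCACHE_ALIGN,
                0, bufsize,    /* window covers the entire buffer */
                NULL);
        return example_req_cachep ? 0 : -ENOMEM;
    }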
Signed-off-by: David Windsor [kees: adjust commit log, provide usage trace] Cc: Steve French Cc: linux-cifs@vger.kernel.org Signed-off-by: Kees Cook --- fs/cifs/cifsfs.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index 31b7565b1617..29f4b0290fbd 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c @@ -1231,9 +1231,11 @@ cifs_init_request_bufs(void) cifs_dbg(VFS, "CIFSMaxBufSize %d 0x%x\n", CIFSMaxBufSize, CIFSMaxBufSize); */ - cifs_req_cachep = kmem_cache_create("cifs_request", + cifs_req_cachep = kmem_cache_create_usercopy("cifs_request", CIFSMaxBufSize + max_hdr_size, 0, - SLAB_HWCACHE_ALIGN, NULL); + SLAB_HWCACHE_ALIGN, 0, + CIFSMaxBufSize + max_hdr_size, + NULL); if (cifs_req_cachep == NULL) return -ENOMEM; @@ -1259,9 +1261,9 @@ cifs_init_request_bufs(void) more SMBs to use small buffer alloc and is still much more efficient to alloc 1 per page off the slab compared to 17K (5page) alloc of large cifs buffers even when page debugging is on */ - cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq", + cifs_sm_req_cachep = kmem_cache_create_usercopy("cifs_small_rq", MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN, - NULL); + 0, MAX_CIFS_SMALL_BUFFER_SIZE, NULL); if (cifs_sm_req_cachep == NULL) { mempool_destroy(cifs_req_poolp); kmem_cache_destroy(cifs_req_cachep); From 0afe76e88c57d91ef5697720aed380a339e3df70 Mon Sep 17 00:00:00 2001 From: David Windsor Date: Sat, 10 Jun 2017 22:50:45 -0400 Subject: [PATCH 22/38] scsi: Define usercopy region in scsi_sense_cache slab cache SCSI sense buffers, stored in struct scsi_cmnd.sense and therefore contained in the scsi_sense_cache slab cache, need to be copied to/from userspace. cache object allocation: drivers/scsi/scsi_lib.c: scsi_select_sense_cache(...): return ... ? scsi_sense_isadma_cache : scsi_sense_cache scsi_alloc_sense_buffer(...): return kmem_cache_alloc_node(scsi_select_sense_cache(), ...); scsi_init_request(...): ... cmd->sense_buffer = scsi_alloc_sense_buffer(...); ... cmd->req.sense = cmd->sense_buffer example usage trace: block/scsi_ioctl.c: (inline from sg_io) blk_complete_sghdr_rq(...): struct scsi_request *req = scsi_req(rq); ... copy_to_user(..., req->sense, len) scsi_cmd_ioctl(...): sg_io(...); In support of usercopy hardening, this patch defines a region in the scsi_sense_cache slab cache in which userspace copy operations are allowed. This region is known as the slab cache's usercopy region. Slab caches can now check that each dynamically sized copy operation involving cache-managed memory falls entirely within the slab's usercopy region. Signed-off-by: David Windsor [kees: adjust commit log, provide usage trace] Cc: "James E.J. Bottomley" Cc: "Martin K. 
Petersen" Cc: linux-scsi@vger.kernel.org Signed-off-by: Kees Cook --- drivers/scsi/scsi_lib.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 1cbc497e00bd..164d062c4d94 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -79,14 +79,15 @@ int scsi_init_sense_cache(struct Scsi_Host *shost) if (shost->unchecked_isa_dma) { scsi_sense_isadma_cache = kmem_cache_create("scsi_sense_cache(DMA)", - SCSI_SENSE_BUFFERSIZE, 0, - SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA, NULL); + SCSI_SENSE_BUFFERSIZE, 0, + SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA, NULL); if (!scsi_sense_isadma_cache) ret = -ENOMEM; } else { scsi_sense_cache = - kmem_cache_create("scsi_sense_cache", - SCSI_SENSE_BUFFERSIZE, 0, SLAB_HWCACHE_ALIGN, NULL); + kmem_cache_create_usercopy("scsi_sense_cache", + SCSI_SENSE_BUFFERSIZE, 0, SLAB_HWCACHE_ALIGN, + 0, SCSI_SENSE_BUFFERSIZE, NULL); if (!scsi_sense_cache) ret = -ENOMEM; } From 30c2c9f158f6c9cef41e916d1c7c11097df4befb Mon Sep 17 00:00:00 2001 From: David Windsor Date: Sat, 10 Jun 2017 22:50:42 -0400 Subject: [PATCH 23/38] net: Define usercopy region in struct proto slab cache In support of usercopy hardening, this patch defines a region in the struct proto slab cache in which userspace copy operations are allowed. Some protocols need to copy objects to/from userspace, and they can declare the region via their proto structure with the new usersize and useroffset fields. Initially, if no region is specified (usersize == 0), the entire field is marked as whitelisted. This allows protocols to be whitelisted in subsequent patches. Once all protocols have been annotated, the full-whitelist default can be removed. This region is known as the slab cache's usercopy region. Slab caches can now check that each dynamically sized copy operation involving cache-managed memory falls entirely within the slab's usercopy region. This patch is modified from Brad Spengler/PaX Team's PAX_USERCOPY whitelisting code in the last public patch of grsecurity/PaX based on my understanding of the code. Changes or omissions from the original code are mine and don't reflect the original grsecurity/PaX code. Signed-off-by: David Windsor [kees: adjust commit log, split off per-proto patches] [kees: add logic for by-default full-whitelist] Cc: "David S. Miller" Cc: Eric Dumazet Cc: Paolo Abeni Cc: David Howells Cc: netdev@vger.kernel.org Signed-off-by: Kees Cook --- include/net/sock.h | 2 ++ net/core/sock.c | 6 +++++- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/include/net/sock.h b/include/net/sock.h index 79e1a2c7912c..b77a710ee831 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -1112,6 +1112,8 @@ struct proto { struct kmem_cache *slab; unsigned int obj_size; slab_flags_t slab_flags; + size_t useroffset; /* Usercopy region offset */ + size_t usersize; /* Usercopy region size */ struct percpu_counter *orphan_count; diff --git a/net/core/sock.c b/net/core/sock.c index c0b5b2f17412..261e6dbf0259 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -3151,8 +3151,12 @@ static int req_prot_init(const struct proto *prot) int proto_register(struct proto *prot, int alloc_slab) { if (alloc_slab) { - prot->slab = kmem_cache_create(prot->name, prot->obj_size, 0, + prot->slab = kmem_cache_create_usercopy(prot->name, + prot->obj_size, 0, SLAB_HWCACHE_ALIGN | prot->slab_flags, + prot->usersize ? prot->useroffset : 0, + prot->usersize ? 
prot->usersize + : prot->obj_size, NULL); if (prot->slab == NULL) { From 8c2bc895a9347846b33c47124a75db624aa83677 Mon Sep 17 00:00:00 2001 From: David Windsor Date: Thu, 24 Aug 2017 16:49:14 -0700 Subject: [PATCH 24/38] ip: Define usercopy region in IP proto slab cache The ICMP filters for IPv4 and IPv6 raw sockets need to be copied to/from userspace. In support of usercopy hardening, this patch defines a region in the struct proto slab cache in which userspace copy operations are allowed. example usage trace: net/ipv4/raw.c: raw_seticmpfilter(...): ... copy_from_user(&raw_sk(sk)->filter, ..., optlen) raw_geticmpfilter(...): ... copy_to_user(..., &raw_sk(sk)->filter, len) net/ipv6/raw.c: rawv6_seticmpfilter(...): ... copy_from_user(&raw6_sk(sk)->filter, ..., optlen) rawv6_geticmpfilter(...): ... copy_to_user(..., &raw6_sk(sk)->filter, len) This region is known as the slab cache's usercopy region. Slab caches can now check that each dynamically sized copy operation involving cache-managed memory falls entirely within the slab's usercopy region. This patch is modified from Brad Spengler/PaX Team's PAX_USERCOPY whitelisting code in the last public patch of grsecurity/PaX based on my understanding of the code. Changes or omissions from the original code are mine and don't reflect the original grsecurity/PaX code. Signed-off-by: David Windsor [kees: split from network patch, provide usage trace] Cc: "David S. Miller" Cc: Alexey Kuznetsov Cc: Hideaki YOSHIFUJI Cc: netdev@vger.kernel.org Signed-off-by: Kees Cook --- net/ipv4/raw.c | 2 ++ net/ipv6/raw.c | 2 ++ 2 files changed, 4 insertions(+) diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c index 33b70bfd1122..1b6fa4195ac9 100644 --- a/net/ipv4/raw.c +++ b/net/ipv4/raw.c @@ -970,6 +970,8 @@ struct proto raw_prot = { .hash = raw_hash_sk, .unhash = raw_unhash_sk, .obj_size = sizeof(struct raw_sock), + .useroffset = offsetof(struct raw_sock, filter), + .usersize = sizeof_field(struct raw_sock, filter), .h.raw_hash = &raw_v4_hashinfo, #ifdef CONFIG_COMPAT .compat_setsockopt = compat_raw_setsockopt, diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c index 761a473a07c5..08a85fabdfd1 100644 --- a/net/ipv6/raw.c +++ b/net/ipv6/raw.c @@ -1272,6 +1272,8 @@ struct proto rawv6_prot = { .hash = raw_hash_sk, .unhash = raw_unhash_sk, .obj_size = sizeof(struct raw6_sock), + .useroffset = offsetof(struct raw6_sock, filter), + .usersize = sizeof_field(struct raw6_sock, filter), .h.raw_hash = &raw_v6_hashinfo, #ifdef CONFIG_COMPAT .compat_setsockopt = compat_rawv6_setsockopt, From 93070d339d7bc6f6b07b64faf5134fd144e8ec48 Mon Sep 17 00:00:00 2001 From: David Windsor Date: Thu, 24 Aug 2017 16:58:35 -0700 Subject: [PATCH 25/38] caif: Define usercopy region in caif proto slab cache The CAIF channel connection request parameters need to be copied to/from userspace. In support of usercopy hardening, this patch defines a region in the struct proto slab cache in which userspace copy operations are allowed. example usage trace: net/caif/caif_socket.c: setsockopt(...): ... copy_from_user(&cf_sk->conn_req.param.data, ..., ol) This region is known as the slab cache's usercopy region. Slab caches can now check that each dynamically sized copy operation involving cache-managed memory falls entirely within the slab's usercopy region. This patch is modified from Brad Spengler/PaX Team's PAX_USERCOPY whitelisting code in the last public patch of grsecurity/PaX based on my understanding of the code. 
Changes or omissions from the original code are mine and don't reflect the original grsecurity/PaX code. Signed-off-by: David Windsor [kees: split from network patch, provide usage trace] Cc: "David S. Miller" Cc: netdev@vger.kernel.org Signed-off-by: Kees Cook --- net/caif/caif_socket.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c index 632d5a416d97..c76d513b9a7a 100644 --- a/net/caif/caif_socket.c +++ b/net/caif/caif_socket.c @@ -1032,6 +1032,8 @@ static int caif_create(struct net *net, struct socket *sock, int protocol, static struct proto prot = {.name = "PF_CAIF", .owner = THIS_MODULE, .obj_size = sizeof(struct caifsock), + .useroffset = offsetof(struct caifsock, conn_req.param), + .usersize = sizeof_field(struct caifsock, conn_req.param) }; if (!capable(CAP_SYS_ADMIN) && !capable(CAP_NET_ADMIN)) From ab9ee8e38b292f9a6698a4fedbb6ff8d08ce2012 Mon Sep 17 00:00:00 2001 From: David Windsor Date: Thu, 24 Aug 2017 16:57:57 -0700 Subject: [PATCH 26/38] sctp: Define usercopy region in SCTP proto slab cache The SCTP socket event notification subscription information need to be copied to/from userspace. In support of usercopy hardening, this patch defines a region in the struct proto slab cache in which userspace copy operations are allowed. Additionally moves the usercopy fields to be adjacent for the region to cover both. example usage trace: net/sctp/socket.c: sctp_getsockopt_events(...): ... copy_to_user(..., &sctp_sk(sk)->subscribe, len) sctp_setsockopt_events(...): ... copy_from_user(&sctp_sk(sk)->subscribe, ..., optlen) sctp_getsockopt_initmsg(...): ... copy_to_user(..., &sctp_sk(sk)->initmsg, len) This region is known as the slab cache's usercopy region. Slab caches can now check that each dynamically sized copy operation involving cache-managed memory falls entirely within the slab's usercopy region. This patch is modified from Brad Spengler/PaX Team's PAX_USERCOPY whitelisting code in the last public patch of grsecurity/PaX based on my understanding of the code. Changes or omissions from the original code are mine and don't reflect the original grsecurity/PaX code. Signed-off-by: David Windsor [kees: split from network patch, move struct members adjacent] [kees: add SCTPv6 struct whitelist, provide usage trace] Cc: Vlad Yasevich Cc: Neil Horman Cc: "David S. Miller" Cc: linux-sctp@vger.kernel.org Cc: netdev@vger.kernel.org Signed-off-by: Kees Cook --- include/net/sctp/structs.h | 9 +++++++-- net/sctp/socket.c | 8 ++++++++ 2 files changed, 15 insertions(+), 2 deletions(-) diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index 16f949eef52f..6168e3449131 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -202,12 +202,17 @@ struct sctp_sock { /* Flags controlling Heartbeat, SACK delay, and Path MTU Discovery. */ __u32 param_flags; - struct sctp_initmsg initmsg; struct sctp_rtoinfo rtoinfo; struct sctp_paddrparams paddrparam; - struct sctp_event_subscribe subscribe; struct sctp_assocparams assocparams; + /* + * These two structures must be grouped together for the usercopy + * whitelist region. 
+ */ + struct sctp_event_subscribe subscribe; + struct sctp_initmsg initmsg; + int user_frag; __u32 autoclose; diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 014847e25648..efbc8f52c531 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -8470,6 +8470,10 @@ struct proto sctp_prot = { .unhash = sctp_unhash, .get_port = sctp_get_port, .obj_size = sizeof(struct sctp_sock), + .useroffset = offsetof(struct sctp_sock, subscribe), + .usersize = offsetof(struct sctp_sock, initmsg) - + offsetof(struct sctp_sock, subscribe) + + sizeof_field(struct sctp_sock, initmsg), .sysctl_mem = sysctl_sctp_mem, .sysctl_rmem = sysctl_sctp_rmem, .sysctl_wmem = sysctl_sctp_wmem, @@ -8509,6 +8513,10 @@ struct proto sctpv6_prot = { .unhash = sctp_unhash, .get_port = sctp_get_port, .obj_size = sizeof(struct sctp6_sock), + .useroffset = offsetof(struct sctp6_sock, sctp.subscribe), + .usersize = offsetof(struct sctp6_sock, sctp.initmsg) - + offsetof(struct sctp6_sock, sctp.subscribe) + + sizeof_field(struct sctp6_sock, sctp.initmsg), .sysctl_mem = sysctl_sctp_mem, .sysctl_rmem = sysctl_sctp_rmem, .sysctl_wmem = sysctl_sctp_wmem, From b2ce04c2a336c39539b51955fd15fe27ebc96080 Mon Sep 17 00:00:00 2001 From: David Windsor Date: Sat, 10 Jun 2017 22:50:43 -0400 Subject: [PATCH 27/38] sctp: Copy struct sctp_sock.autoclose to userspace using put_user() The autoclose field can be copied with put_user(), so there is no need to use copy_to_user(). In both cases, hardened usercopy is being bypassed since the size is constant, and not open to runtime manipulation. This patch is verbatim from Brad Spengler/PaX Team's PAX_USERCOPY whitelisting code in the last public patch of grsecurity/PaX based on my understanding of the code. Changes or omissions from the original code are mine and don't reflect the original grsecurity/PaX code. Signed-off-by: David Windsor [kees: adjust commit log] Cc: Vlad Yasevich Cc: Neil Horman Cc: "David S. 
Miller" Cc: linux-sctp@vger.kernel.org Cc: netdev@vger.kernel.org Signed-off-by: Kees Cook --- net/sctp/socket.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/sctp/socket.c b/net/sctp/socket.c index efbc8f52c531..15491491ec88 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -5011,7 +5011,7 @@ static int sctp_getsockopt_autoclose(struct sock *sk, int len, char __user *optv len = sizeof(int); if (put_user(len, optlen)) return -EFAULT; - if (copy_to_user(optval, &sctp_sk(sk)->autoclose, sizeof(int))) + if (put_user(sctp_sk(sk)->autoclose, (int __user *)optval)) return -EFAULT; return 0; } From 289a4860d1f5de35b308c1c9e7c8592022c90af9 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Thu, 24 Aug 2017 16:59:38 -0700 Subject: [PATCH 28/38] net: Restrict unwhitelisted proto caches to size 0 Now that protocols have been annotated (the copy of icsk_ca_ops->name is of an ops field from outside the slab cache): $ git grep 'copy_.*_user.*sk.*->' caif/caif_socket.c: copy_from_user(&cf_sk->conn_req.param.data, ov, ol)) { ipv4/raw.c: if (copy_from_user(&raw_sk(sk)->filter, optval, optlen)) ipv4/raw.c: copy_to_user(optval, &raw_sk(sk)->filter, len)) ipv4/tcp.c: if (copy_to_user(optval, icsk->icsk_ca_ops->name, len)) ipv4/tcp.c: if (copy_to_user(optval, icsk->icsk_ulp_ops->name, len)) ipv6/raw.c: if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen)) ipv6/raw.c: if (copy_to_user(optval, &raw6_sk(sk)->filter, len)) sctp/socket.c: if (copy_from_user(&sctp_sk(sk)->subscribe, optval, optlen)) sctp/socket.c: if (copy_to_user(optval, &sctp_sk(sk)->subscribe, len)) sctp/socket.c: if (copy_to_user(optval, &sctp_sk(sk)->initmsg, len)) we can switch the default proto usercopy region to size 0. Any protocols needing to add whitelisted regions must annotate the fields with the useroffset and usersize fields of struct proto. This patch is modified from Brad Spengler/PaX Team's PAX_USERCOPY whitelisting code in the last public patch of grsecurity/PaX based on my understanding of the code. Changes or omissions from the original code are mine and don't reflect the original grsecurity/PaX code. Cc: "David S. Miller" Cc: Eric Dumazet Cc: Paolo Abeni Cc: David Howells Cc: netdev@vger.kernel.org Signed-off-by: Kees Cook --- net/core/sock.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/net/core/sock.c b/net/core/sock.c index 261e6dbf0259..f39206b41b32 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -3154,9 +3154,7 @@ int proto_register(struct proto *prot, int alloc_slab) prot->slab = kmem_cache_create_usercopy(prot->name, prot->obj_size, 0, SLAB_HWCACHE_ALIGN | prot->slab_flags, - prot->usersize ? prot->useroffset : 0, - prot->usersize ? prot->usersize - : prot->obj_size, + prot->useroffset, prot->usersize, NULL); if (prot->slab == NULL) { From 07dcd7fe89938934ddad65f738bc5aac89b8e54d Mon Sep 17 00:00:00 2001 From: David Windsor Date: Tue, 15 Aug 2017 16:45:00 -0700 Subject: [PATCH 29/38] fork: Define usercopy region in mm_struct slab caches In support of usercopy hardening, this patch defines a region in the mm_struct slab caches in which userspace copy operations are allowed. Only the auxv field is copied to userspace. cache object allocation: kernel/fork.c: #define allocate_mm() (kmem_cache_alloc(mm_cachep, GFP_KERNEL)) dup_mm(): ... mm = allocate_mm(); copy_mm(...): ... dup_mm(); copy_process(...): ... copy_mm(...) _do_fork(...): ... copy_process(...) example usage trace: fs/binfmt_elf.c: create_elf_tables(...): ... 
elf_info = (elf_addr_t *)current->mm->saved_auxv; ... copy_to_user(..., elf_info, ei_index * sizeof(elf_addr_t)) load_elf_binary(...): ... create_elf_tables(...); This region is known as the slab cache's usercopy region. Slab caches can now check that each dynamically sized copy operation involving cache-managed memory falls entirely within the slab's usercopy region. This patch is modified from Brad Spengler/PaX Team's PAX_USERCOPY whitelisting code in the last public patch of grsecurity/PaX based on my understanding of the code. Changes or omissions from the original code are mine and don't reflect the original grsecurity/PaX code. Signed-off-by: David Windsor [kees: adjust commit log, split patch, provide usage trace] Cc: Ingo Molnar Cc: Andrew Morton Cc: Thomas Gleixner Cc: Andy Lutomirski Signed-off-by: Kees Cook Acked-by: Rik van Riel --- kernel/fork.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/kernel/fork.c b/kernel/fork.c index 432eadf6b58c..82f2a0441d3b 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -2225,9 +2225,11 @@ void __init proc_caches_init(void) * maximum number of CPU's we can ever have. The cpumask_allocation * is at the end of the structure, exactly for that reason. */ - mm_cachep = kmem_cache_create("mm_struct", + mm_cachep = kmem_cache_create_usercopy("mm_struct", sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN, SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT, + offsetof(struct mm_struct, saved_auxv), + sizeof_field(struct mm_struct, saved_auxv), NULL); vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC|SLAB_ACCOUNT); mmap_init(); From f9d29946c56734e954459bc9a0e688a8ae9b4cbf Mon Sep 17 00:00:00 2001 From: David Windsor Date: Sat, 10 Jun 2017 22:50:41 -0400 Subject: [PATCH 30/38] fork: Define usercopy region in thread_stack slab caches In support of usercopy hardening, this patch defines a region in the thread_stack slab caches in which userspace copy operations are allowed. Since the entire thread_stack needs to be available to userspace, the entire slab contents are whitelisted. Note that the slab-based thread stack is only present on systems with THREAD_SIZE < PAGE_SIZE and !CONFIG_VMAP_STACK. cache object allocation: kernel/fork.c: alloc_thread_stack_node(...): return kmem_cache_alloc_node(thread_stack_cache, ...) dup_task_struct(...): ... stack = alloc_thread_stack_node(...) ... tsk->stack = stack; copy_process(...): ... dup_task_struct(...) _do_fork(...): ... copy_process(...) This region is known as the slab cache's usercopy region. Slab caches can now check that each dynamically sized copy operation involving cache-managed memory falls entirely within the slab's usercopy region. This patch is modified from Brad Spengler/PaX Team's PAX_USERCOPY whitelisting code in the last public patch of grsecurity/PaX based on my understanding of the code. Changes or omissions from the original code are mine and don't reflect the original grsecurity/PaX code. 
Signed-off-by: David Windsor [kees: adjust commit log, split patch, provide usage trace] Cc: Ingo Molnar Cc: Andrew Morton Cc: Thomas Gleixner Cc: Andy Lutomirski Signed-off-by: Kees Cook Acked-by: Rik van Riel --- kernel/fork.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/kernel/fork.c b/kernel/fork.c index 82f2a0441d3b..0e086af148f2 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -282,8 +282,9 @@ static void free_thread_stack(struct task_struct *tsk) void thread_stack_cache_init(void) { - thread_stack_cache = kmem_cache_create("thread_stack", THREAD_SIZE, - THREAD_SIZE, 0, NULL); + thread_stack_cache = kmem_cache_create_usercopy("thread_stack", + THREAD_SIZE, THREAD_SIZE, 0, 0, + THREAD_SIZE, NULL); BUG_ON(thread_stack_cache == NULL); } # endif From 5905429ad85657c28d93ec3d826ddeea1f44c3ce Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Wed, 16 Aug 2017 13:00:58 -0700 Subject: [PATCH 31/38] fork: Provide usercopy whitelisting for task_struct MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit While the blocked and saved_sigmask fields of task_struct are copied to userspace (via sigmask_to_save() and setup_rt_frame()), it is always copied with a static length (i.e. sizeof(sigset_t)). The only portion of task_struct that is potentially dynamically sized and may be copied to userspace is in the architecture-specific thread_struct at the end of task_struct. cache object allocation: kernel/fork.c: alloc_task_struct_node(...): return kmem_cache_alloc_node(task_struct_cachep, ...); dup_task_struct(...): ... tsk = alloc_task_struct_node(node); copy_process(...): ... dup_task_struct(...) _do_fork(...): ... copy_process(...) example usage trace: arch/x86/kernel/fpu/signal.c: __fpu__restore_sig(...): ... struct task_struct *tsk = current; struct fpu *fpu = &tsk->thread.fpu; ... __copy_from_user(&fpu->state.xsave, ..., state_size); fpu__restore_sig(...): ... return __fpu__restore_sig(...); arch/x86/kernel/signal.c: restore_sigcontext(...): ... fpu__restore_sig(...) This introduces arch_thread_struct_whitelist() to let an architecture declare specifically where the whitelist should be within thread_struct. If undefined, the entire thread_struct field is left whitelisted. Cc: Andrew Morton Cc: Nicholas Piggin Cc: Laura Abbott Cc: "Mickaël Salaün" Cc: Ingo Molnar Cc: Thomas Gleixner Cc: Andy Lutomirski Signed-off-by: Kees Cook Acked-by: Rik van Riel --- arch/Kconfig | 11 +++++++++++ include/linux/sched/task.h | 14 ++++++++++++++ kernel/fork.c | 22 ++++++++++++++++++++-- 3 files changed, 45 insertions(+), 2 deletions(-) diff --git a/arch/Kconfig b/arch/Kconfig index 400b9e1b2f27..8911ff37335a 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -242,6 +242,17 @@ config ARCH_INIT_TASK config ARCH_TASK_STRUCT_ALLOCATOR bool +config HAVE_ARCH_THREAD_STRUCT_WHITELIST + bool + depends on !ARCH_TASK_STRUCT_ALLOCATOR + help + An architecture should select this to provide hardened usercopy + knowledge about what region of the thread_struct should be + whitelisted for copying to userspace. Normally this is only the + FPU registers. Specifically, arch_thread_struct_whitelist() + should be implemented. Without this, the entire thread_struct + field in task_struct will be left whitelisted. 
+ # Select if arch has its private alloc_thread_stack() function config ARCH_THREAD_STACK_ALLOCATOR bool diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h index 05b8650f06f5..5be31eb7b266 100644 --- a/include/linux/sched/task.h +++ b/include/linux/sched/task.h @@ -104,6 +104,20 @@ extern int arch_task_struct_size __read_mostly; # define arch_task_struct_size (sizeof(struct task_struct)) #endif +#ifndef CONFIG_HAVE_ARCH_THREAD_STRUCT_WHITELIST +/* + * If an architecture has not declared a thread_struct whitelist we + * must assume something there may need to be copied to userspace. + */ +static inline void arch_thread_struct_whitelist(unsigned long *offset, + unsigned long *size) +{ + *offset = 0; + /* Handle dynamically sized thread_struct. */ + *size = arch_task_struct_size - offsetof(struct task_struct, thread); +} +#endif + #ifdef CONFIG_VMAP_STACK static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t) { diff --git a/kernel/fork.c b/kernel/fork.c index 0e086af148f2..5977e691c754 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -458,6 +458,21 @@ static void set_max_threads(unsigned int max_threads_suggested) int arch_task_struct_size __read_mostly; #endif +static void task_struct_whitelist(unsigned long *offset, unsigned long *size) +{ + /* Fetch thread_struct whitelist for the architecture. */ + arch_thread_struct_whitelist(offset, size); + + /* + * Handle zero-sized whitelist or empty thread_struct, otherwise + * adjust offset to position of thread_struct in task_struct. + */ + if (unlikely(*size == 0)) + *offset = 0; + else + *offset += offsetof(struct task_struct, thread); +} + void __init fork_init(void) { int i; @@ -466,11 +481,14 @@ void __init fork_init(void) #define ARCH_MIN_TASKALIGN 0 #endif int align = max_t(int, L1_CACHE_BYTES, ARCH_MIN_TASKALIGN); + unsigned long useroffset, usersize; /* create a slab on which task_structs can be allocated */ - task_struct_cachep = kmem_cache_create("task_struct", + task_struct_whitelist(&useroffset, &usersize); + task_struct_cachep = kmem_cache_create_usercopy("task_struct", arch_task_struct_size, align, - SLAB_PANIC|SLAB_ACCOUNT, NULL); + SLAB_PANIC|SLAB_ACCOUNT, + useroffset, usersize, NULL); #endif /* do the arch specific task caches init */ From f7d83c1cf3c77ae45876792aee5285ae970413ac Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Wed, 16 Aug 2017 13:26:03 -0700 Subject: [PATCH 32/38] x86: Implement thread_struct whitelist for hardened usercopy This whitelists the FPU register state portion of the thread_struct for copying to userspace, instead of the default entire struct. This is needed because FPU register state is dynamically sized, so it doesn't bypass the hardened usercopy checks. Cc: Thomas Gleixner Cc: Ingo Molnar Cc: "H. 
Peter Anvin" Cc: x86@kernel.org Cc: Borislav Petkov Cc: Andy Lutomirski Cc: Mathias Krause Signed-off-by: Kees Cook Acked-by: Rik van Riel --- arch/x86/Kconfig | 1 + arch/x86/include/asm/processor.h | 8 ++++++++ 2 files changed, 9 insertions(+) diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 8eed3f94bfc7..9ac4ac1a856b 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -116,6 +116,7 @@ config X86 select HAVE_ARCH_MMAP_RND_COMPAT_BITS if MMU && COMPAT select HAVE_ARCH_COMPAT_MMAP_BASES if MMU && COMPAT select HAVE_ARCH_SECCOMP_FILTER + select HAVE_ARCH_THREAD_STRUCT_WHITELIST select HAVE_ARCH_TRACEHOOK select HAVE_ARCH_TRANSPARENT_HUGEPAGE select HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD if X86_64 diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index cc16fa882e3e..2b037b7fe0eb 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h @@ -492,6 +492,14 @@ struct thread_struct { */ }; +/* Whitelist the FPU state from the task_struct for hardened usercopy. */ +static inline void arch_thread_struct_whitelist(unsigned long *offset, + unsigned long *size) +{ + *offset = offsetof(struct thread_struct, fpu.state); + *size = fpu_kernel_xstate_size; +} + /* * Thread-synchronous status. * From 9e8084d3f761413b2d58b2625bc6e332eab2bfce Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Wed, 16 Aug 2017 14:05:09 -0700 Subject: [PATCH 33/38] arm64: Implement thread_struct whitelist for hardened usercopy While ARM64 carries FPU state in the thread structure that is saved and restored during signal handling, it doesn't need to declare a usercopy whitelist, since existing accessors are all either using a bounce buffer (for which whitelisting isn't checking the slab), are statically sized (which will bypass the hardened usercopy check), or both. Cc: Catalin Marinas Cc: Will Deacon Cc: Christian Borntraeger Cc: Ingo Molnar Cc: James Morse Cc: "Peter Zijlstra (Intel)" Cc: Dave Martin Cc: zijun_hu Cc: linux-arm-kernel@lists.infradead.org Signed-off-by: Kees Cook --- arch/arm64/Kconfig | 1 + arch/arm64/include/asm/processor.h | 10 ++++++++++ 2 files changed, 11 insertions(+) diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index a93339f5178f..c84477e6a884 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -90,6 +90,7 @@ config ARM64 select HAVE_ARCH_MMAP_RND_BITS select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT select HAVE_ARCH_SECCOMP_FILTER + select HAVE_ARCH_THREAD_STRUCT_WHITELIST select HAVE_ARCH_TRACEHOOK select HAVE_ARCH_TRANSPARENT_HUGEPAGE select HAVE_ARCH_VMAP_STACK diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h index 023cacb946c3..ba6c9ce6cc75 100644 --- a/arch/arm64/include/asm/processor.h +++ b/arch/arm64/include/asm/processor.h @@ -113,6 +113,16 @@ struct thread_struct { struct debug_info debug; /* debugging */ }; +/* + * Everything usercopied to/from thread_struct is statically-sized, so + * no hardened usercopy whitelist is needed. 
+ */ +static inline void arch_thread_struct_whitelist(unsigned long *offset, + unsigned long *size) +{ + *offset = *size = 0; +} + #ifdef CONFIG_COMPAT #define task_user_tls(t) \ ({ \ From 08626a6056aad824c43d34ce587ab2b01f49d1a4 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Wed, 16 Aug 2017 14:09:13 -0700 Subject: [PATCH 34/38] arm: Implement thread_struct whitelist for hardened usercopy While ARM32 carries FPU state in the thread structure that is saved and restored during signal handling, it doesn't need to declare a usercopy whitelist, since existing accessors are all either using a bounce buffer (for which whitelisting isn't checking the slab), are statically sized (which will bypass the hardened usercopy check), or both. Cc: Russell King Cc: Ingo Molnar Cc: Christian Borntraeger Cc: "Peter Zijlstra (Intel)" Cc: linux-arm-kernel@lists.infradead.org Signed-off-by: Kees Cook --- arch/arm/Kconfig | 1 + arch/arm/include/asm/processor.h | 10 ++++++++++ 2 files changed, 11 insertions(+) diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 51c8df561077..3ea00d65f35d 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -50,6 +50,7 @@ config ARM select HAVE_ARCH_KGDB if !CPU_ENDIAN_BE32 && MMU select HAVE_ARCH_MMAP_RND_BITS if MMU select HAVE_ARCH_SECCOMP_FILTER if (AEABI && !OABI_COMPAT) + select HAVE_ARCH_THREAD_STRUCT_WHITELIST select HAVE_ARCH_TRACEHOOK select HAVE_ARM_SMCCC if CPU_V7 select HAVE_EBPF_JIT if !CPU_ENDIAN_BE32 diff --git a/arch/arm/include/asm/processor.h b/arch/arm/include/asm/processor.h index 338cbe0a18ef..1bf65b47808a 100644 --- a/arch/arm/include/asm/processor.h +++ b/arch/arm/include/asm/processor.h @@ -45,6 +45,16 @@ struct thread_struct { struct debug_info debug; }; +/* + * Everything usercopied to/from thread_struct is statically-sized, so + * no hardened usercopy whitelist is needed. + */ +static inline void arch_thread_struct_whitelist(unsigned long *offset, + unsigned long *size) +{ + *offset = *size = 0; +} + #define INIT_THREAD { } #define start_thread(regs,pc,sp) \ From 46515736f8687c5dbde5637ca2f2678055c9c0f7 Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Thu, 26 Oct 2017 15:45:46 +0200 Subject: [PATCH 35/38] kvm: whitelist struct kvm_vcpu_arch MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit On x86, ARM and s390, struct kvm_vcpu_arch has a usercopy region that is read and written by the KVM_GET/SET_CPUID2 ioctls (x86) or KVM_GET/SET_ONE_REG (ARM/s390). Without whitelisting the area, KVM is completely broken on those architectures with usercopy hardening enabled. For now, allow writing to the entire struct on all architectures. The KVM tree will not refine this to an architecture-specific subset of struct kvm_vcpu_arch. Cc: kernel-hardening@lists.openwall.com Cc: Kees Cook Cc: Christian Borntraeger Cc: Christoffer Dall Cc: Radim Krčmář Signed-off-by: Paolo Bonzini Acked-by: Christoffer Dall Acked-by: Marc Zyngier Acked-by: Christian Borntraeger Signed-off-by: Kees Cook --- virt/kvm/kvm_main.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index c422c10cd1dd..96689967f5c3 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -4029,8 +4029,12 @@ int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align, /* A kmem cache lets us meet the alignment requirements of fx_save. 
*/ if (!vcpu_align) vcpu_align = __alignof__(struct kvm_vcpu); - kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size, vcpu_align, - SLAB_ACCOUNT, NULL); + kvm_vcpu_cache = + kmem_cache_create_usercopy("kvm_vcpu", vcpu_size, vcpu_align, + SLAB_ACCOUNT, + offsetof(struct kvm_vcpu, arch), + sizeof_field(struct kvm_vcpu, arch), + NULL); if (!kvm_vcpu_cache) { r = -ENOMEM; goto out_free_3; From 51776043afa415435c7e4636204fbe4f7edc4501 Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Thu, 26 Oct 2017 15:45:47 +0200 Subject: [PATCH 36/38] kvm: x86: fix KVM_XEN_HVM_CONFIG ioctl MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This ioctl is obsolete (it was used by Xenner as far as I know) but still let's not break it gratuitously... Its handler is copying directly into struct kvm. Go through a bounce buffer instead, with the added benefit that we can actually do something useful with the flags argument---the previous code was exiting with -EINVAL but still doing the copy. This technically is a userspace ABI breakage, but since no one should be using the ioctl, it's a good occasion to see if someone actually complains. Cc: kernel-hardening@lists.openwall.com Cc: Kees Cook Cc: Radim Krčmář Signed-off-by: Paolo Bonzini Signed-off-by: Kees Cook --- arch/x86/kvm/x86.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index eee8e7faf1af..6c16461e3a86 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -4238,13 +4238,14 @@ set_identity_unlock: mutex_unlock(&kvm->lock); break; case KVM_XEN_HVM_CONFIG: { + struct kvm_xen_hvm_config xhc; r = -EFAULT; - if (copy_from_user(&kvm->arch.xen_hvm_config, argp, - sizeof(struct kvm_xen_hvm_config))) + if (copy_from_user(&xhc, argp, sizeof(xhc))) goto out; r = -EINVAL; - if (kvm->arch.xen_hvm_config.flags) + if (xhc.flags) goto out; + memcpy(&kvm->arch.xen_hvm_config, &xhc, sizeof(xhc)); r = 0; break; } From 6d07d1cd300f4c7e16005f881fea388164999cc8 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Wed, 14 Jun 2017 16:12:04 -0700 Subject: [PATCH 37/38] usercopy: Restrict non-usercopy caches to size 0 With all known usercopied cache whitelists now defined in the kernel, switch the default usercopy region of kmem_cache_create() to size 0. Any new caches with usercopy regions will now need to use kmem_cache_create_usercopy() instead of kmem_cache_create(). This patch is modified from Brad Spengler/PaX Team's PAX_USERCOPY whitelisting code in the last public patch of grsecurity/PaX based on my understanding of the code. Changes or omissions from the original code are mine and don't reflect the original grsecurity/PaX code. 
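In practice this means any remaining cache whose objects are copied to/from userspace with a dynamically sized length can no longer rely on the old implicit whole-object whitelist and must be converted. A before/after sketch with illustrative names (not an existing in-tree cache):

    struct foo {
        unsigned long  flags;      /* kernel-internal */
        char           data[128];  /* handed to copy_to_user()/copy_from_user() */
    };

    static struct kmem_cache *foo_cachep;

    /* Before: with this patch applied, such a cache has an empty usercopy
     * window, so a dynamically sized copy involving obj->data is rejected. */
    foo_cachep = kmem_cache_create("foo", sizeof(struct foo), 0, 0, NULL);

    /* After: whitelist only the field that is actually copied. */
    foo_cachep = kmem_cache_create_usercopy("foo", sizeof(struct foo), 0, 0,
                offsetof(struct foo, data),
                sizeof_field(struct foo, data),
                NULL);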
Cc: David Windsor Cc: Christoph Lameter Cc: Pekka Enberg Cc: David Rientjes Cc: Joonsoo Kim Cc: Andrew Morton Cc: linux-mm@kvack.org Signed-off-by: Kees Cook --- mm/slab_common.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/slab_common.c b/mm/slab_common.c index 8ac2a6320a6c..d00cd3f0f8ac 100644 --- a/mm/slab_common.c +++ b/mm/slab_common.c @@ -532,7 +532,7 @@ struct kmem_cache * kmem_cache_create(const char *name, size_t size, size_t align, slab_flags_t flags, void (*ctor)(void *)) { - return kmem_cache_create_usercopy(name, size, align, flags, 0, size, + return kmem_cache_create_usercopy(name, size, align, flags, 0, 0, ctor); } EXPORT_SYMBOL(kmem_cache_create); From e47e311843dece8073146f3606871280ee9beb87 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Wed, 14 Jun 2017 18:56:55 -0700 Subject: [PATCH 38/38] lkdtm: Update usercopy tests for whitelisting This updates the USERCOPY_HEAP_FLAG_* tests to USERCOPY_HEAP_WHITELIST_*, since the final form of usercopy whitelisting ended up using an offset/size window instead of the earlier proposed allocation flags. Signed-off-by: Kees Cook --- drivers/misc/lkdtm.h | 4 +- drivers/misc/lkdtm_core.c | 4 +- drivers/misc/lkdtm_usercopy.c | 88 +++++++++++++++++++---------------- 3 files changed, 53 insertions(+), 43 deletions(-) diff --git a/drivers/misc/lkdtm.h b/drivers/misc/lkdtm.h index 687a0dbbe199..9e513dcfd809 100644 --- a/drivers/misc/lkdtm.h +++ b/drivers/misc/lkdtm.h @@ -76,8 +76,8 @@ void __init lkdtm_usercopy_init(void); void __exit lkdtm_usercopy_exit(void); void lkdtm_USERCOPY_HEAP_SIZE_TO(void); void lkdtm_USERCOPY_HEAP_SIZE_FROM(void); -void lkdtm_USERCOPY_HEAP_FLAG_TO(void); -void lkdtm_USERCOPY_HEAP_FLAG_FROM(void); +void lkdtm_USERCOPY_HEAP_WHITELIST_TO(void); +void lkdtm_USERCOPY_HEAP_WHITELIST_FROM(void); void lkdtm_USERCOPY_STACK_FRAME_TO(void); void lkdtm_USERCOPY_STACK_FRAME_FROM(void); void lkdtm_USERCOPY_STACK_BEYOND(void); diff --git a/drivers/misc/lkdtm_core.c b/drivers/misc/lkdtm_core.c index ba92291508dc..e3f2bac4b245 100644 --- a/drivers/misc/lkdtm_core.c +++ b/drivers/misc/lkdtm_core.c @@ -177,8 +177,8 @@ static const struct crashtype crashtypes[] = { CRASHTYPE(ATOMIC_TIMING), CRASHTYPE(USERCOPY_HEAP_SIZE_TO), CRASHTYPE(USERCOPY_HEAP_SIZE_FROM), - CRASHTYPE(USERCOPY_HEAP_FLAG_TO), - CRASHTYPE(USERCOPY_HEAP_FLAG_FROM), + CRASHTYPE(USERCOPY_HEAP_WHITELIST_TO), + CRASHTYPE(USERCOPY_HEAP_WHITELIST_FROM), CRASHTYPE(USERCOPY_STACK_FRAME_TO), CRASHTYPE(USERCOPY_STACK_FRAME_FROM), CRASHTYPE(USERCOPY_STACK_BEYOND), diff --git a/drivers/misc/lkdtm_usercopy.c b/drivers/misc/lkdtm_usercopy.c index 9ebbb031e5e3..9725aed305bb 100644 --- a/drivers/misc/lkdtm_usercopy.c +++ b/drivers/misc/lkdtm_usercopy.c @@ -20,7 +20,7 @@ */ static volatile size_t unconst = 0; static volatile size_t cache_size = 1024; -static struct kmem_cache *bad_cache; +static struct kmem_cache *whitelist_cache; static const unsigned char test_text[] = "This is a test.\n"; @@ -115,6 +115,10 @@ free_user: vm_munmap(user_addr, PAGE_SIZE); } +/* + * This checks for whole-object size validation with hardened usercopy, + * with or without usercopy whitelisting. + */ static void do_usercopy_heap_size(bool to_user) { unsigned long user_addr; @@ -177,77 +181,79 @@ free_kernel: kfree(two); } -static void do_usercopy_heap_flag(bool to_user) +/* + * This checks for the specific whitelist window within an object. If this + * test passes, then do_usercopy_heap_size() tests will pass too. 
+ */ +static void do_usercopy_heap_whitelist(bool to_user) { - unsigned long user_addr; - unsigned char *good_buf = NULL; - unsigned char *bad_buf = NULL; + unsigned long user_alloc; + unsigned char *buf = NULL; + unsigned char __user *user_addr; + size_t offset, size; /* Make sure cache was prepared. */ - if (!bad_cache) { + if (!whitelist_cache) { pr_warn("Failed to allocate kernel cache\n"); return; } /* - * Allocate one buffer from each cache (kmalloc will have the - * SLAB_USERCOPY flag already, but "bad_cache" won't). + * Allocate a buffer with a whitelisted window in the buffer. */ - good_buf = kmalloc(cache_size, GFP_KERNEL); - bad_buf = kmem_cache_alloc(bad_cache, GFP_KERNEL); - if (!good_buf || !bad_buf) { - pr_warn("Failed to allocate buffers from caches\n"); + buf = kmem_cache_alloc(whitelist_cache, GFP_KERNEL); + if (!buf) { + pr_warn("Failed to allocate buffer from whitelist cache\n"); goto free_alloc; } /* Allocate user memory we'll poke at. */ - user_addr = vm_mmap(NULL, 0, PAGE_SIZE, + user_alloc = vm_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_ANONYMOUS | MAP_PRIVATE, 0); - if (user_addr >= TASK_SIZE) { + if (user_alloc >= TASK_SIZE) { pr_warn("Failed to allocate user memory\n"); goto free_alloc; } + user_addr = (void __user *)user_alloc; - memset(good_buf, 'A', cache_size); - memset(bad_buf, 'B', cache_size); + memset(buf, 'B', cache_size); + + /* Whitelisted window in buffer, from kmem_cache_create_usercopy. */ + offset = (cache_size / 4) + unconst; + size = (cache_size / 16) + unconst; if (to_user) { - pr_info("attempting good copy_to_user with SLAB_USERCOPY\n"); - if (copy_to_user((void __user *)user_addr, good_buf, - cache_size)) { + pr_info("attempting good copy_to_user inside whitelist\n"); + if (copy_to_user(user_addr, buf + offset, size)) { pr_warn("copy_to_user failed unexpectedly?!\n"); goto free_user; } - pr_info("attempting bad copy_to_user w/o SLAB_USERCOPY\n"); - if (copy_to_user((void __user *)user_addr, bad_buf, - cache_size)) { + pr_info("attempting bad copy_to_user outside whitelist\n"); + if (copy_to_user(user_addr, buf + offset - 1, size)) { pr_warn("copy_to_user failed, but lacked Oops\n"); goto free_user; } } else { - pr_info("attempting good copy_from_user with SLAB_USERCOPY\n"); - if (copy_from_user(good_buf, (void __user *)user_addr, - cache_size)) { + pr_info("attempting good copy_from_user inside whitelist\n"); + if (copy_from_user(buf + offset, user_addr, size)) { pr_warn("copy_from_user failed unexpectedly?!\n"); goto free_user; } - pr_info("attempting bad copy_from_user w/o SLAB_USERCOPY\n"); - if (copy_from_user(bad_buf, (void __user *)user_addr, - cache_size)) { + pr_info("attempting bad copy_from_user outside whitelist\n"); + if (copy_from_user(buf + offset - 1, user_addr, size)) { pr_warn("copy_from_user failed, but lacked Oops\n"); goto free_user; } } free_user: - vm_munmap(user_addr, PAGE_SIZE); + vm_munmap(user_alloc, PAGE_SIZE); free_alloc: - if (bad_buf) - kmem_cache_free(bad_cache, bad_buf); - kfree(good_buf); + if (buf) + kmem_cache_free(whitelist_cache, buf); } /* Callable tests. 
*/ @@ -261,14 +267,14 @@ void lkdtm_USERCOPY_HEAP_SIZE_FROM(void) do_usercopy_heap_size(false); } -void lkdtm_USERCOPY_HEAP_FLAG_TO(void) +void lkdtm_USERCOPY_HEAP_WHITELIST_TO(void) { - do_usercopy_heap_flag(true); + do_usercopy_heap_whitelist(true); } -void lkdtm_USERCOPY_HEAP_FLAG_FROM(void) +void lkdtm_USERCOPY_HEAP_WHITELIST_FROM(void) { - do_usercopy_heap_flag(false); + do_usercopy_heap_whitelist(false); } void lkdtm_USERCOPY_STACK_FRAME_TO(void) @@ -319,11 +325,15 @@ free_user: void __init lkdtm_usercopy_init(void) { /* Prepare cache that lacks SLAB_USERCOPY flag. */ - bad_cache = kmem_cache_create("lkdtm-no-usercopy", cache_size, 0, - 0, NULL); + whitelist_cache = + kmem_cache_create_usercopy("lkdtm-usercopy", cache_size, + 0, 0, + cache_size / 4, + cache_size / 16, + NULL); } void __exit lkdtm_usercopy_exit(void) { - kmem_cache_destroy(bad_cache); + kmem_cache_destroy(whitelist_cache); }
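Taken together, the whitelist behaviour these tests probe reduces to a window comparison: a copy of n bytes starting at a given offset into an object is permitted only when it falls entirely inside [useroffset, useroffset + usersize). The following is a conceptual sketch of that bounds test alone, not the kernel's actual heap check (which also verifies that the copy stays within the object itself); the function name is made up for illustration.

#include <linux/types.h>

/*
 * Conceptual sketch: allow a copy of 'n' bytes starting 'offset' bytes
 * into an object whose usercopy window is [useroffset, useroffset + usersize).
 */
static bool copy_within_whitelist(unsigned long offset, unsigned long n,
				  unsigned long useroffset,
				  unsigned long usersize)
{
	unsigned long inset;

	if (offset < useroffset)
		return false;		/* starts before the window */
	inset = offset - useroffset;
	if (inset > usersize)
		return false;		/* starts past the end of the window */
	return n <= usersize - inset;	/* must also end inside the window */
}

With the cache created in lkdtm_usercopy_init() above (window starting at cache_size / 4 with length cache_size / 16), the "good" copies in do_usercopy_heap_whitelist() start exactly at the window and fit within it, while the "bad" copies start one byte earlier, so only the latter should be caught by hardened usercopy.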