diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 04ca65912638..e7a5f1d1c314 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -41,7 +41,7 @@ static DEFINE_IDR(zram_index_idr);
 static DEFINE_MUTEX(zram_index_mutex);
 
 static int zram_major;
-static const char *default_compressor = "lzo";
+static const char *default_compressor = "lzo-rle";
 
 /* Module params (documentation at end) */
 static unsigned int num_devices = 1;
diff --git a/include/linux/swap.h b/include/linux/swap.h
index fc50e21b3b88..4bfb5c4ac108 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -157,9 +157,9 @@ struct swap_extent {
 /*
  * Max bad pages in the new format..
  */
-#define __swapoffset(x) ((unsigned long)&((union swap_header *)0)->x)
 #define MAX_SWAP_BADPAGES \
-	((__swapoffset(magic.magic) - __swapoffset(info.badpages)) / sizeof(int))
+	((offsetof(union swap_header, magic.magic) - \
+	  offsetof(union swap_header, info.badpages)) / sizeof(int))
 
 enum {
 	SWP_USED	= (1 << 0),	/* is slot in swap_info[] used? */
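The swap.h hunk above swaps a hand-rolled null-pointer-cast macro for the standard offsetof(). A minimal userspace sketch of why the two spellings compute the same constant is below; union swap_header_demo is a simplified stand-in for the kernel's union swap_header (it assumes a 4096-byte page), and the field sizes are illustrative, not authoritative.

/*
 * Not part of the patch: userspace sketch only. The union is a stand-in
 * for the kernel's union swap_header, assuming PAGE_SIZE == 4096.
 */
#include <stddef.h>
#include <stdio.h>

union swap_header_demo {
	struct {
		char reserved[4096 - 10];
		char magic[10];			/* "SWAP-SPACE" / "SWAPSPACE2" */
	} magic;
	struct {
		char		bootbits[1024];	/* space for disklabel etc. */
		unsigned int	version;
		unsigned int	last_page;
		unsigned int	nr_badpages;
		unsigned char	sws_uuid[16];
		unsigned char	sws_volume[16];
		unsigned int	padding[117];
		unsigned int	badpages[1];
	} info;
};

/* The macro the patch removes: it derives an offset by casting a null
 * pointer, which is undefined behavior even though it happens to work
 * on the compilers the kernel supports. */
#define __swapoffset(x) ((unsigned long)&((union swap_header_demo *)0)->x)

int main(void)
{
	/* Both expressions yield the same compile-time constant: how many
	 * ints fit between info.badpages and magic.magic. */
	unsigned long old_way = (__swapoffset(magic.magic) -
				 __swapoffset(info.badpages)) / sizeof(int);
	size_t new_way = (offsetof(union swap_header_demo, magic.magic) -
			  offsetof(union swap_header_demo, info.badpages)) / sizeof(int);

	printf("old=%lu new=%zu\n", old_way, new_way);	/* old=637 new=637 here */
	return 0;
}

In other words, the kernel change is behavior-preserving; offsetof() is simply the well-defined way to obtain the same offset arithmetic.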
diff --git a/mm/filemap.c b/mm/filemap.c
index a3b4021c448f..ec6566ffbd90 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2420,20 +2420,20 @@ static int page_cache_read(struct file *file, pgoff_t offset, gfp_t gfp_mask)
  * Synchronous readahead happens when we don't even find
  * a page in the page cache at all.
  */
-static void do_sync_mmap_readahead(struct vm_area_struct *vma,
-				   struct file_ra_state *ra,
-				   struct file *file,
-				   pgoff_t offset)
+static void do_sync_mmap_readahead(struct vm_fault *vmf)
 {
+	struct file *file = vmf->vma->vm_file;
+	struct file_ra_state *ra = &file->f_ra;
 	struct address_space *mapping = file->f_mapping;
+	pgoff_t offset = vmf->pgoff;
 
 	/* If we don't want any read-ahead, don't bother */
-	if (vma->vm_flags & VM_RAND_READ)
+	if (vmf->vma->vm_flags & VM_RAND_READ)
 		return;
 	if (!ra->ra_pages)
 		return;
 
-	if (vma->vm_flags & VM_SEQ_READ) {
+	if (vmf->vma->vm_flags & VM_SEQ_READ) {
 		page_cache_sync_readahead(mapping, ra, file, offset,
 					  ra->ra_pages);
 		return;
@@ -2463,16 +2463,16 @@ static void do_sync_mmap_readahead(struct vm_area_struct *vma,
  * Asynchronous readahead happens when we find the page and PG_readahead,
  * so we want to possibly extend the readahead further..
  */
-static void do_async_mmap_readahead(struct vm_area_struct *vma,
-				    struct file_ra_state *ra,
-				    struct file *file,
-				    struct page *page,
-				    pgoff_t offset)
+static void do_async_mmap_readahead(struct vm_fault *vmf,
+				    struct page *page)
 {
+	struct file *file = vmf->vma->vm_file;
+	struct file_ra_state *ra = &file->f_ra;
 	struct address_space *mapping = file->f_mapping;
+	pgoff_t offset = vmf->pgoff;
 
 	/* If we don't want any read-ahead, don't bother */
-	if (vma->vm_flags & VM_RAND_READ)
+	if (vmf->vma->vm_flags & VM_RAND_READ)
 		return;
 	if (ra->mmap_miss > 0)
 		ra->mmap_miss--;
@@ -2531,10 +2531,10 @@ vm_fault_t filemap_fault(struct vm_fault *vmf)
 		 * We found the page, so try async readahead before
 		 * waiting for the lock.
 		 */
-		do_async_mmap_readahead(vmf->vma, ra, file, page, offset);
+		do_async_mmap_readahead(vmf, page);
 	} else if (!page) {
 		/* No page in the page cache at all */
-		do_sync_mmap_readahead(vmf->vma, ra, file, offset);
+		do_sync_mmap_readahead(vmf);
 		count_vm_event(PGMAJFAULT);
 		count_memcg_event_mm(vmf->vma->vm_mm, PGMAJFAULT);
 		ret = VM_FAULT_MAJOR;
diff --git a/tools/testing/selftests/proc/proc-pid-vm.c b/tools/testing/selftests/proc/proc-pid-vm.c
index bbe8150d18aa..7202bbac976e 100644
--- a/tools/testing/selftests/proc/proc-pid-vm.c
+++ b/tools/testing/selftests/proc/proc-pid-vm.c
@@ -29,6 +29,7 @@
 #include <errno.h>
 #include <sched.h>
 #include <signal.h>
+#include <stdbool.h>
 #include <stdint.h>
 #include <stdio.h>
 #include <string.h>
@@ -36,11 +37,14 @@
 #include <sys/mount.h>
 #include <sys/types.h>
 #include <sys/stat.h>
+#include <sys/wait.h>
 #include <fcntl.h>
 #include <unistd.h>
 #include <sys/syscall.h>
 #include <sys/uio.h>
 #include <linux/kdev_t.h>
+#include <sys/time.h>
+#include <sys/resource.h>
 
 static inline long sys_execveat(int dirfd, const char *pathname, char **argv, char **envp, int flags)
 {
@@ -205,12 +209,44 @@ static int make_exe(const uint8_t *payload, size_t len)
 }
 #endif
 
+static bool g_vsyscall = false;
+
+static const char str_vsyscall[] =
+"ffffffffff600000-ffffffffff601000 r-xp 00000000 00:00 0                  [vsyscall]\n";
+
 #ifdef __x86_64__
+/*
+ * vsyscall page can't be unmapped, probe it with memory load.
+ */
+static void vsyscall(void)
+{
+	pid_t pid;
+	int wstatus;
+
+	pid = fork();
+	if (pid < 0) {
+		fprintf(stderr, "fork, errno %d\n", errno);
+		exit(1);
+	}
+	if (pid == 0) {
+		struct rlimit rlim = {0, 0};
+		(void)setrlimit(RLIMIT_CORE, &rlim);
+		*(volatile int *)0xffffffffff600000UL;
+		exit(0);
+	}
+	wait(&wstatus);
+	if (WIFEXITED(wstatus)) {
+		g_vsyscall = true;
+	}
+}
+
 int main(void)
 {
 	int pipefd[2];
 	int exec_fd;
 
+	vsyscall();
+
 	atexit(ate);
 
 	make_private_tmp();
@@ -261,9 +297,9 @@ int main(void)
 	snprintf(buf0 + MAPS_OFFSET, sizeof(buf0) - MAPS_OFFSET,
 		 "/tmp/#%llu (deleted)\n", (unsigned long long)st.st_ino);
 
-
 	/* Test /proc/$PID/maps */
 	{
+		const size_t len = strlen(buf0) + (g_vsyscall ? strlen(str_vsyscall) : 0);
 		char buf[256];
 		ssize_t rv;
 		int fd;
@@ -274,13 +310,16 @@ int main(void)
 			return 1;
 		}
 
 		rv = read(fd, buf, sizeof(buf));
-		assert(rv == strlen(buf0));
+		assert(rv == len);
 		assert(memcmp(buf, buf0, strlen(buf0)) == 0);
+		if (g_vsyscall) {
+			assert(memcmp(buf + strlen(buf0), str_vsyscall, strlen(str_vsyscall)) == 0);
+		}
 	}
 
 	/* Test /proc/$PID/smaps */
 	{
-		char buf[1024];
+		char buf[4096];
 		ssize_t rv;
 		int fd;
@@ -319,6 +358,10 @@ int main(void)
 		for (i = 0; i < sizeof(S)/sizeof(S[0]); i++) {
 			assert(memmem(buf, rv, S[i], strlen(S[i])));
 		}
+
+		if (g_vsyscall) {
+			assert(memmem(buf, rv, str_vsyscall, strlen(str_vsyscall)));
+		}
 	}
 
 	/* Test /proc/$PID/smaps_rollup */
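The selftest's vsyscall() helper embodies a reusable probe: the vsyscall page can't be unmapped or queried portably, so the test dereferences its fixed address in a forked child and inspects how the child terminated. Below is a standalone sketch of the same technique; the page_is_mapped() wrapper and the printf are invented here for illustration, while the fixed address and the RLIMIT_CORE trick come from the patch above.

/*
 * Standalone sketch, not part of the patch: a child dereferences the
 * address and either exits cleanly (page is mapped) or dies with
 * SIGSEGV (it is not); the parent tells the outcomes apart with
 * WIFEXITED().
 */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/resource.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

static bool page_is_mapped(unsigned long addr)
{
	pid_t pid;
	int wstatus;

	pid = fork();
	if (pid < 0) {
		fprintf(stderr, "fork, errno %d\n", errno);
		exit(1);
	}
	if (pid == 0) {
		/* Don't leave a core dump behind if the load faults. */
		struct rlimit rlim = {0, 0};
		(void)setrlimit(RLIMIT_CORE, &rlim);
		*(volatile int *)addr;	/* SIGSEGV here if unmapped */
		exit(0);
	}
	wait(&wstatus);
	return WIFEXITED(wstatus) && WEXITSTATUS(wstatus) == 0;
}

int main(void)
{
	/* x86-64 vsyscall page; reports "no" when booted with vsyscall=none */
	printf("vsyscall mapped: %s\n",
	       page_is_mapped(0xffffffffff600000UL) ? "yes" : "no");
	return 0;
}

Probing in a throwaway child is what lets the test set g_vsyscall without risking its own address space, which is why the maps/smaps assertions can then branch on whether the [vsyscall] line should appear.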