tile: Use the more common pr_warn instead of pr_warning
And other message logging neatening.

Other miscellanea:

o coalesce formats
o realign arguments
o standardize a couple of macros
o use __func__ instead of embedding the function name

Signed-off-by: Joe Perches <joe@perches.com>
Signed-off-by: Chris Metcalf <cmetcalf@tilera.com>
commit f47436734d
parent ebd25caf7d
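The change is mechanical throughout: user-visible format strings that had been split across source lines to stay under 80 columns are coalesced into a single quoted string (checkpatch.pl permits format strings to exceed the line-length limit precisely so messages stay greppable), deprecated pr_warning() calls become pr_warn(), and embedded function names become __func__. A minimal userspace sketch of the before/after pattern; the pr_warn() stand-in below is hypothetical, in the kernel it expands to printk(KERN_WARNING pr_fmt(fmt), ...):

	#include <stdio.h>

	/* Userspace stand-in for the kernel macro (sketch only). */
	#define pr_warn(fmt, ...) fprintf(stderr, "warning: " fmt, ##__VA_ARGS__)

	int main(void)
	{
		/* Before: the quoted string is split, so grepping the tree
		 * for "kexec with num_online_cpus" finds nothing. */
		pr_warn("%s: detected attempt to kexec "
			"with num_online_cpus() > 1\n", __func__);

		/* After: one coalesced string, and __func__ instead of an
		 * embedded function name that could go stale. */
		pr_warn("%s: detected attempt to kexec with num_online_cpus() > 1\n",
			__func__);
		return 0;
	}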
@@ -392,8 +392,7 @@ extern void ioport_unmap(void __iomem *addr);
 static inline long ioport_panic(void)
 {
 #ifdef __tilegx__
-	panic("PCI IO space support is disabled. Configure the kernel with"
-	      " CONFIG_TILE_PCI_IO to enable it");
+	panic("PCI IO space support is disabled. Configure the kernel with CONFIG_TILE_PCI_IO to enable it");
 #else
 	panic("inb/outb and friends do not exist on tile");
 #endif
@@ -402,7 +401,7 @@ static inline long ioport_panic(void)
 
 static inline void __iomem *ioport_map(unsigned long port, unsigned int len)
 {
-	pr_info("ioport_map: mapping IO resources is unsupported on tile.\n");
+	pr_info("ioport_map: mapping IO resources is unsupported on tile\n");
 	return NULL;
 }
 
@@ -235,9 +235,9 @@ static inline void __pte_clear(pte_t *ptep)
 #define pte_donemigrate(x) hv_pte_set_present(hv_pte_clear_migrating(x))
 
 #define pte_ERROR(e) \
-	pr_err("%s:%d: bad pte 0x%016llx.\n", __FILE__, __LINE__, pte_val(e))
+	pr_err("%s:%d: bad pte 0x%016llx\n", __FILE__, __LINE__, pte_val(e))
 #define pgd_ERROR(e) \
-	pr_err("%s:%d: bad pgd 0x%016llx.\n", __FILE__, __LINE__, pgd_val(e))
+	pr_err("%s:%d: bad pgd 0x%016llx\n", __FILE__, __LINE__, pgd_val(e))
 
 /* Return PA and protection info for a given kernel VA. */
 int va_to_cpa_and_pte(void *va, phys_addr_t *cpa, pte_t *pte);
@@ -86,7 +86,7 @@ static inline int pud_huge_page(pud_t pud)
 }
 
 #define pmd_ERROR(e) \
-	pr_err("%s:%d: bad pmd 0x%016llx.\n", __FILE__, __LINE__, pmd_val(e))
+	pr_err("%s:%d: bad pmd 0x%016llx\n", __FILE__, __LINE__, pmd_val(e))
 
 static inline void pud_clear(pud_t *pudp)
 {
@@ -365,8 +365,7 @@ void __kprobes do_hardwall_trap(struct pt_regs* regs, int fault_num)
 	 * to quiesce.
 	 */
 	if (rect->teardown_in_progress) {
-		pr_notice("cpu %d: detected %s hardwall violation %#lx"
-			  " while teardown already in progress\n",
+		pr_notice("cpu %d: detected %s hardwall violation %#lx while teardown already in progress\n",
 			  cpu, hwt->name,
 			  (long)mfspr_XDN(hwt, DIRECTION_PROTECT));
 		goto done;
@@ -630,8 +629,7 @@ static void _hardwall_deactivate(struct hardwall_type *hwt,
 	struct thread_struct *ts = &task->thread;
 
 	if (cpumask_weight(&task->cpus_allowed) != 1) {
-		pr_err("pid %d (%s) releasing %s hardwall with"
-		       " an affinity mask containing %d cpus!\n",
+		pr_err("pid %d (%s) releasing %s hardwall with an affinity mask containing %d cpus!\n",
 		       task->pid, task->comm, hwt->name,
 		       cpumask_weight(&task->cpus_allowed));
 		BUG();
@@ -107,9 +107,8 @@ void tile_dev_intr(struct pt_regs *regs, int intnum)
 {
 	long sp = stack_pointer - (long) current_thread_info();
 	if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) {
-		pr_emerg("tile_dev_intr: "
-			 "stack overflow: %ld\n",
-			 sp - sizeof(struct thread_info));
+		pr_emerg("%s: stack overflow: %ld\n",
+			 __func__, sp - sizeof(struct thread_info));
 		dump_stack();
 	}
 }
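The stack-overflow hunks in tile_dev_intr() above and hv_message_intr() below are the clearest case for the __func__ substitution: the same literal message had been duplicated with a different hard-coded prefix in each handler. A hypothetical userspace sketch of why __func__ is safer (pr_emerg() here is a stand-in for the kernel macro):

	#include <stdio.h>

	/* Sketch only; not the kernel definition. */
	#define pr_emerg(fmt, ...) fprintf(stderr, fmt, ##__VA_ARGS__)

	static void hv_message_intr_demo(long sp)
	{
		/* Copy-pasted from another handler: the embedded name is
		 * now silently wrong. */
		pr_emerg("tile_dev_intr: stack overflow: %ld\n", sp);

		/* __func__ (C99) always names the enclosing function. */
		pr_emerg("%s: stack overflow: %ld\n", __func__, sp);
	}

	int main(void)
	{
		hv_message_intr_demo(128);
		return 0;
	}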
@@ -90,8 +90,7 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
 		return -EINVAL;
 
 	if (insn_has_control(*p->addr)) {
-		pr_notice("Kprobes for control instructions are not "
-			  "supported\n");
+		pr_notice("Kprobes for control instructions are not supported\n");
 		return -EINVAL;
 	}
 
@@ -77,16 +77,13 @@ void machine_crash_shutdown(struct pt_regs *regs)
 int machine_kexec_prepare(struct kimage *image)
 {
 	if (num_online_cpus() > 1) {
-		pr_warning("%s: detected attempt to kexec "
-			   "with num_online_cpus() > 1\n",
+		pr_warn("%s: detected attempt to kexec with num_online_cpus() > 1\n",
 			__func__);
 		return -ENOSYS;
 	}
 	if (image->type != KEXEC_TYPE_DEFAULT) {
-		pr_warning("%s: detected attempt to kexec "
-			   "with unsupported type: %d\n",
-			   __func__,
-			   image->type);
+		pr_warn("%s: detected attempt to kexec with unsupported type: %d\n",
			__func__, image->type);
 		return -ENOSYS;
 	}
 	return 0;
@@ -131,7 +128,7 @@ static unsigned char *kexec_bn2cl(void *pg)
 	 */
 	csum = ip_compute_csum(pg, bhdrp->b_size);
 	if (csum != 0) {
-		pr_warning("%s: bad checksum %#x (size %d)\n",
+		pr_warn("%s: bad checksum %#x (size %d)\n",
 			   __func__, csum, bhdrp->b_size);
 		return 0;
 	}
@@ -160,8 +157,7 @@ static unsigned char *kexec_bn2cl(void *pg)
 	while (*desc != '\0') {
 		desc++;
 		if (((unsigned long)desc & PAGE_MASK) != (unsigned long)pg) {
-			pr_info("%s: ran off end of page\n",
-				__func__);
+			pr_info("%s: ran off end of page\n", __func__);
 			return 0;
 		}
 	}
@@ -195,19 +191,17 @@ static void kexec_find_and_set_command_line(struct kimage *image)
 	}
 
 	if (command_line != 0) {
-		pr_info("setting new command line to \"%s\"\n",
-			command_line);
+		pr_info("setting new command line to \"%s\"\n", command_line);
 
 		hverr = hv_set_command_line(
 			(HV_VirtAddr) command_line, strlen(command_line));
 		kunmap_atomic(command_line);
 	} else {
-		pr_info("%s: no command line found; making empty\n",
-			__func__);
+		pr_info("%s: no command line found; making empty\n", __func__);
 		hverr = hv_set_command_line((HV_VirtAddr) command_line, 0);
 	}
 	if (hverr)
-		pr_warning("%s: hv_set_command_line returned error: %d\n",
+		pr_warn("%s: hv_set_command_line returned error: %d\n",
 			__func__, hverr);
 }
 
@@ -59,9 +59,8 @@ void hv_message_intr(struct pt_regs *regs, int intnum)
 {
 	long sp = stack_pointer - (long) current_thread_info();
 	if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) {
-		pr_emerg("hv_message_intr: "
-			 "stack overflow: %ld\n",
-			 sp - sizeof(struct thread_info));
+		pr_emerg("%s: stack overflow: %ld\n",
+			 __func__, sp - sizeof(struct thread_info));
 		dump_stack();
 	}
 }
@@ -96,7 +96,7 @@ void module_free(struct module *mod, void *module_region)
 static int validate_hw2_last(long value, struct module *me)
 {
 	if (((value << 16) >> 16) != value) {
-		pr_warning("module %s: Out of range HW2_LAST value %#lx\n",
+		pr_warn("module %s: Out of range HW2_LAST value %#lx\n",
 			   me->name, value);
 		return 0;
 	}
@@ -210,8 +210,8 @@ int apply_relocate_add(Elf_Shdr *sechdrs,
 		value -= (unsigned long) location; /* pc-relative */
 		value = (long) value >> 3;         /* count by instrs */
 		if (!validate_jumpoff(value)) {
-			pr_warning("module %s: Out of range jump to"
-				   " %#llx at %#llx (%p)\n", me->name,
+			pr_warn("module %s: Out of range jump to %#llx at %#llx (%p)\n",
+				me->name,
 				sym->st_value + rel[i].r_addend,
 				rel[i].r_offset, location);
 			return -ENOEXEC;
@@ -178,8 +178,8 @@ int __init tile_pci_init(void)
 			continue;
 		hv_cfg_fd1 = tile_pcie_open(i, 1);
 		if (hv_cfg_fd1 < 0) {
-			pr_err("PCI: Couldn't open config fd to HV "
-			       "for controller %d\n", i);
+			pr_err("PCI: Couldn't open config fd to HV for controller %d\n",
+			       i);
 			goto err_cont;
 		}
 
@@ -423,8 +423,7 @@ int pcibios_enable_device(struct pci_dev *dev, int mask)
 	for (i = 0; i < 6; i++) {
 		r = &dev->resource[i];
 		if (r->flags & IORESOURCE_UNSET) {
-			pr_err("PCI: Device %s not available "
-			       "because of resource collisions\n",
+			pr_err("PCI: Device %s not available because of resource collisions\n",
 			       pci_name(dev));
 			return -EINVAL;
 		}
@@ -131,8 +131,7 @@ static int tile_irq_cpu(int irq)
 
 	count = cpumask_weight(&intr_cpus_map);
 	if (unlikely(count == 0)) {
-		pr_warning("intr_cpus_map empty, interrupts will be"
-			   " delievered to dataplane tiles\n");
+		pr_warn("intr_cpus_map empty, interrupts will be delievered to dataplane tiles\n");
 		return irq % (smp_height * smp_width);
 	}
 
@@ -197,16 +196,16 @@ static int tile_pcie_open(int trio_index)
 	/* Get the properties of the PCIe ports on this TRIO instance. */
 	ret = gxio_trio_get_port_property(context, &pcie_ports[trio_index]);
 	if (ret < 0) {
-		pr_err("PCI: PCIE_GET_PORT_PROPERTY failure, error %d,"
-		       " on TRIO %d\n", ret, trio_index);
+		pr_err("PCI: PCIE_GET_PORT_PROPERTY failure, error %d, on TRIO %d\n",
+		       ret, trio_index);
 		goto get_port_property_failure;
 	}
 
 	context->mmio_base_mac =
 		iorpc_ioremap(context->fd, 0, HV_TRIO_CONFIG_IOREMAP_SIZE);
 	if (context->mmio_base_mac == NULL) {
-		pr_err("PCI: TRIO config space mapping failure, error %d,"
-		       " on TRIO %d\n", ret, trio_index);
+		pr_err("PCI: TRIO config space mapping failure, error %d, on TRIO %d\n",
+		       ret, trio_index);
 		ret = -ENOMEM;
 
 		goto trio_mmio_mapping_failure;
@@ -622,8 +621,7 @@ static void fixup_read_and_payload_sizes(struct pci_controller *controller)
 			dev_control.max_read_req_sz,
 			mac);
 	if (err < 0) {
-		pr_err("PCI: PCIE_CONFIGURE_MAC_MPS_MRS failure, "
-		       "MAC %d on TRIO %d\n",
+		pr_err("PCI: PCIE_CONFIGURE_MAC_MPS_MRS failure, MAC %d on TRIO %d\n",
 		       mac, controller->trio_index);
 	}
 }
@@ -720,27 +718,24 @@ int __init pcibios_init(void)
 				     reg_offset);
 		if (!port_status.dl_up) {
 			if (rc_delay[trio_index][mac]) {
-				pr_info("Delaying PCIe RC TRIO init %d sec"
-					" on MAC %d on TRIO %d\n",
+				pr_info("Delaying PCIe RC TRIO init %d sec on MAC %d on TRIO %d\n",
 					rc_delay[trio_index][mac], mac,
 					trio_index);
 				msleep(rc_delay[trio_index][mac] * 1000);
 			}
 			ret = gxio_trio_force_rc_link_up(trio_context, mac);
 			if (ret < 0)
-				pr_err("PCI: PCIE_FORCE_LINK_UP failure, "
-				       "MAC %d on TRIO %d\n", mac, trio_index);
+				pr_err("PCI: PCIE_FORCE_LINK_UP failure, MAC %d on TRIO %d\n",
+				       mac, trio_index);
 		}
 
-		pr_info("PCI: Found PCI controller #%d on TRIO %d MAC %d\n", i,
-			trio_index, controller->mac);
+		pr_info("PCI: Found PCI controller #%d on TRIO %d MAC %d\n",
+			i, trio_index, controller->mac);
 
 		/* Delay the bus probe if needed. */
 		if (rc_delay[trio_index][mac]) {
-			pr_info("Delaying PCIe RC bus enumerating %d sec"
-				" on MAC %d on TRIO %d\n",
-				rc_delay[trio_index][mac], mac,
-				trio_index);
+			pr_info("Delaying PCIe RC bus enumerating %d sec on MAC %d on TRIO %d\n",
+				rc_delay[trio_index][mac], mac, trio_index);
 			msleep(rc_delay[trio_index][mac] * 1000);
 		} else {
 			/*
@@ -758,8 +753,7 @@ int __init pcibios_init(void)
 			if (pcie_ports[trio_index].ports[mac].removable) {
 				pr_info("PCI: link is down, MAC %d on TRIO %d\n",
 					mac, trio_index);
-				pr_info("This is expected if no PCIe card"
-					" is connected to this link\n");
+				pr_info("This is expected if no PCIe card is connected to this link\n");
 			} else
 				pr_err("PCI: link is down, MAC %d on TRIO %d\n",
 				       mac, trio_index);
@@ -829,8 +823,8 @@ int __init pcibios_init(void)
 		/* Alloc a PIO region for PCI config access per MAC. */
 		ret = gxio_trio_alloc_pio_regions(trio_context, 1, 0, 0);
 		if (ret < 0) {
-			pr_err("PCI: PCI CFG PIO alloc failure for mac %d "
-			       "on TRIO %d, give up\n", mac, trio_index);
+			pr_err("PCI: PCI CFG PIO alloc failure for mac %d on TRIO %d, give up\n",
+			       mac, trio_index);
 
 			continue;
 		}
@@ -842,8 +836,8 @@ int __init pcibios_init(void)
 					    trio_context->pio_cfg_index[mac],
 					    mac, 0, HV_TRIO_PIO_FLAG_CONFIG_SPACE);
 		if (ret < 0) {
-			pr_err("PCI: PCI CFG PIO init failure for mac %d "
-			       "on TRIO %d, give up\n", mac, trio_index);
+			pr_err("PCI: PCI CFG PIO init failure for mac %d on TRIO %d, give up\n",
+			       mac, trio_index);
 
 			continue;
 		}
@@ -925,9 +919,8 @@ int __init pcibios_init(void)
 		/* Alloc a PIO region for PCI memory access for each RC port. */
 		ret = gxio_trio_alloc_pio_regions(trio_context, 1, 0, 0);
 		if (ret < 0) {
-			pr_err("PCI: MEM PIO alloc failure on TRIO %d mac %d, "
-			       "give up\n", controller->trio_index,
-			       controller->mac);
+			pr_err("PCI: MEM PIO alloc failure on TRIO %d mac %d, give up\n",
+			       controller->trio_index, controller->mac);
 
 			continue;
 		}
@@ -944,9 +937,8 @@ int __init pcibios_init(void)
 						    0,
 						    0);
 		if (ret < 0) {
-			pr_err("PCI: MEM PIO init failure on TRIO %d mac %d, "
-			       "give up\n", controller->trio_index,
-			       controller->mac);
+			pr_err("PCI: MEM PIO init failure on TRIO %d mac %d, give up\n",
+			       controller->trio_index, controller->mac);
 
 			continue;
 		}
@@ -957,9 +949,8 @@ int __init pcibios_init(void)
 		 */
 		ret = gxio_trio_alloc_pio_regions(trio_context, 1, 0, 0);
 		if (ret < 0) {
-			pr_err("PCI: I/O PIO alloc failure on TRIO %d mac %d, "
-			       "give up\n", controller->trio_index,
-			       controller->mac);
+			pr_err("PCI: I/O PIO alloc failure on TRIO %d mac %d, give up\n",
+			       controller->trio_index, controller->mac);
 
 			continue;
 		}
@@ -976,9 +967,8 @@ int __init pcibios_init(void)
 						    0,
 						    HV_TRIO_PIO_FLAG_IO_SPACE);
 		if (ret < 0) {
-			pr_err("PCI: I/O PIO init failure on TRIO %d mac %d, "
-			       "give up\n", controller->trio_index,
-			       controller->mac);
+			pr_err("PCI: I/O PIO init failure on TRIO %d mac %d, give up\n",
+			       controller->trio_index, controller->mac);
 
 			continue;
 		}
@@ -997,10 +987,9 @@ int __init pcibios_init(void)
 			ret = gxio_trio_alloc_memory_maps(trio_context, 1, 0,
 							  0);
 			if (ret < 0) {
-				pr_err("PCI: Mem-Map alloc failure on TRIO %d "
-				       "mac %d for MC %d, give up\n",
-				       controller->trio_index,
-				       controller->mac, j);
+				pr_err("PCI: Mem-Map alloc failure on TRIO %d mac %d for MC %d, give up\n",
+				       controller->trio_index, controller->mac,
+				       j);
 
 				goto alloc_mem_map_failed;
 			}
@@ -1030,10 +1019,9 @@ int __init pcibios_init(void)
 							j,
 							GXIO_TRIO_ORDER_MODE_UNORDERED);
 			if (ret < 0) {
-				pr_err("PCI: Mem-Map init failure on TRIO %d "
-				       "mac %d for MC %d, give up\n",
-				       controller->trio_index,
-				       controller->mac, j);
+				pr_err("PCI: Mem-Map init failure on TRIO %d mac %d for MC %d, give up\n",
+				       controller->trio_index, controller->mac,
+				       j);
 
 				goto alloc_mem_map_failed;
 			}
@@ -1510,9 +1498,7 @@ int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
 	 * Most PCIe endpoint devices do support 64-bit message addressing.
 	 */
 	if (desc->msi_attrib.is_64 == 0) {
-		dev_printk(KERN_INFO, &pdev->dev,
-			   "64-bit MSI message address not supported, "
-			   "falling back to legacy interrupts.\n");
+		dev_info(&pdev->dev, "64-bit MSI message address not supported, falling back to legacy interrupts\n");
 
 		ret = -ENOMEM;
 		goto is_64_failure;
@@ -1549,10 +1535,7 @@ int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
 	/* SQ regions are out, allocate from map mem regions. */
 	mem_map = gxio_trio_alloc_memory_maps(trio_context, 1, 0, 0);
 	if (mem_map < 0) {
-		dev_printk(KERN_INFO, &pdev->dev,
-			   "%s Mem-Map alloc failure. "
-			   "Failed to initialize MSI interrupts. "
-			   "Falling back to legacy interrupts.\n",
+		dev_info(&pdev->dev, "%s Mem-Map alloc failure - failed to initialize MSI interrupts - falling back to legacy interrupts\n",
 			 desc->msi_attrib.is_msix ? "MSI-X" : "MSI");
 		ret = -ENOMEM;
 		goto msi_mem_map_alloc_failure;
@@ -1580,7 +1563,7 @@ int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
 			      mem_map, mem_map_base, mem_map_limit,
 			      trio_context->asid);
 	if (ret < 0) {
-		dev_printk(KERN_INFO, &pdev->dev, "HV MSI config failed.\n");
+		dev_info(&pdev->dev, "HV MSI config failed\n");
 
 		goto hv_msi_config_failure;
 	}
@@ -52,7 +52,7 @@ static int __init idle_setup(char *str)
 		return -EINVAL;
 
 	if (!strcmp(str, "poll")) {
-		pr_info("using polling idle threads.\n");
+		pr_info("using polling idle threads\n");
 		cpu_idle_poll_ctrl(true);
 		return 0;
 	} else if (!strcmp(str, "halt")) {
@@ -547,7 +547,6 @@ void show_regs(struct pt_regs *regs)
 	struct task_struct *tsk = validate_current();
 	int i;
 
-	pr_err("\n");
 	if (tsk != &corrupt_current)
 		show_regs_print_info(KERN_ERR);
 #ifdef __tilegx__
@@ -560,8 +559,7 @@ void show_regs(struct pt_regs *regs)
 	pr_err(" sp : " REGFMT " lr : " REGFMT "\n", regs->sp, regs->lr);
 #else
 	for (i = 0; i < 13; i++)
-		pr_err(" r%-2d: "REGFMT" r%-2d: "REGFMT
-		       " r%-2d: "REGFMT" r%-2d: "REGFMT"\n",
+		pr_err(" r%-2d: " REGFMT " r%-2d: " REGFMT " r%-2d: " REGFMT " r%-2d: " REGFMT "\n",
 		       i, regs->regs[i], i+14, regs->regs[i+14],
 		       i+27, regs->regs[i+27], i+40, regs->regs[i+40]);
 	pr_err(" r13: " REGFMT " tp : " REGFMT " sp : " REGFMT " lr : " REGFMT "\n",
@@ -417,8 +417,7 @@ static void __init setup_memory(void)
 			range.start = (start_pa + HPAGE_SIZE - 1) & HPAGE_MASK;
 			range.size -= (range.start - start_pa);
 			range.size &= HPAGE_MASK;
-			pr_err("Range not hugepage-aligned: %#llx..%#llx:"
-			       " now %#llx-%#llx\n",
+			pr_err("Range not hugepage-aligned: %#llx..%#llx: now %#llx-%#llx\n",
 			       start_pa, start_pa + orig_size,
 			       range.start, range.start + range.size);
 		}
@@ -437,8 +436,8 @@ static void __init setup_memory(void)
 		if (PFN_DOWN(range.size) > maxnodemem_pfn[i]) {
 			int max_size = maxnodemem_pfn[i];
 			if (max_size > 0) {
-				pr_err("Maxnodemem reduced node %d to"
-				       " %d pages\n", i, max_size);
+				pr_err("Maxnodemem reduced node %d to %d pages\n",
+				       i, max_size);
 				range.size = PFN_PHYS(max_size);
 			} else {
 				pr_err("Maxnodemem disabled node %d\n", i);
@@ -490,8 +489,8 @@ static void __init setup_memory(void)
 			NR_CPUS * (PFN_UP(per_cpu_size) >> PAGE_SHIFT);
 		if (end < pci_reserve_end_pfn + percpu_pages) {
 			end = pci_reserve_start_pfn;
-			pr_err("PCI mapping region reduced node %d to"
-			       " %ld pages\n", i, end - start);
+			pr_err("PCI mapping region reduced node %d to %ld pages\n",
+			       i, end - start);
 		}
 	}
 #endif
@@ -556,10 +555,9 @@ static void __init setup_memory(void)
 		MAXMEM_PFN : mappable_physpages;
 	highmem_pages = (long) (physpages - lowmem_pages);
 
-	pr_notice("%ldMB HIGHMEM available.\n",
+	pr_notice("%ldMB HIGHMEM available\n",
 		  pages_to_mb(highmem_pages > 0 ? highmem_pages : 0));
-	pr_notice("%ldMB LOWMEM available.\n",
-		  pages_to_mb(lowmem_pages));
+	pr_notice("%ldMB LOWMEM available\n", pages_to_mb(lowmem_pages));
 #else
 	/* Set max_low_pfn based on what node 0 can directly address. */
 	max_low_pfn = node_end_pfn[0];
@@ -573,7 +571,7 @@ static void __init setup_memory(void)
 		max_pfn = MAXMEM_PFN;
 		node_end_pfn[0] = MAXMEM_PFN;
 	} else {
-		pr_notice("%ldMB memory available.\n",
+		pr_notice("%ldMB memory available\n",
 			  pages_to_mb(node_end_pfn[0]));
 	}
 	for (i = 1; i < MAX_NUMNODES; ++i) {
@@ -589,8 +587,7 @@ static void __init setup_memory(void)
 		if (pages)
 			high_memory = pfn_to_kaddr(node_end_pfn[i]);
 	}
-	pr_notice("%ldMB memory available.\n",
-		  pages_to_mb(lowmem_pages));
+	pr_notice("%ldMB memory available\n", pages_to_mb(lowmem_pages));
 #endif
 #endif
 }
@@ -1540,8 +1537,7 @@ static void __init pcpu_fc_populate_pte(unsigned long addr)
 
 	BUG_ON(pgd_addr_invalid(addr));
 	if (addr < VMALLOC_START || addr >= VMALLOC_END)
-		panic("PCPU addr %#lx outside vmalloc range %#lx..%#lx;"
-		      " try increasing CONFIG_VMALLOC_RESERVE\n",
+		panic("PCPU addr %#lx outside vmalloc range %#lx..%#lx; try increasing CONFIG_VMALLOC_RESERVE\n",
 		      addr, VMALLOC_START, VMALLOC_END);
 
 	pgd = swapper_pg_dir + pgd_index(addr);
@@ -1596,8 +1592,8 @@ void __init setup_per_cpu_areas(void)
 			lowmem_va = (unsigned long)pfn_to_kaddr(pfn);
 			ptep = virt_to_kpte(lowmem_va);
 			if (pte_huge(*ptep)) {
-				printk(KERN_DEBUG "early shatter of huge page"
-				       " at %#lx\n", lowmem_va);
+				printk(KERN_DEBUG "early shatter of huge page at %#lx\n",
+				       lowmem_va);
 				shatter_pmd((pmd_t *)ptep);
 				ptep = virt_to_kpte(lowmem_va);
 				BUG_ON(pte_huge(*ptep));
@@ -337,7 +337,6 @@ static void dump_mem(void __user *address)
 	int i, j, k;
 	int found_readable_mem = 0;
 
-	pr_err("\n");
 	if (!access_ok(VERIFY_READ, address, 1)) {
 		pr_err("Not dumping at address 0x%lx (kernel address)\n",
 		       (unsigned long)address);
@@ -403,8 +402,7 @@ void trace_unhandled_signal(const char *type, struct pt_regs *regs,
 	case SIGFPE:
 	case SIGSEGV:
 	case SIGBUS:
-		pr_err("User crash: signal %d,"
-		       " trap %ld, address 0x%lx\n",
+		pr_err("User crash: signal %d, trap %ld, address 0x%lx\n",
 		       sig, regs->faultnum, address);
 		show_regs(regs);
 		dump_mem((void __user *)address);
@@ -222,11 +222,9 @@ static tilepro_bundle_bits rewrite_load_store_unaligned(
 	}
 
 	if (unaligned_printk || unaligned_fixup_count == 0) {
-		pr_info("Process %d/%s: PC %#lx: Fixup of"
-			" unaligned %s at %#lx.\n",
+		pr_info("Process %d/%s: PC %#lx: Fixup of unaligned %s at %#lx\n",
 			current->pid, current->comm, regs->pc,
-			(mem_op == MEMOP_LOAD ||
-			 mem_op == MEMOP_LOAD_POSTINCR) ?
+			mem_op == MEMOP_LOAD || mem_op == MEMOP_LOAD_POSTINCR ?
 			"load" : "store",
 			(unsigned long)addr);
 		if (!unaligned_printk) {
@@ -127,8 +127,7 @@ static __init int reset_init_affinity(void)
 {
 	long rc = sched_setaffinity(current->pid, &init_affinity);
 	if (rc != 0)
-		pr_warning("couldn't reset init affinity (%ld)\n",
-			   rc);
+		pr_warn("couldn't reset init affinity (%ld)\n", rc);
 	return 0;
 }
 late_initcall(reset_init_affinity);
@@ -174,7 +173,7 @@ static void start_secondary(void)
 	/* Indicate that we're ready to come up. */
 	/* Must not do this before we're ready to receive messages */
 	if (cpumask_test_and_set_cpu(cpuid, &cpu_started)) {
-		pr_warning("CPU#%d already started!\n", cpuid);
+		pr_warn("CPU#%d already started!\n", cpuid);
 		for (;;)
 			local_irq_enable();
 	}
@@ -387,9 +387,7 @@ void tile_show_stack(struct KBacktraceIterator *kbt, int headers)
 		 * then bust_spinlocks() spit out a space in front of us
 		 * and it will mess up our KERN_ERR.
 		 */
-		pr_err("\n");
-		pr_err("Starting stack dump of tid %d, pid %d (%s)"
-		       " on cpu %d at cycle %lld\n",
+		pr_err("Starting stack dump of tid %d, pid %d (%s) on cpu %d at cycle %lld\n",
 		       kbt->task->pid, kbt->task->tgid, kbt->task->comm,
 		       raw_smp_processor_id(), get_cycles());
 	}
@@ -411,8 +409,7 @@ void tile_show_stack(struct KBacktraceIterator *kbt, int headers)
 			i++, address, namebuf, (unsigned long)(kbt->it.sp));
 
 		if (i >= 100) {
-			pr_err("Stack dump truncated"
-			       " (%d frames)\n", i);
+			pr_err("Stack dump truncated (%d frames)\n", i);
 			break;
 		}
 	}
@@ -305,8 +305,8 @@ void __kprobes do_trap(struct pt_regs *regs, int fault_num,
 	case INT_ILL:
 		if (copy_from_user(&instr, (void __user *)regs->pc,
 				   sizeof(instr))) {
-			pr_err("Unreadable instruction for INT_ILL:"
-			       " %#lx\n", regs->pc);
+			pr_err("Unreadable instruction for INT_ILL: %#lx\n",
+			       regs->pc);
 			do_exit(SIGKILL);
 			return;
 		}
@@ -969,8 +969,7 @@ void jit_bundle_gen(struct pt_regs *regs, tilegx_bundle_bits bundle,
 		unaligned_fixup_count++;
 
 		if (unaligned_printk) {
-			pr_info("%s/%d. Unalign fixup for kernel access "
-				"to userspace %lx.",
+			pr_info("%s/%d - Unalign fixup for kernel access to userspace %lx\n",
 				current->comm, current->pid, regs->regs[ra]);
 		}
 
@@ -985,7 +984,7 @@ void jit_bundle_gen(struct pt_regs *regs, tilegx_bundle_bits bundle,
 			.si_addr = (unsigned char __user *)0
 		};
 		if (unaligned_printk)
-			pr_info("Unalign bundle: unexp @%llx, %llx",
+			pr_info("Unalign bundle: unexp @%llx, %llx\n",
 				(unsigned long long)regs->pc,
 				(unsigned long long)bundle);
 
@@ -1370,8 +1369,7 @@ void jit_bundle_gen(struct pt_regs *regs, tilegx_bundle_bits bundle,
 	frag.bundle = bundle;
 
 	if (unaligned_printk) {
-		pr_info("%s/%d, Unalign fixup: pc=%lx "
-			"bundle=%lx %d %d %d %d %d %d %d %d.",
+		pr_info("%s/%d, Unalign fixup: pc=%lx bundle=%lx %d %d %d %d %d %d %d %d\n",
 			current->comm, current->pid,
 			(unsigned long)frag.pc,
 			(unsigned long)frag.bundle,
@@ -1380,8 +1378,8 @@ void jit_bundle_gen(struct pt_regs *regs, tilegx_bundle_bits bundle,
 			(int)y1_lr, (int)y1_br, (int)x1_add);
 
 		for (k = 0; k < n; k += 2)
-			pr_info("[%d] %016llx %016llx", k,
-				(unsigned long long)frag.insn[k],
+			pr_info("[%d] %016llx %016llx\n",
+				k, (unsigned long long)frag.insn[k],
 				(unsigned long long)frag.insn[k+1]);
 	}
 
@@ -1402,7 +1400,7 @@ void jit_bundle_gen(struct pt_regs *regs, tilegx_bundle_bits bundle,
 			.si_addr = (void __user *)&jit_code_area[idx]
 		};
 
-		pr_warn("Unalign fixup: pid=%d %s jit_code_area=%llx",
+		pr_warn("Unalign fixup: pid=%d %s jit_code_area=%llx\n",
 			current->pid, current->comm,
 			(unsigned long long)&jit_code_area[idx]);
 
@@ -1485,7 +1483,7 @@ void do_unaligned(struct pt_regs *regs, int vecnum)
 		/* If exception came from kernel, try fix it up. */
 		if (fixup_exception(regs)) {
 			if (unaligned_printk)
-				pr_info("Unalign fixup: %d %llx @%llx",
+				pr_info("Unalign fixup: %d %llx @%llx\n",
 					(int)unaligned_fixup,
 					(unsigned long long)regs->ex1,
 					(unsigned long long)regs->pc);
@@ -1519,7 +1517,7 @@ void do_unaligned(struct pt_regs *regs, int vecnum)
 		};
 
 		if (unaligned_printk)
-			pr_info("Unalign fixup: %d %llx @%llx",
+			pr_info("Unalign fixup: %d %llx @%llx\n",
 				(int)unaligned_fixup,
 				(unsigned long long)regs->ex1,
 				(unsigned long long)regs->pc);
@@ -1579,14 +1577,14 @@ void do_unaligned(struct pt_regs *regs, int vecnum)
 						0);
 
 		if (IS_ERR((void __force *)user_page)) {
-			pr_err("Out of kernel pages trying do_mmap.\n");
+			pr_err("Out of kernel pages trying do_mmap\n");
 			return;
 		}
 
 		/* Save the address in the thread_info struct */
 		info->unalign_jit_base = user_page;
 		if (unaligned_printk)
-			pr_info("Unalign bundle: %d:%d, allocate page @%llx",
+			pr_info("Unalign bundle: %d:%d, allocate page @%llx\n",
 				raw_smp_processor_id(), current->pid,
 				(unsigned long long)user_page);
 	}
@@ -169,8 +169,7 @@ static void wait_for_migration(pte_t *pte)
 		while (pte_migrating(*pte)) {
 			barrier();
 			if (++retries > bound)
-				panic("Hit migrating PTE (%#llx) and"
-				      " page PFN %#lx still migrating",
+				panic("Hit migrating PTE (%#llx) and page PFN %#lx still migrating",
 				      pte->val, pte_pfn(*pte));
 		}
 	}
@@ -292,8 +291,7 @@ static int handle_page_fault(struct pt_regs *regs,
 	 */
 	stack_offset = stack_pointer & (THREAD_SIZE-1);
 	if (stack_offset < THREAD_SIZE / 8) {
-		pr_alert("Potential stack overrun: sp %#lx\n",
-			 stack_pointer);
+		pr_alert("Potential stack overrun: sp %#lx\n", stack_pointer);
 		show_regs(regs);
 		pr_alert("Killing current process %d/%s\n",
 			 tsk->pid, tsk->comm);
@@ -519,9 +517,8 @@ no_context:
 		pte_t *pte = lookup_address(address);
 
 		if (pte && pte_present(*pte) && !pte_exec_kernel(*pte))
-			pr_crit("kernel tried to execute"
-				" non-executable page - exploit attempt?"
-				" (uid: %d)\n", current->uid);
+			pr_crit("kernel tried to execute non-executable page - exploit attempt? (uid: %d)\n",
+				current->uid);
 	}
 #endif
 	if (address < PAGE_SIZE)
@@ -575,9 +572,10 @@ do_sigbus:
 #ifndef __tilegx__
 
 /* We must release ICS before panicking or we won't get anywhere. */
-#define ics_panic(fmt, ...) do { \
+#define ics_panic(fmt, ...) \
+	do { \
 	__insn_mtspr(SPR_INTERRUPT_CRITICAL_SECTION, 0); \
-	panic(fmt, __VA_ARGS__); \
+	panic(fmt, ##__VA_ARGS__); \
 } while (0)
 
 /*
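The ics_panic() hunk above standardizes the macro in two ways: the do { ... } while (0) wrapper moves onto its own line, and __VA_ARGS__ gains the GNU ## prefix, which swallows the preceding comma when the macro is invoked with no variadic arguments. A hypothetical userspace sketch of what ## buys (fputs() stands in for releasing ICS via __insn_mtspr()):

	#include <stdio.h>
	#include <stdlib.h>

	/* Sketch only, not the kernel macro. */
	#define ics_panic(fmt, ...)					\
		do {							\
			fputs("ICS released\n", stderr);		\
			fprintf(stderr, fmt "\n", ##__VA_ARGS__);	\
			exit(1);					\
		} while (0)

	int main(void)
	{
		/* No variadic arguments: with plain __VA_ARGS__ this would
		 * expand to fprintf(stderr, ..., ) -- a syntax error.
		 * The ## deletes the trailing comma, so this compiles. */
		ics_panic("bad ICS state");
	}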
@@ -615,8 +613,7 @@ struct intvec_state do_page_fault_ics(struct pt_regs *regs, int fault_num,
 		     fault_num != INT_DTLB_ACCESS)) {
 		unsigned long old_pc = regs->pc;
 		regs->pc = pc;
-		ics_panic("Bad ICS page fault args:"
-			  " old PC %#lx, fault %d/%d at %#lx\n",
+		ics_panic("Bad ICS page fault args: old PC %#lx, fault %d/%d at %#lx",
 			  old_pc, fault_num, write, address);
 	}
 
@@ -669,8 +666,8 @@ struct intvec_state do_page_fault_ics(struct pt_regs *regs, int fault_num,
 #endif
 		fixup = search_exception_tables(pc);
 		if (!fixup)
-			ics_panic("ICS atomic fault not in table:"
-				  " PC %#lx, fault %d", pc, fault_num);
+			ics_panic("ICS atomic fault not in table: PC %#lx, fault %d",
+				  pc, fault_num);
 		regs->pc = fixup->fixup;
 		regs->ex1 = PL_ICS_EX1(KERNEL_PL, 0);
 	}
@@ -152,12 +152,10 @@ void flush_remote(unsigned long cache_pfn, unsigned long cache_control,
 		cpumask_scnprintf(cache_buf, sizeof(cache_buf), &cache_cpumask_copy);
 		cpumask_scnprintf(tlb_buf, sizeof(tlb_buf), &tlb_cpumask_copy);
 
-		pr_err("hv_flush_remote(%#llx, %#lx, %p [%s],"
-		       " %#lx, %#lx, %#lx, %p [%s], %p, %d) = %d\n",
+		pr_err("hv_flush_remote(%#llx, %#lx, %p [%s], %#lx, %#lx, %#lx, %p [%s], %p, %d) = %d\n",
 		       cache_pa, cache_control, cache_cpumask, cache_buf,
 		       (unsigned long)tlb_va, tlb_length, tlb_pgsize,
-		       tlb_cpumask, tlb_buf,
-		       asids, asidcount, rc);
+		       tlb_cpumask, tlb_buf, asids, asidcount, rc);
 		panic("Unsafe to continue.");
 	}
 
@@ -284,22 +284,21 @@ static __init int __setup_hugepagesz(unsigned long ps)
 	int level, base_shift;
 
 	if ((1UL << log_ps) != ps || (log_ps & 1) != 0) {
-		pr_warn("Not enabling %ld byte huge pages;"
-			" must be a power of four.\n", ps);
+		pr_warn("Not enabling %ld byte huge pages; must be a power of four\n",
+			ps);
 		return -EINVAL;
 	}
 
 	if (ps > 64*1024*1024*1024UL) {
-		pr_warn("Not enabling %ld MB huge pages;"
-			" largest legal value is 64 GB .\n", ps >> 20);
+		pr_warn("Not enabling %ld MB huge pages; largest legal value is 64 GB\n",
+			ps >> 20);
 		return -EINVAL;
 	} else if (ps >= PUD_SIZE) {
 		static long hv_jpage_size;
 		if (hv_jpage_size == 0)
 			hv_jpage_size = hv_sysconf(HV_SYSCONF_PAGE_SIZE_JUMBO);
 		if (hv_jpage_size != PUD_SIZE) {
-			pr_warn("Not enabling >= %ld MB huge pages:"
-				" hypervisor reports size %ld\n",
+			pr_warn("Not enabling >= %ld MB huge pages: hypervisor reports size %ld\n",
 				PUD_SIZE >> 20, hv_jpage_size);
 			return -EINVAL;
 		}
@@ -320,14 +319,13 @@ static __init int __setup_hugepagesz(unsigned long ps)
 		int shift_val = log_ps - base_shift;
 		if (huge_shift[level] != 0) {
 			int old_shift = base_shift + huge_shift[level];
-			pr_warn("Not enabling %ld MB huge pages;"
-				" already have size %ld MB.\n",
+			pr_warn("Not enabling %ld MB huge pages; already have size %ld MB\n",
 				ps >> 20, (1UL << old_shift) >> 20);
 			return -EINVAL;
 		}
 		if (hv_set_pte_super_shift(level, shift_val) != 0) {
-			pr_warn("Not enabling %ld MB huge pages;"
-				" no hypervisor support.\n", ps >> 20);
+			pr_warn("Not enabling %ld MB huge pages; no hypervisor support\n",
+				ps >> 20);
 			return -EINVAL;
 		}
 		printk(KERN_DEBUG "Enabled %ld MB huge pages\n", ps >> 20);
@@ -357,8 +357,8 @@ static int __init setup_ktext(char *str)
 		cpulist_scnprintf(buf, sizeof(buf), &ktext_mask);
 		if (cpumask_weight(&ktext_mask) > 1) {
 			ktext_small = 1;
-			pr_info("ktext: using caching neighborhood %s "
-				"with small pages\n", buf);
+			pr_info("ktext: using caching neighborhood %s with small pages\n",
+				buf);
 		} else {
 			pr_info("ktext: caching on cpu %s with one huge page\n",
 				buf);
@@ -413,19 +413,16 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
 	int rc, i;
 
 	if (ktext_arg_seen && ktext_hash) {
-		pr_warning("warning: \"ktext\" boot argument ignored"
-			   " if \"kcache_hash\" sets up text hash-for-home\n");
+		pr_warn("warning: \"ktext\" boot argument ignored if \"kcache_hash\" sets up text hash-for-home\n");
 		ktext_small = 0;
 	}
 
 	if (kdata_arg_seen && kdata_hash) {
-		pr_warning("warning: \"kdata\" boot argument ignored"
-			   " if \"kcache_hash\" sets up data hash-for-home\n");
+		pr_warn("warning: \"kdata\" boot argument ignored if \"kcache_hash\" sets up data hash-for-home\n");
 	}
 
 	if (kdata_huge && !hash_default) {
-		pr_warning("warning: disabling \"kdata=huge\"; requires"
-			   " kcache_hash=all or =allbutstack\n");
+		pr_warn("warning: disabling \"kdata=huge\"; requires kcache_hash=all or =allbutstack\n");
 		kdata_huge = 0;
 	}
 
@@ -470,8 +467,8 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
 				pte[pte_ofs] = pfn_pte(pfn, prot);
 			} else {
 				if (kdata_huge)
-					printk(KERN_DEBUG "pre-shattered huge"
-					       " page at %#lx\n", address);
+					printk(KERN_DEBUG "pre-shattered huge page at %#lx\n",
+					       address);
 				for (pte_ofs = 0; pte_ofs < PTRS_PER_PTE;
 				     pfn++, pte_ofs++, address += PAGE_SIZE) {
 					pgprot_t prot = init_pgprot(address);
@@ -501,7 +498,7 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
 			pr_info("ktext: not using unavailable cpus %s\n", buf);
 		}
 		if (cpumask_empty(&ktext_mask)) {
-			pr_warning("ktext: no valid cpus; caching on %d.\n",
+			pr_warn("ktext: no valid cpus; caching on %d\n",
 				   smp_processor_id());
 			cpumask_copy(&ktext_mask,
 				     cpumask_of(smp_processor_id()));
@@ -798,11 +795,9 @@ void __init mem_init(void)
 #ifdef CONFIG_HIGHMEM
 	/* check that fixmap and pkmap do not overlap */
 	if (PKMAP_ADDR(LAST_PKMAP-1) >= FIXADDR_START) {
-		pr_err("fixmap and kmap areas overlap"
-		       " - this will crash\n");
+		pr_err("fixmap and kmap areas overlap - this will crash\n");
 		pr_err("pkstart: %lxh pkend: %lxh fixstart %lxh\n",
-		       PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP-1),
-		       FIXADDR_START);
+		       PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP-1), FIXADDR_START);
 		BUG();
 	}
 #endif
@@ -926,8 +921,7 @@ static void free_init_pages(char *what, unsigned long begin, unsigned long end)
 	unsigned long addr = (unsigned long) begin;
 
 	if (kdata_huge && !initfree) {
-		pr_warning("Warning: ignoring initfree=0:"
-			   " incompatible with kdata=huge\n");
+		pr_warn("Warning: ignoring initfree=0: incompatible with kdata=huge\n");
 		initfree = 1;
 	}
 	end = (end + PAGE_SIZE - 1) & PAGE_MASK;
@@ -44,9 +44,7 @@ void show_mem(unsigned int filter)
 {
 	struct zone *zone;
 
-	pr_err("Active:%lu inactive:%lu dirty:%lu writeback:%lu unstable:%lu"
-	       " free:%lu\n slab:%lu mapped:%lu pagetables:%lu bounce:%lu"
-	       " pagecache:%lu swap:%lu\n",
+	pr_err("Active:%lu inactive:%lu dirty:%lu writeback:%lu unstable:%lu free:%lu\n slab:%lu mapped:%lu pagetables:%lu bounce:%lu pagecache:%lu swap:%lu\n",
 	       (global_page_state(NR_ACTIVE_ANON) +
 		global_page_state(NR_ACTIVE_FILE)),
 	       (global_page_state(NR_INACTIVE_ANON) +