[SCSI] ipr: increase the dump size for 64 bit adapters

Currently the size of the dump generated by the driver is limited
to 4MB, which is insufficient to gather much useful data from the
new 64 bit adapters.

This patch makes the changes needed to increase the dump limit for
the 64 bit adapters to 32MB, with room to grow further in the
future, while keeping the current limit for the legacy 32 bit
adapters.

Signed-off-by: Kleber Sacilotto de Souza <klebers@linux.vnet.ibm.com>
Acked-by: Brian King <brking@linux.vnet.ibm.com>
Signed-off-by: James Bottomley <James.Bottomley@suse.de>
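For reference, the core of the change is selecting the dump size limit per
adapter format. Below is a minimal user-space sketch of that selection, under
stated assumptions: the IPR_FMT2/IPR_FMT3 size and page-count values mirror the
new defines in ipr.h, while PAGE_SZ (4 KB pages), alloc_dump_index() and the
calloc() stand-in for the driver's vmalloc() are illustrative only, not part of
the patch.

/*
 * Sketch: pick the per-format dump limit and size the page-pointer
 * index accordingly, as the patch does for sis64 vs. legacy adapters.
 */
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SZ                     4096UL                /* assumed 4 KB pages */
#define IPR_FMT2_MAX_IOA_DUMP_SIZE  (4UL * 1024 * 1024)   /* legacy 32 bit adapters */
#define IPR_FMT3_MAX_IOA_DUMP_SIZE  (32UL * 1024 * 1024)  /* 64 bit (SIS64) adapters */
#define IPR_FMT2_MAX_NUM_DUMP_PAGES ((IPR_FMT2_MAX_IOA_DUMP_SIZE / PAGE_SZ) + 1)
#define IPR_FMT3_MAX_NUM_DUMP_PAGES ((IPR_FMT3_MAX_IOA_DUMP_SIZE / PAGE_SZ) + 1)

/* Choose the limit for one adapter and allocate its page-pointer index. */
static void **alloc_dump_index(int sis64, unsigned long *max_dump_size)
{
        unsigned long pages;

        if (sis64) {
                *max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
                pages = IPR_FMT3_MAX_NUM_DUMP_PAGES;
        } else {
                *max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
                pages = IPR_FMT2_MAX_NUM_DUMP_PAGES;
        }

        /* The driver vmalloc()s this table; calloc() stands in here. */
        return calloc(pages, sizeof(void *));
}

int main(void)
{
        unsigned long size;
        void **index;
        int sis64;

        for (sis64 = 0; sis64 <= 1; sis64++) {
                index = alloc_dump_index(sis64, &size);
                if (!index)
                        return 1;
                printf("sis64=%d: dump limit %lu MB, %lu page pointers\n",
                       sis64, size >> 20, size / PAGE_SZ + 1);
                free(index);
        }
        return 0;
}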

drivers/scsi/ipr.c

@@ -60,6 +60,7 @@
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/pci.h>
@@ -2717,13 +2718,18 @@ static int ipr_sdt_copy(struct ipr_ioa_cfg *ioa_cfg,
unsigned long pci_address, u32 length)
{
int bytes_copied = 0;
int cur_len, rc, rem_len, rem_page_len;
int cur_len, rc, rem_len, rem_page_len, max_dump_size;
__be32 *page;
unsigned long lock_flags = 0;
struct ipr_ioa_dump *ioa_dump = &ioa_cfg->dump->ioa_dump;
if (ioa_cfg->sis64)
max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
else
max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
while (bytes_copied < length &&
(ioa_dump->hdr.len + bytes_copied) < IPR_MAX_IOA_DUMP_SIZE) {
(ioa_dump->hdr.len + bytes_copied) < max_dump_size) {
if (ioa_dump->page_offset >= PAGE_SIZE ||
ioa_dump->page_offset == 0) {
page = (__be32 *)__get_free_page(GFP_ATOMIC);
@@ -2885,8 +2891,8 @@ static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
unsigned long lock_flags = 0;
struct ipr_driver_dump *driver_dump = &dump->driver_dump;
struct ipr_ioa_dump *ioa_dump = &dump->ioa_dump;
u32 num_entries, start_off, end_off;
u32 bytes_to_copy, bytes_copied, rc;
u32 num_entries, max_num_entries, start_off, end_off;
u32 max_dump_size, bytes_to_copy, bytes_copied, rc;
struct ipr_sdt *sdt;
int valid = 1;
int i;
@@ -2947,8 +2953,18 @@ static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
on entries in this table */
sdt = &ioa_dump->sdt;
if (ioa_cfg->sis64) {
max_num_entries = IPR_FMT3_NUM_SDT_ENTRIES;
max_dump_size = IPR_FMT3_MAX_IOA_DUMP_SIZE;
} else {
max_num_entries = IPR_FMT2_NUM_SDT_ENTRIES;
max_dump_size = IPR_FMT2_MAX_IOA_DUMP_SIZE;
}
bytes_to_copy = offsetof(struct ipr_sdt, entry) +
(max_num_entries * sizeof(struct ipr_sdt_entry));
rc = ipr_get_ldump_data_section(ioa_cfg, start_addr, (__be32 *)sdt,
sizeof(struct ipr_sdt) / sizeof(__be32));
bytes_to_copy / sizeof(__be32));
/* Smart Dump table is ready to use and the first entry is valid */
if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
@@ -2964,13 +2980,20 @@ static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
num_entries = be32_to_cpu(sdt->hdr.num_entries_used);
if (num_entries > IPR_NUM_SDT_ENTRIES)
num_entries = IPR_NUM_SDT_ENTRIES;
if (num_entries > max_num_entries)
num_entries = max_num_entries;
/* Update dump length to the actual data to be copied */
dump->driver_dump.hdr.len += sizeof(struct ipr_sdt_header);
if (ioa_cfg->sis64)
dump->driver_dump.hdr.len += num_entries * sizeof(struct ipr_sdt_entry);
else
dump->driver_dump.hdr.len += max_num_entries * sizeof(struct ipr_sdt_entry);
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
for (i = 0; i < num_entries; i++) {
if (ioa_dump->hdr.len > IPR_MAX_IOA_DUMP_SIZE) {
if (ioa_dump->hdr.len > max_dump_size) {
driver_dump->hdr.status = IPR_DUMP_STATUS_QUAL_SUCCESS;
break;
}
@@ -2989,7 +3012,7 @@ static void ipr_get_ioa_dump(struct ipr_ioa_cfg *ioa_cfg, struct ipr_dump *dump)
valid = 0;
}
if (valid) {
if (bytes_to_copy > IPR_MAX_IOA_DUMP_SIZE) {
if (bytes_to_copy > max_dump_size) {
sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
continue;
}
@@ -3044,6 +3067,7 @@ static void ipr_release_dump(struct kref *kref)
for (i = 0; i < dump->ioa_dump.next_page_index; i++)
free_page((unsigned long) dump->ioa_dump.ioa_data[i]);
vfree(dump->ioa_dump.ioa_data);
kfree(dump);
LEAVE;
}
@@ -3835,7 +3859,7 @@ static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
struct ipr_dump *dump;
unsigned long lock_flags = 0;
char *src;
int len;
int len, sdt_end;
size_t rc = count;
if (!capable(CAP_SYS_ADMIN))
@@ -3875,9 +3899,17 @@ static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
off -= sizeof(dump->driver_dump);
if (count && off < offsetof(struct ipr_ioa_dump, ioa_data)) {
if (off + count > offsetof(struct ipr_ioa_dump, ioa_data))
len = offsetof(struct ipr_ioa_dump, ioa_data) - off;
if (ioa_cfg->sis64)
sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
(be32_to_cpu(dump->ioa_dump.sdt.hdr.num_entries_used) *
sizeof(struct ipr_sdt_entry));
else
sdt_end = offsetof(struct ipr_ioa_dump, sdt.entry) +
(IPR_FMT2_NUM_SDT_ENTRIES * sizeof(struct ipr_sdt_entry));
if (count && off < sdt_end) {
if (off + count > sdt_end)
len = sdt_end - off;
else
len = count;
src = (u8 *)&dump->ioa_dump + off;
@@ -3887,7 +3919,7 @@ static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
count -= len;
}
off -= offsetof(struct ipr_ioa_dump, ioa_data);
off -= sdt_end;
while (count) {
if ((off & PAGE_MASK) != ((off + count) & PAGE_MASK))
@@ -3916,6 +3948,7 @@ static ssize_t ipr_read_dump(struct file *filp, struct kobject *kobj,
static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
{
struct ipr_dump *dump;
__be32 **ioa_data;
unsigned long lock_flags = 0;
dump = kzalloc(sizeof(struct ipr_dump), GFP_KERNEL);
@@ -3925,6 +3958,19 @@ static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
return -ENOMEM;
}
if (ioa_cfg->sis64)
ioa_data = vmalloc(IPR_FMT3_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
else
ioa_data = vmalloc(IPR_FMT2_MAX_NUM_DUMP_PAGES * sizeof(__be32 *));
if (!ioa_data) {
ipr_err("Dump memory allocation failed\n");
kfree(dump);
return -ENOMEM;
}
dump->ioa_dump.ioa_data = ioa_data;
kref_init(&dump->kref);
dump->ioa_cfg = ioa_cfg;
@@ -3932,6 +3978,7 @@ static int ipr_alloc_dump(struct ipr_ioa_cfg *ioa_cfg)
if (INACTIVE != ioa_cfg->sdt_state) {
spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
vfree(dump->ioa_dump.ioa_data);
kfree(dump);
return 0;
}
@@ -7566,7 +7613,10 @@ static int ipr_reset_restore_cfg_space(struct ipr_cmnd *ipr_cmd)
ipr_cmd->job_step = ipr_reset_enable_ioa;
if (GET_DUMP == ioa_cfg->sdt_state) {
ipr_reset_start_timer(ipr_cmd, IPR_DUMP_TIMEOUT);
if (ioa_cfg->sis64)
ipr_reset_start_timer(ipr_cmd, IPR_SIS64_DUMP_TIMEOUT);
else
ipr_reset_start_timer(ipr_cmd, IPR_SIS32_DUMP_TIMEOUT);
ipr_cmd->job_step = ipr_reset_wait_for_dump;
schedule_work(&ioa_cfg->work_q);
return IPR_RC_JOB_RETURN;

drivers/scsi/ipr.h

@@ -217,7 +217,8 @@
#define IPR_CHECK_FOR_RESET_TIMEOUT (HZ / 10)
#define IPR_WAIT_FOR_BIST_TIMEOUT (2 * HZ)
#define IPR_PCI_RESET_TIMEOUT (HZ / 2)
#define IPR_DUMP_TIMEOUT (15 * HZ)
#define IPR_SIS32_DUMP_TIMEOUT (15 * HZ)
#define IPR_SIS64_DUMP_TIMEOUT (40 * HZ)
#define IPR_DUMP_DELAY_SECONDS 4
#define IPR_DUMP_DELAY_TIMEOUT (IPR_DUMP_DELAY_SECONDS * HZ)
@@ -285,9 +286,12 @@ IPR_PCII_NO_HOST_RRQ | IPR_PCII_IOARRIN_LOST | IPR_PCII_MMIO_ERROR)
/*
* Dump literals
*/
#define IPR_MAX_IOA_DUMP_SIZE (4 * 1024 * 1024)
#define IPR_NUM_SDT_ENTRIES 511
#define IPR_MAX_NUM_DUMP_PAGES ((IPR_MAX_IOA_DUMP_SIZE / PAGE_SIZE) + 1)
#define IPR_FMT2_MAX_IOA_DUMP_SIZE (4 * 1024 * 1024)
#define IPR_FMT3_MAX_IOA_DUMP_SIZE (32 * 1024 * 1024)
#define IPR_FMT2_NUM_SDT_ENTRIES 511
#define IPR_FMT3_NUM_SDT_ENTRIES 0xFFF
#define IPR_FMT2_MAX_NUM_DUMP_PAGES ((IPR_FMT2_MAX_IOA_DUMP_SIZE / PAGE_SIZE) + 1)
#define IPR_FMT3_MAX_NUM_DUMP_PAGES ((IPR_FMT3_MAX_IOA_DUMP_SIZE / PAGE_SIZE) + 1)
/*
* Misc literals
@@ -1164,7 +1168,7 @@ struct ipr_sdt_header {
struct ipr_sdt {
struct ipr_sdt_header hdr;
struct ipr_sdt_entry entry[IPR_NUM_SDT_ENTRIES];
struct ipr_sdt_entry entry[IPR_FMT3_NUM_SDT_ENTRIES];
}__attribute__((packed, aligned (4)));
struct ipr_uc_sdt {
@@ -1608,7 +1612,7 @@ struct ipr_driver_dump {
struct ipr_ioa_dump {
struct ipr_dump_entry_header hdr;
struct ipr_sdt sdt;
__be32 *ioa_data[IPR_MAX_NUM_DUMP_PAGES];
__be32 **ioa_data;
u32 reserved;
u32 next_page_index;
u32 page_offset;
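For scale: assuming the common 4 KB PAGE_SIZE, IPR_FMT3_MAX_NUM_DUMP_PAGES
works out to (32 * 1024 * 1024 / 4096) + 1 = 8193 page pointers, roughly 64 KB
of index on a 64-bit kernel, against 1025 pointers (about 8 KB) for the legacy
4 MB limit. That is presumably why ioa_data becomes a bare pointer here and the
index is vmalloc()'d to the appropriate size in ipr_alloc_dump() rather than
being embedded in struct ipr_ioa_dump.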