[S390] subchannel lock conversion.

Convert the subchannel lock to a pointer to a lock.  Needed for the dynamic
subchannel mapping patch.

Signed-off-by: Cornelia Huck <cornelia.huck@de.ibm.com>
Cc: Greg KH <greg@kroah.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Author: Cornelia Huck, 2006-12-08 15:54:26 +01:00 (committed by Martin Schwidefsky)
Commit: 2ec2298412 (parent: 7674da77cb)
6 changed files with 98 additions and 63 deletions
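In short, struct subchannel now carries a pointer to its lock instead of an embedded spinlock_t: the console subchannel points it at a statically allocated lock, while every other subchannel gets a dynamically allocated lock that is freed together with the subchannel. A condensed sketch of that pattern follows (paraphrased from the hunks below, not the literal kernel code; the helper names subchannel_create_lock/subchannel_destroy_lock are illustrative only — the patch itself adds cio_create_sch_lock() and cio_get_console_lock()):

/* Illustrative sketch only -- simplified from the patch below. */
#include <linux/slab.h>
#include <linux/spinlock.h>

struct subchannel {
    spinlock_t *lock;               /* was: spinlock_t lock; */
    /* ... */
};

static DEFINE_SPINLOCK(console_lock);   /* static lock for the console subchannel */

static int subchannel_create_lock(struct subchannel *sch, int is_console)
{
    if (is_console) {
        sch->lock = &console_lock;  /* shared static lock, never freed */
        return 0;
    }
    sch->lock = kmalloc(sizeof(*sch->lock), GFP_KERNEL);
    if (!sch->lock)
        return -ENOMEM;
    spin_lock_init(sch->lock);
    return 0;
}

static void subchannel_destroy_lock(struct subchannel *sch, int is_console)
{
    if (!is_console)
        kfree(sch->lock);           /* dynamic locks die with the subchannel */
    sch->lock = NULL;
}

Call sites then change mechanically from spin_lock_irq(&sch->lock) to spin_lock_irq(sch->lock), which accounts for the bulk of the diff.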

@@ -183,7 +183,7 @@ css_get_ssd_info(struct subchannel *sch)
     page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
     if (!page)
         return -ENOMEM;
-    spin_lock_irq(&sch->lock);
+    spin_lock_irq(sch->lock);
     ret = chsc_get_sch_desc_irq(sch, page);
     if (ret) {
         static int cio_chsc_err_msg;
@@ -197,7 +197,7 @@ css_get_ssd_info(struct subchannel *sch)
             cio_chsc_err_msg = 1;
         }
     }
-    spin_unlock_irq(&sch->lock);
+    spin_unlock_irq(sch->lock);
     free_page((unsigned long)page);
     if (!ret) {
         int j, chpid, mask;
@@ -233,7 +233,7 @@ s390_subchannel_remove_chpid(struct device *dev, void *data)
     if (j >= 8)
         return 0;
-    spin_lock_irq(&sch->lock);
+    spin_lock_irq(sch->lock);
     stsch(sch->schid, &schib);
     if (!schib.pmcw.dnv)
@@ -265,10 +265,10 @@ s390_subchannel_remove_chpid(struct device *dev, void *data)
     else if (sch->lpm == mask)
         goto out_unreg;
 out_unlock:
-    spin_unlock_irq(&sch->lock);
+    spin_unlock_irq(sch->lock);
     return 0;
 out_unreg:
-    spin_unlock_irq(&sch->lock);
+    spin_unlock_irq(sch->lock);
     sch->lpm = 0;
     if (css_enqueue_subchannel_slow(sch->schid)) {
         css_clear_subchannel_slow_list();
@@ -378,12 +378,12 @@ __s390_process_res_acc(struct subchannel_id schid, void *data)
         /* Check if a subchannel is newly available. */
         return s390_process_res_acc_new_sch(schid);
-    spin_lock_irq(&sch->lock);
+    spin_lock_irq(sch->lock);
     chp_mask = s390_process_res_acc_sch(res_data, sch);
     if (chp_mask == 0) {
-        spin_unlock_irq(&sch->lock);
+        spin_unlock_irq(sch->lock);
         put_device(&sch->dev);
         return 0;
     }
@@ -397,7 +397,7 @@ __s390_process_res_acc(struct subchannel_id schid, void *data)
     else if (sch->driver && sch->driver->verify)
         sch->driver->verify(&sch->dev);
-    spin_unlock_irq(&sch->lock);
+    spin_unlock_irq(sch->lock);
     put_device(&sch->dev);
     return 0;
 }
@@ -635,21 +635,21 @@ __chp_add(struct subchannel_id schid, void *data)
     if (!sch)
         /* Check if the subchannel is now available. */
         return __chp_add_new_sch(schid);
-    spin_lock_irq(&sch->lock);
+    spin_lock_irq(sch->lock);
     for (i=0; i<8; i++) {
         mask = 0x80 >> i;
         if ((sch->schib.pmcw.pim & mask) &&
             (sch->schib.pmcw.chpid[i] == chp->id)) {
             if (stsch(sch->schid, &sch->schib) != 0) {
                 /* Endgame. */
-                spin_unlock_irq(&sch->lock);
+                spin_unlock_irq(sch->lock);
                 return -ENXIO;
             }
             break;
         }
     }
     if (i==8) {
-        spin_unlock_irq(&sch->lock);
+        spin_unlock_irq(sch->lock);
         return 0;
     }
     sch->lpm = ((sch->schib.pmcw.pim &
@@ -660,7 +660,7 @@ __chp_add(struct subchannel_id schid, void *data)
     if (sch->driver && sch->driver->verify)
         sch->driver->verify(&sch->dev);
-    spin_unlock_irq(&sch->lock);
+    spin_unlock_irq(sch->lock);
     put_device(&sch->dev);
     return 0;
 }
@@ -750,7 +750,7 @@ __s390_subchannel_vary_chpid(struct subchannel *sch, __u8 chpid, int on)
     if (!sch->ssd_info.valid)
         return;
-    spin_lock_irqsave(&sch->lock, flags);
+    spin_lock_irqsave(sch->lock, flags);
     old_lpm = sch->lpm;
     for (chp = 0; chp < 8; chp++) {
         if (sch->ssd_info.chpid[chp] != chpid)
@@ -785,7 +785,7 @@ __s390_subchannel_vary_chpid(struct subchannel *sch, __u8 chpid, int on)
             sch->driver->verify(&sch->dev);
         break;
     }
-    spin_unlock_irqrestore(&sch->lock, flags);
+    spin_unlock_irqrestore(sch->lock, flags);
 }
 static int

@@ -143,11 +143,11 @@ cio_tpi(void)
         return 1;
     local_bh_disable();
     irq_enter ();
-    spin_lock(&sch->lock);
+    spin_lock(sch->lock);
     memcpy (&sch->schib.scsw, &irb->scsw, sizeof (struct scsw));
     if (sch->driver && sch->driver->irq)
         sch->driver->irq(&sch->dev);
-    spin_unlock(&sch->lock);
+    spin_unlock(sch->lock);
     irq_exit ();
     _local_bh_enable();
     return 1;
@@ -496,6 +496,15 @@ cio_disable_subchannel (struct subchannel *sch)
     return ret;
 }
+static int cio_create_sch_lock(struct subchannel *sch)
+{
+    sch->lock = kmalloc(sizeof(spinlock_t), GFP_KERNEL);
+    if (!sch->lock)
+        return -ENOMEM;
+    spin_lock_init(sch->lock);
+    return 0;
+}
 /*
  * cio_validate_subchannel()
  *
@@ -513,6 +522,7 @@ cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid)
 {
     char dbf_txt[15];
     int ccode;
+    int err;
     sprintf (dbf_txt, "valsch%x", schid.sch_no);
     CIO_TRACE_EVENT (4, dbf_txt);
@@ -520,9 +530,15 @@ cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid)
     /* Nuke all fields. */
     memset(sch, 0, sizeof(struct subchannel));
-    spin_lock_init(&sch->lock);
+    sch->schid = schid;
+    if (cio_is_console(schid)) {
+        sch->lock = cio_get_console_lock();
+    } else {
+        err = cio_create_sch_lock(sch);
+        if (err)
+            goto out;
+    }
     mutex_init(&sch->reg_mutex);
     /* Set a name for the subchannel */
     snprintf (sch->dev.bus_id, BUS_ID_SIZE, "0.%x.%04x", schid.ssid,
               schid.sch_no);
@@ -534,10 +550,10 @@ cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid)
      * is not valid.
      */
     ccode = stsch_err (schid, &sch->schib);
-    if (ccode)
-        return (ccode == 3) ? -ENXIO : ccode;
-    sch->schid = schid;
+    if (ccode) {
+        err = (ccode == 3) ? -ENXIO : ccode;
+        goto out;
+    }
     /* Copy subchannel type from path management control word. */
     sch->st = sch->schib.pmcw.st;
@@ -550,14 +566,16 @@ cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid)
                       "non-I/O subchannel type %04X\n",
                       sch->schid.ssid, sch->schid.sch_no, sch->st);
         /* We stop here for non-io subchannels. */
-        return sch->st;
+        err = sch->st;
+        goto out;
     }
     /* Initialization for io subchannels. */
-    if (!sch->schib.pmcw.dnv)
+    if (!sch->schib.pmcw.dnv) {
         /* io subchannel but device number is invalid. */
-        return -ENODEV;
+        err = -ENODEV;
+        goto out;
+    }
     /* Devno is valid. */
     if (is_blacklisted (sch->schid.ssid, sch->schib.pmcw.dev)) {
         /*
@@ -567,7 +585,8 @@ cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid)
         CIO_MSG_EVENT(0, "Blacklisted device detected "
                       "at devno %04X, subchannel set %x\n",
                       sch->schib.pmcw.dev, sch->schid.ssid);
-        return -ENODEV;
+        err = -ENODEV;
+        goto out;
     }
     sch->opm = 0xff;
     if (!cio_is_console(sch->schid))
@@ -595,6 +614,11 @@ cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid)
     if ((sch->lpm & (sch->lpm - 1)) != 0)
         sch->schib.pmcw.mp = 1; /* multipath mode */
     return 0;
+out:
+    if (!cio_is_console(schid))
+        kfree(sch->lock);
+    sch->lock = NULL;
+    return err;
 }
 /*
@@ -637,7 +661,7 @@ do_IRQ (struct pt_regs *regs)
         }
         sch = (struct subchannel *)(unsigned long)tpi_info->intparm;
         if (sch)
-            spin_lock(&sch->lock);
+            spin_lock(sch->lock);
         /* Store interrupt response block to lowcore. */
         if (tsch (tpi_info->schid, irb) == 0 && sch) {
             /* Keep subchannel information word up to date. */
@@ -648,7 +672,7 @@ do_IRQ (struct pt_regs *regs)
                 sch->driver->irq(&sch->dev);
         }
         if (sch)
-            spin_unlock(&sch->lock);
+            spin_unlock(sch->lock);
         /*
          * Are more interrupts pending?
          * If so, the tpi instruction will update the lowcore
@@ -687,10 +711,10 @@ wait_cons_dev (void)
     __ctl_load (cr6, 6, 6);
     do {
-        spin_unlock(&console_subchannel.lock);
+        spin_unlock(console_subchannel.lock);
         if (!cio_tpi())
             cpu_relax();
-        spin_lock(&console_subchannel.lock);
+        spin_lock(console_subchannel.lock);
     } while (console_subchannel.schib.scsw.actl != 0);
     /*
      * restore previous isc value

@@ -87,7 +87,7 @@ struct orb {
 /* subchannel data structure used by I/O subroutines */
 struct subchannel {
     struct subchannel_id schid;
-    spinlock_t lock;    /* subchannel lock */
+    spinlock_t *lock;   /* subchannel lock */
     struct mutex reg_mutex;
     enum {
         SUBCHANNEL_TYPE_IO = 0,
@@ -137,9 +137,11 @@ extern struct subchannel *cio_probe_console(void);
 extern void cio_release_console(void);
 extern int cio_is_console(struct subchannel_id);
 extern struct subchannel *cio_get_console_subchannel(void);
+extern spinlock_t * cio_get_console_lock(void);
 #else
 #define cio_is_console(schid) 0
 #define cio_get_console_subchannel() NULL
+#define cio_get_console_lock() NULL;
 #endif
 extern int cio_show_msg;

@@ -91,9 +91,9 @@ css_free_subchannel(struct subchannel *sch)
         /* Reset intparm to zeroes. */
         sch->schib.pmcw.intparm = 0;
         cio_modify(sch);
+        kfree(sch->lock);
         kfree(sch);
     }
 }
@@ -102,8 +102,10 @@ css_subchannel_release(struct device *dev)
     struct subchannel *sch;
     sch = to_subchannel(dev);
-    if (!cio_is_console(sch->schid))
+    if (!cio_is_console(sch->schid)) {
+        kfree(sch->lock);
         kfree(sch);
+    }
 }
 extern int css_get_ssd_info(struct subchannel *sch);
@@ -206,18 +208,18 @@ static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
     unsigned long flags;
     enum { NONE, UNREGISTER, UNREGISTER_PROBE, REPROBE } action;
-    spin_lock_irqsave(&sch->lock, flags);
+    spin_lock_irqsave(sch->lock, flags);
     disc = device_is_disconnected(sch);
     if (disc && slow) {
         /* Disconnected devices are evaluated directly only.*/
-        spin_unlock_irqrestore(&sch->lock, flags);
+        spin_unlock_irqrestore(sch->lock, flags);
         return 0;
     }
     /* No interrupt after machine check - kill pending timers. */
     device_kill_pending_timer(sch);
     if (!disc && !slow) {
         /* Non-disconnected devices are evaluated on the slow path. */
-        spin_unlock_irqrestore(&sch->lock, flags);
+        spin_unlock_irqrestore(sch->lock, flags);
         return -EAGAIN;
     }
     event = css_get_subchannel_status(sch);
@@ -242,9 +244,9 @@ static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
         /* Ask driver what to do with device. */
         action = UNREGISTER;
         if (sch->driver && sch->driver->notify) {
-            spin_unlock_irqrestore(&sch->lock, flags);
+            spin_unlock_irqrestore(sch->lock, flags);
             ret = sch->driver->notify(&sch->dev, event);
-            spin_lock_irqsave(&sch->lock, flags);
+            spin_lock_irqsave(sch->lock, flags);
             if (ret)
                 action = NONE;
         }
@@ -269,9 +271,9 @@ static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
     case UNREGISTER:
     case UNREGISTER_PROBE:
         /* Unregister device (will use subchannel lock). */
-        spin_unlock_irqrestore(&sch->lock, flags);
+        spin_unlock_irqrestore(sch->lock, flags);
         css_sch_device_unregister(sch);
-        spin_lock_irqsave(&sch->lock, flags);
+        spin_lock_irqsave(sch->lock, flags);
         /* Reset intparm to zeroes. */
         sch->schib.pmcw.intparm = 0;
@@ -283,7 +285,7 @@ static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
     default:
         break;
     }
-    spin_unlock_irqrestore(&sch->lock, flags);
+    spin_unlock_irqrestore(sch->lock, flags);
     /* Probe if necessary. */
     if (action == UNREGISTER_PROBE)
         ret = css_probe_device(sch->schid);

@@ -774,9 +774,9 @@ io_subchannel_register(struct work_struct *work)
         printk (KERN_WARNING "%s: could not register %s\n",
                 __func__, cdev->dev.bus_id);
         put_device(&cdev->dev);
-        spin_lock_irqsave(&sch->lock, flags);
+        spin_lock_irqsave(sch->lock, flags);
         sch->dev.driver_data = NULL;
-        spin_unlock_irqrestore(&sch->lock, flags);
+        spin_unlock_irqrestore(sch->lock, flags);
         kfree (cdev->private);
         kfree (cdev);
         put_device(&sch->dev);
@@ -860,7 +860,7 @@ io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch)
     sch->dev.driver_data = cdev;
     sch->driver = &io_subchannel_driver;
-    cdev->ccwlock = &sch->lock;
+    cdev->ccwlock = sch->lock;
     /* Init private data. */
     priv = cdev->private;
@@ -880,9 +880,9 @@ io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch)
     atomic_inc(&ccw_device_init_count);
     /* Start async. device sensing. */
-    spin_lock_irq(&sch->lock);
+    spin_lock_irq(sch->lock);
     rc = ccw_device_recognition(cdev);
-    spin_unlock_irq(&sch->lock);
+    spin_unlock_irq(sch->lock);
     if (rc) {
         if (atomic_dec_and_test(&ccw_device_init_count))
             wake_up(&ccw_device_init_wq);
@@ -924,9 +924,9 @@ io_subchannel_probe (struct subchannel *sch)
     rc = io_subchannel_recog(cdev, sch);
     if (rc) {
-        spin_lock_irqsave(&sch->lock, flags);
+        spin_lock_irqsave(sch->lock, flags);
         sch->dev.driver_data = NULL;
-        spin_unlock_irqrestore(&sch->lock, flags);
+        spin_unlock_irqrestore(sch->lock, flags);
         if (cdev->dev.release)
             cdev->dev.release(&cdev->dev);
     }
@@ -1036,6 +1036,13 @@ static struct ccw_device console_cdev;
 static struct ccw_device_private console_private;
 static int console_cdev_in_use;
+static DEFINE_SPINLOCK(ccw_console_lock);
+spinlock_t * cio_get_console_lock(void)
+{
+    return &ccw_console_lock;
+}
 static int
 ccw_device_console_enable (struct ccw_device *cdev, struct subchannel *sch)
 {

@@ -316,9 +316,9 @@ __ccw_device_retry_loop(struct ccw_device *cdev, struct ccw1 *ccw, long magic, _
         ccw_device_set_timeout(cdev, 0);
         if (ret == -EBUSY) {
             /* Try again later. */
-            spin_unlock_irq(&sch->lock);
+            spin_unlock_irq(sch->lock);
             msleep(10);
-            spin_lock_irq(&sch->lock);
+            spin_lock_irq(sch->lock);
             continue;
         }
         if (ret != 0)
@@ -326,12 +326,12 @@ __ccw_device_retry_loop(struct ccw_device *cdev, struct ccw1 *ccw, long magic, _
             break;
         /* Wait for end of request. */
         cdev->private->intparm = magic;
-        spin_unlock_irq(&sch->lock);
+        spin_unlock_irq(sch->lock);
         wait_event(cdev->private->wait_q,
                    (cdev->private->intparm == -EIO) ||
                    (cdev->private->intparm == -EAGAIN) ||
                    (cdev->private->intparm == 0));
-        spin_lock_irq(&sch->lock);
+        spin_lock_irq(sch->lock);
         /* Check at least for channel end / device end */
         if (cdev->private->intparm == -EIO) {
             /* Non-retryable error. */
@@ -342,9 +342,9 @@ __ccw_device_retry_loop(struct ccw_device *cdev, struct ccw1 *ccw, long magic, _
             /* Success. */
             break;
         /* Try again later. */
-        spin_unlock_irq(&sch->lock);
+        spin_unlock_irq(sch->lock);
         msleep(10);
-        spin_lock_irq(&sch->lock);
+        spin_lock_irq(sch->lock);
     } while (1);
     return ret;
@@ -389,7 +389,7 @@ read_dev_chars (struct ccw_device *cdev, void **buffer, int length)
         return ret;
     }
-    spin_lock_irq(&sch->lock);
+    spin_lock_irq(sch->lock);
     /* Save interrupt handler. */
     handler = cdev->handler;
     /* Temporarily install own handler. */
@@ -406,7 +406,7 @@ read_dev_chars (struct ccw_device *cdev, void **buffer, int length)
     /* Restore interrupt handler. */
     cdev->handler = handler;
-    spin_unlock_irq(&sch->lock);
+    spin_unlock_irq(sch->lock);
     clear_normalized_cda (rdc_ccw);
     kfree(rdc_ccw);
@@ -463,7 +463,7 @@ read_conf_data_lpm (struct ccw_device *cdev, void **buffer, int *length, __u8 lp
     rcd_ccw->count = ciw->count;
     rcd_ccw->flags = CCW_FLAG_SLI;
-    spin_lock_irq(&sch->lock);
+    spin_lock_irq(sch->lock);
     /* Save interrupt handler. */
     handler = cdev->handler;
     /* Temporarily install own handler. */
@@ -480,7 +480,7 @@ read_conf_data_lpm (struct ccw_device *cdev, void **buffer, int *length, __u8 lp
     /* Restore interrupt handler. */
     cdev->handler = handler;
-    spin_unlock_irq(&sch->lock);
+    spin_unlock_irq(sch->lock);
     /*
      * on success we update the user input parms
@@ -537,7 +537,7 @@ ccw_device_stlck(struct ccw_device *cdev)
         kfree(buf);
         return -ENOMEM;
     }
-    spin_lock_irqsave(&sch->lock, flags);
+    spin_lock_irqsave(sch->lock, flags);
     ret = cio_enable_subchannel(sch, 3);
     if (ret)
         goto out_unlock;
@@ -559,9 +559,9 @@ ccw_device_stlck(struct ccw_device *cdev)
         goto out_unlock;
     }
     cdev->private->irb.scsw.actl |= SCSW_ACTL_START_PEND;
-    spin_unlock_irqrestore(&sch->lock, flags);
+    spin_unlock_irqrestore(sch->lock, flags);
     wait_event(cdev->private->wait_q, cdev->private->irb.scsw.actl == 0);
-    spin_lock_irqsave(&sch->lock, flags);
+    spin_lock_irqsave(sch->lock, flags);
     cio_disable_subchannel(sch); //FIXME: return code?
     if ((cdev->private->irb.scsw.dstat !=
          (DEV_STAT_CHN_END|DEV_STAT_DEV_END)) ||
@@ -572,7 +572,7 @@ ccw_device_stlck(struct ccw_device *cdev)
 out_unlock:
     kfree(buf);
     kfree(buf2);
-    spin_unlock_irqrestore(&sch->lock, flags);
+    spin_unlock_irqrestore(sch->lock, flags);
     return ret;
 }