Staging: hv: storvsc: Include storvsc.c in storvsc_drv.c

As part of further cleanup of our storage drivers, move the contents
of storvsc.c into storvsc_drv.c, delete storvsc.c, and make the
necessary build adjustments.

Signed-off-by: K. Y. Srinivasan <kys@microsoft.com>
Signed-off-by: Haiyang Zhang <haiyangz@microsoft.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
Author:    K. Y. Srinivasan <kys@microsoft.com>
Date:      2011-08-27 11:31:22 -07:00
Committer: Greg Kroah-Hartman
Commit:    8dcf37d446 (parent fa23b8c78f)
3 changed files with 530 additions and 529 deletions

drivers/staging/hv/Makefile

@@ -7,6 +7,6 @@ obj-$(CONFIG_HYPERV_MOUSE) += hv_mouse.o
hv_vmbus-y := vmbus_drv.o \
hv.o connection.o channel.o \
channel_mgmt.o ring_buffer.o
-hv_storvsc-y := storvsc_drv.o storvsc.o
+hv_storvsc-y := storvsc_drv.o
hv_netvsc-y := netvsc_drv.o netvsc.o rndis_filter.o
hv_utils-y := hv_util.o hv_kvp.o

drivers/staging/hv/storvsc.c (deleted)

@@ -1,528 +0,0 @@
/*
* Copyright (c) 2009, Microsoft Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc., 59 Temple
* Place - Suite 330, Boston, MA 02111-1307 USA.
*
* Authors:
* Haiyang Zhang <haiyangz@microsoft.com>
* Hank Janssen <hjanssen@microsoft.com>
* K. Y. Srinivasan <kys@microsoft.com>
*
*/
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/completion.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include "hyperv.h"
#include "hyperv_storage.h"
static inline struct storvsc_device *alloc_stor_device(struct hv_device *device)
{
struct storvsc_device *stor_device;
stor_device = kzalloc(sizeof(struct storvsc_device), GFP_KERNEL);
if (!stor_device)
return NULL;
stor_device->destroy = false;
init_waitqueue_head(&stor_device->waiting_to_drain);
stor_device->device = device;
device->ext = stor_device;
return stor_device;
}
static inline struct storvsc_device *get_in_stor_device(
struct hv_device *device)
{
struct storvsc_device *stor_device;
unsigned long flags;
spin_lock_irqsave(&device->channel->inbound_lock, flags);
stor_device = (struct storvsc_device *)device->ext;
if (!stor_device)
goto get_in_err;
/*
* If the device is being destroyed, allow incoming
* traffic only to clean up outstanding requests.
*/
if (stor_device->destroy &&
(atomic_read(&stor_device->num_outstanding_req) == 0))
stor_device = NULL;
get_in_err:
spin_unlock_irqrestore(&device->channel->inbound_lock, flags);
return stor_device;
}
static int storvsc_channel_init(struct hv_device *device)
{
struct storvsc_device *stor_device;
struct hv_storvsc_request *request;
struct vstor_packet *vstor_packet;
int ret, t;
stor_device = get_out_stor_device(device);
if (!stor_device)
return -ENODEV;
request = &stor_device->init_request;
vstor_packet = &request->vstor_packet;
/*
* Now, initiate the vsc/vsp initialization protocol on the open
* channel
*/
memset(request, 0, sizeof(struct hv_storvsc_request));
init_completion(&request->wait_event);
vstor_packet->operation = VSTOR_OPERATION_BEGIN_INITIALIZATION;
vstor_packet->flags = REQUEST_COMPLETION_FLAG;
ret = vmbus_sendpacket(device->channel, vstor_packet,
sizeof(struct vstor_packet),
(unsigned long)request,
VM_PKT_DATA_INBAND,
VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
if (ret != 0)
goto cleanup;
t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
if (t == 0) {
ret = -ETIMEDOUT;
goto cleanup;
}
if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO ||
vstor_packet->status != 0)
goto cleanup;
/* reuse the packet for version range supported */
memset(vstor_packet, 0, sizeof(struct vstor_packet));
vstor_packet->operation = VSTOR_OPERATION_QUERY_PROTOCOL_VERSION;
vstor_packet->flags = REQUEST_COMPLETION_FLAG;
vstor_packet->version.major_minor = VMSTOR_PROTOCOL_VERSION_CURRENT;
FILL_VMSTOR_REVISION(vstor_packet->version.revision);
ret = vmbus_sendpacket(device->channel, vstor_packet,
sizeof(struct vstor_packet),
(unsigned long)request,
VM_PKT_DATA_INBAND,
VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
if (ret != 0)
goto cleanup;
t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
if (t == 0) {
ret = -ETIMEDOUT;
goto cleanup;
}
if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO ||
vstor_packet->status != 0)
goto cleanup;
memset(vstor_packet, 0, sizeof(struct vstor_packet));
vstor_packet->operation = VSTOR_OPERATION_QUERY_PROPERTIES;
vstor_packet->flags = REQUEST_COMPLETION_FLAG;
vstor_packet->storage_channel_properties.port_number =
stor_device->port_number;
ret = vmbus_sendpacket(device->channel, vstor_packet,
sizeof(struct vstor_packet),
(unsigned long)request,
VM_PKT_DATA_INBAND,
VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
if (ret != 0)
goto cleanup;
t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
if (t == 0) {
ret = -ETIMEDOUT;
goto cleanup;
}
if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO ||
vstor_packet->status != 0)
goto cleanup;
stor_device->path_id = vstor_packet->storage_channel_properties.path_id;
stor_device->target_id
= vstor_packet->storage_channel_properties.target_id;
memset(vstor_packet, 0, sizeof(struct vstor_packet));
vstor_packet->operation = VSTOR_OPERATION_END_INITIALIZATION;
vstor_packet->flags = REQUEST_COMPLETION_FLAG;
ret = vmbus_sendpacket(device->channel, vstor_packet,
sizeof(struct vstor_packet),
(unsigned long)request,
VM_PKT_DATA_INBAND,
VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
if (ret != 0)
goto cleanup;
t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
if (t == 0) {
ret = -ETIMEDOUT;
goto cleanup;
}
if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO ||
vstor_packet->status != 0)
goto cleanup;
cleanup:
return ret;
}
static void storvsc_on_io_completion(struct hv_device *device,
struct vstor_packet *vstor_packet,
struct hv_storvsc_request *request)
{
struct storvsc_device *stor_device;
struct vstor_packet *stor_pkt;
stor_device = (struct storvsc_device *)device->ext;
stor_pkt = &request->vstor_packet;
/* Copy over the status...etc */
stor_pkt->vm_srb.scsi_status = vstor_packet->vm_srb.scsi_status;
stor_pkt->vm_srb.srb_status = vstor_packet->vm_srb.srb_status;
stor_pkt->vm_srb.sense_info_length =
vstor_packet->vm_srb.sense_info_length;
if (vstor_packet->vm_srb.scsi_status != 0 ||
vstor_packet->vm_srb.srb_status != 1) {
DPRINT_WARN(STORVSC,
"cmd 0x%x scsi status 0x%x srb status 0x%x\n",
stor_pkt->vm_srb.cdb[0],
vstor_packet->vm_srb.scsi_status,
vstor_packet->vm_srb.srb_status);
}
if ((vstor_packet->vm_srb.scsi_status & 0xFF) == 0x02) {
/* CHECK_CONDITION */
if (vstor_packet->vm_srb.srb_status & 0x80) {
/* autosense data available */
DPRINT_WARN(STORVSC, "storvsc pkt %p autosense data "
"valid - len %d\n", request,
vstor_packet->vm_srb.sense_info_length);
memcpy(request->sense_buffer,
vstor_packet->vm_srb.sense_data,
vstor_packet->vm_srb.sense_info_length);
}
}
stor_pkt->vm_srb.data_transfer_length =
vstor_packet->vm_srb.data_transfer_length;
request->on_io_completion(request);
if (atomic_dec_and_test(&stor_device->num_outstanding_req) &&
stor_device->drain_notify)
wake_up(&stor_device->waiting_to_drain);
}
static void storvsc_on_receive(struct hv_device *device,
struct vstor_packet *vstor_packet,
struct hv_storvsc_request *request)
{
switch (vstor_packet->operation) {
case VSTOR_OPERATION_COMPLETE_IO:
storvsc_on_io_completion(device, vstor_packet, request);
break;
case VSTOR_OPERATION_REMOVE_DEVICE:
default:
break;
}
}
static void storvsc_on_channel_callback(void *context)
{
struct hv_device *device = (struct hv_device *)context;
struct storvsc_device *stor_device;
u32 bytes_recvd;
u64 request_id;
unsigned char packet[ALIGN(sizeof(struct vstor_packet), 8)];
struct hv_storvsc_request *request;
int ret;
stor_device = get_in_stor_device(device);
if (!stor_device)
return;
do {
ret = vmbus_recvpacket(device->channel, packet,
ALIGN(sizeof(struct vstor_packet), 8),
&bytes_recvd, &request_id);
if (ret == 0 && bytes_recvd > 0) {
request = (struct hv_storvsc_request *)
(unsigned long)request_id;
if ((request == &stor_device->init_request) ||
(request == &stor_device->reset_request)) {
memcpy(&request->vstor_packet, packet,
sizeof(struct vstor_packet));
complete(&request->wait_event);
} else {
storvsc_on_receive(device,
(struct vstor_packet *)packet,
request);
}
} else {
break;
}
} while (1);
return;
}
static int storvsc_connect_to_vsp(struct hv_device *device, u32 ring_size)
{
struct vmstorage_channel_properties props;
int ret;
memset(&props, 0, sizeof(struct vmstorage_channel_properties));
/* Open the channel */
ret = vmbus_open(device->channel,
ring_size,
ring_size,
(void *)&props,
sizeof(struct vmstorage_channel_properties),
storvsc_on_channel_callback, device);
if (ret != 0)
return ret;
ret = storvsc_channel_init(device);
return ret;
}
int storvsc_dev_add(struct hv_device *device,
void *additional_info)
{
struct storvsc_device *stor_device;
struct storvsc_device_info *device_info;
int ret = 0;
device_info = (struct storvsc_device_info *)additional_info;
stor_device = alloc_stor_device(device);
if (!stor_device)
return -ENOMEM;
/* Save the channel properties to our storvsc channel */
/*
* If we support more than 1 scsi channel, we need to set the
* port number here to the scsi channel, but how do we get the
* scsi channel prior to the bus scan?
*
* The host does not support this.
*/
stor_device->port_number = device_info->port_number;
/* Send it back up */
ret = storvsc_connect_to_vsp(device, device_info->ring_buffer_size);
if (ret) {
kfree(stor_device);
return ret;
}
device_info->path_id = stor_device->path_id;
device_info->target_id = stor_device->target_id;
return ret;
}
int storvsc_dev_remove(struct hv_device *device)
{
struct storvsc_device *stor_device;
unsigned long flags;
stor_device = (struct storvsc_device *)device->ext;
spin_lock_irqsave(&device->channel->inbound_lock, flags);
stor_device->destroy = true;
spin_unlock_irqrestore(&device->channel->inbound_lock, flags);
/*
* At this point, all outbound traffic should be disabled. We
* only allow inbound traffic (responses) to proceed so that
* outstanding requests can be completed.
*/
storvsc_wait_to_drain(stor_device);
/*
* Since we have already drained, we don't need to busy wait
* as was done in final_release_stor_device().
* Note that we cannot set the ext pointer to NULL until
* we have drained - to drain the outgoing packets, we need to
* allow incoming packets.
*/
spin_lock_irqsave(&device->channel->inbound_lock, flags);
device->ext = NULL;
spin_unlock_irqrestore(&device->channel->inbound_lock, flags);
/* Close the channel */
vmbus_close(device->channel);
kfree(stor_device);
return 0;
}
int storvsc_do_io(struct hv_device *device,
struct hv_storvsc_request *request)
{
struct storvsc_device *stor_device;
struct vstor_packet *vstor_packet;
int ret = 0;
vstor_packet = &request->vstor_packet;
stor_device = get_out_stor_device(device);
if (!stor_device)
return -ENODEV;
request->device = device;
vstor_packet->flags |= REQUEST_COMPLETION_FLAG;
vstor_packet->vm_srb.length = sizeof(struct vmscsi_request);
vstor_packet->vm_srb.sense_info_length = SENSE_BUFFER_SIZE;
vstor_packet->vm_srb.data_transfer_length =
request->data_buffer.len;
vstor_packet->operation = VSTOR_OPERATION_EXECUTE_SRB;
if (request->data_buffer.len) {
ret = vmbus_sendpacket_multipagebuffer(device->channel,
&request->data_buffer,
vstor_packet,
sizeof(struct vstor_packet),
(unsigned long)request);
} else {
ret = vmbus_sendpacket(device->channel, vstor_packet,
sizeof(struct vstor_packet),
(unsigned long)request,
VM_PKT_DATA_INBAND,
VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
}
if (ret != 0)
return ret;
atomic_inc(&stor_device->num_outstanding_req);
return ret;
}
/*
* The channel properties uniquely specify how the device is to be
* presented to the guest. Map this information for use by the block
* driver. For Linux guests on Hyper-V, we emulate a scsi HBA in the guest
* (storvsc_drv) and so scsi devices in the guest are handled by
* native upper level Linux drivers. Consequently, the Hyper-V
* block driver, while being a generic block driver, presently does not
* deal with anything other than devices that would need to be presented
* to the guest as an IDE disk.
*
* This function maps the channel properties as embedded in the input
* parameter device_info onto information necessary to register the
* corresponding block device.
*
* Currently, there is no way to stop the emulation of the block device
* on the host side. And so, to prevent the native IDE drivers in Linux
* from taking over these devices (to be managed by the Hyper-V block
* driver), we will, if need be, take over the majors of the IDE controllers.
*
*/
int storvsc_get_major_info(struct storvsc_device_info *device_info,
struct storvsc_major_info *major_info)
{
static bool ide0_registered;
static bool ide1_registered;
/*
* For now we only support IDE disks.
*/
major_info->devname = "ide";
major_info->diskname = "hd";
if (device_info->path_id) {
major_info->major = 22;
if (!ide1_registered) {
major_info->do_register = true;
ide1_registered = true;
} else
major_info->do_register = false;
if (device_info->target_id)
major_info->index = 3;
else
major_info->index = 2;
return 0;
} else {
major_info->major = 3;
if (!ide0_registered) {
major_info->do_register = true;
ide0_registered = true;
} else
major_info->do_register = false;
if (device_info->target_id)
major_info->index = 1;
else
major_info->index = 0;
return 0;
}
return -ENODEV;
}
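
The storvsc_dev_remove() path above drains outstanding I/O by calling storvsc_wait_to_drain(), which is not defined in this diff and presumably comes from hyperv_storage.h. Below is only a sketch of how such a helper could look, assuming nothing beyond the fields this file already manipulates (drain_notify, waiting_to_drain, num_outstanding_req); the real header may differ.

#include <linux/wait.h>
#include <linux/atomic.h>
#include "hyperv_storage.h"	/* struct storvsc_device */

/* Hypothetical sketch of the drain helper; not part of this commit. */
static inline void storvsc_wait_to_drain(struct storvsc_device *dev)
{
	/* Ask the completion path to issue wake-ups on waiting_to_drain. */
	dev->drain_notify = true;
	/* Sleep until the last outstanding request has completed. */
	wait_event(dev->waiting_to_drain,
		   atomic_read(&dev->num_outstanding_req) == 0);
	dev->drain_notify = false;
}

This is consistent with the contract visible in storvsc_on_io_completion(), which decrements num_outstanding_req and calls wake_up(&stor_device->waiting_to_drain) only when drain_notify is set.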

drivers/staging/hv/storvsc_drv.c

@@ -36,6 +36,535 @@
#include "hyperv.h"
#include "hyperv_storage.h"
/*
* Copyright (c) 2009, Microsoft Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc., 59 Temple
* Place - Suite 330, Boston, MA 02111-1307 USA.
*
* Authors:
* Haiyang Zhang <haiyangz@microsoft.com>
* Hank Janssen <hjanssen@microsoft.com>
* K. Y. Srinivasan <kys@microsoft.com>
*
*/
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/completion.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include "hyperv.h"
#include "hyperv_storage.h"
static inline struct storvsc_device *alloc_stor_device(struct hv_device *device)
{
struct storvsc_device *stor_device;
stor_device = kzalloc(sizeof(struct storvsc_device), GFP_KERNEL);
if (!stor_device)
return NULL;
stor_device->destroy = false;
init_waitqueue_head(&stor_device->waiting_to_drain);
stor_device->device = device;
device->ext = stor_device;
return stor_device;
}
static inline struct storvsc_device *get_in_stor_device(
struct hv_device *device)
{
struct storvsc_device *stor_device;
unsigned long flags;
spin_lock_irqsave(&device->channel->inbound_lock, flags);
stor_device = (struct storvsc_device *)device->ext;
if (!stor_device)
goto get_in_err;
/*
* If the device is being destroyed, allow incoming
* traffic only to clean up outstanding requests.
*/
if (stor_device->destroy &&
(atomic_read(&stor_device->num_outstanding_req) == 0))
stor_device = NULL;
get_in_err:
spin_unlock_irqrestore(&device->channel->inbound_lock, flags);
return stor_device;
}
static int storvsc_channel_init(struct hv_device *device)
{
struct storvsc_device *stor_device;
struct hv_storvsc_request *request;
struct vstor_packet *vstor_packet;
int ret, t;
stor_device = get_out_stor_device(device);
if (!stor_device)
return -ENODEV;
request = &stor_device->init_request;
vstor_packet = &request->vstor_packet;
/*
* Now, initiate the vsc/vsp initialization protocol on the open
* channel
*/
memset(request, 0, sizeof(struct hv_storvsc_request));
init_completion(&request->wait_event);
vstor_packet->operation = VSTOR_OPERATION_BEGIN_INITIALIZATION;
vstor_packet->flags = REQUEST_COMPLETION_FLAG;
ret = vmbus_sendpacket(device->channel, vstor_packet,
sizeof(struct vstor_packet),
(unsigned long)request,
VM_PKT_DATA_INBAND,
VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
if (ret != 0)
goto cleanup;
t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
if (t == 0) {
ret = -ETIMEDOUT;
goto cleanup;
}
if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO ||
vstor_packet->status != 0)
goto cleanup;
/* reuse the packet for version range supported */
memset(vstor_packet, 0, sizeof(struct vstor_packet));
vstor_packet->operation = VSTOR_OPERATION_QUERY_PROTOCOL_VERSION;
vstor_packet->flags = REQUEST_COMPLETION_FLAG;
vstor_packet->version.major_minor = VMSTOR_PROTOCOL_VERSION_CURRENT;
FILL_VMSTOR_REVISION(vstor_packet->version.revision);
ret = vmbus_sendpacket(device->channel, vstor_packet,
sizeof(struct vstor_packet),
(unsigned long)request,
VM_PKT_DATA_INBAND,
VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
if (ret != 0)
goto cleanup;
t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
if (t == 0) {
ret = -ETIMEDOUT;
goto cleanup;
}
if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO ||
vstor_packet->status != 0)
goto cleanup;
memset(vstor_packet, 0, sizeof(struct vstor_packet));
vstor_packet->operation = VSTOR_OPERATION_QUERY_PROPERTIES;
vstor_packet->flags = REQUEST_COMPLETION_FLAG;
vstor_packet->storage_channel_properties.port_number =
stor_device->port_number;
ret = vmbus_sendpacket(device->channel, vstor_packet,
sizeof(struct vstor_packet),
(unsigned long)request,
VM_PKT_DATA_INBAND,
VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
if (ret != 0)
goto cleanup;
t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
if (t == 0) {
ret = -ETIMEDOUT;
goto cleanup;
}
if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO ||
vstor_packet->status != 0)
goto cleanup;
stor_device->path_id = vstor_packet->storage_channel_properties.path_id;
stor_device->target_id
= vstor_packet->storage_channel_properties.target_id;
memset(vstor_packet, 0, sizeof(struct vstor_packet));
vstor_packet->operation = VSTOR_OPERATION_END_INITIALIZATION;
vstor_packet->flags = REQUEST_COMPLETION_FLAG;
ret = vmbus_sendpacket(device->channel, vstor_packet,
sizeof(struct vstor_packet),
(unsigned long)request,
VM_PKT_DATA_INBAND,
VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
if (ret != 0)
goto cleanup;
t = wait_for_completion_timeout(&request->wait_event, 5*HZ);
if (t == 0) {
ret = -ETIMEDOUT;
goto cleanup;
}
if (vstor_packet->operation != VSTOR_OPERATION_COMPLETE_IO ||
vstor_packet->status != 0)
goto cleanup;
cleanup:
return ret;
}
static void storvsc_on_io_completion(struct hv_device *device,
struct vstor_packet *vstor_packet,
struct hv_storvsc_request *request)
{
struct storvsc_device *stor_device;
struct vstor_packet *stor_pkt;
stor_device = (struct storvsc_device *)device->ext;
stor_pkt = &request->vstor_packet;
/* Copy over the status...etc */
stor_pkt->vm_srb.scsi_status = vstor_packet->vm_srb.scsi_status;
stor_pkt->vm_srb.srb_status = vstor_packet->vm_srb.srb_status;
stor_pkt->vm_srb.sense_info_length =
vstor_packet->vm_srb.sense_info_length;
if (vstor_packet->vm_srb.scsi_status != 0 ||
vstor_packet->vm_srb.srb_status != 1) {
DPRINT_WARN(STORVSC,
"cmd 0x%x scsi status 0x%x srb status 0x%x\n",
stor_pkt->vm_srb.cdb[0],
vstor_packet->vm_srb.scsi_status,
vstor_packet->vm_srb.srb_status);
}
if ((vstor_packet->vm_srb.scsi_status & 0xFF) == 0x02) {
/* CHECK_CONDITION */
if (vstor_packet->vm_srb.srb_status & 0x80) {
/* autosense data available */
DPRINT_WARN(STORVSC, "storvsc pkt %p autosense data "
"valid - len %d\n", request,
vstor_packet->vm_srb.sense_info_length);
memcpy(request->sense_buffer,
vstor_packet->vm_srb.sense_data,
vstor_packet->vm_srb.sense_info_length);
}
}
stor_pkt->vm_srb.data_transfer_length =
vstor_packet->vm_srb.data_transfer_length;
request->on_io_completion(request);
if (atomic_dec_and_test(&stor_device->num_outstanding_req) &&
stor_device->drain_notify)
wake_up(&stor_device->waiting_to_drain);
}
static void storvsc_on_receive(struct hv_device *device,
struct vstor_packet *vstor_packet,
struct hv_storvsc_request *request)
{
switch (vstor_packet->operation) {
case VSTOR_OPERATION_COMPLETE_IO:
storvsc_on_io_completion(device, vstor_packet, request);
break;
case VSTOR_OPERATION_REMOVE_DEVICE:
default:
break;
}
}
static void storvsc_on_channel_callback(void *context)
{
struct hv_device *device = (struct hv_device *)context;
struct storvsc_device *stor_device;
u32 bytes_recvd;
u64 request_id;
unsigned char packet[ALIGN(sizeof(struct vstor_packet), 8)];
struct hv_storvsc_request *request;
int ret;
stor_device = get_in_stor_device(device);
if (!stor_device)
return;
do {
ret = vmbus_recvpacket(device->channel, packet,
ALIGN(sizeof(struct vstor_packet), 8),
&bytes_recvd, &request_id);
if (ret == 0 && bytes_recvd > 0) {
request = (struct hv_storvsc_request *)
(unsigned long)request_id;
if ((request == &stor_device->init_request) ||
(request == &stor_device->reset_request)) {
memcpy(&request->vstor_packet, packet,
sizeof(struct vstor_packet));
complete(&request->wait_event);
} else {
storvsc_on_receive(device,
(struct vstor_packet *)packet,
request);
}
} else {
break;
}
} while (1);
return;
}
static int storvsc_connect_to_vsp(struct hv_device *device, u32 ring_size)
{
struct vmstorage_channel_properties props;
int ret;
memset(&props, 0, sizeof(struct vmstorage_channel_properties));
/* Open the channel */
ret = vmbus_open(device->channel,
ring_size,
ring_size,
(void *)&props,
sizeof(struct vmstorage_channel_properties),
storvsc_on_channel_callback, device);
if (ret != 0)
return ret;
ret = storvsc_channel_init(device);
return ret;
}
int storvsc_dev_add(struct hv_device *device,
void *additional_info)
{
struct storvsc_device *stor_device;
struct storvsc_device_info *device_info;
int ret = 0;
device_info = (struct storvsc_device_info *)additional_info;
stor_device = alloc_stor_device(device);
if (!stor_device)
return -ENOMEM;
/* Save the channel properties to our storvsc channel */
/*
* If we support more than 1 scsi channel, we need to set the
* port number here to the scsi channel, but how do we get the
* scsi channel prior to the bus scan?
*
* The host does not support this.
*/
stor_device->port_number = device_info->port_number;
/* Send it back up */
ret = storvsc_connect_to_vsp(device, device_info->ring_buffer_size);
if (ret) {
kfree(stor_device);
return ret;
}
device_info->path_id = stor_device->path_id;
device_info->target_id = stor_device->target_id;
return ret;
}
int storvsc_dev_remove(struct hv_device *device)
{
struct storvsc_device *stor_device;
unsigned long flags;
stor_device = (struct storvsc_device *)device->ext;
spin_lock_irqsave(&device->channel->inbound_lock, flags);
stor_device->destroy = true;
spin_unlock_irqrestore(&device->channel->inbound_lock, flags);
/*
* At this point, all outbound traffic should be disabled. We
* only allow inbound traffic (responses) to proceed so that
* outstanding requests can be completed.
*/
storvsc_wait_to_drain(stor_device);
/*
* Since we have already drained, we don't need to busy wait
* as was done in final_release_stor_device().
* Note that we cannot set the ext pointer to NULL until
* we have drained - to drain the outgoing packets, we need to
* allow incoming packets.
*/
spin_lock_irqsave(&device->channel->inbound_lock, flags);
device->ext = NULL;
spin_unlock_irqrestore(&device->channel->inbound_lock, flags);
/* Close the channel */
vmbus_close(device->channel);
kfree(stor_device);
return 0;
}
int storvsc_do_io(struct hv_device *device,
struct hv_storvsc_request *request)
{
struct storvsc_device *stor_device;
struct vstor_packet *vstor_packet;
int ret = 0;
vstor_packet = &request->vstor_packet;
stor_device = get_out_stor_device(device);
if (!stor_device)
return -ENODEV;
request->device = device;
vstor_packet->flags |= REQUEST_COMPLETION_FLAG;
vstor_packet->vm_srb.length = sizeof(struct vmscsi_request);
vstor_packet->vm_srb.sense_info_length = SENSE_BUFFER_SIZE;
vstor_packet->vm_srb.data_transfer_length =
request->data_buffer.len;
vstor_packet->operation = VSTOR_OPERATION_EXECUTE_SRB;
if (request->data_buffer.len) {
ret = vmbus_sendpacket_multipagebuffer(device->channel,
&request->data_buffer,
vstor_packet,
sizeof(struct vstor_packet),
(unsigned long)request);
} else {
ret = vmbus_sendpacket(device->channel, vstor_packet,
sizeof(struct vstor_packet),
(unsigned long)request,
VM_PKT_DATA_INBAND,
VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
}
if (ret != 0)
return ret;
atomic_inc(&stor_device->num_outstanding_req);
return ret;
}
/*
* The channel properties uniquely specify how the device is to be
* presented to the guest. Map this information for use by the block
* driver. For Linux guests on Hyper-V, we emulate a scsi HBA in the guest
* (storvsc_drv) and so scsi devices in the guest are handled by
* native upper level Linux drivers. Consequently, the Hyper-V
* block driver, while being a generic block driver, presently does not
* deal with anything other than devices that would need to be presented
* to the guest as an IDE disk.
*
* This function maps the channel properties as embedded in the input
* parameter device_info onto information necessary to register the
* corresponding block device.
*
* Currently, there is no way to stop the emulation of the block device
* on the host side. And so, to prevent the native IDE drivers in Linux
* from taking over these devices (to be managed by the Hyper-V block
* driver), we will, if need be, take over the majors of the IDE controllers.
*
*/
int storvsc_get_major_info(struct storvsc_device_info *device_info,
struct storvsc_major_info *major_info)
{
static bool ide0_registered;
static bool ide1_registered;
/*
* For now we only support IDE disks.
*/
major_info->devname = "ide";
major_info->diskname = "hd";
if (device_info->path_id) {
major_info->major = 22;
if (!ide1_registered) {
major_info->do_register = true;
ide1_registered = true;
} else
major_info->do_register = false;
if (device_info->target_id)
major_info->index = 3;
else
major_info->index = 2;
return 0;
} else {
major_info->major = 3;
if (!ide0_registered) {
major_info->do_register = true;
ide0_registered = true;
} else
major_info->do_register = false;
if (device_info->target_id)
major_info->index = 1;
else
major_info->index = 0;
return 0;
}
return -ENODEV;
}
static int storvsc_ringbuffer_size = STORVSC_RING_BUFFER_SIZE;
module_param(storvsc_ringbuffer_size, int, S_IRUGO);
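
For orientation, storvsc_ringbuffer_size above is the value a probe path would hand to storvsc_dev_add() in device_info->ring_buffer_size, which storvsc_connect_to_vsp() then passes to vmbus_open(). The following is a hypothetical caller, using only the storvsc_device_info fields this code reads or fills in (port_number, ring_buffer_size, path_id, target_id) and the includes already present in storvsc_drv.c; the driver's real probe routine may be wired differently.

/* Hypothetical example only; not the driver's actual probe routine. */
static int example_storvsc_probe(struct hv_device *dev)
{
	struct storvsc_device_info device_info;
	int ret;

	memset(&device_info, 0, sizeof(device_info));
	device_info.port_number = 0;	/* single SCSI channel assumed */
	device_info.ring_buffer_size = storvsc_ringbuffer_size;

	/* Opens the VMBus channel and runs the vsc/vsp init handshake. */
	ret = storvsc_dev_add(dev, &device_info);
	if (ret)
		return ret;

	/* On success, storvsc_dev_add() has filled in path_id and target_id. */
	return 0;
}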