Merge git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending

Pull SCSI target fixes from Nicholas Bellinger:
 "This series addresses a number of outstanding issues wrt to active I/O
  shutdown using iser-target.  This includes:

   - Fix a long-standing tpg_state bug where a tpg could be referenced
     during explicit shutdown (v3.1+ stable)
   - Use list_del_init for iscsi_cmd->i_conn_node so list_empty checks
     work as expected (v3.10+ stable)
   - Fix an isert_conn->state related hung task bug + ensure outstanding
     I/O completes during session shutdown (v3.10+ stable)
   - Fix isert_conn->post_send_buf_count accounting for RDMA READ/WRITEs
     (v3.10+ stable)
   - Ignore FRWR completions during active I/O shutdown (v3.12+ stable)
   - Fix command leakage for interrupt coalescing during active I/O
     shutdown (v3.13+ stable)

  Also included is another DIF emulation fix from Sagi specific to
  v3.14-rc code"

* git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending:
  Target/sbc: Fix sbc_copy_prot for offset scatters
  iser-target: Fix command leak for tx_desc->comp_llnode_batch
  iser-target: Ignore completions for FRWRs in isert_cq_tx_work
  iser-target: Fix post_send_buf_count for RDMA READ/WRITE
  iscsi/iser-target: Fix isert_conn->state hung shutdown issues
  iscsi/iser-target: Use list_del_init for ->i_conn_node
  iscsi-target: Fix iscsit_get_tpg_from_np tpg_state bug
commit 66a523db70
Author: Linus Torvalds
Date:   2014-03-09 13:50:14 -07:00
7 changed files with 150 additions and 102 deletions

drivers/infiniband/ulp/isert/ib_isert.c

@ -492,12 +492,11 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
isert_conn->state = ISER_CONN_INIT;
INIT_LIST_HEAD(&isert_conn->conn_accept_node);
init_completion(&isert_conn->conn_login_comp);
init_waitqueue_head(&isert_conn->conn_wait);
init_waitqueue_head(&isert_conn->conn_wait_comp_err);
init_completion(&isert_conn->conn_wait);
init_completion(&isert_conn->conn_wait_comp_err);
kref_init(&isert_conn->conn_kref);
kref_get(&isert_conn->conn_kref);
mutex_init(&isert_conn->conn_mutex);
mutex_init(&isert_conn->conn_comp_mutex);
spin_lock_init(&isert_conn->conn_lock);
cma_id->context = isert_conn;
@ -688,11 +687,11 @@ isert_disconnect_work(struct work_struct *work)
pr_debug("isert_disconnect_work(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
mutex_lock(&isert_conn->conn_mutex);
isert_conn->state = ISER_CONN_DOWN;
if (isert_conn->state == ISER_CONN_UP)
isert_conn->state = ISER_CONN_TERMINATING;
if (isert_conn->post_recv_buf_count == 0 &&
atomic_read(&isert_conn->post_send_buf_count) == 0) {
pr_debug("Calling wake_up(&isert_conn->conn_wait);\n");
mutex_unlock(&isert_conn->conn_mutex);
goto wake_up;
}
@ -712,7 +711,7 @@ isert_disconnect_work(struct work_struct *work)
mutex_unlock(&isert_conn->conn_mutex);
wake_up:
wake_up(&isert_conn->conn_wait);
complete(&isert_conn->conn_wait);
isert_put_conn(isert_conn);
}
@ -888,16 +887,17 @@ isert_init_send_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
* Coalesce send completion interrupts by only setting IB_SEND_SIGNALED
* bit for every ISERT_COMP_BATCH_COUNT number of ib_post_send() calls.
*/
mutex_lock(&isert_conn->conn_comp_mutex);
if (coalesce &&
mutex_lock(&isert_conn->conn_mutex);
if (coalesce && isert_conn->state == ISER_CONN_UP &&
++isert_conn->conn_comp_batch < ISERT_COMP_BATCH_COUNT) {
tx_desc->llnode_active = true;
llist_add(&tx_desc->comp_llnode, &isert_conn->conn_comp_llist);
mutex_unlock(&isert_conn->conn_comp_mutex);
mutex_unlock(&isert_conn->conn_mutex);
return;
}
isert_conn->conn_comp_batch = 0;
tx_desc->comp_llnode_batch = llist_del_all(&isert_conn->conn_comp_llist);
mutex_unlock(&isert_conn->conn_comp_mutex);
mutex_unlock(&isert_conn->conn_mutex);
send_wr->send_flags = IB_SEND_SIGNALED;
}
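
The hunk above moves the coalescing decision under conn_mutex, stops batching once the connection has left ISER_CONN_UP, and tags each parked descriptor with llnode_active. The point of the batch: only every ISERT_COMP_BATCH_COUNT-th send is posted IB_SEND_SIGNALED, and the unsignaled descriptors ride along on the next signaled completion. Descriptors parked when the connection dies never get that completion, which is the command leak this series fixes. A minimal userspace sketch of the pattern follows; BATCH_COUNT, post_send, drain_on_error and the structs are illustrative names, not the driver's.

/* Userspace analogue of the coalescing above; illustrative, not kernel code. */
#include <stdio.h>
#include <stdlib.h>

#define BATCH_COUNT 8                /* stands in for ISERT_COMP_BATCH_COUNT */

struct tx_desc {
    int id;
    struct tx_desc *batch_next;      /* stands in for the comp_llnode linkage */
};

struct conn {
    int comp_batch;                  /* descriptors parked since the last signal */
    struct tx_desc *comp_list;       /* parked, unsignaled descriptors */
    int up;                          /* still in full-feature phase */
};

/* Returns 1 when the send should be posted signaled, 0 when its completion
 * is coalesced and the descriptor is parked on comp_list. */
static int post_send(struct conn *c, struct tx_desc *d)
{
    if (c->up && ++c->comp_batch < BATCH_COUNT) {
        d->batch_next = c->comp_list;    /* park it, unsignaled */
        c->comp_list = d;
        return 0;
    }
    /* Signaled send: it carries the parked batch with it; here the batch is
     * released immediately instead of when the completion fires. */
    while (c->comp_list) {
        struct tx_desc *p = c->comp_list;
        c->comp_list = p->batch_next;
        free(p);
    }
    c->comp_batch = 0;
    return 1;
}

/* On error/shutdown the parked descriptors never see a signaled completion,
 * so a teardown path must walk the list explicitly or they leak. */
static int drain_on_error(struct conn *c)
{
    int freed = 0;
    while (c->comp_list) {
        struct tx_desc *d = c->comp_list;
        c->comp_list = d->batch_next;
        free(d);
        freed++;
    }
    c->comp_batch = 0;
    return freed;
}

int main(void)
{
    struct conn c = { .up = 1 };
    for (int i = 0; i < 5; i++) {        /* fewer sends than BATCH_COUNT */
        struct tx_desc *d = calloc(1, sizeof(*d));
        d->id = i;
        if (post_send(&c, d))
            free(d);                     /* signaled: completed normally */
    }
    c.up = 0;                            /* connection torn down mid-batch */
    printf("drained %d parked descriptors\n", drain_on_error(&c));
    return 0;
}
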
@ -1464,7 +1464,7 @@ isert_put_cmd(struct isert_cmd *isert_cmd)
case ISCSI_OP_SCSI_CMD:
spin_lock_bh(&conn->cmd_lock);
if (!list_empty(&cmd->i_conn_node))
list_del(&cmd->i_conn_node);
list_del_init(&cmd->i_conn_node);
spin_unlock_bh(&conn->cmd_lock);
if (cmd->data_direction == DMA_TO_DEVICE)
@ -1476,7 +1476,7 @@ isert_put_cmd(struct isert_cmd *isert_cmd)
case ISCSI_OP_SCSI_TMFUNC:
spin_lock_bh(&conn->cmd_lock);
if (!list_empty(&cmd->i_conn_node))
list_del(&cmd->i_conn_node);
list_del_init(&cmd->i_conn_node);
spin_unlock_bh(&conn->cmd_lock);
transport_generic_free_cmd(&cmd->se_cmd, 0);
@ -1486,7 +1486,7 @@ isert_put_cmd(struct isert_cmd *isert_cmd)
case ISCSI_OP_TEXT:
spin_lock_bh(&conn->cmd_lock);
if (!list_empty(&cmd->i_conn_node))
list_del(&cmd->i_conn_node);
list_del_init(&cmd->i_conn_node);
spin_unlock_bh(&conn->cmd_lock);
/*
@ -1549,6 +1549,7 @@ isert_completion_rdma_read(struct iser_tx_desc *tx_desc,
iscsit_stop_dataout_timer(cmd);
device->unreg_rdma_mem(isert_cmd, isert_conn);
cmd->write_data_done = wr->cur_rdma_length;
wr->send_wr_num = 0;
pr_debug("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd);
spin_lock_bh(&cmd->istate_lock);
@ -1589,7 +1590,7 @@ isert_do_control_comp(struct work_struct *work)
pr_debug("Calling iscsit_logout_post_handler >>>>>>>>>>>>>>\n");
/*
* Call atomic_dec(&isert_conn->post_send_buf_count)
* from isert_free_conn()
* from isert_wait_conn()
*/
isert_conn->logout_posted = true;
iscsit_logout_post_handler(cmd, cmd->conn);
@ -1613,6 +1614,7 @@ isert_response_completion(struct iser_tx_desc *tx_desc,
struct ib_device *ib_dev)
{
struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
if (cmd->i_state == ISTATE_SEND_TASKMGTRSP ||
cmd->i_state == ISTATE_SEND_LOGOUTRSP ||
@ -1624,7 +1626,7 @@ isert_response_completion(struct iser_tx_desc *tx_desc,
queue_work(isert_comp_wq, &isert_cmd->comp_work);
return;
}
atomic_dec(&isert_conn->post_send_buf_count);
atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);
cmd->i_state = ISTATE_SENT_STATUS;
isert_completion_put(tx_desc, isert_cmd, ib_dev);
@ -1662,7 +1664,7 @@ __isert_send_completion(struct iser_tx_desc *tx_desc,
case ISER_IB_RDMA_READ:
pr_debug("isert_send_completion: Got ISER_IB_RDMA_READ:\n");
atomic_dec(&isert_conn->post_send_buf_count);
atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
isert_completion_rdma_read(tx_desc, isert_cmd);
break;
default:
@ -1691,31 +1693,76 @@ isert_send_completion(struct iser_tx_desc *tx_desc,
}
static void
isert_cq_comp_err(struct iser_tx_desc *tx_desc, struct isert_conn *isert_conn)
isert_cq_drain_comp_llist(struct isert_conn *isert_conn, struct ib_device *ib_dev)
{
struct llist_node *llnode;
struct isert_rdma_wr *wr;
struct iser_tx_desc *t;
mutex_lock(&isert_conn->conn_mutex);
llnode = llist_del_all(&isert_conn->conn_comp_llist);
isert_conn->conn_comp_batch = 0;
mutex_unlock(&isert_conn->conn_mutex);
while (llnode) {
t = llist_entry(llnode, struct iser_tx_desc, comp_llnode);
llnode = llist_next(llnode);
wr = &t->isert_cmd->rdma_wr;
atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);
isert_completion_put(t, t->isert_cmd, ib_dev);
}
}
static void
isert_cq_tx_comp_err(struct iser_tx_desc *tx_desc, struct isert_conn *isert_conn)
{
struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
struct isert_cmd *isert_cmd = tx_desc->isert_cmd;
struct llist_node *llnode = tx_desc->comp_llnode_batch;
struct isert_rdma_wr *wr;
struct iser_tx_desc *t;
if (tx_desc) {
struct isert_cmd *isert_cmd = tx_desc->isert_cmd;
while (llnode) {
t = llist_entry(llnode, struct iser_tx_desc, comp_llnode);
llnode = llist_next(llnode);
wr = &t->isert_cmd->rdma_wr;
if (!isert_cmd)
isert_unmap_tx_desc(tx_desc, ib_dev);
else
isert_completion_put(tx_desc, isert_cmd, ib_dev);
atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);
isert_completion_put(t, t->isert_cmd, ib_dev);
}
tx_desc->comp_llnode_batch = NULL;
if (!isert_cmd)
isert_unmap_tx_desc(tx_desc, ib_dev);
else
isert_completion_put(tx_desc, isert_cmd, ib_dev);
}
static void
isert_cq_rx_comp_err(struct isert_conn *isert_conn)
{
struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
struct iscsi_conn *conn = isert_conn->conn;
if (isert_conn->post_recv_buf_count)
return;
isert_cq_drain_comp_llist(isert_conn, ib_dev);
if (conn->sess) {
target_sess_cmd_list_set_waiting(conn->sess->se_sess);
target_wait_for_sess_cmds(conn->sess->se_sess);
}
if (isert_conn->post_recv_buf_count == 0 &&
atomic_read(&isert_conn->post_send_buf_count) == 0) {
pr_debug("isert_cq_comp_err >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
pr_debug("Calling wake_up from isert_cq_comp_err\n");
while (atomic_read(&isert_conn->post_send_buf_count))
msleep(3000);
mutex_lock(&isert_conn->conn_mutex);
if (isert_conn->state != ISER_CONN_DOWN)
isert_conn->state = ISER_CONN_TERMINATING;
mutex_unlock(&isert_conn->conn_mutex);
mutex_lock(&isert_conn->conn_mutex);
isert_conn->state = ISER_CONN_DOWN;
mutex_unlock(&isert_conn->conn_mutex);
wake_up(&isert_conn->conn_wait_comp_err);
}
complete(&isert_conn->conn_wait_comp_err);
}
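
The rework above replaces wake_up() on wait queues with complete() on completions, and isert_wait_conn later blocks in wait_for_completion(). The property that matters for the hung-task fix is that a completion latches the event: a waiter that only arrives after the error path has already run still returns, with no isert_check_state()-style re-check needed. A rough pthread analogue of that semantics (illustrative userspace code, not the kernel implementation):

/* Minimal userspace analogue of the struct completion conversion above. */
#include <pthread.h>
#include <stdio.h>

struct completion {
    pthread_mutex_t lock;
    pthread_cond_t  cond;
    int             done;            /* latched: a late waiter still sees it */
};

static void init_completion(struct completion *c)
{
    pthread_mutex_init(&c->lock, NULL);
    pthread_cond_init(&c->cond, NULL);
    c->done = 0;
}

static void complete(struct completion *c)
{
    pthread_mutex_lock(&c->lock);
    c->done = 1;                      /* record the event ... */
    pthread_cond_broadcast(&c->cond); /* ... and wake anyone already waiting */
    pthread_mutex_unlock(&c->lock);
}

static void wait_for_completion(struct completion *c)
{
    pthread_mutex_lock(&c->lock);
    while (!c->done)                  /* returns at once if complete() ran first */
        pthread_cond_wait(&c->cond, &c->lock);
    pthread_mutex_unlock(&c->lock);
}

static struct completion conn_wait_comp_err;

static void *error_path(void *arg)
{
    (void)arg;
    complete(&conn_wait_comp_err);    /* e.g. the CQ error handler finishing */
    return NULL;
}

int main(void)
{
    pthread_t t;
    init_completion(&conn_wait_comp_err);
    pthread_create(&t, NULL, error_path, NULL);
    pthread_join(t, NULL);            /* the event fires before anyone waits */
    wait_for_completion(&conn_wait_comp_err);   /* still returns: done is latched */
    printf("shutdown may proceed\n");
    return 0;
}
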
static void
@ -1740,8 +1787,14 @@ isert_cq_tx_work(struct work_struct *work)
pr_debug("TX wc.status != IB_WC_SUCCESS >>>>>>>>>>>>>>\n");
pr_debug("TX wc.status: 0x%08x\n", wc.status);
pr_debug("TX wc.vendor_err: 0x%08x\n", wc.vendor_err);
atomic_dec(&isert_conn->post_send_buf_count);
isert_cq_comp_err(tx_desc, isert_conn);
if (wc.wr_id != ISER_FASTREG_LI_WRID) {
if (tx_desc->llnode_active)
continue;
atomic_dec(&isert_conn->post_send_buf_count);
isert_cq_tx_comp_err(tx_desc, isert_conn);
}
}
}
@ -1784,7 +1837,7 @@ isert_cq_rx_work(struct work_struct *work)
wc.vendor_err);
}
isert_conn->post_recv_buf_count--;
isert_cq_comp_err(NULL, isert_conn);
isert_cq_rx_comp_err(isert_conn);
}
}
@ -2202,6 +2255,7 @@ isert_fast_reg_mr(struct fast_reg_descriptor *fr_desc,
if (!fr_desc->valid) {
memset(&inv_wr, 0, sizeof(inv_wr));
inv_wr.wr_id = ISER_FASTREG_LI_WRID;
inv_wr.opcode = IB_WR_LOCAL_INV;
inv_wr.ex.invalidate_rkey = fr_desc->data_mr->rkey;
wr = &inv_wr;
@ -2212,6 +2266,7 @@ isert_fast_reg_mr(struct fast_reg_descriptor *fr_desc,
/* Prepare FASTREG WR */
memset(&fr_wr, 0, sizeof(fr_wr));
fr_wr.wr_id = ISER_FASTREG_LI_WRID;
fr_wr.opcode = IB_WR_FAST_REG_MR;
fr_wr.wr.fast_reg.iova_start =
fr_desc->data_frpl->page_list[0] + page_off;
@ -2377,12 +2432,12 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
isert_init_send_wr(isert_conn, isert_cmd,
&isert_cmd->tx_desc.send_wr, true);
atomic_inc(&isert_conn->post_send_buf_count);
atomic_add(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);
rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
if (rc) {
pr_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n");
atomic_dec(&isert_conn->post_send_buf_count);
atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);
}
pr_debug("Cmd: %p posted RDMA_WRITE + Response for iSER Data READ\n",
isert_cmd);
@ -2410,12 +2465,12 @@ isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
return rc;
}
atomic_inc(&isert_conn->post_send_buf_count);
atomic_add(wr->send_wr_num, &isert_conn->post_send_buf_count);
rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
if (rc) {
pr_warn("ib_post_send() failed for IB_WR_RDMA_READ\n");
atomic_dec(&isert_conn->post_send_buf_count);
atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
}
pr_debug("Cmd: %p posted RDMA_READ memory for ISER Data WRITE\n",
isert_cmd);
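
These two hunks change the accounting from one count per posted chain to one per posted work request: send_wr_num + 1 for an RDMA_WRITE chain plus its response, send_wr_num for an RDMA_READ whose response is posted separately later. That matters because the TX error path above decrements once per flushed work request, and teardown waits for the counter to reach zero. A small sketch of the invariant, with illustrative names:

/* Sketch of the post_send_buf_count invariant: the counter must track
 * outstanding work requests one-for-one, because the error path reaps them
 * one at a time and shutdown waits for zero. */
#include <assert.h>
#include <stdio.h>

static int post_send_buf_count;

/* Post an RDMA_WRITE chain of send_wr_num WRs plus the final response. */
static void post_datain(int send_wr_num)
{
    post_send_buf_count += send_wr_num + 1;  /* was a bare increment before the fix */
}

/* The completion path (or the flush-error path) eventually reaps every WR. */
static void reap(int nr_wrs)
{
    post_send_buf_count -= nr_wrs;
}

int main(void)
{
    post_datain(4);              /* 4 RDMA_WRITE WRs + 1 response posted */
    for (int i = 0; i < 5; i++)
        reap(1);                 /* e.g. five flush errors, one per posted WR */
    assert(post_send_buf_count == 0);    /* with the old ++, this ends at -4 */
    printf("count balanced at %d\n", post_send_buf_count);
    return 0;
}
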
@ -2702,22 +2757,11 @@ isert_free_np(struct iscsi_np *np)
kfree(isert_np);
}
static int isert_check_state(struct isert_conn *isert_conn, int state)
{
int ret;
mutex_lock(&isert_conn->conn_mutex);
ret = (isert_conn->state == state);
mutex_unlock(&isert_conn->conn_mutex);
return ret;
}
static void isert_free_conn(struct iscsi_conn *conn)
static void isert_wait_conn(struct iscsi_conn *conn)
{
struct isert_conn *isert_conn = conn->context;
pr_debug("isert_free_conn: Starting \n");
pr_debug("isert_wait_conn: Starting \n");
/*
* Decrement post_send_buf_count for special case when called
* from isert_do_control_comp() -> iscsit_logout_post_handler()
@ -2727,38 +2771,29 @@ static void isert_free_conn(struct iscsi_conn *conn)
atomic_dec(&isert_conn->post_send_buf_count);
if (isert_conn->conn_cm_id && isert_conn->state != ISER_CONN_DOWN) {
pr_debug("Calling rdma_disconnect from isert_free_conn\n");
pr_debug("Calling rdma_disconnect from isert_wait_conn\n");
rdma_disconnect(isert_conn->conn_cm_id);
}
/*
* Only wait for conn_wait_comp_err if the isert_conn made it
* into full feature phase..
*/
if (isert_conn->state == ISER_CONN_UP) {
pr_debug("isert_free_conn: Before wait_event comp_err %d\n",
isert_conn->state);
mutex_unlock(&isert_conn->conn_mutex);
wait_event(isert_conn->conn_wait_comp_err,
(isert_check_state(isert_conn, ISER_CONN_TERMINATING)));
wait_event(isert_conn->conn_wait,
(isert_check_state(isert_conn, ISER_CONN_DOWN)));
isert_put_conn(isert_conn);
return;
}
if (isert_conn->state == ISER_CONN_INIT) {
mutex_unlock(&isert_conn->conn_mutex);
isert_put_conn(isert_conn);
return;
}
pr_debug("isert_free_conn: wait_event conn_wait %d\n",
isert_conn->state);
if (isert_conn->state == ISER_CONN_UP)
isert_conn->state = ISER_CONN_TERMINATING;
mutex_unlock(&isert_conn->conn_mutex);
wait_event(isert_conn->conn_wait,
(isert_check_state(isert_conn, ISER_CONN_DOWN)));
wait_for_completion(&isert_conn->conn_wait_comp_err);
wait_for_completion(&isert_conn->conn_wait);
}
static void isert_free_conn(struct iscsi_conn *conn)
{
struct isert_conn *isert_conn = conn->context;
isert_put_conn(isert_conn);
}
@ -2771,6 +2806,7 @@ static struct iscsit_transport iser_target_transport = {
.iscsit_setup_np = isert_setup_np,
.iscsit_accept_np = isert_accept_np,
.iscsit_free_np = isert_free_np,
.iscsit_wait_conn = isert_wait_conn,
.iscsit_free_conn = isert_free_conn,
.iscsit_get_login_rx = isert_get_login_rx,
.iscsit_put_login_tx = isert_put_login_tx,
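
The new .iscsit_wait_conn entry is an optional op: iscsit_close_connection() (in the iscsi_target.c hunk further down) only calls it when the transport provides one, so transports that do not set it are unaffected. A compressed sketch of that optional-callback pattern, with illustrative types and names:

/* Optional-op pattern behind the ->iscsit_wait_conn NULL check; illustrative. */
#include <stdio.h>

struct iscsi_conn;                   /* opaque here */

struct transport_ops {
    void (*wait_conn)(struct iscsi_conn *);   /* optional: may be NULL */
    void (*free_conn)(struct iscsi_conn *);   /* required */
};

static void wait_conn_stub(struct iscsi_conn *conn)
{
    (void)conn;
    printf("waiting for outstanding I/O to complete\n");
}

static void free_conn_stub(struct iscsi_conn *conn)
{
    (void)conn;
    printf("freeing connection\n");
}

static void close_connection(const struct transport_ops *t, struct iscsi_conn *conn)
{
    if (t->wait_conn)                /* transports without the op skip this step */
        t->wait_conn(conn);
    t->free_conn(conn);
}

int main(void)
{
    struct transport_ops with_wait = { wait_conn_stub, free_conn_stub };
    struct transport_ops without   = { NULL,           free_conn_stub };
    close_connection(&with_wait, NULL);
    close_connection(&without, NULL);    /* no wait op: safely skipped */
    return 0;
}
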

drivers/infiniband/ulp/isert/ib_isert.h

@ -6,6 +6,7 @@
#define ISERT_RDMA_LISTEN_BACKLOG 10
#define ISCSI_ISER_SG_TABLESIZE 256
#define ISER_FASTREG_LI_WRID 0xffffffffffffffffULL
enum isert_desc_type {
ISCSI_TX_CONTROL,
@ -45,6 +46,7 @@ struct iser_tx_desc {
struct isert_cmd *isert_cmd;
struct llist_node *comp_llnode_batch;
struct llist_node comp_llnode;
bool llnode_active;
struct ib_send_wr send_wr;
} __packed;
@ -116,8 +118,8 @@ struct isert_conn {
struct isert_device *conn_device;
struct work_struct conn_logout_work;
struct mutex conn_mutex;
wait_queue_head_t conn_wait;
wait_queue_head_t conn_wait_comp_err;
struct completion conn_wait;
struct completion conn_wait_comp_err;
struct kref conn_kref;
struct list_head conn_fr_pool;
int conn_fr_pool_size;
@ -126,7 +128,6 @@ struct isert_conn {
#define ISERT_COMP_BATCH_COUNT 8
int conn_comp_batch;
struct llist_head conn_comp_llist;
struct mutex conn_comp_mutex;
};
#define ISERT_MAX_CQ 64

drivers/target/iscsi/iscsi_target.c

@ -785,7 +785,7 @@ static void iscsit_ack_from_expstatsn(struct iscsi_conn *conn, u32 exp_statsn)
spin_unlock_bh(&conn->cmd_lock);
list_for_each_entry_safe(cmd, cmd_p, &ack_list, i_conn_node) {
list_del(&cmd->i_conn_node);
list_del_init(&cmd->i_conn_node);
iscsit_free_cmd(cmd, false);
}
}
@ -3708,7 +3708,7 @@ iscsit_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state
break;
case ISTATE_REMOVE:
spin_lock_bh(&conn->cmd_lock);
list_del(&cmd->i_conn_node);
list_del_init(&cmd->i_conn_node);
spin_unlock_bh(&conn->cmd_lock);
iscsit_free_cmd(cmd, false);
@ -4151,7 +4151,7 @@ static void iscsit_release_commands_from_conn(struct iscsi_conn *conn)
spin_lock_bh(&conn->cmd_lock);
list_for_each_entry_safe(cmd, cmd_tmp, &conn->conn_cmd_list, i_conn_node) {
list_del(&cmd->i_conn_node);
list_del_init(&cmd->i_conn_node);
spin_unlock_bh(&conn->cmd_lock);
iscsit_increment_maxcmdsn(cmd, sess);
@ -4196,6 +4196,10 @@ int iscsit_close_connection(
iscsit_stop_timers_for_cmds(conn);
iscsit_stop_nopin_response_timer(conn);
iscsit_stop_nopin_timer(conn);
if (conn->conn_transport->iscsit_wait_conn)
conn->conn_transport->iscsit_wait_conn(conn);
iscsit_free_queue_reqs_for_conn(conn);
/*

drivers/target/iscsi/iscsi_target_erl2.c

@ -138,7 +138,7 @@ void iscsit_free_connection_recovery_entires(struct iscsi_session *sess)
list_for_each_entry_safe(cmd, cmd_tmp,
&cr->conn_recovery_cmd_list, i_conn_node) {
list_del(&cmd->i_conn_node);
list_del_init(&cmd->i_conn_node);
cmd->conn = NULL;
spin_unlock(&cr->conn_recovery_cmd_lock);
iscsit_free_cmd(cmd, true);
@ -160,7 +160,7 @@ void iscsit_free_connection_recovery_entires(struct iscsi_session *sess)
list_for_each_entry_safe(cmd, cmd_tmp,
&cr->conn_recovery_cmd_list, i_conn_node) {
list_del(&cmd->i_conn_node);
list_del_init(&cmd->i_conn_node);
cmd->conn = NULL;
spin_unlock(&cr->conn_recovery_cmd_lock);
iscsit_free_cmd(cmd, true);
@ -216,7 +216,7 @@ int iscsit_remove_cmd_from_connection_recovery(
}
cr = cmd->cr;
list_del(&cmd->i_conn_node);
list_del_init(&cmd->i_conn_node);
return --cr->cmd_count;
}
@ -297,7 +297,7 @@ int iscsit_discard_unacknowledged_ooo_cmdsns_for_conn(struct iscsi_conn *conn)
if (!(cmd->cmd_flags & ICF_OOO_CMDSN))
continue;
list_del(&cmd->i_conn_node);
list_del_init(&cmd->i_conn_node);
spin_unlock_bh(&conn->cmd_lock);
iscsit_free_cmd(cmd, true);
@ -335,7 +335,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn)
/*
* Only perform connection recovery on ISCSI_OP_SCSI_CMD or
* ISCSI_OP_NOOP_OUT opcodes. For all other opcodes call
* list_del(&cmd->i_conn_node); to release the command to the
* list_del_init(&cmd->i_conn_node); to release the command to the
* session pool and remove it from the connection's list.
*
* Also stop the DataOUT timer, which will be restarted after
@ -351,7 +351,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn)
" CID: %hu\n", cmd->iscsi_opcode,
cmd->init_task_tag, cmd->cmd_sn, conn->cid);
list_del(&cmd->i_conn_node);
list_del_init(&cmd->i_conn_node);
spin_unlock_bh(&conn->cmd_lock);
iscsit_free_cmd(cmd, true);
spin_lock_bh(&conn->cmd_lock);
@ -371,7 +371,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn)
*/
if (!(cmd->cmd_flags & ICF_OOO_CMDSN) && !cmd->immediate_cmd &&
iscsi_sna_gte(cmd->cmd_sn, conn->sess->exp_cmd_sn)) {
list_del(&cmd->i_conn_node);
list_del_init(&cmd->i_conn_node);
spin_unlock_bh(&conn->cmd_lock);
iscsit_free_cmd(cmd, true);
spin_lock_bh(&conn->cmd_lock);
@ -393,7 +393,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn)
cmd->sess = conn->sess;
list_del(&cmd->i_conn_node);
list_del_init(&cmd->i_conn_node);
spin_unlock_bh(&conn->cmd_lock);
iscsit_free_all_datain_reqs(cmd);
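
All of the list_del() to list_del_init() conversions above and in isert_put_cmd() exist because the shutdown paths now test list_empty(&cmd->i_conn_node) before deleting: plain list_del() poisons the entry, so that check never again reports the command as off-list (and a second deletion would dereference the poison), while list_del_init() leaves the entry pointing at itself. A userspace rendering of that difference, with a simplified list and illustrative poison values:

/* list_del vs. list_del_init on a kernel-style doubly linked list; sketch only. */
#include <stdbool.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define POISON1 ((struct list_head *)0x100)   /* stands in for LIST_POISON1/2 */
#define POISON2 ((struct list_head *)0x200)

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

static void list_add(struct list_head *new, struct list_head *head)
{
    new->next = head->next; new->prev = head;
    head->next->prev = new; head->next = new;
}

static void __list_del_entry(struct list_head *e)
{
    e->prev->next = e->next; e->next->prev = e->prev;
}

/* Like the kernel's list_del(): the entry is poisoned, so neither
 * list_empty() nor a second deletion is safe on it afterwards. */
static void list_del(struct list_head *e)
{
    __list_del_entry(e);
    e->next = POISON1; e->prev = POISON2;
}

/* Like list_del_init(): the entry points back at itself, so a later
 * list_empty(&entry) check (as in isert_put_cmd) sees "not on a list". */
static void list_del_init(struct list_head *e)
{
    __list_del_entry(e);
    INIT_LIST_HEAD(e);
}

static bool list_empty(const struct list_head *h) { return h->next == h; }

int main(void)
{
    struct list_head conn_cmd_list, a, b;
    INIT_LIST_HEAD(&conn_cmd_list);
    list_add(&a, &conn_cmd_list);
    list_add(&b, &conn_cmd_list);

    list_del(&a);          /* old pattern */
    list_del_init(&b);     /* new pattern */

    /* The shutdown path asks "is this command still on the conn list?" */
    printf("after list_del:      list_empty = %d (poisoned, looks busy forever)\n",
           (int)list_empty(&a));
    printf("after list_del_init: list_empty = %d\n", (int)list_empty(&b));
    return 0;
}
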

drivers/target/iscsi/iscsi_target_tpg.c

@ -137,7 +137,7 @@ struct iscsi_portal_group *iscsit_get_tpg_from_np(
list_for_each_entry(tpg, &tiqn->tiqn_tpg_list, tpg_list) {
spin_lock(&tpg->tpg_state_lock);
if (tpg->tpg_state == TPG_STATE_FREE) {
if (tpg->tpg_state != TPG_STATE_ACTIVE) {
spin_unlock(&tpg->tpg_state_lock);
continue;
}
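
The one-line change above widens the rejection test in iscsit_get_tpg_from_np(): a portal group that has already left the ACTIVE state during an explicit shutdown is now skipped, instead of only being skipped once it reaches FREE. A small sketch of the two predicates; the enum values and types here are illustrative, not the target core's:

/* Old vs. new tpg lookup predicate; illustrative types and states. */
#include <stddef.h>
#include <stdio.h>

enum tpg_state { TPG_FREE, TPG_ACTIVE, TPG_INACTIVE };

struct tpg {
    enum tpg_state state;
    const char *name;
};

/* Old predicate: only TPG_FREE was rejected, so TPG_INACTIVE slipped through. */
static int usable_old(const struct tpg *t) { return t->state != TPG_FREE; }

/* New predicate: anything not fully ACTIVE is rejected. */
static int usable_new(const struct tpg *t) { return t->state == TPG_ACTIVE; }

static const struct tpg *find_tpg(const struct tpg *list, size_t n,
                                  int (*usable)(const struct tpg *))
{
    for (size_t i = 0; i < n; i++)
        if (usable(&list[i]))
            return &list[i];
    return NULL;
}

int main(void)
{
    /* A tpg being shut down has left ACTIVE but is not yet FREE. */
    const struct tpg tpgs[] = { { TPG_INACTIVE, "tpgt_1 (shutting down)" },
                                { TPG_ACTIVE,   "tpgt_2" } };
    printf("old check picks: %s\n", find_tpg(tpgs, 2, usable_old)->name);
    printf("new check picks: %s\n", find_tpg(tpgs, 2, usable_new)->name);
    return 0;
}
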

drivers/target/target_core_sbc.c

@ -1079,25 +1079,31 @@ sbc_dif_copy_prot(struct se_cmd *cmd, unsigned int sectors, bool read,
left = sectors * dev->prot_length;
for_each_sg(cmd->t_prot_sg, psg, cmd->t_prot_nents, i) {
len = min(psg->length, left);
if (offset >= sg->length) {
sg = sg_next(sg);
offset = 0;
}
unsigned int psg_len, copied = 0;
paddr = kmap_atomic(sg_page(psg)) + psg->offset;
addr = kmap_atomic(sg_page(sg)) + sg->offset + offset;
psg_len = min(left, psg->length);
while (psg_len) {
len = min(psg_len, sg->length - offset);
addr = kmap_atomic(sg_page(sg)) + sg->offset + offset;
if (read)
memcpy(paddr, addr, len);
else
memcpy(addr, paddr, len);
if (read)
memcpy(paddr + copied, addr, len);
else
memcpy(addr, paddr + copied, len);
left -= len;
offset += len;
left -= len;
offset += len;
copied += len;
psg_len -= len;
if (offset >= sg->length) {
sg = sg_next(sg);
offset = 0;
}
kunmap_atomic(addr);
}
kunmap_atomic(paddr);
kunmap_atomic(addr);
}
}
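
The rewritten loop above walks the protection scatterlist and, per chunk, splits the copy across however many chunks of the other scatterlist it overlaps, carrying the running offset over chunk boundaries instead of assuming the two lists line up. A self-contained userspace sketch of the same walk, copying one direction only for brevity; the chunk layout and names are illustrative:

/* Copy between two lists of differently sized buffers, starting at a byte
 * offset into the destination side -- the shape of the fixed kernel loop. */
#include <assert.h>
#include <stdio.h>
#include <string.h>

struct chunk { unsigned char *buf; unsigned int len; };

static void copy_prot(struct chunk *dst, struct chunk *src, size_t nsrc,
                      unsigned int offset, unsigned int left)
{
    struct chunk *d = dst;
    for (size_t i = 0; i < nsrc && left; i++) {
        unsigned int src_len = src[i].len < left ? src[i].len : left;
        unsigned int copied = 0;
        /* Inner loop: spread this source chunk over as many destination
         * chunks as it needs, advancing when one fills up. */
        while (src_len) {
            unsigned int len = d->len - offset;
            if (len > src_len)
                len = src_len;
            memcpy(d->buf + offset, src[i].buf + copied, len);
            left -= len;
            offset += len;
            copied += len;
            src_len -= len;
            if (offset >= d->len) {      /* destination chunk exhausted */
                d++;
                offset = 0;
            }
        }
    }
}

int main(void)
{
    unsigned char s0[] = "ABCDEFGH", s1[] = "IJKL";
    unsigned char d0[5], d1[7], d2[4];
    struct chunk src[] = { { s0, 8 }, { s1, 4 } };
    struct chunk dst[] = { { d0, 5 }, { d1, 7 }, { d2, 4 } };

    copy_prot(dst, src, 2, 2, 12);       /* 12 bytes, starting 2 bytes into d0 */

    assert(memcmp(d0 + 2, "ABC", 3) == 0);
    assert(memcmp(d1, "DEFGHIJ", 7) == 0);
    assert(memcmp(d2, "KL", 2) == 0);
    printf("copied across unaligned chunk boundaries\n");
    return 0;
}
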

include/target/iscsi/iscsi_transport.h

@ -12,6 +12,7 @@ struct iscsit_transport {
int (*iscsit_setup_np)(struct iscsi_np *, struct __kernel_sockaddr_storage *);
int (*iscsit_accept_np)(struct iscsi_np *, struct iscsi_conn *);
void (*iscsit_free_np)(struct iscsi_np *);
void (*iscsit_wait_conn)(struct iscsi_conn *);
void (*iscsit_free_conn)(struct iscsi_conn *);
int (*iscsit_get_login_rx)(struct iscsi_conn *, struct iscsi_login *);
int (*iscsit_put_login_tx)(struct iscsi_conn *, struct iscsi_login *, u32);