diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c
index 1a696f76b616..0bb99bb38809 100644
--- a/drivers/infiniband/core/iwcm.c
+++ b/drivers/infiniband/core/iwcm.c
@@ -624,17 +624,6 @@ static void cm_conn_req_handler(struct iwcm_id_private *listen_id_priv,
      */
     BUG_ON(iw_event->status);
 
-    /*
-     * We could be destroying the listening id. If so, ignore this
-     * upcall.
-     */
-    spin_lock_irqsave(&listen_id_priv->lock, flags);
-    if (listen_id_priv->state != IW_CM_STATE_LISTEN) {
-        spin_unlock_irqrestore(&listen_id_priv->lock, flags);
-        goto out;
-    }
-    spin_unlock_irqrestore(&listen_id_priv->lock, flags);
-
     cm_id = iw_create_cm_id(listen_id_priv->id.device,
                 listen_id_priv->id.cm_handler,
                 listen_id_priv->id.context);
@@ -649,6 +638,19 @@ static void cm_conn_req_handler(struct iwcm_id_private *listen_id_priv,
     cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
     cm_id_priv->state = IW_CM_STATE_CONN_RECV;
 
+    /*
+     * We could be destroying the listening id. If so, ignore this
+     * upcall.
+     */
+    spin_lock_irqsave(&listen_id_priv->lock, flags);
+    if (listen_id_priv->state != IW_CM_STATE_LISTEN) {
+        spin_unlock_irqrestore(&listen_id_priv->lock, flags);
+        iw_cm_reject(cm_id, NULL, 0);
+        iw_destroy_cm_id(cm_id);
+        goto out;
+    }
+    spin_unlock_irqrestore(&listen_id_priv->lock, flags);
+
     ret = alloc_work_entries(cm_id_priv, 3);
     if (ret) {
         iw_cm_reject(cm_id, NULL, 0);
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c
index 2fe428bba54c..426bb7617ec6 100644
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -1842,6 +1842,24 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv,
     }
 }
 
+static bool generate_unmatched_resp(struct ib_mad_private *recv,
+                    struct ib_mad_private *response)
+{
+    if (recv->mad.mad.mad_hdr.method == IB_MGMT_METHOD_GET ||
+        recv->mad.mad.mad_hdr.method == IB_MGMT_METHOD_SET) {
+        memcpy(response, recv, sizeof *response);
+        response->header.recv_wc.wc = &response->header.wc;
+        response->header.recv_wc.recv_buf.mad = &response->mad.mad;
+        response->header.recv_wc.recv_buf.grh = &response->grh;
+        response->mad.mad.mad_hdr.method = IB_MGMT_METHOD_GET_RESP;
+        response->mad.mad.mad_hdr.status =
+            cpu_to_be16(IB_MGMT_MAD_STATUS_UNSUPPORTED_METHOD_ATTRIB);
+
+        return true;
+    } else {
+        return false;
+    }
+}
 static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
                      struct ib_wc *wc)
 {
@@ -1963,6 +1981,9 @@ local:
          * or via recv_handler in ib_mad_complete_recv()
          */
         recv = NULL;
+    } else if (generate_unmatched_resp(recv, response)) {
+        agent_send_response(&response->mad.mad, &recv->grh, wc,
+                    port_priv->device, port_num, qp_info->qp->qp_num);
     }
 
 out:
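A note on the mad.c hunk: unmatched Get/Set requests now draw a response with status "unsupported method/attribute" instead of being silently dropped. A minimal runnable sketch of the same header rewrite; the struct and constants below are simplified stand-ins for the definitions in <rdma/ib_mad.h>, not the kernel's:

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

/* Simplified stand-ins for the kernel's MAD header and constants. */
#define MGMT_METHOD_GET                0x01
#define MGMT_METHOD_SET                0x02
#define MGMT_METHOD_GET_RESP           0x81
#define MAD_STATUS_UNSUP_METHOD_ATTRIB 0x000c

struct mad_hdr {
    uint8_t  method;
    uint16_t status;    /* big-endian on the wire */
};

/* Mirror of the new kernel logic: only Get/Set deserve a response. */
static int make_unmatched_resp(const struct mad_hdr *recv, struct mad_hdr *resp)
{
    if (recv->method != MGMT_METHOD_GET && recv->method != MGMT_METHOD_SET)
        return 0;                       /* drop everything else */
    *resp = *recv;
    resp->method = MGMT_METHOD_GET_RESP;
    resp->status = htons(MAD_STATUS_UNSUP_METHOD_ATTRIB);
    return 1;
}

int main(void)
{
    struct mad_hdr req = { MGMT_METHOD_GET, 0 }, resp;
    if (make_unmatched_resp(&req, &resp))
        printf("method 0x%02x -> 0x%02x, status 0x%04x\n",
               req.method, resp.method, ntohs(resp.status));
    return 0;
}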
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
index c61bca30fd2d..83b720ef6c34 100644
--- a/drivers/infiniband/core/sysfs.c
+++ b/drivers/infiniband/core/sysfs.c
@@ -179,34 +179,37 @@ static ssize_t rate_show(struct ib_port *p, struct port_attribute *unused,
 {
     struct ib_port_attr attr;
     char *speed = "";
-    int rate;
+    int rate = -1;        /* in deci-Gb/sec */
     ssize_t ret;
 
     ret = ib_query_port(p->ibdev, p->port_num, &attr);
     if (ret)
         return ret;
 
-    rate = (25 * attr.active_speed) / 10;
-
     switch (attr.active_speed) {
-    case 2:
-        speed = " DDR";
-        break;
-    case 4:
-        speed = " QDR";
-        break;
-    case 8:
-        speed = " FDR10";
-        rate = 10;
-        break;
-    case 16:
-        speed = " FDR";
-        rate = 14;
-        break;
-    case 32:
-        speed = " EDR";
+    case IB_SPEED_SDR: rate = 25; break;
+    case IB_SPEED_DDR:
+        speed = " DDR";
+        rate = 50;
+        break;
+    case IB_SPEED_QDR:
+        speed = " QDR";
+        rate = 100;
+        break;
+    case IB_SPEED_FDR10:
+        speed = " FDR10";
+        rate = 100;
+        break;
+    case IB_SPEED_FDR:
+        speed = " FDR";
+        rate = 140;
+        break;
+    case IB_SPEED_EDR:
+        speed = " EDR";
+        rate = 250;
         break;
     }
 
     rate *= ib_width_enum_to_int(attr.active_width);
@@ -214,7 +217,7 @@ static ssize_t rate_show(struct ib_port *p, struct port_attribute *unused,
         return -EINVAL;
 
     return sprintf(buf, "%d%s Gb/sec (%dX%s)\n",
-               rate, (attr.active_speed == 1) ? ".5" : "",
+               rate / 10, rate % 10 ? ".5" : "",
                ib_width_enum_to_int(attr.active_width), speed);
 }
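The sysfs.c change keeps the link rate in tenths of a Gb/sec so SDR 1X (2.5 Gb/sec) needs no special case when printing. A runnable model of the resulting arithmetic, with the per-lane values from the new switch:

#include <stdio.h>

/* deci-Gb/sec per lane, indexed as in the new switch: SDR..EDR */
static const int lane_rate[] = { 25, 50, 100, 100, 140, 250 };
static const char *speed_str[] = { "", " DDR", " QDR", " FDR10", " FDR", " EDR" };

int main(void)
{
    int widths[] = { 1, 4, 8, 12 };
    for (int s = 0; s < 6; s++)
        for (int w = 0; w < 4; w++) {
            int rate = lane_rate[s] * widths[w];   /* still deci-Gb/sec */
            printf("%d%s Gb/sec (%dX%s)\n", rate / 10,
                   rate % 10 ? ".5" : "", widths[w], speed_str[s]);
        }
    return 0;
}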
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index 5034a87cc72d..5861cdb22b7c 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -449,24 +449,6 @@ static void ucma_cleanup_multicast(struct ucma_context *ctx)
     mutex_unlock(&mut);
 }
 
-static void ucma_cleanup_events(struct ucma_context *ctx)
-{
-    struct ucma_event *uevent, *tmp;
-
-    list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list) {
-        if (uevent->ctx != ctx)
-            continue;
-
-        list_del(&uevent->list);
-
-        /* clear incoming connections. */
-        if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST)
-            rdma_destroy_id(uevent->cm_id);
-
-        kfree(uevent);
-    }
-}
-
 static void ucma_cleanup_mc_events(struct ucma_multicast *mc)
 {
     struct ucma_event *uevent, *tmp;
@@ -480,9 +462,16 @@ static void ucma_cleanup_mc_events(struct ucma_multicast *mc)
     }
 }
 
+/*
+ * We cannot hold file->mut when calling rdma_destroy_id() or we can
+ * deadlock.  We also acquire file->mut in ucma_event_handler(), and
+ * rdma_destroy_id() will wait until all callbacks have completed.
+ */
 static int ucma_free_ctx(struct ucma_context *ctx)
 {
     int events_reported;
+    struct ucma_event *uevent, *tmp;
+    LIST_HEAD(list);
 
     /* No new events will be generated after destroying the id. */
     rdma_destroy_id(ctx->cm_id);
@@ -491,10 +480,20 @@ static int ucma_free_ctx(struct ucma_context *ctx)
 
     /* Cleanup events not yet reported to the user. */
     mutex_lock(&ctx->file->mut);
-    ucma_cleanup_events(ctx);
+    list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list) {
+        if (uevent->ctx == ctx)
+            list_move_tail(&uevent->list, &list);
+    }
     list_del(&ctx->list);
     mutex_unlock(&ctx->file->mut);
 
+    list_for_each_entry_safe(uevent, tmp, &list, list) {
+        list_del(&uevent->list);
+        if (uevent->resp.event == RDMA_CM_EVENT_CONNECT_REQUEST)
+            rdma_destroy_id(uevent->cm_id);
+        kfree(uevent);
+    }
+
     events_reported = ctx->events_reported;
     kfree(ctx);
     return events_reported;
diff --git a/drivers/infiniband/hw/amso1100/c2_provider.c b/drivers/infiniband/hw/amso1100/c2_provider.c
index 12f923d64e42..07eb3a8067d8 100644
--- a/drivers/infiniband/hw/amso1100/c2_provider.c
+++ b/drivers/infiniband/hw/amso1100/c2_provider.c
@@ -94,7 +94,7 @@ static int c2_query_port(struct ib_device *ibdev,
     props->pkey_tbl_len = 1;
     props->qkey_viol_cntr = 0;
     props->active_width = 1;
-    props->active_speed = 1;
+    props->active_speed = IB_SPEED_SDR;
 
     return 0;
 }
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index 37c224fc3ad9..0bdf09aa6f42 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -1227,7 +1227,7 @@ static int iwch_query_port(struct ib_device *ibdev,
     props->gid_tbl_len = 1;
     props->pkey_tbl_len = 1;
     props->active_width = 2;
-    props->active_speed = 2;
+    props->active_speed = IB_SPEED_DDR;
     props->max_msg_sz = -1;
 
     return 0;
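The ucma.c rework above is the standard cure for this kind of deadlock: under file->mut the victim events are only moved to a private list, and rdma_destroy_id() runs after the mutex is dropped. A runnable pthread analogue of the pattern; names and types here are invented for illustration:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct event { struct event *next; int ctx_id; };

static pthread_mutex_t file_mut = PTHREAD_MUTEX_INITIALIZER;
static struct event *event_list;

/* Move all events for @ctx_id to a private list under the lock,
 * then free them with the lock dropped (freeing may block/re-lock). */
static void free_ctx_events(int ctx_id)
{
    struct event **pp = &event_list, *victims = NULL, *e;

    pthread_mutex_lock(&file_mut);
    while ((e = *pp) != NULL) {
        if (e->ctx_id == ctx_id) {
            *pp = e->next;
            e->next = victims;
            victims = e;
        } else
            pp = &e->next;
    }
    pthread_mutex_unlock(&file_mut);

    while ((e = victims) != NULL) {    /* lock no longer held */
        victims = e->next;
        printf("destroying event for ctx %d\n", e->ctx_id);
        free(e);
    }
}

int main(void)
{
    for (int i = 0; i < 4; i++) {
        struct event *e = malloc(sizeof(*e));
        e->ctx_id = i % 2;
        e->next = event_list;
        event_list = e;
    }
    free_ctx_events(0);
    return 0;
}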
diff --git a/drivers/infiniband/hw/cxgb3/iwch_qp.c b/drivers/infiniband/hw/cxgb3/iwch_qp.c
index bea5839d89ee..6de8463f453b 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_qp.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_qp.c
@@ -803,7 +803,7 @@ int iwch_post_terminate(struct iwch_qp *qhp, struct respQ_msg_t *rsp_msg)
  * Assumes qhp lock is held.
  */
 static void __flush_qp(struct iwch_qp *qhp, struct iwch_cq *rchp,
-               struct iwch_cq *schp, unsigned long *flag)
+               struct iwch_cq *schp)
 {
     int count;
     int flushed;
@@ -812,44 +812,44 @@ static void __flush_qp(struct iwch_qp *qhp, struct iwch_cq *rchp,
     PDBG("%s qhp %p rchp %p schp %p\n", __func__, qhp, rchp, schp);
     /* take a ref on the qhp since we must release the lock */
     atomic_inc(&qhp->refcnt);
-    spin_unlock_irqrestore(&qhp->lock, *flag);
+    spin_unlock(&qhp->lock);
 
     /* locking hierarchy: cq lock first, then qp lock. */
-    spin_lock_irqsave(&rchp->lock, *flag);
+    spin_lock(&rchp->lock);
     spin_lock(&qhp->lock);
     cxio_flush_hw_cq(&rchp->cq);
     cxio_count_rcqes(&rchp->cq, &qhp->wq, &count);
     flushed = cxio_flush_rq(&qhp->wq, &rchp->cq, count);
     spin_unlock(&qhp->lock);
-    spin_unlock_irqrestore(&rchp->lock, *flag);
+    spin_unlock(&rchp->lock);
     if (flushed) {
-        spin_lock_irqsave(&rchp->comp_handler_lock, *flag);
+        spin_lock(&rchp->comp_handler_lock);
         (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
-        spin_unlock_irqrestore(&rchp->comp_handler_lock, *flag);
+        spin_unlock(&rchp->comp_handler_lock);
     }
 
     /* locking hierarchy: cq lock first, then qp lock. */
-    spin_lock_irqsave(&schp->lock, *flag);
+    spin_lock(&schp->lock);
     spin_lock(&qhp->lock);
     cxio_flush_hw_cq(&schp->cq);
     cxio_count_scqes(&schp->cq, &qhp->wq, &count);
     flushed = cxio_flush_sq(&qhp->wq, &schp->cq, count);
     spin_unlock(&qhp->lock);
-    spin_unlock_irqrestore(&schp->lock, *flag);
+    spin_unlock(&schp->lock);
     if (flushed) {
-        spin_lock_irqsave(&schp->comp_handler_lock, *flag);
+        spin_lock(&schp->comp_handler_lock);
         (*schp->ibcq.comp_handler)(&schp->ibcq, schp->ibcq.cq_context);
-        spin_unlock_irqrestore(&schp->comp_handler_lock, *flag);
+        spin_unlock(&schp->comp_handler_lock);
     }
 
     /* deref */
     if (atomic_dec_and_test(&qhp->refcnt))
         wake_up(&qhp->wait);
 
-    spin_lock_irqsave(&qhp->lock, *flag);
+    spin_lock(&qhp->lock);
 }
 
-static void flush_qp(struct iwch_qp *qhp, unsigned long *flag)
+static void flush_qp(struct iwch_qp *qhp)
 {
     struct iwch_cq *rchp, *schp;
 
@@ -859,19 +859,19 @@ static void flush_qp(struct iwch_qp *qhp, unsigned long *flag)
     if (qhp->ibqp.uobject) {
         cxio_set_wq_in_error(&qhp->wq);
         cxio_set_cq_in_error(&rchp->cq);
-        spin_lock_irqsave(&rchp->comp_handler_lock, *flag);
+        spin_lock(&rchp->comp_handler_lock);
         (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
-        spin_unlock_irqrestore(&rchp->comp_handler_lock, *flag);
+        spin_unlock(&rchp->comp_handler_lock);
         if (schp != rchp) {
             cxio_set_cq_in_error(&schp->cq);
-            spin_lock_irqsave(&schp->comp_handler_lock, *flag);
+            spin_lock(&schp->comp_handler_lock);
             (*schp->ibcq.comp_handler)(&schp->ibcq,
                            schp->ibcq.cq_context);
-            spin_unlock_irqrestore(&schp->comp_handler_lock, *flag);
+            spin_unlock(&schp->comp_handler_lock);
         }
         return;
     }
-    __flush_qp(qhp, rchp, schp, flag);
+    __flush_qp(qhp, rchp, schp);
 }
 
@@ -1030,7 +1030,7 @@ int iwch_modify_qp(struct iwch_dev *rhp, struct iwch_qp *qhp,
             break;
         case IWCH_QP_STATE_ERROR:
             qhp->attr.state = IWCH_QP_STATE_ERROR;
-            flush_qp(qhp, &flag);
+            flush_qp(qhp);
             break;
         default:
             ret = -EINVAL;
@@ -1078,7 +1078,7 @@ int iwch_modify_qp(struct iwch_dev *rhp, struct iwch_qp *qhp,
         }
         switch (attrs->next_state) {
         case IWCH_QP_STATE_IDLE:
-            flush_qp(qhp, &flag);
+            flush_qp(qhp);
             qhp->attr.state = IWCH_QP_STATE_IDLE;
             qhp->attr.llp_stream_handle = NULL;
             put_ep(&qhp->ep->com);
@@ -1132,7 +1132,7 @@ err:
     free=1;
     wake_up(&qhp->wait);
     BUG_ON(!ep);
-    flush_qp(qhp, &flag);
+    flush_qp(qhp);
 out:
     spin_unlock_irqrestore(&qhp->lock, flag);
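The iwch_qp.c hunks can drop the _irqsave variants because, judging by the callers shown, flush_qp() is only reached with qhp->lock already held and interrupts off; what the code must still honor is the documented order of CQ lock before QP lock. A tiny runnable illustration of that ordering discipline (names are illustrative only):

#include <pthread.h>

static pthread_mutex_t cq_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t qp_lock = PTHREAD_MUTEX_INITIALIZER;

/* Every path that needs both locks takes them in the same order:
 * CQ first, then QP.  __flush_qp() above follows this by dropping
 * qhp->lock before re-acquiring cq lock -> qp lock. */
static void flush_one_cq(void)
{
    pthread_mutex_lock(&cq_lock);
    pthread_mutex_lock(&qp_lock);
    /* ... flush HW CQ, count and flush queue entries ... */
    pthread_mutex_unlock(&qp_lock);
    pthread_mutex_unlock(&cq_lock);
}

int main(void) { flush_one_cq(); return 0; }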
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 0cf61554f176..92b4c2b0308b 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -1114,7 +1114,7 @@ static void process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
      * generated when moving QP to RTS state.
      * A TERM message will be sent after QP has moved to RTS state
      */
-    if ((ep->mpa_attr.version == 2) &&
+    if ((ep->mpa_attr.version == 2) && peer2peer &&
         (ep->mpa_attr.p2p_type != p2p_type)) {
         ep->mpa_attr.p2p_type = FW_RI_INIT_P2PTYPE_DISABLED;
         rtr_mismatch = 1;
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
index 247fe706e7fa..be1c18f44400 100644
--- a/drivers/infiniband/hw/cxgb4/provider.c
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -329,7 +329,7 @@ static int c4iw_query_port(struct ib_device *ibdev, u8 port,
     props->gid_tbl_len = 1;
     props->pkey_tbl_len = 1;
     props->active_width = 2;
-    props->active_speed = 2;
+    props->active_speed = IB_SPEED_DDR;
     props->max_msg_sz = -1;
 
     return 0;
diff --git a/drivers/infiniband/hw/ehca/ehca_hca.c b/drivers/infiniband/hw/ehca/ehca_hca.c
index 73edc3668663..9ed4d2588304 100644
--- a/drivers/infiniband/hw/ehca/ehca_hca.c
+++ b/drivers/infiniband/hw/ehca/ehca_hca.c
@@ -233,7 +233,7 @@ int ehca_query_port(struct ib_device *ibdev,
         props->phys_state = 5;
         props->state = rblock->state;
         props->active_width = IB_WIDTH_12X;
-        props->active_speed = 0x1;
+        props->active_speed = IB_SPEED_SDR;
     }
 
 query_port1:
diff --git a/drivers/infiniband/hw/ehca/ehca_irq.c b/drivers/infiniband/hw/ehca/ehca_irq.c
index e571e60ecb88..53589000fd07 100644
--- a/drivers/infiniband/hw/ehca/ehca_irq.c
+++ b/drivers/infiniband/hw/ehca/ehca_irq.c
@@ -786,7 +786,8 @@ static struct task_struct *create_comp_task(struct ehca_comp_pool *pool,
     spin_lock_init(&cct->task_lock);
     INIT_LIST_HEAD(&cct->cq_list);
     init_waitqueue_head(&cct->wait_queue);
-    cct->task = kthread_create(comp_task, cct, "ehca_comp/%d", cpu);
+    cct->task = kthread_create_on_node(comp_task, cct, cpu_to_node(cpu),
+                       "ehca_comp/%d", cpu);
 
     return cct->task;
 }
diff --git a/drivers/infiniband/hw/ehca/ehca_mrmw.c b/drivers/infiniband/hw/ehca/ehca_mrmw.c
index 43cae84005f0..b781b2cb0624 100644
--- a/drivers/infiniband/hw/ehca/ehca_mrmw.c
+++ b/drivers/infiniband/hw/ehca/ehca_mrmw.c
@@ -112,7 +112,7 @@ static u32 ehca_encode_hwpage_size(u32 pgsize)
 
 static u64 ehca_get_max_hwpage_size(struct ehca_shca *shca)
 {
-    return 1UL << ilog2(shca->hca_cap_mr_pgsize);
+    return rounddown_pow_of_two(shca->hca_cap_mr_pgsize);
 }
 
 static struct ehca_mr *ehca_mr_new(void)
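The ehca_mrmw.c change is about intent only: for any nonzero x, rounddown_pow_of_two(x) equals 1UL << ilog2(x). A runnable check, with local helpers standing in for the kernel macros:

#include <assert.h>
#include <stdio.h>

/* Stand-ins for the kernel's ilog2()/rounddown_pow_of_two(). */
static unsigned long ilog2_ul(unsigned long x)
{
    unsigned long r = 0;
    while (x >>= 1)
        r++;
    return r;
}

static unsigned long rounddown_pow_of_two_ul(unsigned long x)
{
    return 1UL << ilog2_ul(x);
}

int main(void)
{
    unsigned long caps[] = { 0x1000, 0x11000, 0x10fff, 1 };
    for (int i = 0; i < 4; i++) {
        unsigned long c = caps[i];
        assert(rounddown_pow_of_two_ul(c) == 1UL << ilog2_ul(c));
        printf("0x%lx -> 0x%lx\n", c, rounddown_pow_of_two_ul(c));
    }
    return 0;
}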
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c
index 5ecf38d97269..77c8cb4c5073 100644
--- a/drivers/infiniband/hw/mlx4/cq.c
+++ b/drivers/infiniband/hw/mlx4/cq.c
@@ -720,7 +720,8 @@ repoll:
         wc->dlid_path_bits = (g_mlpath_rqpn >> 24) & 0x7f;
         wc->wc_flags      |= g_mlpath_rqpn & 0x80000000 ? IB_WC_GRH : 0;
         wc->pkey_index     = be32_to_cpu(cqe->immed_rss_invalid) & 0x7f;
-        wc->csum_ok = mlx4_ib_ipoib_csum_ok(cqe->status, cqe->checksum);
+        wc->wc_flags      |= mlx4_ib_ipoib_csum_ok(cqe->status,
+                    cqe->checksum) ? IB_WC_IP_CSUM_OK : 0;
         if (rdma_port_get_link_layer(wc->qp->device,
                 (*cur_qp)->port) == IB_LINK_LAYER_ETHERNET)
             wc->sl = be16_to_cpu(cqe->sl_vid) >> 13;
@@ -747,8 +748,7 @@ int mlx4_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
             break;
     }
 
-    if (npolled)
-        mlx4_cq_set_ci(&cq->mcq);
+    mlx4_cq_set_ci(&cq->mcq);
 
     spin_unlock_irqrestore(&cq->lock, flags);
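mlx4/cq.c (and mthca_cq.c further down) fold the old wc->csum_ok byte into an IB_WC_IP_CSUM_OK bit in wc->wc_flags, so consumers such as IPoIB test a single flags word. A sketch of the consumer side; the flag values are assumed, the real ones live in <rdma/ib_verbs.h>:

#include <stdio.h>

/* Assumed flag bits; the real values live in <rdma/ib_verbs.h>. */
#define IB_WC_GRH        (1 << 0)
#define IB_WC_IP_CSUM_OK (1 << 3)

struct wc { unsigned wc_flags; };

static void rx_complete(const struct wc *wc, int hw_can_csum)
{
    /* Matches the new IPoIB test: feature enabled AND flag set. */
    if (hw_can_csum && (wc->wc_flags & IB_WC_IP_CSUM_OK))
        printf("skb->ip_summed = CHECKSUM_UNNECESSARY\n");
    else
        printf("stack verifies checksum\n");
}

int main(void)
{
    struct wc ok = { IB_WC_GRH | IB_WC_IP_CSUM_OK }, bad = { IB_WC_GRH };
    rx_complete(&ok, 1);
    rx_complete(&bad, 1);
    return 0;
}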
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 7b445df6a667..75d305629300 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -163,7 +163,7 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
     props->max_mcast_qp_attach = dev->dev->caps.num_qp_per_mgm;
     props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
                        props->max_mcast_grp;
-    props->max_map_per_fmr = (1 << (32 - ilog2(dev->dev->caps.num_mpts))) - 1;
+    props->max_map_per_fmr = dev->dev->caps.max_fmr_maps;
 
 out:
     kfree(in_mad);
@@ -182,12 +182,27 @@ mlx4_ib_port_link_layer(struct ib_device *device, u8 port_num)
 }
 
 static int ib_link_query_port(struct ib_device *ibdev, u8 port,
-                  struct ib_port_attr *props,
-                  struct ib_smp *in_mad,
-                  struct ib_smp *out_mad)
+                  struct ib_port_attr *props)
 {
+    struct ib_smp *in_mad = NULL;
+    struct ib_smp *out_mad = NULL;
     int ext_active_speed;
-    int err;
+    int err = -ENOMEM;
+
+    in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
+    out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
+    if (!in_mad || !out_mad)
+        goto out;
+
+    init_query_mad(in_mad);
+    in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
+    in_mad->attr_mod = cpu_to_be32(port);
+
+    err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL,
+               in_mad, out_mad);
+    if (err)
+        goto out;
 
     props->lid = be16_to_cpup((__be16 *) (out_mad->data + 16));
     props->lmc = out_mad->data[34] & 0x7;
@@ -215,34 +230,33 @@ static int ib_link_query_port(struct ib_device *ibdev, u8 port,
 
         switch (ext_active_speed) {
         case 1:
-            props->active_speed = 16; /* FDR */
+            props->active_speed = IB_SPEED_FDR;
             break;
         case 2:
-            props->active_speed = 32; /* EDR */
+            props->active_speed = IB_SPEED_EDR;
             break;
         }
     }
 
     /* If reported active speed is QDR, check if is FDR-10 */
-    if (props->active_speed == 4) {
-        if (to_mdev(ibdev)->dev->caps.ext_port_cap[port] &
-            MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO) {
-            init_query_mad(in_mad);
-            in_mad->attr_id = MLX4_ATTR_EXTENDED_PORT_INFO;
-            in_mad->attr_mod = cpu_to_be32(port);
+    if (props->active_speed == IB_SPEED_QDR) {
+        init_query_mad(in_mad);
+        in_mad->attr_id = MLX4_ATTR_EXTENDED_PORT_INFO;
+        in_mad->attr_mod = cpu_to_be32(port);
 
-            err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, port,
-                       NULL, NULL, in_mad, out_mad);
-            if (err)
-                return err;
+        err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, port,
+                   NULL, NULL, in_mad, out_mad);
+        if (err)
+            return err;
 
-            /* Checking LinkSpeedActive for FDR-10 */
-            if (out_mad->data[15] & 0x1)
-                props->active_speed = 8;
-        }
+        /* Checking LinkSpeedActive for FDR-10 */
+        if (out_mad->data[15] & 0x1)
+            props->active_speed = IB_SPEED_FDR10;
     }
-
-    return 0;
+out:
+    kfree(in_mad);
+    kfree(out_mad);
+    return err;
 }
 
 static u8 state_to_phys_state(enum ib_port_state state)
@@ -251,32 +265,42 @@ static u8 state_to_phys_state(enum ib_port_state state)
 }
 
 static int eth_link_query_port(struct ib_device *ibdev, u8 port,
-                   struct ib_port_attr *props,
-                   struct ib_smp *out_mad)
+                   struct ib_port_attr *props)
 {
-    struct mlx4_ib_iboe *iboe = &to_mdev(ibdev)->iboe;
+
+    struct mlx4_ib_dev *mdev = to_mdev(ibdev);
+    struct mlx4_ib_iboe *iboe = &mdev->iboe;
     struct net_device *ndev;
     enum ib_mtu tmp;
+    struct mlx4_cmd_mailbox *mailbox;
+    int err = 0;
 
-    props->active_width = IB_WIDTH_1X;
-    props->active_speed = 4;
+    mailbox = mlx4_alloc_cmd_mailbox(mdev->dev);
+    if (IS_ERR(mailbox))
+        return PTR_ERR(mailbox);
+
+    err = mlx4_cmd_box(mdev->dev, 0, mailbox->dma, port, 0,
+               MLX4_CMD_QUERY_PORT, MLX4_CMD_TIME_CLASS_B,
+               MLX4_CMD_WRAPPED);
+    if (err)
+        goto out;
+
+    props->active_width = (((u8 *)mailbox->buf)[5] == 0x40) ?
+                  IB_WIDTH_4X : IB_WIDTH_1X;
+    props->active_speed = IB_SPEED_QDR;
     props->port_cap_flags = IB_PORT_CM_SUP;
-    props->gid_tbl_len = to_mdev(ibdev)->dev->caps.gid_table_len[port];
-    props->max_msg_sz = to_mdev(ibdev)->dev->caps.max_msg_sz;
+    props->gid_tbl_len = mdev->dev->caps.gid_table_len[port];
+    props->max_msg_sz = mdev->dev->caps.max_msg_sz;
     props->pkey_tbl_len = 1;
-    props->bad_pkey_cntr = be16_to_cpup((__be16 *) (out_mad->data + 46));
-    props->qkey_viol_cntr = be16_to_cpup((__be16 *) (out_mad->data + 48));
     props->max_mtu = IB_MTU_4096;
-    props->subnet_timeout = 0;
-    props->max_vl_num = out_mad->data[37] >> 4;
-    props->init_type_reply = 0;
+    props->max_vl_num = 2;
     props->state = IB_PORT_DOWN;
     props->phys_state = state_to_phys_state(props->state);
     props->active_mtu = IB_MTU_256;
     spin_lock(&iboe->lock);
     ndev = iboe->netdevs[port - 1];
     if (!ndev)
-        goto out;
+        goto out_unlock;
 
     tmp = iboe_get_mtu(ndev->mtu);
     props->active_mtu = tmp ? min(props->max_mtu, tmp) : IB_MTU_256;
@@ -284,41 +308,23 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port,
     props->state = (netif_running(ndev) && netif_carrier_ok(ndev)) ?
                     IB_PORT_ACTIVE : IB_PORT_DOWN;
     props->phys_state = state_to_phys_state(props->state);
-
-out:
+out_unlock:
     spin_unlock(&iboe->lock);
-    return 0;
+out:
+    mlx4_free_cmd_mailbox(mdev->dev, mailbox);
+    return err;
 }
 
 static int mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
                   struct ib_port_attr *props)
 {
-    struct ib_smp *in_mad = NULL;
-    struct ib_smp *out_mad = NULL;
-    int err = -ENOMEM;
-
-    in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
-    out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
-    if (!in_mad || !out_mad)
-        goto out;
+    int err;
 
     memset(props, 0, sizeof *props);
 
-    init_query_mad(in_mad);
-    in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
-    in_mad->attr_mod = cpu_to_be32(port);
-
-    err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad);
-    if (err)
-        goto out;
-
     err = mlx4_ib_port_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND ?
-        ib_link_query_port(ibdev, port, props, in_mad, out_mad) :
-        eth_link_query_port(ibdev, port, props, out_mad);
-
-out:
-    kfree(in_mad);
-    kfree(out_mad);
+        ib_link_query_port(ibdev, port, props) :
+        eth_link_query_port(ibdev, port, props);
 
     return err;
 }
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index aa2aefa4236c..3a7848966627 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -1884,6 +1884,7 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
         wmb();
 
         if (wr->opcode < 0 || wr->opcode >= ARRAY_SIZE(mlx4_ib_opcode)) {
+            *bad_wr = wr;
             err = -EINVAL;
             goto out;
         }
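The one-line mlx4/qp.c fix restores the post-send contract: on error, *bad_wr must point at the work request that failed so the caller knows how much of the chain was accepted. A runnable model of that contract, simplified from the verbs API:

#include <stdio.h>
#include <stddef.h>

struct send_wr { int opcode; struct send_wr *next; };
#define NUM_OPCODES 16

static int post_send(struct send_wr *wr, struct send_wr **bad_wr)
{
    for (; wr; wr = wr->next) {
        if (wr->opcode < 0 || wr->opcode >= NUM_OPCODES) {
            *bad_wr = wr;       /* the fix: report the culprit */
            return -1;
        }
        /* ... ring doorbell for wr ... */
    }
    return 0;
}

int main(void)
{
    struct send_wr c = { 99, NULL }, b = { 2, &c }, a = { 1, &b };
    struct send_wr *bad = NULL;
    if (post_send(&a, &bad))
        printf("failed at wr with opcode %d\n", bad->opcode);
    return 0;
}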
diff --git a/drivers/infiniband/hw/mthca/mthca_cq.c b/drivers/infiniband/hw/mthca/mthca_cq.c
index 53157b86a1ba..40ba83338155 100644
--- a/drivers/infiniband/hw/mthca/mthca_cq.c
+++ b/drivers/infiniband/hw/mthca/mthca_cq.c
@@ -643,7 +643,8 @@ static inline int mthca_poll_one(struct mthca_dev *dev,
         entry->wc_flags |= cqe->g_mlpath & 0x80 ? IB_WC_GRH : 0;
         checksum = (be32_to_cpu(cqe->rqpn) >> 24) |
                ((be32_to_cpu(cqe->my_ee) >> 16) & 0xff00);
-        entry->csum_ok = (cqe->sl_ipok & 1 && checksum == 0xffff);
+        entry->wc_flags |= (cqe->sl_ipok & 1 && checksum == 0xffff) ?
+                   IB_WC_IP_CSUM_OK : 0;
     }
 
     entry->status = IB_WC_SUCCESS;
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index c5e4cb2d3223..71edfbbcce1c 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -338,18 +338,21 @@ static int parse_mpa(struct nes_cm_node *cm_node, u8 *buffer, u32 *type,
     case IETF_MPA_V2: {
         u16 ird_size;
         u16 ord_size;
+        u16 rtr_ctrl_ird;
+        u16 rtr_ctrl_ord;
+
         mpa_v2_frame = (struct ietf_mpa_v2 *)buffer;
         mpa_hdr_len += IETF_RTR_MSG_SIZE;
         cm_node->mpa_frame_size -= IETF_RTR_MSG_SIZE;
         rtr_msg = &mpa_v2_frame->rtr_msg;
 
         /* parse rtr message */
-        rtr_msg->ctrl_ird = ntohs(rtr_msg->ctrl_ird);
-        rtr_msg->ctrl_ord = ntohs(rtr_msg->ctrl_ord);
-        ird_size = rtr_msg->ctrl_ird & IETF_NO_IRD_ORD;
-        ord_size = rtr_msg->ctrl_ord & IETF_NO_IRD_ORD;
+        rtr_ctrl_ird = ntohs(rtr_msg->ctrl_ird);
+        rtr_ctrl_ord = ntohs(rtr_msg->ctrl_ord);
+        ird_size = rtr_ctrl_ird & IETF_NO_IRD_ORD;
+        ord_size = rtr_ctrl_ord & IETF_NO_IRD_ORD;
 
-        if (!(rtr_msg->ctrl_ird & IETF_PEER_TO_PEER)) {
+        if (!(rtr_ctrl_ird & IETF_PEER_TO_PEER)) {
             /* send reset */
             return -EINVAL;
         }
@@ -370,9 +373,9 @@ static int parse_mpa(struct nes_cm_node *cm_node, u8 *buffer, u32 *type,
             }
         }
 
-        if (rtr_msg->ctrl_ord & IETF_RDMA0_READ) {
+        if (rtr_ctrl_ord & IETF_RDMA0_READ) {
             cm_node->send_rdma0_op = SEND_RDMA_READ_ZERO;
-        } else if (rtr_msg->ctrl_ord & IETF_RDMA0_WRITE) {
+        } else if (rtr_ctrl_ord & IETF_RDMA0_WRITE) {
             cm_node->send_rdma0_op = SEND_RDMA_WRITE_ZERO;
         } else {        /* Not supported RDMA0 operation */
             return -EINVAL;
@@ -543,6 +546,8 @@ static void build_mpa_v2(struct nes_cm_node *cm_node,
 {
     struct ietf_mpa_v2 *mpa_frame = (struct ietf_mpa_v2 *)start_addr;
     struct ietf_rtr_msg *rtr_msg = &mpa_frame->rtr_msg;
+    u16 ctrl_ird;
+    u16 ctrl_ord;
 
     /* initialize the upper 5 bytes of the frame */
     build_mpa_v1(cm_node, start_addr, mpa_key);
@@ -550,31 +555,31 @@ static void build_mpa_v2(struct nes_cm_node *cm_node,
     mpa_frame->priv_data_len += htons(IETF_RTR_MSG_SIZE);
 
     /* initialize RTR msg */
-    rtr_msg->ctrl_ird = (cm_node->ird_size > IETF_NO_IRD_ORD) ?
+    ctrl_ird = (cm_node->ird_size > IETF_NO_IRD_ORD) ?
                 IETF_NO_IRD_ORD : cm_node->ird_size;
-    rtr_msg->ctrl_ord = (cm_node->ord_size > IETF_NO_IRD_ORD) ?
+    ctrl_ord = (cm_node->ord_size > IETF_NO_IRD_ORD) ?
                IETF_NO_IRD_ORD : cm_node->ord_size;
-    rtr_msg->ctrl_ird |= IETF_PEER_TO_PEER;
-    rtr_msg->ctrl_ird |= IETF_FLPDU_ZERO_LEN;
+    ctrl_ird |= IETF_PEER_TO_PEER;
+    ctrl_ird |= IETF_FLPDU_ZERO_LEN;
 
     switch (mpa_key) {
     case MPA_KEY_REQUEST:
-        rtr_msg->ctrl_ord |= IETF_RDMA0_WRITE;
-        rtr_msg->ctrl_ord |= IETF_RDMA0_READ;
+        ctrl_ord |= IETF_RDMA0_WRITE;
+        ctrl_ord |= IETF_RDMA0_READ;
         break;
     case MPA_KEY_REPLY:
         switch (cm_node->send_rdma0_op) {
         case SEND_RDMA_WRITE_ZERO:
-            rtr_msg->ctrl_ord |= IETF_RDMA0_WRITE;
+            ctrl_ord |= IETF_RDMA0_WRITE;
             break;
         case SEND_RDMA_READ_ZERO:
-            rtr_msg->ctrl_ord |= IETF_RDMA0_READ;
+            ctrl_ord |= IETF_RDMA0_READ;
             break;
         }
     }
-    rtr_msg->ctrl_ird = htons(rtr_msg->ctrl_ird);
-    rtr_msg->ctrl_ord = htons(rtr_msg->ctrl_ord);
+    rtr_msg->ctrl_ird = htons(ctrl_ird);
+    rtr_msg->ctrl_ord = htons(ctrl_ord);
 }
 
 /**
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index 0927b5cc65d3..8b8812de4b5c 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -597,7 +597,7 @@ static int nes_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr
     props->pkey_tbl_len = 1;
     props->qkey_viol_cntr = 0;
     props->active_width = IB_WIDTH_4X;
-    props->active_speed = 1;
+    props->active_speed = IB_SPEED_SDR;
     props->max_msg_sz = 0x80000000;
 
     return 0;
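The nes_cm.c changes stop byte-swapping the received RTR fields in place: ntohs() written back into the frame corrupts it and goes wrong if the path ever runs twice, so the swapped values now live in local u16 variables. The same pattern, runnable; the constants are representative values, not copied from the nes headers:

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

#define IETF_NO_IRD_ORD   0x3fff
#define IETF_PEER_TO_PEER 0x8000

struct rtr_msg { uint16_t ctrl_ird, ctrl_ord; };   /* wire (network) order */

int main(void)
{
    struct rtr_msg msg = { htons(IETF_PEER_TO_PEER | 4), htons(8) };

    /* Swap into locals; the frame itself stays in network order. */
    uint16_t rtr_ctrl_ird = ntohs(msg.ctrl_ird);
    uint16_t rtr_ctrl_ord = ntohs(msg.ctrl_ord);

    printf("p2p=%d ird=%u ord=%u\n",
           !!(rtr_ctrl_ird & IETF_PEER_TO_PEER),
           rtr_ctrl_ird & IETF_NO_IRD_ORD,
           rtr_ctrl_ord & IETF_NO_IRD_ORD);
    return 0;
}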
diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
index b881bdc401f5..6b811e3e8bd1 100644
--- a/drivers/infiniband/hw/qib/qib.h
+++ b/drivers/infiniband/hw/qib/qib.h
@@ -427,6 +427,14 @@ struct qib_verbs_txreq {
 /* how often we check for packet activity for "power on hours (in seconds) */
 #define ACTIVITY_TIMER 5
 
+#define MAX_NAME_SIZE 64
+struct qib_msix_entry {
+    struct msix_entry msix;
+    void *arg;
+    char name[MAX_NAME_SIZE];
+    cpumask_var_t mask;
+};
+
 /* Below is an opaque struct. Each chip (device) can maintain
  * private data needed for its operation, but not germane to the
  * rest of the driver.  For convenience, we define another that
@@ -1355,7 +1363,7 @@ int qib_pcie_init(struct pci_dev *, const struct pci_device_id *);
 int qib_pcie_ddinit(struct qib_devdata *, struct pci_dev *,
             const struct pci_device_id *);
 void qib_pcie_ddcleanup(struct qib_devdata *);
-int qib_pcie_params(struct qib_devdata *, u32, u32 *, struct msix_entry *);
+int qib_pcie_params(struct qib_devdata *, u32, u32 *, struct qib_msix_entry *);
 int qib_reinit_intr(struct qib_devdata *);
 void qib_enable_intx(struct pci_dev *);
 void qib_nomsi(struct qib_devdata *);
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index 41e92089e41b..060b96064469 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -541,8 +541,7 @@ struct qib_chip_specific {
     u32 lastbuf_for_pio;
     u32 stay_in_freeze;
     u32 recovery_ports_initted;
-    struct msix_entry *msix_entries;
-    void **msix_arg;
+    struct qib_msix_entry *msix_entries;
     unsigned long *sendchkenable;
     unsigned long *sendgrhchk;
     unsigned long *sendibchk;
@@ -639,24 +638,24 @@ static struct {
     int lsb;
     int port; /* 0 if not port-specific, else port # */
 } irq_table[] = {
-    { QIB_DRV_NAME, qib_7322intr, -1, 0 },
-    { QIB_DRV_NAME " (buf avail)", qib_7322bufavail,
+    { "", qib_7322intr, -1, 0 },
+    { " (buf avail)", qib_7322bufavail,
         SYM_LSB(IntStatus, SendBufAvail), 0 },
-    { QIB_DRV_NAME " (sdma 0)", sdma_intr,
+    { " (sdma 0)", sdma_intr,
         SYM_LSB(IntStatus, SDmaInt_0), 1 },
-    { QIB_DRV_NAME " (sdma 1)", sdma_intr,
+    { " (sdma 1)", sdma_intr,
         SYM_LSB(IntStatus, SDmaInt_1), 2 },
-    { QIB_DRV_NAME " (sdmaI 0)", sdma_idle_intr,
+    { " (sdmaI 0)", sdma_idle_intr,
        SYM_LSB(IntStatus, SDmaIdleInt_0), 1 },
-    { QIB_DRV_NAME " (sdmaI 1)", sdma_idle_intr,
+    { " (sdmaI 1)", sdma_idle_intr,
        SYM_LSB(IntStatus, SDmaIdleInt_1), 2 },
-    { QIB_DRV_NAME " (sdmaP 0)", sdma_progress_intr,
+    { " (sdmaP 0)", sdma_progress_intr,
        SYM_LSB(IntStatus, SDmaProgressInt_0), 1 },
-    { QIB_DRV_NAME " (sdmaP 1)", sdma_progress_intr,
+    { " (sdmaP 1)", sdma_progress_intr,
        SYM_LSB(IntStatus, SDmaProgressInt_1), 2 },
-    { QIB_DRV_NAME " (sdmaC 0)", sdma_cleanup_intr,
+    { " (sdmaC 0)", sdma_cleanup_intr,
        SYM_LSB(IntStatus, SDmaCleanupDone_0), 1 },
-    { QIB_DRV_NAME " (sdmaC 1)", sdma_cleanup_intr,
+    { " (sdmaC 1)", sdma_cleanup_intr,
        SYM_LSB(IntStatus, SDmaCleanupDone_1), 2 },
 };
 
@@ -2567,9 +2566,13 @@ static void qib_7322_nomsix(struct qib_devdata *dd)
         int i;
 
         dd->cspec->num_msix_entries = 0;
-        for (i = 0; i < n; i++)
-            free_irq(dd->cspec->msix_entries[i].vector,
-                 dd->cspec->msix_arg[i]);
+        for (i = 0; i < n; i++) {
+            irq_set_affinity_hint(
+                dd->cspec->msix_entries[i].msix.vector, NULL);
+            free_cpumask_var(dd->cspec->msix_entries[i].mask);
+            free_irq(dd->cspec->msix_entries[i].msix.vector,
+                 dd->cspec->msix_entries[i].arg);
+        }
         qib_nomsix(dd);
     }
     /* make sure no MSIx interrupts are left pending */
@@ -2597,7 +2600,6 @@ static void qib_setup_7322_cleanup(struct qib_devdata *dd)
     kfree(dd->cspec->sendgrhchk);
     kfree(dd->cspec->sendibchk);
     kfree(dd->cspec->msix_entries);
-    kfree(dd->cspec->msix_arg);
     for (i = 0; i < dd->num_pports; i++) {
         unsigned long flags;
         u32 mask = QSFP_GPIO_MOD_PRS_N |
@@ -3070,6 +3072,8 @@ static void qib_setup_7322_interrupt(struct qib_devdata *dd, int clearpend)
     int ret, i, msixnum;
     u64 redirect[6];
     u64 mask;
+    const struct cpumask *local_mask;
+    int firstcpu, secondcpu = 0, currrcvcpu = 0;
 
     if (!dd->num_pports)
         return;
@@ -3118,13 +3122,28 @@ try_intx:
     memset(redirect, 0, sizeof redirect);
     mask = ~0ULL;
     msixnum = 0;
+    local_mask = cpumask_of_pcibus(dd->pcidev->bus);
+    firstcpu = cpumask_first(local_mask);
+    if (firstcpu >= nr_cpu_ids ||
+        cpumask_weight(local_mask) == num_online_cpus()) {
+        local_mask = topology_core_cpumask(0);
+        firstcpu = cpumask_first(local_mask);
+    }
+    if (firstcpu < nr_cpu_ids) {
+        secondcpu = cpumask_next(firstcpu, local_mask);
+        if (secondcpu >= nr_cpu_ids)
+            secondcpu = firstcpu;
+        currrcvcpu = secondcpu;
+    }
     for (i = 0; msixnum < dd->cspec->num_msix_entries; i++) {
         irq_handler_t handler;
-        const char *name;
         void *arg;
         u64 val;
         int lsb, reg, sh;
 
+        dd->cspec->msix_entries[msixnum].
+            name[sizeof(dd->cspec->msix_entries[msixnum].name) - 1]
+            = '\0';
         if (i < ARRAY_SIZE(irq_table)) {
             if (irq_table[i].port) {
                 /* skip if for a non-configured port */
@@ -3135,7 +3154,11 @@ try_intx:
                 arg = dd;
             lsb = irq_table[i].lsb;
             handler = irq_table[i].handler;
-            name = irq_table[i].name;
+            snprintf(dd->cspec->msix_entries[msixnum].name,
+                sizeof(dd->cspec->msix_entries[msixnum].name)
+                - 1,
+                QIB_DRV_NAME "%d%s", dd->unit,
+                irq_table[i].name);
         } else {
             unsigned ctxt;
 
@@ -3148,23 +3171,28 @@ try_intx:
                 continue;
             lsb = QIB_I_RCVAVAIL_LSB + ctxt;
             handler = qib_7322pintr;
-            name = QIB_DRV_NAME " (kctx)";
+            snprintf(dd->cspec->msix_entries[msixnum].name,
+                sizeof(dd->cspec->msix_entries[msixnum].name)
+                - 1,
+                QIB_DRV_NAME "%d (kctx)", dd->unit);
         }
-        ret = request_irq(dd->cspec->msix_entries[msixnum].vector,
-                  handler, 0, name, arg);
+        ret = request_irq(
+            dd->cspec->msix_entries[msixnum].msix.vector,
+            handler, 0, dd->cspec->msix_entries[msixnum].name,
+            arg);
         if (ret) {
             /*
              * Shouldn't happen since the enable said we could
             * have as many as we are trying to setup here.
             */
             qib_dev_err(dd, "Couldn't setup MSIx "
-                "interrupt (vec=%d, irq=%d): %d\n", msixnum,
-                dd->cspec->msix_entries[msixnum].vector,
-                ret);
+                "interrupt (vec=%d, irq=%d): %d\n", msixnum,
+                dd->cspec->msix_entries[msixnum].msix.vector,
+                ret);
             qib_7322_nomsix(dd);
             goto try_intx;
         }
-        dd->cspec->msix_arg[msixnum] = arg;
+        dd->cspec->msix_entries[msixnum].arg = arg;
         if (lsb >= 0) {
             reg = lsb / IBA7322_REDIRECT_VEC_PER_REG;
             sh = (lsb % IBA7322_REDIRECT_VEC_PER_REG) *
@@ -3174,6 +3202,25 @@ try_intx:
         }
         val = qib_read_kreg64(dd, 2 * msixnum + 1 +
                       (QIB_7322_MsixTable_OFFS / sizeof(u64)));
+        if (firstcpu < nr_cpu_ids &&
+            zalloc_cpumask_var(
+                &dd->cspec->msix_entries[msixnum].mask,
+                GFP_KERNEL)) {
+            if (handler == qib_7322pintr) {
+                cpumask_set_cpu(currrcvcpu,
+                    dd->cspec->msix_entries[msixnum].mask);
+                currrcvcpu = cpumask_next(currrcvcpu,
+                    local_mask);
+                if (currrcvcpu >= nr_cpu_ids)
+                    currrcvcpu = secondcpu;
+            } else {
+                cpumask_set_cpu(firstcpu,
+                    dd->cspec->msix_entries[msixnum].mask);
+            }
+            irq_set_affinity_hint(
+                dd->cspec->msix_entries[msixnum].msix.vector,
+                dd->cspec->msix_entries[msixnum].mask);
+        }
         msixnum++;
     }
     /* Initialize the vector mapping */
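The block just added spreads the receive-context vectors round-robin across the CPUs local to the HCA's bus, reserving the first CPU for the remaining interrupts. A runnable model of the assignment policy that the cpumask_*()/irq_set_affinity_hint() calls implement:

#include <stdio.h>

int main(void)
{
    int ncpus = 4, nvec = 8;
    int firstcpu = 0;                  /* non-receive interrupts */
    int secondcpu = ncpus > 1 ? 1 : 0;
    int currrcvcpu = secondcpu;

    for (int v = 0; v < nvec; v++) {
        int is_rcv = (v >= 2);         /* pretend vectors 2..7 are rx */
        if (is_rcv) {
            printf("vec %d -> cpu %d (rx)\n", v, currrcvcpu);
            if (++currrcvcpu >= ncpus) /* wrap back past the last CPU */
                currrcvcpu = secondcpu;
        } else
            printf("vec %d -> cpu %d\n", v, firstcpu);
    }
    return 0;
}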
@@ -3365,7 +3412,7 @@ static int qib_do_7322_reset(struct qib_devdata *dd)
     if (msix_entries) {
         /* restore the MSIx vector address and data if saved above */
         for (i = 0; i < msix_entries; i++) {
-            dd->cspec->msix_entries[i].entry = i;
+            dd->cspec->msix_entries[i].msix.entry = i;
             if (!msix_vecsave || !msix_vecsave[2 * i])
                 continue;
             qib_write_kreg(dd, 2 * i +
@@ -6865,15 +6912,13 @@ struct qib_devdata *qib_init_iba7322_funcs(struct pci_dev *pdev,
         tabsize = actual_cnt;
     dd->cspec->msix_entries = kmalloc(tabsize *
-            sizeof(struct msix_entry), GFP_KERNEL);
-    dd->cspec->msix_arg = kmalloc(tabsize *
-            sizeof(void *), GFP_KERNEL);
-    if (!dd->cspec->msix_entries || !dd->cspec->msix_arg) {
+            sizeof(struct qib_msix_entry), GFP_KERNEL);
+    if (!dd->cspec->msix_entries) {
         qib_dev_err(dd, "No memory for MSIx table\n");
         tabsize = 0;
     }
     for (i = 0; i < tabsize; i++)
-        dd->cspec->msix_entries[i].entry = i;
+        dd->cspec->msix_entries[i].msix.entry = i;
 
     if (qib_pcie_params(dd, 8, &tabsize, dd->cspec->msix_entries))
         qib_dev_err(dd, "Failed to setup PCIe or interrupts; "
diff --git a/drivers/infiniband/hw/qib/qib_mad.c b/drivers/infiniband/hw/qib/qib_mad.c
index 3b3745f261f0..c4ff788823b5 100644
--- a/drivers/infiniband/hw/qib/qib_mad.c
+++ b/drivers/infiniband/hw/qib/qib_mad.c
@@ -433,7 +433,6 @@ static int subn_get_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
     struct qib_pportdata *ppd;
     struct qib_ibport *ibp;
     struct ib_port_info *pip = (struct ib_port_info *)smp->data;
-    u16 lid;
     u8 mtu;
     int ret;
     u32 state;
@@ -469,8 +468,7 @@ static int subn_get_portinfo(struct ib_smp *smp, struct ib_device *ibdev,
             ibp->mkeyprot == 1))
         pip->mkey = ibp->mkey;
     pip->gid_prefix = ibp->gid_prefix;
-    lid = ppd->lid;
-    pip->lid = lid ? cpu_to_be16(lid) : IB_LID_PERMISSIVE;
+    pip->lid = cpu_to_be16(ppd->lid);
     pip->sm_lid = cpu_to_be16(ibp->sm_lid);
     pip->cap_mask = cpu_to_be32(ibp->port_cap_flags);
     /* pip->diag_code; */
diff --git a/drivers/infiniband/hw/qib/qib_pcie.c b/drivers/infiniband/hw/qib/qib_pcie.c
index 0fde788e1100..790646ef5106 100644
--- a/drivers/infiniband/hw/qib/qib_pcie.c
+++ b/drivers/infiniband/hw/qib/qib_pcie.c
@@ -194,11 +194,24 @@ void qib_pcie_ddcleanup(struct qib_devdata *dd)
 }
 
 static void qib_msix_setup(struct qib_devdata *dd, int pos, u32 *msixcnt,
-               struct msix_entry *msix_entry)
+               struct qib_msix_entry *qib_msix_entry)
 {
     int ret;
     u32 tabsize = 0;
     u16 msix_flags;
+    struct msix_entry *msix_entry;
+    int i;
+
+    /* We can't pass qib_msix_entry array to qib_msix_setup
+     * so use a dummy msix_entry array and copy the allocated
+     * irq back to the qib_msix_entry array. */
+    msix_entry = kmalloc(*msixcnt * sizeof(*msix_entry), GFP_KERNEL);
+    if (!msix_entry) {
+        ret = -ENOMEM;
+        goto do_intx;
+    }
+    for (i = 0; i < *msixcnt; i++)
+        msix_entry[i] = qib_msix_entry[i].msix;
 
     pci_read_config_word(dd->pcidev, pos + PCI_MSIX_FLAGS, &msix_flags);
     tabsize = 1 + (msix_flags & PCI_MSIX_FLAGS_QSIZE);
@@ -209,11 +222,15 @@ static void qib_msix_setup(struct qib_devdata *dd, int pos, u32 *msixcnt,
         tabsize = ret;
         ret = pci_enable_msix(dd->pcidev, msix_entry, tabsize);
     }
+do_intx:
     if (ret) {
         qib_dev_err(dd, "pci_enable_msix %d vectors failed: %d, "
                 "falling back to INTx\n", tabsize, ret);
         tabsize = 0;
     }
+    for (i = 0; i < tabsize; i++)
+        qib_msix_entry[i].msix = msix_entry[i];
+    kfree(msix_entry);
     *msixcnt = tabsize;
 
     if (ret)
@@ -251,7 +268,7 @@ static int qib_msi_setup(struct qib_devdata *dd, int pos)
 }
 
 int qib_pcie_params(struct qib_devdata *dd, u32 minw, u32 *nent,
-            struct msix_entry *entry)
+            struct qib_msix_entry *entry)
 {
     u16 linkstat, speed;
     int pos = 0, pose, ret = 1;
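Since pci_enable_msix() of this vintage only accepts a bare struct msix_entry array, qib_msix_setup() marshals the entries out of the fatter qib_msix_entry wrappers, calls the API, and copies the assigned vectors back. A runnable miniature of that copy-in/copy-out step; stub_enable() stands in for the PCI core:

#include <stdio.h>
#include <stdlib.h>

struct msix_entry { unsigned vector, entry; };
struct qib_msix_entry { struct msix_entry msix; void *arg; char name[16]; };

static int stub_enable(struct msix_entry *e, int n)   /* pretend PCI core */
{
    for (int i = 0; i < n; i++)
        e[i].vector = 100 + e[i].entry;
    return 0;
}

int main(void)
{
    struct qib_msix_entry q[3] = { { { 0, 0 } }, { { 0, 1 } }, { { 0, 2 } } };
    struct msix_entry *tmp = malloc(sizeof(*tmp) * 3);

    for (int i = 0; i < 3; i++)
        tmp[i] = q[i].msix;            /* copy out the plain entries */
    if (!stub_enable(tmp, 3))
        for (int i = 0; i < 3; i++)
            q[i].msix = tmp[i];        /* copy assigned vectors back */
    free(tmp);

    for (int i = 0; i < 3; i++)
        printf("entry %u -> irq %u\n", q[i].msix.entry, q[i].msix.vector);
    return 0;
}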
diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c
index 894afac26f3b..765b4cbaa020 100644
--- a/drivers/infiniband/hw/qib/qib_rc.c
+++ b/drivers/infiniband/hw/qib/qib_rc.c
@@ -2048,7 +2048,6 @@ send_last:
         wc.pkey_index = 0;
         wc.dlid_path_bits = 0;
         wc.port_num = 0;
-        wc.csum_ok = 0;
         /* Signal completion event if the solicited bit is set. */
         qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
                  (ohdr->bth[0] &
diff --git a/drivers/infiniband/hw/qib/qib_uc.c b/drivers/infiniband/hw/qib/qib_uc.c
index 847e7afdfd94..7ce2ac2ed219 100644
--- a/drivers/infiniband/hw/qib/qib_uc.c
+++ b/drivers/infiniband/hw/qib/qib_uc.c
@@ -422,7 +422,6 @@ last_imm:
         wc.pkey_index = 0;
         wc.dlid_path_bits = 0;
         wc.port_num = 0;
-        wc.csum_ok = 0;
         /* Signal completion event if the solicited bit is set. */
         qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
                  (ohdr->bth[0] &
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 4115be54ba3b..5c1bc995e560 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -296,7 +296,8 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
     dev->stats.rx_bytes += skb->len;
 
     skb->dev = dev;
-    if ((dev->features & NETIF_F_RXCSUM) && likely(wc->csum_ok))
+    if ((dev->features & NETIF_F_RXCSUM) &&
+            likely(wc->wc_flags & IB_WC_IP_CSUM_OK))
         skb->ip_summed = CHECKSUM_UNNECESSARY;
 
     napi_gro_receive(&priv->napi, skb);
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 9a43cb07f294..db43b3117168 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -364,6 +364,9 @@ iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session,
     }
     ib_conn = ep->dd_data;
 
+    if (iser_alloc_rx_descriptors(ib_conn))
+        return -ENOMEM;
+
     /* binds the iSER connection retrieved from the previously
      * connected ep_handle to the iSCSI layer connection. exchanges
      * connection pointers */
@@ -398,19 +401,6 @@ iscsi_iser_conn_stop(struct iscsi_cls_conn *cls_conn, int flag)
     iser_conn->ib_conn = NULL;
 }
 
-static int
-iscsi_iser_conn_start(struct iscsi_cls_conn *cls_conn)
-{
-    struct iscsi_conn *conn = cls_conn->dd_data;
-    int err;
-
-    err = iser_conn_set_full_featured_mode(conn);
-    if (err)
-        return err;
-
-    return iscsi_conn_start(cls_conn);
-}
-
 static void iscsi_iser_session_destroy(struct iscsi_cls_session *cls_session)
 {
     struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
@@ -724,7 +714,7 @@ static struct iscsi_transport iscsi_iser_transport = {
     .get_conn_param        = iscsi_conn_get_param,
     .get_ep_param        = iscsi_iser_get_ep_param,
     .get_session_param    = iscsi_session_get_param,
-    .start_conn        = iscsi_iser_conn_start,
+    .start_conn        = iscsi_conn_start,
     .stop_conn        = iscsi_iser_conn_stop,
     /* iscsi host params */
     .get_host_param        = iscsi_host_get_param,
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index db7ea3704da7..296be431a0e9 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -366,4 +366,5 @@ int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
 void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task);
 int iser_initialize_task_headers(struct iscsi_task *task,
                  struct iser_tx_desc *tx_desc);
+int iser_alloc_rx_descriptors(struct iser_conn *ib_conn);
 #endif
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index a607542fc796..a00ccd1ca333 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -170,7 +170,7 @@ static void iser_create_send_desc(struct iser_conn *ib_conn,
 }
 
 
-static int iser_alloc_rx_descriptors(struct iser_conn *ib_conn)
+int iser_alloc_rx_descriptors(struct iser_conn *ib_conn)
 {
     int i, j;
     u64 dma_addr;
@@ -220,18 +220,6 @@ void iser_free_rx_descriptors(struct iser_conn *ib_conn)
     struct iser_rx_desc *rx_desc;
     struct iser_device *device = ib_conn->device;
 
-    if (ib_conn->login_buf) {
-        if (ib_conn->login_req_dma)
-            ib_dma_unmap_single(device->ib_device,
-                ib_conn->login_req_dma,
-                ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_TO_DEVICE);
-        if (ib_conn->login_resp_dma)
-            ib_dma_unmap_single(device->ib_device,
-                ib_conn->login_resp_dma,
-                ISER_RX_LOGIN_SIZE, DMA_FROM_DEVICE);
-        kfree(ib_conn->login_buf);
-    }
-
     if (!ib_conn->rx_descs)
         return;
 
@@ -242,23 +230,24 @@ void iser_free_rx_descriptors(struct iser_conn *ib_conn)
     kfree(ib_conn->rx_descs);
 }
 
-/**
- * iser_conn_set_full_featured_mode - (iSER API)
- */
-int iser_conn_set_full_featured_mode(struct iscsi_conn *conn)
+static int iser_post_rx_bufs(struct iscsi_conn *conn, struct iscsi_hdr *req)
 {
     struct iscsi_iser_conn *iser_conn = conn->dd_data;
 
+    iser_dbg("req op %x flags %x\n", req->opcode, req->flags);
+    /* check if this is the last login - going to full feature phase */
+    if ((req->flags & ISCSI_FULL_FEATURE_PHASE) != ISCSI_FULL_FEATURE_PHASE)
+        return 0;
+
+    /*
+     * Check that there is one posted recv buffer (for the last login
+     * response) and no posted send buffers left - they must have been
+     * consumed during previous login phases.
+     */
+    WARN_ON(iser_conn->ib_conn->post_recv_buf_count != 1);
+    WARN_ON(atomic_read(&iser_conn->ib_conn->post_send_buf_count) != 0);
+
     iser_dbg("Initially post: %d\n", ISER_MIN_POSTED_RX);
-
-    /* Check that there is no posted recv or send buffers left - */
-    /* they must be consumed during the login phase */
-    BUG_ON(iser_conn->ib_conn->post_recv_buf_count != 0);
-    BUG_ON(atomic_read(&iser_conn->ib_conn->post_send_buf_count) != 0);
-
-    if (iser_alloc_rx_descriptors(iser_conn->ib_conn))
-        return -ENOMEM;
-
     /* Initial post receive buffers */
     if (iser_post_recvm(iser_conn->ib_conn, ISER_MIN_POSTED_RX))
         return -ENOMEM;
@@ -438,6 +427,9 @@ int iser_send_control(struct iscsi_conn *conn,
         err = iser_post_recvl(iser_conn->ib_conn);
         if (err)
             goto send_control_error;
+        err = iser_post_rx_bufs(conn, task->hdr);
+        if (err)
+            goto send_control_error;
     }
 
     err = iser_post_send(iser_conn->ib_conn, mdesc);
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index e28877c4ce15..14224ba44fd8 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -274,6 +274,18 @@ static int iser_free_ib_conn_res(struct iser_conn *ib_conn, int can_destroy_id)
     ib_conn->cma_id = NULL;
     kfree(ib_conn->page_vec);
 
+    if (ib_conn->login_buf) {
+        if (ib_conn->login_req_dma)
+            ib_dma_unmap_single(ib_conn->device->ib_device,
+                ib_conn->login_req_dma,
+                ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_TO_DEVICE);
+        if (ib_conn->login_resp_dma)
+            ib_dma_unmap_single(ib_conn->device->ib_device,
+                ib_conn->login_resp_dma,
+                ISER_RX_LOGIN_SIZE, DMA_FROM_DEVICE);
+        kfree(ib_conn->login_buf);
+    }
+
     return 0;
 }
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 0bfa545675b8..bcbf22ee0aa7 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -30,6 +30,8 @@
  * SOFTWARE.
  */
 
+#define pr_fmt(fmt) PFX fmt
+
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/slab.h>
@@ -165,7 +167,7 @@ static void srp_free_iu(struct srp_host *host, struct srp_iu *iu)
 
 static void srp_qp_event(struct ib_event *event, void *context)
 {
-    printk(KERN_ERR PFX "QP event %d\n", event->event);
+    pr_debug("QP event %d\n", event->event);
 }
 
 static int srp_init_qp(struct srp_target_port *target,
@@ -472,6 +474,21 @@ static void srp_free_req_data(struct srp_target_port *target)
     }
 }
 
+/**
+ * srp_del_scsi_host_attr() - Remove attributes defined in the host template.
+ * @shost: SCSI host whose attributes to remove from sysfs.
+ *
+ * Note: Any attributes defined in the host template and that did not exist
+ * before invocation of this function will be ignored.
+ */
+static void srp_del_scsi_host_attr(struct Scsi_Host *shost)
+{
+    struct device_attribute **attr;
+
+    for (attr = shost->hostt->shost_attrs; attr && *attr; ++attr)
+        device_remove_file(&shost->shost_dev, *attr);
+}
+
 static void srp_remove_work(struct work_struct *work)
 {
     struct srp_target_port *target =
@@ -484,6 +501,7 @@ static void srp_remove_work(struct work_struct *work)
     list_del(&target->list);
     spin_unlock(&target->srp_host->target_lock);
 
+    srp_del_scsi_host_attr(target->scsi_host);
     srp_remove_host(target->scsi_host);
     scsi_remove_host(target->scsi_host);
     ib_destroy_cm_id(target->cm_id);
@@ -1676,10 +1694,6 @@ static ssize_t show_id_ext(struct device *dev, struct device_attribute *attr,
 {
     struct srp_target_port *target = host_to_target(class_to_shost(dev));
 
-    if (target->state == SRP_TARGET_DEAD ||
-        target->state == SRP_TARGET_REMOVED)
-        return -ENODEV;
-
     return sprintf(buf, "0x%016llx\n",
                (unsigned long long) be64_to_cpu(target->id_ext));
 }
@@ -1689,10 +1703,6 @@ static ssize_t show_ioc_guid(struct device *dev, struct device_attribute *attr,
 {
     struct srp_target_port *target = host_to_target(class_to_shost(dev));
 
-    if (target->state == SRP_TARGET_DEAD ||
-        target->state == SRP_TARGET_REMOVED)
-        return -ENODEV;
-
     return sprintf(buf, "0x%016llx\n",
                (unsigned long long) be64_to_cpu(target->ioc_guid));
 }
@@ -1702,10 +1712,6 @@ static ssize_t show_service_id(struct device *dev,
 {
     struct srp_target_port *target = host_to_target(class_to_shost(dev));
 
-    if (target->state == SRP_TARGET_DEAD ||
-        target->state == SRP_TARGET_REMOVED)
-        return -ENODEV;
-
     return sprintf(buf, "0x%016llx\n",
                (unsigned long long) be64_to_cpu(target->service_id));
 }
@@ -1715,10 +1721,6 @@ static ssize_t show_pkey(struct device *dev, struct device_attribute *attr,
 {
     struct srp_target_port *target = host_to_target(class_to_shost(dev));
 
-    if (target->state == SRP_TARGET_DEAD ||
-        target->state == SRP_TARGET_REMOVED)
-        return -ENODEV;
-
     return sprintf(buf, "0x%04x\n", be16_to_cpu(target->path.pkey));
 }
 
@@ -1727,10 +1729,6 @@ static ssize_t show_dgid(struct device *dev, struct device_attribute *attr,
 {
     struct srp_target_port *target = host_to_target(class_to_shost(dev));
 
-    if (target->state == SRP_TARGET_DEAD ||
-        target->state == SRP_TARGET_REMOVED)
-        return -ENODEV;
-
     return sprintf(buf, "%pI6\n", target->path.dgid.raw);
 }
 
@@ -1739,10 +1737,6 @@ static ssize_t show_orig_dgid(struct device *dev,
 {
     struct srp_target_port *target = host_to_target(class_to_shost(dev));
 
-    if (target->state == SRP_TARGET_DEAD ||
-        target->state == SRP_TARGET_REMOVED)
-        return -ENODEV;
-
     return sprintf(buf, "%pI6\n", target->orig_dgid);
 }
@@ -1751,10 +1745,6 @@ static ssize_t show_req_lim(struct device *dev,
 {
     struct srp_target_port *target = host_to_target(class_to_shost(dev));
 
-    if (target->state == SRP_TARGET_DEAD ||
-        target->state == SRP_TARGET_REMOVED)
-        return -ENODEV;
-
     return sprintf(buf, "%d\n", target->req_lim);
 }
 
@@ -1763,10 +1753,6 @@ static ssize_t show_zero_req_lim(struct device *dev,
 {
     struct srp_target_port *target = host_to_target(class_to_shost(dev));
 
-    if (target->state == SRP_TARGET_DEAD ||
-        target->state == SRP_TARGET_REMOVED)
-        return -ENODEV;
-
     return sprintf(buf, "%d\n", target->zero_req_lim);
 }
 
@@ -1989,7 +1975,7 @@ static int srp_parse_options(const char *buf, struct srp_target_port *target)
                 goto out;
             }
             if (strlen(p) != 32) {
-                printk(KERN_WARNING PFX "bad dest GID parameter '%s'\n", p);
+                pr_warn("bad dest GID parameter '%s'\n", p);
                 kfree(p);
                 goto out;
             }
@@ -2004,7 +1990,7 @@ static int srp_parse_options(const char *buf, struct srp_target_port *target)
 
         case SRP_OPT_PKEY:
             if (match_hex(args, &token)) {
-                printk(KERN_WARNING PFX "bad P_Key parameter '%s'\n", p);
+                pr_warn("bad P_Key parameter '%s'\n", p);
                 goto out;
             }
             target->path.pkey = cpu_to_be16(token);
@@ -2023,7 +2009,7 @@ static int srp_parse_options(const char *buf, struct srp_target_port *target)
 
         case SRP_OPT_MAX_SECT:
             if (match_int(args, &token)) {
-                printk(KERN_WARNING PFX "bad max sect parameter '%s'\n", p);
+                pr_warn("bad max sect parameter '%s'\n", p);
                 goto out;
             }
             target->scsi_host->max_sectors = token;
@@ -2031,7 +2017,8 @@ static int srp_parse_options(const char *buf, struct srp_target_port *target)
 
         case SRP_OPT_MAX_CMD_PER_LUN:
             if (match_int(args, &token)) {
-                printk(KERN_WARNING PFX "bad max cmd_per_lun parameter '%s'\n", p);
+                pr_warn("bad max cmd_per_lun parameter '%s'\n",
+                    p);
                 goto out;
             }
             target->scsi_host->cmd_per_lun = min(token, SRP_CMD_SQ_SIZE);
@@ -2039,14 +2026,14 @@ static int srp_parse_options(const char *buf, struct srp_target_port *target)
 
         case SRP_OPT_IO_CLASS:
             if (match_hex(args, &token)) {
-                printk(KERN_WARNING PFX "bad IO class parameter '%s' \n", p);
+                pr_warn("bad IO class parameter '%s'\n", p);
                 goto out;
             }
             if (token != SRP_REV10_IB_IO_CLASS &&
                 token != SRP_REV16A_IB_IO_CLASS) {
-                printk(KERN_WARNING PFX "unknown IO class parameter value"
-                       " %x specified (use %x or %x).\n",
-                       token, SRP_REV10_IB_IO_CLASS, SRP_REV16A_IB_IO_CLASS);
+                pr_warn("unknown IO class parameter value %x specified (use %x or %x).\n",
+                    token, SRP_REV10_IB_IO_CLASS,
+                    SRP_REV16A_IB_IO_CLASS);
                 goto out;
             }
             target->io_class = token;
@@ -2064,7 +2051,8 @@ static int srp_parse_options(const char *buf, struct srp_target_port *target)
 
         case SRP_OPT_CMD_SG_ENTRIES:
             if (match_int(args, &token) || token < 1 || token > 255) {
-                printk(KERN_WARNING PFX "bad max cmd_sg_entries parameter '%s'\n", p);
+                pr_warn("bad max cmd_sg_entries parameter '%s'\n",
+                    p);
                 goto out;
             }
             target->cmd_sg_cnt = token;
@@ -2072,7 +2060,7 @@ static int srp_parse_options(const char *buf, struct srp_target_port *target)
 
         case SRP_OPT_ALLOW_EXT_SG:
             if (match_int(args, &token)) {
-                printk(KERN_WARNING PFX "bad allow_ext_sg parameter '%s'\n", p);
+                pr_warn("bad allow_ext_sg parameter '%s'\n", p);
                 goto out;
             }
             target->allow_ext_sg = !!token;
@@ -2081,15 +2069,16 @@ static int srp_parse_options(const char *buf, struct srp_target_port *target)
         case SRP_OPT_SG_TABLESIZE:
             if (match_int(args, &token) || token < 1 ||
                     token > SCSI_MAX_SG_CHAIN_SEGMENTS) {
-                printk(KERN_WARNING PFX "bad max sg_tablesize parameter '%s'\n", p);
+                pr_warn("bad max sg_tablesize parameter '%s'\n",
+                    p);
                 goto out;
             }
             target->sg_tablesize = token;
             break;
 
         default:
-            printk(KERN_WARNING PFX "unknown parameter or missing value "
-                   "'%s' in target creation request\n", p);
+            pr_warn("unknown parameter or missing value '%s' in target creation request\n",
+                p);
             goto out;
         }
     }
@@ -2100,9 +2089,8 @@ static int srp_parse_options(const char *buf, struct srp_target_port *target)
         for (i = 0; i < ARRAY_SIZE(srp_opt_tokens); ++i)
             if ((srp_opt_tokens[i].token & SRP_OPT_ALL) &&
                 !(srp_opt_tokens[i].token & opt_mask))
-                printk(KERN_WARNING PFX "target creation request is "
-                       "missing parameter '%s'\n",
-                       srp_opt_tokens[i].pattern);
+                pr_warn("target creation request is missing parameter '%s'\n",
+                    srp_opt_tokens[i].pattern);
 
 out:
     kfree(options);
@@ -2149,7 +2137,7 @@ static ssize_t srp_create_target(struct device *dev,
 
     if (!host->srp_dev->fmr_pool && !target->allow_ext_sg &&
                 target->cmd_sg_cnt < target->sg_tablesize) {
-        printk(KERN_WARNING PFX "No FMR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n");
+        pr_warn("No FMR pool and no external indirect descriptors, limiting sg_tablesize to cmd_sg_cnt\n");
         target->sg_tablesize = target->cmd_sg_cnt;
     }
 
@@ -2309,8 +2297,7 @@ static void srp_add_one(struct ib_device *device)
         return;
 
     if (ib_query_device(device, dev_attr)) {
-        printk(KERN_WARNING PFX "Query device failed for %s\n",
-               device->name);
+        pr_warn("Query device failed for %s\n", device->name);
         goto free_attr;
     }
 
@@ -2429,6 +2416,7 @@ static void srp_remove_one(struct ib_device *device)
 
         list_for_each_entry_safe(target, tmp_target,
                      &host->target_list, list) {
+            srp_del_scsi_host_attr(target->scsi_host);
             srp_remove_host(target->scsi_host);
             scsi_remove_host(target->scsi_host);
             srp_disconnect_target(target);
@@ -2459,7 +2447,7 @@ static int __init srp_init_module(void)
     BUILD_BUG_ON(FIELD_SIZEOF(struct ib_wc, wr_id) < sizeof(void *));
 
     if (srp_sg_tablesize) {
-        printk(KERN_WARNING PFX "srp_sg_tablesize is deprecated, please use cmd_sg_entries\n");
+        pr_warn("srp_sg_tablesize is deprecated, please use cmd_sg_entries\n");
         if (!cmd_sg_entries)
             cmd_sg_entries = srp_sg_tablesize;
     }
@@ -2468,14 +2456,15 @@ static int __init srp_init_module(void)
         cmd_sg_entries = SRP_DEF_SG_TABLESIZE;
 
     if (cmd_sg_entries > 255) {
-        printk(KERN_WARNING PFX "Clamping cmd_sg_entries to 255\n");
+        pr_warn("Clamping cmd_sg_entries to 255\n");
         cmd_sg_entries = 255;
     }
 
     if (!indirect_sg_entries)
         indirect_sg_entries = cmd_sg_entries;
     else if (indirect_sg_entries < cmd_sg_entries) {
-        printk(KERN_WARNING PFX "Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n", cmd_sg_entries);
+        pr_warn("Bumping up indirect_sg_entries to match cmd_sg_entries (%u)\n",
+            cmd_sg_entries);
         indirect_sg_entries = cmd_sg_entries;
     }
 
@@ -2486,7 +2475,7 @@ static int __init srp_init_module(void)
 
     ret = class_register(&srp_class);
     if (ret) {
-        printk(KERN_ERR PFX "couldn't register class infiniband_srp\n");
+        pr_err("couldn't register class infiniband_srp\n");
         srp_release_transport(ib_srp_transport_template);
         return ret;
     }
@@ -2495,7 +2484,7 @@ static int __init srp_init_module(void)
 
     ret = ib_register_client(&srp_client);
     if (ret) {
-        printk(KERN_ERR PFX "couldn't register IB client\n");
+        pr_err("couldn't register IB client\n");
         srp_release_transport(ib_srp_transport_template);
         ib_sa_unregister_client(&srp_sa_client);
         class_unregister(&srp_class);
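The srp conversion leans on the pr_fmt() convention: pr_warn() and friends expand pr_fmt(fmt) at each call site, so defining it before the includes prefixes every message once instead of repeating PFX in each printk. A userspace imitation of the mechanism; pr_warn here is a hand-rolled stand-in, not the kernel macro:

#include <stdio.h>

#define PFX "ib_srp: "
#define pr_fmt(fmt) PFX fmt            /* must precede the users below */
#define pr_warn(fmt, ...) fprintf(stderr, pr_fmt(fmt), ##__VA_ARGS__)

int main(void)
{
    /* Prints "ib_srp: bad dest GID parameter 'deadbeef'" */
    pr_warn("bad dest GID parameter '%s'\n", "deadbeef");
    return 0;
}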
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index 9129ace02560..3b6f8efbf141 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -79,7 +79,8 @@ enum {
                 (1ull << MLX4_EVENT_TYPE_SRQ_LIMIT)        | \
                 (1ull << MLX4_EVENT_TYPE_CMD)            | \
                 (1ull << MLX4_EVENT_TYPE_COMM_CHANNEL)        | \
-                (1ull << MLX4_EVENT_TYPE_FLR_EVENT))
+                (1ull << MLX4_EVENT_TYPE_FLR_EVENT)        | \
+                (1ull << MLX4_EVENT_TYPE_FATAL_WARNING))
 
 static void eq_set_ci(struct mlx4_eq *eq, int req_not)
 {
@@ -443,6 +444,35 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
             queue_work(priv->mfunc.master.comm_wq,
                    &priv->mfunc.master.slave_flr_event_work);
             break;
+
+        case MLX4_EVENT_TYPE_FATAL_WARNING:
+            if (eqe->subtype == MLX4_FATAL_WARNING_SUBTYPE_WARMING) {
+                if (mlx4_is_master(dev))
+                    for (i = 0; i < dev->num_slaves; i++) {
+                        mlx4_dbg(dev, "%s: Sending "
+                            "MLX4_FATAL_WARNING_SUBTYPE_WARMING"
+                            " to slave: %d\n", __func__, i);
+                        if (i == dev->caps.function)
+                            continue;
+                        mlx4_slave_event(dev, i, eqe);
+                    }
+                mlx4_err(dev, "Temperature Threshold was reached! "
+                    "Threshold: %d celsius degrees; "
+                    "Current Temperature: %d\n",
+                    be16_to_cpu(eqe->event.warming.warning_threshold),
+                    be16_to_cpu(eqe->event.warming.current_temperature));
+            } else
+                mlx4_warn(dev, "Unhandled event FATAL WARNING (%02x), "
+                    "subtype %02x on EQ %d at index %u. owner=%x, "
+                    "nent=0x%x, slave=%x, ownership=%s\n",
+                    eqe->type, eqe->subtype, eq->eqn,
+                    eq->cons_index, eqe->owner, eq->nent,
+                    eqe->slave_id,
+                    !!(eqe->owner & 0x80) ^
+                    !!(eq->cons_index & eq->nent) ? "HW" : "SW");
+
+            break;
+
         case MLX4_EVENT_TYPE_EEC_CATAS_ERROR:
         case MLX4_EVENT_TYPE_ECC_DETECT:
         default:
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 5f15014713bc..8bb05b46db86 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -394,7 +394,7 @@ static int mlx4_how_many_lives_vf(struct mlx4_dev *dev)
     return ret;
 }
 
-static int mlx4_is_slave_active(struct mlx4_dev *dev, int slave)
+int mlx4_is_slave_active(struct mlx4_dev *dev, int slave)
 {
     struct mlx4_priv *priv = mlx4_priv(dev);
     struct mlx4_slave_state *s_slave;
@@ -646,6 +646,99 @@ out:
     return err ? err : count;
 }
 
+enum ibta_mtu {
+    IB_MTU_256  = 1,
+    IB_MTU_512  = 2,
+    IB_MTU_1024 = 3,
+    IB_MTU_2048 = 4,
+    IB_MTU_4096 = 5
+};
+
+static inline int int_to_ibta_mtu(int mtu)
+{
+    switch (mtu) {
+    case 256:  return IB_MTU_256;
+    case 512:  return IB_MTU_512;
+    case 1024: return IB_MTU_1024;
+    case 2048: return IB_MTU_2048;
+    case 4096: return IB_MTU_4096;
+    default:   return -1;
+    }
+}
+
+static inline int ibta_mtu_to_int(enum ibta_mtu mtu)
+{
+    switch (mtu) {
+    case IB_MTU_256:  return  256;
+    case IB_MTU_512:  return  512;
+    case IB_MTU_1024: return 1024;
+    case IB_MTU_2048: return 2048;
+    case IB_MTU_4096: return 4096;
+    default:          return -1;
+    }
+}
+
+static ssize_t show_port_ib_mtu(struct device *dev,
+                struct device_attribute *attr,
+                char *buf)
+{
+    struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
+                           port_mtu_attr);
+    struct mlx4_dev *mdev = info->dev;
+
+    if (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_ETH)
+        mlx4_warn(mdev, "port level mtu is only used for IB ports\n");
+
+    sprintf(buf, "%d\n",
+        ibta_mtu_to_int(mdev->caps.port_ib_mtu[info->port]));
+    return strlen(buf);
+}
+
+static ssize_t set_port_ib_mtu(struct device *dev,
+                   struct device_attribute *attr,
+                   const char *buf, size_t count)
+{
+    struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
+                           port_mtu_attr);
+    struct mlx4_dev *mdev = info->dev;
+    struct mlx4_priv *priv = mlx4_priv(mdev);
+    int err, port, mtu, ibta_mtu = -1;
+
+    if (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_ETH) {
+        mlx4_warn(mdev, "port level mtu is only used for IB ports\n");
+        return -EINVAL;
+    }
+
+    err = sscanf(buf, "%d", &mtu);
+    if (err > 0)
+        ibta_mtu = int_to_ibta_mtu(mtu);
+
+    if (err <= 0 || ibta_mtu < 0) {
+        mlx4_err(mdev, "%s is invalid IBTA mtu\n", buf);
+        return -EINVAL;
+    }
+
+    mdev->caps.port_ib_mtu[info->port] = ibta_mtu;
+
+    mlx4_stop_sense(mdev);
+    mutex_lock(&priv->port_mutex);
+    mlx4_unregister_device(mdev);
+    for (port = 1; port <= mdev->caps.num_ports; port++) {
+        mlx4_CLOSE_PORT(mdev, port);
+        err = mlx4_SET_PORT(mdev, port);
+        if (err) {
+            mlx4_err(mdev, "Failed to set port %d, "
+                      "aborting\n", port);
+            goto err_set_port;
+        }
+    }
+    err = mlx4_register_device(mdev);
+err_set_port:
+    mutex_unlock(&priv->port_mutex);
+    mlx4_start_sense(mdev);
+    return err ? err : count;
+}
+
 static int mlx4_load_fw(struct mlx4_dev *dev)
 {
     struct mlx4_priv *priv = mlx4_priv(dev);
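The helpers above translate between byte counts and the IBTA MTU encoding (1 through 5), and the sysfs store path rejects anything that does not map. A runnable round-trip check of the conversion logic, trimmed from the hunk:

#include <assert.h>
#include <stdio.h>

static int int_to_ibta_mtu(int mtu)
{
    switch (mtu) {
    case 256:  return 1;
    case 512:  return 2;
    case 1024: return 3;
    case 2048: return 4;
    case 4096: return 5;
    default:   return -1;
    }
}

static int ibta_mtu_to_int(int mtu)
{
    static const int tbl[] = { -1, 256, 512, 1024, 2048, 4096 };
    return (mtu >= 1 && mtu <= 5) ? tbl[mtu] : -1;
}

int main(void)
{
    for (int m = 256; m <= 4096; m *= 2)
        assert(ibta_mtu_to_int(int_to_ibta_mtu(m)) == m);
    assert(int_to_ibta_mtu(1500) == -1);   /* rejected, as in sysfs */
    printf("round trip ok\n");
    return 0;
}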
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index 5da51b99dbb8..2a0ff2cc7182 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -363,6 +363,10 @@ struct mlx4_eqe {
 		struct {
 			__be32 slave_id;
 		} __packed flr_event;
+		struct {
+			__be16 current_temperature;
+			__be16 warning_threshold;
+		} __packed warming;
 	} event;
 	u8			slave_id;
 	u8			reserved3[2];
@@ -399,7 +403,7 @@ struct mlx4_profile {
 	int			num_cq;
 	int			num_mcg;
 	int			num_mpt;
-	int			num_mtt;
+	unsigned		num_mtt;
 };
 
 struct mlx4_fw {
@@ -682,6 +686,8 @@ struct mlx4_port_info {
 	char			dev_name[16];
 	struct device_attribute port_attr;
 	enum mlx4_port_type	tmp_type;
+	char			dev_mtu_name[16];
+	struct device_attribute port_mtu_attr;
 	struct mlx4_mac_table	mac_table;
 	struct radix_tree_root	mac_tree;
 	struct mlx4_vlan_table	vlan_table;
@@ -1024,7 +1030,6 @@ int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave,
 			    struct mlx4_cmd_mailbox *outbox,
 			    struct mlx4_cmd_info *cmd);
 int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps);
-int mlx4_check_ext_port_caps(struct mlx4_dev *dev, u8 port);
 
 int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c
index 32e2b6616f3d..fe2ac8449c19 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mr.c
@@ -791,6 +791,9 @@ int mlx4_fmr_alloc(struct mlx4_dev *dev, u32 pd, u32 access, int max_pages,
 	u64 mtt_offset;
 	int err = -ENOMEM;
 
+	if (max_maps > dev->caps.max_fmr_maps)
+		return -EINVAL;
+
 	if (page_shift < (ffs(dev->caps.page_size_cap) - 1) || page_shift >= 32)
 		return -EINVAL;
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index 98e776261ead..77535ff18f1b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -590,49 +590,6 @@ int mlx4_get_port_ib_caps(struct mlx4_dev *dev, u8 port, __be32 *caps)
 	return err;
 }
 
-int mlx4_check_ext_port_caps(struct mlx4_dev *dev, u8 port)
-{
-	struct mlx4_cmd_mailbox *inmailbox, *outmailbox;
-	u8 *inbuf, *outbuf;
-	int err, packet_error;
-
-	inmailbox = mlx4_alloc_cmd_mailbox(dev);
-	if (IS_ERR(inmailbox))
-		return PTR_ERR(inmailbox);
-
-	outmailbox = mlx4_alloc_cmd_mailbox(dev);
-	if (IS_ERR(outmailbox)) {
-		mlx4_free_cmd_mailbox(dev, inmailbox);
-		return PTR_ERR(outmailbox);
-	}
-
-	inbuf = inmailbox->buf;
-	outbuf = outmailbox->buf;
-	memset(inbuf, 0, 256);
-	memset(outbuf, 0, 256);
-	inbuf[0] = 1;
-	inbuf[1] = 1;
-	inbuf[2] = 1;
-	inbuf[3] = 1;
-
-	*(__be16 *) (&inbuf[16]) = MLX4_ATTR_EXTENDED_PORT_INFO;
-	*(__be32 *) (&inbuf[20]) = cpu_to_be32(port);
-
-	err = mlx4_cmd_box(dev, inmailbox->dma, outmailbox->dma, port, 3,
-			   MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
-			   MLX4_CMD_NATIVE);
-
-	packet_error = be16_to_cpu(*(__be16 *) (outbuf + 4));
-
-	dev->caps.ext_port_cap[port] = (!err && !packet_error) ?
-		MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO
-		: 0;
-
-	mlx4_free_cmd_mailbox(dev, inmailbox);
-	mlx4_free_cmd_mailbox(dev, outmailbox);
-	return err;
-}
-
 static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
 				u8 op_mod, struct mlx4_cmd_mailbox *inbox)
 {
@@ -766,10 +723,18 @@ int mlx4_SET_PORT_wrapper(struct mlx4_dev *dev, int slave,
 				    vhcr->op_modifier, inbox);
 }
 
+/* bit locations for set port command with zero op modifier */
+enum {
+	MLX4_SET_PORT_VL_CAP	 = 4,	/* bits 7:4 */
+	MLX4_SET_PORT_MTU_CAP	 = 12,	/* bits 15:12 */
+	MLX4_CHANGE_PORT_VL_CAP	 = 21,
+	MLX4_CHANGE_PORT_MTU_CAP = 22,
+};
+
 int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port)
 {
 	struct mlx4_cmd_mailbox *mailbox;
-	int err;
+	int err, vl_cap;
 
 	if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH)
 		return 0;
@@ -781,8 +746,19 @@ int mlx4_SET_PORT(struct mlx4_dev *dev, u8 port)
 
 	memset(mailbox->buf, 0, 256);
 	((__be32 *) mailbox->buf)[1] = dev->caps.ib_port_def_cap[port];
-	err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_SET_PORT,
-		       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
+
+	/* IB VL CAP enum isn't used by the firmware, just numerical values */
+	for (vl_cap = 8; vl_cap >= 1; vl_cap >>= 1) {
+		((__be32 *) mailbox->buf)[0] = cpu_to_be32(
+			(1 << MLX4_CHANGE_PORT_MTU_CAP) |
+			(1 << MLX4_CHANGE_PORT_VL_CAP)  |
+			(dev->caps.port_ib_mtu[port] << MLX4_SET_PORT_MTU_CAP) |
+			(vl_cap << MLX4_SET_PORT_VL_CAP));
+		err = mlx4_cmd(dev, mailbox->dma, port, 0, MLX4_CMD_SET_PORT,
+			       MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
+		if (err != -ENOMEM)
+			break;
+	}
 
 	mlx4_free_cmd_mailbox(dev, mailbox);
 	return err;
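For reference, here is what the retry loop in mlx4_SET_PORT() actually puts in the first mailbox word: both change bits are always set, the MTU cap comes from the sysfs-controlled port_ib_mtu, and the VL cap starts at 8 and is halved each time the firmware answers -ENOMEM. A sketch for one concrete case, MTU 4096 (IBTA enum value 5) with a VL cap of 8; the enum constants are copied from the port.c hunk above, and the helper name is illustrative:

/* Sketch: build the first SET_PORT mailbox word (host byte order). */
enum {
	MLX4_SET_PORT_VL_CAP	 = 4,	/* bits 7:4 */
	MLX4_SET_PORT_MTU_CAP	 = 12,	/* bits 15:12 */
	MLX4_CHANGE_PORT_VL_CAP	 = 21,
	MLX4_CHANGE_PORT_MTU_CAP = 22,
};

static unsigned int set_port_word0(int ibta_mtu, int vl_cap)
{
	return (1U << MLX4_CHANGE_PORT_MTU_CAP) |
	       (1U << MLX4_CHANGE_PORT_VL_CAP)  |
	       ((unsigned int)ibta_mtu << MLX4_SET_PORT_MTU_CAP) |
	       ((unsigned int)vl_cap << MLX4_SET_PORT_VL_CAP);
}

/* set_port_word0(5, 8) == 0x00605080 */

So a device that cannot do 8 VLs is retried with 4, then 2, then 1, rather than failing port bring-up outright.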
diff --git a/drivers/net/ethernet/mellanox/mlx4/profile.c b/drivers/net/ethernet/mellanox/mlx4/profile.c
index 1129677daa62..06e5adeb76f7 100644
--- a/drivers/net/ethernet/mellanox/mlx4/profile.c
+++ b/drivers/net/ethernet/mellanox/mlx4/profile.c
@@ -83,12 +83,31 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
 	u64 total_size = 0;
 	struct mlx4_resource *profile;
 	struct mlx4_resource tmp;
+	struct sysinfo si;
 	int i, j;
 
 	profile = kcalloc(MLX4_RES_NUM, sizeof(*profile), GFP_KERNEL);
 	if (!profile)
 		return -ENOMEM;
 
+	/*
+	 * We want to scale the number of MTTs with the size of the
+	 * system memory, since it makes sense to register a lot of
+	 * memory on a system with a lot of memory.  As a heuristic,
+	 * make sure we have enough MTTs to cover twice the system
+	 * memory (with PAGE_SIZE entries).
+	 *
+	 * This number has to be a power of two and fit into 32 bits
+	 * due to device limitations, so cap this at 2^31 as well.
+	 * That limits us to 8TB of memory registration per HCA with
+	 * 4KB pages, which is probably OK for the next few months.
+	 */
+	si_meminfo(&si);
+	request->num_mtt =
+		roundup_pow_of_two(max_t(unsigned, request->num_mtt,
+					 min(1UL << 31,
+					     si.totalram >> (log_mtts_per_seg - 1))));
+
 	profile[MLX4_RES_QP].size     = dev_cap->qpc_entry_sz;
 	profile[MLX4_RES_RDMARC].size = dev_cap->rdmarc_entry_sz;
 	profile[MLX4_RES_ALTC].size   = dev_cap->altc_entry_sz;
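The arithmetic behind the heuristic, spelled out: num_mtt here counts MTT segments, each holding 2^log_mtts_per_seg page-sized entries, so covering twice the system memory needs totalram >> (log_mtts_per_seg - 1) segments before rounding up to a power of two. A small standalone check, where 16 GiB of RAM, 4 KiB pages, and log_mtts_per_seg = 3 are assumed numbers:

/* Worked example of the MTT sizing heuristic, illustrative only. */
#include <stdio.h>

int main(void)
{
	unsigned long totalram = 1UL << 22;	/* 16 GiB in 4 KiB pages */
	int log_mtts_per_seg = 3;		/* 2^3 MTT entries per segment */

	/*
	 * Twice the memory takes 2 * 2^22 = 2^23 MTT entries, i.e.
	 * 2^23 / 2^3 = 2^20 segments: exactly totalram >> (3 - 1).
	 */
	unsigned long num_mtt = totalram >> (log_mtts_per_seg - 1);

	printf("num_mtt segments = %lu\n", num_mtt);	/* 1048576 */
	return 0;
}

This is also why num_mtt in struct mlx4_profile becomes unsigned above: the capped value can reach 2^31, which does not fit in a signed int.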
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 44d8144e9ae8..834c96c5d879 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -100,10 +100,6 @@ enum {
 
 #define MLX4_ATTR_EXTENDED_PORT_INFO	cpu_to_be16(0xff90)
 
-enum {
-	MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO	= 1 << 0
-};
-
 enum {
 	MLX4_BMME_FLAG_LOCAL_INV	= 1 <<  6,
 	MLX4_BMME_FLAG_REMOTE_INV	= 1 <<  7,
@@ -133,6 +129,7 @@ enum mlx4_event {
 	MLX4_EVENT_TYPE_CMD		   = 0x0a,
 	MLX4_EVENT_TYPE_VEP_UPDATE	   = 0x19,
 	MLX4_EVENT_TYPE_COMM_CHANNEL	   = 0x18,
+	MLX4_EVENT_TYPE_FATAL_WARNING	   = 0x1b,
 	MLX4_EVENT_TYPE_FLR_EVENT	   = 0x1c,
 	MLX4_EVENT_TYPE_NONE		   = 0xff,
 };
@@ -142,6 +139,10 @@ enum {
 	MLX4_PORT_CHANGE_SUBTYPE_ACTIVE	= 4
 };
 
+enum {
+	MLX4_FATAL_WARNING_SUBTYPE_WARMING = 0,
+};
+
 enum {
 	MLX4_PERM_LOCAL_READ	= 1 << 10,
 	MLX4_PERM_LOCAL_WRITE	= 1 << 11,
@@ -273,6 +274,7 @@ struct mlx4_caps {
 	int			num_comp_vectors;
 	int			comp_pool;
 	int			num_mpts;
+	int			max_fmr_maps;
 	int			num_mtts;
 	int			fmr_reserved_mtts;
 	int			reserved_mtts;
@@ -308,7 +310,7 @@ struct mlx4_caps {
 	u32			port_mask[MLX4_MAX_PORTS + 1];
 	enum mlx4_port_type	possible_type[MLX4_MAX_PORTS + 1];
 	u32			max_counters;
-	u8			ext_port_cap[MLX4_MAX_PORTS + 1];
+	u8			port_ib_mtu[MLX4_MAX_PORTS + 1];
 };
 
 struct mlx4_buf_list {
diff --git a/include/rdma/ib_mad.h b/include/rdma/ib_mad.h
index d3b9401b77b0..b513f57e1725 100644
--- a/include/rdma/ib_mad.h
+++ b/include/rdma/ib_mad.h
@@ -77,6 +77,15 @@
 
 #define IB_MGMT_MAX_METHODS			128
 
+/* MAD Status field bit masks */
+#define IB_MGMT_MAD_STATUS_SUCCESS			0x0000
+#define IB_MGMT_MAD_STATUS_BUSY				0x0001
+#define IB_MGMT_MAD_STATUS_REDIRECT_REQD		0x0002
+#define IB_MGMT_MAD_STATUS_BAD_VERSION			0x0004
+#define IB_MGMT_MAD_STATUS_UNSUPPORTED_METHOD		0x0008
+#define IB_MGMT_MAD_STATUS_UNSUPPORTED_METHOD_ATTRIB	0x000c
+#define IB_MGMT_MAD_STATUS_INVALID_ATTRIB_VALUE		0x001c
+
 /* RMPP information */
 #define IB_MGMT_RMPP_VERSION			1
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index bf5daafe8ecc..c3cca5a4dacd 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -239,6 +239,15 @@ static inline int ib_width_enum_to_int(enum ib_port_width width)
 	}
 }
 
+enum ib_port_speed {
+	IB_SPEED_SDR	= 1,
+	IB_SPEED_DDR	= 2,
+	IB_SPEED_QDR	= 4,
+	IB_SPEED_FDR10	= 8,
+	IB_SPEED_FDR	= 16,
+	IB_SPEED_EDR	= 32
+};
+
 struct ib_protocol_stats {
 	/* TBD... */
 };
@@ -509,6 +518,7 @@ enum ib_wc_flags {
 	IB_WC_GRH		= 1,
 	IB_WC_WITH_IMM		= (1<<1),
 	IB_WC_WITH_INVALIDATE	= (1<<2),
+	IB_WC_IP_CSUM_OK	= (1<<3),
 };
 
 struct ib_wc {
@@ -529,7 +539,6 @@ struct ib_wc {
 	u8			sl;
 	u8			dlid_path_bits;
 	u8			port_num;	/* valid only for DR SMPs on switches */
-	int			csum_ok;
};
 
 enum ib_cq_notify_flags {
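A note on the header changes: the new MAD status values are code points in the three-bit invalid-field code at bits 4:2 of the status field, not independent single-bit flags, which is why 0x000c (unsupported method/attribute combination) and 0x001c (invalid attribute value) overlap lower values. Separately, the ib_verbs.h hunk converts the per-completion checksum indication from the dedicated csum_ok field into the IB_WC_IP_CSUM_OK bit of wc_flags, which is why the field is deleted from struct ib_wc. A sketch of the consumer-side test after the conversion (the wc is assumed to come from ib_poll_cq(); the helper name is illustrative):

/* Sketch: checksum test on a polled completion after the conversion. */
#include <rdma/ib_verbs.h>

static bool rx_csum_ok(const struct ib_wc *wc)
{
	/* formerly: wc->csum_ok != 0 */
	return !!(wc->wc_flags & IB_WC_IP_CSUM_OK);
}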