Fixes for 4.12-rc

- Multiple i40iw, nes, iw_cxgb4, hfi1, qib, mlx4, mlx5 fixes
- A few upper layer protocol fixes (IPoIB, iSER, SRP)
- A modest number of core fixes
 -----BEGIN PGP SIGNATURE-----
 
 iQIcBAABAgAGBQJZMIXpAAoJELgmozMOVy/dEHIP/RbIL0lGjH6qOOGUjTYVpFBn
 odS0nVWFl/gkw4mnvRDNm3h/BMk0SdaiyUtnRUMEcRhPrqwq40TpT5Sg59LrUgKe
 JGKB4oir7OYKGh8f6ublDlfFkdZiyGTXW50qp7+cxIu0FXSIREYWRIXIxdDdNhGK
 5+EMCJgT0eUNciRE+RlNV1slrKiMGdKm4N5U2nvgy2u/jk1JIpfhkOXrIPar5Ciq
 4Sk2DQoLu2RiMr8Htd49yrXaxxguGX0KpJwdOUv3xNlO4WejkT7KFEYB82NNdu0P
 NpnQGZXea7manripqRRrMBnaqkQD7lTtDHJBepmr4cCgY6XVTq3CQFWnsMywP60A
 10rHNeGixMH76DdE+kzTKQ2PKlVW4jjW6fk18cZ2GWbH4T9r/OzUnGR3uMJdhgHs
 g+zixnIokXa5/8S1p7Pkaq1datAQC4lb2O20c9bjnLM4jQsXMrEnbNevkvouADqj
 LWT5i1ZTQ5kquom5LmwTG9CcwPH/1E6xLXw4E41seqoZcqYZJakzACU43mJ450cO
 t3Afqz2AWgBa28DAEGy5+YR7Fr5/xof997GTB/eHnzY0E/cSJ+ntrlBLwMMrY9/t
 xzE1lZ/7750F9yjkFsLhrU4Xf3snoCg8NW1z7/aaFrx/y+CIKKjP3iRG+JQiKFnx
 D9tRKJY8iXPuCG+8OzSx
 =X6Rg
 -----END PGP SIGNATURE-----

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma

Pull rdma fixes from Doug Ledford:
 "For the most part this is just a minor -rc cycle for the rdma
  subsystem. Even given that this is all of the -rc patches since the
  merge window closed, it's still only about 25 patches:

   - Multiple i40iw, nes, iw_cxgb4, hfi1, qib, mlx4, mlx5 fixes

   - A few upper layer protocol fixes (IPoIB, iSER, SRP)

   - A modest number of core fixes"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma: (26 commits)
  RDMA/SA: Fix kernel panic in CMA request handler flow
  RDMA/umem: Fix missing mmap_sem in get umem ODP call
  RDMA/core: not to set page dirty bit if it's already set.
  RDMA/uverbs: Declare local function static and add brackets to sizeof
  RDMA/netlink: Reduce exposure of RDMA netlink functions
  RDMA/srp: Fix NULL deref at srp_destroy_qp()
  RDMA/IPoIB: Limit the ipoib_dev_uninit_default scope
  RDMA/IPoIB: Replace netdev_priv with ipoib_priv for ipoib_get_link_ksettings
  RDMA/qedr: add null check before pointer dereference
  RDMA/mlx5: set UMR wqe fence according to HCA cap
  net/mlx5: Define interface bits for fencing UMR wqe
  RDMA/mlx4: Fix MAD tunneling when SRIOV is enabled
  RDMA/qib,hfi1: Fix MR reference count leak on write with immediate
  RDMA/hfi1: Defer setting VL15 credits to link-up interrupt
  RDMA/hfi1: change PCI bar addr assignments to Linux API functions
  RDMA/hfi1: fix array termination by appending NULL to attr array
  RDMA/iw_cxgb4: fix the calculation of ipv6 header size
  RDMA/iw_cxgb4: calculate t4_eq_status_entries properly
  RDMA/iw_cxgb4: Avoid touch after free error in ARP failure handlers
  RDMA/nes: ACK MPA Reply frame
  ...
Linus Torvalds 2017-06-04 10:41:32 -07:00
commit 55cbdaf639
37 changed files with 194 additions and 170 deletions


@@ -1429,7 +1429,7 @@ static void cm_format_paths_from_req(struct cm_req_msg *req_msg,
 	primary_path->packet_life_time =
 		cm_req_get_primary_local_ack_timeout(req_msg);
 	primary_path->packet_life_time -= (primary_path->packet_life_time > 0);
-	sa_path_set_service_id(primary_path, req_msg->service_id);
+	primary_path->service_id = req_msg->service_id;
 
 	if (req_msg->alt_local_lid) {
 		alt_path->dgid = req_msg->alt_local_gid;
@@ -1452,7 +1452,7 @@ static void cm_format_paths_from_req(struct cm_req_msg *req_msg,
 		alt_path->packet_life_time =
 			cm_req_get_alt_local_ack_timeout(req_msg);
 		alt_path->packet_life_time -= (alt_path->packet_life_time > 0);
-		sa_path_set_service_id(alt_path, req_msg->service_id);
+		alt_path->service_id = req_msg->service_id;
 	}
 }


@@ -1140,7 +1140,7 @@ static void cma_save_ib_info(struct sockaddr *src_addr,
 		ib->sib_pkey = path->pkey;
 		ib->sib_flowinfo = path->flow_label;
 		memcpy(&ib->sib_addr, &path->sgid, 16);
-		ib->sib_sid = sa_path_get_service_id(path);
+		ib->sib_sid = path->service_id;
 		ib->sib_scope_id = 0;
 	} else {
 		ib->sib_pkey = listen_ib->sib_pkey;
@@ -1274,8 +1274,7 @@ static int cma_save_req_info(const struct ib_cm_event *ib_event,
 		memcpy(&req->local_gid, &req_param->primary_path->sgid,
 		       sizeof(req->local_gid));
 		req->has_gid = true;
-		req->service_id =
-			sa_path_get_service_id(req_param->primary_path);
+		req->service_id = req_param->primary_path->service_id;
 		req->pkey = be16_to_cpu(req_param->primary_path->pkey);
 		if (req->pkey != req_param->bth_pkey)
 			pr_warn_ratelimited("RDMA CMA: got different BTH P_Key (0x%x) and primary path P_Key (0x%x)\n"
@@ -1827,7 +1826,8 @@ static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
 	struct rdma_route *rt;
 	const sa_family_t ss_family = listen_id->route.addr.src_addr.ss_family;
 	struct sa_path_rec *path = ib_event->param.req_rcvd.primary_path;
-	const __be64 service_id = sa_path_get_service_id(path);
+	const __be64 service_id =
+		ib_event->param.req_rcvd.primary_path->service_id;
 	int ret;
 
 	id = rdma_create_id(listen_id->route.addr.dev_addr.net,
@@ -2345,9 +2345,8 @@ static int cma_query_ib_route(struct rdma_id_private *id_priv, int timeout_ms,
 	path_rec.pkey = cpu_to_be16(ib_addr_get_pkey(dev_addr));
 	path_rec.numb_path = 1;
 	path_rec.reversible = 1;
-	sa_path_set_service_id(&path_rec,
-			       rdma_get_service_id(&id_priv->id,
-						   cma_dst_addr(id_priv)));
+	path_rec.service_id = rdma_get_service_id(&id_priv->id,
+						  cma_dst_addr(id_priv));
 
 	comp_mask = IB_SA_PATH_REC_DGID | IB_SA_PATH_REC_SGID |
 		    IB_SA_PATH_REC_PKEY | IB_SA_PATH_REC_NUMB_PATH |


@@ -169,6 +169,16 @@ void ib_mad_cleanup(void);
 int ib_sa_init(void);
 void ib_sa_cleanup(void);
 
+int ibnl_init(void);
+void ibnl_cleanup(void);
+
+/**
+ * Check if there are any listeners to the netlink group
+ * @group: the netlink group ID
+ * Returns 0 on success or a negative for no listeners.
+ */
+int ibnl_chk_listeners(unsigned int group);
+
 int ib_nl_handle_resolve_resp(struct sk_buff *skb,
 			      struct netlink_callback *cb);
 int ib_nl_handle_set_timeout(struct sk_buff *skb,
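For in-tree callers the contract is unchanged: the function returns 0 when the netlink group has at least one listener and negative otherwise. A minimal illustrative call site (the fallback logic here is an assumption, not from this patch; RDMA_NL_GROUP_LS is the existing uapi group ID):

	/* skip the userspace resolver if no daemon is subscribed */
	if (ibnl_chk_listeners(RDMA_NL_GROUP_LS))
		return -ESRCH;	/* illustrative: caller falls back to a kernel SA query */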


@@ -37,6 +37,7 @@
 #include <net/net_namespace.h>
 #include <net/sock.h>
 #include <rdma/rdma_netlink.h>
+#include "core_priv.h"
 
 struct ibnl_client {
 	struct list_head list;
@@ -55,7 +56,6 @@ int ibnl_chk_listeners(unsigned int group)
 		return -1;
 	return 0;
 }
-EXPORT_SYMBOL(ibnl_chk_listeners);
 
 int ibnl_add_client(int index, int nops,
 		    const struct ibnl_client_cbs cb_table[])


@@ -194,7 +194,7 @@ static u32 tid;
 	.field_name = "sa_path_rec:" #field
 
 static const struct ib_field path_rec_table[] = {
-	{ PATH_REC_FIELD(ib.service_id),
+	{ PATH_REC_FIELD(service_id),
 	  .offset_words = 0,
 	  .offset_bits = 0,
 	  .size_bits = 64 },
@@ -296,7 +296,7 @@ static const struct ib_field path_rec_table[] = {
 	.field_name = "sa_path_rec:" #field
 
 static const struct ib_field opa_path_rec_table[] = {
-	{ OPA_PATH_REC_FIELD(opa.service_id),
+	{ OPA_PATH_REC_FIELD(service_id),
 	  .offset_words = 0,
 	  .offset_bits = 0,
 	  .size_bits = 64 },
@@ -774,7 +774,7 @@ static void ib_nl_set_path_rec_attrs(struct sk_buff *skb,
 	/* Now build the attributes */
 	if (comp_mask & IB_SA_PATH_REC_SERVICE_ID) {
-		val64 = be64_to_cpu(sa_path_get_service_id(sa_rec));
+		val64 = be64_to_cpu(sa_rec->service_id);
 		nla_put(skb, RDMA_NLA_F_MANDATORY | LS_NLA_TYPE_SERVICE_ID,
 			sizeof(val64), &val64);
 	}


@@ -58,7 +58,7 @@ static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int d
 	for_each_sg(umem->sg_head.sgl, sg, umem->npages, i) {
 		page = sg_page(sg);
-		if (umem->writable && dirty)
+		if (!PageDirty(page) && umem->writable && dirty)
 			set_page_dirty_lock(page);
 		put_page(page);
 	}
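Read back in one piece, the fixed release loop looks like this (the declarations are reconstructed from context and should be treated as assumptions; the PageDirty() test is the actual change):

	struct scatterlist *sg;
	struct page *page;
	int i;

	for_each_sg(umem->sg_head.sgl, sg, umem->npages, i) {
		page = sg_page(sg);
		/* don't re-dirty a page that is already marked dirty */
		if (!PageDirty(page) && umem->writable && dirty)
			set_page_dirty_lock(page);
		put_page(page);
	}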


@@ -321,11 +321,15 @@ int ib_umem_odp_get(struct ib_ucontext *context, struct ib_umem *umem,
 		struct vm_area_struct *vma;
 		struct hstate *h;
 
+		down_read(&mm->mmap_sem);
 		vma = find_vma(mm, ib_umem_start(umem));
-		if (!vma || !is_vm_hugetlb_page(vma))
+		if (!vma || !is_vm_hugetlb_page(vma)) {
+			up_read(&mm->mmap_sem);
 			return -EINVAL;
+		}
 		h = hstate_vma(vma);
 		umem->page_shift = huge_page_shift(h);
+		up_read(&mm->mmap_sem);
 		umem->hugetlb = 1;
 	} else {
 		umem->hugetlb = 0;
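The invariant the fix restores: find_vma() and the vma it returns are only valid while mm->mmap_sem is held, and the lock must be dropped on every exit path. The post-fix sequence, condensed for reference:

	down_read(&mm->mmap_sem);
	vma = find_vma(mm, ib_umem_start(umem));
	if (!vma || !is_vm_hugetlb_page(vma)) {
		up_read(&mm->mmap_sem);	/* unlock on the error path too */
		return -EINVAL;
	}
	h = hstate_vma(vma);
	umem->page_shift = huge_page_shift(h);
	up_read(&mm->mmap_sem);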


@@ -96,11 +96,11 @@ void ib_copy_qp_attr_to_user(struct ib_uverbs_qp_attr *dst,
 }
 EXPORT_SYMBOL(ib_copy_qp_attr_to_user);
 
-void __ib_copy_path_rec_to_user(struct ib_user_path_rec *dst,
-				struct sa_path_rec *src)
+static void __ib_copy_path_rec_to_user(struct ib_user_path_rec *dst,
+				       struct sa_path_rec *src)
 {
-	memcpy(dst->dgid, src->dgid.raw, sizeof src->dgid);
-	memcpy(dst->sgid, src->sgid.raw, sizeof src->sgid);
+	memcpy(dst->dgid, src->dgid.raw, sizeof(src->dgid));
+	memcpy(dst->sgid, src->sgid.raw, sizeof(src->sgid));
 	dst->dlid = htons(ntohl(sa_path_get_dlid(src)));
 	dst->slid = htons(ntohl(sa_path_get_slid(src)));


@@ -488,6 +488,7 @@ static int _put_ep_safe(struct c4iw_dev *dev, struct sk_buff *skb)
 	ep = *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *)));
 	release_ep_resources(ep);
+	kfree_skb(skb);
 	return 0;
 }
 
@@ -498,6 +499,7 @@ static int _put_pass_ep_safe(struct c4iw_dev *dev, struct sk_buff *skb)
 	ep = *((struct c4iw_ep **)(skb->cb + 2 * sizeof(void *)));
 	c4iw_put_ep(&ep->parent_ep->com);
 	release_ep_resources(ep);
+	kfree_skb(skb);
 	return 0;
 }
 
@@ -569,11 +571,13 @@ static void abort_arp_failure(void *handle, struct sk_buff *skb)
 	pr_debug("%s rdev %p\n", __func__, rdev);
 	req->cmd = CPL_ABORT_NO_RST;
+	skb_get(skb);
 	ret = c4iw_ofld_send(rdev, skb);
 	if (ret) {
 		__state_set(&ep->com, DEAD);
 		queue_arp_failure_cpl(ep, skb, FAKE_CPL_PUT_EP_SAFE);
-	}
+	} else
+		kfree_skb(skb);
 }
 
@@ -2517,7 +2521,8 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
 		goto reject;
 	}
 
-	hdrs = sizeof(struct iphdr) + sizeof(struct tcphdr) +
+	hdrs = ((iptype == 4) ? sizeof(struct iphdr) : sizeof(struct ipv6hdr)) +
+	       sizeof(struct tcphdr) +
 	       ((enable_tcp_timestamps && req->tcpopt.tstamp) ? 12 : 0);
 	if (peer_mss && child_ep->mtu > (peer_mss + hdrs))
 		child_ep->mtu = peer_mss + hdrs;
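Two separate fixes land in this file. The hdrs change is plain arithmetic: an IPv6 header is 40 bytes (sizeof(struct ipv6hdr)), not the 20 of struct iphdr, so the MTU clamp was 20 bytes short for IPv6 connections. The ARP-failure change is a reference-counting dance; annotated below (the comments are editorial interpretation, the code is from the hunk):

	skb_get(skb);		/* extra ref so the skb survives c4iw_ofld_send() */
	ret = c4iw_ofld_send(rdev, skb);
	if (ret) {
		__state_set(&ep->com, DEAD);
		/* on failure, the extra ref is handed to the deferred CPL */
		queue_arp_failure_cpl(ep, skb, FAKE_CPL_PUT_EP_SAFE);
	} else
		kfree_skb(skb);	/* on success, drop the extra ref ourselves */

The kfree_skb() calls added to _put_ep_safe()/_put_pass_ep_safe() release that reference once the deferred work has run.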


@@ -971,7 +971,7 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
 		 devp->rdev.lldi.sge_egrstatuspagesize);
 	devp->rdev.hw_queue.t4_eq_status_entries =
-		devp->rdev.lldi.sge_ingpadboundary > 64 ? 2 : 1;
+		devp->rdev.lldi.sge_egrstatuspagesize / 64;
 	devp->rdev.hw_queue.t4_max_eq_size = 65520;
 	devp->rdev.hw_queue.t4_max_iq_size = 65520;
 	devp->rdev.hw_queue.t4_max_rq_size = 8192 -


@@ -6312,25 +6312,38 @@ static void handle_8051_request(struct hfi1_pportdata *ppd)
 	}
 }
 
-static void write_global_credit(struct hfi1_devdata *dd,
-				u8 vau, u16 total, u16 shared)
+/*
+ * Set up allocation unit vaulue.
+ */
+void set_up_vau(struct hfi1_devdata *dd, u8 vau)
 {
-	write_csr(dd, SEND_CM_GLOBAL_CREDIT,
-		  ((u64)total <<
-		   SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT) |
-		  ((u64)shared <<
-		   SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT) |
-		  ((u64)vau << SEND_CM_GLOBAL_CREDIT_AU_SHIFT));
+	u64 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
+
+	/* do not modify other values in the register */
+	reg &= ~SEND_CM_GLOBAL_CREDIT_AU_SMASK;
+	reg |= (u64)vau << SEND_CM_GLOBAL_CREDIT_AU_SHIFT;
+	write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
 }
 
 /*
  * Set up initial VL15 credits of the remote. Assumes the rest of
- * the CM credit registers are zero from a previous global or credit reset .
+ * the CM credit registers are zero from a previous global or credit reset.
+ * Shared limit for VL15 will always be 0.
  */
-void set_up_vl15(struct hfi1_devdata *dd, u8 vau, u16 vl15buf)
+void set_up_vl15(struct hfi1_devdata *dd, u16 vl15buf)
 {
-	/* leave shared count at zero for both global and VL15 */
-	write_global_credit(dd, vau, vl15buf, 0);
+	u64 reg = read_csr(dd, SEND_CM_GLOBAL_CREDIT);
+
+	/* set initial values for total and shared credit limit */
+	reg &= ~(SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SMASK |
+		 SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SMASK);
+
+	/*
+	 * Set total limit to be equal to VL15 credits.
+	 * Leave shared limit at 0.
+	 */
+	reg |= (u64)vl15buf << SEND_CM_GLOBAL_CREDIT_TOTAL_CREDIT_LIMIT_SHIFT;
+	write_csr(dd, SEND_CM_GLOBAL_CREDIT, reg);
 
 	write_csr(dd, SEND_CM_CREDIT_VL15, (u64)vl15buf
 		  << SEND_CM_CREDIT_VL15_DEDICATED_LIMIT_VL_SHIFT);
@@ -6348,9 +6361,11 @@ void reset_link_credits(struct hfi1_devdata *dd)
 	for (i = 0; i < TXE_NUM_DATA_VL; i++)
 		write_csr(dd, SEND_CM_CREDIT_VL + (8 * i), 0);
 	write_csr(dd, SEND_CM_CREDIT_VL15, 0);
-	write_global_credit(dd, 0, 0, 0);
+	write_csr(dd, SEND_CM_GLOBAL_CREDIT, 0);
 	/* reset the CM block */
 	pio_send_control(dd, PSC_CM_RESET);
+	/* reset cached value */
+	dd->vl15buf_cached = 0;
 }
 
@@ -6839,24 +6854,35 @@ void handle_link_up(struct work_struct *work)
 {
 	struct hfi1_pportdata *ppd = container_of(work, struct hfi1_pportdata,
 						  link_up_work);
+	struct hfi1_devdata *dd = ppd->dd;
+
 	set_link_state(ppd, HLS_UP_INIT);
 
 	/* cache the read of DC_LCB_STS_ROUND_TRIP_LTP_CNT */
-	read_ltp_rtt(ppd->dd);
+	read_ltp_rtt(dd);
 	/*
 	 * OPA specifies that certain counters are cleared on a transition
 	 * to link up, so do that.
 	 */
-	clear_linkup_counters(ppd->dd);
+	clear_linkup_counters(dd);
 	/*
 	 * And (re)set link up default values.
 	 */
 	set_linkup_defaults(ppd);
 
+	/*
+	 * Set VL15 credits. Use cached value from verify cap interrupt.
+	 * In case of quick linkup or simulator, vl15 value will be set by
+	 * handle_linkup_change. VerifyCap interrupt handler will not be
+	 * called in those scenarios.
+	 */
+	if (!(quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR))
+		set_up_vl15(dd, dd->vl15buf_cached);
+
 	/* enforce link speed enabled */
 	if ((ppd->link_speed_active & ppd->link_speed_enabled) == 0) {
 		/* oops - current speed is not enabled, bounce */
-		dd_dev_err(ppd->dd,
+		dd_dev_err(dd,
 			   "Link speed active 0x%x is outside enabled 0x%x, downing link\n",
 			   ppd->link_speed_active, ppd->link_speed_enabled);
 		set_link_down_reason(ppd, OPA_LINKDOWN_REASON_SPEED_POLICY, 0,
@@ -7357,7 +7383,14 @@ void handle_verify_cap(struct work_struct *work)
 	 */
 	if (vau == 0)
 		vau = 1;
-	set_up_vl15(dd, vau, vl15buf);
+	set_up_vau(dd, vau);
+
+	/*
+	 * Set VL15 credits to 0 in global credit register. Cache remote VL15
+	 * credits value and wait for link-up interrupt ot set it.
+	 */
+	set_up_vl15(dd, 0);
+	dd->vl15buf_cached = vl15buf;
 
 	/* set up the LCB CRC mode */
 	crc_mask = ppd->port_crc_mode_enabled & partner_supported_crc;
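Condensing the chip.c changes into the handshake they implement (an illustrative flow only; all names come from the hunks above):

	/* VerifyCap interrupt: learn the remote's VL15 credits, but do not
	 * extend them yet -- the link is not fully up. */
	set_up_vau(dd, vau);
	set_up_vl15(dd, 0);
	dd->vl15buf_cached = vl15buf;

	/* Link-up interrupt: now it is safe to extend VL15 credits. */
	if (!(quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR))
		set_up_vl15(dd, dd->vl15buf_cached);

This closes the window in which the remote could send VL15 MAD packets before the HFI was ready (see the new vl15buf_cached comment in hfi.h below).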


@@ -839,7 +839,9 @@
 #define SEND_CM_CTRL_FORCE_CREDIT_MODE_SMASK 0x8ull
 #define SEND_CM_CTRL_RESETCSR 0x0000000000000020ull
 #define SEND_CM_GLOBAL_CREDIT (TXE + 0x000000000508)
+#define SEND_CM_GLOBAL_CREDIT_AU_MASK 0x7ull
 #define SEND_CM_GLOBAL_CREDIT_AU_SHIFT 16
+#define SEND_CM_GLOBAL_CREDIT_AU_SMASK 0x70000ull
 #define SEND_CM_GLOBAL_CREDIT_RESETCSR 0x0000094000030000ull
 #define SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_MASK 0xFFFFull
 #define SEND_CM_GLOBAL_CREDIT_SHARED_LIMIT_SHIFT 0
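The two new constants are the usual MASK/SMASK pair: the SMASK is simply the field mask shifted into its register position. A compile-time restatement (not part of the patch):

	BUILD_BUG_ON(SEND_CM_GLOBAL_CREDIT_AU_SMASK !=
		     (SEND_CM_GLOBAL_CREDIT_AU_MASK <<
		      SEND_CM_GLOBAL_CREDIT_AU_SHIFT));	/* 0x7ull << 16 == 0x70000ull */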


@@ -1045,6 +1045,14 @@ struct hfi1_devdata {
 	/* initial vl15 credits to use */
 	u16 vl15_init;
 
+	/*
+	 * Cached value for vl15buf, read during verify cap interrupt. VL15
+	 * credits are to be kept at 0 and set when handling the link-up
+	 * interrupt. This removes the possibility of receiving VL15 MAD
+	 * packets before this HFI is ready.
+	 */
+	u16 vl15buf_cached;
+
 	/* Misc small ints */
 	u8 n_krcv_queues;
 	u8 qos_shift;
@@ -1598,7 +1606,8 @@ int hfi1_rcvbuf_validate(u32 size, u8 type, u16 *encode);
 int fm_get_table(struct hfi1_pportdata *ppd, int which, void *t);
 int fm_set_table(struct hfi1_pportdata *ppd, int which, void *t);
 
-void set_up_vl15(struct hfi1_devdata *dd, u8 vau, u16 vl15buf);
+void set_up_vau(struct hfi1_devdata *dd, u8 vau);
+void set_up_vl15(struct hfi1_devdata *dd, u16 vl15buf);
 void reset_link_credits(struct hfi1_devdata *dd);
 void assign_remote_cm_au_table(struct hfi1_devdata *dd, u8 vcu);


@@ -130,7 +130,8 @@ void handle_linkup_change(struct hfi1_devdata *dd, u32 linkup)
 		 * the remote values. Both sides must be using the values.
 		 */
 		if (quick_linkup || dd->icode == ICODE_FUNCTIONAL_SIMULATOR) {
-			set_up_vl15(dd, dd->vau, dd->vl15_init);
+			set_up_vau(dd, dd->vau);
+			set_up_vl15(dd, dd->vl15_init);
 			assign_remote_cm_au_table(dd, dd->vcu);
 		}


@@ -207,8 +207,8 @@ int hfi1_pcie_ddinit(struct hfi1_devdata *dd, struct pci_dev *pdev)
 	/*
 	 * Save BARs and command to rewrite after device reset.
 	 */
-	dd->pcibar0 = addr;
-	dd->pcibar1 = addr >> 32;
+	pci_read_config_dword(dd->pcidev, PCI_BASE_ADDRESS_0, &dd->pcibar0);
+	pci_read_config_dword(dd->pcidev, PCI_BASE_ADDRESS_1, &dd->pcibar1);
 	pci_read_config_dword(dd->pcidev, PCI_ROM_ADDRESS, &dd->pci_rom);
 	pci_read_config_word(dd->pcidev, PCI_COMMAND, &dd->pci_command);
 	pcie_capability_read_word(dd->pcidev, PCI_EXP_DEVCTL, &dd->pcie_devctl);


@@ -2159,8 +2159,11 @@ send_last:
 		ret = hfi1_rvt_get_rwqe(qp, 1);
 		if (ret < 0)
 			goto nack_op_err;
-		if (!ret)
+		if (!ret) {
+			/* peer will send again */
+			rvt_put_ss(&qp->r_sge);
 			goto rnr_nak;
+		}
 		wc.ex.imm_data = ohdr->u.rc.imm_data;
 		wc.wc_flags = IB_WC_WITH_IMM;
 		goto send_last;


@@ -196,7 +196,8 @@ static const struct sysfs_ops port_cc_sysfs_ops = {
 };
 
 static struct attribute *port_cc_default_attributes[] = {
-	&cc_prescan_attr.attr
+	&cc_prescan_attr.attr,
+	NULL
 };
 
 static struct kobj_type port_cc_ktype = {
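The sysfs/kobject core walks a default-attributes array until it reaches a NULL entry, so an unterminated array sends it reading past the end of the array. The fixed array, with the sentinel called out:

	static struct attribute *port_cc_default_attributes[] = {
		&cc_prescan_attr.attr,
		NULL	/* sentinel: kobject core iterates until it sees this */
	};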


@@ -784,7 +784,6 @@ static void i40iw_build_mpa_v2(struct i40iw_cm_node *cm_node,
 	}
 
 	ctrl_ird |= IETF_PEER_TO_PEER;
-	ctrl_ird |= IETF_FLPDU_ZERO_LEN;
 
 	switch (mpa_key) {
 	case MPA_KEY_REQUEST:
@@ -2446,8 +2445,8 @@ static void i40iw_handle_rcv_mpa(struct i40iw_cm_node *cm_node,
 		} else {
 			type = I40IW_CM_EVENT_CONNECTED;
 			cm_node->state = I40IW_CM_STATE_OFFLOADED;
-			i40iw_send_ack(cm_node);
 		}
+		i40iw_send_ack(cm_node);
 		break;
 	default:
 		pr_err("%s wrong cm_node state =%d\n", __func__, cm_node->state);


@@ -285,28 +285,20 @@ void i40iw_change_l2params(struct i40iw_sc_vsi *vsi, struct i40iw_l2params *l2pa
 	struct i40iw_sc_dev *dev = vsi->dev;
 	struct i40iw_sc_qp *qp = NULL;
 	bool qs_handle_change = false;
-	bool mss_change = false;
 	unsigned long flags;
 	u16 qs_handle;
 	int i;
 
-	if (vsi->mss != l2params->mss) {
-		mss_change = true;
-		vsi->mss = l2params->mss;
-	}
+	vsi->mss = l2params->mss;
 
 	i40iw_fill_qos_list(l2params->qs_handle_list);
 	for (i = 0; i < I40IW_MAX_USER_PRIORITY; i++) {
 		qs_handle = l2params->qs_handle_list[i];
 		if (vsi->qos[i].qs_handle != qs_handle)
 			qs_handle_change = true;
-		else if (!mss_change)
-			continue;	/* no MSS nor qs handle change */
 		spin_lock_irqsave(&vsi->qos[i].lock, flags);
 		qp = i40iw_get_qp(&vsi->qos[i].qplist, qp);
 		while (qp) {
-			if (mss_change)
-				i40iw_qp_mss_modify(dev, qp);
 			if (qs_handle_change) {
 				qp->qs_handle = qs_handle;
 				/* issue cqp suspend command */
@@ -2395,7 +2387,6 @@ static enum i40iw_status_code i40iw_sc_qp_modify(
 	set_64bit_val(wqe,
 		      8,
-		      LS_64(info->new_mss, I40IW_CQPSQ_QP_NEWMSS) |
 		      LS_64(term_len, I40IW_CQPSQ_QP_TERMLEN));
 
 	set_64bit_val(wqe, 16, qp->hw_host_ctx_pa);
@@ -2410,7 +2401,6 @@ static enum i40iw_status_code i40iw_sc_qp_modify(
 		 LS_64(info->cq_num_valid, I40IW_CQPSQ_QP_CQNUMVALID) |
 		 LS_64(info->force_loopback, I40IW_CQPSQ_QP_FORCELOOPBACK) |
 		 LS_64(qp->qp_type, I40IW_CQPSQ_QP_QPTYPE) |
-		 LS_64(info->mss_change, I40IW_CQPSQ_QP_MSSCHANGE) |
 		 LS_64(info->static_rsrc, I40IW_CQPSQ_QP_STATRSRC) |
 		 LS_64(info->remove_hash_idx, I40IW_CQPSQ_QP_REMOVEHASHENTRY) |
 		 LS_64(term_actions, I40IW_CQPSQ_QP_TERMACT) |


@@ -1319,13 +1319,13 @@ static enum i40iw_status_code i40iw_initialize_dev(struct i40iw_device *iwdev,
 	status = i40iw_obj_aligned_mem(iwdev, &mem, I40IW_QUERY_FPM_BUF_SIZE,
 				       I40IW_FPM_QUERY_BUF_ALIGNMENT_MASK);
 	if (status)
-		goto exit;
+		goto error;
 	info.fpm_query_buf_pa = mem.pa;
 	info.fpm_query_buf = mem.va;
 	status = i40iw_obj_aligned_mem(iwdev, &mem, I40IW_COMMIT_FPM_BUF_SIZE,
 				       I40IW_FPM_COMMIT_BUF_ALIGNMENT_MASK);
 	if (status)
-		goto exit;
+		goto error;
 	info.fpm_commit_buf_pa = mem.pa;
 	info.fpm_commit_buf = mem.va;
 	info.hmc_fn_id = ldev->fid;
@@ -1347,11 +1347,9 @@ static enum i40iw_status_code i40iw_initialize_dev(struct i40iw_device *iwdev,
 	info.exception_lan_queue = 1;
 	info.vchnl_send = i40iw_virtchnl_send;
 	status = i40iw_device_init(&iwdev->sc_dev, &info);
-exit:
-	if (status) {
-		kfree(iwdev->hmc_info_mem);
-		iwdev->hmc_info_mem = NULL;
-	}
+	if (status)
+		goto error;
+
 	memset(&vsi_info, 0, sizeof(vsi_info));
 	vsi_info.dev = &iwdev->sc_dev;
 	vsi_info.back_vsi = (void *)iwdev;
@@ -1362,11 +1360,19 @@ exit:
 		memset(&stats_info, 0, sizeof(stats_info));
 		stats_info.fcn_id = ldev->fid;
 		stats_info.pestat = kzalloc(sizeof(*stats_info.pestat), GFP_KERNEL);
+		if (!stats_info.pestat) {
+			status = I40IW_ERR_NO_MEMORY;
+			goto error;
+		}
 		stats_info.stats_initialize = true;
 		if (stats_info.pestat)
 			i40iw_vsi_stats_init(&iwdev->vsi, &stats_info);
 	}
 	return status;
+error:
+	kfree(iwdev->hmc_info_mem);
+	iwdev->hmc_info_mem = NULL;
+	return status;
 }
 
 /**


@@ -199,7 +199,6 @@ void i40iw_cqp_spawn_worker(struct i40iw_sc_dev *dev,
 			    struct i40iw_virtchnl_work_info *work_info, u32 iw_vf_idx);
 void *i40iw_remove_head(struct list_head *list);
 void i40iw_qp_suspend_resume(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp, bool suspend);
-void i40iw_qp_mss_modify(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp);
 void i40iw_term_modify_qp(struct i40iw_sc_qp *qp, u8 next_state, u8 term, u8 term_len);
 void i40iw_terminate_done(struct i40iw_sc_qp *qp, int timeout_occurred);


@@ -541,7 +541,6 @@ struct i40iw_create_qp_info {
 struct i40iw_modify_qp_info {
 	u64 rx_win0;
 	u64 rx_win1;
-	u16 new_mss;
 	u8 next_iwarp_state;
 	u8 termlen;
 	bool ord_valid;
@@ -554,7 +553,6 @@ struct i40iw_modify_qp_info {
 	bool dont_send_term;
 	bool dont_send_fin;
 	bool cached_var_valid;
-	bool mss_change;
 	bool force_loopback;
 };


@@ -756,23 +756,6 @@ void i40iw_qp_suspend_resume(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp, b
 		i40iw_pr_err("CQP-OP QP Suspend/Resume fail");
 }
 
-/**
- * i40iw_qp_mss_modify - modify mss for qp
- * @dev: hardware control device structure
- * @qp: hardware control qp
- */
-void i40iw_qp_mss_modify(struct i40iw_sc_dev *dev, struct i40iw_sc_qp *qp)
-{
-	struct i40iw_device *iwdev = (struct i40iw_device *)dev->back_dev;
-	struct i40iw_qp *iwqp = (struct i40iw_qp *)qp->back_qp;
-	struct i40iw_modify_qp_info info;
-
-	memset(&info, 0, sizeof(info));
-	info.mss_change = true;
-	info.new_mss = qp->vsi->mss;
-	i40iw_hw_modify_qp(iwdev, iwqp, &info, false);
-}
-
 /**
  * i40iw_term_modify_qp - modify qp for term message
  * @qp: hardware control qp


@@ -443,10 +443,7 @@ enum i40iw_status_code i40iw_vchnl_recv_pf(struct i40iw_sc_dev *dev,
 	if (!dev->vchnl_up)
 		return I40IW_ERR_NOT_READY;
 	if (vchnl_msg->iw_op_code == I40IW_VCHNL_OP_GET_VER) {
-		if (vchnl_msg->iw_op_ver != I40IW_VCHNL_OP_GET_VER_V0)
-			vchnl_pf_send_get_ver_resp(dev, vf_id, vchnl_msg);
-		else
-			vchnl_pf_send_get_ver_resp(dev, vf_id, vchnl_msg);
+		vchnl_pf_send_get_ver_resp(dev, vf_id, vchnl_msg);
 		return I40IW_SUCCESS;
 	}
 	for (iw_vf_idx = 0; iw_vf_idx < I40IW_MAX_PE_ENABLED_VF_COUNT; iw_vf_idx++) {


@@ -1578,6 +1578,7 @@ static void mlx4_ib_multiplex_mad(struct mlx4_ib_demux_pv_ctx *ctx, struct ib_wc
 	if (port < 0)
 		return;
 	ah.av.ib.port_pd = cpu_to_be32(port << 24 | (be32_to_cpu(ah.av.ib.port_pd) & 0xffffff));
+	ah.ibah.type = rdma_ah_find_type(&dev->ib_dev, port);
 
 	mlx4_ib_query_ah(&ah.ibah, &ah_attr);
 	if (rdma_ah_get_ah_flags(&ah_attr) & IB_AH_GRH)


@@ -2979,6 +2979,18 @@ error_0:
 	return ret;
 }
 
+static u8 mlx5_get_umr_fence(u8 umr_fence_cap)
+{
+	switch (umr_fence_cap) {
+	case MLX5_CAP_UMR_FENCE_NONE:
+		return MLX5_FENCE_MODE_NONE;
+	case MLX5_CAP_UMR_FENCE_SMALL:
+		return MLX5_FENCE_MODE_INITIATOR_SMALL;
+	default:
+		return MLX5_FENCE_MODE_STRONG_ORDERING;
+	}
+}
+
 static int create_dev_resources(struct mlx5_ib_resources *devr)
 {
 	struct ib_srq_init_attr attr;
@@ -3693,6 +3705,8 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
 	mlx5_ib_internal_fill_odp_caps(dev);
 
+	dev->umr_fence = mlx5_get_umr_fence(MLX5_CAP_GEN(mdev, umr_fence));
+
 	if (MLX5_CAP_GEN(mdev, imaicl)) {
 		dev->ib_dev.alloc_mw = mlx5_ib_alloc_mw;
 		dev->ib_dev.dealloc_mw = mlx5_ib_dealloc_mw;


@@ -349,7 +349,7 @@ struct mlx5_ib_qp {
 	struct mlx5_ib_wq rq;
 
 	u8 sq_signal_bits;
-	u8 fm_cache;
+	u8 next_fence;
 	struct mlx5_ib_wq sq;
 
 	/* serialize qp state modifications
@@ -654,6 +654,7 @@ struct mlx5_ib_dev {
 	struct mlx5_ib_port *port;
 	struct mlx5_sq_bfreg bfreg;
 	struct mlx5_sq_bfreg fp_bfreg;
+	u8 umr_fence;
 };
 
 static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)


@@ -3738,24 +3738,6 @@ static void dump_wqe(struct mlx5_ib_qp *qp, int idx, int size_16)
 	}
 }
 
-static u8 get_fence(u8 fence, struct ib_send_wr *wr)
-{
-	if (unlikely(wr->opcode == IB_WR_LOCAL_INV &&
-		     wr->send_flags & IB_SEND_FENCE))
-		return MLX5_FENCE_MODE_STRONG_ORDERING;
-
-	if (unlikely(fence)) {
-		if (wr->send_flags & IB_SEND_FENCE)
-			return MLX5_FENCE_MODE_SMALL_AND_FENCE;
-		else
-			return fence;
-	} else if (unlikely(wr->send_flags & IB_SEND_FENCE)) {
-		return MLX5_FENCE_MODE_FENCE;
-	}
-
-	return 0;
-}
-
 static int begin_wqe(struct mlx5_ib_qp *qp, void **seg,
 		     struct mlx5_wqe_ctrl_seg **ctrl,
 		     struct ib_send_wr *wr, unsigned *idx,
@@ -3784,8 +3766,7 @@ static int begin_wqe(struct mlx5_ib_qp *qp, void **seg,
 static void finish_wqe(struct mlx5_ib_qp *qp,
 		       struct mlx5_wqe_ctrl_seg *ctrl,
 		       u8 size, unsigned idx, u64 wr_id,
-		       int nreq, u8 fence, u8 next_fence,
-		       u32 mlx5_opcode)
+		       int nreq, u8 fence, u32 mlx5_opcode)
 {
 	u8 opmod = 0;
 
@@ -3793,7 +3774,6 @@ static void finish_wqe(struct mlx5_ib_qp *qp,
 					     mlx5_opcode | ((u32)opmod << 24));
 	ctrl->qpn_ds = cpu_to_be32(size | (qp->trans_qp.base.mqp.qpn << 8));
 	ctrl->fm_ce_se |= fence;
-	qp->fm_cache = next_fence;
 	if (unlikely(qp->wq_sig))
 		ctrl->signature = wq_sig(ctrl);
 
@@ -3853,7 +3833,6 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 			goto out;
 		}
 
-		fence = qp->fm_cache;
 		num_sge = wr->num_sge;
 		if (unlikely(num_sge > qp->sq.max_gs)) {
 			mlx5_ib_warn(dev, "\n");
@@ -3870,6 +3849,19 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 			goto out;
 		}
 
+		if (wr->opcode == IB_WR_LOCAL_INV ||
+		    wr->opcode == IB_WR_REG_MR) {
+			fence = dev->umr_fence;
+			next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
+		} else if (wr->send_flags & IB_SEND_FENCE) {
+			if (qp->next_fence)
+				fence = MLX5_FENCE_MODE_SMALL_AND_FENCE;
+			else
+				fence = MLX5_FENCE_MODE_FENCE;
+		} else {
+			fence = qp->next_fence;
+		}
+
 		switch (ibqp->qp_type) {
 		case IB_QPT_XRC_INI:
 			xrc = seg;
@@ -3896,7 +3888,6 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 				goto out;
 
 			case IB_WR_LOCAL_INV:
-				next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
 				qp->sq.wr_data[idx] = IB_WR_LOCAL_INV;
 				ctrl->imm = cpu_to_be32(wr->ex.invalidate_rkey);
 				set_linv_wr(qp, &seg, &size);
@@ -3904,7 +3895,6 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 				break;
 
 			case IB_WR_REG_MR:
-				next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
 				qp->sq.wr_data[idx] = IB_WR_REG_MR;
 				ctrl->imm = cpu_to_be32(reg_wr(wr)->key);
 				err = set_reg_wr(qp, reg_wr(wr), &seg, &size);
@@ -3927,9 +3917,8 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 					goto out;
 				}
 
-				finish_wqe(qp, ctrl, size, idx, wr->wr_id,
-					   nreq, get_fence(fence, wr),
-					   next_fence, MLX5_OPCODE_UMR);
+				finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq,
+					   fence, MLX5_OPCODE_UMR);
 				/*
 				 * SET_PSV WQEs are not signaled and solicited
 				 * on error
@@ -3954,9 +3943,8 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 					goto out;
 				}
 
-				finish_wqe(qp, ctrl, size, idx, wr->wr_id,
-					   nreq, get_fence(fence, wr),
-					   next_fence, MLX5_OPCODE_SET_PSV);
+				finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq,
+					   fence, MLX5_OPCODE_SET_PSV);
 				err = begin_wqe(qp, &seg, &ctrl, wr,
 						&idx, &size, nreq);
 				if (err) {
@@ -3966,7 +3954,6 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 					goto out;
 				}
 
-				next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
 				err = set_psv_wr(&sig_handover_wr(wr)->sig_attrs->wire,
 						 mr->sig->psv_wire.psv_idx, &seg,
 						 &size);
@@ -3976,9 +3963,9 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 					goto out;
 				}
 
-				finish_wqe(qp, ctrl, size, idx, wr->wr_id,
-					   nreq, get_fence(fence, wr),
-					   next_fence, MLX5_OPCODE_SET_PSV);
+				finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq,
+					   fence, MLX5_OPCODE_SET_PSV);
+				qp->next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
 				num_sge = 0;
 				goto skip_psv;
 
@@ -4089,8 +4076,8 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 			}
 		}
 
-		finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq,
-			   get_fence(fence, wr), next_fence,
-			   mlx5_ib_opcode[wr->opcode]);
+		qp->next_fence = next_fence;
+		finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq, fence,
+			   mlx5_ib_opcode[wr->opcode]);
 skip_psv:
 		if (0)
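The net effect of the qp.c rework, collapsed into one illustrative helper (this function does not exist in the patch; it restates the inline logic added to mlx5_ib_post_send()): the fence for UMR-class work requests now comes from the HCA's reported capability, and an explicit per-QP next_fence replaces the old fm_cache guesswork.

	static u8 pick_fence(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
			     struct ib_send_wr *wr, u8 *next_fence)
	{
		if (wr->opcode == IB_WR_LOCAL_INV || wr->opcode == IB_WR_REG_MR) {
			*next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
			return dev->umr_fence;	/* per-HCA capability, set at probe */
		}
		if (wr->send_flags & IB_SEND_FENCE)
			return qp->next_fence ? MLX5_FENCE_MODE_SMALL_AND_FENCE
					      : MLX5_FENCE_MODE_FENCE;
		return qp->next_fence;
	}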


@@ -610,7 +610,6 @@ static void build_mpa_v2(struct nes_cm_node *cm_node,
 		ctrl_ord = cm_node->ord_size & IETF_NO_IRD_ORD;
 	}
 	ctrl_ird |= IETF_PEER_TO_PEER;
-	ctrl_ird |= IETF_FLPDU_ZERO_LEN;
 
 	switch (mpa_key) {
 	case MPA_KEY_REQUEST:
@@ -1826,7 +1825,7 @@ static void handle_rcv_mpa(struct nes_cm_node *cm_node, struct sk_buff *skb)
 			type = NES_CM_EVENT_CONNECTED;
 			cm_node->state = NES_CM_STATE_TSA;
 		}
-
+		send_ack(cm_node, NULL);
 		break;
 	default:
 		WARN_ON(1);


@@ -270,11 +270,13 @@ static inline int qedr_gsi_build_header(struct qedr_dev *dev,
 		return rc;
 	}
 
-	vlan_id = rdma_vlan_dev_vlan_id(sgid_attr.ndev);
-	if (vlan_id < VLAN_CFI_MASK)
-		has_vlan = true;
-	if (sgid_attr.ndev)
+	if (sgid_attr.ndev) {
+		vlan_id = rdma_vlan_dev_vlan_id(sgid_attr.ndev);
+		if (vlan_id < VLAN_CFI_MASK)
+			has_vlan = true;
+
 		dev_put(sgid_attr.ndev);
+	}
 
 	if (!memcmp(&sgid, &zgid, sizeof(sgid))) {
 		DP_ERR(dev, "gsi post send: GID not found GID index %d\n",


@@ -1956,8 +1956,10 @@ send_last:
 		ret = qib_get_rwqe(qp, 1);
 		if (ret < 0)
 			goto nack_op_err;
-		if (!ret)
+		if (!ret) {
+			rvt_put_ss(&qp->r_sge);
 			goto rnr_nak;
+		}
 		wc.ex.imm_data = ohdr->u.rc.imm_data;
 		hdrsize += 4;
 		wc.wc_flags = IB_WC_WITH_IMM;


@@ -178,7 +178,7 @@ static inline int ib_speed_enum_to_int(int speed)
 static int ipoib_get_link_ksettings(struct net_device *netdev,
 				    struct ethtool_link_ksettings *cmd)
 {
-	struct ipoib_dev_priv *priv = netdev_priv(netdev);
+	struct ipoib_dev_priv *priv = ipoib_priv(netdev);
 	struct ib_port_attr attr;
 	int ret, speed, width;


@@ -1590,7 +1590,7 @@ static void ipoib_neigh_hash_uninit(struct net_device *dev)
 	wait_for_completion(&priv->ntbl.deleted);
 }
 
-void ipoib_dev_uninit_default(struct net_device *dev)
+static void ipoib_dev_uninit_default(struct net_device *dev)
 {
 	struct ipoib_dev_priv *priv = ipoib_priv(dev);


@@ -320,7 +320,7 @@ static int srp_new_cm_id(struct srp_rdma_ch *ch)
 	ch->path.sgid = target->sgid;
 	ch->path.dgid = target->orig_dgid;
 	ch->path.pkey = target->pkey;
-	sa_path_set_service_id(&ch->path, target->service_id);
+	ch->path.service_id = target->service_id;
 
 	return 0;
 }
@@ -575,7 +575,7 @@ static int srp_create_ch_ib(struct srp_rdma_ch *ch)
 	return 0;
 
 err_qp:
-	srp_destroy_qp(ch, qp);
+	ib_destroy_qp(qp);
 
 err_send_cq:
 	ib_free_cq(send_cq);


@@ -766,6 +766,12 @@ enum {
 	MLX5_CAP_PORT_TYPE_ETH = 0x1,
 };
 
+enum {
+	MLX5_CAP_UMR_FENCE_STRONG = 0x0,
+	MLX5_CAP_UMR_FENCE_SMALL = 0x1,
+	MLX5_CAP_UMR_FENCE_NONE = 0x2,
+};
+
 struct mlx5_ifc_cmd_hca_cap_bits {
 	u8 reserved_at_0[0x80];
 
@@ -875,7 +881,9 @@ struct mlx5_ifc_cmd_hca_cap_bits {
 	u8 reserved_at_202[0x1];
 	u8 ipoib_enhanced_offloads[0x1];
 	u8 ipoib_basic_offloads[0x1];
-	u8 reserved_at_205[0xa];
+	u8 reserved_at_205[0x5];
+	u8 umr_fence[0x2];
+	u8 reserved_at_20c[0x3];
 	u8 drain_sigerr[0x1];
 	u8 cmdif_checksum[0x2];
 	u8 sigerr_cqe[0x1];
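Firmware interface layouts are positional: every field must keep its exact bit offset, so the old 0xa-bit reserved run is split around the new field rather than resized. The bookkeeping: reserved_at_205 now covers bits 0x205-0x209, umr_fence sits at 0x20a-0x20b, and the next reserved block resumes at 0x20c. As a compile-time note (not in the patch):

	BUILD_BUG_ON(0x5 + 0x2 + 0x3 != 0xa);	/* the split preserves total width */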


@@ -158,7 +158,6 @@ enum sa_path_rec_type {
 };
 
 struct sa_path_rec_ib {
-	__be64 service_id;
 	__be16 dlid;
 	__be16 slid;
 	u8 raw_traffic;
@@ -174,7 +173,6 @@ struct sa_path_rec_roce {
 };
 
 struct sa_path_rec_opa {
-	__be64 service_id;
 	__be32 dlid;
 	__be32 slid;
 	u8 raw_traffic;
@@ -189,6 +187,7 @@ struct sa_path_rec_opa {
 struct sa_path_rec {
 	union ib_gid dgid;
 	union ib_gid sgid;
+	__be64 service_id;
 	/* reserved */
 	__be32 flow_label;
 	u8 hop_limit;
@@ -262,7 +261,7 @@ static inline void path_conv_opa_to_ib(struct sa_path_rec *ib,
 		ib->ib.dlid = htons(ntohl(opa->opa.dlid));
 		ib->ib.slid = htons(ntohl(opa->opa.slid));
 	}
-	ib->ib.service_id = opa->opa.service_id;
+	ib->service_id = opa->service_id;
 	ib->ib.raw_traffic = opa->opa.raw_traffic;
 }
 
@@ -281,7 +280,7 @@ static inline void path_conv_ib_to_opa(struct sa_path_rec *opa,
 	}
 	opa->opa.slid = slid;
 	opa->opa.dlid = dlid;
-	opa->opa.service_id = ib->ib.service_id;
+	opa->service_id = ib->service_id;
 	opa->opa.raw_traffic = ib->ib.raw_traffic;
 }
 
@@ -591,15 +590,6 @@ static inline bool sa_path_is_roce(struct sa_path_rec *rec)
 		(rec->rec_type == SA_PATH_REC_TYPE_ROCE_V2));
 }
 
-static inline void sa_path_set_service_id(struct sa_path_rec *rec,
-					  __be64 service_id)
-{
-	if (rec->rec_type == SA_PATH_REC_TYPE_IB)
-		rec->ib.service_id = service_id;
-	else if (rec->rec_type == SA_PATH_REC_TYPE_OPA)
-		rec->opa.service_id = service_id;
-}
-
 static inline void sa_path_set_slid(struct sa_path_rec *rec, __be32 slid)
 {
 	if (rec->rec_type == SA_PATH_REC_TYPE_IB)
@@ -625,15 +615,6 @@ static inline void sa_path_set_raw_traffic(struct sa_path_rec *rec,
 		rec->opa.raw_traffic = raw_traffic;
 }
 
-static inline __be64 sa_path_get_service_id(struct sa_path_rec *rec)
-{
-	if (rec->rec_type == SA_PATH_REC_TYPE_IB)
-		return rec->ib.service_id;
-	else if (rec->rec_type == SA_PATH_REC_TYPE_OPA)
-		return rec->opa.service_id;
-	return 0;
-}
-
 static inline __be32 sa_path_get_slid(struct sa_path_rec *rec)
 {
 	if (rec->rec_type == SA_PATH_REC_TYPE_IB)
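Before this change each union arm carried its own copy of service_id, and the removed sa_path_get_service_id() helper silently returned 0 for any record that was neither IB nor OPA type, so RoCE-typed records lost their service ID on the setter/getter round trip — consistent with the "Fix kernel panic in CMA request handler flow" fix at the top of this series. With the field hoisted into sa_path_rec itself, callers drop the type dispatch entirely:

	/* before: dispatch on rec_type hidden inside a helper */
	__be64 sid = sa_path_get_service_id(rec);

	/* after: service_id is common to every path record type */
	__be64 sid = rec->service_id;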


@@ -10,9 +10,6 @@ struct ibnl_client_cbs {
 	struct module *module;
 };
 
-int ibnl_init(void);
-void ibnl_cleanup(void);
-
 /**
  * Add a a client to the list of IB netlink exporters.
  * @index: Index of the added client
@@ -77,11 +74,4 @@ int ibnl_unicast(struct sk_buff *skb, struct nlmsghdr *nlh,
 int ibnl_multicast(struct sk_buff *skb, struct nlmsghdr *nlh,
 		   unsigned int group, gfp_t flags);
 
-/**
- * Check if there are any listeners to the netlink group
- * @group: the netlink group ID
- * Returns 0 on success or a negative for no listeners.
- */
-int ibnl_chk_listeners(unsigned int group);
-
 #endif /* _RDMA_NETLINK_H */