Merge branch 'for-linus' of master.kernel.org:/pub/scm/linux/kernel/git/roland/infiniband

* 'for-linus' of master.kernel.org:/pub/scm/linux/kernel/git/roland/infiniband: (49 commits)
  IB: Set class_dev->dev in core for nice device symlink
  IB/ehca: Implement modify_port
  IB/umad: Clarify documentation of transaction ID
  IPoIB/cm: spin_lock_irqsave() -> spin_lock_irq() replacements
  IB/mad: Change SMI to use enums rather than magic return codes
  IB/umad: Implement GRH handling for sent/received MADs
  IB/ipoib: Use ib_init_ah_from_path to initialize ah_attr
  IB/sa: Set src_path_bits correctly in ib_init_ah_from_path()
  IB/ucm: Simplify ib_ucm_event()
  RDMA/ucma: Simplify ucma_get_event()
  IB/mthca: Simplify CQ cleaning in mthca_free_qp()
  IB/mthca: Fix mthca_write_mtt() on HCAs with hidden memory
  IB/mthca: Update HCA firmware revisions
  IB/ipath: Fix WC format drift between user and kernel space
  IB/ipath: Check that a UD work request's address handle is valid
  IB/ipath: Remove duplicate stuff from ipath_verbs.h
  IB/ipath: Check reserved memory keys
  IB/ipath: Fix unit selection when all CPU affinity bits set
  IB/ipath: Don't allow QPs 0 and 1 to be opened multiple times
  IB/ipath: Disable IB link earlier in shutdown sequence
  ...
Linus Torvalds 2007-04-27 09:39:27 -07:00
commit afc2e82c08
47 changed files with 1630 additions and 1012 deletions


@ -91,6 +91,14 @@ Sending MADs
if (ret != sizeof *mad + mad_length)
perror("write");
Transaction IDs
Users of the umad devices can use the lower 32 bits of the
transaction ID field (that is, the least significant half of the
field in network byte order) in MADs being sent to match
request/response pairs. The upper 32 bits are reserved for use by
the kernel and will be overwritten before a MAD is sent.
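
As a rough illustration (not part of the patch or of the documentation file itself), a userspace consumer could key its outstanding-request table on the least significant 32 bits of the TID; the helper names and the hand-computed header offsets below are assumptions of this sketch, not part of the umad ABI described above.

#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>

/* Return the lower 32 bits of a MAD's transaction ID in host byte order.
 * The TID is a 64-bit big-endian field at offset 8 of the common MAD
 * header, so its least significant half occupies bytes 12..15. */
static uint32_t mad_tid_low32(const void *mad)
{
	uint32_t low;

	memcpy(&low, (const uint8_t *)mad + 12, sizeof(low));
	return ntohl(low);
}

/* Match a received response against the TID value placed in the request;
 * the upper 32 bits cannot be used since the kernel overwrites them. */
static int mad_matches_request(const void *response, uint32_t req_tid_low)
{
	return mad_tid_low32(response) == req_tid_low;
}
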
Setting IsSM Capability Bit
To set the IsSM capability bit for a port, simply open the


@ -72,7 +72,6 @@ obj-$(CONFIG_CPU_FREQ) += cpufreq/
obj-$(CONFIG_MMC) += mmc/
obj-$(CONFIG_NEW_LEDS) += leds/
obj-$(CONFIG_INFINIBAND) += infiniband/
obj-$(CONFIG_IPATH_CORE) += infiniband/
obj-$(CONFIG_SGI_SN) += sn/
obj-y += firmware/
obj-$(CONFIG_CRYPTO) += crypto/


@ -1,5 +1,5 @@
/*
* Copyright (c) 2004, 2005 Voltaire, Inc. All rights reserved.
* Copyright (c) 2004-2007 Voltaire, Inc. All rights reserved.
* Copyright (c) 2005 Intel Corporation. All rights reserved.
* Copyright (c) 2005 Mellanox Technologies Ltd. All rights reserved.
*
@ -31,7 +31,6 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* $Id: mad.c 5596 2006-03-03 01:00:07Z sean.hefty $
*/
#include <linux/dma-mapping.h>
#include <rdma/ib_cache.h>
@ -668,7 +667,7 @@ static void build_smp_wc(struct ib_qp *qp,
static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
struct ib_mad_send_wr_private *mad_send_wr)
{
int ret;
int ret = 0;
struct ib_smp *smp = mad_send_wr->send_buf.mad;
unsigned long flags;
struct ib_mad_local_private *local;
@ -688,14 +687,15 @@ static int handle_outgoing_dr_smp(struct ib_mad_agent_private *mad_agent_priv,
*/
if ((ib_get_smp_direction(smp) ? smp->dr_dlid : smp->dr_slid) ==
IB_LID_PERMISSIVE &&
!smi_handle_dr_smp_send(smp, device->node_type, port_num)) {
smi_handle_dr_smp_send(smp, device->node_type, port_num) ==
IB_SMI_DISCARD) {
ret = -EINVAL;
printk(KERN_ERR PFX "Invalid directed route\n");
goto out;
}
/* Check to post send on QP or process locally */
ret = smi_check_local_smp(smp, device);
if (!ret)
if (smi_check_local_smp(smp, device) == IB_SMI_DISCARD)
goto out;
local = kmalloc(sizeof *local, GFP_ATOMIC);
@ -1874,18 +1874,22 @@ static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
if (recv->mad.mad.mad_hdr.mgmt_class ==
IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
if (!smi_handle_dr_smp_recv(&recv->mad.smp,
port_priv->device->node_type,
port_priv->port_num,
port_priv->device->phys_port_cnt))
if (smi_handle_dr_smp_recv(&recv->mad.smp,
port_priv->device->node_type,
port_priv->port_num,
port_priv->device->phys_port_cnt) ==
IB_SMI_DISCARD)
goto out;
if (!smi_check_forward_dr_smp(&recv->mad.smp))
if (smi_check_forward_dr_smp(&recv->mad.smp) == IB_SMI_LOCAL)
goto local;
if (!smi_handle_dr_smp_send(&recv->mad.smp,
port_priv->device->node_type,
port_priv->port_num))
if (smi_handle_dr_smp_send(&recv->mad.smp,
port_priv->device->node_type,
port_priv->port_num) == IB_SMI_DISCARD)
goto out;
if (!smi_check_local_smp(&recv->mad.smp, port_priv->device))
if (smi_check_local_smp(&recv->mad.smp, port_priv->device) == IB_SMI_DISCARD)
goto out;
}


@ -57,6 +57,7 @@ MODULE_LICENSE("Dual BSD/GPL");
struct ib_sa_sm_ah {
struct ib_ah *ah;
struct kref ref;
u8 src_path_mask;
};
struct ib_sa_port {
@ -380,6 +381,7 @@ static void update_sm_ah(struct work_struct *work)
}
kref_init(&new_ah->ref);
new_ah->src_path_mask = (1 << port_attr.lmc) - 1;
memset(&ah_attr, 0, sizeof ah_attr);
ah_attr.dlid = port_attr.sm_lid;
@ -460,6 +462,25 @@ void ib_sa_cancel_query(int id, struct ib_sa_query *query)
}
EXPORT_SYMBOL(ib_sa_cancel_query);
static u8 get_src_path_mask(struct ib_device *device, u8 port_num)
{
struct ib_sa_device *sa_dev;
struct ib_sa_port *port;
unsigned long flags;
u8 src_path_mask;
sa_dev = ib_get_client_data(device, &sa_client);
if (!sa_dev)
return 0x7f;
port = &sa_dev->port[port_num - sa_dev->start_port];
spin_lock_irqsave(&port->ah_lock, flags);
src_path_mask = port->sm_ah ? port->sm_ah->src_path_mask : 0x7f;
spin_unlock_irqrestore(&port->ah_lock, flags);
return src_path_mask;
}
int ib_init_ah_from_path(struct ib_device *device, u8 port_num,
struct ib_sa_path_rec *rec, struct ib_ah_attr *ah_attr)
{
@ -469,7 +490,8 @@ int ib_init_ah_from_path(struct ib_device *device, u8 port_num,
memset(ah_attr, 0, sizeof *ah_attr);
ah_attr->dlid = be16_to_cpu(rec->dlid);
ah_attr->sl = rec->sl;
ah_attr->src_path_bits = be16_to_cpu(rec->slid) & 0x7f;
ah_attr->src_path_bits = be16_to_cpu(rec->slid) &
get_src_path_mask(device, port_num);
ah_attr->port_num = port_num;
ah_attr->static_rate = rec->rate;
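
A minimal sketch of the arithmetic this hunk introduces (the function name is illustrative, not from the patch): with an LMC of n, only the low n bits of the source LID select the path, so masking with (1 << lmc) - 1 taken from the SM address handle, instead of the former hard-coded 0x7f, keeps src_path_bits within the port's actual LID range.

#include <stdint.h>

/* e.g. lmc = 2 -> mask = 0x3, so slid 0x0014 yields src_path_bits 0 */
static uint8_t src_path_bits_from_slid(uint16_t slid, uint8_t lmc)
{
	uint8_t src_path_mask = (1 << lmc) - 1;

	return slid & src_path_mask;
}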


@ -3,7 +3,7 @@
* Copyright (c) 2004, 2005 Infinicon Corporation. All rights reserved.
* Copyright (c) 2004, 2005 Intel Corporation. All rights reserved.
* Copyright (c) 2004, 2005 Topspin Corporation. All rights reserved.
* Copyright (c) 2004, 2005 Voltaire Corporation. All rights reserved.
* Copyright (c) 2004-2007 Voltaire Corporation. All rights reserved.
* Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
@ -34,7 +34,6 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* $Id: smi.c 1389 2004-12-27 22:56:47Z roland $
*/
#include <rdma/ib_smi.h>
@ -44,9 +43,8 @@
* Fixup a directed route SMP for sending
* Return 0 if the SMP should be discarded
*/
int smi_handle_dr_smp_send(struct ib_smp *smp,
u8 node_type,
int port_num)
enum smi_action smi_handle_dr_smp_send(struct ib_smp *smp,
u8 node_type, int port_num)
{
u8 hop_ptr, hop_cnt;
@ -59,18 +57,18 @@ int smi_handle_dr_smp_send(struct ib_smp *smp,
if (hop_cnt && hop_ptr == 0) {
smp->hop_ptr++;
return (smp->initial_path[smp->hop_ptr] ==
port_num);
port_num ? IB_SMI_HANDLE : IB_SMI_DISCARD);
}
/* C14-9:2 */
if (hop_ptr && hop_ptr < hop_cnt) {
if (node_type != RDMA_NODE_IB_SWITCH)
return 0;
return IB_SMI_DISCARD;
/* smp->return_path set when received */
smp->hop_ptr++;
return (smp->initial_path[smp->hop_ptr] ==
port_num);
port_num ? IB_SMI_HANDLE : IB_SMI_DISCARD);
}
/* C14-9:3 -- We're at the end of the DR segment of path */
@ -78,29 +76,30 @@ int smi_handle_dr_smp_send(struct ib_smp *smp,
/* smp->return_path set when received */
smp->hop_ptr++;
return (node_type == RDMA_NODE_IB_SWITCH ||
smp->dr_dlid == IB_LID_PERMISSIVE);
smp->dr_dlid == IB_LID_PERMISSIVE ?
IB_SMI_HANDLE : IB_SMI_DISCARD);
}
/* C14-9:4 -- hop_ptr = hop_cnt + 1 -> give to SMA/SM */
/* C14-9:5 -- Fail unreasonable hop pointer */
return (hop_ptr == hop_cnt + 1);
return (hop_ptr == hop_cnt + 1 ? IB_SMI_HANDLE : IB_SMI_DISCARD);
} else {
/* C14-13:1 */
if (hop_cnt && hop_ptr == hop_cnt + 1) {
smp->hop_ptr--;
return (smp->return_path[smp->hop_ptr] ==
port_num);
port_num ? IB_SMI_HANDLE : IB_SMI_DISCARD);
}
/* C14-13:2 */
if (2 <= hop_ptr && hop_ptr <= hop_cnt) {
if (node_type != RDMA_NODE_IB_SWITCH)
return 0;
return IB_SMI_DISCARD;
smp->hop_ptr--;
return (smp->return_path[smp->hop_ptr] ==
port_num);
port_num ? IB_SMI_HANDLE : IB_SMI_DISCARD);
}
/* C14-13:3 -- at the end of the DR segment of path */
@ -108,15 +107,16 @@ int smi_handle_dr_smp_send(struct ib_smp *smp,
smp->hop_ptr--;
/* C14-13:3 -- SMPs destined for SM shouldn't be here */
return (node_type == RDMA_NODE_IB_SWITCH ||
smp->dr_slid == IB_LID_PERMISSIVE);
smp->dr_slid == IB_LID_PERMISSIVE ?
IB_SMI_HANDLE : IB_SMI_DISCARD);
}
/* C14-13:4 -- hop_ptr = 0 -> should have gone to SM */
if (hop_ptr == 0)
return 1;
return IB_SMI_HANDLE;
/* C14-13:5 -- Check for unreasonable hop pointer */
return 0;
return IB_SMI_DISCARD;
}
}
@ -124,10 +124,8 @@ int smi_handle_dr_smp_send(struct ib_smp *smp,
* Adjust information for a received SMP
* Return 0 if the SMP should be dropped
*/
int smi_handle_dr_smp_recv(struct ib_smp *smp,
u8 node_type,
int port_num,
int phys_port_cnt)
enum smi_action smi_handle_dr_smp_recv(struct ib_smp *smp, u8 node_type,
int port_num, int phys_port_cnt)
{
u8 hop_ptr, hop_cnt;
@ -138,16 +136,17 @@ int smi_handle_dr_smp_recv(struct ib_smp *smp,
if (!ib_get_smp_direction(smp)) {
/* C14-9:1 -- sender should have incremented hop_ptr */
if (hop_cnt && hop_ptr == 0)
return 0;
return IB_SMI_DISCARD;
/* C14-9:2 -- intermediate hop */
if (hop_ptr && hop_ptr < hop_cnt) {
if (node_type != RDMA_NODE_IB_SWITCH)
return 0;
return IB_SMI_DISCARD;
smp->return_path[hop_ptr] = port_num;
/* smp->hop_ptr updated when sending */
return (smp->initial_path[hop_ptr+1] <= phys_port_cnt);
return (smp->initial_path[hop_ptr+1] <= phys_port_cnt ?
IB_SMI_HANDLE : IB_SMI_DISCARD);
}
/* C14-9:3 -- We're at the end of the DR segment of path */
@ -157,12 +156,13 @@ int smi_handle_dr_smp_recv(struct ib_smp *smp,
/* smp->hop_ptr updated when sending */
return (node_type == RDMA_NODE_IB_SWITCH ||
smp->dr_dlid == IB_LID_PERMISSIVE);
smp->dr_dlid == IB_LID_PERMISSIVE ?
IB_SMI_HANDLE : IB_SMI_DISCARD);
}
/* C14-9:4 -- hop_ptr = hop_cnt + 1 -> give to SMA/SM */
/* C14-9:5 -- fail unreasonable hop pointer */
return (hop_ptr == hop_cnt + 1);
return (hop_ptr == hop_cnt + 1 ? IB_SMI_HANDLE : IB_SMI_DISCARD);
} else {
@ -170,16 +170,17 @@ int smi_handle_dr_smp_recv(struct ib_smp *smp,
if (hop_cnt && hop_ptr == hop_cnt + 1) {
smp->hop_ptr--;
return (smp->return_path[smp->hop_ptr] ==
port_num);
port_num ? IB_SMI_HANDLE : IB_SMI_DISCARD);
}
/* C14-13:2 */
if (2 <= hop_ptr && hop_ptr <= hop_cnt) {
if (node_type != RDMA_NODE_IB_SWITCH)
return 0;
return IB_SMI_DISCARD;
/* smp->hop_ptr updated when sending */
return (smp->return_path[hop_ptr-1] <= phys_port_cnt);
return (smp->return_path[hop_ptr-1] <= phys_port_cnt ?
IB_SMI_HANDLE : IB_SMI_DISCARD);
}
/* C14-13:3 -- We're at the end of the DR segment of path */
@ -187,23 +188,20 @@ int smi_handle_dr_smp_recv(struct ib_smp *smp,
if (smp->dr_slid == IB_LID_PERMISSIVE) {
/* giving SMP to SM - update hop_ptr */
smp->hop_ptr--;
return 1;
return IB_SMI_HANDLE;
}
/* smp->hop_ptr updated when sending */
return (node_type == RDMA_NODE_IB_SWITCH);
return (node_type == RDMA_NODE_IB_SWITCH ?
IB_SMI_HANDLE: IB_SMI_DISCARD);
}
/* C14-13:4 -- hop_ptr = 0 -> give to SM */
/* C14-13:5 -- Check for unreasonable hop pointer */
return (hop_ptr == 0);
return (hop_ptr == 0 ? IB_SMI_HANDLE : IB_SMI_DISCARD);
}
}
/*
* Return 1 if the received DR SMP should be forwarded to the send queue
* Return 0 if the SMP should be completed up the stack
*/
int smi_check_forward_dr_smp(struct ib_smp *smp)
enum smi_forward_action smi_check_forward_dr_smp(struct ib_smp *smp)
{
u8 hop_ptr, hop_cnt;
@ -213,23 +211,25 @@ int smi_check_forward_dr_smp(struct ib_smp *smp)
if (!ib_get_smp_direction(smp)) {
/* C14-9:2 -- intermediate hop */
if (hop_ptr && hop_ptr < hop_cnt)
return 1;
return IB_SMI_SEND;
/* C14-9:3 -- at the end of the DR segment of path */
if (hop_ptr == hop_cnt)
return (smp->dr_dlid == IB_LID_PERMISSIVE);
return (smp->dr_dlid == IB_LID_PERMISSIVE ?
IB_SMI_SEND : IB_SMI_LOCAL);
/* C14-9:4 -- hop_ptr = hop_cnt + 1 -> give to SMA/SM */
if (hop_ptr == hop_cnt + 1)
return 1;
return IB_SMI_SEND;
} else {
/* C14-13:2 */
/* C14-13:2 -- intermediate hop */
if (2 <= hop_ptr && hop_ptr <= hop_cnt)
return 1;
return IB_SMI_SEND;
/* C14-13:3 -- at the end of the DR segment of path */
if (hop_ptr == 1)
return (smp->dr_slid != IB_LID_PERMISSIVE);
return (smp->dr_slid != IB_LID_PERMISSIVE ?
IB_SMI_SEND : IB_SMI_LOCAL);
}
return 0;
return IB_SMI_LOCAL;
}


@ -3,7 +3,7 @@
* Copyright (c) 2004 Infinicon Corporation. All rights reserved.
* Copyright (c) 2004 Intel Corporation. All rights reserved.
* Copyright (c) 2004 Topspin Corporation. All rights reserved.
* Copyright (c) 2004 Voltaire Corporation. All rights reserved.
* Copyright (c) 2004-2007 Voltaire Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@ -33,7 +33,6 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* $Id: smi.h 1389 2004-12-27 22:56:47Z roland $
*/
#ifndef __SMI_H_
@ -41,26 +40,33 @@
#include <rdma/ib_smi.h>
int smi_handle_dr_smp_recv(struct ib_smp *smp,
u8 node_type,
int port_num,
int phys_port_cnt);
extern int smi_check_forward_dr_smp(struct ib_smp *smp);
extern int smi_handle_dr_smp_send(struct ib_smp *smp,
u8 node_type,
int port_num);
enum smi_action {
IB_SMI_DISCARD,
IB_SMI_HANDLE
};
enum smi_forward_action {
IB_SMI_LOCAL, /* SMP should be completed up the stack */
IB_SMI_SEND, /* received DR SMP should be forwarded to the send queue */
};
enum smi_action smi_handle_dr_smp_recv(struct ib_smp *smp, u8 node_type,
int port_num, int phys_port_cnt);
extern enum smi_forward_action smi_check_forward_dr_smp(struct ib_smp *smp);
extern enum smi_action smi_handle_dr_smp_send(struct ib_smp *smp,
u8 node_type, int port_num);
/*
* Return 1 if the SMP should be handled by the local SMA/SM via process_mad
*/
static inline int smi_check_local_smp(struct ib_smp *smp,
struct ib_device *device)
static inline enum smi_action smi_check_local_smp(struct ib_smp *smp,
struct ib_device *device)
{
/* C14-9:3 -- We're at the end of the DR segment of path */
/* C14-9:4 -- Hop Pointer = Hop Count + 1 -> give to SMA/SM */
return ((device->process_mad &&
!ib_get_smp_direction(smp) &&
(smp->hop_ptr == smp->hop_cnt + 1)));
(smp->hop_ptr == smp->hop_cnt + 1)) ?
IB_SMI_HANDLE : IB_SMI_DISCARD);
}
#endif /* __SMI_H_ */


@ -683,6 +683,7 @@ int ib_device_register_sysfs(struct ib_device *device)
class_dev->class = &ib_class;
class_dev->class_data = device;
class_dev->dev = device->dma_device;
strlcpy(class_dev->class_id, device->name, BUS_ID_SIZE);
INIT_LIST_HEAD(&device->port_list);


@ -407,29 +407,18 @@ static ssize_t ib_ucm_event(struct ib_ucm_file *file,
mutex_lock(&file->file_mutex);
while (list_empty(&file->events)) {
if (file->filp->f_flags & O_NONBLOCK) {
result = -EAGAIN;
break;
}
if (signal_pending(current)) {
result = -ERESTARTSYS;
break;
}
prepare_to_wait(&file->poll_wait, &wait, TASK_INTERRUPTIBLE);
mutex_unlock(&file->file_mutex);
schedule();
if (file->filp->f_flags & O_NONBLOCK)
return -EAGAIN;
if (wait_event_interruptible(file->poll_wait,
!list_empty(&file->events)))
return -ERESTARTSYS;
mutex_lock(&file->file_mutex);
finish_wait(&file->poll_wait, &wait);
}
if (result)
goto done;
uevent = list_entry(file->events.next, struct ib_ucm_event, file_list);
if (ib_ucm_new_cm_id(uevent->resp.event)) {


@ -306,25 +306,17 @@ static ssize_t ucma_get_event(struct ucma_file *file, const char __user *inbuf,
mutex_lock(&file->mut);
while (list_empty(&file->event_list)) {
if (file->filp->f_flags & O_NONBLOCK) {
ret = -EAGAIN;
break;
}
if (signal_pending(current)) {
ret = -ERESTARTSYS;
break;
}
prepare_to_wait(&file->poll_wait, &wait, TASK_INTERRUPTIBLE);
mutex_unlock(&file->mut);
schedule();
mutex_lock(&file->mut);
finish_wait(&file->poll_wait, &wait);
}
if (ret)
goto done;
if (file->filp->f_flags & O_NONBLOCK)
return -EAGAIN;
if (wait_event_interruptible(file->poll_wait,
!list_empty(&file->event_list)))
return -ERESTARTSYS;
mutex_lock(&file->mut);
}
uevent = list_entry(file->event_list.next, struct ucma_event, list);
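
Both this hunk and the ib_ucm_event() one above collapse an open-coded prepare_to_wait()/schedule()/finish_wait() loop into wait_event_interruptible(). A generic sketch of the resulting shape, using placeholder type and field names rather than the real ucm/ucma structures:

#include <linux/fs.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/wait.h>

/* placeholder container standing in for struct ib_ucm_file / ucma_file */
struct my_event_file {
	struct file *filp;
	struct mutex mut;
	struct list_head event_list;
	wait_queue_head_t poll_wait;
};

static int my_wait_for_event(struct my_event_file *file)
{
	mutex_lock(&file->mut);
	while (list_empty(&file->event_list)) {
		mutex_unlock(&file->mut);

		if (file->filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		/* sleep until an event is queued or a signal is delivered */
		if (wait_event_interruptible(file->poll_wait,
					     !list_empty(&file->event_list)))
			return -ERESTARTSYS;

		mutex_lock(&file->mut);
	}
	/* returns with file->mut held and at least one event queued */
	return 0;
}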


@ -135,7 +135,7 @@ static const dev_t base_dev = MKDEV(IB_UMAD_MAJOR, IB_UMAD_MINOR_BASE);
static DEFINE_SPINLOCK(port_lock);
static struct ib_umad_port *umad_port[IB_UMAD_MAX_PORTS];
static DECLARE_BITMAP(dev_map, IB_UMAD_MAX_PORTS * 2);
static DECLARE_BITMAP(dev_map, IB_UMAD_MAX_PORTS);
static void ib_umad_add_one(struct ib_device *device);
static void ib_umad_remove_one(struct ib_device *device);
@ -231,12 +231,17 @@ static void recv_handler(struct ib_mad_agent *agent,
packet->mad.hdr.path_bits = mad_recv_wc->wc->dlid_path_bits;
packet->mad.hdr.grh_present = !!(mad_recv_wc->wc->wc_flags & IB_WC_GRH);
if (packet->mad.hdr.grh_present) {
/* XXX parse GRH */
packet->mad.hdr.gid_index = 0;
packet->mad.hdr.hop_limit = 0;
packet->mad.hdr.traffic_class = 0;
memset(packet->mad.hdr.gid, 0, 16);
packet->mad.hdr.flow_label = 0;
struct ib_ah_attr ah_attr;
ib_init_ah_from_wc(agent->device, agent->port_num,
mad_recv_wc->wc, mad_recv_wc->recv_buf.grh,
&ah_attr);
packet->mad.hdr.gid_index = ah_attr.grh.sgid_index;
packet->mad.hdr.hop_limit = ah_attr.grh.hop_limit;
packet->mad.hdr.traffic_class = ah_attr.grh.traffic_class;
memcpy(packet->mad.hdr.gid, &ah_attr.grh.dgid, 16);
packet->mad.hdr.flow_label = cpu_to_be32(ah_attr.grh.flow_label);
}
if (queue_packet(file, agent, packet))
@ -473,6 +478,7 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
if (packet->mad.hdr.grh_present) {
ah_attr.ah_flags = IB_AH_GRH;
memcpy(ah_attr.grh.dgid.raw, packet->mad.hdr.gid, 16);
ah_attr.grh.sgid_index = packet->mad.hdr.gid_index;
ah_attr.grh.flow_label = be32_to_cpu(packet->mad.hdr.flow_label);
ah_attr.grh.hop_limit = packet->mad.hdr.hop_limit;
ah_attr.grh.traffic_class = packet->mad.hdr.traffic_class;


@ -796,7 +796,6 @@ int c2_register_device(struct c2_dev *dev)
memcpy(&dev->ibdev.node_guid, dev->pseudo_netdev->dev_addr, 6);
dev->ibdev.phys_port_cnt = 1;
dev->ibdev.dma_device = &dev->pcidev->dev;
dev->ibdev.class_dev.dev = &dev->pcidev->dev;
dev->ibdev.query_device = c2_query_device;
dev->ibdev.query_port = c2_query_port;
dev->ibdev.modify_port = c2_modify_port;


@ -1108,7 +1108,6 @@ int iwch_register_device(struct iwch_dev *dev)
memcpy(dev->ibdev.node_desc, IWCH_NODE_DESC, sizeof(IWCH_NODE_DESC));
dev->ibdev.phys_port_cnt = dev->rdev.port_info.nports;
dev->ibdev.dma_device = &(dev->rdev.rnic_info.pdev->dev);
dev->ibdev.class_dev.dev = &(dev->rdev.rnic_info.pdev->dev);
dev->ibdev.query_device = iwch_query_device;
dev->ibdev.query_port = iwch_query_port;
dev->ibdev.modify_port = iwch_modify_port;


@ -106,6 +106,7 @@ struct ehca_shca {
struct ehca_mr *maxmr;
struct ehca_pd *pd;
struct h_galpas galpas;
struct mutex modify_mutex;
};
struct ehca_pd {


@ -147,6 +147,7 @@ int ehca_query_port(struct ib_device *ibdev,
break;
}
props->port_cap_flags = rblock->capability_mask;
props->gid_tbl_len = rblock->gid_tbl_len;
props->max_msg_sz = rblock->max_msg_sz;
props->bad_pkey_cntr = rblock->bad_pkey_cntr;
@ -236,10 +237,60 @@ query_gid1:
return ret;
}
const u32 allowed_port_caps = (
IB_PORT_SM | IB_PORT_LED_INFO_SUP | IB_PORT_CM_SUP |
IB_PORT_SNMP_TUNNEL_SUP | IB_PORT_DEVICE_MGMT_SUP |
IB_PORT_VENDOR_CLASS_SUP);
int ehca_modify_port(struct ib_device *ibdev,
u8 port, int port_modify_mask,
struct ib_port_modify *props)
{
/* Not implemented yet */
return -EFAULT;
int ret = 0;
struct ehca_shca *shca = container_of(ibdev, struct ehca_shca, ib_device);
struct hipz_query_port *rblock;
u32 cap;
u64 hret;
if ((props->set_port_cap_mask | props->clr_port_cap_mask)
& ~allowed_port_caps) {
ehca_err(&shca->ib_device, "Non-changeable bits set in masks "
"set=%x clr=%x allowed=%x", props->set_port_cap_mask,
props->clr_port_cap_mask, allowed_port_caps);
return -EINVAL;
}
if (mutex_lock_interruptible(&shca->modify_mutex))
return -ERESTARTSYS;
rblock = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
if (!rblock) {
ehca_err(&shca->ib_device, "Can't allocate rblock memory.");
ret = -ENOMEM;
goto modify_port1;
}
if (hipz_h_query_port(shca->ipz_hca_handle, port, rblock) != H_SUCCESS) {
ehca_err(&shca->ib_device, "Can't query port properties");
ret = -EINVAL;
goto modify_port2;
}
cap = (rblock->capability_mask | props->set_port_cap_mask)
& ~props->clr_port_cap_mask;
hret = hipz_h_modify_port(shca->ipz_hca_handle, port,
cap, props->init_type, port_modify_mask);
if (hret != H_SUCCESS) {
ehca_err(&shca->ib_device, "Modify port failed hret=%lx", hret);
ret = -EINVAL;
}
modify_port2:
ehca_free_fw_ctrlblock(rblock);
modify_port1:
mutex_unlock(&shca->modify_mutex);
return ret;
}


@ -587,6 +587,7 @@ static int __devinit ehca_probe(struct ibmebus_dev *dev,
ehca_gen_err("Cannot allocate shca memory.");
return -ENOMEM;
}
mutex_init(&shca->modify_mutex);
shca->ibmebus_dev = dev;
shca->ipz_hca_handle.handle = *handle;


@ -70,6 +70,10 @@
#define H_ALL_RES_QP_SQUEUE_SIZE_PAGES EHCA_BMASK_IBM(0, 31)
#define H_ALL_RES_QP_RQUEUE_SIZE_PAGES EHCA_BMASK_IBM(32, 63)
#define H_MP_INIT_TYPE EHCA_BMASK_IBM(44, 47)
#define H_MP_SHUTDOWN EHCA_BMASK_IBM(48, 48)
#define H_MP_RESET_QKEY_CTR EHCA_BMASK_IBM(49, 49)
/* direct access qp controls */
#define DAQP_CTRL_ENABLE 0x01
#define DAQP_CTRL_SEND_COMP 0x20
@ -364,6 +368,26 @@ u64 hipz_h_query_port(const struct ipz_adapter_handle adapter_handle,
return ret;
}
u64 hipz_h_modify_port(const struct ipz_adapter_handle adapter_handle,
const u8 port_id, const u32 port_cap,
const u8 init_type, const int modify_mask)
{
u64 port_attributes = port_cap;
if (modify_mask & IB_PORT_SHUTDOWN)
port_attributes |= EHCA_BMASK_SET(H_MP_SHUTDOWN, 1);
if (modify_mask & IB_PORT_INIT_TYPE)
port_attributes |= EHCA_BMASK_SET(H_MP_INIT_TYPE, init_type);
if (modify_mask & IB_PORT_RESET_QKEY_CNTR)
port_attributes |= EHCA_BMASK_SET(H_MP_RESET_QKEY_CTR, 1);
return ehca_plpar_hcall_norets(H_MODIFY_PORT,
adapter_handle.handle, /* r4 */
port_id, /* r5 */
port_attributes, /* r6 */
0, 0, 0, 0);
}
u64 hipz_h_query_hca(const struct ipz_adapter_handle adapter_handle,
struct hipz_query_hca *query_hca_rblock)
{


@ -85,6 +85,10 @@ u64 hipz_h_query_port(const struct ipz_adapter_handle adapter_handle,
const u8 port_id,
struct hipz_query_port *query_port_response_block);
u64 hipz_h_modify_port(const struct ipz_adapter_handle adapter_handle,
const u8 port_id, const u32 port_cap,
const u8 init_type, const int modify_mask);
u64 hipz_h_query_hca(const struct ipz_adapter_handle adapter_handle,
struct hipz_query_hca *query_hca_rblock);


@ -1,5 +1,5 @@
/*
* Copyright (c) 2006 QLogic, Inc. All rights reserved.
* Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
* Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
@ -78,6 +78,8 @@
#define IPATH_IB_LINKINIT 3
#define IPATH_IB_LINKDOWN_SLEEP 4
#define IPATH_IB_LINKDOWN_DISABLE 5
#define IPATH_IB_LINK_LOOPBACK 6 /* enable local loopback */
#define IPATH_IB_LINK_EXTERNAL 7 /* normal, disable local loopback */
/*
* stats maintained by the driver. For now, at least, this is global
@ -316,11 +318,17 @@ struct ipath_base_info {
/* address of readonly memory copy of the rcvhdrq tail register. */
__u64 spi_rcvhdr_tailaddr;
/* shared memory pages for subports if IPATH_RUNTIME_MASTER is set */
/* shared memory pages for subports if port is shared */
__u64 spi_subport_uregbase;
__u64 spi_subport_rcvegrbuf;
__u64 spi_subport_rcvhdr_base;
/* shared memory page for hardware port if it is shared */
__u64 spi_port_uregbase;
__u64 spi_port_rcvegrbuf;
__u64 spi_port_rcvhdr_base;
__u64 spi_port_rcvhdr_tailaddr;
} __attribute__ ((aligned(8)));
@ -344,7 +352,7 @@ struct ipath_base_info {
* may not be implemented; the user code must deal with this if it
* cares, or it must abort after initialization reports the difference.
*/
#define IPATH_USER_SWMINOR 3
#define IPATH_USER_SWMINOR 5
#define IPATH_USER_SWVERSION ((IPATH_USER_SWMAJOR<<16) | IPATH_USER_SWMINOR)
@ -418,11 +426,14 @@ struct ipath_user_info {
#define IPATH_CMD_TID_UPDATE 19 /* update expected TID entries */
#define IPATH_CMD_TID_FREE 20 /* free expected TID entries */
#define IPATH_CMD_SET_PART_KEY 21 /* add partition key */
#define IPATH_CMD_SLAVE_INFO 22 /* return info on slave processes */
#define __IPATH_CMD_SLAVE_INFO 22 /* return info on slave processes (for old user code) */
#define IPATH_CMD_ASSIGN_PORT 23 /* allocate HCA and port */
#define IPATH_CMD_USER_INIT 24 /* set up userspace */
#define IPATH_CMD_UNUSED_1 25
#define IPATH_CMD_UNUSED_2 26
#define IPATH_CMD_PIOAVAILUPD 27 /* force an update of PIOAvail reg */
#define IPATH_CMD_MAX 24
#define IPATH_CMD_MAX 27
struct ipath_port_info {
__u32 num_active; /* number of active units */
@ -430,7 +441,7 @@ struct ipath_port_info {
__u16 port; /* port on unit assigned to caller */
__u16 subport; /* subport on unit assigned to caller */
__u16 num_ports; /* number of ports available on unit */
__u16 num_subports; /* number of subport slaves opened on port */
__u16 num_subports; /* number of subports opened on port */
};
struct ipath_tid_info {


@ -76,7 +76,20 @@ void ipath_cq_enter(struct ipath_cq *cq, struct ib_wc *entry, int solicited)
}
return;
}
wc->queue[head] = *entry;
wc->queue[head].wr_id = entry->wr_id;
wc->queue[head].status = entry->status;
wc->queue[head].opcode = entry->opcode;
wc->queue[head].vendor_err = entry->vendor_err;
wc->queue[head].byte_len = entry->byte_len;
wc->queue[head].imm_data = (__u32 __force)entry->imm_data;
wc->queue[head].qp_num = entry->qp->qp_num;
wc->queue[head].src_qp = entry->src_qp;
wc->queue[head].wc_flags = entry->wc_flags;
wc->queue[head].pkey_index = entry->pkey_index;
wc->queue[head].slid = entry->slid;
wc->queue[head].sl = entry->sl;
wc->queue[head].dlid_path_bits = entry->dlid_path_bits;
wc->queue[head].port_num = entry->port_num;
wc->head = next;
if (cq->notify == IB_CQ_NEXT_COMP ||
@ -122,9 +135,30 @@ int ipath_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
if (tail > (u32) cq->ibcq.cqe)
tail = (u32) cq->ibcq.cqe;
for (npolled = 0; npolled < num_entries; ++npolled, ++entry) {
struct ipath_qp *qp;
if (tail == wc->head)
break;
*entry = wc->queue[tail];
qp = ipath_lookup_qpn(&to_idev(cq->ibcq.device)->qp_table,
wc->queue[tail].qp_num);
entry->qp = &qp->ibqp;
if (atomic_dec_and_test(&qp->refcount))
wake_up(&qp->wait);
entry->wr_id = wc->queue[tail].wr_id;
entry->status = wc->queue[tail].status;
entry->opcode = wc->queue[tail].opcode;
entry->vendor_err = wc->queue[tail].vendor_err;
entry->byte_len = wc->queue[tail].byte_len;
entry->imm_data = wc->queue[tail].imm_data;
entry->src_qp = wc->queue[tail].src_qp;
entry->wc_flags = wc->queue[tail].wc_flags;
entry->pkey_index = wc->queue[tail].pkey_index;
entry->slid = wc->queue[tail].slid;
entry->sl = wc->queue[tail].sl;
entry->dlid_path_bits = wc->queue[tail].dlid_path_bits;
entry->port_num = wc->queue[tail].port_num;
if (tail >= cq->ibcq.cqe)
tail = 0;
else


@ -57,6 +57,7 @@
#define __IPATH_PROCDBG 0x100
/* print mmap/nopage stuff, not using VDBG any more */
#define __IPATH_MMDBG 0x200
#define __IPATH_ERRPKTDBG 0x400
#define __IPATH_USER_SEND 0x1000 /* use user mode send */
#define __IPATH_KERNEL_SEND 0x2000 /* use kernel mode send */
#define __IPATH_EPKTDBG 0x4000 /* print ethernet packet data */


@ -296,7 +296,7 @@ static int ipath_diag_open(struct inode *in, struct file *fp)
}
fp->private_data = dd;
ipath_diag_inuse = 1;
ipath_diag_inuse = -2;
diag_set_link = 0;
ret = 0;
@ -461,6 +461,8 @@ static ssize_t ipath_diag_read(struct file *fp, char __user *data,
else if ((count % 4) || (*off % 4))
/* address or length is not 32-bit aligned, hence invalid */
ret = -EINVAL;
else if (ipath_diag_inuse < 1 && (*off || count != 8))
ret = -EINVAL; /* prevent cat /dev/ipath_diag* */
else if ((count % 8) || (*off % 8))
/* address or length not 64-bit aligned; do 32-bit reads */
ret = ipath_read_umem32(dd, data, kreg_base + *off, count);
@ -470,6 +472,8 @@ static ssize_t ipath_diag_read(struct file *fp, char __user *data,
if (ret >= 0) {
*off += count;
ret = count;
if (ipath_diag_inuse == -2)
ipath_diag_inuse++;
}
return ret;
@ -489,6 +493,9 @@ static ssize_t ipath_diag_write(struct file *fp, const char __user *data,
else if ((count % 4) || (*off % 4))
/* address or length is not 32-bit aligned, hence invalid */
ret = -EINVAL;
else if ((ipath_diag_inuse == -1 && (*off || count != 8)) ||
ipath_diag_inuse == -2) /* read qw off 0, write qw off 0 */
ret = -EINVAL; /* before any other write allowed */
else if ((count % 8) || (*off % 8))
/* address or length not 64-bit aligned; do 32-bit writes */
ret = ipath_write_umem32(dd, kreg_base + *off, data, count);
@ -498,6 +505,8 @@ static ssize_t ipath_diag_write(struct file *fp, const char __user *data,
if (ret >= 0) {
*off += count;
ret = count;
if (ipath_diag_inuse == -1)
ipath_diag_inuse = 1; /* all read/write OK now */
}
return ret;


@ -390,15 +390,23 @@ static int __devinit ipath_init_one(struct pci_dev *pdev,
/* setup the chip-specific functions, as early as possible. */
switch (ent->device) {
#ifdef CONFIG_HT_IRQ
case PCI_DEVICE_ID_INFINIPATH_HT:
#ifdef CONFIG_HT_IRQ
ipath_init_iba6110_funcs(dd);
break;
#else
ipath_dev_err(dd, "QLogic HT device 0x%x cannot work if "
"CONFIG_HT_IRQ is not enabled\n", ent->device);
return -ENODEV;
#endif
#ifdef CONFIG_PCI_MSI
case PCI_DEVICE_ID_INFINIPATH_PE800:
#ifdef CONFIG_PCI_MSI
ipath_init_iba6120_funcs(dd);
break;
#else
ipath_dev_err(dd, "QLogic PCIE device 0x%x cannot work if "
"CONFIG_PCI_MSI is not enabled\n", ent->device);
return -ENODEV;
#endif
default:
ipath_dev_err(dd, "Found unknown QLogic deviceid 0x%x, "
@ -486,7 +494,7 @@ static int __devinit ipath_init_one(struct pci_dev *pdev,
ret = ipath_init_chip(dd, 0); /* do the chip-specific init */
if (ret)
goto bail_iounmap;
goto bail_irqsetup;
ret = ipath_enable_wc(dd);
@ -505,6 +513,9 @@ static int __devinit ipath_init_one(struct pci_dev *pdev,
goto bail;
bail_irqsetup:
if (pdev->irq) free_irq(pdev->irq, dd);
bail_iounmap:
iounmap((volatile void __iomem *) dd->ipath_kregbase);
@ -525,8 +536,6 @@ static void __devexit cleanup_device(struct ipath_devdata *dd)
{
int port;
ipath_shutdown_device(dd);
if (*dd->ipath_statusp & IPATH_STATUS_CHIP_PRESENT) {
/* can't do anything more with chip; needs re-init */
*dd->ipath_statusp &= ~IPATH_STATUS_CHIP_PRESENT;
@ -594,8 +603,9 @@ static void __devexit cleanup_device(struct ipath_devdata *dd)
ipath_cdbg(VERBOSE, "Free shadow page tid array at %p\n",
dd->ipath_pageshadow);
vfree(dd->ipath_pageshadow);
tmpp = dd->ipath_pageshadow;
dd->ipath_pageshadow = NULL;
vfree(tmpp);
}
/*
@ -622,6 +632,12 @@ static void __devexit ipath_remove_one(struct pci_dev *pdev)
ipath_cdbg(VERBOSE, "removing, pdev=%p, dd=%p\n", pdev, dd);
/*
* disable the IB link early, to be sure no new packets arrive, which
* complicates the shutdown process
*/
ipath_shutdown_device(dd);
if (dd->verbs_dev)
ipath_unregister_ib_device(dd->verbs_dev);
@ -754,9 +770,42 @@ static int ipath_wait_linkstate(struct ipath_devdata *dd, u32 state,
return (dd->ipath_flags & state) ? 0 : -ETIMEDOUT;
}
void ipath_decode_err(char *buf, size_t blen, ipath_err_t err)
/*
* Decode the error status into strings, deciding whether to always
* print * it or not depending on "normal packet errors" vs everything
* else. Return 1 if "real" errors, otherwise 0 if only packet
* errors, so caller can decide what to print with the string.
*/
int ipath_decode_err(char *buf, size_t blen, ipath_err_t err)
{
int iserr = 1;
*buf = '\0';
if (err & INFINIPATH_E_PKTERRS) {
if (!(err & ~INFINIPATH_E_PKTERRS))
iserr = 0; // if only packet errors.
if (ipath_debug & __IPATH_ERRPKTDBG) {
if (err & INFINIPATH_E_REBP)
strlcat(buf, "EBP ", blen);
if (err & INFINIPATH_E_RVCRC)
strlcat(buf, "VCRC ", blen);
if (err & INFINIPATH_E_RICRC) {
strlcat(buf, "CRC ", blen);
// clear for check below, so only once
err &= INFINIPATH_E_RICRC;
}
if (err & INFINIPATH_E_RSHORTPKTLEN)
strlcat(buf, "rshortpktlen ", blen);
if (err & INFINIPATH_E_SDROPPEDDATAPKT)
strlcat(buf, "sdroppeddatapkt ", blen);
if (err & INFINIPATH_E_SPKTLEN)
strlcat(buf, "spktlen ", blen);
}
if ((err & INFINIPATH_E_RICRC) &&
!(err&(INFINIPATH_E_RVCRC|INFINIPATH_E_REBP)))
strlcat(buf, "CRC ", blen);
if (!iserr)
goto done;
}
if (err & INFINIPATH_E_RHDRLEN)
strlcat(buf, "rhdrlen ", blen);
if (err & INFINIPATH_E_RBADTID)
@ -767,12 +816,12 @@ void ipath_decode_err(char *buf, size_t blen, ipath_err_t err)
strlcat(buf, "rhdr ", blen);
if (err & INFINIPATH_E_RLONGPKTLEN)
strlcat(buf, "rlongpktlen ", blen);
if (err & INFINIPATH_E_RSHORTPKTLEN)
strlcat(buf, "rshortpktlen ", blen);
if (err & INFINIPATH_E_RMAXPKTLEN)
strlcat(buf, "rmaxpktlen ", blen);
if (err & INFINIPATH_E_RMINPKTLEN)
strlcat(buf, "rminpktlen ", blen);
if (err & INFINIPATH_E_SMINPKTLEN)
strlcat(buf, "sminpktlen ", blen);
if (err & INFINIPATH_E_RFORMATERR)
strlcat(buf, "rformaterr ", blen);
if (err & INFINIPATH_E_RUNSUPVL)
@ -781,32 +830,20 @@ void ipath_decode_err(char *buf, size_t blen, ipath_err_t err)
strlcat(buf, "runexpchar ", blen);
if (err & INFINIPATH_E_RIBFLOW)
strlcat(buf, "ribflow ", blen);
if (err & INFINIPATH_E_REBP)
strlcat(buf, "EBP ", blen);
if (err & INFINIPATH_E_SUNDERRUN)
strlcat(buf, "sunderrun ", blen);
if (err & INFINIPATH_E_SPIOARMLAUNCH)
strlcat(buf, "spioarmlaunch ", blen);
if (err & INFINIPATH_E_SUNEXPERRPKTNUM)
strlcat(buf, "sunexperrpktnum ", blen);
if (err & INFINIPATH_E_SDROPPEDDATAPKT)
strlcat(buf, "sdroppeddatapkt ", blen);
if (err & INFINIPATH_E_SDROPPEDSMPPKT)
strlcat(buf, "sdroppedsmppkt ", blen);
if (err & INFINIPATH_E_SMAXPKTLEN)
strlcat(buf, "smaxpktlen ", blen);
if (err & INFINIPATH_E_SMINPKTLEN)
strlcat(buf, "sminpktlen ", blen);
if (err & INFINIPATH_E_SUNSUPVL)
strlcat(buf, "sunsupVL ", blen);
if (err & INFINIPATH_E_SPKTLEN)
strlcat(buf, "spktlen ", blen);
if (err & INFINIPATH_E_INVALIDADDR)
strlcat(buf, "invalidaddr ", blen);
if (err & INFINIPATH_E_RICRC)
strlcat(buf, "CRC ", blen);
if (err & INFINIPATH_E_RVCRC)
strlcat(buf, "VCRC ", blen);
if (err & INFINIPATH_E_RRCVEGRFULL)
strlcat(buf, "rcvegrfull ", blen);
if (err & INFINIPATH_E_RRCVHDRFULL)
@ -819,6 +856,8 @@ void ipath_decode_err(char *buf, size_t blen, ipath_err_t err)
strlcat(buf, "hardware ", blen);
if (err & INFINIPATH_E_RESET)
strlcat(buf, "reset ", blen);
done:
return iserr;
}
/**
@ -1662,6 +1701,22 @@ int ipath_set_linkstate(struct ipath_devdata *dd, u8 newstate)
lstate = IPATH_LINKACTIVE;
break;
case IPATH_IB_LINK_LOOPBACK:
dev_info(&dd->pcidev->dev, "Enabling IB local loopback\n");
dd->ipath_ibcctrl |= INFINIPATH_IBCC_LOOPBACK;
ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
dd->ipath_ibcctrl);
ret = 0;
goto bail; // no state change to wait for
case IPATH_IB_LINK_EXTERNAL:
dev_info(&dd->pcidev->dev, "Disabling IB local loopback (normal)\n");
dd->ipath_ibcctrl &= ~INFINIPATH_IBCC_LOOPBACK;
ipath_write_kreg(dd, dd->ipath_kregs->kr_ibcctrl,
dd->ipath_ibcctrl);
ret = 0;
goto bail; // no state change to wait for
default:
ipath_dbg("Invalid linkstate 0x%x requested\n", newstate);
ret = -EINVAL;
@ -1765,29 +1820,6 @@ int ipath_set_lid(struct ipath_devdata *dd, u32 arg, u8 lmc)
return 0;
}
/**
* ipath_read_kreg64_port - read a device's per-port 64-bit kernel register
* @dd: the infinipath device
* @regno: the register number to read
* @port: the port containing the register
*
* Registers that vary with the chip implementation constants (port)
* use this routine.
*/
u64 ipath_read_kreg64_port(const struct ipath_devdata *dd, ipath_kreg regno,
unsigned port)
{
u16 where;
if (port < dd->ipath_portcnt &&
(regno == dd->ipath_kregs->kr_rcvhdraddr ||
regno == dd->ipath_kregs->kr_rcvhdrtailaddr))
where = regno + port;
else
where = -1;
return ipath_read_kreg64(dd, where);
}
/**
* ipath_write_kreg_port - write a device's per-port 64-bit kernel register
@ -1973,7 +2005,8 @@ static int __init infinipath_init(void)
{
int ret;
ipath_dbg(KERN_INFO DRIVER_LOAD_MSG "%s", ib_ipath_version);
if (ipath_debug & __IPATH_DBG)
printk(KERN_INFO DRIVER_LOAD_MSG "%s", ib_ipath_version);
/*
* These must be called before the driver is registered with


@ -626,6 +626,10 @@ void ipath_get_eeprom_info(struct ipath_devdata *dd)
} else
memcpy(dd->ipath_serial, ifp->if_serial,
sizeof ifp->if_serial);
if (!strstr(ifp->if_comment, "Tested successfully"))
ipath_dev_err(dd, "Board SN %s did not pass functional "
"test: %s\n", dd->ipath_serial,
ifp->if_comment);
ipath_cdbg(VERBOSE, "Initted GUID to %llx from eeprom\n",
(unsigned long long) be64_to_cpu(dd->ipath_guid));


@ -1,5 +1,5 @@
/*
* Copyright (c) 2006 QLogic, Inc. All rights reserved.
* Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
* Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
@ -41,12 +41,6 @@
#include "ipath_kernel.h"
#include "ipath_common.h"
/*
* mmap64 doesn't allow all 64 bits for 32-bit applications
* so only use the low 43 bits.
*/
#define MMAP64_MASK 0x7FFFFFFFFFFUL
static int ipath_open(struct inode *, struct file *);
static int ipath_close(struct inode *, struct file *);
static ssize_t ipath_write(struct file *, const char __user *, size_t,
@ -63,6 +57,24 @@ static const struct file_operations ipath_file_ops = {
.mmap = ipath_mmap
};
/*
* Convert kernel virtual addresses to physical addresses so they don't
* potentially conflict with the chip addresses used as mmap offsets.
* It doesn't really matter what mmap offset we use as long as we can
* interpret it correctly.
*/
static u64 cvt_kvaddr(void *p)
{
struct page *page;
u64 paddr = 0;
page = vmalloc_to_page(p);
if (page)
paddr = page_to_pfn(page) << PAGE_SHIFT;
return paddr;
}
static int ipath_get_base_info(struct file *fp,
void __user *ubase, size_t ubase_size)
{
@ -87,7 +99,7 @@ static int ipath_get_base_info(struct file *fp,
sz = sizeof(*kinfo);
/* If port sharing is not requested, allow the old size structure */
if (!shared)
sz -= 3 * sizeof(u64);
sz -= 7 * sizeof(u64);
if (ubase_size < sz) {
ipath_cdbg(PROC,
"Base size %zu, need %zu (version mismatch?)\n",
@ -165,24 +177,41 @@ static int ipath_get_base_info(struct file *fp,
kinfo->spi_piobufbase = (u64) pd->port_piobufs +
dd->ipath_palign *
(dd->ipath_pbufsport - kinfo->spi_piocnt);
kinfo->__spi_uregbase = (u64) dd->ipath_uregbase +
dd->ipath_palign * pd->port_port;
} else {
unsigned slave = subport_fp(fp) - 1;
kinfo->spi_piocnt = dd->ipath_pbufsport / subport_cnt;
kinfo->spi_piobufbase = (u64) pd->port_piobufs +
dd->ipath_palign * kinfo->spi_piocnt * slave;
kinfo->__spi_uregbase = ((u64) pd->subport_uregbase +
PAGE_SIZE * slave) & MMAP64_MASK;
}
if (shared) {
kinfo->spi_port_uregbase = (u64) dd->ipath_uregbase +
dd->ipath_palign * pd->port_port;
kinfo->spi_port_rcvegrbuf = kinfo->spi_rcv_egrbufs;
kinfo->spi_port_rcvhdr_base = kinfo->spi_rcvhdr_base;
kinfo->spi_port_rcvhdr_tailaddr = kinfo->spi_rcvhdr_tailaddr;
kinfo->spi_rcvhdr_base = ((u64) pd->subport_rcvhdr_base +
pd->port_rcvhdrq_size * slave) & MMAP64_MASK;
kinfo->spi_rcvhdr_tailaddr =
(u64) pd->port_rcvhdrqtailaddr_phys & MMAP64_MASK;
kinfo->spi_rcv_egrbufs = ((u64) pd->subport_rcvegrbuf +
dd->ipath_rcvegrcnt * dd->ipath_rcvegrbufsize * slave) &
MMAP64_MASK;
kinfo->__spi_uregbase = cvt_kvaddr(pd->subport_uregbase +
PAGE_SIZE * subport_fp(fp));
kinfo->spi_rcvhdr_base = cvt_kvaddr(pd->subport_rcvhdr_base +
pd->port_rcvhdrq_size * subport_fp(fp));
kinfo->spi_rcvhdr_tailaddr = 0;
kinfo->spi_rcv_egrbufs = cvt_kvaddr(pd->subport_rcvegrbuf +
pd->port_rcvegrbuf_chunks * pd->port_rcvegrbuf_size *
subport_fp(fp));
kinfo->spi_subport_uregbase =
cvt_kvaddr(pd->subport_uregbase);
kinfo->spi_subport_rcvegrbuf =
cvt_kvaddr(pd->subport_rcvegrbuf);
kinfo->spi_subport_rcvhdr_base =
cvt_kvaddr(pd->subport_rcvhdr_base);
ipath_cdbg(PROC, "port %u flags %x %llx %llx %llx\n",
kinfo->spi_port, kinfo->spi_runtime_flags,
(unsigned long long) kinfo->spi_subport_uregbase,
(unsigned long long) kinfo->spi_subport_rcvegrbuf,
(unsigned long long) kinfo->spi_subport_rcvhdr_base);
}
kinfo->spi_pioindex = (kinfo->spi_piobufbase - dd->ipath_piobufbase) /
@ -199,20 +228,10 @@ static int ipath_get_base_info(struct file *fp,
if (master) {
kinfo->spi_runtime_flags |= IPATH_RUNTIME_MASTER;
kinfo->spi_subport_uregbase =
(u64) pd->subport_uregbase & MMAP64_MASK;
kinfo->spi_subport_rcvegrbuf =
(u64) pd->subport_rcvegrbuf & MMAP64_MASK;
kinfo->spi_subport_rcvhdr_base =
(u64) pd->subport_rcvhdr_base & MMAP64_MASK;
ipath_cdbg(PROC, "port %u flags %x %llx %llx %llx\n",
kinfo->spi_port, kinfo->spi_runtime_flags,
(unsigned long long) kinfo->spi_subport_uregbase,
(unsigned long long) kinfo->spi_subport_rcvegrbuf,
(unsigned long long) kinfo->spi_subport_rcvhdr_base);
}
if (copy_to_user(ubase, kinfo, sizeof(*kinfo)))
sz = (ubase_size < sizeof(*kinfo)) ? ubase_size : sizeof(*kinfo);
if (copy_to_user(ubase, kinfo, sz))
ret = -EFAULT;
bail:
@ -1132,67 +1151,55 @@ static int mmap_kvaddr(struct vm_area_struct *vma, u64 pgaddr,
struct ipath_devdata *dd;
void *addr;
size_t size;
int ret;
int ret = 0;
/* If the port is not shared, all addresses should be physical */
if (!pd->port_subport_cnt) {
ret = -EINVAL;
if (!pd->port_subport_cnt)
goto bail;
}
dd = pd->port_dd;
size = pd->port_rcvegrbuf_chunks * pd->port_rcvegrbuf_size;
/*
* Master has all the slave uregbase, rcvhdrq, and
* rcvegrbufs mmapped.
* Each process has all the subport uregbase, rcvhdrq, and
* rcvegrbufs mmapped - as an array for all the processes,
* and also separately for this process.
*/
if (subport == 0) {
unsigned num_slaves = pd->port_subport_cnt - 1;
if (pgaddr == ((u64) pd->subport_uregbase & MMAP64_MASK)) {
addr = pd->subport_uregbase;
size = PAGE_SIZE * num_slaves;
} else if (pgaddr == ((u64) pd->subport_rcvhdr_base &
MMAP64_MASK)) {
addr = pd->subport_rcvhdr_base;
size = pd->port_rcvhdrq_size * num_slaves;
} else if (pgaddr == ((u64) pd->subport_rcvegrbuf &
MMAP64_MASK)) {
addr = pd->subport_rcvegrbuf;
size *= num_slaves;
} else {
ret = -EINVAL;
goto bail;
}
} else if (pgaddr == (((u64) pd->subport_uregbase +
PAGE_SIZE * (subport - 1)) & MMAP64_MASK)) {
addr = pd->subport_uregbase + PAGE_SIZE * (subport - 1);
size = PAGE_SIZE;
} else if (pgaddr == (((u64) pd->subport_rcvhdr_base +
pd->port_rcvhdrq_size * (subport - 1)) &
MMAP64_MASK)) {
addr = pd->subport_rcvhdr_base +
pd->port_rcvhdrq_size * (subport - 1);
size = pd->port_rcvhdrq_size;
} else if (pgaddr == (((u64) pd->subport_rcvegrbuf +
size * (subport - 1)) & MMAP64_MASK)) {
addr = pd->subport_rcvegrbuf + size * (subport - 1);
/* rcvegrbufs are read-only on the slave */
if (vma->vm_flags & VM_WRITE) {
dev_info(&dd->pcidev->dev,
"Can't map eager buffers as "
"writable (flags=%lx)\n", vma->vm_flags);
ret = -EPERM;
goto bail;
}
/*
* Don't allow permission to later change to writeable
* with mprotect.
*/
vma->vm_flags &= ~VM_MAYWRITE;
if (pgaddr == cvt_kvaddr(pd->subport_uregbase)) {
addr = pd->subport_uregbase;
size = PAGE_SIZE * pd->port_subport_cnt;
} else if (pgaddr == cvt_kvaddr(pd->subport_rcvhdr_base)) {
addr = pd->subport_rcvhdr_base;
size = pd->port_rcvhdrq_size * pd->port_subport_cnt;
} else if (pgaddr == cvt_kvaddr(pd->subport_rcvegrbuf)) {
addr = pd->subport_rcvegrbuf;
size *= pd->port_subport_cnt;
} else if (pgaddr == cvt_kvaddr(pd->subport_uregbase +
PAGE_SIZE * subport)) {
addr = pd->subport_uregbase + PAGE_SIZE * subport;
size = PAGE_SIZE;
} else if (pgaddr == cvt_kvaddr(pd->subport_rcvhdr_base +
pd->port_rcvhdrq_size * subport)) {
addr = pd->subport_rcvhdr_base +
pd->port_rcvhdrq_size * subport;
size = pd->port_rcvhdrq_size;
} else if (pgaddr == cvt_kvaddr(pd->subport_rcvegrbuf +
size * subport)) {
addr = pd->subport_rcvegrbuf + size * subport;
/* rcvegrbufs are read-only on the slave */
if (vma->vm_flags & VM_WRITE) {
dev_info(&dd->pcidev->dev,
"Can't map eager buffers as "
"writable (flags=%lx)\n", vma->vm_flags);
ret = -EPERM;
goto bail;
}
/*
* Don't allow permission to later change to writeable
* with mprotect.
*/
vma->vm_flags &= ~VM_MAYWRITE;
} else {
ret = -EINVAL;
goto bail;
}
len = vma->vm_end - vma->vm_start;
@ -1205,7 +1212,7 @@ static int mmap_kvaddr(struct vm_area_struct *vma, u64 pgaddr,
vma->vm_pgoff = (unsigned long) addr >> PAGE_SHIFT;
vma->vm_ops = &ipath_file_vm_ops;
vma->vm_flags |= VM_RESERVED | VM_DONTEXPAND;
ret = 0;
ret = 1;
bail:
return ret;
@ -1265,19 +1272,20 @@ static int ipath_mmap(struct file *fp, struct vm_area_struct *vma)
* Check for kernel virtual addresses first, anything else must
* match a HW or memory address.
*/
if (pgaddr >= (1ULL<<40)) {
ret = mmap_kvaddr(vma, pgaddr, pd, subport_fp(fp));
ret = mmap_kvaddr(vma, pgaddr, pd, subport_fp(fp));
if (ret) {
if (ret > 0)
ret = 0;
goto bail;
}
ureg = dd->ipath_uregbase + dd->ipath_palign * pd->port_port;
if (!pd->port_subport_cnt) {
/* port is not shared */
ureg = dd->ipath_uregbase + dd->ipath_palign * pd->port_port;
piocnt = dd->ipath_pbufsport;
piobufs = pd->port_piobufs;
} else if (!subport_fp(fp)) {
/* caller is the master */
ureg = dd->ipath_uregbase + dd->ipath_palign * pd->port_port;
piocnt = (dd->ipath_pbufsport / pd->port_subport_cnt) +
(dd->ipath_pbufsport % pd->port_subport_cnt);
piobufs = pd->port_piobufs +
@ -1286,7 +1294,6 @@ static int ipath_mmap(struct file *fp, struct vm_area_struct *vma)
unsigned slave = subport_fp(fp) - 1;
/* caller is a slave */
ureg = 0;
piocnt = dd->ipath_pbufsport / pd->port_subport_cnt;
piobufs = pd->port_piobufs + dd->ipath_palign * piocnt * slave;
}
@ -1300,9 +1307,6 @@ static int ipath_mmap(struct file *fp, struct vm_area_struct *vma)
ret = ipath_mmap_mem(vma, pd, PAGE_SIZE, 0,
(void *) dd->ipath_pioavailregs_dma,
"pioavail registers");
else if (subport_fp(fp))
/* Subports don't mmap the physical receive buffers */
ret = -EINVAL;
else if (pgaddr == pd->port_rcvegr_phys)
ret = mmap_rcvegrbufs(vma, pd);
else if (pgaddr == (u64) pd->port_rcvhdrq_phys)
@ -1400,32 +1404,41 @@ static int init_subports(struct ipath_devdata *dd,
const struct ipath_user_info *uinfo)
{
int ret = 0;
unsigned num_slaves;
unsigned num_subports;
size_t size;
/* Old user binaries don't know about subports */
if ((uinfo->spu_userversion & 0xffff) != IPATH_USER_SWMINOR)
goto bail;
/*
* If the user is requesting zero or one port,
* skip the subport allocation.
*/
if (uinfo->spu_subport_cnt <= 1)
goto bail;
if (uinfo->spu_subport_cnt > 4) {
/* Old user binaries don't know about new subport implementation */
if ((uinfo->spu_userversion & 0xffff) != IPATH_USER_SWMINOR) {
dev_info(&dd->pcidev->dev,
"Mismatched user minor version (%d) and driver "
"minor version (%d) while port sharing. Ensure "
"that driver and library are from the same "
"release.\n",
(int) (uinfo->spu_userversion & 0xffff),
IPATH_USER_SWMINOR);
goto bail;
}
if (uinfo->spu_subport_cnt > INFINIPATH_MAX_SUBPORT) {
ret = -EINVAL;
goto bail;
}
num_slaves = uinfo->spu_subport_cnt - 1;
pd->subport_uregbase = vmalloc(PAGE_SIZE * num_slaves);
num_subports = uinfo->spu_subport_cnt;
pd->subport_uregbase = vmalloc(PAGE_SIZE * num_subports);
if (!pd->subport_uregbase) {
ret = -ENOMEM;
goto bail;
}
/* Note: pd->port_rcvhdrq_size isn't initialized yet. */
size = ALIGN(dd->ipath_rcvhdrcnt * dd->ipath_rcvhdrentsize *
sizeof(u32), PAGE_SIZE) * num_slaves;
sizeof(u32), PAGE_SIZE) * num_subports;
pd->subport_rcvhdr_base = vmalloc(size);
if (!pd->subport_rcvhdr_base) {
ret = -ENOMEM;
@ -1434,7 +1447,7 @@ static int init_subports(struct ipath_devdata *dd,
pd->subport_rcvegrbuf = vmalloc(pd->port_rcvegrbuf_chunks *
pd->port_rcvegrbuf_size *
num_slaves);
num_subports);
if (!pd->subport_rcvegrbuf) {
ret = -ENOMEM;
goto bail_rhdr;
@ -1443,6 +1456,12 @@ static int init_subports(struct ipath_devdata *dd,
pd->port_subport_cnt = uinfo->spu_subport_cnt;
pd->port_subport_id = uinfo->spu_subport_id;
pd->active_slaves = 1;
set_bit(IPATH_PORT_MASTER_UNINIT, &pd->port_flag);
memset(pd->subport_uregbase, 0, PAGE_SIZE * num_subports);
memset(pd->subport_rcvhdr_base, 0, size);
memset(pd->subport_rcvegrbuf, 0, pd->port_rcvegrbuf_chunks *
pd->port_rcvegrbuf_size *
num_subports);
goto bail;
bail_rhdr:
@ -1573,18 +1592,19 @@ static int find_best_unit(struct file *fp,
*/
if (!cpus_empty(current->cpus_allowed) &&
!cpus_full(current->cpus_allowed)) {
int ncpus = num_online_cpus(), curcpu = -1;
int ncpus = num_online_cpus(), curcpu = -1, nset = 0;
for (i = 0; i < ncpus; i++)
if (cpu_isset(i, current->cpus_allowed)) {
ipath_cdbg(PROC, "%s[%u] affinity set for "
"cpu %d\n", current->comm,
current->pid, i);
"cpu %d/%d\n", current->comm,
current->pid, i, ncpus);
curcpu = i;
nset++;
}
if (curcpu != -1) {
if (curcpu != -1 && nset != ncpus) {
if (npresent) {
prefunit = curcpu / (ncpus / npresent);
ipath_dbg("%s[%u] %d chips, %d cpus, "
ipath_cdbg(PROC,"%s[%u] %d chips, %d cpus, "
"%d cpus/chip, select unit %d\n",
current->comm, current->pid,
npresent, ncpus, ncpus / npresent,
@ -1764,11 +1784,17 @@ static int ipath_do_user_init(struct file *fp,
const struct ipath_user_info *uinfo)
{
int ret;
struct ipath_portdata *pd;
struct ipath_portdata *pd = port_fp(fp);
struct ipath_devdata *dd;
u32 head32;
pd = port_fp(fp);
/* Subports don't need to initialize anything since master did it. */
if (subport_fp(fp)) {
ret = wait_event_interruptible(pd->port_wait,
!test_bit(IPATH_PORT_MASTER_UNINIT, &pd->port_flag));
goto done;
}
dd = pd->port_dd;
if (uinfo->spu_rcvhdrsize) {
@ -1826,6 +1852,11 @@ static int ipath_do_user_init(struct file *fp,
dd->ipath_rcvctrl & ~INFINIPATH_R_TAILUPD);
ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
dd->ipath_rcvctrl);
/* Notify any waiting slaves */
if (pd->port_subport_cnt) {
clear_bit(IPATH_PORT_MASTER_UNINIT, &pd->port_flag);
wake_up(&pd->port_wait);
}
done:
return ret;
}
@ -2017,6 +2048,17 @@ static int ipath_get_slave_info(struct ipath_portdata *pd,
return ret;
}
static int ipath_force_pio_avail_update(struct ipath_devdata *dd)
{
u64 reg = dd->ipath_sendctrl;
clear_bit(IPATH_S_PIOBUFAVAILUPD, &reg);
ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, reg);
ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
return 0;
}
static ssize_t ipath_write(struct file *fp, const char __user *data,
size_t count, loff_t *off)
{
@ -2071,27 +2113,35 @@ static ssize_t ipath_write(struct file *fp, const char __user *data,
dest = &cmd.cmd.part_key;
src = &ucmd->cmd.part_key;
break;
case IPATH_CMD_SLAVE_INFO:
case __IPATH_CMD_SLAVE_INFO:
copy = sizeof(cmd.cmd.slave_mask_addr);
dest = &cmd.cmd.slave_mask_addr;
src = &ucmd->cmd.slave_mask_addr;
break;
case IPATH_CMD_PIOAVAILUPD: // force an update of PIOAvail reg
copy = 0;
src = NULL;
dest = NULL;
break;
default:
ret = -EINVAL;
goto bail;
}
if ((count - consumed) < copy) {
ret = -EINVAL;
goto bail;
if (copy) {
if ((count - consumed) < copy) {
ret = -EINVAL;
goto bail;
}
if (copy_from_user(dest, src, copy)) {
ret = -EFAULT;
goto bail;
}
consumed += copy;
}
if (copy_from_user(dest, src, copy)) {
ret = -EFAULT;
goto bail;
}
consumed += copy;
pd = port_fp(fp);
if (!pd && cmd.type != __IPATH_CMD_USER_INIT &&
cmd.type != IPATH_CMD_ASSIGN_PORT) {
@ -2137,11 +2187,14 @@ static ssize_t ipath_write(struct file *fp, const char __user *data,
case IPATH_CMD_SET_PART_KEY:
ret = ipath_set_part_key(pd, cmd.cmd.part_key);
break;
case IPATH_CMD_SLAVE_INFO:
case __IPATH_CMD_SLAVE_INFO:
ret = ipath_get_slave_info(pd,
(void __user *) (unsigned long)
cmd.cmd.slave_mask_addr);
break;
case IPATH_CMD_PIOAVAILUPD:
ret = ipath_force_pio_avail_update(pd->port_dd);
break;
}
if (ret >= 0)


@ -43,6 +43,9 @@
#include "ipath_kernel.h"
#include "ipath_registers.h"
static void ipath_setup_ht_setextled(struct ipath_devdata *, u64, u64);
/*
* This lists the InfiniPath registers, in the actual chip layout.
* This structure should never be directly accessed.
@ -208,8 +211,8 @@ static const struct ipath_kregs ipath_ht_kregs = {
.kr_serdesstatus = IPATH_KREG_OFFSET(SerdesStatus),
.kr_xgxsconfig = IPATH_KREG_OFFSET(XGXSConfig),
/*
* These should not be used directly via ipath_read_kreg64(),
* use them with ipath_read_kreg64_port(),
* These should not be used directly via ipath_write_kreg64(),
* use them with ipath_write_kreg64_port(),
*/
.kr_rcvhdraddr = IPATH_KREG_OFFSET(RcvHdrAddr0),
.kr_rcvhdrtailaddr = IPATH_KREG_OFFSET(RcvHdrTailAddr0)
@ -284,6 +287,14 @@ static const struct ipath_cregs ipath_ht_cregs = {
#define INFINIPATH_EXTS_MEMBIST_ENDTEST 0x0000000000004000
#define INFINIPATH_EXTS_MEMBIST_CORRECT 0x0000000000008000
/* TID entries (memory), HT-only */
#define INFINIPATH_RT_ADDR_MASK 0xFFFFFFFFFFULL /* 40 bits valid */
#define INFINIPATH_RT_VALID 0x8000000000000000ULL
#define INFINIPATH_RT_ADDR_SHIFT 0
#define INFINIPATH_RT_BUFSIZE_MASK 0x3FFFULL
#define INFINIPATH_RT_BUFSIZE_SHIFT 48
/*
* masks and bits that are different in different chips, or present only
* in one
@ -402,6 +413,14 @@ static const struct ipath_hwerror_msgs ipath_6110_hwerror_msgs[] = {
INFINIPATH_HWE_MSG(SERDESPLLFAILED, "SerDes PLL"),
};
#define TXE_PIO_PARITY ((INFINIPATH_HWE_TXEMEMPARITYERR_PIOBUF | \
INFINIPATH_HWE_TXEMEMPARITYERR_PIOPBC) \
<< INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT)
#define RXE_EAGER_PARITY (INFINIPATH_HWE_RXEMEMPARITYERR_EAGERTID \
<< INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT)
static int ipath_ht_txe_recover(struct ipath_devdata *);
/**
* ipath_ht_handle_hwerrors - display hardware errors.
* @dd: the infinipath device
@ -450,13 +469,12 @@ static void ipath_ht_handle_hwerrors(struct ipath_devdata *dd, char *msg,
/*
* make sure we get this much out, unless told to be quiet,
* it's a parity error we may recover from,
* or it's occurred within the last 5 seconds
*/
if ((hwerrs & ~(dd->ipath_lasthwerror |
((INFINIPATH_HWE_TXEMEMPARITYERR_PIOBUF |
INFINIPATH_HWE_TXEMEMPARITYERR_PIOPBC)
<< INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT))) ||
(ipath_debug & __IPATH_VERBDBG))
if ((hwerrs & ~(dd->ipath_lasthwerror | TXE_PIO_PARITY |
RXE_EAGER_PARITY)) ||
(ipath_debug & __IPATH_VERBDBG))
dev_info(&dd->pcidev->dev, "Hardware error: hwerr=0x%llx "
"(cleared)\n", (unsigned long long) hwerrs);
dd->ipath_lasthwerror |= hwerrs;
@ -467,7 +485,7 @@ static void ipath_ht_handle_hwerrors(struct ipath_devdata *dd, char *msg,
(hwerrs & ~dd->ipath_hwe_bitsextant));
ctrl = ipath_read_kreg32(dd, dd->ipath_kregs->kr_control);
if (ctrl & INFINIPATH_C_FREEZEMODE) {
if ((ctrl & INFINIPATH_C_FREEZEMODE) && !ipath_diag_inuse) {
/*
* parity errors in send memory are recoverable,
* just cancel the send (if indicated in * sendbuffererror),
@ -476,50 +494,14 @@ static void ipath_ht_handle_hwerrors(struct ipath_devdata *dd, char *msg,
* occur if a processor speculative read is done to the PIO
* buffer while we are sending a packet, for example.
*/
if (hwerrs & ((INFINIPATH_HWE_TXEMEMPARITYERR_PIOBUF |
INFINIPATH_HWE_TXEMEMPARITYERR_PIOPBC)
<< INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT)) {
ipath_stats.sps_txeparity++;
ipath_dbg("Recovering from TXE parity error (%llu), "
"hwerrstatus=%llx\n",
(unsigned long long) ipath_stats.sps_txeparity,
(unsigned long long) hwerrs);
ipath_disarm_senderrbufs(dd);
hwerrs &= ~((INFINIPATH_HWE_TXEMEMPARITYERR_PIOBUF |
INFINIPATH_HWE_TXEMEMPARITYERR_PIOPBC)
<< INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT);
if (!hwerrs) { /* else leave in freeze mode */
ipath_write_kreg(dd,
dd->ipath_kregs->kr_control,
dd->ipath_control);
return;
}
}
if (hwerrs) {
/*
* if any set that we aren't ignoring; only
* make the complaint once, in case it's stuck
* or recurring, and we get here multiple
* times.
*/
if (dd->ipath_flags & IPATH_INITTED) {
ipath_dev_err(dd, "Fatal Hardware Error (freeze "
"mode), no longer usable, SN %.16s\n",
dd->ipath_serial);
isfatal = 1;
}
*dd->ipath_statusp &= ~IPATH_STATUS_IB_READY;
/* mark as having had error */
*dd->ipath_statusp |= IPATH_STATUS_HWERROR;
/*
* mark as not usable, at a minimum until driver
* is reloaded, probably until reboot, since no
* other reset is possible.
*/
dd->ipath_flags &= ~IPATH_INITTED;
} else {
ipath_dbg("Clearing freezemode on ignored hardware "
"error\n");
if ((hwerrs & TXE_PIO_PARITY) && ipath_ht_txe_recover(dd))
hwerrs &= ~TXE_PIO_PARITY;
if (hwerrs & RXE_EAGER_PARITY)
ipath_dev_err(dd, "RXE parity, Eager TID error is not "
"recoverable\n");
if (!hwerrs) {
ipath_dbg("Clearing freezemode on ignored or "
"recovered hardware error\n");
ctrl &= ~INFINIPATH_C_FREEZEMODE;
ipath_write_kreg(dd, dd->ipath_kregs->kr_control,
ctrl);
@ -587,7 +569,39 @@ static void ipath_ht_handle_hwerrors(struct ipath_devdata *dd, char *msg,
dd->ipath_hwerrmask);
}
ipath_dev_err(dd, "%s hardware error\n", msg);
if (hwerrs) {
/*
* if any set that we aren't ignoring; only
* make the complaint once, in case it's stuck
* or recurring, and we get here multiple
* times.
* force link down, so switch knows, and
* LEDs are turned off
*/
if (dd->ipath_flags & IPATH_INITTED) {
ipath_set_linkstate(dd, IPATH_IB_LINKDOWN);
ipath_setup_ht_setextled(dd,
INFINIPATH_IBCS_L_STATE_DOWN,
INFINIPATH_IBCS_LT_STATE_DISABLED);
ipath_dev_err(dd, "Fatal Hardware Error (freeze "
"mode), no longer usable, SN %.16s\n",
dd->ipath_serial);
isfatal = 1;
}
*dd->ipath_statusp &= ~IPATH_STATUS_IB_READY;
/* mark as having had error */
*dd->ipath_statusp |= IPATH_STATUS_HWERROR;
/*
* mark as not usable, at a minimum until driver
* is reloaded, probably until reboot, since no
* other reset is possible.
*/
dd->ipath_flags &= ~IPATH_INITTED;
}
else
*msg = 0; /* recovered from all of them */
if (*msg)
ipath_dev_err(dd, "%s hardware error\n", msg);
if (isfatal && !ipath_diag_inuse && dd->ipath_freezemsg)
/*
* for status file; if no trailing brace is copied,
@ -658,7 +672,8 @@ static int ipath_ht_boardname(struct ipath_devdata *dd, char *name,
if (n)
snprintf(name, namelen, "%s", n);
if (dd->ipath_majrev != 3 || (dd->ipath_minrev < 2 || dd->ipath_minrev > 3)) {
if (dd->ipath_majrev != 3 || (dd->ipath_minrev < 2 ||
dd->ipath_minrev > 3)) {
/*
* This version of the driver only supports Rev 3.2 and 3.3
*/
@ -1163,6 +1178,8 @@ static void ipath_ht_init_hwerrors(struct ipath_devdata *dd)
if (!(extsval & INFINIPATH_EXTS_MEMBIST_ENDTEST))
ipath_dev_err(dd, "MemBIST did not complete!\n");
if (extsval & INFINIPATH_EXTS_MEMBIST_CORRECT)
ipath_dbg("MemBIST corrected\n");
ipath_check_htlink(dd);
@ -1366,6 +1383,9 @@ static void ipath_ht_put_tid(struct ipath_devdata *dd,
u64 __iomem *tidptr, u32 type,
unsigned long pa)
{
if (!dd->ipath_kregbase)
return;
if (pa != dd->ipath_tidinvalid) {
if (unlikely((pa & ~INFINIPATH_RT_ADDR_MASK))) {
dev_info(&dd->pcidev->dev,
@ -1382,10 +1402,10 @@ static void ipath_ht_put_tid(struct ipath_devdata *dd,
pa |= lenvalid | INFINIPATH_RT_VALID;
}
}
if (dd->ipath_kregbase)
writeq(pa, tidptr);
writeq(pa, tidptr);
}
/**
* ipath_ht_clear_tid - clear all TID entries for a port, expected and eager
* @dd: the infinipath device
@ -1515,7 +1535,7 @@ static int ipath_ht_early_init(struct ipath_devdata *dd)
INFINIPATH_S_ABORT);
ipath_get_eeprom_info(dd);
if(dd->ipath_boardrev == 5 && dd->ipath_serial[0] == '1' &&
if (dd->ipath_boardrev == 5 && dd->ipath_serial[0] == '1' &&
dd->ipath_serial[1] == '2' && dd->ipath_serial[2] == '8') {
/*
* Later production QHT7040 has same changes as QHT7140, so
@ -1528,6 +1548,24 @@ static int ipath_ht_early_init(struct ipath_devdata *dd)
return 0;
}
static int ipath_ht_txe_recover(struct ipath_devdata *dd)
{
int cnt = ++ipath_stats.sps_txeparity;
if (cnt >= IPATH_MAX_PARITY_ATTEMPTS) {
if (cnt == IPATH_MAX_PARITY_ATTEMPTS)
ipath_dev_err(dd,
"Too many attempts to recover from "
"TXE parity, giving up\n");
return 0;
}
dev_info(&dd->pcidev->dev,
"Recovering from TXE PIO parity error\n");
ipath_disarm_senderrbufs(dd, 1);
return 1;
}
/**
* ipath_init_ht_get_base_info - set chip-specific flags for user code
* @dd: the infinipath device


@ -43,6 +43,8 @@
#include "ipath_kernel.h"
#include "ipath_registers.h"
static void ipath_setup_pe_setextled(struct ipath_devdata *, u64, u64);
/*
* This file contains all the chip-specific register information and
* access functions for the QLogic InfiniPath PCI-Express chip.
@ -207,8 +209,8 @@ static const struct ipath_kregs ipath_pe_kregs = {
.kr_ibpllcfg = IPATH_KREG_OFFSET(IBPLLCfg),
/*
* These should not be used directly via ipath_read_kreg64(),
* use them with ipath_read_kreg64_port()
* These should not be used directly via ipath_write_kreg64(),
* use them with ipath_write_kreg64_port(),
*/
.kr_rcvhdraddr = IPATH_KREG_OFFSET(RcvHdrAddr0),
.kr_rcvhdrtailaddr = IPATH_KREG_OFFSET(RcvHdrTailAddr0),
@ -321,6 +323,12 @@ static const struct ipath_hwerror_msgs ipath_6120_hwerror_msgs[] = {
INFINIPATH_HWE_MSG(SERDESPLLFAILED, "SerDes PLL"),
};
#define TXE_PIO_PARITY ((INFINIPATH_HWE_TXEMEMPARITYERR_PIOBUF | \
INFINIPATH_HWE_TXEMEMPARITYERR_PIOPBC) \
<< INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT)
static int ipath_pe_txe_recover(struct ipath_devdata *);
/**
* ipath_pe_handle_hwerrors - display hardware errors.
* @dd: the infinipath device
@ -394,32 +402,21 @@ static void ipath_pe_handle_hwerrors(struct ipath_devdata *dd, char *msg,
* occur if a processor speculative read is done to the PIO
* buffer while we are sending a packet, for example.
*/
if (hwerrs & ((INFINIPATH_HWE_TXEMEMPARITYERR_PIOBUF |
INFINIPATH_HWE_TXEMEMPARITYERR_PIOPBC)
<< INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT)) {
ipath_stats.sps_txeparity++;
ipath_dbg("Recovering from TXE parity error (%llu), "
"hwerrstatus=%llx\n",
(unsigned long long) ipath_stats.sps_txeparity,
(unsigned long long) hwerrs);
ipath_disarm_senderrbufs(dd);
hwerrs &= ~((INFINIPATH_HWE_TXEMEMPARITYERR_PIOBUF |
INFINIPATH_HWE_TXEMEMPARITYERR_PIOPBC)
<< INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT);
if (!hwerrs) { /* else leave in freeze mode */
ipath_write_kreg(dd,
dd->ipath_kregs->kr_control,
dd->ipath_control);
return;
}
}
if ((hwerrs & TXE_PIO_PARITY) && ipath_pe_txe_recover(dd))
hwerrs &= ~TXE_PIO_PARITY;
if (hwerrs) {
/*
* if any set that we aren't ignoring only make the
* complaint once, in case it's stuck or recurring,
* and we get here multiple times
* Force link down, so switch knows, and
* LEDs are turned off
*/
if (dd->ipath_flags & IPATH_INITTED) {
ipath_set_linkstate(dd, IPATH_IB_LINKDOWN);
ipath_setup_pe_setextled(dd,
INFINIPATH_IBCS_L_STATE_DOWN,
INFINIPATH_IBCS_LT_STATE_DISABLED);
ipath_dev_err(dd, "Fatal Hardware Error (freeze "
"mode), no longer usable, SN %.16s\n",
dd->ipath_serial);
@ -493,7 +490,8 @@ static void ipath_pe_handle_hwerrors(struct ipath_devdata *dd, char *msg,
dd->ipath_hwerrmask);
}
ipath_dev_err(dd, "%s hardware error\n", msg);
if (*msg)
ipath_dev_err(dd, "%s hardware error\n", msg);
if (isfatal && !ipath_diag_inuse && dd->ipath_freezemsg) {
/*
* for /sys status file ; if no trailing } is copied, we'll
@ -581,6 +579,8 @@ static void ipath_pe_init_hwerrors(struct ipath_devdata *dd)
if (!(extsval & INFINIPATH_EXTS_MEMBIST_ENDTEST))
ipath_dev_err(dd, "MemBIST did not complete!\n");
if (extsval & INFINIPATH_EXTS_MEMBIST_FOUND)
ipath_dbg("MemBIST corrected\n");
val = ~0ULL; /* barring bugs, all hwerrors become interrupts, */
@ -1330,6 +1330,35 @@ static void ipath_pe_free_irq(struct ipath_devdata *dd)
dd->ipath_irq = 0;
}
/*
* On platforms using this chip, and not having ordered WC stores, we
* can get TXE parity errors due to speculative reads to the PIO buffers,
* and this, due to a chip bug can result in (many) false parity error
* reports. So it's a debug print on those, and an info print on systems
* where the speculative reads don't occur.
* Because we can get lots of false errors, we have no upper limit
* on recovery attempts on those platforms.
*/
static int ipath_pe_txe_recover(struct ipath_devdata *dd)
{
if (ipath_unordered_wc())
ipath_dbg("Recovering from TXE PIO parity error\n");
else {
int cnt = ++ipath_stats.sps_txeparity;
if (cnt >= IPATH_MAX_PARITY_ATTEMPTS) {
if (cnt == IPATH_MAX_PARITY_ATTEMPTS)
ipath_dev_err(dd,
"Too many attempts to recover from "
"TXE parity, giving up\n");
return 0;
}
dev_info(&dd->pcidev->dev,
"Recovering from TXE PIO parity error\n");
}
ipath_disarm_senderrbufs(dd, 1);
return 1;
}
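Illustrative note (not part of the diff): both recover helpers above follow the same warn-once, bounded-retry pattern, capped at IPATH_MAX_PARITY_ATTEMPTS (defined as 10000 in ipath_kernel.h later in this diff). A minimal userspace sketch of that pattern, with a deliberately tiny cap so it terminates quickly:

#include <stdio.h>

#define MAX_PARITY_ATTEMPTS 3   /* the driver's IPATH_MAX_PARITY_ATTEMPTS is 10000 */

/* Returns 1 while recovery may be attempted, 0 once the budget is spent;
 * the "giving up" complaint is printed exactly once, on the attempt that
 * reaches the limit, even if we keep getting called. */
static int txe_recover(unsigned long *attempts)
{
	unsigned long cnt = ++*attempts;

	if (cnt >= MAX_PARITY_ATTEMPTS) {
		if (cnt == MAX_PARITY_ATTEMPTS)
			fprintf(stderr, "too many TXE parity recovery attempts, giving up\n");
		return 0;
	}
	printf("recovering from TXE PIO parity error (attempt %lu)\n", cnt);
	return 1;
}

int main(void)
{
	unsigned long attempts = 0;
	int i;

	for (i = 0; i < 5; i++)
		txe_recover(&attempts);   /* warns once on attempt 3, then stays quiet */
	return 0;
}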
/**
* ipath_init_iba6120_funcs - set up the chip-specific function pointers
* @dd: the infinipath device


@ -216,6 +216,20 @@ static int bringup_link(struct ipath_devdata *dd)
return ret;
}
static struct ipath_portdata *create_portdata0(struct ipath_devdata *dd)
{
struct ipath_portdata *pd = NULL;
pd = kzalloc(sizeof(*pd), GFP_KERNEL);
if (pd) {
pd->port_dd = dd;
pd->port_cnt = 1;
/* The port 0 pkey table is used by the layer interface. */
pd->port_pkeys[0] = IPATH_DEFAULT_P_KEY;
}
return pd;
}
static int init_chip_first(struct ipath_devdata *dd,
struct ipath_portdata **pdp)
{
@ -271,20 +285,16 @@ static int init_chip_first(struct ipath_devdata *dd,
goto done;
}
dd->ipath_pd[0] = kzalloc(sizeof(*pd), GFP_KERNEL);
pd = create_portdata0(dd);
if (!dd->ipath_pd[0]) {
if (!pd) {
ipath_dev_err(dd, "Unable to allocate portdata for port "
"0, failing\n");
ret = -ENOMEM;
goto done;
}
pd = dd->ipath_pd[0];
pd->port_dd = dd;
pd->port_port = 0;
pd->port_cnt = 1;
/* The port 0 pkey table is used by the layer interface. */
pd->port_pkeys[0] = IPATH_DEFAULT_P_KEY;
dd->ipath_pd[0] = pd;
dd->ipath_rcvtidcnt =
ipath_read_kreg32(dd, dd->ipath_kregs->kr_rcvtidcnt);
dd->ipath_rcvtidbase =
@ -590,6 +600,10 @@ static int init_housekeeping(struct ipath_devdata *dd,
goto done;
}
/* clear diagctrl register, in case diags were running and crashed */
ipath_write_kreg (dd, dd->ipath_kregs->kr_hwdiagctrl, 0);
/* clear the initial reset flag, in case first driver load */
ipath_write_kreg(dd, dd->ipath_kregs->kr_errorclear,
INFINIPATH_E_RESET);
@ -668,6 +682,7 @@ int ipath_init_chip(struct ipath_devdata *dd, int reinit)
{
int ret = 0, i;
u32 val32, kpiobufs;
u32 piobufs, uports;
u64 val;
struct ipath_portdata *pd = NULL; /* keep gcc4 happy */
gfp_t gfp_flags = GFP_USER | __GFP_COMP;
@ -702,16 +717,17 @@ int ipath_init_chip(struct ipath_devdata *dd, int reinit)
* the in memory DMA'ed copies of the registers. This has to
* be done early, before we calculate lastport, etc.
*/
val = dd->ipath_piobcnt2k + dd->ipath_piobcnt4k;
piobufs = dd->ipath_piobcnt2k + dd->ipath_piobcnt4k;
/*
* calc number of pioavail registers, and save it; we have 2
* bits per buffer.
*/
dd->ipath_pioavregs = ALIGN(val, sizeof(u64) * BITS_PER_BYTE / 2)
dd->ipath_pioavregs = ALIGN(piobufs, sizeof(u64) * BITS_PER_BYTE / 2)
/ (sizeof(u64) * BITS_PER_BYTE / 2);
uports = dd->ipath_cfgports ? dd->ipath_cfgports - 1 : 0;
if (ipath_kpiobufs == 0) {
/* not set by user (this is default) */
if ((dd->ipath_piobcnt2k + dd->ipath_piobcnt4k) > 128)
if (piobufs >= (uports * IPATH_MIN_USER_PORT_BUFCNT) + 32)
kpiobufs = 32;
else
kpiobufs = 16;
@ -719,31 +735,25 @@ int ipath_init_chip(struct ipath_devdata *dd, int reinit)
else
kpiobufs = ipath_kpiobufs;
if (kpiobufs >
(dd->ipath_piobcnt2k + dd->ipath_piobcnt4k -
(dd->ipath_cfgports * IPATH_MIN_USER_PORT_BUFCNT))) {
i = dd->ipath_piobcnt2k + dd->ipath_piobcnt4k -
(dd->ipath_cfgports * IPATH_MIN_USER_PORT_BUFCNT);
if (kpiobufs + (uports * IPATH_MIN_USER_PORT_BUFCNT) > piobufs) {
i = (int) piobufs -
(int) (uports * IPATH_MIN_USER_PORT_BUFCNT);
if (i < 0)
i = 0;
dev_info(&dd->pcidev->dev, "Allocating %d PIO bufs for "
"kernel leaves too few for %d user ports "
dev_info(&dd->pcidev->dev, "Allocating %d PIO bufs of "
"%d for kernel leaves too few for %d user ports "
"(%d each); using %u\n", kpiobufs,
dd->ipath_cfgports - 1,
IPATH_MIN_USER_PORT_BUFCNT, i);
piobufs, uports, IPATH_MIN_USER_PORT_BUFCNT, i);
/*
* shouldn't change ipath_kpiobufs, because could be
* different for different devices...
*/
kpiobufs = i;
}
dd->ipath_lastport_piobuf =
dd->ipath_piobcnt2k + dd->ipath_piobcnt4k - kpiobufs;
dd->ipath_pbufsport = dd->ipath_cfgports > 1
? dd->ipath_lastport_piobuf / (dd->ipath_cfgports - 1)
: 0;
val32 = dd->ipath_lastport_piobuf -
(dd->ipath_pbufsport * (dd->ipath_cfgports - 1));
dd->ipath_lastport_piobuf = piobufs - kpiobufs;
dd->ipath_pbufsport =
uports ? dd->ipath_lastport_piobuf / uports : 0;
val32 = dd->ipath_lastport_piobuf - (dd->ipath_pbufsport * uports);
if (val32 > 0) {
ipath_dbg("allocating %u pbufs/port leaves %u unused, "
"add to kernel\n", dd->ipath_pbufsport, val32);
@ -754,8 +764,7 @@ int ipath_init_chip(struct ipath_devdata *dd, int reinit)
dd->ipath_lastpioindex = dd->ipath_lastport_piobuf;
ipath_cdbg(VERBOSE, "%d PIO bufs for kernel out of %d total %u "
"each for %u user ports\n", kpiobufs,
dd->ipath_piobcnt2k + dd->ipath_piobcnt4k,
dd->ipath_pbufsport, dd->ipath_cfgports - 1);
piobufs, dd->ipath_pbufsport, uports);
dd->ipath_f_early_init(dd);
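Illustrative arithmetic (not part of the diff): with the reworked accounting above, the kernel's share of PIO buffers is clamped so every user port keeps at least IPATH_MIN_USER_PORT_BUFCNT buffers. A small standalone sketch with purely hypothetical numbers (120 buffers, 4 user ports, an assumed minimum of 8 per port):

#include <stdio.h>

/* Hypothetical values, for illustration only. */
#define MIN_USER_PORT_BUFCNT 8

int main(void)
{
	unsigned piobufs = 120;            /* hypothetical total PIO buffers (2k + 4k) */
	unsigned uports  = 4;              /* configured ports minus the kernel port */
	unsigned kpiobufs, lastport_piobuf, pbufsport, leftover;

	/* default: 32 kernel buffers if the user ports can still get their minimum */
	kpiobufs = (piobufs >= uports * MIN_USER_PORT_BUFCNT + 32) ? 32 : 16;

	/* clamp if the kernel share would squeeze the user ports */
	if (kpiobufs + uports * MIN_USER_PORT_BUFCNT > piobufs) {
		int i = (int) piobufs - (int) (uports * MIN_USER_PORT_BUFCNT);
		kpiobufs = i < 0 ? 0 : (unsigned) i;
	}

	lastport_piobuf = piobufs - kpiobufs;
	pbufsport = uports ? lastport_piobuf / uports : 0;
	leftover = lastport_piobuf - pbufsport * uports;

	printf("kernel %u, per-user-port %u, leftover %u\n",
	       kpiobufs, pbufsport, leftover);   /* -> kernel 32, per-user-port 22, leftover 0 */
	return 0;
}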
@ -839,11 +848,24 @@ int ipath_init_chip(struct ipath_devdata *dd, int reinit)
* Set up the port 0 (kernel) rcvhdr q and egr TIDs. If doing
* re-init, the simplest way to handle this is to free
* existing, and re-allocate.
* Need to re-create rest of port 0 portdata as well.
*/
if (reinit) {
struct ipath_portdata *pd = dd->ipath_pd[0];
dd->ipath_pd[0] = NULL;
ipath_free_pddata(dd, pd);
/* Alloc and init new ipath_portdata for port0,
* Then free old pd. Could lead to fragmentation, but also
* makes later support for hot-swap easier.
*/
struct ipath_portdata *npd;
npd = create_portdata0(dd);
if (npd) {
ipath_free_pddata(dd, pd);
dd->ipath_pd[0] = pd = npd;
} else {
ipath_dev_err(dd, "Unable to allocate portdata for"
" port 0, failing\n");
ret = -ENOMEM;
goto done;
}
}
dd->ipath_f_tidtemplate(dd);
ret = ipath_create_rcvhdrq(dd, pd);


@ -37,11 +37,40 @@
#include "ipath_verbs.h"
#include "ipath_common.h"
/*
* clear (write) a pio buffer, to clear a parity error. This routine
* should only be called when in freeze mode, and the buffer should be
* canceled afterwards.
*/
static void ipath_clrpiobuf(struct ipath_devdata *dd, u32 pnum)
{
u32 __iomem *pbuf;
u32 dwcnt; /* dword count to write */
if (pnum < dd->ipath_piobcnt2k) {
pbuf = (u32 __iomem *) (dd->ipath_pio2kbase + pnum *
dd->ipath_palign);
dwcnt = dd->ipath_piosize2k >> 2;
}
else {
pbuf = (u32 __iomem *) (dd->ipath_pio4kbase +
(pnum - dd->ipath_piobcnt2k) * dd->ipath_4kalign);
dwcnt = dd->ipath_piosize4k >> 2;
}
dev_info(&dd->pcidev->dev,
"Rewrite PIO buffer %u, to recover from parity error\n",
pnum);
*pbuf = dwcnt+1; /* no flush required, since already in freeze */
while(--dwcnt)
*pbuf++ = 0;
}
/*
* Called when we might have an error that is specific to a particular
* PIO buffer, and may need to cancel that buffer, so it can be re-used.
* If rewrite is true, and bits are set in the sendbufferror registers,
* we'll write to the buffer, for error recovery on parity errors.
*/
void ipath_disarm_senderrbufs(struct ipath_devdata *dd)
void ipath_disarm_senderrbufs(struct ipath_devdata *dd, int rewrite)
{
u32 piobcnt;
unsigned long sbuf[4];
@ -74,8 +103,11 @@ void ipath_disarm_senderrbufs(struct ipath_devdata *dd)
}
for (i = 0; i < piobcnt; i++)
if (test_bit(i, sbuf))
if (test_bit(i, sbuf)) {
if (rewrite)
ipath_clrpiobuf(dd, i);
ipath_disarm_piobufs(dd, i, 1);
}
dd->ipath_lastcancel = jiffies+3; /* no armlaunch for a bit */
}
}
@ -114,7 +146,7 @@ static u64 handle_e_sum_errs(struct ipath_devdata *dd, ipath_err_t errs)
{
u64 ignore_this_time = 0;
ipath_disarm_senderrbufs(dd);
ipath_disarm_senderrbufs(dd, 0);
if ((errs & E_SUM_LINK_PKTERRS) &&
!(dd->ipath_flags & IPATH_LINKACTIVE)) {
/*
@ -403,10 +435,13 @@ static void handle_supp_msgs(struct ipath_devdata *dd,
* happens so often we never want to count it.
*/
if (dd->ipath_lasterror & ~INFINIPATH_E_IBSTATUSCHANGED) {
ipath_decode_err(msg, sizeof msg, dd->ipath_lasterror &
~INFINIPATH_E_IBSTATUSCHANGED);
int iserr;
iserr = ipath_decode_err(msg, sizeof msg,
dd->ipath_lasterror &
~INFINIPATH_E_IBSTATUSCHANGED);
if (dd->ipath_lasterror &
~(INFINIPATH_E_RRCVEGRFULL | INFINIPATH_E_RRCVHDRFULL))
~(INFINIPATH_E_RRCVEGRFULL |
INFINIPATH_E_RRCVHDRFULL | INFINIPATH_E_PKTERRS))
ipath_dev_err(dd, "Suppressed %u messages for "
"fast-repeating errors (%s) (%llx)\n",
supp_msgs, msg,
@ -420,8 +455,13 @@ static void handle_supp_msgs(struct ipath_devdata *dd,
* them. So only complain about these at debug
* level.
*/
ipath_dbg("Suppressed %u messages for %s\n",
supp_msgs, msg);
if (iserr)
ipath_dbg("Suppressed %u messages for %s\n",
supp_msgs, msg);
else
ipath_cdbg(ERRPKT,
"Suppressed %u messages for %s\n",
supp_msgs, msg);
}
}
}
@ -462,7 +502,7 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
{
char msg[512];
u64 ignore_this_time = 0;
int i;
int i, iserr = 0;
int chkerrpkts = 0, noprint = 0;
unsigned supp_msgs;
@ -502,6 +542,7 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
}
if (supp_msgs == 250000) {
int s_iserr;
/*
* It's not entirely reasonable assuming that the errors set
* in the last clear period are all responsible for the
@ -511,17 +552,17 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
dd->ipath_maskederrs |= dd->ipath_lasterror | errs;
ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask,
~dd->ipath_maskederrs);
ipath_decode_err(msg, sizeof msg,
s_iserr = ipath_decode_err(msg, sizeof msg,
(dd->ipath_maskederrs & ~dd->
ipath_ignorederrs));
if ((dd->ipath_maskederrs & ~dd->ipath_ignorederrs) &
~(INFINIPATH_E_RRCVEGRFULL | INFINIPATH_E_RRCVHDRFULL))
ipath_dev_err(dd, "Disabling error(s) %llx because "
"occurring too frequently (%s)\n",
(unsigned long long)
(dd->ipath_maskederrs &
~dd->ipath_ignorederrs), msg);
~(INFINIPATH_E_RRCVEGRFULL |
INFINIPATH_E_RRCVHDRFULL | INFINIPATH_E_PKTERRS))
ipath_dev_err(dd, "Temporarily disabling "
"error(s) %llx reporting; too frequent (%s)\n",
(unsigned long long) (dd->ipath_maskederrs &
~dd->ipath_ignorederrs), msg);
else {
/*
* rcvegrfull and rcvhdrqfull are "normal",
@ -530,8 +571,15 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
* processing them. So only complain about
* these at debug level.
*/
ipath_dbg("Disabling frequent queue full errors "
"(%s)\n", msg);
if (s_iserr)
ipath_dbg("Temporarily disabling reporting "
"too frequent queue full errors (%s)\n",
msg);
else
ipath_cdbg(ERRPKT,
"Temporarily disabling reporting too"
" frequent packet errors (%s)\n",
msg);
}
/*
@ -589,6 +637,8 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
ipath_stats.sps_crcerrs++;
chkerrpkts = 1;
}
iserr = errs & ~(E_SUM_PKTERRS | INFINIPATH_E_PKTERRS);
/*
* We don't want to print these two as they happen, or we can make
@ -677,8 +727,13 @@ static int handle_errors(struct ipath_devdata *dd, ipath_err_t errs)
*dd->ipath_statusp &= ~IPATH_STATUS_IB_CONF;
}
if (!noprint && *msg)
ipath_dev_err(dd, "%s error\n", msg);
if (!noprint && *msg) {
if (iserr)
ipath_dev_err(dd, "%s error\n", msg);
else
dev_info(&dd->pcidev->dev, "%s packet problems\n",
msg);
}
if (dd->ipath_state_wanted & dd->ipath_flags) {
ipath_cdbg(VERBOSE, "driver wanted state %x, iflags now %x, "
"waking\n", dd->ipath_state_wanted,
@ -819,11 +874,10 @@ static void handle_urcv(struct ipath_devdata *dd, u32 istat)
struct ipath_portdata *pd = dd->ipath_pd[i];
if (portr & (1 << i) && pd && pd->port_cnt &&
test_bit(IPATH_PORT_WAITING_RCV, &pd->port_flag)) {
int rcbit;
clear_bit(IPATH_PORT_WAITING_RCV,
&pd->port_flag);
rcbit = i + INFINIPATH_R_INTRAVAIL_SHIFT;
clear_bit(1UL << rcbit, &dd->ipath_rcvctrl);
clear_bit(i + INFINIPATH_R_INTRAVAIL_SHIFT,
&dd->ipath_rcvctrl);
wake_up_interruptible(&pd->port_wait);
rcvdint = 1;
}
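Illustrative note (not part of the diff): the hunk above also fixes a subtle bug; the old code passed a mask (1UL << rcbit) where clear_bit() expects a bit number, so the wrong bit of ipath_rcvctrl was cleared. A tiny userspace model of the difference, with made-up bit positions:

#include <stdio.h>

/* Minimal stand-in for the kernel's clear_bit(): the first argument is
 * a bit *number*, not a mask. */
static void clear_bit_nr(unsigned nr, unsigned long *word)
{
	*word &= ~(1UL << nr);
}

int main(void)
{
	unsigned long rcvctrl = ~0UL;
	unsigned rcbit = 3;                     /* hypothetical interrupt-avail bit */

	clear_bit_nr(1U << rcbit, &rcvctrl);    /* old, buggy: clears bit 8 */
	clear_bit_nr(rcbit, &rcvctrl);          /* fixed: clears bit 3 as intended */
	printf("%#lx\n", rcvctrl);              /* bits 3 and 8 are now both clear */
	return 0;
}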


@ -590,7 +590,6 @@ int ipath_enable_wc(struct ipath_devdata *dd);
void ipath_disable_wc(struct ipath_devdata *dd);
int ipath_count_units(int *npresentp, int *nupp, u32 *maxportsp);
void ipath_shutdown_device(struct ipath_devdata *);
void ipath_disarm_senderrbufs(struct ipath_devdata *);
struct file_operations;
int ipath_cdev_init(int minor, char *name, const struct file_operations *fops,
@ -611,7 +610,7 @@ struct sk_buff *ipath_alloc_skb(struct ipath_devdata *dd, gfp_t);
extern int ipath_diag_inuse;
irqreturn_t ipath_intr(int irq, void *devid);
void ipath_decode_err(char *buf, size_t blen, ipath_err_t err);
int ipath_decode_err(char *buf, size_t blen, ipath_err_t err);
#if __IPATH_INFO || __IPATH_DBG
extern const char *ipath_ibcstatus_str[];
#endif
@ -701,6 +700,8 @@ int ipath_set_rx_pol_inv(struct ipath_devdata *dd, u8 new_pol_inv);
#define IPATH_PORT_WAITING_RCV 2
/* waiting for a PIO buffer to be available */
#define IPATH_PORT_WAITING_PIO 3
/* master has not finished initializing */
#define IPATH_PORT_MASTER_UNINIT 4
/* free up any allocated data at closes */
void ipath_free_data(struct ipath_portdata *dd);
@ -711,6 +712,7 @@ void ipath_init_iba6120_funcs(struct ipath_devdata *);
void ipath_init_iba6110_funcs(struct ipath_devdata *);
void ipath_get_eeprom_info(struct ipath_devdata *);
u64 ipath_snap_cntr(struct ipath_devdata *, ipath_creg);
void ipath_disarm_senderrbufs(struct ipath_devdata *, int);
/*
* number of words used for protocol header if not set by ipath_userinit();
@ -754,8 +756,6 @@ int ipath_eeprom_write(struct ipath_devdata *, u8, const void *, int);
/* these are used for the registers that vary with port */
void ipath_write_kreg_port(const struct ipath_devdata *, ipath_kreg,
unsigned, u64);
u64 ipath_read_kreg64_port(const struct ipath_devdata *, ipath_kreg,
unsigned);
/*
* We could have a single register get/put routine, that takes a group type,
@ -897,6 +897,8 @@ dma_addr_t ipath_map_single(struct pci_dev *, void *, size_t, int);
extern unsigned ipath_debug; /* debugging bit mask */
#define IPATH_MAX_PARITY_ATTEMPTS 10000 /* max times to try recovery */
const char *ipath_get_unit_name(int unit);
extern struct mutex ipath_mutex;


@ -61,7 +61,7 @@ int ipath_alloc_lkey(struct ipath_lkey_table *rkt, struct ipath_mregion *mr)
r = (r + 1) & (rkt->max - 1);
if (r == n) {
spin_unlock_irqrestore(&rkt->lock, flags);
ipath_dbg(KERN_INFO "LKEY table full\n");
ipath_dbg("LKEY table full\n");
ret = 0;
goto bail;
}
@ -133,6 +133,12 @@ int ipath_lkey_ok(struct ipath_qp *qp, struct ipath_sge *isge,
* being reversible by calling bus_to_virt().
*/
if (sge->lkey == 0) {
struct ipath_pd *pd = to_ipd(qp->ibqp.pd);
if (pd->user) {
ret = 0;
goto bail;
}
isge->mr = NULL;
isge->vaddr = (void *) sge->addr;
isge->length = sge->length;
@ -206,6 +212,12 @@ int ipath_rkey_ok(struct ipath_qp *qp, struct ipath_sge_state *ss,
* (see ipath_get_dma_mr and ipath_dma.c).
*/
if (rkey == 0) {
struct ipath_pd *pd = to_ipd(qp->ibqp.pd);
if (pd->user) {
ret = 0;
goto bail;
}
sge->mr = NULL;
sge->vaddr = (void *) vaddr;
sge->length = len;


@ -210,9 +210,15 @@ struct ib_mr *ipath_reg_user_mr(struct ib_pd *pd, struct ib_umem *region,
m = 0;
n = 0;
list_for_each_entry(chunk, &region->chunk_list, list) {
for (i = 0; i < chunk->nmap; i++) {
mr->mr.map[m]->segs[n].vaddr =
page_address(chunk->page_list[i].page);
for (i = 0; i < chunk->nents; i++) {
void *vaddr;
vaddr = page_address(chunk->page_list[i].page);
if (!vaddr) {
ret = ERR_PTR(-EINVAL);
goto bail;
}
mr->mr.map[m]->segs[n].vaddr = vaddr;
mr->mr.map[m]->segs[n].length = region->page_size;
n++;
if (n == IPATH_SEGSZ) {


@ -81,11 +81,51 @@ static u32 credit_table[31] = {
32768 /* 1E */
};
static u32 alloc_qpn(struct ipath_qp_table *qpt)
static void get_map_page(struct ipath_qp_table *qpt, struct qpn_map *map)
{
unsigned long page = get_zeroed_page(GFP_KERNEL);
unsigned long flags;
/*
* Free the page if someone raced with us installing it.
*/
spin_lock_irqsave(&qpt->lock, flags);
if (map->page)
free_page(page);
else
map->page = (void *)page;
spin_unlock_irqrestore(&qpt->lock, flags);
}
static int alloc_qpn(struct ipath_qp_table *qpt, enum ib_qp_type type)
{
u32 i, offset, max_scan, qpn;
struct qpn_map *map;
u32 ret;
u32 ret = -1;
if (type == IB_QPT_SMI)
ret = 0;
else if (type == IB_QPT_GSI)
ret = 1;
if (ret != -1) {
map = &qpt->map[0];
if (unlikely(!map->page)) {
get_map_page(qpt, map);
if (unlikely(!map->page)) {
ret = -ENOMEM;
goto bail;
}
}
if (!test_and_set_bit(ret, map->page))
atomic_dec(&map->n_free);
else
ret = -EBUSY;
goto bail;
}
qpn = qpt->last + 1;
if (qpn >= QPN_MAX)
@ -95,19 +135,7 @@ static u32 alloc_qpn(struct ipath_qp_table *qpt)
max_scan = qpt->nmaps - !offset;
for (i = 0;;) {
if (unlikely(!map->page)) {
unsigned long page = get_zeroed_page(GFP_KERNEL);
unsigned long flags;
/*
* Free the page if someone raced with us
* installing it:
*/
spin_lock_irqsave(&qpt->lock, flags);
if (map->page)
free_page(page);
else
map->page = (void *)page;
spin_unlock_irqrestore(&qpt->lock, flags);
get_map_page(qpt, map);
if (unlikely(!map->page))
break;
}
@ -151,7 +179,7 @@ static u32 alloc_qpn(struct ipath_qp_table *qpt)
qpn = mk_qpn(qpt, map, offset);
}
ret = 0;
ret = -ENOMEM;
bail:
return ret;
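Illustrative note (not part of the diff): get_map_page() above factors out the "allocate outside the lock, install under the lock, free on a lost race" idiom that alloc_qpn() previously open-coded. A minimal userspace sketch of that idiom, with a pthread mutex standing in for the QP table spinlock:

#include <pthread.h>
#include <stdlib.h>

/* Hypothetical stand-in for struct qpn_map: a lazily allocated bitmap page. */
struct map_sketch {
	pthread_mutex_t lock;
	void *page;
};

static void install_map_page(struct map_sketch *map)
{
	void *page = calloc(1, 4096);    /* stands in for get_zeroed_page() */

	pthread_mutex_lock(&map->lock);
	if (map->page)
		free(page);                  /* someone raced with us; keep theirs */
	else
		map->page = page;
	pthread_mutex_unlock(&map->lock);
}

int main(void)
{
	struct map_sketch map = { PTHREAD_MUTEX_INITIALIZER, NULL };

	install_map_page(&map);          /* installs the page */
	install_map_page(&map);          /* frees its own page, keeps the first */
	free(map.page);
	return 0;
}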
@ -180,29 +208,19 @@ static int ipath_alloc_qpn(struct ipath_qp_table *qpt, struct ipath_qp *qp,
enum ib_qp_type type)
{
unsigned long flags;
u32 qpn;
int ret;
if (type == IB_QPT_SMI)
qpn = 0;
else if (type == IB_QPT_GSI)
qpn = 1;
else {
/* Allocate the next available QPN */
qpn = alloc_qpn(qpt);
if (qpn == 0) {
ret = -ENOMEM;
goto bail;
}
}
qp->ibqp.qp_num = qpn;
ret = alloc_qpn(qpt, type);
if (ret < 0)
goto bail;
qp->ibqp.qp_num = ret;
/* Add the QP to the hash table. */
spin_lock_irqsave(&qpt->lock, flags);
qpn %= qpt->max;
qp->next = qpt->table[qpn];
qpt->table[qpn] = qp;
ret %= qpt->max;
qp->next = qpt->table[ret];
qpt->table[ret] = qp;
atomic_inc(&qp->refcount);
spin_unlock_irqrestore(&qpt->lock, flags);
@ -245,9 +263,7 @@ static void ipath_free_qp(struct ipath_qp_table *qpt, struct ipath_qp *qp)
if (!fnd)
return;
/* If QPN is not reserved, mark QPN free in the bitmap. */
if (qp->ibqp.qp_num > 1)
free_qpn(qpt, qp->ibqp.qp_num);
free_qpn(qpt, qp->ibqp.qp_num);
wait_event(qp->wait, !atomic_read(&qp->refcount));
}
@ -270,11 +286,10 @@ void ipath_free_all_qps(struct ipath_qp_table *qpt)
while (qp) {
nqp = qp->next;
if (qp->ibqp.qp_num > 1)
free_qpn(qpt, qp->ibqp.qp_num);
free_qpn(qpt, qp->ibqp.qp_num);
if (!atomic_dec_and_test(&qp->refcount) ||
!ipath_destroy_qp(&qp->ibqp))
ipath_dbg(KERN_INFO "QP memory leak!\n");
ipath_dbg("QP memory leak!\n");
qp = nqp;
}
}
@ -320,7 +335,8 @@ static void ipath_reset_qp(struct ipath_qp *qp)
qp->remote_qpn = 0;
qp->qkey = 0;
qp->qp_access_flags = 0;
clear_bit(IPATH_S_BUSY, &qp->s_flags);
qp->s_busy = 0;
qp->s_flags &= ~IPATH_S_SIGNAL_REQ_WR;
qp->s_hdrwords = 0;
qp->s_psn = 0;
qp->r_psn = 0;
@ -333,7 +349,6 @@ static void ipath_reset_qp(struct ipath_qp *qp)
qp->r_state = IB_OPCODE_UC_SEND_LAST;
}
qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
qp->r_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
qp->r_nak_state = 0;
qp->r_wrid_valid = 0;
qp->s_rnr_timeout = 0;
@ -344,6 +359,10 @@ static void ipath_reset_qp(struct ipath_qp *qp)
qp->s_ssn = 1;
qp->s_lsn = 0;
qp->s_wait_credit = 0;
memset(qp->s_ack_queue, 0, sizeof(qp->s_ack_queue));
qp->r_head_ack_queue = 0;
qp->s_tail_ack_queue = 0;
qp->s_num_rd_atomic = 0;
if (qp->r_rq.wq) {
qp->r_rq.wq->head = 0;
qp->r_rq.wq->tail = 0;
@ -357,7 +376,7 @@ static void ipath_reset_qp(struct ipath_qp *qp)
* @err: the receive completion error to signal if a RWQE is active
*
* Flushes both send and receive work queues.
* QP s_lock should be held and interrupts disabled.
* The QP s_lock should be held and interrupts disabled.
*/
void ipath_error_qp(struct ipath_qp *qp, enum ib_wc_status err)
@ -365,7 +384,7 @@ void ipath_error_qp(struct ipath_qp *qp, enum ib_wc_status err)
struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
struct ib_wc wc;
ipath_dbg(KERN_INFO "QP%d/%d in error state\n",
ipath_dbg("QP%d/%d in error state\n",
qp->ibqp.qp_num, qp->remote_qpn);
spin_lock(&dev->pending_lock);
@ -389,6 +408,8 @@ void ipath_error_qp(struct ipath_qp *qp, enum ib_wc_status err)
wc.port_num = 0;
if (qp->r_wrid_valid) {
qp->r_wrid_valid = 0;
wc.wr_id = qp->r_wr_id;
wc.opcode = IB_WC_RECV;
wc.status = err;
ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 1);
}
@ -503,13 +524,17 @@ int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
attr->path_mig_state != IB_MIG_REARM)
goto inval;
if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
if (attr->max_dest_rd_atomic > IPATH_MAX_RDMA_ATOMIC)
goto inval;
switch (new_state) {
case IB_QPS_RESET:
ipath_reset_qp(qp);
break;
case IB_QPS_ERR:
ipath_error_qp(qp, IB_WC_GENERAL_ERR);
ipath_error_qp(qp, IB_WC_WR_FLUSH_ERR);
break;
default:
@ -559,6 +584,12 @@ int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
if (attr_mask & IB_QP_QKEY)
qp->qkey = attr->qkey;
if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
qp->r_max_rd_atomic = attr->max_dest_rd_atomic;
if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
qp->s_max_rd_atomic = attr->max_rd_atomic;
qp->state = new_state;
spin_unlock_irqrestore(&qp->s_lock, flags);
@ -598,8 +629,8 @@ int ipath_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
attr->alt_pkey_index = 0;
attr->en_sqd_async_notify = 0;
attr->sq_draining = 0;
attr->max_rd_atomic = 1;
attr->max_dest_rd_atomic = 1;
attr->max_rd_atomic = qp->s_max_rd_atomic;
attr->max_dest_rd_atomic = qp->r_max_rd_atomic;
attr->min_rnr_timer = qp->r_min_rnr_timer;
attr->port_num = 1;
attr->timeout = qp->timeout;
@ -614,7 +645,7 @@ int ipath_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
init_attr->recv_cq = qp->ibqp.recv_cq;
init_attr->srq = qp->ibqp.srq;
init_attr->cap = attr->cap;
if (qp->s_flags & (1 << IPATH_S_SIGNAL_REQ_WR))
if (qp->s_flags & IPATH_S_SIGNAL_REQ_WR)
init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
else
init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
@ -786,7 +817,7 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
qp->s_size = init_attr->cap.max_send_wr + 1;
qp->s_max_sge = init_attr->cap.max_send_sge;
if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR)
qp->s_flags = 1 << IPATH_S_SIGNAL_REQ_WR;
qp->s_flags = IPATH_S_SIGNAL_REQ_WR;
else
qp->s_flags = 0;
dev = to_idev(ibpd->device);
@ -958,7 +989,7 @@ bail:
* @wc: the WC responsible for putting the QP in this state
*
* Flushes the send work queue.
* The QP s_lock should be held.
* The QP s_lock should be held and interrupts disabled.
*/
void ipath_sqerror_qp(struct ipath_qp *qp, struct ib_wc *wc)
@ -966,7 +997,7 @@ void ipath_sqerror_qp(struct ipath_qp *qp, struct ib_wc *wc)
struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
struct ipath_swqe *wqe = get_swqe_ptr(qp, qp->s_last);
ipath_dbg(KERN_INFO "Send queue error on QP%d/%d: err: %d\n",
ipath_dbg("Send queue error on QP%d/%d: err: %d\n",
qp->ibqp.qp_num, qp->remote_qpn, wc->status);
spin_lock(&dev->pending_lock);
@ -984,12 +1015,12 @@ void ipath_sqerror_qp(struct ipath_qp *qp, struct ib_wc *wc)
wc->status = IB_WC_WR_FLUSH_ERR;
while (qp->s_last != qp->s_head) {
wqe = get_swqe_ptr(qp, qp->s_last);
wc->wr_id = wqe->wr.wr_id;
wc->opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
ipath_cq_enter(to_icq(qp->ibqp.send_cq), wc, 1);
if (++qp->s_last >= qp->s_size)
qp->s_last = 0;
wqe = get_swqe_ptr(qp, qp->s_last);
}
qp->s_cur = qp->s_tail = qp->s_head;
qp->state = IB_QPS_SQE;

File diff suppressed because it is too large.


@ -126,9 +126,18 @@
#define INFINIPATH_E_RESET 0x0004000000000000ULL
#define INFINIPATH_E_HARDWARE 0x0008000000000000ULL
/*
* this is used to print "common" packet errors only when the
* __IPATH_ERRPKTDBG bit is set in ipath_debug.
*/
#define INFINIPATH_E_PKTERRS ( INFINIPATH_E_SPKTLEN \
| INFINIPATH_E_SDROPPEDDATAPKT | INFINIPATH_E_RVCRC \
| INFINIPATH_E_RICRC | INFINIPATH_E_RSHORTPKTLEN \
| INFINIPATH_E_REBP )
/* kr_hwerrclear, kr_hwerrmask, kr_hwerrstatus, bits */
/* TXEMEMPARITYERR bit 0: PIObuf, 1: PIOpbc, 2: launchfifo
* RXEMEMPARITYERR bit 0: rcvbuf, 1: lookupq, 2: eagerTID, 3: expTID
* RXEMEMPARITYERR bit 0: rcvbuf, 1: lookupq, 2: expTID, 3: eagerTID
* bit 4: flag buffer, 5: datainfo, 6: header info */
#define INFINIPATH_HWE_TXEMEMPARITYERR_MASK 0xFULL
#define INFINIPATH_HWE_TXEMEMPARITYERR_SHIFT 40
@ -143,8 +152,8 @@
/* rxe mem parity errors (shift by INFINIPATH_HWE_RXEMEMPARITYERR_SHIFT) */
#define INFINIPATH_HWE_RXEMEMPARITYERR_RCVBUF 0x01ULL
#define INFINIPATH_HWE_RXEMEMPARITYERR_LOOKUPQ 0x02ULL
#define INFINIPATH_HWE_RXEMEMPARITYERR_EAGERTID 0x04ULL
#define INFINIPATH_HWE_RXEMEMPARITYERR_EXPTID 0x08ULL
#define INFINIPATH_HWE_RXEMEMPARITYERR_EXPTID 0x04ULL
#define INFINIPATH_HWE_RXEMEMPARITYERR_EAGERTID 0x08ULL
#define INFINIPATH_HWE_RXEMEMPARITYERR_FLAGBUF 0x10ULL
#define INFINIPATH_HWE_RXEMEMPARITYERR_DATAINFO 0x20ULL
#define INFINIPATH_HWE_RXEMEMPARITYERR_HDRINFO 0x40ULL
@ -299,13 +308,6 @@
#define INFINIPATH_XGXS_RX_POL_SHIFT 19
#define INFINIPATH_XGXS_RX_POL_MASK 0xfULL
#define INFINIPATH_RT_ADDR_MASK 0xFFFFFFFFFFULL /* 40 bits valid */
/* TID entries (memory), HT-only */
#define INFINIPATH_RT_VALID 0x8000000000000000ULL
#define INFINIPATH_RT_ADDR_SHIFT 0
#define INFINIPATH_RT_BUFSIZE_MASK 0x3FFF
#define INFINIPATH_RT_BUFSIZE_SHIFT 48
/*
* IPATH_PIO_MAXIBHDR is the max IB header size allowed for in our


@ -202,6 +202,7 @@ int ipath_get_rwqe(struct ipath_qp *qp, int wr_id_only)
wq->tail = tail;
ret = 1;
qp->r_wrid_valid = 1;
if (handler) {
u32 n;
@ -229,7 +230,6 @@ int ipath_get_rwqe(struct ipath_qp *qp, int wr_id_only)
}
}
spin_unlock_irqrestore(&rq->lock, flags);
qp->r_wrid_valid = 1;
bail:
return ret;
@ -255,6 +255,7 @@ static void ipath_ruc_loopback(struct ipath_qp *sqp)
unsigned long flags;
struct ib_wc wc;
u64 sdata;
atomic64_t *maddr;
qp = ipath_lookup_qpn(&dev->qp_table, sqp->remote_qpn);
if (!qp) {
@ -265,7 +266,8 @@ static void ipath_ruc_loopback(struct ipath_qp *sqp)
again:
spin_lock_irqsave(&sqp->s_lock, flags);
if (!(ib_ipath_state_ops[sqp->state] & IPATH_PROCESS_SEND_OK)) {
if (!(ib_ipath_state_ops[sqp->state] & IPATH_PROCESS_SEND_OK) ||
qp->s_rnr_timeout) {
spin_unlock_irqrestore(&sqp->s_lock, flags);
goto done;
}
@ -310,7 +312,7 @@ again:
sqp->s_rnr_retry--;
dev->n_rnr_naks++;
sqp->s_rnr_timeout =
ib_ipath_rnr_table[sqp->r_min_rnr_timer];
ib_ipath_rnr_table[qp->r_min_rnr_timer];
ipath_insert_rnr_queue(sqp);
goto done;
}
@ -343,20 +345,22 @@ again:
wc.sl = sqp->remote_ah_attr.sl;
wc.dlid_path_bits = 0;
wc.port_num = 0;
spin_lock_irqsave(&sqp->s_lock, flags);
ipath_sqerror_qp(sqp, &wc);
spin_unlock_irqrestore(&sqp->s_lock, flags);
goto done;
}
break;
case IB_WR_RDMA_READ:
if (unlikely(!(qp->qp_access_flags &
IB_ACCESS_REMOTE_READ)))
goto acc_err;
if (unlikely(!ipath_rkey_ok(qp, &sqp->s_sge, wqe->length,
wqe->wr.wr.rdma.remote_addr,
wqe->wr.wr.rdma.rkey,
IB_ACCESS_REMOTE_READ)))
goto acc_err;
if (unlikely(!(qp->qp_access_flags &
IB_ACCESS_REMOTE_READ)))
goto acc_err;
qp->r_sge.sge = wqe->sg_list[0];
qp->r_sge.sg_list = wqe->sg_list + 1;
qp->r_sge.num_sge = wqe->wr.num_sge;
@ -364,22 +368,22 @@ again:
case IB_WR_ATOMIC_CMP_AND_SWP:
case IB_WR_ATOMIC_FETCH_AND_ADD:
if (unlikely(!(qp->qp_access_flags &
IB_ACCESS_REMOTE_ATOMIC)))
goto acc_err;
if (unlikely(!ipath_rkey_ok(qp, &qp->r_sge, sizeof(u64),
wqe->wr.wr.rdma.remote_addr,
wqe->wr.wr.rdma.rkey,
wqe->wr.wr.atomic.remote_addr,
wqe->wr.wr.atomic.rkey,
IB_ACCESS_REMOTE_ATOMIC)))
goto acc_err;
/* Perform atomic OP and save result. */
sdata = wqe->wr.wr.atomic.swap;
spin_lock_irqsave(&dev->pending_lock, flags);
qp->r_atomic_data = *(u64 *) qp->r_sge.sge.vaddr;
if (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
*(u64 *) qp->r_sge.sge.vaddr =
qp->r_atomic_data + sdata;
else if (qp->r_atomic_data == wqe->wr.wr.atomic.compare_add)
*(u64 *) qp->r_sge.sge.vaddr = sdata;
spin_unlock_irqrestore(&dev->pending_lock, flags);
*(u64 *) sqp->s_sge.sge.vaddr = qp->r_atomic_data;
maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
sdata = wqe->wr.wr.atomic.compare_add;
*(u64 *) sqp->s_sge.sge.vaddr =
(wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
(u64) atomic64_add_return(sdata, maddr) - sdata :
(u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
sdata, wqe->wr.wr.atomic.swap);
goto send_comp;
default:
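Illustrative note (not part of the diff): the loopback atomic handling above now uses the generic 64-bit atomics. atomic64_add_return() hands back the value after the add, so the driver subtracts sdata again to return the pre-add value that IB fetch-and-add semantics require, while cmpxchg() already returns the prior value. A standalone C11 sketch of the same two operations:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Fetch-and-add must return the value *before* the add.  C11's
 * atomic_fetch_add() already returns the old value; the kernel's
 * atomic64_add_return() returns the new one, hence the "- sdata". */
static uint64_t ib_fetch_add(_Atomic uint64_t *target, uint64_t add)
{
	return atomic_fetch_add(target, add);
}

/* Compare-and-swap returns the prior value whether or not the swap
 * happened, matching cmpxchg() semantics. */
static uint64_t ib_cmp_swap(_Atomic uint64_t *target, uint64_t compare, uint64_t swap)
{
	uint64_t old = compare;

	atomic_compare_exchange_strong(target, &old, swap);
	return old;
}

int main(void)
{
	_Atomic uint64_t mem = 5;

	printf("%llu\n", (unsigned long long) ib_fetch_add(&mem, 3));   /* 5, mem now 8 */
	printf("%llu\n", (unsigned long long) ib_cmp_swap(&mem, 8, 1)); /* 8, mem now 1 */
	return 0;
}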
@ -440,7 +444,7 @@ again:
send_comp:
sqp->s_rnr_retry = sqp->s_rnr_retry_cnt;
if (!test_bit(IPATH_S_SIGNAL_REQ_WR, &sqp->s_flags) ||
if (!(sqp->s_flags & IPATH_S_SIGNAL_REQ_WR) ||
(wqe->wr.send_flags & IB_SEND_SIGNALED)) {
wc.wr_id = wqe->wr.wr_id;
wc.status = IB_WC_SUCCESS;
@ -502,7 +506,7 @@ void ipath_no_bufs_available(struct ipath_qp *qp, struct ipath_ibdev *dev)
* We clear the tasklet flag now since we are committing to return
* from the tasklet function.
*/
clear_bit(IPATH_S_BUSY, &qp->s_flags);
clear_bit(IPATH_S_BUSY, &qp->s_busy);
tasklet_unlock(&qp->s_task);
want_buffer(dev->dd);
dev->n_piowait++;
@ -541,6 +545,9 @@ int ipath_post_ruc_send(struct ipath_qp *qp, struct ib_send_wr *wr)
wr->sg_list[0].addr & (sizeof(u64) - 1))) {
ret = -EINVAL;
goto bail;
} else if (wr->opcode >= IB_WR_RDMA_READ && !qp->s_max_rd_atomic) {
ret = -EINVAL;
goto bail;
}
/* IB spec says that num_sge == 0 is OK. */
if (wr->num_sge > qp->s_max_sge) {
@ -647,7 +654,7 @@ void ipath_do_ruc_send(unsigned long data)
u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
struct ipath_other_headers *ohdr;
if (test_and_set_bit(IPATH_S_BUSY, &qp->s_flags))
if (test_and_set_bit(IPATH_S_BUSY, &qp->s_busy))
goto bail;
if (unlikely(qp->remote_ah_attr.dlid == dev->dd->ipath_lid)) {
@ -683,19 +690,15 @@ again:
*/
spin_lock_irqsave(&qp->s_lock, flags);
/* Sending responses has higher priority over sending requests. */
if (qp->s_ack_state != IB_OPCODE_RC_ACKNOWLEDGE &&
(bth0 = ipath_make_rc_ack(qp, ohdr, pmtu)) != 0)
bth2 = qp->s_ack_psn++ & IPATH_PSN_MASK;
else if (!((qp->ibqp.qp_type == IB_QPT_RC) ?
ipath_make_rc_req(qp, ohdr, pmtu, &bth0, &bth2) :
ipath_make_uc_req(qp, ohdr, pmtu, &bth0, &bth2))) {
if (!((qp->ibqp.qp_type == IB_QPT_RC) ?
ipath_make_rc_req(qp, ohdr, pmtu, &bth0, &bth2) :
ipath_make_uc_req(qp, ohdr, pmtu, &bth0, &bth2))) {
/*
* Clear the busy bit before unlocking to avoid races with
* adding new work queue items and then failing to process
* them.
*/
clear_bit(IPATH_S_BUSY, &qp->s_flags);
clear_bit(IPATH_S_BUSY, &qp->s_busy);
spin_unlock_irqrestore(&qp->s_lock, flags);
goto bail;
}
@ -728,7 +731,7 @@ again:
goto again;
clear:
clear_bit(IPATH_S_BUSY, &qp->s_flags);
clear_bit(IPATH_S_BUSY, &qp->s_busy);
bail:
return;
}

View File

@ -207,7 +207,7 @@ void ipath_get_faststats(unsigned long opaque)
* don't access the chip while running diags, or memory diags can
* fail
*/
if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_PRESENT) ||
if (!dd->ipath_kregbase || !(dd->ipath_flags & IPATH_INITTED) ||
ipath_diag_inuse)
/* but re-arm the timer, for diags case; won't hurt other */
goto done;
@ -237,11 +237,13 @@ void ipath_get_faststats(unsigned long opaque)
if ((dd->ipath_maskederrs & ~dd->ipath_ignorederrs)
&& time_after(jiffies, dd->ipath_unmasktime)) {
char ebuf[256];
ipath_decode_err(ebuf, sizeof ebuf,
int iserr;
iserr = ipath_decode_err(ebuf, sizeof ebuf,
(dd->ipath_maskederrs & ~dd->
ipath_ignorederrs));
if ((dd->ipath_maskederrs & ~dd->ipath_ignorederrs) &
~(INFINIPATH_E_RRCVEGRFULL | INFINIPATH_E_RRCVHDRFULL))
~(INFINIPATH_E_RRCVEGRFULL | INFINIPATH_E_RRCVHDRFULL |
INFINIPATH_E_PKTERRS ))
ipath_dev_err(dd, "Re-enabling masked errors "
"(%s)\n", ebuf);
else {
@ -252,8 +254,12 @@ void ipath_get_faststats(unsigned long opaque)
* them. So only complain about these at debug
* level.
*/
ipath_dbg("Disabling frequent queue full errors "
"(%s)\n", ebuf);
if (iserr)
ipath_dbg("Re-enabling queue full errors (%s)\n",
ebuf);
else
ipath_cdbg(ERRPKT, "Re-enabling packet"
" problem interrupt (%s)\n", ebuf);
}
dd->ipath_maskederrs = dd->ipath_ignorederrs;
ipath_write_kreg(dd, dd->ipath_kregs->kr_errormask,

View File

@ -42,7 +42,7 @@ static void complete_last_send(struct ipath_qp *qp, struct ipath_swqe *wqe,
{
if (++qp->s_last == qp->s_size)
qp->s_last = 0;
if (!test_bit(IPATH_S_SIGNAL_REQ_WR, &qp->s_flags) ||
if (!(qp->s_flags & IPATH_S_SIGNAL_REQ_WR) ||
(wqe->wr.send_flags & IB_SEND_SIGNALED)) {
wc->wr_id = wqe->wr.wr_id;
wc->status = IB_WC_SUCCESS;
@ -344,13 +344,13 @@ void ipath_uc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
send_first:
if (qp->r_reuse_sge) {
qp->r_reuse_sge = 0;
qp->r_sge = qp->s_rdma_sge;
qp->r_sge = qp->s_rdma_read_sge;
} else if (!ipath_get_rwqe(qp, 0)) {
dev->n_pkt_drops++;
goto done;
}
/* Save the WQE so we can reuse it in case of an error. */
qp->s_rdma_sge = qp->r_sge;
qp->s_rdma_read_sge = qp->r_sge;
qp->r_rcv_len = 0;
if (opcode == OP(SEND_ONLY))
goto send_last;

View File

@ -308,6 +308,11 @@ int ipath_post_ud_send(struct ipath_qp *qp, struct ib_send_wr *wr)
goto bail;
}
if (wr->wr.ud.ah->pd != qp->ibqp.pd) {
ret = -EPERM;
goto bail;
}
/* IB spec says that num_sge == 0 is OK. */
if (wr->num_sge > qp->s_max_sge) {
ret = -EINVAL;
@ -467,7 +472,7 @@ int ipath_post_ud_send(struct ipath_qp *qp, struct ib_send_wr *wr)
done:
/* Queue the completion status entry. */
if (!test_bit(IPATH_S_SIGNAL_REQ_WR, &qp->s_flags) ||
if (!(qp->s_flags & IPATH_S_SIGNAL_REQ_WR) ||
(wr->send_flags & IB_SEND_SIGNALED)) {
wc.wr_id = wr->wr_id;
wc.status = IB_WC_SUCCESS;
@ -647,6 +652,7 @@ void ipath_ud_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
ipath_skip_sge(&qp->r_sge, sizeof(struct ib_grh));
ipath_copy_sge(&qp->r_sge, data,
wc.byte_len - sizeof(struct ib_grh));
qp->r_wrid_valid = 0;
wc.wr_id = qp->r_wr_id;
wc.status = IB_WC_SUCCESS;
wc.opcode = IB_WC_RECV;

View File

@ -438,6 +438,10 @@ void ipath_ib_rcv(struct ipath_ibdev *dev, void *rhdr, void *data,
struct ipath_mcast *mcast;
struct ipath_mcast_qp *p;
if (lnh != IPATH_LRH_GRH) {
dev->n_pkt_drops++;
goto bail;
}
mcast = ipath_mcast_find(&hdr->u.l.grh.dgid);
if (mcast == NULL) {
dev->n_pkt_drops++;
@ -445,8 +449,7 @@ void ipath_ib_rcv(struct ipath_ibdev *dev, void *rhdr, void *data,
}
dev->n_multicast_rcv++;
list_for_each_entry_rcu(p, &mcast->qp_list, list)
ipath_qp_rcv(dev, hdr, lnh == IPATH_LRH_GRH, data,
tlen, p->qp);
ipath_qp_rcv(dev, hdr, 1, data, tlen, p->qp);
/*
* Notify ipath_multicast_detach() if it is waiting for us
* to finish.
@ -773,7 +776,6 @@ int ipath_verbs_send(struct ipath_devdata *dd, u32 hdrwords,
/* +1 is for the qword padding of pbc */
plen = hdrwords + ((len + 3) >> 2) + 1;
if (unlikely((plen << 2) > dd->ipath_ibmaxlen)) {
ipath_dbg("packet len 0x%x too long, failing\n", plen);
ret = -EINVAL;
goto bail;
}
@ -980,14 +982,14 @@ static int ipath_query_device(struct ib_device *ibdev,
props->max_cqe = ib_ipath_max_cqes;
props->max_mr = dev->lk_table.max;
props->max_pd = ib_ipath_max_pds;
props->max_qp_rd_atom = 1;
props->max_qp_init_rd_atom = 1;
props->max_qp_rd_atom = IPATH_MAX_RDMA_ATOMIC;
props->max_qp_init_rd_atom = 255;
/* props->max_res_rd_atom */
props->max_srq = ib_ipath_max_srqs;
props->max_srq_wr = ib_ipath_max_srq_wrs;
props->max_srq_sge = ib_ipath_max_srq_sges;
/* props->local_ca_ack_delay */
props->atomic_cap = IB_ATOMIC_HCA;
props->atomic_cap = IB_ATOMIC_GLOB;
props->max_pkeys = ipath_get_npkeys(dev->dd);
props->max_mcast_grp = ib_ipath_max_mcast_grps;
props->max_mcast_qp_attach = ib_ipath_max_mcast_qp_attached;
@ -1557,7 +1559,6 @@ int ipath_register_ib_device(struct ipath_devdata *dd)
dev->node_type = RDMA_NODE_IB_CA;
dev->phys_port_cnt = 1;
dev->dma_device = &dd->pcidev->dev;
dev->class_dev.dev = dev->dma_device;
dev->query_device = ipath_query_device;
dev->modify_device = ipath_modify_device;
dev->query_port = ipath_query_port;


@ -40,9 +40,12 @@
#include <linux/interrupt.h>
#include <linux/kref.h>
#include <rdma/ib_pack.h>
#include <rdma/ib_user_verbs.h>
#include "ipath_layer.h"
#define IPATH_MAX_RDMA_ATOMIC 4
#define QPN_MAX (1 << 24)
#define QPNMAP_ENTRIES (QPN_MAX / PAGE_SIZE / BITS_PER_BYTE)
@ -89,7 +92,7 @@ struct ib_reth {
} __attribute__ ((packed));
struct ib_atomic_eth {
__be64 vaddr;
__be32 vaddr[2]; /* unaligned so access as 2 32-bit words */
__be32 rkey;
__be64 swap_data;
__be64 compare_data;
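Illustrative note (not part of the diff): declaring vaddr as two __be32 words lets the driver read a field that is not 8-byte aligned on the wire without doing an unaligned 64-bit load. A userspace sketch of the access pattern, with ntohl() standing in for be32_to_cpu():

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>   /* ntohl()/htonl() stand in for be32_to_cpu()/cpu_to_be32() */

/* Read a 64-bit big-endian value stored as two (possibly unaligned)
 * 32-bit words, the way the atomic ETH vaddr is declared above. */
static uint64_t get_atomic_vaddr(const uint32_t vaddr[2])
{
	return ((uint64_t) ntohl(vaddr[0]) << 32) | ntohl(vaddr[1]);
}

int main(void)
{
	uint32_t wire[2] = { htonl(0x12345678), htonl(0x9abcdef0) };

	printf("%#llx\n", (unsigned long long) get_atomic_vaddr(wire)); /* 0x123456789abcdef0 */
	return 0;
}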
@ -108,7 +111,7 @@ struct ipath_other_headers {
} rc;
struct {
__be32 aeth;
__be64 atomic_ack_eth;
__be32 atomic_ack_eth[2];
} at;
__be32 imm_data;
__be32 aeth;
@ -186,7 +189,7 @@ struct ipath_mmap_info {
struct ipath_cq_wc {
u32 head; /* index of next entry to fill */
u32 tail; /* index of next ib_poll_cq() entry */
struct ib_wc queue[1]; /* this is actually size ibcq.cqe + 1 */
struct ib_uverbs_wc queue[1]; /* this is actually size ibcq.cqe + 1 */
};
/*
@ -311,6 +314,19 @@ struct ipath_sge_state {
u8 num_sge;
};
/*
* This structure holds the information that the send tasklet needs
* to send a RDMA read response or atomic operation.
*/
struct ipath_ack_entry {
u8 opcode;
u32 psn;
union {
struct ipath_sge_state rdma_sge;
u64 atomic_data;
};
};
/*
* Variables prefixed with s_ are for the requester (sender).
* Variables prefixed with r_ are for the responder (receiver).
@ -333,24 +349,24 @@ struct ipath_qp {
struct ipath_mmap_info *ip;
struct ipath_sge_state *s_cur_sge;
struct ipath_sge_state s_sge; /* current send request data */
/* current RDMA read send data */
struct ipath_sge_state s_rdma_sge;
struct ipath_ack_entry s_ack_queue[IPATH_MAX_RDMA_ATOMIC + 1];
struct ipath_sge_state s_ack_rdma_sge;
struct ipath_sge_state s_rdma_read_sge;
struct ipath_sge_state r_sge; /* current receive data */
spinlock_t s_lock;
unsigned long s_flags;
unsigned long s_busy;
u32 s_hdrwords; /* size of s_hdr in 32 bit words */
u32 s_cur_size; /* size of send packet in bytes */
u32 s_len; /* total length of s_sge */
u32 s_rdma_len; /* total length of s_rdma_sge */
u32 s_rdma_read_len; /* total length of s_rdma_read_sge */
u32 s_next_psn; /* PSN for next request */
u32 s_last_psn; /* last response PSN processed */
u32 s_psn; /* current packet sequence number */
u32 s_ack_psn; /* PSN for RDMA_READ */
u32 s_ack_rdma_psn; /* PSN for sending RDMA read responses */
u32 s_ack_psn; /* PSN for acking sends and RDMA writes */
u32 s_rnr_timeout; /* number of milliseconds for RNR timeout */
u32 r_ack_psn; /* PSN for next ACK or atomic ACK */
u64 r_wr_id; /* ID for current receive WQE */
u64 r_atomic_data; /* data for last atomic op */
u32 r_atomic_psn; /* PSN of last atomic op */
u32 r_len; /* total length of r_sge */
u32 r_rcv_len; /* receive data len processed */
u32 r_psn; /* expected rcv packet sequence number */
@ -360,12 +376,13 @@ struct ipath_qp {
u8 s_ack_state; /* opcode of packet to ACK */
u8 s_nak_state; /* non-zero if NAK is pending */
u8 r_state; /* opcode of last packet received */
u8 r_ack_state; /* opcode of packet to ACK */
u8 r_nak_state; /* non-zero if NAK is pending */
u8 r_min_rnr_timer; /* retry timeout value for RNR NAKs */
u8 r_reuse_sge; /* for UC receive errors */
u8 r_sge_inx; /* current index into sg_list */
u8 r_wrid_valid; /* r_wrid set but CQ entry not yet made */
u8 r_max_rd_atomic; /* max number of RDMA read/atomic to receive */
u8 r_head_ack_queue; /* index into s_ack_queue[] */
u8 qp_access_flags;
u8 s_max_sge; /* size of s_wq->sg_list */
u8 s_retry_cnt; /* number of times to retry */
@ -374,6 +391,10 @@ struct ipath_qp {
u8 s_rnr_retry; /* requester RNR retry counter */
u8 s_wait_credit; /* limit number of unacked packets sent */
u8 s_pkey_index; /* PKEY index to use */
u8 s_max_rd_atomic; /* max number of RDMA read/atomic to send */
u8 s_num_rd_atomic; /* number of RDMA read/atomic pending */
u8 s_tail_ack_queue; /* index into s_ack_queue[] */
u8 s_flags;
u8 timeout; /* Timeout for this QP */
enum ib_mtu path_mtu;
u32 remote_qpn;
@ -390,11 +411,16 @@ struct ipath_qp {
struct ipath_sge r_sg_list[0]; /* verified SGEs */
};
/* Bit definition for s_busy. */
#define IPATH_S_BUSY 0
/*
* Bit definitions for s_flags.
*/
#define IPATH_S_BUSY 0
#define IPATH_S_SIGNAL_REQ_WR 1
#define IPATH_S_SIGNAL_REQ_WR 0x01
#define IPATH_S_FENCE_PENDING 0x02
#define IPATH_S_RDMAR_PENDING 0x04
#define IPATH_S_ACK_PENDING 0x08
#define IPATH_PSN_CREDIT 2048
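Illustrative note (not part of the diff): the constants above change style. IPATH_S_BUSY stays a bit number because it is still used with test_and_set_bit()/clear_bit() on the new s_busy word, while the remaining s_flags values become plain masks tested with &, as seen throughout the rest of this diff. A small sketch contrasting the two styles:

#include <stdio.h>

#define S_BUSY            0      /* bit number: used with test_and_set_bit()-style helpers */
#define S_SIGNAL_REQ_WR   0x01   /* bit mask: tested directly with & */

/* Minimal userspace model of test_and_set_bit(): takes a bit number. */
static int test_and_set_bit_nr(unsigned nr, unsigned long *word)
{
	int old = (int) ((*word >> nr) & 1);

	*word |= 1UL << nr;
	return old;
}

int main(void)
{
	unsigned long s_busy = 0;
	unsigned long s_flags = S_SIGNAL_REQ_WR;

	if (!test_and_set_bit_nr(S_BUSY, &s_busy))
		puts("claimed the send tasklet");
	if (s_flags & S_SIGNAL_REQ_WR)
		puts("only signalled work requests generate completions");
	return 0;
}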
@ -706,8 +732,6 @@ int ipath_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr);
int ipath_destroy_srq(struct ib_srq *ibsrq);
void ipath_cq_enter(struct ipath_cq *cq, struct ib_wc *entry, int sig);
int ipath_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry);
struct ib_cq *ipath_create_cq(struct ib_device *ibdev, int entries,
@ -757,9 +781,6 @@ u32 ipath_make_grh(struct ipath_ibdev *dev, struct ib_grh *hdr,
void ipath_do_ruc_send(unsigned long data);
u32 ipath_make_rc_ack(struct ipath_qp *qp, struct ipath_other_headers *ohdr,
u32 pmtu);
int ipath_make_rc_req(struct ipath_qp *qp, struct ipath_other_headers *ohdr,
u32 pmtu, u32 *bth0p, u32 *bth2p);


@ -1013,14 +1013,14 @@ static struct {
u64 latest_fw;
u32 flags;
} mthca_hca_table[] = {
[TAVOR] = { .latest_fw = MTHCA_FW_VER(3, 4, 0),
[TAVOR] = { .latest_fw = MTHCA_FW_VER(3, 5, 0),
.flags = 0 },
[ARBEL_COMPAT] = { .latest_fw = MTHCA_FW_VER(4, 7, 600),
[ARBEL_COMPAT] = { .latest_fw = MTHCA_FW_VER(4, 8, 200),
.flags = MTHCA_FLAG_PCIE },
[ARBEL_NATIVE] = { .latest_fw = MTHCA_FW_VER(5, 1, 400),
[ARBEL_NATIVE] = { .latest_fw = MTHCA_FW_VER(5, 2, 0),
.flags = MTHCA_FLAG_MEMFREE |
MTHCA_FLAG_PCIE },
[SINAI] = { .latest_fw = MTHCA_FW_VER(1, 1, 0),
[SINAI] = { .latest_fw = MTHCA_FW_VER(1, 2, 0),
.flags = MTHCA_FLAG_MEMFREE |
MTHCA_FLAG_PCIE |
MTHCA_FLAG_SINAI_OPT }
@ -1135,7 +1135,7 @@ static int __mthca_init_one(struct pci_dev *pdev, int hca_type)
goto err_cmd;
if (mdev->fw_ver < mthca_hca_table[hca_type].latest_fw) {
mthca_warn(mdev, "HCA FW version %d.%d.%d is old (%d.%d.%d is current).\n",
mthca_warn(mdev, "HCA FW version %d.%d.%3d is old (%d.%d.%3d is current).\n",
(int) (mdev->fw_ver >> 32), (int) (mdev->fw_ver >> 16) & 0xffff,
(int) (mdev->fw_ver & 0xffff),
(int) (mthca_hca_table[hca_type].latest_fw >> 32),


@ -297,7 +297,8 @@ out:
int mthca_write_mtt_size(struct mthca_dev *dev)
{
if (dev->mr_table.fmr_mtt_buddy != &dev->mr_table.mtt_buddy)
if (dev->mr_table.fmr_mtt_buddy != &dev->mr_table.mtt_buddy ||
!(dev->mthca_flags & MTHCA_FLAG_FMR))
/*
* Be friendly to WRITE_MTT command
* and leave two empty slots for the
@ -355,7 +356,8 @@ int mthca_write_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt,
int size = mthca_write_mtt_size(dev);
int chunk;
if (dev->mr_table.fmr_mtt_buddy != &dev->mr_table.mtt_buddy)
if (dev->mr_table.fmr_mtt_buddy != &dev->mr_table.mtt_buddy ||
!(dev->mthca_flags & MTHCA_FLAG_FMR))
return __mthca_write_mtt(dev, mtt, start_index, buffer_list, list_len);
while (list_len > 0) {


@ -1293,7 +1293,6 @@ int mthca_register_device(struct mthca_dev *dev)
dev->ib_dev.node_type = RDMA_NODE_IB_CA;
dev->ib_dev.phys_port_cnt = dev->limits.num_ports;
dev->ib_dev.dma_device = &dev->pdev->dev;
dev->ib_dev.class_dev.dev = &dev->pdev->dev;
dev->ib_dev.query_device = mthca_query_device;
dev->ib_dev.query_port = mthca_query_port;
dev->ib_dev.modify_device = mthca_modify_device;


@ -1419,11 +1419,10 @@ void mthca_free_qp(struct mthca_dev *dev,
* unref the mem-free tables and free the QPN in our table.
*/
if (!qp->ibqp.uobject) {
mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq), qp->qpn,
mthca_cq_clean(dev, recv_cq, qp->qpn,
qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
if (qp->ibqp.send_cq != qp->ibqp.recv_cq)
mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq), qp->qpn,
qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
if (send_cq != recv_cq)
mthca_cq_clean(dev, send_cq, qp->qpn, NULL);
mthca_free_memfree(dev, qp);
mthca_free_wqe_buf(dev, qp);


@ -228,7 +228,6 @@ static int ipoib_cm_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *even
struct net_device *dev = cm_id->context;
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ipoib_cm_rx *p;
unsigned long flags;
unsigned psn;
int ret;
@ -257,9 +256,9 @@ static int ipoib_cm_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *even
cm_id->context = p;
p->jiffies = jiffies;
spin_lock_irqsave(&priv->lock, flags);
spin_lock_irq(&priv->lock);
list_add(&p->list, &priv->cm.passive_ids);
spin_unlock_irqrestore(&priv->lock, flags);
spin_unlock_irq(&priv->lock);
queue_delayed_work(ipoib_workqueue,
&priv->cm.stale_task, IPOIB_CM_RX_DELAY);
return 0;
@ -277,7 +276,6 @@ static int ipoib_cm_rx_handler(struct ib_cm_id *cm_id,
{
struct ipoib_cm_rx *p;
struct ipoib_dev_priv *priv;
unsigned long flags;
int ret;
switch (event->event) {
@ -290,14 +288,14 @@ static int ipoib_cm_rx_handler(struct ib_cm_id *cm_id,
case IB_CM_REJ_RECEIVED:
p = cm_id->context;
priv = netdev_priv(p->dev);
spin_lock_irqsave(&priv->lock, flags);
spin_lock_irq(&priv->lock);
if (list_empty(&p->list))
ret = 0; /* Connection is going away already. */
else {
list_del_init(&p->list);
ret = -ECONNRESET;
}
spin_unlock_irqrestore(&priv->lock, flags);
spin_unlock_irq(&priv->lock);
if (ret) {
ib_destroy_qp(p->qp);
kfree(p);
@ -351,8 +349,8 @@ void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
u64 mapping[IPOIB_CM_RX_SG];
int frags;
ipoib_dbg_data(priv, "cm recv completion: id %d, op %d, status: %d\n",
wr_id, wc->opcode, wc->status);
ipoib_dbg_data(priv, "cm recv completion: id %d, status: %d\n",
wr_id, wc->status);
if (unlikely(wr_id >= ipoib_recvq_size)) {
ipoib_warn(priv, "cm recv completion event with wrid %d (> %d)\n",
@ -504,8 +502,8 @@ static void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ipoib_cm_tx *tx
struct ipoib_tx_buf *tx_req;
unsigned long flags;
ipoib_dbg_data(priv, "cm send completion: id %d, op %d, status: %d\n",
wr_id, wc->opcode, wc->status);
ipoib_dbg_data(priv, "cm send completion: id %d, status: %d\n",
wr_id, wc->status);
if (unlikely(wr_id >= ipoib_sendq_size)) {
ipoib_warn(priv, "cm send completion event with wrid %d (> %d)\n",
@ -612,23 +610,22 @@ void ipoib_cm_dev_stop(struct net_device *dev)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ipoib_cm_rx *p;
unsigned long flags;
if (!IPOIB_CM_SUPPORTED(dev->dev_addr))
return;
ib_destroy_cm_id(priv->cm.id);
spin_lock_irqsave(&priv->lock, flags);
spin_lock_irq(&priv->lock);
while (!list_empty(&priv->cm.passive_ids)) {
p = list_entry(priv->cm.passive_ids.next, typeof(*p), list);
list_del_init(&p->list);
spin_unlock_irqrestore(&priv->lock, flags);
spin_unlock_irq(&priv->lock);
ib_destroy_cm_id(p->id);
ib_destroy_qp(p->qp);
kfree(p);
spin_lock_irqsave(&priv->lock, flags);
spin_lock_irq(&priv->lock);
}
spin_unlock_irqrestore(&priv->lock, flags);
spin_unlock_irq(&priv->lock);
cancel_delayed_work(&priv->cm.stale_task);
}
@ -642,7 +639,6 @@ static int ipoib_cm_rep_handler(struct ib_cm_id *cm_id, struct ib_cm_event *even
struct ib_qp_attr qp_attr;
int qp_attr_mask, ret;
struct sk_buff *skb;
unsigned long flags;
p->mtu = be32_to_cpu(data->mtu);
@ -680,12 +676,12 @@ static int ipoib_cm_rep_handler(struct ib_cm_id *cm_id, struct ib_cm_event *even
skb_queue_head_init(&skqueue);
spin_lock_irqsave(&priv->lock, flags);
spin_lock_irq(&priv->lock);
set_bit(IPOIB_FLAG_OPER_UP, &p->flags);
if (p->neigh)
while ((skb = __skb_dequeue(&p->neigh->queue)))
__skb_queue_tail(&skqueue, skb);
spin_unlock_irqrestore(&priv->lock, flags);
spin_unlock_irq(&priv->lock);
while ((skb = __skb_dequeue(&skqueue))) {
skb->dev = p->dev;
@ -895,7 +891,6 @@ static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
struct ipoib_dev_priv *priv = netdev_priv(tx->dev);
struct net_device *dev = priv->dev;
struct ipoib_neigh *neigh;
unsigned long flags;
int ret;
switch (event->event) {
@ -914,7 +909,7 @@ static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
case IB_CM_REJ_RECEIVED:
case IB_CM_TIMEWAIT_EXIT:
ipoib_dbg(priv, "CM error %d.\n", event->event);
spin_lock_irqsave(&priv->tx_lock, flags);
spin_lock_irq(&priv->tx_lock);
spin_lock(&priv->lock);
neigh = tx->neigh;
@ -934,7 +929,7 @@ static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
}
spin_unlock(&priv->lock);
spin_unlock_irqrestore(&priv->tx_lock, flags);
spin_unlock_irq(&priv->tx_lock);
break;
default:
break;
@ -1023,21 +1018,20 @@ static void ipoib_cm_tx_reap(struct work_struct *work)
struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
cm.reap_task);
struct ipoib_cm_tx *p;
unsigned long flags;
spin_lock_irqsave(&priv->tx_lock, flags);
spin_lock_irq(&priv->tx_lock);
spin_lock(&priv->lock);
while (!list_empty(&priv->cm.reap_list)) {
p = list_entry(priv->cm.reap_list.next, typeof(*p), list);
list_del(&p->list);
spin_unlock(&priv->lock);
spin_unlock_irqrestore(&priv->tx_lock, flags);
spin_unlock_irq(&priv->tx_lock);
ipoib_cm_tx_destroy(p);
spin_lock_irqsave(&priv->tx_lock, flags);
spin_lock_irq(&priv->tx_lock);
spin_lock(&priv->lock);
}
spin_unlock(&priv->lock);
spin_unlock_irqrestore(&priv->tx_lock, flags);
spin_unlock_irq(&priv->tx_lock);
}
static void ipoib_cm_skb_reap(struct work_struct *work)
@ -1046,15 +1040,14 @@ static void ipoib_cm_skb_reap(struct work_struct *work)
cm.skb_task);
struct net_device *dev = priv->dev;
struct sk_buff *skb;
unsigned long flags;
unsigned mtu = priv->mcast_mtu;
spin_lock_irqsave(&priv->tx_lock, flags);
spin_lock_irq(&priv->tx_lock);
spin_lock(&priv->lock);
while ((skb = skb_dequeue(&priv->cm.skb_queue))) {
spin_unlock(&priv->lock);
spin_unlock_irqrestore(&priv->tx_lock, flags);
spin_unlock_irq(&priv->tx_lock);
if (skb->protocol == htons(ETH_P_IP))
icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
@ -1062,11 +1055,11 @@ static void ipoib_cm_skb_reap(struct work_struct *work)
icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu, dev);
#endif
dev_kfree_skb_any(skb);
spin_lock_irqsave(&priv->tx_lock, flags);
spin_lock_irq(&priv->tx_lock);
spin_lock(&priv->lock);
}
spin_unlock(&priv->lock);
spin_unlock_irqrestore(&priv->tx_lock, flags);
spin_unlock_irq(&priv->tx_lock);
}
void ipoib_cm_skb_too_long(struct net_device* dev, struct sk_buff *skb,
@ -1088,9 +1081,8 @@ static void ipoib_cm_stale_task(struct work_struct *work)
struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
cm.stale_task.work);
struct ipoib_cm_rx *p;
unsigned long flags;
spin_lock_irqsave(&priv->lock, flags);
spin_lock_irq(&priv->lock);
while (!list_empty(&priv->cm.passive_ids)) {
/* List is sorted by LRU, start from tail,
* stop when we see a recently used entry */
@ -1098,13 +1090,13 @@ static void ipoib_cm_stale_task(struct work_struct *work)
if (time_before_eq(jiffies, p->jiffies + IPOIB_CM_RX_TIMEOUT))
break;
list_del_init(&p->list);
spin_unlock_irqrestore(&priv->lock, flags);
spin_unlock_irq(&priv->lock);
ib_destroy_cm_id(p->id);
ib_destroy_qp(p->qp);
kfree(p);
spin_lock_irqsave(&priv->lock, flags);
spin_lock_irq(&priv->lock);
}
spin_unlock_irqrestore(&priv->lock, flags);
spin_unlock_irq(&priv->lock);
}


@ -172,8 +172,8 @@ static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
struct sk_buff *skb;
u64 addr;
ipoib_dbg_data(priv, "recv completion: id %d, op %d, status: %d\n",
wr_id, wc->opcode, wc->status);
ipoib_dbg_data(priv, "recv completion: id %d, status: %d\n",
wr_id, wc->status);
if (unlikely(wr_id >= ipoib_recvq_size)) {
ipoib_warn(priv, "recv completion event with wrid %d (> %d)\n",
@ -245,8 +245,8 @@ static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
struct ipoib_tx_buf *tx_req;
unsigned long flags;
ipoib_dbg_data(priv, "send completion: id %d, op %d, status: %d\n",
wr_id, wc->opcode, wc->status);
ipoib_dbg_data(priv, "send completion: id %d, status: %d\n",
wr_id, wc->status);
if (unlikely(wr_id >= ipoib_sendq_size)) {
ipoib_warn(priv, "send completion event with wrid %d (> %d)\n",


@ -395,14 +395,10 @@ static void path_rec_completion(int status,
skb_queue_head_init(&skqueue);
if (!status) {
struct ib_ah_attr av = {
.dlid = be16_to_cpu(pathrec->dlid),
.sl = pathrec->sl,
.port_num = priv->port,
.static_rate = pathrec->rate
};
struct ib_ah_attr av;
ah = ipoib_create_ah(dev, priv->pd, &av);
if (!ib_init_ah_from_path(priv->ca, priv->port, pathrec, &av))
ah = ipoib_create_ah(dev, priv->pd, &av);
}
spin_lock_irqsave(&priv->lock, flags);