IB/core: Use the new verbs DMA mapping functions
Convert code in core/ to use the new DMA mapping functions for kernel
verbs consumers.

Signed-off-by: Ralph Campbell <ralph.campbell@qlogic.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
commit 1527106ff8
parent f2cbb660ed
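For context, the helpers this patch converts to are thin inline wrappers in include/rdma/ib_verbs.h that let a device interpose on DMA mapping instead of going straight to the generic DMA API. A sketch of their shape as introduced alongside this series (paraphrased for illustration, not part of this diff):

static inline u64 ib_dma_map_single(struct ib_device *dev,
				    void *cpu_addr, size_t size,
				    enum dma_data_direction direction)
{
	/* A device that supplies dma_ops gets to interpose on the
	 * mapping; otherwise fall through to the generic DMA API. */
	if (dev->dma_ops)
		return dev->dma_ops->map_single(dev, cpu_addr, size, direction);
	return dma_map_single(dev->dma_device, cpu_addr, size, direction);
}

static inline void ib_dma_unmap_single(struct ib_device *dev,
				       u64 addr, size_t size,
				       enum dma_data_direction direction)
{
	if (dev->dma_ops)
		dev->dma_ops->unmap_single(dev, addr, size, direction);
	else
		dma_unmap_single(dev->dma_device, addr, size, direction);
}

This is why every call site below drops the ->dma_device dereference and passes the struct ib_device itself: the wrapper needs the ib_device to find the dma_ops hook.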
--- a/drivers/infiniband/core/mad.c
+++ b/drivers/infiniband/core/mad.c
@@ -998,17 +998,17 @@ int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
 	mad_agent = mad_send_wr->send_buf.mad_agent;
 	sge = mad_send_wr->sg_list;
-	sge[0].addr = dma_map_single(mad_agent->device->dma_device,
-				     mad_send_wr->send_buf.mad,
-				     sge[0].length,
-				     DMA_TO_DEVICE);
-	pci_unmap_addr_set(mad_send_wr, header_mapping, sge[0].addr);
+	sge[0].addr = ib_dma_map_single(mad_agent->device,
+					mad_send_wr->send_buf.mad,
+					sge[0].length,
+					DMA_TO_DEVICE);
+	mad_send_wr->header_mapping = sge[0].addr;
 
-	sge[1].addr = dma_map_single(mad_agent->device->dma_device,
-				     ib_get_payload(mad_send_wr),
-				     sge[1].length,
-				     DMA_TO_DEVICE);
-	pci_unmap_addr_set(mad_send_wr, payload_mapping, sge[1].addr);
+	sge[1].addr = ib_dma_map_single(mad_agent->device,
+					ib_get_payload(mad_send_wr),
+					sge[1].length,
+					DMA_TO_DEVICE);
+	mad_send_wr->payload_mapping = sge[1].addr;
 
 	spin_lock_irqsave(&qp_info->send_queue.lock, flags);
 	if (qp_info->send_queue.count < qp_info->send_queue.max_active) {
@@ -1026,11 +1026,11 @@ int ib_send_mad(struct ib_mad_send_wr_private *mad_send_wr)
 	}
 	spin_unlock_irqrestore(&qp_info->send_queue.lock, flags);
 	if (ret) {
-		dma_unmap_single(mad_agent->device->dma_device,
-				 pci_unmap_addr(mad_send_wr, header_mapping),
-				 sge[0].length, DMA_TO_DEVICE);
-		dma_unmap_single(mad_agent->device->dma_device,
-				 pci_unmap_addr(mad_send_wr, payload_mapping),
-				 sge[1].length, DMA_TO_DEVICE);
+		ib_dma_unmap_single(mad_agent->device,
+				    mad_send_wr->header_mapping,
+				    sge[0].length, DMA_TO_DEVICE);
+		ib_dma_unmap_single(mad_agent->device,
+				    mad_send_wr->payload_mapping,
+				    sge[1].length, DMA_TO_DEVICE);
 	}
 	return ret;
@@ -1850,8 +1850,8 @@ static void ib_mad_recv_done_handler(struct ib_mad_port_private *port_priv,
 	mad_priv_hdr = container_of(mad_list, struct ib_mad_private_header,
 				    mad_list);
 	recv = container_of(mad_priv_hdr, struct ib_mad_private, header);
-	dma_unmap_single(port_priv->device->dma_device,
-			 pci_unmap_addr(&recv->header, mapping),
-			 sizeof(struct ib_mad_private) -
-			 sizeof(struct ib_mad_private_header),
-			 DMA_FROM_DEVICE);
+	ib_dma_unmap_single(port_priv->device,
+			    recv->header.mapping,
+			    sizeof(struct ib_mad_private) -
+			    sizeof(struct ib_mad_private_header),
+			    DMA_FROM_DEVICE);
@@ -2080,11 +2080,11 @@ static void ib_mad_send_done_handler(struct ib_mad_port_private *port_priv,
 	qp_info = send_queue->qp_info;
 
 retry:
-	dma_unmap_single(mad_send_wr->send_buf.mad_agent->device->dma_device,
-			 pci_unmap_addr(mad_send_wr, header_mapping),
-			 mad_send_wr->sg_list[0].length, DMA_TO_DEVICE);
-	dma_unmap_single(mad_send_wr->send_buf.mad_agent->device->dma_device,
-			 pci_unmap_addr(mad_send_wr, payload_mapping),
-			 mad_send_wr->sg_list[1].length, DMA_TO_DEVICE);
+	ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
+			    mad_send_wr->header_mapping,
+			    mad_send_wr->sg_list[0].length, DMA_TO_DEVICE);
+	ib_dma_unmap_single(mad_send_wr->send_buf.mad_agent->device,
+			    mad_send_wr->payload_mapping,
+			    mad_send_wr->sg_list[1].length, DMA_TO_DEVICE);
 	queued_send_wr = NULL;
 	spin_lock_irqsave(&send_queue->lock, flags);
@@ -2528,13 +2528,12 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
 				break;
 			}
 		}
-		sg_list.addr = dma_map_single(qp_info->port_priv->
-						device->dma_device,
-					      &mad_priv->grh,
-					      sizeof *mad_priv -
-					      sizeof mad_priv->header,
-					      DMA_FROM_DEVICE);
-		pci_unmap_addr_set(&mad_priv->header, mapping, sg_list.addr);
+		sg_list.addr = ib_dma_map_single(qp_info->port_priv->device,
+						 &mad_priv->grh,
+						 sizeof *mad_priv -
+						 sizeof mad_priv->header,
+						 DMA_FROM_DEVICE);
+		mad_priv->header.mapping = sg_list.addr;
 		recv_wr.wr_id = (unsigned long)&mad_priv->header.mad_list;
 		mad_priv->header.mad_list.mad_queue = recv_queue;
@@ -2549,9 +2548,8 @@ static int ib_mad_post_receive_mads(struct ib_mad_qp_info *qp_info,
 			list_del(&mad_priv->header.mad_list.list);
 			recv_queue->count--;
 			spin_unlock_irqrestore(&recv_queue->lock, flags);
-			dma_unmap_single(qp_info->port_priv->device->dma_device,
-					 pci_unmap_addr(&mad_priv->header,
-							mapping),
-					 sizeof *mad_priv -
-					 sizeof mad_priv->header,
-					 DMA_FROM_DEVICE);
+			ib_dma_unmap_single(qp_info->port_priv->device,
+					    mad_priv->header.mapping,
+					    sizeof *mad_priv -
+					    sizeof mad_priv->header,
+					    DMA_FROM_DEVICE);
@@ -2586,8 +2584,8 @@ static void cleanup_recv_queue(struct ib_mad_qp_info *qp_info)
 		/* Remove from posted receive MAD list */
 		list_del(&mad_list->list);
 
-		dma_unmap_single(qp_info->port_priv->device->dma_device,
-				 pci_unmap_addr(&recv->header, mapping),
-				 sizeof(struct ib_mad_private) -
-				 sizeof(struct ib_mad_private_header),
-				 DMA_FROM_DEVICE);
+		ib_dma_unmap_single(qp_info->port_priv->device,
+				    recv->header.mapping,
+				    sizeof(struct ib_mad_private) -
+				    sizeof(struct ib_mad_private_header),
+				    DMA_FROM_DEVICE);
--- a/drivers/infiniband/core/mad_priv.h
+++ b/drivers/infiniband/core/mad_priv.h
@@ -73,7 +73,7 @@ struct ib_mad_private_header {
 	struct ib_mad_list_head mad_list;
 	struct ib_mad_recv_wc recv_wc;
 	struct ib_wc wc;
-	DECLARE_PCI_UNMAP_ADDR(mapping)
+	u64 mapping;
 } __attribute__ ((packed));
 
 struct ib_mad_private {
@@ -126,8 +126,8 @@ struct ib_mad_send_wr_private {
 	struct list_head agent_list;
 	struct ib_mad_agent_private *mad_agent_priv;
 	struct ib_mad_send_buf send_buf;
-	DECLARE_PCI_UNMAP_ADDR(header_mapping)
-	DECLARE_PCI_UNMAP_ADDR(payload_mapping)
+	u64 header_mapping;
+	u64 payload_mapping;
 	struct ib_send_wr send_wr;
 	struct ib_sge sg_list[IB_MAD_SEND_REQ_MAX_SG];
 	__be64 tid;
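The struct changes above are what make this more than a rename: the pci_unmap_addr() bookkeeping macros can compile away to nothing on configurations where the generic unmap path does not need the address, while the ib_dma_* API traffics in u64 handles that must always be stored. The handle is u64 rather than dma_addr_t because an interposing driver may hand back something that is not a bus address at all. The hook the wrappers dispatch through looks roughly like this (an abbreviated sketch of the interface added with these helpers, not part of this diff):

struct ib_dma_mapping_ops {
	int  (*mapping_error)(struct ib_device *dev, u64 dma_addr);
	u64  (*map_single)(struct ib_device *dev, void *ptr, size_t size,
			   enum dma_data_direction direction);
	void (*unmap_single)(struct ib_device *dev, u64 addr, size_t size,
			     enum dma_data_direction direction);
	int  (*map_sg)(struct ib_device *dev, struct scatterlist *sg,
		       int nents, enum dma_data_direction direction);
	void (*unmap_sg)(struct ib_device *dev, struct scatterlist *sg,
			 int nents, enum dma_data_direction direction);
	/* ... page, sync, and coherent-allocation hooks elided ... */
};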
--- a/drivers/infiniband/core/uverbs_mem.c
+++ b/drivers/infiniband/core/uverbs_mem.c
@@ -52,7 +52,7 @@ static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int d
 	int i;
 
 	list_for_each_entry_safe(chunk, tmp, &umem->chunk_list, list) {
-		dma_unmap_sg(dev->dma_device, chunk->page_list,
-			     chunk->nents, DMA_BIDIRECTIONAL);
+		ib_dma_unmap_sg(dev, chunk->page_list,
+				chunk->nents, DMA_BIDIRECTIONAL);
 		for (i = 0; i < chunk->nents; ++i) {
 			if (umem->writable && dirty)
@@ -136,7 +136,7 @@ int ib_umem_get(struct ib_device *dev, struct ib_umem *mem,
 			chunk->page_list[i].length = PAGE_SIZE;
 		}
 
-		chunk->nmap = dma_map_sg(dev->dma_device,
-					 &chunk->page_list[0],
-					 chunk->nents,
-					 DMA_BIDIRECTIONAL);
+		chunk->nmap = ib_dma_map_sg(dev,
+					    &chunk->page_list[0],
+					    chunk->nents,
+					    DMA_BIDIRECTIONAL);
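Taken together, the conversion at each call site is mechanical: pass the struct ib_device itself instead of reaching through ->dma_device, store the returned handle in a plain u64 field, and drop the pci_unmap_addr()/pci_unmap_addr_set() macros. A minimal sketch of the resulting pattern in a hypothetical consumer (example_buf, example_map, and example_unmap are illustrative names, not from this patch):

struct example_buf {
	void	*cpu_buf;
	size_t	len;
	u64	mapping;	/* was DECLARE_PCI_UNMAP_ADDR(mapping) */
};

static int example_map(struct ib_device *ibdev, struct example_buf *buf)
{
	/* Previously: dma_map_single(ibdev->dma_device, ...) followed by
	 * pci_unmap_addr_set(buf, mapping, addr). */
	buf->mapping = ib_dma_map_single(ibdev, buf->cpu_buf, buf->len,
					 DMA_TO_DEVICE);
	if (ib_dma_mapping_error(ibdev, buf->mapping))
		return -ENOMEM;
	return 0;
}

static void example_unmap(struct ib_device *ibdev, struct example_buf *buf)
{
	/* The stored u64 handle goes straight back to the unmap wrapper. */
	ib_dma_unmap_single(ibdev, buf->mapping, buf->len, DMA_TO_DEVICE);
}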