Merge branch 'netvsc-enhancements'

Stephen Hemminger says:

====================
netvsc driver enhancements for net-next

Lots of little things in here: more minor ethtool controls, negotiation
of offload parameters with the host (based on FreeBSD), and several
cleanups.
====================
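
The ethtool additions can be exercised with the standard ethtool commands
(assuming the synthetic NIC is eth0): 'ethtool -S eth0' now reports
per-queue tx/rx packet and byte counters, 'ethtool -x eth0' and
'ethtool -X eth0' query and set the RSS hash key and indirection table
through the new get_rxfh/set_rxfh hooks, and 'ethtool -L eth0 combined N'
still controls the channel count via the reworked netvsc_set_channels().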

Signed-off-by: David S. Miller <davem@davemloft.net>
Committed by David S. Miller on 2017-01-24 16:29:02 -05:00
commit 264e6777f9
4 changed files with 848 additions and 592 deletions

drivers/net/hyperv/hyperv_net.h

@ -34,6 +34,7 @@
#define NDIS_OBJECT_TYPE_RSS_CAPABILITIES 0x88
#define NDIS_OBJECT_TYPE_RSS_PARAMETERS 0x89
#define NDIS_OBJECT_TYPE_OFFLOAD 0xa7
#define NDIS_RECEIVE_SCALE_CAPABILITIES_REVISION_2 2
#define NDIS_RECEIVE_SCALE_PARAMETERS_REVISION_2 2
@ -118,6 +119,7 @@ struct ndis_recv_scale_param { /* NDIS_RECEIVE_SCALE_PARAMETERS */
/* Fwd declaration */
struct ndis_tcp_ip_checksum_info;
struct ndis_pkt_8021q_info;
/*
* Represent netvsc packet which contains 1 RNDIS and 1 ethernet frame
@ -135,8 +137,10 @@ struct hv_netvsc_packet {
u8 page_buf_cnt;
u16 q_idx;
u32 send_buf_index;
u16 total_packets;
u32 total_bytes;
u32 send_buf_index;
u32 total_data_buflen;
};
@ -155,6 +159,8 @@ enum rndis_device_state {
RNDIS_DEV_DATAINITIALIZED,
};
#define NETVSC_HASH_KEYLEN 40
struct rndis_device {
struct net_device *ndev;
@ -165,14 +171,17 @@ struct rndis_device {
spinlock_t request_lock;
struct list_head req_list;
unsigned char hw_mac_adr[ETH_ALEN];
u8 hw_mac_adr[ETH_ALEN];
u8 rss_key[NETVSC_HASH_KEYLEN];
u16 ind_table[ITAB_NUM];
};
/* Interface */
struct rndis_message;
struct netvsc_device;
int netvsc_device_add(struct hv_device *device, void *additional_info);
int netvsc_device_add(struct hv_device *device,
const struct netvsc_device_info *info);
void netvsc_device_remove(struct hv_device *device);
int netvsc_send(struct hv_device *device,
struct hv_netvsc_packet *packet,
@ -181,22 +190,25 @@ int netvsc_send(struct hv_device *device,
struct sk_buff *skb);
void netvsc_linkstatus_callback(struct hv_device *device_obj,
struct rndis_message *resp);
int netvsc_recv_callback(struct hv_device *device_obj,
struct hv_netvsc_packet *packet,
void **data,
struct ndis_tcp_ip_checksum_info *csum_info,
struct vmbus_channel *channel,
u16 vlan_tci);
int netvsc_recv_callback(struct net_device *net,
struct vmbus_channel *channel,
void *data, u32 len,
const struct ndis_tcp_ip_checksum_info *csum_info,
const struct ndis_pkt_8021q_info *vlan);
void netvsc_channel_cb(void *context);
int rndis_filter_open(struct netvsc_device *nvdev);
int rndis_filter_close(struct netvsc_device *nvdev);
int rndis_filter_device_add(struct hv_device *dev,
void *additional_info);
void rndis_filter_device_remove(struct hv_device *dev);
int rndis_filter_receive(struct hv_device *dev,
struct hv_netvsc_packet *pkt,
void **data,
struct vmbus_channel *channel);
struct netvsc_device_info *info);
void rndis_filter_device_remove(struct hv_device *dev,
struct netvsc_device *nvdev);
int rndis_filter_set_rss_param(struct rndis_device *rdev,
const u8 *key, int num_queue);
int rndis_filter_receive(struct net_device *ndev,
struct netvsc_device *net_dev,
struct hv_device *dev,
struct vmbus_channel *channel,
void *data, u32 buflen);
int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter);
int rndis_filter_set_device_mac(struct net_device *ndev, char *mac);
@ -622,6 +634,7 @@ struct nvsp_message {
#define VRSS_SEND_TAB_SIZE 16
#define VRSS_CHANNEL_MAX 64
#define VRSS_CHANNEL_DEFAULT 8
#define RNDIS_MAX_PKT_DEFAULT 8
#define RNDIS_PKT_ALIGN_DEFAULT 8
@ -685,8 +698,7 @@ struct net_device_context {
struct work_struct work;
u32 msg_enable; /* debug level */
struct netvsc_stats __percpu *tx_stats;
struct netvsc_stats __percpu *rx_stats;
u32 tx_checksum_mask;
/* Ethtool settings */
u8 duplex;
@ -705,11 +717,21 @@ struct net_device_context {
u32 vf_serial;
};
/* Per channel data */
struct netvsc_channel {
struct vmbus_channel *channel;
struct multi_send_data msd;
struct multi_recv_comp mrc;
atomic_t queue_sends;
struct netvsc_stats tx_stats;
struct netvsc_stats rx_stats;
};
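/* Note: this new per-channel structure gathers what were previously
 * parallel VRSS_CHANNEL_MAX-sized arrays in struct netvsc_device
 * (chn_table, queue_sends, msd, mrc), plus the per-queue tx/rx stats
 * that replace the per-cpu stats removed from net_device_context, so
 * all state for one queue lives in one place.
 */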
/* Per netvsc device */
struct netvsc_device {
u32 nvsp_version;
atomic_t num_outstanding_sends;
wait_queue_head_t wait_drain;
bool destroy;
@ -735,32 +757,25 @@ struct netvsc_device {
struct nvsp_message revoke_packet;
struct vmbus_channel *chn_table[VRSS_CHANNEL_MAX];
u32 send_table[VRSS_SEND_TAB_SIZE];
u32 max_chn;
u32 num_chn;
spinlock_t sc_lock; /* Protects num_sc_offered variable */
u32 num_sc_offered;
atomic_t queue_sends[VRSS_CHANNEL_MAX];
/* Holds rndis device info */
void *extension;
int ring_size;
/* The primary channel callback buffer */
unsigned char *cb_buffer;
/* The sub channel callback buffer */
unsigned char *sub_cb_buf;
struct multi_send_data msd[VRSS_CHANNEL_MAX];
u32 max_pkt; /* max number of pkt in one send, e.g. 8 */
u32 pkt_align; /* alignment bytes, e.g. 8 */
struct multi_recv_comp mrc[VRSS_CHANNEL_MAX];
atomic_t num_outstanding_recvs;
atomic_t open_cnt;
struct netvsc_channel chan_table[VRSS_CHANNEL_MAX];
};
static inline struct netvsc_device *
@ -939,7 +954,7 @@ struct ndis_pkt_8021q_info {
};
};
struct ndis_oject_header {
struct ndis_object_header {
u8 type;
u8 revision;
u16 size;
@ -947,6 +962,9 @@ struct ndis_oject_header {
#define NDIS_OBJECT_TYPE_DEFAULT 0x80
#define NDIS_OFFLOAD_PARAMETERS_REVISION_3 3
#define NDIS_OFFLOAD_PARAMETERS_REVISION_2 2
#define NDIS_OFFLOAD_PARAMETERS_REVISION_1 1
#define NDIS_OFFLOAD_PARAMETERS_NO_CHANGE 0
#define NDIS_OFFLOAD_PARAMETERS_LSOV2_DISABLED 1
#define NDIS_OFFLOAD_PARAMETERS_LSOV2_ENABLED 2
@ -973,8 +991,135 @@ struct ndis_oject_header {
#define OID_TCP_CONNECTION_OFFLOAD_HARDWARE_CAPABILITIES 0xFC01020F /* query */
#define OID_OFFLOAD_ENCAPSULATION 0x0101010A /* set/query */
/*
* OID_TCP_OFFLOAD_HARDWARE_CAPABILITIES
* ndis_type: NDIS_OBJTYPE_OFFLOAD
*/
#define NDIS_OFFLOAD_ENCAP_NONE 0x0000
#define NDIS_OFFLOAD_ENCAP_NULL 0x0001
#define NDIS_OFFLOAD_ENCAP_8023 0x0002
#define NDIS_OFFLOAD_ENCAP_8023PQ 0x0004
#define NDIS_OFFLOAD_ENCAP_8023PQ_OOB 0x0008
#define NDIS_OFFLOAD_ENCAP_RFC1483 0x0010
struct ndis_csum_offload {
u32 ip4_txenc;
u32 ip4_txcsum;
#define NDIS_TXCSUM_CAP_IP4OPT 0x001
#define NDIS_TXCSUM_CAP_TCP4OPT 0x004
#define NDIS_TXCSUM_CAP_TCP4 0x010
#define NDIS_TXCSUM_CAP_UDP4 0x040
#define NDIS_TXCSUM_CAP_IP4 0x100
#define NDIS_TXCSUM_ALL_TCP4 (NDIS_TXCSUM_CAP_TCP4 | NDIS_TXCSUM_CAP_TCP4OPT)
u32 ip4_rxenc;
u32 ip4_rxcsum;
#define NDIS_RXCSUM_CAP_IP4OPT 0x001
#define NDIS_RXCSUM_CAP_TCP4OPT 0x004
#define NDIS_RXCSUM_CAP_TCP4 0x010
#define NDIS_RXCSUM_CAP_UDP4 0x040
#define NDIS_RXCSUM_CAP_IP4 0x100
u32 ip6_txenc;
u32 ip6_txcsum;
#define NDIS_TXCSUM_CAP_IP6EXT 0x001
#define NDIS_TXCSUM_CAP_TCP6OPT 0x004
#define NDIS_TXCSUM_CAP_TCP6 0x010
#define NDIS_TXCSUM_CAP_UDP6 0x040
u32 ip6_rxenc;
u32 ip6_rxcsum;
#define NDIS_RXCSUM_CAP_IP6EXT 0x001
#define NDIS_RXCSUM_CAP_TCP6OPT 0x004
#define NDIS_RXCSUM_CAP_TCP6 0x010
#define NDIS_RXCSUM_CAP_UDP6 0x040
#define NDIS_TXCSUM_ALL_TCP6 (NDIS_TXCSUM_CAP_TCP6 | \
NDIS_TXCSUM_CAP_TCP6OPT | \
NDIS_TXCSUM_CAP_IP6EXT)
};
struct ndis_lsov1_offload {
u32 encap;
u32 maxsize;
u32 minsegs;
u32 opts;
};
struct ndis_ipsecv1_offload {
u32 encap;
u32 ah_esp;
u32 xport_tun;
u32 ip4_opts;
u32 flags;
u32 ip4_ah;
u32 ip4_esp;
};
struct ndis_lsov2_offload {
u32 ip4_encap;
u32 ip4_maxsz;
u32 ip4_minsg;
u32 ip6_encap;
u32 ip6_maxsz;
u32 ip6_minsg;
u32 ip6_opts;
#define NDIS_LSOV2_CAP_IP6EXT 0x001
#define NDIS_LSOV2_CAP_TCP6OPT 0x004
#define NDIS_LSOV2_CAP_IP6 (NDIS_LSOV2_CAP_IP6EXT | \
NDIS_LSOV2_CAP_TCP6OPT)
};
struct ndis_ipsecv2_offload {
u32 encap;
u16 ip6;
u16 ip4opt;
u16 ip6ext;
u16 ah;
u16 esp;
u16 ah_esp;
u16 xport;
u16 tun;
u16 xport_tun;
u16 lso;
u16 extseq;
u32 udp_esp;
u32 auth;
u32 crypto;
u32 sa_caps;
};
struct ndis_rsc_offload {
u16 ip4;
u16 ip6;
};
struct ndis_encap_offload {
u32 flags;
u32 maxhdr;
};
struct ndis_offload {
struct ndis_object_header header;
struct ndis_csum_offload csum;
struct ndis_lsov1_offload lsov1;
struct ndis_ipsecv1_offload ipsecv1;
struct ndis_lsov2_offload lsov2;
u32 flags;
/* NDIS >= 6.1 */
struct ndis_ipsecv2_offload ipsecv2;
/* NDIS >= 6.30 */
struct ndis_rsc_offload rsc;
struct ndis_encap_offload encap_gre;
};
#define NDIS_OFFLOAD_SIZE sizeof(struct ndis_offload)
#define NDIS_OFFLOAD_SIZE_6_0 offsetof(struct ndis_offload, ipsecv2)
#define NDIS_OFFLOAD_SIZE_6_1 offsetof(struct ndis_offload, rsc)
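/* struct ndis_offload only grew at its tail across NDIS revisions, so
 * offsetof() of the first member added by a later revision yields the
 * exact size of the older layout; rndis_filter_query_device() below
 * picks NDIS_OFFLOAD_SIZE_6_0, _6_1 or the full size based on the
 * negotiated NVSP version.
 */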
struct ndis_offload_params {
struct ndis_oject_header header;
struct ndis_object_header header;
u8 ip_v4_csum;
u8 tcp_ip_v4_csum;
u8 udp_ip_v4_csum;
@ -1301,15 +1446,10 @@ struct rndis_message {
#define NDIS_PACKET_TYPE_FUNCTIONAL 0x00000400
#define NDIS_PACKET_TYPE_MAC_FRAME 0x00000800
#define INFO_IPV4 2
#define INFO_IPV6 4
#define INFO_TCP 2
#define INFO_UDP 4
#define TRANSPORT_INFO_NOT_IP 0
#define TRANSPORT_INFO_IPV4_TCP ((INFO_IPV4 << 16) | INFO_TCP)
#define TRANSPORT_INFO_IPV4_UDP ((INFO_IPV4 << 16) | INFO_UDP)
#define TRANSPORT_INFO_IPV6_TCP ((INFO_IPV6 << 16) | INFO_TCP)
#define TRANSPORT_INFO_IPV6_UDP ((INFO_IPV6 << 16) | INFO_UDP)
#define TRANSPORT_INFO_IPV4_TCP 0x01
#define TRANSPORT_INFO_IPV4_UDP 0x02
#define TRANSPORT_INFO_IPV6_TCP 0x10
#define TRANSPORT_INFO_IPV6_UDP 0x20
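/* The old TRANSPORT_INFO_* values packed the IP version into the upper
 * 16 bits; the new ones are independent single bits, so the offloads the
 * host accepted can be OR'd into net_device_ctx->tx_checksum_mask and a
 * packet tested with one AND, as in netvsc_start_xmit():
 *
 *	if (net_checksum_info(skb) & net_device_ctx->tx_checksum_mask)
 *		... set up the checksum offload PPI ...
 */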
#endif /* _HYPERV_NET_H */

drivers/net/hyperv/netvsc.c

@ -67,14 +67,8 @@ static struct netvsc_device *alloc_net_device(void)
if (!net_device)
return NULL;
net_device->cb_buffer = kzalloc(NETVSC_PACKET_SIZE, GFP_KERNEL);
if (!net_device->cb_buffer) {
kfree(net_device);
return NULL;
}
net_device->mrc[0].buf = vzalloc(NETVSC_RECVSLOT_MAX *
sizeof(struct recv_comp_data));
net_device->chan_table[0].mrc.buf
= vzalloc(NETVSC_RECVSLOT_MAX * sizeof(struct recv_comp_data));
init_waitqueue_head(&net_device->wait_drain);
net_device->destroy = false;
@ -91,12 +85,21 @@ static void free_netvsc_device(struct netvsc_device *nvdev)
int i;
for (i = 0; i < VRSS_CHANNEL_MAX; i++)
vfree(nvdev->mrc[i].buf);
vfree(nvdev->chan_table[i].mrc.buf);
kfree(nvdev->cb_buffer);
kfree(nvdev);
}
static inline bool netvsc_channel_idle(const struct netvsc_device *net_device,
u16 q_idx)
{
const struct netvsc_channel *nvchan = &net_device->chan_table[q_idx];
return atomic_read(&net_device->num_outstanding_recvs) == 0 &&
atomic_read(&nvchan->queue_sends) == 0;
}
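/* This per-channel idle test (used with the destroy-flag check in
 * netvsc_channel_cb) replaces the removed get_inbound_net_device(),
 * which gated the callback on the global outstanding-send counter;
 * sends are now counted per channel in nvchan->queue_sends.
 */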
static struct netvsc_device *get_outbound_net_device(struct hv_device *device)
{
struct netvsc_device *net_device = hv_device_to_netvsc_device(device);
@ -107,22 +110,6 @@ static struct netvsc_device *get_outbound_net_device(struct hv_device *device)
return net_device;
}
static struct netvsc_device *get_inbound_net_device(struct hv_device *device)
{
struct netvsc_device *net_device = hv_device_to_netvsc_device(device);
if (!net_device)
goto get_in_err;
if (net_device->destroy &&
atomic_read(&net_device->num_outstanding_sends) == 0 &&
atomic_read(&net_device->num_outstanding_recvs) == 0)
net_device = NULL;
get_in_err:
return net_device;
}
static void netvsc_destroy_buf(struct hv_device *device)
{
struct nvsp_message *revoke_packet;
@ -584,7 +571,6 @@ void netvsc_device_remove(struct hv_device *device)
vmbus_close(device->channel);
/* Release all resources */
vfree(net_device->sub_cb_buf);
free_netvsc_device(net_device);
}
@ -620,29 +606,35 @@ static void netvsc_send_tx_complete(struct netvsc_device *net_device,
struct net_device *ndev = hv_get_drvdata(device);
struct net_device_context *net_device_ctx = netdev_priv(ndev);
struct vmbus_channel *channel = device->channel;
int num_outstanding_sends;
u16 q_idx = 0;
int queue_sends;
/* Notify the layer above us */
if (likely(skb)) {
struct hv_netvsc_packet *nvsc_packet
const struct hv_netvsc_packet *packet
= (struct hv_netvsc_packet *)skb->cb;
u32 send_index = nvsc_packet->send_buf_index;
u32 send_index = packet->send_buf_index;
struct netvsc_stats *tx_stats;
if (send_index != NETVSC_INVALID_INDEX)
netvsc_free_send_slot(net_device, send_index);
q_idx = nvsc_packet->q_idx;
q_idx = packet->q_idx;
channel = incoming_channel;
tx_stats = &net_device->chan_table[q_idx].tx_stats;
u64_stats_update_begin(&tx_stats->syncp);
tx_stats->packets += packet->total_packets;
tx_stats->bytes += packet->total_bytes;
u64_stats_update_end(&tx_stats->syncp);
dev_consume_skb_any(skb);
}
num_outstanding_sends =
atomic_dec_return(&net_device->num_outstanding_sends);
queue_sends = atomic_dec_return(&net_device->queue_sends[q_idx]);
queue_sends =
atomic_dec_return(&net_device->chan_table[q_idx].queue_sends);
if (net_device->destroy && num_outstanding_sends == 0)
if (net_device->destroy && queue_sends == 0)
wake_up(&net_device->wait_drain);
if (netif_tx_queue_stopped(netdev_get_tx_queue(ndev, q_idx)) &&
@ -688,27 +680,15 @@ static void netvsc_send_completion(struct netvsc_device *net_device,
static u32 netvsc_get_next_send_section(struct netvsc_device *net_device)
{
unsigned long index;
u32 max_words = net_device->map_words;
unsigned long *map_addr = (unsigned long *)net_device->send_section_map;
u32 section_cnt = net_device->send_section_cnt;
int ret_val = NETVSC_INVALID_INDEX;
int i;
int prev_val;
unsigned long *map_addr = net_device->send_section_map;
unsigned int i;
for (i = 0; i < max_words; i++) {
if (!~(map_addr[i]))
continue;
index = ffz(map_addr[i]);
prev_val = sync_test_and_set_bit(index, &map_addr[i]);
if (prev_val)
continue;
if ((index + (i * BITS_PER_LONG)) >= section_cnt)
break;
ret_val = (index + (i * BITS_PER_LONG));
break;
for_each_clear_bit(i, map_addr, net_device->map_words) {
if (sync_test_and_set_bit(i, map_addr) == 0)
return i;
}
return ret_val;
return NETVSC_INVALID_INDEX;
}
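/* In the rewritten loop above, for_each_clear_bit() proposes a free
 * send-buffer slot and sync_test_and_set_bit() claims it atomically; if
 * another CPU grabs the slot between the scan and the claim, the loop
 * simply continues to the next clear bit.
 */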
static u32 netvsc_copy_to_send_buf(struct netvsc_device *net_device,
@ -765,9 +745,11 @@ static inline int netvsc_send_pkt(
struct sk_buff *skb)
{
struct nvsp_message nvmsg;
u16 q_idx = packet->q_idx;
struct vmbus_channel *out_channel = net_device->chn_table[q_idx];
struct netvsc_channel *nvchan
= &net_device->chan_table[packet->q_idx];
struct vmbus_channel *out_channel = nvchan->channel;
struct net_device *ndev = hv_get_drvdata(device);
struct netdev_queue *txq = netdev_get_tx_queue(ndev, packet->q_idx);
u64 req_id;
int ret;
struct hv_page_buffer *pgbuf;
@ -827,23 +809,14 @@ static inline int netvsc_send_pkt(
}
if (ret == 0) {
atomic_inc(&net_device->num_outstanding_sends);
atomic_inc(&net_device->queue_sends[q_idx]);
atomic_inc_return(&nvchan->queue_sends);
if (ring_avail < RING_AVAIL_PERCENT_LOWATER) {
netif_tx_stop_queue(netdev_get_tx_queue(ndev, q_idx));
if (atomic_read(&net_device->
queue_sends[q_idx]) < 1)
netif_tx_wake_queue(netdev_get_tx_queue(
ndev, q_idx));
}
if (ring_avail < RING_AVAIL_PERCENT_LOWATER)
netif_tx_stop_queue(txq);
} else if (ret == -EAGAIN) {
netif_tx_stop_queue(netdev_get_tx_queue(
ndev, q_idx));
if (atomic_read(&net_device->queue_sends[q_idx]) < 1) {
netif_tx_wake_queue(netdev_get_tx_queue(
ndev, q_idx));
netif_tx_stop_queue(txq);
if (atomic_read(&nvchan->queue_sends) < 1) {
netif_tx_wake_queue(txq);
ret = -ENOSPC;
}
} else {
@ -874,8 +847,7 @@ int netvsc_send(struct hv_device *device,
{
struct netvsc_device *net_device;
int ret = 0;
struct vmbus_channel *out_channel;
u16 q_idx = packet->q_idx;
struct netvsc_channel *nvchan;
u32 pktlen = packet->total_data_buflen, msd_len = 0;
unsigned int section_index = NETVSC_INVALID_INDEX;
struct multi_send_data *msdp;
@ -895,8 +867,7 @@ int netvsc_send(struct hv_device *device,
if (!net_device->send_section_map)
return -EAGAIN;
out_channel = net_device->chn_table[q_idx];
nvchan = &net_device->chan_table[packet->q_idx];
packet->send_buf_index = NETVSC_INVALID_INDEX;
packet->cp_partial = false;
@ -908,9 +879,8 @@ int netvsc_send(struct hv_device *device,
goto send_now;
}
msdp = &net_device->msd[q_idx];
/* batch packets in send buffer if possible */
msdp = &nvchan->msd;
if (msdp->pkt)
msd_len = msdp->pkt->total_data_buflen;
@ -950,6 +920,11 @@ int netvsc_send(struct hv_device *device,
packet->total_data_buflen += msd_len;
}
if (msdp->pkt) {
packet->total_packets += msdp->pkt->total_packets;
packet->total_bytes += msdp->pkt->total_bytes;
}
if (msdp->skb)
dev_consume_skb_any(msdp->skb);
@ -1011,8 +986,9 @@ static int netvsc_send_recv_completion(struct vmbus_channel *channel,
static inline void count_recv_comp_slot(struct netvsc_device *nvdev, u16 q_idx,
u32 *filled, u32 *avail)
{
u32 first = nvdev->mrc[q_idx].first;
u32 next = nvdev->mrc[q_idx].next;
struct multi_recv_comp *mrc = &nvdev->chan_table[q_idx].mrc;
u32 first = mrc->first;
u32 next = mrc->next;
*filled = (first > next) ? NETVSC_RECVSLOT_MAX - first + next :
next - first;
@ -1024,26 +1000,26 @@ static inline void count_recv_comp_slot(struct netvsc_device *nvdev, u16 q_idx,
static inline struct recv_comp_data *read_recv_comp_slot(struct netvsc_device
*nvdev, u16 q_idx)
{
struct multi_recv_comp *mrc = &nvdev->chan_table[q_idx].mrc;
u32 filled, avail;
if (!nvdev->mrc[q_idx].buf)
if (unlikely(!mrc->buf))
return NULL;
count_recv_comp_slot(nvdev, q_idx, &filled, &avail);
if (!filled)
return NULL;
return nvdev->mrc[q_idx].buf + nvdev->mrc[q_idx].first *
sizeof(struct recv_comp_data);
return mrc->buf + mrc->first * sizeof(struct recv_comp_data);
}
/* Put the first filled slot back to available pool */
static inline void put_recv_comp_slot(struct netvsc_device *nvdev, u16 q_idx)
{
struct multi_recv_comp *mrc = &nvdev->chan_table[q_idx].mrc;
int num_recv;
nvdev->mrc[q_idx].first = (nvdev->mrc[q_idx].first + 1) %
NETVSC_RECVSLOT_MAX;
mrc->first = (mrc->first + 1) % NETVSC_RECVSLOT_MAX;
num_recv = atomic_dec_return(&nvdev->num_outstanding_recvs);
@ -1078,13 +1054,14 @@ static void netvsc_chk_recv_comp(struct netvsc_device *nvdev,
static inline struct recv_comp_data *get_recv_comp_slot(
struct netvsc_device *nvdev, struct vmbus_channel *channel, u16 q_idx)
{
struct multi_recv_comp *mrc = &nvdev->chan_table[q_idx].mrc;
u32 filled, avail, next;
struct recv_comp_data *rcd;
if (!nvdev->recv_section)
if (unlikely(!nvdev->recv_section))
return NULL;
if (!nvdev->mrc[q_idx].buf)
if (unlikely(!mrc->buf))
return NULL;
if (atomic_read(&nvdev->num_outstanding_recvs) >
@ -1095,60 +1072,44 @@ static inline struct recv_comp_data *get_recv_comp_slot(
if (!avail)
return NULL;
next = nvdev->mrc[q_idx].next;
rcd = nvdev->mrc[q_idx].buf + next * sizeof(struct recv_comp_data);
nvdev->mrc[q_idx].next = (next + 1) % NETVSC_RECVSLOT_MAX;
next = mrc->next;
rcd = mrc->buf + next * sizeof(struct recv_comp_data);
mrc->next = (next + 1) % NETVSC_RECVSLOT_MAX;
atomic_inc(&nvdev->num_outstanding_recvs);
return rcd;
}
static void netvsc_receive(struct netvsc_device *net_device,
struct vmbus_channel *channel,
struct hv_device *device,
struct vmpacket_descriptor *packet)
static void netvsc_receive(struct net_device *ndev,
struct netvsc_device *net_device,
struct net_device_context *net_device_ctx,
struct hv_device *device,
struct vmbus_channel *channel,
struct vmtransfer_page_packet_header *vmxferpage_packet,
struct nvsp_message *nvsp)
{
struct vmtransfer_page_packet_header *vmxferpage_packet;
struct nvsp_message *nvsp_packet;
struct hv_netvsc_packet nv_pkt;
struct hv_netvsc_packet *netvsc_packet = &nv_pkt;
char *recv_buf = net_device->recv_buf;
u32 status = NVSP_STAT_SUCCESS;
int i;
int count = 0;
struct net_device *ndev = hv_get_drvdata(device);
void *data;
int ret;
struct recv_comp_data *rcd;
u16 q_idx = channel->offermsg.offer.sub_channel_index;
/*
* All inbound packets other than send completion should be xfer page
* packet
*/
if (packet->type != VM_PKT_DATA_USING_XFER_PAGES) {
netdev_err(ndev, "Unknown packet type received - %d\n",
packet->type);
return;
}
nvsp_packet = (struct nvsp_message *)((unsigned long)packet +
(packet->offset8 << 3));
/* Make sure this is a valid nvsp packet */
if (nvsp_packet->hdr.msg_type !=
NVSP_MSG1_TYPE_SEND_RNDIS_PKT) {
netdev_err(ndev, "Unknown nvsp packet type received-"
" %d\n", nvsp_packet->hdr.msg_type);
if (unlikely(nvsp->hdr.msg_type != NVSP_MSG1_TYPE_SEND_RNDIS_PKT)) {
netif_err(net_device_ctx, rx_err, ndev,
"Unknown nvsp packet type received %u\n",
nvsp->hdr.msg_type);
return;
}
vmxferpage_packet = (struct vmtransfer_page_packet_header *)packet;
if (vmxferpage_packet->xfer_pageset_id != NETVSC_RECEIVE_BUFFER_ID) {
netdev_err(ndev, "Invalid xfer page set id - "
"expecting %x got %x\n", NETVSC_RECEIVE_BUFFER_ID,
vmxferpage_packet->xfer_pageset_id);
if (unlikely(vmxferpage_packet->xfer_pageset_id != NETVSC_RECEIVE_BUFFER_ID)) {
netif_err(net_device_ctx, rx_err, ndev,
"Invalid xfer page set id - expecting %x got %x\n",
NETVSC_RECEIVE_BUFFER_ID,
vmxferpage_packet->xfer_pageset_id);
return;
}
@ -1156,18 +1117,16 @@ static void netvsc_receive(struct netvsc_device *net_device,
/* Each range represents 1 RNDIS pkt that contains 1 ethernet frame */
for (i = 0; i < count; i++) {
/* Initialize the netvsc packet */
data = (void *)((unsigned long)net_device->
recv_buf + vmxferpage_packet->ranges[i].byte_offset);
netvsc_packet->total_data_buflen =
vmxferpage_packet->ranges[i].byte_count;
void *data = recv_buf
+ vmxferpage_packet->ranges[i].byte_offset;
u32 buflen = vmxferpage_packet->ranges[i].byte_count;
/* Pass it to the upper layer */
status = rndis_filter_receive(device, netvsc_packet, &data,
channel);
status = rndis_filter_receive(ndev, net_device, device,
channel, data, buflen);
}
if (!net_device->mrc[q_idx].buf) {
if (!net_device->chan_table[q_idx].mrc.buf) {
ret = netvsc_send_recv_completion(channel,
vmxferpage_packet->d.trans_id,
status);
@ -1243,11 +1202,10 @@ static void netvsc_process_raw_pkt(struct hv_device *device,
u64 request_id,
struct vmpacket_descriptor *desc)
{
struct nvsp_message *nvmsg;
struct net_device_context *net_device_ctx = netdev_priv(ndev);
nvmsg = (struct nvsp_message *)((unsigned long)
desc + (desc->offset8 << 3));
struct nvsp_message *nvmsg
= (struct nvsp_message *)((unsigned long)desc
+ (desc->offset8 << 3));
switch (desc->type) {
case VM_PKT_COMP:
@ -1255,7 +1213,10 @@ static void netvsc_process_raw_pkt(struct hv_device *device,
break;
case VM_PKT_DATA_USING_XFER_PAGES:
netvsc_receive(net_device, channel, device, desc);
netvsc_receive(ndev, net_device, net_device_ctx,
device, channel,
(struct vmtransfer_page_packet_header *)desc,
nvmsg);
break;
case VM_PKT_DATA_INBAND:
@ -1271,16 +1232,11 @@ static void netvsc_process_raw_pkt(struct hv_device *device,
void netvsc_channel_cb(void *context)
{
int ret;
struct vmbus_channel *channel = (struct vmbus_channel *)context;
struct vmbus_channel *channel = context;
u16 q_idx = channel->offermsg.offer.sub_channel_index;
struct hv_device *device;
struct netvsc_device *net_device;
u32 bytes_recvd;
u64 request_id;
struct vmpacket_descriptor *desc;
unsigned char *buffer;
int bufferlen = NETVSC_PACKET_SIZE;
struct net_device *ndev;
bool need_to_commit = false;
@ -1289,68 +1245,25 @@ void netvsc_channel_cb(void *context)
else
device = channel->device_obj;
net_device = get_inbound_net_device(device);
if (!net_device)
return;
ndev = hv_get_drvdata(device);
buffer = get_per_channel_state(channel);
if (unlikely(!ndev))
return;
do {
desc = get_next_pkt_raw(channel);
if (desc != NULL) {
netvsc_process_raw_pkt(device,
channel,
net_device,
ndev,
desc->trans_id,
desc);
net_device = net_device_to_netvsc_device(ndev);
if (unlikely(net_device->destroy) &&
netvsc_channel_idle(net_device, q_idx))
return;
put_pkt_raw(channel, desc);
need_to_commit = true;
continue;
}
if (need_to_commit) {
need_to_commit = false;
commit_rd_index(channel);
}
while ((desc = get_next_pkt_raw(channel)) != NULL) {
netvsc_process_raw_pkt(device, channel, net_device,
ndev, desc->trans_id, desc);
ret = vmbus_recvpacket_raw(channel, buffer, bufferlen,
&bytes_recvd, &request_id);
if (ret == 0) {
if (bytes_recvd > 0) {
desc = (struct vmpacket_descriptor *)buffer;
netvsc_process_raw_pkt(device,
channel,
net_device,
ndev,
request_id,
desc);
} else {
/*
* We are done for this pass.
*/
break;
}
put_pkt_raw(channel, desc);
need_to_commit = true;
}
} else if (ret == -ENOBUFS) {
if (bufferlen > NETVSC_PACKET_SIZE)
kfree(buffer);
/* Handle large packet */
buffer = kmalloc(bytes_recvd, GFP_ATOMIC);
if (buffer == NULL) {
/* Try again next time around */
netdev_err(ndev,
"unable to allocate buffer of size "
"(%d)!!\n", bytes_recvd);
break;
}
bufferlen = bytes_recvd;
}
} while (1);
if (bufferlen > NETVSC_PACKET_SIZE)
kfree(buffer);
if (need_to_commit)
commit_rd_index(channel);
netvsc_chk_recv_comp(net_device, channel, q_idx);
}
@ -1359,11 +1272,11 @@ void netvsc_channel_cb(void *context)
* netvsc_device_add - Callback when the device belonging to this
* driver is added
*/
int netvsc_device_add(struct hv_device *device, void *additional_info)
int netvsc_device_add(struct hv_device *device,
const struct netvsc_device_info *device_info)
{
int i, ret = 0;
int ring_size =
((struct netvsc_device_info *)additional_info)->ring_size;
int ring_size = device_info->ring_size;
struct netvsc_device *net_device;
struct net_device *ndev = hv_get_drvdata(device);
struct net_device_context *net_device_ctx = netdev_priv(ndev);
@ -1374,8 +1287,6 @@ int netvsc_device_add(struct hv_device *device, void *additional_info)
net_device->ring_size = ring_size;
set_per_channel_state(device->channel, net_device->cb_buffer);
/* Open the channel */
ret = vmbus_open(device->channel, ring_size * PAGE_SIZE,
ring_size * PAGE_SIZE, NULL, 0,
@ -1394,7 +1305,7 @@ int netvsc_device_add(struct hv_device *device, void *additional_info)
* opened.
*/
for (i = 0; i < VRSS_CHANNEL_MAX; i++)
net_device->chn_table[i] = device->channel;
net_device->chan_table[i].channel = device->channel;
/* Writing nvdev pointer unlocks netvsc_send(), make sure chn_table is
* populated.

drivers/net/hyperv/netvsc_drv.c

@ -42,21 +42,11 @@
#define RING_SIZE_MIN 64
#define LINKCHANGE_INT (2 * HZ)
#define NETVSC_HW_FEATURES (NETIF_F_RXCSUM | \
NETIF_F_SG | \
NETIF_F_TSO | \
NETIF_F_TSO6 | \
NETIF_F_HW_CSUM)
/* Restrict GSO size to account for NVGRE */
#define NETVSC_GSO_MAX_SIZE 62768
static int ring_size = 128;
module_param(ring_size, int, S_IRUGO);
MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");
static int max_num_vrss_chns = 8;
static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
NETIF_MSG_LINK | NETIF_MSG_IFUP |
NETIF_MSG_IFDOWN | NETIF_MSG_RX_ERR |
@ -145,7 +135,7 @@ static int netvsc_close(struct net_device *net)
while (true) {
aread = 0;
for (i = 0; i < nvdev->num_chn; i++) {
chn = nvdev->chn_table[i];
chn = nvdev->chan_table[i].channel;
if (!chn)
continue;
@ -201,22 +191,41 @@ static void *init_ppi_data(struct rndis_message *msg, u32 ppi_size,
return ppi;
}
/*
* Select queue for transmit.
*
* If a valid queue has already been assigned, then use that.
* Otherwise compute tx queue based on hash and the send table.
*
* This is basically similar to default (__netdev_pick_tx) with the added step
* of using the host send_table when no other queue has been assigned.
*
* TODO support XPS - but get_xps_queue not exported
*/
static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
void *accel_priv, select_queue_fallback_t fallback)
{
struct net_device_context *net_device_ctx = netdev_priv(ndev);
struct netvsc_device *nvsc_dev = net_device_ctx->nvdev;
u32 hash;
u16 q_idx = 0;
struct sock *sk = skb->sk;
int q_idx = sk_tx_queue_get(sk);
if (nvsc_dev == NULL || ndev->real_num_tx_queues <= 1)
return 0;
if (q_idx < 0 || skb->ooo_okay ||
q_idx >= ndev->real_num_tx_queues) {
u16 hash = __skb_tx_hash(ndev, skb, VRSS_SEND_TAB_SIZE);
int new_idx;
hash = skb_get_hash(skb);
q_idx = nvsc_dev->send_table[hash % VRSS_SEND_TAB_SIZE] %
ndev->real_num_tx_queues;
new_idx = nvsc_dev->send_table[hash]
% nvsc_dev->num_chn;
if (!nvsc_dev->chn_table[q_idx])
if (q_idx != new_idx && sk &&
sk_fullsock(sk) && rcu_access_pointer(sk->sk_dst_cache))
sk_tx_queue_set(sk, new_idx);
q_idx = new_idx;
}
if (unlikely(!nvsc_dev->chan_table[q_idx].channel))
q_idx = 0;
return q_idx;
@ -323,33 +332,25 @@ static int netvsc_get_slots(struct sk_buff *skb)
return slots + frag_slots;
}
static u32 get_net_transport_info(struct sk_buff *skb, u32 *trans_off)
static u32 net_checksum_info(struct sk_buff *skb)
{
u32 ret_val = TRANSPORT_INFO_NOT_IP;
if (skb->protocol == htons(ETH_P_IP)) {
struct iphdr *ip = ip_hdr(skb);
if ((eth_hdr(skb)->h_proto != htons(ETH_P_IP)) &&
(eth_hdr(skb)->h_proto != htons(ETH_P_IPV6))) {
goto not_ip;
}
*trans_off = skb_transport_offset(skb);
if ((eth_hdr(skb)->h_proto == htons(ETH_P_IP))) {
struct iphdr *iphdr = ip_hdr(skb);
if (iphdr->protocol == IPPROTO_TCP)
ret_val = TRANSPORT_INFO_IPV4_TCP;
else if (iphdr->protocol == IPPROTO_UDP)
ret_val = TRANSPORT_INFO_IPV4_UDP;
if (ip->protocol == IPPROTO_TCP)
return TRANSPORT_INFO_IPV4_TCP;
else if (ip->protocol == IPPROTO_UDP)
return TRANSPORT_INFO_IPV4_UDP;
} else {
if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
ret_val = TRANSPORT_INFO_IPV6_TCP;
struct ipv6hdr *ip6 = ipv6_hdr(skb);
if (ip6->nexthdr == IPPROTO_TCP)
return TRANSPORT_INFO_IPV6_TCP;
else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
ret_val = TRANSPORT_INFO_IPV6_UDP;
return TRANSPORT_INFO_IPV6_UDP;
}
not_ip:
return ret_val;
return TRANSPORT_INFO_NOT_IP;
}
static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
@ -362,11 +363,7 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
struct rndis_packet *rndis_pkt;
u32 rndis_msg_size;
struct rndis_per_packet_info *ppi;
struct ndis_tcp_ip_checksum_info *csum_info;
int hdr_offset;
u32 net_trans_info;
u32 hash;
u32 skb_length;
struct hv_page_buffer page_buf[MAX_PAGE_BUFFER_COUNT];
struct hv_page_buffer *pb = page_buf;
@ -376,7 +373,6 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
* more pages we try linearizing it.
*/
skb_length = skb->len;
num_data_pgs = netvsc_get_slots(skb) + 2;
if (unlikely(num_data_pgs > MAX_PAGE_BUFFER_COUNT)) {
@ -409,6 +405,8 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
packet->q_idx = skb_get_queue_mapping(skb);
packet->total_data_buflen = skb->len;
packet->total_bytes = skb->len;
packet->total_packets = 1;
rndis_msg = (struct rndis_message *)skb->head;
@ -445,13 +443,7 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
VLAN_PRIO_SHIFT;
}
net_trans_info = get_net_transport_info(skb, &hdr_offset);
/*
* Setup the sendside checksum offload only if this is not a
* GSO packet.
*/
if ((net_trans_info & (INFO_TCP | INFO_UDP)) && skb_is_gso(skb)) {
if (skb_is_gso(skb)) {
struct ndis_tcp_lso_info *lso_info;
rndis_msg_size += NDIS_LSO_PPI_SIZE;
@ -462,7 +454,7 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
ppi->ppi_offset);
lso_info->lso_v2_transmit.type = NDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE;
if (net_trans_info & (INFO_IPV4 << 16)) {
if (skb->protocol == htons(ETH_P_IP)) {
lso_info->lso_v2_transmit.ip_version =
NDIS_TCP_LARGE_SEND_OFFLOAD_IPV4;
ip_hdr(skb)->tot_len = 0;
@ -478,10 +470,12 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
&ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
}
lso_info->lso_v2_transmit.tcp_header_offset = hdr_offset;
lso_info->lso_v2_transmit.tcp_header_offset = skb_transport_offset(skb);
lso_info->lso_v2_transmit.mss = skb_shinfo(skb)->gso_size;
} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
if (net_trans_info & INFO_TCP) {
if (net_checksum_info(skb) & net_device_ctx->tx_checksum_mask) {
struct ndis_tcp_ip_checksum_info *csum_info;
rndis_msg_size += NDIS_CSUM_PPI_SIZE;
ppi = init_ppi_data(rndis_msg, NDIS_CSUM_PPI_SIZE,
TCPIP_CHKSUM_PKTINFO);
@ -489,15 +483,25 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
csum_info = (struct ndis_tcp_ip_checksum_info *)((void *)ppi +
ppi->ppi_offset);
if (net_trans_info & (INFO_IPV4 << 16))
csum_info->transmit.tcp_header_offset = skb_transport_offset(skb);
if (skb->protocol == htons(ETH_P_IP)) {
csum_info->transmit.is_ipv4 = 1;
else
if (ip_hdr(skb)->protocol == IPPROTO_TCP)
csum_info->transmit.tcp_checksum = 1;
else
csum_info->transmit.udp_checksum = 1;
} else {
csum_info->transmit.is_ipv6 = 1;
csum_info->transmit.tcp_checksum = 1;
csum_info->transmit.tcp_header_offset = hdr_offset;
if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
csum_info->transmit.tcp_checksum = 1;
else
csum_info->transmit.udp_checksum = 1;
}
} else {
/* UDP checksum (and other) offload is not supported. */
/* Can't do offload of this type of checksum */
if (skb_checksum_help(skb))
goto drop;
}
@ -513,15 +517,8 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
skb_tx_timestamp(skb);
ret = netvsc_send(net_device_ctx->device_ctx, packet,
rndis_msg, &pb, skb);
if (likely(ret == 0)) {
struct netvsc_stats *tx_stats = this_cpu_ptr(net_device_ctx->tx_stats);
u64_stats_update_begin(&tx_stats->syncp);
tx_stats->packets++;
tx_stats->bytes += skb_length;
u64_stats_update_end(&tx_stats->syncp);
if (likely(ret == 0))
return NETDEV_TX_OK;
}
if (ret == -EAGAIN) {
++net_device_ctx->eth_stats.tx_busy;
@ -541,7 +538,6 @@ no_memory:
++net_device_ctx->eth_stats.tx_no_memory;
goto drop;
}
/*
* netvsc_linkstatus_callback - Link up/down notification
*/
@ -593,13 +589,13 @@ void netvsc_linkstatus_callback(struct hv_device *device_obj,
}
static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
struct hv_netvsc_packet *packet,
struct ndis_tcp_ip_checksum_info *csum_info,
void *data, u16 vlan_tci)
const struct ndis_tcp_ip_checksum_info *csum_info,
const struct ndis_pkt_8021q_info *vlan,
void *data, u32 buflen)
{
struct sk_buff *skb;
skb = netdev_alloc_skb_ip_align(net, packet->total_data_buflen);
skb = netdev_alloc_skb_ip_align(net, buflen);
if (!skb)
return skb;
@ -607,8 +603,7 @@ static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
* Copy to skb. This copy is needed here since the memory pointed by
* hv_netvsc_packet cannot be deallocated
*/
memcpy(skb_put(skb, packet->total_data_buflen), data,
packet->total_data_buflen);
memcpy(skb_put(skb, buflen), data, buflen);
skb->protocol = eth_type_trans(skb, net);
@ -625,9 +620,12 @@ static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
skb->ip_summed = CHECKSUM_UNNECESSARY;
}
if (vlan_tci & VLAN_TAG_PRESENT)
if (vlan) {
u16 vlan_tci = vlan->vlanid | (vlan->pri << VLAN_PRIO_SHIFT);
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
vlan_tci);
}
return skb;
}
@ -636,18 +634,19 @@ static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
* netvsc_recv_callback - Callback when we receive a packet from the
* "wire" on the specified device.
*/
int netvsc_recv_callback(struct hv_device *device_obj,
struct hv_netvsc_packet *packet,
void **data,
struct ndis_tcp_ip_checksum_info *csum_info,
struct vmbus_channel *channel,
u16 vlan_tci)
int netvsc_recv_callback(struct net_device *net,
struct vmbus_channel *channel,
void *data, u32 len,
const struct ndis_tcp_ip_checksum_info *csum_info,
const struct ndis_pkt_8021q_info *vlan)
{
struct net_device *net = hv_get_drvdata(device_obj);
struct net_device_context *net_device_ctx = netdev_priv(net);
struct netvsc_device *net_device = net_device_ctx->nvdev;
struct net_device *vf_netdev;
struct sk_buff *skb;
struct netvsc_stats *rx_stats;
u16 q_idx = channel->offermsg.offer.sub_channel_index;
if (net->reg_state != NETREG_REGISTERED)
return NVSP_STAT_FAIL;
@ -665,7 +664,7 @@ int netvsc_recv_callback(struct hv_device *device_obj,
net = vf_netdev;
/* Allocate a skb - TODO direct I/O to pages? */
skb = netvsc_alloc_recv_skb(net, packet, csum_info, *data, vlan_tci);
skb = netvsc_alloc_recv_skb(net, csum_info, vlan, data, len);
if (unlikely(!skb)) {
++net->stats.rx_dropped;
rcu_read_unlock();
@ -673,18 +672,17 @@ int netvsc_recv_callback(struct hv_device *device_obj,
}
if (net != vf_netdev)
skb_record_rx_queue(skb,
channel->offermsg.offer.sub_channel_index);
skb_record_rx_queue(skb, q_idx);
/*
* Even if injecting the packet, record the statistics
* on the synthetic device because modifying the VF device
* statistics will not work correctly.
*/
rx_stats = this_cpu_ptr(net_device_ctx->rx_stats);
rx_stats = &net_device->chan_table[q_idx].rx_stats;
u64_stats_update_begin(&rx_stats->syncp);
rx_stats->packets++;
rx_stats->bytes += packet->total_data_buflen;
rx_stats->bytes += len;
if (skb->pkt_type == PACKET_BROADCAST)
++rx_stats->broadcast;
@ -697,7 +695,7 @@ int netvsc_recv_callback(struct hv_device *device_obj,
* is done.
* TODO - use NAPI?
*/
netif_rx(skb);
netif_receive_skb(skb);
rcu_read_unlock();
return 0;
@ -722,102 +720,76 @@ static void netvsc_get_channels(struct net_device *net,
}
}
static int netvsc_set_queues(struct net_device *net, struct hv_device *dev,
u32 num_chn)
{
struct netvsc_device_info device_info;
int ret;
memset(&device_info, 0, sizeof(device_info));
device_info.num_chn = num_chn;
device_info.ring_size = ring_size;
device_info.max_num_vrss_chns = num_chn;
ret = rndis_filter_device_add(dev, &device_info);
if (ret)
return ret;
ret = netif_set_real_num_tx_queues(net, num_chn);
if (ret)
return ret;
ret = netif_set_real_num_rx_queues(net, num_chn);
return ret;
}
static int netvsc_set_channels(struct net_device *net,
struct ethtool_channels *channels)
{
struct net_device_context *net_device_ctx = netdev_priv(net);
struct hv_device *dev = net_device_ctx->device_ctx;
struct netvsc_device *nvdev = net_device_ctx->nvdev;
struct netvsc_device_info device_info;
u32 num_chn;
u32 max_chn;
int ret = 0;
bool recovering = false;
unsigned int count = channels->combined_count;
int ret;
/* We do not support separate count for rx, tx, or other */
if (count == 0 ||
channels->rx_count || channels->tx_count || channels->other_count)
return -EINVAL;
if (count > net->num_tx_queues || count > net->num_rx_queues)
return -EINVAL;
if (net_device_ctx->start_remove || !nvdev || nvdev->destroy)
return -ENODEV;
num_chn = nvdev->num_chn;
max_chn = min_t(u32, nvdev->max_chn, num_online_cpus());
if (nvdev->nvsp_version < NVSP_PROTOCOL_VERSION_5) {
pr_info("vRSS unsupported before NVSP Version 5\n");
return -EINVAL;
}
/* We do not support rx, tx, or other */
if (!channels ||
channels->rx_count ||
channels->tx_count ||
channels->other_count ||
(channels->combined_count < 1))
if (nvdev->nvsp_version < NVSP_PROTOCOL_VERSION_5)
return -EINVAL;
if (channels->combined_count > max_chn) {
pr_info("combined channels too high, using %d\n", max_chn);
channels->combined_count = max_chn;
}
if (count > nvdev->max_chn)
return -EINVAL;
ret = netvsc_close(net);
if (ret)
goto out;
return ret;
do_set:
net_device_ctx->start_remove = true;
rndis_filter_device_remove(dev);
rndis_filter_device_remove(dev, nvdev);
nvdev->num_chn = channels->combined_count;
ret = netvsc_set_queues(net, dev, count);
if (ret == 0)
nvdev->num_chn = count;
else
netvsc_set_queues(net, dev, nvdev->num_chn);
memset(&device_info, 0, sizeof(device_info));
device_info.num_chn = nvdev->num_chn; /* passed to RNDIS */
device_info.ring_size = ring_size;
device_info.max_num_vrss_chns = max_num_vrss_chns;
ret = rndis_filter_device_add(dev, &device_info);
if (ret) {
if (recovering) {
netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
return ret;
}
goto recover;
}
nvdev = net_device_ctx->nvdev;
ret = netif_set_real_num_tx_queues(net, nvdev->num_chn);
if (ret) {
if (recovering) {
netdev_err(net, "could not set tx queue count (ret %d)\n", ret);
return ret;
}
goto recover;
}
ret = netif_set_real_num_rx_queues(net, nvdev->num_chn);
if (ret) {
if (recovering) {
netdev_err(net, "could not set rx queue count (ret %d)\n", ret);
return ret;
}
goto recover;
}
out:
netvsc_open(net);
net_device_ctx->start_remove = false;
/* We may have missed link change notifications */
schedule_delayed_work(&net_device_ctx->dwork, 0);
return ret;
recover:
/* If the above failed, we attempt to recover through the same
* process but with the original number of channels.
*/
netdev_err(net, "could not set channels, recovering\n");
recovering = true;
channels->combined_count = num_chn;
goto do_set;
}
static bool netvsc_validate_ethtool_ss_cmd(const struct ethtool_cmd *cmd)
@ -878,8 +850,7 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
struct netvsc_device *nvdev = ndevctx->nvdev;
struct hv_device *hdev = ndevctx->device_ctx;
struct netvsc_device_info device_info;
u32 num_chn;
int ret = 0;
int ret;
if (ndevctx->start_remove || !nvdev || nvdev->destroy)
return -ENODEV;
@ -888,17 +859,15 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
if (ret)
goto out;
num_chn = nvdev->num_chn;
ndevctx->start_remove = true;
rndis_filter_device_remove(hdev);
rndis_filter_device_remove(hdev, nvdev);
ndev->mtu = mtu;
memset(&device_info, 0, sizeof(device_info));
device_info.ring_size = ring_size;
device_info.num_chn = num_chn;
device_info.max_num_vrss_chns = max_num_vrss_chns;
device_info.num_chn = nvdev->num_chn;
device_info.max_num_vrss_chns = nvdev->num_chn;
rndis_filter_device_add(hdev, &device_info);
out:
@ -915,34 +884,39 @@ static void netvsc_get_stats64(struct net_device *net,
struct rtnl_link_stats64 *t)
{
struct net_device_context *ndev_ctx = netdev_priv(net);
int cpu;
struct netvsc_device *nvdev = ndev_ctx->nvdev;
int i;
for_each_possible_cpu(cpu) {
struct netvsc_stats *tx_stats = per_cpu_ptr(ndev_ctx->tx_stats,
cpu);
struct netvsc_stats *rx_stats = per_cpu_ptr(ndev_ctx->rx_stats,
cpu);
u64 tx_packets, tx_bytes, rx_packets, rx_bytes, rx_multicast;
if (!nvdev)
return;
for (i = 0; i < nvdev->num_chn; i++) {
const struct netvsc_channel *nvchan = &nvdev->chan_table[i];
const struct netvsc_stats *stats;
u64 packets, bytes, multicast;
unsigned int start;
stats = &nvchan->tx_stats;
do {
start = u64_stats_fetch_begin_irq(&tx_stats->syncp);
tx_packets = tx_stats->packets;
tx_bytes = tx_stats->bytes;
} while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start));
start = u64_stats_fetch_begin_irq(&stats->syncp);
packets = stats->packets;
bytes = stats->bytes;
} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
t->tx_bytes += bytes;
t->tx_packets += packets;
stats = &nvchan->rx_stats;
do {
start = u64_stats_fetch_begin_irq(&rx_stats->syncp);
rx_packets = rx_stats->packets;
rx_bytes = rx_stats->bytes;
rx_multicast = rx_stats->multicast + rx_stats->broadcast;
} while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start));
start = u64_stats_fetch_begin_irq(&stats->syncp);
packets = stats->packets;
bytes = stats->bytes;
multicast = stats->multicast + stats->broadcast;
} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
t->tx_bytes += tx_bytes;
t->tx_packets += tx_packets;
t->rx_bytes += rx_bytes;
t->rx_packets += rx_packets;
t->multicast += rx_multicast;
t->rx_bytes += bytes;
t->rx_packets += packets;
t->multicast += multicast;
}
t->tx_dropped = net->stats.tx_dropped;
@ -987,11 +961,19 @@ static const struct {
{ "tx_busy", offsetof(struct netvsc_ethtool_stats, tx_busy) },
};
#define NETVSC_GLOBAL_STATS_LEN ARRAY_SIZE(netvsc_stats)
/* 4 statistics per queue (rx/tx packets/bytes) */
#define NETVSC_QUEUE_STATS_LEN(dev) ((dev)->num_chn * 4)
static int netvsc_get_sset_count(struct net_device *dev, int string_set)
{
struct net_device_context *ndc = netdev_priv(dev);
struct netvsc_device *nvdev = ndc->nvdev;
switch (string_set) {
case ETH_SS_STATS:
return ARRAY_SIZE(netvsc_stats);
return NETVSC_GLOBAL_STATS_LEN + NETVSC_QUEUE_STATS_LEN(nvdev);
default:
return -EINVAL;
}
@ -1001,26 +983,109 @@ static void netvsc_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 *data)
{
struct net_device_context *ndc = netdev_priv(dev);
struct netvsc_device *nvdev = ndc->nvdev;
const void *nds = &ndc->eth_stats;
int i;
const struct netvsc_stats *qstats;
unsigned int start;
u64 packets, bytes;
int i, j;
for (i = 0; i < ARRAY_SIZE(netvsc_stats); i++)
for (i = 0; i < NETVSC_GLOBAL_STATS_LEN; i++)
data[i] = *(unsigned long *)(nds + netvsc_stats[i].offset);
for (j = 0; j < nvdev->num_chn; j++) {
qstats = &nvdev->chan_table[j].tx_stats;
do {
start = u64_stats_fetch_begin_irq(&qstats->syncp);
packets = qstats->packets;
bytes = qstats->bytes;
} while (u64_stats_fetch_retry_irq(&qstats->syncp, start));
data[i++] = packets;
data[i++] = bytes;
qstats = &nvdev->chan_table[j].rx_stats;
do {
start = u64_stats_fetch_begin_irq(&qstats->syncp);
packets = qstats->packets;
bytes = qstats->bytes;
} while (u64_stats_fetch_retry_irq(&qstats->syncp, start));
data[i++] = packets;
data[i++] = bytes;
}
}
static void netvsc_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
struct net_device_context *ndc = netdev_priv(dev);
struct netvsc_device *nvdev = ndc->nvdev;
u8 *p = data;
int i;
switch (stringset) {
case ETH_SS_STATS:
for (i = 0; i < ARRAY_SIZE(netvsc_stats); i++)
memcpy(data + i * ETH_GSTRING_LEN,
memcpy(p + i * ETH_GSTRING_LEN,
netvsc_stats[i].name, ETH_GSTRING_LEN);
p += i * ETH_GSTRING_LEN;
for (i = 0; i < nvdev->num_chn; i++) {
sprintf(p, "tx_queue_%u_packets", i);
p += ETH_GSTRING_LEN;
sprintf(p, "tx_queue_%u_bytes", i);
p += ETH_GSTRING_LEN;
sprintf(p, "rx_queue_%u_packets", i);
p += ETH_GSTRING_LEN;
sprintf(p, "rx_queue_%u_bytes", i);
p += ETH_GSTRING_LEN;
}
break;
}
}
static int
netvsc_get_rss_hash_opts(struct netvsc_device *nvdev,
struct ethtool_rxnfc *info)
{
info->data = RXH_IP_SRC | RXH_IP_DST;
switch (info->flow_type) {
case TCP_V4_FLOW:
case TCP_V6_FLOW:
info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
/* fallthrough */
case UDP_V4_FLOW:
case UDP_V6_FLOW:
case IPV4_FLOW:
case IPV6_FLOW:
break;
default:
info->data = 0;
break;
}
return 0;
}
static int
netvsc_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
u32 *rules)
{
struct net_device_context *ndc = netdev_priv(dev);
struct netvsc_device *nvdev = ndc->nvdev;
switch (info->cmd) {
case ETHTOOL_GRXRINGS:
info->data = nvdev->num_chn;
return 0;
case ETHTOOL_GRXFH:
return netvsc_get_rss_hash_opts(nvdev, info);
}
return -EOPNOTSUPP;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void netvsc_poll_controller(struct net_device *net)
{
@ -1030,6 +1095,68 @@ static void netvsc_poll_controller(struct net_device *net)
}
#endif
static u32 netvsc_get_rxfh_key_size(struct net_device *dev)
{
return NETVSC_HASH_KEYLEN;
}
static u32 netvsc_rss_indir_size(struct net_device *dev)
{
return ITAB_NUM;
}
static int netvsc_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
u8 *hfunc)
{
struct net_device_context *ndc = netdev_priv(dev);
struct netvsc_device *ndev = ndc->nvdev;
struct rndis_device *rndis_dev = ndev->extension;
int i;
if (hfunc)
*hfunc = ETH_RSS_HASH_TOP; /* Toeplitz */
if (indir) {
for (i = 0; i < ITAB_NUM; i++)
indir[i] = rndis_dev->ind_table[i];
}
if (key)
memcpy(key, rndis_dev->rss_key, NETVSC_HASH_KEYLEN);
return 0;
}
static int netvsc_set_rxfh(struct net_device *dev, const u32 *indir,
const u8 *key, const u8 hfunc)
{
struct net_device_context *ndc = netdev_priv(dev);
struct netvsc_device *ndev = ndc->nvdev;
struct rndis_device *rndis_dev = ndev->extension;
int i;
if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
if (indir) {
for (i = 0; i < ITAB_NUM; i++)
if (indir[i] >= dev->num_rx_queues)
return -EINVAL;
for (i = 0; i < ITAB_NUM; i++)
rndis_dev->ind_table[i] = indir[i];
}
if (!key) {
if (!indir)
return 0;
key = rndis_dev->rss_key;
}
return rndis_filter_set_rss_param(rndis_dev, key, ndev->num_chn);
}
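/* User-visible mapping (assuming the synthetic NIC is eth0):
 * 'ethtool -x eth0' reads the key and indirection table through
 * .get_rxfh, and 'ethtool -X eth0 hkey ...' writes them through
 * .set_rxfh. Only Toeplitz (ETH_RSS_HASH_TOP) is accepted, Toeplitz
 * being the hash configured by rndis_filter_set_rss_param().
 */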
static const struct ethtool_ops ethtool_ops = {
.get_drvinfo = netvsc_get_drvinfo,
.get_link = ethtool_op_get_link,
@ -1041,6 +1168,11 @@ static const struct ethtool_ops ethtool_ops = {
.get_ts_info = ethtool_op_get_ts_info,
.get_settings = netvsc_get_settings,
.set_settings = netvsc_set_settings,
.get_rxnfc = netvsc_get_rxnfc,
.get_rxfh_key_size = netvsc_get_rxfh_key_size,
.get_rxfh_indir_size = netvsc_rss_indir_size,
.get_rxfh = netvsc_get_rxfh,
.set_rxfh = netvsc_set_rxfh,
};
static const struct net_device_ops device_ops = {
@ -1161,15 +1293,6 @@ out_unlock:
rtnl_unlock();
}
static void netvsc_free_netdev(struct net_device *netdev)
{
struct net_device_context *net_device_ctx = netdev_priv(netdev);
free_percpu(net_device_ctx->tx_stats);
free_percpu(net_device_ctx->rx_stats);
free_netdev(netdev);
}
static struct net_device *get_netvsc_bymac(const u8 *mac)
{
struct net_device *dev;
@ -1306,7 +1429,6 @@ static int netvsc_vf_down(struct net_device *vf_netdev)
static int netvsc_unregister_vf(struct net_device *vf_netdev)
{
struct net_device *ndev;
struct netvsc_device *netvsc_dev;
struct net_device_context *net_device_ctx;
ndev = get_netvsc_byref(vf_netdev);
@ -1314,7 +1436,6 @@ static int netvsc_unregister_vf(struct net_device *vf_netdev)
return NOTIFY_DONE;
net_device_ctx = netdev_priv(ndev);
netvsc_dev = net_device_ctx->nvdev;
netdev_info(ndev, "VF unregistering: %s\n", vf_netdev->name);
@ -1334,7 +1455,7 @@ static int netvsc_probe(struct hv_device *dev,
int ret;
net = alloc_etherdev_mq(sizeof(struct net_device_context),
num_online_cpus());
VRSS_CHANNEL_MAX);
if (!net)
return -ENOMEM;
@ -1349,18 +1470,6 @@ static int netvsc_probe(struct hv_device *dev,
netdev_dbg(net, "netvsc msg_enable: %d\n",
net_device_ctx->msg_enable);
net_device_ctx->tx_stats = netdev_alloc_pcpu_stats(struct netvsc_stats);
if (!net_device_ctx->tx_stats) {
free_netdev(net);
return -ENOMEM;
}
net_device_ctx->rx_stats = netdev_alloc_pcpu_stats(struct netvsc_stats);
if (!net_device_ctx->rx_stats) {
free_percpu(net_device_ctx->tx_stats);
free_netdev(net);
return -ENOMEM;
}
hv_set_drvdata(dev, net);
net_device_ctx->start_remove = false;
@ -1372,10 +1481,6 @@ static int netvsc_probe(struct hv_device *dev,
INIT_LIST_HEAD(&net_device_ctx->reconfig_events);
net->netdev_ops = &device_ops;
net->hw_features = NETVSC_HW_FEATURES;
net->features = NETVSC_HW_FEATURES | NETIF_F_HW_VLAN_CTAG_TX;
net->ethtool_ops = &ethtool_ops;
SET_NETDEV_DEV(net, &dev->device);
@ -1385,20 +1490,26 @@ static int netvsc_probe(struct hv_device *dev,
/* Notify the netvsc driver of the new device */
memset(&device_info, 0, sizeof(device_info));
device_info.ring_size = ring_size;
device_info.max_num_vrss_chns = max_num_vrss_chns;
device_info.max_num_vrss_chns = min_t(u32, VRSS_CHANNEL_DEFAULT,
num_online_cpus());
ret = rndis_filter_device_add(dev, &device_info);
if (ret != 0) {
netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
netvsc_free_netdev(net);
free_netdev(net);
hv_set_drvdata(dev, NULL);
return ret;
}
memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN);
/* hw_features computed in rndis_filter_device_add */
net->features = net->hw_features |
NETIF_F_HIGHDMA | NETIF_F_SG |
NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
net->vlan_features = net->features;
nvdev = net_device_ctx->nvdev;
netif_set_real_num_tx_queues(net, nvdev->num_chn);
netif_set_real_num_rx_queues(net, nvdev->num_chn);
netif_set_gso_max_size(net, NETVSC_GSO_MAX_SIZE);
/* MTU range: 68 - 1500 or 65521 */
net->min_mtu = NETVSC_MTU_MIN;
@ -1410,8 +1521,8 @@ static int netvsc_probe(struct hv_device *dev,
ret = register_netdev(net);
if (ret != 0) {
pr_err("Unable to register netdev.\n");
rndis_filter_device_remove(dev);
netvsc_free_netdev(net);
rndis_filter_device_remove(dev, nvdev);
free_netdev(net);
}
return ret;
@ -1421,7 +1532,6 @@ static int netvsc_remove(struct hv_device *dev)
{
struct net_device *net;
struct net_device_context *ndev_ctx;
struct netvsc_device *net_device;
net = hv_get_drvdata(dev);
@ -1431,7 +1541,6 @@ static int netvsc_remove(struct hv_device *dev)
}
ndev_ctx = netdev_priv(net);
net_device = ndev_ctx->nvdev;
/* Avoid racing with netvsc_change_mtu()/netvsc_set_channels()
* removing the device.
@ -1452,11 +1561,11 @@ static int netvsc_remove(struct hv_device *dev)
* Call to the vsc driver to let it know that the device is being
* removed
*/
rndis_filter_device_remove(dev);
rndis_filter_device_remove(dev, ndev_ctx->nvdev);
hv_set_drvdata(dev, NULL);
netvsc_free_netdev(net);
free_netdev(net);
return 0;
}

drivers/net/hyperv/rndis_filter.c

@ -57,6 +57,14 @@ struct rndis_request {
u8 request_ext[RNDIS_EXT_LEN];
};
static const u8 netvsc_hash_key[NETVSC_HASH_KEYLEN] = {
0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
};
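/* These 40 bytes appear to be the widely used default Toeplitz RSS key;
 * the table is hoisted to the top of the file (and sized by the new
 * NETVSC_HASH_KEYLEN) presumably so it can seed rndis_device->rss_key,
 * which the new ethtool get_rxfh/set_rxfh paths read and update.
 */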
static struct rndis_device *get_rndis_device(void)
{
struct rndis_device *device;
@ -124,7 +132,7 @@ static void put_rndis_request(struct rndis_device *dev,
}
static void dump_rndis_message(struct hv_device *hv_dev,
struct rndis_message *rndis_msg)
const struct rndis_message *rndis_msg)
{
struct net_device *netdev = hv_get_drvdata(hv_dev);
@ -339,102 +347,78 @@ static inline void *rndis_get_ppi(struct rndis_packet *rpkt, u32 type)
return NULL;
}
static int rndis_filter_receive_data(struct rndis_device *dev,
struct rndis_message *msg,
struct hv_netvsc_packet *pkt,
void **data,
struct vmbus_channel *channel)
static int rndis_filter_receive_data(struct net_device *ndev,
struct rndis_device *dev,
struct rndis_message *msg,
struct vmbus_channel *channel,
void *data, u32 data_buflen)
{
struct rndis_packet *rndis_pkt;
struct rndis_packet *rndis_pkt = &msg->msg.pkt;
const struct ndis_tcp_ip_checksum_info *csum_info;
const struct ndis_pkt_8021q_info *vlan;
u32 data_offset;
struct ndis_pkt_8021q_info *vlan;
struct ndis_tcp_ip_checksum_info *csum_info;
u16 vlan_tci = 0;
struct net_device_context *net_device_ctx = netdev_priv(dev->ndev);
rndis_pkt = &msg->msg.pkt;
/* Remove the rndis header and pass it back up the stack */
data_offset = RNDIS_HEADER_SIZE + rndis_pkt->data_offset;
pkt->total_data_buflen -= data_offset;
data_buflen -= data_offset;
/*
* Make sure we got a valid RNDIS message, now total_data_buflen
* should be the data packet size plus the trailer padding size
*/
if (pkt->total_data_buflen < rndis_pkt->data_len) {
if (unlikely(data_buflen < rndis_pkt->data_len)) {
netdev_err(dev->ndev, "rndis message buffer "
"overflow detected (got %u, min %u)"
"...dropping this message!\n",
pkt->total_data_buflen, rndis_pkt->data_len);
data_buflen, rndis_pkt->data_len);
return NVSP_STAT_FAIL;
}
vlan = rndis_get_ppi(rndis_pkt, IEEE_8021Q_INFO);
/*
* Remove the rndis trailer padding from rndis packet message
* rndis_pkt->data_len tell us the real data length, we only copy
* the data packet to the stack, without the rndis trailer padding
*/
pkt->total_data_buflen = rndis_pkt->data_len;
*data = (void *)((unsigned long)(*data) + data_offset);
vlan = rndis_get_ppi(rndis_pkt, IEEE_8021Q_INFO);
if (vlan) {
vlan_tci = VLAN_TAG_PRESENT | vlan->vlanid |
(vlan->pri << VLAN_PRIO_SHIFT);
}
data = (void *)((unsigned long)data + data_offset);
csum_info = rndis_get_ppi(rndis_pkt, TCPIP_CHKSUM_PKTINFO);
return netvsc_recv_callback(net_device_ctx->device_ctx, pkt, data,
csum_info, channel, vlan_tci);
return netvsc_recv_callback(ndev, channel,
data, rndis_pkt->data_len,
csum_info, vlan);
}
int rndis_filter_receive(struct hv_device *dev,
struct hv_netvsc_packet *pkt,
void **data,
struct vmbus_channel *channel)
int rndis_filter_receive(struct net_device *ndev,
struct netvsc_device *net_dev,
struct hv_device *dev,
struct vmbus_channel *channel,
void *data, u32 buflen)
{
struct net_device *ndev = hv_get_drvdata(dev);
struct net_device_context *net_device_ctx = netdev_priv(ndev);
struct netvsc_device *net_dev = net_device_ctx->nvdev;
struct rndis_device *rndis_dev;
struct rndis_message *rndis_msg;
int ret = 0;
if (!net_dev) {
ret = NVSP_STAT_FAIL;
goto exit;
}
struct rndis_device *rndis_dev = net_dev->extension;
struct rndis_message *rndis_msg = data;
/* Make sure the rndis device state is initialized */
if (!net_dev->extension) {
netdev_err(ndev, "got rndis message but no rndis device - "
"dropping this message!\n");
ret = NVSP_STAT_FAIL;
goto exit;
if (unlikely(!rndis_dev)) {
netif_err(net_device_ctx, rx_err, ndev,
"got rndis message but no rndis device!\n");
return NVSP_STAT_FAIL;
}
rndis_dev = (struct rndis_device *)net_dev->extension;
if (rndis_dev->state == RNDIS_DEV_UNINITIALIZED) {
netdev_err(ndev, "got rndis message but rndis device "
"uninitialized...dropping this message!\n");
ret = NVSP_STAT_FAIL;
goto exit;
if (unlikely(rndis_dev->state == RNDIS_DEV_UNINITIALIZED)) {
netif_err(net_device_ctx, rx_err, ndev,
"got rndis message uninitialized\n");
return NVSP_STAT_FAIL;
}
rndis_msg = *data;
if (netif_msg_rx_err(net_device_ctx))
if (netif_msg_rx_status(net_device_ctx))
dump_rndis_message(dev, rndis_msg);
switch (rndis_msg->ndis_msg_type) {
case RNDIS_MSG_PACKET:
/* data msg */
ret = rndis_filter_receive_data(rndis_dev, rndis_msg, pkt,
data, channel);
break;
return rndis_filter_receive_data(ndev, rndis_dev, rndis_msg,
channel, data, buflen);
case RNDIS_MSG_INIT_C:
case RNDIS_MSG_QUERY_C:
case RNDIS_MSG_SET_C:
@ -454,8 +438,7 @@ int rndis_filter_receive(struct hv_device *dev,
break;
}
exit:
return ret;
return 0;
}
static int rndis_filter_query_device(struct rndis_device *dev, u32 oid,
@ -485,7 +468,35 @@ static int rndis_filter_query_device(struct rndis_device *dev, u32 oid,
query->info_buflen = 0;
query->dev_vc_handle = 0;
if (oid == OID_GEN_RECEIVE_SCALE_CAPABILITIES) {
if (oid == OID_TCP_OFFLOAD_HARDWARE_CAPABILITIES) {
struct net_device_context *ndevctx = netdev_priv(dev->ndev);
struct netvsc_device *nvdev = ndevctx->nvdev;
struct ndis_offload *hwcaps;
u32 nvsp_version = nvdev->nvsp_version;
u8 ndis_rev;
size_t size;
if (nvsp_version >= NVSP_PROTOCOL_VERSION_5) {
ndis_rev = NDIS_OFFLOAD_PARAMETERS_REVISION_3;
size = NDIS_OFFLOAD_SIZE;
} else if (nvsp_version >= NVSP_PROTOCOL_VERSION_4) {
ndis_rev = NDIS_OFFLOAD_PARAMETERS_REVISION_2;
size = NDIS_OFFLOAD_SIZE_6_1;
} else {
ndis_rev = NDIS_OFFLOAD_PARAMETERS_REVISION_1;
size = NDIS_OFFLOAD_SIZE_6_0;
}
request->request_msg.msg_len += size;
query->info_buflen = size;
hwcaps = (struct ndis_offload *)
((unsigned long)query + query->info_buf_offset);
hwcaps->header.type = NDIS_OBJECT_TYPE_OFFLOAD;
hwcaps->header.revision = ndis_rev;
hwcaps->header.size = size;
} else if (oid == OID_GEN_RECEIVE_SCALE_CAPABILITIES) {
struct ndis_recv_scale_cap *cap;
request->request_msg.msg_len +=
@ -526,6 +537,44 @@ cleanup:
return ret;
}
/* Get the hardware offload capabilities */
static int
rndis_query_hwcaps(struct rndis_device *dev, struct ndis_offload *caps)
{
u32 caps_len = sizeof(*caps);
int ret;
memset(caps, 0, sizeof(*caps));
ret = rndis_filter_query_device(dev,
OID_TCP_OFFLOAD_HARDWARE_CAPABILITIES,
caps, &caps_len);
if (ret)
return ret;
if (caps->header.type != NDIS_OBJECT_TYPE_OFFLOAD) {
netdev_warn(dev->ndev, "invalid NDIS objtype %#x\n",
caps->header.type);
return -EINVAL;
}
if (caps->header.revision < NDIS_OFFLOAD_PARAMETERS_REVISION_1) {
netdev_warn(dev->ndev, "invalid NDIS objrev %x\n",
caps->header.revision);
return -EINVAL;
}
if (caps->header.size > caps_len ||
caps->header.size < NDIS_OFFLOAD_SIZE_6_0) {
netdev_warn(dev->ndev,
"invalid NDIS objsize %u, data size %u\n",
caps->header.size, caps_len);
return -EINVAL;
}
return 0;
}
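
/* The checks above validate the common NDIS object header that
 * prefixes struct ndis_offload; for reference, its assumed shape
 * (the actual definition lives in hyperv_net.h, not in this hunk):
 */
struct ndis_object_header {
	u8  type;	/* NDIS_OBJECT_TYPE_OFFLOAD (0xa7) here */
	u8  revision;	/* NDIS_OFFLOAD_PARAMETERS_REVISION_1..3 */
	u16 size;	/* total object size; depends on NDIS version */
};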
static int rndis_filter_query_device_mac(struct rndis_device *dev)
{
u32 size = ETH_ALEN;
@ -663,23 +712,15 @@ cleanup:
return ret;
}
static const u8 netvsc_hash_key[] = {
0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
};
#define HASH_KEYLEN ARRAY_SIZE(netvsc_hash_key)
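
/* This 40-byte key is the widely used default Toeplitz RSS key. For
 * illustration, a sketch of the Toeplitz hash the host is expected to
 * compute with it; the hashing itself happens on the Hyper-V host,
 * the driver only supplies the key and the indirection table.
 */
static u32 toeplitz_hash(const u8 *key, const u8 *data, int len)
{
	u32 hash = 0;
	u32 window = ((u32)key[0] << 24) | ((u32)key[1] << 16) |
		     ((u32)key[2] << 8) | key[3];
	int i, b;

	for (i = 0; i < len; i++) {	/* e.g. 12 bytes for an IPv4 4-tuple */
		for (b = 7; b >= 0; b--) {
			if (data[i] & (1 << b))
				hash ^= window;
			/* slide the 32-bit key window one bit left */
			window = (window << 1) | ((key[i + 4] >> b) & 1);
		}
	}

	return hash;
}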
static int rndis_filter_set_rss_param(struct rndis_device *rdev, int num_queue)
int rndis_filter_set_rss_param(struct rndis_device *rdev,
const u8 *rss_key, int num_queue)
{
struct net_device *ndev = rdev->ndev;
struct rndis_request *request;
struct rndis_set_request *set;
struct rndis_set_complete *set_complete;
u32 extlen = sizeof(struct ndis_recv_scale_param) +
4*ITAB_NUM + HASH_KEYLEN;
4 * ITAB_NUM + NETVSC_HASH_KEYLEN;
struct ndis_recv_scale_param *rssp;
u32 *itab;
u8 *keyp;
@ -707,19 +748,18 @@ static int rndis_filter_set_rss_param(struct rndis_device *rdev, int num_queue)
NDIS_HASH_TCP_IPV6;
rssp->indirect_tabsize = 4*ITAB_NUM;
rssp->indirect_taboffset = sizeof(struct ndis_recv_scale_param);
rssp->hashkey_size = HASH_KEYLEN;
rssp->hashkey_size = NETVSC_HASH_KEYLEN;
rssp->kashkey_offset = rssp->indirect_taboffset +
rssp->indirect_tabsize;
/* Set indirection table entries */
itab = (u32 *)(rssp + 1);
for (i = 0; i < ITAB_NUM; i++)
itab[i] = i % num_queue;
itab[i] = rdev->ind_table[i];
/* Set hash key values */
keyp = (u8 *)((unsigned long)rssp + rssp->kashkey_offset);
for (i = 0; i < HASH_KEYLEN; i++)
keyp[i] = netvsc_hash_key[i];
memcpy(keyp, rss_key, NETVSC_HASH_KEYLEN);
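
/*
 * Layout of the RSS set-request extension built above (offsets as
 * computed from indirect_taboffset and kashkey_offset):
 *
 *   +------------------------------+ <- rssp
 *   | struct ndis_recv_scale_param |
 *   +------------------------------+ <- rssp + indirect_taboffset
 *   | u32 itab[ITAB_NUM]           |    4 * ITAB_NUM bytes
 *   +------------------------------+ <- rssp + kashkey_offset
 *   | u8 key[NETVSC_HASH_KEYLEN]   |    40 bytes
 *   +------------------------------+
 */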
ret = rndis_filter_send_request(rdev, request);
if (ret != 0)
@ -727,7 +767,9 @@ static int rndis_filter_set_rss_param(struct rndis_device *rdev, int num_queue)
wait_for_completion(&request->wait_event);
set_complete = &request->response_msg.msg.set_complete;
if (set_complete->status != RNDIS_STATUS_SUCCESS) {
if (set_complete->status == RNDIS_STATUS_SUCCESS)
memcpy(rdev->rss_key, rss_key, NETVSC_HASH_KEYLEN);
else {
netdev_err(ndev, "Fail to set RSS parameters:0x%x\n",
set_complete->status);
ret = -EINVAL;
@ -778,7 +820,6 @@ int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter)
struct rndis_request *request;
struct rndis_set_request *set;
struct rndis_set_complete *set_complete;
u32 status;
int ret;
request = get_rndis_request(dev, RNDIS_MSG_SET,
@ -805,8 +846,6 @@ int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter)
wait_for_completion(&request->wait_event);
set_complete = &request->response_msg.msg.set_complete;
status = set_complete->status;
cleanup:
if (request)
put_rndis_request(dev, request);
@ -864,6 +903,23 @@ cleanup:
return ret;
}
static bool netvsc_device_idle(const struct netvsc_device *nvdev)
{
int i;
if (atomic_read(&nvdev->num_outstanding_recvs) > 0)
return false;
for (i = 0; i < nvdev->num_chn; i++) {
const struct netvsc_channel *nvchan = &nvdev->chan_table[i];
if (atomic_read(&nvchan->queue_sends) > 0)
return false;
}
return true;
}
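
/* netvsc_device_idle() is the predicate for the wait_drain wait used
 * below. Hypothetical sketch of the send-completion counterpart
 * (it lives in netvsc.c, not in this diff): dropping the per-channel
 * queue_sends count wakes any waiter blocked on wait_drain, e.g.
 * rndis_filter_halt_device() below.
 */
static void send_complete(struct netvsc_device *nvdev, u16 q_idx)
{
	struct netvsc_channel *nvchan = &nvdev->chan_table[q_idx];

	if (atomic_dec_return(&nvchan->queue_sends) == 0)
		wake_up(&nvdev->wait_drain);
}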
static void rndis_filter_halt_device(struct rndis_device *dev)
{
struct rndis_request *request;
@ -894,9 +950,7 @@ cleanup:
spin_unlock_irqrestore(&hdev->channel->inbound_lock, flags);
/* Wait for all send completions */
wait_event(nvdev->wait_drain,
atomic_read(&nvdev->num_outstanding_sends) == 0 &&
atomic_read(&nvdev->num_outstanding_recvs) == 0);
wait_event(nvdev->wait_drain, netvsc_device_idle(nvdev));
if (request)
put_rndis_request(dev, request);
@ -948,18 +1002,15 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc)
if (chn_index >= nvscdev->num_chn)
return;
set_per_channel_state(new_sc, nvscdev->sub_cb_buf + (chn_index - 1) *
NETVSC_PACKET_SIZE);
nvscdev->mrc[chn_index].buf = vzalloc(NETVSC_RECVSLOT_MAX *
sizeof(struct recv_comp_data));
nvscdev->chan_table[chn_index].mrc.buf
= vzalloc(NETVSC_RECVSLOT_MAX * sizeof(struct recv_comp_data));
ret = vmbus_open(new_sc, nvscdev->ring_size * PAGE_SIZE,
nvscdev->ring_size * PAGE_SIZE, NULL, 0,
netvsc_channel_cb, new_sc);
if (ret == 0)
nvscdev->chn_table[chn_index] = new_sc;
nvscdev->chan_table[chn_index].channel = new_sc;
spin_lock_irqsave(&nvscdev->sc_lock, flags);
nvscdev->num_sc_offered--;
@ -969,24 +1020,25 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc)
}
int rndis_filter_device_add(struct hv_device *dev,
void *additional_info)
struct netvsc_device_info *device_info)
{
int ret;
struct net_device *net = hv_get_drvdata(dev);
struct net_device_context *net_device_ctx = netdev_priv(net);
struct netvsc_device *net_device;
struct rndis_device *rndis_device;
struct netvsc_device_info *device_info = additional_info;
struct ndis_offload hwcaps;
struct ndis_offload_params offloads;
struct nvsp_message *init_packet;
struct ndis_recv_scale_cap rsscap;
u32 rsscap_size = sizeof(struct ndis_recv_scale_cap);
unsigned int gso_max_size = GSO_MAX_SIZE;
u32 mtu, size;
u32 num_rss_qs;
u32 sc_delta;
const struct cpumask *node_cpu_mask;
u32 num_possible_rss_qs;
unsigned long flags;
int i, ret;
rndis_device = get_rndis_device();
if (!rndis_device)
@ -997,7 +1049,7 @@ int rndis_filter_device_add(struct hv_device *dev,
* NOTE! Once the channel is created, we may get a receive callback
* (RndisFilterOnReceive()) before this call is completed
*/
ret = netvsc_device_add(dev, additional_info);
ret = netvsc_device_add(dev, device_info);
if (ret != 0) {
kfree(rndis_device);
return ret;
@ -1016,7 +1068,7 @@ int rndis_filter_device_add(struct hv_device *dev,
/* Send the rndis initialization message */
ret = rndis_filter_init_device(rndis_device);
if (ret != 0) {
rndis_filter_device_remove(dev);
rndis_filter_device_remove(dev, net_device);
return ret;
}
@ -1031,25 +1083,71 @@ int rndis_filter_device_add(struct hv_device *dev,
/* Get the mac address */
ret = rndis_filter_query_device_mac(rndis_device);
if (ret != 0) {
rndis_filter_device_remove(dev);
rndis_filter_device_remove(dev, net_device);
return ret;
}
memcpy(device_info->mac_adr, rndis_device->hw_mac_adr, ETH_ALEN);
/* Turn on the offloads; the host supports all of the relevant
* offloads.
*/
/* Find HW offload capabilities */
ret = rndis_query_hwcaps(rndis_device, &hwcaps);
if (ret != 0) {
rndis_filter_device_remove(dev, net_device);
return ret;
}
/* A value of zero means "no change"; now turn on what we want. */
memset(&offloads, 0, sizeof(struct ndis_offload_params));
/* A value of zero means "no change"; now turn on what we
* want.
*/
offloads.ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
offloads.tcp_ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
offloads.udp_ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
offloads.tcp_ip_v6_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
offloads.udp_ip_v6_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
offloads.lso_v2_ipv4 = NDIS_OFFLOAD_PARAMETERS_LSOV2_ENABLED;
/* Linux always computes the IP header checksum in the kernel, so do not offload it */
offloads.ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_DISABLED;
/* Compute tx offload settings based on hw capabilities */
net->hw_features = NETIF_F_RXCSUM;
if ((hwcaps.csum.ip4_txcsum & NDIS_TXCSUM_ALL_TCP4) == NDIS_TXCSUM_ALL_TCP4) {
/* Can checksum TCP */
net->hw_features |= NETIF_F_IP_CSUM;
net_device_ctx->tx_checksum_mask |= TRANSPORT_INFO_IPV4_TCP;
offloads.tcp_ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
if (hwcaps.lsov2.ip4_encap & NDIS_OFFLOAD_ENCAP_8023) {
offloads.lso_v2_ipv4 = NDIS_OFFLOAD_PARAMETERS_LSOV2_ENABLED;
net->hw_features |= NETIF_F_TSO;
if (hwcaps.lsov2.ip4_maxsz < gso_max_size)
gso_max_size = hwcaps.lsov2.ip4_maxsz;
}
if (hwcaps.csum.ip4_txcsum & NDIS_TXCSUM_CAP_UDP4) {
offloads.udp_ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
net_device_ctx->tx_checksum_mask |= TRANSPORT_INFO_IPV4_UDP;
}
}
if ((hwcaps.csum.ip6_txcsum & NDIS_TXCSUM_ALL_TCP6) == NDIS_TXCSUM_ALL_TCP6) {
net->hw_features |= NETIF_F_IPV6_CSUM;
offloads.tcp_ip_v6_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
net_device_ctx->tx_checksum_mask |= TRANSPORT_INFO_IPV6_TCP;
if ((hwcaps.lsov2.ip6_encap & NDIS_OFFLOAD_ENCAP_8023) &&
(hwcaps.lsov2.ip6_opts & NDIS_LSOV2_CAP_IP6) == NDIS_LSOV2_CAP_IP6) {
offloads.lso_v2_ipv6 = NDIS_OFFLOAD_PARAMETERS_LSOV2_ENABLED;
net->hw_features |= NETIF_F_TSO6;
if (hwcaps.lsov2.ip6_maxsz < gso_max_size)
gso_max_size = hwcaps.lsov2.ip6_maxsz;
}
if (hwcaps.csum.ip6_txcsum & NDIS_TXCSUM_CAP_UDP6) {
offloads.udp_ip_v6_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
net_device_ctx->tx_checksum_mask |= TRANSPORT_INFO_IPV6_UDP;
}
}
netif_set_gso_max_size(net, gso_max_size);
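
/* Hedged sketch of how the tx_checksum_mask accumulated above could
 * gate checksum offload on the transmit path; the helper name is
 * hypothetical, and the real check (in netvsc_drv.c) handles more
 * cases than the TCP/UDP ones shown here.
 */
static bool can_offload_csum(const struct net_device_context *ndc,
			     const struct sk_buff *skb)
{
	u32 info = 0;

	if (skb->protocol == htons(ETH_P_IP))
		info = (ip_hdr(skb)->protocol == IPPROTO_TCP) ?
			TRANSPORT_INFO_IPV4_TCP : TRANSPORT_INFO_IPV4_UDP;
	else if (skb->protocol == htons(ETH_P_IPV6))
		info = (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP) ?
			TRANSPORT_INFO_IPV6_TCP : TRANSPORT_INFO_IPV6_UDP;

	return (ndc->tx_checksum_mask & info) != 0;
}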
ret = rndis_filter_set_offload_params(net, &offloads);
if (ret)
@ -1094,19 +1192,16 @@ int rndis_filter_device_add(struct hv_device *dev,
net_device->num_chn = min(num_possible_rss_qs, num_rss_qs);
num_rss_qs = net_device->num_chn - 1;
for (i = 0; i < ITAB_NUM; i++)
rndis_device->ind_table[i] = ethtool_rxfh_indir_default(i,
net_device->num_chn);
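
/* ethtool_rxfh_indir_default() spreads indirection-table slots
 * round-robin over the receive queues; its effective behavior, so
 * with num_chn = 4 the default table reads 0 1 2 3 0 1 2 3 ...
 */
static inline u32 rxfh_indir_default(u32 index, u32 n_rx_rings)
{
	return index % n_rx_rings;
}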
net_device->num_sc_offered = num_rss_qs;
if (net_device->num_chn == 1)
goto out;
net_device->sub_cb_buf = vzalloc((net_device->num_chn - 1) *
NETVSC_PACKET_SIZE);
if (!net_device->sub_cb_buf) {
net_device->num_chn = 1;
dev_info(&dev->device, "No memory for subchannels.\n");
goto out;
}
vmbus_set_sc_create_callback(dev->channel, netvsc_sc_open);
init_packet = &net_device->channel_init_pkt;
@ -1132,7 +1227,8 @@ int rndis_filter_device_add(struct hv_device *dev,
net_device->num_chn = 1 +
init_packet->msg.v5_msg.subchn_comp.num_subchannels;
ret = rndis_filter_set_rss_param(rndis_device, net_device->num_chn);
ret = rndis_filter_set_rss_param(rndis_device, netvsc_hash_key,
net_device->num_chn);
/*
* Set the number of sub-channels to be received.
@ -1152,13 +1248,13 @@ out:
return 0; /* return 0 because primary channel can be used alone */
err_dev_remv:
rndis_filter_device_remove(dev);
rndis_filter_device_remove(dev, net_device);
return ret;
}
void rndis_filter_device_remove(struct hv_device *dev)
void rndis_filter_device_remove(struct hv_device *dev,
struct netvsc_device *net_dev)
{
struct netvsc_device *net_dev = hv_device_to_netvsc_device(dev);
struct rndis_device *rndis_dev = net_dev->extension;
/* If not all subchannel offers are complete, wait for them until