/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/rtnetlink.h>
#include <linux/netpoll.h>
#include <linux/reciprocal_div.h>

#include <net/arp.h>
#include <net/route.h>
#include <net/sock.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>

#include "hyperv_net.h"

#define RING_SIZE_MIN	64

#define LINKCHANGE_INT	(2 * HZ)
#define VF_TAKEOVER_INT	(HZ / 10)

static unsigned int ring_size __ro_after_init = 128;
module_param(ring_size, uint, S_IRUGO);
MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");
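/*
 * Example (hypothetical invocation): the ring buffer size can be set
 * at load time, e.g. "modprobe hv_netvsc ring_size=512" for a
 * 512-page VMBus ring per channel.
 */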
unsigned int netvsc_ring_bytes __ro_after_init;
struct reciprocal_value netvsc_ring_reciprocal __ro_after_init;

static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
				NETIF_MSG_LINK | NETIF_MSG_IFUP |
				NETIF_MSG_IFDOWN | NETIF_MSG_RX_ERR |
				NETIF_MSG_TX_ERR;

static int debug = -1;
module_param(debug, int, S_IRUGO);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

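/*
 * Propagate promiscuous/allmulticast flag changes on the synthetic
 * device to the slave VF device, so both receive paths see the same
 * traffic classes.
 */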
static void netvsc_change_rx_flags(struct net_device *net, int change)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	struct net_device *vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
	int inc;

	if (!vf_netdev)
		return;

	if (change & IFF_PROMISC) {
		inc = (net->flags & IFF_PROMISC) ? 1 : -1;
		dev_set_promiscuity(vf_netdev, inc);
	}

	if (change & IFF_ALLMULTI) {
		inc = (net->flags & IFF_ALLMULTI) ? 1 : -1;
		dev_set_allmulti(vf_netdev, inc);
	}
}

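/*
 * Synchronize the unicast/multicast filter lists to the VF (if present)
 * and push the updated RNDIS packet filter to the host.
 */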
static void netvsc_set_rx_mode(struct net_device *net)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	struct net_device *vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
	struct netvsc_device *nvdev = rtnl_dereference(ndev_ctx->nvdev);

	if (vf_netdev) {
		dev_uc_sync(vf_netdev, net);
		dev_mc_sync(vf_netdev, net);
	}

	rndis_filter_update(nvdev);
}

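/*
 * ndo_open: bring up the RNDIS filter, enable carrier if the host
 * reports link up, and transparently open the slave VF if one is bound.
 */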
static int netvsc_open(struct net_device *net)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	struct net_device *vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
	struct netvsc_device *nvdev = rtnl_dereference(ndev_ctx->nvdev);
	struct rndis_device *rdev;
	int ret = 0;

	netif_carrier_off(net);

	/* Open up the device */
	ret = rndis_filter_open(nvdev);
	if (ret != 0) {
		netdev_err(net, "unable to open device (ret %d).\n", ret);
		return ret;
	}

	rdev = nvdev->extension;
	if (!rdev->link_state) {
		netif_carrier_on(net);
		netif_tx_wake_all_queues(net);
	}

	if (vf_netdev) {
		/* Setting the synthetic device up transparently sets the
		 * slave as up. If open fails, then the slave will still
		 * be offline (and not used).
		 */
		ret = dev_open(vf_netdev);
		if (ret)
			netdev_warn(net,
				    "unable to open slave: %s: %d\n",
				    vf_netdev->name, ret);
	}
	return 0;
}

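/*
 * ndo_stop: close the RNDIS filter and wait, with exponential backoff
 * capped at retry_max attempts, until both directions of every channel
 * ring buffer have drained before declaring the device closed.
 */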
static int netvsc_close(struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct net_device *vf_netdev
		= rtnl_dereference(net_device_ctx->vf_netdev);
	struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
	int ret = 0;
	u32 aread, i, msec = 10, retry = 0, retry_max = 20;
	struct vmbus_channel *chn;

	netif_tx_disable(net);

	/* No need to close rndis filter if it is removed already */
	if (!nvdev)
		goto out;

	ret = rndis_filter_close(nvdev);
	if (ret != 0) {
		netdev_err(net, "unable to close device (ret %d).\n", ret);
		return ret;
	}

	/* Ensure pending bytes in ring are read */
	while (true) {
		aread = 0;
		for (i = 0; i < nvdev->num_chn; i++) {
			chn = nvdev->chan_table[i].channel;
			if (!chn)
				continue;

			aread = hv_get_bytes_to_read(&chn->inbound);
			if (aread)
				break;

			aread = hv_get_bytes_to_read(&chn->outbound);
			if (aread)
				break;
		}

		retry++;
		if (retry > retry_max || aread == 0)
			break;

		msleep(msec);

		if (msec < 1000)
			msec *= 2;
	}

	if (aread) {
		netdev_err(net, "Ring buffer not empty after closing rndis\n");
		ret = -ETIMEDOUT;
	}

out:
	if (vf_netdev)
		dev_close(vf_netdev);

	return ret;
}

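/*
 * Reserve ppi_size bytes of per-packet info (PPI) in the RNDIS message
 * and return a pointer to the caller-usable payload area just past the
 * PPI header.
 */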
static inline void *init_ppi_data(struct rndis_message *msg,
				  u32 ppi_size, u32 pkt_type)
{
	struct rndis_packet *rndis_pkt = &msg->msg.pkt;
	struct rndis_per_packet_info *ppi;

	rndis_pkt->data_offset += ppi_size;
	ppi = (void *)rndis_pkt + rndis_pkt->per_pkt_info_offset
		+ rndis_pkt->per_pkt_info_len;

	ppi->size = ppi_size;
	ppi->type = pkt_type;
	ppi->ppi_offset = sizeof(struct rndis_per_packet_info);

	rndis_pkt->per_pkt_info_len += ppi_size;

	return ppi + 1;
}

/* Azure hosts don't support non-TCP port numbers in hashing for fragmented
 * packets. We can use ethtool to change UDP hash level when necessary.
 */
static inline u32 netvsc_get_hash(
	struct sk_buff *skb,
	const struct net_device_context *ndc)
{
	struct flow_keys flow;
	u32 hash, pkt_proto = 0;
	static u32 hashrnd __read_mostly;

	net_get_random_once(&hashrnd, sizeof(hashrnd));

	if (!skb_flow_dissect_flow_keys(skb, &flow, 0))
		return 0;

	switch (flow.basic.ip_proto) {
	case IPPROTO_TCP:
		if (flow.basic.n_proto == htons(ETH_P_IP))
			pkt_proto = HV_TCP4_L4HASH;
		else if (flow.basic.n_proto == htons(ETH_P_IPV6))
			pkt_proto = HV_TCP6_L4HASH;

		break;

	case IPPROTO_UDP:
		if (flow.basic.n_proto == htons(ETH_P_IP))
			pkt_proto = HV_UDP4_L4HASH;
		else if (flow.basic.n_proto == htons(ETH_P_IPV6))
			pkt_proto = HV_UDP6_L4HASH;

		break;
	}

	if (pkt_proto & ndc->l4_hash) {
		return skb_get_hash(skb);
	} else {
		if (flow.basic.n_proto == htons(ETH_P_IP))
			hash = jhash2((u32 *)&flow.addrs.v4addrs, 2, hashrnd);
		else if (flow.basic.n_proto == htons(ETH_P_IPV6))
			hash = jhash2((u32 *)&flow.addrs.v6addrs, 8, hashrnd);
		else
			hash = 0;

		skb_set_hash(skb, hash, PKT_HASH_TYPE_L3);
	}

	return hash;
}

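/*
 * Map the flow hash through the host-supplied indirection table
 * (tx_table) to pick a transmit queue, and cache the result in the
 * socket when it is safe to do so.
 */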
static inline int netvsc_get_tx_queue(struct net_device *ndev,
				      struct sk_buff *skb, int old_idx)
{
	const struct net_device_context *ndc = netdev_priv(ndev);
	struct sock *sk = skb->sk;
	int q_idx;

	q_idx = ndc->tx_table[netvsc_get_hash(skb, ndc) &
			      (VRSS_SEND_TAB_SIZE - 1)];

	/* If queue index changed record the new value */
	if (q_idx != old_idx &&
	    sk && sk_fullsock(sk) && rcu_access_pointer(sk->sk_dst_cache))
		sk_tx_queue_set(sk, q_idx);

	return q_idx;
}

/*
 * Select queue for transmit.
 *
 * If a valid queue has already been assigned, then use that.
 * Otherwise compute tx queue based on hash and the send table.
 *
 * This is basically similar to default (__netdev_pick_tx) with the added step
 * of using the host send_table when no other queue has been assigned.
 *
 * TODO support XPS - but get_xps_queue not exported
 */
static u16 netvsc_pick_tx(struct net_device *ndev, struct sk_buff *skb)
{
	int q_idx = sk_tx_queue_get(skb->sk);

	if (q_idx < 0 || skb->ooo_okay || q_idx >= ndev->real_num_tx_queues) {
		/* If forwarding a packet, we use the recorded queue when
		 * available for better cache locality.
		 */
		if (skb_rx_queue_recorded(skb))
			q_idx = skb_get_rx_queue(skb);
		else
			q_idx = netvsc_get_tx_queue(ndev, skb, q_idx);
	}

	return q_idx;
}

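/*
 * ndo_select_queue: when a VF is bound, defer to the VF driver's own
 * queue selection and record the result; otherwise fall back to the
 * send-table based selection above.
 */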
static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
			       void *accel_priv,
			       select_queue_fallback_t fallback)
{
	struct net_device_context *ndc = netdev_priv(ndev);
	struct net_device *vf_netdev;
	u16 txq;

	rcu_read_lock();
	vf_netdev = rcu_dereference(ndc->vf_netdev);
	if (vf_netdev) {
		const struct net_device_ops *vf_ops = vf_netdev->netdev_ops;

		if (vf_ops->ndo_select_queue)
			txq = vf_ops->ndo_select_queue(vf_netdev, skb,
						       accel_priv, fallback);
		else
			txq = fallback(vf_netdev, skb);

		/* Record the queue selected by VF so that it can be
		 * used for common case where VF has more queues than
		 * the synthetic device.
		 */
		qdisc_skb_cb(skb)->slave_dev_queue_mapping = txq;
	} else {
		txq = netvsc_pick_tx(ndev, skb);
	}
	rcu_read_unlock();

	while (unlikely(txq >= ndev->real_num_tx_queues))
		txq -= ndev->real_num_tx_queues;

	return txq;
}

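/*
 * Fill page buffer descriptors (pfn/offset/len) for a virtually
 * contiguous buffer, splitting it on page boundaries. Returns the
 * number of slots consumed.
 */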
static u32 fill_pg_buf(struct page *page, u32 offset, u32 len,
		       struct hv_page_buffer *pb)
{
	int j = 0;

	/* Deal with compound pages by ignoring the unused part
	 * of the page.
	 */
	page += (offset >> PAGE_SHIFT);
	offset &= ~PAGE_MASK;

	while (len > 0) {
		unsigned long bytes;

		bytes = PAGE_SIZE - offset;
		if (bytes > len)
			bytes = len;
		pb[j].pfn = page_to_pfn(page);
		pb[j].offset = offset;
		pb[j].len = bytes;

		offset += bytes;
		len -= bytes;

		if (offset == PAGE_SIZE && len) {
			page++;
			offset = 0;
			j++;
		}
	}

	return j + 1;
}

static u32 init_page_array(void *hdr, u32 len, struct sk_buff *skb,
			   struct hv_netvsc_packet *packet,
			   struct hv_page_buffer *pb)
{
	u32 slots_used = 0;
	char *data = skb->data;
	int frags = skb_shinfo(skb)->nr_frags;
	int i;

	/* The packet is laid out thus:
	 * 1. hdr: RNDIS header and PPI
	 * 2. skb linear data
	 * 3. skb fragment data
	 */
	slots_used += fill_pg_buf(virt_to_page(hdr),
				  offset_in_page(hdr),
				  len, &pb[slots_used]);

	packet->rmsg_size = len;
	packet->rmsg_pgcnt = slots_used;

	slots_used += fill_pg_buf(virt_to_page(data),
				  offset_in_page(data),
				  skb_headlen(skb), &pb[slots_used]);

	for (i = 0; i < frags; i++) {
		skb_frag_t *frag = skb_shinfo(skb)->frags + i;

		slots_used += fill_pg_buf(skb_frag_page(frag),
					  frag->page_offset,
					  skb_frag_size(frag), &pb[slots_used]);
	}
	return slots_used;
}

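/*
 * Count how many page buffer slots an skb will need: one run of pages
 * for the linear data plus one run per fragment, each rounded up to
 * whole pages.
 */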
static int count_skb_frag_slots(struct sk_buff *skb)
{
	int i, frags = skb_shinfo(skb)->nr_frags;
	int pages = 0;

	for (i = 0; i < frags; i++) {
		skb_frag_t *frag = skb_shinfo(skb)->frags + i;
		unsigned long size = skb_frag_size(frag);
		unsigned long offset = frag->page_offset;

		/* Skip unused fragments from start of page */
		offset &= ~PAGE_MASK;
		pages += PFN_UP(offset + size);
	}
	return pages;
}

static int netvsc_get_slots(struct sk_buff *skb)
{
	char *data = skb->data;
	unsigned int offset = offset_in_page(data);
	unsigned int len = skb_headlen(skb);
	int slots;
	int frag_slots;

	slots = DIV_ROUND_UP(offset + len, PAGE_SIZE);
	frag_slots = count_skb_frag_slots(skb);
	return slots + frag_slots;
}

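/*
 * Classify the packet's L3/L4 protocol so the caller can decide
 * whether the host supports checksum offload for it.
 */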
static u32 net_checksum_info(struct sk_buff *skb)
{
	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *ip = ip_hdr(skb);

		if (ip->protocol == IPPROTO_TCP)
			return TRANSPORT_INFO_IPV4_TCP;
		else if (ip->protocol == IPPROTO_UDP)
			return TRANSPORT_INFO_IPV4_UDP;
	} else {
		struct ipv6hdr *ip6 = ipv6_hdr(skb);

		if (ip6->nexthdr == IPPROTO_TCP)
			return TRANSPORT_INFO_IPV6_TCP;
		else if (ip6->nexthdr == IPPROTO_UDP)
			return TRANSPORT_INFO_IPV6_UDP;
	}

	return TRANSPORT_INFO_NOT_IP;
}

/* Send skb on the slave VF device. */
static int netvsc_vf_xmit(struct net_device *net, struct net_device *vf_netdev,
			  struct sk_buff *skb)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	unsigned int len = skb->len;
	int rc;

	skb->dev = vf_netdev;
	skb->queue_mapping = qdisc_skb_cb(skb)->slave_dev_queue_mapping;

	rc = dev_queue_xmit(skb);
	if (likely(rc == NET_XMIT_SUCCESS || rc == NET_XMIT_CN)) {
		struct netvsc_vf_pcpu_stats *pcpu_stats
			= this_cpu_ptr(ndev_ctx->vf_stats);

		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(ndev_ctx->vf_stats->tx_dropped);
	}

	return rc;
}

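/*
 * ndo_start_xmit: redirect to the VF when one is up; otherwise build an
 * RNDIS packet (hash, VLAN, LSO and checksum PPIs as needed), describe
 * it as a page buffer array and hand it to the VMBus send path.
 */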
static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct hv_netvsc_packet *packet = NULL;
	int ret;
	unsigned int num_data_pgs;
	struct rndis_message *rndis_msg;
	struct net_device *vf_netdev;
	u32 rndis_msg_size;
	u32 hash;
	struct hv_page_buffer pb[MAX_PAGE_BUFFER_COUNT];

	/* If a VF is present and up, redirect packets to it.
	 * This function is already called with rcu_read_lock_bh held.
	 */
	vf_netdev = rcu_dereference_bh(net_device_ctx->vf_netdev);
	if (vf_netdev && netif_running(vf_netdev) &&
	    !netpoll_tx_running(net))
		return netvsc_vf_xmit(net, vf_netdev, skb);

	/* We will at most need two pages to describe the rndis
	 * header. We can only transmit MAX_PAGE_BUFFER_COUNT number
	 * of pages in a single packet. If skb is scattered around
	 * more pages we try linearizing it.
	 */

	num_data_pgs = netvsc_get_slots(skb) + 2;

	if (unlikely(num_data_pgs > MAX_PAGE_BUFFER_COUNT)) {
		++net_device_ctx->eth_stats.tx_scattered;

		if (skb_linearize(skb))
			goto no_memory;

		num_data_pgs = netvsc_get_slots(skb) + 2;
		if (num_data_pgs > MAX_PAGE_BUFFER_COUNT) {
			++net_device_ctx->eth_stats.tx_too_big;
			goto drop;
		}
	}

	/*
	 * Place the rndis header in the skb head room;
	 * skb->cb will be used for the hv_netvsc_packet
	 * structure.
	 */
	ret = skb_cow_head(skb, RNDIS_AND_PPI_SIZE);
	if (ret)
		goto no_memory;

	/* Use the skb control buffer for building up the packet */
	BUILD_BUG_ON(sizeof(struct hv_netvsc_packet) >
			FIELD_SIZEOF(struct sk_buff, cb));
	packet = (struct hv_netvsc_packet *)skb->cb;

	packet->q_idx = skb_get_queue_mapping(skb);

	packet->total_data_buflen = skb->len;
	packet->total_bytes = skb->len;
	packet->total_packets = 1;

	rndis_msg = (struct rndis_message *)skb->head;

	/* Add the rndis header */
	rndis_msg->ndis_msg_type = RNDIS_MSG_PACKET;
	rndis_msg->msg_len = packet->total_data_buflen;

	rndis_msg->msg.pkt = (struct rndis_packet) {
		.data_offset = sizeof(struct rndis_packet),
		.data_len = packet->total_data_buflen,
		.per_pkt_info_offset = sizeof(struct rndis_packet),
	};

	rndis_msg_size = RNDIS_MESSAGE_SIZE(struct rndis_packet);

	hash = skb_get_hash_raw(skb);
	if (hash != 0 && net->real_num_tx_queues > 1) {
		u32 *hash_info;

		rndis_msg_size += NDIS_HASH_PPI_SIZE;
		hash_info = init_ppi_data(rndis_msg, NDIS_HASH_PPI_SIZE,
					  NBL_HASH_VALUE);
		*hash_info = hash;
	}

	if (skb_vlan_tag_present(skb)) {
		struct ndis_pkt_8021q_info *vlan;

		rndis_msg_size += NDIS_VLAN_PPI_SIZE;
		vlan = init_ppi_data(rndis_msg, NDIS_VLAN_PPI_SIZE,
				     IEEE_8021Q_INFO);

		vlan->value = 0;
		vlan->vlanid = skb->vlan_tci & VLAN_VID_MASK;
		vlan->pri = (skb->vlan_tci & VLAN_PRIO_MASK) >>
				VLAN_PRIO_SHIFT;
	}

	if (skb_is_gso(skb)) {
		struct ndis_tcp_lso_info *lso_info;

		rndis_msg_size += NDIS_LSO_PPI_SIZE;
		lso_info = init_ppi_data(rndis_msg, NDIS_LSO_PPI_SIZE,
					 TCP_LARGESEND_PKTINFO);

		lso_info->value = 0;
		lso_info->lso_v2_transmit.type = NDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE;
		if (skb->protocol == htons(ETH_P_IP)) {
			lso_info->lso_v2_transmit.ip_version =
				NDIS_TCP_LARGE_SEND_OFFLOAD_IPV4;
			ip_hdr(skb)->tot_len = 0;
			ip_hdr(skb)->check = 0;
			tcp_hdr(skb)->check =
				~csum_tcpudp_magic(ip_hdr(skb)->saddr,
						   ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
		} else {
			lso_info->lso_v2_transmit.ip_version =
				NDIS_TCP_LARGE_SEND_OFFLOAD_IPV6;
			ipv6_hdr(skb)->payload_len = 0;
			tcp_hdr(skb)->check =
				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						 &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
		}
		lso_info->lso_v2_transmit.tcp_header_offset = skb_transport_offset(skb);
		lso_info->lso_v2_transmit.mss = skb_shinfo(skb)->gso_size;
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (net_checksum_info(skb) & net_device_ctx->tx_checksum_mask) {
			struct ndis_tcp_ip_checksum_info *csum_info;

			rndis_msg_size += NDIS_CSUM_PPI_SIZE;
			csum_info = init_ppi_data(rndis_msg, NDIS_CSUM_PPI_SIZE,
						  TCPIP_CHKSUM_PKTINFO);

			csum_info->value = 0;
			csum_info->transmit.tcp_header_offset = skb_transport_offset(skb);

			if (skb->protocol == htons(ETH_P_IP)) {
				csum_info->transmit.is_ipv4 = 1;

				if (ip_hdr(skb)->protocol == IPPROTO_TCP)
					csum_info->transmit.tcp_checksum = 1;
				else
					csum_info->transmit.udp_checksum = 1;
			} else {
				csum_info->transmit.is_ipv6 = 1;

				if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
					csum_info->transmit.tcp_checksum = 1;
				else
					csum_info->transmit.udp_checksum = 1;
			}
		} else {
			/* Can't do offload of this type of checksum */
			if (skb_checksum_help(skb))
				goto drop;
		}
	}

	/* Start filling in the page buffers with the rndis hdr */
	rndis_msg->msg_len += rndis_msg_size;
	packet->total_data_buflen = rndis_msg->msg_len;
	packet->page_buf_cnt = init_page_array(rndis_msg, rndis_msg_size,
					       skb, packet, pb);

	/* timestamp packet in software */
	skb_tx_timestamp(skb);

	ret = netvsc_send(net, packet, rndis_msg, pb, skb);
	if (likely(ret == 0))
		return NETDEV_TX_OK;

	if (ret == -EAGAIN) {
		++net_device_ctx->eth_stats.tx_busy;
		return NETDEV_TX_BUSY;
	}

	if (ret == -ENOSPC)
		++net_device_ctx->eth_stats.tx_no_space;

drop:
	dev_kfree_skb_any(skb);
	net->stats.tx_dropped++;

	return NETDEV_TX_OK;

no_memory:
	++net_device_ctx->eth_stats.tx_no_memory;
	goto drop;
}

/*
 * netvsc_linkstatus_callback - Link up/down notification
 */
void netvsc_linkstatus_callback(struct net_device *net,
				struct rndis_message *resp)
{
	struct rndis_indicate_status *indicate = &resp->msg.indicate_status;
	struct net_device_context *ndev_ctx = netdev_priv(net);
	struct netvsc_reconfig *event;
	unsigned long flags;

	/* Update the physical link speed when changing to another vSwitch */
	if (indicate->status == RNDIS_STATUS_LINK_SPEED_CHANGE) {
		u32 speed;

		speed = *(u32 *)((void *)indicate
				 + indicate->status_buf_offset) / 10000;
		ndev_ctx->speed = speed;
		return;
	}

	/* Handle these link change statuses below */
	if (indicate->status != RNDIS_STATUS_NETWORK_CHANGE &&
	    indicate->status != RNDIS_STATUS_MEDIA_CONNECT &&
	    indicate->status != RNDIS_STATUS_MEDIA_DISCONNECT)
		return;

	if (net->reg_state != NETREG_REGISTERED)
		return;

	event = kzalloc(sizeof(*event), GFP_ATOMIC);
	if (!event)
		return;
	event->event = indicate->status;

	spin_lock_irqsave(&ndev_ctx->lock, flags);
	list_add_tail(&event->list, &ndev_ctx->reconfig_events);
	spin_unlock_irqrestore(&ndev_ctx->lock, flags);

	schedule_delayed_work(&ndev_ctx->dwork, 0);
}

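/*
 * Build an skb for a received packet: copy the data out of the host
 * receive buffer, then fill in protocol, checksum and VLAN metadata.
 */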
static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
					     struct napi_struct *napi,
					     const struct ndis_tcp_ip_checksum_info *csum_info,
					     const struct ndis_pkt_8021q_info *vlan,
					     void *data, u32 buflen)
{
	struct sk_buff *skb;

	skb = napi_alloc_skb(napi, buflen);
	if (!skb)
		return skb;

	/*
	 * Copy to skb. This copy is needed here since the memory pointed to
	 * by hv_netvsc_packet cannot be deallocated.
	 */
	skb_put_data(skb, data, buflen);

	skb->protocol = eth_type_trans(skb, net);

	/* skb is already created with CHECKSUM_NONE */
	skb_checksum_none_assert(skb);

	/*
	 * In Linux, the IP checksum is always checked.
	 * Do L4 checksum offload if enabled and present.
	 */
	if (csum_info && (net->features & NETIF_F_RXCSUM)) {
		if (csum_info->receive.tcp_checksum_succeeded ||
		    csum_info->receive.udp_checksum_succeeded)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	if (vlan) {
		u16 vlan_tci = vlan->vlanid | (vlan->pri << VLAN_PRIO_SHIFT);

		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
				       vlan_tci);
	}

	return skb;
}

/*
 * netvsc_recv_callback - Callback when we receive a packet from the
 * "wire" on the specified device.
 */
int netvsc_recv_callback(struct net_device *net,
			 struct netvsc_device *net_device,
			 struct vmbus_channel *channel,
			 void *data, u32 len,
			 const struct ndis_tcp_ip_checksum_info *csum_info,
			 const struct ndis_pkt_8021q_info *vlan)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	u16 q_idx = channel->offermsg.offer.sub_channel_index;
	struct netvsc_channel *nvchan = &net_device->chan_table[q_idx];
	struct sk_buff *skb;
	struct netvsc_stats *rx_stats;

	if (net->reg_state != NETREG_REGISTERED)
		return NVSP_STAT_FAIL;

	/* Allocate a skb - TODO direct I/O to pages? */
	skb = netvsc_alloc_recv_skb(net, &nvchan->napi,
				    csum_info, vlan, data, len);
	if (unlikely(!skb)) {
		++net_device_ctx->eth_stats.rx_no_memory;
		return NVSP_STAT_FAIL;
	}

	skb_record_rx_queue(skb, q_idx);

	/*
	 * Even if injecting the packet, record the statistics
	 * on the synthetic device because modifying the VF device
	 * statistics will not work correctly.
	 */
	rx_stats = &nvchan->rx_stats;
	u64_stats_update_begin(&rx_stats->syncp);
	rx_stats->packets++;
	rx_stats->bytes += len;

	if (skb->pkt_type == PACKET_BROADCAST)
		++rx_stats->broadcast;
	else if (skb->pkt_type == PACKET_MULTICAST)
		++rx_stats->multicast;
	u64_stats_update_end(&rx_stats->syncp);

	napi_gro_receive(&nvchan->napi, skb);
	return 0;
}

static void netvsc_get_drvinfo(struct net_device *net,
			       struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
}

static void netvsc_get_channels(struct net_device *net,
				struct ethtool_channels *channel)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);

	if (nvdev) {
		channel->max_combined = nvdev->max_chn;
		channel->combined_count = nvdev->num_chn;
	}
}

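/*
 * ethtool set_channels: tear down the RNDIS device and re-create it
 * with the requested combined channel count, preserving the current
 * send/receive buffer sizing. On failure, fall back to the original
 * channel count.
 */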
static int netvsc_set_channels(struct net_device *net,
			       struct ethtool_channels *channels)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct hv_device *dev = net_device_ctx->device_ctx;
	struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
	unsigned int orig, count = channels->combined_count;
	struct netvsc_device_info device_info;
	bool was_opened;
	int ret = 0;

	/* We do not support separate count for rx, tx, or other */
	if (count == 0 ||
	    channels->rx_count || channels->tx_count || channels->other_count)
		return -EINVAL;

	if (!nvdev || nvdev->destroy)
		return -ENODEV;

	if (nvdev->nvsp_version < NVSP_PROTOCOL_VERSION_5)
		return -EINVAL;

	if (count > nvdev->max_chn)
		return -EINVAL;

	orig = nvdev->num_chn;
	was_opened = rndis_filter_opened(nvdev);
	if (was_opened)
		rndis_filter_close(nvdev);

	memset(&device_info, 0, sizeof(device_info));
	device_info.num_chn = count;
	device_info.send_sections = nvdev->send_section_cnt;
	device_info.send_section_size = nvdev->send_section_size;
	device_info.recv_sections = nvdev->recv_section_cnt;
	device_info.recv_section_size = nvdev->recv_section_size;

	rndis_filter_device_remove(dev, nvdev);

	nvdev = rndis_filter_device_add(dev, &device_info);
	if (IS_ERR(nvdev)) {
		ret = PTR_ERR(nvdev);
		device_info.num_chn = orig;
		nvdev = rndis_filter_device_add(dev, &device_info);

		if (IS_ERR(nvdev)) {
			netdev_err(net, "restoring channel setting failed: %ld\n",
				   PTR_ERR(nvdev));
			return ret;
		}
	}

	if (was_opened)
		rndis_filter_open(nvdev);

	/* We may have missed link change notifications */
	net_device_ctx->last_reconfig = 0;
	schedule_delayed_work(&net_device_ctx->dwork, 0);

	return ret;
}

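/*
 * Accept only requests that change speed or duplex; every other link
 * setting must remain at its default (port is always PORT_OTHER).
 */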
static bool
netvsc_validate_ethtool_ss_cmd(const struct ethtool_link_ksettings *cmd)
{
	struct ethtool_link_ksettings diff1 = *cmd;
	struct ethtool_link_ksettings diff2 = {};

	diff1.base.speed = 0;
	diff1.base.duplex = 0;
	/* advertising and cmd are usually set */
	ethtool_link_ksettings_zero_link_mode(&diff1, advertising);
	diff1.base.cmd = 0;
	/* We set port to PORT_OTHER */
	diff2.base.port = PORT_OTHER;

	return !memcmp(&diff1, &diff2, sizeof(diff1));
}

static void netvsc_init_settings(struct net_device *dev)
{
	struct net_device_context *ndc = netdev_priv(dev);

	ndc->l4_hash = HV_DEFAULT_L4HASH;

	ndc->speed = SPEED_UNKNOWN;
	ndc->duplex = DUPLEX_FULL;
}

static int netvsc_get_link_ksettings(struct net_device *dev,
				     struct ethtool_link_ksettings *cmd)
{
	struct net_device_context *ndc = netdev_priv(dev);

	cmd->base.speed = ndc->speed;
	cmd->base.duplex = ndc->duplex;
	cmd->base.port = PORT_OTHER;

	return 0;
}

static int netvsc_set_link_ksettings(struct net_device *dev,
				     const struct ethtool_link_ksettings *cmd)
{
	struct net_device_context *ndc = netdev_priv(dev);
	u32 speed;

	speed = cmd->base.speed;
	if (!ethtool_validate_speed(speed) ||
	    !ethtool_validate_duplex(cmd->base.duplex) ||
	    !netvsc_validate_ethtool_ss_cmd(cmd))
		return -EINVAL;

	ndc->speed = speed;
	ndc->duplex = cmd->base.duplex;

	return 0;
}

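/*
 * ndo_change_mtu: the underlying VF MTU is changed first; then the
 * RNDIS device is torn down and re-added with the new MTU. On failure
 * both devices are rolled back to the original MTU.
 */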
static int netvsc_change_mtu(struct net_device *ndev, int mtu)
{
	struct net_device_context *ndevctx = netdev_priv(ndev);
	struct net_device *vf_netdev = rtnl_dereference(ndevctx->vf_netdev);
	struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
	struct hv_device *hdev = ndevctx->device_ctx;
	int orig_mtu = ndev->mtu;
	struct netvsc_device_info device_info;
	bool was_opened;
	int ret = 0;

	if (!nvdev || nvdev->destroy)
		return -ENODEV;

	/* Change MTU of underlying VF netdev first. */
	if (vf_netdev) {
		ret = dev_set_mtu(vf_netdev, mtu);
		if (ret)
			return ret;
	}

	netif_device_detach(ndev);
	was_opened = rndis_filter_opened(nvdev);
	if (was_opened)
		rndis_filter_close(nvdev);

	memset(&device_info, 0, sizeof(device_info));
	device_info.num_chn = nvdev->num_chn;
	device_info.send_sections = nvdev->send_section_cnt;
	device_info.send_section_size = nvdev->send_section_size;
	device_info.recv_sections = nvdev->recv_section_cnt;
	device_info.recv_section_size = nvdev->recv_section_size;

	rndis_filter_device_remove(hdev, nvdev);

	ndev->mtu = mtu;

	nvdev = rndis_filter_device_add(hdev, &device_info);
	if (IS_ERR(nvdev)) {
		ret = PTR_ERR(nvdev);

		/* Attempt rollback to original MTU */
		ndev->mtu = orig_mtu;
		nvdev = rndis_filter_device_add(hdev, &device_info);

		if (vf_netdev)
			dev_set_mtu(vf_netdev, orig_mtu);

		if (IS_ERR(nvdev)) {
			netdev_err(ndev, "restoring mtu failed: %ld\n",
				   PTR_ERR(nvdev));
			return ret;
		}
	}

	if (was_opened)
		rndis_filter_open(nvdev);

	netif_device_attach(ndev);

	/* We may have missed link change notifications */
	schedule_delayed_work(&ndevctx->dwork, 0);

	return ret;
}

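/*
 * Sum the per-CPU VF statistics into a single totals structure, using
 * u64_stats syncp retry loops so readers never see torn 64-bit values.
 */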
static void netvsc_get_vf_stats(struct net_device *net,
				struct netvsc_vf_pcpu_stats *tot)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	int i;

	memset(tot, 0, sizeof(*tot));

	for_each_possible_cpu(i) {
		const struct netvsc_vf_pcpu_stats *stats
			= per_cpu_ptr(ndev_ctx->vf_stats, i);
		u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
		unsigned int start;

		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			rx_packets = stats->rx_packets;
			tx_packets = stats->tx_packets;
			rx_bytes = stats->rx_bytes;
			tx_bytes = stats->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		tot->rx_packets += rx_packets;
		tot->tx_packets += tx_packets;
		tot->rx_bytes += rx_bytes;
		tot->tx_bytes += tx_bytes;
		tot->tx_dropped += stats->tx_dropped;
	}
}

static void netvsc_get_stats64(struct net_device *net,
			       struct rtnl_link_stats64 *t)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	struct netvsc_device *nvdev = rcu_dereference_rtnl(ndev_ctx->nvdev);
	struct netvsc_vf_pcpu_stats vf_tot;
	int i;

	if (!nvdev)
		return;

	netdev_stats_to_stats64(t, &net->stats);

	netvsc_get_vf_stats(net, &vf_tot);
	t->rx_packets += vf_tot.rx_packets;
	t->tx_packets += vf_tot.tx_packets;
	t->rx_bytes += vf_tot.rx_bytes;
	t->tx_bytes += vf_tot.tx_bytes;
	t->tx_dropped += vf_tot.tx_dropped;

	for (i = 0; i < nvdev->num_chn; i++) {
		const struct netvsc_channel *nvchan = &nvdev->chan_table[i];
		const struct netvsc_stats *stats;
		u64 packets, bytes, multicast;
		unsigned int start;

		stats = &nvchan->tx_stats;
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			packets = stats->packets;
			bytes = stats->bytes;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		t->tx_bytes += bytes;
		t->tx_packets += packets;

		stats = &nvchan->rx_stats;
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			packets = stats->packets;
			bytes = stats->bytes;
			multicast = stats->multicast + stats->broadcast;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		t->rx_bytes += bytes;
		t->rx_packets += packets;
		t->multicast += multicast;
	}
}

static int netvsc_set_mac_addr(struct net_device *ndev, void *p)
{
	struct net_device_context *ndc = netdev_priv(ndev);
	struct net_device *vf_netdev = rtnl_dereference(ndc->vf_netdev);
	struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
	struct sockaddr *addr = p;
	int err;

	err = eth_prepare_mac_addr_change(ndev, p);
	if (err)
		return err;

	if (!nvdev)
		return -ENODEV;

	if (vf_netdev) {
		err = dev_set_mac_address(vf_netdev, addr);
		if (err)
			return err;
	}

	err = rndis_filter_set_device_mac(nvdev, addr->sa_data);
	if (!err) {
		eth_commit_mac_addr_change(ndev, p);
	} else if (vf_netdev) {
		/* rollback change on VF */
		memcpy(addr->sa_data, ndev->dev_addr, ETH_ALEN);
		dev_set_mac_address(vf_netdev, addr);
	}

	return err;
}

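/*
 * Ethtool statistics: fixed global counters (netvsc_stats), VF
 * passthrough counters (vf_stats), plus four dynamic per-queue
 * counters (rx/tx packets and bytes) for each channel.
 */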
static const struct {
	char name[ETH_GSTRING_LEN];
	u16 offset;
} netvsc_stats[] = {
	{ "tx_scattered", offsetof(struct netvsc_ethtool_stats, tx_scattered) },
	{ "tx_no_memory", offsetof(struct netvsc_ethtool_stats, tx_no_memory) },
	{ "tx_no_space", offsetof(struct netvsc_ethtool_stats, tx_no_space) },
	{ "tx_too_big", offsetof(struct netvsc_ethtool_stats, tx_too_big) },
	{ "tx_busy", offsetof(struct netvsc_ethtool_stats, tx_busy) },
	{ "tx_send_full", offsetof(struct netvsc_ethtool_stats, tx_send_full) },
	{ "rx_comp_busy", offsetof(struct netvsc_ethtool_stats, rx_comp_busy) },
	{ "rx_no_memory", offsetof(struct netvsc_ethtool_stats, rx_no_memory) },
	{ "stop_queue", offsetof(struct netvsc_ethtool_stats, stop_queue) },
	{ "wake_queue", offsetof(struct netvsc_ethtool_stats, wake_queue) },
}, vf_stats[] = {
	{ "vf_rx_packets", offsetof(struct netvsc_vf_pcpu_stats, rx_packets) },
	{ "vf_rx_bytes", offsetof(struct netvsc_vf_pcpu_stats, rx_bytes) },
	{ "vf_tx_packets", offsetof(struct netvsc_vf_pcpu_stats, tx_packets) },
	{ "vf_tx_bytes", offsetof(struct netvsc_vf_pcpu_stats, tx_bytes) },
	{ "vf_tx_dropped", offsetof(struct netvsc_vf_pcpu_stats, tx_dropped) },
};

#define NETVSC_GLOBAL_STATS_LEN	ARRAY_SIZE(netvsc_stats)
#define NETVSC_VF_STATS_LEN	ARRAY_SIZE(vf_stats)

/* 4 statistics per queue (rx/tx packets/bytes) */
#define NETVSC_QUEUE_STATS_LEN(dev) ((dev)->num_chn * 4)

static int netvsc_get_sset_count(struct net_device *dev, int string_set)
{
	struct net_device_context *ndc = netdev_priv(dev);
	struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);

	if (!nvdev)
		return -ENODEV;

	switch (string_set) {
	case ETH_SS_STATS:
		return NETVSC_GLOBAL_STATS_LEN
			+ NETVSC_VF_STATS_LEN
			+ NETVSC_QUEUE_STATS_LEN(nvdev);
	default:
		return -EINVAL;
	}
}

static void netvsc_get_ethtool_stats(struct net_device *dev,
				     struct ethtool_stats *stats, u64 *data)
{
	struct net_device_context *ndc = netdev_priv(dev);
	struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
	const void *nds = &ndc->eth_stats;
	const struct netvsc_stats *qstats;
	struct netvsc_vf_pcpu_stats sum;
	unsigned int start;
	u64 packets, bytes;
	int i, j;

	if (!nvdev)
		return;

	for (i = 0; i < NETVSC_GLOBAL_STATS_LEN; i++)
		data[i] = *(unsigned long *)(nds + netvsc_stats[i].offset);

	netvsc_get_vf_stats(dev, &sum);
	for (j = 0; j < NETVSC_VF_STATS_LEN; j++)
		data[i++] = *(u64 *)((void *)&sum + vf_stats[j].offset);

	for (j = 0; j < nvdev->num_chn; j++) {
		qstats = &nvdev->chan_table[j].tx_stats;

		do {
			start = u64_stats_fetch_begin_irq(&qstats->syncp);
			packets = qstats->packets;
			bytes = qstats->bytes;
		} while (u64_stats_fetch_retry_irq(&qstats->syncp, start));
		data[i++] = packets;
		data[i++] = bytes;

		qstats = &nvdev->chan_table[j].rx_stats;
		do {
			start = u64_stats_fetch_begin_irq(&qstats->syncp);
			packets = qstats->packets;
			bytes = qstats->bytes;
		} while (u64_stats_fetch_retry_irq(&qstats->syncp, start));
		data[i++] = packets;
		data[i++] = bytes;
	}
}

1231static void netvsc_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1232{
Simon Xiao6c80f3f2017-01-24 13:06:13 -08001233 struct net_device_context *ndc = netdev_priv(dev);
stephen hemminger867047c2017-07-28 08:59:42 -07001234 struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
Simon Xiao6c80f3f2017-01-24 13:06:13 -08001235 u8 *p = data;
Stephen Hemminger4323b472016-08-23 12:17:57 -07001236 int i;
1237
stephen hemminger545a8e72017-03-22 14:51:00 -07001238 if (!nvdev)
1239 return;
1240
Stephen Hemminger4323b472016-08-23 12:17:57 -07001241 switch (stringset) {
1242 case ETH_SS_STATS:
stephen hemminger0c195562017-08-01 19:58:53 -07001243 for (i = 0; i < ARRAY_SIZE(netvsc_stats); i++) {
1244 memcpy(p, netvsc_stats[i].name, ETH_GSTRING_LEN);
1245 p += ETH_GSTRING_LEN;
1246 }
Simon Xiao6c80f3f2017-01-24 13:06:13 -08001247
stephen hemminger0c195562017-08-01 19:58:53 -07001248 for (i = 0; i < ARRAY_SIZE(vf_stats); i++) {
1249 memcpy(p, vf_stats[i].name, ETH_GSTRING_LEN);
1250 p += ETH_GSTRING_LEN;
1251 }
1252
Simon Xiao6c80f3f2017-01-24 13:06:13 -08001253 for (i = 0; i < nvdev->num_chn; i++) {
1254 sprintf(p, "tx_queue_%u_packets", i);
1255 p += ETH_GSTRING_LEN;
1256 sprintf(p, "tx_queue_%u_bytes", i);
1257 p += ETH_GSTRING_LEN;
1258 sprintf(p, "rx_queue_%u_packets", i);
1259 p += ETH_GSTRING_LEN;
1260 sprintf(p, "rx_queue_%u_bytes", i);
1261 p += ETH_GSTRING_LEN;
1262 }
1263
Stephen Hemminger4323b472016-08-23 12:17:57 -07001264 break;
1265 }
1266}
1267
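/* Report which header fields feed the RSS hash for a flow type: IP
 * source/destination always, plus the L4 ports when the matching bit
 * is set in ndc->l4_hash.
 */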
stephen hemmingerb448f4e2017-01-24 13:06:00 -08001268static int
Haiyang Zhang4823eb22017-08-21 19:22:39 -07001269netvsc_get_rss_hash_opts(struct net_device_context *ndc,
1270 struct ethtool_rxnfc *info)
stephen hemmingerb5a5dc82017-01-24 13:06:01 -08001271{
Haiyang Zhang486e3982017-10-06 08:33:57 -07001272 const u32 l4_flag = RXH_L4_B_0_1 | RXH_L4_B_2_3;
1273
stephen hemmingerb5a5dc82017-01-24 13:06:01 -08001274 info->data = RXH_IP_SRC | RXH_IP_DST;
1275
1276 switch (info->flow_type) {
1277 case TCP_V4_FLOW:
Haiyang Zhang0518ec4f2017-10-06 08:33:58 -07001278 if (ndc->l4_hash & HV_TCP4_L4HASH)
1279 info->data |= l4_flag;
1280
1281 break;
1282
stephen hemmingerb5a5dc82017-01-24 13:06:01 -08001283 case TCP_V6_FLOW:
Haiyang Zhang0518ec4f2017-10-06 08:33:58 -07001284 if (ndc->l4_hash & HV_TCP6_L4HASH)
1285 info->data |= l4_flag;
1286
Haiyang Zhang4823eb22017-08-21 19:22:39 -07001287 break;
1288
stephen hemmingerb5a5dc82017-01-24 13:06:01 -08001289 case UDP_V4_FLOW:
Haiyang Zhang486e3982017-10-06 08:33:57 -07001290 if (ndc->l4_hash & HV_UDP4_L4HASH)
1291 info->data |= l4_flag;
Haiyang Zhang4823eb22017-08-21 19:22:39 -07001292
1293 break;
1294
stephen hemmingerb5a5dc82017-01-24 13:06:01 -08001295 case UDP_V6_FLOW:
Haiyang Zhang486e3982017-10-06 08:33:57 -07001296 if (ndc->l4_hash & HV_UDP6_L4HASH)
1297 info->data |= l4_flag;
Haiyang Zhang4823eb22017-08-21 19:22:39 -07001298
1299 break;
1300
stephen hemmingerb5a5dc82017-01-24 13:06:01 -08001301 case IPV4_FLOW:
1302 case IPV6_FLOW:
1303 break;
1304 default:
1305 info->data = 0;
1306 break;
1307 }
1308
1309 return 0;
1310}
1311
1312static int
stephen hemmingerb448f4e2017-01-24 13:06:00 -08001313netvsc_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
1314 u32 *rules)
1315{
1316 struct net_device_context *ndc = netdev_priv(dev);
stephen hemminger867047c2017-07-28 08:59:42 -07001317 struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
stephen hemminger545a8e72017-03-22 14:51:00 -07001318
1319 if (!nvdev)
1320 return -ENODEV;
stephen hemmingerb448f4e2017-01-24 13:06:00 -08001321
1322 switch (info->cmd) {
1323 case ETHTOOL_GRXRINGS:
1324 info->data = nvdev->num_chn;
1325 return 0;
stephen hemmingerb5a5dc82017-01-24 13:06:01 -08001326
1327 case ETHTOOL_GRXFH:
Haiyang Zhang4823eb22017-08-21 19:22:39 -07001328 return netvsc_get_rss_hash_opts(ndc, info);
stephen hemmingerb448f4e2017-01-24 13:06:00 -08001329 }
1330 return -EOPNOTSUPP;
1331}
1332
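/* Enable or disable 4-tuple (L4 port) hashing per flow type. Only the
 * two exact field combinations reported by netvsc_get_rss_hash_opts()
 * are accepted; anything else returns -EOPNOTSUPP.
 */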
Haiyang Zhang4823eb22017-08-21 19:22:39 -07001333static int netvsc_set_rss_hash_opts(struct net_device_context *ndc,
1334 struct ethtool_rxnfc *info)
1335{
1336 if (info->data == (RXH_IP_SRC | RXH_IP_DST |
1337 RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
Haiyang Zhang486e3982017-10-06 08:33:57 -07001338 switch (info->flow_type) {
Haiyang Zhang0518ec4f2017-10-06 08:33:58 -07001339 case TCP_V4_FLOW:
1340 ndc->l4_hash |= HV_TCP4_L4HASH;
1341 break;
1342
1343 case TCP_V6_FLOW:
1344 ndc->l4_hash |= HV_TCP6_L4HASH;
1345 break;
1346
Haiyang Zhang486e3982017-10-06 08:33:57 -07001347 case UDP_V4_FLOW:
1348 ndc->l4_hash |= HV_UDP4_L4HASH;
1349 break;
1350
1351 case UDP_V6_FLOW:
1352 ndc->l4_hash |= HV_UDP6_L4HASH;
1353 break;
1354
1355 default:
Haiyang Zhang4823eb22017-08-21 19:22:39 -07001356 return -EOPNOTSUPP;
Haiyang Zhang486e3982017-10-06 08:33:57 -07001357 }
Haiyang Zhang4823eb22017-08-21 19:22:39 -07001358
1359 return 0;
1360 }
1361
1362 if (info->data == (RXH_IP_SRC | RXH_IP_DST)) {
Haiyang Zhang486e3982017-10-06 08:33:57 -07001363 switch (info->flow_type) {
Haiyang Zhang0518ec4f2017-10-06 08:33:58 -07001364 case TCP_V4_FLOW:
1365 ndc->l4_hash &= ~HV_TCP4_L4HASH;
1366 break;
1367
1368 case TCP_V6_FLOW:
1369 ndc->l4_hash &= ~HV_TCP6_L4HASH;
1370 break;
1371
Haiyang Zhang486e3982017-10-06 08:33:57 -07001372 case UDP_V4_FLOW:
1373 ndc->l4_hash &= ~HV_UDP4_L4HASH;
1374 break;
1375
1376 case UDP_V6_FLOW:
1377 ndc->l4_hash &= ~HV_UDP6_L4HASH;
1378 break;
1379
1380 default:
Haiyang Zhang4823eb22017-08-21 19:22:39 -07001381 return -EOPNOTSUPP;
Haiyang Zhang486e3982017-10-06 08:33:57 -07001382 }
Haiyang Zhang4823eb22017-08-21 19:22:39 -07001383
1384 return 0;
1385 }
1386
1387 return -EOPNOTSUPP;
1388}
1389
1390static int
1391netvsc_set_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *info)
1392{
1393 struct net_device_context *ndc = netdev_priv(ndev);
1394
1395 if (info->cmd == ETHTOOL_SRXFH)
1396 return netvsc_set_rss_hash_opts(ndc, info);
1397
1398 return -EOPNOTSUPP;
1399}
1400
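/* netpoll entry point: schedule NAPI on every channel so pending
 * completions are processed even when normal interrupts are not
 * available.
 */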
Richard Weinberger316158f2014-07-09 16:23:59 +02001401#ifdef CONFIG_NET_POLL_CONTROLLER
stephen hemmingera5ecd432017-06-07 15:53:48 -07001402static void netvsc_poll_controller(struct net_device *dev)
Richard Weinberger316158f2014-07-09 16:23:59 +02001403{
stephen hemmingera5ecd432017-06-07 15:53:48 -07001404 struct net_device_context *ndc = netdev_priv(dev);
1405 struct netvsc_device *ndev;
1406 int i;
1407
1408 rcu_read_lock();
1409 ndev = rcu_dereference(ndc->nvdev);
1410 if (ndev) {
1411 for (i = 0; i < ndev->num_chn; i++) {
1412 struct netvsc_channel *nvchan = &ndev->chan_table[i];
1413
1414 napi_schedule(&nvchan->napi);
1415 }
1416 }
1417 rcu_read_unlock();
Richard Weinberger316158f2014-07-09 16:23:59 +02001418}
1419#endif
Haiyang Zhang1ce09e82012-07-10 07:19:22 +00001420
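/* RSS ethtool plumbing: the Toeplitz hash key length and indirection
 * table size are fixed (NETVSC_HASH_KEYLEN and ITAB_NUM entries).
 */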
stephen hemminger962f3fe2017-01-24 13:06:02 -08001421static u32 netvsc_get_rxfh_key_size(struct net_device *dev)
1422{
1423 return NETVSC_HASH_KEYLEN;
1424}
1425
1426static u32 netvsc_rss_indir_size(struct net_device *dev)
1427{
stephen hemmingerff4a4412017-01-24 13:06:04 -08001428 return ITAB_NUM;
stephen hemminger962f3fe2017-01-24 13:06:02 -08001429}
1430
1431static int netvsc_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
1432 u8 *hfunc)
1433{
1434 struct net_device_context *ndc = netdev_priv(dev);
stephen hemminger867047c2017-07-28 08:59:42 -07001435 struct netvsc_device *ndev = rtnl_dereference(ndc->nvdev);
Colin Ian Kingeb996ed2017-03-25 14:26:39 +00001436 struct rndis_device *rndis_dev;
stephen hemmingerff4a4412017-01-24 13:06:04 -08001437 int i;
stephen hemminger962f3fe2017-01-24 13:06:02 -08001438
stephen hemminger545a8e72017-03-22 14:51:00 -07001439 if (!ndev)
1440 return -ENODEV;
1441
stephen hemminger962f3fe2017-01-24 13:06:02 -08001442 if (hfunc)
1443 *hfunc = ETH_RSS_HASH_TOP; /* Toeplitz */
1444
Colin Ian Kingeb996ed2017-03-25 14:26:39 +00001445 rndis_dev = ndev->extension;
stephen hemmingerff4a4412017-01-24 13:06:04 -08001446 if (indir) {
1447 for (i = 0; i < ITAB_NUM; i++)
Haiyang Zhang473713002017-10-13 12:28:03 -07001448 indir[i] = rndis_dev->rx_table[i];
stephen hemmingerff4a4412017-01-24 13:06:04 -08001449 }
1450
stephen hemminger962f3fe2017-01-24 13:06:02 -08001451 if (key)
1452 memcpy(key, rndis_dev->rss_key, NETVSC_HASH_KEYLEN);
1453
1454 return 0;
1455}
1456
1457static int netvsc_set_rxfh(struct net_device *dev, const u32 *indir,
1458 const u8 *key, const u8 hfunc)
1459{
1460 struct net_device_context *ndc = netdev_priv(dev);
stephen hemminger545a8e72017-03-22 14:51:00 -07001461 struct netvsc_device *ndev = rtnl_dereference(ndc->nvdev);
Colin Ian Kingeb996ed2017-03-25 14:26:39 +00001462 struct rndis_device *rndis_dev;
stephen hemmingerff4a4412017-01-24 13:06:04 -08001463 int i;
stephen hemminger962f3fe2017-01-24 13:06:02 -08001464
stephen hemminger545a8e72017-03-22 14:51:00 -07001465 if (!ndev)
1466 return -ENODEV;
1467
stephen hemminger962f3fe2017-01-24 13:06:02 -08001468 if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
1469 return -EOPNOTSUPP;
1470
Colin Ian Kingeb996ed2017-03-25 14:26:39 +00001471 rndis_dev = ndev->extension;
stephen hemmingerff4a4412017-01-24 13:06:04 -08001472 if (indir) {
1473 for (i = 0; i < ITAB_NUM; i++)
Haiyang Zhangdb3cd7a2017-09-01 14:30:07 -07001474 if (indir[i] >= ndev->num_chn)
stephen hemmingerff4a4412017-01-24 13:06:04 -08001475 return -EINVAL;
1476
1477 for (i = 0; i < ITAB_NUM; i++)
Haiyang Zhang473713002017-10-13 12:28:03 -07001478 rndis_dev->rx_table[i] = indir[i];
stephen hemmingerff4a4412017-01-24 13:06:04 -08001479 }
1480
1481 if (!key) {
1482 if (!indir)
1483 return 0;
1484
1485 key = rndis_dev->rss_key;
1486 }
stephen hemminger962f3fe2017-01-24 13:06:02 -08001487
Haiyang Zhang715e2ec2017-09-01 14:30:04 -07001488 return rndis_filter_set_rss_param(rndis_dev, key);
stephen hemminger962f3fe2017-01-24 13:06:02 -08001489}
1490
stephen hemminger8b532792017-08-09 17:46:11 -07001491/* The Hyper-V RNDIS protocol does not have a ring in the HW sense.
1492 * It does have a pre-allocated receive area which is divided into sections.
1493 */
1494static void __netvsc_get_ringparam(struct netvsc_device *nvdev,
1495 struct ethtool_ringparam *ring)
1496{
1497 u32 max_buf_size;
1498
1499 ring->rx_pending = nvdev->recv_section_cnt;
1500 ring->tx_pending = nvdev->send_section_cnt;
1501
1502 if (nvdev->nvsp_version <= NVSP_PROTOCOL_VERSION_2)
1503 max_buf_size = NETVSC_RECEIVE_BUFFER_SIZE_LEGACY;
1504 else
1505 max_buf_size = NETVSC_RECEIVE_BUFFER_SIZE;
1506
1507 ring->rx_max_pending = max_buf_size / nvdev->recv_section_size;
1508 ring->tx_max_pending = NETVSC_SEND_BUFFER_SIZE
1509 / nvdev->send_section_size;
1510}
1511
1512static void netvsc_get_ringparam(struct net_device *ndev,
1513 struct ethtool_ringparam *ring)
1514{
1515 struct net_device_context *ndevctx = netdev_priv(ndev);
1516 struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
1517
1518 if (!nvdev)
1519 return;
1520
1521 __netvsc_get_ringparam(nvdev, ring);
1522}
1523
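/* Changing ring sizes means tearing down and re-adding the RNDIS
 * device with new section counts; on failure, retry once with the
 * original values before giving up.
 */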
1524static int netvsc_set_ringparam(struct net_device *ndev,
1525 struct ethtool_ringparam *ring)
1526{
1527 struct net_device_context *ndevctx = netdev_priv(ndev);
1528 struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
1529 struct hv_device *hdev = ndevctx->device_ctx;
1530 struct netvsc_device_info device_info;
1531 struct ethtool_ringparam orig;
1532 u32 new_tx, new_rx;
1533 bool was_opened;
1534 int ret = 0;
1535
1536 if (!nvdev || nvdev->destroy)
1537 return -ENODEV;
1538
1539 memset(&orig, 0, sizeof(orig));
1540 __netvsc_get_ringparam(nvdev, &orig);
1541
1542 new_tx = clamp_t(u32, ring->tx_pending,
1543 NETVSC_MIN_TX_SECTIONS, orig.tx_max_pending);
1544 new_rx = clamp_t(u32, ring->rx_pending,
1545 NETVSC_MIN_RX_SECTIONS, orig.rx_max_pending);
1546
1547 if (new_tx == orig.tx_pending &&
1548 new_rx == orig.rx_pending)
1549 return 0; /* no change */
1550
1551 memset(&device_info, 0, sizeof(device_info));
1552 device_info.num_chn = nvdev->num_chn;
stephen hemminger8b532792017-08-09 17:46:11 -07001553 device_info.send_sections = new_tx;
Alex Ng0ab09be2017-09-20 11:17:35 -07001554 device_info.send_section_size = nvdev->send_section_size;
stephen hemminger8b532792017-08-09 17:46:11 -07001555 device_info.recv_sections = new_rx;
Alex Ng0ab09be2017-09-20 11:17:35 -07001556 device_info.recv_section_size = nvdev->recv_section_size;
stephen hemminger8b532792017-08-09 17:46:11 -07001557
1558 netif_device_detach(ndev);
1559 was_opened = rndis_filter_opened(nvdev);
1560 if (was_opened)
1561 rndis_filter_close(nvdev);
1562
1563 rndis_filter_device_remove(hdev, nvdev);
1564
1565 nvdev = rndis_filter_device_add(hdev, &device_info);
1566 if (IS_ERR(nvdev)) {
1567 ret = PTR_ERR(nvdev);
1568
1569 device_info.send_sections = orig.tx_pending;
1570 device_info.recv_sections = orig.rx_pending;
1571 nvdev = rndis_filter_device_add(hdev, &device_info);
1572 if (IS_ERR(nvdev)) {
1573 netdev_err(ndev, "restoring ringparam failed: %ld\n",
1574 PTR_ERR(nvdev));
1575 return ret;
1576 }
1577 }
1578
1579 if (was_opened)
1580 rndis_filter_open(nvdev);
1581 netif_device_attach(ndev);
1582
1583 /* We may have missed link change notifications */
1584 ndevctx->last_reconfig = 0;
1585 schedule_delayed_work(&ndevctx->dwork, 0);
1586
1587 return ret;
1588}
1589
Stephen Hemmingerf82f4ad2010-05-04 09:58:57 -07001590static const struct ethtool_ops ethtool_ops = {
1591 .get_drvinfo = netvsc_get_drvinfo,
Stephen Hemmingerf82f4ad2010-05-04 09:58:57 -07001592 .get_link = ethtool_op_get_link,
Stephen Hemminger4323b472016-08-23 12:17:57 -07001593 .get_ethtool_stats = netvsc_get_ethtool_stats,
1594 .get_sset_count = netvsc_get_sset_count,
1595 .get_strings = netvsc_get_strings,
Andrew Schwartzmeyer59995372015-02-26 16:27:14 -08001596 .get_channels = netvsc_get_channels,
Andrew Schwartzmeyerb5960e62015-08-11 17:14:32 -07001597 .set_channels = netvsc_set_channels,
sixiao@microsoft.com76d13b52016-02-17 16:43:59 -08001598 .get_ts_info = ethtool_op_get_ts_info,
stephen hemmingerb448f4e2017-01-24 13:06:00 -08001599 .get_rxnfc = netvsc_get_rxnfc,
Haiyang Zhang4823eb22017-08-21 19:22:39 -07001600 .set_rxnfc = netvsc_set_rxnfc,
stephen hemminger962f3fe2017-01-24 13:06:02 -08001601 .get_rxfh_key_size = netvsc_get_rxfh_key_size,
1602 .get_rxfh_indir_size = netvsc_rss_indir_size,
1603 .get_rxfh = netvsc_get_rxfh,
1604 .set_rxfh = netvsc_set_rxfh,
Philippe Reynes5e8456f2017-03-08 23:41:04 +01001605 .get_link_ksettings = netvsc_get_link_ksettings,
1606 .set_link_ksettings = netvsc_set_link_ksettings,
stephen hemminger8b532792017-08-09 17:46:11 -07001607 .get_ringparam = netvsc_get_ringparam,
1608 .set_ringparam = netvsc_set_ringparam,
Stephen Hemmingerf82f4ad2010-05-04 09:58:57 -07001609};
1610
Greg Kroah-Hartmandf2fff22009-08-31 21:11:12 -07001611static const struct net_device_ops device_ops = {
1612 .ndo_open = netvsc_open,
1613 .ndo_stop = netvsc_close,
1614 .ndo_start_xmit = netvsc_start_xmit,
Stephen Hemmingerbee9d412018-03-02 13:49:09 -08001615 .ndo_change_rx_flags = netvsc_change_rx_flags,
1616 .ndo_set_rx_mode = netvsc_set_rx_mode,
Haiyang Zhang4d447c92011-12-15 13:45:17 -08001617 .ndo_change_mtu = netvsc_change_mtu,
Haiyang Zhangb681b582010-08-03 19:15:31 +00001618 .ndo_validate_addr = eth_validate_addr,
Haiyang Zhang1ce09e82012-07-10 07:19:22 +00001619 .ndo_set_mac_address = netvsc_set_mac_addr,
Haiyang Zhang5b54dac2014-04-21 10:20:28 -07001620 .ndo_select_queue = netvsc_select_queue,
sixiao@microsoft.com7eafd9b2015-05-14 01:00:25 -07001621 .ndo_get_stats64 = netvsc_get_stats64,
Richard Weinberger316158f2014-07-09 16:23:59 +02001622#ifdef CONFIG_NET_POLL_CONTROLLER
1623 .ndo_poll_controller = netvsc_poll_controller,
1624#endif
Greg Kroah-Hartmandf2fff22009-08-31 21:11:12 -07001625};
1626
Haiyang Zhangc996edc2011-04-06 15:18:00 -07001627/*
Vitaly Kuznetsov27a70af2015-11-27 11:39:55 +01001628 * Handle link status changes. For RNDIS_STATUS_NETWORK_CHANGE emulate a link
1629 * down/up sequence. In the case of RNDIS_STATUS_MEDIA_CONNECT, when the carrier
1630 * is already present, send a GARP packet to network peers with netdev_notify_peers().
Haiyang Zhangc996edc2011-04-06 15:18:00 -07001631 */
Haiyang Zhang891de742014-02-12 16:54:27 -08001632static void netvsc_link_change(struct work_struct *w)
Haiyang Zhangc996edc2011-04-06 15:18:00 -07001633{
Vitaly Kuznetsov0a1275c2016-05-13 13:55:23 +02001634 struct net_device_context *ndev_ctx =
1635 container_of(w, struct net_device_context, dwork.work);
1636 struct hv_device *device_obj = ndev_ctx->device_ctx;
1637 struct net_device *net = hv_get_drvdata(device_obj);
K. Y. Srinivasan2ddd5e5f2011-09-13 10:59:49 -07001638 struct netvsc_device *net_device;
Haiyang Zhang891de742014-02-12 16:54:27 -08001639 struct rndis_device *rdev;
Vitaly Kuznetsov27a70af2015-11-27 11:39:55 +01001640 struct netvsc_reconfig *event = NULL;
1641 bool notify = false, reschedule = false;
1642 unsigned long flags, next_reconfig, delay;
Haiyang Zhangc996edc2011-04-06 15:18:00 -07001643
stephen hemminger9b4e9462017-08-24 16:49:16 -07001644	/* if changes are happening, come back later */
1645 if (!rtnl_trylock()) {
1646 schedule_delayed_work(&ndev_ctx->dwork, LINKCHANGE_INT);
1647 return;
1648 }
1649
stephen hemmingera0be4502017-03-22 14:51:01 -07001650 net_device = rtnl_dereference(ndev_ctx->nvdev);
1651 if (!net_device)
Vitaly Kuznetsov1bdcec82016-05-13 13:55:21 +02001652 goto out_unlock;
1653
Haiyang Zhang891de742014-02-12 16:54:27 -08001654 rdev = net_device->extension;
Haiyang Zhang891de742014-02-12 16:54:27 -08001655
Vitaly Kuznetsov27a70af2015-11-27 11:39:55 +01001656 next_reconfig = ndev_ctx->last_reconfig + LINKCHANGE_INT;
1657 if (time_is_after_jiffies(next_reconfig)) {
1658 /* link_watch only sends one notification with current state
1659 * per second, avoid doing reconfig more frequently. Handle
1660 * wrap around.
1661 */
1662 delay = next_reconfig - jiffies;
1663 delay = delay < LINKCHANGE_INT ? delay : LINKCHANGE_INT;
1664 schedule_delayed_work(&ndev_ctx->dwork, delay);
Vitaly Kuznetsov1bdcec82016-05-13 13:55:21 +02001665 goto out_unlock;
Vitaly Kuznetsov27a70af2015-11-27 11:39:55 +01001666 }
1667 ndev_ctx->last_reconfig = jiffies;
1668
1669 spin_lock_irqsave(&ndev_ctx->lock, flags);
1670 if (!list_empty(&ndev_ctx->reconfig_events)) {
1671 event = list_first_entry(&ndev_ctx->reconfig_events,
1672 struct netvsc_reconfig, list);
1673 list_del(&event->list);
1674 reschedule = !list_empty(&ndev_ctx->reconfig_events);
1675 }
1676 spin_unlock_irqrestore(&ndev_ctx->lock, flags);
1677
1678 if (!event)
Vitaly Kuznetsov1bdcec82016-05-13 13:55:21 +02001679 goto out_unlock;
Vitaly Kuznetsov27a70af2015-11-27 11:39:55 +01001680
1681 switch (event->event) {
1682 /* Only the following events are possible due to the check in
1683 * netvsc_linkstatus_callback()
1684 */
1685 case RNDIS_STATUS_MEDIA_CONNECT:
1686 if (rdev->link_state) {
1687 rdev->link_state = false;
stephen hemminger0c195562017-08-01 19:58:53 -07001688 netif_carrier_on(net);
Vitaly Kuznetsov27a70af2015-11-27 11:39:55 +01001689 netif_tx_wake_all_queues(net);
1690 } else {
1691 notify = true;
Haiyang Zhang3a494e72014-06-19 18:34:36 -07001692 }
Vitaly Kuznetsov27a70af2015-11-27 11:39:55 +01001693 kfree(event);
1694 break;
1695 case RNDIS_STATUS_MEDIA_DISCONNECT:
1696 if (!rdev->link_state) {
1697 rdev->link_state = true;
1698 netif_carrier_off(net);
1699 netif_tx_stop_all_queues(net);
1700 }
1701 kfree(event);
1702 break;
1703 case RNDIS_STATUS_NETWORK_CHANGE:
1704 /* Only makes sense if carrier is present */
1705 if (!rdev->link_state) {
1706 rdev->link_state = true;
1707 netif_carrier_off(net);
1708 netif_tx_stop_all_queues(net);
1709 event->event = RNDIS_STATUS_MEDIA_CONNECT;
1710 spin_lock_irqsave(&ndev_ctx->lock, flags);
Haiyang Zhang15cfd402016-04-21 16:13:01 -07001711 list_add(&event->list, &ndev_ctx->reconfig_events);
Vitaly Kuznetsov27a70af2015-11-27 11:39:55 +01001712 spin_unlock_irqrestore(&ndev_ctx->lock, flags);
1713 reschedule = true;
1714 }
1715 break;
Haiyang Zhang891de742014-02-12 16:54:27 -08001716 }
1717
1718 rtnl_unlock();
1719
1720 if (notify)
1721 netdev_notify_peers(net);
Vitaly Kuznetsov27a70af2015-11-27 11:39:55 +01001722
1723	/* link_watch only sends one notification with current state per
1724	 * second; handle the next reconfig event in 2 seconds.
1725 */
1726 if (reschedule)
1727 schedule_delayed_work(&ndev_ctx->dwork, LINKCHANGE_INT);
Vitaly Kuznetsov1bdcec82016-05-13 13:55:21 +02001728
1729 return;
1730
1731out_unlock:
1732 rtnl_unlock();
Haiyang Zhangc996edc2011-04-06 15:18:00 -07001733}
1734
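/* Walk init_net for a netvsc device whose permanent MAC matches the
 * given address; used to pair an arriving VF with its synthetic twin.
 */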
Stephen Hemmingere8ff40d2016-09-22 16:56:32 -07001735static struct net_device *get_netvsc_bymac(const u8 *mac)
KY Srinivasan84bf9ce2016-04-14 16:31:54 -07001736{
Stephen Hemmingere8ff40d2016-09-22 16:56:32 -07001737 struct net_device *dev;
KY Srinivasan84bf9ce2016-04-14 16:31:54 -07001738
Stephen Hemminger8737caa2016-08-23 12:17:44 -07001739 ASSERT_RTNL();
KY Srinivasan84bf9ce2016-04-14 16:31:54 -07001740
1741 for_each_netdev(&init_net, dev) {
Stephen Hemmingere8ff40d2016-09-22 16:56:32 -07001742 if (dev->netdev_ops != &device_ops)
1743 continue; /* not a netvsc device */
1744
1745 if (ether_addr_equal(mac, dev->perm_addr))
1746 return dev;
KY Srinivasan84bf9ce2016-04-14 16:31:54 -07001747 }
KY Srinivasan84bf9ce2016-04-14 16:31:54 -07001748
Stephen Hemmingere8ff40d2016-09-22 16:56:32 -07001749 return NULL;
1750}
1751
Stephen Hemmingerf207c102016-09-22 16:56:33 -07001752static struct net_device *get_netvsc_byref(struct net_device *vf_netdev)
Stephen Hemmingere8ff40d2016-09-22 16:56:32 -07001753{
1754 struct net_device *dev;
1755
1756 ASSERT_RTNL();
1757
1758 for_each_netdev(&init_net, dev) {
1759 struct net_device_context *net_device_ctx;
1760
1761 if (dev->netdev_ops != &device_ops)
1762 continue; /* not a netvsc device */
1763
1764 net_device_ctx = netdev_priv(dev);
stephen hemminger79e8cbe2017-07-19 11:53:13 -07001765 if (!rtnl_dereference(net_device_ctx->nvdev))
Stephen Hemmingere8ff40d2016-09-22 16:56:32 -07001766 continue; /* device is removed */
1767
Stephen Hemmingerf207c102016-09-22 16:56:33 -07001768 if (rtnl_dereference(net_device_ctx->vf_netdev) == vf_netdev)
Stephen Hemmingere8ff40d2016-09-22 16:56:32 -07001769 return dev; /* a match */
1770 }
1771
1772 return NULL;
KY Srinivasan84bf9ce2016-04-14 16:31:54 -07001773}
1774
stephen hemminger0c195562017-08-01 19:58:53 -07001775/* Called when the VF is injecting data into the network stack.
1776 * Change the associated network device from the VF to netvsc.
1777 * Note: already called with rcu_read_lock held.
1778 */
1779static rx_handler_result_t netvsc_vf_handle_frame(struct sk_buff **pskb)
1780{
1781 struct sk_buff *skb = *pskb;
1782 struct net_device *ndev = rcu_dereference(skb->dev->rx_handler_data);
1783 struct net_device_context *ndev_ctx = netdev_priv(ndev);
1784 struct netvsc_vf_pcpu_stats *pcpu_stats
1785 = this_cpu_ptr(ndev_ctx->vf_stats);
1786
1787 skb->dev = ndev;
1788
1789 u64_stats_update_begin(&pcpu_stats->syncp);
1790 pcpu_stats->rx_packets++;
1791 pcpu_stats->rx_bytes += skb->len;
1792 u64_stats_update_end(&pcpu_stats->syncp);
1793
1794 return RX_HANDLER_ANOTHER;
1795}
1796
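/* Enslave the VF: take over its receive path with an rx_handler, link
 * it as a lower device of the synthetic NIC, mark it IFF_SLAVE, and
 * schedule the delayed datapath takeover.
 */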
1797static int netvsc_vf_join(struct net_device *vf_netdev,
1798 struct net_device *ndev)
1799{
1800 struct net_device_context *ndev_ctx = netdev_priv(ndev);
1801 int ret;
1802
1803 ret = netdev_rx_handler_register(vf_netdev,
1804 netvsc_vf_handle_frame, ndev);
1805 if (ret != 0) {
1806 netdev_err(vf_netdev,
1807 "can not register netvsc VF receive handler (err = %d)\n",
1808 ret);
1809 goto rx_handler_failed;
1810 }
1811
David Ahern42ab19e2017-10-04 17:48:47 -07001812 ret = netdev_upper_dev_link(vf_netdev, ndev, NULL);
stephen hemminger0c195562017-08-01 19:58:53 -07001813 if (ret != 0) {
1814 netdev_err(vf_netdev,
1815 "can not set master device %s (err = %d)\n",
1816 ndev->name, ret);
1817 goto upper_link_failed;
1818 }
1819
1820 /* set slave flag before open to prevent IPv6 addrconf */
1821 vf_netdev->flags |= IFF_SLAVE;
1822
stephen hemminger6123c662017-08-09 17:46:03 -07001823 schedule_delayed_work(&ndev_ctx->vf_takeover, VF_TAKEOVER_INT);
1824
1825 call_netdevice_notifiers(NETDEV_JOIN, vf_netdev);
stephen hemminger0c195562017-08-01 19:58:53 -07001826
1827 netdev_info(vf_netdev, "joined to %s\n", ndev->name);
1828 return 0;
1829
1830upper_link_failed:
1831 netdev_rx_handler_unregister(vf_netdev);
1832rx_handler_failed:
1833 return ret;
1834}
1835
1836static void __netvsc_vf_setup(struct net_device *ndev,
1837 struct net_device *vf_netdev)
1838{
1839 int ret;
1840
stephen hemminger0c195562017-08-01 19:58:53 -07001841 /* Align MTU of VF with master */
1842 ret = dev_set_mtu(vf_netdev, ndev->mtu);
1843 if (ret)
1844 netdev_warn(vf_netdev,
1845 "unable to change mtu to %u\n", ndev->mtu);
1846
Stephen Hemmingerbee9d412018-03-02 13:49:09 -08001847 /* set multicast etc flags on VF */
1848 dev_change_flags(vf_netdev, ndev->flags | IFF_SLAVE);
1849 dev_uc_sync(vf_netdev, ndev);
1850 dev_mc_sync(vf_netdev, ndev);
1851
stephen hemminger0c195562017-08-01 19:58:53 -07001852 if (netif_running(ndev)) {
1853 ret = dev_open(vf_netdev);
1854 if (ret)
1855 netdev_warn(vf_netdev,
1856 "unable to open: %d\n", ret);
1857 }
1858}
1859
1860/* Set up the VF as a slave of the synthetic device.
1861 * Runs in a workqueue to avoid recursion in netlink callbacks.
1862 */
1863static void netvsc_vf_setup(struct work_struct *w)
1864{
1865 struct net_device_context *ndev_ctx
stephen hemminger6123c662017-08-09 17:46:03 -07001866 = container_of(w, struct net_device_context, vf_takeover.work);
stephen hemminger0c195562017-08-01 19:58:53 -07001867 struct net_device *ndev = hv_get_drvdata(ndev_ctx->device_ctx);
1868 struct net_device *vf_netdev;
1869
stephen hemmingerfb84af82017-08-04 12:14:00 -07001870 if (!rtnl_trylock()) {
stephen hemminger6123c662017-08-09 17:46:03 -07001871 schedule_delayed_work(&ndev_ctx->vf_takeover, 0);
stephen hemmingerfb84af82017-08-04 12:14:00 -07001872 return;
1873 }
1874
stephen hemminger0c195562017-08-01 19:58:53 -07001875 vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
1876 if (vf_netdev)
1877 __netvsc_vf_setup(ndev, vf_netdev);
1878
1879 rtnl_unlock();
1880}
1881
KY Srinivasan84bf9ce2016-04-14 16:31:54 -07001882static int netvsc_register_vf(struct net_device *vf_netdev)
1883{
Vitaly Kuznetsov0a1275c2016-05-13 13:55:23 +02001884 struct net_device *ndev;
1885 struct net_device_context *net_device_ctx;
KY Srinivasan84bf9ce2016-04-14 16:31:54 -07001886 struct netvsc_device *netvsc_dev;
KY Srinivasan84bf9ce2016-04-14 16:31:54 -07001887
Stephen Hemmingere8ff40d2016-09-22 16:56:32 -07001888 if (vf_netdev->addr_len != ETH_ALEN)
1889 return NOTIFY_DONE;
1890
KY Srinivasan84bf9ce2016-04-14 16:31:54 -07001891 /*
1892 * We will use the MAC address to locate the synthetic interface to
1893 * associate with the VF interface. If we don't find a matching
1894 * synthetic interface, move on.
1895 */
Stephen Hemmingere8ff40d2016-09-22 16:56:32 -07001896 ndev = get_netvsc_bymac(vf_netdev->perm_addr);
Vitaly Kuznetsov0a1275c2016-05-13 13:55:23 +02001897 if (!ndev)
1898 return NOTIFY_DONE;
1899
1900 net_device_ctx = netdev_priv(ndev);
stephen hemminger545a8e72017-03-22 14:51:00 -07001901 netvsc_dev = rtnl_dereference(net_device_ctx->nvdev);
Stephen Hemmingerf207c102016-09-22 16:56:33 -07001902 if (!netvsc_dev || rtnl_dereference(net_device_ctx->vf_netdev))
KY Srinivasan84bf9ce2016-04-14 16:31:54 -07001903 return NOTIFY_DONE;
1904
stephen hemminger0c195562017-08-01 19:58:53 -07001905 if (netvsc_vf_join(vf_netdev, ndev) != 0)
1906 return NOTIFY_DONE;
1907
Vitaly Kuznetsov0a1275c2016-05-13 13:55:23 +02001908 netdev_info(ndev, "VF registering: %s\n", vf_netdev->name);
stephen hemminger0c195562017-08-01 19:58:53 -07001909
Stephen Hemminger07d0f002016-09-22 16:56:30 -07001910 dev_hold(vf_netdev);
Stephen Hemmingerf207c102016-09-22 16:56:33 -07001911 rcu_assign_pointer(net_device_ctx->vf_netdev, vf_netdev);
KY Srinivasan84bf9ce2016-04-14 16:31:54 -07001912 return NOTIFY_OK;
1913}
1914
Stephen Hemminger9a0c48d2017-08-31 16:16:12 -07001915/* VF up/down change detected, schedule to change data path */
1916static int netvsc_vf_changed(struct net_device *vf_netdev)
KY Srinivasan84bf9ce2016-04-14 16:31:54 -07001917{
KY Srinivasan84bf9ce2016-04-14 16:31:54 -07001918 struct net_device_context *net_device_ctx;
stephen hemminger7b83f522017-08-07 11:30:00 -07001919 struct netvsc_device *netvsc_dev;
stephen hemminger0c195562017-08-01 19:58:53 -07001920 struct net_device *ndev;
Stephen Hemminger9a0c48d2017-08-31 16:16:12 -07001921 bool vf_is_up = netif_running(vf_netdev);
KY Srinivasan84bf9ce2016-04-14 16:31:54 -07001922
Stephen Hemmingere8ff40d2016-09-22 16:56:32 -07001923 ndev = get_netvsc_byref(vf_netdev);
Vitaly Kuznetsov0a1275c2016-05-13 13:55:23 +02001924 if (!ndev)
1925 return NOTIFY_DONE;
1926
1927 net_device_ctx = netdev_priv(ndev);
stephen hemminger7b83f522017-08-07 11:30:00 -07001928 netvsc_dev = rtnl_dereference(net_device_ctx->nvdev);
1929 if (!netvsc_dev)
1930 return NOTIFY_DONE;
1931
Stephen Hemminger9a0c48d2017-08-31 16:16:12 -07001932 netvsc_switch_datapath(ndev, vf_is_up);
1933 netdev_info(ndev, "Data path switched %s VF: %s\n",
1934 vf_is_up ? "to" : "from", vf_netdev->name);
KY Srinivasan84bf9ce2016-04-14 16:31:54 -07001935
1936 return NOTIFY_OK;
1937}
1938
KY Srinivasan84bf9ce2016-04-14 16:31:54 -07001939static int netvsc_unregister_vf(struct net_device *vf_netdev)
1940{
Vitaly Kuznetsov0a1275c2016-05-13 13:55:23 +02001941 struct net_device *ndev;
Vitaly Kuznetsov0a1275c2016-05-13 13:55:23 +02001942 struct net_device_context *net_device_ctx;
KY Srinivasan84bf9ce2016-04-14 16:31:54 -07001943
Stephen Hemmingere8ff40d2016-09-22 16:56:32 -07001944 ndev = get_netvsc_byref(vf_netdev);
Vitaly Kuznetsov0a1275c2016-05-13 13:55:23 +02001945 if (!ndev)
1946 return NOTIFY_DONE;
1947
1948 net_device_ctx = netdev_priv(ndev);
stephen hemminger6123c662017-08-09 17:46:03 -07001949 cancel_delayed_work_sync(&net_device_ctx->vf_takeover);
Stephen Hemmingere8ff40d2016-09-22 16:56:32 -07001950
Vitaly Kuznetsov0a1275c2016-05-13 13:55:23 +02001951 netdev_info(ndev, "VF unregistering: %s\n", vf_netdev->name);
Stephen Hemmingerf207c102016-09-22 16:56:33 -07001952
Stephen Hemmingerec158f72017-08-31 16:16:13 -07001953 netdev_rx_handler_unregister(vf_netdev);
stephen hemminger0c195562017-08-01 19:58:53 -07001954 netdev_upper_dev_unlink(vf_netdev, ndev);
Stephen Hemmingerf207c102016-09-22 16:56:33 -07001955 RCU_INIT_POINTER(net_device_ctx->vf_netdev, NULL);
Stephen Hemminger07d0f002016-09-22 16:56:30 -07001956 dev_put(vf_netdev);
Stephen Hemmingerec158f72017-08-31 16:16:13 -07001957
KY Srinivasan84bf9ce2016-04-14 16:31:54 -07001958 return NOTIFY_OK;
1959}
1960
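/* Device add: allocate the net_device, initialize per-device context
 * and VF bookkeeping, bring up the RNDIS filter with default buffer
 * sizes, then register with the network stack.
 */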
K. Y. Srinivasan84946892011-09-13 10:59:38 -07001961static int netvsc_probe(struct hv_device *dev,
1962 const struct hv_vmbus_device_id *dev_id)
Greg Kroah-Hartmandf2fff22009-08-31 21:11:12 -07001963{
Greg Kroah-Hartmandf2fff22009-08-31 21:11:12 -07001964 struct net_device *net = NULL;
1965 struct net_device_context *net_device_ctx;
1966 struct netvsc_device_info device_info;
Haiyang Zhang5b54dac2014-04-21 10:20:28 -07001967 struct netvsc_device *nvdev;
stephen hemminger0c195562017-08-01 19:58:53 -07001968 int ret = -ENOMEM;
Greg Kroah-Hartmandf2fff22009-08-31 21:11:12 -07001969
Haiyang Zhang5b54dac2014-04-21 10:20:28 -07001970 net = alloc_etherdev_mq(sizeof(struct net_device_context),
stephen hemminger2b018882017-01-24 13:06:03 -08001971 VRSS_CHANNEL_MAX);
Greg Kroah-Hartmandf2fff22009-08-31 21:11:12 -07001972 if (!net)
stephen hemminger0c195562017-08-01 19:58:53 -07001973 goto no_net;
Greg Kroah-Hartmandf2fff22009-08-31 21:11:12 -07001974
Haiyang Zhang1b07da52014-03-04 14:11:06 -08001975 netif_carrier_off(net);
1976
Haiyang Zhangb37879e2016-08-04 10:42:14 -07001977 netvsc_init_settings(net);
1978
Greg Kroah-Hartmandf2fff22009-08-31 21:11:12 -07001979 net_device_ctx = netdev_priv(net);
K. Y. Srinivasan9efd21e2011-04-29 13:45:10 -07001980 net_device_ctx->device_ctx = dev;
Simon Xiao3f300ff2015-04-28 01:05:17 -07001981 net_device_ctx->msg_enable = netif_msg_init(debug, default_msg);
1982 if (netif_msg_probe(net_device_ctx))
1983 netdev_dbg(net, "netvsc msg_enable: %d\n",
1984 net_device_ctx->msg_enable);
1985
K. Y. Srinivasan2ddd5e5f2011-09-13 10:59:49 -07001986 hv_set_drvdata(dev, net);
Vitaly Kuznetsovf580aec2016-05-13 13:55:20 +02001987
Haiyang Zhang891de742014-02-12 16:54:27 -08001988 INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change);
Greg Kroah-Hartmandf2fff22009-08-31 21:11:12 -07001989
Vitaly Kuznetsov27a70af2015-11-27 11:39:55 +01001990 spin_lock_init(&net_device_ctx->lock);
1991 INIT_LIST_HEAD(&net_device_ctx->reconfig_events);
stephen hemminger6123c662017-08-09 17:46:03 -07001992 INIT_DELAYED_WORK(&net_device_ctx->vf_takeover, netvsc_vf_setup);
stephen hemminger0c195562017-08-01 19:58:53 -07001993
1994 net_device_ctx->vf_stats
1995 = netdev_alloc_pcpu_stats(struct netvsc_vf_pcpu_stats);
1996 if (!net_device_ctx->vf_stats)
1997 goto no_stats;
Vitaly Kuznetsov27a70af2015-11-27 11:39:55 +01001998
Greg Kroah-Hartmandf2fff22009-08-31 21:11:12 -07001999 net->netdev_ops = &device_ops;
Wilfried Klaebe7ad24ea2014-05-11 00:12:32 +00002000 net->ethtool_ops = &ethtool_ops;
K. Y. Srinivasan9efd21e2011-04-29 13:45:10 -07002001 SET_NETDEV_DEV(net, &dev->device);
Greg Kroah-Hartmandf2fff22009-08-31 21:11:12 -07002002
Vitaly Kuznetsov14a03cf2016-02-05 17:29:08 +01002003 /* We always need headroom for rndis header */
2004 net->needed_headroom = RNDIS_AND_PPI_SIZE;
2005
Haiyang Zhang6450f8f2017-09-22 15:31:38 -07002006	/* Initialize the number of queues to 1; we may change it if more
2007	 * channels are offered later.
2008 */
2009 netif_set_real_num_tx_queues(net, 1);
2010 netif_set_real_num_rx_queues(net, 1);
2011
Haiyang Zhang692e0842011-09-01 12:19:43 -07002012 /* Notify the netvsc driver of the new device */
Andrew Schwartzmeyer8ebdcc52015-08-11 17:14:31 -07002013 memset(&device_info, 0, sizeof(device_info));
stephen hemminger3071ada2017-03-22 14:50:59 -07002014 device_info.num_chn = VRSS_CHANNEL_DEFAULT;
stephen hemminger8b532792017-08-09 17:46:11 -07002015 device_info.send_sections = NETVSC_DEFAULT_TX;
Alex Ng0ab09be2017-09-20 11:17:35 -07002016 device_info.send_section_size = NETVSC_SEND_SECTION_SIZE;
stephen hemminger8b532792017-08-09 17:46:11 -07002017 device_info.recv_sections = NETVSC_DEFAULT_RX;
Alex Ng0ab09be2017-09-20 11:17:35 -07002018 device_info.recv_section_size = NETVSC_RECV_SECTION_SIZE;
stephen hemminger9749fed2017-07-19 11:53:16 -07002019
2020 nvdev = rndis_filter_device_add(dev, &device_info);
2021 if (IS_ERR(nvdev)) {
2022 ret = PTR_ERR(nvdev);
Haiyang Zhang692e0842011-09-01 12:19:43 -07002023 netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
stephen hemminger0c195562017-08-01 19:58:53 -07002024 goto rndis_failed;
Haiyang Zhang692e0842011-09-01 12:19:43 -07002025 }
stephen hemminger0c195562017-08-01 19:58:53 -07002026
Haiyang Zhang692e0842011-09-01 12:19:43 -07002027 memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN);
2028
Vitaly Kuznetsovaefd80e2017-11-15 15:12:55 +01002029 /* hw_features computed in rndis_netdev_set_hwcaps() */
stephen hemminger23312a32017-01-24 13:05:59 -08002030 net->features = net->hw_features |
2031 NETIF_F_HIGHDMA | NETIF_F_SG |
2032 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
2033 net->vlan_features = net->features;
2034
stephen hemminger9749fed2017-07-19 11:53:16 -07002035 netdev_lockdep_set_classes(net);
2036
Jarod Wilsond0c2c992016-10-20 13:55:21 -04002037 /* MTU range: 68 - 1500 or 65521 */
2038 net->min_mtu = NETVSC_MTU_MIN;
2039 if (nvdev->nvsp_version >= NVSP_PROTOCOL_VERSION_2)
2040 net->max_mtu = NETVSC_MTU - ETH_HLEN;
2041 else
2042 net->max_mtu = ETH_DATA_LEN;
2043
Haiyang Zhanga68f9612013-12-20 16:52:31 -08002044 ret = register_netdev(net);
2045 if (ret != 0) {
2046 pr_err("Unable to register netdev.\n");
stephen hemminger0c195562017-08-01 19:58:53 -07002047 goto register_failed;
Haiyang Zhanga68f9612013-12-20 16:52:31 -08002048 }
2049
Greg Kroah-Hartmandf2fff22009-08-31 21:11:12 -07002050 return ret;
stephen hemminger0c195562017-08-01 19:58:53 -07002051
2052register_failed:
2053 rndis_filter_device_remove(dev, nvdev);
2054rndis_failed:
2055 free_percpu(net_device_ctx->vf_stats);
2056no_stats:
2057 hv_set_drvdata(dev, NULL);
2058 free_netdev(net);
2059no_net:
2060 return ret;
Greg Kroah-Hartmandf2fff22009-08-31 21:11:12 -07002061}
2062
K. Y. Srinivasan415b0232011-04-29 13:45:12 -07002063static int netvsc_remove(struct hv_device *dev)
Greg Kroah-Hartmandf2fff22009-08-31 21:11:12 -07002064{
Haiyang Zhang122a5f62011-05-27 06:21:55 -07002065 struct net_device_context *ndev_ctx;
Stephen Hemmingerec158f72017-08-31 16:16:13 -07002066 struct net_device *vf_netdev;
2067 struct net_device *net;
K. Y. Srinivasan2ddd5e5f2011-09-13 10:59:49 -07002068
Vitaly Kuznetsov3d541ac2016-05-13 13:55:22 +02002069 net = hv_get_drvdata(dev);
Greg Kroah-Hartmandf2fff22009-08-31 21:11:12 -07002070 if (net == NULL) {
K. Y. Srinivasan415b0232011-04-29 13:45:12 -07002071 dev_err(&dev->device, "No net device to remove\n");
Greg Kroah-Hartmandf2fff22009-08-31 21:11:12 -07002072 return 0;
2073 }
2074
Haiyang Zhang122a5f62011-05-27 06:21:55 -07002075 ndev_ctx = netdev_priv(net);
Vitaly Kuznetsov3d541ac2016-05-13 13:55:22 +02002076
stephen hemmingera0be4502017-03-22 14:51:01 -07002077 netif_device_detach(net);
Vitaly Kuznetsovf580aec2016-05-13 13:55:20 +02002078
Haiyang Zhang122a5f62011-05-27 06:21:55 -07002079 cancel_delayed_work_sync(&ndev_ctx->dwork);
2080
Greg Kroah-Hartmandf2fff22009-08-31 21:11:12 -07002081 /*
2082	 * Call into the vsc driver to let it know that the device is being
stephen hemmingera0be4502017-03-22 14:51:01 -07002083	 * removed. Also blocks MTU and channel changes.
Greg Kroah-Hartmandf2fff22009-08-31 21:11:12 -07002084 */
stephen hemmingera0be4502017-03-22 14:51:01 -07002085 rtnl_lock();
Stephen Hemmingerec158f72017-08-31 16:16:13 -07002086 vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
2087 if (vf_netdev)
2088 netvsc_unregister_vf(vf_netdev);
2089
Stephen Hemminger8195b132017-09-06 13:53:05 -07002090 unregister_netdevice(net);
2091
stephen hemminger79e8cbe2017-07-19 11:53:13 -07002092 rndis_filter_device_remove(dev,
2093 rtnl_dereference(ndev_ctx->nvdev));
stephen hemmingera0be4502017-03-22 14:51:01 -07002094 rtnl_unlock();
2095
Vitaly Kuznetsov3d541ac2016-05-13 13:55:22 +02002096 hv_set_drvdata(dev, NULL);
2097
stephen hemminger0c195562017-08-01 19:58:53 -07002098 free_percpu(ndev_ctx->vf_stats);
Simon Xiao6c80f3f2017-01-24 13:06:13 -08002099 free_netdev(net);
Haiyang Zhangdf06bcf2011-05-23 09:03:47 -07002100 return 0;
Greg Kroah-Hartmandf2fff22009-08-31 21:11:12 -07002101}
2102
K. Y. Srinivasan345c4cc2011-08-25 09:48:34 -07002103static const struct hv_vmbus_device_id id_table[] = {
Greg Kroah-Hartmanc45cf2d2011-08-25 11:41:33 -07002104 /* Network guid */
K. Y. Srinivasan8f505942013-01-23 17:42:42 -08002105 { HV_NIC_GUID, },
Greg Kroah-Hartmanc45cf2d2011-08-25 11:41:33 -07002106 { },
K. Y. Srinivasan345c4cc2011-08-25 09:48:34 -07002107};
2108
2109MODULE_DEVICE_TABLE(vmbus, id_table);
2110
K. Y. Srinivasanf1542a62011-05-10 07:55:16 -07002111/* The one and only one */
K. Y. Srinivasanfde0ef92011-05-12 19:35:08 -07002112static struct hv_driver netvsc_drv = {
Haiyang Zhangd31b20f2012-03-07 10:02:00 +00002113 .name = KBUILD_MODNAME,
K. Y. Srinivasan345c4cc2011-08-25 09:48:34 -07002114 .id_table = id_table,
K. Y. Srinivasanfde0ef92011-05-12 19:35:08 -07002115 .probe = netvsc_probe,
2116 .remove = netvsc_remove,
K. Y. Srinivasand4890972011-05-10 07:55:17 -07002117};
K. Y. Srinivasanf1542a62011-05-10 07:55:16 -07002118
KY Srinivasan84bf9ce2016-04-14 16:31:54 -07002119/*
2120 * On Hyper-V, every VF interface is matched with a corresponding
2121 * synthetic interface. The synthetic interface is presented first
2122 * to the guest. When the corresponding VF instance is registered,
2123 * we will take care of switching the data path.
2124 */
2125static int netvsc_netdev_event(struct notifier_block *this,
2126 unsigned long event, void *ptr)
2127{
2128 struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);
2129
Stephen Hemmingeree837a12016-09-22 16:56:31 -07002130 /* Skip our own events */
2131 if (event_dev->netdev_ops == &device_ops)
2132 return NOTIFY_DONE;
2133
2134 /* Avoid non-Ethernet type devices */
2135 if (event_dev->type != ARPHRD_ETHER)
2136 return NOTIFY_DONE;
2137
Vitaly Kuznetsov0dbff142016-08-15 17:48:43 +02002138	/* Avoid a VLAN dev with the same MAC registering as a VF */
Parav Panditd0d7b102017-02-04 11:00:49 -06002139 if (is_vlan_dev(event_dev))
Vitaly Kuznetsov0dbff142016-08-15 17:48:43 +02002140 return NOTIFY_DONE;
2141
2142	/* Avoid a bonding master dev with the same MAC registering as a VF */
Stephen Hemmingeree837a12016-09-22 16:56:31 -07002143 if ((event_dev->priv_flags & IFF_BONDING) &&
2144 (event_dev->flags & IFF_MASTER))
Haiyang Zhangcb2911f2016-06-02 12:02:04 -07002145 return NOTIFY_DONE;
2146
KY Srinivasan84bf9ce2016-04-14 16:31:54 -07002147 switch (event) {
2148 case NETDEV_REGISTER:
2149 return netvsc_register_vf(event_dev);
2150 case NETDEV_UNREGISTER:
2151 return netvsc_unregister_vf(event_dev);
2152 case NETDEV_UP:
KY Srinivasan84bf9ce2016-04-14 16:31:54 -07002153 case NETDEV_DOWN:
Stephen Hemminger9a0c48d2017-08-31 16:16:12 -07002154 return netvsc_vf_changed(event_dev);
KY Srinivasan84bf9ce2016-04-14 16:31:54 -07002155 default:
2156 return NOTIFY_DONE;
2157 }
2158}
2159
2160static struct notifier_block netvsc_netdev_notifier = {
2161 .notifier_call = netvsc_netdev_event,
2162};
2163
K. Y. Srinivasana9869c92011-05-12 19:35:17 -07002164static void __exit netvsc_drv_exit(void)
Hank Janssenfceaf242009-07-13 15:34:54 -07002165{
KY Srinivasan84bf9ce2016-04-14 16:31:54 -07002166 unregister_netdevice_notifier(&netvsc_netdev_notifier);
Greg Kroah-Hartman768fa212011-08-25 15:07:32 -07002167 vmbus_driver_unregister(&netvsc_drv);
Hank Janssenfceaf242009-07-13 15:34:54 -07002168}
2169
K. Y. Srinivasan1fde28c2011-05-12 19:35:16 -07002170static int __init netvsc_drv_init(void)
Greg Kroah-Hartmandf2fff22009-08-31 21:11:12 -07002171{
KY Srinivasan84bf9ce2016-04-14 16:31:54 -07002172 int ret;
2173
Haiyang Zhangfa85a6c2012-07-25 08:08:41 +00002174 if (ring_size < RING_SIZE_MIN) {
2175 ring_size = RING_SIZE_MIN;
Stephen Hemmingera7f99d02017-12-01 11:01:47 -08002176 pr_info("Increased ring_size to %u (min allowed)\n",
Haiyang Zhangfa85a6c2012-07-25 08:08:41 +00002177 ring_size);
2178 }
Stephen Hemmingera7f99d02017-12-01 11:01:47 -08002179 netvsc_ring_bytes = ring_size * PAGE_SIZE;
2180 netvsc_ring_reciprocal = reciprocal_value(netvsc_ring_bytes);
KY Srinivasan84bf9ce2016-04-14 16:31:54 -07002181
Stephen Hemmingera7f99d02017-12-01 11:01:47 -08002182 ret = vmbus_driver_register(&netvsc_drv);
KY Srinivasan84bf9ce2016-04-14 16:31:54 -07002183 if (ret)
2184 return ret;
2185
2186 register_netdevice_notifier(&netvsc_netdev_notifier);
2187 return 0;
Greg Kroah-Hartmandf2fff22009-08-31 21:11:12 -07002188}
2189
Hank Janssen26c14cc2010-02-11 23:02:42 +00002190MODULE_LICENSE("GPL");
Stephen Hemminger7880fc52010-05-04 09:58:52 -07002191MODULE_DESCRIPTION("Microsoft Hyper-V network driver");
Hank Janssenfceaf242009-07-13 15:34:54 -07002192
K. Y. Srinivasan1fde28c2011-05-12 19:35:16 -07002193module_init(netvsc_drv_init);
K. Y. Srinivasana9869c92011-05-12 19:35:17 -07002194module_exit(netvsc_drv_exit);