/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/pci.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/rtnetlink.h>
#include <linux/netpoll.h>

#include <net/arp.h>
#include <net/route.h>
#include <net/sock.h>
#include <net/pkt_sched.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>

#include "hyperv_net.h"

#define RING_SIZE_MIN	64
#define RETRY_US_LO	5000
#define RETRY_US_HI	10000
#define RETRY_MAX	2000	/* >10 sec */

#define LINKCHANGE_INT (2 * HZ)
#define VF_TAKEOVER_INT (HZ / 10)

static unsigned int ring_size __ro_after_init = 128;
module_param(ring_size, uint, 0444);
MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");
unsigned int netvsc_ring_bytes __ro_after_init;

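/* Illustrative usage, assuming this file builds into the usual hv_netvsc
 * module (module name not shown here): "modprobe hv_netvsc ring_size=256".
 * The 0444 permission makes the parameter read-only after load.
 */
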
static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
				NETIF_MSG_LINK | NETIF_MSG_IFUP |
				NETIF_MSG_IFDOWN | NETIF_MSG_RX_ERR |
				NETIF_MSG_TX_ERR;

static int debug = -1;
module_param(debug, int, 0444);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

static LIST_HEAD(netvsc_dev_list);

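/* Propagate changes to the PROMISC/ALLMULTI flags on the synthetic NIC to
 * the slave VF device, if one is present.
 */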
static void netvsc_change_rx_flags(struct net_device *net, int change)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	struct net_device *vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
	int inc;

	if (!vf_netdev)
		return;

	if (change & IFF_PROMISC) {
		inc = (net->flags & IFF_PROMISC) ? 1 : -1;
		dev_set_promiscuity(vf_netdev, inc);
	}

	if (change & IFF_ALLMULTI) {
		inc = (net->flags & IFF_ALLMULTI) ? 1 : -1;
		dev_set_allmulti(vf_netdev, inc);
	}
}

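/* Sync the synthetic NIC's unicast/multicast lists to the VF device (if
 * present) and push the updated RNDIS receive filter to the host.
 */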
static void netvsc_set_rx_mode(struct net_device *net)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	struct net_device *vf_netdev;
	struct netvsc_device *nvdev;

	rcu_read_lock();
	vf_netdev = rcu_dereference(ndev_ctx->vf_netdev);
	if (vf_netdev) {
		dev_uc_sync(vf_netdev, net);
		dev_mc_sync(vf_netdev, net);
	}

	nvdev = rcu_dereference(ndev_ctx->nvdev);
	if (nvdev)
		rndis_filter_update(nvdev);
	rcu_read_unlock();
}

static void netvsc_tx_enable(struct netvsc_device *nvscdev,
			     struct net_device *ndev)
{
	nvscdev->tx_disable = false;
	virt_wmb(); /* ensure queue wake up mechanism is on */

	netif_tx_wake_all_queues(ndev);
}

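/* ndo_open handler: open the RNDIS filter, restore carrier/tx state from
 * the current link state, and transparently open the slave VF if bonded.
 */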
static int netvsc_open(struct net_device *net)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	struct net_device *vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
	struct netvsc_device *nvdev = rtnl_dereference(ndev_ctx->nvdev);
	struct rndis_device *rdev;
	int ret = 0;

	netif_carrier_off(net);

	/* Open up the device */
	ret = rndis_filter_open(nvdev);
	if (ret != 0) {
		netdev_err(net, "unable to open device (ret %d).\n", ret);
		return ret;
	}

	rdev = nvdev->extension;
	if (!rdev->link_state) {
		netif_carrier_on(net);
		netvsc_tx_enable(nvdev, net);
	}

	if (vf_netdev) {
		/* Setting the synthetic device up transparently sets the
		 * slave as up. If open fails, then the slave will still
		 * be offline (and not used).
		 */
		ret = dev_open(vf_netdev, NULL);
		if (ret)
			netdev_warn(net,
				    "unable to open slave: %s: %d\n",
				    vf_netdev->name, ret);
	}
	return 0;
}

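/* Poll every channel until both its inbound and outbound rings are empty,
 * sleeping RETRY_US_LO..RETRY_US_HI between attempts; fails with
 * -ETIMEDOUT after RETRY_MAX retries (>10 sec).
 */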
static int netvsc_wait_until_empty(struct netvsc_device *nvdev)
{
	unsigned int retry = 0;
	int i;

	/* Ensure pending bytes in ring are read */
	for (;;) {
		u32 aread = 0;

		for (i = 0; i < nvdev->num_chn; i++) {
			struct vmbus_channel *chn
				= nvdev->chan_table[i].channel;

			if (!chn)
				continue;

			/* make sure receive not running now */
			napi_synchronize(&nvdev->chan_table[i].napi);

			aread = hv_get_bytes_to_read(&chn->inbound);
			if (aread)
				break;

			aread = hv_get_bytes_to_read(&chn->outbound);
			if (aread)
				break;
		}

		if (aread == 0)
			return 0;

		if (++retry > RETRY_MAX)
			return -ETIMEDOUT;

		usleep_range(RETRY_US_LO, RETRY_US_HI);
	}
}

static void netvsc_tx_disable(struct netvsc_device *nvscdev,
			      struct net_device *ndev)
{
	if (nvscdev) {
		nvscdev->tx_disable = true;
		virt_wmb(); /* ensure txq will not wake up after stop */
	}

	netif_tx_disable(ndev);
}

static int netvsc_close(struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct net_device *vf_netdev
		= rtnl_dereference(net_device_ctx->vf_netdev);
	struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
	int ret;

	netvsc_tx_disable(nvdev, net);

	/* No need to close rndis filter if it is removed already */
	if (!nvdev)
		return 0;

	ret = rndis_filter_close(nvdev);
	if (ret != 0) {
		netdev_err(net, "unable to close device (ret %d).\n", ret);
		return ret;
	}

	ret = netvsc_wait_until_empty(nvdev);
	if (ret)
		netdev_err(net, "Ring buffer not empty after closing rndis\n");

	if (vf_netdev)
		dev_close(vf_netdev);

	return ret;
}

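/* Append a per-packet info (PPI) element of the given size and type to the
 * RNDIS message and return a pointer to its payload.
 */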
static inline void *init_ppi_data(struct rndis_message *msg,
				  u32 ppi_size, u32 pkt_type)
{
	struct rndis_packet *rndis_pkt = &msg->msg.pkt;
	struct rndis_per_packet_info *ppi;

	rndis_pkt->data_offset += ppi_size;
	ppi = (void *)rndis_pkt + rndis_pkt->per_pkt_info_offset
		+ rndis_pkt->per_pkt_info_len;

	ppi->size = ppi_size;
	ppi->type = pkt_type;
	ppi->internal = 0;
	ppi->ppi_offset = sizeof(struct rndis_per_packet_info);

	rndis_pkt->per_pkt_info_len += ppi_size;

	return ppi + 1;
}

/* Azure hosts don't support non-TCP port numbers in hashing for fragmented
 * packets. We can use ethtool to change UDP hash level when necessary.
 */
static inline u32 netvsc_get_hash(
	struct sk_buff *skb,
	const struct net_device_context *ndc)
{
	struct flow_keys flow;
	u32 hash, pkt_proto = 0;
	static u32 hashrnd __read_mostly;

	net_get_random_once(&hashrnd, sizeof(hashrnd));

	if (!skb_flow_dissect_flow_keys(skb, &flow, 0))
		return 0;

	switch (flow.basic.ip_proto) {
	case IPPROTO_TCP:
		if (flow.basic.n_proto == htons(ETH_P_IP))
			pkt_proto = HV_TCP4_L4HASH;
		else if (flow.basic.n_proto == htons(ETH_P_IPV6))
			pkt_proto = HV_TCP6_L4HASH;

		break;

	case IPPROTO_UDP:
		if (flow.basic.n_proto == htons(ETH_P_IP))
			pkt_proto = HV_UDP4_L4HASH;
		else if (flow.basic.n_proto == htons(ETH_P_IPV6))
			pkt_proto = HV_UDP6_L4HASH;

		break;
	}

	if (pkt_proto & ndc->l4_hash) {
		return skb_get_hash(skb);
	} else {
		if (flow.basic.n_proto == htons(ETH_P_IP))
			hash = jhash2((u32 *)&flow.addrs.v4addrs, 2, hashrnd);
		else if (flow.basic.n_proto == htons(ETH_P_IPV6))
			hash = jhash2((u32 *)&flow.addrs.v6addrs, 8, hashrnd);
		else
			hash = 0;

		skb_set_hash(skb, hash, PKT_HASH_TYPE_L3);
	}

	return hash;
}

static inline int netvsc_get_tx_queue(struct net_device *ndev,
				      struct sk_buff *skb, int old_idx)
{
	const struct net_device_context *ndc = netdev_priv(ndev);
	struct sock *sk = skb->sk;
	int q_idx;

	q_idx = ndc->tx_table[netvsc_get_hash(skb, ndc) &
			      (VRSS_SEND_TAB_SIZE - 1)];

	/* If queue index changed record the new value */
	if (q_idx != old_idx &&
	    sk && sk_fullsock(sk) && rcu_access_pointer(sk->sk_dst_cache))
		sk_tx_queue_set(sk, q_idx);

	return q_idx;
}

/*
 * Select queue for transmit.
 *
 * If a valid queue has already been assigned, then use that.
 * Otherwise compute tx queue based on hash and the send table.
 *
 * This is basically similar to default (__netdev_pick_tx) with the added step
 * of using the host send_table when no other queue has been assigned.
 *
 * TODO support XPS - but get_xps_queue not exported
 */
static u16 netvsc_pick_tx(struct net_device *ndev, struct sk_buff *skb)
{
	int q_idx = sk_tx_queue_get(skb->sk);

	if (q_idx < 0 || skb->ooo_okay || q_idx >= ndev->real_num_tx_queues) {
		/* If forwarding a packet, we use the recorded queue when
		 * available for better cache locality.
		 */
		if (skb_rx_queue_recorded(skb))
			q_idx = skb_get_rx_queue(skb);
		else
			q_idx = netvsc_get_tx_queue(ndev, skb, q_idx);
	}

	return q_idx;
}

static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
			       struct net_device *sb_dev,
			       select_queue_fallback_t fallback)
{
	struct net_device_context *ndc = netdev_priv(ndev);
	struct net_device *vf_netdev;
	u16 txq;

	rcu_read_lock();
	vf_netdev = rcu_dereference(ndc->vf_netdev);
	if (vf_netdev) {
		const struct net_device_ops *vf_ops = vf_netdev->netdev_ops;

		if (vf_ops->ndo_select_queue)
			txq = vf_ops->ndo_select_queue(vf_netdev, skb,
						       sb_dev, fallback);
		else
			txq = fallback(vf_netdev, skb, NULL);

		/* Record the queue selected by VF so that it can be
		 * used for common case where VF has more queues than
		 * the synthetic device.
		 */
		qdisc_skb_cb(skb)->slave_dev_queue_mapping = txq;
	} else {
		txq = netvsc_pick_tx(ndev, skb);
	}
	rcu_read_unlock();

	while (unlikely(txq >= ndev->real_num_tx_queues))
		txq -= ndev->real_num_tx_queues;

	return txq;
}

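/* Describe a (possibly compound) page range as hv_page_buffer slots, one
 * slot per physical page touched; returns the number of slots used.
 */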
static u32 fill_pg_buf(struct page *page, u32 offset, u32 len,
		       struct hv_page_buffer *pb)
{
	int j = 0;

	/* Deal with compound pages by ignoring unused part
	 * of the page.
	 */
	page += (offset >> PAGE_SHIFT);
	offset &= ~PAGE_MASK;

	while (len > 0) {
		unsigned long bytes;

		bytes = PAGE_SIZE - offset;
		if (bytes > len)
			bytes = len;
		pb[j].pfn = page_to_pfn(page);
		pb[j].offset = offset;
		pb[j].len = bytes;

		offset += bytes;
		len -= bytes;

		if (offset == PAGE_SIZE && len) {
			page++;
			offset = 0;
			j++;
		}
	}

	return j + 1;
}

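/* Lay out the page buffer array for transmit: the RNDIS header first, then
 * the skb linear data, then each skb fragment.
 */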
static u32 init_page_array(void *hdr, u32 len, struct sk_buff *skb,
			   struct hv_netvsc_packet *packet,
			   struct hv_page_buffer *pb)
{
	u32 slots_used = 0;
	char *data = skb->data;
	int frags = skb_shinfo(skb)->nr_frags;
	int i;

	/* The packet is laid out thus:
	 * 1. hdr: RNDIS header and PPI
	 * 2. skb linear data
	 * 3. skb fragment data
	 */
	slots_used += fill_pg_buf(virt_to_page(hdr),
				  offset_in_page(hdr),
				  len, &pb[slots_used]);

	packet->rmsg_size = len;
	packet->rmsg_pgcnt = slots_used;

	slots_used += fill_pg_buf(virt_to_page(data),
				  offset_in_page(data),
				  skb_headlen(skb), &pb[slots_used]);

	for (i = 0; i < frags; i++) {
		skb_frag_t *frag = skb_shinfo(skb)->frags + i;

		slots_used += fill_pg_buf(skb_frag_page(frag),
					  frag->page_offset,
					  skb_frag_size(frag), &pb[slots_used]);
	}
	return slots_used;
}

static int count_skb_frag_slots(struct sk_buff *skb)
{
	int i, frags = skb_shinfo(skb)->nr_frags;
	int pages = 0;

	for (i = 0; i < frags; i++) {
		skb_frag_t *frag = skb_shinfo(skb)->frags + i;
		unsigned long size = skb_frag_size(frag);
		unsigned long offset = frag->page_offset;

		/* Skip unused frames from start of page */
		offset &= ~PAGE_MASK;
		pages += PFN_UP(offset + size);
	}
	return pages;
}

static int netvsc_get_slots(struct sk_buff *skb)
{
	char *data = skb->data;
	unsigned int offset = offset_in_page(data);
	unsigned int len = skb_headlen(skb);
	int slots;
	int frag_slots;

	slots = DIV_ROUND_UP(offset + len, PAGE_SIZE);
	frag_slots = count_skb_frag_slots(skb);
	return slots + frag_slots;
}

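/* Classify the packet's L3/L4 protocol so the caller can test it against
 * the host-advertised tx_checksum_mask for checksum offload.
 */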
static u32 net_checksum_info(struct sk_buff *skb)
{
	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *ip = ip_hdr(skb);

		if (ip->protocol == IPPROTO_TCP)
			return TRANSPORT_INFO_IPV4_TCP;
		else if (ip->protocol == IPPROTO_UDP)
			return TRANSPORT_INFO_IPV4_UDP;
	} else {
		struct ipv6hdr *ip6 = ipv6_hdr(skb);

		if (ip6->nexthdr == IPPROTO_TCP)
			return TRANSPORT_INFO_IPV6_TCP;
		else if (ip6->nexthdr == IPPROTO_UDP)
			return TRANSPORT_INFO_IPV6_UDP;
	}

	return TRANSPORT_INFO_NOT_IP;
}

/* Send skb on the slave VF device. */
static int netvsc_vf_xmit(struct net_device *net, struct net_device *vf_netdev,
			  struct sk_buff *skb)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	unsigned int len = skb->len;
	int rc;

	skb->dev = vf_netdev;
	skb->queue_mapping = qdisc_skb_cb(skb)->slave_dev_queue_mapping;

	rc = dev_queue_xmit(skb);
	if (likely(rc == NET_XMIT_SUCCESS || rc == NET_XMIT_CN)) {
		struct netvsc_vf_pcpu_stats *pcpu_stats
			= this_cpu_ptr(ndev_ctx->vf_stats);

		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(ndev_ctx->vf_stats->tx_dropped);
	}

	return rc;
}

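/* ndo_start_xmit handler: redirect to the slave VF when it is running;
 * otherwise build the RNDIS message (header plus optional hash, VLAN, LSO
 * and checksum PPIs) in the skb headroom and pass it to netvsc_send().
 */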
static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct hv_netvsc_packet *packet = NULL;
	int ret;
	unsigned int num_data_pgs;
	struct rndis_message *rndis_msg;
	struct net_device *vf_netdev;
	u32 rndis_msg_size;
	u32 hash;
	struct hv_page_buffer pb[MAX_PAGE_BUFFER_COUNT];

	/* If VF is present and up, redirect packets to it.
	 * This function is already called with rcu_read_lock_bh held.
	 */
	vf_netdev = rcu_dereference_bh(net_device_ctx->vf_netdev);
	if (vf_netdev && netif_running(vf_netdev) &&
	    !netpoll_tx_running(net))
		return netvsc_vf_xmit(net, vf_netdev, skb);

	/* We will at most need two pages to describe the rndis
	 * header. We can only transmit MAX_PAGE_BUFFER_COUNT number
	 * of pages in a single packet. If the skb is scattered over
	 * more pages, we try linearizing it.
	 */

	num_data_pgs = netvsc_get_slots(skb) + 2;

	if (unlikely(num_data_pgs > MAX_PAGE_BUFFER_COUNT)) {
		++net_device_ctx->eth_stats.tx_scattered;

		if (skb_linearize(skb))
			goto no_memory;

		num_data_pgs = netvsc_get_slots(skb) + 2;
		if (num_data_pgs > MAX_PAGE_BUFFER_COUNT) {
			++net_device_ctx->eth_stats.tx_too_big;
			goto drop;
		}
	}

	/*
	 * Place the rndis header in the skb head room and
	 * the skb->cb will be used for hv_netvsc_packet
	 * structure.
	 */
	ret = skb_cow_head(skb, RNDIS_AND_PPI_SIZE);
	if (ret)
		goto no_memory;

	/* Use the skb control buffer for building up the packet */
	BUILD_BUG_ON(sizeof(struct hv_netvsc_packet) >
			FIELD_SIZEOF(struct sk_buff, cb));
	packet = (struct hv_netvsc_packet *)skb->cb;

	packet->q_idx = skb_get_queue_mapping(skb);

	packet->total_data_buflen = skb->len;
	packet->total_bytes = skb->len;
	packet->total_packets = 1;

	rndis_msg = (struct rndis_message *)skb->head;

	/* Add the rndis header */
	rndis_msg->ndis_msg_type = RNDIS_MSG_PACKET;
	rndis_msg->msg_len = packet->total_data_buflen;

	rndis_msg->msg.pkt = (struct rndis_packet) {
		.data_offset = sizeof(struct rndis_packet),
		.data_len = packet->total_data_buflen,
		.per_pkt_info_offset = sizeof(struct rndis_packet),
	};

	rndis_msg_size = RNDIS_MESSAGE_SIZE(struct rndis_packet);

	hash = skb_get_hash_raw(skb);
	if (hash != 0 && net->real_num_tx_queues > 1) {
		u32 *hash_info;

		rndis_msg_size += NDIS_HASH_PPI_SIZE;
		hash_info = init_ppi_data(rndis_msg, NDIS_HASH_PPI_SIZE,
					  NBL_HASH_VALUE);
		*hash_info = hash;
	}

	if (skb_vlan_tag_present(skb)) {
		struct ndis_pkt_8021q_info *vlan;

		rndis_msg_size += NDIS_VLAN_PPI_SIZE;
		vlan = init_ppi_data(rndis_msg, NDIS_VLAN_PPI_SIZE,
				     IEEE_8021Q_INFO);

		vlan->value = 0;
		vlan->vlanid = skb_vlan_tag_get_id(skb);
		vlan->cfi = skb_vlan_tag_get_cfi(skb);
		vlan->pri = skb_vlan_tag_get_prio(skb);
	}

	if (skb_is_gso(skb)) {
		struct ndis_tcp_lso_info *lso_info;

		rndis_msg_size += NDIS_LSO_PPI_SIZE;
		lso_info = init_ppi_data(rndis_msg, NDIS_LSO_PPI_SIZE,
					 TCP_LARGESEND_PKTINFO);

		lso_info->value = 0;
		lso_info->lso_v2_transmit.type = NDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE;
		if (skb->protocol == htons(ETH_P_IP)) {
			lso_info->lso_v2_transmit.ip_version =
				NDIS_TCP_LARGE_SEND_OFFLOAD_IPV4;
			ip_hdr(skb)->tot_len = 0;
			ip_hdr(skb)->check = 0;
			tcp_hdr(skb)->check =
				~csum_tcpudp_magic(ip_hdr(skb)->saddr,
						   ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
		} else {
			lso_info->lso_v2_transmit.ip_version =
				NDIS_TCP_LARGE_SEND_OFFLOAD_IPV6;
			ipv6_hdr(skb)->payload_len = 0;
			tcp_hdr(skb)->check =
				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						 &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
		}
		lso_info->lso_v2_transmit.tcp_header_offset = skb_transport_offset(skb);
		lso_info->lso_v2_transmit.mss = skb_shinfo(skb)->gso_size;
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (net_checksum_info(skb) & net_device_ctx->tx_checksum_mask) {
			struct ndis_tcp_ip_checksum_info *csum_info;

			rndis_msg_size += NDIS_CSUM_PPI_SIZE;
			csum_info = init_ppi_data(rndis_msg, NDIS_CSUM_PPI_SIZE,
						  TCPIP_CHKSUM_PKTINFO);

			csum_info->value = 0;
			csum_info->transmit.tcp_header_offset = skb_transport_offset(skb);

			if (skb->protocol == htons(ETH_P_IP)) {
				csum_info->transmit.is_ipv4 = 1;

				if (ip_hdr(skb)->protocol == IPPROTO_TCP)
					csum_info->transmit.tcp_checksum = 1;
				else
					csum_info->transmit.udp_checksum = 1;
			} else {
				csum_info->transmit.is_ipv6 = 1;

				if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
					csum_info->transmit.tcp_checksum = 1;
				else
					csum_info->transmit.udp_checksum = 1;
			}
		} else {
			/* Can't do offload of this type of checksum */
			if (skb_checksum_help(skb))
				goto drop;
		}
	}

	/* Start filling in the page buffers with the rndis hdr */
	rndis_msg->msg_len += rndis_msg_size;
	packet->total_data_buflen = rndis_msg->msg_len;
	packet->page_buf_cnt = init_page_array(rndis_msg, rndis_msg_size,
					       skb, packet, pb);

	/* timestamp packet in software */
	skb_tx_timestamp(skb);

	ret = netvsc_send(net, packet, rndis_msg, pb, skb);
	if (likely(ret == 0))
		return NETDEV_TX_OK;

	if (ret == -EAGAIN) {
		++net_device_ctx->eth_stats.tx_busy;
		return NETDEV_TX_BUSY;
	}

	if (ret == -ENOSPC)
		++net_device_ctx->eth_stats.tx_no_space;

drop:
	dev_kfree_skb_any(skb);
	net->stats.tx_dropped++;

	return NETDEV_TX_OK;

no_memory:
	++net_device_ctx->eth_stats.tx_no_memory;
	goto drop;
}

/*
 * netvsc_linkstatus_callback - Link up/down notification
 */
void netvsc_linkstatus_callback(struct net_device *net,
				struct rndis_message *resp)
{
	struct rndis_indicate_status *indicate = &resp->msg.indicate_status;
	struct net_device_context *ndev_ctx = netdev_priv(net);
	struct netvsc_reconfig *event;
	unsigned long flags;

	/* Update the physical link speed when changing to another vSwitch */
	if (indicate->status == RNDIS_STATUS_LINK_SPEED_CHANGE) {
		u32 speed;

		speed = *(u32 *)((void *)indicate
				 + indicate->status_buf_offset) / 10000;
		ndev_ctx->speed = speed;
		return;
	}

	/* Handle these link change statuses below */
	if (indicate->status != RNDIS_STATUS_NETWORK_CHANGE &&
	    indicate->status != RNDIS_STATUS_MEDIA_CONNECT &&
	    indicate->status != RNDIS_STATUS_MEDIA_DISCONNECT)
		return;

	if (net->reg_state != NETREG_REGISTERED)
		return;

	event = kzalloc(sizeof(*event), GFP_ATOMIC);
	if (!event)
		return;
	event->event = indicate->status;

	spin_lock_irqsave(&ndev_ctx->lock, flags);
	list_add_tail(&event->list, &ndev_ctx->reconfig_events);
	spin_unlock_irqrestore(&ndev_ctx->lock, flags);

	schedule_delayed_work(&ndev_ctx->dwork, 0);
}

static void netvsc_comp_ipcsum(struct sk_buff *skb)
{
	struct iphdr *iph = (struct iphdr *)skb->data;

	iph->check = 0;
	iph->check = ip_fast_csum(iph, iph->ihl);
}

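/* Build an skb from the receive-side RSC buffers: copy the data, apply
 * host checksum results (completing the IP checksum when needed) and the
 * VLAN tag, if present.
 */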
static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
					     struct netvsc_channel *nvchan)
{
	struct napi_struct *napi = &nvchan->napi;
	const struct ndis_pkt_8021q_info *vlan = nvchan->rsc.vlan;
	const struct ndis_tcp_ip_checksum_info *csum_info =
		nvchan->rsc.csum_info;
	struct sk_buff *skb;
	int i;

	skb = napi_alloc_skb(napi, nvchan->rsc.pktlen);
	if (!skb)
		return skb;

	/*
	 * Copy to skb. This copy is needed here since the memory pointed to
	 * by hv_netvsc_packet cannot be deallocated.
	 */
	for (i = 0; i < nvchan->rsc.cnt; i++)
		skb_put_data(skb, nvchan->rsc.data[i], nvchan->rsc.len[i]);

	skb->protocol = eth_type_trans(skb, net);

	/* skb is already created with CHECKSUM_NONE */
	skb_checksum_none_assert(skb);

	/* Incoming packets may have IP header checksum verified by the host.
	 * They may not have IP header checksum computed after coalescing.
	 * We compute it here if the flags are set, because on Linux, the IP
	 * checksum is always checked.
	 */
	if (csum_info && csum_info->receive.ip_checksum_value_invalid &&
	    csum_info->receive.ip_checksum_succeeded &&
	    skb->protocol == htons(ETH_P_IP))
		netvsc_comp_ipcsum(skb);

	/* Do L4 checksum offload if enabled and present. */
	if (csum_info && (net->features & NETIF_F_RXCSUM)) {
		if (csum_info->receive.tcp_checksum_succeeded ||
		    csum_info->receive.udp_checksum_succeeded)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	if (vlan) {
		u16 vlan_tci = vlan->vlanid | (vlan->pri << VLAN_PRIO_SHIFT) |
			(vlan->cfi ? VLAN_CFI_MASK : 0);

		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
				       vlan_tci);
	}

	return skb;
}

/*
 * netvsc_recv_callback - Callback when we receive a packet from the
 * "wire" on the specified device.
 */
int netvsc_recv_callback(struct net_device *net,
			 struct netvsc_device *net_device,
			 struct netvsc_channel *nvchan)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct vmbus_channel *channel = nvchan->channel;
	u16 q_idx = channel->offermsg.offer.sub_channel_index;
	struct sk_buff *skb;
	struct netvsc_stats *rx_stats;

	if (net->reg_state != NETREG_REGISTERED)
		return NVSP_STAT_FAIL;

	/* Allocate a skb - TODO direct I/O to pages? */
	skb = netvsc_alloc_recv_skb(net, nvchan);

	if (unlikely(!skb)) {
		++net_device_ctx->eth_stats.rx_no_memory;
		return NVSP_STAT_FAIL;
	}

	skb_record_rx_queue(skb, q_idx);

	/*
	 * Even if injecting the packet, record the statistics
	 * on the synthetic device because modifying the VF device
	 * statistics will not work correctly.
	 */
	rx_stats = &nvchan->rx_stats;
	u64_stats_update_begin(&rx_stats->syncp);
	rx_stats->packets++;
	rx_stats->bytes += nvchan->rsc.pktlen;

	if (skb->pkt_type == PACKET_BROADCAST)
		++rx_stats->broadcast;
	else if (skb->pkt_type == PACKET_MULTICAST)
		++rx_stats->multicast;
	u64_stats_update_end(&rx_stats->syncp);

	napi_gro_receive(&nvchan->napi, skb);
	return NVSP_STAT_SUCCESS;
}

static void netvsc_get_drvinfo(struct net_device *net,
			       struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
}

static void netvsc_get_channels(struct net_device *net,
				struct ethtool_channels *channel)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);

	if (nvdev) {
		channel->max_combined = nvdev->max_chn;
		channel->combined_count = nvdev->num_chn;
	}
}

/* Alloc struct netvsc_device_info, and initialize it from either an
 * existing struct netvsc_device or from default values.
 */
static struct netvsc_device_info *netvsc_devinfo_get
	(struct netvsc_device *nvdev)
{
	struct netvsc_device_info *dev_info;

	dev_info = kzalloc(sizeof(*dev_info), GFP_ATOMIC);

	if (!dev_info)
		return NULL;

	if (nvdev) {
		dev_info->num_chn = nvdev->num_chn;
		dev_info->send_sections = nvdev->send_section_cnt;
		dev_info->send_section_size = nvdev->send_section_size;
		dev_info->recv_sections = nvdev->recv_section_cnt;
		dev_info->recv_section_size = nvdev->recv_section_size;

		memcpy(dev_info->rss_key, nvdev->extension->rss_key,
		       NETVSC_HASH_KEYLEN);
	} else {
		dev_info->num_chn = VRSS_CHANNEL_DEFAULT;
		dev_info->send_sections = NETVSC_DEFAULT_TX;
		dev_info->send_section_size = NETVSC_SEND_SECTION_SIZE;
		dev_info->recv_sections = NETVSC_DEFAULT_RX;
		dev_info->recv_section_size = NETVSC_RECV_SECTION_SIZE;
	}

	return dev_info;
}

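/* Quiesce and remove the synthetic device: stop tx, close the RNDIS
 * filter, wait for the rings to drain, then detach and remove the device.
 */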
static int netvsc_detach(struct net_device *ndev,
			 struct netvsc_device *nvdev)
{
	struct net_device_context *ndev_ctx = netdev_priv(ndev);
	struct hv_device *hdev = ndev_ctx->device_ctx;
	int ret;

	/* Don't continue trying to set up sub channels */
	if (cancel_work_sync(&nvdev->subchan_work))
		nvdev->num_chn = 1;

	/* If device was up (receiving) then shutdown */
	if (netif_running(ndev)) {
		netvsc_tx_disable(nvdev, ndev);

		ret = rndis_filter_close(nvdev);
		if (ret) {
			netdev_err(ndev,
				   "unable to close device (ret %d).\n", ret);
			return ret;
		}

		ret = netvsc_wait_until_empty(nvdev);
		if (ret) {
			netdev_err(ndev,
				   "Ring buffer not empty after closing rndis\n");
			return ret;
		}
	}

	netif_device_detach(ndev);

	rndis_filter_device_remove(hdev, nvdev);

	return 0;
}

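/* Inverse of netvsc_detach(): recreate the synthetic device from dev_info,
 * falling back to a single queue if sub-channel setup fails, and reopen it
 * if the interface was running.
 */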
static int netvsc_attach(struct net_device *ndev,
			 struct netvsc_device_info *dev_info)
{
	struct net_device_context *ndev_ctx = netdev_priv(ndev);
	struct hv_device *hdev = ndev_ctx->device_ctx;
	struct netvsc_device *nvdev;
	struct rndis_device *rdev;
	int ret;

	nvdev = rndis_filter_device_add(hdev, dev_info);
	if (IS_ERR(nvdev))
		return PTR_ERR(nvdev);

	if (nvdev->num_chn > 1) {
		ret = rndis_set_subchannel(ndev, nvdev, dev_info);

		/* if unavailable, just proceed with one queue */
		if (ret) {
			nvdev->max_chn = 1;
			nvdev->num_chn = 1;
		}
	}

	/* In any case device is now ready */
	netif_device_attach(ndev);

	/* Note: enable and attach happen when sub-channels are set up */
	netif_carrier_off(ndev);

	if (netif_running(ndev)) {
		ret = rndis_filter_open(nvdev);
		if (ret)
			return ret;

		rdev = nvdev->extension;
		if (!rdev->link_state)
			netif_carrier_on(ndev);
	}

	return 0;
}

static int netvsc_set_channels(struct net_device *net,
			       struct ethtool_channels *channels)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct netvsc_device *nvdev = rtnl_dereference(net_device_ctx->nvdev);
	unsigned int orig, count = channels->combined_count;
	struct netvsc_device_info *device_info;
	int ret;

	/* We do not support separate count for rx, tx, or other */
	if (count == 0 ||
	    channels->rx_count || channels->tx_count || channels->other_count)
		return -EINVAL;

	if (!nvdev || nvdev->destroy)
		return -ENODEV;

	if (nvdev->nvsp_version < NVSP_PROTOCOL_VERSION_5)
		return -EINVAL;

	if (count > nvdev->max_chn)
		return -EINVAL;

	orig = nvdev->num_chn;

	device_info = netvsc_devinfo_get(nvdev);

	if (!device_info)
		return -ENOMEM;

	device_info->num_chn = count;

	ret = netvsc_detach(net, nvdev);
	if (ret)
		goto out;

	ret = netvsc_attach(net, device_info);
	if (ret) {
		device_info->num_chn = orig;
		if (netvsc_attach(net, device_info))
			netdev_err(net, "restoring channel setting failed\n");
	}

out:
	kfree(device_info);
	return ret;
}

static bool
netvsc_validate_ethtool_ss_cmd(const struct ethtool_link_ksettings *cmd)
{
	struct ethtool_link_ksettings diff1 = *cmd;
	struct ethtool_link_ksettings diff2 = {};

	diff1.base.speed = 0;
	diff1.base.duplex = 0;
	/* advertising and cmd are usually set */
	ethtool_link_ksettings_zero_link_mode(&diff1, advertising);
	diff1.base.cmd = 0;
	/* We set port to PORT_OTHER */
	diff2.base.port = PORT_OTHER;

	return !memcmp(&diff1, &diff2, sizeof(diff1));
}

static void netvsc_init_settings(struct net_device *dev)
{
	struct net_device_context *ndc = netdev_priv(dev);

	ndc->l4_hash = HV_DEFAULT_L4HASH;

	ndc->speed = SPEED_UNKNOWN;
	ndc->duplex = DUPLEX_FULL;

	dev->features = NETIF_F_LRO;
}

static int netvsc_get_link_ksettings(struct net_device *dev,
				     struct ethtool_link_ksettings *cmd)
{
	struct net_device_context *ndc = netdev_priv(dev);

	cmd->base.speed = ndc->speed;
	cmd->base.duplex = ndc->duplex;
	cmd->base.port = PORT_OTHER;

	return 0;
}

static int netvsc_set_link_ksettings(struct net_device *dev,
				     const struct ethtool_link_ksettings *cmd)
{
	struct net_device_context *ndc = netdev_priv(dev);
	u32 speed;

	speed = cmd->base.speed;
	if (!ethtool_validate_speed(speed) ||
	    !ethtool_validate_duplex(cmd->base.duplex) ||
	    !netvsc_validate_ethtool_ss_cmd(cmd))
		return -EINVAL;

	ndc->speed = speed;
	ndc->duplex = cmd->base.duplex;

	return 0;
}

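/* ndo_change_mtu handler: change the MTU of the underlying VF first, then
 * detach and re-attach the synthetic device; on failure, roll both back.
 */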
static int netvsc_change_mtu(struct net_device *ndev, int mtu)
{
	struct net_device_context *ndevctx = netdev_priv(ndev);
	struct net_device *vf_netdev = rtnl_dereference(ndevctx->vf_netdev);
	struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
	int orig_mtu = ndev->mtu;
	struct netvsc_device_info *device_info;
	int ret = 0;

	if (!nvdev || nvdev->destroy)
		return -ENODEV;

	device_info = netvsc_devinfo_get(nvdev);

	if (!device_info)
		return -ENOMEM;

	/* Change MTU of underlying VF netdev first. */
	if (vf_netdev) {
		ret = dev_set_mtu(vf_netdev, mtu);
		if (ret)
			goto out;
	}

	ret = netvsc_detach(ndev, nvdev);
	if (ret)
		goto rollback_vf;

	ndev->mtu = mtu;

	ret = netvsc_attach(ndev, device_info);
	if (!ret)
		goto out;

	/* Attempt rollback to original MTU */
	ndev->mtu = orig_mtu;

	if (netvsc_attach(ndev, device_info))
		netdev_err(ndev, "restoring mtu failed\n");
rollback_vf:
	if (vf_netdev)
		dev_set_mtu(vf_netdev, orig_mtu);

out:
	kfree(device_info);
	return ret;
}

static void netvsc_get_vf_stats(struct net_device *net,
				struct netvsc_vf_pcpu_stats *tot)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	int i;

	memset(tot, 0, sizeof(*tot));

	for_each_possible_cpu(i) {
		const struct netvsc_vf_pcpu_stats *stats
			= per_cpu_ptr(ndev_ctx->vf_stats, i);
		u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
		unsigned int start;

		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			rx_packets = stats->rx_packets;
			tx_packets = stats->tx_packets;
			rx_bytes = stats->rx_bytes;
			tx_bytes = stats->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		tot->rx_packets += rx_packets;
		tot->tx_packets += tx_packets;
		tot->rx_bytes += rx_bytes;
		tot->tx_bytes += tx_bytes;
		tot->tx_dropped += stats->tx_dropped;
	}
}

static void netvsc_get_pcpu_stats(struct net_device *net,
				  struct netvsc_ethtool_pcpu_stats *pcpu_tot)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	struct netvsc_device *nvdev = rcu_dereference_rtnl(ndev_ctx->nvdev);
	int i;

	/* fetch percpu stats of vf */
	for_each_possible_cpu(i) {
		const struct netvsc_vf_pcpu_stats *stats =
			per_cpu_ptr(ndev_ctx->vf_stats, i);
		struct netvsc_ethtool_pcpu_stats *this_tot = &pcpu_tot[i];
		unsigned int start;

		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			this_tot->vf_rx_packets = stats->rx_packets;
			this_tot->vf_tx_packets = stats->tx_packets;
			this_tot->vf_rx_bytes = stats->rx_bytes;
			this_tot->vf_tx_bytes = stats->tx_bytes;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));
		this_tot->rx_packets = this_tot->vf_rx_packets;
		this_tot->tx_packets = this_tot->vf_tx_packets;
		this_tot->rx_bytes = this_tot->vf_rx_bytes;
		this_tot->tx_bytes = this_tot->vf_tx_bytes;
	}

	/* fetch percpu stats of netvsc */
	for (i = 0; i < nvdev->num_chn; i++) {
		const struct netvsc_channel *nvchan = &nvdev->chan_table[i];
		const struct netvsc_stats *stats;
		struct netvsc_ethtool_pcpu_stats *this_tot =
			&pcpu_tot[nvchan->channel->target_cpu];
		u64 packets, bytes;
		unsigned int start;

		stats = &nvchan->tx_stats;
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			packets = stats->packets;
			bytes = stats->bytes;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		this_tot->tx_bytes += bytes;
		this_tot->tx_packets += packets;

		stats = &nvchan->rx_stats;
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			packets = stats->packets;
			bytes = stats->bytes;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		this_tot->rx_bytes += bytes;
		this_tot->rx_packets += packets;
	}
}

static void netvsc_get_stats64(struct net_device *net,
			       struct rtnl_link_stats64 *t)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	struct netvsc_device *nvdev = rcu_dereference_rtnl(ndev_ctx->nvdev);
	struct netvsc_vf_pcpu_stats vf_tot;
	int i;

	if (!nvdev)
		return;

	netdev_stats_to_stats64(t, &net->stats);

	netvsc_get_vf_stats(net, &vf_tot);
	t->rx_packets += vf_tot.rx_packets;
	t->tx_packets += vf_tot.tx_packets;
	t->rx_bytes += vf_tot.rx_bytes;
	t->tx_bytes += vf_tot.tx_bytes;
	t->tx_dropped += vf_tot.tx_dropped;

	for (i = 0; i < nvdev->num_chn; i++) {
		const struct netvsc_channel *nvchan = &nvdev->chan_table[i];
		const struct netvsc_stats *stats;
		u64 packets, bytes, multicast;
		unsigned int start;

		stats = &nvchan->tx_stats;
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			packets = stats->packets;
			bytes = stats->bytes;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		t->tx_bytes += bytes;
		t->tx_packets += packets;

		stats = &nvchan->rx_stats;
		do {
			start = u64_stats_fetch_begin_irq(&stats->syncp);
			packets = stats->packets;
			bytes = stats->bytes;
			multicast = stats->multicast + stats->broadcast;
		} while (u64_stats_fetch_retry_irq(&stats->syncp, start));

		t->rx_bytes += bytes;
		t->rx_packets += packets;
		t->multicast += multicast;
	}
}

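/* Change the MAC address on the VF first (when one is enslaved),
 * then on the synthetic NIC via RNDIS; if the RNDIS request fails,
 * the VF is rolled back so both devices keep the same address.
 */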
static int netvsc_set_mac_addr(struct net_device *ndev, void *p)
{
	struct net_device_context *ndc = netdev_priv(ndev);
	struct net_device *vf_netdev = rtnl_dereference(ndc->vf_netdev);
	struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
	struct sockaddr *addr = p;
	int err;

	err = eth_prepare_mac_addr_change(ndev, p);
	if (err)
		return err;

	if (!nvdev)
		return -ENODEV;

	if (vf_netdev) {
		err = dev_set_mac_address(vf_netdev, addr, NULL);
		if (err)
			return err;
	}

	err = rndis_filter_set_device_mac(nvdev, addr->sa_data);
	if (!err) {
		eth_commit_mac_addr_change(ndev, p);
	} else if (vf_netdev) {
		/* rollback change on VF */
		memcpy(addr->sa_data, ndev->dev_addr, ETH_ALEN);
		dev_set_mac_address(vf_netdev, addr, NULL);
	}

	return err;
}

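/* ethtool statistics tables: each entry pairs a display name with
 * the byte offset of its counter, so get_strings()/get_ethtool_stats()
 * can walk the tables generically. The pcpu_stats names are printf
 * templates taking the CPU number.
 */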
static const struct {
	char name[ETH_GSTRING_LEN];
	u16 offset;
} netvsc_stats[] = {
	{ "tx_scattered", offsetof(struct netvsc_ethtool_stats, tx_scattered) },
	{ "tx_no_memory", offsetof(struct netvsc_ethtool_stats, tx_no_memory) },
	{ "tx_no_space", offsetof(struct netvsc_ethtool_stats, tx_no_space) },
	{ "tx_too_big", offsetof(struct netvsc_ethtool_stats, tx_too_big) },
	{ "tx_busy", offsetof(struct netvsc_ethtool_stats, tx_busy) },
	{ "tx_send_full", offsetof(struct netvsc_ethtool_stats, tx_send_full) },
	{ "rx_comp_busy", offsetof(struct netvsc_ethtool_stats, rx_comp_busy) },
	{ "rx_no_memory", offsetof(struct netvsc_ethtool_stats, rx_no_memory) },
	{ "stop_queue", offsetof(struct netvsc_ethtool_stats, stop_queue) },
	{ "wake_queue", offsetof(struct netvsc_ethtool_stats, wake_queue) },
}, pcpu_stats[] = {
	{ "cpu%u_rx_packets",
		offsetof(struct netvsc_ethtool_pcpu_stats, rx_packets) },
	{ "cpu%u_rx_bytes",
		offsetof(struct netvsc_ethtool_pcpu_stats, rx_bytes) },
	{ "cpu%u_tx_packets",
		offsetof(struct netvsc_ethtool_pcpu_stats, tx_packets) },
	{ "cpu%u_tx_bytes",
		offsetof(struct netvsc_ethtool_pcpu_stats, tx_bytes) },
	{ "cpu%u_vf_rx_packets",
		offsetof(struct netvsc_ethtool_pcpu_stats, vf_rx_packets) },
	{ "cpu%u_vf_rx_bytes",
		offsetof(struct netvsc_ethtool_pcpu_stats, vf_rx_bytes) },
	{ "cpu%u_vf_tx_packets",
		offsetof(struct netvsc_ethtool_pcpu_stats, vf_tx_packets) },
	{ "cpu%u_vf_tx_bytes",
		offsetof(struct netvsc_ethtool_pcpu_stats, vf_tx_bytes) },
}, vf_stats[] = {
	{ "vf_rx_packets", offsetof(struct netvsc_vf_pcpu_stats, rx_packets) },
	{ "vf_rx_bytes", offsetof(struct netvsc_vf_pcpu_stats, rx_bytes) },
	{ "vf_tx_packets", offsetof(struct netvsc_vf_pcpu_stats, tx_packets) },
	{ "vf_tx_bytes", offsetof(struct netvsc_vf_pcpu_stats, tx_bytes) },
	{ "vf_tx_dropped", offsetof(struct netvsc_vf_pcpu_stats, tx_dropped) },
};

#define NETVSC_GLOBAL_STATS_LEN	ARRAY_SIZE(netvsc_stats)
#define NETVSC_VF_STATS_LEN	ARRAY_SIZE(vf_stats)

/* statistics per CPU (rx/tx packets/bytes) */
#define NETVSC_PCPU_STATS_LEN (num_present_cpus() * ARRAY_SIZE(pcpu_stats))

/* 4 statistics per queue (rx/tx packets/bytes) */
#define NETVSC_QUEUE_STATS_LEN(dev) ((dev)->num_chn * 4)

static int netvsc_get_sset_count(struct net_device *dev, int string_set)
{
	struct net_device_context *ndc = netdev_priv(dev);
	struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);

	if (!nvdev)
		return -ENODEV;

	switch (string_set) {
	case ETH_SS_STATS:
		return NETVSC_GLOBAL_STATS_LEN
			+ NETVSC_VF_STATS_LEN
			+ NETVSC_QUEUE_STATS_LEN(nvdev)
			+ NETVSC_PCPU_STATS_LEN;
	default:
		return -EINVAL;
	}
}

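/* Emit counters in the same order netvsc_get_strings() names them:
 * global stats, VF totals, per-queue tx/rx pairs, then per-cpu stats.
 */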
static void netvsc_get_ethtool_stats(struct net_device *dev,
				     struct ethtool_stats *stats, u64 *data)
{
	struct net_device_context *ndc = netdev_priv(dev);
	struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
	const void *nds = &ndc->eth_stats;
	const struct netvsc_stats *qstats;
	struct netvsc_vf_pcpu_stats sum;
	struct netvsc_ethtool_pcpu_stats *pcpu_sum;
	unsigned int start;
	u64 packets, bytes;
	int i, j, cpu;

	if (!nvdev)
		return;

	for (i = 0; i < NETVSC_GLOBAL_STATS_LEN; i++)
		data[i] = *(unsigned long *)(nds + netvsc_stats[i].offset);

	netvsc_get_vf_stats(dev, &sum);
	for (j = 0; j < NETVSC_VF_STATS_LEN; j++)
		data[i++] = *(u64 *)((void *)&sum + vf_stats[j].offset);

	for (j = 0; j < nvdev->num_chn; j++) {
		qstats = &nvdev->chan_table[j].tx_stats;

		do {
			start = u64_stats_fetch_begin_irq(&qstats->syncp);
			packets = qstats->packets;
			bytes = qstats->bytes;
		} while (u64_stats_fetch_retry_irq(&qstats->syncp, start));
		data[i++] = packets;
		data[i++] = bytes;

		qstats = &nvdev->chan_table[j].rx_stats;
		do {
			start = u64_stats_fetch_begin_irq(&qstats->syncp);
			packets = qstats->packets;
			bytes = qstats->bytes;
		} while (u64_stats_fetch_retry_irq(&qstats->syncp, start));
		data[i++] = packets;
		data[i++] = bytes;
	}

	pcpu_sum = kvmalloc_array(num_possible_cpus(),
				  sizeof(struct netvsc_ethtool_pcpu_stats),
				  GFP_KERNEL);
	/* the allocation can fail; don't dereference a NULL pointer */
	if (!pcpu_sum)
		return;

	netvsc_get_pcpu_stats(dev, pcpu_sum);
	for_each_present_cpu(cpu) {
		struct netvsc_ethtool_pcpu_stats *this_sum = &pcpu_sum[cpu];

		for (j = 0; j < ARRAY_SIZE(pcpu_stats); j++)
			data[i++] = *(u64 *)((void *)this_sum
					     + pcpu_stats[j].offset);
	}
	kvfree(pcpu_sum);
}

static void netvsc_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	struct net_device_context *ndc = netdev_priv(dev);
	struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);
	u8 *p = data;
	int i, cpu;

	if (!nvdev)
		return;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_SIZE(netvsc_stats); i++) {
			memcpy(p, netvsc_stats[i].name, ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}

		for (i = 0; i < ARRAY_SIZE(vf_stats); i++) {
			memcpy(p, vf_stats[i].name, ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}

		for (i = 0; i < nvdev->num_chn; i++) {
			sprintf(p, "tx_queue_%u_packets", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "tx_queue_%u_bytes", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "rx_queue_%u_packets", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "rx_queue_%u_bytes", i);
			p += ETH_GSTRING_LEN;
		}

		for_each_present_cpu(cpu) {
			for (i = 0; i < ARRAY_SIZE(pcpu_stats); i++) {
				sprintf(p, pcpu_stats[i].name, cpu);
				p += ETH_GSTRING_LEN;
			}
		}

		break;
	}
}

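/* Report which header fields feed the RSS hash for a flow type:
 * source/destination addresses always, plus the L4 ports when the
 * matching bit is set in ndc->l4_hash.
 */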
static int
netvsc_get_rss_hash_opts(struct net_device_context *ndc,
			 struct ethtool_rxnfc *info)
{
	const u32 l4_flag = RXH_L4_B_0_1 | RXH_L4_B_2_3;

	info->data = RXH_IP_SRC | RXH_IP_DST;

	switch (info->flow_type) {
	case TCP_V4_FLOW:
		if (ndc->l4_hash & HV_TCP4_L4HASH)
			info->data |= l4_flag;

		break;

	case TCP_V6_FLOW:
		if (ndc->l4_hash & HV_TCP6_L4HASH)
			info->data |= l4_flag;

		break;

	case UDP_V4_FLOW:
		if (ndc->l4_hash & HV_UDP4_L4HASH)
			info->data |= l4_flag;

		break;

	case UDP_V6_FLOW:
		if (ndc->l4_hash & HV_UDP6_L4HASH)
			info->data |= l4_flag;

		break;

	case IPV4_FLOW:
	case IPV6_FLOW:
		break;
	default:
		info->data = 0;
		break;
	}

	return 0;
}

static int
netvsc_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
		 u32 *rules)
{
	struct net_device_context *ndc = netdev_priv(dev);
	struct netvsc_device *nvdev = rtnl_dereference(ndc->nvdev);

	if (!nvdev)
		return -ENODEV;

	switch (info->cmd) {
	case ETHTOOL_GRXRINGS:
		info->data = nvdev->num_chn;
		return 0;

	case ETHTOOL_GRXFH:
		return netvsc_get_rss_hash_opts(ndc, info);
	}
	return -EOPNOTSUPP;
}

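/* Only two hash configurations are accepted per flow type: 4-tuple
 * (addresses and ports), which sets the per-protocol l4_hash bit,
 * or 2-tuple (addresses only), which clears it.
 */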
static int netvsc_set_rss_hash_opts(struct net_device_context *ndc,
				    struct ethtool_rxnfc *info)
{
	if (info->data == (RXH_IP_SRC | RXH_IP_DST |
			   RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
		switch (info->flow_type) {
		case TCP_V4_FLOW:
			ndc->l4_hash |= HV_TCP4_L4HASH;
			break;

		case TCP_V6_FLOW:
			ndc->l4_hash |= HV_TCP6_L4HASH;
			break;

		case UDP_V4_FLOW:
			ndc->l4_hash |= HV_UDP4_L4HASH;
			break;

		case UDP_V6_FLOW:
			ndc->l4_hash |= HV_UDP6_L4HASH;
			break;

		default:
			return -EOPNOTSUPP;
		}

		return 0;
	}

	if (info->data == (RXH_IP_SRC | RXH_IP_DST)) {
		switch (info->flow_type) {
		case TCP_V4_FLOW:
			ndc->l4_hash &= ~HV_TCP4_L4HASH;
			break;

		case TCP_V6_FLOW:
			ndc->l4_hash &= ~HV_TCP6_L4HASH;
			break;

		case UDP_V4_FLOW:
			ndc->l4_hash &= ~HV_UDP4_L4HASH;
			break;

		case UDP_V6_FLOW:
			ndc->l4_hash &= ~HV_UDP6_L4HASH;
			break;

		default:
			return -EOPNOTSUPP;
		}

		return 0;
	}

	return -EOPNOTSUPP;
}

static int
netvsc_set_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *info)
{
	struct net_device_context *ndc = netdev_priv(ndev);

	if (info->cmd == ETHTOOL_SRXFH)
		return netvsc_set_rss_hash_opts(ndc, info);

	return -EOPNOTSUPP;
}

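/* RSS key and indirection-table accessors. The indirection table
 * maps hash values to channels, so every entry written by
 * netvsc_set_rxfh() must be below the current channel count.
 */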
static u32 netvsc_get_rxfh_key_size(struct net_device *dev)
{
	return NETVSC_HASH_KEYLEN;
}

static u32 netvsc_rss_indir_size(struct net_device *dev)
{
	return ITAB_NUM;
}

static int netvsc_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
			   u8 *hfunc)
{
	struct net_device_context *ndc = netdev_priv(dev);
	struct netvsc_device *ndev = rtnl_dereference(ndc->nvdev);
	struct rndis_device *rndis_dev;
	int i;

	if (!ndev)
		return -ENODEV;

	if (hfunc)
		*hfunc = ETH_RSS_HASH_TOP;	/* Toeplitz */

	rndis_dev = ndev->extension;
	if (indir) {
		for (i = 0; i < ITAB_NUM; i++)
			indir[i] = rndis_dev->rx_table[i];
	}

	if (key)
		memcpy(key, rndis_dev->rss_key, NETVSC_HASH_KEYLEN);

	return 0;
}

static int netvsc_set_rxfh(struct net_device *dev, const u32 *indir,
			   const u8 *key, const u8 hfunc)
{
	struct net_device_context *ndc = netdev_priv(dev);
	struct netvsc_device *ndev = rtnl_dereference(ndc->nvdev);
	struct rndis_device *rndis_dev;
	int i;

	if (!ndev)
		return -ENODEV;

	if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
		return -EOPNOTSUPP;

	rndis_dev = ndev->extension;
	if (indir) {
		for (i = 0; i < ITAB_NUM; i++)
			if (indir[i] >= ndev->num_chn)
				return -EINVAL;

		for (i = 0; i < ITAB_NUM; i++)
			rndis_dev->rx_table[i] = indir[i];
	}

	if (!key) {
		if (!indir)
			return 0;

		key = rndis_dev->rss_key;
	}

	return rndis_filter_set_rss_param(rndis_dev, key);
}

/* The Hyper-V RNDIS protocol does not have a ring in the hardware sense.
 * It does have a pre-allocated receive area that is divided into sections.
 */
static void __netvsc_get_ringparam(struct netvsc_device *nvdev,
				   struct ethtool_ringparam *ring)
{
	u32 max_buf_size;

	ring->rx_pending = nvdev->recv_section_cnt;
	ring->tx_pending = nvdev->send_section_cnt;

	if (nvdev->nvsp_version <= NVSP_PROTOCOL_VERSION_2)
		max_buf_size = NETVSC_RECEIVE_BUFFER_SIZE_LEGACY;
	else
		max_buf_size = NETVSC_RECEIVE_BUFFER_SIZE;

	ring->rx_max_pending = max_buf_size / nvdev->recv_section_size;
	ring->tx_max_pending = NETVSC_SEND_BUFFER_SIZE
		/ nvdev->send_section_size;
}

static void netvsc_get_ringparam(struct net_device *ndev,
				 struct ethtool_ringparam *ring)
{
	struct net_device_context *ndevctx = netdev_priv(ndev);
	struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);

	if (!nvdev)
		return;

	__netvsc_get_ringparam(nvdev, ring);
}

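/* Changing the section counts requires a full detach and re-attach
 * of the synthetic device; on attach failure the original sizes are
 * restored (best effort).
 */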
static int netvsc_set_ringparam(struct net_device *ndev,
				struct ethtool_ringparam *ring)
{
	struct net_device_context *ndevctx = netdev_priv(ndev);
	struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
	struct netvsc_device_info *device_info;
	struct ethtool_ringparam orig;
	u32 new_tx, new_rx;
	int ret = 0;

	if (!nvdev || nvdev->destroy)
		return -ENODEV;

	memset(&orig, 0, sizeof(orig));
	__netvsc_get_ringparam(nvdev, &orig);

	new_tx = clamp_t(u32, ring->tx_pending,
			 NETVSC_MIN_TX_SECTIONS, orig.tx_max_pending);
	new_rx = clamp_t(u32, ring->rx_pending,
			 NETVSC_MIN_RX_SECTIONS, orig.rx_max_pending);

	if (new_tx == orig.tx_pending &&
	    new_rx == orig.rx_pending)
		return 0;	 /* no change */

	device_info = netvsc_devinfo_get(nvdev);

	if (!device_info)
		return -ENOMEM;

	device_info->send_sections = new_tx;
	device_info->recv_sections = new_rx;

	ret = netvsc_detach(ndev, nvdev);
	if (ret)
		goto out;

	ret = netvsc_attach(ndev, device_info);
	if (ret) {
		device_info->send_sections = orig.tx_pending;
		device_info->recv_sections = orig.rx_pending;

		if (netvsc_attach(ndev, device_info))
			netdev_err(ndev, "restoring ringparam failed\n");
	}

out:
	kfree(device_info);
	return ret;
}

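/* Only the LRO feature bit is handled here; it is mapped onto the
 * RNDIS receive segment coalescing (RSC) offload for IPv4 and IPv6.
 */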
static int netvsc_set_features(struct net_device *ndev,
			       netdev_features_t features)
{
	netdev_features_t change = features ^ ndev->features;
	struct net_device_context *ndevctx = netdev_priv(ndev);
	struct netvsc_device *nvdev = rtnl_dereference(ndevctx->nvdev);
	struct ndis_offload_params offloads;

	if (!nvdev || nvdev->destroy)
		return -ENODEV;

	if (!(change & NETIF_F_LRO))
		return 0;

	memset(&offloads, 0, sizeof(struct ndis_offload_params));

	if (features & NETIF_F_LRO) {
		offloads.rsc_ip_v4 = NDIS_OFFLOAD_PARAMETERS_RSC_ENABLED;
		offloads.rsc_ip_v6 = NDIS_OFFLOAD_PARAMETERS_RSC_ENABLED;
	} else {
		offloads.rsc_ip_v4 = NDIS_OFFLOAD_PARAMETERS_RSC_DISABLED;
		offloads.rsc_ip_v6 = NDIS_OFFLOAD_PARAMETERS_RSC_DISABLED;
	}

	return rndis_filter_set_offload_params(ndev, nvdev, &offloads);
}

static u32 netvsc_get_msglevel(struct net_device *ndev)
{
	struct net_device_context *ndev_ctx = netdev_priv(ndev);

	return ndev_ctx->msg_enable;
}

static void netvsc_set_msglevel(struct net_device *ndev, u32 val)
{
	struct net_device_context *ndev_ctx = netdev_priv(ndev);

	ndev_ctx->msg_enable = val;
}

static const struct ethtool_ops ethtool_ops = {
	.get_drvinfo	= netvsc_get_drvinfo,
	.get_msglevel	= netvsc_get_msglevel,
	.set_msglevel	= netvsc_set_msglevel,
	.get_link	= ethtool_op_get_link,
	.get_ethtool_stats = netvsc_get_ethtool_stats,
	.get_sset_count = netvsc_get_sset_count,
	.get_strings	= netvsc_get_strings,
	.get_channels   = netvsc_get_channels,
	.set_channels   = netvsc_set_channels,
	.get_ts_info	= ethtool_op_get_ts_info,
	.get_rxnfc	= netvsc_get_rxnfc,
	.set_rxnfc	= netvsc_set_rxnfc,
	.get_rxfh_key_size = netvsc_get_rxfh_key_size,
	.get_rxfh_indir_size = netvsc_rss_indir_size,
	.get_rxfh	= netvsc_get_rxfh,
	.set_rxfh	= netvsc_set_rxfh,
	.get_link_ksettings = netvsc_get_link_ksettings,
	.set_link_ksettings = netvsc_set_link_ksettings,
	.get_ringparam	= netvsc_get_ringparam,
	.set_ringparam	= netvsc_set_ringparam,
};

static const struct net_device_ops device_ops = {
	.ndo_open =			netvsc_open,
	.ndo_stop =			netvsc_close,
	.ndo_start_xmit =		netvsc_start_xmit,
	.ndo_change_rx_flags =		netvsc_change_rx_flags,
	.ndo_set_rx_mode =		netvsc_set_rx_mode,
	.ndo_set_features =		netvsc_set_features,
	.ndo_change_mtu =		netvsc_change_mtu,
	.ndo_validate_addr =		eth_validate_addr,
	.ndo_set_mac_address =		netvsc_set_mac_addr,
	.ndo_select_queue =		netvsc_select_queue,
	.ndo_get_stats64 =		netvsc_get_stats64,
};

/*
 * Handle link status changes. For RNDIS_STATUS_NETWORK_CHANGE emulate a link
 * down/up sequence. For RNDIS_STATUS_MEDIA_CONNECT, when the carrier is
 * already present, send a GARP packet to network peers with
 * netdev_notify_peers().
 */
static void netvsc_link_change(struct work_struct *w)
{
	struct net_device_context *ndev_ctx =
		container_of(w, struct net_device_context, dwork.work);
	struct hv_device *device_obj = ndev_ctx->device_ctx;
	struct net_device *net = hv_get_drvdata(device_obj);
	struct netvsc_device *net_device;
	struct rndis_device *rdev;
	struct netvsc_reconfig *event = NULL;
	bool notify = false, reschedule = false;
	unsigned long flags, next_reconfig, delay;

	/* if changes are happening, come back later */
	if (!rtnl_trylock()) {
		schedule_delayed_work(&ndev_ctx->dwork, LINKCHANGE_INT);
		return;
	}

	net_device = rtnl_dereference(ndev_ctx->nvdev);
	if (!net_device)
		goto out_unlock;

	rdev = net_device->extension;

	next_reconfig = ndev_ctx->last_reconfig + LINKCHANGE_INT;
	if (time_is_after_jiffies(next_reconfig)) {
		/* link_watch only sends one notification with current state
		 * per second, avoid doing reconfig more frequently. Handle
		 * wrap around.
		 */
		delay = next_reconfig - jiffies;
		delay = delay < LINKCHANGE_INT ? delay : LINKCHANGE_INT;
		schedule_delayed_work(&ndev_ctx->dwork, delay);
		goto out_unlock;
	}
	ndev_ctx->last_reconfig = jiffies;

	spin_lock_irqsave(&ndev_ctx->lock, flags);
	if (!list_empty(&ndev_ctx->reconfig_events)) {
		event = list_first_entry(&ndev_ctx->reconfig_events,
					 struct netvsc_reconfig, list);
		list_del(&event->list);
		reschedule = !list_empty(&ndev_ctx->reconfig_events);
	}
	spin_unlock_irqrestore(&ndev_ctx->lock, flags);

	if (!event)
		goto out_unlock;

	switch (event->event) {
		/* Only the following events are possible due to the check in
		 * netvsc_linkstatus_callback()
		 */
	case RNDIS_STATUS_MEDIA_CONNECT:
		if (rdev->link_state) {
			rdev->link_state = false;
			netif_carrier_on(net);
			netvsc_tx_enable(net_device, net);
		} else {
			notify = true;
		}
		kfree(event);
		break;
	case RNDIS_STATUS_MEDIA_DISCONNECT:
		if (!rdev->link_state) {
			rdev->link_state = true;
			netif_carrier_off(net);
			netvsc_tx_disable(net_device, net);
		}
		kfree(event);
		break;
	case RNDIS_STATUS_NETWORK_CHANGE:
		/* Only makes sense if carrier is present */
		if (!rdev->link_state) {
			rdev->link_state = true;
			netif_carrier_off(net);
			netvsc_tx_disable(net_device, net);
			event->event = RNDIS_STATUS_MEDIA_CONNECT;
			spin_lock_irqsave(&ndev_ctx->lock, flags);
			list_add(&event->list, &ndev_ctx->reconfig_events);
			spin_unlock_irqrestore(&ndev_ctx->lock, flags);
			reschedule = true;
		}
		break;
	}

	rtnl_unlock();

	if (notify)
		netdev_notify_peers(net);

	/* link_watch only sends one notification with current state per
	 * second, handle next reconfig event in 2 seconds.
	 */
	if (reschedule)
		schedule_delayed_work(&ndev_ctx->dwork, LINKCHANGE_INT);

	return;

out_unlock:
	rtnl_unlock();
}

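/* Map a VF back to its netvsc master: the VF must have a master
 * upper device whose ops are ours and whose netvsc device has not
 * been removed.
 */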
static struct net_device *get_netvsc_byref(struct net_device *vf_netdev)
{
	struct net_device_context *net_device_ctx;
	struct net_device *dev;

	dev = netdev_master_upper_dev_get(vf_netdev);
	if (!dev || dev->netdev_ops != &device_ops)
		return NULL;	/* not a netvsc device */

	net_device_ctx = netdev_priv(dev);
	if (!rtnl_dereference(net_device_ctx->nvdev))
		return NULL;	/* device is removed */

	return dev;
}

/* Called when VF is injecting data into network stack.
 * Change the associated network device from VF to netvsc.
 * note: already called with rcu_read_lock
 */
static rx_handler_result_t netvsc_vf_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct net_device *ndev = rcu_dereference(skb->dev->rx_handler_data);
	struct net_device_context *ndev_ctx = netdev_priv(ndev);
	struct netvsc_vf_pcpu_stats *pcpu_stats
		 = this_cpu_ptr(ndev_ctx->vf_stats);

	skb->dev = ndev;

	u64_stats_update_begin(&pcpu_stats->syncp);
	pcpu_stats->rx_packets++;
	pcpu_stats->rx_bytes += skb->len;
	u64_stats_update_end(&pcpu_stats->syncp);

	return RX_HANDLER_ANOTHER;
}

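/* Enslave the VF under the synthetic device: take over its receive
 * path with an rx_handler, link it as the master's slave, and
 * schedule the delayed takeover that applies our configuration.
 */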
static int netvsc_vf_join(struct net_device *vf_netdev,
			  struct net_device *ndev)
{
	struct net_device_context *ndev_ctx = netdev_priv(ndev);
	int ret;

	ret = netdev_rx_handler_register(vf_netdev,
					 netvsc_vf_handle_frame, ndev);
	if (ret != 0) {
		netdev_err(vf_netdev,
			   "can not register netvsc VF receive handler (err = %d)\n",
			   ret);
		goto rx_handler_failed;
	}

	ret = netdev_master_upper_dev_link(vf_netdev, ndev,
					   NULL, NULL, NULL);
	if (ret != 0) {
		netdev_err(vf_netdev,
			   "can not set master device %s (err = %d)\n",
			   ndev->name, ret);
		goto upper_link_failed;
	}

	/* set slave flag before open to prevent IPv6 addrconf */
	vf_netdev->flags |= IFF_SLAVE;

	schedule_delayed_work(&ndev_ctx->vf_takeover, VF_TAKEOVER_INT);

	call_netdevice_notifiers(NETDEV_JOIN, vf_netdev);

	netdev_info(vf_netdev, "joined to %s\n", ndev->name);
	return 0;

upper_link_failed:
	netdev_rx_handler_unregister(vf_netdev);
rx_handler_failed:
	return ret;
}

static void __netvsc_vf_setup(struct net_device *ndev,
			      struct net_device *vf_netdev)
{
	int ret;

	/* Align MTU of VF with master */
	ret = dev_set_mtu(vf_netdev, ndev->mtu);
	if (ret)
		netdev_warn(vf_netdev,
			    "unable to change mtu to %u\n", ndev->mtu);

	/* set multicast etc flags on VF */
	dev_change_flags(vf_netdev, ndev->flags | IFF_SLAVE, NULL);

	/* sync address list from ndev to VF */
	netif_addr_lock_bh(ndev);
	dev_uc_sync(vf_netdev, ndev);
	dev_mc_sync(vf_netdev, ndev);
	netif_addr_unlock_bh(ndev);

	if (netif_running(ndev)) {
		ret = dev_open(vf_netdev, NULL);
		if (ret)
			netdev_warn(vf_netdev,
				    "unable to open: %d\n", ret);
	}
}

/* Setup VF as slave of the synthetic device.
 * Runs in workqueue to avoid recursion in netlink callbacks.
 */
static void netvsc_vf_setup(struct work_struct *w)
{
	struct net_device_context *ndev_ctx
		= container_of(w, struct net_device_context, vf_takeover.work);
	struct net_device *ndev = hv_get_drvdata(ndev_ctx->device_ctx);
	struct net_device *vf_netdev;

	if (!rtnl_trylock()) {
		schedule_delayed_work(&ndev_ctx->vf_takeover, 0);
		return;
	}

	vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
	if (vf_netdev)
		__netvsc_vf_setup(ndev, vf_netdev);

	rtnl_unlock();
}

/* Find netvsc by VF serial number.
 * The PCI hyperv controller records the serial number as the slot kobj name.
 */
static struct net_device *get_netvsc_byslot(const struct net_device *vf_netdev)
{
	struct device *parent = vf_netdev->dev.parent;
	struct net_device_context *ndev_ctx;
	struct pci_dev *pdev;
	u32 serial;

	if (!parent || !dev_is_pci(parent))
		return NULL; /* not a PCI device */

	pdev = to_pci_dev(parent);
	if (!pdev->slot) {
		netdev_notice(vf_netdev, "no PCI slot information\n");
		return NULL;
	}

	if (kstrtou32(pci_slot_name(pdev->slot), 10, &serial)) {
		netdev_notice(vf_netdev, "Invalid vf serial:%s\n",
			      pci_slot_name(pdev->slot));
		return NULL;
	}

	list_for_each_entry(ndev_ctx, &netvsc_dev_list, list) {
		if (!ndev_ctx->vf_alloc)
			continue;

		if (ndev_ctx->vf_serial == serial)
			return hv_get_drvdata(ndev_ctx->device_ctx);
	}

	netdev_notice(vf_netdev,
		      "no netdev found for vf serial:%u\n", serial);
	return NULL;
}

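/* NETDEV_REGISTER handler: match a newly registered VF to its
 * synthetic twin by PCI slot serial, move it into our namespace if
 * necessary, and enslave it.
 */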
static int netvsc_register_vf(struct net_device *vf_netdev)
{
	struct net_device_context *net_device_ctx;
	struct netvsc_device *netvsc_dev;
	struct net_device *ndev;
	int ret;

	if (vf_netdev->addr_len != ETH_ALEN)
		return NOTIFY_DONE;

	ndev = get_netvsc_byslot(vf_netdev);
	if (!ndev)
		return NOTIFY_DONE;

	net_device_ctx = netdev_priv(ndev);
	netvsc_dev = rtnl_dereference(net_device_ctx->nvdev);
	if (!netvsc_dev || rtnl_dereference(net_device_ctx->vf_netdev))
		return NOTIFY_DONE;

	/* if the synthetic interface is in a different namespace,
	 * then move the VF to that namespace; join will be
	 * done again in that context.
	 */
	if (!net_eq(dev_net(ndev), dev_net(vf_netdev))) {
		ret = dev_change_net_namespace(vf_netdev,
					       dev_net(ndev), "eth%d");
		if (ret)
			netdev_err(vf_netdev,
				   "could not move to same namespace as %s: %d\n",
				   ndev->name, ret);
		else
			netdev_info(vf_netdev,
				    "VF moved to namespace with: %s\n",
				    ndev->name);
		return NOTIFY_DONE;
	}

	netdev_info(ndev, "VF registering: %s\n", vf_netdev->name);

	if (netvsc_vf_join(vf_netdev, ndev) != 0)
		return NOTIFY_DONE;

	dev_hold(vf_netdev);
	rcu_assign_pointer(net_device_ctx->vf_netdev, vf_netdev);
	return NOTIFY_OK;
}

/* VF up/down change detected, schedule to change data path */
static int netvsc_vf_changed(struct net_device *vf_netdev)
{
	struct net_device_context *net_device_ctx;
	struct netvsc_device *netvsc_dev;
	struct net_device *ndev;
	bool vf_is_up = netif_running(vf_netdev);

	ndev = get_netvsc_byref(vf_netdev);
	if (!ndev)
		return NOTIFY_DONE;

	net_device_ctx = netdev_priv(ndev);
	netvsc_dev = rtnl_dereference(net_device_ctx->nvdev);
	if (!netvsc_dev)
		return NOTIFY_DONE;

	netvsc_switch_datapath(ndev, vf_is_up);
	netdev_info(ndev, "Data path switched %s VF: %s\n",
		    vf_is_up ? "to" : "from", vf_netdev->name);

	return NOTIFY_OK;
}

static int netvsc_unregister_vf(struct net_device *vf_netdev)
{
	struct net_device *ndev;
	struct net_device_context *net_device_ctx;

	ndev = get_netvsc_byref(vf_netdev);
	if (!ndev)
		return NOTIFY_DONE;

	net_device_ctx = netdev_priv(ndev);
	cancel_delayed_work_sync(&net_device_ctx->vf_takeover);

	netdev_info(ndev, "VF unregistering: %s\n", vf_netdev->name);

	netdev_rx_handler_unregister(vf_netdev);
	netdev_upper_dev_unlink(vf_netdev, ndev);
	RCU_INIT_POINTER(net_device_ctx->vf_netdev, NULL);
	dev_put(vf_netdev);

	return NOTIFY_OK;
}

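/* Device add: allocate the netdev, bring up the RNDIS filter to get
 * the MAC address and channel count, then register the netdev under
 * the rtnl lock (see the subchan_work comment below).
 */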
static int netvsc_probe(struct hv_device *dev,
			const struct hv_vmbus_device_id *dev_id)
{
	struct net_device *net = NULL;
	struct net_device_context *net_device_ctx;
	struct netvsc_device_info *device_info = NULL;
	struct netvsc_device *nvdev;
	int ret = -ENOMEM;

	net = alloc_etherdev_mq(sizeof(struct net_device_context),
				VRSS_CHANNEL_MAX);
	if (!net)
		goto no_net;

	netif_carrier_off(net);

	netvsc_init_settings(net);

	net_device_ctx = netdev_priv(net);
	net_device_ctx->device_ctx = dev;
	net_device_ctx->msg_enable = netif_msg_init(debug, default_msg);
	if (netif_msg_probe(net_device_ctx))
		netdev_dbg(net, "netvsc msg_enable: %d\n",
			   net_device_ctx->msg_enable);

	hv_set_drvdata(dev, net);

	INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change);

	spin_lock_init(&net_device_ctx->lock);
	INIT_LIST_HEAD(&net_device_ctx->reconfig_events);
	INIT_DELAYED_WORK(&net_device_ctx->vf_takeover, netvsc_vf_setup);

	net_device_ctx->vf_stats
		= netdev_alloc_pcpu_stats(struct netvsc_vf_pcpu_stats);
	if (!net_device_ctx->vf_stats)
		goto no_stats;

	net->netdev_ops = &device_ops;
	net->ethtool_ops = &ethtool_ops;
	SET_NETDEV_DEV(net, &dev->device);

	/* We always need headroom for rndis header */
	net->needed_headroom = RNDIS_AND_PPI_SIZE;

	/* Initialize the number of queues to be 1, we may change it if more
	 * channels are offered later.
	 */
	netif_set_real_num_tx_queues(net, 1);
	netif_set_real_num_rx_queues(net, 1);

	/* Notify the netvsc driver of the new device */
	device_info = netvsc_devinfo_get(NULL);

	if (!device_info) {
		ret = -ENOMEM;
		goto devinfo_failed;
	}

	nvdev = rndis_filter_device_add(dev, device_info);
	if (IS_ERR(nvdev)) {
		ret = PTR_ERR(nvdev);
		netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
		goto rndis_failed;
	}

	memcpy(net->dev_addr, device_info->mac_adr, ETH_ALEN);

	/* We must get rtnl lock before scheduling nvdev->subchan_work,
	 * otherwise netvsc_subchan_work() can get rtnl lock first and wait
	 * for all subchannels to show up, but that may not happen because
	 * netvsc_probe() can't get rtnl lock and as a result vmbus_onoffer()
	 * -> ... -> device_add() -> ... -> __device_attach() can't get
	 * the device lock, so all the subchannels can't be processed --
	 * finally netvsc_subchan_work() hangs forever.
	 */
	rtnl_lock();

	if (nvdev->num_chn > 1)
		schedule_work(&nvdev->subchan_work);

	/* hw_features computed in rndis_netdev_set_hwcaps() */
	net->features = net->hw_features |
		NETIF_F_HIGHDMA | NETIF_F_SG |
		NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
	net->vlan_features = net->features;

	netdev_lockdep_set_classes(net);

	/* MTU range: 68 - 1500 or 65521 */
	net->min_mtu = NETVSC_MTU_MIN;
	if (nvdev->nvsp_version >= NVSP_PROTOCOL_VERSION_2)
		net->max_mtu = NETVSC_MTU - ETH_HLEN;
	else
		net->max_mtu = ETH_DATA_LEN;

	ret = register_netdevice(net);
	if (ret != 0) {
		pr_err("Unable to register netdev.\n");
		goto register_failed;
	}

	list_add(&net_device_ctx->list, &netvsc_dev_list);
	rtnl_unlock();

	kfree(device_info);
	return 0;

register_failed:
	rtnl_unlock();
	rndis_filter_device_remove(dev, nvdev);
rndis_failed:
	kfree(device_info);
devinfo_failed:
	free_percpu(net_device_ctx->vf_stats);
no_stats:
	hv_set_drvdata(dev, NULL);
	free_netdev(net);
no_net:
	return ret;
}

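/* Device removal: unwind probe in reverse order by stopping delayed
 * work, detaching any VF, removing the RNDIS device, then
 * unregistering and freeing the netdev.
 */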
static int netvsc_remove(struct hv_device *dev)
{
	struct net_device_context *ndev_ctx;
	struct net_device *vf_netdev, *net;
	struct netvsc_device *nvdev;

	net = hv_get_drvdata(dev);
	if (net == NULL) {
		dev_err(&dev->device, "No net device to remove\n");
		return 0;
	}

	ndev_ctx = netdev_priv(net);

	cancel_delayed_work_sync(&ndev_ctx->dwork);

	rtnl_lock();
	nvdev = rtnl_dereference(ndev_ctx->nvdev);
	if (nvdev)
		cancel_work_sync(&nvdev->subchan_work);

	/*
	 * Call to the vsc driver to let it know that the device is being
	 * removed. Also blocks mtu and channel changes.
	 */
	vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev);
	if (vf_netdev)
		netvsc_unregister_vf(vf_netdev);

	if (nvdev)
		rndis_filter_device_remove(dev, nvdev);

	unregister_netdevice(net);
	list_del(&ndev_ctx->list);

	rtnl_unlock();

	hv_set_drvdata(dev, NULL);

	free_percpu(ndev_ctx->vf_stats);
	free_netdev(net);
	return 0;
}

static const struct hv_vmbus_device_id id_table[] = {
	/* Network guid */
	{ HV_NIC_GUID, },
	{ },
};

MODULE_DEVICE_TABLE(vmbus, id_table);

/* The one and only one */
static struct hv_driver netvsc_drv = {
	.name = KBUILD_MODNAME,
	.id_table = id_table,
	.probe = netvsc_probe,
	.remove = netvsc_remove,
	.driver = {
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
	},
};

/*
 * On Hyper-V, every VF interface is matched with a corresponding
 * synthetic interface. The synthetic interface is presented first
 * to the guest. When the corresponding VF instance is registered,
 * we will take care of switching the data path.
 */
static int netvsc_netdev_event(struct notifier_block *this,
			       unsigned long event, void *ptr)
{
	struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);

	/* Skip our own events */
	if (event_dev->netdev_ops == &device_ops)
		return NOTIFY_DONE;

	/* Avoid non-Ethernet type devices */
	if (event_dev->type != ARPHRD_ETHER)
		return NOTIFY_DONE;

	/* Avoid Vlan dev with same MAC registering as VF */
	if (is_vlan_dev(event_dev))
		return NOTIFY_DONE;

	/* Avoid Bonding master dev with same MAC registering as VF */
	if ((event_dev->priv_flags & IFF_BONDING) &&
	    (event_dev->flags & IFF_MASTER))
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_REGISTER:
		return netvsc_register_vf(event_dev);
	case NETDEV_UNREGISTER:
		return netvsc_unregister_vf(event_dev);
	case NETDEV_UP:
	case NETDEV_DOWN:
		return netvsc_vf_changed(event_dev);
	default:
		return NOTIFY_DONE;
	}
}

static struct notifier_block netvsc_netdev_notifier = {
	.notifier_call = netvsc_netdev_event,
};

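/* Module init validates the ring_size parameter, registers the
 * vmbus driver, and only then hooks the netdevice notifier; exit
 * unwinds in the reverse order.
 */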
static void __exit netvsc_drv_exit(void)
{
	unregister_netdevice_notifier(&netvsc_netdev_notifier);
	vmbus_driver_unregister(&netvsc_drv);
}

static int __init netvsc_drv_init(void)
{
	int ret;

	if (ring_size < RING_SIZE_MIN) {
		ring_size = RING_SIZE_MIN;
		pr_info("Increased ring_size to %u (min allowed)\n",
			ring_size);
	}
	netvsc_ring_bytes = ring_size * PAGE_SIZE;

	ret = vmbus_driver_register(&netvsc_drv);
	if (ret)
		return ret;

	register_netdevice_notifier(&netvsc_netdev_notifier);
	return 0;
}

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Microsoft Hyper-V network driver");

module_init(netvsc_drv_init);
module_exit(netvsc_drv_exit);