/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <net/arp.h>
#include <net/route.h>
#include <net/sock.h>
#include <net/pkt_sched.h>

#include "hyperv_net.h"

#define RING_SIZE_MIN		64
#define LINKCHANGE_INT		(2 * HZ)
#define NETVSC_HW_FEATURES	(NETIF_F_RXCSUM | \
				 NETIF_F_SG | \
				 NETIF_F_TSO | \
				 NETIF_F_TSO6 | \
				 NETIF_F_HW_CSUM)

/* Restrict GSO size to account for NVGRE */
#define NETVSC_GSO_MAX_SIZE	62768

static int ring_size = 128;
module_param(ring_size, int, S_IRUGO);
MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");

static int max_num_vrss_chns = 8;

static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
				NETIF_MSG_LINK | NETIF_MSG_IFUP |
				NETIF_MSG_IFDOWN | NETIF_MSG_RX_ERR |
				NETIF_MSG_TX_ERR;

static int debug = -1;
module_param(debug, int, S_IRUGO);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

static void do_set_multicast(struct work_struct *w)
{
	struct net_device_context *ndevctx =
		container_of(w, struct net_device_context, work);
	struct hv_device *device_obj = ndevctx->device_ctx;
	struct net_device *ndev = hv_get_drvdata(device_obj);
	struct netvsc_device *nvdev = ndevctx->nvdev;
	struct rndis_device *rdev;

	if (!nvdev)
		return;

	rdev = nvdev->extension;
	if (rdev == NULL)
		return;

	if (ndev->flags & IFF_PROMISC)
		rndis_filter_set_packet_filter(rdev,
			NDIS_PACKET_TYPE_PROMISCUOUS);
	else
		rndis_filter_set_packet_filter(rdev,
			NDIS_PACKET_TYPE_BROADCAST |
			NDIS_PACKET_TYPE_ALL_MULTICAST |
			NDIS_PACKET_TYPE_DIRECTED);
}

static void netvsc_set_multicast_list(struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);

	schedule_work(&net_device_ctx->work);
}

static int netvsc_open(struct net_device *net)
{
	struct netvsc_device *nvdev = net_device_to_netvsc_device(net);
	struct rndis_device *rdev;
	int ret = 0;

	netif_carrier_off(net);

	/* Open up the device */
	ret = rndis_filter_open(nvdev);
	if (ret != 0) {
		netdev_err(net, "unable to open device (ret %d).\n", ret);
		return ret;
	}

	netif_tx_wake_all_queues(net);

	rdev = nvdev->extension;
	if (!rdev->link_state)
		netif_carrier_on(net);

	return ret;
}

static int netvsc_close(struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct netvsc_device *nvdev = net_device_ctx->nvdev;
	int ret;
	u32 aread, awrite, i, msec = 10, retry = 0, retry_max = 20;
	struct vmbus_channel *chn;

	netif_tx_disable(net);

	/* Make sure netvsc_set_multicast_list doesn't re-enable filter! */
	cancel_work_sync(&net_device_ctx->work);
	ret = rndis_filter_close(nvdev);
	if (ret != 0) {
		netdev_err(net, "unable to close device (ret %d).\n", ret);
		return ret;
	}

	/* Ensure pending bytes in ring are read */
	while (true) {
		aread = 0;
		for (i = 0; i < nvdev->num_chn; i++) {
			chn = nvdev->chn_table[i];
			if (!chn)
				continue;

			hv_get_ringbuffer_availbytes(&chn->inbound, &aread,
						     &awrite);

			if (aread)
				break;

			hv_get_ringbuffer_availbytes(&chn->outbound, &aread,
						     &awrite);

			if (aread)
				break;
		}

		retry++;
		if (retry > retry_max || aread == 0)
			break;

		msleep(msec);

		if (msec < 1000)
			msec *= 2;
	}

	if (aread) {
		netdev_err(net, "Ring buffer not empty after closing rndis\n");
		ret = -ETIMEDOUT;
	}

	return ret;
}

static void *init_ppi_data(struct rndis_message *msg, u32 ppi_size,
			   int pkt_type)
{
	struct rndis_packet *rndis_pkt;
	struct rndis_per_packet_info *ppi;

	rndis_pkt = &msg->msg.pkt;
	rndis_pkt->data_offset += ppi_size;

	ppi = (struct rndis_per_packet_info *)((void *)rndis_pkt +
		rndis_pkt->per_pkt_info_offset + rndis_pkt->per_pkt_info_len);

	ppi->size = ppi_size;
	ppi->type = pkt_type;
	ppi->ppi_offset = sizeof(struct rndis_per_packet_info);

	rndis_pkt->per_pkt_info_len += ppi_size;

	return ppi;
}

static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
			       void *accel_priv, select_queue_fallback_t fallback)
{
	struct net_device_context *net_device_ctx = netdev_priv(ndev);
	struct netvsc_device *nvsc_dev = net_device_ctx->nvdev;
	u32 hash;
	u16 q_idx = 0;

	if (nvsc_dev == NULL || ndev->real_num_tx_queues <= 1)
		return 0;

	hash = skb_get_hash(skb);
	q_idx = nvsc_dev->send_table[hash % VRSS_SEND_TAB_SIZE] %
		ndev->real_num_tx_queues;

	if (!nvsc_dev->chn_table[q_idx])
		q_idx = 0;

	return q_idx;
}

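/*
 * Describe one contiguous buffer as a series of hv_page_buffer entries
 * (pfn/offset/len) and return the number of entries filled in.
 */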
static u32 fill_pg_buf(struct page *page, u32 offset, u32 len,
		       struct hv_page_buffer *pb)
{
	int j = 0;

	/* Deal with compound pages by ignoring the unused part
	 * of the page.
	 */
	page += (offset >> PAGE_SHIFT);
	offset &= ~PAGE_MASK;

	while (len > 0) {
		unsigned long bytes;

		bytes = PAGE_SIZE - offset;
		if (bytes > len)
			bytes = len;
		pb[j].pfn = page_to_pfn(page);
		pb[j].offset = offset;
		pb[j].len = bytes;

		offset += bytes;
		len -= bytes;

		if (offset == PAGE_SIZE && len) {
			page++;
			offset = 0;
			j++;
		}
	}

	return j + 1;
}

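/*
 * Build the page buffer array for a transmit: the RNDIS header and PPIs
 * first, then the skb linear data, then each skb fragment.
 */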
static u32 init_page_array(void *hdr, u32 len, struct sk_buff *skb,
			   struct hv_netvsc_packet *packet,
			   struct hv_page_buffer **page_buf)
{
	struct hv_page_buffer *pb = *page_buf;
	u32 slots_used = 0;
	char *data = skb->data;
	int frags = skb_shinfo(skb)->nr_frags;
	int i;

	/* The packet is laid out thus:
	 * 1. hdr: RNDIS header and PPI
	 * 2. skb linear data
	 * 3. skb fragment data
	 */
	if (hdr != NULL)
		slots_used += fill_pg_buf(virt_to_page(hdr),
					  offset_in_page(hdr),
					  len, &pb[slots_used]);

	packet->rmsg_size = len;
	packet->rmsg_pgcnt = slots_used;

	slots_used += fill_pg_buf(virt_to_page(data),
				  offset_in_page(data),
				  skb_headlen(skb), &pb[slots_used]);

	for (i = 0; i < frags; i++) {
		skb_frag_t *frag = skb_shinfo(skb)->frags + i;

		slots_used += fill_pg_buf(skb_frag_page(frag),
					  frag->page_offset,
					  skb_frag_size(frag), &pb[slots_used]);
	}
	return slots_used;
}

static int count_skb_frag_slots(struct sk_buff *skb)
{
	int i, frags = skb_shinfo(skb)->nr_frags;
	int pages = 0;

	for (i = 0; i < frags; i++) {
		skb_frag_t *frag = skb_shinfo(skb)->frags + i;
		unsigned long size = skb_frag_size(frag);
		unsigned long offset = frag->page_offset;

		/* Skip unused frames from start of page */
		offset &= ~PAGE_MASK;
		pages += PFN_UP(offset + size);
	}
	return pages;
}

static int netvsc_get_slots(struct sk_buff *skb)
{
	char *data = skb->data;
	unsigned int offset = offset_in_page(data);
	unsigned int len = skb_headlen(skb);
	int slots;
	int frag_slots;

	slots = DIV_ROUND_UP(offset + len, PAGE_SIZE);
	frag_slots = count_skb_frag_slots(skb);
	return slots + frag_slots;
}

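/*
 * Classify the packet's L3/L4 protocol and return a TRANSPORT_INFO_*
 * value; for IP packets *trans_off is set to the transport header offset.
 */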
static u32 get_net_transport_info(struct sk_buff *skb, u32 *trans_off)
{
	u32 ret_val = TRANSPORT_INFO_NOT_IP;

	if ((eth_hdr(skb)->h_proto != htons(ETH_P_IP)) &&
	    (eth_hdr(skb)->h_proto != htons(ETH_P_IPV6))) {
		goto not_ip;
	}

	*trans_off = skb_transport_offset(skb);

	if ((eth_hdr(skb)->h_proto == htons(ETH_P_IP))) {
		struct iphdr *iphdr = ip_hdr(skb);

		if (iphdr->protocol == IPPROTO_TCP)
			ret_val = TRANSPORT_INFO_IPV4_TCP;
		else if (iphdr->protocol == IPPROTO_UDP)
			ret_val = TRANSPORT_INFO_IPV4_UDP;
	} else {
		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
			ret_val = TRANSPORT_INFO_IPV6_TCP;
		else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
			ret_val = TRANSPORT_INFO_IPV6_UDP;
	}

not_ip:
	return ret_val;
}

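/*
 * Transmit path: build an RNDIS packet message in the skb headroom
 * (adding hash, VLAN, LSO or checksum per-packet info as needed),
 * describe it as a page buffer array and hand it to netvsc_send().
 */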
static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct hv_netvsc_packet *packet = NULL;
	int ret;
	unsigned int num_data_pgs;
	struct rndis_message *rndis_msg;
	struct rndis_packet *rndis_pkt;
	u32 rndis_msg_size;
	struct rndis_per_packet_info *ppi;
	struct ndis_tcp_ip_checksum_info *csum_info;
	int hdr_offset;
	u32 net_trans_info;
	u32 hash;
	u32 skb_length;
	struct hv_page_buffer page_buf[MAX_PAGE_BUFFER_COUNT];
	struct hv_page_buffer *pb = page_buf;

	/* We will at most need two pages to describe the rndis
	 * header. We can only transmit MAX_PAGE_BUFFER_COUNT number
	 * of pages in a single packet. If skb is scattered around
	 * more pages we try linearizing it.
	 */

	skb_length = skb->len;
	num_data_pgs = netvsc_get_slots(skb) + 2;

	if (unlikely(num_data_pgs > MAX_PAGE_BUFFER_COUNT)) {
		++net_device_ctx->eth_stats.tx_scattered;

		if (skb_linearize(skb))
			goto no_memory;

		num_data_pgs = netvsc_get_slots(skb) + 2;
		if (num_data_pgs > MAX_PAGE_BUFFER_COUNT) {
			++net_device_ctx->eth_stats.tx_too_big;
			goto drop;
		}
	}

	/*
	 * Place the rndis header in the skb head room and
	 * the skb->cb will be used for hv_netvsc_packet
	 * structure.
	 */
	ret = skb_cow_head(skb, RNDIS_AND_PPI_SIZE);
	if (ret)
		goto no_memory;

	/* Use the skb control buffer for building up the packet */
	BUILD_BUG_ON(sizeof(struct hv_netvsc_packet) >
			FIELD_SIZEOF(struct sk_buff, cb));
	packet = (struct hv_netvsc_packet *)skb->cb;

	packet->q_idx = skb_get_queue_mapping(skb);

	packet->total_data_buflen = skb->len;

	rndis_msg = (struct rndis_message *)skb->head;

	memset(rndis_msg, 0, RNDIS_AND_PPI_SIZE);

	/* Add the rndis header */
	rndis_msg->ndis_msg_type = RNDIS_MSG_PACKET;
	rndis_msg->msg_len = packet->total_data_buflen;
	rndis_pkt = &rndis_msg->msg.pkt;
	rndis_pkt->data_offset = sizeof(struct rndis_packet);
	rndis_pkt->data_len = packet->total_data_buflen;
	rndis_pkt->per_pkt_info_offset = sizeof(struct rndis_packet);

	rndis_msg_size = RNDIS_MESSAGE_SIZE(struct rndis_packet);

	hash = skb_get_hash_raw(skb);
	if (hash != 0 && net->real_num_tx_queues > 1) {
		rndis_msg_size += NDIS_HASH_PPI_SIZE;
		ppi = init_ppi_data(rndis_msg, NDIS_HASH_PPI_SIZE,
				    NBL_HASH_VALUE);
		*(u32 *)((void *)ppi + ppi->ppi_offset) = hash;
	}

	if (skb_vlan_tag_present(skb)) {
		struct ndis_pkt_8021q_info *vlan;

		rndis_msg_size += NDIS_VLAN_PPI_SIZE;
		ppi = init_ppi_data(rndis_msg, NDIS_VLAN_PPI_SIZE,
				    IEEE_8021Q_INFO);
		vlan = (struct ndis_pkt_8021q_info *)((void *)ppi +
						      ppi->ppi_offset);
		vlan->vlanid = skb->vlan_tci & VLAN_VID_MASK;
		vlan->pri = (skb->vlan_tci & VLAN_PRIO_MASK) >>
				VLAN_PRIO_SHIFT;
	}

	net_trans_info = get_net_transport_info(skb, &hdr_offset);

	/*
	 * Setup the sendside checksum offload only if this is not a
	 * GSO packet.
	 */
	if ((net_trans_info & (INFO_TCP | INFO_UDP)) && skb_is_gso(skb)) {
		struct ndis_tcp_lso_info *lso_info;

		rndis_msg_size += NDIS_LSO_PPI_SIZE;
		ppi = init_ppi_data(rndis_msg, NDIS_LSO_PPI_SIZE,
				    TCP_LARGESEND_PKTINFO);

		lso_info = (struct ndis_tcp_lso_info *)((void *)ppi +
							ppi->ppi_offset);

		lso_info->lso_v2_transmit.type = NDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE;
		if (net_trans_info & (INFO_IPV4 << 16)) {
			lso_info->lso_v2_transmit.ip_version =
				NDIS_TCP_LARGE_SEND_OFFLOAD_IPV4;
			ip_hdr(skb)->tot_len = 0;
			ip_hdr(skb)->check = 0;
			tcp_hdr(skb)->check =
				~csum_tcpudp_magic(ip_hdr(skb)->saddr,
						   ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
		} else {
			lso_info->lso_v2_transmit.ip_version =
				NDIS_TCP_LARGE_SEND_OFFLOAD_IPV6;
			ipv6_hdr(skb)->payload_len = 0;
			tcp_hdr(skb)->check =
				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
						 &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
		}
		lso_info->lso_v2_transmit.tcp_header_offset = hdr_offset;
		lso_info->lso_v2_transmit.mss = skb_shinfo(skb)->gso_size;
	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (net_trans_info & INFO_TCP) {
			rndis_msg_size += NDIS_CSUM_PPI_SIZE;
			ppi = init_ppi_data(rndis_msg, NDIS_CSUM_PPI_SIZE,
					    TCPIP_CHKSUM_PKTINFO);

			csum_info = (struct ndis_tcp_ip_checksum_info *)((void *)ppi +
									 ppi->ppi_offset);

			if (net_trans_info & (INFO_IPV4 << 16))
				csum_info->transmit.is_ipv4 = 1;
			else
				csum_info->transmit.is_ipv6 = 1;

			csum_info->transmit.tcp_checksum = 1;
			csum_info->transmit.tcp_header_offset = hdr_offset;
		} else {
			/* UDP checksum (and other) offload is not supported. */
			if (skb_checksum_help(skb))
				goto drop;
		}
	}

	/* Start filling in the page buffers with the rndis hdr */
	rndis_msg->msg_len += rndis_msg_size;
	packet->total_data_buflen = rndis_msg->msg_len;
	packet->page_buf_cnt = init_page_array(rndis_msg, rndis_msg_size,
					       skb, packet, &pb);

	/* timestamp packet in software */
	skb_tx_timestamp(skb);
	ret = netvsc_send(net_device_ctx->device_ctx, packet,
			  rndis_msg, &pb, skb);
	if (likely(ret == 0)) {
		struct netvsc_stats *tx_stats = this_cpu_ptr(net_device_ctx->tx_stats);

		u64_stats_update_begin(&tx_stats->syncp);
		tx_stats->packets++;
		tx_stats->bytes += skb_length;
		u64_stats_update_end(&tx_stats->syncp);
		return NETDEV_TX_OK;
	}

	if (ret == -EAGAIN) {
		++net_device_ctx->eth_stats.tx_busy;
		return NETDEV_TX_BUSY;
	}

	if (ret == -ENOSPC)
		++net_device_ctx->eth_stats.tx_no_space;

drop:
	dev_kfree_skb_any(skb);
	net->stats.tx_dropped++;

	return NETDEV_TX_OK;

no_memory:
	++net_device_ctx->eth_stats.tx_no_memory;
	goto drop;
}

/*
 * netvsc_linkstatus_callback - Link up/down notification
 */
void netvsc_linkstatus_callback(struct hv_device *device_obj,
				struct rndis_message *resp)
{
	struct rndis_indicate_status *indicate = &resp->msg.indicate_status;
	struct net_device *net;
	struct net_device_context *ndev_ctx;
	struct netvsc_reconfig *event;
	unsigned long flags;

	net = hv_get_drvdata(device_obj);

	if (!net)
		return;

	ndev_ctx = netdev_priv(net);

	/* Update the physical link speed when changing to another vSwitch */
	if (indicate->status == RNDIS_STATUS_LINK_SPEED_CHANGE) {
		u32 speed;

		speed = *(u32 *)((void *)indicate + indicate->
				 status_buf_offset) / 10000;
		ndev_ctx->speed = speed;
		return;
	}

	/* Handle these link change statuses below */
	if (indicate->status != RNDIS_STATUS_NETWORK_CHANGE &&
	    indicate->status != RNDIS_STATUS_MEDIA_CONNECT &&
	    indicate->status != RNDIS_STATUS_MEDIA_DISCONNECT)
		return;

	if (net->reg_state != NETREG_REGISTERED)
		return;

	event = kzalloc(sizeof(*event), GFP_ATOMIC);
	if (!event)
		return;
	event->event = indicate->status;

	spin_lock_irqsave(&ndev_ctx->lock, flags);
	list_add_tail(&event->list, &ndev_ctx->reconfig_events);
	spin_unlock_irqrestore(&ndev_ctx->lock, flags);

	schedule_delayed_work(&ndev_ctx->dwork, 0);
}

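/*
 * Allocate an skb for a received packet and copy the data into it,
 * applying receive checksum and VLAN information supplied by the host.
 */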
static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
					     struct hv_netvsc_packet *packet,
					     struct ndis_tcp_ip_checksum_info *csum_info,
					     void *data, u16 vlan_tci)
{
	struct sk_buff *skb;

	skb = netdev_alloc_skb_ip_align(net, packet->total_data_buflen);
	if (!skb)
		return skb;

	/*
	 * Copy to skb. This copy is needed here since the memory pointed by
	 * hv_netvsc_packet cannot be deallocated
	 */
	memcpy(skb_put(skb, packet->total_data_buflen), data,
	       packet->total_data_buflen);

	skb->protocol = eth_type_trans(skb, net);

	/* skb is already created with CHECKSUM_NONE */
	skb_checksum_none_assert(skb);

	/*
	 * In Linux, the IP checksum is always checked.
	 * Do L4 checksum offload if enabled and present.
	 */
	if (csum_info && (net->features & NETIF_F_RXCSUM)) {
		if (csum_info->receive.tcp_checksum_succeeded ||
		    csum_info->receive.udp_checksum_succeeded)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	if (vlan_tci & VLAN_TAG_PRESENT)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
				       vlan_tci);

	return skb;
}

/*
 * netvsc_recv_callback - Callback when we receive a packet from the
 * "wire" on the specified device.
 */
int netvsc_recv_callback(struct hv_device *device_obj,
			 struct hv_netvsc_packet *packet,
			 void **data,
			 struct ndis_tcp_ip_checksum_info *csum_info,
			 struct vmbus_channel *channel,
			 u16 vlan_tci)
{
	struct net_device *net = hv_get_drvdata(device_obj);
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct net_device *vf_netdev;
	struct sk_buff *skb;
	struct netvsc_stats *rx_stats;

	if (net->reg_state != NETREG_REGISTERED)
		return NVSP_STAT_FAIL;

	/*
	 * If necessary, inject this packet into the VF interface.
	 * On Hyper-V, multicast and broadcast packets are only delivered
	 * to the synthetic interface (after subjecting these to
	 * policy filters on the host). Deliver these via the VF
	 * interface in the guest.
	 */
	vf_netdev = rcu_dereference(net_device_ctx->vf_netdev);
	if (vf_netdev && (vf_netdev->flags & IFF_UP))
		net = vf_netdev;

	/* Allocate a skb - TODO direct I/O to pages? */
	skb = netvsc_alloc_recv_skb(net, packet, csum_info, *data, vlan_tci);
	if (unlikely(!skb)) {
		++net->stats.rx_dropped;
		return NVSP_STAT_FAIL;
	}

	if (net != vf_netdev)
		skb_record_rx_queue(skb,
				    channel->offermsg.offer.sub_channel_index);

	/*
	 * Even if injecting the packet, record the statistics
	 * on the synthetic device because modifying the VF device
	 * statistics will not work correctly.
	 */
	rx_stats = this_cpu_ptr(net_device_ctx->rx_stats);
	u64_stats_update_begin(&rx_stats->syncp);
	rx_stats->packets++;
	rx_stats->bytes += packet->total_data_buflen;

	if (skb->pkt_type == PACKET_BROADCAST)
		++rx_stats->broadcast;
	else if (skb->pkt_type == PACKET_MULTICAST)
		++rx_stats->multicast;
	u64_stats_update_end(&rx_stats->syncp);

	/*
	 * Pass the skb back up. Network stack will deallocate the skb when it
	 * is done.
	 * TODO - use NAPI?
	 */
	netif_rx(skb);

	return 0;
}

static void netvsc_get_drvinfo(struct net_device *net,
			       struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
	strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
}

static void netvsc_get_channels(struct net_device *net,
				struct ethtool_channels *channel)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct netvsc_device *nvdev = net_device_ctx->nvdev;

	if (nvdev) {
		channel->max_combined	= nvdev->max_chn;
		channel->combined_count = nvdev->num_chn;
	}
}

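/*
 * ethtool set_channels: tear down the RNDIS device and re-create it with
 * the requested number of combined channels, retrying with the original
 * channel count if the new configuration cannot be applied.
 */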
static int netvsc_set_channels(struct net_device *net,
			       struct ethtool_channels *channels)
{
	struct net_device_context *net_device_ctx = netdev_priv(net);
	struct hv_device *dev = net_device_ctx->device_ctx;
	struct netvsc_device *nvdev = net_device_ctx->nvdev;
	struct netvsc_device_info device_info;
	u32 num_chn;
	u32 max_chn;
	int ret = 0;
	bool recovering = false;

	if (net_device_ctx->start_remove || !nvdev || nvdev->destroy)
		return -ENODEV;

	num_chn = nvdev->num_chn;
	max_chn = min_t(u32, nvdev->max_chn, num_online_cpus());

	if (nvdev->nvsp_version < NVSP_PROTOCOL_VERSION_5) {
		pr_info("vRSS unsupported before NVSP Version 5\n");
		return -EINVAL;
	}

	/* We do not support rx, tx, or other */
	if (!channels ||
	    channels->rx_count ||
	    channels->tx_count ||
	    channels->other_count ||
	    (channels->combined_count < 1))
		return -EINVAL;

	if (channels->combined_count > max_chn) {
		pr_info("combined channels too high, using %d\n", max_chn);
		channels->combined_count = max_chn;
	}

	ret = netvsc_close(net);
	if (ret)
		goto out;

 do_set:
	net_device_ctx->start_remove = true;
	rndis_filter_device_remove(dev);

	nvdev->num_chn = channels->combined_count;

	memset(&device_info, 0, sizeof(device_info));
	device_info.num_chn = nvdev->num_chn; /* passed to RNDIS */
	device_info.ring_size = ring_size;
	device_info.max_num_vrss_chns = max_num_vrss_chns;

	ret = rndis_filter_device_add(dev, &device_info);
	if (ret) {
		if (recovering) {
			netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
			return ret;
		}
		goto recover;
	}

	nvdev = net_device_ctx->nvdev;

	ret = netif_set_real_num_tx_queues(net, nvdev->num_chn);
	if (ret) {
		if (recovering) {
			netdev_err(net, "could not set tx queue count (ret %d)\n", ret);
			return ret;
		}
		goto recover;
	}

	ret = netif_set_real_num_rx_queues(net, nvdev->num_chn);
	if (ret) {
		if (recovering) {
			netdev_err(net, "could not set rx queue count (ret %d)\n", ret);
			return ret;
		}
		goto recover;
	}

 out:
	netvsc_open(net);
	net_device_ctx->start_remove = false;
	/* We may have missed link change notifications */
	schedule_delayed_work(&net_device_ctx->dwork, 0);

	return ret;

 recover:
	/* If the above failed, we attempt to recover through the same
	 * process but with the original number of channels.
	 */
	netdev_err(net, "could not set channels, recovering\n");
	recovering = true;
	channels->combined_count = num_chn;
	goto do_set;
}

static bool netvsc_validate_ethtool_ss_cmd(const struct ethtool_cmd *cmd)
{
	struct ethtool_cmd diff1 = *cmd;
	struct ethtool_cmd diff2 = {};

	ethtool_cmd_speed_set(&diff1, 0);
	diff1.duplex = 0;
	/* advertising and cmd are usually set */
	diff1.advertising = 0;
	diff1.cmd = 0;
	/* We set port to PORT_OTHER */
	diff2.port = PORT_OTHER;

	return !memcmp(&diff1, &diff2, sizeof(diff1));
}

static void netvsc_init_settings(struct net_device *dev)
{
	struct net_device_context *ndc = netdev_priv(dev);

	ndc->speed = SPEED_UNKNOWN;
	ndc->duplex = DUPLEX_UNKNOWN;
}

static int netvsc_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct net_device_context *ndc = netdev_priv(dev);

	ethtool_cmd_speed_set(cmd, ndc->speed);
	cmd->duplex = ndc->duplex;
	cmd->port = PORT_OTHER;

	return 0;
}

static int netvsc_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct net_device_context *ndc = netdev_priv(dev);
	u32 speed;

	speed = ethtool_cmd_speed(cmd);
	if (!ethtool_validate_speed(speed) ||
	    !ethtool_validate_duplex(cmd->duplex) ||
	    !netvsc_validate_ethtool_ss_cmd(cmd))
		return -EINVAL;

	ndc->speed = speed;
	ndc->duplex = cmd->duplex;

	return 0;
}

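/*
 * Changing the MTU requires tearing down and re-creating the RNDIS device
 * with the same channel count; the interface is closed around the
 * operation and reopened afterwards.
 */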
static int netvsc_change_mtu(struct net_device *ndev, int mtu)
{
	struct net_device_context *ndevctx = netdev_priv(ndev);
	struct netvsc_device *nvdev = ndevctx->nvdev;
	struct hv_device *hdev = ndevctx->device_ctx;
	struct netvsc_device_info device_info;
	int limit = ETH_DATA_LEN;
	u32 num_chn;
	int ret = 0;

	if (ndevctx->start_remove || !nvdev || nvdev->destroy)
		return -ENODEV;

	if (nvdev->nvsp_version >= NVSP_PROTOCOL_VERSION_2)
		limit = NETVSC_MTU - ETH_HLEN;

	if (mtu < NETVSC_MTU_MIN || mtu > limit)
		return -EINVAL;

	ret = netvsc_close(ndev);
	if (ret)
		goto out;

	num_chn = nvdev->num_chn;

	ndevctx->start_remove = true;
	rndis_filter_device_remove(hdev);

	ndev->mtu = mtu;

	memset(&device_info, 0, sizeof(device_info));
	device_info.ring_size = ring_size;
	device_info.num_chn = num_chn;
	device_info.max_num_vrss_chns = max_num_vrss_chns;
	rndis_filter_device_add(hdev, &device_info);

out:
	netvsc_open(ndev);
	ndevctx->start_remove = false;

	/* We may have missed link change notifications */
	schedule_delayed_work(&ndevctx->dwork, 0);

	return ret;
}

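/*
 * Aggregate the per-cpu transmit and receive counters into the
 * rtnl_link_stats64 structure requested by the stack.
 */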
static struct rtnl_link_stats64 *netvsc_get_stats64(struct net_device *net,
						    struct rtnl_link_stats64 *t)
{
	struct net_device_context *ndev_ctx = netdev_priv(net);
	int cpu;

	for_each_possible_cpu(cpu) {
		struct netvsc_stats *tx_stats = per_cpu_ptr(ndev_ctx->tx_stats,
							    cpu);
		struct netvsc_stats *rx_stats = per_cpu_ptr(ndev_ctx->rx_stats,
							    cpu);
		u64 tx_packets, tx_bytes, rx_packets, rx_bytes, rx_multicast;
		unsigned int start;

		do {
			start = u64_stats_fetch_begin_irq(&tx_stats->syncp);
			tx_packets = tx_stats->packets;
			tx_bytes = tx_stats->bytes;
		} while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start));

		do {
			start = u64_stats_fetch_begin_irq(&rx_stats->syncp);
			rx_packets = rx_stats->packets;
			rx_bytes = rx_stats->bytes;
			rx_multicast = rx_stats->multicast + rx_stats->broadcast;
		} while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start));

		t->tx_bytes	+= tx_bytes;
		t->tx_packets	+= tx_packets;
		t->rx_bytes	+= rx_bytes;
		t->rx_packets	+= rx_packets;
		t->multicast	+= rx_multicast;
	}

	t->tx_dropped	= net->stats.tx_dropped;
	t->tx_errors	= net->stats.tx_dropped;

	t->rx_dropped	= net->stats.rx_dropped;
	t->rx_errors	= net->stats.rx_errors;

	return t;
}

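/*
 * Set the MAC address on both the net_device and the RNDIS device,
 * rolling back the net_device copy if the host rejects the change.
 */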
static int netvsc_set_mac_addr(struct net_device *ndev, void *p)
{
	struct sockaddr *addr = p;
	char save_adr[ETH_ALEN];
	unsigned char save_aatype;
	int err;

	memcpy(save_adr, ndev->dev_addr, ETH_ALEN);
	save_aatype = ndev->addr_assign_type;

	err = eth_mac_addr(ndev, p);
	if (err != 0)
		return err;

	err = rndis_filter_set_device_mac(ndev, addr->sa_data);
	if (err != 0) {
		/* roll back to saved MAC */
		memcpy(ndev->dev_addr, save_adr, ETH_ALEN);
		ndev->addr_assign_type = save_aatype;
	}

	return err;
}

static const struct {
	char name[ETH_GSTRING_LEN];
	u16 offset;
} netvsc_stats[] = {
	{ "tx_scattered", offsetof(struct netvsc_ethtool_stats, tx_scattered) },
	{ "tx_no_memory", offsetof(struct netvsc_ethtool_stats, tx_no_memory) },
	{ "tx_no_space",  offsetof(struct netvsc_ethtool_stats, tx_no_space) },
	{ "tx_too_big",	  offsetof(struct netvsc_ethtool_stats, tx_too_big) },
	{ "tx_busy",	  offsetof(struct netvsc_ethtool_stats, tx_busy) },
};

static int netvsc_get_sset_count(struct net_device *dev, int string_set)
{
	switch (string_set) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(netvsc_stats);
	default:
		return -EINVAL;
	}
}

static void netvsc_get_ethtool_stats(struct net_device *dev,
				     struct ethtool_stats *stats, u64 *data)
{
	struct net_device_context *ndc = netdev_priv(dev);
	const void *nds = &ndc->eth_stats;
	int i;

	for (i = 0; i < ARRAY_SIZE(netvsc_stats); i++)
		data[i] = *(unsigned long *)(nds + netvsc_stats[i].offset);
}

static void netvsc_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < ARRAY_SIZE(netvsc_stats); i++)
			memcpy(data + i * ETH_GSTRING_LEN,
			       netvsc_stats[i].name, ETH_GSTRING_LEN);
		break;
	}
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void netvsc_poll_controller(struct net_device *net)
{
	/* As netvsc_start_xmit() works synchronously we don't have to
	 * trigger anything here.
	 */
}
#endif

static const struct ethtool_ops ethtool_ops = {
	.get_drvinfo	= netvsc_get_drvinfo,
	.get_link	= ethtool_op_get_link,
	.get_ethtool_stats = netvsc_get_ethtool_stats,
	.get_sset_count = netvsc_get_sset_count,
	.get_strings	= netvsc_get_strings,
	.get_channels   = netvsc_get_channels,
	.set_channels   = netvsc_set_channels,
	.get_ts_info	= ethtool_op_get_ts_info,
	.get_settings	= netvsc_get_settings,
	.set_settings	= netvsc_set_settings,
};

static const struct net_device_ops device_ops = {
	.ndo_open =			netvsc_open,
	.ndo_stop =			netvsc_close,
	.ndo_start_xmit =		netvsc_start_xmit,
	.ndo_set_rx_mode =		netvsc_set_multicast_list,
	.ndo_change_mtu =		netvsc_change_mtu,
	.ndo_validate_addr =		eth_validate_addr,
	.ndo_set_mac_address =		netvsc_set_mac_addr,
	.ndo_select_queue =		netvsc_select_queue,
	.ndo_get_stats64 =		netvsc_get_stats64,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller =		netvsc_poll_controller,
#endif
};

/*
 * Handle link status changes. For RNDIS_STATUS_NETWORK_CHANGE emulate link
 * down/up sequence. In case of RNDIS_STATUS_MEDIA_CONNECT when carrier is
 * present send GARP packet to network peers with netdev_notify_peers().
 */
static void netvsc_link_change(struct work_struct *w)
{
	struct net_device_context *ndev_ctx =
		container_of(w, struct net_device_context, dwork.work);
	struct hv_device *device_obj = ndev_ctx->device_ctx;
	struct net_device *net = hv_get_drvdata(device_obj);
	struct netvsc_device *net_device;
	struct rndis_device *rdev;
	struct netvsc_reconfig *event = NULL;
	bool notify = false, reschedule = false;
	unsigned long flags, next_reconfig, delay;

	rtnl_lock();
	if (ndev_ctx->start_remove)
		goto out_unlock;

	net_device = ndev_ctx->nvdev;
	rdev = net_device->extension;

	next_reconfig = ndev_ctx->last_reconfig + LINKCHANGE_INT;
	if (time_is_after_jiffies(next_reconfig)) {
		/* link_watch only sends one notification with current state
		 * per second, avoid doing reconfig more frequently. Handle
		 * wrap around.
		 */
		delay = next_reconfig - jiffies;
		delay = delay < LINKCHANGE_INT ? delay : LINKCHANGE_INT;
		schedule_delayed_work(&ndev_ctx->dwork, delay);
		goto out_unlock;
	}
	ndev_ctx->last_reconfig = jiffies;

	spin_lock_irqsave(&ndev_ctx->lock, flags);
	if (!list_empty(&ndev_ctx->reconfig_events)) {
		event = list_first_entry(&ndev_ctx->reconfig_events,
					 struct netvsc_reconfig, list);
		list_del(&event->list);
		reschedule = !list_empty(&ndev_ctx->reconfig_events);
	}
	spin_unlock_irqrestore(&ndev_ctx->lock, flags);

	if (!event)
		goto out_unlock;

	switch (event->event) {
		/* Only the following events are possible due to the check in
		 * netvsc_linkstatus_callback()
		 */
	case RNDIS_STATUS_MEDIA_CONNECT:
		if (rdev->link_state) {
			rdev->link_state = false;
			netif_carrier_on(net);
			netif_tx_wake_all_queues(net);
		} else {
			notify = true;
		}
		kfree(event);
		break;
	case RNDIS_STATUS_MEDIA_DISCONNECT:
		if (!rdev->link_state) {
			rdev->link_state = true;
			netif_carrier_off(net);
			netif_tx_stop_all_queues(net);
		}
		kfree(event);
		break;
	case RNDIS_STATUS_NETWORK_CHANGE:
		/* Only makes sense if carrier is present */
		if (!rdev->link_state) {
			rdev->link_state = true;
			netif_carrier_off(net);
			netif_tx_stop_all_queues(net);
			event->event = RNDIS_STATUS_MEDIA_CONNECT;
			spin_lock_irqsave(&ndev_ctx->lock, flags);
			list_add(&event->list, &ndev_ctx->reconfig_events);
			spin_unlock_irqrestore(&ndev_ctx->lock, flags);
			reschedule = true;
		}
		break;
	}

	rtnl_unlock();

	if (notify)
		netdev_notify_peers(net);

	/* link_watch only sends one notification with current state per
	 * second, handle next reconfig event in 2 seconds.
	 */
	if (reschedule)
		schedule_delayed_work(&ndev_ctx->dwork, LINKCHANGE_INT);

	return;

out_unlock:
	rtnl_unlock();
}

static void netvsc_free_netdev(struct net_device *netdev)
{
	struct net_device_context *net_device_ctx = netdev_priv(netdev);

	free_percpu(net_device_ctx->tx_stats);
	free_percpu(net_device_ctx->rx_stats);
	free_netdev(netdev);
}

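/*
 * Find the netvsc net_device whose permanent MAC matches the given address;
 * used to pair an accelerated VF interface with its synthetic counterpart.
 */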
static struct net_device *get_netvsc_bymac(const u8 *mac)
{
	struct net_device *dev;

	ASSERT_RTNL();

	for_each_netdev(&init_net, dev) {
		if (dev->netdev_ops != &device_ops)
			continue;	/* not a netvsc device */

		if (ether_addr_equal(mac, dev->perm_addr))
			return dev;
	}

	return NULL;
}

static struct net_device *get_netvsc_byref(struct net_device *vf_netdev)
{
	struct net_device *dev;

	ASSERT_RTNL();

	for_each_netdev(&init_net, dev) {
		struct net_device_context *net_device_ctx;

		if (dev->netdev_ops != &device_ops)
			continue;	/* not a netvsc device */

		net_device_ctx = netdev_priv(dev);
		if (net_device_ctx->nvdev == NULL)
			continue;	/* device is removed */

		if (rtnl_dereference(net_device_ctx->vf_netdev) == vf_netdev)
			return dev;	/* a match */
	}

	return NULL;
}

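/*
 * Netdev notifier: a VF interface with a matching MAC address has appeared;
 * remember it so traffic can be redirected to it once it comes up.
 */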
static int netvsc_register_vf(struct net_device *vf_netdev)
{
	struct net_device *ndev;
	struct net_device_context *net_device_ctx;
	struct netvsc_device *netvsc_dev;

	if (vf_netdev->addr_len != ETH_ALEN)
		return NOTIFY_DONE;

	/*
	 * We will use the MAC address to locate the synthetic interface to
	 * associate with the VF interface. If we don't find a matching
	 * synthetic interface, move on.
	 */
	ndev = get_netvsc_bymac(vf_netdev->perm_addr);
	if (!ndev)
		return NOTIFY_DONE;

	net_device_ctx = netdev_priv(ndev);
	netvsc_dev = net_device_ctx->nvdev;
	if (!netvsc_dev || rtnl_dereference(net_device_ctx->vf_netdev))
		return NOTIFY_DONE;

	netdev_info(ndev, "VF registering: %s\n", vf_netdev->name);
	/*
	 * Take a reference on the module.
	 */
	try_module_get(THIS_MODULE);

	dev_hold(vf_netdev);
	rcu_assign_pointer(net_device_ctx->vf_netdev, vf_netdev);
	return NOTIFY_OK;
}

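/*
 * VF interface came up: open the RNDIS device and ask the host to switch
 * the data path over to the VF; the reverse happens in netvsc_vf_down().
 */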
KY Srinivasan84bf9ce2016-04-14 16:31:54 -07001253static int netvsc_vf_up(struct net_device *vf_netdev)
1254{
Vitaly Kuznetsov0a1275c2016-05-13 13:55:23 +02001255 struct net_device *ndev;
KY Srinivasan84bf9ce2016-04-14 16:31:54 -07001256 struct netvsc_device *netvsc_dev;
KY Srinivasan84bf9ce2016-04-14 16:31:54 -07001257 struct net_device_context *net_device_ctx;
1258
Stephen Hemmingere8ff40d2016-09-22 16:56:32 -07001259 ndev = get_netvsc_byref(vf_netdev);
Vitaly Kuznetsov0a1275c2016-05-13 13:55:23 +02001260 if (!ndev)
1261 return NOTIFY_DONE;
1262
1263 net_device_ctx = netdev_priv(ndev);
1264 netvsc_dev = net_device_ctx->nvdev;
KY Srinivasan84bf9ce2016-04-14 16:31:54 -07001265
Vitaly Kuznetsov0a1275c2016-05-13 13:55:23 +02001266 netdev_info(ndev, "VF up: %s\n", vf_netdev->name);
KY Srinivasan84bf9ce2016-04-14 16:31:54 -07001267
1268 /*
1269 * Open the device before switching data path.
1270 */
Vitaly Kuznetsov2f5fa6c2016-06-03 17:51:00 +02001271 rndis_filter_open(netvsc_dev);
KY Srinivasan84bf9ce2016-04-14 16:31:54 -07001272
1273 /*
1274 * notify the host to switch the data path.
1275 */
Vitaly Kuznetsov0a1275c2016-05-13 13:55:23 +02001276 netvsc_switch_datapath(ndev, true);
1277 netdev_info(ndev, "Data path switched to VF: %s\n", vf_netdev->name);
KY Srinivasan84bf9ce2016-04-14 16:31:54 -07001278
Vitaly Kuznetsov0a1275c2016-05-13 13:55:23 +02001279 netif_carrier_off(ndev);
KY Srinivasan84bf9ce2016-04-14 16:31:54 -07001280
Vitaly Kuznetsovd0722182016-08-15 17:48:40 +02001281 /* Now notify peers through VF device. */
1282 call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, vf_netdev);
KY Srinivasan84bf9ce2016-04-14 16:31:54 -07001283
1284 return NOTIFY_OK;
1285}
1286
KY Srinivasan84bf9ce2016-04-14 16:31:54 -07001287static int netvsc_vf_down(struct net_device *vf_netdev)
1288{
Vitaly Kuznetsov0a1275c2016-05-13 13:55:23 +02001289 struct net_device *ndev;
KY Srinivasan84bf9ce2016-04-14 16:31:54 -07001290 struct netvsc_device *netvsc_dev;
1291 struct net_device_context *net_device_ctx;
KY Srinivasan84bf9ce2016-04-14 16:31:54 -07001292
Stephen Hemmingere8ff40d2016-09-22 16:56:32 -07001293 ndev = get_netvsc_byref(vf_netdev);
Vitaly Kuznetsov0a1275c2016-05-13 13:55:23 +02001294 if (!ndev)
1295 return NOTIFY_DONE;
1296
1297 net_device_ctx = netdev_priv(ndev);
1298 netvsc_dev = net_device_ctx->nvdev;
KY Srinivasan84bf9ce2016-04-14 16:31:54 -07001299
Vitaly Kuznetsov0a1275c2016-05-13 13:55:23 +02001300 netdev_info(ndev, "VF down: %s\n", vf_netdev->name);
Vitaly Kuznetsov0a1275c2016-05-13 13:55:23 +02001301 netvsc_switch_datapath(ndev, false);
1302 netdev_info(ndev, "Data path switched from VF: %s\n", vf_netdev->name);
Vitaly Kuznetsov2f5fa6c2016-06-03 17:51:00 +02001303 rndis_filter_close(netvsc_dev);
Vitaly Kuznetsov0a1275c2016-05-13 13:55:23 +02001304 netif_carrier_on(ndev);
Vitaly Kuznetsovd0722182016-08-15 17:48:40 +02001305
1306 /* Now notify peers through netvsc device. */
1307 call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, ndev);
KY Srinivasan84bf9ce2016-04-14 16:31:54 -07001308
1309 return NOTIFY_OK;
1310}
1311
static int netvsc_unregister_vf(struct net_device *vf_netdev)
{
        struct net_device *ndev;
        struct netvsc_device *netvsc_dev;
        struct net_device_context *net_device_ctx;

        ndev = get_netvsc_byref(vf_netdev);
        if (!ndev)
                return NOTIFY_DONE;

        net_device_ctx = netdev_priv(ndev);
        netvsc_dev = net_device_ctx->nvdev;

        netdev_info(ndev, "VF unregistering: %s\n", vf_netdev->name);

        RCU_INIT_POINTER(net_device_ctx->vf_netdev, NULL);
        dev_put(vf_netdev);
        module_put(THIS_MODULE);
        return NOTIFY_OK;
}

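/*
 * Probe a new VMBus network device: allocate a multi-queue net_device
 * sized to the number of online CPUs, set up per-CPU stats and the
 * link-change/multicast work items, add the RNDIS filter device and
 * finally register the netdev with the channel count reported by the
 * host.
 */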
static int netvsc_probe(struct hv_device *dev,
                        const struct hv_vmbus_device_id *dev_id)
{
        struct net_device *net = NULL;
        struct net_device_context *net_device_ctx;
        struct netvsc_device_info device_info;
        struct netvsc_device *nvdev;
        int ret;

        net = alloc_etherdev_mq(sizeof(struct net_device_context),
                                num_online_cpus());
        if (!net)
                return -ENOMEM;

        netif_carrier_off(net);

        netvsc_init_settings(net);

        net_device_ctx = netdev_priv(net);
        net_device_ctx->device_ctx = dev;
        net_device_ctx->msg_enable = netif_msg_init(debug, default_msg);
        if (netif_msg_probe(net_device_ctx))
                netdev_dbg(net, "netvsc msg_enable: %d\n",
                           net_device_ctx->msg_enable);

        net_device_ctx->tx_stats = netdev_alloc_pcpu_stats(struct netvsc_stats);
        if (!net_device_ctx->tx_stats) {
                free_netdev(net);
                return -ENOMEM;
        }
        net_device_ctx->rx_stats = netdev_alloc_pcpu_stats(struct netvsc_stats);
        if (!net_device_ctx->rx_stats) {
                free_percpu(net_device_ctx->tx_stats);
                free_netdev(net);
                return -ENOMEM;
        }

        hv_set_drvdata(dev, net);

        net_device_ctx->start_remove = false;

        INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change);
        INIT_WORK(&net_device_ctx->work, do_set_multicast);

        spin_lock_init(&net_device_ctx->lock);
        INIT_LIST_HEAD(&net_device_ctx->reconfig_events);

        net->netdev_ops = &device_ops;

        net->hw_features = NETVSC_HW_FEATURES;
        net->features = NETVSC_HW_FEATURES | NETIF_F_HW_VLAN_CTAG_TX;

        net->ethtool_ops = &ethtool_ops;
        SET_NETDEV_DEV(net, &dev->device);

        /* We always need headroom for rndis header */
        net->needed_headroom = RNDIS_AND_PPI_SIZE;

        /* Notify the netvsc driver of the new device */
        memset(&device_info, 0, sizeof(device_info));
        device_info.ring_size = ring_size;
        device_info.max_num_vrss_chns = max_num_vrss_chns;
        ret = rndis_filter_device_add(dev, &device_info);
        if (ret != 0) {
                netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
                netvsc_free_netdev(net);
                hv_set_drvdata(dev, NULL);
                return ret;
        }
        memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN);

        nvdev = net_device_ctx->nvdev;
        netif_set_real_num_tx_queues(net, nvdev->num_chn);
        netif_set_real_num_rx_queues(net, nvdev->num_chn);
        netif_set_gso_max_size(net, NETVSC_GSO_MAX_SIZE);

        ret = register_netdev(net);
        if (ret != 0) {
                pr_err("Unable to register netdev.\n");
                rndis_filter_device_remove(dev);
                netvsc_free_netdev(net);
        }

        return ret;
}

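/*
 * Tear down in roughly the reverse order of netvsc_probe(): flag
 * start_remove under the RTNL lock so MTU/channel changes stop racing
 * with removal, cancel outstanding work, stop transmit, unregister the
 * netdev and remove the RNDIS device.
 */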
static int netvsc_remove(struct hv_device *dev)
{
        struct net_device *net;
        struct net_device_context *ndev_ctx;
        struct netvsc_device *net_device;

        net = hv_get_drvdata(dev);

        if (net == NULL) {
                dev_err(&dev->device, "No net device to remove\n");
                return 0;
        }

        ndev_ctx = netdev_priv(net);
        net_device = ndev_ctx->nvdev;

        /* Avoid racing with netvsc_change_mtu()/netvsc_set_channels()
         * removing the device.
         */
        rtnl_lock();
        ndev_ctx->start_remove = true;
        rtnl_unlock();

        cancel_delayed_work_sync(&ndev_ctx->dwork);
        cancel_work_sync(&ndev_ctx->work);

        /* Stop outbound asap */
        netif_tx_disable(net);

        unregister_netdev(net);

        /*
         * Call to the vsc driver to let it know that the device is being
         * removed
         */
        rndis_filter_device_remove(dev);

        hv_set_drvdata(dev, NULL);

        netvsc_free_netdev(net);
        return 0;
}

static const struct hv_vmbus_device_id id_table[] = {
        /* Network guid */
        { HV_NIC_GUID, },
        { },
};

MODULE_DEVICE_TABLE(vmbus, id_table);

/* The one and only netvsc driver instance. */
static struct hv_driver netvsc_drv = {
        .name = KBUILD_MODNAME,
        .id_table = id_table,
        .probe = netvsc_probe,
        .remove = netvsc_remove,
};

/*
 * On Hyper-V, every VF interface is matched with a corresponding
 * synthetic interface. The synthetic interface is presented first
 * to the guest. When the corresponding VF instance is registered,
 * we will take care of switching the data path.
 */
static int netvsc_netdev_event(struct notifier_block *this,
                               unsigned long event, void *ptr)
{
        struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);

        /* Skip our own events */
        if (event_dev->netdev_ops == &device_ops)
                return NOTIFY_DONE;

        /* Avoid non-Ethernet type devices */
        if (event_dev->type != ARPHRD_ETHER)
                return NOTIFY_DONE;

        /* Avoid Vlan dev with same MAC registering as VF */
        if (event_dev->priv_flags & IFF_802_1Q_VLAN)
                return NOTIFY_DONE;

        /* Avoid Bonding master dev with same MAC registering as VF */
        if ((event_dev->priv_flags & IFF_BONDING) &&
            (event_dev->flags & IFF_MASTER))
                return NOTIFY_DONE;

        switch (event) {
        case NETDEV_REGISTER:
                return netvsc_register_vf(event_dev);
        case NETDEV_UNREGISTER:
                return netvsc_unregister_vf(event_dev);
        case NETDEV_UP:
                return netvsc_vf_up(event_dev);
        case NETDEV_DOWN:
                return netvsc_vf_down(event_dev);
        default:
                return NOTIFY_DONE;
        }
}

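/*
 * This notifier sees every netdevice event in the system;
 * netvsc_netdev_event() filters out everything except the VF interfaces
 * paired with our synthetic devices. It is registered in
 * netvsc_drv_init() and unregistered in netvsc_drv_exit().
 */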
static struct notifier_block netvsc_netdev_notifier = {
        .notifier_call = netvsc_netdev_event,
};

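/* Module exit: drop the netdev notifier first, then unregister from VMBus. */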
static void __exit netvsc_drv_exit(void)
{
        unregister_netdevice_notifier(&netvsc_netdev_notifier);
        vmbus_driver_unregister(&netvsc_drv);
}

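/*
 * Module init: clamp ring_size to the supported minimum, register the
 * driver with VMBus (which probes any matching devices), then hook the
 * netdevice notifier used for VF handling.
 */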
static int __init netvsc_drv_init(void)
{
        int ret;

        if (ring_size < RING_SIZE_MIN) {
                ring_size = RING_SIZE_MIN;
                pr_info("Increased ring_size to %d (min allowed)\n",
                        ring_size);
        }
        ret = vmbus_driver_register(&netvsc_drv);

        if (ret)
                return ret;

        register_netdevice_notifier(&netvsc_netdev_notifier);
        return 0;
}

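/*
 * Example (hypothetical values): the ring_size module parameter can be
 * set at load time, e.g. "modprobe hv_netvsc ring_size=256", assuming
 * the driver is built as the hv_netvsc module; anything below
 * RING_SIZE_MIN is raised to the minimum by netvsc_drv_init() above.
 */
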
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Microsoft Hyper-V network driver");

module_init(netvsc_drv_init);
module_exit(netvsc_drv_exit);