blob: 9a25a5d349aef98b094d1b164d04a73b459ef1fd [file] [log] [blame]
Murali Karicheri0e7623b2019-04-05 13:31:34 -04001/* SPDX-License-Identifier: GPL-2.0 */
Arvid Brodin70ebe4a2014-07-04 23:34:38 +02002/* Copyright 2011-2014 Autronica Fire and Security AS
Arvid Brodinf4214362013-10-30 21:10:47 +01003 *
Arvid Brodinf4214362013-10-30 21:10:47 +01004 * Author(s):
Arvid Brodin70ebe4a2014-07-04 23:34:38 +02005 * 2011-2014 Arvid Brodin, arvid.brodin@alten.se
Murali Karicheri8f4c0e02020-07-22 10:40:16 -04006 *
7 * include file for HSR and PRP.
Arvid Brodinf4214362013-10-30 21:10:47 +01008 */
9
Arvid Brodin70ebe4a2014-07-04 23:34:38 +020010#ifndef __HSR_PRIVATE_H
11#define __HSR_PRIVATE_H
Arvid Brodinf4214362013-10-30 21:10:47 +010012
13#include <linux/netdevice.h>
14#include <linux/list.h>
Murali Karicheri451d8122020-07-22 10:40:21 -040015#include <linux/if_vlan.h>
Arvid Brodinf4214362013-10-30 21:10:47 +010016
/* Time constants as specified in the HSR specification (IEC-62439-3 2010)
 * Table 8.
 * All values in milliseconds.
 */
#define HSR_LIFE_CHECK_INTERVAL		 2000 /* ms */
#define HSR_NODE_FORGET_TIME		60000 /* ms */
#define HSR_ANNOUNCE_INTERVAL		  100 /* ms */
#define HSR_ENTRY_FORGET_TIME		  400 /* ms */

/* By how much may slave1 and slave2 timestamps of latest received frame from
 * each node differ before we notify of communication problem?
 */
#define MAX_SLAVE_DIFF			 3000 /* ms */
/* Sequence numbers start close to the 16-bit wrap-around — presumably so
 * the rollover path is exercised soon after startup; TODO confirm.
 */
#define HSR_SEQNR_START			(USHRT_MAX - 1024)
#define HSR_SUP_SEQNR_START		(HSR_SEQNR_START / 2)

/* How often shall we check for broken ring and remove node entries older than
 * HSR_NODE_FORGET_TIME?
 */
#define PRUNE_PERIOD			 3000 /* ms */

/* TLV types carried by supervision frames */
#define HSR_TLV_ANNOUNCE		   22
#define HSR_TLV_LIFE_CHECK		   23
/* PRP V1 life check for Duplicate discard */
#define PRP_TLV_LIFE_CHECK_DD		   20
/* PRP V1 life check for Duplicate Accept */
#define PRP_TLV_LIFE_CHECK_DA		   21
Arvid Brodinf4214362013-10-30 21:10:47 +010044
/* HSR Tag.
 * As defined in IEC-62439-3:2010, the HSR tag is really { ethertype = 0x88FB,
 * path, LSDU_size, sequence Nr }. But we let eth_header() create { h_dest,
 * h_source, h_proto = 0x88FB }, and add { path, LSDU_size, sequence Nr,
 * encapsulated protocol } instead.
 *
 * Field names as defined in the IEC:2010 standard for HSR.
 */
struct hsr_tag {
	/* 4-bit path in the top nibble, 12-bit LSDU_size below it;
	 * use the set_hsr_tag_* accessors rather than raw writes.
	 */
	__be16		path_and_LSDU_size;
	__be16		sequence_nr;
	/* EtherType of the encapsulated (original) protocol */
	__be16		encap_proto;
} __packed;
58
/* On-wire size of struct hsr_tag (3 x __be16) */
#define HSR_HLEN	6

/* LSDU size carried by an HSRv1 supervision frame;
 * NOTE(review): value per spec — confirm against IEC-62439-3:2016.
 */
#define HSR_V1_SUP_LSDUSIZE		52
62
/* The helper functions below assume that 'path' occupies the 4 most
 * significant bits of the 16-bit field shared by 'path' and 'LSDU_size' (or
 * equivalently, the 4 most significant bits of HSR tag byte 14).
 *
 * This is unclear in the IEC specification; its definition of MAC addresses
 * indicates the spec is written with the least significant bit first (to the
 * left). This, however, would mean that the LSDU field would be split in two
 * with the path field in-between, which seems strange. I'm guessing the MAC
 * address definition is in error.
 */
Arvid Brodinf4214362013-10-30 21:10:47 +010073
74static inline void set_hsr_tag_path(struct hsr_tag *ht, u16 path)
75{
Murali Karicherid595b852019-04-05 13:31:23 -040076 ht->path_and_LSDU_size =
77 htons((ntohs(ht->path_and_LSDU_size) & 0x0FFF) | (path << 12));
Arvid Brodinf4214362013-10-30 21:10:47 +010078}
79
80static inline void set_hsr_tag_LSDU_size(struct hsr_tag *ht, u16 LSDU_size)
81{
Murali Karicheri0525fc02019-04-05 13:31:27 -040082 ht->path_and_LSDU_size = htons((ntohs(ht->path_and_LSDU_size) &
83 0xF000) | (LSDU_size & 0x0FFF));
Arvid Brodinf4214362013-10-30 21:10:47 +010084}
85
/* Ethernet header immediately followed by the HSR tag, as seen on the wire */
struct hsr_ethhdr {
	struct ethhdr	ethhdr;
	struct hsr_tag	hsr_tag;
} __packed;
90
/* VLAN-tagged Ethernet header followed by the HSR tag */
struct hsr_vlan_ethhdr {
	struct vlan_ethhdr	vlanhdr;
	struct hsr_tag		hsr_tag;
} __packed;
95
/* HSR/PRP Supervision Frame data types.
 * Field names as defined in the IEC:2010 standard for HSR.
 */
struct hsr_sup_tag {
	/* Same layout as hsr_tag's path_and_LSDU_size; the set_hsr_stag_*
	 * helpers below reuse the hsr_tag accessors via a cast.
	 */
	__be16		path_and_HSR_ver;
	__be16		sequence_nr;
	__u8		HSR_TLV_type;
	__u8		HSR_TLV_length;
} __packed;
105
/* Payload of a supervision frame: MAC address A of the sending node */
struct hsr_sup_payload {
	unsigned char	macaddress_A[ETH_ALEN];
} __packed;
109
/* Set the path nibble of a supervision tag; valid because hsr_sup_tag's
 * path_and_HSR_ver has the same position and layout as hsr_tag's
 * path_and_LSDU_size.
 */
static inline void set_hsr_stag_path(struct hsr_sup_tag *hst, u16 path)
{
	set_hsr_tag_path((struct hsr_tag *)hst, path);
}
114
/* Set the HSR version bits of a supervision tag; reuses the LSDU_size
 * accessor since the field shares hsr_tag's 12-bit low-part layout.
 */
static inline void set_hsr_stag_HSR_ver(struct hsr_sup_tag *hst, u16 HSR_ver)
{
	set_hsr_tag_LSDU_size((struct hsr_tag *)hst, HSR_ver);
}
119
/* HSRv0 supervision frame header: plain Ethernet header + supervision tag */
struct hsrv0_ethhdr_sp {
	struct ethhdr		ethhdr;
	struct hsr_sup_tag	hsr_sup;
} __packed;
124
/* HSRv1 supervision frame header: an HSR tag precedes the supervision tag */
struct hsrv1_ethhdr_sp {
	struct ethhdr		ethhdr;
	struct hsr_tag		hsr;
	struct hsr_sup_tag	hsr_sup;
} __packed;
130
/* Roles a port (net_device) can play inside an HSR/PRP device */
enum hsr_port_type {
	HSR_PT_NONE = 0,	/* Must be 0, used by framereg */
	HSR_PT_SLAVE_A,		/* physical ring/LAN port A */
	HSR_PT_SLAVE_B,		/* physical ring/LAN port B */
	HSR_PT_INTERLINK,
	HSR_PT_MASTER,		/* the virtual hsr/prp interface itself */
	HSR_PT_PORTS,	/* This must be the last item in the enum */
};
Arvid Brodinc5a75912014-07-04 23:38:05 +0200139
/* PRP Redundancy Control Trailer (RCT).
 * As defined in IEC-62439-4:2012, the PRP RCT is really { sequence Nr,
 * LAN identifier (LanId), LSDU_size and PRP_suffix = 0x88FB }.
 *
 * Field names as defined in the IEC:2012 standard for PRP.
 */
struct prp_rct {
	__be16		sequence_nr;
	/* 4-bit lan_id in the top nibble, 12-bit LSDU_size below it */
	__be16		lan_id_and_LSDU_size;
	__be16		PRP_suffix;
} __packed;
151
Murali Karicheri451d8122020-07-22 10:40:21 -0400152static inline u16 get_prp_LSDU_size(struct prp_rct *rct)
153{
154 return ntohs(rct->lan_id_and_LSDU_size) & 0x0FFF;
155}
156
157static inline void set_prp_lan_id(struct prp_rct *rct, u16 lan_id)
158{
159 rct->lan_id_and_LSDU_size = htons((ntohs(rct->lan_id_and_LSDU_size) &
160 0x0FFF) | (lan_id << 12));
161}
Murali Karicheric643ff02020-07-22 10:40:19 -0400162static inline void set_prp_LSDU_size(struct prp_rct *rct, u16 LSDU_size)
163{
164 rct->lan_id_and_LSDU_size = htons((ntohs(rct->lan_id_and_LSDU_size) &
165 0xF000) | (LSDU_size & 0x0FFF));
166}
167
/* One port of an HSR/PRP device: links a net_device to its role */
struct hsr_port {
	struct list_head	port_list;	/* entry in hsr_priv::ports */
	struct net_device	*dev;
	struct hsr_priv		*hsr;		/* owning device */
	enum hsr_port_type	type;
};
Arvid Brodinf4214362013-10-30 21:10:47 +0100174
/* used by driver internally to differentiate various protocols */
enum hsr_version {
	HSR_V0 = 0,	/* HSR per IEC-62439-3:2010 */
	HSR_V1,		/* HSR per IEC-62439-3:2016 */
	PRP_V1,		/* PRP per IEC-62439-4:2012 */
};
181
Murali Karicherifa4dc892020-07-22 10:40:20 -0400182struct hsr_frame_info;
Murali Karicheri451d8122020-07-22 10:40:21 -0400183struct hsr_node;
Murali Karicherifa4dc892020-07-22 10:40:20 -0400184
/* Protocol-specific operations, filled in separately for HSR and PRP.
 * NOTE(review): per-op semantics below are partly inferred from names;
 * confirm against the HSR/PRP implementations.
 */
struct hsr_proto_ops {
	/* format and send supervision frame */
	void (*send_sv_frame)(struct hsr_port *port, unsigned long *interval);
	/* handle a frame from a SAN (singly attached node) */
	void (*handle_san_frame)(bool san, enum hsr_port_type port,
				 struct hsr_node *node);
	/* decide whether to drop this frame on the given port */
	bool (*drop_frame)(struct hsr_frame_info *frame, struct hsr_port *port);
	/* produce the frame without protocol tag/trailer */
	struct sk_buff * (*get_untagged_frame)(struct hsr_frame_info *frame,
					       struct hsr_port *port);
	/* produce the frame with protocol tag/trailer added */
	struct sk_buff * (*create_tagged_frame)(struct hsr_frame_info *frame,
						struct hsr_port *port);
	/* parse skb and populate frame; nonzero return indicates failure */
	int (*fill_frame_info)(__be16 proto, struct sk_buff *skb,
			       struct hsr_frame_info *frame);
	bool (*invalid_dan_ingress_frame)(__be16 protocol);
	void (*update_san_info)(struct hsr_node *node, bool is_sup);
};
200
/* Per-device private state of an HSR/PRP master interface */
struct hsr_priv {
	struct rcu_head		rcu_head;
	struct list_head	ports;		/* hsr_port entries */
	struct list_head	node_db;	/* Known HSR nodes */
	struct list_head	self_node_db;	/* MACs of slaves */
	struct timer_list	announce_timer;	/* Supervision frame dispatch */
	struct timer_list	prune_timer;	/* ages out node_db entries */
	int announce_count;
	u16 sequence_nr;
	u16 sup_sequence_nr;	/* For HSRv1 separate seq_nr for supervision */
	enum hsr_version prot_version;	/* Indicate if HSRv0, HSRv1 or PRPv1 */
	spinlock_t seqnr_lock;	/* locking for sequence_nr */
	spinlock_t list_lock;	/* locking for node list */
	struct hsr_proto_ops	*proto_ops;
#define PRP_LAN_ID	0x5     /* 0x1010 for A and 0x1011 for B. Bit 0 is set
				 * based on SLAVE_A or SLAVE_B
				 */
	u8 net_id;	/* for PRP, it occupies most significant 3 bits
			 * of lan_id
			 */
	unsigned char		sup_multicast_addr[ETH_ALEN];
#ifdef	CONFIG_DEBUG_FS
	struct dentry *node_tbl_root;	/* debugfs dir for the node table */
#endif
};
226
/* Iterate over all ports of @hsr; uses list_for_each_entry_rcu, so the
 * caller must be in an RCU read-side (or otherwise protected) section.
 */
#define hsr_for_each_port(hsr, port) \
	list_for_each_entry_rcu((port), &(hsr)->ports, port_list)

/* Look up the port of @hsr with the given type; NULL if absent */
struct hsr_port *hsr_port_get_hsr(struct hsr_priv *hsr, enum hsr_port_type pt);
Arvid Brodinf4214362013-10-30 21:10:47 +0100231
Arvid Brodinf266a682014-07-04 23:41:03 +0200232/* Caller must ensure skb is a valid HSR frame */
233static inline u16 hsr_get_skb_sequence_nr(struct sk_buff *skb)
234{
235 struct hsr_ethhdr *hsr_ethhdr;
236
Murali Karicheri5fa96772019-04-05 13:31:29 -0400237 hsr_ethhdr = (struct hsr_ethhdr *)skb_mac_header(skb);
Arvid Brodinf266a682014-07-04 23:41:03 +0200238 return ntohs(hsr_ethhdr->hsr_tag.sequence_nr);
239}
240
Murali Karicheri451d8122020-07-22 10:40:21 -0400241static inline struct prp_rct *skb_get_PRP_rct(struct sk_buff *skb)
242{
243 unsigned char *tail = skb_tail_pointer(skb) - HSR_HLEN;
244
245 struct prp_rct *rct = (struct prp_rct *)tail;
246
247 if (rct->PRP_suffix == htons(ETH_P_PRP))
248 return rct;
249
250 return NULL;
251}
252
/* Assume caller has confirmed this skb is PRP suffixed */
static inline u16 prp_get_skb_sequence_nr(struct prp_rct *rct)
{
	/* sequence_nr is big-endian on the wire */
	return ntohs(rct->sequence_nr);
}
258
259static inline u16 get_prp_lan_id(struct prp_rct *rct)
260{
261 return ntohs(rct->lan_id_and_LSDU_size) >> 12;
262}
263
264/* assume there is a valid rct */
265static inline bool prp_check_lsdu_size(struct sk_buff *skb,
266 struct prp_rct *rct,
267 bool is_sup)
268{
269 struct ethhdr *ethhdr;
270 int expected_lsdu_size;
271
272 if (is_sup) {
273 expected_lsdu_size = HSR_V1_SUP_LSDUSIZE;
274 } else {
275 ethhdr = (struct ethhdr *)skb_mac_header(skb);
276 expected_lsdu_size = skb->len - 14;
277 if (ethhdr->h_proto == htons(ETH_P_8021Q))
278 expected_lsdu_size -= 4;
279 }
280
281 return (expected_lsdu_size == get_prp_LSDU_size(rct));
282}
283
#if IS_ENABLED(CONFIG_DEBUG_FS)
/* debugfs hooks; real implementations live in the debugfs support file */
void hsr_debugfs_rename(struct net_device *dev);
void hsr_debugfs_init(struct hsr_priv *priv, struct net_device *hsr_dev);
void hsr_debugfs_term(struct hsr_priv *priv);
void hsr_debugfs_create_root(void);
void hsr_debugfs_remove_root(void);
#else
/* No-op stubs so callers need no CONFIG_DEBUG_FS conditionals */
static inline void hsr_debugfs_rename(struct net_device *dev)
{
}
static inline void hsr_debugfs_init(struct hsr_priv *priv,
				    struct net_device *hsr_dev)
{}
static inline void hsr_debugfs_term(struct hsr_priv *priv)
{}
static inline void hsr_debugfs_create_root(void)
{}
static inline void hsr_debugfs_remove_root(void)
{}
#endif
304
Arvid Brodin70ebe4a2014-07-04 23:34:38 +0200305#endif /* __HSR_PRIVATE_H */