[SCSI] libfc: rename rport event CREATED to READY
drivers/scsi/libfc/fc_rport.c
1 /*
2  * Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or modify it
5  * under the terms and conditions of the GNU General Public License,
6  * version 2, as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope it will be useful, but WITHOUT
9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
11  * more details.
12  *
13  * You should have received a copy of the GNU General Public License along with
14  * this program; if not, write to the Free Software Foundation, Inc.,
15  * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16  *
17  * Maintained at www.Open-FCoE.org
18  */
19
20 /*
21  * RPORT GENERAL INFO
22  *
23  * This file contains all processing regarding fc_rports. It contains the
24  * rport state machine and does all rport interaction with the transport class.
25  * There should be no other places in libfc that interact directly with the
26  * transport class in regards to adding and deleting rports.
27  *
28  * fc_rports represent N_Ports within the fabric.
29  */
30
31 /*
32  * RPORT LOCKING
33  *
34  * The rport should never hold the rport mutex and then attempt to acquire
35  * either the lport or disc mutexes. The rport's mutex is considered lesser
36  * than both the lport's mutex and the disc mutex. Refer to fc_lport.c for
37  * more comments on the hierarchy.
38  *
39  * The locking strategy is similar to the lport's strategy. The lock protects
40  * the rport's states and is held and released by the entry points to the rport
41  * block. All _enter_* functions correspond to rport states and expect the rport
42  * mutex to be locked before calling them. This means that rports only handle
43  * one request or response at a time; since they're not critical for the I/O
44  * path, this potential over-use of the mutex is acceptable.
45  */
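
/*
 * As a minimal illustration of the strategy above, an entry point such as
 * fc_rport_login() below follows the pattern:
 *
 *	mutex_lock(&rdata->rp_mutex);
 *	fc_rport_enter_plogi(rdata);
 *	mutex_unlock(&rdata->rp_mutex);
 *
 * where the _enter_* routine expects rp_mutex to already be held and must
 * not attempt to acquire the lport or disc mutex.
 */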
46
47 #include <linux/kernel.h>
48 #include <linux/spinlock.h>
49 #include <linux/interrupt.h>
50 #include <linux/rcupdate.h>
51 #include <linux/timer.h>
52 #include <linux/workqueue.h>
53 #include <asm/unaligned.h>
54
55 #include <scsi/libfc.h>
56 #include <scsi/fc_encode.h>
57
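/* Single-threaded workqueue on which all rport events are processed */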
58 struct workqueue_struct *rport_event_queue;
59
60 static void fc_rport_enter_plogi(struct fc_rport_priv *);
61 static void fc_rport_enter_prli(struct fc_rport_priv *);
62 static void fc_rport_enter_rtv(struct fc_rport_priv *);
63 static void fc_rport_enter_ready(struct fc_rport_priv *);
64 static void fc_rport_enter_logo(struct fc_rport_priv *);
65
66 static void fc_rport_recv_plogi_req(struct fc_rport_priv *,
67                                     struct fc_seq *, struct fc_frame *);
68 static void fc_rport_recv_prli_req(struct fc_rport_priv *,
69                                    struct fc_seq *, struct fc_frame *);
70 static void fc_rport_recv_prlo_req(struct fc_rport_priv *,
71                                    struct fc_seq *, struct fc_frame *);
72 static void fc_rport_recv_logo_req(struct fc_rport_priv *,
73                                    struct fc_seq *, struct fc_frame *);
74 static void fc_rport_timeout(struct work_struct *);
75 static void fc_rport_error(struct fc_rport_priv *, struct fc_frame *);
76 static void fc_rport_error_retry(struct fc_rport_priv *, struct fc_frame *);
77 static void fc_rport_work(struct work_struct *);
78
79 static const char *fc_rport_state_names[] = {
80         [RPORT_ST_INIT] = "Init",
81         [RPORT_ST_PLOGI] = "PLOGI",
82         [RPORT_ST_PRLI] = "PRLI",
83         [RPORT_ST_RTV] = "RTV",
84         [RPORT_ST_READY] = "Ready",
85         [RPORT_ST_LOGO] = "LOGO",
86         [RPORT_ST_DELETE] = "Delete",
87 };
88
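/**
 * fc_rport_rogue_destroy() - Release handler for a rogue rport's device
 * @dev: embedded device structure of the rogue fc_rport
 *
 * Frees the rogue rport (and the fc_rport_priv allocated along with it)
 * once the last device reference is dropped.
 */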
89 static void fc_rport_rogue_destroy(struct device *dev)
90 {
91         struct fc_rport *rport = dev_to_rport(dev);
92         struct fc_rport_priv *rdata = RPORT_TO_PRIV(rport);
93
94         FC_RPORT_DBG(rdata, "Destroying rogue rport\n");
95         kfree(rport);
96 }
97
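/**
 * fc_rport_rogue_create() - Allocate and initialize a rogue remote port
 * @lport: local port the new remote port is associated with
 * @ids: remote port identifiers (port ID, WWPN, WWNN, roles)
 *
 * The rogue rport is created outside of the FC transport class; once the
 * remote port reaches the READY state, fc_rport_work() replaces it with a
 * real rport obtained from fc_remote_port_add().
 */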
98 struct fc_rport_priv *fc_rport_rogue_create(struct fc_lport *lport,
99                                             struct fc_rport_identifiers *ids)
100 {
101         struct fc_rport *rport;
102         struct fc_rport_priv *rdata;
103         rport = kzalloc(sizeof(*rport) + sizeof(*rdata), GFP_KERNEL);
104
105         if (!rport)
106                 return NULL;
107
108         rdata = RPORT_TO_PRIV(rport);
109
110         rport->dd_data = rdata;
111         rport->port_id = ids->port_id;
112         rport->port_name = ids->port_name;
113         rport->node_name = ids->node_name;
114         rport->roles = ids->roles;
115         rport->maxframe_size = FC_MIN_MAX_PAYLOAD;
116         /*
117          * Note: all this libfc rogue rport code will be removed for
118          * upstream so it's fine that this is really ugly and hacky right now.
119          */
120         device_initialize(&rport->dev);
121         rport->dev.release = fc_rport_rogue_destroy;
122
123         rdata->ids = *ids;
124         kref_init(&rdata->kref);
125         mutex_init(&rdata->rp_mutex);
126         rdata->rport = rport;
127         rdata->local_port = lport;
128         rdata->trans_state = FC_PORTSTATE_ROGUE;
129         rdata->rp_state = RPORT_ST_INIT;
130         rdata->event = RPORT_EV_NONE;
131         rdata->flags = FC_RP_FLAGS_REC_SUPPORTED;
132         rdata->ops = NULL;
133         rdata->e_d_tov = lport->e_d_tov;
134         rdata->r_a_tov = lport->r_a_tov;
135         rdata->maxframe_size = FC_MIN_MAX_PAYLOAD;
136         INIT_DELAYED_WORK(&rdata->retry_work, fc_rport_timeout);
137         INIT_WORK(&rdata->event_work, fc_rport_work);
138         /*
139          * For good measure, but not necessary as we should only
140          * add REAL rport to the lport list.
141          */
142         INIT_LIST_HEAD(&rdata->peers);
143
144         return rdata;
145 }
146
147 /**
148  * fc_rport_destroy() - free a remote port after last reference is released.
149  * @kref: pointer to kref inside struct fc_rport_priv
150  */
151 static void fc_rport_destroy(struct kref *kref)
152 {
153         struct fc_rport_priv *rdata;
154         struct fc_rport *rport;
155
156         rdata = container_of(kref, struct fc_rport_priv, kref);
157         rport = rdata->rport;
158         put_device(&rport->dev);
159 }
160
161 /**
162  * fc_rport_state() - return a string for the state the rport is in
163  * @rdata: remote port private data
164  */
165 static const char *fc_rport_state(struct fc_rport_priv *rdata)
166 {
167         const char *cp;
168
169         cp = fc_rport_state_names[rdata->rp_state];
170         if (!cp)
171                 cp = "Unknown";
172         return cp;
173 }
174
175 /**
176  * fc_set_rport_loss_tmo() - Set the remote port loss timeout in seconds.
177  * @rport: Pointer to Fibre Channel remote port structure
178  * @timeout: timeout in seconds
179  */
180 void fc_set_rport_loss_tmo(struct fc_rport *rport, u32 timeout)
181 {
182         if (timeout)
183                 rport->dev_loss_tmo = timeout + 5;
184         else
185                 rport->dev_loss_tmo = 30;
186 }
187 EXPORT_SYMBOL(fc_set_rport_loss_tmo);
188
189 /**
190  * fc_plogi_get_maxframe() - Get max payload from the common service parameters
191  * @flp: FLOGI or PLOGI payload structure
192  * @maxval: upper limit, may be less than what is in the service parameters
193  */
194 static unsigned int fc_plogi_get_maxframe(struct fc_els_flogi *flp,
195                                           unsigned int maxval)
196 {
197         unsigned int mfs;
198
199         /*
200          * Get max payload from the common service parameters and the
201          * class 3 receive data field size.
202          */
203         mfs = ntohs(flp->fl_csp.sp_bb_data) & FC_SP_BB_DATA_MASK;
204         if (mfs >= FC_SP_MIN_MAX_PAYLOAD && mfs < maxval)
205                 maxval = mfs;
206         mfs = ntohs(flp->fl_cssp[3 - 1].cp_rdfs);
207         if (mfs >= FC_SP_MIN_MAX_PAYLOAD && mfs < maxval)
208                 maxval = mfs;
209         return maxval;
210 }
211
212 /**
213  * fc_rport_state_enter() - Change the rport's state
214  * @rdata: The rport whose state should change
215  * @new: The new state of the rport
216  *
217  * Locking Note: Called with the rport lock held
218  */
219 static void fc_rport_state_enter(struct fc_rport_priv *rdata,
220                                  enum fc_rport_state new)
221 {
222         if (rdata->rp_state != new)
223                 rdata->retries = 0;
224         rdata->rp_state = new;
225 }
226
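/**
 * fc_rport_work() - Handle a remote port event from the rport_event_queue
 * @work: The work struct of the fc_rport_priv
 *
 * For RPORT_EV_READY, the rogue rport is swapped for a real rport from the
 * FC transport class.  For failure, logoff and stop events, the remote port
 * is torn down.  In either case the rport_ops event callback is notified.
 */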
227 static void fc_rport_work(struct work_struct *work)
228 {
229         u32 port_id;
230         struct fc_rport_priv *rdata =
231                 container_of(work, struct fc_rport_priv, event_work);
232         enum fc_rport_event event;
233         enum fc_rport_trans_state trans_state;
234         struct fc_lport *lport = rdata->local_port;
235         struct fc_rport_operations *rport_ops;
236         struct fc_rport *new_rport;
237         struct fc_rport_priv *new_rdata;
238         struct fc_rport_identifiers ids;
239         struct fc_rport *rport;
240
241         mutex_lock(&rdata->rp_mutex);
242         event = rdata->event;
243         rport_ops = rdata->ops;
244         rport = rdata->rport;
245
246         switch (event) {
247         case RPORT_EV_READY:
248                 ids = rdata->ids;
249                 rdata->event = RPORT_EV_NONE;
250                 mutex_unlock(&rdata->rp_mutex);
251
252                 new_rport = fc_remote_port_add(lport->host, 0, &ids);
253                 if (new_rport) {
254                         /*
255                          * Switch from the rogue rport to the rport
256                          * returned by the FC class.
257                          */
258                         new_rport->maxframe_size = rdata->maxframe_size;
259
260                         new_rdata = new_rport->dd_data;
261                         new_rdata->rport = new_rport;
262                         new_rdata->ids = ids;
263                         new_rdata->e_d_tov = rdata->e_d_tov;
264                         new_rdata->r_a_tov = rdata->r_a_tov;
265                         new_rdata->ops = rdata->ops;
266                         new_rdata->local_port = rdata->local_port;
267                         new_rdata->flags = FC_RP_FLAGS_REC_SUPPORTED;
268                         new_rdata->trans_state = FC_PORTSTATE_REAL;
269                         new_rdata->maxframe_size = rdata->maxframe_size;
270                         new_rdata->supported_classes = rdata->supported_classes;
271                         kref_init(&new_rdata->kref);
272                         mutex_init(&new_rdata->rp_mutex);
273                         INIT_DELAYED_WORK(&new_rdata->retry_work,
274                                           fc_rport_timeout);
275                         INIT_LIST_HEAD(&new_rdata->peers);
276                         INIT_WORK(&new_rdata->event_work, fc_rport_work);
277
278                         fc_rport_state_enter(new_rdata, RPORT_ST_READY);
279                 } else {
280                         printk(KERN_WARNING "libfc: Failed to allocate "
281                                "memory for rport (%6x)\n", ids.port_id);
282                         event = RPORT_EV_FAILED;
283                 }
284                 if (rdata->ids.port_id != FC_FID_DIR_SERV)
285                         if (rport_ops->event_callback)
286                                 rport_ops->event_callback(lport, rdata,
287                                                           RPORT_EV_FAILED);
288                 kref_put(&rdata->kref, lport->tt.rport_destroy);
289                 rdata = new_rport->dd_data;
290                 if (rport_ops->event_callback)
291                         rport_ops->event_callback(lport, rdata, event);
292                 break;
293
294         case RPORT_EV_FAILED:
295         case RPORT_EV_LOGO:
296         case RPORT_EV_STOP:
297                 trans_state = rdata->trans_state;
298                 mutex_unlock(&rdata->rp_mutex);
299                 if (rport_ops->event_callback)
300                         rport_ops->event_callback(lport, rdata, event);
301                 cancel_delayed_work_sync(&rdata->retry_work);
302                 if (trans_state == FC_PORTSTATE_ROGUE)
303                         kref_put(&rdata->kref, lport->tt.rport_destroy);
304                 else {
305                         port_id = rport->port_id;
306                         fc_remote_port_delete(rport);
307                         lport->tt.exch_mgr_reset(lport, 0, port_id);
308                         lport->tt.exch_mgr_reset(lport, port_id, 0);
309                 }
310                 break;
311
312         default:
313                 mutex_unlock(&rdata->rp_mutex);
314                 break;
315         }
316 }
317
318 /**
319  * fc_rport_login() - Start the remote port login state machine
320  * @rdata: private remote port
321  *
322  * Locking Note: Called without the rport lock held. This
323  * function will hold the rport lock, call an _enter_*
324  * function and then unlock the rport.
325  */
326 int fc_rport_login(struct fc_rport_priv *rdata)
327 {
328         mutex_lock(&rdata->rp_mutex);
329
330         FC_RPORT_DBG(rdata, "Login to port\n");
331
332         fc_rport_enter_plogi(rdata);
333
334         mutex_unlock(&rdata->rp_mutex);
335
336         return 0;
337 }
338
339 /**
340  * fc_rport_enter_delete() - schedule a remote port to be deleted.
341  * @rdata: private remote port
342  * @event: event to report as the reason for deletion
343  *
344  * Locking Note: Called with the rport lock held.
345  *
346  * Allow state change into DELETE only once.
347  *
348  * Call queue_work only if there's no event already pending.
349  * Set the new event so that the old pending event will not occur.
350  * Since we have the mutex, even if fc_rport_work() is already started,
351  * it'll see the new event.
352  */
353 static void fc_rport_enter_delete(struct fc_rport_priv *rdata,
354                                   enum fc_rport_event event)
355 {
356         if (rdata->rp_state == RPORT_ST_DELETE)
357                 return;
358
359         FC_RPORT_DBG(rdata, "Delete port\n");
360
361         fc_rport_state_enter(rdata, RPORT_ST_DELETE);
362
363         if (rdata->event == RPORT_EV_NONE)
364                 queue_work(rport_event_queue, &rdata->event_work);
365         rdata->event = event;
366 }
367
368 /**
369  * fc_rport_logoff() - Logoff and remove an rport
370  * @rdata: private remote port
371  *
372  * Locking Note: Called without the rport lock held. This
373  * function will hold the rport lock, call an _enter_*
374  * function and then unlock the rport.
375  */
376 int fc_rport_logoff(struct fc_rport_priv *rdata)
377 {
378         mutex_lock(&rdata->rp_mutex);
379
380         FC_RPORT_DBG(rdata, "Remove port\n");
381
382         if (rdata->rp_state == RPORT_ST_DELETE) {
383                 FC_RPORT_DBG(rdata, "Port in Delete state, not removing\n");
384                 mutex_unlock(&rdata->rp_mutex);
385                 goto out;
386         }
387
388         fc_rport_enter_logo(rdata);
389
390         /*
391          * Change the state to Delete so that we discard
392          * the response.
393          */
394         fc_rport_enter_delete(rdata, RPORT_EV_STOP);
395         mutex_unlock(&rdata->rp_mutex);
396
397 out:
398         return 0;
399 }
400
401 /**
402  * fc_rport_enter_ready() - The rport is ready
403  * @rdata: private remote port
404  *
405  * Locking Note: The rport lock is expected to be held before calling
406  * this routine.
407  */
408 static void fc_rport_enter_ready(struct fc_rport_priv *rdata)
409 {
410         fc_rport_state_enter(rdata, RPORT_ST_READY);
411
412         FC_RPORT_DBG(rdata, "Port is Ready\n");
413
414         if (rdata->event == RPORT_EV_NONE)
415                 queue_work(rport_event_queue, &rdata->event_work);
416         rdata->event = RPORT_EV_READY;
417 }
418
419 /**
420  * fc_rport_timeout() - Handler for the retry_work timer.
421  * @work: The work struct of the fc_rport_priv
422  *
423  * Locking Note: Called without the rport lock held. This
424  * function will hold the rport lock, call an _enter_*
425  * function and then unlock the rport.
426  */
427 static void fc_rport_timeout(struct work_struct *work)
428 {
429         struct fc_rport_priv *rdata =
430                 container_of(work, struct fc_rport_priv, retry_work.work);
431
432         mutex_lock(&rdata->rp_mutex);
433
434         switch (rdata->rp_state) {
435         case RPORT_ST_PLOGI:
436                 fc_rport_enter_plogi(rdata);
437                 break;
438         case RPORT_ST_PRLI:
439                 fc_rport_enter_prli(rdata);
440                 break;
441         case RPORT_ST_RTV:
442                 fc_rport_enter_rtv(rdata);
443                 break;
444         case RPORT_ST_LOGO:
445                 fc_rport_enter_logo(rdata);
446                 break;
447         case RPORT_ST_READY:
448         case RPORT_ST_INIT:
449         case RPORT_ST_DELETE:
450                 break;
451         }
452
453         mutex_unlock(&rdata->rp_mutex);
454 }
455
456 /**
457  * fc_rport_error() - Error handler, called once retries have been exhausted
458  * @rdata: private remote port
459  * @fp: The frame pointer
460  *
461  * Locking Note: The rport lock is expected to be held before
462  * calling this routine
463  */
464 static void fc_rport_error(struct fc_rport_priv *rdata, struct fc_frame *fp)
465 {
466         FC_RPORT_DBG(rdata, "Error %ld in state %s, retries %d\n",
467                      PTR_ERR(fp), fc_rport_state(rdata), rdata->retries);
468
469         switch (rdata->rp_state) {
470         case RPORT_ST_PLOGI:
471         case RPORT_ST_PRLI:
472         case RPORT_ST_LOGO:
473                 fc_rport_enter_delete(rdata, RPORT_EV_FAILED);
474                 break;
475         case RPORT_ST_RTV:
476                 fc_rport_enter_ready(rdata);
477                 break;
478         case RPORT_ST_DELETE:
479         case RPORT_ST_READY:
480         case RPORT_ST_INIT:
481                 break;
482         }
483 }
484
485 /**
486  * fc_rport_error_retry() - Error handler when retries are desired
487  * @rdata: private remote port data
488  * @fp: The frame pointer
489  *
490  * If the error was an exchange timeout retry immediately,
491  * otherwise wait for E_D_TOV.
492  *
493  * Locking Note: The rport lock is expected to be held before
494  * calling this routine
495  */
496 static void fc_rport_error_retry(struct fc_rport_priv *rdata,
497                                  struct fc_frame *fp)
498 {
499         unsigned long delay = FC_DEF_E_D_TOV;
500
501         /* make sure this isn't an FC_EX_CLOSED error, never retry those */
502         if (PTR_ERR(fp) == -FC_EX_CLOSED)
503                 return fc_rport_error(rdata, fp);
504
505         if (rdata->retries < rdata->local_port->max_rport_retry_count) {
506                 FC_RPORT_DBG(rdata, "Error %ld in state %s, retrying\n",
507                              PTR_ERR(fp), fc_rport_state(rdata));
508                 rdata->retries++;
509                 /* no additional delay on exchange timeouts */
510                 if (PTR_ERR(fp) == -FC_EX_TIMEOUT)
511                         delay = 0;
512                 schedule_delayed_work(&rdata->retry_work, delay);
513                 return;
514         }
515
516         return fc_rport_error(rdata, fp);
517 }
518
519 /**
520  * fc_rport_plogi_resp() - Port Login (PLOGI) response handler
521  * @sp: current sequence in the PLOGI exchange
522  * @fp: response frame
523  * @rdata_arg: private remote port data
524  *
525  * Locking Note: This function will be called without the rport lock
526  * held, but it will lock, call an _enter_* function or fc_rport_error
527  * and then unlock the rport.
528  */
529 static void fc_rport_plogi_resp(struct fc_seq *sp, struct fc_frame *fp,
530                                 void *rdata_arg)
531 {
532         struct fc_rport_priv *rdata = rdata_arg;
533         struct fc_lport *lport = rdata->local_port;
534         struct fc_els_flogi *plp = NULL;
535         unsigned int tov;
536         u16 csp_seq;
537         u16 cssp_seq;
538         u8 op;
539
540         mutex_lock(&rdata->rp_mutex);
541
542         FC_RPORT_DBG(rdata, "Received a PLOGI response\n");
543
544         if (rdata->rp_state != RPORT_ST_PLOGI) {
545                 FC_RPORT_DBG(rdata, "Received a PLOGI response, but in state "
546                              "%s\n", fc_rport_state(rdata));
547                 if (IS_ERR(fp))
548                         goto err;
549                 goto out;
550         }
551
552         if (IS_ERR(fp)) {
553                 fc_rport_error_retry(rdata, fp);
554                 goto err;
555         }
556
557         op = fc_frame_payload_op(fp);
558         if (op == ELS_LS_ACC &&
559             (plp = fc_frame_payload_get(fp, sizeof(*plp))) != NULL) {
560                 rdata->ids.port_name = get_unaligned_be64(&plp->fl_wwpn);
561                 rdata->ids.node_name = get_unaligned_be64(&plp->fl_wwnn);
562
563                 tov = ntohl(plp->fl_csp.sp_e_d_tov);
564                 if (ntohs(plp->fl_csp.sp_features) & FC_SP_FT_EDTR)
565                         tov /= 1000;
566                 if (tov > rdata->e_d_tov)
567                         rdata->e_d_tov = tov;
568                 csp_seq = ntohs(plp->fl_csp.sp_tot_seq);
569                 cssp_seq = ntohs(plp->fl_cssp[3 - 1].cp_con_seq);
570                 if (cssp_seq < csp_seq)
571                         csp_seq = cssp_seq;
572                 rdata->max_seq = csp_seq;
573                 rdata->maxframe_size = fc_plogi_get_maxframe(plp, lport->mfs);
574
575                 /*
576                  * If the rport is one of the well known addresses
577                  * we skip PRLI and RTV and go straight to READY.
578                  */
579                 if (rdata->ids.port_id >= FC_FID_DOM_MGR)
580                         fc_rport_enter_ready(rdata);
581                 else
582                         fc_rport_enter_prli(rdata);
583         } else
584                 fc_rport_error_retry(rdata, fp);
585
586 out:
587         fc_frame_free(fp);
588 err:
589         mutex_unlock(&rdata->rp_mutex);
590         kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
591 }
592
593 /**
594  * fc_rport_enter_plogi() - Send Port Login (PLOGI) request to peer
595  * @rdata: private remote port data
596  *
597  * Locking Note: The rport lock is expected to be held before calling
598  * this routine.
599  */
600 static void fc_rport_enter_plogi(struct fc_rport_priv *rdata)
601 {
602         struct fc_lport *lport = rdata->local_port;
603         struct fc_frame *fp;
604
605         FC_RPORT_DBG(rdata, "Port entered PLOGI state from %s state\n",
606                      fc_rport_state(rdata));
607
608         fc_rport_state_enter(rdata, RPORT_ST_PLOGI);
609
610         rdata->maxframe_size = FC_MIN_MAX_PAYLOAD;
611         fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi));
612         if (!fp) {
613                 fc_rport_error_retry(rdata, fp);
614                 return;
615         }
616         rdata->e_d_tov = lport->e_d_tov;
617
618         if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_PLOGI,
619                                   fc_rport_plogi_resp, rdata, lport->e_d_tov))
620                 fc_rport_error_retry(rdata, fp);
621         else
622                 kref_get(&rdata->kref);
623 }
624
625 /**
626  * fc_rport_prli_resp() - Process Login (PRLI) response handler
627  * @sp: current sequence in the PRLI exchange
628  * @fp: response frame
629  * @rdata_arg: private remote port data
630  *
631  * Locking Note: This function will be called without the rport lock
632  * held, but it will lock, call an _enter_* function or fc_rport_error
633  * and then unlock the rport.
634  */
635 static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
636                                void *rdata_arg)
637 {
638         struct fc_rport_priv *rdata = rdata_arg;
639         struct {
640                 struct fc_els_prli prli;
641                 struct fc_els_spp spp;
642         } *pp;
643         u32 roles = FC_RPORT_ROLE_UNKNOWN;
644         u32 fcp_parm = 0;
645         u8 op;
646
647         mutex_lock(&rdata->rp_mutex);
648
649         FC_RPORT_DBG(rdata, "Received a PRLI response\n");
650
651         if (rdata->rp_state != RPORT_ST_PRLI) {
652                 FC_RPORT_DBG(rdata, "Received a PRLI response, but in state "
653                              "%s\n", fc_rport_state(rdata));
654                 if (IS_ERR(fp))
655                         goto err;
656                 goto out;
657         }
658
659         if (IS_ERR(fp)) {
660                 fc_rport_error_retry(rdata, fp);
661                 goto err;
662         }
663
664         op = fc_frame_payload_op(fp);
665         if (op == ELS_LS_ACC) {
666                 pp = fc_frame_payload_get(fp, sizeof(*pp));
667                 if (pp && pp->prli.prli_spp_len >= sizeof(pp->spp)) {
668                         fcp_parm = ntohl(pp->spp.spp_params);
669                         if (fcp_parm & FCP_SPPF_RETRY)
670                                 rdata->flags |= FC_RP_FLAGS_RETRY;
671                 }
672
673                 rdata->supported_classes = FC_COS_CLASS3;
674                 if (fcp_parm & FCP_SPPF_INIT_FCN)
675                         roles |= FC_RPORT_ROLE_FCP_INITIATOR;
676                 if (fcp_parm & FCP_SPPF_TARG_FCN)
677                         roles |= FC_RPORT_ROLE_FCP_TARGET;
678
679                 rdata->ids.roles = roles;
680                 fc_rport_enter_rtv(rdata);
681
682         } else {
683                 FC_RPORT_DBG(rdata, "Bad ELS response for PRLI command\n");
684                 fc_rport_enter_delete(rdata, RPORT_EV_FAILED);
685         }
686
687 out:
688         fc_frame_free(fp);
689 err:
690         mutex_unlock(&rdata->rp_mutex);
691         kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
692 }
693
694 /**
695  * fc_rport_logo_resp() - Logout (LOGO) response handler
696  * @sp: current sequence in the LOGO exchange
697  * @fp: response frame
698  * @rdata_arg: private remote port data
699  *
700  * Locking Note: This function will be called without the rport lock
701  * held, but it will lock, call an _enter_* function or fc_rport_error
702  * and then unlock the rport.
703  */
704 static void fc_rport_logo_resp(struct fc_seq *sp, struct fc_frame *fp,
705                                void *rdata_arg)
706 {
707         struct fc_rport_priv *rdata = rdata_arg;
708         u8 op;
709
710         mutex_lock(&rdata->rp_mutex);
711
712         FC_RPORT_DBG(rdata, "Received a LOGO response\n");
713
714         if (rdata->rp_state != RPORT_ST_LOGO) {
715                 FC_RPORT_DBG(rdata, "Received a LOGO response, but in state "
716                              "%s\n", fc_rport_state(rdata));
717                 if (IS_ERR(fp))
718                         goto err;
719                 goto out;
720         }
721
722         if (IS_ERR(fp)) {
723                 fc_rport_error_retry(rdata, fp);
724                 goto err;
725         }
726
727         op = fc_frame_payload_op(fp);
728         if (op == ELS_LS_ACC) {
729                 fc_rport_enter_rtv(rdata);
730         } else {
731                 FC_RPORT_DBG(rdata, "Bad ELS response for LOGO command\n");
732                 fc_rport_enter_delete(rdata, RPORT_EV_LOGO);
733         }
734
735 out:
736         fc_frame_free(fp);
737 err:
738         mutex_unlock(&rdata->rp_mutex);
739         kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
740 }
741
742 /**
743  * fc_rport_enter_prli() - Send Process Login (PRLI) request to peer
744  * @rdata: private remote port data
745  *
746  * Locking Note: The rport lock is expected to be held before calling
747  * this routine.
748  */
749 static void fc_rport_enter_prli(struct fc_rport_priv *rdata)
750 {
751         struct fc_lport *lport = rdata->local_port;
752         struct {
753                 struct fc_els_prli prli;
754                 struct fc_els_spp spp;
755         } *pp;
756         struct fc_frame *fp;
757
758         FC_RPORT_DBG(rdata, "Port entered PRLI state from %s state\n",
759                      fc_rport_state(rdata));
760
761         fc_rport_state_enter(rdata, RPORT_ST_PRLI);
762
763         fp = fc_frame_alloc(lport, sizeof(*pp));
764         if (!fp) {
765                 fc_rport_error_retry(rdata, fp);
766                 return;
767         }
768
769         if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_PRLI,
770                                   fc_rport_prli_resp, rdata, lport->e_d_tov))
771                 fc_rport_error_retry(rdata, fp);
772         else
773                 kref_get(&rdata->kref);
774 }
775
776 /**
777  * fc_rport_rtv_resp() - Request Timeout Value (RTV) response handler
778  * @sp: current sequence in the RTV exchange
779  * @fp: response frame
780  * @rdata_arg: private remote port data
781  *
782  * Many targets don't seem to support this.
783  *
784  * Locking Note: This function will be called without the rport lock
785  * held, but it will lock, call an _enter_* function or fc_rport_error
786  * and then unlock the rport.
787  */
788 static void fc_rport_rtv_resp(struct fc_seq *sp, struct fc_frame *fp,
789                               void *rdata_arg)
790 {
791         struct fc_rport_priv *rdata = rdata_arg;
792         u8 op;
793
794         mutex_lock(&rdata->rp_mutex);
795
796         FC_RPORT_DBG(rdata, "Received a RTV response\n");
797
798         if (rdata->rp_state != RPORT_ST_RTV) {
799                 FC_RPORT_DBG(rdata, "Received a RTV response, but in state "
800                              "%s\n", fc_rport_state(rdata));
801                 if (IS_ERR(fp))
802                         goto err;
803                 goto out;
804         }
805
806         if (IS_ERR(fp)) {
807                 fc_rport_error(rdata, fp);
808                 goto err;
809         }
810
811         op = fc_frame_payload_op(fp);
812         if (op == ELS_LS_ACC) {
813                 struct fc_els_rtv_acc *rtv;
814                 u32 toq;
815                 u32 tov;
816
817                 rtv = fc_frame_payload_get(fp, sizeof(*rtv));
818                 if (rtv) {
819                         toq = ntohl(rtv->rtv_toq);
820                         tov = ntohl(rtv->rtv_r_a_tov);
821                         if (tov == 0)
822                                 tov = 1;
823                         rdata->r_a_tov = tov;
824                         tov = ntohl(rtv->rtv_e_d_tov);
825                         if (toq & FC_ELS_RTV_EDRES)
826                                 tov /= 1000000;
827                         if (tov == 0)
828                                 tov = 1;
829                         rdata->e_d_tov = tov;
830                 }
831         }
832
833         fc_rport_enter_ready(rdata);
834
835 out:
836         fc_frame_free(fp);
837 err:
838         mutex_unlock(&rdata->rp_mutex);
839         kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
840 }
841
842 /**
843  * fc_rport_enter_rtv() - Send Request Timeout Value (RTV) request to peer
844  * @rdata: private remote port data
845  *
846  * Locking Note: The rport lock is expected to be held before calling
847  * this routine.
848  */
849 static void fc_rport_enter_rtv(struct fc_rport_priv *rdata)
850 {
851         struct fc_frame *fp;
852         struct fc_lport *lport = rdata->local_port;
853
854         FC_RPORT_DBG(rdata, "Port entered RTV state from %s state\n",
855                      fc_rport_state(rdata));
856
857         fc_rport_state_enter(rdata, RPORT_ST_RTV);
858
859         fp = fc_frame_alloc(lport, sizeof(struct fc_els_rtv));
860         if (!fp) {
861                 fc_rport_error_retry(rdata, fp);
862                 return;
863         }
864
865         if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_RTV,
866                                      fc_rport_rtv_resp, rdata, lport->e_d_tov))
867                 fc_rport_error_retry(rdata, fp);
868         else
869                 kref_get(&rdata->kref);
870 }
871
872 /**
873  * fc_rport_enter_logo() - Send Logout (LOGO) request to peer
874  * @rdata: private remote port data
875  *
876  * Locking Note: The rport lock is expected to be held before calling
877  * this routine.
878  */
879 static void fc_rport_enter_logo(struct fc_rport_priv *rdata)
880 {
881         struct fc_lport *lport = rdata->local_port;
882         struct fc_frame *fp;
883
884         FC_RPORT_DBG(rdata, "Port entered LOGO state from %s state\n",
885                      fc_rport_state(rdata));
886
887         fc_rport_state_enter(rdata, RPORT_ST_LOGO);
888
889         fp = fc_frame_alloc(lport, sizeof(struct fc_els_logo));
890         if (!fp) {
891                 fc_rport_error_retry(rdata, fp);
892                 return;
893         }
894
895         if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_LOGO,
896                                   fc_rport_logo_resp, rdata, lport->e_d_tov))
897                 fc_rport_error_retry(rdata, fp);
898         else
899                 kref_get(&rdata->kref);
900 }
901
902
903 /**
904  * fc_rport_recv_req() - Receive a request from a rport
905  * @sp: current sequence in the request exchange
906  * @fp: request frame
907  * @rdata: private remote port data
908  *
909  * Locking Note: Called without the rport lock held. This
910  * function will hold the rport lock, call an _enter_*
911  * function and then unlock the rport.
912  */
913 void fc_rport_recv_req(struct fc_seq *sp, struct fc_frame *fp,
914                        struct fc_rport_priv *rdata)
915 {
916         struct fc_lport *lport = rdata->local_port;
917
918         struct fc_frame_header *fh;
919         struct fc_seq_els_data els_data;
920         u8 op;
921
922         mutex_lock(&rdata->rp_mutex);
923
924         els_data.fp = NULL;
925         els_data.explan = ELS_EXPL_NONE;
926         els_data.reason = ELS_RJT_NONE;
927
928         fh = fc_frame_header_get(fp);
929
930         if (fh->fh_r_ctl == FC_RCTL_ELS_REQ && fh->fh_type == FC_TYPE_ELS) {
931                 op = fc_frame_payload_op(fp);
932                 switch (op) {
933                 case ELS_PLOGI:
934                         fc_rport_recv_plogi_req(rdata, sp, fp);
935                         break;
936                 case ELS_PRLI:
937                         fc_rport_recv_prli_req(rdata, sp, fp);
938                         break;
939                 case ELS_PRLO:
940                         fc_rport_recv_prlo_req(rdata, sp, fp);
941                         break;
942                 case ELS_LOGO:
943                         fc_rport_recv_logo_req(rdata, sp, fp);
944                         break;
945                 case ELS_RRQ:
946                         els_data.fp = fp;
947                         lport->tt.seq_els_rsp_send(sp, ELS_RRQ, &els_data);
948                         break;
949                 case ELS_REC:
950                         els_data.fp = fp;
951                         lport->tt.seq_els_rsp_send(sp, ELS_REC, &els_data);
952                         break;
953                 default:
954                         els_data.reason = ELS_RJT_UNSUP;
955                         lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &els_data);
956                         break;
957                 }
958         }
959
960         mutex_unlock(&rdata->rp_mutex);
961 }
962
963 /**
964  * fc_rport_recv_plogi_req() - Handle incoming Port Login (PLOGI) request
965  * @rdata: private remote port data
966  * @sp: current sequence in the PLOGI exchange
967  * @rx_fp: PLOGI request frame
968  *
969  * Locking Note: The rport lock is expected to be held before calling
970  * this function.
971  */
972 static void fc_rport_recv_plogi_req(struct fc_rport_priv *rdata,
973                                     struct fc_seq *sp, struct fc_frame *rx_fp)
974 {
975         struct fc_lport *lport = rdata->local_port;
976         struct fc_frame *fp = rx_fp;
977         struct fc_exch *ep;
978         struct fc_frame_header *fh;
979         struct fc_els_flogi *pl;
980         struct fc_seq_els_data rjt_data;
981         u32 sid;
982         u64 wwpn;
983         u64 wwnn;
984         enum fc_els_rjt_reason reject = 0;
985         u32 f_ctl;
986         rjt_data.fp = NULL;
987
988         fh = fc_frame_header_get(fp);
989
990         FC_RPORT_DBG(rdata, "Received PLOGI request while in state %s\n",
991                      fc_rport_state(rdata));
992
993         sid = ntoh24(fh->fh_s_id);
994         pl = fc_frame_payload_get(fp, sizeof(*pl));
995         if (!pl) {
996                 FC_RPORT_DBG(rdata, "Received PLOGI too short\n");
997                 WARN_ON(1);
998                 /* XXX TBD: send reject? */
999                 fc_frame_free(fp);
1000                 return;
1001         }
1002         wwpn = get_unaligned_be64(&pl->fl_wwpn);
1003         wwnn = get_unaligned_be64(&pl->fl_wwnn);
1004
1005         /*
1006          * If the session was just created, possibly due to the incoming PLOGI,
1007          * set the state appropriately and accept the PLOGI.
1008          *
1009          * If we had also sent a PLOGI, and if the received PLOGI is from a
1010          * higher WWPN, we accept it, otherwise an LS_RJT is sent with reason
1011          * "command already in progress".
1012          *
1013          * XXX TBD: If the session was ready before, the PLOGI should result in
1014          * all outstanding exchanges being reset.
1015          */
1016         switch (rdata->rp_state) {
1017         case RPORT_ST_INIT:
1018                 FC_RPORT_DBG(rdata, "Received PLOGI, wwpn %llx state INIT "
1019                              "- reject\n", (unsigned long long)wwpn);
1020                 reject = ELS_RJT_UNSUP;
1021                 break;
1022         case RPORT_ST_PLOGI:
1023                 FC_RPORT_DBG(rdata, "Received PLOGI in PLOGI state %d\n",
1024                              rdata->rp_state);
1025                 if (wwpn < lport->wwpn)
1026                         reject = ELS_RJT_INPROG;
1027                 break;
1028         case RPORT_ST_PRLI:
1029         case RPORT_ST_READY:
1030                 FC_RPORT_DBG(rdata, "Received PLOGI in logged-in state %d "
1031                              "- ignored for now\n", rdata->rp_state);
1032                 /* XXX TBD - should reset */
1033                 break;
1034         case RPORT_ST_DELETE:
1035         default:
1036                 FC_RPORT_DBG(rdata, "Received PLOGI in unexpected "
1037                              "state %d\n", rdata->rp_state);
1038                 fc_frame_free(fp);
1039                 return;
1040                 break;
1041         }
1042
1043         if (reject) {
1044                 rjt_data.reason = reject;
1045                 rjt_data.explan = ELS_EXPL_NONE;
1046                 lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
1047                 fc_frame_free(fp);
1048         } else {
1049                 fp = fc_frame_alloc(lport, sizeof(*pl));
1050                 if (fp == NULL) {
1051                         fp = rx_fp;
1052                         rjt_data.reason = ELS_RJT_UNAB;
1053                         rjt_data.explan = ELS_EXPL_NONE;
1054                         lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
1055                         fc_frame_free(fp);
1056                 } else {
1057                         sp = lport->tt.seq_start_next(sp);
1058                         WARN_ON(!sp);
1059                         rdata->ids.port_name = wwpn;
1060                         rdata->ids.node_name = wwnn;
1061
1062                         /*
1063                          * Get session payload size from incoming PLOGI.
1064                          */
1065                         rdata->maxframe_size =
1066                                 fc_plogi_get_maxframe(pl, lport->mfs);
1067                         fc_frame_free(rx_fp);
1068                         fc_plogi_fill(lport, fp, ELS_LS_ACC);
1069
1070                         /*
1071                          * Send LS_ACC.  If this fails,
1072                          * the originator should retry.
1073                          */
1074                         f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ;
1075                         f_ctl |= FC_FC_END_SEQ | FC_FC_SEQ_INIT;
1076                         ep = fc_seq_exch(sp);
1077                         fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
1078                                        FC_TYPE_ELS, f_ctl, 0);
1079                         lport->tt.seq_send(lport, sp, fp);
1080                         if (rdata->rp_state == RPORT_ST_PLOGI)
1081                                 fc_rport_enter_prli(rdata);
1082                 }
1083         }
1084 }
1085
1086 /**
1087  * fc_rport_recv_prli_req() - Handle incoming Process Login (PRLI) request
1088  * @rdata: private remote port data
1089  * @sp: current sequence in the PRLI exchange
1090  * @rx_fp: PRLI request frame
1091  *
1092  * Locking Note: The rport lock is expected to be held before calling
1093  * this function.
1094  */
1095 static void fc_rport_recv_prli_req(struct fc_rport_priv *rdata,
1096                                    struct fc_seq *sp, struct fc_frame *rx_fp)
1097 {
1098         struct fc_lport *lport = rdata->local_port;
1099         struct fc_exch *ep;
1100         struct fc_frame *fp;
1101         struct fc_frame_header *fh;
1102         struct {
1103                 struct fc_els_prli prli;
1104                 struct fc_els_spp spp;
1105         } *pp;
1106         struct fc_els_spp *rspp;        /* request service param page */
1107         struct fc_els_spp *spp; /* response spp */
1108         unsigned int len;
1109         unsigned int plen;
1110         enum fc_els_rjt_reason reason = ELS_RJT_UNAB;
1111         enum fc_els_rjt_explan explan = ELS_EXPL_NONE;
1112         enum fc_els_spp_resp resp;
1113         struct fc_seq_els_data rjt_data;
1114         u32 f_ctl;
1115         u32 fcp_parm;
1116         u32 roles = FC_RPORT_ROLE_UNKNOWN;
1117         rjt_data.fp = NULL;
1118
1119         fh = fc_frame_header_get(rx_fp);
1120
1121         FC_RPORT_DBG(rdata, "Received PRLI request while in state %s\n",
1122                      fc_rport_state(rdata));
1123
1124         switch (rdata->rp_state) {
1125         case RPORT_ST_PRLI:
1126         case RPORT_ST_READY:
1127                 reason = ELS_RJT_NONE;
1128                 break;
1129         default:
1130                 fc_frame_free(rx_fp);
1131                 return;
1132                 break;
1133         }
1134         len = fr_len(rx_fp) - sizeof(*fh);
1135         pp = fc_frame_payload_get(rx_fp, sizeof(*pp));
1136         if (pp == NULL) {
1137                 reason = ELS_RJT_PROT;
1138                 explan = ELS_EXPL_INV_LEN;
1139         } else {
1140                 plen = ntohs(pp->prli.prli_len);
1141                 if ((plen % 4) != 0 || plen > len) {
1142                         reason = ELS_RJT_PROT;
1143                         explan = ELS_EXPL_INV_LEN;
1144                 } else if (plen < len) {
1145                         len = plen;
1146                 }
1147                 plen = pp->prli.prli_spp_len;
1148                 if ((plen % 4) != 0 || plen < sizeof(*spp) ||
1149                     plen > len || len < sizeof(*pp)) {
1150                         reason = ELS_RJT_PROT;
1151                         explan = ELS_EXPL_INV_LEN;
1152                 }
1153                 rspp = &pp->spp;
1154         }
1155         if (reason != ELS_RJT_NONE ||
1156             (fp = fc_frame_alloc(lport, len)) == NULL) {
1157                 rjt_data.reason = reason;
1158                 rjt_data.explan = explan;
1159                 lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
1160         } else {
1161                 sp = lport->tt.seq_start_next(sp);
1162                 WARN_ON(!sp);
1163                 pp = fc_frame_payload_get(fp, len);
1164                 WARN_ON(!pp);
1165                 memset(pp, 0, len);
1166                 pp->prli.prli_cmd = ELS_LS_ACC;
1167                 pp->prli.prli_spp_len = plen;
1168                 pp->prli.prli_len = htons(len);
1169                 len -= sizeof(struct fc_els_prli);
1170
1171                 /*
1172                  * Go through all the service parameter pages and build
1173                  * response.  If plen indicates longer SPP than standard,
1174                  * use that.  The entire response has been pre-cleared above.
1175                  */
1176                 spp = &pp->spp;
1177                 while (len >= plen) {
1178                         spp->spp_type = rspp->spp_type;
1179                         spp->spp_type_ext = rspp->spp_type_ext;
1180                         spp->spp_flags = rspp->spp_flags & FC_SPP_EST_IMG_PAIR;
1181                         resp = FC_SPP_RESP_ACK;
1182                         if (rspp->spp_flags & FC_SPP_RPA_VAL)
1183                                 resp = FC_SPP_RESP_NO_PA;
1184                         switch (rspp->spp_type) {
1185                         case 0: /* common to all FC-4 types */
1186                                 break;
1187                         case FC_TYPE_FCP:
1188                                 fcp_parm = ntohl(rspp->spp_params);
1189                                 if (fcp_parm & FCP_SPPF_RETRY)
1190                                         rdata->flags |= FC_RP_FLAGS_RETRY;
1191                                 rdata->supported_classes = FC_COS_CLASS3;
1192                                 if (fcp_parm & FCP_SPPF_INIT_FCN)
1193                                         roles |= FC_RPORT_ROLE_FCP_INITIATOR;
1194                                 if (fcp_parm & FCP_SPPF_TARG_FCN)
1195                                         roles |= FC_RPORT_ROLE_FCP_TARGET;
1196                                 rdata->ids.roles = roles;
1197
1198                                 spp->spp_params =
1199                                         htonl(lport->service_params);
1200                                 break;
1201                         default:
1202                                 resp = FC_SPP_RESP_INVL;
1203                                 break;
1204                         }
1205                         spp->spp_flags |= resp;
1206                         len -= plen;
1207                         rspp = (struct fc_els_spp *)((char *)rspp + plen);
1208                         spp = (struct fc_els_spp *)((char *)spp + plen);
1209                 }
1210
1211                 /*
1212                  * Send LS_ACC.  If this fails, the originator should retry.
1213                  */
1214                 f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ;
1215                 f_ctl |= FC_FC_END_SEQ | FC_FC_SEQ_INIT;
1216                 ep = fc_seq_exch(sp);
1217                 fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
1218                                FC_TYPE_ELS, f_ctl, 0);
1219                 lport->tt.seq_send(lport, sp, fp);
1220
1221                 /*
1222                  * Get lock and re-check state.
1223                  */
1224                 switch (rdata->rp_state) {
1225                 case RPORT_ST_PRLI:
1226                         fc_rport_enter_ready(rdata);
1227                         break;
1228                 case RPORT_ST_READY:
1229                         break;
1230                 default:
1231                         break;
1232                 }
1233         }
1234         fc_frame_free(rx_fp);
1235 }
1236
1237 /**
1238  * fc_rport_recv_prlo_req() - Handle incoming Process Logout (PRLO) request
1239  * @rdata: private remote port data
1240  * @sp: current sequence in the PRLO exchange
1241  * @fp: PRLO request frame
1242  *
1243  * Locking Note: The rport lock is expected to be held before calling
1244  * this function.
1245  */
1246 static void fc_rport_recv_prlo_req(struct fc_rport_priv *rdata,
1247                                    struct fc_seq *sp,
1248                                    struct fc_frame *fp)
1249 {
1250         struct fc_lport *lport = rdata->local_port;
1251
1252         struct fc_frame_header *fh;
1253         struct fc_seq_els_data rjt_data;
1254
1255         fh = fc_frame_header_get(fp);
1256
1257         FC_RPORT_DBG(rdata, "Received PRLO request while in state %s\n",
1258                      fc_rport_state(rdata));
1259
1260         if (rdata->rp_state == RPORT_ST_DELETE) {
1261                 fc_frame_free(fp);
1262                 return;
1263         }
1264
1265         rjt_data.fp = NULL;
1266         rjt_data.reason = ELS_RJT_UNAB;
1267         rjt_data.explan = ELS_EXPL_NONE;
1268         lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
1269         fc_frame_free(fp);
1270 }
1271
1272 /**
1273  * fc_rport_recv_logo_req() - Handle incoming Logout (LOGO) request
1274  * @rdata: private remote port data
1275  * @sp: current sequence in the LOGO exchange
1276  * @fp: LOGO request frame
1277  *
1278  * Locking Note: The rport lock is expected to be held before calling
1279  * this function.
1280  */
1281 static void fc_rport_recv_logo_req(struct fc_rport_priv *rdata,
1282                                    struct fc_seq *sp,
1283                                    struct fc_frame *fp)
1284 {
1285         struct fc_frame_header *fh;
1286         struct fc_lport *lport = rdata->local_port;
1287
1288         fh = fc_frame_header_get(fp);
1289
1290         FC_RPORT_DBG(rdata, "Received LOGO request while in state %s\n",
1291                      fc_rport_state(rdata));
1292
1293         if (rdata->rp_state == RPORT_ST_DELETE) {
1294                 fc_frame_free(fp);
1295                 return;
1296         }
1297
1298         rdata->event = RPORT_EV_LOGO;
1299         fc_rport_state_enter(rdata, RPORT_ST_DELETE);
1300         queue_work(rport_event_queue, &rdata->event_work);
1301
1302         lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL);
1303         fc_frame_free(fp);
1304 }
1305
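/**
 * fc_rport_flush_queue() - Flush the rport_event_queue
 */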
1306 static void fc_rport_flush_queue(void)
1307 {
1308         flush_workqueue(rport_event_queue);
1309 }
1310
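/**
 * fc_rport_init() - Fill in default rport handlers in a local port's template
 * @lport: local port whose transport template is being set up
 *
 * Only handlers that the low-level driver has not already provided are
 * filled in.
 */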
1311 int fc_rport_init(struct fc_lport *lport)
1312 {
1313         if (!lport->tt.rport_create)
1314                 lport->tt.rport_create = fc_rport_rogue_create;
1315
1316         if (!lport->tt.rport_login)
1317                 lport->tt.rport_login = fc_rport_login;
1318
1319         if (!lport->tt.rport_logoff)
1320                 lport->tt.rport_logoff = fc_rport_logoff;
1321
1322         if (!lport->tt.rport_recv_req)
1323                 lport->tt.rport_recv_req = fc_rport_recv_req;
1324
1325         if (!lport->tt.rport_flush_queue)
1326                 lport->tt.rport_flush_queue = fc_rport_flush_queue;
1327
1328         if (!lport->tt.rport_destroy)
1329                 lport->tt.rport_destroy = fc_rport_destroy;
1330
1331         return 0;
1332 }
1333 EXPORT_SYMBOL(fc_rport_init);
1334
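/**
 * fc_setup_rport() - Create the global rport_event_queue workqueue
 */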
1335 int fc_setup_rport(void)
1336 {
1337         rport_event_queue = create_singlethread_workqueue("fc_rport_eq");
1338         if (!rport_event_queue)
1339                 return -ENOMEM;
1340         return 0;
1341 }
1342 EXPORT_SYMBOL(fc_setup_rport);
1343
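/**
 * fc_destroy_rport() - Destroy the global rport_event_queue workqueue
 */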
1344 void fc_destroy_rport(void)
1345 {
1346         destroy_workqueue(rport_event_queue);
1347 }
1348 EXPORT_SYMBOL(fc_destroy_rport);
1349
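/**
 * fc_rport_terminate_io() - Stop all outstanding I/O with a remote port
 * @rport: remote port whose exchanges should be reset
 *
 * Resets the exchange manager for exchanges both to and from the rport's
 * port ID.
 */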
1350 void fc_rport_terminate_io(struct fc_rport *rport)
1351 {
1352         struct fc_rport_libfc_priv *rp = rport->dd_data;
1353         struct fc_lport *lport = rp->local_port;
1354
1355         lport->tt.exch_mgr_reset(lport, 0, rport->port_id);
1356         lport->tt.exch_mgr_reset(lport, rport->port_id, 0);
1357 }
1358 EXPORT_SYMBOL(fc_rport_terminate_io);