2 * iSCSI Initiator over TCP/IP Data-Path
4 * Copyright (C) 2004 Dmitry Yusupov
5 * Copyright (C) 2004 Alex Aizman
6 * Copyright (C) 2005 Mike Christie
7 * maintained by open-iscsi@googlegroups.com
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published
11 * by the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
19 * See the file COPYING included with this distribution for more details.
28 #include <linux/types.h>
29 #include <linux/list.h>
30 #include <linux/inet.h>
31 #include <linux/blkdev.h>
32 #include <linux/crypto.h>
33 #include <linux/delay.h>
34 #include <linux/kfifo.h>
35 #include <linux/scatterlist.h>
37 #include <scsi/scsi_cmnd.h>
38 #include <scsi/scsi_device.h>
39 #include <scsi/scsi_eh.h>
40 #include <scsi/scsi_request.h>
41 #include <scsi/scsi_tcq.h>
42 #include <scsi/scsi_host.h>
43 #include <scsi/scsi.h>
44 #include <scsi/scsi_transport_iscsi.h>
46 #include "iscsi_tcp.h"
48 MODULE_AUTHOR("Dmitry Yusupov <dmitry_yus@yahoo.com>, "
49 "Alex Aizman <itn780@yahoo.com>");
50 MODULE_DESCRIPTION("iSCSI/TCP data-path");
51 MODULE_LICENSE("GPL");
53 /* #define DEBUG_TCP */
54 /* #define DEBUG_SCSI */
57 #ifdef DEBUG_TCP
58 #define debug_tcp(fmt...) printk(KERN_DEBUG "tcp: " fmt)
59 #else
60 #define debug_tcp(fmt...)
61 #endif
63 #ifdef DEBUG_SCSI
64 #define debug_scsi(fmt...) printk(KERN_DEBUG "scsi: " fmt)
65 #else
66 #define debug_scsi(fmt...)
67 #endif
76 #define INVALID_SN_DELTA 0xffff
78 static unsigned int iscsi_max_lun = 512;
79 module_param_named(max_lun, iscsi_max_lun, uint, S_IRUGO);
82 static kmem_cache_t *taskcache;
85 iscsi_buf_init_virt(struct iscsi_buf *ibuf, char *vbuf, int size)
87 sg_init_one(&ibuf->sg, (u8 *)vbuf, size);
92 iscsi_buf_init_iov(struct iscsi_buf *ibuf, char *vbuf, int size)
94 ibuf->sg.page = (void*)vbuf;
95 ibuf->sg.offset = (unsigned int)-1;
96 ibuf->sg.length = size;
101 iscsi_buf_iov_base(struct iscsi_buf *ibuf)
103 return (char*)ibuf->sg.page + ibuf->sent;
107 iscsi_buf_init_sg(struct iscsi_buf *ibuf, struct scatterlist *sg)
110 * Fastpath: sg element fits into single page
112 if (sg->length + sg->offset <= PAGE_SIZE && page_count(sg->page) >= 2) {
113 ibuf->sg.page = sg->page;
114 ibuf->sg.offset = sg->offset;
115 ibuf->sg.length = sg->length;
117 iscsi_buf_init_iov(ibuf, page_address(sg->page), sg->length);
122 iscsi_buf_left(struct iscsi_buf *ibuf)
126 rc = ibuf->sg.length - ibuf->sent;
132 iscsi_buf_init_hdr(struct iscsi_conn *conn, struct iscsi_buf *ibuf,
135 iscsi_buf_init_virt(ibuf, vbuf, sizeof(struct iscsi_hdr));
136 if (conn->hdrdgst_en) {
137 crypto_digest_digest(conn->tx_tfm, &ibuf->sg, 1, crc);
138 ibuf->sg.length += sizeof(uint32_t);
143 iscsi_conn_failure(struct iscsi_conn *conn, enum iscsi_err err)
145 struct iscsi_session *session = conn->session;
148 spin_lock_irqsave(&session->lock, flags);
149 if (session->conn_cnt == 1 || session->leadconn == conn)
150 session->state = ISCSI_STATE_FAILED;
151 spin_unlock_irqrestore(&session->lock, flags);
152 set_bit(SUSPEND_BIT, &conn->suspend_tx);
153 set_bit(SUSPEND_BIT, &conn->suspend_rx);
154 iscsi_conn_error(iscsi_handle(conn), err);
158 iscsi_check_assign_cmdsn(struct iscsi_session *session, struct iscsi_nopin *hdr)
160 uint32_t max_cmdsn = be32_to_cpu(hdr->max_cmdsn);
161 uint32_t exp_cmdsn = be32_to_cpu(hdr->exp_cmdsn);
163 if (max_cmdsn < exp_cmdsn -1 &&
164 max_cmdsn > exp_cmdsn - INVALID_SN_DELTA)
165 return ISCSI_ERR_MAX_CMDSN;
166 if (max_cmdsn > session->max_cmdsn ||
167 max_cmdsn < session->max_cmdsn - INVALID_SN_DELTA)
168 session->max_cmdsn = max_cmdsn;
169 if (exp_cmdsn > session->exp_cmdsn ||
170 exp_cmdsn < session->exp_cmdsn - INVALID_SN_DELTA)
171 session->exp_cmdsn = exp_cmdsn;
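/*
 * Illustrative example of the wrap-safe window update above: with
 * session->max_cmdsn == 0xfffffffe, a received MaxCmdSN of 0x00000003
 * fails the plain "max_cmdsn > session->max_cmdsn" test, but
 * 0x00000003 < 0xfffffffe - INVALID_SN_DELTA (== 0xfffeffff) holds,
 * so the window still advances across the 32-bit wrap.  A value that
 * is within INVALID_SN_DELTA behind the cached one is treated as
 * stale and the cached value is kept.
 */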
177 iscsi_hdr_extract(struct iscsi_conn *conn)
179 struct sk_buff *skb = conn->in.skb;
181 if (conn->in.copy >= conn->hdr_size &&
182 conn->in_progress == IN_PROGRESS_WAIT_HEADER) {
184 * Zero-copy PDU Header: using connection context
185 * to store header pointer.
187 if (skb_shinfo(skb)->frag_list == NULL &&
188 !skb_shinfo(skb)->nr_frags)
189 conn->in.hdr = (struct iscsi_hdr *)
190 ((char*)skb->data + conn->in.offset);
192 /* ignoring return code since we checked conn->in.copy above */
194 skb_copy_bits(skb, conn->in.offset,
195 &conn->hdr, conn->hdr_size);
196 conn->in.hdr = &conn->hdr;
198 conn->in.offset += conn->hdr_size;
199 conn->in.copy -= conn->hdr_size;
205 * PDU header scattered across SKB's,
206 * copying it... This'll happen quite rarely.
209 if (conn->in_progress == IN_PROGRESS_WAIT_HEADER)
210 conn->in.hdr_offset = 0;
212 hdr_remains = conn->hdr_size - conn->in.hdr_offset;
213 BUG_ON(hdr_remains <= 0);
215 copylen = min(conn->in.copy, hdr_remains);
216 skb_copy_bits(skb, conn->in.offset,
217 (char*)&conn->hdr + conn->in.hdr_offset, copylen);
219 debug_tcp("PDU gather offset %d bytes %d in.offset %d "
220 "in.copy %d\n", conn->in.hdr_offset, copylen,
221 conn->in.offset, conn->in.copy);
223 conn->in.offset += copylen;
224 conn->in.copy -= copylen;
225 if (copylen < hdr_remains) {
226 conn->in_progress = IN_PROGRESS_HEADER_GATHER;
227 conn->in.hdr_offset += copylen;
230 conn->in.hdr = &conn->hdr;
231 conn->discontiguous_hdr_cnt++;
232 conn->in_progress = IN_PROGRESS_WAIT_HEADER;
239 iscsi_ctask_cleanup(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
241 struct scsi_cmnd *sc = ctask->sc;
242 struct iscsi_session *session = conn->session;
244 spin_lock(&session->lock);
246 spin_unlock(&session->lock);
249 if (sc->sc_data_direction == DMA_TO_DEVICE) {
250 struct iscsi_data_task *dtask, *n;
251 /* WRITE: cleanup Data-Out's if any */
252 spin_lock(&conn->lock);
253 list_for_each_entry_safe(dtask, n, &ctask->dataqueue, item) {
254 list_del(&dtask->item);
255 mempool_free(dtask, ctask->datapool);
257 spin_unlock(&conn->lock);
259 ctask->xmstate = XMSTATE_IDLE;
262 __kfifo_put(session->cmdpool.queue, (void*)&ctask, sizeof(void*));
263 spin_unlock(&session->lock);
267 * iscsi_cmd_rsp - SCSI Command Response processing
268 * @conn: iscsi connection
269 * @ctask: scsi command task
272 iscsi_cmd_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
275 struct iscsi_cmd_rsp *rhdr = (struct iscsi_cmd_rsp *)conn->in.hdr;
276 struct iscsi_session *session = conn->session;
277 struct scsi_cmnd *sc = ctask->sc;
279 rc = iscsi_check_assign_cmdsn(session, (struct iscsi_nopin*)rhdr);
281 sc->result = (DID_ERROR << 16);
285 conn->exp_statsn = be32_to_cpu(rhdr->statsn) + 1;
287 sc->result = (DID_OK << 16) | rhdr->cmd_status;
289 if (rhdr->response != ISCSI_STATUS_CMD_COMPLETED) {
290 sc->result = (DID_ERROR << 16);
294 if (rhdr->cmd_status == SAM_STAT_CHECK_CONDITION && conn->senselen) {
295 int sensecopy = min(conn->senselen, SCSI_SENSE_BUFFERSIZE);
297 memcpy(sc->sense_buffer, conn->data + 2, sensecopy);
298 debug_scsi("copied %d bytes of sense\n", sensecopy);
301 if (sc->sc_data_direction == DMA_TO_DEVICE)
304 if (rhdr->flags & ISCSI_FLAG_CMD_UNDERFLOW) {
305 int res_count = be32_to_cpu(rhdr->residual_count);
307 if (res_count > 0 && res_count <= sc->request_bufflen)
308 sc->resid = res_count;
310 sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
311 } else if (rhdr->flags & ISCSI_FLAG_CMD_BIDI_UNDERFLOW)
312 sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
313 else if (rhdr->flags & ISCSI_FLAG_CMD_OVERFLOW)
314 sc->resid = be32_to_cpu(rhdr->residual_count);
317 debug_scsi("done [sc %lx res %d itt 0x%x]\n",
318 (long)sc, sc->result, ctask->itt);
319 conn->scsirsp_pdus_cnt++;
320 iscsi_ctask_cleanup(conn, ctask);
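/*
 * Residual handling example for the underflow path above: on a
 * 4096-byte READ where the target returns only 3584 bytes, the
 * response carries ISCSI_FLAG_CMD_UNDERFLOW with a residual count of
 * 512; sc->resid is set to 512 and the command still completes with
 * DID_OK.  A residual count of zero or one larger than
 * sc->request_bufflen is treated as a target error (DID_BAD_TARGET).
 */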
326 * iscsi_data_rsp - SCSI Data-In Response processing
327 * @conn: iscsi connection
328 * @ctask: scsi command task
331 iscsi_data_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
334 struct iscsi_data_rsp *rhdr = (struct iscsi_data_rsp *)conn->in.hdr;
335 struct iscsi_session *session = conn->session;
336 int datasn = be32_to_cpu(rhdr->datasn);
338 rc = iscsi_check_assign_cmdsn(session, (struct iscsi_nopin*)rhdr);
342 * setup Data-In byte counter (gets decremented..)
344 ctask->data_count = conn->in.datalen;
346 if (conn->in.datalen == 0)
349 if (ctask->datasn != datasn)
350 return ISCSI_ERR_DATASN;
354 ctask->data_offset = be32_to_cpu(rhdr->offset);
355 if (ctask->data_offset + conn->in.datalen > ctask->total_length)
356 return ISCSI_ERR_DATA_OFFSET;
358 if (rhdr->flags & ISCSI_FLAG_DATA_STATUS) {
359 struct scsi_cmnd *sc = ctask->sc;
361 conn->exp_statsn = be32_to_cpu(rhdr->statsn) + 1;
362 if (rhdr->flags & ISCSI_FLAG_CMD_UNDERFLOW) {
363 int res_count = be32_to_cpu(rhdr->residual_count);
366 res_count <= sc->request_bufflen) {
367 sc->resid = res_count;
368 sc->result = (DID_OK << 16) | rhdr->cmd_status;
370 sc->result = (DID_BAD_TARGET << 16) |
372 } else if (rhdr->flags & ISCSI_FLAG_CMD_BIDI_UNDERFLOW)
373 sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
374 else if (rhdr->flags & ISCSI_FLAG_CMD_OVERFLOW) {
375 sc->resid = be32_to_cpu(rhdr->residual_count);
376 sc->result = (DID_OK << 16) | rhdr->cmd_status;
378 sc->result = (DID_OK << 16) | rhdr->cmd_status;
381 conn->datain_pdus_cnt++;
386 * iscsi_solicit_data_init - initialize first Data-Out
387 * @conn: iscsi connection
388 * @ctask: scsi command task
392 * Initialize the first Data-Out within this R2T sequence and find the
393 * proper data_offset within this SCSI command.
395 * This function is called with connection lock taken.
398 iscsi_solicit_data_init(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
399 struct iscsi_r2t_info *r2t)
401 struct iscsi_data *hdr;
402 struct iscsi_data_task *dtask;
403 struct scsi_cmnd *sc = ctask->sc;
405 dtask = mempool_alloc(ctask->datapool, GFP_ATOMIC);
408 memset(hdr, 0, sizeof(struct iscsi_data));
410 hdr->datasn = cpu_to_be32(r2t->solicit_datasn);
411 r2t->solicit_datasn++;
412 hdr->opcode = ISCSI_OP_SCSI_DATA_OUT;
413 memcpy(hdr->lun, ctask->hdr.lun, sizeof(hdr->lun));
414 hdr->itt = ctask->hdr.itt;
415 hdr->exp_statsn = r2t->exp_statsn;
416 hdr->offset = cpu_to_be32(r2t->data_offset);
417 if (r2t->data_length > conn->max_xmit_dlength) {
418 hton24(hdr->dlength, conn->max_xmit_dlength);
419 r2t->data_count = conn->max_xmit_dlength;
422 hton24(hdr->dlength, r2t->data_length);
423 r2t->data_count = r2t->data_length;
424 hdr->flags = ISCSI_FLAG_CMD_FINAL;
426 conn->dataout_pdus_cnt++;
430 iscsi_buf_init_hdr(conn, &r2t->headbuf, (char*)hdr,
431 (u8 *)dtask->hdrext);
437 struct scatterlist *sg = sc->request_buffer;
440 for (i = 0; i < sc->use_sg; i++, sg += 1) {
441 /* FIXME: prefetch ? */
442 if (sg_count + sg->length > r2t->data_offset) {
447 /* offset within this page */
448 page_offset = r2t->data_offset - sg_count;
450 /* fill in this buffer */
451 iscsi_buf_init_sg(&r2t->sendbuf, sg);
452 r2t->sendbuf.sg.offset += page_offset;
453 r2t->sendbuf.sg.length -= page_offset;
455 /* xmit logic will continue with next one */
459 sg_count += sg->length;
461 BUG_ON(r2t->sg == NULL);
463 iscsi_buf_init_iov(&ctask->sendbuf,
464 (char*)sc->request_buffer + r2t->data_offset,
467 list_add(&dtask->item, &ctask->dataqueue);
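/*
 * Scatterlist walk example for the loop above: with three 4096-byte
 * sg elements and r2t->data_offset == 5000, the first element is
 * skipped (sg_count becomes 4096), the offset lands in the second
 * element at page_offset == 904, and sendbuf is set up to start 904
 * bytes into that element; later Data-Outs continue from the
 * following elements.
 */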
471 * iscsi_r2t_rsp - iSCSI R2T Response processing
472 * @conn: iscsi connection
473 * @ctask: scsi command task
476 iscsi_r2t_rsp(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
478 struct iscsi_r2t_info *r2t;
479 struct iscsi_session *session = conn->session;
480 struct iscsi_r2t_rsp *rhdr = (struct iscsi_r2t_rsp *)conn->in.hdr;
481 int r2tsn = be32_to_cpu(rhdr->r2tsn);
485 return ISCSI_ERR_AHSLEN;
487 if (conn->in.datalen)
488 return ISCSI_ERR_DATALEN;
490 if (ctask->exp_r2tsn && ctask->exp_r2tsn != r2tsn)
491 return ISCSI_ERR_R2TSN;
493 rc = iscsi_check_assign_cmdsn(session, (struct iscsi_nopin*)rhdr);
497 /* FIXME: use R2TSN to detect missing R2T */
499 /* fill-in new R2T associated with the task */
500 spin_lock(&session->lock);
501 if (!ctask->sc || ctask->mtask ||
502 session->state != ISCSI_STATE_LOGGED_IN) {
503 printk(KERN_INFO "iscsi_tcp: dropping R2T itt %d in "
504 "recovery...\n", ctask->itt);
505 spin_unlock(&session->lock);
508 rc = __kfifo_get(ctask->r2tpool.queue, (void*)&r2t, sizeof(void*));
511 r2t->exp_statsn = rhdr->statsn;
512 r2t->data_length = be32_to_cpu(rhdr->data_length);
513 if (r2t->data_length == 0 ||
514 r2t->data_length > session->max_burst) {
515 spin_unlock(&session->lock);
516 return ISCSI_ERR_DATALEN;
519 r2t->data_offset = be32_to_cpu(rhdr->data_offset);
520 if (r2t->data_offset + r2t->data_length > ctask->total_length) {
521 spin_unlock(&session->lock);
522 return ISCSI_ERR_DATALEN;
525 r2t->ttt = rhdr->ttt; /* no flip */
526 r2t->solicit_datasn = 0;
528 iscsi_solicit_data_init(conn, ctask, r2t);
530 ctask->exp_r2tsn = r2tsn + 1;
531 ctask->xmstate |= XMSTATE_SOL_HDR;
532 __kfifo_put(ctask->r2tqueue, (void*)&r2t, sizeof(void*));
533 __kfifo_put(conn->writequeue, (void*)&ctask, sizeof(void*));
535 schedule_work(&conn->xmitwork);
536 conn->r2t_pdus_cnt++;
537 spin_unlock(&session->lock);
543 iscsi_hdr_recv(struct iscsi_conn *conn)
546 struct iscsi_hdr *hdr;
547 struct iscsi_cmd_task *ctask;
548 struct iscsi_session *session = conn->session;
549 uint32_t cdgst, rdgst = 0;
553 /* verify PDU length */
554 conn->in.datalen = ntoh24(hdr->dlength);
555 if (conn->in.datalen > conn->max_recv_dlength) {
556 printk(KERN_ERR "iscsi_tcp: datalen %d > %d\n",
557 conn->in.datalen, conn->max_recv_dlength);
558 return ISCSI_ERR_DATALEN;
560 conn->data_copied = 0;
563 conn->in.ahslen = hdr->hlength*(4*sizeof(__u16));
564 conn->in.offset += conn->in.ahslen;
565 conn->in.copy -= conn->in.ahslen;
566 if (conn->in.copy < 0) {
567 printk(KERN_ERR "iscsi_tcp: can't handle AHS with length "
568 "%d bytes\n", conn->in.ahslen);
569 return ISCSI_ERR_AHSLEN;
572 /* calculate read padding */
573 conn->in.padding = conn->in.datalen & (ISCSI_PAD_LEN-1);
574 if (conn->in.padding) {
575 conn->in.padding = ISCSI_PAD_LEN - conn->in.padding;
576 debug_scsi("read padding %d bytes\n", conn->in.padding);
579 if (conn->hdrdgst_en) {
580 struct scatterlist sg;
582 sg_init_one(&sg, (u8 *)hdr,
583 sizeof(struct iscsi_hdr) + conn->in.ahslen);
584 crypto_digest_digest(conn->rx_tfm, &sg, 1, (u8 *)&cdgst);
585 rdgst = *(uint32_t*)((char*)hdr + sizeof(struct iscsi_hdr) +
589 /* save opcode for later */
590 conn->in.opcode = hdr->opcode;
592 /* verify itt (itt encoding: age+cid+itt) */
593 if (hdr->itt != cpu_to_be32(ISCSI_RESERVED_TAG)) {
594 if ((hdr->itt & AGE_MASK) !=
595 (session->age << AGE_SHIFT)) {
596 printk(KERN_ERR "iscsi_tcp: received itt %x expected "
597 "session age (%x)\n", hdr->itt,
598 session->age & AGE_MASK);
599 return ISCSI_ERR_BAD_ITT;
602 if ((hdr->itt & CID_MASK) != (conn->id << CID_SHIFT)) {
603 printk(KERN_ERR "iscsi_tcp: received itt %x, expected "
604 "CID (%x)\n", hdr->itt, conn->id);
605 return ISCSI_ERR_BAD_ITT;
607 conn->in.itt = hdr->itt & ITT_MASK;
609 conn->in.itt = hdr->itt;
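/*
 * ITT encoding used by this driver (see iscsi_cmd_init()): the task's
 * index into session->cmds[] lives in the low bits (ITT_MASK), the
 * connection id is shifted in at CID_SHIFT and the session age at
 * AGE_SHIFT, i.e. roughly
 *
 *	itt = index | (conn->id << CID_SHIFT) | (session->age << AGE_SHIFT)
 *
 * The checks above reject PDUs whose age or cid fields do not match
 * the current session/connection; the masked index is what gets
 * looked up below.
 */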
611 debug_tcp("opcode 0x%x offset %d copy %d ahslen %d datalen %d\n",
612 hdr->opcode, conn->in.offset, conn->in.copy,
613 conn->in.ahslen, conn->in.datalen);
615 if (conn->in.itt < session->cmds_max) {
616 if (conn->hdrdgst_en && cdgst != rdgst) {
617 printk(KERN_ERR "iscsi_tcp: itt %x: hdrdgst error "
618 "recv 0x%x calc 0x%x\n", conn->in.itt, rdgst,
620 return ISCSI_ERR_HDR_DGST;
623 ctask = (struct iscsi_cmd_task *)session->cmds[conn->in.itt];
626 printk(KERN_INFO "iscsi_tcp: dropping ctask with "
627 "itt 0x%x\n", ctask->itt);
628 conn->in.datalen = 0; /* force drop */
632 if (ctask->sc->SCp.phase != session->age) {
633 printk(KERN_ERR "iscsi_tcp: ctask's session age %d, "
634 "expected %d\n", ctask->sc->SCp.phase,
636 return ISCSI_ERR_SESSION_FAILED;
639 conn->in.ctask = ctask;
641 debug_scsi("rsp [op 0x%x cid %d sc %lx itt 0x%x len %d]\n",
642 hdr->opcode, conn->id, (long)ctask->sc,
643 ctask->itt, conn->in.datalen);
645 switch(conn->in.opcode) {
646 case ISCSI_OP_SCSI_CMD_RSP:
647 BUG_ON((void*)ctask != ctask->sc->SCp.ptr);
648 if (ctask->hdr.flags & ISCSI_FLAG_CMD_WRITE)
649 rc = iscsi_cmd_rsp(conn, ctask);
650 else if (!conn->in.datalen)
651 rc = iscsi_cmd_rsp(conn, ctask);
654 * got sense or response data; copying PDU
655 * Header to the connection's header
658 memcpy(&conn->hdr, hdr,
659 sizeof(struct iscsi_hdr));
661 case ISCSI_OP_SCSI_DATA_IN:
662 BUG_ON((void*)ctask != ctask->sc->SCp.ptr);
663 /* save flags for non-exceptional status */
664 conn->in.flags = hdr->flags;
665 /* save cmd_status for sense data */
666 conn->in.cmd_status =
667 ((struct iscsi_data_rsp*)hdr)->cmd_status;
668 rc = iscsi_data_rsp(conn, ctask);
671 BUG_ON((void*)ctask != ctask->sc->SCp.ptr);
672 if (ctask->hdr.flags & ISCSI_FLAG_CMD_WRITE &&
673 ctask->sc->sc_data_direction == DMA_TO_DEVICE)
674 rc = iscsi_r2t_rsp(conn, ctask);
676 rc = ISCSI_ERR_PROTO;
678 case ISCSI_OP_NOOP_IN:
679 case ISCSI_OP_TEXT_RSP:
680 case ISCSI_OP_LOGOUT_RSP:
681 case ISCSI_OP_ASYNC_EVENT:
682 case ISCSI_OP_REJECT:
683 rc = iscsi_check_assign_cmdsn(session,
684 (struct iscsi_nopin*)hdr);
688 /* update ExpStatSN */
689 conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
690 if (!conn->in.datalen) {
691 struct iscsi_mgmt_task *mtask;
693 rc = iscsi_recv_pdu(iscsi_handle(conn), hdr,
695 mtask = (struct iscsi_mgmt_task *)
696 session->mgmt_cmds[conn->in.itt -
697 ISCSI_MGMT_ITT_OFFSET];
698 if (conn->login_mtask != mtask) {
699 spin_lock(&session->lock);
700 __kfifo_put(session->mgmtpool.queue,
701 (void*)&mtask, sizeof(void*));
702 spin_unlock(&session->lock);
707 rc = ISCSI_ERR_BAD_OPCODE;
710 } else if (conn->in.itt >= ISCSI_MGMT_ITT_OFFSET &&
711 conn->in.itt < ISCSI_MGMT_ITT_OFFSET +
712 session->mgmtpool_max) {
713 struct iscsi_mgmt_task *mtask = (struct iscsi_mgmt_task *)
714 session->mgmt_cmds[conn->in.itt -
715 ISCSI_MGMT_ITT_OFFSET];
717 debug_scsi("immrsp [op 0x%x cid %d itt 0x%x len %d]\n",
718 conn->in.opcode, conn->id, mtask->itt,
721 switch(conn->in.opcode) {
722 case ISCSI_OP_LOGIN_RSP:
723 case ISCSI_OP_TEXT_RSP:
724 rc = iscsi_check_assign_cmdsn(session,
725 (struct iscsi_nopin*)hdr);
729 if (!conn->in.datalen) {
730 rc = iscsi_recv_pdu(iscsi_handle(conn), hdr,
732 if (conn->login_mtask != mtask) {
733 spin_lock(&session->lock);
734 __kfifo_put(session->mgmtpool.queue,
735 (void*)&mtask, sizeof(void*));
736 spin_unlock(&session->lock);
740 case ISCSI_OP_SCSI_TMFUNC_RSP:
741 rc = iscsi_check_assign_cmdsn(session,
742 (struct iscsi_nopin*)hdr);
746 if (conn->in.datalen || conn->in.ahslen) {
747 rc = ISCSI_ERR_PROTO;
750 conn->tmfrsp_pdus_cnt++;
751 spin_lock(&session->lock);
752 if (conn->tmabort_state == TMABORT_INITIAL) {
753 __kfifo_put(session->mgmtpool.queue,
754 (void*)&mtask, sizeof(void*));
755 conn->tmabort_state =
756 ((struct iscsi_tm_rsp *)hdr)->
757 response == SCSI_TCP_TM_RESP_COMPLETE ?
758 TMABORT_SUCCESS:TMABORT_FAILED;
759 /* unblock eh_abort() */
760 wake_up(&conn->ehwait);
762 spin_unlock(&session->lock);
765 rc = ISCSI_ERR_BAD_OPCODE;
768 } else if (conn->in.itt == ISCSI_RESERVED_TAG) {
769 if (conn->in.opcode == ISCSI_OP_NOOP_IN && !conn->in.datalen) {
770 rc = iscsi_check_assign_cmdsn(session,
771 (struct iscsi_nopin*)hdr);
773 rc = iscsi_recv_pdu(iscsi_handle(conn),
777 rc = ISCSI_ERR_BAD_OPCODE;
779 rc = ISCSI_ERR_BAD_ITT;
785 * iscsi_ctask_copy - copy skb bits to the destination cmd task
786 * @conn: iscsi connection
787 * @ctask: scsi command task
788 * @buf: buffer to copy to
789 * @buf_size: size of buffer
790 * @offset: offset within the buffer
793 * The function calls skb_copy_bits() and updates per-connection and
794 * per-cmd byte counters.
796 * Read counters (in bytes):
798 * conn->in.offset offset within in progress SKB
799 * conn->in.copy left to copy from in progress SKB
801 * conn->in.copied copied already from in progress SKB
802 * conn->data_copied copied already from in progress buffer
803 * ctask->sent total bytes sent up to the MidLayer
804 * ctask->data_count left to copy from in progress Data-In
805 * buf_left left to copy from in progress buffer
808 iscsi_ctask_copy(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
809 void *buf, int buf_size, int offset)
811 int buf_left = buf_size - (conn->data_copied + offset);
812 int size = min(conn->in.copy, buf_left);
815 size = min(size, ctask->data_count);
817 debug_tcp("ctask_copy %d bytes at offset %d copied %d\n",
818 size, conn->in.offset, conn->in.copied);
821 BUG_ON(ctask->sent + size > ctask->total_length);
823 rc = skb_copy_bits(conn->in.skb, conn->in.offset,
824 (char*)buf + (offset + conn->data_copied), size);
825 /* must fit into skb->len */
828 conn->in.offset += size;
829 conn->in.copy -= size;
830 conn->in.copied += size;
831 conn->data_copied += size;
833 ctask->data_count -= size;
835 BUG_ON(conn->in.copy < 0);
836 BUG_ON(ctask->data_count < 0);
838 if (buf_size != (conn->data_copied + offset)) {
839 if (!ctask->data_count) {
840 BUG_ON(buf_size - conn->data_copied < 0);
841 /* done with this PDU */
842 return buf_size - conn->data_copied;
847 /* done with this buffer or with both - PDU and buffer */
848 conn->data_copied = 0;
853 * iscsi_tcp_copy - copy skb bits to the destination buffer
854 * @conn: iscsi connection
855 * @buf: buffer to copy to
856 * @buf_size: number of bytes to copy
859 * The function calls skb_copy_bits() and updates per-connection
863 iscsi_tcp_copy(struct iscsi_conn *conn, void *buf, int buf_size)
865 int buf_left = buf_size - conn->data_copied;
866 int size = min(conn->in.copy, buf_left);
869 debug_tcp("tcp_copy %d bytes at offset %d copied %d\n",
870 size, conn->in.offset, conn->data_copied);
873 rc = skb_copy_bits(conn->in.skb, conn->in.offset,
874 (char*)buf + conn->data_copied, size);
877 conn->in.offset += size;
878 conn->in.copy -= size;
879 conn->in.copied += size;
880 conn->data_copied += size;
882 if (buf_size != conn->data_copied)
889 partial_sg_digest_update(struct iscsi_conn *conn, struct scatterlist *sg,
890 int offset, int length)
892 struct scatterlist temp;
894 memcpy(&temp, sg, sizeof(struct scatterlist));
895 temp.offset = offset;
896 temp.length = length;
897 crypto_digest_update(conn->data_rx_tfm, &temp, 1);
900 static int iscsi_scsi_data_in(struct iscsi_conn *conn)
902 struct iscsi_cmd_task *ctask = conn->in.ctask;
903 struct scsi_cmnd *sc = ctask->sc;
904 struct scatterlist tmp, *sg;
905 int i, offset, rc = 0;
907 BUG_ON((void*)ctask != sc->SCp.ptr);
910 * copying Data-In into the Scsi_Cmnd
913 i = ctask->data_count;
914 rc = iscsi_ctask_copy(conn, ctask, sc->request_buffer,
915 sc->request_bufflen, ctask->data_offset);
918 if (conn->datadgst_en) {
919 sg_init_one(&tmp, sc->request_buffer, i);
920 crypto_digest_update(conn->data_rx_tfm, &tmp, 1);
926 offset = ctask->data_offset;
927 sg = sc->request_buffer;
929 if (ctask->data_offset)
930 for (i = 0; i < ctask->sg_count; i++)
931 offset -= sg[i].length;
932 /* we've passed through a partial sg */
936 for (i = ctask->sg_count; i < sc->use_sg; i++) {
939 dest = kmap_atomic(sg[i].page, KM_SOFTIRQ0);
940 rc = iscsi_ctask_copy(conn, ctask, dest + sg[i].offset,
941 sg[i].length, offset);
942 kunmap_atomic(dest, KM_SOFTIRQ0);
944 /* continue with the next SKB/PDU */
947 if (conn->datadgst_en) {
949 crypto_digest_update(conn->data_rx_tfm,
952 partial_sg_digest_update(conn, &sg[i],
953 sg[i].offset + offset,
954 sg[i].length - offset);
960 if (!ctask->data_count) {
961 if (rc && conn->datadgst_en)
963 * data-in is complete, but buffer not...
965 partial_sg_digest_update(conn, &sg[i],
966 sg[i].offset, sg[i].length-rc);
974 BUG_ON(ctask->data_count);
977 /* check for non-exceptional status */
978 if (conn->in.flags & ISCSI_FLAG_DATA_STATUS) {
979 debug_scsi("done [sc %lx res %d itt 0x%x]\n",
980 (long)sc, sc->result, ctask->itt);
981 conn->scsirsp_pdus_cnt++;
982 iscsi_ctask_cleanup(conn, ctask);
990 iscsi_data_recv(struct iscsi_conn *conn)
992 struct iscsi_session *session = conn->session;
995 switch(conn->in.opcode) {
996 case ISCSI_OP_SCSI_DATA_IN:
997 rc = iscsi_scsi_data_in(conn);
999 case ISCSI_OP_SCSI_CMD_RSP: {
1002 * copying the entire Data Segment.
1004 if (iscsi_tcp_copy(conn, conn->data, conn->in.datalen)) {
1012 conn->in.hdr = &conn->hdr;
1013 conn->senselen = (conn->data[0] << 8) | conn->data[1];
1014 rc = iscsi_cmd_rsp(conn, conn->in.ctask);
1017 case ISCSI_OP_TEXT_RSP:
1018 case ISCSI_OP_LOGIN_RSP:
1019 case ISCSI_OP_NOOP_IN: {
1020 struct iscsi_mgmt_task *mtask = NULL;
1022 if (conn->in.itt != ISCSI_RESERVED_TAG)
1023 mtask = (struct iscsi_mgmt_task *)
1024 session->mgmt_cmds[conn->in.itt -
1025 ISCSI_MGMT_ITT_OFFSET];
1028 * Collect the data segment into the connection's data buffer
1031 if (iscsi_tcp_copy(conn, conn->data, conn->in.datalen)) {
1036 rc = iscsi_recv_pdu(iscsi_handle(conn), conn->in.hdr,
1037 conn->data, conn->in.datalen);
1039 if (mtask && conn->login_mtask != mtask) {
1040 spin_lock(&session->lock);
1041 __kfifo_put(session->mgmtpool.queue, (void*)&mtask,
1043 spin_unlock(&session->lock);
1055 * iscsi_tcp_data_recv - TCP receive in sendfile fashion
1056 * @rd_desc: read descriptor
1057 * @skb: socket buffer
1058 * @offset: offset in skb
1059 * @len: skb->len - offset
1062 iscsi_tcp_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
1063 unsigned int offset, size_t len)
1066 struct iscsi_conn *conn = rd_desc->arg.data;
1067 int start = skb_headlen(skb);
1069 char pad[ISCSI_PAD_LEN];
1070 struct scatterlist sg;
1073 * Save current SKB and its offset in the corresponding
1074 * connection context.
1076 conn->in.copy = start - offset;
1077 conn->in.offset = offset;
1079 conn->in.len = conn->in.copy;
1080 BUG_ON(conn->in.copy <= 0);
1081 debug_tcp("in %d bytes\n", conn->in.copy);
1084 conn->in.copied = 0;
1087 if (unlikely(conn->suspend_rx)) {
1088 debug_tcp("conn %d Rx suspended!\n", conn->id);
1092 if (conn->in_progress == IN_PROGRESS_WAIT_HEADER ||
1093 conn->in_progress == IN_PROGRESS_HEADER_GATHER) {
1094 rc = iscsi_hdr_extract(conn);
1099 iscsi_conn_failure(conn, rc);
1105 * Verify and process incoming PDU header.
1107 rc = iscsi_hdr_recv(conn);
1108 if (!rc && conn->in.datalen) {
1109 if (conn->datadgst_en &&
1110 conn->in.opcode == ISCSI_OP_SCSI_DATA_IN) {
1111 BUG_ON(!conn->data_rx_tfm);
1112 crypto_digest_init(conn->data_rx_tfm);
1114 conn->in_progress = IN_PROGRESS_DATA_RECV;
1116 iscsi_conn_failure(conn, rc);
1121 if (conn->in_progress == IN_PROGRESS_DDIGEST_RECV) {
1122 debug_tcp("extra data_recv offset %d copy %d\n",
1123 conn->in.offset, conn->in.copy);
1124 if (conn->in.opcode == ISCSI_OP_SCSI_DATA_IN) {
1125 uint32_t recv_digest;
1126 skb_copy_bits(conn->in.skb, conn->in.offset,
1128 conn->in.offset += 4;
1130 if (recv_digest != conn->in.datadgst) {
1131 debug_tcp("iscsi_tcp: data digest error!"
1132 "0x%x != 0x%x\n", recv_digest,
1134 iscsi_conn_failure(conn, ISCSI_ERR_DATA_DGST);
1137 debug_tcp("iscsi_tcp: data digest match!"
1138 "0x%x == 0x%x\n", recv_digest,
1140 conn->in_progress = IN_PROGRESS_WAIT_HEADER;
1145 if (conn->in_progress == IN_PROGRESS_DATA_RECV && conn->in.copy) {
1147 debug_tcp("data_recv offset %d copy %d\n",
1148 conn->in.offset, conn->in.copy);
1150 rc = iscsi_data_recv(conn);
1152 if (rc == -EAGAIN) {
1153 rd_desc->count = conn->in.datalen -
1154 conn->in.ctask->data_count;
1157 iscsi_conn_failure(conn, rc);
1160 conn->in.copy -= conn->in.padding;
1161 conn->in.offset += conn->in.padding;
1162 if (conn->datadgst_en &&
1163 conn->in.opcode == ISCSI_OP_SCSI_DATA_IN) {
1164 if (conn->in.padding) {
1165 debug_tcp("padding -> %d\n", conn->in.padding);
1166 memset(pad, 0, conn->in.padding);
1167 sg_init_one(&sg, pad, conn->in.padding);
1168 crypto_digest_update(conn->data_rx_tfm, &sg, 1);
1170 crypto_digest_final(conn->data_rx_tfm,
1171 (u8 *) & conn->in.datadgst);
1172 debug_tcp("rx digest 0x%x\n", conn->in.datadgst);
1173 conn->in_progress = IN_PROGRESS_DDIGEST_RECV;
1175 conn->in_progress = IN_PROGRESS_WAIT_HEADER;
1178 debug_tcp("f, processed %d from out of %d padding %d\n",
1179 conn->in.offset - offset, (int)len, conn->in.padding);
1180 BUG_ON(conn->in.offset - offset > len);
1182 if (conn->in.offset - offset != len) {
1183 debug_tcp("continue to process %d bytes\n",
1184 (int)len - (conn->in.offset - offset));
1189 processed = conn->in.offset - offset;
1190 BUG_ON(processed == 0);
1194 processed = conn->in.offset - offset;
1195 debug_tcp("c, processed %d from out of %d rd_desc_cnt %d\n",
1196 processed, (int)len, (int)rd_desc->count);
1197 BUG_ON(processed == 0);
1198 BUG_ON(processed > len);
1200 conn->rxdata_octets += processed;
1205 iscsi_tcp_data_ready(struct sock *sk, int flag)
1207 struct iscsi_conn *conn = sk->sk_user_data;
1208 read_descriptor_t rd_desc;
1210 read_lock(&sk->sk_callback_lock);
1212 /* use rd_desc to pass 'conn' to iscsi_tcp_data_recv */
1213 rd_desc.arg.data = conn;
1215 tcp_read_sock(sk, &rd_desc, iscsi_tcp_data_recv);
1217 read_unlock(&sk->sk_callback_lock);
1221 iscsi_tcp_state_change(struct sock *sk)
1223 struct iscsi_conn *conn;
1224 struct iscsi_session *session;
1225 void (*old_state_change)(struct sock *);
1227 read_lock(&sk->sk_callback_lock);
1229 conn = (struct iscsi_conn*)sk->sk_user_data;
1230 session = conn->session;
1232 if (sk->sk_state == TCP_CLOSE_WAIT ||
1233 sk->sk_state == TCP_CLOSE) {
1234 debug_tcp("iscsi_tcp_state_change: TCP_CLOSE|TCP_CLOSE_WAIT\n");
1235 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
1238 old_state_change = conn->old_state_change;
1240 read_unlock(&sk->sk_callback_lock);
1242 old_state_change(sk);
1246 * iscsi_write_space - Called when more output buffer space is available
1247 * @sk: socket space is available for
1250 iscsi_write_space(struct sock *sk)
1252 struct iscsi_conn *conn = (struct iscsi_conn*)sk->sk_user_data;
1253 conn->old_write_space(sk);
1254 debug_tcp("iscsi_write_space: cid %d\n", conn->id);
1255 clear_bit(SUSPEND_BIT, &conn->suspend_tx);
1256 schedule_work(&conn->xmitwork);
1260 iscsi_conn_set_callbacks(struct iscsi_conn *conn)
1262 struct sock *sk = conn->sock->sk;
1264 /* assign new callbacks */
1265 write_lock_bh(&sk->sk_callback_lock);
1266 sk->sk_user_data = conn;
1267 conn->old_data_ready = sk->sk_data_ready;
1268 conn->old_state_change = sk->sk_state_change;
1269 conn->old_write_space = sk->sk_write_space;
1270 sk->sk_data_ready = iscsi_tcp_data_ready;
1271 sk->sk_state_change = iscsi_tcp_state_change;
1272 sk->sk_write_space = iscsi_write_space;
1273 write_unlock_bh(&sk->sk_callback_lock);
1277 iscsi_conn_restore_callbacks(struct iscsi_conn *conn)
1279 struct sock *sk = conn->sock->sk;
1281 /* restore socket callbacks, see also: iscsi_conn_set_callbacks() */
1282 write_lock_bh(&sk->sk_callback_lock);
1283 sk->sk_user_data = NULL;
1284 sk->sk_data_ready = conn->old_data_ready;
1285 sk->sk_state_change = conn->old_state_change;
1286 sk->sk_write_space = conn->old_write_space;
1287 sk->sk_no_check = 0;
1288 write_unlock_bh(&sk->sk_callback_lock);
1292 * iscsi_send - generic send routine
1293 * @sk: kernel's socket
1294 * @buf: buffer to write from
1295 * @size: actual size to write
1296 * @flags: socket's flags
1299 * Depending on the buffer, this uses tcp_sendpage() or tcp_sendmsg().
1300 * buf->sg.offset == -1 tells us that the buffer is not S/G and forces
1301 * the use of tcp_sendmsg().
1304 iscsi_send(struct socket *sk, struct iscsi_buf *buf, int size, int flags)
1308 if ((int)buf->sg.offset >= 0) {
1309 int offset = buf->sg.offset + buf->sent;
1312 res = sk->ops->sendpage(sk, buf->sg.page, offset, size, flags);
1316 buf->iov.iov_base = iscsi_buf_iov_base(buf);
1317 buf->iov.iov_len = size;
1319 memset(&msg, 0, sizeof(struct msghdr));
1322 res = kernel_sendmsg(sk, &msg, &buf->iov, 1, size);
1329 * iscsi_sendhdr - send PDU Header via tcp_sendpage()
1330 * @conn: iscsi connection
1331 * @buf: buffer to write from
1332 * @datalen: length of data to be sent after the header
1338 iscsi_sendhdr(struct iscsi_conn *conn, struct iscsi_buf *buf, int datalen)
1340 struct socket *sk = conn->sock;
1341 int flags = 0; /* MSG_DONTWAIT; */
1344 size = buf->sg.length - buf->sent;
1345 BUG_ON(buf->sent + size > buf->sg.length);
1346 if (buf->sent + size != buf->sg.length || datalen)
1349 res = iscsi_send(sk, buf, size, flags);
1350 debug_tcp("sendhdr %d bytes, sent %d res %d\n", size, buf->sent, res);
1352 conn->txdata_octets += res;
1357 } else if (res == -EAGAIN) {
1358 conn->sendpage_failures_cnt++;
1359 set_bit(SUSPEND_BIT, &conn->suspend_tx);
1360 } else if (res == -EPIPE)
1361 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
1367 * iscsi_sendpage - send one page of iSCSI Data-Out.
1368 * @conn: iscsi connection
1369 * @buf: buffer to write from
1370 * @count: remaining data
1371 * @sent: number of bytes sent
1377 iscsi_sendpage(struct iscsi_conn *conn, struct iscsi_buf *buf,
1378 int *count, int *sent)
1380 struct socket *sk = conn->sock;
1381 int flags = 0; /* MSG_DONTWAIT; */
1384 size = buf->sg.length - buf->sent;
1385 BUG_ON(buf->sent + size > buf->sg.length);
1388 if (buf->sent + size != buf->sg.length)
1391 res = iscsi_send(sk, buf, size, flags);
1392 debug_tcp("sendpage: %d bytes, sent %d left %d sent %d res %d\n",
1393 size, buf->sent, *count, *sent, res);
1395 conn->txdata_octets += res;
1402 } else if (res == -EAGAIN) {
1403 conn->sendpage_failures_cnt++;
1404 set_bit(SUSPEND_BIT, &conn->suspend_tx);
1405 } else if (res == -EPIPE)
1406 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
1412 iscsi_data_digest_init(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
1414 BUG_ON(!conn->data_tx_tfm);
1415 crypto_digest_init(conn->data_tx_tfm);
1416 ctask->digest_count = 4;
1420 iscsi_buf_data_digest_update(struct iscsi_conn *conn, struct iscsi_buf *buf)
1422 struct scatterlist sg;
1424 if (buf->sg.offset != -1)
1425 crypto_digest_update(conn->data_tx_tfm, &buf->sg, 1);
1427 sg_init_one(&sg, (char *)buf->sg.page, buf->sg.length);
1428 crypto_digest_update(conn->data_tx_tfm, &sg, 1);
1433 iscsi_digest_final_send(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
1434 struct iscsi_buf *buf, uint32_t *digest, int final)
1440 crypto_digest_final(conn->data_tx_tfm, (u8*)digest);
1442 iscsi_buf_init_virt(buf, (char*)digest, 4);
1443 rc = iscsi_sendpage(conn, buf, &ctask->digest_count, &sent);
1445 ctask->datadigest = *digest;
1446 ctask->xmstate |= XMSTATE_DATA_DIGEST;
1448 ctask->digest_count = 4;
1453 * iscsi_solicit_data_cont - initialize next Data-Out
1454 * @conn: iscsi connection
1455 * @ctask: scsi command task
1457 * @left: bytes left to transfer
1460 * Initialize the next Data-Out within this R2T sequence and continue
1461 * to process the next scatter-gather element (if any) of this SCSI command.
1463 * Called under connection lock.
1466 iscsi_solicit_data_cont(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
1467 struct iscsi_r2t_info *r2t, int left)
1469 struct iscsi_data *hdr;
1470 struct iscsi_data_task *dtask;
1471 struct scsi_cmnd *sc = ctask->sc;
1474 dtask = mempool_alloc(ctask->datapool, GFP_ATOMIC);
1477 memset(hdr, 0, sizeof(struct iscsi_data));
1478 hdr->ttt = r2t->ttt;
1479 hdr->datasn = cpu_to_be32(r2t->solicit_datasn);
1480 r2t->solicit_datasn++;
1481 hdr->opcode = ISCSI_OP_SCSI_DATA_OUT;
1482 memcpy(hdr->lun, ctask->hdr.lun, sizeof(hdr->lun));
1483 hdr->itt = ctask->hdr.itt;
1484 hdr->exp_statsn = r2t->exp_statsn;
1485 new_offset = r2t->data_offset + r2t->sent;
1486 hdr->offset = cpu_to_be32(new_offset);
1487 if (left > conn->max_xmit_dlength) {
1488 hton24(hdr->dlength, conn->max_xmit_dlength);
1489 r2t->data_count = conn->max_xmit_dlength;
1491 hton24(hdr->dlength, left);
1492 r2t->data_count = left;
1493 hdr->flags = ISCSI_FLAG_CMD_FINAL;
1495 conn->dataout_pdus_cnt++;
1497 iscsi_buf_init_hdr(conn, &r2t->headbuf, (char*)hdr,
1498 (u8 *)dtask->hdrext);
1502 if (sc->use_sg && !iscsi_buf_left(&r2t->sendbuf)) {
1503 BUG_ON(ctask->bad_sg == r2t->sg);
1504 iscsi_buf_init_sg(&r2t->sendbuf, r2t->sg);
1507 iscsi_buf_init_iov(&ctask->sendbuf,
1508 (char*)sc->request_buffer + new_offset,
1511 list_add(&dtask->item, &ctask->dataqueue);
1515 iscsi_unsolicit_data_init(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
1517 struct iscsi_data *hdr;
1518 struct iscsi_data_task *dtask;
1520 dtask = mempool_alloc(ctask->datapool, GFP_ATOMIC);
1523 memset(hdr, 0, sizeof(struct iscsi_data));
1524 hdr->ttt = cpu_to_be32(ISCSI_RESERVED_TAG);
1525 hdr->datasn = cpu_to_be32(ctask->unsol_datasn);
1526 ctask->unsol_datasn++;
1527 hdr->opcode = ISCSI_OP_SCSI_DATA_OUT;
1528 memcpy(hdr->lun, ctask->hdr.lun, sizeof(hdr->lun));
1529 hdr->itt = ctask->hdr.itt;
1530 hdr->exp_statsn = cpu_to_be32(conn->exp_statsn);
1531 hdr->offset = cpu_to_be32(ctask->total_length -
1532 ctask->r2t_data_count -
1533 ctask->unsol_count);
1534 if (ctask->unsol_count > conn->max_xmit_dlength) {
1535 hton24(hdr->dlength, conn->max_xmit_dlength);
1536 ctask->data_count = conn->max_xmit_dlength;
1539 hton24(hdr->dlength, ctask->unsol_count);
1540 ctask->data_count = ctask->unsol_count;
1541 hdr->flags = ISCSI_FLAG_CMD_FINAL;
1544 iscsi_buf_init_hdr(conn, &ctask->headbuf, (char*)hdr,
1545 (u8 *)dtask->hdrext);
1547 list_add(&dtask->item, &ctask->dataqueue);
1549 ctask->dtask = dtask;
1553 * iscsi_cmd_init - Initialize iSCSI SCSI_READ or SCSI_WRITE commands
1554 * @conn: iscsi connection
1555 * @ctask: scsi command task
1559 iscsi_cmd_init(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask,
1560 struct scsi_cmnd *sc)
1562 struct iscsi_session *session = conn->session;
1564 BUG_ON(__kfifo_len(ctask->r2tqueue));
1568 ctask->hdr.opcode = ISCSI_OP_SCSI_CMD;
1569 ctask->hdr.flags = ISCSI_ATTR_SIMPLE;
1570 int_to_scsilun(sc->device->lun, (struct scsi_lun *)ctask->hdr.lun);
1571 ctask->hdr.itt = ctask->itt | (conn->id << CID_SHIFT) |
1572 (session->age << AGE_SHIFT);
1573 ctask->hdr.data_length = cpu_to_be32(sc->request_bufflen);
1574 ctask->hdr.cmdsn = cpu_to_be32(session->cmdsn); session->cmdsn++;
1575 ctask->hdr.exp_statsn = cpu_to_be32(conn->exp_statsn);
1576 memcpy(ctask->hdr.cdb, sc->cmnd, sc->cmd_len);
1577 memset(&ctask->hdr.cdb[sc->cmd_len], 0, MAX_COMMAND_SIZE - sc->cmd_len);
1579 ctask->mtask = NULL;
1581 ctask->sg_count = 0;
1583 ctask->total_length = sc->request_bufflen;
1585 if (sc->sc_data_direction == DMA_TO_DEVICE) {
1586 ctask->exp_r2tsn = 0;
1587 ctask->hdr.flags |= ISCSI_FLAG_CMD_WRITE;
1588 BUG_ON(ctask->total_length == 0);
1590 struct scatterlist *sg = sc->request_buffer;
1592 iscsi_buf_init_sg(&ctask->sendbuf,
1593 &sg[ctask->sg_count++]);
1595 ctask->bad_sg = sg + sc->use_sg;
1597 iscsi_buf_init_iov(&ctask->sendbuf, sc->request_buffer,
1598 sc->request_bufflen);
1604 * imm_count bytes to be sent right after the PDU header
1607 * unsol_count bytes (as Data-Out) to be sent
1608 * without R2T ack right after the immediate data
1611 * r2t_data_count bytes to be sent via R2T acks
1613 * pad_count bytes to be sent as zero-padding
1615 ctask->imm_count = 0;
1616 ctask->unsol_count = 0;
1617 ctask->unsol_datasn = 0;
1618 ctask->xmstate = XMSTATE_W_HDR;
1619 /* calculate write padding */
1620 ctask->pad_count = ctask->total_length & (ISCSI_PAD_LEN-1);
1621 if (ctask->pad_count) {
1622 ctask->pad_count = ISCSI_PAD_LEN - ctask->pad_count;
1623 debug_scsi("write padding %d bytes\n",
1625 ctask->xmstate |= XMSTATE_W_PAD;
1627 if (session->imm_data_en) {
1628 if (ctask->total_length >= session->first_burst)
1629 ctask->imm_count = min(session->first_burst,
1630 conn->max_xmit_dlength);
1632 ctask->imm_count = min(ctask->total_length,
1633 conn->max_xmit_dlength);
1634 hton24(ctask->hdr.dlength, ctask->imm_count);
1635 ctask->xmstate |= XMSTATE_IMM_DATA;
1637 zero_data(ctask->hdr.dlength);
1639 if (!session->initial_r2t_en)
1640 ctask->unsol_count = min(session->first_burst,
1641 ctask->total_length) - ctask->imm_count;
1642 if (!ctask->unsol_count)
1643 /* No unsolicited Data-Outs */
1644 ctask->hdr.flags |= ISCSI_FLAG_CMD_FINAL;
1646 ctask->xmstate |= XMSTATE_UNS_HDR | XMSTATE_UNS_INIT;
1648 ctask->r2t_data_count = ctask->total_length -
1652 debug_scsi("cmd [itt %x total %d imm %d imm_data %d "
1654 ctask->itt, ctask->total_length, ctask->imm_count,
1655 ctask->unsol_count, ctask->r2t_data_count);
1657 ctask->hdr.flags |= ISCSI_FLAG_CMD_FINAL;
1658 if (sc->sc_data_direction == DMA_FROM_DEVICE)
1659 ctask->hdr.flags |= ISCSI_FLAG_CMD_READ;
1661 ctask->xmstate = XMSTATE_R_HDR;
1662 zero_data(ctask->hdr.dlength);
1665 iscsi_buf_init_hdr(conn, &ctask->headbuf, (char*)&ctask->hdr,
1666 (u8 *)ctask->hdrext);
1667 conn->scsicmd_pdus_cnt++;
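/*
 * Worked example of the write counters set up above, assuming a
 * negotiated FirstBurstLength of 64K, a conn->max_xmit_dlength of 8K,
 * ImmediateData=Yes and InitialR2T=No, for a 200K WRITE: imm_count is
 * min(64K, 8K) == 8K sent with the command PDU, unsol_count is
 * min(64K, 200K) - 8K == 56K sent as unsolicited Data-Outs, and the
 * remaining r2t_data_count of 136K is sent only in response to R2Ts.
 */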
1671 * iscsi_mtask_xmit - xmit management(immediate) task
1672 * @conn: iscsi connection
1673 * @mtask: task management task
1676 * The function can return -EAGAIN in which case caller must
1677 * call it again later, or recover. A '0' return code means a successful xmit.
1680 * Management xmit state machine consists of two states:
1681 * IN_PROGRESS_IMM_HEAD - PDU Header xmit in progress
1682 * IN_PROGRESS_IMM_DATA - PDU Data xmit in progress
1685 iscsi_mtask_xmit(struct iscsi_conn *conn, struct iscsi_mgmt_task *mtask)
1688 debug_scsi("mtask deq [cid %d state %x itt 0x%x]\n",
1689 conn->id, mtask->xmstate, mtask->itt);
1691 if (mtask->xmstate & XMSTATE_IMM_HDR) {
1692 mtask->xmstate &= ~XMSTATE_IMM_HDR;
1693 if (mtask->data_count)
1694 mtask->xmstate |= XMSTATE_IMM_DATA;
1695 if (iscsi_sendhdr(conn, &mtask->headbuf, mtask->data_count)) {
1696 mtask->xmstate |= XMSTATE_IMM_HDR;
1697 if (mtask->data_count)
1698 mtask->xmstate &= ~XMSTATE_IMM_DATA;
1703 if (mtask->xmstate & XMSTATE_IMM_DATA) {
1704 BUG_ON(!mtask->data_count);
1705 mtask->xmstate &= ~XMSTATE_IMM_DATA;
1706 /* FIXME: implement.
1707 * The virtual buffer could be spread across multiple pages...
1710 if (iscsi_sendpage(conn, &mtask->sendbuf,
1711 &mtask->data_count, &mtask->sent)) {
1712 mtask->xmstate |= XMSTATE_IMM_DATA;
1715 } while (mtask->data_count);
1718 BUG_ON(mtask->xmstate != XMSTATE_IDLE);
1723 handle_xmstate_r_hdr(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
1725 ctask->xmstate &= ~XMSTATE_R_HDR;
1726 if (!iscsi_sendhdr(conn, &ctask->headbuf, 0)) {
1727 BUG_ON(ctask->xmstate != XMSTATE_IDLE);
1728 return 0; /* wait for Data-In */
1730 ctask->xmstate |= XMSTATE_R_HDR;
1735 handle_xmstate_w_hdr(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
1737 ctask->xmstate &= ~XMSTATE_W_HDR;
1738 if (iscsi_sendhdr(conn, &ctask->headbuf, ctask->imm_count)) {
1739 ctask->xmstate |= XMSTATE_W_HDR;
1746 handle_xmstate_data_digest(struct iscsi_conn *conn,
1747 struct iscsi_cmd_task *ctask)
1749 ctask->xmstate &= ~XMSTATE_DATA_DIGEST;
1750 debug_tcp("resent data digest 0x%x\n", ctask->datadigest);
1751 if (iscsi_digest_final_send(conn, ctask, &ctask->immbuf,
1752 &ctask->datadigest, 0)) {
1753 ctask->xmstate |= XMSTATE_DATA_DIGEST;
1754 debug_tcp("resent data digest 0x%x fail!\n",
1762 handle_xmstate_imm_data(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
1764 BUG_ON(!ctask->imm_count);
1765 ctask->xmstate &= ~XMSTATE_IMM_DATA;
1767 if (conn->datadgst_en) {
1768 iscsi_data_digest_init(conn, ctask);
1769 ctask->immdigest = 0;
1773 if (iscsi_sendpage(conn, &ctask->sendbuf, &ctask->imm_count,
1775 ctask->xmstate |= XMSTATE_IMM_DATA;
1776 if (conn->datadgst_en) {
1777 crypto_digest_final(conn->data_tx_tfm,
1778 (u8*)&ctask->immdigest);
1779 debug_tcp("tx imm sendpage fail 0x%x\n",
1784 if (conn->datadgst_en)
1785 iscsi_buf_data_digest_update(conn, &ctask->sendbuf);
1787 if (!ctask->imm_count)
1789 iscsi_buf_init_sg(&ctask->sendbuf,
1790 &ctask->sg[ctask->sg_count++]);
1793 if (conn->datadgst_en && !(ctask->xmstate & XMSTATE_W_PAD)) {
1794 if (iscsi_digest_final_send(conn, ctask, &ctask->immbuf,
1795 &ctask->immdigest, 1)) {
1796 debug_tcp("sending imm digest 0x%x fail!\n",
1800 debug_tcp("sending imm digest 0x%x\n", ctask->immdigest);
1807 handle_xmstate_uns_hdr(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
1809 struct iscsi_data_task *dtask;
1811 ctask->xmstate |= XMSTATE_UNS_DATA;
1812 if (ctask->xmstate & XMSTATE_UNS_INIT) {
1813 iscsi_unsolicit_data_init(conn, ctask);
1814 BUG_ON(!ctask->dtask);
1815 dtask = ctask->dtask;
1817 ctask->xmstate &= ~XMSTATE_UNS_INIT;
1819 if (iscsi_sendhdr(conn, &ctask->headbuf, ctask->data_count)) {
1820 ctask->xmstate &= ~XMSTATE_UNS_DATA;
1821 ctask->xmstate |= XMSTATE_UNS_HDR;
1825 debug_scsi("uns dout [itt 0x%x dlen %d sent %d]\n",
1826 ctask->itt, ctask->unsol_count, ctask->sent);
1831 handle_xmstate_uns_data(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
1833 struct iscsi_data_task *dtask = ctask->dtask;
1835 BUG_ON(!ctask->data_count);
1836 ctask->xmstate &= ~XMSTATE_UNS_DATA;
1838 if (conn->datadgst_en) {
1839 iscsi_data_digest_init(conn, ctask);
1844 int start = ctask->sent;
1846 if (iscsi_sendpage(conn, &ctask->sendbuf, &ctask->data_count,
1848 ctask->unsol_count -= ctask->sent - start;
1849 ctask->xmstate |= XMSTATE_UNS_DATA;
1850 /* will continue with this ctask later.. */
1851 if (conn->datadgst_en) {
1852 crypto_digest_final(conn->data_tx_tfm,
1853 (u8 *)&dtask->digest);
1854 debug_tcp("tx uns data fail 0x%x\n",
1860 BUG_ON(ctask->sent > ctask->total_length);
1861 ctask->unsol_count -= ctask->sent - start;
1864 * XXX: we may get here with an uninitialized sendbuf.
1867 if (conn->datadgst_en && ctask->sent - start > 0)
1868 iscsi_buf_data_digest_update(conn, &ctask->sendbuf);
1870 if (!ctask->data_count)
1872 iscsi_buf_init_sg(&ctask->sendbuf,
1873 &ctask->sg[ctask->sg_count++]);
1875 BUG_ON(ctask->unsol_count < 0);
1878 * Done with the Data-Out. Next, check if we need
1879 * to send another unsolicited Data-Out.
1881 if (ctask->unsol_count) {
1882 if (conn->datadgst_en) {
1883 if (iscsi_digest_final_send(conn, ctask,
1885 &dtask->digest, 1)) {
1886 debug_tcp("send uns digest 0x%x fail\n",
1890 debug_tcp("sending uns digest 0x%x, more uns\n",
1893 ctask->xmstate |= XMSTATE_UNS_INIT;
1897 if (conn->datadgst_en && !(ctask->xmstate & XMSTATE_W_PAD)) {
1898 if (iscsi_digest_final_send(conn, ctask,
1900 &dtask->digest, 1)) {
1901 debug_tcp("send last uns digest 0x%x fail\n",
1905 debug_tcp("sending uns digest 0x%x\n",dtask->digest);
1912 handle_xmstate_sol_data(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
1914 struct iscsi_session *session = conn->session;
1915 struct iscsi_r2t_info *r2t = ctask->r2t;
1916 struct iscsi_data_task *dtask = r2t->dtask;
1919 ctask->xmstate &= ~XMSTATE_SOL_DATA;
1920 ctask->dtask = dtask;
1922 if (conn->datadgst_en) {
1923 iscsi_data_digest_init(conn, ctask);
1928 * send Data-Out within this R2T sequence.
1930 if (!r2t->data_count)
1933 if (iscsi_sendpage(conn, &r2t->sendbuf, &r2t->data_count, &r2t->sent)) {
1934 ctask->xmstate |= XMSTATE_SOL_DATA;
1935 /* will continue with this ctask later.. */
1936 if (conn->datadgst_en) {
1937 crypto_digest_final(conn->data_tx_tfm,
1938 (u8 *)&dtask->digest);
1939 debug_tcp("r2t data send fail 0x%x\n", dtask->digest);
1944 BUG_ON(r2t->data_count < 0);
1945 if (conn->datadgst_en)
1946 iscsi_buf_data_digest_update(conn, &r2t->sendbuf);
1948 if (r2t->data_count) {
1949 BUG_ON(ctask->sc->use_sg == 0);
1950 if (!iscsi_buf_left(&r2t->sendbuf)) {
1951 BUG_ON(ctask->bad_sg == r2t->sg);
1952 iscsi_buf_init_sg(&r2t->sendbuf, r2t->sg);
1960 * Done with this Data-Out. Next, check if we have
1961 * to send another Data-Out for this R2T.
1963 BUG_ON(r2t->data_length - r2t->sent < 0);
1964 left = r2t->data_length - r2t->sent;
1966 if (conn->datadgst_en) {
1967 if (iscsi_digest_final_send(conn, ctask,
1969 &dtask->digest, 1)) {
1970 debug_tcp("send r2t data digest 0x%x"
1971 "fail\n", dtask->digest);
1974 debug_tcp("r2t data send digest 0x%x\n",
1977 iscsi_solicit_data_cont(conn, ctask, r2t, left);
1978 ctask->xmstate |= XMSTATE_SOL_DATA;
1979 ctask->xmstate &= ~XMSTATE_SOL_HDR;
1984 * Done with this R2T. Check if there are more
1985 * outstanding R2Ts ready to be processed.
1987 BUG_ON(ctask->r2t_data_count - r2t->data_length < 0);
1988 if (conn->datadgst_en) {
1989 if (iscsi_digest_final_send(conn, ctask, &dtask->digestbuf,
1990 &dtask->digest, 1)) {
1991 debug_tcp("send last r2t data digest 0x%x"
1992 "fail\n", dtask->digest);
1995 debug_tcp("r2t done dout digest 0x%x\n", dtask->digest);
1998 ctask->r2t_data_count -= r2t->data_length;
2000 spin_lock_bh(&session->lock);
2001 __kfifo_put(ctask->r2tpool.queue, (void*)&r2t, sizeof(void*));
2002 spin_unlock_bh(&session->lock);
2003 if (__kfifo_get(ctask->r2tqueue, (void*)&r2t, sizeof(void*))) {
2005 ctask->xmstate |= XMSTATE_SOL_DATA;
2006 ctask->xmstate &= ~XMSTATE_SOL_HDR;
2014 handle_xmstate_w_pad(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
2016 struct iscsi_data_task *dtask = ctask->dtask;
2019 ctask->xmstate &= ~XMSTATE_W_PAD;
2020 iscsi_buf_init_virt(&ctask->sendbuf, (char*)&ctask->pad,
2022 if (iscsi_sendpage(conn, &ctask->sendbuf, &ctask->pad_count, &sent)) {
2023 ctask->xmstate |= XMSTATE_W_PAD;
2027 if (conn->datadgst_en) {
2028 iscsi_buf_data_digest_update(conn, &ctask->sendbuf);
2031 if (iscsi_digest_final_send(conn, ctask, &ctask->immbuf,
2032 &ctask->immdigest, 1)) {
2033 debug_tcp("send padding digest 0x%x"
2034 "fail!\n", ctask->immdigest);
2037 debug_tcp("done with padding, digest 0x%x\n",
2040 if (iscsi_digest_final_send(conn, ctask,
2042 &dtask->digest, 1)) {
2043 debug_tcp("send padding digest 0x%x"
2044 "fail\n", dtask->digest);
2047 debug_tcp("done with padding, digest 0x%x\n",
2056 iscsi_ctask_xmit(struct iscsi_conn *conn, struct iscsi_cmd_task *ctask)
2060 debug_scsi("ctask deq [cid %d xmstate %x itt 0x%x]\n",
2061 conn->id, ctask->xmstate, ctask->itt);
2064 * serialize with TMF AbortTask
2069 if (ctask->xmstate & XMSTATE_R_HDR) {
2070 rc = handle_xmstate_r_hdr(conn, ctask);
2074 if (ctask->xmstate & XMSTATE_W_HDR) {
2075 rc = handle_xmstate_w_hdr(conn, ctask);
2080 /* XXX: for data digest xmit recover */
2081 if (ctask->xmstate & XMSTATE_DATA_DIGEST) {
2082 rc = handle_xmstate_data_digest(conn, ctask);
2087 if (ctask->xmstate & XMSTATE_IMM_DATA) {
2088 rc = handle_xmstate_imm_data(conn, ctask);
2093 if (ctask->xmstate & XMSTATE_UNS_HDR) {
2094 BUG_ON(!ctask->unsol_count);
2095 ctask->xmstate &= ~XMSTATE_UNS_HDR;
2096 unsolicit_head_again:
2097 rc = handle_xmstate_uns_hdr(conn, ctask);
2102 if (ctask->xmstate & XMSTATE_UNS_DATA) {
2103 rc = handle_xmstate_uns_data(conn, ctask);
2105 goto unsolicit_head_again;
2111 if (ctask->xmstate & XMSTATE_SOL_HDR) {
2112 struct iscsi_r2t_info *r2t;
2114 ctask->xmstate &= ~XMSTATE_SOL_HDR;
2115 ctask->xmstate |= XMSTATE_SOL_DATA;
2117 __kfifo_get(ctask->r2tqueue, (void*)&ctask->r2t,
2122 if (iscsi_sendhdr(conn, &r2t->headbuf, r2t->data_count)) {
2123 ctask->xmstate &= ~XMSTATE_SOL_DATA;
2124 ctask->xmstate |= XMSTATE_SOL_HDR;
2128 debug_scsi("sol dout [dsn %d itt 0x%x dlen %d sent %d]\n",
2129 r2t->solicit_datasn - 1, ctask->itt, r2t->data_count,
2133 if (ctask->xmstate & XMSTATE_SOL_DATA) {
2134 rc = handle_xmstate_sol_data(conn, ctask);
2136 goto solicit_head_again;
2143 * Last thing to check is whether we need to send write
2144 * padding. Note that we check for xmstate equality, not just the bit.
2146 if (ctask->xmstate == XMSTATE_W_PAD)
2147 rc = handle_xmstate_w_pad(conn, ctask);
2153 * iscsi_data_xmit - xmit any command into the scheduled connection
2154 * @conn: iscsi connection
2157 * The function can return -EAGAIN in which case the caller must
2158 * re-schedule it later or recover. A '0' return code means a successful xmit.
2162 iscsi_data_xmit(struct iscsi_conn *conn)
2164 if (unlikely(conn->suspend_tx)) {
2165 debug_tcp("conn %d Tx suspended!\n", conn->id);
2170 * Transmit in the following order:
2172 * 1) unfinished xmit (ctask or mtask)
2173 * 2) immediate control PDUs
2174 * 3) write data (Data-Outs from the write queue)
2175 * 4) SCSI commands from the xmit queue
2176 * 5) non-immediate control PDUs
2178 * No need to lock around __kfifo_get as long as
2179 * there's one producer and one consumer.
2182 BUG_ON(conn->ctask && conn->mtask);
2185 if (iscsi_ctask_xmit(conn, conn->ctask))
2187 /* done with this in-progress ctask */
2191 if (iscsi_mtask_xmit(conn, conn->mtask))
2193 /* done with this in-progress mtask */
2197 /* process immediate first */
2198 if (unlikely(__kfifo_len(conn->immqueue))) {
2199 struct iscsi_session *session = conn->session;
2200 while (__kfifo_get(conn->immqueue, (void*)&conn->mtask,
2202 if (iscsi_mtask_xmit(conn, conn->mtask))
2205 if (conn->mtask->hdr.itt ==
2206 cpu_to_be32(ISCSI_RESERVED_TAG)) {
2207 spin_lock_bh(&session->lock);
2208 __kfifo_put(session->mgmtpool.queue,
2209 (void*)&conn->mtask, sizeof(void*));
2210 spin_unlock_bh(&session->lock);
2213 /* done with this mtask */
2217 /* process write queue */
2218 while (__kfifo_get(conn->writequeue, (void*)&conn->ctask,
2220 if (iscsi_ctask_xmit(conn, conn->ctask))
2224 /* process command queue */
2225 while (__kfifo_get(conn->xmitqueue, (void*)&conn->ctask,
2227 if (iscsi_ctask_xmit(conn, conn->ctask))
2230 /* done with this ctask */
2233 /* process the rest control plane PDUs, if any */
2234 if (unlikely(__kfifo_len(conn->mgmtqueue))) {
2235 struct iscsi_session *session = conn->session;
2237 while (__kfifo_get(conn->mgmtqueue, (void*)&conn->mtask,
2239 if (iscsi_mtask_xmit(conn, conn->mtask))
2242 if (conn->mtask->hdr.itt ==
2243 cpu_to_be32(ISCSI_RESERVED_TAG)) {
2244 spin_lock_bh(&session->lock);
2245 __kfifo_put(session->mgmtpool.queue,
2246 (void*)&conn->mtask,
2248 spin_unlock_bh(&session->lock);
2251 /* done with this mtask */
2258 if (unlikely(conn->suspend_tx))
2265 iscsi_xmitworker(void *data)
2267 struct iscsi_conn *conn = data;
2270 * serialize Xmit worker on a per-connection basis.
2272 down(&conn->xmitsema);
2273 if (iscsi_data_xmit(conn))
2274 schedule_work(&conn->xmitwork);
2275 up(&conn->xmitsema);
2278 #define FAILURE_BAD_HOST 1
2279 #define FAILURE_SESSION_FAILED 2
2280 #define FAILURE_SESSION_FREED 3
2281 #define FAILURE_WINDOW_CLOSED 4
2282 #define FAILURE_SESSION_TERMINATE 5
2285 iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
2287 struct Scsi_Host *host;
2289 struct iscsi_session *session;
2290 struct iscsi_conn *conn = NULL;
2291 struct iscsi_cmd_task *ctask = NULL;
2293 sc->scsi_done = done;
2296 host = sc->device->host;
2297 session = iscsi_hostdata(host->hostdata);
2298 BUG_ON(host != session->host);
2300 spin_lock(&session->lock);
2302 if (session->state != ISCSI_STATE_LOGGED_IN) {
2303 if (session->state == ISCSI_STATE_FAILED) {
2304 reason = FAILURE_SESSION_FAILED;
2306 } else if (session->state == ISCSI_STATE_TERMINATE) {
2307 reason = FAILURE_SESSION_TERMINATE;
2310 reason = FAILURE_SESSION_FREED;
2315 * Check for iSCSI window and take care of CmdSN wrap-around
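/*
 * The signed-difference test below is wrap-safe: for example, with
 * session->cmdsn == 0xfffffffe and session->max_cmdsn == 0x00000003
 * the unsigned difference is 5, so the window is open; with
 * cmdsn == 5 and max_cmdsn == 3 the difference is -2 and the command
 * is rejected with SCSI_MLQUEUE_HOST_BUSY until the window reopens.
 */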
2317 if ((int)(session->max_cmdsn - session->cmdsn) < 0) {
2318 reason = FAILURE_WINDOW_CLOSED;
2322 conn = session->leadconn;
2324 __kfifo_get(session->cmdpool.queue, (void*)&ctask, sizeof(void*));
2327 sc->SCp.phase = session->age;
2328 sc->SCp.ptr = (char*)ctask;
2329 iscsi_cmd_init(conn, ctask, sc);
2331 __kfifo_put(conn->xmitqueue, (void*)&ctask, sizeof(void*));
2333 "ctask enq [%s cid %d sc %lx itt 0x%x len %d cmdsn %d win %d]\n",
2334 sc->sc_data_direction == DMA_TO_DEVICE ? "write" : "read",
2335 conn->id, (long)sc, ctask->itt, sc->request_bufflen,
2336 session->cmdsn, session->max_cmdsn - session->exp_cmdsn + 1);
2337 spin_unlock(&session->lock);
2339 if (!in_interrupt() && !down_trylock(&conn->xmitsema)) {
2340 spin_unlock_irq(host->host_lock);
2341 if (iscsi_data_xmit(conn))
2342 schedule_work(&conn->xmitwork);
2343 up(&conn->xmitsema);
2344 spin_lock_irq(host->host_lock);
2346 schedule_work(&conn->xmitwork);
2351 spin_unlock(&session->lock);
2352 debug_scsi("cmd 0x%x rejected (%d)\n", sc->cmnd[0], reason);
2353 return SCSI_MLQUEUE_HOST_BUSY;
2356 spin_unlock(&session->lock);
2357 printk(KERN_ERR "iscsi_tcp: cmd 0x%x is not queued (%d)\n",
2358 sc->cmnd[0], reason);
2359 sc->sense_buffer[0] = 0x70;
2360 sc->sense_buffer[2] = NOT_READY;
2361 sc->sense_buffer[7] = 0x6;
2362 sc->sense_buffer[12] = 0x08;
2363 sc->sense_buffer[13] = 0x00;
2364 sc->result = (DID_NO_CONNECT << 16);
2365 sc->resid = sc->request_bufflen;
2371 iscsi_pool_init(struct iscsi_queue *q, int max, void ***items, int item_size)
2375 *items = kmalloc(max * sizeof(void*), GFP_KERNEL);
2380 q->pool = kmalloc(max * sizeof(void*), GFP_KERNEL);
2381 if (q->pool == NULL) {
2386 q->queue = kfifo_init((void*)q->pool, max * sizeof(void*),
2388 if (q->queue == ERR_PTR(-ENOMEM)) {
2394 for (i = 0; i < max; i++) {
2395 q->pool[i] = kmalloc(item_size, GFP_KERNEL);
2396 if (q->pool[i] == NULL) {
2399 for (j = 0; j < i; j++)
2402 kfifo_free(q->queue);
2407 memset(q->pool[i], 0, item_size);
2408 (*items)[i] = q->pool[i];
2409 __kfifo_put(q->queue, (void*)&q->pool[i], sizeof(void*));
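/*
 * Pool design note: iscsi_pool_init() preallocates 'max' fixed-size
 * items and pushes a pointer to each of them into a kfifo, so the
 * kfifo acts as a free list -- allocation is a __kfifo_get and release
 * is a __kfifo_put, which is safe without a lock as long as there is a
 * single producer and a single consumer (see the note in
 * iscsi_data_xmit()).
 */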
2415 iscsi_pool_free(struct iscsi_queue *q, void **items)
2419 for (i = 0; i < q->max; i++)
2425 static iscsi_connh_t
2426 iscsi_conn_create(iscsi_sessionh_t sessionh, uint32_t conn_idx)
2428 struct iscsi_session *session = iscsi_ptr(sessionh);
2429 struct iscsi_conn *conn = NULL;
2431 conn = kmalloc(sizeof(struct iscsi_conn), GFP_KERNEL);
2433 goto conn_alloc_fail;
2434 memset(conn, 0, sizeof(struct iscsi_conn));
2436 conn->c_stage = ISCSI_CONN_INITIAL_STAGE;
2437 conn->in_progress = IN_PROGRESS_WAIT_HEADER;
2438 conn->id = conn_idx;
2439 conn->exp_statsn = 0;
2440 conn->tmabort_state = TMABORT_INITIAL;
2442 /* initial operational parameters */
2443 conn->hdr_size = sizeof(struct iscsi_hdr);
2444 conn->data_size = DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH;
2445 conn->max_recv_dlength = DEFAULT_MAX_RECV_DATA_SEGMENT_LENGTH;
2447 spin_lock_init(&conn->lock);
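/*
 * Four xmit-side kfifos follow: xmitqueue (SCSI command tasks), writequeue
 * (data-out queued in response to R2Ts), immqueue (immediate PDUs) and
 * mgmtqueue (other management PDUs); xmitwork drains them all.
 */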
2449 /* initialize general xmit PDU commands queue */
2450 conn->xmitqueue = kfifo_alloc(session->cmds_max * sizeof(void*),
2452 if (conn->xmitqueue == ERR_PTR(-ENOMEM))
2453 goto xmitqueue_alloc_fail;
2455 /* initialize write response PDU commands queue */
2456 conn->writequeue = kfifo_alloc(session->cmds_max * sizeof(void*),
2458 if (conn->writequeue == ERR_PTR(-ENOMEM))
2459 goto writequeue_alloc_fail;
2461 /* initialize general immediate & non-immediate PDU commands queue */
2462 conn->immqueue = kfifo_alloc(session->mgmtpool_max * sizeof(void*),
2464 if (conn->immqueue == ERR_PTR(-ENOMEM))
2465 goto immqueue_alloc_fail;
2467 conn->mgmtqueue = kfifo_alloc(session->mgmtpool_max * sizeof(void*),
2469 if (conn->mgmtqueue == ERR_PTR(-ENOMEM))
2470 goto mgmtqueue_alloc_fail;
2472 INIT_WORK(&conn->xmitwork, iscsi_xmitworker, conn);
2474 /* allocate login_mtask used for the login/text sequences */
2475 spin_lock_bh(&session->lock);
2476 if (!__kfifo_get(session->mgmtpool.queue,
2477 (void*)&conn->login_mtask,
2479 spin_unlock_bh(&session->lock);
2480 goto login_mtask_alloc_fail;
2482 spin_unlock_bh(&session->lock);
2484 /* allocate initial PDU receive place holder */
2485 if (conn->data_size <= PAGE_SIZE)
2486 conn->data = kmalloc(conn->data_size, GFP_KERNEL);
2488 conn->data = (void*)__get_free_pages(GFP_KERNEL,
2489 get_order(conn->data_size));
2491 goto max_recv_dlength_alloc_fail;
2493 init_timer(&conn->tmabort_timer);
2494 init_MUTEX(&conn->xmitsema);
2495 init_waitqueue_head(&conn->ehwait);
2497 return iscsi_handle(conn);
2499 max_recv_dlength_alloc_fail:
2500 spin_lock_bh(&session->lock);
2501 __kfifo_put(session->mgmtpool.queue, (void*)&conn->login_mtask,
2503 spin_unlock_bh(&session->lock);
2504 login_mtask_alloc_fail:
2505 kfifo_free(conn->mgmtqueue);
2506 mgmtqueue_alloc_fail:
2507 kfifo_free(conn->immqueue);
2508 immqueue_alloc_fail:
2509 kfifo_free(conn->writequeue);
2510 writequeue_alloc_fail:
2511 kfifo_free(conn->xmitqueue);
2512 xmitqueue_alloc_fail:
2515 return iscsi_handle(NULL);
2519 iscsi_conn_destroy(iscsi_connh_t connh)
2521 struct iscsi_conn *conn = iscsi_ptr(connh);
2522 struct iscsi_session *session = conn->session;
2524 down(&conn->xmitsema);
2525 set_bit(SUSPEND_BIT, &conn->suspend_tx);
2526 if (conn->c_stage == ISCSI_CONN_INITIAL_STAGE && conn->sock) {
2527 struct sock *sk = conn->sock->sk;
2530 * conn_start() has never been called!
2531 * need to clean up the socket.
2533 write_lock_bh(&sk->sk_callback_lock);
2534 set_bit(SUSPEND_BIT, &conn->suspend_rx);
2535 write_unlock_bh(&sk->sk_callback_lock);
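/*
 * Hold the sock while the callbacks are restored so it cannot be freed
 * underneath us.
 */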
2537 sock_hold(conn->sock->sk);
2538 iscsi_conn_restore_callbacks(conn);
2539 sock_put(conn->sock->sk);
2540 sock_release(conn->sock);
2544 spin_lock_bh(&session->lock);
2545 conn->c_stage = ISCSI_CONN_CLEANUP_WAIT;
2546 if (session->leadconn == conn) {
2548 * leading connection? then give up on recovery.
2550 session->state = ISCSI_STATE_TERMINATE;
2551 wake_up(&conn->ehwait);
2553 spin_unlock_bh(&session->lock);
2555 up(&conn->xmitsema);
2558 * Block until all in-progress commands for this connection time out or fail.
2562 spin_lock_bh(&conn->lock);
2563 if (!session->host->host_busy) { /* OK for ERL == 0 */
2564 spin_unlock_bh(&conn->lock);
2567 spin_unlock_bh(&conn->lock);
2568 msleep_interruptible(500);
2569 printk(KERN_INFO "conn_destroy(): host_busy %d host_failed %d\n",
2570 session->host->host_busy, session->host->host_failed);
2572 * force eh_abort() to unblock
2574 wake_up(&conn->ehwait);
2577 /* now free crypto */
2578 if (conn->hdrdgst_en || conn->datadgst_en) {
2580 crypto_free_tfm(conn->tx_tfm);
2582 crypto_free_tfm(conn->rx_tfm);
2583 if (conn->data_tx_tfm)
2584 crypto_free_tfm(conn->data_tx_tfm);
2585 if (conn->data_rx_tfm)
2586 crypto_free_tfm(conn->data_rx_tfm);
2589 /* free conn->data, size = MaxRecvDataSegmentLength */
2590 if (conn->data_size <= PAGE_SIZE)
2593 free_pages((unsigned long)conn->data,
2594 get_order(conn->data_size));
2596 spin_lock_bh(&session->lock);
2597 __kfifo_put(session->mgmtpool.queue, (void*)&conn->login_mtask,
2599 list_del(&conn->item);
2600 if (list_empty(&session->connections))
2601 session->leadconn = NULL;
2602 if (session->leadconn && session->leadconn == conn)
2603 session->leadconn = container_of(session->connections.next,
2604 struct iscsi_conn, item);
2606 if (session->leadconn == NULL)
2607 /* no connections left; reset sequencing */
2608 session->cmdsn = session->max_cmdsn = session->exp_cmdsn = 1;
2609 spin_unlock_bh(&session->lock);
2611 kfifo_free(conn->xmitqueue);
2612 kfifo_free(conn->writequeue);
2613 kfifo_free(conn->immqueue);
2614 kfifo_free(conn->mgmtqueue);
2619 iscsi_conn_bind(iscsi_sessionh_t sessionh, iscsi_connh_t connh,
2620 uint32_t transport_fd, int is_leading)
2622 struct iscsi_session *session = iscsi_ptr(sessionh);
2623 struct iscsi_conn *tmp = ERR_PTR(-EEXIST), *conn = iscsi_ptr(connh);
2625 struct socket *sock;
2628 /* look up the existing socket */
2629 sock = sockfd_lookup(transport_fd, &err);
2631 printk(KERN_ERR "iscsi_tcp: sockfd_lookup failed %d\n", err);
2635 /* look up the existing connection */
2636 spin_lock_bh(&session->lock);
2637 list_for_each_entry(tmp, &session->connections, item) {
2639 if (conn->c_stage != ISCSI_CONN_STOPPED ||
2640 conn->stop_stage == STOP_CONN_TERM) {
2641 printk(KERN_ERR "iscsi_tcp: can't bind "
2642 "non-stopped connection (%d:%d)\n",
2643 conn->c_stage, conn->stop_stage);
2644 spin_unlock_bh(&session->lock);
2651 /* bind new iSCSI connection to session */
2652 conn->session = session;
2654 list_add(&conn->item, &session->connections);
2656 spin_unlock_bh(&session->lock);
2658 if (conn->stop_stage != STOP_CONN_SUSPEND) {
2659 /* bind iSCSI connection and socket */
2662 /* set up socket parameters */
2665 sk->sk_sndtimeo = 15 * HZ; /* FIXME: make it configurable */
2666 sk->sk_allocation = GFP_ATOMIC;
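/*
 * The receive path runs from the socket's bh callbacks, so socket
 * allocations must not sleep.
 */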
2668 /* FIXME: disable Nagle's algorithm */
2671 * Intercept TCP callbacks for sendfile-like receive
2674 iscsi_conn_set_callbacks(conn);
2677 * set receive state machine into initial state
2679 conn->in_progress = IN_PROGRESS_WAIT_HEADER;
2683 session->leadconn = conn;
2686 * Unblock xmitworker() so the Login Phase can pass through.
2688 clear_bit(SUSPEND_BIT, &conn->suspend_rx);
2689 clear_bit(SUSPEND_BIT, &conn->suspend_tx);
2695 iscsi_conn_start(iscsi_connh_t connh)
2697 struct iscsi_conn *conn = iscsi_ptr(connh);
2698 struct iscsi_session *session = conn->session;
2701 /* Full Feature (FF) phase warming up... */
2703 if (session == NULL) {
2704 printk(KERN_ERR "iscsi_tcp: can't start unbound connection\n");
2708 sk = conn->sock->sk;
2710 write_lock_bh(&sk->sk_callback_lock);
2711 spin_lock_bh(&session->lock);
2712 conn->c_stage = ISCSI_CONN_STARTED;
2713 session->state = ISCSI_STATE_LOGGED_IN;
2715 switch(conn->stop_stage) {
2716 case STOP_CONN_RECOVER:
2718 * unblock eh_abort() if it is blocked. re-try all
2719 * commands after successful recovery
2721 session->conn_cnt++;
2722 conn->stop_stage = 0;
2723 conn->tmabort_state = TMABORT_INITIAL;
2725 wake_up(&conn->ehwait);
2727 case STOP_CONN_TERM:
2728 session->conn_cnt++;
2729 conn->stop_stage = 0;
2731 case STOP_CONN_SUSPEND:
2732 conn->stop_stage = 0;
2733 clear_bit(SUSPEND_BIT, &conn->suspend_rx);
2734 clear_bit(SUSPEND_BIT, &conn->suspend_tx);
2739 spin_unlock_bh(&session->lock);
2740 write_unlock_bh(&sk->sk_callback_lock);
2746 iscsi_conn_stop(iscsi_connh_t connh, int flag)
2748 struct iscsi_conn *conn = iscsi_ptr(connh);
2749 struct iscsi_session *session = conn->session;
2751 unsigned long flags;
2753 BUG_ON(!conn->sock);
2754 sk = conn->sock->sk;
2755 write_lock_bh(&sk->sk_callback_lock);
2756 set_bit(SUSPEND_BIT, &conn->suspend_rx);
2757 write_unlock_bh(&sk->sk_callback_lock);
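/*
 * The receive side is suspended first (under the socket callback lock);
 * taking xmitsema then guarantees no transmit is in flight while the
 * connection state is torn down below.
 */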
2759 down(&conn->xmitsema);
2761 spin_lock_irqsave(session->host->host_lock, flags);
2762 spin_lock(&session->lock);
2763 conn->stop_stage = flag;
2764 conn->c_stage = ISCSI_CONN_STOPPED;
2765 set_bit(SUSPEND_BIT, &conn->suspend_tx);
2767 if (flag != STOP_CONN_SUSPEND)
2768 session->conn_cnt--;
2770 if (session->conn_cnt == 0 || session->leadconn == conn)
2771 session->state = ISCSI_STATE_FAILED;
2773 spin_unlock(&session->lock);
2774 spin_unlock_irqrestore(session->host->host_lock, flags);
2776 if (flag == STOP_CONN_TERM || flag == STOP_CONN_RECOVER) {
2777 struct iscsi_cmd_task *ctask;
2778 struct iscsi_mgmt_task *mtask;
2781 * Socket must go now.
2783 sock_hold(conn->sock->sk);
2784 iscsi_conn_restore_callbacks(conn);
2785 sock_put(conn->sock->sk);
2788 * flush xmit queues.
2790 spin_lock_bh(&session->lock);
2791 while (__kfifo_get(conn->writequeue, (void*)&ctask,
2793 __kfifo_get(conn->xmitqueue, (void*)&ctask,
2795 struct iscsi_r2t_info *r2t;
2798 * flush ctask's r2t queues
2800 while (__kfifo_get(ctask->r2tqueue, (void*)&r2t,
2802 __kfifo_put(ctask->r2tpool.queue, (void*)&r2t,
2805 spin_unlock_bh(&session->lock);
2807 iscsi_ctask_cleanup(conn, ctask);
2809 spin_lock_bh(&session->lock);
2812 while (__kfifo_get(conn->immqueue, (void*)&mtask,
2814 __kfifo_get(conn->mgmtqueue, (void*)&mtask,
2816 __kfifo_put(session->mgmtpool.queue,
2817 (void*)&mtask, sizeof(void*));
2820 spin_unlock_bh(&session->lock);
2823 * release socket only after we stopped data_xmit()
2824 * activity and flushed all outstanding requests
2826 sock_release(conn->sock);
2830 * for connection-level recovery we should not calculate
2831 * the header digest. conn->hdr_size is used as an optimization
2832 * in hdr_extract() and will be re-negotiated at the next login.
2835 if (flag == STOP_CONN_RECOVER)
2836 conn->hdr_size = sizeof(struct iscsi_hdr);
2838 up(&conn->xmitsema);
2842 iscsi_conn_send_generic(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
2843 char *data, uint32_t data_size)
2845 struct iscsi_session *session = conn->session;
2846 struct iscsi_nopout *nop = (struct iscsi_nopout *)hdr;
2847 struct iscsi_mgmt_task *mtask;
2849 spin_lock_bh(&session->lock);
2850 if (session->state == ISCSI_STATE_TERMINATE) {
2851 spin_unlock_bh(&session->lock);
2854 if (hdr->opcode == (ISCSI_OP_LOGIN | ISCSI_OP_IMMEDIATE) ||
2855 hdr->opcode == (ISCSI_OP_TEXT | ISCSI_OP_IMMEDIATE))
2857 * Login and Text are sent serially, in
2858 * request-followed-by-response sequence.
2859 * Same mtask can be used. Same ITT must be used.
2860 * Note that login_mtask is preallocated at conn_create().
2862 mtask = conn->login_mtask;
2864 BUG_ON(conn->c_stage == ISCSI_CONN_INITIAL_STAGE);
2865 BUG_ON(conn->c_stage == ISCSI_CONN_STOPPED);
2867 if (!__kfifo_get(session->mgmtpool.queue,
2868 (void*)&mtask, sizeof(void*))) {
2869 spin_unlock_bh(&session->lock);
2875 * pre-format CmdSN and ExpStatSN for outgoing PDU.
2877 if (hdr->itt != cpu_to_be32(ISCSI_RESERVED_TAG)) {
2878 hdr->itt = mtask->itt | (conn->id << CID_SHIFT) |
2879 (session->age << AGE_SHIFT);
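/*
 * The ITT packs the mgmt task index together with the connection id and
 * session age, so the matching response can be routed back to this mtask
 * and stale ones detected.
 */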
2880 nop->cmdsn = cpu_to_be32(session->cmdsn);
2881 if (conn->c_stage == ISCSI_CONN_STARTED &&
2882 !(hdr->opcode & ISCSI_OP_IMMEDIATE))
2885 /* do not advance CmdSN */
2886 nop->cmdsn = cpu_to_be32(session->cmdsn);
2888 nop->exp_statsn = cpu_to_be32(conn->exp_statsn);
2890 memcpy(&mtask->hdr, hdr, sizeof(struct iscsi_hdr));
2892 if (conn->c_stage == ISCSI_CONN_INITIAL_STAGE ||
2893 conn->stop_stage == STOP_CONN_RECOVER)
2894 iscsi_buf_init_virt(&mtask->headbuf, (char*)&mtask->hdr,
2895 sizeof(struct iscsi_hdr));
2897 /* this will update header digest */
2898 iscsi_buf_init_hdr(conn, &mtask->headbuf, (char*)&mtask->hdr,
2899 (u8 *)mtask->hdrext);
2901 spin_unlock_bh(&session->lock);
2904 memcpy(mtask->data, data, data_size);
2905 mtask->data_count = data_size;
2907 mtask->data_count = 0;
2909 mtask->xmstate = XMSTATE_IMM_HDR;
2911 if (mtask->data_count) {
2912 iscsi_buf_init_iov(&mtask->sendbuf, (char*)mtask->data,
2916 debug_scsi("mgmtpdu [op 0x%x hdr->itt 0x%x datalen %d]\n",
2917 hdr->opcode, hdr->itt, data_size);
2920 * since send_pdu() can be called from at least two contexts,
2921 * we need to serialize __kfifo_put so that we don't have to take
2922 * an additional lock on the fast data path
2924 if (hdr->opcode & ISCSI_OP_IMMEDIATE)
2925 __kfifo_put(conn->immqueue, (void*)&mtask, sizeof(void*));
2927 __kfifo_put(conn->mgmtqueue, (void*)&mtask, sizeof(void*));
2929 schedule_work(&conn->xmitwork);
2935 iscsi_eh_host_reset(struct scsi_cmnd *sc)
2937 struct iscsi_cmd_task *ctask = (struct iscsi_cmd_task *)sc->SCp.ptr;
2938 struct iscsi_conn *conn = ctask->conn;
2939 struct iscsi_session *session = conn->session;
2941 spin_lock_bh(&session->lock);
2942 if (session->state == ISCSI_STATE_TERMINATE) {
2943 debug_scsi("failing host reset: session terminated "
2944 "[CID %d age %d]", conn->id, session->age);
2945 spin_unlock_bh(&session->lock);
2948 spin_unlock_bh(&session->lock);
2950 debug_scsi("failing connection CID %d due to SCSI host reset "
2951 "[itt 0x%x age %d]", conn->id, ctask->itt,
2953 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
2959 iscsi_tmabort_timedout(unsigned long data)
2961 struct iscsi_cmd_task *ctask = (struct iscsi_cmd_task *)data;
2962 struct iscsi_conn *conn = ctask->conn;
2963 struct iscsi_session *session = conn->session;
2965 spin_lock(&session->lock);
2966 if (conn->tmabort_state == TMABORT_INITIAL) {
2967 __kfifo_put(session->mgmtpool.queue,
2968 (void*)&ctask->mtask, sizeof(void*));
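/* the abort is being abandoned, so recycle its mtask right away */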
2969 conn->tmabort_state = TMABORT_TIMEDOUT;
2970 debug_scsi("tmabort timedout [sc %lx itt 0x%x]\n",
2971 (long)ctask->sc, ctask->itt);
2972 /* unblock eh_abort() */
2973 wake_up(&conn->ehwait);
2975 spin_unlock(&session->lock);
2979 iscsi_eh_abort(struct scsi_cmnd *sc)
2982 struct iscsi_cmd_task *ctask = (struct iscsi_cmd_task *)sc->SCp.ptr;
2983 struct iscsi_conn *conn = ctask->conn;
2984 struct iscsi_session *session = conn->session;
2986 conn->eh_abort_cnt++;
2987 debug_scsi("aborting [sc %lx itt 0x%x]\n", (long)sc, ctask->itt);
2990 * two cases for ERL=0 here:
2992 * 1) connection-level failure;
2993 * 2) recovery due to a protocol error;
2995 down(&conn->xmitsema);
2996 spin_lock_bh(&session->lock);
2997 if (session->state != ISCSI_STATE_LOGGED_IN) {
2998 if (session->state == ISCSI_STATE_TERMINATE) {
2999 spin_unlock_bh(&session->lock);
3000 up(&conn->xmitsema);
3003 spin_unlock_bh(&session->lock);
3005 struct iscsi_tm *hdr = &conn->tmhdr;
3008 * Still LOGGED_IN...
3011 if (!ctask->sc || sc->SCp.phase != session->age) {
3013 * 1) ctask completed before the timeout, but the session
3014 * is still OK => happy retry.
3015 * 2) session was re-opened while the ctask was timing out.
3017 spin_unlock_bh(&session->lock);
3018 up(&conn->xmitsema);
3021 conn->tmabort_state = TMABORT_INITIAL;
3022 spin_unlock_bh(&session->lock);
3025 * ctask timed out but session is OK
3026 * ERL=0 requires a task management abort to be issued for
3027 * each failed command; these requests must be serialized.
3029 memset(hdr, 0, sizeof(struct iscsi_tm));
3030 hdr->opcode = ISCSI_OP_SCSI_TMFUNC | ISCSI_OP_IMMEDIATE;
3031 hdr->flags = ISCSI_TM_FUNC_ABORT_TASK;
3032 hdr->flags |= ISCSI_FLAG_CMD_FINAL;
3033 memcpy(hdr->lun, ctask->hdr.lun, sizeof(hdr->lun));
3034 hdr->rtt = ctask->hdr.itt;
3035 hdr->refcmdsn = ctask->hdr.cmdsn;
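/*
 * RTT and RefCmdSN above identify the task being aborted; the LUN must
 * match the original command's LUN.
 */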
3037 rc = iscsi_conn_send_generic(conn, (struct iscsi_hdr *)hdr,
3040 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
3041 debug_scsi("abort sent failure [itt 0x%x]", ctask->itt);
3043 struct iscsi_r2t_info *r2t;
3046 * TMF abort vs. TMF response race logic
3048 spin_lock_bh(&session->lock);
3049 ctask->mtask = (struct iscsi_mgmt_task *)
3050 session->mgmt_cmds[(hdr->itt & ITT_MASK) -
3051 ISCSI_MGMT_ITT_OFFSET];
3053 * have to flush r2tqueue to avoid r2t leaks
3055 while (__kfifo_get(ctask->r2tqueue, (void*)&r2t,
3057 __kfifo_put(ctask->r2tpool.queue, (void*)&r2t,
3060 if (conn->tmabort_state == TMABORT_INITIAL) {
3061 conn->tmfcmd_pdus_cnt++;
3062 conn->tmabort_timer.expires = 3*HZ + jiffies;
3063 conn->tmabort_timer.function =
3064 iscsi_tmabort_timedout;
3065 conn->tmabort_timer.data = (unsigned long)ctask;
3066 add_timer(&conn->tmabort_timer);
3067 debug_scsi("abort sent [itt 0x%x]", ctask->itt);
3070 conn->tmabort_state == TMABORT_SUCCESS) {
3071 conn->tmabort_state = TMABORT_INITIAL;
3072 spin_unlock_bh(&session->lock);
3073 up(&conn->xmitsema);
3076 conn->tmabort_state = TMABORT_INITIAL;
3077 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
3079 spin_unlock_bh(&session->lock);
3082 up(&conn->xmitsema);
3086 * block eh thread until:
3088 * 1) abort response;
3090 * 3) session re-opened;
3091 * 4) session terminated;
3094 int p_state = session->state;
3096 rc = wait_event_interruptible(conn->ehwait,
3097 (p_state == ISCSI_STATE_LOGGED_IN ?
3098 (session->state == ISCSI_STATE_TERMINATE ||
3099 conn->tmabort_state != TMABORT_INITIAL) :
3100 (session->state == ISCSI_STATE_TERMINATE ||
3101 session->state == ISCSI_STATE_LOGGED_IN)));
3104 session->state = ISCSI_STATE_TERMINATE;
3108 if (signal_pending(current))
3109 flush_signals(current);
3111 if (session->state == ISCSI_STATE_TERMINATE)
3114 spin_lock_bh(&session->lock);
3115 if (sc->SCp.phase == session->age &&
3116 (conn->tmabort_state == TMABORT_TIMEDOUT ||
3117 conn->tmabort_state == TMABORT_FAILED)) {
3118 conn->tmabort_state = TMABORT_INITIAL;
3121 * ctask completed before the TMF abort response (or its timeout),
3123 * but the session is still OK => happy retry.
3125 spin_unlock_bh(&session->lock);
3128 spin_unlock_bh(&session->lock);
3129 iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
3132 spin_unlock_bh(&session->lock);
3137 debug_scsi("abort success [sc %lx itt 0x%x]\n", (long)sc, ctask->itt);
3142 debug_scsi("abort failed [sc %lx itt 0x%x]\n", (long)sc, ctask->itt);
3146 del_timer_sync(&conn->tmabort_timer);
3148 down(&conn->xmitsema);
3150 struct sock *sk = conn->sock->sk;
3152 write_lock_bh(&sk->sk_callback_lock);
3153 iscsi_ctask_cleanup(conn, ctask);
3154 write_unlock_bh(&sk->sk_callback_lock);
3156 up(&conn->xmitsema);
3161 iscsi_r2tpool_alloc(struct iscsi_session *session)
3167 * initialize per-task: R2T pool and xmit queue
3169 for (cmd_i = 0; cmd_i < session->cmds_max; cmd_i++) {
3170 struct iscsi_cmd_task *ctask = session->cmds[cmd_i];
3173 * pre-allocate four times as many R2Ts to handle the race
3174 * where the target acks DataOut faster than data_xmit()
3175 * can replenish the r2tqueue.
3179 if (iscsi_pool_init(&ctask->r2tpool, session->max_r2t * 4,
3180 (void***)&ctask->r2ts, sizeof(struct iscsi_r2t_info))) {
3181 goto r2t_alloc_fail;
3184 /* R2T xmit queue */
3185 ctask->r2tqueue = kfifo_alloc(
3186 session->max_r2t * 4 * sizeof(void*), GFP_KERNEL, NULL);
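/* sized to match the r2t pool, so every pre-allocated R2T fits */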
3187 if (ctask->r2tqueue == ERR_PTR(-ENOMEM)) {
3188 iscsi_pool_free(&ctask->r2tpool, (void**)ctask->r2ts);