}
return 0;
}
-EXPORT_SYMBOL(blk_rq_append_bio);
static int __blk_rq_unmap_user(struct bio *bio)
{
/*
 * if alignment requirement is satisfied, map in user pages for
 * direct dma. else, set up kernel bounce buffers
 */
uaddr = (unsigned long) ubuf;
- if (blk_rq_aligned(q, ubuf, len) && !map_data)
+ if (blk_rq_aligned(q, uaddr, len) && !map_data)
bio = bio_map_user(q, NULL, uaddr, len, reading, gfp_mask);
else
bio = bio_copy_user(q, map_data, uaddr, len, reading, gfp_mask);
struct bio *bio = NULL;
int ret;
- if (len > (q->max_hw_sectors << 9))
+ if (len > (queue_max_hw_sectors(q) << 9))
return -EINVAL;
if (!len)
return -EINVAL;
for (i = 0; i < iov_count; i++) {
unsigned long uaddr = (unsigned long)iov[i].iov_base;
- if (uaddr & queue_dma_alignment(q)) {
+ if (!iov[i].iov_len)
+ return -EINVAL;
+
+ /*
+ * Keep going so we check length of all segments
+ */
+ if (uaddr & queue_dma_alignment(q))
unaligned = 1;
- break;
- }
}
if (unaligned || (q->dma_pad_mask & len) || map_data)
bio = bio_copy_user_iov(q, map_data, iov, iov_count, read, gfp_mask);
else
bio = bio_map_user_iov(q, NULL, iov, iov_count, read, gfp_mask);
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
unsigned int len, gfp_t gfp_mask)
{
int reading = rq_data_dir(rq) == READ;
+ unsigned long addr = (unsigned long) kbuf;
int do_copy = 0;
struct bio *bio;
int ret;
- if (len > (q->max_hw_sectors << 9))
+ if (len > (queue_max_hw_sectors(q) << 9))
return -EINVAL;
if (!len || !kbuf)
return -EINVAL;
- do_copy = !blk_rq_aligned(q, kbuf, len) || object_is_on_stack(kbuf);
+ do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
if (do_copy)
bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
else
bio = bio_map_kern(q, kbuf, len, gfp_mask);
if (IS_ERR(bio))
return PTR_ERR(bio);
- if (rq_data_dir(rq) == WRITE)
- bio->bi_rw |= (1 << BIO_RW);
+ if (!reading)
+ bio->bi_rw |= REQ_WRITE;
if (do_copy)
rq->cmd_flags |= REQ_COPY_USER;
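
For reference, a minimal sketch of a caller after these changes, assuming the 2.6.3x request API (blk_get_request(), blk_rq_map_kern(), blk_execute_rq(), blk_put_request()); the helper name my_read_buf() and the surrounding driver context are made up for illustration and are not part of the patch:

#include <linux/blkdev.h>
#include <linux/gfp.h>

static int my_read_buf(struct request_queue *q, struct gendisk *disk,
		       void *buf, unsigned int len)
{
	struct request *rq;
	int err;

	rq = blk_get_request(q, READ, GFP_KERNEL);
	if (!rq)
		return -ENOMEM;
	rq->cmd_type = REQ_TYPE_BLOCK_PC;	/* a real caller also fills in the command */

	/*
	 * blk_rq_map_kern() takes the do_copy path above (a bounce copy)
	 * when buf is misaligned for the queue or lives on the stack.
	 */
	err = blk_rq_map_kern(q, rq, buf, len, GFP_KERNEL);
	if (err)
		goto out;

	err = blk_execute_rq(q, disk, rq, 0);
out:
	blk_put_request(rq);
	return err;
}

The object_is_on_stack() test in the do_copy decision exists because DMA into stack memory is unsafe, so such buffers are always bounced through bio_copy_kern() rather than mapped directly.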