ftrace: Fix regression where ftrace breaks when modules are loaded
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index 7b8ec02..6957aa2 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -138,6 +138,13 @@ void __trace_note_message(struct blk_trace *bt, const char *fmt, ...)
                     !blk_tracer_enabled))
                return;
 
+       /*
+        * If the BLK_TC_NOTIFY action mask isn't set, don't send any note
+        * message to the trace.
+        */
+       if (!(bt->act_mask & BLK_TC_NOTIFY))
+               return;
+
        local_irq_save(flags);
        buf = per_cpu_ptr(bt->msg_data, smp_processor_id());
        va_start(args, fmt);
@@ -696,28 +703,21 @@ void blk_trace_shutdown(struct request_queue *q)
  *
  **/
 static void blk_add_trace_rq(struct request_queue *q, struct request *rq,
-                                   u32 what)
+                            u32 what)
 {
        struct blk_trace *bt = q->blk_trace;
-       int rw = rq->cmd_flags & 0x03;
 
        if (likely(!bt))
                return;
 
-       if (rq->cmd_flags & REQ_DISCARD)
-               rw |= REQ_DISCARD;
-
-       if (rq->cmd_flags & REQ_SECURE)
-               rw |= REQ_SECURE;
-
        if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
                what |= BLK_TC_ACT(BLK_TC_PC);
-               __blk_add_trace(bt, 0, blk_rq_bytes(rq), rw,
+               __blk_add_trace(bt, 0, blk_rq_bytes(rq), rq->cmd_flags,
                                what, rq->errors, rq->cmd_len, rq->cmd);
        } else  {
                what |= BLK_TC_ACT(BLK_TC_FS);
-               __blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq), rw,
-                               what, rq->errors, 0, NULL);
+               __blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq),
+                               rq->cmd_flags, what, rq->errors, 0, NULL);
        }
 }
 
@@ -758,53 +758,58 @@ static void blk_add_trace_rq_complete(void *ignore,
  * @q:         queue the io is for
  * @bio:       the source bio
  * @what:      the action
+ * @error:     error, if any
  *
  * Description:
  *     Records an action against a bio. Will log the bio offset + size.
  *
  **/
 static void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
-                                    u32 what)
+                             u32 what, int error)
 {
        struct blk_trace *bt = q->blk_trace;
 
        if (likely(!bt))
                return;
 
+       if (!error && !bio_flagged(bio, BIO_UPTODATE))
+               error = EIO;
+
        __blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw, what,
-                       !bio_flagged(bio, BIO_UPTODATE), 0, NULL);
+                       error, 0, NULL);
 }
 
 static void blk_add_trace_bio_bounce(void *ignore,
                                     struct request_queue *q, struct bio *bio)
 {
-       blk_add_trace_bio(q, bio, BLK_TA_BOUNCE);
+       blk_add_trace_bio(q, bio, BLK_TA_BOUNCE, 0);
 }
 
 static void blk_add_trace_bio_complete(void *ignore,
-                                      struct request_queue *q, struct bio *bio)
+                                      struct request_queue *q, struct bio *bio,
+                                      int error)
 {
-       blk_add_trace_bio(q, bio, BLK_TA_COMPLETE);
+       blk_add_trace_bio(q, bio, BLK_TA_COMPLETE, error);
 }
 
 static void blk_add_trace_bio_backmerge(void *ignore,
                                        struct request_queue *q,
                                        struct bio *bio)
 {
-       blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE);
+       blk_add_trace_bio(q, bio, BLK_TA_BACKMERGE, 0);
 }
 
 static void blk_add_trace_bio_frontmerge(void *ignore,
                                         struct request_queue *q,
                                         struct bio *bio)
 {
-       blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE);
+       blk_add_trace_bio(q, bio, BLK_TA_FRONTMERGE, 0);
 }
 
 static void blk_add_trace_bio_queue(void *ignore,
                                    struct request_queue *q, struct bio *bio)
 {
-       blk_add_trace_bio(q, bio, BLK_TA_QUEUE);
+       blk_add_trace_bio(q, bio, BLK_TA_QUEUE, 0);
 }
 
 static void blk_add_trace_getrq(void *ignore,
@@ -812,7 +817,7 @@ static void blk_add_trace_getrq(void *ignore,
                                struct bio *bio, int rw)
 {
        if (bio)
-               blk_add_trace_bio(q, bio, BLK_TA_GETRQ);
+               blk_add_trace_bio(q, bio, BLK_TA_GETRQ, 0);
        else {
                struct blk_trace *bt = q->blk_trace;
 
@@ -827,7 +832,7 @@ static void blk_add_trace_sleeprq(void *ignore,
                                  struct bio *bio, int rw)
 {
        if (bio)
-               blk_add_trace_bio(q, bio, BLK_TA_SLEEPRQ);
+               blk_add_trace_bio(q, bio, BLK_TA_SLEEPRQ, 0);
        else {
                struct blk_trace *bt = q->blk_trace;
 
@@ -845,29 +850,21 @@ static void blk_add_trace_plug(void *ignore, struct request_queue *q)
                __blk_add_trace(bt, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL);
 }
 
-static void blk_add_trace_unplug_io(void *ignore, struct request_queue *q)
+static void blk_add_trace_unplug(void *ignore, struct request_queue *q,
+                                   unsigned int depth, bool explicit)
 {
        struct blk_trace *bt = q->blk_trace;
 
        if (bt) {
-               unsigned int pdu = q->rq.count[READ] + q->rq.count[WRITE];
-               __be64 rpdu = cpu_to_be64(pdu);
-
-               __blk_add_trace(bt, 0, 0, 0, BLK_TA_UNPLUG_IO, 0,
-                               sizeof(rpdu), &rpdu);
-       }
-}
-
-static void blk_add_trace_unplug_timer(void *ignore, struct request_queue *q)
-{
-       struct blk_trace *bt = q->blk_trace;
+               __be64 rpdu = cpu_to_be64(depth);
+               u32 what;
 
-       if (bt) {
-               unsigned int pdu = q->rq.count[READ] + q->rq.count[WRITE];
-               __be64 rpdu = cpu_to_be64(pdu);
+               if (explicit)
+                       what = BLK_TA_UNPLUG_IO;
+               else
+                       what = BLK_TA_UNPLUG_TIMER;
 
-               __blk_add_trace(bt, 0, 0, 0, BLK_TA_UNPLUG_TIMER, 0,
-                               sizeof(rpdu), &rpdu);
+               __blk_add_trace(bt, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu);
        }
 }
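
The two unplug handlers collapse into a single probe that receives the request depth and an "explicit" flag from the merged block_unplug tracepoint; consumers of the old block_unplug_io/block_unplug_timer events can distinguish the two cases from that flag instead. A hedged sketch of the caller side (the real call site lives in block/blk-core.c; note_unplugged() is only illustrative):

/* An explicit (IO-driven) unplug maps to BLK_TA_UNPLUG_IO, a schedule()-time
 * flush to BLK_TA_UNPLUG_TIMER, matching the switch in blk_add_trace_unplug(). */
static void note_unplugged(struct request_queue *q, unsigned int depth,
                           bool from_schedule)
{
        trace_block_unplug(q, depth, !from_schedule);
}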
 
@@ -887,7 +884,7 @@ static void blk_add_trace_split(void *ignore,
 }
 
 /**
- * blk_add_trace_remap - Add a trace for a remap operation
+ * blk_add_trace_bio_remap - Add a trace for a bio-remap operation
  * @ignore:    trace callback data parameter (not used)
  * @q:         queue the io is for
  * @bio:       the source bio
@@ -899,9 +896,9 @@ static void blk_add_trace_split(void *ignore,
  *     it spans a stripe (or similar). Add a trace for that action.
  *
  **/
-static void blk_add_trace_remap(void *ignore,
-                               struct request_queue *q, struct bio *bio,
-                               dev_t dev, sector_t from)
+static void blk_add_trace_bio_remap(void *ignore,
+                                   struct request_queue *q, struct bio *bio,
+                                   dev_t dev, sector_t from)
 {
        struct blk_trace *bt = q->blk_trace;
        struct blk_io_trace_remap r;
@@ -1010,13 +1007,11 @@ static void blk_register_tracepoints(void)
        WARN_ON(ret);
        ret = register_trace_block_plug(blk_add_trace_plug, NULL);
        WARN_ON(ret);
-       ret = register_trace_block_unplug_timer(blk_add_trace_unplug_timer, NULL);
-       WARN_ON(ret);
-       ret = register_trace_block_unplug_io(blk_add_trace_unplug_io, NULL);
+       ret = register_trace_block_unplug(blk_add_trace_unplug, NULL);
        WARN_ON(ret);
        ret = register_trace_block_split(blk_add_trace_split, NULL);
        WARN_ON(ret);
-       ret = register_trace_block_remap(blk_add_trace_remap, NULL);
+       ret = register_trace_block_bio_remap(blk_add_trace_bio_remap, NULL);
        WARN_ON(ret);
        ret = register_trace_block_rq_remap(blk_add_trace_rq_remap, NULL);
        WARN_ON(ret);
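
For out-of-tree probes the block_remap -> block_bio_remap rename is source-incompatible: the probe signature is unchanged, only the registration name moves. A minimal module sketch, assuming the tracepoint is visible to modules; my_remap_probe and the pr_debug text are hypothetical:

#include <linux/module.h>
#include <linux/blkdev.h>
#include <trace/events/block.h>

static void my_remap_probe(void *ignore, struct request_queue *q,
                           struct bio *bio, dev_t dev, sector_t from)
{
        pr_debug("bio remapped: sector %llu came from dev %u:%u\n",
                 (unsigned long long)from, MAJOR(dev), MINOR(dev));
}

static int __init remap_probe_init(void)
{
        /* was: register_trace_block_remap(my_remap_probe, NULL); */
        return register_trace_block_bio_remap(my_remap_probe, NULL);
}

static void __exit remap_probe_exit(void)
{
        unregister_trace_block_bio_remap(my_remap_probe, NULL);
        tracepoint_synchronize_unregister();
}

module_init(remap_probe_init);
module_exit(remap_probe_exit);
MODULE_LICENSE("GPL");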
@@ -1025,10 +1020,9 @@ static void blk_register_tracepoints(void)
 static void blk_unregister_tracepoints(void)
 {
        unregister_trace_block_rq_remap(blk_add_trace_rq_remap, NULL);
-       unregister_trace_block_remap(blk_add_trace_remap, NULL);
+       unregister_trace_block_bio_remap(blk_add_trace_bio_remap, NULL);
        unregister_trace_block_split(blk_add_trace_split, NULL);
-       unregister_trace_block_unplug_io(blk_add_trace_unplug_io, NULL);
-       unregister_trace_block_unplug_timer(blk_add_trace_unplug_timer, NULL);
+       unregister_trace_block_unplug(blk_add_trace_unplug, NULL);
        unregister_trace_block_plug(blk_add_trace_plug, NULL);
        unregister_trace_block_sleeprq(blk_add_trace_sleeprq, NULL);
        unregister_trace_block_getrq(blk_add_trace_getrq, NULL);
@@ -1815,21 +1809,5 @@ void blk_fill_rwbs(char *rwbs, u32 rw, int bytes)
        rwbs[i] = '\0';
 }
 
-void blk_fill_rwbs_rq(char *rwbs, struct request *rq)
-{
-       int rw = rq->cmd_flags & 0x03;
-       int bytes;
-
-       if (rq->cmd_flags & REQ_DISCARD)
-               rw |= REQ_DISCARD;
-
-       if (rq->cmd_flags & REQ_SECURE)
-               rw |= REQ_SECURE;
-
-       bytes = blk_rq_bytes(rq);
-
-       blk_fill_rwbs(rwbs, rw, bytes);
-}
-
 #endif /* CONFIG_EVENT_TRACING */
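
With blk_fill_rwbs_rq() removed, callers that used to hand over a whole struct request decode the flags themselves through the surviving blk_fill_rwbs(). A hedged sketch of the equivalent call; fill_rq_rwbs() is a hypothetical wrapper and the buffer size is up to the caller:

#include <linux/blkdev.h>
#include <linux/blktrace_api.h>

/* Former blk_fill_rwbs_rq(rwbs, rq) callers pass the raw cmd_flags and byte
 * count instead; blk_fill_rwbs() only looks at the bits it understands. */
static void fill_rq_rwbs(char *rwbs, struct request *rq)
{
        blk_fill_rwbs(rwbs, rq->cmd_flags, blk_rq_bytes(rq));
}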