block: drop @tsk from attempt_plug_merge() and explain sync rules
[linux-2.6.git] / block / blk-iopoll.c
index 0671d46..58916af 100644 (file)
@@ -28,7 +28,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_iopoll);
  * Description:
  *     Add this blk_iopoll structure to the pending poll list and trigger the
  *     raise of the blk iopoll softirq. The driver must already have gotten a
- *     succesful return from blk_iopoll_sched_prep() before calling this.
+ *     successful return from blk_iopoll_sched_prep() before calling this.
  **/
 void blk_iopoll_sched(struct blk_iopoll *iop)
 {
@@ -115,9 +115,12 @@ static void blk_iopoll_softirq(struct softirq_action *h)
 
                local_irq_disable();
 
-               /* Drivers must not modify the NAPI state if they
-                * consume the entire weight.  In such cases this code
-                * still "owns" the NAPI instance and therefore can
+               /*
+                * Drivers must not modify the iopoll state if they
+                * consume their assigned weight (or more, some drivers can't
+                * easily just stop processing, they have to complete an
+                * entire mask of commands). In such cases this code
+                * still "owns" the iopoll instance and therefore can
                 * move the instance around on the list at-will.
                 */
                if (work >= weight) {
@@ -199,7 +202,7 @@ static int __cpuinit blk_iopoll_cpu_notify(struct notifier_block *self,
                local_irq_disable();
                list_splice_init(&per_cpu(blk_cpu_iopoll, cpu),
                                 &__get_cpu_var(blk_cpu_iopoll));
-               raise_softirq_irqoff(BLOCK_IOPOLL_SOFTIRQ);
+               __raise_softirq_irqoff(BLOCK_IOPOLL_SOFTIRQ);
                local_irq_enable();
        }