ring-buffer: Add missing unlock
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 0287f9f52f5ae48098ce2694053121a640e5b3a2..9a0f9bf6a37b37295a5e4bf6a52d03b3472905fa 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -207,6 +207,14 @@ EXPORT_SYMBOL_GPL(tracing_is_on);
 #define RB_MAX_SMALL_DATA      (RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
 #define RB_EVNT_MIN_SIZE       8U      /* two 32bit words */
 
+#if !defined(CONFIG_64BIT) || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+# define RB_FORCE_8BYTE_ALIGNMENT      0
+# define RB_ARCH_ALIGNMENT             RB_ALIGNMENT
+#else
+# define RB_FORCE_8BYTE_ALIGNMENT      1
+# define RB_ARCH_ALIGNMENT             8U
+#endif
+
 /* define RINGBUF_TYPE_DATA for 'case RINGBUF_TYPE_DATA:' */
 #define RINGBUF_TYPE_DATA 0 ... RINGBUF_TYPE_DATA_TYPE_LEN_MAX
 
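On 64-bit kernels that lack CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS, the new RB_FORCE_8BYTE_ALIGNMENT/RB_ARCH_ALIGNMENT pair pads events out to 8-byte boundaries so 64-bit loads from the payload stay naturally aligned; every other configuration keeps the existing 4-byte RB_ALIGNMENT. A standalone sketch of how the macros resolve, with the Kconfig symbols simulated by plain defines rather than taken from a real .config:

    /* Standalone sketch, not part of the patch: fake the Kconfig symbols to
     * show how RB_ARCH_ALIGNMENT resolves for one configuration. */
    #include <stdio.h>

    #define RB_ALIGNMENT 4U

    #define CONFIG_64BIT 1     /* pretend: 64-bit build */
    /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS deliberately left undefined */

    #if !defined(CONFIG_64BIT) || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
    # define RB_FORCE_8BYTE_ALIGNMENT      0
    # define RB_ARCH_ALIGNMENT             RB_ALIGNMENT
    #else
    # define RB_FORCE_8BYTE_ALIGNMENT      1
    # define RB_ARCH_ALIGNMENT             8U
    #endif

    int main(void)
    {
            /* Prints "force=1 align=8" for this simulated configuration. */
            printf("force=%d align=%u\n", RB_FORCE_8BYTE_ALIGNMENT, RB_ARCH_ALIGNMENT);
            return 0;
    }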
@@ -1201,18 +1209,19 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
 
        for (i = 0; i < nr_pages; i++) {
                if (RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)))
-                       return;
+                       goto out;
                p = cpu_buffer->pages->next;
                bpage = list_entry(p, struct buffer_page, list);
                list_del_init(&bpage->list);
                free_buffer_page(bpage);
        }
        if (RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)))
-               return;
+               goto out;
 
        rb_reset_cpu(cpu_buffer);
        rb_check_pages(cpu_buffer);
 
+out:
        spin_unlock_irq(&cpu_buffer->reader_lock);
 }
 
@@ -1229,7 +1238,7 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
 
        for (i = 0; i < nr_pages; i++) {
                if (RB_WARN_ON(cpu_buffer, list_empty(pages)))
-                       return;
+                       goto out;
                p = pages->next;
                bpage = list_entry(p, struct buffer_page, list);
                list_del_init(&bpage->list);
@@ -1238,6 +1247,7 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
        rb_reset_cpu(cpu_buffer);
        rb_check_pages(cpu_buffer);
 
+out:
        spin_unlock_irq(&cpu_buffer->reader_lock);
 }
 
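The rb_remove_pages and rb_insert_pages hunks above fix the bug named in the subject line: the old early returns fired while cpu_buffer->reader_lock was still held, so a single RB_WARN_ON would leave the spinlock locked for good. Funneling every exit through the new out: label guarantees the spin_unlock_irq() runs. A reduced user-space sketch of the same pattern, with hypothetical names and a pthread mutex standing in for the kernel spinlock:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t reader_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Hypothetical stand-in for RB_WARN_ON(): report and return the condition. */
    static bool warn_on(bool cond)
    {
            if (cond)
                    fprintf(stderr, "warn: unexpected empty page list\n");
            return cond;
    }

    static void remove_pages(int nr_pages, bool list_empty)
    {
            pthread_mutex_lock(&reader_lock);

            for (int i = 0; i < nr_pages; i++) {
                    if (warn_on(list_empty))
                            goto out;       /* a bare "return" here would leak the lock */
                    /* ... unlink and free one buffer page ... */
            }

    out:
            pthread_mutex_unlock(&reader_lock);     /* every path releases the lock */
    }

    int main(void)
    {
            remove_pages(4, true);          /* error path still unlocks */
            remove_pages(4, false);         /* normal path */
            return 0;
    }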
@@ -1547,7 +1557,7 @@ rb_update_event(struct ring_buffer_event *event,
 
        case 0:
                length -= RB_EVNT_HDR_SIZE;
-               if (length > RB_MAX_SMALL_DATA)
+               if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT)
                        event->array[0] = length;
                else
                        event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT);
@@ -1722,11 +1732,11 @@ static unsigned rb_calculate_event_length(unsigned length)
        if (!length)
                length = 1;
 
-       if (length > RB_MAX_SMALL_DATA)
+       if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT)
                length += sizeof(event.array[0]);
 
        length += RB_EVNT_HDR_SIZE;
-       length = ALIGN(length, RB_ALIGNMENT);
+       length = ALIGN(length, RB_ARCH_ALIGNMENT);
 
        return length;
 }
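The sizing change can be checked by hand: with a 4-byte event header, a 3-byte payload used to cost ALIGN(3 + 4, 4) = 8 bytes, while the forced-8-byte path always adds the explicit array[0] length word (the matching "|| RB_FORCE_8BYTE_ALIGNMENT" test in rb_update_event keeps type_len zero, so that word is actually consumed on the read side) and rounds up to ALIGN(3 + 4 + 4, 8) = 16 bytes. A standalone sketch of that arithmetic, with the constants written out by hand rather than taken from the headers:

    #include <stdio.h>

    #define ALIGN(x, a)        (((x) + (a) - 1) & ~((a) - 1))

    #define RB_ALIGNMENT       4U
    #define RB_EVNT_HDR_SIZE   4U                    /* offsetof(..., array) */
    #define RB_MAX_SMALL_DATA  (RB_ALIGNMENT * 28)   /* 28 = RINGBUF_TYPE_DATA_TYPE_LEN_MAX */

    /* Hypothetical helper mirroring rb_calculate_event_length(), with the
     * configuration passed as arguments instead of compile-time macros. */
    static unsigned int calc_length(unsigned int length, int force_8byte,
                                    unsigned int arch_align)
    {
            if (!length)
                    length = 1;

            if (length > RB_MAX_SMALL_DATA || force_8byte)
                    length += 4;            /* explicit array[0] length word */

            length += RB_EVNT_HDR_SIZE;
            return ALIGN(length, arch_align);
    }

    int main(void)
    {
            /* 3-byte payload: 8 bytes with 4-byte alignment, 16 when forced to 8. */
            printf("%u %u\n", calc_length(3, 0, 4U), calc_length(3, 1, 8U));
            return 0;
    }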
@@ -2233,12 +2243,12 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
        if (ring_buffer_flags != RB_BUFFERS_ON)
                return NULL;
 
-       if (atomic_read(&buffer->record_disabled))
-               return NULL;
-
        /* If we are tracing schedule, we don't want to recurse */
        resched = ftrace_preempt_disable();
 
+       if (atomic_read(&buffer->record_disabled))
+               goto out_nocheck;
+
        if (trace_recursive_lock())
                goto out_nocheck;
 
@@ -2470,11 +2480,11 @@ int ring_buffer_write(struct ring_buffer *buffer,
        if (ring_buffer_flags != RB_BUFFERS_ON)
                return -EBUSY;
 
-       if (atomic_read(&buffer->record_disabled))
-               return -EBUSY;
-
        resched = ftrace_preempt_disable();
 
+       if (atomic_read(&buffer->record_disabled))
+               goto out;
+
        cpu = raw_smp_processor_id();
 
        if (!cpumask_test_cpu(cpu, buffer->cpumask))
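The ring_buffer_lock_reserve and ring_buffer_write hunks above move the record_disabled test from before ftrace_preempt_disable() to after it and route the bail-out through the existing labels, so the check is made with preemption already off and the early exit still reaches the matching ftrace_preempt_enable(), keeping the preempt count balanced. The usual reason for this ordering is that the resize/reset paths disable recording and then wait with synchronize_sched(), which only excludes writers that perform their check inside a preempt-off region. A reduced user-space sketch of the resulting shape, with all names as hypothetical stand-ins:

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical stand-ins for ftrace_preempt_disable()/ftrace_preempt_enable(). */
    static int preempt_count;
    static int fake_preempt_disable(void) { return ++preempt_count; }
    static void fake_preempt_enable(int resched) { (void)resched; --preempt_count; }

    static bool record_disabled;

    static int reserve_event(void)
    {
            int ret = -1;
            int resched = fake_preempt_disable();

            /* Check the flag only after "preemption" is off, as in the hunks above. */
            if (record_disabled)
                    goto out;

            /* ... reserve space in the buffer and commit the event ... */
            ret = 0;
    out:
            fake_preempt_enable(resched);   /* early exit keeps the count balanced */
            return ret;
    }

    int main(void)
    {
            record_disabled = true;
            printf("reserve=%d preempt_count=%d\n", reserve_event(), preempt_count);
            return 0;
    }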
@@ -2542,7 +2552,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_record_disable);
  * @buffer: The ring buffer to enable writes
  *
  * Note, multiple disables will need the same number of enables
- * to truely enable the writing (much like preempt_disable).
+ * to truly enable the writing (much like preempt_disable).
  */
 void ring_buffer_record_enable(struct ring_buffer *buffer)
 {
@@ -2578,7 +2588,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);
  * @cpu: The CPU to enable.
  *
  * Note, multiple disables will need the same number of enables
- * to truely enable the writing (much like preempt_disable).
+ * to truly enable the writing (much like preempt_disable).
  */
 void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
 {