@@ -809,7 +809,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);
 *
 * You can see, it is legitimate for the previous pointer of
 * the head (or any page) not to point back to itself. But only
- * temporarially.
+ * temporarily.
 */

#define RB_PAGE_NORMAL		0UL
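The comment fixed above belongs to a larger block describing the lockless list scheme: the two low bits of the page list pointers encode the page's role, which is why a ->prev pointer can legitimately (but only temporarily) not point back. A minimal sketch of the decoding, assuming the mainline flag constants and mask (RB_PAGE_HEAD, RB_PAGE_UPDATE, and RB_FLAG_MASK are taken on trust from the surrounding file, not from this hunk):

#define RB_PAGE_HEAD	1UL	/* marks the pointer leading to the head page */
#define RB_PAGE_UPDATE	2UL	/* a writer is in the middle of moving the head */
#define RB_FLAG_MASK	3UL	/* low bits reserved for the flags above */

/* Strip the flag bits to recover the real list pointer. */
static struct list_head *rb_list_head(struct list_head *list)
{
	unsigned long val = (unsigned long)list;

	return (struct list_head *)(val & ~RB_FLAG_MASK);
}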
@@ -906,7 +906,7 @@ static void rb_list_head_clear(struct list_head *list)
}

/*
- * rb_head_page_dactivate - clears head page ptr (for free list)
+ * rb_head_page_deactivate - clears head page ptr (for free list)
 */
static void
rb_head_page_deactivate(struct ring_buffer_per_cpu *cpu_buffer)
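rb_head_page_deactivate(), whose doc line is corrected here, undoes that pointer tagging before the pages go back to the free list. A sketch of what the body amounts to, reusing the rb_list_head_clear() helper visible in the hunk header; treat it as an illustration of the walk, not the verbatim implementation:

static void
rb_head_page_deactivate(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct list_head *hd;

	/* Clear the flag bits from every pointer in the page list so
	 * the pages can be walked and freed as a plain list. */
	rb_list_head_clear(cpu_buffer->pages);

	list_for_each(hd, cpu_buffer->pages)
		rb_list_head_clear(hd);
}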
@@ -1780,7 +1780,7 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size,

		put_online_cpus();
	} else {
-		/* Make sure this CPU has been intitialized */
+		/* Make sure this CPU has been initialized */
		if (!cpumask_test_cpu(cpu_id, buffer->cpumask))
			goto out;
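For context, the branch being commented is the single-CPU path of ring_buffer_resize() (signature in the hunk header). A hedged caller-side sketch; the size and CPU id are made up, and RING_BUFFER_ALL_CPUS is how callers hit the other (all-CPUs) branch:

/* Resize only CPU 2's buffer to 1 MiB. If CPU 2 was never set in
 * buffer->cpumask, the "goto out" path above makes this fail early. */
int ret = ring_buffer_resize(buffer, 1 << 20, 2);

/* Or resize every per-CPU buffer at once: */
ret = ring_buffer_resize(buffer, 1 << 20, RING_BUFFER_ALL_CPUS);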
@@ -2325,7 +2325,7 @@ rb_update_event(struct ring_buffer_per_cpu *cpu_buffer,

	/*
	 * If we need to add a timestamp, then we
-	 * add it to the start of the resevered space.
+	 * add it to the start of the reserved space.
	 */
	if (unlikely(info->add_timestamp)) {
		bool abs = ring_buffer_time_stamp_abs(cpu_buffer->buffer);
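The layout the fixed comment describes, as a comment-only sketch (the exact on-page format is an assumption inferred from this comment, not shown in the hunk):

/*
 * With info->add_timestamp set, the reservation is laid out as:
 *
 *   [ time extend/stamp event ][ data event header | payload ]
 *   ^-- start of the reserved space
 *
 * The timestamp record is written first, and the data event
 * immediately follows it inside the same reservation.
 */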
@@ -2681,7 +2681,7 @@ trace_recursive_unlock(struct ring_buffer_per_cpu *cpu_buffer)
 * ring_buffer_nest_start - Allow to trace while nested
 * @buffer: The ring buffer to modify
 *
- * The ring buffer has a safty mechanism to prevent recursion.
+ * The ring buffer has a safety mechanism to prevent recursion.
 * But there may be a case where a trace needs to be done while
 * tracing something else. In this case, calling this function
 * will allow this function to nest within a currently active
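A usage sketch of the pairing this doc comment describes, built from APIs named elsewhere in this file (ring_buffer_lock_reserve(), ring_buffer_event_data(), ring_buffer_unlock_commit(), ring_buffer_nest_end()); struct my_payload is a hypothetical type:

struct ring_buffer_event *event;
struct my_payload *data;	/* hypothetical payload type */

/* Take an intentional extra nesting level so this reservation is
 * not rejected by the recursion protection. */
ring_buffer_nest_start(buffer);

event = ring_buffer_lock_reserve(buffer, sizeof(*data));
if (event) {
	data = ring_buffer_event_data(event);
	/* ... fill in *data ... */
	ring_buffer_unlock_commit(buffer, event);
}

ring_buffer_nest_end(buffer);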
@@ -2699,7 +2699,7 @@ void ring_buffer_nest_start(struct ring_buffer *buffer)
	preempt_disable_notrace();
	cpu = raw_smp_processor_id();
	cpu_buffer = buffer->buffers[cpu];
-	/* This is the shift value for the above recusive locking */
+	/* This is the shift value for the above recursive locking */
	cpu_buffer->nest += NESTED_BITS;
}
@@ -2718,7 +2718,7 @@ void ring_buffer_nest_end(struct ring_buffer *buffer)
	/* disabled by ring_buffer_nest_start() */
	cpu = raw_smp_processor_id();
	cpu_buffer = buffer->buffers[cpu];
-	/* This is the shift value for the above recusive locking */
+	/* This is the shift value for the above recursive locking */
	cpu_buffer->nest -= NESTED_BITS;
	preempt_enable_notrace();
}
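Why the fixed comments call ->nest a "shift value": the recursion check records one bit per context in ->current_context, and ->nest offsets which bit is used. A sketch modeled on the mainline check; the exact bit layout is an assumption:

/* One bit per context (normal, softirq, irq, NMI), shifted up by
 * ->nest, so an intentionally nested writer tests a disjoint bit. */
static int trace_recursive_lock_sketch(struct ring_buffer_per_cpu *cpu_buffer,
				       int bit)
{
	unsigned int val = cpu_buffer->current_context;

	if (val & (1 << (bit + cpu_buffer->nest)))
		return 1;	/* same context, same nest level: recursion */

	cpu_buffer->current_context = val | (1 << (bit + cpu_buffer->nest));
	return 0;
}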
@@ -2907,7 +2907,7 @@ rb_reserve_next_event(struct ring_buffer *buffer,
 * @buffer: the ring buffer to reserve from
 * @length: the length of the data to reserve (excluding event header)
 *
- * Returns a reseverd event on the ring buffer to copy directly to.
+ * Returns a reserved event on the ring buffer to copy directly to.
 * The user of this interface will need to get the body to write into
 * and can use the ring_buffer_event_data() interface.
 *
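The reserve/write/commit flow this doc comment describes, as a sketch; struct my_entry and entry are hypothetical, and the ring buffer calls are the ones the comment itself names or pairs with:

struct ring_buffer_event *event;
void *body;

event = ring_buffer_lock_reserve(buffer, sizeof(struct my_entry));
if (!event)
	return;		/* reservation failed */

body = ring_buffer_event_data(event);	/* get the body to write into */
memcpy(body, &entry, sizeof(struct my_entry));

ring_buffer_unlock_commit(buffer, event);	/* make it visible to readers */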
@@ -3009,7 +3009,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
 * This function lets the user discard an event in the ring buffer
 * and then that event will not be read later.
 *
- * This function only works if it is called before the the item has been
+ * This function only works if it is called before the item has been
 * committed. It will try to free the event from the ring buffer
 * if another event has not been added behind it.
 *
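A sketch of the discard path this comment documents: reserve, decide against the event before committing, and discard instead of commit. fill_entry() is a hypothetical predicate:

event = ring_buffer_lock_reserve(buffer, sizeof(struct my_entry));
if (!event)
	return;
body = ring_buffer_event_data(event);

if (!fill_entry(body)) {
	/* Changed our mind before the commit: discard the event.
	 * Per the comment above, this only works pre-commit. */
	ring_buffer_discard_commit(buffer, event);
	return;
}

ring_buffer_unlock_commit(buffer, event);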
@@ -4127,7 +4127,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_consume);
 * through the buffer. Memory is allocated, buffer recording
 * is disabled, and the iterator pointer is returned to the caller.
 *
- * Disabling buffer recordng prevents the reading from being
+ * Disabling buffer recording prevents the reading from being
 * corrupted. This is not a consuming read, so a producer is not
 * expected.
 *
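The iterator flow around ring_buffer_read_prepare(), sketched from the APIs in this file; handle_event() is a hypothetical consumer, and the GFP argument is an assumption (older kernels take only (buffer, cpu)):

struct ring_buffer_iter *iter;
struct ring_buffer_event *event;
u64 ts;

/* Allocates the iterator and disables recording on this CPU buffer,
 * the property the comment above is describing. */
iter = ring_buffer_read_prepare(buffer, cpu, GFP_KERNEL);
if (!iter)
	return;
ring_buffer_read_prepare_sync();
ring_buffer_read_start(iter);

/* Non-consuming walk: events stay in the buffer for other readers. */
while ((event = ring_buffer_read(iter, &ts)))
	handle_event(event, ts);	/* hypothetical consumer */

ring_buffer_read_finish(iter);	/* re-enables recording */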