@@ -150,7 +150,7 @@ static int traverse(struct seq_file *m, loff_t offset)
  */
 ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
 {
-        struct iovec iov = { .iov_base = buf, .iov_len = size};
+        struct iovec iov = { .iov_base = buf, .iov_len = size };
         struct kiocb kiocb;
         struct iov_iter iter;
         ssize_t ret;
@@ -217,20 +217,20 @@ ssize_t seq_read_iter(struct kiocb *iocb, struct iov_iter *iter)
                 m->count -= n;
                 m->from += n;
                 copied += n;
-                if (m->count)   // hadn't managed to copy everything
+                if (m->count) // hadn't managed to copy everything
                         goto Done;
         }
         // get a non-empty record in the buffer
         m->from = 0;
         p = m->op->start(m, &m->index);
         while (1) {
                 err = PTR_ERR(p);
-                if (!p || IS_ERR(p))    // EOF or an error
+                if (!p || IS_ERR(p)) // EOF or an error
                         break;
                 err = m->op->show(m, p);
-                if (err < 0)            // hard error
+                if (err < 0) // hard error
                         break;
-                if (unlikely(err))      // ->show() says "skip it"
+                if (unlikely(err)) // ->show() says "skip it"
                         m->count = 0;
                 if (unlikely(!m->count)) { // empty record
                         p = m->op->next(m, p, &m->index);
@@ -261,16 +261,17 @@ ssize_t seq_read_iter(struct kiocb *iocb, struct iov_iter *iter)
 
                 p = m->op->next(m, p, &m->index);
                 if (pos == m->index) {
-                        pr_info_ratelimited("buggy .next function %ps did not update position index\n",
-                                            m->op->next);
+                        pr_info_ratelimited(
+                                "buggy .next function %ps did not update position index\n",
+                                m->op->next);
                         m->index++;
                 }
-                if (!p || IS_ERR(p))    // no next record for us
+                if (!p || IS_ERR(p)) // no next record for us
                         break;
                 if (m->count >= iov_iter_count(iter))
                         break;
                 err = m->op->show(m, p);
-                if (err > 0) {          // ->show() says "skip it"
+                if (err > 0) { // ->show() says "skip it"
                         m->count = offs;
                 } else if (err || seq_has_overflowed(m)) {
                         m->count = offs;
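
For orientation, seq_read_iter() above is the generic read path: it calls the provider's ->start() to position the iterator, ->show() to format one record into the buffer, ->next() to advance the position index (hence the ratelimited warning when ->next() forgets to do so), and ->stop() to drop any locks. A minimal sketch of such a provider, with purely illustrative names (demo_items, demo_seq_ops), could look like this:

/* Minimal seq_file provider sketch; all names here are illustrative. */
#include <linux/kernel.h>
#include <linux/seq_file.h>

static const char *const demo_items[] = { "alpha", "beta", "gamma" };

static void *demo_start(struct seq_file *m, loff_t *pos)
{
        return *pos < ARRAY_SIZE(demo_items) ? (void *)&demo_items[*pos] : NULL;
}

static void *demo_next(struct seq_file *m, void *v, loff_t *pos)
{
        ++*pos;         /* must advance, or seq_read_iter() warns and bumps it */
        return *pos < ARRAY_SIZE(demo_items) ? (void *)&demo_items[*pos] : NULL;
}

static void demo_stop(struct seq_file *m, void *v)
{
        /* nothing to unlock in this sketch */
}

static int demo_show(struct seq_file *m, void *v)
{
        seq_printf(m, "%s\n", *(const char *const *)v);
        return 0;       /* a positive return would mean "skip this record" */
}

static const struct seq_operations demo_seq_ops = {
        .start = demo_start,
        .next  = demo_next,
        .stop  = demo_stop,
        .show  = demo_show,
};
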
@@ -591,7 +592,7 @@ int single_open(struct file *file, int (*show)(struct seq_file *, void *),
 EXPORT_SYMBOL(single_open);
 
 int single_open_size(struct file *file, int (*show)(struct seq_file *, void *),
-                void *data, size_t size)
+                     void *data, size_t size)
 {
         char *buf = seq_buf_alloc(size);
         int ret;
@@ -610,7 +611,8 @@ EXPORT_SYMBOL(single_open_size);
 
 int single_release(struct inode *inode, struct file *file)
 {
-        const struct seq_operations *op = ((struct seq_file *)file->private_data)->op;
+        const struct seq_operations *op =
+                ((struct seq_file *)file->private_data)->op;
         int res = seq_release(inode, file);
         kfree(op);
         return res;
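
single_open() and single_release() above serve the common case of a virtual file that is one ->show() call with no iteration: single_open() allocates a one-off seq_operations table, which is why single_release() has to fetch it back through file->private_data and kfree() it. A hedged usage sketch for a procfs file (hello_show, hello_proc_ops and the entry name are hypothetical):

/* Usage sketch for single_open()/single_release(); names are hypothetical. */
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int hello_show(struct seq_file *m, void *v)
{
        seq_puts(m, "hello from a single-record seq_file\n");
        return 0;
}

static int hello_open(struct inode *inode, struct file *file)
{
        return single_open(file, hello_show, NULL);
}

static const struct proc_ops hello_proc_ops = {
        .proc_open    = hello_open,
        .proc_read    = seq_read,
        .proc_lseek   = seq_lseek,
        .proc_release = single_release,
};

/* e.g. proc_create("hello_demo", 0444, NULL, &hello_proc_ops); */
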
@@ -628,13 +630,14 @@ int seq_release_private(struct inode *inode, struct file *file)
 EXPORT_SYMBOL(seq_release_private);
 
 void *__seq_open_private(struct file *f, const struct seq_operations *ops,
-                int psize)
+                         int psize)
 {
         int rc;
         void *private;
         struct seq_file *seq;
 
-        private = kzalloc(psize, GFP_KERNEL_ACCOUNT);
+        private
+                = kzalloc(psize, GFP_KERNEL_ACCOUNT);
         if (private == NULL)
                 goto out;
 
@@ -654,7 +657,7 @@ void *__seq_open_private(struct file *f, const struct seq_operations *ops,
 EXPORT_SYMBOL(__seq_open_private);
 
 int seq_open_private(struct file *filp, const struct seq_operations *ops,
-                int psize)
+                     int psize)
 {
         return __seq_open_private(filp, ops, psize) ? 0 : -ENOMEM;
 }
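
__seq_open_private() and seq_open_private() above combine seq_open() with allocation of a zeroed, per-open buffer that later calls reach through the seq_file's private pointer; seq_release_private() is the matching teardown. A sketch, reusing demo_seq_ops from the earlier sketch and a hypothetical struct demo_cursor:

/* Sketch: per-open state via seq_open_private(); struct demo_cursor is hypothetical. */
#include <linux/seq_file.h>

struct demo_cursor {
        int last_index;         /* example state carried across read() calls */
};

static int demo_open(struct inode *inode, struct file *file)
{
        /* allocates and zeroes a struct demo_cursor, stored in m->private */
        return seq_open_private(file, &demo_seq_ops, sizeof(struct demo_cursor));
}

/* Inside any demo_seq_ops callback the state is then reachable as: */
/*         struct demo_cursor *cur = m->private;                    */
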
@@ -696,7 +699,7 @@ EXPORT_SYMBOL(seq_puts);
  * In usual cases, it will be better to use seq_printf(). It's easier to read.
  */
 void seq_put_decimal_ull_width(struct seq_file *m, const char *delimiter,
-                         unsigned long long num, unsigned int width)
+                               unsigned long long num, unsigned int width)
 {
         int len;
 
@@ -747,7 +750,7 @@ EXPORT_SYMBOL(seq_put_decimal_ull);
  * In usual cases, it will be better to use seq_printf(). It's easier to read.
  */
 void seq_put_hex_ll(struct seq_file *m, const char *delimiter,
-                                unsigned long long v, unsigned int width)
+                    unsigned long long v, unsigned int width)
 {
         unsigned int len;
         int i;
@@ -780,7 +783,8 @@ void seq_put_hex_ll(struct seq_file *m, const char *delimiter,
         m->count += len;
 }
 
-void seq_put_decimal_ll(struct seq_file *m, const char *delimiter, long long num)
+void seq_put_decimal_ll(struct seq_file *m, const char *delimiter,
+                        long long num)
 {
         int len;
 
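
The seq_put_decimal_ull_width(), seq_put_hex_ll() and seq_put_decimal_ll() helpers touched in the three hunks above are fast-path alternatives to seq_printf() for files that emit many numbers (the /proc/stat style of output); the delimiter string is written before each value. A small illustrative ->show() body, with made-up values:

/* Illustrative use of the seq_put_* fast-path helpers; values are made up. */
#include <linux/seq_file.h>

static int stats_show(struct seq_file *m, void *v)
{
        unsigned long long calls = 12345;
        long long balance = -17;

        seq_puts(m, "calls");
        seq_put_decimal_ull(m, " ", calls);     /* appends " 12345"  */
        seq_put_decimal_ll(m, " ", balance);    /* appends " -17"    */
        seq_put_hex_ll(m, " 0x", calls, 4);     /* appends " 0x3039" */
        seq_putc(m, '\n');
        return 0;
}
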
@@ -904,7 +908,7 @@ struct list_head *seq_list_start(struct list_head *head, loff_t pos)
 {
         struct list_head *lh;
 
-        list_for_each(lh, head)
+        list_for_each (lh, head)
                 if (pos-- == 0)
                         return lh;
 
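
seq_list_start() above (together with its seq_list_next() counterpart) lets a provider that keeps its records on a plain list_head delegate the position bookkeeping: ->start() maps the loff_t offset onto a node and ->next() advances it. A sketch, assuming a hypothetical entry type and a spinlock held across the traversal:

/* Sketch of list-backed seq_operations; item_list and item_lock are hypothetical. */
#include <linux/list.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>

struct item {
        struct list_head node;
        int value;
};

static LIST_HEAD(item_list);
static DEFINE_SPINLOCK(item_lock);

static void *item_start(struct seq_file *m, loff_t *pos)
{
        spin_lock(&item_lock);                  /* held until ->stop() */
        return seq_list_start(&item_list, *pos);
}

static void *item_next(struct seq_file *m, void *v, loff_t *pos)
{
        return seq_list_next(v, &item_list, pos);
}

static void item_stop(struct seq_file *m, void *v)
{
        spin_unlock(&item_lock);
}

static int item_show(struct seq_file *m, void *v)
{
        struct item *it = list_entry(v, struct item, node);

        seq_printf(m, "%d\n", it->value);
        return 0;
}
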
@@ -942,7 +946,7 @@ struct hlist_node *seq_hlist_start(struct hlist_head *head, loff_t pos)
 {
         struct hlist_node *node;
 
-        hlist_for_each(node, head)
+        hlist_for_each (node, head)
                 if (pos-- == 0)
                         return node;
         return NULL;
@@ -998,12 +1002,11 @@ EXPORT_SYMBOL(seq_hlist_next);
  * the _rcu list-mutation primitives such as hlist_add_head_rcu()
  * as long as the traversal is guarded by rcu_read_lock().
  */
-struct hlist_node *seq_hlist_start_rcu(struct hlist_head *head,
-                                       loff_t pos)
+struct hlist_node *seq_hlist_start_rcu(struct hlist_head *head, loff_t pos)
 {
         struct hlist_node *node;
 
-        __hlist_for_each_rcu(node, head)
+        __hlist_for_each_rcu (node, head)
                 if (pos-- == 0)
                         return node;
         return NULL;
@@ -1022,8 +1025,7 @@ EXPORT_SYMBOL(seq_hlist_start_rcu);
  * the _rcu list-mutation primitives such as hlist_add_head_rcu()
  * as long as the traversal is guarded by rcu_read_lock().
  */
-struct hlist_node *seq_hlist_start_head_rcu(struct hlist_head *head,
-                                            loff_t pos)
+struct hlist_node *seq_hlist_start_head_rcu(struct hlist_head *head, loff_t pos)
 {
         if (!pos)
                 return SEQ_START_TOKEN;
@@ -1044,8 +1046,7 @@ EXPORT_SYMBOL(seq_hlist_start_head_rcu);
  * the _rcu list-mutation primitives such as hlist_add_head_rcu()
  * as long as the traversal is guarded by rcu_read_lock().
  */
-struct hlist_node *seq_hlist_next_rcu(void *v,
-                                      struct hlist_head *head,
+struct hlist_node *seq_hlist_next_rcu(void *v, struct hlist_head *head,
                                       loff_t *ppos)
 {
         struct hlist_node *node = v;
@@ -1066,13 +1067,13 @@ EXPORT_SYMBOL(seq_hlist_next_rcu);
  *
  * Called at seq_file->op->start().
  */
-struct hlist_node *
-seq_hlist_start_percpu(struct hlist_head __percpu *head, int *cpu, loff_t pos)
+struct hlist_node *seq_hlist_start_percpu(struct hlist_head __percpu *head,
+                                          int *cpu, loff_t pos)
 {
         struct hlist_node *node;
 
-        for_each_possible_cpu(*cpu) {
-                hlist_for_each(node, per_cpu_ptr(head, *cpu)) {
+        for_each_possible_cpu (*cpu) {
+                hlist_for_each (node, per_cpu_ptr(head, *cpu)) {
                         if (pos-- == 0)
                                 return node;
                 }
@@ -1090,9 +1091,9 @@ EXPORT_SYMBOL(seq_hlist_start_percpu);
  *
  * Called at seq_file->op->next().
  */
-struct hlist_node *
-seq_hlist_next_percpu(void *v, struct hlist_head __percpu *head,
-                        int *cpu, loff_t *pos)
+struct hlist_node *seq_hlist_next_percpu(void *v,
+                                         struct hlist_head __percpu *head,
+                                         int *cpu, loff_t *pos)
 {
         struct hlist_node *node = v;
 
@@ -1114,5 +1115,5 @@ EXPORT_SYMBOL(seq_hlist_next_percpu);
 
 void __init seq_file_init(void)
 {
-        seq_file_cache = KMEM_CACHE(seq_file, SLAB_ACCOUNT|SLAB_PANIC);
+        seq_file_cache = KMEM_CACHE(seq_file, SLAB_ACCOUNT | SLAB_PANIC);
 }