  */
 void task_mem(struct seq_file *m, struct mm_struct *mm)
 {
+	VMA_ITERATOR(vmi, mm, 0);
 	struct vm_area_struct *vma;
 	struct vm_region *region;
-	struct rb_node *p;
 	unsigned long bytes = 0, sbytes = 0, slack = 0, size;
-
-	mmap_read_lock(mm);
-	for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) {
-		vma = rb_entry(p, struct vm_area_struct, vm_rb);
 
+	mmap_read_lock(mm);
+	for_each_vma(vmi, vma) {
 		bytes += kobjsize(vma);
 
 		region = vma->vm_region;
@@ -82,15 +80,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
 
 unsigned long task_vsize(struct mm_struct *mm)
 {
+	VMA_ITERATOR(vmi, mm, 0);
 	struct vm_area_struct *vma;
-	struct rb_node *p;
 	unsigned long vsize = 0;
 
 	mmap_read_lock(mm);
-	for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) {
-		vma = rb_entry(p, struct vm_area_struct, vm_rb);
+	for_each_vma(vmi, vma)
 		vsize += vma->vm_end - vma->vm_start;
-	}
 	mmap_read_unlock(mm);
 	return vsize;
 }
@@ -99,14 +95,13 @@ unsigned long task_statm(struct mm_struct *mm,
 			 unsigned long *shared, unsigned long *text,
 			 unsigned long *data, unsigned long *resident)
 {
+	VMA_ITERATOR(vmi, mm, 0);
 	struct vm_area_struct *vma;
 	struct vm_region *region;
-	struct rb_node *p;
 	unsigned long size = kobjsize(mm);
 
 	mmap_read_lock(mm);
-	for (p = rb_first(&mm->mm_rb); p; p = rb_next(p)) {
-		vma = rb_entry(p, struct vm_area_struct, vm_rb);
+	for_each_vma(vmi, vma) {
 		size += kobjsize(vma);
 		region = vma->vm_region;
 		if (region) {
@@ -190,17 +185,19 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma)
  */
 static int show_map(struct seq_file *m, void *_p)
 {
-	struct rb_node *p = _p;
-
-	return nommu_vma_show(m, rb_entry(p, struct vm_area_struct, vm_rb));
+	return nommu_vma_show(m, _p);
 }
 
 static void *m_start(struct seq_file *m, loff_t *pos)
 {
 	struct proc_maps_private *priv = m->private;
 	struct mm_struct *mm;
-	struct rb_node *p;
-	loff_t n = *pos;
+	struct vm_area_struct *vma;
+	unsigned long addr = *pos;
+
+	/* See m_next(). Zero at the start or after lseek. */
+	if (addr == -1UL)
+		return NULL;
 
 	/* pin the task and mm whilst we play with them */
 	priv->task = get_proc_task(priv->inode);
@@ -216,10 +213,10 @@ static void *m_start(struct seq_file *m, loff_t *pos)
 		return ERR_PTR(-EINTR);
 	}
 
-	/* start from the Nth VMA */
-	for (p = rb_first(&mm->mm_rb); p; p = rb_next(p))
-		if (n-- == 0)
-			return p;
+	/* start the next element from addr */
+	vma = find_vma(mm, addr);
+	if (vma)
+		return vma;
 
 	mmap_read_unlock(mm);
 	mmput(mm);
@@ -242,10 +239,10 @@ static void m_stop(struct seq_file *m, void *_vml)
 
 static void *m_next(struct seq_file *m, void *_p, loff_t *pos)
 {
-	struct rb_node *p = _p;
+	struct vm_area_struct *vma = _p;
 
-	(*pos)++;
-	return p ? rb_next(p) : NULL;
+	*pos = vma->vm_end;
+	return find_vma(vma->vm_mm, vma->vm_end);
 }
 
 static const struct seq_operations proc_pid_maps_ops = {
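
Note (not part of the patch): every hunk above applies the same conversion. The open-coded walk over the mm_struct rb-tree (rb_first()/rb_next() plus rb_entry()) is replaced by the maple-tree backed VMA iterator, and the seq_file cursor in m_start()/m_next() changes from an index into the tree to a virtual address resolved with find_vma(). The sketch below shows the new iteration idiom in isolation, under the assumption of a kernel that already provides VMA_ITERATOR()/for_each_vma(); count_vmas() is a hypothetical helper used only for illustration.

#include <linux/mm.h>
#include <linux/mmap_lock.h>

/* Illustrative only: count the VMAs of an mm with the VMA iterator. */
static unsigned long count_vmas(struct mm_struct *mm)
{
	VMA_ITERATOR(vmi, mm, 0);	/* begin the walk at address 0 */
	struct vm_area_struct *vma;
	unsigned long nr = 0;

	mmap_read_lock(mm);		/* for_each_vma() must run under the mmap lock */
	for_each_vma(vmi, vma)
		nr++;
	mmap_read_unlock(mm);

	return nr;
}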