Skip to content

Commit 99691ad

Browse files
Vladimir Davydov (committed by torvalds)
authored and committed
mm: remove pointless struct in struct page definition
This patchset implements per kmemcg accounting of page tables (x86-only), pipe buffers, and unix socket buffers. Patches 1-3 are just cleanups that are not supposed to introduce any functional changes. Patches 4 and 5 move charge/uncharge to generic page allocator paths for the sake of accounting pipe and unix socket buffers. Patches 5-7 make x86 page tables, pipe buffers, and unix socket buffers accountable. This patch (of 8): ... to reduce indentation level thus leaving more space for comments. Link: http://lkml.kernel.org/r/f34ffe70fce2b0b9220856437f77972d67c14275.1464079537.git.vdavydov@virtuozzo.com Signed-off-by: Vladimir Davydov <[email protected]> Cc: Johannes Weiner <[email protected]> Cc: Michal Hocko <[email protected]> Cc: Eric Dumazet <[email protected]> Cc: Minchan Kim <[email protected]> Signed-off-by: Andrew Morton <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
1 parent e77b085 commit 99691ad

File tree

1 file changed

+32
-36
lines changed

1 file changed

+32
-36
lines changed

include/linux/mm_types.h

Lines changed: 32 additions & 36 deletions
Original file line numberDiff line numberDiff line change
@@ -60,51 +60,47 @@ struct page {
6060
};
6161

6262
/* Second double word */
63-
struct {
64-
union {
65-
pgoff_t index; /* Our offset within mapping. */
66-
void *freelist; /* sl[aou]b first free object */
67-
/* page_deferred_list().prev -- second tail page */
68-
};
63+
union {
64+
pgoff_t index; /* Our offset within mapping. */
65+
void *freelist; /* sl[aou]b first free object */
66+
/* page_deferred_list().prev -- second tail page */
67+
};
6968

70-
union {
69+
union {
7170
#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
7271
defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
73-
/* Used for cmpxchg_double in slub */
74-
unsigned long counters;
72+
/* Used for cmpxchg_double in slub */
73+
unsigned long counters;
7574
#else
76-
/*
77-
* Keep _refcount separate from slub cmpxchg_double
78-
* data. As the rest of the double word is protected by
79-
* slab_lock but _refcount is not.
80-
*/
81-
unsigned counters;
75+
/*
76+
* Keep _refcount separate from slub cmpxchg_double data.
77+
* As the rest of the double word is protected by slab_lock
78+
* but _refcount is not.
79+
*/
80+
unsigned counters;
8281
#endif
82+
struct {
8383

84-
struct {
85-
86-
union {
87-
/*
88-
* Count of ptes mapped in mms, to show
89-
* when page is mapped & limit reverse
90-
* map searches.
91-
*/
92-
atomic_t _mapcount;
93-
94-
struct { /* SLUB */
95-
unsigned inuse:16;
96-
unsigned objects:15;
97-
unsigned frozen:1;
98-
};
99-
int units; /* SLOB */
100-
};
84+
union {
10185
/*
102-
* Usage count, *USE WRAPPER FUNCTION*
103-
* when manual accounting. See page_ref.h
86+
* Count of ptes mapped in mms, to show when
87+
* page is mapped & limit reverse map searches.
10488
*/
105-
atomic_t _refcount;
89+
atomic_t _mapcount;
90+
91+
unsigned int active; /* SLAB */
92+
struct { /* SLUB */
93+
unsigned inuse:16;
94+
unsigned objects:15;
95+
unsigned frozen:1;
96+
};
97+
int units; /* SLOB */
10698
};
107-
unsigned int active; /* SLAB */
99+
/*
100+
* Usage count, *USE WRAPPER FUNCTION* when manual
101+
* accounting. See page_ref.h
102+
*/
103+
atomic_t _refcount;
108104
};
109105
};
110106

0 commit comments

Comments
 (0)