@@ -1891,9 +1891,33 @@ static inline void mark_objexts_empty(struct slabobj_ext *obj_exts)
1891
1891
}
1892
1892
}
1893
1893
1894
/*
 * Record that allocation of the slabobj_ext vector failed for this slab.
 * Stores the OBJEXTS_ALLOC_FAIL marker in slab->obj_exts so that a later
 * successful allocation can detect the earlier failure and mark the stale
 * object references as empty (see handle_failed_objexts_alloc()).
 */
static inline void mark_failed_objexts_alloc(struct slab *slab)
{
	slab->obj_exts = OBJEXTS_ALLOC_FAIL;
}
1898
+
1899
+ static inline void handle_failed_objexts_alloc (unsigned long obj_exts ,
1900
+ struct slabobj_ext * vec , unsigned int objects )
1901
+ {
1902
+ /*
1903
+ * If vector previously failed to allocate then we have live
1904
+ * objects with no tag reference. Mark all references in this
1905
+ * vector as empty to avoid warnings later on.
1906
+ */
1907
+ if (obj_exts & OBJEXTS_ALLOC_FAIL ) {
1908
+ unsigned int i ;
1909
+
1910
+ for (i = 0 ; i < objects ; i ++ )
1911
+ set_codetag_empty (& vec [i ].ref );
1912
+ }
1913
+ }
1914
+
1894
1915
#else /* CONFIG_MEM_ALLOC_PROFILING_DEBUG */
1895
1916
1896
1917
static inline void mark_objexts_empty (struct slabobj_ext * obj_exts ) {}
1918
/* No-op stubs when CONFIG_MEM_ALLOC_PROFILING_DEBUG is disabled. */
static inline void mark_failed_objexts_alloc(struct slab *slab) {}
static inline void handle_failed_objexts_alloc(unsigned long obj_exts,
			struct slabobj_ext *vec, unsigned int objects) {}
1897
1921
1898
1922
#endif /* CONFIG_MEM_ALLOC_PROFILING_DEBUG */
1899
1923
@@ -1909,29 +1933,37 @@ static int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
1909
1933
gfp_t gfp , bool new_slab )
1910
1934
{
1911
1935
unsigned int objects = objs_per_slab (s , slab );
1912
- unsigned long obj_exts ;
1913
- void * vec ;
1936
+ unsigned long new_exts ;
1937
+ unsigned long old_exts ;
1938
+ struct slabobj_ext * vec ;
1914
1939
1915
1940
gfp &= ~OBJCGS_CLEAR_MASK ;
1916
1941
/* Prevent recursive extension vector allocation */
1917
1942
gfp |= __GFP_NO_OBJ_EXT ;
1918
1943
vec = kcalloc_node (objects , sizeof (struct slabobj_ext ), gfp ,
1919
1944
slab_nid (slab ));
1920
- if (!vec )
1945
+ if (!vec ) {
1946
+ /* Mark vectors which failed to allocate */
1947
+ if (new_slab )
1948
+ mark_failed_objexts_alloc (slab );
1949
+
1921
1950
return - ENOMEM ;
1951
+ }
1922
1952
1923
- obj_exts = (unsigned long )vec ;
1953
+ new_exts = (unsigned long )vec ;
1924
1954
#ifdef CONFIG_MEMCG
1925
- obj_exts |= MEMCG_DATA_OBJEXTS ;
1955
+ new_exts |= MEMCG_DATA_OBJEXTS ;
1926
1956
#endif
1957
+ old_exts = slab -> obj_exts ;
1958
+ handle_failed_objexts_alloc (old_exts , vec , objects );
1927
1959
if (new_slab ) {
1928
1960
/*
1929
1961
* If the slab is brand new and nobody can yet access its
1930
1962
* obj_exts, no synchronization is required and obj_exts can
1931
1963
* be simply assigned.
1932
1964
*/
1933
- slab -> obj_exts = obj_exts ;
1934
- } else if (cmpxchg (& slab -> obj_exts , 0 , obj_exts ) ) {
1965
+ slab -> obj_exts = new_exts ;
1966
+ } else if (cmpxchg (& slab -> obj_exts , old_exts , new_exts ) != old_exts ) {
1935
1967
/*
1936
1968
* If the slab is already in use, somebody can allocate and
1937
1969
* assign slabobj_exts in parallel. In this case the existing
0 commit comments