Skip to content

Bug #79454: Inefficient InnoDB row stats implementation #129

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Closed
wants to merge 1 commit into from
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 5 additions & 0 deletions configure.cmake
Original file line number Diff line number Diff line change
Expand Up @@ -924,6 +924,11 @@ IF(HAVE_LIBNUMA AND NOT WITH_NUMA)
MESSAGE(STATUS "Disabling NUMA on user's request")
ENDIF()

# Bug #79454: optionally use ARMv8.1 Large System Extensions (LSE) atomics
# (e.g. STADD) in InnoDB counters. Off by default; when enabled,
# HAVE_ARM64_LSE_ATOMICS is defined globally via ADD_DEFINITIONS.
# NOTE(review): no check that the toolchain actually targets AArch64 with
# LSE support — presumably the user enabling this knows the target; verify.
OPTION(WITH_LSE "Enable Large System Extensions for AArch64" OFF)
IF(WITH_LSE)
ADD_DEFINITIONS(-DHAVE_ARM64_LSE_ATOMICS)
ENDIF()

# needed for libevent
CHECK_TYPE_SIZE("socklen_t" SIZEOF_SOCKLEN_T)
IF(SIZEOF_SOCKLEN_T)
Expand Down
112 changes: 112 additions & 0 deletions storage/innobase/include/os0atomic.h
Original file line number Diff line number Diff line change
Expand Up @@ -295,6 +295,71 @@ amount of increment. */
# define os_atomic_increment_uint64(ptr, amount) \
os_atomic_increment(ptr, amount)

/**********************************************************//**
Same functions with no return value. These may have optimized implementations on
some architectures. */

#if defined(__aarch64__) && defined(HAVE_ARM64_LSE_ATOMICS)

/* ARMv8.1 LSE STADD: atomic add that does not return the old value.
w is the size suffix ("b", "h" or ""), r is the register-width prefix
("w" for 32-bit-and-narrower operands, "" for 64-bit). Macro arguments
are parenthesized so expression arguments expand as a unit. */
# define ARM64_LSE_ATOMIC_STADD(ptr, amount, w, r) \
	do { \
		__asm__ __volatile__( \
			"stadd" w " %" r "1, %0\n" \
			: "+Q" (*(ptr)) \
			: "r" (amount) \
			: "memory"); \
	} while (0)

/* Atomic increment with no return value; dispatches on the operand size.
The switch is resolved at compile time, leaving a single STADD. */
# define os_atomic_increment_nr(ptr, amount) \
	do { \
		switch (sizeof(*(ptr))) { \
		case 1: ARM64_LSE_ATOMIC_STADD(ptr, amount, "b", "w"); break; \
		case 2: ARM64_LSE_ATOMIC_STADD(ptr, amount, "h", "w"); break; \
		case 4: ARM64_LSE_ATOMIC_STADD(ptr, amount, "", "w"); break; \
		case 8: ARM64_LSE_ATOMIC_STADD(ptr, amount, "", ""); break; \
		default: ut_ad(0); /* wrong operand size */ \
		} \
	} while (0)
#else
# define os_atomic_increment_nr(ptr, amount) \
	os_atomic_increment(ptr, amount)
#endif

/* Typed convenience wrappers over os_atomic_increment_nr(), so call sites
document the operand type they increment. */

# define os_atomic_increment_lint_nr(ptr, amount) \
	os_atomic_increment_nr(ptr, amount)

# define os_atomic_increment_ulint_nr(ptr, amount) \
	os_atomic_increment_nr(ptr, amount)

# define os_atomic_increment_uint32_nr(ptr, amount) \
	os_atomic_increment_nr(ptr, amount)

# define os_atomic_increment_uint64_nr(ptr, amount) \
	os_atomic_increment_nr(ptr, amount)

/* Non-atomic version of the functions with no return value. */

#if defined(__aarch64__) && defined(HAVE_ARM64_LSE_ATOMICS)
/* Atomic increment w/o fetching is faster than nonatomic one with it
on ThunderX. */
# define os_nonatomic_increment_nr(ptr, amount) \
	os_atomic_increment_nr(ptr, amount)
#else
# define os_nonatomic_increment_nr(ptr, amount) (*(ptr) += (amount))
#endif

/* Typed convenience wrappers, mirroring the atomic *_nr family. */

# define os_nonatomic_increment_lint_nr(ptr, amount) \
	os_nonatomic_increment_nr(ptr, amount)

# define os_nonatomic_increment_ulint_nr(ptr, amount) \
	os_nonatomic_increment_nr(ptr, amount)

# define os_nonatomic_increment_uint32_nr(ptr, amount) \
	os_nonatomic_increment_nr(ptr, amount)

# define os_nonatomic_increment_uint64_nr(ptr, amount) \
	os_nonatomic_increment_nr(ptr, amount)

/* Returns the resulting value, ptr is pointer to target, amount is the
amount to decrement. */

Expand All @@ -318,6 +383,53 @@ amount to decrement. */
# define os_atomic_decrement_uint64(ptr, amount) \
os_atomic_decrement(ptr, amount)

/**********************************************************//**
Same functions with no return value. These may have optimized implementations on
some architectures. */

#if defined(__aarch64__) && defined(HAVE_ARM64_LSE_ATOMICS)
/* STADD has no subtracting form, so add the negated amount. The amount
is parenthesized so that an expression argument such as (a - b) is
negated as a unit (the unparenthesized form expanded a - b to -a - b). */
# define os_atomic_decrement_nr(ptr, amount) \
	os_atomic_increment_nr(ptr, -(amount))
#else
# define os_atomic_decrement_nr(ptr, amount) \
	os_atomic_decrement(ptr, amount)
#endif

/* Typed convenience wrappers over os_atomic_decrement_nr(). */

# define os_atomic_decrement_lint_nr(ptr, amount) \
	os_atomic_decrement_nr(ptr, amount)

# define os_atomic_decrement_ulint_nr(ptr, amount) \
	os_atomic_decrement_nr(ptr, amount)

# define os_atomic_decrement_uint32_nr(ptr, amount) \
	os_atomic_decrement_nr(ptr, amount)

# define os_atomic_decrement_uint64_nr(ptr, amount) \
	os_atomic_decrement_nr(ptr, amount)

/* Non-atomic version of the functions with no return value. */

#if defined(__aarch64__) && defined(HAVE_ARM64_LSE_ATOMICS)
/* Atomic decrement without fetching is faster than nonatomic one with it
on AArch64. */
# define os_nonatomic_decrement_nr(ptr, amount) \
	os_atomic_decrement_nr(ptr, amount)
#else
# define os_nonatomic_decrement_nr(ptr, amount) (*(ptr) -= (amount))
#endif

/* Typed convenience wrappers, mirroring the atomic *_nr family. */

# define os_nonatomic_decrement_lint_nr(ptr, amount) \
	os_nonatomic_decrement_nr(ptr, amount)

# define os_nonatomic_decrement_ulint_nr(ptr, amount) \
	os_nonatomic_decrement_nr(ptr, amount)

# define os_nonatomic_decrement_uint32_nr(ptr, amount) \
	os_nonatomic_decrement_nr(ptr, amount)

# define os_nonatomic_decrement_uint64_nr(ptr, amount) \
	os_nonatomic_decrement_nr(ptr, amount)

#endif

#define os_atomic_inc_ulint(m,v,d) os_atomic_increment_ulint(v, d)
Expand Down
1 change: 0 additions & 1 deletion storage/innobase/include/sync0sync.h
Original file line number Diff line number Diff line change
Expand Up @@ -35,7 +35,6 @@ Created 9/5/1995 Heikki Tuuri
#define sync0sync_h

#include "univ.i"
#include "ut0counter.h"

#if defined UNIV_PFS_MUTEX || defined UNIV_PFS_RWLOCK

Expand Down
41 changes: 37 additions & 4 deletions storage/innobase/include/ut0counter.h
Original file line number Diff line number Diff line change
Expand Up @@ -30,6 +30,7 @@ Created 2012/04/12 by Sunny Bains
#include <my_rdtsc.h>
#include "univ.i"
#include "os0thread.h"
#include "os0atomic.h"

/** CPU cache line size */
#ifdef __powerpc__
Expand Down Expand Up @@ -108,6 +109,38 @@ struct single_indexer_t {

#define default_indexer_t counter_indexer_t


/** Generic no-return-value add: plain (non-atomic) compound assignment.
@param val	counter slot to update
@param n	amount to add */
template <typename T>
UNIV_INLINE void add_noreturn(T &val, T n) {
	val += n;
}

/** Generic no-return-value subtract.
@param val	counter slot to update
@param n	amount to subtract */
template <typename T>
UNIV_INLINE void sub_noreturn(T &val, T n) {
	val -= n;
}

/* Template specializations for native word size: route through the
os0atomic.h *_nr primitives, which may have optimized no-fetch
implementations (e.g. a single STADD on AArch64 with LSE). */
template <>
inline void add_noreturn<ulint>(ulint &val, ulint n) {
	os_nonatomic_increment_ulint_nr(&val, n);
}

template <>
inline void sub_noreturn<ulint>(ulint &val, ulint n) {
	/* Fixed: previously called the lint (signed) variant; use the
	ulint variant to match the operand type. Both expand to the same
	primitive, so behavior is unchanged. */
	os_nonatomic_decrement_ulint_nr(&val, n);
}

template <>
inline void add_noreturn<lint>(lint &val, lint n) {
	os_nonatomic_increment_lint_nr(&val, n);
}

template <>
inline void sub_noreturn<lint>(lint &val, lint n) {
	os_nonatomic_decrement_lint_nr(&val, n);
}

/** Class for using fuzzy counters. The counter is not protected by any
mutex and the results are not guaranteed to be 100% accurate but close
enough. Creates an array of counters and separates each element by the
Expand Down Expand Up @@ -151,7 +184,7 @@ class ib_counter_t {

ut_ad(i < UT_ARR_SIZE(m_counter));

m_counter[i] += n;
add_noreturn(m_counter[i], n);
}

/** Use this if you can use a unique identifier, saves a
Expand All @@ -163,7 +196,7 @@ class ib_counter_t {

ut_ad(i < UT_ARR_SIZE(m_counter));

m_counter[i] += n;
add_noreturn(m_counter[i], n);
}

/** If you can't use a good index id. Decrement by 1. */
Expand All @@ -176,7 +209,7 @@ class ib_counter_t {

ut_ad(i < UT_ARR_SIZE(m_counter));

m_counter[i] -= n;
sub_noreturn(m_counter[i], n);
}

/** Use this if you can use a unique identifier, saves a
Expand All @@ -188,7 +221,7 @@ class ib_counter_t {

ut_ad(i < UT_ARR_SIZE(m_counter));

m_counter[i] -= n;
sub_noreturn(m_counter[i], n);
}

/* @return total value - not 100% accurate, since it is not atomic. */
Expand Down