Update bucket settings to be compatible with dp9 and not set defaults for optional fields #121

Merged
2 commits, merged Sep 25, 2023

48 changes: 28 additions & 20 deletions ext/couchbase.cxx
@@ -4370,18 +4370,20 @@ cb_Backend_document_query(VALUE self, VALUE statement, VALUE options)
static void
cb_generate_bucket_settings(VALUE bucket, couchbase::core::management::cluster::bucket_settings& entry, bool is_create)
{
-if (VALUE bucket_type = rb_hash_aref(bucket, rb_id2sym(rb_intern("bucket_type"))); TYPE(bucket_type) == T_SYMBOL) {
-if (bucket_type == rb_id2sym(rb_intern("couchbase")) || bucket_type == rb_id2sym(rb_intern("membase"))) {
-entry.bucket_type = couchbase::core::management::cluster::bucket_type::couchbase;
-} else if (bucket_type == rb_id2sym(rb_intern("memcached"))) {
-entry.bucket_type = couchbase::core::management::cluster::bucket_type::memcached;
-} else if (bucket_type == rb_id2sym(rb_intern("ephemeral"))) {
-entry.bucket_type = couchbase::core::management::cluster::bucket_type::ephemeral;
+if (VALUE bucket_type = rb_hash_aref(bucket, rb_id2sym(rb_intern("bucket_type"))); !NIL_P(bucket_type)) {
+if (TYPE(bucket_type) == T_SYMBOL) {
+if (bucket_type == rb_id2sym(rb_intern("couchbase")) || bucket_type == rb_id2sym(rb_intern("membase"))) {
+entry.bucket_type = couchbase::core::management::cluster::bucket_type::couchbase;
+} else if (bucket_type == rb_id2sym(rb_intern("memcached"))) {
+entry.bucket_type = couchbase::core::management::cluster::bucket_type::memcached;
+} else if (bucket_type == rb_id2sym(rb_intern("ephemeral"))) {
+entry.bucket_type = couchbase::core::management::cluster::bucket_type::ephemeral;
+} else {
+throw ruby_exception(rb_eArgError, rb_sprintf("unknown bucket type, given %+" PRIsVALUE, bucket_type));
+}
} else {
-throw ruby_exception(rb_eArgError, rb_sprintf("unknown bucket type, given %+" PRIsVALUE, bucket_type));
+throw ruby_exception(rb_eArgError, rb_sprintf("bucket type must be a Symbol, given %+" PRIsVALUE, bucket_type));
}
-} else {
-throw ruby_exception(rb_eArgError, rb_sprintf("bucket type must be a Symbol, given %+" PRIsVALUE, bucket_type));
}

if (VALUE name = rb_hash_aref(bucket, rb_id2sym(rb_intern("name"))); TYPE(name) == T_STRING) {
@@ -4390,10 +4392,12 @@ cb_generate_bucket_settings(VALUE bucket, couchbase::core::management::cluster::
throw ruby_exception(rb_eArgError, rb_sprintf("bucket name must be a String, given %+" PRIsVALUE, name));
}

-if (VALUE quota = rb_hash_aref(bucket, rb_id2sym(rb_intern("ram_quota_mb"))); TYPE(quota) == T_FIXNUM) {
-entry.ram_quota_mb = FIX2ULONG(quota);
-} else {
-throw ruby_exception(rb_eArgError, rb_sprintf("bucket RAM quota must be an Integer, given %+" PRIsVALUE, quota));
+if (VALUE quota = rb_hash_aref(bucket, rb_id2sym(rb_intern("ram_quota_mb"))); !NIL_P(quota)) {
+if (TYPE(quota) == T_FIXNUM) {
+entry.ram_quota_mb = FIX2ULONG(quota);
+} else {
+throw ruby_exception(rb_eArgError, rb_sprintf("bucket RAM quota must be an Integer, given %+" PRIsVALUE, quota));
+}
}

if (VALUE expiry = rb_hash_aref(bucket, rb_id2sym(rb_intern("max_expiry"))); !NIL_P(expiry)) {
@@ -4679,7 +4683,7 @@ cb_extract_bucket_settings(const couchbase::core::management::cluster::bucket_se
rb_hash_aset(bucket, rb_id2sym(rb_intern("name")), cb_str_new(entry.name));
rb_hash_aset(bucket, rb_id2sym(rb_intern("uuid")), cb_str_new(entry.uuid));
rb_hash_aset(bucket, rb_id2sym(rb_intern("ram_quota_mb")), ULL2NUM(entry.ram_quota_mb));
-if (const auto &val = entry.max_expiry; val.has_value()) {
+if (const auto& val = entry.max_expiry; val.has_value()) {
rb_hash_aset(bucket, rb_id2sym(rb_intern("max_expiry")), ULONG2NUM(val.value()));
}
switch (entry.compression_mode) {
@@ -4696,11 +4700,15 @@ cb_extract_bucket_settings(const couchbase::core::management::cluster::bucket_se
rb_hash_aset(bucket, rb_id2sym(rb_intern("compression_mode")), Qnil);
break;
}
-if (const auto &val = entry.num_replicas; val.has_value()) {
+if (const auto& val = entry.num_replicas; val.has_value()) {
rb_hash_aset(bucket, rb_id2sym(rb_intern("num_replicas")), ULONG2NUM(val.value()));
}
rb_hash_aset(bucket, rb_id2sym(rb_intern("replica_indexes")), entry.replica_indexes ? Qtrue : Qfalse);
rb_hash_aset(bucket, rb_id2sym(rb_intern("flush_enabled")), entry.flush_enabled ? Qtrue : Qfalse);
if (const auto& val = entry.replica_indexes; val.has_value()) {
rb_hash_aset(bucket, rb_id2sym(rb_intern("replica_indexes")), val.value() ? Qtrue : Qfalse);
}
if (const auto& val = entry.flush_enabled; val.has_value()) {
rb_hash_aset(bucket, rb_id2sym(rb_intern("flush_enabled")), val.value() ? Qtrue : Qfalse);
}
switch (entry.eviction_policy) {
case couchbase::core::management::cluster::bucket_eviction_policy::full:
rb_hash_aset(bucket, rb_id2sym(rb_intern("eviction_policy")), rb_id2sym(rb_intern("full")));
@@ -4754,10 +4762,10 @@ cb_extract_bucket_settings(const couchbase::core::management::cluster::bucket_se
rb_id2sym(rb_intern("history_retention_collection_default")),
entry.history_retention_collection_default.value() ? Qtrue : Qfalse);
}
-if (const auto &val = entry.history_retention_bytes; val.has_value()) {
+if (const auto& val = entry.history_retention_bytes; val.has_value()) {
rb_hash_aset(bucket, rb_id2sym(rb_intern("history_retention_bytes")), ULONG2NUM(val.value()));
}
-if (const auto &val = entry.history_retention_duration; val.has_value()) {
+if (const auto& val = entry.history_retention_duration; val.has_value()) {
rb_hash_aset(bucket, rb_id2sym(rb_intern("history_retention_duration")), ULONG2NUM(val.value()));
}

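The pattern applied throughout `cb_generate_bucket_settings` is the same for every optional field: read the option from the Ruby hash, skip it entirely when it is nil, and only validate the type when a value is actually present. A rough Ruby sketch of that flow follows; it is illustrative only (the real logic lives in the C++ extension above), and the helper name `apply_bucket_type` and the `entry` hash are made up for this example.

```ruby
# Hypothetical helper mirroring the "skip when nil, then type-check" pattern
# used in cb_generate_bucket_settings for optional fields such as :bucket_type.
def apply_bucket_type(options, entry)
  bucket_type = options[:bucket_type]
  return if bucket_type.nil? # optional field left unset: don't force a default

  unless bucket_type.is_a?(Symbol)
    raise ArgumentError, "bucket type must be a Symbol, given #{bucket_type.inspect}"
  end

  case bucket_type
  when :couchbase, :membase then entry[:bucket_type] = :couchbase
  when :memcached           then entry[:bucket_type] = :memcached
  when :ephemeral           then entry[:bucket_type] = :ephemeral
  else
    raise ArgumentError, "unknown bucket type, given #{bucket_type.inspect}"
  end
end

entry = {}
apply_bucket_type({ name: "travel" }, entry)           # no :bucket_type, entry stays empty
apply_bucket_type({ bucket_type: :ephemeral }, entry)  # validated and applied
```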
16 changes: 0 additions & 16 deletions lib/couchbase/management/bucket_manager.rb
@@ -436,22 +436,6 @@ def ejection_policy=(val)

# @yieldparam [BucketSettings] self
def initialize
-@bucket_type = :couchbase
-@name = nil
-@minimum_durability_level = nil
-@healthy = true
-@flush_enabled = false
-@ram_quota_mb = 100
-@num_replicas = 1
-@replica_indexes = false
-@max_expiry = 0
-@compression_mode = :passive
-@conflict_resolution_type = :sequence_number
-@eviction_policy = :value_only
-@storage_backend = nil
-@history_retention_collection_default = nil
-@history_retention_bytes = nil
-@history_retention_duration = nil
yield self if block_given?
end
end
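With the defaults removed from `BucketSettings#initialize`, only fields the caller assigns explicitly are carried through; anything left nil is no longer sent as a client-side default, so the server (via the core library) decides. A minimal usage sketch, assuming the `couchbase` gem and a reachable cluster; the bucket name, connection string, and credentials are placeholders:

```ruby
require "couchbase"

# Only name and ram_quota_mb are set here; bucket_type, num_replicas,
# flush_enabled, etc. are intentionally left nil after this change.
settings = Couchbase::Management::BucketSettings.new do |s|
  s.name = "my_bucket"
  s.ram_quota_mb = 256
end

# Assumed usage against a running cluster:
# cluster = Couchbase::Cluster.connect("couchbase://localhost", "Administrator", "password")
# cluster.buckets.create_bucket(settings)
```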
4 changes: 2 additions & 2 deletions test/bucket_manager_test.rb
@@ -124,8 +124,8 @@ def test_update_bucket_history_retention_unsupported
res = @bucket_manager.get_bucket(bucket_name)

assert_nil res.history_retention_collection_default
-assert_equal 0, res.history_retention_bytes
-assert_equal 0, res.history_retention_duration
+assert_nil res.history_retention_bytes
+assert_nil res.history_retention_duration

assert_raises(Error::InvalidArgument) do
@bucket_manager.update_bucket(
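The test now expects `nil` rather than `0` when a server does not report history-retention settings, so calling code should treat these fields as optional. A hedged sketch, assuming an existing `Management::BucketManager` instance and a bucket named "travel-sample":

```ruby
# Optional history-retention fields may come back as nil instead of 0.
settings = bucket_manager.get_bucket("travel-sample")
if settings.history_retention_bytes.nil?
  puts "history retention not reported for this bucket"
else
  puts "history retention capped at #{settings.history_retention_bytes} bytes"
end
```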
4 changes: 2 additions & 2 deletions test/crud_test.rb
@@ -365,7 +365,7 @@ def test_error_insert_get_with_expiration
assert_kind_of Time, res.expiry_time
now = Time.now

-assert res.expiry_time >= now, "now: #{now} (#{now.to_i}), expiry_time: #{res.expiry_time} (#{res.expiry_time.to_i})"
+assert_operator res.expiry_time, :>=, now, "now: #{now} (#{now.to_i}), expiry_time: #{res.expiry_time} (#{res.expiry_time.to_i})"
end

def test_expiry_option_as_time_instance
@@ -601,7 +601,7 @@ def test_upsert_get_projection_16_fields_and_expiry

assert_equal(expected, res.content, "expected result do not include field17, field18")
assert_kind_of(Time, res.expiry_time)
-assert(res.expiry_time > Time.now)
+assert_operator(res.expiry_time, :>, Time.now)
end

def test_upsert_get_projection_missing_path
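The `assert` to `assert_operator` swaps (here and in the scan, search, and subdoc test diffs below) only change the failure output: `assert_operator` prints both operands when the comparison fails, whereas a bare `assert(a > b)` only reports that `false` was not truthy. A self-contained Minitest example of the style:

```ruby
require "minitest/autorun"

class ExpiryAssertionStyleTest < Minitest::Test
  def test_operator_failure_message
    expiry_time = Time.now + 3600
    # On failure this reports both operands, e.g. "Expected <t1> to be > <t2>",
    # instead of the unhelpful message from assert(expiry_time > Time.now).
    assert_operator expiry_time, :>, Time.now
  end
end
```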
2 changes: 1 addition & 1 deletion test/scan_test.rb
@@ -75,7 +75,7 @@ def validate_sampling_scan(scan_result, limit, ids_only: false)

assert_equal(ids_only, item.id_only)
end
-assert(items.size <= limit)
+assert_operator(items.size, :<=, limit)

return if ids_only

4 changes: 2 additions & 2 deletions test/search_test.rb
@@ -100,7 +100,7 @@ def test_simple_search
end
warn "search with at_plus took #{attempts} attempts, probably server bug" if attempts > 1

-assert attempts < 20, "it is very suspicious that search with at_plus took more than 20 attempts (#{attempts})"
+assert_operator attempts, :<, 20, "it is very suspicious that search with at_plus took more than 20 attempts (#{attempts})"
end

def test_doc_id_search_query
Expand Down Expand Up @@ -136,7 +136,7 @@ def test_doc_id_search_query
end
warn "search took #{attempts} attempts, probably a server bug" if attempts > 1

-assert attempts < 20, "it is very suspicious that search took more than 20 attempts (#{attempts})"
+assert_operator attempts, :<, 20, "it is very suspicious that search took more than 20 attempts (#{attempts})"
end
end
end
2 changes: 1 addition & 1 deletion test/subdoc_test.rb
@@ -1197,7 +1197,7 @@ def test_expiration
res = @collection.get(doc_id, options)

assert_kind_of Time, res.expiry_time
-assert res.expiry_time > Time.now
+assert_operator res.expiry_time, :>, Time.now
end

def test_more_than_16_entries