Make 'rabbitmqctl rename_cluster_node' and 'rabbitmqctl update_cluster_nodes' no-ops #10369

Merged
9 commits, merged on Jan 19, 2024
14 changes: 2 additions & 12 deletions deps/rabbit/BUILD.bazel
@@ -321,13 +321,6 @@ rabbitmq_integration_suite(
],
)

rabbitmq_integration_suite(
name = "cluster_rename_SUITE",
size = "large",
flaky = True,
shard_count = 3,
)

rabbitmq_integration_suite(
name = "cluster_SUITE",
size = "medium",
@@ -347,7 +340,7 @@ rabbitmq_integration_suite(
additional_beam = [
":test_clustering_utils_beam",
],
shard_count = 48,
shard_count = 45,
sharding_method = "case",
)

@@ -613,10 +606,7 @@ rabbitmq_integration_suite(
rabbitmq_integration_suite(
name = "per_vhost_connection_limit_SUITE",
size = "medium",
additional_beam = [
":cluster_rename_SUITE_beam_files",
],
shard_count = 10,
shard_count = 9,
)

rabbitmq_integration_suite(
12 changes: 0 additions & 12 deletions deps/rabbit/app.bzl
@@ -169,7 +169,6 @@ def all_beam_files(name = "all_beam_files"):
"src/rabbit_mirror_queue_slave.erl",
"src/rabbit_mirror_queue_sync.erl",
"src/rabbit_mnesia.erl",
"src/rabbit_mnesia_rename.erl",
"src/rabbit_mnesia_to_khepri_record_converter.erl",
"src/rabbit_msg_file.erl",
"src/rabbit_msg_record.erl",
@@ -434,7 +433,6 @@ def all_test_beam_files(name = "all_test_beam_files"):
"src/rabbit_mirror_queue_slave.erl",
"src/rabbit_mirror_queue_sync.erl",
"src/rabbit_mnesia.erl",
"src/rabbit_mnesia_rename.erl",
"src/rabbit_mnesia_to_khepri_record_converter.erl",
"src/rabbit_msg_file.erl",
"src/rabbit_msg_record.erl",
@@ -717,7 +715,6 @@ def all_srcs(name = "all_srcs"):
"src/rabbit_mirror_queue_slave.erl",
"src/rabbit_mirror_queue_sync.erl",
"src/rabbit_mnesia.erl",
"src/rabbit_mnesia_rename.erl",
"src/rabbit_mnesia_to_khepri_record_converter.erl",
"src/rabbit_msg_file.erl",
"src/rabbit_msg_record.erl",
@@ -867,15 +864,6 @@ def test_suite_beam_files(name = "test_suite_beam_files"):
deps = ["//deps/amqp_client:erlang_app"],
)

erlang_bytecode(
name = "cluster_rename_SUITE_beam_files",
testonly = True,
srcs = ["test/cluster_rename_SUITE.erl"],
outs = ["test/cluster_rename_SUITE.beam"],
app_name = "rabbit",
erlc_opts = "//:test_erlc_opts",
deps = ["//deps/amqp_client:erlang_app"],
)
erlang_bytecode(
name = "clustering_management_SUITE_beam_files",
testonly = True,
120 changes: 1 addition & 119 deletions deps/rabbit/docs/rabbitmqctl.8
@@ -463,124 +463,6 @@ is part of, as a ram node:
To learn more, see the
.Lk https://www.rabbitmq.com/clustering.html "RabbitMQ Clustering guide".
.\" ------------------------------------------------------------------
.It Cm rename_cluster_node Ar oldnode1 Ar newnode1 Op Ar oldnode2 Ar newnode2 ...
.Pp
Supports renaming of cluster nodes in the local database.
.Pp
This subcommand causes
.Nm
to temporarily become the node in order to make the change.
The local cluster node must therefore be completely stopped; other nodes
can be online or offline.
.Pp
This subcommand takes an even number of arguments, in pairs representing
the old and new names for nodes.
You must specify the old and new names for this node and for any other
nodes that are stopped and being renamed at the same time.
.Pp
It is possible to stop all nodes and rename them all simultaneously (in
which case old and new names for all nodes must be given to every node)
or stop and rename nodes one at a time (in which case each node only
needs to be told how its own name is changing).
.Pp
For example, this command will rename the node
.Qq rabbit@misshelpful
to the node
.Qq rabbit@cordelia
.sp
.Dl rabbitmqctl rename_cluster_node rabbit@misshelpful rabbit@cordelia
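.Pp
As noted above, several stopped nodes can be renamed in one invocation by
passing multiple old/new pairs; a hypothetical example with two pairs (the
second pair of node names is purely illustrative):
.sp
.Dl rabbitmqctl rename_cluster_node rabbit@misshelpful rabbit@cordelia rabbit@alsohelpful rabbit@goneril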
.Pp
Note that this command only changes the local database.
It may also be necessary to rename the local database directories
and configure the new node name.
For example:
.sp
.Bl -enum -compact
.It
Stop the node:
.sp
.Dl rabbitmqctl stop rabbit@misshelpful
.sp
.It
Rename the node in the local database:
.sp
.Dl rabbitmqctl rename_cluster_node rabbit@misshelpful rabbit@cordelia
.sp
.It
Rename the local database directories (note, you do not need to do this
if you have set the RABBITMQ_MNESIA_DIR environment variable):
.sp
.Bd -literal -offset indent -compact
mv \\
/var/lib/rabbitmq/mnesia/rabbit\\@misshelpful \\
/var/lib/rabbitmq/mnesia/rabbit\\@cordelia
mv \\
/var/lib/rabbitmq/mnesia/rabbit\\@misshelpful-rename \\
/var/lib/rabbitmq/mnesia/rabbit\\@cordelia-rename
mv \\
/var/lib/rabbitmq/mnesia/rabbit\\@misshelpful-plugins-expand \\
/var/lib/rabbitmq/mnesia/rabbit\\@cordelia-plugins-expand
.Ed
.sp
.It
If the node name is configured, e.g. using
.Ar /etc/rabbitmq/rabbitmq-env.conf ,
it also needs to be updated there.
.sp
.It
Start the node when ready
.El
.\" ------------------------------------------------------------------
.It Cm update_cluster_nodes Ar clusternode
.Bl -tag -width Ds
.It Ar clusternode
The node to consult for up-to-date information.
.El
.Pp
Instructs an already clustered node to contact
.Ar clusternode
to cluster when booting up.
This is different from
.Cm join_cluster
since it does not join any cluster - it checks that the node is already
in a cluster with
.Ar clusternode .
.Pp
The need for this command is motivated by the fact that clusters can
change while a node is offline.
Consider a situation where node
.Va rabbit@A
and
.Va rabbit@B
are clustered.
.Va rabbit@A
goes down,
.Va rabbit@C
clusters with
.Va rabbit@B ,
and then
.Va rabbit@B
leaves the cluster.
When
.Va rabbit@A
starts back up, it'll try to contact
.Va rabbit@B ,
but this will fail since
.Va rabbit@B
is not in the cluster anymore.
The following command, run against
.Va rabbit@A ,
makes it consult
.Va rabbit@C
for up-to-date cluster information instead:
.sp
.Dl update_cluster_nodes -n Va rabbit@A Va rabbit@C
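.Pp
For illustration only, the scenario above could be reproduced and then repaired
with a command sequence along these lines (node names match the example; the
exact steps depend on how the nodes were stopped and reset):
.Bd -literal -offset indent -compact
# rabbit@C joins the cluster through rabbit@B while rabbit@A is down
rabbitmqctl -n rabbit@C stop_app
rabbitmqctl -n rabbit@C join_cluster rabbit@B
rabbitmqctl -n rabbit@C start_app
# rabbit@B leaves the cluster
rabbitmqctl -n rabbit@B stop_app
rabbitmqctl -n rabbit@B reset
# rabbit@A comes back: point it at rabbit@C before starting the app
rabbitmqctl -n rabbit@A update_cluster_nodes rabbit@C
rabbitmqctl -n rabbit@A start_app
.Ed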
.Pp
To learn more, see the
.Lk https://www.rabbitmq.com/clustering.html "RabbitMQ Clustering guide"
.El
.\" ------------------------------------------------------------------
.\" ## Classic Mirrored Queues
.\" ------------------------------------------------------------------
.Ss Replication
@@ -598,7 +480,7 @@ The queue will block while synchronisation takes place (all publishers
and consumers using the queue will block or temporarily see no activity).
This command can only be used with mirrored queues.
To learn more, see the
.Lk https://www.rabbitmq.com/ha.html "RabbitMQ Mirroring guide"
.Lk https://www.rabbitmq.com/ha.html "RabbitMQ Classic Queue Mirroring guide"
.Pp
Note that queues with unsynchronised replicas and active consumers
will become synchronised eventually (assuming that consumers make progress).
18 changes: 0 additions & 18 deletions deps/rabbit/src/rabbit_db_cluster.erl
@@ -23,12 +23,6 @@
check_consistency/0,
cli_cluster_status/0]).

%% These two functions are not supported by Khepri and probably
%% shouldn't be part of this API in the future, but currently
%% they're needed here so they can fail when invoked using Khepri.
-export([rename/2,
update_cluster_nodes/1]).

-type node_type() :: disc_node_type() | ram_node_type().
-type disc_node_type() :: disc.
-type ram_node_type() :: ram.
@@ -390,15 +384,3 @@ cli_cluster_status_using_mnesia() ->

cli_cluster_status_using_khepri() ->
rabbit_khepri:cli_cluster_status().

rename(Node, NodeMapList) ->
case rabbit_khepri:is_enabled() of
true -> {error, not_supported};
false -> rabbit_mnesia_rename:rename(Node, NodeMapList)
end.

update_cluster_nodes(DiscoveryNode) ->
case rabbit_khepri:is_enabled() of
true -> {error, not_supported};
false -> rabbit_mnesia:update_cluster_nodes(DiscoveryNode)
end.
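
For context, the two functions removed above returned {error, not_supported} whenever Khepri was the active metadata store, so any caller had to branch on that result. A minimal, purely illustrative sketch of such a call site follows; the wrapper name, spec and error text are not taken from the PR.

%% Illustrative sketch, not part of this PR: how a caller of the removed
%% rabbit_db_cluster:rename/2 had to account for the Khepri case.
-spec maybe_rename(node(), [{node(), node()}]) -> ok | {error, term()}.
maybe_rename(Node, NodeMapList) ->
    case rabbit_db_cluster:rename(Node, NodeMapList) of
        ok ->
            ok;
        {error, not_supported} ->
            %% Khepri does not support renaming cluster nodes in place.
            {error, {not_supported, "node renaming requires Mnesia"}};
        {error, _} = Error ->
            Error
    end.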
31 changes: 4 additions & 27 deletions deps/rabbit/src/rabbit_mnesia.erl
@@ -17,7 +17,6 @@
join_cluster/2,
reset/0,
force_reset/0,
update_cluster_nodes/1,
change_cluster_node_type/1,
forget_cluster_node/2,
force_load_next_boot/0,
@@ -37,7 +36,8 @@
dir/0,
cluster_status_from_mnesia/0,

%% Operations on the db and utils, mainly used in `rabbit_mnesia_rename' and `rabbit'
%% Operations on the db and utils, mainly used in `rabbit' and Mnesia-era modules
%% (some of which may now be gone)
init_db_unchecked/2,
copy_db/1,
check_mnesia_consistency/1,
@@ -284,27 +284,6 @@ change_cluster_node_type(Type) ->
ok = reset(),
ok = join_cluster(Node, Type).

-spec update_cluster_nodes(node()) -> 'ok'.

update_cluster_nodes(DiscoveryNode) ->
ensure_mnesia_not_running(),
ensure_mnesia_dir(),
Status = {AllNodes, _, _} = discover_cluster([DiscoveryNode]),
case rabbit_nodes:me_in_nodes(AllNodes) of
true ->
%% As in `check_consistency/0', we can safely delete the
%% schema here, since it'll be replicated from the other
%% nodes
_ = mnesia:delete_schema([node()]),
rabbit_node_monitor:write_cluster_status(Status),
rabbit_log:info("Updating cluster nodes from ~tp",
[DiscoveryNode]),
init_db_with_mnesia(AllNodes, node_type(), true, true, _Retry = false);
false ->
e(inconsistent_cluster)
end,
ok.

%% We proceed like this: try to remove the node locally. If the node
%% is offline, we remove the node if:
%% * This node is a disc node
@@ -1087,16 +1066,14 @@ e(Tag) -> throw({error, {Tag, error_description(Tag)}}).

error_description(clustering_only_disc_node) ->
"You cannot cluster a node if it is the only disc node in its existing "
" cluster. If new nodes joined while this node was offline, use "
"'update_cluster_nodes' to add them manually.";
" cluster.";
error_description(resetting_only_disc_node) ->
"You cannot reset a node when it is the only disc node in a cluster. "
"Please convert another node of the cluster to a disc node first.";
error_description(not_clustered) ->
"Non-clustered nodes can only be disc nodes.";
error_description(no_online_cluster_nodes) ->
"Could not find any online cluster nodes. If the cluster has changed, "
"you can use the 'update_cluster_nodes' command.";
"Could not find any online cluster nodes.";
error_description(inconsistent_cluster) ->
"The nodes provided do not have this node as part of the cluster.";
error_description(not_a_cluster_node) ->