Skip to content

Commit 7b3c555

Browse files
author
Bill Prin
committed
hegemonic review
1 parent 76fb09a commit 7b3c555

File tree

1 file changed

+19
-21
lines changed

1 file changed

+19
-21
lines changed

bigtable/metricscaler/metricscaler.py

Lines changed: 19 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -22,25 +22,6 @@
2222
from google.cloud import monitoring
2323

2424

25-
_MIN_NODE_COUNT = 3
26-
"""
27-
The minimum number of nodes to use. The default minimum is 3. If you have a
28-
lot of data, the rule of thumb is to not go below 2.5 TB per node for SSD
29-
clusters, and 8 TB for HDD. The bigtable.googleapis.com/disk/bytes_used
30-
metric is useful in figuring out the minimum number.
31-
of nodes.
32-
"""
33-
34-
_MAX_NODE_COUNT = 30
35-
"""
36-
The maximum number of nodes to use. The default maximum is 30 nodes per zone.
37-
If you need more quota, you can request more by following the instructions
38-
<a href="https://cloud.google.com/bigtable/quota">here</a>.
39-
"""
40-
41-
_SIZE_CHANGE_STEP = 3
42-
"""The number of nodes to change the cluster by."""
43-
4425

4526
def get_cpu_load():
4627
"""Returns the most recent Cloud Bigtable CPU load measurement.
@@ -70,6 +51,23 @@ def scale_bigtable(bigtable_instance, bigtable_cluster, scale_up):
7051
bigtable_cluster (str): Cloud Bigtable cluster ID to scale
7152
scale_up (bool): If true, scale up, otherwise scale down
7253
"""
54+
_MIN_NODE_COUNT = 3
55+
"""
56+
The minimum number of nodes to use. The default minimum is 3. If you have a
57+
lot of data, the rule of thumb is to not go below 2.5 TB per node for SSD
58+
clusters, and 8 TB for HDD. The bigtable.googleapis.com/disk/bytes_used
59+
metric is useful in figuring out the minimum number of nodes.
60+
"""
61+
62+
_MAX_NODE_COUNT = 30
63+
"""
64+
The maximum number of nodes to use. The default maximum is 30 nodes per zone.
65+
If you need more quota, you can request more by following the instructions
66+
<a href="https://cloud.google.com/bigtable/quota">here</a>.
67+
"""
68+
69+
_SIZE_CHANGE_STEP = 3
70+
"""The number of nodes to change the cluster by."""
7371
# [START bigtable_scale]
7472
bigtable_client = bigtable.Client(admin=True)
7573
instance = bigtable_client.instance(bigtable_instance)
@@ -85,15 +83,15 @@ def scale_bigtable(bigtable_instance, bigtable_cluster, scale_up):
8583
new_node_count = min(current_node_count + 3, _MAX_NODE_COUNT)
8684
cluster.serve_nodes = new_node_count
8785
cluster.update()
88-
print('Scaled up from {} up to {} nodes.'.format(
86+
print('Scaled up from {} to {} nodes.'.format(
8987
current_node_count, new_node_count))
9088
else:
9189
if current_node_count > _MIN_NODE_COUNT:
9290
new_node_count = max(
9391
current_node_count - _SIZE_CHANGE_STEP, _MIN_NODE_COUNT)
9492
cluster.serve_nodes = new_node_count
9593
cluster.update()
96-
print('Scaled down from {} up to {} nodes.'.format(
94+
print('Scaled down from {} to {} nodes.'.format(
9795
current_node_count, new_node_count))
9896
# [END bigtable_scale]
9997

0 commit comments

Comments (0)