from google.cloud import monitoring


-_MIN_NODE_COUNT = 3
-"""
-The minimum number of nodes to use. The default minimum is 3. If you have a
-lot of data, the rule of thumb is to not go below 2.5 TB per node for SSD
-clusters, and 8 TB for HDD. The bigtable.googleapis.com/disk/bytes_used
-metric is useful in figuring out the minimum number.
-of nodes.
-"""
-
-_MAX_NODE_COUNT = 30
-"""
-The maximum number of nodes to use. The default maximum is 30 nodes per zone.
-If you need more quota, you can request more by following the instructions
-<a href="https://cloud.google.com/bigtable/quota">here</a>.
-"""
-
-_SIZE_CHANGE_STEP = 3
-"""The number of nodes to change the cluster by."""
-

def get_cpu_load():
    """Returns the most recent Cloud Bigtable CPU load measurement.
@@ -70,6 +51,23 @@ def scale_bigtable(bigtable_instance, bigtable_cluster, scale_up):
        bigtable_cluster (str): Cloud Bigtable cluster ID to scale
        scale_up (bool): If true, scale up, otherwise scale down
    """
+    _MIN_NODE_COUNT = 3
+    """
+    The minimum number of nodes to use. The default minimum is 3. If you have a
+    lot of data, the rule of thumb is to not go below 2.5 TB per node for SSD
+    clusters, and 8 TB for HDD. The bigtable.googleapis.com/disk/bytes_used
+    metric is useful in figuring out the minimum number of nodes.
+    """
+
+    _MAX_NODE_COUNT = 30
+    """
+    The maximum number of nodes to use. The default maximum is 30 nodes per zone.
+    If you need more quota, you can request more by following the instructions
+    <a href="https://cloud.google.com/bigtable/quota">here</a>.
+    """
+
+    _SIZE_CHANGE_STEP = 3
+    """The number of nodes to change the cluster by."""
    # [START bigtable_scale]
    bigtable_client = bigtable.Client(admin=True)
    instance = bigtable_client.instance(bigtable_instance)
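For context, the resize pattern this function drives is: open an admin
client, look up the cluster, set serve_nodes, and call update(). A condensed
sketch under the google-cloud-bigtable API used here — cluster.reload() to
populate the current node count is an assumption about that client:

from google.cloud import bigtable

def resize_cluster(instance_id, cluster_id, new_node_count):
    client = bigtable.Client(admin=True)   # admin access is needed to scale
    cluster = client.instance(instance_id).cluster(cluster_id)
    cluster.reload()                       # assumed: refreshes serve_nodes
    cluster.serve_nodes = new_node_count   # set the target size...
    cluster.update()                       # ...and apply it server-side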
@@ -85,15 +83,15 @@ def scale_bigtable(bigtable_instance, bigtable_cluster, scale_up):
            new_node_count = min(current_node_count + 3, _MAX_NODE_COUNT)
            cluster.serve_nodes = new_node_count
            cluster.update()
-            print('Scaled up from {} up to {} nodes.'.format(
+            print('Scaled up from {} to {} nodes.'.format(
                current_node_count, new_node_count))
    else:
        if current_node_count > _MIN_NODE_COUNT:
            new_node_count = max(
                current_node_count - _SIZE_CHANGE_STEP, _MIN_NODE_COUNT)
            cluster.serve_nodes = new_node_count
            cluster.update()
-            print('Scaled down from {} up to {} nodes.'.format(
+            print('Scaled down from {} to {} nodes.'.format(
                current_node_count, new_node_count))
    # [END bigtable_scale]
99
97
0 commit comments
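Taken together, a hedged sketch of how a caller might drive this function —
not code from this commit; the thresholds and poll interval are hypothetical,
while get_cpu_load() and scale_bigtable() are the functions shown in the diff:

import time

def autoscale_loop(bigtable_instance, bigtable_cluster,
                   high_cpu_threshold=0.6, low_cpu_threshold=0.2,
                   poll_interval=60):
    while True:
        cluster_cpu = get_cpu_load()
        if cluster_cpu > high_cpu_threshold:
            # Hot cluster: add _SIZE_CHANGE_STEP nodes, capped at the maximum
            scale_bigtable(bigtable_instance, bigtable_cluster, scale_up=True)
        elif cluster_cpu < low_cpu_threshold:
            # Idle cluster: remove nodes, never dropping below the minimum
            scale_bigtable(bigtable_instance, bigtable_cluster, scale_up=False)
        time.sleep(poll_interval)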