Skip to content

Commit 50b34ac

Browse files
committed
clang-format
1 parent cc0657e commit 50b34ac

File tree

1 file changed

+24
-21
lines changed

1 file changed

+24
-21
lines changed

src/libtorch.cc

Lines changed: 24 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -25,6 +25,7 @@
2525
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
2626

2727
#include <stdint.h>
28+
2829
#include <exception>
2930

3031
#include "libtorch_utils.h"
@@ -232,7 +233,9 @@ ModelState::ModelState(TRITONBACKEND_Model* triton_model)
232233
enable_weight_sharing_(false), enable_tensor_fuser_pair_({false, true}),
233234
enable_jit_profiling_pair_({false, true}),
234235
enable_jit_executor_pair_({false, true}),
235-
enable_nvfuser_pair_({false, false}) {}
236+
enable_nvfuser_pair_({false, false})
237+
{
238+
}
236239

237240
TRITONSERVER_Error*
238241
ModelState::LoadModel(
@@ -995,7 +998,7 @@ ModelInstanceState::ValidateInputs(const size_t expected_input_cnt)
995998
TRITONSERVER_ERROR_INTERNAL,
996999
("Triton only supports 1 dimensional List of String as input "
9971000
"for "
998-
"'" +
1001+
"'" +
9991002
std::string(state_name) + "' for model '" +
10001003
model_state_->Name() + "'")
10011004
.c_str());
@@ -1133,7 +1136,7 @@ ModelInstanceState::ValidateOutputs()
11331136
TRITONSERVER_ERROR_INTERNAL,
11341137
("Triton only supports 1 dimensional List of String as output "
11351138
"for "
1136-
"'" +
1139+
"'" +
11371140
std::string(state_name) + "' for model '" +
11381141
model_state_->Name() + "'")
11391142
.c_str());
@@ -1668,7 +1671,7 @@ ModelInstanceState::GetNamingConvention(
16681671
("PyTorch model '" + model_state_->Name() +
16691672
"' is using sequence batching with state but state '" +
16701673
state_name +
1671-
"' does not follow the <name>__<index> naming convention. ")
1674+
"' does not follow the <name>__<index> naming convention. ")
16721675
.c_str());
16731676
} else {
16741677
// check if the index part of the name is not an integer
@@ -1680,13 +1683,13 @@ ModelInstanceState::GetNamingConvention(
16801683
}
16811684
}
16821685
if (!is_int) {
1683-
return TRITONSERVER_ErrorNew(
1684-
TRITONSERVER_ERROR_INVALID_ARG,
1685-
("PyTorch model '" + model_state_->Name() +
1686+
return TRITONSERVER_ErrorNew(
1687+
TRITONSERVER_ERROR_INVALID_ARG,
1688+
("PyTorch model '" + model_state_->Name() +
16861689
"' is using sequence batching with state but state '" +
16871690
state_name +
1688-
"' does not follow the <name>__<index> naming convention. ")
1689-
.c_str());
1691+
"' does not follow the <name>__<index> naming convention. ")
1692+
.c_str());
16901693
}
16911694
}
16921695
}
@@ -2202,21 +2205,21 @@ ModelInstanceState::ReadOutputTensors(
22022205
"' is a scalar which is not supported.")
22032206
.c_str());
22042207
}
2205-
if (output_tensor_pair.first != -1) {
2208+
if (output_tensor_pair.first != -1) {
22062209
responder.ProcessTensor(
22072210
name, output_dtype, batchn_shape, output_buffer, memory_type,
22082211
memory_id);
2209-
}
2210-
if (output_tensor_pair.second != -1) {
2211-
std::vector<TRITONBACKEND_State*> states;
2212-
states = responder.ProcessStateTensor(
2212+
}
2213+
if (output_tensor_pair.second != -1) {
2214+
std::vector<TRITONBACKEND_State*> states;
2215+
states = responder.ProcessStateTensor(
22132216
name, output_dtype, batchn_shape, output_buffer, memory_type,
22142217
memory_id);
2215-
// Update the states
2216-
for (auto& state : states) {
2217-
RETURN_IF_ERROR(TRITONBACKEND_StateUpdate(state));
2218+
// Update the states
2219+
for (auto& state : states) {
2220+
RETURN_IF_ERROR(TRITONBACKEND_StateUpdate(state));
2221+
}
22182222
}
2219-
}
22202223

22212224
} else {
22222225
responder.ProcessBatchOutput(
@@ -2251,9 +2254,9 @@ ModelInstanceState::ReadOutputTensors(
22512254
TRITONBACKEND_Output* response_output;
22522255
RESPOND_AND_SET_NULL_IF_ERROR(
22532256
&response, TRITONBACKEND_ResponseOutput(
2254-
response, &response_output, name.c_str(),
2255-
TRITONSERVER_TYPE_BYTES, batchn_shape.data(),
2256-
batchn_shape.size()));
2257+
response, &response_output, name.c_str(),
2258+
TRITONSERVER_TYPE_BYTES, batchn_shape.data(),
2259+
batchn_shape.size()));
22572260
string_buffer.emplace_back(new std::string());
22582261
cuda_copy |= SetStringOutputBuffer(
22592262
&output_list, &response, response_output, tensor_element_cnt,

0 commit comments

Comments (0)