Commit 5abb8ae

fix warning
1 parent 0ec4dab commit 5abb8ae

File tree

2 files changed: +2 -2 lines changed


ggml-cuda.cu

Lines changed: 1 addition & 1 deletion
@@ -2542,7 +2542,7 @@ bool ggml_cuda_is_gpu_offloading(struct ggml_tensor * tensor) {
         || (tensor->src1 != nullptr && tensor->src1->backend == GGML_BACKEND_GPU);
 }
 
-bool ggml_cuda_compute_forward(struct ggml_compute_params * params, struct ggml_tensor * tensor){
+bool ggml_cuda_compute_forward(const struct ggml_compute_params * params, struct ggml_tensor * tensor){
     ggml_cuda_func_t func;
     const bool any_on_device = ggml_cuda_is_gpu_offloading(tensor);

ggml-cuda.h

Lines changed: 1 addition & 1 deletion
@@ -32,7 +32,7 @@ void ggml_cuda_assign_buffers_no_scratch(struct ggml_tensor * tensor);
 void ggml_cuda_set_main_device(int main_device);
 void ggml_cuda_set_scratch_size(size_t scratch_size);
 void ggml_cuda_free_scratch(void);
-bool ggml_cuda_compute_forward(struct ggml_compute_params * params, struct ggml_tensor * tensor);
+bool ggml_cuda_compute_forward(const struct ggml_compute_params * params, struct ggml_tensor * tensor);
 
 #ifdef __cplusplus
 }
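
The warning being fixed is presumably a C const-correctness diagnostic: a caller that only holds a pointer-to-const struct ggml_compute_params cannot pass it to a function declared with a non-const parameter without GCC/Clang warning that the call discards the 'const' qualifier. The sketch below illustrates the pattern with hypothetical stand-in names (compute_params, compute_forward_old/new, dispatch); it is not the actual llama.cpp code.

/* const_warning_sketch.c -- hypothetical stand-ins, not the real ggml types. */
#include <stdbool.h>

struct compute_params { int ith, nth; };   /* stand-in for ggml_compute_params */
struct tensor         { int op;       };   /* stand-in for ggml_tensor         */

/* Old-style declaration: params is not const-qualified. */
bool compute_forward_old(struct compute_params * params, struct tensor * t);

/* New-style declaration, mirroring the change in this commit. */
bool compute_forward_new(const struct compute_params * params, struct tensor * t);

void dispatch(const struct compute_params * params, struct tensor * t) {
    compute_forward_old(params, t);  /* warning: passing argument 1 of 'compute_forward_old'
                                        discards 'const' qualifier from pointer target type */
    compute_forward_new(params, t);  /* OK: const-correct, no warning */
}

/* Trivial definitions so the sketch compiles and runs on its own. */
bool compute_forward_old(struct compute_params * params, struct tensor * t) { (void)params; (void)t; return true; }
bool compute_forward_new(const struct compute_params * params, struct tensor * t) { (void)params; (void)t; return true; }

int main(void) {
    struct compute_params p = { 0, 1 };
    struct tensor t = { 0 };
    dispatch(&p, &t);
    return 0;
}

Qualifying the parameter const is a compile-time-only change: it promises that the function does not modify *params, so const-qualified callers compile cleanly without the warning.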
