
Commit 63fcbbb

Change label to avoid confusion: ROCm hipBLAS users should obtain binaries from the YellowRoseCx fork. The ROCm support in this repo requires self-compilation.
1 parent 8b8eb18


koboldcpp.py

Lines changed: 8 additions & 8 deletions
@@ -765,7 +765,7 @@ def show_new_gui():
     lib_option_pairs = [
         (lib_openblas, "Use OpenBLAS"),
         (lib_clblast, "Use CLBlast"),
-        (lib_cublas, "Use CuBLAS/hipBLAS"),
+        (lib_cublas, "Use CuBLAS"),
         (lib_default, "Use No BLAS"),
         (lib_noavx2, "NoAVX2 Mode (Old CPU)"),
         (lib_failsafe, "Failsafe Mode (Old CPU)")]
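The backend labels are bare string literals that are compared verbatim in changerunmode and export_vars below, which is why this one-word rename has to touch eight lines. A minimal standalone sketch (the constant names are hypothetical, not part of this commit) of how a shared constant would confine such a rename to a single line:

# Hypothetical refactor sketch, not part of this commit: define each backend
# label once so a rename like "Use CuBLAS/hipBLAS" -> "Use CuBLAS" edits one line.
CUBLAS_LABEL = "Use CuBLAS"
CLBLAST_LABEL = "Use CLBlast"

def needs_gpu_selector(index):
    # Mirrors the comparisons patched throughout this diff.
    return index in (CLBLAST_LABEL, CUBLAS_LABEL)

assert needs_gpu_selector(CUBLAS_LABEL)
assert not needs_gpu_selector("Use OpenBLAS")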
@@ -922,15 +922,15 @@ def setup_backend_tooltip(parent):
 
     def changerunmode(a,b,c):
         index = runopts_var.get()
-        if index == "Use CLBlast" or index == "Use CuBLAS/hipBLAS":
+        if index == "Use CLBlast" or index == "Use CuBLAS":
             gpu_selector_label.grid(row=3, column=0, padx = 8, pady=1, stick="nw")
             quick_gpu_selector_label.grid(row=3, column=0, padx = 8, pady=1, stick="nw")
             if index == "Use CLBlast":
                 gpu_selector_box.grid(row=3, column=1, padx=8, pady=1, stick="nw")
                 quick_gpu_selector_box.grid(row=3, column=1, padx=8, pady=1, stick="nw")
                 if gpu_choice_var.get()=="All":
                     gpu_choice_var.set("1")
-            elif index == "Use CuBLAS/hipBLAS":
+            elif index == "Use CuBLAS":
                 CUDA_gpu_selector_box.grid(row=3, column=1, padx=8, pady=1, stick="nw")
                 CUDA_quick_gpu_selector_box.grid(row=3, column=1, padx=8, pady=1, stick="nw")
         else:
@@ -941,7 +941,7 @@ def changerunmode(a,b,c):
             quick_gpu_selector_box.grid_forget()
             CUDA_quick_gpu_selector_box.grid_forget()
 
-        if index == "Use CuBLAS/hipBLAS":
+        if index == "Use CuBLAS":
             lowvram_box.grid(row=4, column=0, padx=8, pady=1, stick="nw")
             quick_lowvram_box.grid(row=4, column=0, padx=8, pady=1, stick="nw")
             mmq_box.grid(row=4, column=1, padx=8, pady=1, stick="nw")
@@ -952,7 +952,7 @@ def changerunmode(a,b,c):
             mmq_box.grid_forget()
             quick_mmq_box.grid_forget()
 
-        if index == "Use CLBlast" or index == "Use CuBLAS/hipBLAS":
+        if index == "Use CLBlast" or index == "Use CuBLAS":
             gpu_layers_label.grid(row=5, column=0, padx = 8, pady=1, stick="nw")
             gpu_layers_entry.grid(row=5, column=1, padx=8, pady=1, stick="nw")
             quick_gpu_layers_label.grid(row=5, column=0, padx = 8, pady=1, stick="nw")
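For context, changerunmode has the callback signature tkinter uses for variable traces: the (a,b,c) parameters are the name, index, and mode strings the trace machinery passes on every write. A self-contained sketch of that wiring, assuming a plain tkinter OptionMenu (the real GUI's widget set is richer):

import tkinter as tk

root = tk.Tk()
runopts_var = tk.StringVar(value="Use OpenBLAS")
gpu_selector_label = tk.Label(root, text="GPU ID:")

def changerunmode(a, b, c):
    # Show the GPU selector only for GPU backends, mirroring the
    # grid()/grid_forget() toggling in the hunks above.
    if runopts_var.get() in ("Use CLBlast", "Use CuBLAS"):
        gpu_selector_label.grid(row=3, column=0, padx=8, pady=1, sticky="nw")
    else:
        gpu_selector_label.grid_forget()

runopts_var.trace("w", changerunmode)  # legacy trace API; passes (name, index, mode)
tk.OptionMenu(root, runopts_var, "Use OpenBLAS", "Use CLBlast", "Use CuBLAS").grid(row=0, column=0)
root.mainloop()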
@@ -1147,7 +1147,7 @@ def export_vars():
         gpuchoiceidx = int(gpu_choice_var.get())-1
     if runopts_var.get() == "Use CLBlast":
         args.useclblast = [[0,0], [1,0], [0,1], [1,1]][gpuchoiceidx]
-    if runopts_var.get() == "Use CuBLAS/hipBLAS":
+    if runopts_var.get() == "Use CuBLAS":
         if gpu_choice_var.get()=="All":
             args.usecublas = ["lowvram"] if lowvram_var.get() == 1 else ["normal"]
         else:
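The untouched context line above also documents the CLBlast path: the GUI's 1-based GPU choice indexes a fixed table of OpenCL (platform ID, device ID) pairs, matching the two integers --useclblast expects. A standalone illustration of that lookup:

# Illustration of the [[0,0], [1,0], [0,1], [1,1]] lookup in export_vars:
# GUI GPU choice 1..4 becomes an OpenCL (platform, device) pair.
CLBLAST_PAIRS = [[0, 0], [1, 0], [0, 1], [1, 1]]

for gui_choice in (1, 2, 3, 4):
    gpuchoiceidx = gui_choice - 1  # dropdown is 1-based, the list is 0-based
    platform_id, device_id = CLBLAST_PAIRS[gpuchoiceidx]
    print(f"GPU #{gui_choice} -> --useclblast {platform_id} {device_id}")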
@@ -1373,7 +1373,7 @@ def guilaunch():
     blaschoice = tk.StringVar()
     blaschoice.set("BLAS = 512")
 
-    runopts = ["Use OpenBLAS","Use CLBLast GPU #1","Use CLBLast GPU #2","Use CLBLast GPU #3","Use CuBLAS/hipBLAS GPU","Use No BLAS","NoAVX2 Mode (Old CPU)","Failsafe Mode (Old CPU)"]
+    runopts = ["Use OpenBLAS","Use CLBLast GPU #1","Use CLBLast GPU #2","Use CLBLast GPU #3","Use CuBLAS GPU","Use No BLAS","NoAVX2 Mode (Old CPU)","Failsafe Mode (Old CPU)"]
     runchoice = tk.StringVar()
     runchoice.set("Use OpenBLAS")
 
@@ -1824,7 +1824,7 @@ def onready_subprocess():
     compatgroup = parser.add_mutually_exclusive_group()
     compatgroup.add_argument("--noblas", help="Do not use OpenBLAS for accelerated prompt ingestion", action='store_true')
     compatgroup.add_argument("--useclblast", help="Use CLBlast for GPU Acceleration. Must specify exactly 2 arguments, platform ID and device ID (e.g. --useclblast 1 0).", type=int, choices=range(0,9), nargs=2)
-    compatgroup.add_argument("--usecublas", help="Use CuBLAS/hipBLAS for GPU Acceleration. Requires CUDA. Select lowvram to not allocate VRAM scratch buffer. Enter a number afterwards to select and use 1 GPU. Leaving no number will use all GPUs.", nargs='*',metavar=('[lowvram|normal] [main GPU ID] [mmq]'), choices=['normal', 'lowvram', '0', '1', '2', '3', 'mmq'])
+    compatgroup.add_argument("--usecublas", help="Use CuBLAS for GPU Acceleration. Requires CUDA. Select lowvram to not allocate VRAM scratch buffer. Enter a number afterwards to select and use 1 GPU. Leaving no number will use all GPUs. For hipBLAS binaries, please check YellowRoseCx rocm fork.", nargs='*',metavar=('[lowvram|normal] [main GPU ID] [mmq]'), choices=['normal', 'lowvram', '0', '1', '2', '3', 'mmq'])
     parser.add_argument("--gpulayers", help="Set number of layers to offload to GPU when using GPU. Requires GPU.",metavar=('[GPU layers]'), type=int, default=0)
     parser.add_argument("--tensor_split", help="For CUDA with ALL GPU set only, ratio to split tensors across multiple GPUs, space-separated list of proportions, e.g. 7 3", metavar=('[Ratios]'), type=float, nargs='+')
     parser.add_argument("--onready", help="An optional shell command to execute after the model has been loaded.", type=str, default="",nargs=1)
