Skip to content

Commit

Permalink
examples : more CUDA leftovers (#0)
Browse files · Browse the repository at this point in the history
  • Loading branch information
ggerganov committed Mar 27, 2024
1 parent af32597 commit 39060c7
Show file tree
Hide file tree
Showing 4 changed files with 10 additions and 10 deletions.
4 changes: 2 additions & 2 deletions examples/magika/CMakeLists.txt
Expand Up @@ -8,8 +8,8 @@ target_link_libraries(${TEST_TARGET} PRIVATE ggml common common-ggml)
#
# For GPU offloading

if (GGML_CUBLAS)
add_compile_definitions(GGML_USE_CUBLAS)
if (GGML_CUDA)
add_compile_definitions(GGML_USE_CUDA)
endif()

if (GGML_CLBLAST)
Expand Down
8 changes: 4 additions & 4 deletions examples/python/ggml/__init__.pyi
Expand Up @@ -568,8 +568,8 @@ class lib:
def ggml_cpu_has_clblast() -> int:
""" GGML_API int ggml_cpu_has_clblast (void);"""
...
def ggml_cpu_has_cublas() -> int:
""" GGML_API int ggml_cpu_has_cublas (void);"""
def ggml_cpu_has_cuda() -> int:
""" GGML_API int ggml_cpu_has_cuda (void);"""
...
def ggml_cpu_has_f16c() -> int:
""" GGML_API int ggml_cpu_has_f16c (void);"""
Expand Down Expand Up @@ -967,8 +967,8 @@ class lib:
def ggml_init(params: ffi.CData) -> ffi.CData:
""" GGML_API struct ggml_context * ggml_init(struct ggml_init_params params);"""
...
def ggml_init_cublas() -> None:
"""GGML_API void ggml_init_cublas(void);"""
def ggml_init_cuda() -> None:
"""GGML_API void ggml_init_cuda(void);"""
...
def ggml_internal_get_type_traits(type: int) -> ffi.CData:
""" ggml_type_traits_t ggml_internal_get_type_traits(enum ggml_type type);"""
Expand Down
4 changes: 2 additions & 2 deletions examples/simple/CMakeLists.txt
Expand Up @@ -12,8 +12,8 @@ set(TEST_TARGET simple-backend)
add_executable(${TEST_TARGET} simple-backend.cpp)
target_link_libraries(${TEST_TARGET} PRIVATE ggml)

if (GGML_CUBLAS)
add_compile_definitions(GGML_USE_CUBLAS)
if (GGML_CUDA)
add_compile_definitions(GGML_USE_CUDA)
endif()

if (GGML_METAL)
Expand Down
4 changes: 2 additions & 2 deletions examples/simple/simple-backend.cpp
Expand Up @@ -2,7 +2,7 @@
#include "ggml/ggml-alloc.h"
#include "ggml/ggml-backend.h"

#ifdef GGML_USE_CUBLAS
#ifdef GGML_USE_CUDA
#include "ggml-cuda.h"
#endif

Expand Down Expand Up @@ -44,7 +44,7 @@ struct simple_model {
// initialize the tensors of the model in this case two matrices 2x2
void load_model(simple_model & model, float * a, float * b, int rows_A, int cols_A, int rows_B, int cols_B) {
// initialize the backend
#ifdef GGML_USE_CUBLAS
#ifdef GGML_USE_CUDA
fprintf(stderr, "%s: using CUDA backend\n", __func__);
model.backend = ggml_backend_cuda_init(0); // init device 0
if (!model.backend) {
Expand Down

0 comments on commit 39060c7

Please sign in to comment.