@@ -829,9 +829,9 @@ struct common_init_result common_init_from_params(common_params & params) {
     llama_model * model = nullptr;
 
     if (!params.hf_repo.empty() && !params.hf_file.empty()) {
-        model = common_load_model_from_hf(params.hf_repo.c_str(), params.hf_file.c_str(), params.model.c_str(), params.hf_token.c_str(), mparams);
+        model = common_load_model_from_hf(params.hf_repo, params.hf_file, params.model, params.hf_token, mparams);
     } else if (!params.model_url.empty()) {
-        model = common_load_model_from_url(params.model_url.c_str(), params.model.c_str(), params.hf_token.c_str(), mparams);
+        model = common_load_model_from_url(params.model_url, params.model, params.hf_token, mparams);
     } else {
         model = llama_load_model_from_file(params.model.c_str(), mparams);
     }
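Since `common_params` already stores these fields as `std::string`, the call sites above now bind the members directly to the new `const std::string &` parameters and the four `.c_str()` conversions disappear. A minimal standalone sketch of the binding behavior (the `take_url` stub is invented for illustration and only mirrors the new parameter shape):

#include <string>

// Invented stub mirroring the new parameter shape; the real declarations live in common.h.
static void take_url(const std::string & url) { (void) url; }

int main() {
    std::string stored = "https://example.com/model.gguf"; // like params.model_url
    take_url(stored);                        // a std::string member binds directly, no .c_str()
    take_url("https://example.com/m.gguf");  // a literal still works, via an implicit temporary
    return 0;
}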
@@ -1342,17 +1342,17 @@ static bool common_download_file(const std::string & url, const std::string & pa
 }
 
 struct llama_model * common_load_model_from_url(
-        const char * model_url,
-        const char * path_model,
-        const char * hf_token,
+        const std::string & model_url,
+        const std::string & local_path,
+        const std::string & hf_token,
         const struct llama_model_params & params) {
     // Basic validation of the model_url
-    if (!model_url || strlen(model_url) == 0) {
+    if (model_url.empty()) {
         LOG_ERR("%s: invalid model_url\n", __func__);
         return NULL;
     }
 
-    if (!common_download_file(model_url, path_model, hf_token)) {
+    if (!common_download_file(model_url, local_path, hf_token)) {
         return NULL;
     }
 
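This hunk is the core of the signature change: a reference can never be null, so the old two-part guard (`!model_url || strlen(model_url) == 0`) collapses to a single `.empty()` check. A self-contained sketch of just that validation step, with the surrounding llama.cpp machinery stubbed out:

#include <cstdio>
#include <string>

// Stub standing in for llama_model; the real type is declared in llama.h.
struct llama_model;

// Sketch of the new validation; the actual download and load logic is elided.
static llama_model * load_from_url_sketch(const std::string & model_url) {
    // Old form: if (!model_url || strlen(model_url) == 0) { ... }
    // New form: a reference cannot be null, so only emptiness remains to check.
    if (model_url.empty()) {
        std::fprintf(stderr, "%s: invalid model_url\n", __func__);
        return nullptr;
    }
    return nullptr; // download + load would follow here
}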
@@ -1363,9 +1363,9 @@ struct llama_model * common_load_model_from_url(
         /*.no_alloc = */ true,
         /*.ctx      = */ NULL,
     };
-    auto * ctx_gguf = gguf_init_from_file(path_model, gguf_params);
+    auto * ctx_gguf = gguf_init_from_file(local_path.c_str(), gguf_params);
     if (!ctx_gguf) {
-        LOG_ERR("\n%s: failed to load input GGUF from %s\n", __func__, path_model);
+        LOG_ERR("\n%s: failed to load input GGUF from %s\n", __func__, local_path.c_str());
         return NULL;
     }
 
@@ -1384,13 +1384,13 @@ struct llama_model * common_load_model_from_url(
     // Verify the first split file format
     // and extract split URL and PATH prefixes
     {
-        if (!llama_split_prefix(split_prefix, sizeof(split_prefix), path_model, 0, n_split)) {
-            LOG_ERR("\n%s: unexpected model file name: %s n_split=%d\n", __func__, path_model, n_split);
+        if (!llama_split_prefix(split_prefix, sizeof(split_prefix), local_path.c_str(), 0, n_split)) {
+            LOG_ERR("\n%s: unexpected model file name: %s n_split=%d\n", __func__, local_path.c_str(), n_split);
             return NULL;
         }
 
-        if (!llama_split_prefix(split_url_prefix, sizeof(split_url_prefix), model_url, 0, n_split)) {
-            LOG_ERR("\n%s: unexpected model url: %s n_split=%d\n", __func__, model_url, n_split);
+        if (!llama_split_prefix(split_url_prefix, sizeof(split_url_prefix), model_url.c_str(), 0, n_split)) {
+            LOG_ERR("\n%s: unexpected model url: %s n_split=%d\n", __func__, model_url.c_str(), n_split);
             return NULL;
         }
     }
@@ -1417,14 +1417,14 @@ struct llama_model * common_load_model_from_url(
         }
     }
 
-    return llama_load_model_from_file(path_model, params);
+    return llama_load_model_from_file(local_path.c_str(), params);
 }
 
 struct llama_model * common_load_model_from_hf(
-        const char * repo,
-        const char * model,
-        const char * path_model,
-        const char * hf_token,
+        const std::string & repo,
+        const std::string & remote_path,
+        const std::string & local_path,
+        const std::string & hf_token,
         const struct llama_model_params & params) {
     // construct hugging face model url:
     //
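The renames also make the three path-like arguments self-describing: `model` becomes `remote_path` (the GGUF file inside the repo) and `path_model` becomes `local_path` (where the file is cached on disk). A hypothetical call after the change (repo, file, and path values are invented for illustration):

#include "common.h" // declares common_load_model_from_hf with the new signature

// ...
llama_model_params mparams = llama_model_default_params();
llama_model * model = common_load_model_from_hf(
    "someuser/some-model-GGUF", // repo        - Hugging Face repository id
    "some-model.Q4_K_M.gguf",   // remote_path - GGUF file inside that repo
    "/tmp/some-model.gguf",     // local_path  - where to store/cache it locally
    "",                         // hf_token    - empty string: anonymous download
    mparams);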
@@ -1438,27 +1438,27 @@ struct llama_model * common_load_model_from_hf(
     std::string model_url = "https://huggingface.co/";
     model_url += repo;
     model_url += "/resolve/main/";
-    model_url += model;
+    model_url += remote_path;
 
-    return common_load_model_from_url(model_url.c_str(), path_model, hf_token, params);
+    return common_load_model_from_url(model_url, local_path, hf_token, params);
 }
 
 #else
 
 struct llama_model * common_load_model_from_url(
-        const char * /*model_url*/,
-        const char * /*path_model*/,
-        const char * /*hf_token*/,
+        const std::string & /*model_url*/,
+        const std::string & /*local_path*/,
+        const std::string & /*hf_token*/,
         const struct llama_model_params & /*params*/) {
     LOG_WRN("%s: llama.cpp built without libcurl, downloading from an url not supported.\n", __func__);
     return nullptr;
 }
 
 struct llama_model * common_load_model_from_hf(
-        const char * /*repo*/,
-        const char * /*model*/,
-        const char * /*path_model*/,
-        const char * /*hf_token*/,
+        const std::string & /*repo*/,
+        const std::string & /*remote_path*/,
+        const std::string & /*local_path*/,
+        const std::string & /*hf_token*/,
         const struct llama_model_params & /*params*/) {
     LOG_WRN("%s: llama.cpp built without libcurl, downloading from Hugging Face not supported.\n", __func__);
     return nullptr;
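For a concrete sense of the URL construction in the hunk above, a tiny standalone check (the repo and file names are made up):

#include <cassert>
#include <string>

int main() {
    const std::string repo        = "someuser/some-model-GGUF"; // hypothetical repo
    const std::string remote_path = "some-model.Q4_K_M.gguf";   // hypothetical file

    // Same three concatenations as common_load_model_from_hf above.
    std::string model_url = "https://huggingface.co/";
    model_url += repo;
    model_url += "/resolve/main/";
    model_url += remote_path;

    assert(model_url == "https://huggingface.co/someuser/some-model-GGUF/resolve/main/some-model.Q4_K_M.gguf");
    return 0;
}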