
feat(initialize): default to first GPU when gpu_id not provided #125

Open · wants to merge 1 commit into main
@@ -99,7 +99,10 @@ def get_device(self):
         The get device function will return the device for the DL Framework.
         """
         if _is_gpu_available():
-            return int(self.context.system_properties.get("gpu_id"))
+            # gpu_id may not be provided in system_properties;
+            # in that case, default to the first GPU.
+            gpu_id = self.context.system_properties.get("gpu_id") or 0
+            return int(gpu_id)
         else:
             return -1
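
Reviewer note (not part of the diff): a minimal sketch of how the `or 0` fallback behaves. `resolve_gpu_id` is a hypothetical stand-in for the logic above, and a plain dict stands in for `self.context.system_properties` (which is at least dict-like, since the diff calls `.get()` on it):

```python
# Hypothetical helper mirroring the fallback in get_device above;
# a plain dict stands in for context.system_properties.
def resolve_gpu_id(system_properties):
    gpu_id = system_properties.get("gpu_id") or 0
    return int(gpu_id)

assert resolve_gpu_id({}) == 0                # key missing entirely
assert resolve_gpu_id({"gpu_id": None}) == 0  # key present but unset
assert resolve_gpu_id({"gpu_id": "1"}) == 1   # explicit id as a string
assert resolve_gpu_id({"gpu_id": 3}) == 3     # explicit id as an int

# Caveat: `or` also maps other falsy values (e.g. "") to 0, which is
# harmless here because 0 is exactly the intended default.
```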

tests/unit/test_handler_service_with_context.py (15 additions, 1 deletion)
@@ -23,7 +23,7 @@
 from mms.metrics.metrics_store import MetricsStore
 from mock import Mock
 from sagemaker_huggingface_inference_toolkit import handler_service
-from sagemaker_huggingface_inference_toolkit.transformers_utils import _load_model_from_hub, get_pipeline
+from sagemaker_huggingface_inference_toolkit.transformers_utils import _is_gpu_available, _load_model_from_hub, get_pipeline


 TASK = "text-classification"
@@ -63,6 +63,20 @@ def test_test_initialize(inference_handler):
     inference_handler.initialize(CONTEXT)
     assert inference_handler.initialized is True

+@require_torch
+@pytest.mark.skipif(not _is_gpu_available(), reason="No GPU available")
+@slow
+def test_initialize_without_gpu_id_fallback_to_first_gpu(inference_handler):
Author comment on lines +66 to +69:
I haven't been able to test this myself since I don't currently have GPU access. I'm also not sure whether we want the `skipif` on GPU availability; I didn't see any such marks elsewhere in the test file, but I think it makes sense to have it.

+    with tempfile.TemporaryDirectory() as tmpdirname:
+        storage_folder = _load_model_from_hub(
+            model_id=MODEL,
+            model_dir=tmpdirname,
+        )
+        CONTEXT = Context(MODEL, storage_folder, {}, 1, None, "1.1.4")
+
+        inference_handler.initialize(CONTEXT)
+        assert inference_handler.initialized is True
+        assert inference_handler.device == 0

 @require_torch
 def test_handle(inference_handler):
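
Reviewer note on the `skipif` question above: this is standard pytest behavior, shown here as a minimal sketch using the `_is_gpu_available` helper this PR imports (the test name is illustrative only):

```python
import pytest

from sagemaker_huggingface_inference_toolkit.transformers_utils import _is_gpu_available


# pytest evaluates the skipif condition at collection time: on hosts
# without a GPU the test is reported as skipped (with the given reason)
# instead of failing, while GPU machines still run it.
@pytest.mark.skipif(not _is_gpu_available(), reason="No GPU available")
def test_runs_only_with_gpu():
    assert _is_gpu_available()
```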