|
9 | 9 | sys.path.insert(0, '..')
|
10 | 10 | from modzy import ApiClient
|
11 | 11 |
|
12 |
| - |
| 12 | + |
13 | 13 | # Always configure the logger level (ie: DEBUG, INFO, WARNING, ERROR, CRITICAL)
|
14 | 14 | logging.basicConfig(level=logging.INFO)
|
15 | 15 | logger = logging.getLogger(__name__)
|
16 | 16 |
|
17 | 17 | # The system admin can provide the right base API URL, the API key can be downloaded from your profile page on Modzy.
|
18 |
| -# You can configure those params as is described in the README file (as environment variables, or by using the .env file), |
| 18 | +# You can configure those params as described in the README file (as environment variables, or by using the .env file), |
19 | 19 | # or you can just update the BASE_URL and API_KEY variables and use this sample code (not recommended for production environments).
|
20 | 20 |
|
21 | 21 | dotenv.load_dotenv()
|
|
35 | 35 | # Create a Job with an aws input, wait, and retrieve results:
|
36 | 36 |
|
37 | 37 | # Get the model object:
|
38 |
| -# If you already know the model identifier (i.e.: you got from the URL of the model details page or the input sample), |
39 |
| -# you can skip this step. If you don't you can find the model identifier by using its name as follows: |
| 38 | +# If you already know the model identifier (i.e.: you got it from the URL of the model details page or the input sample), |
| 39 | +# you can skip this step. If you don't, you can find the model identifier by using its name as follows: |
40 | 40 | model = client.models.get_by_name("Facial Embedding")
|
41 | 41 | # Or if you already know the model id and want to know more about the model, you can use this instead:
|
42 | 42 | # model = client.models.get("f7e252e26a")
|
|
49 | 49 |
|
50 | 50 | # Get the model version object:
|
51 | 51 | # If you already know the model version and the input key(s) of the model version you can skip this step. Also, you can
|
52 |
| -# use the following code block to know about the inputs keys and skip the call on future job submissions. |
| 52 | +# use the following code block to know about the input keys and skip the call on future job submissions. |
53 | 53 | modelVersion = client.models.get_version(model, model.latest_version)
|
54 | 54 | # The info stored in modelVersion provides insights about the amount of time that the model can spend processing,
|
55 |
| -# the inputs, and output keys of the model. |
| 55 | +# the input and output keys of the model. |
56 | 56 | logger.info("This model version is {}".format(modelVersion))
|
57 | 57 | logger.info(" timeouts: status {}ms, run {}ms ".format(modelVersion.timeout.status, modelVersion.timeout.run))
|
58 | 58 | logger.info(" inputs: ")
|
|
75 | 75 | BUCKET_NAME="<<BucketName>>"
|
76 | 76 | # The File Key: replace <<FileId>> (remember, this model needs an image as input)
|
77 | 77 | FILE_KEY="<<FileId>>"
|
78 |
| -# With the info about the model (identifier), the model version (version string, input/output keys), you are ready to |
| 78 | +# With the info about the model (identifier) and the model version (version string, input/output keys), you are ready to |
79 | 79 | # submit the job. Just prepare the source dictionary:
|
80 | 80 | sources = {"source-key": {"image": {'bucket': BUCKET_NAME, 'key': FILE_KEY}}}
|
81 |
| -# An inference job groups input data that you send to a model. You can send any amount of inputs to |
82 |
| -# process and you can identify and refer to a specific input by the key that you assign, for example we can add: |
| 81 | +# An inference job groups input data sent to a model. You can send any number of inputs to |
| 82 | +# process and you can identify and refer to a specific input by the key assigned. For example, we can add: |
83 | 83 | sources["second-key"] = {"image": {'bucket': BUCKET_NAME, 'key': FILE_KEY}}
|
84 | 84 | sources["another-key"] = {"image": {'bucket': BUCKET_NAME, 'key': FILE_KEY}}
|
85 | 85 | # If you send a wrong input key, the model fails to process the input.
|
|
93 | 93 | logger.info("job: %s", job)
|
94 | 94 | # The job moves to SUBMITTED, meaning that Modzy acknowledged the job and sent it to the queue to be processed.
|
95 | 95 | # We provide a helper method to listen until the job finishes processing. It's a good practice to set a max timeout
|
96 |
| -# if you're doing a test (ie: 2*status+run). Otherwise, if the timeout is set to None, it will listen until the job |
| 96 | +# if you're doing a test (i.e.: 2*status+run). Otherwise, if the timeout is set to None, it listens until the job |
97 | 97 | # finishes and moves to COMPLETED, CANCELED, or TIMEOUT.
|
98 | 98 | job.block_until_complete(timeout=None)
|
99 | 99 |
|
|
103 | 103 | # A completed job means that all the inputs were processed by the model. Check the results for each
|
104 | 104 | # input key provided in the source dictionary to see the model output.
|
105 | 105 | result = job.get_result()
|
106 |
| - # The result object has some useful info: |
| 106 | + # The result object has some useful info: |
107 | 107 | logger.info("Result: finished: {}, total: {}, completed: {}, failed: {}"
|
108 | 108 | .format(result.finished, result.total, result.completed, result.failed))
|
109 | 109 | # Notice that we are iterating through the same input source keys
|
110 | 110 | for key in sources:
|
111 |
| - # The result object has the individual results of each job input. In this case the output key is called |
| 111 | + # The result object has the individual results of each job input. In this case the output key is called |
112 | 112 | # results.json, so we can get the results as follows:
|
113 | 113 | try:
|
114 | 114 | model_res = result.get_source_outputs(key)['results.json']
|
|
0 commit comments