const fs = require('fs');

logger.level = "info";

// The system admin can provide the right base API URL; the API key can be downloaded from your profile page on Modzy.
// You can configure those params as described in the README file (as environment variables, or by using the .env file),
// or you can just update the BASE_URL and API_KEY variables and use this sample code (not recommended for production environments).

// The MODZY_BASE_URL should point to the API services route, which may be different from the Modzy page URL.
// (ie: https://modzy.example.com/api).
const BASE_URL = process.env.MODZY_BASE_URL;
// The MODZY_API_KEY is your own personal API key. It is composed by a public part, a dot character, and a private part
// (ie: AzQBJ3h4B1z60xNmhAJF.uQyQh8putLIRDi1nOldh).
const API_KEY = process.env.MODZY_API_KEY;

// Client initialization:
// Initialize the ApiClient instance with the BASE_URL and the API_KEY to store those arguments
// for the following API calls.
// NOTE(review): `modzy` and `logger` are assumed to be required above this chunk — confirm against the full file.
const modzyClient = new modzy.ModzyClient(BASE_URL, API_KEY);

// Create a Job with an embedded input, wait, and retrieve results:
// Submits an inference job with embedded (Base64-encoded, in-memory) inputs to the
// "Multi-Language OCR" model, blocks until the job finishes, and logs per-input results.
// Relies on the module-level `modzyClient`, `logger`, and `fs` bindings.
// Returns: a Promise that resolves to undefined; all errors are caught and logged.
async function createJobWithEmbeddedInput() {
    try {
        // Get the model object:
        // If you already know the model identifier (i.e.: you got it from the URL of the model details page or the input sample),
        // you can skip this step. If you don't, you can find the model identifier by using its name as follows:
        const model = await modzyClient.getModelByName("Multi-Language OCR");
        // Or if you already know the model id and want to know more about the model, you can use this instead:
        //let model = await modzyClient.getModel("c60c8dbd79");
        //You can find more information about how to query the models on the model_samples.js file.

        // The model identifier is under the modelId key. You can take a look at the other keys by uncommenting the following line
        logger.info(Object.keys(model).toString().replace('\n', ' '));
        // Or just log the model identifier and the latest version
        logger.info(`The model identifier is ${model.modelId} and the latest version is ${model.latestVersion}`);

        // Get the model version object:
        // If you already know the model version and the input key(s) of the model version, you can skip this step. Also, you can
        // use the following code block to know about the input keys and skip the call on future job submissions.
        const modelVersion = await modzyClient.getModelVersion(model.modelId, model.latestVersion);
        // The info stored in modelVersion provides insights about the amount of time that the model can spend processing, the input, and
        // output keys of the model.
        logger.info(`This model version is ${modelVersion.version}`);
        logger.info(`  timeouts: status ${modelVersion.timeout.status}ms, run ${modelVersion.timeout.run}ms `);
        logger.info("  inputs: ");
        // Fix: declare the loop variable (`for (key in ...)` created an implicit global).
        for (const key in modelVersion.inputs) {
            const input = modelVersion.inputs[key];
            logger.info(`    key ${input.name}, type ${input.acceptedMediaTypes}, description: ${input.description}`);
        }
        logger.info("  outputs: ");
        for (const key in modelVersion.outputs) {
            const output = modelVersion.outputs[key];
            logger.info(`    key ${output.name}, type ${output.mediaType}, description: ${output.description}`);
        }

        // Send the job:
        // An embedded input is a byte array encoded as a string in Base64. This input type comes very handy for small to middle size files. However,
        // it requires to load and encode files in memory which can be an issue for larger files.
        const imageBytes = fs.readFileSync('samples/image.png');
        let configBytes = fs.readFileSync('samples/config.json');
        // With the info about the model (identifier) and the model version (version string, input/output keys), you are ready to
        // submit the job. Just prepare the source object:
        const sources = {"source-key": {"input": imageBytes, "config.json": configBytes}};
        // An inference job groups input data sent to a model. You can send any amount of inputs to
        // process and you can identify and refer to a specific input by the key assigned. For example we can add:
        sources["second-key"] = {"input": imageBytes, "config.json": configBytes};
        // You don't need to load all the inputs from files, just convert to bytes as follows:
        configBytes = Buffer.from(JSON.stringify({"languages": ["spa"]}));
        sources["another-key"] = {"input": imageBytes, "config.json": configBytes};
        // If you send a wrong input key, the model fails to process the input.
        sources["wrong-key"] = {"a.wrong.key": imageBytes, "config.json": configBytes};
        // If you send a correct input key, but some wrong values, the model fails to process the input.
        sources["wrong-value"] = {"input": configBytes, "config.json": imageBytes};
        // When you have all your inputs ready, you can use our helper method to submit the job as follows:
        let job = await modzyClient.submitJobEmbedded(model.modelId, modelVersion.version, "application/octet-stream", sources);
        // Modzy creates the job and queues it for processing. The job object contains all the info that you need to keep track
        // of the process, the most important being the job identifier and the job status.
        logger.info("job: " + job.jobIdentifier + " " + job.status);

        // The job moves to SUBMITTED, meaning that Modzy acknowledged the job and sent it to the queue to be processed.
        // We provide a helper method to listen until the job finishes processing. It listens until the job finishes
        // and moves to COMPLETED, CANCELED, or TIMEOUT.
        job = await modzyClient.blockUntilComplete(job);

        // Get the results:
        // Check the status of the job. Jobs may be canceled or may reach a timeout.
        if (job.status === "COMPLETED") {
            // A completed job means that all the inputs were processed by the model. Check the results for each
            // input key provided in the source object to see the model output.
            const result = await modzyClient.getResult(job.jobIdentifier);
            // The result object has some useful info:
            logger.info(`Result: finished: ${result.finished}, total: ${result.total}, completed: ${result.completed}, failed: ${result.failed}`);
            // Notice that we are iterating through the same input source keys
            for (const key in sources) {
                // The results object has the individual results of each job input. In this case the output key is called
                // results.json, so we can get the results as follows:
                if (result.results[key]) {
                    const model_res = result.results[key]["results.json"];
                    // The output for this model comes in a JSON format, so we can directly log the model results:
                    logger.info(`    ${key}: ${JSON.stringify(model_res)}`);
                }
                else {
                    // If the model raises an error, we can get the specific error message:
                    logger.warn(`    ${key}: failure ${result.failures[key]['error']}`);
                }
            }
        }
        else {
            // Fix: original called `log.warn(...)`, but no `log` binding exists — that path
            // would throw a ReferenceError instead of reporting the job status.
            logger.warn(`The job ends with status ${job.status}`);
        }
    }
    catch (error) {
        logger.warn(error);
    }
}
114
116
115
117
0 commit comments