
Commit 68daa33

dims and bwagner5 authored
Fix issue using count/min/max of inference-accelerators (#414)
* Fix issue using count/min/max of inference-accelerators

Signed-off-by: Davanum Srinivas <[email protected]>

* add int32 conversion on mismatch int filter

---------

Signed-off-by: Davanum Srinivas <[email protected]>
Co-authored-by: Brandon Wagner <[email protected]>
1 parent 1a16a52 commit 68daa33

File tree: 3 files changed, +11 -3 lines
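At the code level, the fix addresses a Go type-switch miss: the inference-accelerator count reaches the selector as a *int32 (the aws-sdk-go-v2 representation), but the integer cases in the filter dispatch only covered *int and *int64, so a *int32 value fell through without being range-checked. The commit therefore moves the CLI flag plumbing to int32 and adds an explicit *int32 case that widens to *int64. A minimal, standalone sketch of the mismatch (illustrative only, not the project's exec function):

package main

import "fmt"

// describe mimics the shape of an integer filter type switch: without a
// *int32 case, a pointer to int32 matches neither *int nor *int64.
func describe(spec interface{}) string {
    switch spec.(type) {
    case *int64:
        return "matched *int64"
    case *int:
        return "matched *int"
    default:
        return "no case matched"
    }
}

func main() {
    count := int32(4) // e.g. an accelerator count as decoded by aws-sdk-go-v2
    fmt.Println(describe(&count)) // prints: no case matched
}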

cmd/main.go (+2, -2)

@@ -174,7 +174,7 @@ Full docs can be found at github.com/aws/amazon-` + binName
     cli.ByteQuantityMinMaxRangeFlags(gpuMemoryTotal, nil, nil, "Number of GPUs' total memory (Example: 4 GiB)")
     cli.StringFlag(gpuManufacturer, nil, nil, "GPU Manufacturer name (Example: NVIDIA)", nil)
     cli.StringFlag(gpuModel, nil, nil, "GPU Model name (Example: K520)", nil)
-    cli.IntMinMaxRangeFlags(inferenceAccelerators, nil, nil, "Total Number of inference accelerators (Example: 4)")
+    cli.Int32MinMaxRangeFlags(inferenceAccelerators, nil, nil, "Total Number of inference accelerators (Example: 4)")
     cli.StringFlag(inferenceAcceleratorManufacturer, nil, nil, "Inference Accelerator Manufacturer name (Example: AWS)", nil)
     cli.StringFlag(inferenceAcceleratorModel, nil, nil, "Inference Accelerator Model name (Example: Inferentia)", nil)
     cli.StringOptionsFlag(placementGroupStrategy, nil, nil, "Placement group strategy: [cluster, partition, spread]", []string{"cluster", "partition", "spread"})
@@ -387,7 +387,7 @@ Full docs can be found at github.com/aws/amazon-` + binName
     GpuMemoryRange: cli.ByteQuantityRangeMe(flags[gpuMemoryTotal]),
     GPUManufacturer: cli.StringMe(flags[gpuManufacturer]),
     GPUModel: cli.StringMe(flags[gpuModel]),
-    InferenceAcceleratorsRange: cli.IntRangeMe(flags[inferenceAccelerators]),
+    InferenceAcceleratorsRange: cli.Int32RangeMe(flags[inferenceAccelerators]),
     InferenceAcceleratorManufacturer: cli.StringMe(flags[inferenceAcceleratorManufacturer]),
     InferenceAcceleratorModel: cli.StringMe(flags[inferenceAcceleratorModel]),
     PlacementGroupStrategy: cli.StringMe(flags[placementGroupStrategy]),

pkg/selector/selector.go (+8)

@@ -484,6 +484,14 @@ func exec(instanceType ec2types.InstanceType, filterName string, filter filterPa
     if !isSupportedWithRangeInt64(iSpec, filter) {
         return false, nil
     }
+case *int32:
+    var iSpec64 *int64
+    if iSpec != nil {
+        iSpec64 = aws.Int64(int64(*iSpec))
+    }
+    if !isSupportedWithRangeInt64(iSpec64, filter) {
+        return false, nil
+    }
 case *int:
     if !isSupportedWithRangeInt(iSpec, filter) {
         return false, nil
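The new branch reuses the existing int64 range check after a nil-safe widening, so an unreported count stays nil rather than being dereferenced. The same conversion can be expressed without the SDK helper; a small sketch (helper name hypothetical, the diff calls aws.Int64 directly):

package main

import "fmt"

// int32ToInt64Ptr mirrors the diff's nil-safe widening of *int32 to *int64
// (aws.Int64(int64(*iSpec))): nil stays nil, non-nil values are converted.
func int32ToInt64Ptr(v *int32) *int64 {
    if v == nil {
        return nil
    }
    widened := int64(*v)
    return &widened
}

func main() {
    var missing *int32
    count := int32(2)
    fmt.Println(int32ToInt64Ptr(missing)) // <nil>
    fmt.Println(*int32ToInt64Ptr(&count)) // 2
}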

pkg/selector/types.go (+1, -1)

@@ -162,7 +162,7 @@ type Filters struct {
     GPUModel *string
 
     // InferenceAcceleratorsRange filters inference accelerators available to the instance type
-    InferenceAcceleratorsRange *IntRangeFilter
+    InferenceAcceleratorsRange *Int32RangeFilter
 
     // InferenceAcceleratorManufacturer filters by inference acceleartor manufacturer
     InferenceAcceleratorManufacturer *string
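Because the exported field type changes from *IntRangeFilter to *Int32RangeFilter, callers that build Filters programmatically need the 32-bit type as well. A hedged usage sketch, assuming Int32RangeFilter exposes LowerBound/UpperBound fields like the package's other range filters and that the v2 module path applies:

package main

import "github.com/aws/amazon-ec2-instance-selector/v2/pkg/selector"

func main() {
    // Ask for instance types with between 1 and 4 inference accelerators.
    // Field names are assumed by analogy with IntRangeFilter; verify against
    // the released API before relying on them.
    filters := selector.Filters{
        InferenceAcceleratorsRange: &selector.Int32RangeFilter{
            LowerBound: 1,
            UpperBound: 4,
        },
    }
    _ = filters // pass to an initialized selector to run the query
}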
