|
| 1 | +--- |
| 2 | +"@infinitered/react-native-mlkit-object-detection": major |
| 3 | +"@infinitered/react-native-mlkit-image-labeling": major |
| 4 | +"@infinitered/react-native-mlkit-core": major |
| 5 | +--- |
| 6 | + |
| 7 | +Standardize API patterns and coordinate structures across ML Kit modules |
| 8 | + |
| 9 | +1. Separates model operations into three hooks with simpler APIs |
| 10 | + 1. loading the models, (`useObjectDetectionModels`, `useImageLabelingModels`) |
| 11 | + 2. initializing the provider (`useObjectDetectionProvider`, `useImageLabelingProvider`) |
| 12 | +   3. accessing models for inference, (`useObjectDetection`, `useImageLabeling`) |
| 13 | +2. Implements consistent naming patterns to make the APIs more legible |
| 14 | + - Removes "RNMLKit" prefix from non-native types |
| 15 | + - Use specific names for hooks (`useImageLabelingModels` instead of `useModels`) |
| 16 | + - Model configs are now `Configs`, instead of `AssetRecords` |
| 17 | +3. Moves base types into the `core` package to ensure consistency |
| 18 | +4. Fixes an issue with bounding box placement on portrait / rotated images on iOS |
| 19 | +5. Improves error handling and state management |
| 20 | +6. Updates documentation to match the new API |
| 21 | + |
| 22 | +## Breaking Changes |
| 23 | + |
| 24 | +### Image Labeling |
| 25 | + |
| 26 | +- Renamed `useModels` to `useImageLabelingModels` for clarity |
| 27 | +- Renamed `useImageLabeler` to `useImageLabeling` |
| 28 | +- Introduced new `useImageLabelingProvider` hook for cleaner context management |
| 29 | +- Added type-safe configurations with `ImageLabelingConfig` |
| 30 | +- Renamed model context provider from `ObjectDetectionModelContextProvider` to `ImageLabelingModelProvider` |
| 31 | + |
| 32 | +Here's how to update your app: |
| 33 | + |
| 34 | +#### Fetching the provider |
| 35 | + |
| 36 | +```diff |
| 37 | +- const MODELS: AssetRecord = { |
| 38 | ++ const MODELS: ImageLabelingConfig = { |
| 39 | + nsfwDetector: { |
| 40 | + model: require("./assets/models/nsfw-detector.tflite"), |
| 41 | + options: { |
| 42 | + maxResultCount: 5, |
| 43 | + confidenceThreshold: 0.5, |
| 44 | + } |
| 45 | + }, |
| 46 | +}; |
| 47 | + |
| 48 | +function App() { |
| 49 | +- const { ObjectDetectionModelContextProvider } = useModels(MODELS) |
| 50 | ++ const models = useImageLabelingModels(MODELS) |
| 51 | ++ const { ImageLabelingModelProvider } = useImageLabelingProvider(models) |
| 52 | + |
| 53 | + return ( |
| 54 | +- <ObjectDetectionModelContextProvider> |
| 55 | ++ <ImageLabelingModelProvider> |
| 56 | + {/* Rest of your app */} |
| 57 | +- </ObjectDetectionModelContextProvider> |
| 58 | ++ </ImageLabelingModelProvider> |
| 59 | + ) |
| 60 | +} |
| 61 | +``` |
| 62 | + |
| 63 | +#### Using the model |
| 64 | + |
| 65 | +```diff |
| 66 | +- const model = useImageLabeler("nsfwDetector") |
| 67 | ++ const detector = useImageLabeling("nsfwDetector") |
| 68 | + |
| 69 | +const labels = await detector.classifyImage(imagePath) |
| 70 | +``` |
| 71 | + |
| 72 | +### Object Detection |
| 73 | + |
| 74 | +- `useObjectDetectionModels` now requires an `assets` parameter |
| 75 | +- `useObjectDetector` is now `useObjectDetection` |
| 76 | +- Introduced new `useObjectDetectionProvider` hook for context management |
| 77 | +- Renamed and standardized type definitions: |
| 78 | + - `RNMLKitObjectDetectionObject` → `ObjectDetectionObject` |
| 79 | + - `RNMLKitObjectDetectorOptions` → `ObjectDetectorOptions` |
| 80 | + - `RNMLKitCustomObjectDetectorOptions` → `CustomObjectDetectorOptions` |
| 81 | +- Added new types: `ObjectDetectionModelInfo`, `ObjectDetectionConfig`, `ObjectDetectionModels` |
| 82 | +- Moved model configuration to typed asset records |
| 83 | +- Default model now included in models type union |
| 84 | + |
| 85 | +Here's how to update your app: |
| 86 | + |
| 87 | +#### Fetching the provider |
| 88 | + |
| 89 | +```diff |
| 90 | + |
| 91 | +- const MODELS: AssetRecord = { |
| 92 | ++ const MODELS: ObjectDetectionConfig = { |
| 93 | + birdDetector: { |
| 94 | + model: require("./assets/models/bird-detector.tflite"), |
| 95 | + options: { |
| 96 | + shouldEnableClassification: false, |
| 97 | + shouldEnableMultipleObjects: false, |
| 98 | + } |
| 99 | + }, |
| 100 | +}; |
| 101 | + |
| 102 | +function App() { |
| 103 | + |
| 104 | +- const { ObjectDetectionModelContextProvider } = useObjectDetectionModels({ |
| 105 | +- assets: MODELS, |
| 106 | +- loadDefaultModel: true, |
| 107 | +- defaultModelOptions: DEFAULT_MODEL_OPTIONS, |
| 108 | +- }) |
| 109 | + |
| 110 | ++ const models = useObjectDetectionModels({ |
| 111 | ++ assets: MODELS, |
| 112 | ++ loadDefaultModel: true, |
| 113 | ++ defaultModelOptions: DEFAULT_MODEL_OPTIONS, |
| 114 | ++ }) |
| 115 | ++ |
| 116 | ++ const { ObjectDetectionProvider } = useObjectDetectionProvider(models) |
| 117 | + |
| 118 | + return ( |
| 119 | +- <ObjectDetectionModelContextProvider> |
| 120 | ++ <ObjectDetectionProvider> |
| 121 | + {/* Rest of your app */} |
| 122 | +- </ObjectDetectionModelContextProvider> |
| 123 | ++ </ObjectDetectionProvider> |
| 124 | +) |
| 125 | +} |
| 126 | + |
| 127 | +``` |
| 128 | + |
| 129 | +#### Using the model |
| 130 | + |
| 131 | +```diff |
| 132 | +- const {models: {birdDetector}} = useObjectDetectionModels({ |
| 133 | +- assets: MODELS, |
| 134 | +- loadDefaultModel: true, |
| 135 | +- defaultModelOptions: DEFAULT_MODEL_OPTIONS, |
| 136 | +- }) |
| 137 | +- |
| 138 | ++ const birdDetector = useObjectDetection("birdDetector") |
| 139 | + |
| 140 | +const objects = await birdDetector.detectObjects(imagePath) |
| 141 | +``` |
| 142 | + |
| 143 | +### Face Detection |
| 144 | + |
| 145 | +- Changed option naming conventions to match ML Kit SDK patterns: |
| 146 | +  - `detectLandmarks` → `landmarkMode` |
| 147 | +  - `runClassifications` → `classificationMode` |
| 148 | +- Changed default `performanceMode` from `accurate` to `fast` |
| 149 | +- Renamed hook from `useFaceDetector` to `useFaceDetection` |
| 150 | +- Renamed context provider from `RNMLKitFaceDetectionContextProvider` to `FaceDetectionProvider` |
| 151 | +- Added comprehensive error handling |
| 152 | +- Added new state management with `FaceDetectionState` type |
| 153 | + |
| 154 | +Here's how to update your app: |
| 155 | + |
| 156 | +#### Using the detector |
| 157 | + |
| 158 | +```diff |
| 159 | +const options = { |
| 160 | +- detectLandmarks: true, |
| 161 | ++ landmarkMode: true, |
| 162 | +- runClassifications: true, |
| 163 | ++ classificationMode: true, |
| 164 | +} |
| 165 | +``` |
| 166 | + |
| 167 | +#### Using the provider |
| 168 | + |
| 169 | +```diff |
| 170 | +- import { RNMLKitFaceDetectionContextProvider } from "@infinitered/react-native-mlkit-face-detection" |
| 171 | ++ import { FaceDetectionProvider } from "@infinitered/react-native-mlkit-face-detection" |
| 172 | + |
| 173 | +function App() { |
| 174 | + return ( |
| 175 | +- <RNMLKitFaceDetectionContextProvider> |
| 176 | ++ <FaceDetectionProvider> |
| 177 | + {/* Rest of your app */} |
| 178 | +- </RNMLKitFaceDetectionContextProvider> |
| 179 | ++ </FaceDetectionProvider> |
| 180 | + ) |
| 181 | +} |
| 182 | +``` |
| 183 | + |
| 184 | +#### Using the hooks |
| 185 | + |
| 186 | +```diff |
| 187 | +- const detector = useFaceDetector() |
| 188 | ++ const detector = useFaceDetection() |
| 189 | + |
| 190 | +// useFacesInPhoto remains unchanged |
| 191 | +const { faces, status, error } = useFacesInPhoto(imageUri) |
| 192 | +``` |
| 193 | + |
| 194 | +### Core Module |
| 195 | + |
| 196 | +- Introduced shared TypeScript interfaces: |
| 197 | + - `ModelInfo<T>` |
| 198 | + - `AssetRecord<T>` |
| 199 | +- Standardized frame coordinate structure |
| 200 | +- Implemented consistent type patterns |
0 commit comments