@@ -102,6 +102,60 @@ void TfLiteVarArrayFree(T* a) {
   free(a);
 }
 
+#ifndef TF_LITE_STATIC_MEMORY
+
+TfLiteQuantization TfLiteQuantizationClone(const TfLiteQuantization& src) {
+  TfLiteQuantization dst;
+  dst.type = src.type;
+  switch (src.type) {
+    case kTfLiteNoQuantization:
+      break;
+    case kTfLiteAffineQuantization: {
+      dst.params = calloc(1, sizeof(TfLiteAffineQuantization));
+      const TfLiteAffineQuantization* const src_params =
+          (TfLiteAffineQuantization*)(src.params);
+      TfLiteAffineQuantization* const dst_params =
+          (TfLiteAffineQuantization*)(dst.params);
+      dst_params->quantized_dimension = src_params->quantized_dimension;
+      dst_params->scale = TfLiteFloatArrayCopy(src_params->scale);
+      dst_params->zero_point = TfLiteIntArrayCopy(src_params->zero_point);
+      break;
+    }
+  }
+  return dst;
+}
+
+TfLiteSparsity TfLiteSparsityClone(const TfLiteSparsity& src) {
+  TfLiteSparsity dst = src;
+  dst.traversal_order = TfLiteIntArrayCopy(src.traversal_order);
+  dst.block_map = TfLiteIntArrayCopy(src.block_map);
+  if (src.dim_metadata) {
+    dst.dim_metadata = reinterpret_cast<TfLiteDimensionMetadata*>(
+        calloc(1, sizeof(TfLiteDimensionMetadata) * src.dim_metadata_size));
+    for (int i = 0; i < src.dim_metadata_size; ++i) {
+      dst.dim_metadata[i] = src.dim_metadata[i];
+      dst.dim_metadata[i].array_segments =
+          TfLiteIntArrayCopy(src.dim_metadata[i].array_segments);
+      dst.dim_metadata[i].array_indices =
+          TfLiteIntArrayCopy(src.dim_metadata[i].array_indices);
+    }
+  }
+  return dst;
+}
+
+// Clones the source sparsity to a newly allocated object.
+TfLiteSparsity* TfLiteSparsityClone(const TfLiteSparsity* const src) {
+  if (!src) {
+    return nullptr;
+  }
+  TfLiteSparsity* dst =
+      reinterpret_cast<TfLiteSparsity*>(calloc(1, sizeof(TfLiteSparsity)));
+  *dst = TfLiteSparsityClone(*src);
+  return dst;
+}
+
+#endif  // TF_LITE_STATIC_MEMORY
+
 }  // namespace
 
 extern "C" {
@@ -234,6 +288,55 @@ void TfLiteTensorFree(TfLiteTensor* t) {
   t->sparsity = nullptr;
 }
 
+TfLiteTensor TfLiteTensorClone(const TfLiteTensor src) {
+  // We copy all of the source data first, then we clone the fields that can't
+  // be shared between two tensor instances.
+  TfLiteTensor dst = src;
+  // Data that is owned by the original tensor must be cloned. Check
+  // TfLiteTensorFree to find out which members are owned.
+  if (src.data.data) {
+    const TfLiteAllocationStrategy allocation_strategy =
+        TfLiteTensorGetAllocationStrategy(&src);
+    switch (allocation_strategy) {
+      case kTfLiteAllocationStrategyUnknown:
+        // We don't know the allocation strategy, which means that the tensor
+        // doesn't own its data: we keep the copied pointer to the data.
+        break;
+      case kTfLiteAllocationStrategyNone:
+        break;
+      case kTfLiteAllocationStrategyMMap:
+        // Mmapped data is read-only and external to the interpreter. We keep
+        // the copied pointer to the data.
+        break;
+      case kTfLiteAllocationStrategyArena:
+        // Arena tensors are allocated when the graph is prepared. There is no
+        // data associated with such a tensor between runs, so we don't care
+        // about the value of `data`.
+        break;
+      case kTfLiteAllocationStrategyMalloc:
+        dst.data.data = malloc(src.bytes);
+        std::memcpy(dst.data.data, src.data.data, src.bytes);
+        break;
+      case kTfLiteAllocationStrategyNew:
+        // Special case for variant objects. They are allocated using
+        // new/delete but require using the `CloneTo` function.
+        if (src.allocation_type == kTfLiteVariantObject) {
+          dst.data.data = reinterpret_cast<const VariantData*>(src.data.data)
+                              ->CloneTo(nullptr);
+        } else {
+          dst.data.data = new char[src.bytes];
+          std::memcpy(dst.data.data, src.data.data, src.bytes);
+        }
+        break;
+    }
+  }
+  dst.dims = TfLiteIntArrayCopy(src.dims);
+  dst.dims_signature = TfLiteIntArrayCopy(src.dims_signature);
+  dst.quantization = TfLiteQuantizationClone(src.quantization);
+  dst.sparsity = TfLiteSparsityClone(src.sparsity);
+  return dst;
+}
+
 void TfLiteTensorReset(TfLiteType type, const char* name, TfLiteIntArray* dims,
                        TfLiteQuantizationParams quantization, char* buffer,
                        size_t size, TfLiteAllocationType allocation_type,
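Note: `TfLiteTensorClone` decides per allocation strategy whether the payload itself must be duplicated, while `dims`, `dims_signature`, `quantization`, and `sparsity` are always deep-copied. Below is a hedged usage sketch for the simplest case, a malloc-backed `kTfLiteDynamic` tensor; the helper name `TensorCloneSketch`, the shape, and the include path are assumptions made for illustration, not part of this change.

#include <cstdlib>
#include <cstring>

#include "tensorflow/lite/core/c/common.h"

// Illustrative only: clone a heap-backed dynamic tensor, then release both
// copies independently with TfLiteTensorFree.
void TensorCloneSketch() {
  TfLiteTensor src{};
  src.type = kTfLiteFloat32;
  src.allocation_type = kTfLiteDynamic;  // maps to kTfLiteAllocationStrategyMalloc
  src.bytes = 4 * sizeof(float);
  src.data.data = malloc(src.bytes);
  std::memset(src.data.data, 0, src.bytes);
  src.dims = TfLiteIntArrayCreate(1);
  src.dims->data[0] = 4;

  TfLiteTensor dst = TfLiteTensorClone(src);
  // dst owns its own buffer, dims, quantization, and sparsity, so the two
  // tensors no longer share any heap-allocated members.
  TfLiteTensorFree(&dst);
  TfLiteTensorFree(&src);
}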
@@ -334,6 +437,14 @@ TfLiteStatus TfLiteTensorResizeMaybeCopy(size_t num_bytes, TfLiteTensor* tensor,
 TfLiteStatus TfLiteTensorRealloc(size_t num_bytes, TfLiteTensor* tensor) {
   return TfLiteTensorResizeMaybeCopy(num_bytes, tensor, true);
 }
+
+const TfLiteIntArray* TfLiteTensorGetDimsSignature(const TfLiteTensor* t) {
+  if (t->dims_signature != nullptr && t->dims_signature->size != 0) {
+    return t->dims_signature;
+  } else {
+    return t->dims;
+  }
+}
 #endif  // TF_LITE_STATIC_MEMORY
 
 const char* TfLiteTypeGetName(TfLiteType type) {
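Note: `TfLiteTensorGetDimsSignature` falls back to `dims` when no signature was recorded (null or empty), so callers always get a usable shape array. One plausible use, sketched here with an invented helper name, is checking for dynamic dimensions, which the signature encodes as -1.

// Illustrative only: report whether any dimension is unknown until runtime.
bool HasDynamicDims(const TfLiteTensor* t) {
  const TfLiteIntArray* shape = TfLiteTensorGetDimsSignature(t);
  if (shape == nullptr) return false;
  for (int i = 0; i < shape->size; ++i) {
    if (shape->data[i] == -1) return true;  // -1 marks a dynamic dimension
  }
  return false;
}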
@@ -399,11 +510,13 @@ TfLiteAllocationStrategy TfLiteTensorGetAllocationStrategy(
     case kTfLiteDynamic:
       return kTfLiteAllocationStrategyMalloc;
     case kTfLitePersistentRo:
-      return kTfLiteAllocationStrategyUnknown;
+      return kTfLiteAllocationStrategyMalloc;
     case kTfLiteCustom:
       return kTfLiteAllocationStrategyUnknown;
     case kTfLiteVariantObject:
       return kTfLiteAllocationStrategyNew;
+    case kTfLiteNonCpu:
+      return kTfLiteAllocationStrategyUnknown;
   }
   return kTfLiteAllocationStrategyUnknown;
 }
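Note: the new `kTfLiteNonCpu` allocation type maps to `kTfLiteAllocationStrategyUnknown` here (and to the `Unknown` stability/step values in the hunks below), so generic code will not assume it can read, copy, or free such a buffer from the host. A sketch of the defensive pattern this enables follows; the helper name `CanDeepCopyData` is invented for illustration.

// Illustrative only: duplicate a tensor's payload only when the strategy is
// one the host positively owns and can memcpy.
bool CanDeepCopyData(const TfLiteTensor* t) {
  switch (TfLiteTensorGetAllocationStrategy(t)) {
    case kTfLiteAllocationStrategyMalloc:
    case kTfLiteAllocationStrategyNew:
      return true;   // host-visible and owned by the tensor
    default:
      return false;  // mmap, arena, none, unknown (including kTfLiteNonCpu)
  }
}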
@@ -428,6 +541,8 @@ TfLiteRunStability TfLiteTensorGetBufferAddressStability(
       return kTfLiteRunStabilityUnknown;
     case kTfLiteVariantObject:
       return kTfLiteRunStabilityAcrossRuns;
+    case kTfLiteNonCpu:
+      return kTfLiteRunStabilityUnknown;
   }
   return kTfLiteRunStabilityUnknown;
 }
@@ -451,6 +566,8 @@ TfLiteRunStability TfLiteTensorGetDataStability(const TfLiteTensor* const t) {
       return kTfLiteRunStabilityUnknown;
     case kTfLiteVariantObject:
       return kTfLiteRunStabilitySingleRun;
+    case kTfLiteNonCpu:
+      return kTfLiteRunStabilityUnknown;
   }
   return kTfLiteRunStabilityUnknown;
 }
@@ -477,11 +594,13 @@ TfLiteRunStep TfLiteTensorGetDataKnownStep(const TfLiteTensor* t) {
       return kTfLiteRunStepUnknown;
     case kTfLiteVariantObject:
       return kTfLiteRunStepEval;
+    case kTfLiteNonCpu:
+      return kTfLiteRunStepUnknown;
   }
   return kTfLiteRunStepUnknown;
 }
 
-// Returns the operation steop when the shape of a tensor is computed.
+// Returns the operation step when the shape of a tensor is computed.
 //
 // Some operations can precompute the shape of their results before the
 // evaluation step. This makes the shape available earlier for subsequent
@@ -504,6 +623,8 @@ TfLiteRunStep TfLiteTensorGetShapeKnownStep(const TfLiteTensor* t) {
       return kTfLiteRunStepUnknown;
     case kTfLiteVariantObject:
       return kTfLiteRunStepEval;
+    case kTfLiteNonCpu:
+      return kTfLiteRunStepUnknown;
   }
   return kTfLiteRunStepUnknown;
 }