1 parent 7652c5d commit f0ae23d
torchrec/quant/embedding_modules.py
@@ -252,6 +252,10 @@ def quantize_state_dict(
                 if tensor.dtype == torch.float:
                     tensor = tensor.half()
                 quant_res = tensor.view(torch.uint8)
+            elif data_type == DataType.FP32:
+                if tensor.dtype == torch.float16:
+                    tensor = tensor.float()
+                quant_res = tensor.view(torch.uint8)
             else:
                 quant_res = (
                     torch.ops.fbgemm.FloatOrHalfToFusedNBitRowwiseQuantizedSBHalf(
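For context, a minimal standalone sketch (not the TorchRec source) of the pattern the new DataType.FP32 branch follows: if a stored weight arrived as FP16, upcast it to FP32, then reinterpret the storage as raw uint8 bytes, so the "quantized" buffer is simply the FP32 bit pattern with four bytes per element. The tensor shape and dtype below are illustrative assumptions.

```python
import torch

# Illustrative weight tensor; shape and dtype are arbitrary, not from TorchRec.
tensor = torch.randn(4, 8, dtype=torch.float16)

# Mirror of the added FP32 branch: upcast FP16 -> FP32 if needed, then view
# the same memory as raw bytes (4 uint8 values per FP32 element).
if tensor.dtype == torch.float16:
    tensor = tensor.float()
quant_res = tensor.view(torch.uint8)

print(tensor.shape, quant_res.shape)
# torch.Size([4, 8]) torch.Size([4, 32])

# The byte view is lossless: reinterpreting it as FP32 recovers the weights.
print(torch.equal(quant_res.view(torch.float32), tensor))  # True
```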