
Commit 3a5e528

remove all prints used to debug

1 parent 2a12088 commit 3a5e528

3 files changed: +4 -31 lines changed


src/main/java/io/bioimage/modelrunner/pytorch/javacpp/JavaWorker.java (-6 lines)
@@ -35,7 +35,6 @@ public static void main(String[] args) {
 		try {
 			pi = new PytorchJavaCPPInterface(false);
 		} catch (IOException | URISyntaxException e) {
-			e.printStackTrace();
 			return;
 		}

@@ -88,16 +87,11 @@ private void executeScript(String script, Map<String, Object> inputs) {
 		this.reportLaunch();
 		try {
 			if (script.equals("loadModel")) {
-				update("STATY IN WORKER LOAD LOAD", null, null);
 				pi.loadModel((String) inputs.get("modelFolder"), (String) inputs.get("modelSource"));
 			} else if (script.equals("inference")) {
-				update("STATY IN WORKER ------------RUN", null, null);
 				pi.runFromShmas((List<String>) inputs.get("inputs"), (List<String>) inputs.get("outputs"));
 			} else if (script.equals("close")) {
 				pi.closeModel();
-			} else {
-				update("LOL WTF", null, null);
-				update("LOL WTF -------------- " + script, null, null);
 			}
 		} catch(Exception | Error ex) {
 			this.fail(Types.stackTrace(ex));

src/main/java/io/bioimage/modelrunner/pytorch/javacpp/PytorchJavaCPPInterface.java (-8 lines)
@@ -239,28 +239,21 @@ void run(List<Tensor<T>> inputTensors, List<Tensor<R>> outputTensors) throws Run
 	}

 	protected void runFromShmas(List<String> inputs, List<String> outputs) throws IOException {
-		System.out.println("REACH0");
 		IValueVector inputsVector = new IValueVector();
-		System.out.println("REACH1");
 		for (String ee : inputs) {
-			System.out.println("REACH2");
 			Map<String, Object> decoded = Types.decode(ee);
 			SharedMemoryArray shma = SharedMemoryArray.read((String) decoded.get(MEM_NAME_KEY));
 			org.bytedeco.pytorch.Tensor inT = TensorBuilder.build(shma);
 			inputsVector.put(new IValue(inT));
 			if (PlatformDetection.isWindows()) shma.close();
 		}
-		System.out.println("REACH3");
 		// Run model
 		model.eval();
-		System.out.println("REACH4");
 		IValue output = model.forward(inputsVector);
 		TensorVector outputTensorVector = null;
 		if (output.isTensorList()) {
-			System.out.println("SSECRET_KEY : 1 ");
 			outputTensorVector = output.toTensorVector();
 		} else {
-			System.out.println("SSECRET_KEY : 2 ");
 			outputTensorVector = new TensorVector();
 			outputTensorVector.put(output.toTensor());
 		}
@@ -269,7 +262,6 @@ protected void runFromShmas(List<String> inputs, List<String> outputs) throws IO
 		int c = 0;
 		for (String ee : outputs) {
 			Map<String, Object> decoded = Types.decode(ee);
-			System.out.println("ENTERED: " + ee);
 			ShmBuilder.build(outputTensorVector.get(c ++), (String) decoded.get(MEM_NAME_KEY));
 		}
 		outputTensorVector.close();

src/main/java/io/bioimage/modelrunner/pytorch/javacpp/shm/ShmBuilder.java (+4 -17 lines)
@@ -69,19 +69,14 @@ public static void build(Tensor tensor, String memoryName) throws IllegalArgumen
 	{
 		if (tensor.dtype().isScalarType(org.bytedeco.pytorch.global.torch.ScalarType.Byte)
 				|| tensor.dtype().isScalarType(org.bytedeco.pytorch.global.torch.ScalarType.Char)) {
-			System.out.println("SSECRET_KEY : BYTE ");
 			buildFromTensorByte(tensor, memoryName);
 		} else if (tensor.dtype().isScalarType(org.bytedeco.pytorch.global.torch.ScalarType.Int)) {
-			System.out.println("SSECRET_KEY : INT ");
 			buildFromTensorInt(tensor, memoryName);
 		} else if (tensor.dtype().isScalarType(org.bytedeco.pytorch.global.torch.ScalarType.Float)) {
-			System.out.println("SSECRET_KEY : FLOAT ");
 			buildFromTensorFloat(tensor, memoryName);
 		} else if (tensor.dtype().isScalarType(org.bytedeco.pytorch.global.torch.ScalarType.Double)) {
-			System.out.println("SSECRET_KEY : SOUBKE ");
 			buildFromTensorDouble(tensor, memoryName);
 		} else if (tensor.dtype().isScalarType(org.bytedeco.pytorch.global.torch.ScalarType.Long)) {
-			System.out.println("SSECRET_KEY : LONG ");
 			buildFromTensorLong(tensor, memoryName);
 		} else {
 			throw new IllegalArgumentException("Unsupported tensor type: " + tensor.scalar_type());
@@ -98,10 +93,9 @@ private static void buildFromTensorByte(Tensor tensor, String memoryName) throws
 		long flatSize = 1;
 		for (long l : arrayShape) {flatSize *= l;}
 		byte[] flat = new byte[(int) flatSize];
-		ByteBuffer byteBuffer = ByteBuffer.allocateDirect((int) (flatSize));
+		ByteBuffer byteBuffer = ByteBuffer.allocateDirect((int) (flatSize)).order(ByteOrder.LITTLE_ENDIAN);
 		tensor.data_ptr_byte().get(flat);
 		byteBuffer.put(flat);
-		byteBuffer.rewind();
 		shma.getDataBufferNoHeader().put(byteBuffer);
 		if (PlatformDetection.isWindows()) shma.close();
 	}
@@ -116,11 +110,10 @@ private static void buildFromTensorInt(Tensor tensor, String memoryName) throws
 		long flatSize = 1;
 		for (long l : arrayShape) {flatSize *= l;}
 		int[] flat = new int[(int) flatSize];
-		ByteBuffer byteBuffer = ByteBuffer.allocateDirect((int) (flatSize * Integer.BYTES));
+		ByteBuffer byteBuffer = ByteBuffer.allocateDirect((int) (flatSize * Integer.BYTES)).order(ByteOrder.LITTLE_ENDIAN);
 		IntBuffer floatBuffer = byteBuffer.asIntBuffer();
 		tensor.data_ptr_int().get(flat);
 		floatBuffer.put(flat);
-		byteBuffer.rewind();
 		shma.getDataBufferNoHeader().put(byteBuffer);
 		if (PlatformDetection.isWindows()) shma.close();
 	}
@@ -140,10 +133,6 @@ private static void buildFromTensorFloat(Tensor tensor, String memoryName) throw
 		tensor.data_ptr_float().get(flat);
 		floatBuffer.put(flat);
 		shma.getDataBufferNoHeader().put(byteBuffer);
-		System.out.println("equals " + (shma.getDataBufferNoHeader().get(100) == byteBuffer.get(100)));
-		System.out.println("equals " + (shma.getDataBufferNoHeader().get(500) == byteBuffer.get(500)));
-		System.out.println("equals " + (shma.getDataBufferNoHeader().get(300) == byteBuffer.get(300)));
-		System.out.println("equals " + (shma.getDataBufferNoHeader().get(1000) == byteBuffer.get(1000)));
 		if (PlatformDetection.isWindows()) shma.close();
 	}

@@ -157,11 +146,10 @@ private static void buildFromTensorDouble(Tensor tensor, String memoryName) thro
 		long flatSize = 1;
 		for (long l : arrayShape) {flatSize *= l;}
 		double[] flat = new double[(int) flatSize];
-		ByteBuffer byteBuffer = ByteBuffer.allocateDirect((int) (flatSize * Double.BYTES));
+		ByteBuffer byteBuffer = ByteBuffer.allocateDirect((int) (flatSize * Double.BYTES)).order(ByteOrder.LITTLE_ENDIAN);
 		DoubleBuffer floatBuffer = byteBuffer.asDoubleBuffer();
 		tensor.data_ptr_double().get(flat);
 		floatBuffer.put(flat);
-		byteBuffer.rewind();
 		shma.getDataBufferNoHeader().put(byteBuffer);
 		if (PlatformDetection.isWindows()) shma.close();
 	}
@@ -176,11 +164,10 @@ private static void buildFromTensorLong(Tensor tensor, String memoryName) throws
 		long flatSize = 1;
 		for (long l : arrayShape) {flatSize *= l;}
 		long[] flat = new long[(int) flatSize];
-		ByteBuffer byteBuffer = ByteBuffer.allocateDirect((int) (flatSize * Long.BYTES));
+		ByteBuffer byteBuffer = ByteBuffer.allocateDirect((int) (flatSize * Long.BYTES)).order(ByteOrder.LITTLE_ENDIAN);
 		LongBuffer floatBuffer = byteBuffer.asLongBuffer();
 		tensor.data_ptr_long().get(flat);
 		floatBuffer.put(flat);
-		byteBuffer.rewind();
 		shma.getDataBufferNoHeader().put(byteBuffer);
 		if (PlatformDetection.isWindows()) shma.close();
 	}
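For context on the .order(ByteOrder.LITTLE_ENDIAN) additions in ShmBuilder: a newly allocated ByteBuffer (direct or not) defaults to big-endian, so typed views such as asIntBuffer() or asDoubleBuffer() write big-endian bytes unless the order is changed first. The following is a minimal standalone sketch, not part of this repository (the class name is made up), showing the effect:

import java.nio.ByteBuffer;
import java.nio.ByteOrder;

// Hypothetical demo class, only to illustrate java.nio byte-order behavior.
public class EndiannessDemo {
	public static void main(String[] args) {
		// Default order of a fresh ByteBuffer is BIG_ENDIAN.
		ByteBuffer big = ByteBuffer.allocateDirect(Integer.BYTES);
		big.asIntBuffer().put(1);

		// Same value, written after switching to LITTLE_ENDIAN, as the
		// commit now does before creating the typed views.
		ByteBuffer little = ByteBuffer.allocateDirect(Integer.BYTES)
				.order(ByteOrder.LITTLE_ENDIAN);
		little.asIntBuffer().put(1);

		// Absolute gets only inspect the layout; they do not move the position.
		System.out.println(big.get(0) + " " + big.get(3));       // prints: 0 1
		System.out.println(little.get(0) + " " + little.get(3)); // prints: 1 0
	}
}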

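Regarding the removed byteBuffer.rewind() calls: in java.nio, writing through a typed view (asIntBuffer(), asDoubleBuffer(), ...) does not advance the backing ByteBuffer's position, while ByteBuffer.put(byte[]) does, and a later bulk put(byteBuffer) only copies the bytes remaining after the current position. The sketch below (hypothetical class name, not from the repository) illustrates that distinction, which is what makes the rewind redundant in the int/double/long paths that fill the buffer through a view:

import java.nio.ByteBuffer;

// Hypothetical demo class, only to illustrate java.nio position behavior.
public class BufferPositionDemo {
	public static void main(String[] args) {
		// Writing directly on the ByteBuffer advances its position...
		ByteBuffer direct = ByteBuffer.allocateDirect(8);
		direct.put(new byte[8]);
		System.out.println(direct.remaining()); // 0 -> a bulk put(direct) elsewhere copies nothing

		// ...while writing through a typed view leaves the backing position at 0.
		ByteBuffer viaView = ByteBuffer.allocateDirect(8);
		viaView.asIntBuffer().put(new int[] {1, 2});
		System.out.println(viaView.remaining()); // 8 -> a bulk put(viaView) copies the full content
	}
}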