
Commit d4a7ad7

all tests now passing on windows debug
1 parent: 5c6b8f7

21 files changed: +673, -485 lines

.editorconfig (+2, -2)
@@ -63,8 +63,8 @@ dotnet_style_explicit_tuple_names = true:suggestion
 [*.cs]
 
 # spaces before parens
-csharp_space_between_method_declaration_name_and_open_parenthesis = true
-csharp_space_between_method_call_name_and_opening_parenthesis = true
+csharp_space_between_method_declaration_name_and_open_parenthesis = false
+csharp_space_between_method_call_name_and_opening_parenthesis = false
 csharp_space_after_keywords_in_control_flow_statements = true
 
 # Newline settings
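These two settings govern whether a space appears between a method name and its opening parenthesis, at declaration sites and call sites respectively; the new value of false selects the conventional C# style. A minimal illustration (hypothetical method name, not from this repository):

    // csharp_space_between_method_declaration_name_and_open_parenthesis
    void Accumulate (int value) { }   // true: space before the parameter list
    void Accumulate(int value) { }    // false: no space (the new setting)

    // csharp_space_between_method_call_name_and_opening_parenthesis, at call sites
    Accumulate (42);                  // true
    Accumulate(42);                   // false (the new setting)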

src/Examples/AlexNet.cs (+3, -3)
@@ -29,8 +29,8 @@ static void Main(string[] args)
 
 using (var train = Data.Loader.CIFAR10(_dataLocation, _trainBatchSize))
 using (var test = Data.Loader.CIFAR10(_dataLocation, _testBatchSize, false))
-using (var model = new Model(_numClasses))
-using (var optimizer = NN.Optimizer.Adam(model.Parameters(), 0.001))
+using (var model = new Model("model", _numClasses))
+using (var optimizer = NN.Optimizer.Adam(model.GetParameters(), 0.001))
 {
 Stopwatch sw = new Stopwatch();
 sw.Start();
@@ -53,7 +53,7 @@ private class Model : CustomModule
 private readonly AdaptiveAvgPool2D avgPool;
 private readonly Sequential classifier;
 
-public Model(int numClasses)
+public Model(string name, int numClasses) : base(name)
 {
 features = Sequential(
 ("c1", Conv2D(3, 64, kernelSize: 3, stride: 2, padding: 1)),

src/Examples/MNIST.cs (+3, -3)
@@ -25,8 +25,8 @@ static void Main(string[] args)
 
 using (var train = Data.Loader.MNIST(_dataLocation, _trainBatchSize))
 using (var test = Data.Loader.MNIST(_dataLocation, _testBatchSize, false))
-using (var model = new Model())
-using (var optimizer = NN.Optimizer.SGD(model.Parameters(), 0.01, 0.5))
+using (var model = new Model("model"))
+using (var optimizer = NN.Optimizer.SGD(model.GetParameters(), 0.01, 0.5))
 {
 Stopwatch sw = new Stopwatch();
 sw.Start();
@@ -50,7 +50,7 @@ private class Model : CustomModule
 private Linear fc1 = Linear(320, 50);
 private Linear fc2 = Linear(50, 10);
 
-public Model()
+public Model(string name) : base(name)
 {
 RegisterModule("conv1", conv1);
 RegisterModule("conv2", conv2);
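On the definition side, the pattern after this commit is: accept a name, forward it to the CustomModule base constructor, and register submodules by name. A minimal sketch assembled from the MNIST hunks; the Forward override and its TorchTensor signature are assumptions, since this diff does not touch them:

    private class Model : CustomModule
    {
        private Linear fc1 = Linear(320, 50);
        private Linear fc2 = Linear(50, 10);

        public Model(string name) : base(name)   // name is forwarded to the native module
        {
            RegisterModule("fc1", fc1);          // submodules registered explicitly, by name
            RegisterModule("fc2", fc2);
        }

        // assumed signature; not part of this diff
        public override TorchTensor Forward(TorchTensor input)
        {
            return fc2.Forward(fc1.Forward(input));
        }
    }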

src/Native/CMakeLists.txt (+1, -1)
@@ -17,7 +17,7 @@ if(WIN32)
 add_compile_options($<$<CONFIG:Debug>:-DDEBUG>)
 add_compile_options($<$<CONFIG:Release>:-DNDEBUG>)
 add_compile_options($<$<CONFIG:Debug>:/Od>)
-add_compile_options($<$<CONFIG:Debug>:/MTd>) # /MT will static link the VC runtime library, so it doesn't need to be installed on the target machine
+add_compile_options($<$<CONFIG:Debug>:/MDd>) # /MT will static link the VC runtime library, so it doesn't need to be installed on the target machine
 add_compile_options($<$<CONFIG:Release>:/MT>)
 add_compile_options(/guard:cf)
 add_compile_options(/d2Zi+) # make optimized builds debugging easier

src/Native/LibTorchSharp/THSNN.cpp (+167, -110)
Large diffs are not rendered by default.

src/Native/LibTorchSharp/THSNN.h (+10, -10)
@@ -17,18 +17,18 @@ EXPORT_API(int) THSNN_Module_is_training(NNModule module);
 EXPORT_API(void) THSNN_Module_train(NNModule module);
 EXPORT_API(void) THSNN_Module_eval(NNModule module);
 EXPORT_API(long) THSNN_Module_children_size(const NNModule module);
-EXPORT_API(const char*) THSNN_Module_child_name(const NNModule module, const int index);
+EXPORT_API(NNModule) THSNN_Module_child(const NNModule module, const int index);
 EXPORT_API(const char*) THSNN_Module_name(const NNModule module);
 EXPORT_API(void) THSNN_Module_zero_grad(const NNModule module);
 EXPORT_API(void) THSNN_Module_save(const NNModule module, const char * location);
 EXPORT_API(NNModule) THSNN_Module_load(const char * location, const char * name);
-EXPORT_API(NNModule) THSNN_Module_register_module(const NNModule module, const char* name, const NNModule submodule);
+EXPORT_API(void) THSNN_Module_register_module(const NNModule module, const char* name, const NNModule submodule);
 EXPORT_API(void) THSNN_Module_dispose(const NNModule module);
 
 EXPORT_API(void) THSNN_AnyModule_dispose(const NNAnyModule module);
 //EXPORT_API(NNModule) THSNN_AnyModule_get(const NNAnyModule module);
 
-EXPORT_API(NNModule) THSNN_custom_module(const char** names, at::Tensor** parameters, const bool* require_grad, const int length, Tensor(*forward)(Tensor), NNAnyModule* outAsAnyModule);
+EXPORT_API(NNModule) THSNN_custom_module(const char* name, const char** names, at::Tensor** parameters, const bool* require_grad, const int length, Tensor(*forward)(Tensor), NNAnyModule* outAsAnyModule);
 EXPORT_API(NNModule) THSNN_AdaptiveAvgPool2d_ctor(const int64_t* sizes, const int length, NNAnyModule* outAsAnyModule);
 EXPORT_API(Tensor) THSNN_AdaptiveAvgPool2d_forward(const NNModule module, const Tensor tensor);

@@ -57,19 +57,19 @@ EXPORT_API(void) THSNN_Linear_set_weight(const NNModule module, const Tensor
 EXPORT_API(NNModule) THSNN_ReLU_ctor(bool inplace, NNAnyModule* outAsAnyModule);
 EXPORT_API(Tensor) THSNN_ReLU_forward(const NNModule module, const Tensor tensor);
 
-EXPORT_API(NNSequential) THSNN_Sequential_ctor();
-EXPORT_API(void) THSNN_Sequential_push_back(const NNSequential module, const char* name, const NNAnyModule submodule);
-EXPORT_API(Tensor) THSNN_Sequential_forward(const NNSequential module, const Tensor tensor);
+EXPORT_API(NNModule) THSNN_Sequential_ctor();
+EXPORT_API(void) THSNN_Sequential_push_back(const NNModule module, const char* name, const NNAnyModule submodule);
+EXPORT_API(Tensor) THSNN_Sequential_forward(const NNModule module, const Tensor tensor);
 
 EXPORT_API(void) THSNN_Optimizer_zeroGrad(const Optimizer optimizer);
 EXPORT_API(void) THSNN_Optimizer_getParameters(const Optimizer optimizer, Tensor* (*allocator)(size_t length));
 EXPORT_API(void) THSNN_Optimizer_step(const Optimizer optimizer);
 EXPORT_API(void) THSNN_Optimizer_dispose(const Optimizer optimizer);
 
-EXPORT_API(Tensor) THSTorch_binary_cross_entropy(const Tensor inputwrapper, const Tensor targetwrapper, const Tensor weightwrapper, const int64_t reduction);
-EXPORT_API(Tensor) THSTorch_mse_loss(const Tensor inputwrapper, const Tensor targetwrapper, const int64_t reduction);
-EXPORT_API(Tensor) THSTorch_nll_loss(const Tensor inputwrapper, const Tensor targetwrapper, const Tensor weightwrapper, const int64_t reduction);
-EXPORT_API(Tensor) THSTorch_poisson_nll_loss(const Tensor input, const Tensor target, const bool logInput, const bool full, const double eps, const int64_t reduction);
+EXPORT_API(Tensor) THSNN_binary_cross_entropy(const Tensor inputwrapper, const Tensor targetwrapper, const Tensor weightwrapper, const int64_t reduction);
+EXPORT_API(Tensor) THSNN_mse_loss(const Tensor inputwrapper, const Tensor targetwrapper, const int64_t reduction);
+EXPORT_API(Tensor) THSNN_nll_loss(const Tensor inputwrapper, const Tensor targetwrapper, const Tensor weightwrapper, const int64_t reduction);
+EXPORT_API(Tensor) THSNN_poisson_loss(const Tensor input, const Tensor target, const bool logInput, const bool full, const double eps, const int64_t reduction);
 
 EXPORT_API(Optimizer) THSNN_Adam_ctor(const Tensor* parameters, const int len, const double learnig_rate);
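Two themes run through the header changes: the loss entry points move from the THSTorch_ prefix to THSNN_, and module handles replace strings where the managed side previously round-tripped names (THSNN_Module_child now returns an NNModule, THSNN_Module_register_module now returns void, THSNN_custom_module takes the module's name up front). A sketch of how managed bindings would line up with the new exports, assuming IntPtr-based handles and the native library name LibTorchSharp (the actual attributes live in the C# wrapper files, which are not shown in this section):

    [DllImport("LibTorchSharp")]
    private static extern IntPtr THSNN_Module_child(IntPtr module, int index);       // was THSNN_Module_child_name, returning const char*

    [DllImport("LibTorchSharp")]
    private static extern void THSNN_Module_register_module(IntPtr module, string name, IntPtr submodule);  // now returns void

    [DllImport("LibTorchSharp")]
    private static extern IntPtr THSNN_nll_loss(IntPtr input, IntPtr target, IntPtr weight, long reduction);  // renamed from THSTorch_nll_loss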
