
[MNT] change criterion to loss_fn

Gao Enhao · 2 years ago · commit 81645f89da · branch pull/1/head
10 changed files with 24 additions and 24 deletions:

 1. abl/learning/readme.md                      +3 -3
 2. docs/Intro/Learning.rst                     +3 -3
 3. docs/Intro/Quick-Start.rst                  +3 -3
 4. examples/hed/hed_bridge.py                  +2 -2
 5. examples/hed/hed_example.ipynb              +2 -2
 6. examples/hwf/hwf_example.ipynb              +3 -3
 7. examples/mnist_add/mnist_add_example.ipynb  +2 -2
 8. examples/models/nn.py                       +1 -1
 9. tests/conftest.py                           +4 -4
10. tests/test_basic_nn.py                      +1 -1
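
The commit is a mechanical rename of the loss-function parameter from `criterion` to `loss_fn` across docs, examples, and tests, plus two related tweaks: a Softmax removal in examples/models/nn.py and an extra positional argument to `bridge.train` in the hwf example. A minimal migration sketch for downstream code, assuming the positional `BasicNN` signature shown in the diffs below (`LeNet5` stands in for any `torch.nn.Module` classifier, as in the examples):

```python
import torch
import torch.nn as nn
from abl.learning import BasicNN

cls = LeNet5(num_classes=10)        # any torch.nn.Module classifier
loss_fn = nn.CrossEntropyLoss()     # formerly passed as `criterion`
optimizer = torch.optim.Adam(cls.parameters())

# Before this commit: base_model = BasicNN(cls, criterion, optimizer)
base_model = BasicNN(cls, loss_fn, optimizer)
```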

abl/learning/readme.md  (+3 -3)

@@ -31,7 +31,7 @@
 **num_workers : int**
 + The number of workers used for loading data.
 
-**criterion : torch.nn.Module**
+**loss_fn : torch.nn.Module**
 + The loss function used for training.
 
 **optimizer : torch.nn.Module**
@@ -60,13 +60,13 @@
 > ```python
 > # Three necessary component
 > cls = LeNet5()
-> criterion = nn.CrossEntropyLoss()
+> loss_fn = nn.CrossEntropyLoss()
 > optimizer = torch.optim.Adam(cls.parameters())
 >
 > # Initialize base_model
 > base_model = BasicModel(
 >     cls,
->     criterion,
+>     loss_fn,
 >     optimizer,
 >     torch.device("cuda:0"),
 >     batch_size=32,


docs/Intro/Learning.rst  (+3 -3)

@@ -39,11 +39,11 @@ For a PyTorch-based neural network, we first need to encapsulate it within a ``B
    import torchvision
    cls = torchvision.models.resnet18(pretrained=True)
 
-   # criterion and optimizer are used for training
-   criterion = torch.nn.CrossEntropyLoss()
+   # loss_fn and optimizer are used for training
+   loss_fn = torch.nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(cls.parameters())
 
-   base_model = BasicNN(cls, criterion, optimizer)
+   base_model = BasicNN(cls, loss_fn, optimizer)
    model = ABLModel(base_model)
 
 Besides ``fit`` and ``predict``, ``BasicNN`` also implements the following methods:


docs/Intro/Quick-Start.rst  (+3 -3)

@@ -84,17 +84,17 @@ To build the machine learning part, we need to wrap our machine learning model i
    # The number of pseudo labels is 10
    cls = LeNet5(num_classes=10)
 
-Aside from the network, we need to define a criterion, an optimizer, and a device so as to create a ``BasicNN`` object. This class implements ``fit``, ``predict``, ``predict_proba`` and several other methods to enable the PyTorch-based neural network to work as a scikit-learn model.
+Aside from the network, we need to define a loss_fn, an optimizer, and a device so as to create a ``BasicNN`` object. This class implements ``fit``, ``predict``, ``predict_proba`` and several other methods to enable the PyTorch-based neural network to work as a scikit-learn model.
 
 .. code:: python
 
    import torch
    from abl.learning import BasicNN
 
-   criterion = torch.nn.CrossEntropyLoss()
+   loss_fn = torch.nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(cls.parameters(), lr=0.001, betas=(0.9, 0.99))
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-   base_model = BasicNN(cls, criterion, optimizer, device)
+   base_model = BasicNN(cls, loss_fn, optimizer, device)
 
 .. code:: python
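
As the Quick-Start text above says, ``BasicNN`` is meant to behave like a scikit-learn estimator via ``fit``, ``predict``, and ``predict_proba``. A hedged usage sketch (the method names come from the docs above; the exact argument conventions are not shown in this diff and are an assumption here):

```python
# Sketch only: fit/predict/predict_proba are documented above, but the
# precise argument types (arrays vs. data loaders) are an assumption.
base_model.fit(X_train, y_train)
y_pred = base_model.predict(X_test)
y_prob = base_model.predict_proba(X_test)
```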



examples/hed/hed_bridge.py  (+2 -2)

@@ -32,14 +32,14 @@ class HEDBridge(SimpleBridge):
             num_classes=len(self.reasoner.kb.pseudo_label_list)
         )
         device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
-        criterion = torch.nn.MSELoss()
+        loss_fn = torch.nn.MSELoss()
         optimizer = torch.optim.RMSprop(
             cls_autoencoder.parameters(), lr=0.001, alpha=0.9, weight_decay=1e-6
         )
 
         pretrain_model = BasicNN(
             cls_autoencoder,
-            criterion,
+            loss_fn,
             optimizer,
             device,
             save_interval=1,


examples/hed/hed_example.ipynb  (+2 -2)

@@ -157,7 +157,7 @@
    "source": [
     "# Build necessary components for BasicNN\n",
     "cls = SymbolNet(num_classes=4)\n",
-    "criterion = nn.CrossEntropyLoss()\n",
+    "loss_fn = nn.CrossEntropyLoss()\n",
     "optimizer = torch.optim.RMSprop(cls.parameters(), lr=0.001, weight_decay=1e-6)\n",
     "# optimizer = torch.optim.Adam(cls.parameters(), lr=0.001, betas=(0.9, 0.99))\n",
     "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")"
@@ -173,7 +173,7 @@
    "# The function of BasicNN is to wrap NN models into the form of an sklearn estimator\n",
    "base_model = BasicNN(\n",
    "    cls,\n",
-   "    criterion,\n",
+   "    loss_fn,\n",
    "    optimizer,\n",
    "    device,\n",
    "    batch_size=32,\n",


examples/hwf/hwf_example.ipynb  (+3 -3)

@@ -95,7 +95,7 @@
    "# Initialize necessary component for machine learning part\n",
    "cls = SymbolNet(num_classes=len(kb.pseudo_label_list), image_size=(45, 45, 1))\n",
    "device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n",
-   "criterion = nn.CrossEntropyLoss()\n",
+   "loss_fn = nn.CrossEntropyLoss()\n",
    "optimizer = torch.optim.Adam(cls.parameters(), lr=0.001, betas=(0.9, 0.99))"
    ]
   },
@@ -109,7 +109,7 @@
    "# The function of BasicNN is to wrap NN models into the form of an sklearn estimator\n",
    "base_model = BasicNN(\n",
    "    model=cls,\n",
-   "    criterion=criterion,\n",
+   "    loss_fn=loss_fn,\n",
    "    optimizer=optimizer,\n",
    "    device=device,\n",
    "    save_interval=1,\n",
@@ -199,7 +199,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-   "bridge.train(train_data, loops=3, segment_size=1000, save_interval=1, save_dir=weights_dir)\n",
+   "bridge.train(train_data, train_data, loops=3, segment_size=1000, save_interval=1, save_dir=weights_dir)\n",
    "bridge.test(test_data)"
    ]
   }
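
Note that this notebook constructs ``BasicNN`` with keyword arguments, so the rename changes the call site itself: ``criterion=criterion`` becomes ``loss_fn=loss_fn``. Purely positional callers (as in the mnist_add example below) only rename a local variable; the parameter name never appears at their call sites. Restated as a sketch, using the values defined earlier in the notebook:

```python
base_model = BasicNN(
    model=cls,           # SymbolNet instance defined earlier in the notebook
    loss_fn=loss_fn,     # renamed from criterion=criterion
    optimizer=optimizer,
    device=device,
    save_interval=1,
)
```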


examples/mnist_add/mnist_add_example.ipynb  (+2 -2)

@@ -50,7 +50,7 @@
    "source": [
     "# Build necessary components for BasicNN\n",
     "cls = LeNet5(num_classes=10)\n",
-    "criterion = nn.CrossEntropyLoss()\n",
+    "loss_fn = nn.CrossEntropyLoss()\n",
     "optimizer = torch.optim.Adam(cls.parameters(), lr=0.001, betas=(0.9, 0.99))\n",
     "device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")"
    ]
@@ -65,7 +65,7 @@
    "# The function of BasicNN is to wrap NN models into the form of an sklearn estimator\n",
    "base_model = BasicNN(\n",
    "    cls,\n",
-   "    criterion,\n",
+   "    loss_fn,\n",
    "    optimizer,\n",
    "    device,\n",
    "    batch_size=32,\n",


examples/models/nn.py  (+1 -1)

@@ -66,7 +66,7 @@ class SymbolNet(nn.Module):
         num_features = 64 * (image_size[0] // 4 - 1) * (image_size[1] // 4 - 1)
         self.fc1 = nn.Sequential(nn.Linear(num_features, 120), nn.ReLU())
         self.fc2 = nn.Sequential(nn.Linear(120, 84), nn.ReLU())
-        self.fc3 = nn.Sequential(nn.Linear(84, num_classes), nn.Softmax(dim=1))
+        self.fc3 = nn.Sequential(nn.Linear(84, num_classes))
 
     def forward(self, x):
         x = self.conv1(x)

tests/conftest.py  (+4 -4)

@@ -44,18 +44,18 @@ class LeNet5(nn.Module):
 @pytest.fixture
 def basic_nn_instance():
     model = LeNet5()
-    criterion = nn.CrossEntropyLoss()
+    loss_fn = nn.CrossEntropyLoss()
     optimizer = optim.Adam(model.parameters())
-    return BasicNN(model, criterion, optimizer)
+    return BasicNN(model, loss_fn, optimizer)
 
 
 # Fixture for base_model instance
 @pytest.fixture
 def base_model_instance():
     model = LeNet5()
-    criterion = nn.CrossEntropyLoss()
+    loss_fn = nn.CrossEntropyLoss()
     optimizer = optim.Adam(model.parameters())
-    return BasicNN(model, criterion, optimizer)
+    return BasicNN(model, loss_fn, optimizer)
 
 
 # Fixture for ListData instance


tests/test_basic_nn.py  (+1 -1)

@@ -27,7 +27,7 @@ class TestBasicNN(object):
     def test_initialization(self, basic_nn_instance):
         """Test initialization of the BasicNN class"""
         assert basic_nn_instance.model is not None
-        assert isinstance(basic_nn_instance.criterion, nn.Module)
+        assert isinstance(basic_nn_instance.loss_fn, nn.Module)
         assert isinstance(basic_nn_instance.optimizer, optim.Optimizer)
 
     def test_training_methods(self, basic_nn_instance, sample_data, sample_data_loader_with_label):

