neural-amp-modeler

Neural network emulator for guitar amplifiers

commit 4c982e5b530cd9c849b2b55c0f0da1cf1dab8cdf
parent 19b87011540058bbb7cdb4d89cc89c2abde50154
Author: Steven Atkinson <steven@atkinson.mn>
Date:   Sun, 13 Oct 2024 16:18:46 -0700

[BREAKING] Remove ONNX support, Bump Python requirement (#498)

* Remove ONNX export for LSTMs

* Remove Python 3.8 from workflow
Diffstat:
M .github/workflows/python-package.yml | 2 +-
M environment_cpu.yml | 8 +++-----
M environment_gpu.yml | 8 +++-----
M nam/_version.py | 2 +-
M nam/models/recurrent.py | 58 ----------------------------------------------------------
M requirements.txt | 4 +---
M setup.py | 2 --
M tests/test_nam/test_models/test_recurrent.py | 33 ---------------------------------
8 files changed, 9 insertions(+), 108 deletions(-)

diff --git a/.github/workflows/python-package.yml b/.github/workflows/python-package.yml
@@ -16,7 +16,7 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        python-version: ["3.8", "3.9", "3.10", "3.11", "3.12"]
+        python-version: ["3.9", "3.10", "3.11", "3.12"]

     steps:
     - uses: actions/checkout@v3
diff --git a/environment_cpu.yml b/environment_cpu.yml
@@ -4,18 +4,16 @@ name: nam
 channels:
-  - conda-forge # pytest-mock, onnxruntime
+  - conda-forge # pytest-mock
   - pytorch
 dependencies:
-  - python<3.11 # onnxruntime
+  - python>=3.9
   - black
   - flake8
   - h5py
   - jupyter
   - matplotlib
-  - numpy<2 # Until PyTorch 2.3
-  - onnx
-  - onnxruntime!=1.16.0
+  - numpy<2
   - pip
   - pre-commit
   - pydantic
diff --git a/environment_gpu.yml b/environment_gpu.yml
@@ -4,19 +4,17 @@ name: nam
 channels:
-  - conda-forge # pytest-mock, onnxruntime
+  - conda-forge # pytest-mock
   - pytorch
   - nvidia # GPU
 dependencies:
-  - python<3.11 # onnxruntime
+  - python>=3.9
   - black
   - flake8
   - h5py
   - jupyter
   - matplotlib
-  - numpy<2 # Until PyTorch 2.3
-  - onnx
-  - onnxruntime!=1.16.0
+  - numpy<2
   - pip
   - pre-commit
   - pydantic>=2.0.0
diff --git a/nam/_version.py b/nam/_version.py
@@ -1 +1 @@
-__version__ = "0.10.1"
+__version__ = "0.11.0"
diff --git a/nam/models/recurrent.py b/nam/models/recurrent.py
@@ -228,42 +228,6 @@ class LSTM(BaseNet):
             )
         )

-    def export_onnx(self, filename: Path):
-        if self._input_size != 1:
-            raise NotImplementedError("Multi-dimensional inputs not supported yet")
-        o = _ONNXWrapped(self)
-        x = torch.randn((64,))  # (S,)
-        h, c = [z[:, 0, :] for z in self._initial_state(1)]  # (L,DH), (L,DH)
-        torch.onnx.export(
-            o,
-            (x, h, c),
-            filename,
-            input_names=["x", "hin", "cin"],
-            output_names=["y", "hout", "cout"],
-            dynamic_axes={"x": {0: "num_frames"}, "y": {0: "num_frames"}},
-        )
-
-    def forward_onnx(
-        self, x: torch.Tensor, h: _LSTMHiddenType, c: _LSTMCellType
-    ) -> Tuple[torch.Tensor, _LSTMHiddenType, _LSTMCellType]:
-        """
-        Forward pass used by ONNX export
-        Only supports scalar inputs right now.
-
-        N: Sequeence length
-        L: Number of layers
-        DH: Hidden state dimension
-
-        :param x: (N,)
-        :param state: (L, DH)
-        :param cell: (L, DH)
-
-        :return: (N,), (L, DH), (L, DH)
-        """
-        features, (h, c) = self._core(x[None, :, None], (h[:, None, :], c[:, None, :]))
-        y = self._apply_head(features)  # (1,S)
-        return y[0, :], h[:, 0, :], c[:, 0, :]
-
     def _apply_head(self, features: torch.Tensor) -> torch.Tensor:
         """
         :param features: (B,S,DH)
@@ -409,28 +373,6 @@ class LSTM(BaseNet):
         )


-class _ONNXWrapped(nn.Module):
-    def __init__(self, net: LSTM):
-        super().__init__()
-        self._net = net
-
-    def forward(
-        self, x: torch.Tensor, hidden: _LSTMHiddenType, cell: _LSTMCellType
-    ) -> Tuple[torch.Tensor, _LSTMHiddenType, _LSTMCellType]:
-        """
-        N: Sequeence length
-        L: Number of layers
-        DH: Hidden state dimension
-
-        :param x: (N,)
-        :param state: (L, DH)
-        :param cell: (L, DH)
-
-        :return: (N,), (L, DH), (L, DH)
-        """
-        return self._net.forward_onnx(x, hidden, cell)
-
-
 # TODO refactor together
diff --git a/requirements.txt b/requirements.txt
@@ -6,9 +6,7 @@ auraloss==0.3.0 # 0.4.0 changes API for MRSTFT loss
 black
 flake8
 matplotlib
-numpy<2 # Until PyTorch 2.3
-onnx
-onnxruntime!=1.16.0 # 1.16.0 has a bug to avoid!
+numpy<2
 pip
 pre-commit
 pydantic>=2.0.0
diff --git a/setup.py b/setup.py
@@ -48,8 +48,6 @@ with open(ver_path) as ver_file:
 requirements = [
     "auraloss==0.3.0",
     "matplotlib",
-    "onnx",
-    "onnxruntime!=1.16.0",  # Has a bug to avoid
     "pydantic>=2.0.0",
     "pytorch_lightning",
     "scipy",
diff --git a/tests/test_nam/test_models/test_recurrent.py b/tests/test_nam/test_models/test_recurrent.py
@@ -5,8 +5,6 @@
 from pathlib import Path
 from tempfile import TemporaryDirectory

-import onnx
-import onnxruntime
 import pytest
 import torch
@@ -39,37 +37,6 @@ class TestLSTM(Base):
         cls._num_layers = num_layers
         cls._hidden_size = hidden_size

-    def test_export_onnx(self):
-        model = self._construct()
-        with TemporaryDirectory() as tmpdir:
-            filename = Path(tmpdir, "model.onnx")
-            model.export_onnx(filename)
-            onnx_model = onnx.load(filename)
-            session = onnxruntime.InferenceSession(str(filename))
-            onnx.checker.check_model(onnx_model)
-            wrapped_model = recurrent._ONNXWrapped(model)
-            x = torch.Tensor([0.5, -0.5, 0.4, -0.4, 0.3, -0.3, 0.2])
-            hin = torch.zeros((self._num_layers, self._hidden_size))
-            cin = torch.zeros((self._num_layers, self._hidden_size))
-
-            with torch.no_grad():
-                y_expected, hout_expected, cout_expected = [
-                    z.detach().cpu().numpy() for z in wrapped_model(x, hin, cin)
-                ]
-
-            input_names = [z.name for z in session.get_inputs()]
-            onnx_inputs = {
-                i: z.detach().cpu().numpy() for i, z in zip(input_names, (x, hin, cin))
-            }
-            y_actual, hout_actual, cout_actual = session.run([], onnx_inputs)
-
-            def approx(val):
-                return pytest.approx(val, rel=1.0e-6, abs=1.0e-6)
-
-            assert y_expected == approx(y_actual)
-            assert hout_expected == approx(hout_actual)
-            assert cout_expected == approx(cout_actual)
-
     def test_get_initial_state_cpu(self):
         return self._t_initial_state("cpu")
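
For downstream users, the practical effect of this change is that LSTM.export_onnx and the _ONNXWrapped helper no longer exist as of 0.11.0; the rest of the model code is untouched. Below is a minimal sketch of the export call this commit removes, assuming neural-amp-modeler 0.10.x with the onnx/onnxruntime packages still installed; the constructor arguments are illustrative assumptions, not taken from this diff.

# Sketch only: works on neural-amp-modeler < 0.11.0; removed by this commit.
# The LSTM constructor arguments below are illustrative assumptions.
from pathlib import Path

from nam.models.recurrent import LSTM

model = LSTM(hidden_size=8, num_layers=1)  # hypothetical model configuration
if hasattr(model, "export_onnx"):
    # Pre-0.11.0: writes an ONNX graph with inputs x/hin/cin and outputs y/hout/cout,
    # as in the export_onnx method removed above.
    model.export_onnx(Path("model.onnx"))
else:
    # 0.11.0 and later: the method has been removed.
    print("export_onnx is not available in this version of nam")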