neural-amp-modeler

Neural network emulator for guitar amplifiers
Log | Files | Refs | README | LICENSE

commit 044db9831892e83d4844c8313184fe9531b20f85
parent 9d6525e336461ac566995e12d6b9dabe538aaff2
Author: Steven Atkinson <steven@atkinson.mn>
Date:   Tue,  3 May 2022 17:45:41 -0700

Fix env name, change plots, update README

Diffstat:
M README.md        | 5 +++++
M bin/train/main.py | 9 ++++++---
M environment.yml   | 2 +-
3 files changed, 12 insertions(+), 4 deletions(-)

diff --git a/README.md b/README.md @@ -6,6 +6,9 @@ For the code to create the plugin with a trained model, see my ## How to use +This repository handles training, reamping, and exporting the weights of a model (to +use with [the iPlug2 plugin]()) + ### Train a model You'll need at least two mono wav files: the input (DI) and the amped sound (without the cab). @@ -33,6 +36,8 @@ bin/train/outputs/MyAmp Handy if you want to just check it out without going through the trouble of building the plugin. +For example: + ```bash python bin/run.py \ path/to/source.wav \ diff --git a/bin/train/main.py b/bin/train/main.py @@ -58,9 +58,12 @@ def plot( print(f"Took {t1 - t0} ({tx / (t1 - t0):.2f}x)") plt.figure(figsize=(16, 5)) - plt.plot(ds.x[window_start:window_end], label="Input") - plt.plot(output[window_start:window_end], label="Output") - plt.plot(ds.y[window_start:window_end], label="Target") + # plt.plot(ds.x[window_start:window_end], label="Input") + plt.plot(output[window_start:window_end], label="Prediction") + plt.plot(ds.y[window_start:window_end], linestyle="--", label="Target") + # plt.plot( + # ds.y[window_start:window_end] - output[window_start:window_end], label="Error" + # ) plt.title(f"NRMSE={_rms(torch.Tensor(output) - ds.y) / _rms(ds.y)}") plt.legend() if savefig is not None: diff --git a/environment.yml b/environment.yml @@ -2,7 +2,7 @@ # Created Date: Saturday February 13th 2021 # Author: Steven Atkinson (steven@atkinson.mn) -name: nam2 +name: nam channels: - pytorch dependencies: