# Training script: train, evaluate, and persist the Transformer price model
# and the RL trading agent built on top of it.
# %% Import required packages
import torch
from torch.nn import MSELoss
from torch.optim import Adam

from src.data.data_preprocessing import load_processed_data
from src.models.rl_model import RLModel
from src.models.trading_model import TradingAgent  # was `models.trading_model` — made consistent with the other src.* imports
from src.models.transformer_model import TransformerModel
# NOTE(review): evaluate_transformer / evaluate_rl are called below but were
# never imported — assuming they live next to the train functions; confirm path.
from src.training.train import train_transformer, train_rl, evaluate_transformer, evaluate_rl
# %% Set device
# Run on the GPU when one is available; otherwise fall back to the CPU.
if torch.cuda.is_available():
    device = torch.device("cuda")
else:
    device = torch.device("cpu")
# %% Load processed data
# Preprocessed CSVs produced by the data pipeline (src.data.data_preprocessing).
_TRAIN_CSV = '../data/processed/train_data.csv'
_TEST_CSV = '../data/processed/test_data.csv'

train_data = load_processed_data(_TRAIN_CSV)
test_data = load_processed_data(_TEST_CSV)
# %% Initialize models
# Instantiate both networks and move them to the selected device, then wrap
# them in the trading agent, which combines prediction with the RL policy.
transformer_model = TransformerModel()
transformer_model = transformer_model.to(device)

rl_model = RLModel()
rl_model = rl_model.to(device)

trading_agent = TradingAgent(transformer_model, rl_model)
# %% Set up the loss function and optimizer for Transformer model
# Mean-squared error regression loss; Adam over the transformer's parameters.
# NOTE(review): lr=0.001 duplicates the learning_rate in the hyperparams dict
# below — confirm which one train_transformer actually honours.
criterion = MSELoss()
optimizer = Adam(params=transformer_model.parameters(), lr=0.001)
# %% Train Transformer Model
# Hyperparameters for supervised training of the price-prediction model.
transformer_model_hyperparams = dict(
    epochs=10,
    batch_size=32,
    learning_rate=0.001,
)

train_transformer(
    transformer_model,
    train_data,
    criterion,
    optimizer,
    transformer_model_hyperparams,
)
# %% Evaluate Transformer Model on Test Data
# Measure generalization on the held-out test set after training.
test_loss = evaluate_transformer(transformer_model, test_data, criterion)
print('Test Loss:', test_loss)
# %% Save Transformer Model
# Persist only the learned weights (state_dict), not the full module object.
_transformer_ckpt = '../models/transformer_model.pth'
torch.save(transformer_model.state_dict(), _transformer_ckpt)
# %% Train RL Model
# Hyperparameters for the RL training loop: reward discounting plus an
# epsilon-greedy exploration schedule that decays exponentially from
# epsilon_start toward epsilon_end.
rl_model_hyperparams = {
    "epochs": 500,
    "batch_size": 32,
    "learning_rate": 0.001,
    "gamma": 0.99,            # discount factor
    "epsilon_start": 1.0,     # exploration rate at the beginning
    "epsilon_end": 0.01,      # minimum exploration rate
    "epsilon_decay": 0.995,   # exponential decay rate for exploration probability
}

train_rl(trading_agent, train_data, rl_model_hyperparams)
# %% Evaluate RL Model on Test Data
# Report the agent's cumulative reward on the held-out test set.
test_reward = evaluate_rl(trading_agent, test_data)
print('Test Reward:', test_reward)
# %% Save RL Model
# Persist the RL network's weights alongside the transformer checkpoint.
_rl_ckpt = '../models/rl_model.pth'
torch.save(rl_model.state_dict(), _rl_ckpt)