street-fighter-ai/000_image_stack_ram_based_reward/test.py

import time

import retro
from stable_baselines3 import PPO

from street_fighter_custom_wrapper import StreetFighterCustomWrapper


def make_env(game, state):
    # Return a zero-argument factory rather than the environment itself, so the
    # env can be constructed lazily (the pattern SB3 vectorized wrappers expect).
    def _init():
        env = retro.make(
            game=game,
            state=state,
            use_restricted_actions=retro.Actions.FILTERED,
            obs_type=retro.Observations.IMAGE
        )
        env = StreetFighterCustomWrapper(env)
        return env
    return _init
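
# A minimal sketch (not part of the original script) of how such a factory is
# typically consumed by Stable-Baselines3's vectorized environments:
#
#     from stable_baselines3.common.vec_env import DummyVecEnv
#     venv = DummyVecEnv([make_env(game, state)])  # one factory per parallel env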

game = "StreetFighterIISpecialChampionEdition-Genesis"
state_stages = [
    "Champion.Level1.ChunLiVsGuile",   # Average reward for random strategy: -102.3
    "ChampionX.Level1.ChunLiVsKen",    # Average reward for random strategy: -247.6
    "Champion.Level2.ChunLiVsKen",
    "Champion.Level3.ChunLiVsChunLi",
    "Champion.Level4.ChunLiVsZangief",
    "Champion.Level5.ChunLiVsDhalsim",
    "Champion.Level6.ChunLiVsRyu",
    "Champion.Level7.ChunLiVsEHonda",
    "Champion.Level8.ChunLiVsBlanka",
    "Champion.Level9.ChunLiVsBalrog",
    "Champion.Level10.ChunLiVsVega",
    "Champion.Level11.ChunLiVsSagat",
    "Champion.Level12.ChunLiVsBison"
    # Add other stages as necessary
]

env = make_env(game, state_stages[0])()

model = PPO(
    "CnnPolicy",
    env,
    verbose=1
)

model_path = r"optuna/trial_1_best_model"  # Average reward for optuna/trial_1_best_model: -82.3
# PPO.load() is a classmethod that returns a *new* model, so calling it on an
# instance silently discards the loaded weights; set_parameters() loads the
# saved parameters into this model in place.
model.set_parameters(model_path)
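
# Hypothetical variant (not in the original script): to evaluate the loaded
# policy instead of the random baseline below, replace the sampled action with
#
#     action, _states = model.predict(obs, deterministic=True)
#     obs, reward, done, info = env.step(action)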

num_episodes = 30
episode_reward_sum = 0

for _ in range(num_episodes):
    done = False
    obs = env.reset()
    total_reward = 0
    while not done:
        timestamp = time.time()  # timestamp of this step (currently unused)
        # NOTE: actions are sampled at random here, so this loop measures the
        # random-strategy baseline rather than the loaded model's policy.
        obs, reward, done, info = env.step(env.action_space.sample())
        if reward != 0:
            total_reward += reward
            print("Reward: {}, playerHP: {}, enemyHP: {}".format(
                reward, info['health'], info['enemy_health']))
        env.render()
    print("Total reward: {}".format(total_reward))
    episode_reward_sum += total_reward

env.close()
print("Average reward for {}: {}".format(model_path, episode_reward_sum / num_episodes))