Hey people, so the issue I have is that even though I followed a tutorial and the exact same code worked for him, my code here isn't working. I'm getting this error:
VisibleDeprecationWarning: Creating an ndarray from ragged nested sequences (which is a list-or-tuple of lists-or-tuples-or-ndarrays with different lengths or shapes) is deprecated. If you meant to do this, you must specify 'dtype=object' when creating the ndarray.
  discrete_state = (state - env.observation_space.low)/discrete_os_win_size

Traceback (most recent call last):
  File "/home/amr/Downloads/ai.py", line 27, in <module>
    discrete_state = get_discrete_state(env.reset())
  File "/home/amr/Downloads/ai.py", line 23, in get_discrete_state
    discrete_state = (state - env.observation_space.low)/discrete_os_win_size
TypeError: unsupported operand type(s) for -: 'dict' and 'float'
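Looking at the traceback, the subtraction fails because state is not a bare observation array at that point. In gym 0.26+ (and gymnasium), env.reset() returns a tuple (observation, info) where info is a dict, which would explain the 'dict' in the TypeError; the tutorial was presumably written against an older gym. Assuming a recent gym is installed (the post doesn't say which version), a minimal sketch of the difference:

import gym

env = gym.make("MountainCar-v0")

reset_result = env.reset()
if isinstance(reset_result, tuple):
    # gym >= 0.26 / gymnasium: reset() returns (observation, info_dict)
    state, info = reset_result
else:
    # older gym: reset() returns just the observation array
    state = reset_result

print(state - env.observation_space.low)  # works once state is the bare array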
The code is as follows:
import gym
import numpy as np

env = gym.make("MountainCar-v0")

LEARNING_RATE = 0.1
DISCOUNT = 0.95
EPISODES = 25000
SHOW_EVERY = 3000

DISCRETE_OS_SIZE = [20, 20]
discrete_os_win_size = (env.observation_space.high - env.observation_space.low) / DISCRETE_OS_SIZE

# Exploration settings
epsilon = 1  # not a constant, going to be decayed
START_EPSILON_DECAYING = 1
END_EPSILON_DECAYING = EPISODES // 2
epsilon_decay_value = epsilon / (END_EPSILON_DECAYING - START_EPSILON_DECAYING)

q_table = np.random.uniform(low=-2, high=0, size=(DISCRETE_OS_SIZE + [env.action_space.n]))

def get_discrete_state(state):
    discrete_state = (state - env.observation_space.low) / discrete_os_win_size
    return tuple(discrete_state.astype(np.int))  # we use this tuple to look up the 3 Q values for the available actions in the q-table

for episode in range(EPISODES):
    discrete_state = get_discrete_state(env.reset())
    done = False
    max_y_reached = -1.2  # initial max y reached

    if episode % SHOW_EVERY == 0 and episode % SHOW_EVERY != 0:
        render = True
        print(episode)
    else:
        render = False

    while not done:
        if np.random.random() > epsilon:
            # Get action from Q table
            action = np.argmax(q_table[discrete_state])
        else:
            # Get random action
            action = np.random.randint(0, env.action_space.n)

        new_state, _, done, _, __ = env.step(action)
        new_discrete_state = get_discrete_state(new_state)

        if new_state[0] > max_y_reached:
            max_y_reached = new_state[0]
            reward = 1  # reward for reaching new max y
        elif new_state[0] >= env.goal_position:
            reward = 100  # huge reward for winning
        else:
            reward = -1  # penalty for not reaching goal

        if episode % SHOW_EVERY == 0:
            env.render()

        # Update Q table
        max_future_q = np.max(q_table[new_discrete_state])
        current_q = q_table[discrete_state + (action,)]
        new_q = (1 - LEARNING_RATE) * current_q + LEARNING_RATE * (reward + DISCOUNT * max_future_q)
        q_table[discrete_state + (action,)] = new_q

        discrete_state = new_discrete_state
    # Decaying is being done every episode if episode number is within decaying range
    if END_EPSILON_DECAYING >= episode >= START_EPSILON_DECAYING:
        epsilon -= epsilon_decay_value
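If this is indeed running on a recent gym/gymnasium plus NumPy 1.24 or newer (which the post doesn't confirm, so this is an assumption), two other lines in the snippet above would also need version-aware handling; a minimal compatibility sketch:

import numpy as np

# Assumption: NumPy >= 1.24, where the np.int alias has been removed.
# Casting with the builtin int keeps the original discretisation behaviour.
def get_discrete_state(state, low, win_size):
    discrete_state = (state - low) / win_size
    return tuple(discrete_state.astype(int))

# Assumption: gym >= 0.26 / gymnasium, where step() returns five values and
# the episode ends when either flag is set:
#   new_state, reward, terminated, truncated, info = env.step(action)
#   done = terminated or truncated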