This repository was archived by the owner on May 6, 2021. It is now read-only.

Commit 3e40a0a

Int64 -> Int

1 parent c51d583 commit 3e40a0a
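Background for the change: in Julia, `Int` is an alias for the platform's native integer type (`Int64` on 64-bit systems, `Int32` on 32-bit systems), so annotating fields and signatures with `Int` rather than a hard-coded `Int64` keeps the code portable and matches the type produced by integer literals and indexing. A minimal sketch of the distinction (the `Counter` struct below is hypothetical and not from this repository):

# On a 64-bit machine `Int` aliases `Int64`; on a 32-bit machine it aliases `Int32`.
struct Counter
    n::Int                     # portable: follows the platform word size
end

c = Counter(1)
println(Int === Int64)         # true on a 64-bit machine, false on a 32-bit one
println(typeof(c.n))           # Int64 on 64-bit systems, Int32 on 32-bit systems
println(typeof(1) === Int)     # integer literals are already `Int`, so no conversion occurs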

File tree: 6 files changed, +23 -23 lines changed

src/environments/atari.jl (1 addition, 1 deletion)

@@ -9,7 +9,7 @@ struct AtariEnv{To,F} <: AbstractEnv
     actions::Array{Int32, 1}
     action_space::DiscreteSpace{Int}
     observation_space::To
-    noopmax::Int64
+    noopmax::Int
 end
 
 action_space(env::AtariEnv) = env.action_space

src/environments/classic_control/cart_pole.jl (2 additions, 2 deletions)

@@ -14,7 +14,7 @@ struct CartPoleEnvParams{T}
     tau::T
     thetathreshold::T
     xthreshold::T
-    max_steps::Int64
+    max_steps::Int
 end
 
 mutable struct CartPoleEnv{T, R<:AbstractRNG} <: AbstractEnv
@@ -24,7 +24,7 @@ mutable struct CartPoleEnv{T, R<:AbstractRNG} <: AbstractEnv
     state::Array{T, 1}
     action::Int
     done::Bool
-    t::Int64
+    t::Int
     rng::R
 end
 

src/environments/classic_control/mdp.jl (13 additions, 13 deletions)

@@ -63,7 +63,7 @@ MDPEnv(model; rng=Random.GLOBAL_RNG) = MDPEnv(
 action_space(env::Union{MDPEnv, POMDPEnv}) = env.action_space
 observation_space(env::Union{MDPEnv, POMDPEnv}) = env.observation_space
 
-observationindex(env, o) = Int64(o) + 1
+observationindex(env, o) = Int(o) + 1
 
 function reset!(env::Union{POMDPEnv, MDPEnv})
     initialstate(env.model, env.rng)
@@ -89,13 +89,13 @@ end
 #####
 """
     mutable struct SimpleMDPEnv
-        ns::Int64
-        na::Int64
-        state::Int64
+        ns::Int
+        na::Int
+        state::Int
         trans_probs::Array{AbstractArray, 2}
         reward::R
-        initialstates::Array{Int64, 1}
-        isterminal::Array{Int64, 1}
+        initialstates::Array{Int, 1}
+        isterminal::Array{Int, 1}
         rng::S
 A Markov Decision Process with `ns` states, `na` actions, current `state`,
 `na`x`ns` - array of transition probabilites `trans_props` which consists for
@@ -110,11 +110,11 @@ probabilities) `reward` of type `R` (see [`DeterministicStateActionReward`](@ref
 mutable struct SimpleMDPEnv{T,R,S<:AbstractRNG}
     observation_space::DiscreteSpace
     action_space::DiscreteSpace
-    state::Int64
+    state::Int
     trans_probs::Array{T, 2}
     reward::R
-    initialstates::Array{Int64, 1}
-    isterminal::Array{Int64, 1}
+    initialstates::Array{Int, 1}
+    isterminal::Array{Int, 1}
     rng::S
 end
 
@@ -186,10 +186,10 @@ expected_rewards(r::NormalStateActionReward, ::Any) = r.mean
 
 # run SimpleMDPEnv
 """
-    run!(mdp::SimpleMDPEnv, action::Int64)
+    run!(mdp::SimpleMDPEnv, action::Int)
 Transition to a new state given `action`. Returns the new state.
 """
-function run!(mdp::SimpleMDPEnv, action::Int64)
+function run!(mdp::SimpleMDPEnv, action::Int)
     if mdp.isterminal[mdp.state] == 1
         reset!(mdp)
     else
@@ -199,9 +199,9 @@ function run!(mdp::SimpleMDPEnv, action::Int64)
     end
 end
 
 """
-    run!(mdp::SimpleMDPEnv, policy::Array{Int64, 1}) = run!(mdp, policy[mdp.state])
+    run!(mdp::SimpleMDPEnv, policy::Array{Int, 1}) = run!(mdp, policy[mdp.state])
 """
-run!(mdp::SimpleMDPEnv, policy::Array{Int64, 1}) = run!(mdp, policy[mdp.state])
+run!(mdp::SimpleMDPEnv, policy::Array{Int, 1}) = run!(mdp, policy[mdp.state])
 
 
 function interact!(env::SimpleMDPEnv, action)
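A hedged usage sketch for the updated `run!` signature (hypothetical values; the `SimpleMDPEnv` constructor is not shown in this diff, so assume `mdp` is an already-constructed environment with 4 states and 2 actions). Indexing into an `Array{Int, 1}` policy yields an `Int`, so the call dispatches to `run!(mdp, action::Int)` on any platform without an `Int64` conversion:

# Sketch only: assumes `mdp` is an existing SimpleMDPEnv with 4 states and 2 actions.
policy = [1, 2, 2, 1]        # Array{Int, 1}: one action per state
a = policy[mdp.state]        # indexing yields an Int, matching run!(mdp, action::Int)
run!(mdp, a)                 # transition to a new state given the action
run!(mdp, policy)            # or step with the whole policy; run! looks up policy[mdp.state]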

src/environments/classic_control/mountain_car.jl (3 additions, 3 deletions)

@@ -8,17 +8,17 @@ struct MountainCarEnvParams{T}
     max_pos::T
     max_speed::T
     goal_pos::T
-    max_steps::Int64
+    max_steps::Int
 end
 
 mutable struct MountainCarEnv{T, R<:AbstractRNG} <: AbstractEnv
     params::MountainCarEnvParams{T}
     action_space::DiscreteSpace
     observation_space::MultiContinuousSpace{(2,), 1}
     state::Array{T, 1}
-    action::Int64
+    action::Int
     done::Bool
-    t::Int64
+    t::Int
     rng::R
 end
 

src/environments/classic_control/pendulum.jl (2 additions, 2 deletions)

@@ -9,7 +9,7 @@ struct PendulumEnvParams{T}
     m::T
     l::T
     dt::T
-    max_steps::Int64
+    max_steps::Int
 end
 
 mutable struct PendulumEnv{T, R<:AbstractRNG} <: AbstractEnv
@@ -18,7 +18,7 @@ mutable struct PendulumEnv{T, R<:AbstractRNG} <: AbstractEnv
     observation_space::MultiContinuousSpace{(3,), 1}
     state::Array{T, 1}
     done::Bool
-    t::Int64
+    t::Int
     rng::R
 end
 

src/environments/hanabi.jl (2 additions, 2 deletions)

@@ -91,8 +91,8 @@ mutable struct HanabiEnv <: AbstractEnv
     state::Base.RefValue{Hanabi.LibHanabi.PyHanabiState}
     moves::Vector{Base.RefValue{Hanabi.LibHanabi.PyHanabiMove}}
     observation_encoder::Base.RefValue{Hanabi.LibHanabi.PyHanabiObservationEncoder}
-    observation_space::MultiDiscreteSpace{Int64, 1}
-    action_space::DiscreteSpace{Int64}
+    observation_space::MultiDiscreteSpace{Int, 1}
+    action_space::DiscreteSpace{Int}
     reward::HanabiResult
 
     function HanabiEnv(;kw...)

0 commit comments