using Flux
using Flux.Tracker
using Flux: @epochs
using Base.Iterators

# Create data: samples of the target function y = sin(1/x)
srand(1234)
batchSize = 256
l = batchSize * 100
x = randn(Float32, 1, l)
y = sin.(1 ./ x)

# Create batches of size batchSize
batches = [(x[:, i], y[:, i]) for i in partition(1:l, batchSize)]

# Create model, loss function, and training optimiser
hidden_dim = 32
typedInit(dims...) = Float32(5/3) * randn(Float32, dims...) .* sqrt(2.0f0 / sum(dims))  # 32-bit weight initialisation (tanh gain 5/3)
typedInitB(l) = zeros(Float32, l)                                                       # 32-bit bias initialisation
typedDense(n1, n2, f=identity) = Dense(n1, n2, f, initW=typedInit, initb=typedInitB)

model = Chain(
    typedDense(1, hidden_dim, tanh),
    typedDense(hidden_dim, hidden_dim, tanh),
    typedDense(hidden_dim, hidden_dim, tanh),
    typedDense(hidden_dim, 1)
)

p = params(model)
opt = ADAM(p, 0.001f0, β1 = 0.9f0, β2 = 0.999f0, ϵ = 1f-8)
loss(x, y) = Flux.mse(model(x), y)

# Callback: print the training loss at most once every 10 seconds
evalcb = Flux.throttle(() -> @show(loss(x, y)), 10)

# Train model for 100 epochs
@epochs 100 Flux.train!(loss, batches, opt, cb = evalcb)
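
After training, the fit can be sanity-checked against the target sin(1/x) on freshly drawn inputs. The lines below are a minimal sketch of such a check; the names x_test and y_test and the sample count are illustrative and not part of the original paste.

# Quick check of the fit on fresh inputs (illustrative sketch)
x_test = randn(Float32, 1, 1024)
y_test = sin.(1 ./ x_test)
@show Flux.mse(model(x_test), y_test)  # low values relative to the spread of y_test indicate a reasonable fit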