feat(randomNetwork): generate random networks with a simple function

This commit is contained in:
Mahdi Dibaiee 2016-07-18 17:07:12 +04:30
parent 23851a85f5
commit f5a0c23d99
3 changed files with 24 additions and 35 deletions

View File

@ -5,20 +5,6 @@ import Numeric.LinearAlgebra
import Data.List
import Debug.Trace
-- 1x2 main = do
-- 2x3 + 1x3 putStrLn "Try the examples:"
-- 3x1 + 1x1 putStrLn "- stack exec example-xor"
-- main :: IO [()]
-- | Train a 2-8-1 feed-forward network on the XOR truth table and
-- print the network's raw (un-rounded) outputs for each input.
main = print results
  where
    learning_rate = 0.5
    -- Hidden and output layers, seeded deterministically (seeds 0 and 1).
    hidden = randomLayer 0 (2, 8)
    out = randomLayer 1 (8, 1)
    network = hidden :- O out
    inputs = [vector [0, 1], vector [1, 0], vector [1, 1], vector [0, 0]]
    labels = [vector [1], vector [1], vector [0], vector [0]]
    -- 2 iterations x 1000 epochs, matching the original training schedule.
    trained = session inputs network labels learning_rate (2, 1000)
    results = map (`forward` trained) inputs

View File

@ -4,28 +4,23 @@ module Main where
import Data.List
import Debug.Trace
-- 1x2 main = do
-- 2x3 + 1x3
-- 3x1 + 1x1
-- main :: IO [()]
main =
let learning_rate = 0.5
    (iterations, epochs) = (2, 1000)
ih = randomLayer 0 (2, 8) rnetwork = randomNetwork 0 2 [8] 1 -- two inputs, 8 nodes in a single hidden layer, 1 output
ho = randomLayer 1 (8, 1)
network = ih :- O ho
inputs = [vector [0, 1], vector [1, 0], vector [1, 1], vector [0, 0]]
labels = [vector [1], vector [1], vector [0], vector [0]]
updated_network = session inputs network labels learning_rate (iterations, epochs) network = session inputs rnetwork labels learning_rate (iterations, epochs)
results = map (`forward` updated_network) inputs results = map (`forward` network) inputs
rounded = map (map round . toList) results rounded = map (map round . toList) results
in sequence [putStrLn $ "inputs: " ++ show inputs,
putStrLn $ "labels: " ++ show labels, putStrLn "parameters: "
putStrLn $ "learning rate: " ++ show learning_rate, putStrLn $ "- inputs: " ++ show inputs
putStrLn $ "iterations/epochs: " ++ show (iterations, epochs), putStrLn $ "- labels: " ++ show labels
putStrLn "...", putStrLn $ "- learning rate: " ++ show learning_rate
putStrLn $ "rounded result: " ++ show rounded, putStrLn $ "- iterations/epochs: " ++ show (iterations, epochs)
putStrLn $ "actual result: " ++ show results] putStrLn "results: "
putStrLn $ "- actual result: " ++ show results
putStrLn $ "- rounded result: " ++ show rounded

View File

@ -10,6 +10,7 @@ module Sibe
Output,
forward,
randomLayer,
randomNetwork,
train,
session,
shuffle,
@ -45,6 +46,13 @@ module Sibe
biases = randomVector seed Uniform wc * 2 - 1
in L biases weights
-- | Build a randomly-initialised network from layer sizes:
-- input width, a list of hidden-layer widths, and output width.
-- The seed is advanced by one per hidden layer so layers differ.
randomNetwork :: Seed -> Int -> [Int] -> Int -> Network
randomNetwork seed input hiddens output =
  case hiddens of
    []       -> O (randomLayer seed (input, output))
    (h : hs) -> randomLayer seed (input, h) :- randomNetwork (seed + 1) h hs output
-- | Logistic sigmoid activation: squashes any real input into (0, 1).
logistic :: Double -> Double
logistic x = recip (1 + exp (negate x))