Mahdi Dibaiee 2019-03-07 06:55:23 +03:30
parent 8477c02aae
commit fe3f539d7d
3 changed files with 28 additions and 12 deletions

Binary file not shown.

nn.py (26 changed lines)

@@ -11,6 +11,7 @@ import pandas as pd
 import os.path
 from utils import *
+# from predict import predicted_map

 RANDOM_SEED = 1
@@ -23,6 +24,11 @@ np.random.seed(RANDOM_SEED)
 df = pd.read_pickle('data.p')

+class MapHistory(keras.callbacks.Callback):
+    def on_epoch_end(self, epoch, logs):
+        print('EPOCH', epoch)
+        predicted_map('maps/{}'.format(epoch))
+
 class Model():
     def __init__(self, name, batch_size=16, shuffle_buffer_size=500, learning_rate=0.001, epochs=1):
         self.name = name
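Note: the new MapHistory class hooks Keras's per-epoch callback lifecycle so a map can be rendered after every epoch. A minimal standalone sketch of the same hook, using a toy model and random data that are not from this repo:

import numpy as np
from tensorflow import keras

class EpochLogger(keras.callbacks.Callback):
    def on_epoch_end(self, epoch, logs=None):
        # Keras calls this hook after each epoch; logs holds that epoch's metrics
        print('EPOCH', epoch, logs)

model = keras.Sequential([keras.layers.Dense(1, input_shape=[4])])
model.compile(loss='mse', optimizer='adam')
x, y = np.random.rand(32, 4), np.random.rand(32, 1)
model.fit(x, y, epochs=2, callbacks=[EpochLogger()], verbose=0)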
@@ -42,6 +48,8 @@ class Model():
         (training, test) = (self.dataset.take(self.TRAIN_SIZE).batch(self.batch_size).repeat(),
                             self.dataset.skip(self.TRAIN_SIZE).batch(self.batch_size).repeat())

+        # print(df.groupby(['biome_num']).agg({ 'biome_num': lambda x: x.count() / df.shape[0] }))
+
         print('dataset: size={}, train={}, test={}'.format(dataset_size, self.TRAIN_SIZE, self.TEST_SIZE))
         print('input_size={}'.format(features))
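Note: the commented-out groupby computes each biome class's share of the dataset. A sketch of the equivalent, more direct pandas idiom, on an invented frame that only borrows the biome_num column name:

import pandas as pd

df = pd.DataFrame({'biome_num': [0, 0, 1, 2, 2, 2]})
# fraction of rows per class, e.g. class 2 -> 0.5, class 0 -> 0.333...
print(df['biome_num'].value_counts(normalize=True))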
@@ -58,6 +66,7 @@ class Model():
             # 'kernel_regularizer': keras.regularizers.l2(l=0.01)
         }
         dropout = [keras.layers.Dropout(0.1, input_shape=[self.features])]
+        # dropout = []
         self.model = keras.Sequential(dropout + [
             keras.layers.Dense(layers[0], activation=tf.nn.elu, **params)
         ] + [
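Note: create_model assembles the network by concatenating plain Python lists of layers before handing them to keras.Sequential, which is what makes the new `# dropout = []` toggle work: disabling input dropout is just substituting an empty list. A sketch of the pattern; the feature count, layer widths, and output size are assumptions, not the repo's values:

import tensorflow as tf
from tensorflow import keras

features = 8     # assumed input width; the repo derives this from the data
layers = [12]    # hidden layer sizes, mirroring create_model's argument

dropout = [keras.layers.Dropout(0.1, input_shape=[features])]
# dropout = []   # toggling input dropout = swapping in an empty list

model = keras.Sequential(dropout + [
    keras.layers.Dense(n, activation=tf.nn.elu) for n in layers
] + [
    keras.layers.Dense(3, activation=tf.nn.softmax)  # assumed output width
])
model.build(input_shape=(None, features))
model.summary()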
@@ -69,7 +78,6 @@ class Model():
     def compile(self, loss='mse', metrics=['accuracy'], optimizer=tf.train.AdamOptimizer, load_weights=True):
         if load_weights:
             self.model.load_weights(self.path)
-            print('loaded weights')

         optimizer = optimizer(self.learning_rate)
@@ -92,16 +100,19 @@ class Model():
     def train(self):
         self.model.summary()

-        checkpoint = keras.callbacks.ModelCheckpoint(self.path, monitor='acc', verbose=1, mode='max')
+        checkpoint = keras.callbacks.ModelCheckpoint(self.path, monitor='val_loss', verbose=1, mode='min', save_best_only=True)
         tensorboard = keras.callbacks.TensorBoard(log_dir='./logs', update_freq='epoch')
-        map_callback = keras.callbacks.LambdaCallback(on_epoch_end=self.evaluate_print)
+        # reduce_lr = keras.callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=5, min_lr=0.0001)
+        # map_callback = MapHistory()

         self.model.fit(
             self.training,
             batch_size=self.batch_size,
             epochs=self.epochs,
-            steps_per_epoch=int(self.dataset_size / self.batch_size),
+            steps_per_epoch=int(self.TRAIN_SIZE / self.batch_size),
             callbacks=[checkpoint, tensorboard],
+            validation_data=self.test,
+            validation_steps=int(self.TEST_SIZE / self.batch_size),
             verbose=1
         )
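Note: train() now checkpoints on validation loss instead of training accuracy, keeps only the best epoch, and wires the held-out split into fit(). A minimal sketch of that combination on random numpy data; the file name and shapes are invented:

import numpy as np
from tensorflow import keras

model = keras.Sequential([keras.layers.Dense(1, input_shape=[4])])
model.compile(loss='mse', optimizer='adam')

# save_best_only keeps only the epoch with the lowest val_loss, which is
# why fit() needs validation data: otherwise val_loss never exists
checkpoint = keras.callbacks.ModelCheckpoint(
    'best.h5', monitor='val_loss', mode='min',
    save_best_only=True, save_weights_only=True, verbose=1)

x, y = np.random.rand(64, 4), np.random.rand(64, 1)
model.fit(x, y, epochs=3, validation_split=0.25,
          callbacks=[checkpoint], verbose=0)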
@@ -109,12 +120,13 @@ class Model():
         return np.argmax(self.model.predict(a), axis=1)

 A = Model('a', epochs=2)
-B = Model('b', learning_rate=0.001, epochs=20)
+B = Model('b', learning_rate=0.0005, epochs=50)
+# 24 so far

 def compile_b():
     B.prepare_dataset(df, dataframe_to_dataset_biomes)
-    B.create_model([32, 32], tf.nn.softmax)
-    B.compile(loss='sparse_categorical_crossentropy')
+    B.create_model([12], tf.nn.softmax)
+    B.compile(loss='sparse_categorical_crossentropy', load_weights=False)

 def compile_a():
     A.prepare_dataset(df, dataframe_to_dataset_temp_precip)
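Note: compile_b pairs a softmax output with sparse_categorical_crossentropy, so biome labels can stay as integer class ids instead of one-hot vectors. A tiny self-contained sketch of that pairing; the class count, layer sizes, and data are invented:

import numpy as np
import tensorflow as tf
from tensorflow import keras

num_classes = 14   # assumed; the real biome count comes from the dataset

model = keras.Sequential([
    keras.layers.Dense(12, activation=tf.nn.elu, input_shape=[5]),
    keras.layers.Dense(num_classes, activation=tf.nn.softmax),
])
# the sparse variant takes integer class ids directly, no one-hot targets
model.compile(loss='sparse_categorical_crossentropy', optimizer='adam',
              metrics=['accuracy'])

x = np.random.rand(32, 5)
y = np.random.randint(0, num_classes, size=(32,))
model.fit(x, y, epochs=1, verbose=0)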

predict.py

@@ -8,13 +8,12 @@ import time
 def chunker(seq, size):
     return (seq[pos:pos + size] for pos in range(0, len(seq), size))

-year = MAX_YEAR - 1
-df = pd.read_pickle('data.p')
-compile_b()
-
-for change in range(0, 1):
+def predicted_map(path=None):
+    year = MAX_YEAR - 1
+    df = pd.read_pickle('data.p')
     print('TEMPERATURE MODIFICATION OF {}'.format(change))

     inputs = ['elevation', 'distance_to_water', 'latitude']
@@ -50,4 +49,9 @@ for change in range(0, 1):
     }, columns=columns)
     new_data = new_data.append(f)

-    draw(new_data)
+    draw(new_data, path=path)
+
+if __name__ == "__main__":
+    compile_b()
+    predicted_map()
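Note: wrapping the script body in predicted_map() and moving the top-level calls under a __main__ guard lets nn.py do `from predict import predicted_map` (the commented import added at the top of nn.py) without rendering a map at import time. A generic sketch of the pattern, with hypothetical names:

# importing this module stays side-effect free; the work runs only on
# direct execution
def run_pipeline(path=None):
    print('rendering to', path or 'screen')

if __name__ == "__main__":
    run_pipeline()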