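"""Train and evaluate tf.keras models on the preprocessed dataframe in
data_final.p: model B classifies biomes, model A predicts temperature and
precipitation."""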
from __future__ import absolute_import, division, print_function

# TensorFlow and tf.keras
import tensorflow as tf
from tensorflow import keras

# Helper libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import os.path

# dataset-builder helpers from this project's utils module
from utils import dataframe_to_dataset_biomes, dataframe_to_dataset_temp_precip

RANDOM_SEED = 1

# Eager execution (TF 1.x) and fixed seeds for reproducibility
tf.enable_eager_execution()

tf.set_random_seed(RANDOM_SEED)
np.random.seed(RANDOM_SEED)

# Preprocessed dataframe produced by the data pipeline
df = pd.read_pickle('data_final.p')


class Model:
    """Feed-forward tf.keras model wrapper: dataset prep, checkpointing, training."""

    def __init__(self, name, batch_size=100, shuffle_buffer_size=500, learning_rate=0.001, epochs=1):
        self.name = name
        self.path = "checkpoints/{}.hdf5".format(name)

        self.batch_size = batch_size
        self.shuffle_buffer_size = shuffle_buffer_size
        self.learning_rate = learning_rate
        self.epochs = epochs

    def prepare_dataset(self, df, fn):
        self.dataset_fn = fn
        dataset_size, features, output_size, dataset = fn(df)
        # Shuffle once (fixed seed, no reshuffling between iterations) so the
        # take/skip split below is a stable 85/15 train/test partition rather
        # than leaking rows across it on every epoch.
        self.dataset = dataset.shuffle(self.shuffle_buffer_size, seed=RANDOM_SEED,
                                       reshuffle_each_iteration=False)
        self.TRAIN_SIZE = int(dataset_size * 0.85)
        self.TEST_SIZE = dataset_size - self.TRAIN_SIZE
        (training, test) = (self.dataset.take(self.TRAIN_SIZE).batch(self.batch_size).repeat(),
                            self.dataset.skip(self.TRAIN_SIZE).batch(self.batch_size).repeat())

        self.dataset_size = dataset_size
        self.features = features
        self.output_size = output_size
        self.training = training
        self.test = test
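
    # A minimal sketch of the contract each utils helper is assumed to follow
    # (names and columns here are hypothetical, not the actual utils code):
    #
    #   def dataframe_to_dataset_example(df):
    #       data = df[FEATURE_COLUMNS].values.astype(np.float32)
    #       labels = df[LABEL_COLUMN].values
    #       dataset = tf.data.Dataset.from_tensor_slices((data, labels))
    #       return len(df), data.shape[1], NUM_OUTPUTS, dataset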

    def create_model(self, layers):
        # Fully-connected net: hidden ReLU layers sized by `layers`, followed
        # by a linear output layer of width output_size.
        self.model = keras.Sequential([
            keras.layers.Dense(layers[0], activation=tf.nn.relu, input_shape=[self.features])
        ] + [
            keras.layers.Dense(n, activation=tf.nn.relu) for n in layers[1:]
        ] + [
            keras.layers.Dense(self.output_size)
        ])
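
    # For example, create_model([64, 128]) builds:
    #   Dense(64, relu) -> Dense(128, relu) -> Dense(output_size, linear)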

    def compile(self):
        # Resume from an existing checkpoint when one has been saved.
        if os.path.exists(self.path):
            self.model.load_weights(self.path)
        optimizer = tf.train.AdamOptimizer(self.learning_rate)

        self.model.compile(loss='mse',
                           optimizer=optimizer,
                           metrics=['mae', 'accuracy'])

    def evaluate(self):
        # The dataset is pre-batched and repeats indefinitely, so pass steps
        # for exactly one pass over the held-out split (and no batch_size,
        # which tf.keras rejects for dataset inputs).
        return self.model.evaluate(
            self.test,
            steps=int(self.TEST_SIZE / self.batch_size),
            verbose=1
        )

    def train(self):
        self.model.summary()

        # Save weights to this model's checkpoint file after each epoch.
        checkpoint = keras.callbacks.ModelCheckpoint(self.path, monitor='acc', verbose=1, mode='max')

        self.model.fit(
            self.training,
            epochs=self.epochs,
            steps_per_epoch=int(self.TRAIN_SIZE / self.batch_size),
            callbacks=[checkpoint],
            verbose=1
        )

    def predict(self, a):
        # Class index of the highest-scoring output unit per sample.
        return np.argmax(self.model.predict(a), axis=1)
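
# Note: Model.predict takes an argmax, which only makes sense for the biome
# classifier (B); for the temperature/precipitation model (A), read the raw
# self.model.predict(...) outputs instead.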

# Two experiments: A predicts temperature/precipitation, B classifies biomes.
A = Model('a', batch_size=100, shuffle_buffer_size=500, learning_rate=0.001, epochs=2)
B = Model('b', batch_size=100, shuffle_buffer_size=500, learning_rate=0.001, epochs=850)

if __name__ == "__main__":
    B.prepare_dataset(df, dataframe_to_dataset_biomes)
    B.create_model([64, 128])
    B.compile()

    # Debugging helpers: peek at a test batch and the prediction spread.
    # for inp, out in B.test.take(1).make_one_shot_iterator():
    #     print(inp, out)
    # print(np.unique(nums))
    # print(np.unique(predictions))

    print('loss: {}, mae: {}, accuracy: {}'.format(*B.evaluate()))

    # B.train()

    A.prepare_dataset(df, dataframe_to_dataset_temp_precip)
    A.create_model([4])
    A.compile()
    # A.train()
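
    # Usage sketch (hypothetical input): after training B, classify a batch
    # of feature rows shaped [n_samples, B.features], e.g.
    #   sample = np.zeros((1, B.features), dtype=np.float32)  # placeholder
    #   print(B.predict(sample))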