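"""Train two small Keras models on the preprocessed dataframe in data_final.p:
model A regresses temperature and precipitation, and model B classifies
biomes. Both resume from checkpoints under checkpoints/ when available."""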
from __future__ import absolute_import, division, print_function

# TensorFlow and tf.keras
import tensorflow as tf
from tensorflow import keras

# Helper libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import os.path

from utils import dataframe_to_dataset_biomes, dataframe_to_dataset_temp_precip
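# both helpers are assumed to return (sample_count, feature_count, output_size, tf.data.Dataset),
# matching how their return values are unpacked below
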
RANDOM_SEED = 1

tf.enable_eager_execution()

tf.set_random_seed(RANDOM_SEED)
np.random.seed(RANDOM_SEED)

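# preprocessed dataframe; both models build their tf.data pipelines from it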
df = pd.read_pickle('data_final.p')

# model A: regression on temperature and precipitation
def train_model_a():
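    """Train model A, a small regression network that predicts temperature
    and precipitation from the features in df, resuming from
    checkpoints/a.hdf5 when that checkpoint exists."""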
    filepath = "checkpoints/a.hdf5"

    BATCH_SIZE = 100
    SHUFFLE_BUFFER_SIZE = 500
    LEARNING_RATE = 0.001
    EPOCHS = 2

    dataset_size, features, output_size, dataset = dataframe_to_dataset_temp_precip(df)
    dataset = dataset.shuffle(SHUFFLE_BUFFER_SIZE)

    # take/skip count samples, so split before batching;
    # TRAIN_SIZE must also be an int for take/skip to accept it
    TRAIN_SIZE = int(dataset_size * 0.85)
    TEST_SIZE = dataset_size - TRAIN_SIZE
    (training, test) = (dataset.take(TRAIN_SIZE).batch(BATCH_SIZE).repeat(),
                        dataset.skip(TRAIN_SIZE).batch(BATCH_SIZE).repeat())

    model = keras.Sequential([
        keras.layers.Dense(4, activation=tf.nn.relu, input_shape=[features]),
        keras.layers.Dense(output_size)
    ])

    # resume from an existing checkpoint, if any
    if os.path.exists(filepath):
        model.load_weights(filepath)

    optimizer = tf.train.AdamOptimizer(LEARNING_RATE)

    # accuracy is not meaningful for regression, so track MAE only
    model.compile(loss='mse',
                  optimizer=optimizer,
                  metrics=['mae'])

    model.summary()

    # this model does not track accuracy, so checkpoint on the loss instead
    checkpoint = keras.callbacks.ModelCheckpoint(filepath, monitor='loss', verbose=1, mode='min')

    # batch_size must not be passed alongside a tf.data Dataset;
    # steps_per_epoch covers one pass over the training split
    model.fit(
        training,
        epochs=EPOCHS,
        steps_per_epoch=int(TRAIN_SIZE / BATCH_SIZE),
        callbacks=[checkpoint],
        verbose=1
    )

    # evaluate on the held-out 15%, not the whole dataset
    evaluation = model.evaluate(
        test,
        steps=int(TEST_SIZE / BATCH_SIZE),
        verbose=1
    )

    print(evaluation)

# model B: biome classification; trained for 850 epochs so far
def train_model_b():
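    """Train model B, a classifier that predicts the biome label from the
    features in df, resuming from checkpoints/b.hdf5 when that
    checkpoint exists."""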
    filepath = "checkpoints/b.hdf5"

    BATCH_SIZE = 100
    SHUFFLE_BUFFER_SIZE = 500
    LEARNING_RATE = 0.0005
    EPOCHS = 400

    dataset_size, features, output_size, dataset = dataframe_to_dataset_biomes(df)
    dataset = dataset.shuffle(SHUFFLE_BUFFER_SIZE)

    TRAIN_SIZE = int(dataset_size * 0.85)
    TEST_SIZE = dataset_size - TRAIN_SIZE
    (training, test) = (dataset.take(TRAIN_SIZE).batch(BATCH_SIZE).repeat(),
                        dataset.skip(TRAIN_SIZE).batch(BATCH_SIZE).repeat())

    model = keras.Sequential([
        keras.layers.Dense(64, activation=tf.nn.relu, input_shape=[features]),
        keras.layers.Dense(128, activation=tf.nn.relu),
        keras.layers.Dense(output_size, activation=tf.nn.softmax)
    ])

    # resume from an existing checkpoint, if any
    if os.path.exists(filepath):
        model.load_weights(filepath)

    optimizer = tf.train.AdamOptimizer(LEARNING_RATE)

    model.compile(loss='sparse_categorical_crossentropy',
                  optimizer=optimizer,
                  metrics=['accuracy'])

    model.summary()

    # saves after every epoch, since save_best_only is not set
    checkpoint = keras.callbacks.ModelCheckpoint(filepath, monitor='acc', verbose=1, mode='max')

    model.fit(
        training,
        epochs=EPOCHS,
        verbose=1,
        steps_per_epoch=int(TRAIN_SIZE / BATCH_SIZE),
        callbacks=[checkpoint]
    )

    # debugging helpers:
    # print(dataset.repeat().make_one_shot_iterator().get_next())
    # inp, out = test.make_one_shot_iterator().get_next()
    # print(inp, out)
    # print(np.argmax(model.predict(inp), axis=1))

    # evaluate on the held-out 15%, not the whole dataset
    evaluation = model.evaluate(
        test,
        steps=int(TEST_SIZE / BATCH_SIZE),
        verbose=1
    )

    print('loss: {}, accuracy: {}'.format(*evaluation))

# train_model_a()
train_model_b()