feat(web): auto-generated form
@@ -99,8 +99,8 @@ class Model():
 
     def save(self, path):
        logger.debug('Saving model weights to path: %s', path)
-        self.model.save_weights(path)
-        return path
+        self.model.save_weights(path + '/checkpoint.hd5')
+        return path + '/checkpoint'
 
     def evaluate(self):
         return self.model.evaluate(
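Read together with the _save(self, checkpoint_dir) hook later in this commit, Model.save now appears to treat its argument as a checkpoint directory handed over by Ray Tune: it writes a named weights file inside it and returns a base path for later restore. A hypothetical round trip, assuming Model.restore mirrors this (the extract does not show it):

    # Hypothetical round trip; Model.restore's behavior is not in this diff.
    m = Model('a', epochs=1)
    base = m.save('/tmp/trial_0')    # writes /tmp/trial_0/checkpoint.hd5
    m.restore(base)                  # base path returned: /tmp/trial_0/checkpoint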
@@ -119,15 +119,19 @@ class Model():
 
         # map_callback = MapHistory()
 
+        extra_params = {}
+        if self.class_weight:
+            extra_params['class_weight'] = self.class_weight
+
         out = self.model.fit(
             self.training_batched,
             batch_size=self.batch_size,
             epochs=self.epochs,
             steps_per_epoch=int(self.TRAIN_SIZE / self.batch_size),
-            class_weight=self.class_weight,
             validation_data=self.test_batched,
             validation_steps=int(self.TEST_SIZE / self.batch_size),
-            verbose=1
+            verbose=1,
+            **extra_params
         )
 
         return out
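The pattern here: instead of always passing class_weight (which was None when unset), the argument is forwarded to fit only when it is actually set, via dict unpacking. A minimal standalone sketch of the idiom, with illustrative names:

    # Conditional-kwargs pattern used above; names are illustrative.
    def call_fit(fit, data, class_weight=None):
        extra = {}
        if class_weight:                       # forward only when set
            extra['class_weight'] = class_weight
        return fit(data, **extra)              # key absent, rather than None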
@@ -4,7 +4,7 @@ import pandas as pd
 import tensorflow as tf
 from ray import tune
 from tensorflow import keras
-from utils import logger
+from utils import *
 from model import Model
 
 B_params = {
@@ -45,11 +45,61 @@ class TuneB(tune.Trainable):
     def _restore(self, path):
         return self.model.restore(path)
 
-def start_tuning(cpu=1, gpu=2, checkpoint_freq=1, checkpoint_at_end=True, resume=False, restore=None, stop=500):
+A_params = {
+    'batch_size': tune.grid_search([5, 16, 32, 64]),
+    'layers': tune.grid_search([[16, 16], [32, 32], [128, 128]]),
+    'lr': tune.grid_search([1e-4, 1e-3, 1e-2]),
+    'optimizer': tune.grid_search([tf.keras.optimizers.Adam]),
+}
+
+class TuneA(tune.Trainable):
+    def _setup(self, config):
+        logger.debug('Ray Tune model configuration %s', config)
+
+        self.model = Model('a', epochs=1)
+
+        optimizer = config['optimizer']
+        optimizer = config['optimizer'](lr=config['lr'])
+
+        self.model.prepare_for_use(
+            df=df,
+            batch_size=config['batch_size'],
+            layers=config['layers'],
+            optimizer=optimizer,
+            dataset_fn=dataframe_to_dataset_temp_precip,
+            loss='mse'
+        )
+
+    def _train(self):
+        logs = self.model.train(self.config)
+
+        metrics = {
+            'mean_accuracy': logs.history['acc'][0],
+            'loss': logs.history['loss'][0],
+            'val_accuracy': logs.history['val_acc'][0],
+            'val_loss': logs.history['val_loss'][0],
+        }
+
+        return metrics
+
+    def _save(self, checkpoint_dir):
+        return self.model.save(checkpoint_dir)
+
+    def _restore(self, path):
+        return self.model.restore(path)
+
+def start_tuning(model, cpu=1, gpu=2, checkpoint_freq=1, checkpoint_at_end=True, resume=False, restore=None, stop=500):
     ray.init()
 
-    tune.run(TuneB,
-             config=B_params,
+    if model == 'a':
+        t = TuneA
+        params = A_params
+    else:
+        t = TuneB
+        params = B_params
+
+    tune.run(t,
+             config=params,
              resources_per_trial={
                  "cpu": cpu,
                  "gpu": gpu
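_setup, _train, _save, and _restore are the underscore-prefixed hooks of Ray Tune's class-based Trainable API (as used in older Ray releases): Tune calls _setup(config) once per trial, _train() once per iteration expecting a metrics dict, and later hands _save's return value back to _restore when resuming. A simplified view of one trial's lifecycle, with illustrative values (not code from the commit):

    # What tune.run drives internally, greatly simplified:
    trial = TuneA(config={'batch_size': 5, 'layers': [16, 16],
                          'lr': 1e-3, 'optimizer': tf.keras.optimizers.Adam})
    metrics = trial._train()                # one iteration; the metrics dict above
    checkpoint = trial._save('/tmp/ckpt')   # forwards to Model.save(checkpoint_dir)
    trial._restore(checkpoint)              # receives _save()'s return value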
@@ -62,6 +112,5 @@ def start_tuning(cpu=1, gpu=2, checkpoint_freq=1, checkpoint_at_end=True, resume
                 'training_iteration': stop
             })
 
-
 if __name__ == "__main__":
     fire.Fire(start_tuning)
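With fire.Fire(start_tuning), the new positional model argument becomes the first CLI argument. A sketch of the intended usage (the module's filename is not shown in this extract):

    # Equivalent programmatic calls; via python-fire this would be e.g.
    #   $ python <module>.py a --cpu=2 --gpu=1 --stop=200
    start_tuning('a', cpu=2, gpu=1, stop=200)   # runs TuneA over A_params
    start_tuning('b')                           # any other value runs TuneB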
@@ -1,90 +0,0 @@
-import fire
-import ray
-import pandas as pd
-import tensorflow as tf
-import numpy as np
-from tensorflow import keras
-from utils import *
-from model import Model
-from constants import *
-
-CHECKPOINT = 'checkpoints/temp.h5'
-
-SEED = 1
-np.random.seed(SEED)
-df = pd.read_pickle('data.p')
-
-dataset_size, x_columns, y_columns, dataset = dataframe_to_dataset_temp_precip(df)
-batch_size = 5
-epochs = 500
-
-def baseline_model():
-    model = keras.models.Sequential()
-    params = {
-        'kernel_initializer': 'lecun_uniform',
-        'bias_initializer': 'zeros',
-    }
-    model.add(keras.layers.Dense(x_columns, input_dim=x_columns, **params, activation='elu'))
-    model.add(keras.layers.Dense(6, **params, activation='relu'))
-    model.add(keras.layers.Dense(y_columns, **params))
-
-    model.compile(loss='mse', optimizer='adam', metrics=['mae'])
-    return model
-
-model = baseline_model()
-model.summary()
-
-dataset = dataset.shuffle(500)
-TRAIN_SIZE = int(dataset_size * 0.85)
-TEST_SIZE = dataset_size - TRAIN_SIZE
-(training, test) = (dataset.take(TRAIN_SIZE),
-                    dataset.skip(TRAIN_SIZE))
-training_batched = training.batch(batch_size).repeat()
-test_batched = test.batch(batch_size).repeat()
-
-logger.debug('Model dataset info: size=%s, train=%s, test=%s', dataset_size, TRAIN_SIZE, TEST_SIZE)
-
-# model.load_weights(CHECKPOINT)
-
-def predict():
-    columns = INPUTS
-
-    YEAR = 2000
-
-    print(columns)
-    print(df[0:batch_size])
-    inputs = df[columns].to_numpy()
-    inputs = normalize_ndarray(inputs, df[columns].to_numpy())
-    print(inputs[0:batch_size])
-
-    out_columns = []
-    for season in SEASONS:
-        out_columns += ['temp_{}_{}'.format(season, YEAR), 'precip_{}_{}'.format(season, YEAR)]
-
-    print(out_columns)
-
-    out = model.predict(inputs)
-    print(out)
-    print(df[out_columns][0:batch_size])
-    print(denormalize(out, df[out_columns].to_numpy()))
-
-def train():
-    tfb_callback = tf.keras.callbacks.TensorBoard(batch_size=batch_size, log_dir='temp_logs')
-    checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(filepath=CHECKPOINT, monitor='val_loss')
-
-    model.fit(training_batched,
-              batch_size=batch_size,
-              epochs=epochs,
-              steps_per_epoch=int(TRAIN_SIZE / batch_size),
-              validation_data=test_batched,
-              validation_steps=int(TEST_SIZE / batch_size),
-              callbacks=[tfb_callback, checkpoint_callback],
-              verbose=1)
-
-    model.save_weights(CHECKPOINT)
-
-# train()
-
-
-if __name__ == "__main__":
-    fire.Fire({ 'predict': predict, 'train': train })
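This deleted standalone script hand-built one fixed baseline model; its train/test split and fit loop now live inside Model (note training_batched / test_batched in the fit hunk above), with hyperparameters explored through Tune instead. For reference, a standalone sketch of the tf.data take/skip split idiom the script used:

    # take/skip split from the removed script, in isolation:
    import tensorflow as tf
    ds = tf.data.Dataset.range(100).shuffle(500, seed=1)
    train_n = int(100 * 0.85)
    training = ds.take(train_n).batch(5).repeat()   # first 85 examples
    test = ds.skip(train_n).batch(5).repeat()       # remaining 15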
@@ -105,7 +105,7 @@ def dataframe_to_dataset_temp_precip(df):
     tf_output = tf.cast(tf_output, tf.float32)
 
     logger.debug('dataset size: rows=%d, input_columns=%d, num_classes=%d', int(tf_inputs.shape[0]), input_columns, num_classes)
-    return int(tf_inputs.shape[0]), input_columns, num_classes, tf.data.Dataset.from_tensor_slices((tf_inputs, tf_output))
+    return int(tf_inputs.shape[0]), input_columns, num_classes, None, tf.data.Dataset.from_tensor_slices((tf_inputs, tf_output))
 
 
 flatten = lambda l: [item for sublist in l for item in sublist]
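The widened return tuple means every caller must now unpack five values; the deleted script's four-value unpacking above would no longer work against this signature. A sketch of an updated call site (variable names illustrative):

    # Call sites now receive a fifth element (the new fourth slot is None):
    size, x_cols, y_cols, _, dataset = dataframe_to_dataset_temp_precip(df)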