feat(web): auto-generated form
parent e18fc7692b
commit 3bec4d7486
@@ -99,8 +99,8 @@ class Model():

     def save(self, path):
         logger.debug('Saving model weights to path: %s', path)
-        self.model.save_weights(path)
-        return path
+        self.model.save_weights(path + '/checkpoint.hd5')
+        return path + '/checkpoint'

     def evaluate(self):
         return self.model.evaluate(
@@ -119,15 +119,19 @@ class Model():

         # map_callback = MapHistory()

+        extra_params = {}
+        if self.class_weight:
+            extra_params['class_weight'] = self.class_weight
+
         out = self.model.fit(
             self.training_batched,
             batch_size=self.batch_size,
             epochs=self.epochs,
             steps_per_epoch=int(self.TRAIN_SIZE / self.batch_size),
-            class_weight=self.class_weight,
             validation_data=self.test_batched,
             validation_steps=int(self.TEST_SIZE / self.batch_size),
-            verbose=1
+            verbose=1,
+            **extra_params
         )

         return out
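
Note on the class_weight change above: this is the standard optional-keyword pattern. The argument is collected in a dict only when it is set and then splatted into the call, so a falsy class_weight is omitted entirely instead of being passed as class_weight=None. A minimal standalone sketch (the fit stub below is a stand-in for keras' Model.fit, not repo code):

    def fit(data, epochs=1, **kwargs):
        # Stand-in for keras Model.fit: it just echoes the keyword arguments it received.
        return {'epochs': epochs, **kwargs}

    class_weight = None          # e.g. no class weights configured for this model
    extra_params = {}
    if class_weight:
        extra_params['class_weight'] = class_weight

    # 'class_weight' is simply absent from the call when it is falsy.
    print(fit('training_batched', epochs=3, **extra_params))
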
@@ -4,7 +4,7 @@ import pandas as pd
 import tensorflow as tf
 from ray import tune
 from tensorflow import keras
-from utils import logger
+from utils import *
 from model import Model

 B_params = {
@@ -45,11 +45,61 @@ class TuneB(tune.Trainable):
     def _restore(self, path):
         return self.model.restore(path)

-def start_tuning(cpu=1, gpu=2, checkpoint_freq=1, checkpoint_at_end=True, resume=False, restore=None, stop=500):
+A_params = {
+    'batch_size': tune.grid_search([5, 16, 32, 64]),
+    'layers': tune.grid_search([[16, 16], [32, 32], [128, 128]]),
+    'lr': tune.grid_search([1e-4, 1e-3, 1e-2]),
+    'optimizer': tune.grid_search([tf.keras.optimizers.Adam]),
+}
+
+class TuneA(tune.Trainable):
+    def _setup(self, config):
+        logger.debug('Ray Tune model configuration %s', config)
+
+        self.model = Model('a', epochs=1)
+
+        optimizer = config['optimizer']
+        optimizer = config['optimizer'](lr=config['lr'])
+
+        self.model.prepare_for_use(
+            df=df,
+            batch_size=config['batch_size'],
+            layers=config['layers'],
+            optimizer=optimizer,
+            dataset_fn=dataframe_to_dataset_temp_precip,
+            loss='mse'
+        )
+
+    def _train(self):
+        logs = self.model.train(self.config)
+
+        metrics = {
+            'mean_accuracy': logs.history['acc'][0],
+            'loss': logs.history['loss'][0],
+            'val_accuracy': logs.history['val_acc'][0],
+            'val_loss': logs.history['val_loss'][0],
+        }
+
+        return metrics
+
+    def _save(self, checkpoint_dir):
+        return self.model.save(checkpoint_dir)
+
+    def _restore(self, path):
+        return self.model.restore(path)
+
+def start_tuning(model, cpu=1, gpu=2, checkpoint_freq=1, checkpoint_at_end=True, resume=False, restore=None, stop=500):
     ray.init()

-    tune.run(TuneB,
-             config=B_params,
+    if model == 'a':
+        t = TuneA
+        params = A_params
+    else:
+        t = TuneB
+        params = B_params
+
+    tune.run(t,
+             config=params,
              resources_per_trial={
                  "cpu": cpu,
                  "gpu": gpu
@@ -62,6 +112,5 @@ def start_tuning(cpu=1, gpu=2, checkpoint_freq=1, checkpoint_at_end=True, resume
                  'training_iteration': stop
              })

-
 if __name__ == "__main__":
     fire.Fire(start_tuning)
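
With fire.Fire(start_tuning) above, the new model argument becomes a command-line flag. A trimmed, standalone sketch of that wiring (the signature is shortened and the script name is hypothetical; the real function is in the hunk above):

    import fire

    def start_tuning(model, cpu=1, gpu=2, stop=500):
        # Shortened signature for illustration only.
        print(model, cpu, gpu, stop)

    if __name__ == "__main__":
        # e.g. `python tune_script.py --model=a --gpu=0`; 'a' selects TuneA/A_params in the real code.
        fire.Fire(start_tuning)
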
@@ -1,90 +0,0 @@
-import fire
-import ray
-import pandas as pd
-import tensorflow as tf
-import numpy as np
-from tensorflow import keras
-from utils import *
-from model import Model
-from constants import *
-
-CHECKPOINT = 'checkpoints/temp.h5'
-
-SEED = 1
-np.random.seed(SEED)
-df = pd.read_pickle('data.p')
-
-dataset_size, x_columns, y_columns, dataset = dataframe_to_dataset_temp_precip(df)
-batch_size = 5
-epochs = 500
-
-def baseline_model():
-    model = keras.models.Sequential()
-    params = {
-        'kernel_initializer': 'lecun_uniform',
-        'bias_initializer': 'zeros',
-    }
-    model.add(keras.layers.Dense(x_columns, input_dim=x_columns, **params, activation='elu'))
-    model.add(keras.layers.Dense(6, **params, activation='relu'))
-    model.add(keras.layers.Dense(y_columns, **params))
-
-    model.compile(loss='mse', optimizer='adam', metrics=['mae'])
-    return model
-
-model = baseline_model()
-model.summary()
-
-dataset = dataset.shuffle(500)
-TRAIN_SIZE = int(dataset_size * 0.85)
-TEST_SIZE = dataset_size - TRAIN_SIZE
-(training, test) = (dataset.take(TRAIN_SIZE),
-                    dataset.skip(TRAIN_SIZE))
-training_batched = training.batch(batch_size).repeat()
-test_batched = test.batch(batch_size).repeat()
-
-logger.debug('Model dataset info: size=%s, train=%s, test=%s', dataset_size, TRAIN_SIZE, TEST_SIZE)
-
-# model.load_weights(CHECKPOINT)
-
-def predict():
-    columns = INPUTS
-
-    YEAR = 2000
-
-    print(columns)
-    print(df[0:batch_size])
-    inputs = df[columns].to_numpy()
-    inputs = normalize_ndarray(inputs, df[columns].to_numpy())
-    print(inputs[0:batch_size])
-
-    out_columns = []
-    for season in SEASONS:
-        out_columns += ['temp_{}_{}'.format(season, YEAR), 'precip_{}_{}'.format(season, YEAR)]
-
-    print(out_columns)
-
-    out = model.predict(inputs)
-    print(out)
-    print(df[out_columns][0:batch_size])
-    print(denormalize(out, df[out_columns].to_numpy()))
-
-def train():
-    tfb_callback = tf.keras.callbacks.TensorBoard(batch_size=batch_size, log_dir='temp_logs')
-    checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(filepath=CHECKPOINT, monitor='val_loss')
-
-    model.fit(training_batched,
-              batch_size=batch_size,
-              epochs=epochs,
-              steps_per_epoch=int(TRAIN_SIZE / batch_size),
-              validation_data=test_batched,
-              validation_steps=int(TEST_SIZE / batch_size),
-              callbacks=[tfb_callback, checkpoint_callback],
-              verbose=1)
-
-    model.save_weights(CHECKPOINT)
-
-# train()
-
-
-if __name__ == "__main__":
-    fire.Fire({ 'predict': predict, 'train': train })
@@ -105,7 +105,7 @@ def dataframe_to_dataset_temp_precip(df):
     tf_output = tf.cast(tf_output, tf.float32)

     logger.debug('dataset size: rows=%d, input_columns=%d, num_classes=%d', int(tf_inputs.shape[0]), input_columns, num_classes)
-    return int(tf_inputs.shape[0]), input_columns, num_classes, tf.data.Dataset.from_tensor_slices((tf_inputs, tf_output))
+    return int(tf_inputs.shape[0]), input_columns, num_classes, None, tf.data.Dataset.from_tensor_slices((tf_inputs, tf_output))


 flatten = lambda l: [item for sublist in l for item in sublist]
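
After this change every caller of dataframe_to_dataset_temp_precip unpacks five values instead of four. A sketch of the adjusted call site (it assumes the repo's utils module and the data.p pickle referenced elsewhere in this diff):

    import pandas as pd
    from utils import dataframe_to_dataset_temp_precip   # assumes the repo's utils is importable

    df = pd.read_pickle('data.p')                         # same pickle the script deleted above loaded
    # Note the extra slot (currently None) between num_classes and the dataset.
    dataset_size, x_columns, y_columns, _, dataset = dataframe_to_dataset_temp_precip(df)
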
@@ -7,32 +7,112 @@ import math
 from io import BytesIO
 import base64

-WIDTH = 900
-HEIGHT = 450
-RATIO = WIDTH / HEIGHT
-
-MOUNTAIN_SEA_DISTANCE = 50
-MOUNTAIN_SEA_THRESHOLD = 2
-MOUNTAIN_JAGGEDNESS = 1
+parameters = {
+    'width': {
+        'default': 900,
+        'type': 'int',
+    },
+    'height': {
+        'default': 450,
+        'type': 'int',
+    },
+    'mountain_ratio': {
+        'default': 0.3,
+        'type': 'float',
+        'min': 0,
+        'max': 1,
+        'step': 0.01
+    },
+    'sharpness': {
+        'default': 0.7,
+        'type': 'float',
+        'min': 0,
+        'max': 1,
+        'step': 0.01
+    },
+    'max_elevation': {
+        'default': 30,
+        'type': 'int',
+        'min': 0,
+        'max': 50,
+    },
+    'ground_noise': {
+        'default': 15,
+        'type': 'int',
+        'min': 0,
+        'max': 50,
+    },
+    'water_proportion': {
+        'default': 0.6,
+        'type': 'float',
+        'min': 0,
+        'max': 0.99,
+        'step': 0.01
+    },
+    'mountain_jaggedness': {
+        'default': 1,
+        'type': 'int',
+        'min': 0,
+        'max': 5,
+    },
+    'mountain_sea_distance': {
+        'default': 50,
+        'type': 'int',
+        'min': 0,
+        'max': 200,
+    },
+    'mountain_sea_threshold': {
+        'default': 2,
+        'type': 'int',
+        'min': 0,
+        'max': 5,
+    },
+    'water_level': {
+        'default': 0,
+        'type': 'int',
+    },
+    'mountain_area_elevation': {
+        'default': 0.4,
+        'type': 'float',
+        'min': 0,
+        'max': 1,
+        'step': 0.01
+    },
+    'mountain_area_elevation_n': {
+        'default': 5,
+        'type': 'int',
+        'min': 0,
+        'max': 15,
+    },
+    'mountain_area_elevation_area': {
+        'default': 10,
+        'type': 'int',
+        'min': 0,
+        'max': 25,
+    },
+    'continents': {
+        'default': 5,
+        'type': 'int',
+    },
+    'seed': {
+        'default': '',
+        'type': 'int',
+        'description': 'Leave empty for a random seed generated from the current timestamp.'
+    },
+}
+p = { k: parameters[k]['default'] for k in parameters }

 CONTINENT_MAX_TRIALS = 1e4
-MOUNTAIN_RATIO = 0.3
 SEA_COLOR = np.array((53, 179, 220, 255)) / 255

-SHARPNESS = 0.7
-WATER_LEVEL = 0
-MAX_ELEVATION = 30
-GROUND_NOISE = 15
-WATER_PROPORTION = 0.6
-GROUND_PROPORTION = 1 - WATER_PROPORTION

 DIRECTIONS = [(-1, -1), (-1, 0), (-1, 1), (1, 1), (1, 0), (1, -1), (0, -1), (0, 1)]

 def s(x):
     return -2 * x**3 + 3 * x**2

 def is_ground(value):
-    return value > WATER_LEVEL
+    return value > p['water_level']

 def in_range(p, m, size):
     x, y = p
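
The hunk above swaps module-level constants for a parameters spec plus a live value dict p, with overrides arriving later through generate_map(**kwargs) and p.update(kwargs). A trimmed, standalone sketch of that pattern (only two of the parameters are kept, names as in the hunk):

    parameters = {
        'width': {'default': 900, 'type': 'int'},
        'sharpness': {'default': 0.7, 'type': 'float'},
    }
    p = {k: parameters[k]['default'] for k in parameters}

    def generate(**kwargs):
        # Mirrors generate_map(**kwargs) further down: merge overrides into the shared dict.
        p.update(kwargs)
        return p['width'], p['sharpness']

    print(generate())                # (900, 0.7)  - the defaults
    print(generate(sharpness=0.5))   # (900, 0.5)  - the override also sticks in p for later calls
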
@@ -83,7 +163,7 @@ def continent_agent(ground, position, size):
         if not is_ground(ground[x, y]) and in_range((x, y), position, size):
             trials = 0
             size -= 1
-            ground[x, y] = np.random.randint(1, GROUND_NOISE)
+            ground[x, y] = np.random.randint(1, p['ground_noise'])
         else:
             trials += 1

@@ -91,22 +171,19 @@ def neighbours(ground, position, radius):
     x, y = position
     return ground[x-radius:x+radius+1, y-radius:y+radius+1]

-def away_from_sea(ground, position, radius=MOUNTAIN_SEA_DISTANCE):
+def away_from_sea(ground, position, radius=p['mountain_sea_distance']):
     ns = neighbours(ground, position, radius).flatten()
     sea = len([1 for x in ns if not is_ground(x)])

-    return sea < MOUNTAIN_SEA_THRESHOLD
+    return sea < p['mountain_sea_threshold']

-MOUNTAIN_AREA_ELEVATION = 0.4
-MOUNTAIN_AREA_ELEVATION_N = 5
-MOUNTAIN_AREA_ELEVATION_AREA = 10
-def random_elevate_agent(ground, position, height, size=MOUNTAIN_AREA_ELEVATION_N):
-    position = position + np.random.random_integers(-MOUNTAIN_AREA_ELEVATION_AREA, MOUNTAIN_AREA_ELEVATION_AREA, size=2)
+def random_elevate_agent(ground, position, height, size=p['mountain_area_elevation_n']):
+    position = position + np.random.random_integers(-p['mountain_area_elevation_area'], p['mountain_area_elevation_area'], size=2)

     for i in range(size):
         d = DIRECTIONS[np.random.randint(len(DIRECTIONS))]

-        change = height * MOUNTAIN_AREA_ELEVATION + np.random.randint(MOUNTAIN_JAGGEDNESS + 1)
+        change = height * p['mountain_area_elevation'] + np.random.randint(p['mountain_jaggedness'] + 1)
         new_index = bound_check(ground, position + np.array(d))

         if is_ground(ground[new_index]):
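
One subtlety in the new defaults above: a default such as radius=p['mountain_sea_distance'] is evaluated once, when the def statement runs, so a later p.update(...) from generate_map does not change it; only an explicitly passed argument reflects the updated value. A tiny standalone demonstration:

    p = {'mountain_sea_distance': 50}

    def away_from_sea_demo(radius=p['mountain_sea_distance']):
        # The default was captured when the function was defined.
        return radius

    p.update({'mountain_sea_distance': 10})
    print(away_from_sea_demo())                            # still 50
    print(away_from_sea_demo(p['mountain_sea_distance']))  # 10 when passed explicitly
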
@@ -118,21 +195,21 @@ def mountain_agent(ground, position):
         return

     x, y = position
-    height = np.random.randint(MAX_ELEVATION)
+    height = np.random.randint(p['max_elevation'])

     ground[x, y] = height

     last_height = height
     for i in range(1, height):
         for d in DIRECTIONS:
-            change = np.random.randint(MOUNTAIN_JAGGEDNESS + 1)
+            change = np.random.randint(p['mountain_jaggedness'] + 1)
             distance = np.array(d)*i
             new_index = bound_check(ground, position + distance)

             if is_ground(ground[new_index]):
                 ground[new_index] = last_height - change

-        last_height = last_height - MOUNTAIN_JAGGEDNESS
+        last_height = last_height - p['mountain_jaggedness']

     random_elevate_agent(ground, position, height)

@@ -141,13 +218,22 @@ def mountain_agent(ground, position):
 # def split_agent(ground, position, directions):

 def constant_filter(a):
-    if a[0] > (1 - SHARPNESS):
+    if a[0] > (1 - p['sharpness']):
         return max(1, a[0])
     return 0

-def generate_map(width=WIDTH, height=HEIGHT, continents=4):
+def generate_map(**kwargs):
+    plt.clf()
+
+    p.update(kwargs)
+
+    np.random.seed(p['seed'] or None)
+
+    width, height = p['width'], p['height']
+    continents = p['continents']
+
     ground = np.zeros((width, height))
-    ground_size = width * height * GROUND_PROPORTION
+    ground_size = width * height * (1 - p['water_proportion'])

     # position = (int(width / 2), int(height / 2))
     # ground_size = width * height * GROUND_PROPORTION
@@ -157,9 +243,9 @@ def generate_map(width=WIDTH, height=HEIGHT, continents=4):
         print(position)
         continent_agent(ground, position, size=ground_size)

-    ground = ndimage.gaussian_filter(ground, sigma=(1 - SHARPNESS) * 20)
+    ground = ndimage.gaussian_filter(ground, sigma=(1 - p['sharpness']) * 20)

-    for i in range(int(ground_size * MOUNTAIN_RATIO / MAX_ELEVATION**2)):
+    for i in range(int(ground_size * p['mountain_ratio'] / p['max_elevation']**2)):
         position = (np.random.randint(0, width), np.random.randint(0, height))
         mountain_agent(ground, position)

@@ -169,7 +255,7 @@ def generate_map(width=WIDTH, height=HEIGHT, continents=4):

     ground = ndimage.gaussian_filter(ground, sigma=4)
     ground = ndimage.generic_filter(ground, constant_filter, size=1)
-    print(np.min(ground), np.max(ground), MAX_ELEVATION)
+    print(np.min(ground), np.max(ground), p['max_elevation'])
     print(np.unique(ground))

     plt.imshow(ground.T, cmap=greys, norm=norm)
@@ -181,5 +267,5 @@ def generate_map(width=WIDTH, height=HEIGHT, continents=4):
     return figfile

 if __name__ == "__main__":
-    generate_map(WIDTH, HEIGHT)
+    generate_map()
     plt.show()
@@ -3,15 +3,26 @@ const mapSettings = document.getElementById('map-settings');
 const map = document.getElementById('map');
 const spinner = document.getElementById('spinner');

+function generate() {
+  spinner.classList.remove('d-none');
+  const formData = new FormData(mapSettings)
+  if (!formData.get('seed')) {
+    formData.set('seed', (new Date()).getTime() % 1e5);
+  }
+  const queryString = new URLSearchParams(formData).toString()
+  map.src = '/map?' + queryString;
+  map.classList.add('d-none');
+
+}
+
 mapSettings.addEventListener('submit', (e) => {
-  console.log('form submit');
   e.preventDefault();

-  spinner.classList.remove('d-none');
-  map.src = '/map?q=' + (new Date()).getTime();
-  map.classList.add('d-none');
+  generate()
 });

+generate()
+
 map.addEventListener('load', () => {
   spinner.classList.add('d-none');
   map.classList.remove('d-none');
@@ -10,5 +10,6 @@ aside {
   text-align: center;
   width: 300px;
   height: 100vh;
+  overflow-y: scroll;
 }

@@ -11,7 +11,7 @@
     <div class='row'>
       <main class='col d-flex justify-content-center align-items-center'>
         <!-- <canvas id='board'></canvas> -->
-        <img src='/map' id='map'>
+        <img src='' id='map'>

         <div class='spinner-border text-primary' role='status' id='spinner'>
           <span class='sr-only'>Loading...</span>
@@ -23,33 +23,18 @@

         <div class='panel px-4'>
           <form class='mt-5' id='map-settings'>
+            {% for k, v in parameters.items() %}
             <div class='form-group'>
-              <label name='preset'>Preset</label>
-              <select id='preset' name='preset' class='form-control'>
-                <option value='desert'>Desert</option>
-                <option value='forest'>Forest</option>
-              </select>
-            </div>
-
-            <div class='form-group'>
-              <label name='mean-temperature'>Mean Temperature</label>
-              <input id='mean-temperature' name='mean-temperature' type='range' min='-50' max='50' class='form-control'>
-            </div>
-
-            <div class='form-group'>
-              <label name='mean-precipitation'>Mean Precipitation</label>
-              <input id='mean-precipitation' name='mean-precipitation' type='range' min='-50' max='50' class='form-control'>
-            </div>
-
-            <div class='form-group'>
-              <label name='min-elevation'>Minimum Elevation</label>
-              <input id='min-elevation' name='min-elevation' type='range' min='-100' max='100' class='form-control'>
-            </div>
-
-            <div class='form-group'>
-              <label name='max-elevation'>Maximum Elevation</label>
-              <input id='max-elevation' name='max-elevation' type='range' min='-100' max='100' class='form-control'>
+              <label name='{{ k }}'>{{ k | replace('_', ' ') | title }}</label>
+              <input id='{{ k }}' name='{{ k }}'
+                     type="number"
+                     class='form-control'
+                     min='{{ v["min"] }}' value='{{ v["default"] }}' max='{{ v["max"] }}' step='{{ v["step"] }}'>
+              {% if v["description"] %}
+                <small class="form-text text-muted">{{ v["description"] }}</small>
+              {% endif %}
             </div>
+            {% endfor %}

             <button type="submit" class="btn btn-primary">Generate</button>
           </form>
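
This loop is the auto-generated form from the commit title: one number input per entry in parameters, with the field name matching the generate_map keyword it will eventually become. A standalone sketch of the same idea (requires the jinja2 package; the template string is trimmed to the essentials of the hunk above):

    from jinja2 import Template

    parameters = {
        'width': {'default': 900, 'type': 'int'},
        'sharpness': {'default': 0.7, 'type': 'float', 'min': 0, 'max': 1, 'step': 0.01},
    }

    tpl = Template("""{% for k, v in parameters.items() %}
    <label>{{ k | replace('_', ' ') | title }}</label>
    <input name='{{ k }}' type='number' value='{{ v["default"] }}'>
    {% endfor %}""")

    print(tpl.render(parameters=parameters))
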
@@ -1,13 +1,23 @@
-from flask import Flask, render_template, make_response, send_file
-from index import generate_map
+from flask import Flask, render_template, make_response, send_file, request
+from index import generate_map, parameters

 app = Flask(__name__)

 @app.route('/')
 def index():
-    return render_template('index.html')
+    return render_template('index.html', parameters=parameters)

+def parse(key, value):
+    t = parameters[key]['type']
+    if t == 'int':
+        return int(value)
+    elif t == 'float':
+        return float(value)
+    else:
+        return value
+
 @app.route('/map')
 def get_map():
-    res = send_file(generate_map(), mimetype='image/png')
+    params = { key: parse(key, request.args[key]) for key in request.args }
+    res = send_file(generate_map(**params), mimetype='image/png')
     return res
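
End to end, the submitted form reaches /map as a query string and parse coerces each field back to the type declared in parameters before generate_map(**params) is called. A standalone sketch of that flow with made-up query values:

    parameters = {
        'width': {'type': 'int'},
        'sharpness': {'type': 'float'},
        'seed': {'type': 'int'},
    }

    def parse(key, value):
        t = parameters[key]['type']
        if t == 'int':
            return int(value)
        elif t == 'float':
            return float(value)
        else:
            return value

    # e.g. a request like /map?width=600&sharpness=0.5&seed=12345 (values invented for the example)
    args = {'width': '600', 'sharpness': '0.5', 'seed': '12345'}
    params = {key: parse(key, args[key]) for key in args}
    print(params)   # {'width': 600, 'sharpness': 0.5, 'seed': 12345}, ready for generate_map(**params)
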