diff --git a/checkpoints/b.hdf5 b/checkpoints/b.hdf5
index ef05303..0c24afd 100644
Binary files a/checkpoints/b.hdf5 and b/checkpoints/b.hdf5 differ
diff --git a/draw.py b/draw.py
index 0fe46a6..20a4d79 100644
--- a/draw.py
+++ b/draw.py
@@ -39,7 +39,7 @@ def draw(df, path=None):
     }
 
     for n in biome_numbers:
-        biomes[n] = MultiPoint(biomes[n]).buffer(1)
+        biomes[n] = MultiPoint(biomes[n]).buffer(0.5)
         # print(biomes[n])
         # legend = biome_names[n]
         if not hasattr(biomes[n], '__iter__'):
diff --git a/nn.py b/nn.py
index c10bab7..4f9f287 100644
--- a/nn.py
+++ b/nn.py
@@ -55,18 +55,22 @@ class Model():
         params = {
             'kernel_initializer': 'lecun_uniform',
             'bias_initializer': 'zeros',
+            # 'kernel_regularizer': keras.regularizers.l2(l=0.01)
         }
 
-        # dropout = keras.layersDropout(0.2, input_shape=[self.features])
-        self.model = keras.Sequential([
-            keras.layers.Dense(layers[0], activation=tf.nn.elu, input_shape=[self.features], **params)
+        dropout = [keras.layers.Dropout(0.1, input_shape=[self.features])]
+        self.model = keras.Sequential(dropout + [
+            keras.layers.Dense(layers[0], activation=tf.nn.elu, **params)
         ] + [
             keras.layers.Dense(n, activation=tf.nn.elu, **params) for n in layers[1:]
         ] + [
             keras.layers.Dense(self.output_size, activation=out_activation, **params)
         ])
 
-    def compile(self, loss='mse', metrics=['accuracy'], optimizer=tf.train.AdamOptimizer):
-        self.model.load_weights(self.path)
+    def compile(self, loss='mse', metrics=['accuracy'], optimizer=tf.train.AdamOptimizer, load_weights=True):
+        if load_weights:
+            self.model.load_weights(self.path)
+            print('loaded weights')
+
         optimizer = optimizer(self.learning_rate)
         self.model.compile(loss=loss,
@@ -81,12 +85,16 @@ class Model():
             verbose=1
         )
 
+    def evaluate_print(self):
+        loss, accuracy = self.evaluate()
+        print('Test evaluation: loss: {}, accuracy: {}'.format(loss, accuracy))
+
     def train(self):
         self.model.summary()
 
         checkpoint = keras.callbacks.ModelCheckpoint(self.path, monitor='acc', verbose=1, mode='max')
         tensorboard = keras.callbacks.TensorBoard(log_dir='./logs', update_freq='epoch')
-        # map_callback = keras.callbacks.LambdaCallback(on_epoch_end=self.map_callback)
+        map_callback = keras.callbacks.LambdaCallback(on_epoch_end=self.evaluate_print)
 
         self.model.fit(
             self.training,
@@ -101,11 +109,11 @@ class Model():
         return np.argmax(self.model.predict(a), axis=1)
 
 A = Model('a', epochs=2)
-B = Model('b', learning_rate=0.001, epochs=450)
+B = Model('b', learning_rate=0.001, epochs=20)
 
 def compile_b():
     B.prepare_dataset(df, dataframe_to_dataset_biomes)
-    B.create_model([32], tf.nn.softmax)
+    B.create_model([32, 32], tf.nn.softmax)
     B.compile(loss='sparse_categorical_crossentropy')
 
 def compile_a():
diff --git a/predict.py b/predict.py
index a2f2960..c620878 100644
--- a/predict.py
+++ b/predict.py
@@ -17,7 +17,7 @@ compile_b()
 
 for change in range(0, 1):
     print('TEMPERATURE MODIFICATION OF {}'.format(change))
-    inputs = ['latitude', 'longitude', 'elevation', 'distance_to_water']
+    inputs = ['elevation', 'distance_to_water', 'latitude']
 
     for season in SEASONS:
         inputs += [
@@ -25,8 +25,10 @@ for change in range(0, 1):
             'precip_{}_{}'.format(season, year)
         ]
 
+    print(inputs)
+
     # print(inputs)
-    frame = df[inputs]
+    frame = df[inputs + ['longitude']]
     # print(frame.head())
 
     for season in SEASONS:
@@ -38,7 +40,7 @@ for change in range(0, 1):
     for i, chunk in enumerate(chunker(frame, B.batch_size)):
         if chunk.shape[0] < B.batch_size:
             continue
-        input_data = normalize_ndarray(chunk.loc[:, chunk.columns != 'longitude'].values)
+        input_data = normalize_ndarray(chunk.loc[:, inputs].values)
 
         out = B.predict(input_data)
 
@@ -48,5 +50,4 @@ for change in range(0, 1):
         }, columns=columns)
         new_data = new_data.append(f)
-        print(new_data)
 
 draw(new_data)