import numpy as np
from PIL import Image
from keras.models import Model
from keras.layers import Input, UpSampling2D, Conv2D, concatenate
# This is the VQ-VAE model from "Neural Discrete Representation Learning"
# https://arxiv.org/abs/1711.00937
# by Aäron van den Oord, Oriol Vinyals, Koray Kavukcuoglu (Google DeepMind),
# ported to Keras by @Ophirblum
class Encoder:
    """Convolutional encoder for a VQ-VAE.

    Maps an input image to a continuous latent feature map z_e(x) via three
    stride-2 convolutions (8x spatial downsampling) followed by a linear
    1-stride projection to ``latent_dim`` channels.

    NOTE(review): ``num_embeddings`` and ``commitment_cost`` are stored but
    not used anywhere in this class — presumably they are consumed by a
    separate vector-quantization layer elsewhere in the model; confirm
    against the rest of the file.
    """

    def __init__(self, input_shape, latent_dim, num_embeddings, commitment_cost):
        """Store hyperparameters; the Keras model is created lazily by build().

        Args:
            input_shape: shape of a single input image, e.g. (H, W, C).
            latent_dim: number of channels of the latent feature map.
            num_embeddings: codebook size (kept for the VQ layer, unused here).
            commitment_cost: VQ commitment-loss weight (kept, unused here).
        """
        self.input_shape = input_shape
        self.latent_dim = latent_dim
        self.num_embeddings = num_embeddings
        self.commitment_cost = commitment_cost
        # Populated by build(); encode() refuses to run until then.
        self.encoder = None

    def build(self):
        """Construct the encoder network and return the Keras Model.

        Returns:
            The built ``keras.models.Model`` (also stored in ``self.encoder``).
        """
        x = Input(shape=self.input_shape, name='encoder_input')
        # Downsampling path: each stride-2 conv halves H and W.
        h = Conv2D(64, 4, strides=2, activation='relu', padding='same')(x)
        h = Conv2D(128, 4, strides=2, activation='relu', padding='same')(h)
        h = Conv2D(256, 4, strides=2, activation='relu', padding='same')(h)
        # Latent space: linear activation so z_e(x) is an unbounded embedding
        # suitable for nearest-neighbour lookup in the VQ codebook.
        z = Conv2D(self.latent_dim, 4, strides=1, activation='linear', padding='same')(h)
        self.encoder = Model(x, z)
        return self.encoder

    def encode(self, x):
        """Run the encoder on a batch of images and return z_e(x).

        Raises:
            RuntimeError: if build() has not been called yet. (An ``assert``
            was used originally, but asserts are stripped under ``python -O``
            and would let a None model through.)
        """
        if self.encoder is None:
            raise RuntimeError("build the encoder first")
        return self.encoder.predict(x)