Deep Learning Line by Line: the vae-npvc Network, TensorFlow Version (vae.py)

Source code: vae-npvc

Paper: Voice Conversion from Non-parallel Corpora Using Variational Auto-encoder

```python
class ConvVAE(object):
    def __init__(self, arch, is_training=False):
        '''
        Variational auto-encoder implemented in 2D convolutional neural nets
        Input:
            `arch`: network architecture (`dict`)
            `is_training`: (unused now) it was kept for historical reasons (for `BatchNorm`)
        '''
        self.arch = arch
        self._sanity_check()  # check the config: #strides == #kernels == #output layers
        self.is_training = is_training  # the network defaults to non-training mode

        with tf.name_scope('SpeakerRepr'):
            self.y_emb = self._l2_regularized_embedding(
                self.arch['y_dim'],
                self.arch['z_dim'],
                'y_embedding')  # an L2-regularized speaker embedding table

        self._generate = tf.make_template(
            'Generator',
            self._generator)

        self._encode = tf.make_template(
            'Encoder',
            self._encoder)  # this merely wraps the function, so no arguments are passed here

        self.generate = self.decode  # for VAE-GAN extension
```

The `_sanity_check` function:

```python
def _sanity_check(self):
    for net in ['encoder', 'generator']:
        assert len(self.arch[net]['output']) == len(self.arch[net]['kernel']) == len(self.arch[net]['stride'])
```

This is the configuration stored in `arch`:

```python
{'y_emb_dim': 128,
 'mode': 'VAWGAN',
 'encoder': {'stride': [[3, 1], [3, 1], [3, 1], [3, 1], [3, 1]],
             'l2-reg': 1e-06,
             'output': [16, 32, 64, 128, 256],
             'kernel': [[7, 1], [7, 1], [7, 1], [7, 1], [7, 1]]},
 'discriminator': {'l2-reg': 1e-06, 'output': [16, 32, 64], 'feature_layer': 1,
                   'stride': [[3, 1], [3, 1], [3, 1]], 'merge_dim': 1024,
                   'kernel': [[7, 1], [7, 1], [115, 1]]},
 'training': {'datadir': ['./dataset/vcc2016/bin/*/SF1/100*.bin',
                          './dataset/vcc2016/bin/*/TM3/100*.bin'],
              'nIterD': 5, 'alpha': 50.0, 'beta1': 0.5, 'lambda': 10, 'beta2': 0.999,
              'batch_size': 16, 'lr': 0.0001, 'epoch': 200, 'max_iter': 200000},
 'z_dim': 128,
 'hwc': [513, 1, 1],
 'generator': {'l2-reg': 1e-06, 'output': [32, 16, 8, 1],
               'stride': [[3, 1], [3, 1], [3, 1], [1, 1]],
               'hwc': [19, 1, 81], 'merge_dim': 171,
               'kernel': [[9, 1], [7, 1], [7, 1], [1025, 1]]},
 'y_dim': 10}
```

The `tf.make_template` wrappers above make every subsequent call to the encoder and generator share the same weights during training.
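A minimal sketch of that sharing behavior (the toy network and names here are hypothetical; TF 1.x API):

```python
import tensorflow as tf

def toy_net(x):
    return tf.layers.dense(x, 8, name='fc')

encode = tf.make_template('Encoder', toy_net)
a = encode(tf.zeros([1, 4]))  # first call creates the variables under scope 'Encoder'
b = encode(tf.ones([1, 4]))   # second call reuses those same variables
```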

```python
def _generator(self, z, y, is_training=None):
    net = self.arch['generator']  # network config
    h, w, c = net['hwc']  # hwc: [19, 1, 81]

    if y is not None:
        y = tf.nn.embedding_lookup(self.y_emb, y)  # companion op to the L2-regularized embedding table
        x = self._merge([z, y], h * w * c)
    else:
        x = z

    x = tf.reshape(x, [-1, c, h, w])  # channel first
    for i, (o, k, s) in enumerate(zip(net['output'], net['kernel'], net['stride'])):
        x = tf.layers.conv2d_transpose(x, o, k, s,
            padding='same',
            data_format='channels_first',
        )
        if i < len(net['output']) - 1:
            x = Layernorm(x, [1, 2, 3], 'ConvT-LN{}'.format(i))
            x = lrelu(x)
    return x
```

  1. Process the input: look up the speaker embedding for `y` and merge it with `z`.
  2. Stacked transposed convolutions, each followed by layer normalization and `lrelu` (the last layer is neither normalized nor activated); see the shape check below.
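A quick sanity check on the generator's output shape (a sketch of the arithmetic; with `padding='same'`, each transposed conv multiplies the height by its stride):

```python
# Height progression from hwc = [19, 1, 81] through strides [[3,1], [3,1], [3,1], [1,1]]:
h = 19
for s in [3, 3, 3, 1]:
    h *= s
print(h)  # 513, matching the spectral frame size in the top-level hwc: [513, 1, 1]
```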

```python
def _merge(self, var_list, fan_out, l2_reg=1e-6):
    x = 0.
    with slim.arg_scope(
        [slim.fully_connected],
        num_outputs=fan_out,
        weights_regularizer=slim.l2_regularizer(l2_reg),
        normalizer_fn=None,
        activation_fn=None):
        for var in var_list:
            x = x + slim.fully_connected(var)
    return slim.bias_add(x)
```

Another clever trick: `_merge` feeds each variable (the latent code `z` and the label embedding `y`) through its own fully connected layer and sums the results, fusing the label with the content.
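In effect, with one weight matrix per input and a single shared bias, `_merge` computes:

$$x = W_z z + W_y y + b$$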

````python
def Layernorm(x, axis, name):
    '''
    Layer normalization (Ba, 2016)
    J: Z-normalization using all nodes of the layer on a per-sample basis.

    Input:
        `x`: channel_first/NCHW format! (or fully-connected)
        `axis`: list
        `name`: must be assigned

    Example:
        ```python
        axis = [1, 2, 3]
        x = tf.random_normal([64, 3, 10, 10])
        name = 'D_layernorm'
        ```
    Return:
        (x - u)/s * scale + offset

    Source:
        https://github.com/igul222/improved_wgan_training/blob/master/tflib/ops/layernorm.py
    '''
    mean, var = tf.nn.moments(x, axis, keep_dims=True)
    n_neurons = x.get_shape().as_list()[axis[0]]
    offset = tf.get_variable(
        name + '.offset',
        shape=[n_neurons] + [1 for _ in range(len(axis) - 1)],
        initializer=tf.zeros_initializer
    )
    scale = tf.get_variable(
        name + '.scale',
        shape=[n_neurons] + [1 for _ in range(len(axis) - 1)],
        initializer=tf.ones_initializer
    )
    return tf.nn.batch_normalization(x, mean, var, offset, scale, 1e-5)
````

This is layer normalization: each sample is normalized across the given axes, then scaled and shifted by learned per-channel parameters.
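Per sample, with the mean $\mu$ and variance $\sigma^2$ taken over the given axes, it computes:

$$\mathrm{LN}(x) = \frac{x - \mu}{\sqrt{\sigma^2 + \epsilon}} \cdot \gamma + \beta, \qquad \epsilon = 10^{-5}$$

where $\gamma$ is the learned `scale` and $\beta$ the learned `offset`.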

```python
def lrelu(x, leak=0.02, name="lrelu"):
    '''Leaky ReLU'''
    return tf.maximum(x, leak * x, name=name)
```

The activation function.
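Written as a formula, with the default leak of 0.02:

$$\mathrm{lrelu}(x) = \max(x,\ 0.02\,x)$$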

```python
def _encoder(self, x, is_training=None):
    net = self.arch['encoder']
    for i, (o, k, s) in enumerate(zip(net['output'], net['kernel'], net['stride'])):
        x = conv2d_nchw_layernorm(
            x, o, k, s, lrelu,
            name='Conv2d-{}'.format(i)
        )
    x = slim.flatten(x)
    z_mu = tf.layers.dense(x, self.arch['z_dim'])
    z_lv = tf.layers.dense(x, self.arch['z_dim'])
    return z_mu, z_lv
```

```python
def conv2d_nchw_layernorm(x, o, k, s, activation, name):
    '''
    Input:
        `x`: input in NCHW format
        `o`: num of output nodes
        `k`: kernel size
        `s`: stride
    '''
    with tf.variable_scope(name):
        x = tf.layers.conv2d(
            inputs=x,
            filters=o,
            kernel_size=k,
            strides=s,
            padding='same',
            data_format='channels_first',
            name=name,
        )
        x = Layernorm(x, [1, 2, 3], 'layernorm')
        return activation(x)
```

Convolution, followed by layer normalization and the activation.
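The encoder's shapes can be sanity-checked the same way as the generator's (a sketch; with `padding='same'`, each strided conv divides the height by its stride, rounding up):

```python
import math

# Height progression from hwc = [513, 1, 1] through five stride-[3, 1] convs:
h = 513
for s in [3, 3, 3, 3, 3]:
    h = math.ceil(h / s)
print(h)  # 3, so the flattened encoder output has 256 * 3 * 1 = 768 units
```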

```python
def loss(self, x, y):  # y is the label; x is the input frame
    with tf.name_scope('loss'):
        z_mu, z_lv = self._encode(x)
        z = GaussianSampleLayer(z_mu, z_lv)
        xh = self._generate(z, y)

        D_KL = tf.reduce_mean(
            GaussianKLD(
                slim.flatten(z_mu),
                slim.flatten(z_lv),
                slim.flatten(tf.zeros_like(z_mu)),
                slim.flatten(tf.zeros_like(z_lv)),
            )
        )
        logPx = tf.reduce_mean(
            GaussianLogDensity(
                slim.flatten(x),
                slim.flatten(xh),
                tf.zeros_like(slim.flatten(xh))),
        )  # see the paper for the full derivation

        loss = dict()
        loss['G'] = - logPx + D_KL
        loss['D_KL'] = D_KL
        loss['logP'] = logPx

        tf.summary.scalar('KL-div', D_KL)
        tf.summary.scalar('logPx', logPx)

        tf.summary.histogram('xh', xh)
        tf.summary.histogram('x', x)
        return loss
```
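The generator loss is the negative evidence lower bound (ELBO) of the VAE,

$$\mathcal{L}_G = -\,\mathbb{E}_{q(z|x)}\big[\log p(x \mid z, y)\big] + D_{\mathrm{KL}}\big(q(z \mid x) \,\|\, \mathcal{N}(0, I)\big),$$

where the expectation is approximated with the single sample drawn by `GaussianSampleLayer`.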

The `GaussianSampleLayer` function:

```python
def GaussianSampleLayer(z_mu, z_lv, name='GaussianSampleLayer'):
    with tf.name_scope(name):
        eps = tf.random_normal(tf.shape(z_mu))
        std = tf.sqrt(tf.exp(z_lv))
        return tf.add(z_mu, tf.multiply(eps, std))
```

This ties directly into how a VAE works: the encoder does not output a code for x, but the mean and log-variance of a Gaussian. At training time, each sample thus corresponds to a mean plus its own spread, and we draw one sample from that distribution to feed the decoder.
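This is the reparameterization trick: writing the encoder output `z_lv` as $\log\sigma^2$,

$$z = \mu + \sigma \odot \epsilon, \qquad \sigma = \sqrt{\exp(\log\sigma^2)}, \qquad \epsilon \sim \mathcal{N}(0, I)$$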

The `GaussianKLD` function:

```python
def GaussianKLD(mu1, lv1, mu2, lv2):
    '''Kullback-Leibler divergence of two Gaussians
    *Assuming that each dimension is independent
    mu: mean
    lv: log variance
    Equation: http://stats.stackexchange.com/questions/7440/kl-divergence-between-two-univariate-gaussians
    '''
    with tf.name_scope('GaussianKLD'):
        v1 = tf.exp(lv1)
        v2 = tf.exp(lv2)
        mu_diff_sq = tf.square(mu1 - mu2)
        dimwise_kld = .5 * (
            (lv2 - lv1) + tf.div(v1 + mu_diff_sq, v2 + EPSILON) - 1.)
        return tf.reduce_sum(dimwise_kld, -1)
```

This measures how close the Gaussian produced by the encoder (its mean and variance) is to the standard normal prior.
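For reference, the per-dimension closed form the code implements (with `EPSILON` added to the denominator for numerical stability) is

$$D_{\mathrm{KL}}\big(\mathcal{N}(\mu_1, v_1) \,\|\, \mathcal{N}(\mu_2, v_2)\big) = \frac{1}{2}\left[\log\frac{v_2}{v_1} + \frac{v_1 + (\mu_1 - \mu_2)^2}{v_2} - 1\right],$$

summed over dimensions; in `loss` the second Gaussian is the standard normal prior ($\mu_2 = 0$, $v_2 = 1$).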

The `GaussianLogDensity` function:

```python
def GaussianLogDensity(x, mu, log_var, name='GaussianLogDensity'):
    with tf.name_scope(name):
        c = tf.log(2. * PI)
        var = tf.exp(log_var)
        x_mu2 = tf.square(x - mu)  # [Issue] not sure the dim works or not?
        x_mu2_over_var = tf.div(x_mu2, var + EPSILON)
        log_prob = -0.5 * (c + log_var + x_mu2_over_var)
        log_prob = tf.reduce_sum(log_prob, -1)  # keep_dims=True,
        return log_prob
```

This measures the discrepancy between the generated mean `mu` (the reconstruction `xh`) and the input `x` under a Gaussian likelihood; in `loss` the log-variance is fixed to zero, i.e. unit variance. Note that the label `y` is injected only at generation time; it never appears directly in the loss terms.
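The quantity being summed is the standard diagonal-Gaussian log-likelihood:

$$\log \mathcal{N}(x; \mu, \sigma^2) = -\frac{1}{2}\sum_d \left[\log 2\pi + \log\sigma_d^2 + \frac{(x_d - \mu_d)^2}{\sigma_d^2}\right]$$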

The `_l2_regularized_embedding` function:

```python
def _l2_regularized_embedding(self, n_class, h_dim, scope_name, var_name='y_emb'):
    with tf.variable_scope(scope_name):
        embeddings = tf.get_variable(
            name=var_name,
            shape=[n_class, h_dim],
            regularizer=slim.l2_regularizer(1e-6))
        return embeddings
```

```python
self.y_emb = self._l2_regularized_embedding(
    self.arch['y_dim'],
    self.arch['z_dim'],
    'y_embedding')
```

```python
y = tf.nn.embedding_lookup(self.y_emb, y)
```

An explanation of this clever trick:

  1. `y_emb`: a trainable matrix of shape `[self.arch['y_dim'], self.arch['z_dim']]`, randomly initialized and L2-regularized.
  2. `tf.nn.embedding_lookup(self.y_emb, y)`: for y = 0 this takes out the first row, and so on (see the sketch below).
  3. One might worry that picking a row out of a pile of random numbers throws the label information away, yet it demonstrably works. The point is that the values are only random at initialization: the table is trainable, so each row's values are learned during training; a fully connected layer then fuses the label embedding with the content into the target dimensionality.
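A minimal sketch of the lookup itself (the embedding values here are hypothetical):

```python
import tensorflow as tf

y_emb = tf.constant([[0.1, 0.2],   # row for speaker 0
                     [0.3, 0.4],   # row for speaker 1
                     [0.5, 0.6]])  # row for speaker 2
y = tf.constant([0, 2])
rows = tf.nn.embedding_lookup(y_emb, y)  # -> [[0.1, 0.2], [0.5, 0.6]]
```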
