Transfer Learning Slows Down Over Iterative Training

I have built a transfer learning model, but it slows down over iterative training.
In particular, the training and testing time per step grows roughly linearly
(from 4ms/step at the first testing to 2s440ms/step at the 26th testing, and it keeps growing).
I suspect the problem is memory related; is there a way to solve it?
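To check whether the slowdown comes from graph growth rather than data growth, I can count the ops in the default graph between iterations. This is just a diagnostic sketch I put together (tf.get_default_graph() is the TF 1.x API):

import tensorflow as tf

def graph_size():
    # If this count climbs on every iteration, nodes are accumulating in the
    # default TF 1.x graph and the per-step time will keep growing.
    return len(tf.get_default_graph().get_operations())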
Below is my whole code.



For reference:



function GenerateData: generates features (30000 examples, 41 features) and corresponding labels (30000 examples, 1 label)



function sequenceData: turns the generated data into one sequence per prediction point



function distinctData: splits the sequence data into training (80% of the first 300 sequences, then the remaining sequences on following runs), testing (20% of the available data), and prediction (one row of features × label) sets; a worked example of the arithmetic follows below
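As a worked example of the split arithmetic (assuming 300 available sequences, my own simplification):

# Split sizes for 300 available sequences with TEST_PERC = 0.2 (assumed example)
x_av_len = 300
TEST_LEN = int(x_av_len * 0.2) + 1   # = 61 test sequences
train_len = x_av_len - TEST_LEN      # = 239 training sequences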



functions minmaxData and minmaxUpdate: min-max scaling of each dataset, storing the running global min/max raw values
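In other words, every call applies the usual min-max formula against the running global extrema. A minimal standalone sketch of the idea (not the class method itself):

import numpy as np

def minmax_scale(arr, global_min, global_max):
    # x' = (x - min) / (max - min), with min/max accumulated over all batches seen so far
    return (arr - global_min) / (global_max - global_min)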



functions trainModel, testModel and prediction should be straightforward.



from keras.models import Sequential, Model
from keras.layers import TimeDistributed, Dense, Dropout, Activation, LSTM
from keras.utils import np_utils
from datetime import datetime
import tensorflow as tf
import pandas as pd
import numpy as np
import keras as ks
import os
import gc

class synthNeuralNetwork(object):
    DEBUG = False
    BATCH_SIZE = 50
    SEQ = 60
    RNN_SIZE = SEQ
    CLASSES = 6
    DROPOUT = True
    DROP_RATE = 0.2
    LEARNING_RATE = 0.001
    HIDDEN_UNITS = [41, 20, 10]
    FROZEN = True
    DNN_LYR = len(HIDDEN_UNITS)
    TEST_PERC = 0.2
    N_PER_WK = 5
    N_PER_MON = 20
    N_PER_YR = 250
    TOTAL_EPOCHS = 1
    ITM = ''
    LIST1 = []
    LIST2 = []
    LAST_TR = 0
    LAST_TS = 0
    MMAX = {'F_MX': np.array([]),
            'F_MN': np.array([]),
            'L_MX': np.array([]),
            'L_MN': np.array([])}
    tf.set_random_seed(777)

    def __init__(self,
                 BATCH_SIZE=50, SEQ=60, CLASSES=6,
                 LEARNING_RATE=0.001, DEBUG=False):
        if DEBUG:
            self.DEBUG = True

    def GenerateData(self):
        # Synthetic data: 30000 rows = 500 time points (idx1) x 60 items (idx2)
        x_raw = pd.DataFrame(np.random.rand(30000, 41))
        y_raw = pd.DataFrame(np.random.rand(30000,))
        index1 = [x for x in range(500)] * 60
        index2 = [x for x in range(60)] * 500
        index1.sort()
        idx_tup = list(zip(index1, index2))
        x_raw.index = pd.MultiIndex.from_tuples(idx_tup, names=['idx1', 'idx2'])
        y_raw.index = pd.MultiIndex.from_tuples(idx_tup, names=['idx1', 'idx2'])
        idx1_list = x_raw.index.get_level_values('idx1').unique().tolist()
        idx2_list = x_raw.index.get_level_values('idx2').unique().tolist()
        self.LIST1 = idx1_list
        self.LIST2 = idx2_list
        self.LAST_TR = self.LIST1[0]
        self.LAST_TS = self.LIST1[0]
        x_dict = {}
        y_dict = {}

        # one sub-frame per item; the main loop looks these up by idx2
        for x in idx2_list:
            x_dict[x] = x_raw.loc[x_raw.index.get_level_values('idx2') == x]
            y_dict[x] = y_raw.loc[y_raw.index.get_level_values('idx2') == x]
        return x_dict, y_dict

    def sequenceData(self, x_dict, y_dict, predPoint=None, train=None):
        x = []
        y = []
        if train == True:
            startPoint = self.LAST_TR
        else:
            startPoint = self.LAST_TS
        if startPoint < x_dict.index.get_level_values('idx1').tolist()[self.SEQ]:
            startPoint = x_dict.index.get_level_values('idx1').tolist()[self.SEQ]
        for i in [z for z in self.LIST1 if (z <= predPoint) & (z > startPoint)]:
            x_tmp = np.array(x_dict.loc[x_dict.index.get_level_values('idx1') <= i].iloc[-self.SEQ:].dropna())
            y_tmp = np.array(y_dict.loc[y_dict.index.get_level_values('idx1') <= i].iloc[-1].dropna())
            if len(x_tmp) == self.SEQ:
                x.append(x_tmp)
                y.append(y_tmp)
        return x, y

    def distinctData(self, x, y, TEST_PERC=TEST_PERC, train=None):
        unavailable = 20   # the most recent 20 sequences are held out entirely
        x_av = x[:-unavailable]
        y_av = y[:-unavailable]
        TEST_LEN = int(np.shape(x_av)[0] * TEST_PERC) + 1
        if train == True:
            f_train = np.array(x_av[:-TEST_LEN])
            l_train = np.array(y_av[:-TEST_LEN])
            f_test = np.array(x_av[-TEST_LEN:])
            l_test = np.array(y_av[-TEST_LEN:])
            self.LAST_TR = self.LIST1[self.LIST1.index(self.LAST_TR) + f_train.shape[0]]
            self.LAST_TS = self.LIST1[self.LIST1.index(self.LAST_TR) + f_test.shape[0]]
        else:
            f_train = np.array([])
            l_train = np.array([])
            f_test = np.array(x_av)
            l_test = np.array(y_av)
            self.LAST_TS = self.LIST1[self.LIST1.index(self.LAST_TS) + f_test.shape[0]]
        f_pred = np.array(x[-1])
        return f_train, l_train, f_test, l_test, f_pred

    def minmaxUpdate(self, f_arr, l_arr):
        # fold this batch's extrema into the running global extrema
        f_mx = f_arr.max(axis=0).max(axis=0)
        f_mn = f_arr.min(axis=0).min(axis=0)
        l_mx = l_arr.max(axis=0)
        l_mn = l_arr.min(axis=0)
        if len(self.MMAX['F_MX']) != 0:
            f_mx = np.concatenate((f_mx, self.MMAX['F_MX'])).reshape((-1, np.shape(f_arr)[-1])).max(axis=0)
            f_mn = np.concatenate((f_mn, self.MMAX['F_MN'])).reshape((-1, np.shape(f_arr)[-1])).min(axis=0)
            l_mx = np.concatenate((l_mx, self.MMAX['L_MX'])).reshape((-1, np.shape(l_arr)[-1])).max(axis=0)
            l_mn = np.concatenate((l_mn, self.MMAX['L_MN'])).reshape((-1, np.shape(l_arr)[-1])).min(axis=0)
        self.MMAX['F_MX'] = f_mx
        self.MMAX['F_MN'] = f_mn
        self.MMAX['L_MX'] = l_mx
        self.MMAX['L_MN'] = l_mn

    def minmaxData(self, f_train, l_train, f_test, l_test, f_pred, TOTAL=True, train=None):
        def classvalues(label):
            # bucket each scaled label in [0,1] into one of CLASSES classes
            shape = np.shape(label)
            labels = []
            for x in np.squeeze(np.reshape(label, (1, -1))):
                t = 0
                for y in range(0, self.CLASSES):
                    if y / self.CLASSES <= x:
                        t = y
                    else:
                        break
                labels.append(t)
            labels = np.reshape(labels, shape)
            return labels
        if train == True:
            self.minmaxUpdate(f_train, l_train)
            f_gp = self.MMAX['F_MX'] - self.MMAX['F_MN']
            l_gp = self.MMAX['L_MX'] - self.MMAX['L_MN']
            f_train = (f_train - self.MMAX['F_MN']) / f_gp
            l_train = (l_train - self.MMAX['L_MN']) / l_gp
            l_train = classvalues(l_train)
        else:
            self.minmaxUpdate(f_test, l_test)
            f_gp = self.MMAX['F_MX'] - self.MMAX['F_MN']
            l_gp = self.MMAX['L_MX'] - self.MMAX['L_MN']
            f_test = (f_test - self.MMAX['F_MN']) / f_gp
            l_test = (l_test - self.MMAX['L_MN']) / l_gp
            l_test = classvalues(l_test)
        f_pred = (f_pred - self.MMAX['F_MN']) / f_gp
        return f_train, l_train, f_test, l_test, f_pred

    def trainModel(self, f_train, l_train, TOTAL=True):
        len_feat = np.shape(f_train)[-1]
        total_epochs = self.TOTAL_EPOCHS
        drop_rate = self.DROP_RATE

        l_train = np_utils.to_categorical(l_train, num_classes=self.CLASSES)
        model = Sequential()

        if TOTAL:
            # initial training: build the full stack from scratch
            for i in range(len(self.HIDDEN_UNITS)):
                model.add(TimeDistributed(Dense(self.HIDDEN_UNITS[i], activation='relu',
                                                kernel_initializer='glorot_normal'),
                                          input_shape=(self.SEQ, len_feat)))
                if self.DROPOUT == True:
                    model.add(Dropout(drop_rate))
            model.add(LSTM(self.CLASSES))
            model.add(Activation('softmax'))
            model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
            model.fit(f_train, l_train, batch_size=self.BATCH_SIZE, epochs=total_epochs, verbose=1)
            ks.models.save_model(model, './snn_model/snn_model_%s_%s.h5' % (self.ITM, self.NUM))
            ra = model.evaluate(f_train, l_train)[1]

        else:
            # transfer: reload the saved model, freeze the dense stack, retrain a new LSTM head
            bs_model = ks.models.load_model('./snn_model/snn_model_%s_%s.h5' % (self.ITM, self.NUM))
            for i in range(len(self.HIDDEN_UNITS) * int(self.DROPOUT + 1)):
                model.add(bs_model.layers[i])
            if self.FROZEN == True:
                for layer in model.layers:
                    layer.trainable = False
            model_tr = model.output
            model_tr = LSTM(self.CLASSES)(model_tr)
            prediction = Activation('softmax')(model_tr)
            model_final = Model(inputs=model.input, outputs=prediction)
            model_final.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
            model_final.fit(f_train, l_train, batch_size=self.BATCH_SIZE,
                            epochs=total_epochs, verbose=1)
            model_final.save('./snn_model/snn_model_%s_%s.h5' % (self.ITM, self.NUM))
            ra = model_final.evaluate(f_train, l_train)[1]
        print("Train Accuracy:", ra)
        return ra

    def testModel(self, f_test, l_test):
        l_test = np_utils.to_categorical(l_test, num_classes=self.CLASSES)
        model = ks.models.load_model('./snn_model/snn_model_%s_%s.h5' % (self.ITM, self.NUM))
        [co, ta] = model.evaluate(f_test, l_test)
        print("Test Accuracy:", ta)
        print("Test loss:", co)
        return ta, co

    def prediction(self, f_pred):
        model = ks.models.load_model('./snn_model/snn_model_%s_%s.h5' % (self.ITM, self.NUM))
        (a, b) = f_pred.shape
        prediction = model.predict([f_pred.reshape(-1, a, b)])
        arg = prediction.argmax(axis=-1)[0]
        pre = prediction[0]
        print("Prediction Class:", arg, 'with percentage', pre[arg])
        return arg, pre

if __name__ == '__main__':
    snn = synthNeuralNetwork()
    snn.NUM = '005'
    snn.LBL = 0
    snn.TOTAL_EPOCHS = 10
    snn.FROZEN = True
    sPoint = 200
    os.makedirs('./snn_model', exist_ok=True)  # make sure the save directory exists
    x_dict, y_dict = snn.GenerateData()
    predPointList = [z for z in snn.LIST1[::5] if z >= sPoint]
    for x in snn.LIST2:
        gc.collect()
        snn.ITM = x
        for y in predPointList:
            if (y == predPointList[0]):
                total = True
            else:
                total = False
            if (y % 60 == 0) | (total == True):
                train = True
            else:
                train = False
            x_seq, y_seq = snn.sequenceData(x_dict[x], y_dict[x], predPoint=y, train=train)
            if len(x_seq) == 0:
                continue
            f_train, l_train, f_test, l_test, f_pred = snn.distinctData(x_seq, y_seq, train=train)
            f_train, l_train, f_test, l_test, f_pred = snn.minmaxData(f_train, l_train, f_test, l_test, f_pred, train=train)
            if (train == True):
                # a fresh Session per (re)training run
                tf.Session().run([tf.global_variables_initializer(), tf.local_variables_initializer()])
                ra = snn.trainModel(f_train, l_train, TOTAL=total)
            ta, co = snn.testModel(f_test, l_test)
            arg, pre = snn.prediction(f_pred)
            print('\n', x, y, datetime.now().strftime('%Y-%m-%d %H:%M:%S'), '\n')
            del x_seq, y_seq, f_train, l_train, f_test, l_test
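
One thing I suspect: every ks.models.load_model / save_model call adds new nodes to the same default TensorFlow graph, so each iteration has a bigger graph to run. A minimal sketch of the workaround I'm considering, assuming keras.backend.clear_session() is the right tool here (reload_model is a hypothetical helper, not part of my code above):

import tensorflow as tf
import keras as ks
from keras import backend as K

def reload_model(path):
    # Drop the accumulated graph/session before loading, so repeated
    # load_model/fit cycles don't keep growing the default graph.
    K.clear_session()
    tf.set_random_seed(777)  # re-seed the fresh graph
    return ks.models.load_model(path)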









Tags: python, tensorflow, transfer-learning





