注册 登录
编程论坛 Python论坛

python中使用TensorFlow和Keras跑深度学习

孤独的豚鼠 发布于 2023-10-22 15:54, 302 次点击
在使用github上下载的代码时出现这样的问题:
ValueError: Tensor-typed variable initializers must either be wrapped in an init_scope or callable (e.g., `tf.Variable(lambda : tf.truncated_normal([10, 40]))`) when building functions. Please file a feature request if this restriction inconveniences you.

这个是用别人封装好的简单循环单元(Simple Recurrent Unit,SRU)跑出来的代码,所以不太清楚哪里有问题
程序代码:

# 调用库
from __future__ import print_function
from keras.preprocessing import sequence
from keras.models import Model
from keras.layers import Dense, Embedding, Input
from keras.layers import LSTM
from keras.datasets import imdb
from sru import SRU

# Hyperparameters
max_features = 20000  # vocabulary size: keep only the top-N most frequent words
maxlen = 80  # cut texts after this number of words (among top max_features most common words)
batch_size = 128
depth = 1  # number of intermediate (stacked) SRU layers in the network

# Load the IMDB sentiment dataset (reviews come pre-tokenized as integer word indices)
print('Loading data...')
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_features)
print(len(x_train), 'train sequences')
print(len(x_test), 'test sequences')

# Pad/truncate every review to a fixed length (maxlen) so batches are rectangular.
# NOTE: the original comment claimed this "splits train/test" — it does not;
# the split already happened in imdb.load_data above.
print('Pad sequences (samples x time)')
x_train = sequence.pad_sequences(x_train, maxlen=maxlen)
x_test = sequence.pad_sequences(x_test, maxlen=maxlen)
print('x_train shape:', x_train.shape)
print('x_test shape:', x_test.shape)

# Build the model with the Keras functional API:
# Embedding -> (depth-1 stacked SRU layers) -> final SRU -> sigmoid classifier.
print('Build model...')
ip = Input(shape=(maxlen,))
embed = Embedding(max_features, 128)(ip)

prev_input = embed
hidden_states = []
if depth > 1:  # stack intermediate SRU layers; each returns the full sequence
    for i in range(depth - 1):
        h, h_final, c_final = SRU(128, dropout=0.0, recurrent_dropout=0.0,
                                  return_sequences=True, return_state=True,
                                  unroll=True)(prev_input)
        prev_input = h          # feed this layer's output sequence to the next layer
        hidden_states.append(c_final)

# Final SRU returns only its last output, which feeds the binary classifier head.
output_layer = SRU(128, dropout=0.0, recurrent_dropout=0.0, unroll=True)(prev_input)
op = Dense(1, activation='sigmoid')(output_layer)

# BUG FIX: the original code called `model.add(SRU(...))` here, BEFORE `model`
# was defined (the `model = Sequential()` line was commented out), raising a
# NameError. With the functional API the SRU layer is already wired in above,
# so we simply construct the Model from the input/output tensors.
model = Model(inputs=ip, outputs=op)
model.summary()

# Compile the model.
# BUG FIX: the original snippet was garbled — the comment text
# "try using different optimizers and different optimizer configs" had leaked
# into the code as a bare statement and the `model.compile` call itself was
# missing, producing a SyntaxError. Reconstructed the standard compile call.
# try using different optimizers and different optimizer configs
model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

# Train the model, evaluating on the held-out test split after every epoch.
print('Train...')
model.fit(x_train, y_train,
          batch_size=batch_size,
          epochs=100,
          validation_data=(x_test, y_test))
0 回复
1