keras报错 : ValueError: logits and labels must have the same shape
本文记录在一步一步搭建文本情感分类器、将数据处理成向量并送入 MLP 模型训练时遇到的报错 ValueError: logits and labels must have the same shape ((None, 1) vs (None, 2)),下面依次说明问题背景、问题原因、解决办法与整体代码。
问题背景
在一步一步搭建 文本情感分类器的过程时,将数据处理成向量的形式;
之后送入到 MLP模型中进行拟合训练;
发生错误:
ValueError: logits and labels must have the same shape ((None, 1) vs (None, 2))
问题原因
原因:
- logits 和 labels 需要有相同的形状
也就是我们的标签和逻辑量不匹配;因此才会报错
解决办法
去掉“将数值标签转化为逻辑变量(one-hot)”的操作,即把下面这行注释掉:
## ytrain, ytest = to_categorical(ytrain), to_categorical(ytest)
原因是模型输出层只有 1 个 sigmoid 单元,binary_crossentropy 期望标签是形状为 (None, 1) 的 0/1 整数;而 to_categorical 会把标签变成形状为 (None, 2) 的 one-hot 矩阵,两者形状不一致,因此报错。隐去这行后标签形状与输出一致,错误即可消除。
整体代码
import numpy as np
import string
import re
from os import listdir
from collections import Counter
from nltk.corpus import stopwords
from tensorflow.keras.preprocessing.text import Tokenizer
# load doc into memory
def load_doc(filename):
    """Read a whole UTF-8 text file and return its contents as one string.

    Uses a context manager so the handle is always closed, even if
    reading raises — the original explicit open/close pair could leak
    the file descriptor on error.
    """
    with open(filename, 'r', encoding='utf-8') as file:
        return file.read()
# turn a doc into clean tokens
def clean_doc(doc):
    """Tokenize a raw review into a list of clean tokens.

    Steps: whitespace split, lowercase, strip punctuation characters,
    keep only purely alphabetic tokens, drop English stop words, and
    drop single-character tokens.
    """
    # split into tokens by white space and lowercase them
    tokens = [w.lower() for w in doc.split()]
    # prepare regex that matches any punctuation character
    re_punc = re.compile('[%s]' % re.escape(string.punctuation))
    # remove punctuation from each word
    tokens = [re_punc.sub('', w) for w in tokens]
    # remove remaining tokens that are not alphabetic
    tokens = [word for word in tokens if word.isalpha()]
    # filter out stop words (idiomatic `not in`, was `not w in`)
    stop_words = set(stopwords.words('english'))
    tokens = [w for w in tokens if w not in stop_words]
    # filter out short (single-character) tokens
    return [word for word in tokens if len(word) > 1]
######### Reviews to Lines of Tokens ########################
# load doc, clean and return line of tokens
def doc_to_line(filename, vocab):
    """Load one review file and return it as a single space-joined
    string containing only tokens present in *vocab*."""
    cleaned = clean_doc(load_doc(filename))
    kept = [token for token in cleaned if token in vocab]
    return ' '.join(kept)
# load all docs in a directory
def process_docs(directory, vocab, is_train):
    """Collect cleaned review lines from *directory*.

    Files whose name starts with 'cv9' form the test split; all other
    files form the training split. Only the split selected by
    *is_train* is returned.
    """
    lines = []
    for name in listdir(directory):
        in_test_split = name.startswith('cv9')
        # skip files that belong to the other split
        if is_train == in_test_split:
            continue
        path = directory + '/' + name
        lines.append(doc_to_line(path, vocab))
    return lines
# load and clean a dataset
def load_clean_dataset(vocab, is_train):
    """Load one split of the polarity dataset.

    Returns (docs, labels) where docs is negative reviews followed by
    positive reviews, and labels is a numpy array of 0 (neg) / 1 (pos).
    """
    neg = process_docs('./dataset/review_polarity/txt_sentoken/neg', vocab, is_train)
    pos = process_docs('./dataset/review_polarity/txt_sentoken/pos', vocab, is_train)
    labels = np.array([0] * len(neg) + [1] * len(pos))
    return neg + pos, labels
# fit a tokenizer
def create_tokenizer(lines):
    """Fit a Keras Tokenizer on the given lines and return it."""
    fitted = Tokenizer()
    fitted.fit_on_texts(lines)
    return fitted
# --- Script: load vocabulary and encode both splits ---------------------
# Load the pre-built vocabulary file as a set for O(1) membership tests.
vocab_filename = 'vocab.txt'
vocab = set(load_doc(vocab_filename).split())
# Load both splits; labels stay as integer 0/1 vectors (NOT one-hot),
# which is what the single-sigmoid model below expects.
train_docs, ytrain = load_clean_dataset(vocab, True)
test_docs, ytest = load_clean_dataset(vocab, False)
# Fit the tokenizer on training text only, then encode both splits as
# word-frequency matrices.
tokenizer = create_tokenizer(train_docs)
Xtrain = tokenizer.texts_to_matrix(train_docs, mode='freq')
Xtest = tokenizer.texts_to_matrix(test_docs, mode='freq')
print(Xtrain.shape, Xtest.shape)
print(type(Xtrain), type(ytrain))
from tensorflow.keras.models import *
from tensorflow.keras.layers import *
from tensorflow.keras.utils import plot_model, to_categorical
# define the model
def define_model(n_words):
    """Build and compile a binary-classification MLP over bag-of-words input.

    The output layer is a single sigmoid unit, so with
    binary_crossentropy the labels must be 0/1 integers of shape
    (None, 1) — this is why the labels must NOT be one-hot encoded.
    """
    model = Sequential([
        Dense(50, activation='relu', input_shape=(n_words,)),
        Dense(1, activation='sigmoid'),
    ])
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    # summarize defined model and save a diagram of it
    model.summary()
    plot_model(model, to_file='model.png', show_shapes=True)
    return model
# --- Script: build, train and evaluate the model ------------------------
# The input width is the vocabulary size of the encoded matrices.
model = define_model(Xtest.shape[1])
# fit network on the training split
model.fit(Xtrain, ytrain, epochs=10, verbose=2)
# evaluate on the held-out test split
loss, acc = model.evaluate(Xtest, ytest, verbose=0)
print('Test Accuracy: %f' % (acc*100))
更多推荐
已为社区贡献7条内容
所有评论(0)