
I am working on a time series project using the Long Short-Term Memory (LSTM) algorithm, and I need to visualize the predicted data.

I need to predict the last column of out_meteo.csv, using the other columns as features. At the end of the algorithm I cannot plot the correct values of my data: it gives me unrealistic, very small values, which I think is related to MinMaxScaler and its inverse_transform method.
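
As background for the scaling issue described above, the snippet below is only a minimal sketch with synthetic data (not the actual out_meteo.csv) of one common pattern: fit one scaler on the feature columns and a separate one on the target column, so that single-column predictions can be inverted on their own.

import numpy
from sklearn.preprocessing import MinMaxScaler

# Synthetic stand-in for a multi-column CSV: 100 rows, 6 columns, last column is the target.
data = numpy.random.rand(100, 6).astype('float32') * 100.0

feature_scaler = MinMaxScaler(feature_range=(0, 1))
target_scaler = MinMaxScaler(feature_range=(0, 1))

X_scaled = feature_scaler.fit_transform(data[:, :-1])  # scale the feature columns together
y_scaled = target_scaler.fit_transform(data[:, -1:])   # scale the target column on its own

# A prediction in scaled space (faked here with a slice of y_scaled) can be inverted
# with target_scaler alone and comes back in the target's original units.
fake_prediction = y_scaled[:5]
print(target_scaler.inverse_transform(fake_prediction))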

Here is the Python code I used to predict my variable without using the other features (it works fine):

import numpy 
import matplotlib.pyplot as plt 
from pandas import read_csv 
import math 
from keras.models import Sequential 
from keras.layers import Dense 
from keras.layers import LSTM 
from sklearn.preprocessing import MinMaxScaler 
from sklearn.metrics import mean_squared_error 

# convert an array of values into a dataset matrix 
def create_dataset(dataset, look_back=1): 
    dataX, dataY = [], [] 
    for i in range(len(dataset)-look_back-1): 
        a = dataset[i:(i+look_back), :]
        dataX.append(a)
        dataY.append(dataset[i + look_back, 0])
    return numpy.array(dataX), numpy.array(dataY) 

# fix random seed for reproducibility 
numpy.random.seed(7) 

# load the dataset 
dataframe = read_csv('out_meteo.csv', usecols=[5], engine='python', header=0) 
dataset = dataframe.values 
dataset = dataset.astype('float32') 

# normalize the dataset 
scaler = MinMaxScaler(feature_range=(0, 1)) 
dataset = scaler.fit_transform(dataset) 

# split into train and test sets 
train_size = int(len(dataset) * 0.7) 
test_size = len(dataset) - train_size 
train, test = dataset[0:train_size,:], dataset[train_size:len(dataset),:] 

# reshape into X=t and Y=t+1 
look_back = 3 
trainX, trainY = create_dataset(train, look_back) 
testX, testY = create_dataset(test, look_back) 

# reshape input to be [samples, time steps, features] 
trainX = numpy.reshape(trainX, (trainX.shape[0], 1, trainX.shape[1])) 
testX = numpy.reshape(testX, (testX.shape[0], 1, testX.shape[1])) 

# create and fit the LSTM network 
model = Sequential() 
model.add(LSTM(4, input_shape=(1, look_back))) 
model.add(Dense(1)) 
model.compile(loss='mean_squared_error', optimizer='adam') 
model.fit(trainX, trainY, epochs=15, batch_size=15, verbose=2) 

# make predictions 
trainPredict = model.predict(trainX) 
testPredict = model.predict(testX) 

# invert predictions 
trainPredict = scaler.inverse_transform(trainPredict) 
trainY = scaler.inverse_transform([trainY]) 
testPredict = scaler.inverse_transform(testPredict) 
testY = scaler.inverse_transform([testY]) 

# calculate root mean squared error 
trainScore = math.sqrt(mean_squared_error(trainY[0], trainPredict[:,:])) 
print('Train Score: %.2f RMSE' % (trainScore)) 
testScore = math.sqrt(mean_squared_error(testY[0], testPredict[:,:])) 
print('Test Score: %.2f RMSE' % (testScore)) 

# shift train predictions for plotting 
trainPredictPlot = numpy.empty_like(dataset) 
trainPredictPlot[:, :] = numpy.nan 
trainPredictPlot[look_back:len(trainPredict)+look_back, :] = trainPredict 

# shift test predictions for plotting 
testPredictPlot = numpy.empty_like(dataset) 
testPredictPlot[:, :] = numpy.nan 
testPredictPlot[len(trainPredict)+(look_back*2)+1:len(dataset)-1, :] = testPredict 
# plot baseline and predictions 
plt.plot(scaler.inverse_transform(dataset)) 
plt.plot(trainPredictPlot) 
plt.plot(testPredictPlot) 
plt.show() 

Since that works fine, I changed it to read all of the features (all of my CSV columns); here is that version, which gives me the wrong plot:

import numpy 
import matplotlib.pyplot as plt 
from pandas import read_csv 
import math 
from keras.models import Sequential 
from keras.layers import Dense 
from keras.layers import LSTM 
from sklearn.preprocessing import MinMaxScaler 
from sklearn.metrics import mean_squared_error 

# convert an array of values into a dataset matrix 
def create_dataset(dataset, look_back=1): 
    dataX, dataY = [], [] 
    for i in range(len(dataset)-look_back-1): 
        a = dataset[i:(i+look_back), :]
        dataX.append(a)
        dataY.append(dataset[i + look_back, 0])
    return numpy.array(dataX), numpy.array(dataY) 

# fix random seed for reproducibility 
numpy.random.seed(7) 

# load the dataset 
dataframe = read_csv('out_meteo.csv', usecols=[5], engine='python', header=0) 
dataset = dataframe.values 
dataset = dataset.astype('float32') 

# normalize the dataset 
scaler = MinMaxScaler(feature_range=(0, 1)) 
dataset = scaler.fit_transform(dataset) 

# split into train and test sets 
train_size = int(len(dataset) * 0.7) 
test_size = len(dataset) - train_size 
train, test = dataset[0:train_size,:], dataset[train_size:len(dataset),:] 

# reshape into X=t and Y=t+1 
look_back = 3 
trainX, trainY = create_dataset(train, look_back) 
testX, testY = create_dataset(test, look_back) 

# reshape input to be [samples, time steps, features] 
trainX = numpy.reshape(trainX, (trainX.shape[0], 1, trainX.shape[1])) 
testX = numpy.reshape(testX, (testX.shape[0], 1, testX.shape[1])) 

# create and fit the LSTM network 
model = Sequential() 
model.add(LSTM(4, input_shape=(1, look_back))) 
model.add(Dense(1)) 
model.compile(loss='mean_squared_error', optimizer='adam') 
model.fit(trainX, trainY, epochs=15, batch_size=15, verbose=2) 

# make predictions 
trainPredict = model.predict(trainX) 
testPredict = model.predict(testX) 

# invert predictions 
trainPredict = scaler.inverse_transform(trainPredict) 
trainY = scaler.inverse_transform([trainY]) 
testPredict = scaler.inverse_transform(testPredict) 
testY = scaler.inverse_transform([testY]) 

# calculate root mean squared error 
trainScore = math.sqrt(mean_squared_error(trainY[0], trainPredict[:,:])) 
print('Train Score: %.2f RMSE' % (trainScore)) 
testScore = math.sqrt(mean_squared_error(testY[0], testPredict[:,:])) 
print('Test Score: %.2f RMSE' % (testScore)) 

# shift train predictions for plotting 
trainPredictPlot = numpy.empty_like(dataset) 
trainPredictPlot[:, :] = numpy.nan 
trainPredictPlot[look_back:len(trainPredict)+look_back, :] = trainPredict 

# shift test predictions for plotting 
testPredictPlot = numpy.empty_like(dataset) 
testPredictPlot[:, :] = numpy.nan 
testPredictPlot[len(trainPredict)+(look_back*2)+1:len(dataset)-1, :] = testPredict 
# plot baseline and predictions 
plt.plot(scaler.inverse_transform(dataset)) 
plt.plot(trainPredictPlot) 
plt.plot(testPredictPlot) 
plt.show() 
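
One note on the inverse_transform suspicion in the question: a MinMaxScaler fitted on a dataset with several columns expects arrays with that same number of columns, so a single-column prediction has to be padded back into a full-width array before it can be inverted. The helper below is only an illustrative sketch (the name invert_target and its arguments are hypothetical, not part of the code above):

import numpy

def invert_target(scaler, predictions, n_columns, target_col=-1):
    # Put the single-column predictions into a zero-filled array with as many columns
    # as the scaler was fitted on, invert, and read the target column back out.
    padded = numpy.zeros((len(predictions), n_columns), dtype='float32')
    padded[:, target_col] = predictions[:, 0]
    return scaler.inverse_transform(padded)[:, target_col]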

Answer


You do not need to scale the Y values for a regression ANN. Call the scaler after the look_back function:

look_back = 3 
trainX, trainY = create_dataset(train, look_back) 
testX, testY = create_dataset(test, look_back) 

scaler = MinMaxScaler(feature_range=(0, 1)) 
trainX = scaler.fit_transform(trainX) 
testX = scaler.transform(testX) 

Then plot the predicted versus actual values.
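
One practical detail about this suggestion: after create_dataset, trainX and testX are 3-D arrays of shape (samples, look_back, features), while MinMaxScaler only accepts 2-D input. A minimal sketch of one way to apply the scaler there (assuming trainX and testX come straight from create_dataset; the helper name scale_windows is just for illustration):

from sklearn.preprocessing import MinMaxScaler

def scale_windows(trainX, testX):
    # MinMaxScaler expects 2-D input, so collapse the (samples, look_back, features)
    # windows to 2-D, scale per feature, then restore the original window shape.
    n_features = trainX.shape[2]
    scaler = MinMaxScaler(feature_range=(0, 1))
    trainX_2d = scaler.fit_transform(trainX.reshape(-1, n_features))
    testX_2d = scaler.transform(testX.reshape(-1, n_features))
    return trainX_2d.reshape(trainX.shape), testX_2d.reshape(testX.shape), scaler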


I edited my question, thank you for your answer. It is related to working with several features and plotting the final result.


Very confusing. Are you trying to plot the features, or just 'testY' and 'testPredict'? I would scale the data right before the network and unscale it after training. – DJK
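
Following up on that comment, a minimal sketch of plotting only the test target against the test predictions might look like this (it assumes target_scaler is a MinMaxScaler fitted only on the target column, and that testY and testPredict are still in scaled space, i.e. taken before the inverse_transform calls in the code above):

import matplotlib.pyplot as plt

# Bring both series back to the target's original units before plotting.
testPredict_inv = target_scaler.inverse_transform(testPredict)
testY_inv = target_scaler.inverse_transform(testY.reshape(-1, 1))

plt.plot(testY_inv, label='actual')
plt.plot(testPredict_inv, label='predicted')
plt.legend()
plt.show()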
