ValueError: Error when checking input: expected gru_5_input to have shape (None, None, 10) but got array with shape (1, 4, 1)


I am trying to make hourly predictions using a recurrent neural network with TensorFlow and Keras in Python. I assigned my neural-network inputs the shape (None, None, 5), as shown in my Image 174551.
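As I understand it, the GRU layer consumes batches of shape (batch_size, timesteps, features), so with 5 input signals a valid batch would look like this (a toy sketch, not my real data):

import numpy as np
batch = np.zeros((1, 4, 5))  # 1 sequence, 4 hourly steps, 5 input signals
# this matches input_shape=(None, 5); an array shaped (1, 4, 1) would be rejected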

However, I get the error message:

ValueError: Error when checking input: expected gru_3_input to have shape (None, None, 10) but got array with shape (1, 4, 1)

My MVCE code is:

%matplotlib inline
#!pip uninstall keras
#!pip install keras==2.1.2
import tensorflow as tf
import pandas as pd
from pandas import DataFrame
import math
from keras.models import Sequential
from keras.layers import Dense, GRU
from keras.optimizers import RMSprop




##### Create the Recurrent Neural Network #####


model = Sequential()


model.add(GRU(units=5, 
                return_sequences=True,
                input_shape=(None, num_x_signals)))

## This line maps the 5 GRU units above down to num_y_signals outputs
model.add(Dense(num_y_signals, activation='sigmoid'))

if False:
    from tensorflow.python.keras.initializers import RandomUniform

    # Maybe use lower init-ranges. #### I may have to change these during debugging ####
    init = RandomUniform(minval=-0.05, maxval=0.05)

    model.add(Dense(num_y_signals,
                    activation='linear',
                    kernel_initializer=init))

warmup_steps = 5

def loss_mse_warmup(y_true, y_pred):

    #
    # Ignore the "warmup" parts of the sequences
    # by taking slices of the tensors.
    y_true_slice = y_true[:, warmup_steps:, :]
    y_pred_slice = y_pred[:, warmup_steps:, :]

    # These sliced tensors both have this shape:
    # [batch_size, sequence_length - warmup_steps, num_y_signals]

    # Calculate the MSE loss for each value in these tensors.
    # This outputs a 3-rank tensor of the same shape.
    loss = tf.losses.mean_squared_error(labels=y_true_slice,
                                        predictions=y_pred_slice)



    loss_mean = tf.reduce_mean(loss)

    return loss_mean

optimizer = RMSprop(lr=1e-3) ### This is something related to debugging



model.compile(loss=loss_mse_warmup, optimizer=optimizer) #### I may have to make the output a signal rather than the whole data set

print(model.summary())


model.fit_generator(generator=generator,
                    epochs=20,
                    steps_per_epoch=100,
                    validation_data=validation_data)

I am not sure why this happens, but I believe it may have something to do with how I reshape my training and testing data. I have also attached my full error message (Image 174551) to my code to make the problem reproducible.
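I suspect the (1, 4, 1) in the error comes from how I expand my scaled test set into a validation tuple, roughly like this:

validation_data = (np.expand_dims(x_test_scaled, axis=0),   # (4, 1) -> (1, 4, 1)
                   np.expand_dims(y_test_scaled, axis=0))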

  • The shape of validation_data does not match the shape of a batch from the training generator.
  • The shape of your validation_data is (1, 4, 1), which is not acceptable as input; you can compare the shapes as shown below.
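A quick way to see the mismatch, for example:

x_batch, y_batch = next(generator)
print(x_batch.shape)             # (batch_size, sequence_length, num_x_signals)
print(validation_data[0].shape)  # (1, 4, 1): the feature axes disagree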
Tags:
tensorflow
keras
neural-network

1 Answer

Best answer

I am not sure about the correctness, but here it is:

%matplotlib inline
#!pip uninstall keras
#!pip install keras==2.1.2
import tensorflow as tf
import pandas as pd 
from pandas import DataFrame 
import math
import numpy as np
from sklearn.preprocessing import MinMaxScaler
from keras.models import Sequential
import datetime
from keras.layers import Input, Dense, GRU, Embedding
from keras.optimizers import RMSprop
from keras.callbacks import EarlyStopping, ModelCheckpoint, TensorBoard, ReduceLROnPlateau


dates = [datetime.datetime(2012, 1, 1, 1, 0, 0) + datetime.timedelta(hours=i) for i in range(10)]  # renamed so the datetime module is not shadowed




X=np.array([2.25226244,1.44078451,0.99174488,0.71179491,0.92824542,1.67776948,2.96399534,5.06257161,7.06504245,7.77817664
               ,0.92824542,1.67776948,2.96399534,5.06257161,7.06504245,7.77817664])

y= np.array([0.02062136,0.00186715,0.01517354,0.0129046 ,0.02231125,0.01492537,0.09646542,0.28444476,0.46289928,0.77817664
                ,0.02231125,0.01492537,0.09646542,0.28444476,0.46289928,0.77817664])

X = X[1:11]
y= y[1:11]

df = pd.DataFrame({'date': dates, 'y': y, 'X': X})



df['t']= [x for x in range(10)]
df['X-1'] = df['X'].shift(-1)  # note: shift(-1) pulls the next hour's X value into each row


x_data = df['X-1'].fillna(0).values  # as a NumPy array so .reshape() below works on modern pandas

y_data = y 

num_data = len(x_data) 

#### training and testing split ####
train_split = 0.6
num_train = int(train_split * num_data)
num_test = num_data - num_train  # number of observations in the test set


# input train/test split
x_train = x_data[0:num_train].reshape(-1, 1)
x_test = x_data[num_train:].reshape(-1, 1)
#print(len(x_train) + len(x_test))

# output train/test split
y_train = y_data[0:num_train].reshape(-1, 1)
y_test = y_data[num_train:].reshape(-1, 1)
#print(len(y_train) + len(y_test))


### number of input signals: taken here as the series length (10),
### which is why the model ends up expecting input shape (None, None, 10)
num_x_signals = x_data.shape[0]
#print(num_x_signals)

## number of output signals ##
num_y_signals = y_data.shape[0]
#print(num_y_signals)

#### data scaling ####

x_scaler = MinMaxScaler(feature_range=(0,1))
x_train_scaled = x_scaler.fit_transform(x_train)



x_test_scaled = MinMaxScaler(feature_range=(0, 1)).fit_transform(x_test)  # note: fitted on the test set itself rather than reusing x_scaler

y_scaler = MinMaxScaler()
y_train_scaled = y_scaler.fit_transform(y_train)
y_test_scaled = MinMaxScaler(feature_range=(0, 1)).fit_transform(y_test)  # same caveat for y



def batch_generator(batch_size, sequence_length):
    """
    Generator function for creating random batches of training-data.
    """

    # Infinite loop that keeps providing the neural network with random
    # slices of the training data for x and y.
    while True:
        # Allocate a new array for the batch of input-signals.
        x_shape = (batch_size, sequence_length, num_x_signals)
        x_batch = np.zeros(shape=x_shape, dtype=np.float16)

        # Allocate a new array for the batch of output-signals.
        y_shape = (batch_size, sequence_length, num_y_signals)
        y_batch = np.zeros(shape=y_shape, dtype=np.float16)

        # Fill the batch with random sequences of data.
        for i in range(batch_size):
            # Get a random start-index.
            # This points somewhere into the training-data.
            idx = np.random.randint(num_train - sequence_length)

            # Copy the sequences of data starting at this index.
            x_batch[i] = x_train_scaled[idx:idx+sequence_length]
            y_batch[i] = y_train_scaled[idx:idx+sequence_length]

        yield (x_batch, y_batch)


batch_size = 20




sequence_length = 2 


generator = batch_generator(batch_size=batch_size,
                            sequence_length=sequence_length)

x_batch, y_batch = next(generator)
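# With these settings each training batch has shape
# (batch_size, sequence_length, num_x_signals) = (20, 2, 10); the (2, 1)
# slices of x_train_scaled are broadcast across the 10-wide feature axis.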




#########Validation Set Start########

def validation_batch_generator(batch_size, sequence_length):
    """
    Generator function for creating random batches of validation data.
    """

    # Infinite loop that keeps providing the neural network with random
    # slices of the test data for x and y.
    while True:
        # Allocate a new array for the batch of input-signals.
        x_shape = (batch_size, sequence_length, num_x_signals)
        x_batch = np.zeros(shape=x_shape, dtype=np.float16)

        # Allocate a new array for the batch of output-signals.
        y_shape = (batch_size, sequence_length, num_y_signals)
        y_batch = np.zeros(shape=y_shape, dtype=np.float16)

        # Fill the batch with random sequences of data.
        for i in range(batch_size):
            # Get a random start-index.
            # This points somewhere into the test data (using num_test here,
            # not num_train, so the slices stay inside the test set).
            idx = np.random.randint(num_test - sequence_length)

            # Copy the sequences of data starting at this index.
            x_batch[i] = x_test_scaled[idx:idx+sequence_length]
            y_batch[i] = y_test_scaled[idx:idx+sequence_length]

        yield (x_batch, y_batch)

validation_data = next(validation_batch_generator(batch_size, sequence_length))
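# validation_data is now an (x, y) tuple whose arrays have the same
# (20, 2, 10) shape as the training batches, which is what resolves
# the original ValueError.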

# validation_data = (np.expand_dims(x_test_scaled, axis=0),
#                     np.expand_dims(y_test_scaled, axis=0))

#Validation set end

##### Create the Recurrent Neural Network #####


model = Sequential()


model.add(GRU(units=5, 
                return_sequences=True,
                input_shape=(None, num_x_signals)))

## This line maps the 5 GRU units above down to num_y_signals outputs
model.add(Dense(num_y_signals, activation='sigmoid'))

if False:
    from tensorflow.python.keras.initializers import RandomUniform

    # Maybe use lower init-ranges. #### I may have to change these during debugging ####
    init = RandomUniform(minval=-0.05, maxval=0.05)

    model.add(Dense(num_y_signals,
                    activation='linear',
                    kernel_initializer=init))

warmup_steps = 5

def loss_mse_warmup(y_true, y_pred):

    #
    # Ignore the "warmup" parts of the sequences
    # by taking slices of the tensors.
    y_true_slice = y_true[:, warmup_steps:, :]
    y_pred_slice = y_pred[:, warmup_steps:, :]

    # These sliced tensors both have this shape:
    # [batch_size, sequence_length - warmup_steps, num_y_signals]

    # Calculate the MSE loss for each value in these tensors.
    # This outputs a 3-rank tensor of the same shape.
    loss = tf.losses.mean_squared_error(labels=y_true_slice,
                                        predictions=y_pred_slice)



    loss_mean = tf.reduce_mean(loss)

    return loss_mean

optimizer = RMSprop(lr=1e-3) ### This is something related to debugging



model.compile(loss=loss_mse_warmup, optimizer=optimizer) #### I may have to make the output a signal rather than the whole data set

print(model.summary())


model.fit_generator(generator=generator,
                    epochs=20,
                    steps_per_epoch=100,
                    validation_data=validation_data)

I only changed the part of the code between the Validation Set Start and Validation set end markers.
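You can check that the shapes now agree before training, for example:

x_val, y_val = validation_data
x_tr, y_tr = next(generator)
print(x_tr.shape, x_val.shape)  # both (20, 2, 10), so the input check passes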

  • @Khan11 The training and validation losses look strange.
  • @Krishna Thank you very much! It works so far! Just out of curiosity, since I am new to Python and machine learning: what is strange about the training and validation loss? Thank you
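A likely explanation (my assumption, not confirmed in the thread): warmup_steps = 5 is larger than sequence_length = 2, so the slices inside loss_mse_warmup are empty:

y_true_slice = y_true[:, 5:, :]  # with only 2 time steps this slice has 0 elements
# averaging an MSE over zero elements gives a degenerate loss (constant 0 or NaN,
# depending on the TensorFlow version), so either raise sequence_length above
# warmup_steps or lower warmup_steps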
