How do I use a dynamic dimension (a None dimension) of a keras.Layer in a for loop?

Issue

I want to build a Keras Layer as follows.
The input dimension is (None, 16, 3) and I want to use it in a "for loop", but when I use this layer in a Sequential model, I get this error:

ValueError: Cannot convert a partially known TensorShape to a Tensor: (?, 16, 3)

Can someone help me?

class WeightedLayer(Layer):
    """Compute, per sample, the flattened outer (tensor) product of the
    n_input row vectors of length n_memb.

    Input shape:  (batch, n_input, n_memb)
    Output shape: (batch, n_memb ** n_input)

    NOTE(review): with n_input=16, n_memb=3 the output has 3**16 ≈ 43M
    columns per sample — confirm that size is really intended.
    """

    def __init__(self, n_input, n_memb, **kwargs):
        super(WeightedLayer, self).__init__(**kwargs)
        self.n = n_input   # number of feature rows (16)
        self.m = n_memb    # vector length per row (3)

    def build(self, batch_input_shape):
        # Bug fix: batch_input_shape is a TensorShape, not a tensor, so
        # tf.shape(batch_input_shape) raised "Cannot convert a partially
        # known TensorShape to a Tensor: (?, 16, 3)". The dynamic batch
        # size does not need to be captured here at all — call() below is
        # vectorized over the batch dimension.
        super(WeightedLayer, self).build(batch_input_shape)

    def call(self, input_):
        # Vectorized over the batch, so there is no Python loop over the
        # dynamic (None) dimension and no Python-list accumulator in the
        # graph. Iteratively expand the running outer product:
        #   (batch, m**d) outer (batch, m) -> (batch, m**(d+1))
        cp = input_[:, 0, :]                       # (batch, m)
        for d in range(1, self.n):
            cp = tf.reshape(cp[:, :, None] * input_[:, d, None, :],
                            (-1, self.m ** (d + 1)))
        return cp                                  # (batch, m**n)

    def compute_output_shape(self, batch_input_shape):
        # Keep the batch dimension symbolic instead of baking in a value.
        return tf.TensorShape([batch_input_shape[0], self.m ** self.n])


# Assemble the pipeline: (None, 16, 3) input -> custom weighted layer,
# then train against mean squared error with Adam.
Model = keras.models.Sequential(
    [
        Input(shape=(16, 3), name='inputLayer'),
        WeightedLayer(n_input=16, n_memb=3),
    ]
)
Model.compile(optimizer='adam', loss='mean_squared_error')
Model.fit(
    X_train,
    y_train,
    epochs=200,
    batch_size=10,
    validation_data=(X_test, y_test),
)

Solution

call() builds a symbolic graph that is later executed many times; TensorFlow does not allow a Python list to be used as an accumulator inside that graph.
Follow this doc

A common pattern is to accumulate intermediate values from a loop.
Normally, this is accomplished by appending to a Python list or adding
entries to a Python dictionary. However, as these are Python side
effects, they will not work as expected in a dynamically unrolled
loop. Use tf.TensorArray to accumulate results from a dynamically
unrolled loop.

The example below should be helpful. Use tf.TensorArray:

import tensorflow as tf
from tensorflow.keras.layers import Layer, Input
from tensorflow import keras
import numpy as np


class WeightedLayer(Layer):
    """Demo layer showing how to loop over a dynamic (None) batch
    dimension inside call() and accumulate results graph-safely.

    NOTE(review): the body writes one random scalar per sample — it
    illustrates the tf.TensorArray pattern only, not the tensor-product
    computation from the question.
    """

    def __init__(self, n_input, n_memb, **kwargs):
        super(WeightedLayer, self).__init__(**kwargs)
        self.n = n_input  # 16 features
        self.m = n_memb  # 3

    def build(self, batch_input_shape):
        super(WeightedLayer, self).build(batch_input_shape)

    def call(self, input_):
        # The dynamic batch size must be read from tf.shape() of the
        # tensor at runtime; the static shape is only partially known.
        batch_size = tf.shape(input_)[0]
        # tf.TensorArray is the graph-safe accumulator; appending to a
        # Python list is a Python side effect and does not work in a
        # dynamically unrolled loop.
        ta = tf.TensorArray(tf.float32, size=0, dynamic_size=True)
        for i in tf.range(batch_size):
            ta = ta.write(i, tf.random.normal((1,))[0])
        return ta.stack()

    def compute_output_shape(self, batch_input_shape):
        # Bug fix: the original returned tf.TensorShape([self.batch_size]),
        # but self.batch_size is never assigned in this class, so Keras
        # would hit an AttributeError. Keep the batch dimension symbolic.
        return tf.TensorShape([batch_input_shape[0]])


# Synthetic data: 200 samples shaped (16, 3) with scalar targets in [0, 1).
X_train = np.random.uniform(0, 1, (200, 16, 3))
X_test = np.random.uniform(0, 1, (200, 16, 3))
y_train = np.random.uniform(0, 1, (200,))
y_test = np.random.uniform(0, 1, (200,))

# Build and train the demo model around the custom layer.
Model = keras.models.Sequential(
    [
        Input(shape=(16, 3), name='inputLayer'),
        WeightedLayer(n_input=16, n_memb=3),
    ]
)
Model.compile(optimizer='adam', loss='mean_squared_error')
Model.fit(
    X_train,
    y_train,
    epochs=20,
    batch_size=10,
    validation_data=(X_test, y_test),
)

Answered By – FancyXun

This answer was collected from Stack Overflow and is licensed under CC BY-SA 2.5, CC BY-SA 3.0 and CC BY-SA 4.0.

Leave a Reply

(*) Required, Your email will not be published