
ValueError: Passed in object <KerasVariable shape=(1, 64), dtype=float64, path=dense_15/kernel> of type 'Variable', not tf.Tensor or tf.Variable or ExtensionType. #19874

Open

hgaur0007 opened this issue Jun 18, 2024 · 0 comments

hgaur0007 commented Jun 18, 2024

Hello everyone,

Could you please help me out with the error shown in the title? It appears when I run the code below.
My Keras version is '3.3.3.dev2024060803'.
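(Version check, in case the exact builds matter; keras.__version__ and tf.__version__ are the standard attributes:)

import keras
import tensorflow as tf

print(keras.__version__)  # '3.3.3.dev2024060803' here, i.e. Keras 3
print(tf.__version__)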

import tensorflow as tf
import numpy as np
import time
from tensorflow.keras.layers import Dense
import tensorflow_probability as tfp

tf.random.set_seed(42)

class model(tf.keras.Model):
    def __init__(self, layers, train_op, num_epoch, print_epoch):
        super(model, self).__init__()
        self.model_layers = layers
        self.train_op = train_op
        self.num_epoch = num_epoch
        self.print_epoch = print_epoch
        self.adam_loss_hist = []

    def call(self, r):
        return self.r_val(r)

    # Running the model: scale the input to [-1, 1], then apply the layers
    def r_val(self, r):
        r = 2.0*(r - self.bounds["lb"])/(self.bounds["ub"] - self.bounds["lb"]) - 1.0
        for l in self.model_layers:
            r = l(r)
        return r

    def Stress(self, r):
        stress = 448.4*tf.exp(0.1231*(self.r_val(r))) - 0.409*tf.exp(-6.933*(self.r_val(r)))
        return stress

    # Return the first derivative
    def Strain(self, r):
        strain = 0.009312499803*self.r_val(r) + 0.009312500112
        return strain

    # Midpoint-rule quadrature of the energy density over [a, r]
    def Strain_Energy(self, r, N, a):
        def f(r):
            Energy = 0.03*0.015*1*(self.Stress(self.r_val(r)))*(self.Strain(self.r_val(r)))
            return Energy
        value = tf.constant(0, dtype=tf.float64)
        value2 = tf.constant(0, dtype=tf.float64)
        for N in range(1, N+1):
            value += f(a + ((N - (1/2))*((r - a)/N)))
        value2 = ((r - a)/N)*value
        return value2

    def RHS(self, r):
        Ext_En = ((3*0.03*0.015)/(4*184128.9683))*(self.Stress(self.r_val(r)))**2
        return Ext_En

    # Custom loss function
    def get_loss(self, r):
        LHS_Val = self.Strain_Energy(r, 2000, -1)
        RHS_Val = self.RHS(r)
        int_loss = tf.reduce_mean(tf.math.square(LHS_Val - RHS_Val))
        return int_loss

    # Get gradients
    def get_grad(self, r):
        with tf.GradientTape() as tape:
            tape.watch(self.trainable_variables)
            L = self.get_loss(r)
        g = tape.gradient(L, self.trainable_variables)
        return L, g

    # Perform gradient descent
    def network_learn(self, r):
        self.bounds = {"lb": tf.math.reduce_min(r),
                       "ub": tf.math.reduce_max(r)}
        for i in range(self.num_epoch):
            L, g = self.get_grad(r)
            self.train_op.apply_gradients(zip(g, self.trainable_variables))
            self.adam_loss_hist.append(L)
            if i % self.print_epoch == 0:
                print("Epoch {} loss: {}".format(i, L))

rmin = -1
rmax = -0.7
numPts = 25
data_type = "float64"
a = tf.constant(-1, dtype=tf.float64)
rint = np.linspace(rmin, rmax, numPts).astype(data_type)
rint = np.array(rint)[np.newaxis].T

#define the model
tf.keras.backend.set_floatx(data_type)
l1 = tf.keras.layers.Dense(25, "tanh")
l2 = tf.keras.layers.Dense(25, "tanh")
l3 = tf.keras.layers.Dense(1, None)
train_op = tf.keras.optimizers.Adam()
num_epoch = 200
print_epoch = 10
pred_model = model([l1, l2, l3], train_op, num_epoch, print_epoch)

#convert the training data to tensors
rint_tf = tf.convert_to_tensor(rint)

#training
print("Training (ADAM)...")
t0 = time.time()
pred_model.network_learn(rint_tf)
t1 = time.time()
print("Time taken (ADAM)", t1-t0, "seconds")

The following error appears:

ValueError Traceback (most recent call last)
Cell In[46], line 4
2 print("Training (ADAM)...")
3 t0 = time.time()
----> 4 pred_model.network_learn(rint_tf)
5 t1 = time.time()
6 print("Time taken (ADAM)", t1-t0, "seconds")

Cell In[42], line 83, in model.network_learn(self, r)
80 self.bounds = {"lb" : tf.math.reduce_min(r),
81 "ub" : tf.math.reduce_max(r)}
82 for i in range(self.num_epoch):
---> 83 L, g = self.get_grad(r)
84 self.train_op.apply_gradients(zip(g, self.trainable_variables))
85 self.adam_loss_hist.append(L)

Cell In[42], line 73, in model.get_grad(self, r)
71 def get_grad(self, r):
72 with tf.GradientTape() as tape:
---> 73 tape.watch(self.trainable_variables)
74 L = self.get_loss(r)
75 g = tape.gradient(L, self.trainable_variables)

File ~\AppData\Roaming\Python\Python311\site-packages\tensorflow\python\eager\backprop.py:873, in GradientTape.watch(self, tensor)
864 def watch(self, tensor):
865 """Ensures that tensor is being traced by this tape.
866
867 Args:
(...)
871 ValueError: if it encounters something that is not a tensor.
872 """
--> 873 for t in _extract_tensors_and_variables(tensor):
874 if not backprop_util.IsTrainable(t):
875 logging.log_first_n(
876 logging.WARN, "The dtype of the watched tensor must be "
877 "floating (e.g. tf.float32), got %r", 5, t.dtype)

File ~\AppData\Roaming\Python\Python311\site-packages\tensorflow\python\eager\backprop.py:700, in _extract_tensors_and_variables(tensor)
698 yield from _extract_tensors_and_variables(components)
699 else:
--> 700 raise ValueError(f"Passed in object {obj} of type {type(obj).__name__!r}"
701 f", not tf.Tensor or tf.Variable or ExtensionType.")

ValueError: Passed in object <KerasVariable shape=(1, 64), dtype=float64, path=dense_15/kernel> of type 'Variable', not tf.Tensor or tf.Variable or ExtensionType.
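If I read the traceback right, the problem is the explicit tape.watch call: in Keras 3, self.trainable_variables holds KerasVariable objects, which wrap tf.Variables but are not instances of tf.Variable, and GradientTape.watch only accepts tf.Tensor, tf.Variable, or ExtensionType. A possible workaround (a sketch, not a confirmed fix) is to drop the watch entirely, since a default GradientTape has watch_accessed_variables=True and records the layers' underlying tf.Variables automatically when the forward pass reads them inside the tape:

# Possible workaround (sketch, not a confirmed fix): remove the explicit
# tape.watch(). A default GradientTape watches accessed trainable variables
# automatically, so the layers' underlying tf.Variables are still recorded,
# and tape.gradient appears to accept the Keras 3 variables as sources.
def get_grad(self, r):
    with tf.GradientTape() as tape:
        L = self.get_loss(r)
    g = tape.gradient(L, self.trainable_variables)
    return L, g

Alternatively, if Keras 2 behaviour is needed, installing the tf-keras package and setting the TF_USE_LEGACY_KERAS=1 environment variable before importing TensorFlow should make trainable_variables return plain tf.Variables again (assuming a TF release that supports the legacy switch, e.g. 2.16+).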
