1 year ago
#289981
crazycoders
Why am I getting precision and recall of zero in an ANFIS model using TensorFlow in Python?
I have built an ANFIS model with TensorFlow for a classification problem. For every epoch I get precision and recall of zero. I am using Gaussian membership functions, but when I print sigma it comes out as zero (a rough sketch of how I check it is shown after the code). I used the code below for training:
import numpy as np
import tensorflow as tf                                     # TensorFlow 1.x API (tf.placeholder, tf.Session)
from sklearn.metrics import precision_score, recall_score

## settings
n = X_train.shape[1]   # number of input features
m = 2 * n              # number of fuzzy rules
learning_rate = 0.01
epochs = 1000
################################ train
X_train_t = tf.placeholder(tf.float32, shape=[None, n])   # train input
y_train_t = tf.placeholder(tf.float32, shape=None)        # train output
mu = tf.get_variable(name="mu", shape=[m * n], initializer=tf.random_normal_initializer(0, 1))       # means of Gaussian MFs
sigma = tf.get_variable(name="sigma", shape=[m * n], initializer=tf.random_normal_initializer(0, 1)) # std devs of Gaussian MFs
w = tf.get_variable(name="w", shape=[1, m], initializer=tf.random_normal_initializer(0, 1))
rula = tf.reduce_prod(tf.reshape(tf.exp(-0.5 * ((tf.tile(X_train_t, (1, m)) - mu) ** 2) / (sigma ** 2)),
                                 (-1, m, n)), axis=2)  # rule activations
Y_train_t = tf.reduce_sum(rula * w, axis=1) / tf.clip_by_value(tf.reduce_sum(rula, axis=1), 1e-8, 1e8)
#loss = tf.losses.log_loss(y_train, Y_train)                     # loss function
loss = tf.losses.sigmoid_cross_entropy(y_train_t, Y_train_t)     # loss function
#loss = tf.sqrt(tf.losses.mean_squared_error(y_train, Y_train))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss)  # optimizer
################################ test
X_test_t = tf.placeholder(tf.float32, shape=[None, n])    # test input
y_test_t = tf.placeholder(tf.float32, shape=None)         # test output
rula_test = tf.reduce_prod(tf.reshape(tf.exp(-0.5 * ((tf.tile(X_test_t, (1, m)) - mu) ** 2) / (sigma ** 2)),
                                      (-1, m, n)), axis=2)  # rule activations
Y_test_t = tf.reduce_sum(rula_test * w, axis=1) / tf.clip_by_value(tf.reduce_sum(rula_test, axis=1), 1e-8, 1e8)
loss_test = tf.losses.sigmoid_cross_entropy(y_test_t, Y_test_t)  # loss function
################################ start session
x_axis = []
tr_loss, te_loss = [], []
tr_prec, te_prec = [], []
tr_rec, te_rec = [], []

init = tf.global_variables_initializer()
with tf.Session() as sess:
    sess.run(init)
    for e in range(epochs):
        Y_train, loss_tr, _ = sess.run([Y_train_t, loss, optimizer],
                                       feed_dict={X_train_t: X_train, y_train_t: y_train})
        Y_test, loss_te = sess.run([Y_test_t, loss_test],
                                   feed_dict={X_test_t: X_test, y_test_t: y_test})
        if (e + 1) % 10 == 0:
            x_axis.append(e + 1)
            tr_loss.append(loss_tr)
            te_loss.append(loss_te)
            # threshold the raw outputs at 0 to get class labels
            Y_train = np.where(Y_train > 0, 1, 0)
            Y_test = np.where(Y_test > 0, 1, 0)
            prec_tr = precision_score(y_train, Y_train)
            prec_te = precision_score(y_test, Y_test)
            rec_tr = recall_score(y_train, Y_train)
            rec_te = recall_score(y_test, Y_test)
            tr_prec.append(prec_tr)
            te_prec.append(prec_te)
            tr_rec.append(rec_tr)
            te_rec.append(rec_te)
The code is adapted from https://github.com/subhalingamd/ANFIS-diabetes-prediction/blob/main/main.py.
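For reference, this is roughly how I check sigma and the predicted labels at the end of training (a sketch run inside the same tf.Session block as the loop above; the names mu_val, sigma_val and Y_pred are just for illustration):

mu_val, sigma_val = sess.run([mu, sigma])     # inspect the learned membership parameters
print("sigma:", sigma_val)                    # this is where sigma shows up as zero
Y_pred = np.where(Y_test > 0, 1, 0)           # same thresholding of the outputs as in the loop
print(np.unique(Y_pred, return_counts=True))  # if every prediction is 0, precision_score and recall_score come out as 0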
I am new to this algorithm. Please help me figure out where I have gone wrong.
python
tensorflow
deep-learning
neural-network
fuzzy
0 Answers