
Rock and Mines using Tensorflow

Sahil Kumar (Moderator)

      To download the code and dataset, clone the repository:

      https://github.com/ghostman-ai/Rock-and-Mines-using-Tensorflow.git

      #!/usr/bin/env python
      # coding: utf-8
      
      # In[3]:
      
      
      import pandas as pd
      import numpy as np
      import tensorflow as tf
      
      
      # In[4]:
      
      
      from sklearn.preprocessing import LabelEncoder
      
      
      # read and encode the dependent variable
      
      # In[5]:
      
      
      def read_data():
          # Load the sonar CSV: 60 feature columns followed by one label column
          ds = pd.read_csv(r"C:\Users\Lenovo\jupyter project\rock and mines\rock and mines.csv")
          X = ds[ds.columns[0:60]].values
          y = ds[ds.columns[60]]
          # Encode the string labels as integers, then one-hot encode them
          encoder = LabelEncoder()
          encoder.fit(y)
          y = encoder.transform(y)
          Y = one_hot_encode(y)
          print(X.shape)
          return X, Y
      
      
      # Define the encoder function
      
      # In[6]:
      
      
      def one_hot_encode(labels):
          # Convert integer class ids into one-hot rows, e.g. 1 -> [0, 1]
          n_labels = len(labels)
          n_unique_labels = len(np.unique(labels))
          encoded = np.zeros((n_labels, n_unique_labels))
          encoded[np.arange(n_labels), labels] = 1
          return encoded
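
      # As a quick sanity check (hypothetical labels of mine, not taken from the
      # dataset), the encoder turns integer class ids into indicator rows

      demo = one_hot_encode(np.array([0, 1, 0]))
      # demo:
      # [[1. 0.]
      #  [0. 1.]
      #  [1. 0.]]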
      
      
      # Read the encoded dataset and shuffle it to mix up the rows
      
      # In[7]:
      
      
      X, Y = read_data()
      from sklearn.utils import shuffle
      X, Y = shuffle(X, Y, random_state = 1)
      
      
      # In[8]:
      
      
      from sklearn.model_selection import train_test_split
      X_train, x_test, Y_train, y_test = train_test_split(X, Y, test_size=0.30, random_state=400)
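
      # A quick shape check (a sketch of mine; the counts assume the standard
      # 208-row, 60-feature sonar CSV, where a 70/30 split gives about 145
      # training rows and 63 test rows)

      print(X_train.shape, Y_train.shape)   # e.g. (145, 60) (145, 2)
      print(x_test.shape, y_test.shape)     # e.g. (63, 60) (63, 2)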
      
      
      # In[9]:
      
      
      X_train
      
      
      # In[10]:
      
      
      Y_train
      
      
      # In[11]:
      
      
      x_test
      
      
      # In[12]:
      
      
      y_test
      
      
      # Define the parameters and variables to work with the tensors
      
      # In[13]:
      
      
      learning_rate = 0.3
      training_epochs = 1000
      cost_history = np.empty(shape=[1], dtype=float)
      n_dim = X.shape[1]
      print("n_dim", n_dim)
      n_class = 2
      # Prefix used by tf.train.Saver when writing checkpoint files
      model_path = r"C:\Users\Lenovo\jupyter project\rock and mines"
      
      
      # Define the number of hidden layers and the number of neurons for each layer
      
      # In[14]:
      
      
      n_hidden_1 = 60
      n_hidden_2 = 60
      n_hidden_3 = 60
      n_hidden_4 = 60
      
      
      # In[19]:
      
      
      # Re-import TensorFlow through the compat.v1 API so this TF1-style
      # graph code also runs on TensorFlow 2
      import tensorflow.compat.v1 as tf
      tf.disable_v2_behavior()
      x = tf.placeholder(tf.float32, [None, n_dim])
      y_ = tf.placeholder(tf.float32, [None, n_class])
      # W and b are not used by the multilayer model below, which defines
      # its own weights and biases
      W = tf.Variable(tf.zeros([n_dim, n_class]))
      b = tf.Variable(tf.zeros([n_class]))
      
      
      # Define the model
      
      # In[20]:
      
      
      def multilayer_perceptron(x, weights, biases):
          # Hidden layer with RELU activations
          layer_1 = tf.add(tf.matmul(x, weights['W1']), biases['b1'])
          layer_1 = tf.nn.relu(layer_1)
          # Hidden layer with sigmoid activations
          layer_2 = tf.add(tf.matmul(layer_1, weights['W2']), biases['b2'])
          layer_2 = tf.nn.sigmoid(layer_2)
          # Hidden layer with sigmoid activations
          layer_3 = tf.add(tf.matmul(layer_2, weights['W3']), biases['b3'])
          layer_3 = tf.nn.sigmoid(layer_3)
          # Hidden layer with RELU activations
          layer_4 = tf.add(tf.matmul(layer_3, weights['W4']), biases['b4'])
          layer_4 = tf.nn.relu(layer_4)
          # Output layer with linear activations
          out_layer = tf.matmul(layer_4, weights['out']) + biases['out']
          return out_layer
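
      # For readers on modern TensorFlow: an equivalent-shape sketch of the same
      # network in tf.keras (my rewrite, not the code used in this post; run it
      # in a fresh TF2 session, without the tf.disable_v2_behavior() call above)

      from tensorflow import keras
      model = keras.Sequential([
          keras.Input(shape=(60,)),
          keras.layers.Dense(60, activation='relu'),
          keras.layers.Dense(60, activation='sigmoid'),
          keras.layers.Dense(60, activation='sigmoid'),
          keras.layers.Dense(60, activation='relu'),
          keras.layers.Dense(2),  # linear output (logits)
      ])
      model.summary()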
      
      
      # Define the weights and biases for each layer
      
      # In[21]:
      
      
      weights = {
          'W1': tf.Variable(tf.truncated_normal([n_dim, n_hidden_1])),
          'W2': tf.Variable(tf.truncated_normal([n_hidden_1, n_hidden_2])),
          'W3': tf.Variable(tf.truncated_normal([n_hidden_2, n_hidden_3])),
          'W4': tf.Variable(tf.truncated_normal([n_hidden_3, n_hidden_4])),
          'out': tf.Variable(tf.truncated_normal([n_hidden_4, n_class]))
      }
      biases = {
          'b1': tf.Variable(tf.truncated_normal([n_hidden_1])),
          'b2': tf.Variable(tf.truncated_normal([n_hidden_2])),
          'b3': tf.Variable(tf.truncated_normal([n_hidden_3])),
          'b4': tf.Variable(tf.truncated_normal([n_hidden_4])),
          'out': tf.Variable(tf.truncated_normal([n_class]))
      }
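
      # The truncated_normal draws above default to stddev=1.0, which is fairly
      # large for 60-unit layers; a common alternative (my suggestion, not part
      # of the original post) is to scale the stddev by the fan-in, e.g. for W1:

      W1_scaled = tf.Variable(tf.truncated_normal([n_dim, n_hidden_1],
                                                  stddev=1.0 / np.sqrt(n_dim)))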
      
      
      # Initialize all the variables
      
      # In[22]:
      
      
      init = tf.global_variables_initializer()
      saver = tf.train.Saver()
      
      
      # In[23]:
      
      
      y = multilayer_perceptron(x, weights, biases)
      
      
      # Define the cost function and gradient descent optimizer
      
      # In[24]:
      
      
      cost_function = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=y, labels=y_))
      training_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost_function)
      
      sess = tf.Session()
      sess.run(init)
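
      # As a sanity check on the loss (a worked example of mine with made-up
      # numbers, not part of the original notebook): softmax cross-entropy for
      # logits z and one-hot target t is -log(softmax(z)[true class])

      z = np.array([[2.0, 0.0]], dtype=np.float32)
      t = np.array([[1.0, 0.0]], dtype=np.float32)
      # softmax([2, 0]) ~ [0.881, 0.119], so the loss is -ln(0.881) ~ 0.127
      print(sess.run(tf.nn.softmax_cross_entropy_with_logits(logits=z, labels=t)))  # ~ [0.127]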
      
      
      # Calculate the cost and accuracy for each epoch
      
      # In[35]:
      
      
      # Build the evaluation ops once, outside the loop
      correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
      accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

      mse_history = []
      accuracy_history = []
      for epoch in range(training_epochs):
          sess.run(training_step, feed_dict={x: X_train, y_: Y_train})
          cost = sess.run(cost_function, feed_dict={x: X_train, y_: Y_train})
          cost_history = np.append(cost_history, cost)
          print("Accuracy: ", sess.run(accuracy, feed_dict={x: x_test, y_: y_test}))
          # Track the test-set MSE of the raw predictions against the one-hot targets
          pred_y = sess.run(y, feed_dict={x: x_test})
          mse_ = np.mean(np.square(pred_y - y_test))
          mse_history.append(mse_)
          # Track the training accuracy
          train_accuracy = sess.run(accuracy, feed_dict={x: X_train, y_: Y_train})
          accuracy_history.append(train_accuracy)
          print('epoch: ', epoch, ' - ', 'cost: ', cost, " - MSE: ", mse_, "- Train Accuracy: ", train_accuracy)
      
      
      # In[36]:
      
      
      save_path = saver.save(sess, model_path)
      print("Model saved in file: %s" % save_path)
      
      
      # Plot the MSE and accuracy graphs
      
      # In[37]:
      
      
      import matplotlib.pyplot as plt
      # Test-set MSE per epoch
      plt.plot(mse_history, "r")
      plt.xlabel('Epoch')
      plt.ylabel('MSE')
      plt.show()
      # Training accuracy per epoch
      plt.plot(accuracy_history, "r")
      plt.xlabel('Epoch')
      plt.ylabel('Accuracy')
      plt.show()
      
      
      # Print the final accuracy
      
      # In[38]:
      
      
      correct_pred = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
      accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
      print("Test Accuracy: ", (sess.run(accuracy, feed_dict={x:x_test, y_:y_test} )))
      
      
      # Print the final mean squared error
      
      # In[39]:
      
      
      pred_y = sess.run(y, feed_dict={x: x_test})
      mse = np.mean(np.square(pred_y - y_test))
      print("MSE: %.4f" % mse)
      
      