Advertisement
MK7265

Untitled

Oct 2nd, 2023 (edited)
102
0
Never
Not a member of Pastebin yet? Sign Up, it unlocks many cool features!
Python 8.30 KB | None | 0 0
  1. ------Practical 1----------
  2. #Breadth first search algo for romanian map problem
  3.  
  4. import queue as q
  5. from RMP import dict_gn
  6. def bfs(current_city, goal_city, explored_list, exploration_queue):
  7.     explored_list.append(current_city)
  8.     goal_reached = False
  9.     if current_city == goal_city:
  10.          return explored_list, True
  11.     for each_city in dict_gn[current_city].keys():
  12.         if each_city not in explored_list and each_city not in exploration_queue.queue:
  13.              exploration_queue.put(each_city)
  14.     try:
  15.          explored_list, goal_reached = bfs(exploration_queue.get(False), goal_city, explored_list, exploration_queue)
  16.     except q.Empty:
  17.         return explored_list, False
  18.     if goal_reached:
  19.         return explored_list, True
  20.     return explored_list, False
  21.        
  22.    
  23. def main():
  24.     start_city = 'Arad'
  25.     goal_city = 'Bucharest'
  26.     explored_list = []
  27.     exploration_queue = q.Queue()
  28.     exploration_queue.put(start_city)
  29.     goal_reached = False
  30.     explored_list, goal_reached = bfs(exploration_queue.get(False),goal_city, explored_list, exploration_queue)
  31.     if not goal_reached:
  32.          print('Could not find', goal_city)
  33.     print(explored_list)
  34.    
  35. main()            
  36.          
  37.  
  38. # Simple BFS — study this; if asked in the exam, write this version.
  39.  
  40. graph = {
  41.   '5' : ['3','7'],
  42.   '3' : ['2', '4'],
  43.   '7' : ['8'],
  44.   '2' : [],
  45.   '4' : ['8'],
  46.   '8' : []
  47. }
  48.  
  49. visited = [] # List for visited nodes.
  50. queue = []     #Initialize a queue
  51.  
  52. def bfs(visited, graph, node): #function for BFS
  53.   visited.append(node)
  54.   queue.append(node)
  55.  
  56.  while queue:
  57.   m= queue.pop(0)
  58.   print(m,end="")
  59.   for neighbour in graph [m]:
  60.     if neighbour not in visited:
  61.       visited.append(neighbour)
  62.       queue.append(neighbour)
  63.  
  64. print(" Following is the Breadth First Search")
  65. bfs(visited, graph,'5')
  66.  
  67.  
  68.  
  69.  
  70. --------Practical 2---------
  71. #IDDFS (Iterative deep-depth first search) for romanian map
  72.  
  73. import queue as Q
  74. from RMP import dict_gn
  75.  
  76. start = 'Arad'
  77. goal = 'Bucharest'
  78. result = ' '
  79.  
  80. def DLS(city, visitedstack, startlimit, endlimit):
  81.     global result
  82.     found =0
  83.     result = result + city+'  '
  84.     visitedstack.append(city)
  85.     if city == goal:
  86.         return 1
  87.     if startlimit == endlimit:
  88.         return 0
  89.     for eachcity in dict_gn[city].keys():
  90.         if eachcity not in visitedstack:
  91.             found=DLS(eachcity, visitedstack, startlimit+1,endlimit)
  92.             if found:
  93.                 return found
  94.  
  95. def IDDFS(city, visitedstack, endlimit):
  96.     global result
  97.     for i in range(0, endlimit):
  98.         print("Searching at Limit : ",i)
  99.         found=DLS(city, visitedstack, 0, i)
  100.         if found:
  101.             print("City found")
  102.             break
  103.         else:
  104.             print("City not found")
  105.             print(result)
  106.             print("--------------")
  107.             result = ''
  108.             visitedstack= [ ]
  109.  
  110. visitedstack = [ ]
  111. IDDFS(start, visitedstack, 9)
  112. print("IDDFS Traversal from",start," to ", goal, " is: ")
  113. print(result)
  114.  
  115.  
  116. --------Practical 3-----------
  117. # A* Algorithm
  118.  
  119. import queue as Q
  120. from RMP import dict_gn
  121. from RMP import dict_hn
  122.  
  123. #f(n)=g(n) + h(n)
  124.  
  125. start='Arad'
  126. goal='Bucharest'
  127. result=''
  128.  
  129. def get_fn(citystr):
  130.     cities=citystr.split(" , ")
  131.     #01print("cities",cities)
  132.    
  133.     hn=gn=0
  134.     for ctr in range(0, len(cities)-1):
  135.         gn=gn+dict_gn[cities[ctr]][cities[ctr+1]]
  136.         #02print("gn",gn,"-----",ctr)
  137.  
  138.     hn=dict_hn[cities[len(cities)-1]]
  139.     #03print("hn",cities)
  140.     return(hn+gn)
  141.    
  142. def expand(cityq):
  143.     global result
  144.     tot, citystr, thiscity=cityq.get()
  145.     if thiscity==goal:
  146.         result=citystr+" : : "+str(tot)
  147.         return
  148.     for cty in dict_gn[thiscity]:
  149.         cityq.put((get_fn(citystr+" , "+cty), citystr+" , "+cty, cty))
  150.         #03print(cty)
  151.     expand(cityq)
  152.  
  153. def main():
  154.     cityq=Q.PriorityQueue()
  155.     thiscity=start
  156.     cityq.put((get_fn(start),start,thiscity))
  157.     expand(cityq)
  158.     print("The A* path with the total is: ")
  159.     print(result)
  160.  
  161. main()
  162.  
  163.  
  164.  
  165. --------Practical 4----------
  166. # Implement Machine Learning Algorithm using matplotlib, numpy, pandas
  167.  
 168. It's simple — do it yourself; I'll send a photo in the group.
  169.  
  170.  
  171.  
  172. --------Practical 5------------
  173. #Implement RBFS (recursive best first search) algorithm for romanian map
  174.  
  175. import queue as Q
  176.  
  177. from RMP import dict_gn
  178.  
  179. from RMP import dict_hn
  180.  
  181.  
  182. start = 'Arad'
  183. goal = 'Bucharest'
  184. result=''
  185.  
  186. def get_fn(citystr):
  187.     cities=citystr.split(',')
  188.     hn=gn=0
  189.     for ctr in range (0,len(cities)-1):
  190.         gn=gn+dict_gn[cities[ctr]][cities[ctr+1]]
  191.  
  192.     hn=dict_hn[cities[len(cities)-1]]
  193.     return (hn+gn)
  194.  
  195. def printout(cityq):
  196.     for i in range(0,cityq.qsize()):
  197.         print(cityq.queue[i])
  198.  
  199. def expand(cityq):
  200.     global result
  201.     tot,citystr,thiscity=cityq.get()
  202.     nexttot=999
  203.     if not cityq.empty():
  204.         nexttot,nextcitystr,nextthiscity=cityq.queue[0]
  205.     if thiscity==goal and tot<nexttot:
  206.         result=citystr+'::'+str(tot)
  207.         return
  208.     print("Expanded city---------------------",thiscity)
  209.     print("Second best f(n)------------------",nexttot)
  210.     tempq=Q.PriorityQueue()
  211.     for cty in dict_gn[thiscity]:
  212.         tempq.put((get_fn(citystr+','+cty),citystr+','+cty,cty))
  213.     for ctr in range(1,3):
  214.         ctrtot,ctrcitystr,ctrthiscity=tempq.get()
  215.         if ctrtot<nexttot:
  216.             cityq.put((ctrtot,ctrcitystr,ctrthiscity))
  217.         else:
  218.             cityq.put((ctrtot,citystr,thiscity))
  219.             break
  220.     printout(cityq)
  221.     expand(cityq)
  222.  
  223. def main ():
  224.     cityq=Q.PriorityQueue()
  225.     thiscity=start
  226.     cityq.put((999,"NA","NA"))
  227.     cityq.put((get_fn(start),start,thiscity))
  228.     expand(cityq)
  229.     print(result)
  230. main()
  231.        
  232.      
  233.  
  234.  
  235. --------Practical 6---------
  236. #Implement the decision tree learning algorithm
  237. import numpy as np
  238.  
  239. import pandas as pd
  240. import sklearn as sk
  241. from sklearn.metrics import confusion_matrix
  242. from sklearn.model_selection import train_test_split
  243. from sklearn.tree import DecisionTreeClassifier
  244. from sklearn.metrics import accuracy_score
  245. from sklearn.metrics import classification_report
  246.  
  247. #func importing dataset
  248. def importdata():
  249.       balance_data=pd.read_csv("balance-scale.data")
  250.  
  251.       #print the dataset shape
  252.       print("Dataset Length : ",len(balance_data))
  253.       print("============check1")
  254.       #printing the dataset observations
  255.       print("Dataset : ",balance_data.head())
  256.       print("============check2")
  257.       return balance_data
  258.  
  259. #func to split the dataset
  260. def splitdataset(balance_data):
  261.       #seperating the target variable
  262.       X=balance_data.values[:,1:5]
  263.       Y=balance_data.values[:,0]
  264.  
  265.       #splitting the dataset into train and test
  266.       X_train,X_test,y_train,y_test=train_test_split(X,Y,test_size=0.3,random_state=100)
  267.       return X,Y,X_train,X_test,y_train,y_test
  268.  
  269. #function to perform training with entropy
  270. def train_using_entropy(X_train,X_test,y_train,y_test):
  271.       #decision tree with entropy
  272.       clf_entropy=DecisionTreeClassifier(criterion="entropy",random_state=100,max_depth=3,min_samples_leaf=5)
  273.  
  274.       #performing training
  275.       clf_entropy.fit(X_train,y_train)
  276.       return clf_entropy
  277.  
  278. def prediction(X_test,clf_object):
  279.       y_pred=clf_object.predict(X_test)
  280.       print("Predicted Values : ")
  281.       print(y_pred)
  282.       return y_pred
  283.  
  284. def cal_accuracy(y_test,y_pred):
  285.       print("Accuracy : ",accuracy_score(y_test,y_pred)*100)
  286.  
  287. def main():
  288.       data=importdata()
  289.       X,Y,X_train,X_test,y_train,y_test=splitdataset(data)
  290.      
  291.       clf_entropy=train_using_entropy(X_train,X_test,y_train,y_test)
  292.  
  293.       print("Results using entropy : ")
  294.       y_pred_entropy=prediction(X_test,clf_entropy)
  295.       cal_accuracy(y_test,y_pred_entropy)
  296.  
  297. main()
  298.  
  299.  
  300. --------Practical 7----------
  301. # Implement Adaboost ( Adaptive Boosting )
  302.  
  303. import pandas
  304. from sklearn import model_selection
  305. from sklearn.ensemble import AdaBoostClassifier
  306. url="https://raw.githubusercontent.com/jbrownlee/Datasets/master/pima-indians-diabetes.data.csv"
  307. names=['preg','plas','pres','skin','test','mass','pedi','age','class']
  308. dataframe = pandas.read_csv(url,names=names)
  309. array = dataframe.values
  310. x=array[:0,:8]
  311. y=array[:,8]
  312. seed = 100
  313. num_trees  = 30
  314. model=AdaBoostClassifier(n_estimators=num_trees,random_statement)
  315. results=model_selection.cross_val_score(model,x,y)
  316. print(results.mean())
  317.  
  318.  
  319.  
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement