------Practical 1----------
#Breadth first search algo for romanian map problem
import queue as q
from RMP import dict_gn

def bfs(current_city, goal_city, explored_list, exploration_queue):
    explored_list.append(current_city)
    if current_city == goal_city:
        return explored_list, True
    # enqueue every unvisited neighbour of the current city
    for each_city in dict_gn[current_city].keys():
        if each_city not in explored_list and each_city not in exploration_queue.queue:
            exploration_queue.put(each_city)
    try:
        explored_list, goal_reached = bfs(exploration_queue.get(False), goal_city, explored_list, exploration_queue)
    except q.Empty:
        return explored_list, False
    if goal_reached:
        return explored_list, True
    return explored_list, False

def main():
    start_city = 'Arad'
    goal_city = 'Bucharest'
    explored_list = []
    exploration_queue = q.Queue()
    exploration_queue.put(start_city)
    explored_list, goal_reached = bfs(exploration_queue.get(False), goal_city, explored_list, exploration_queue)
    if not goal_reached:
        print('Could not find', goal_city)
    print(explored_list)

main()
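All three Romanian-map practicals import dict_gn (and, for A* and RBFS, dict_hn) from a local module RMP that is not part of this paste. For reference, a minimal sketch of what RMP.py presumably holds, using the road distances and straight-line-distance heuristic of the AIMA Romania map (only an excerpt is shown; the remaining cities follow the same pattern):

#RMP.py - sketch of the assumed module, values from the AIMA Romania map
#dict_gn: road distances between neighbouring cities (excerpt)
dict_gn = {
    'Arad': {'Zerind': 75, 'Timisoara': 118, 'Sibiu': 140},
    'Zerind': {'Arad': 75, 'Oradea': 71},
    'Timisoara': {'Arad': 118, 'Lugoj': 111},
    'Sibiu': {'Arad': 140, 'Oradea': 151, 'Fagaras': 99, 'Rimnicu': 80},
    'Fagaras': {'Sibiu': 99, 'Bucharest': 211},
    'Rimnicu': {'Sibiu': 80, 'Pitesti': 97, 'Craiova': 146},
    'Pitesti': {'Rimnicu': 97, 'Craiova': 138, 'Bucharest': 101},
    'Bucharest': {'Fagaras': 211, 'Pitesti': 101, 'Giurgiu': 90, 'Urziceni': 85},
    # ...remaining cities omitted
}
#dict_hn: straight-line distance from each city to Bucharest (excerpt)
dict_hn = {
    'Arad': 366, 'Zerind': 374, 'Timisoara': 329, 'Sibiu': 253,
    'Fagaras': 176, 'Rimnicu': 193, 'Pitesti': 100, 'Bucharest': 0,
    # ...remaining cities omitted
}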
#Simple BFS - read this too; if it is asked, write this version
graph = {
    '5': ['3', '7'],
    '3': ['2', '4'],
    '7': ['8'],
    '2': [],
    '4': ['8'],
    '8': []
}
visited = []  # List for visited nodes
queue = []    # Initialize a queue

def bfs(visited, graph, node):  # function for BFS
    visited.append(node)
    queue.append(node)
    while queue:
        m = queue.pop(0)
        print(m, end=" ")
        for neighbour in graph[m]:
            if neighbour not in visited:
                visited.append(neighbour)
                queue.append(neighbour)

print("Following is the Breadth First Search")
bfs(visited, graph, '5')
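With the graph above, the traversal prints the nodes level by level: 5 3 7 2 4 8.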
--------Practical 2---------
#IDDFS (Iterative deepening depth-first search) for romanian map
from RMP import dict_gn

start = 'Arad'
goal = 'Bucharest'
result = ''

def DLS(city, visitedstack, startlimit, endlimit):
    global result
    found = 0
    result = result + city + ' '
    visitedstack.append(city)
    if city == goal:
        return 1
    if startlimit == endlimit:
        return 0
    for eachcity in dict_gn[city].keys():
        if eachcity not in visitedstack:
            found = DLS(eachcity, visitedstack, startlimit + 1, endlimit)
            if found:
                return found
    return 0

def IDDFS(city, visitedstack, endlimit):
    global result
    for i in range(0, endlimit):
        print("Searching at Limit:", i)
        found = DLS(city, visitedstack, 0, i)
        if found:
            print("City found")
            break
        else:
            print("City not found")
            print(result)
            print("--------------")
            result = ''
            visitedstack = []

visitedstack = []
IDDFS(start, visitedstack, 9)
print("IDDFS Traversal from", start, "to", goal, "is:")
print(result)
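IDDFS reruns a depth-limited DFS (DLS) with the limit raised one level at a time, so it keeps DFS's small memory footprint while, like BFS, finding a shallowest goal. The shortest hop path Arad - Sibiu - Fagaras - Bucharest is three edges, so the limit must reach at least 3; the exact limit at which this implementation succeeds depends on the neighbour ordering in dict_gn, because the visited stack is shared across branches.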
--------Practical 3-----------
# A* Algorithm
import queue as Q
from RMP import dict_gn
from RMP import dict_hn

#f(n) = g(n) + h(n)
start = 'Arad'
goal = 'Bucharest'
result = ''

def get_fn(citystr):
    cities = citystr.split(" , ")
    hn = gn = 0
    # g(n): sum of the step costs along the path so far
    for ctr in range(0, len(cities) - 1):
        gn = gn + dict_gn[cities[ctr]][cities[ctr + 1]]
    # h(n): straight-line-distance heuristic of the last city on the path
    hn = dict_hn[cities[len(cities) - 1]]
    return hn + gn

def expand(cityq):
    global result
    tot, citystr, thiscity = cityq.get()
    if thiscity == goal:
        result = citystr + " : : " + str(tot)
        return
    for cty in dict_gn[thiscity]:
        cityq.put((get_fn(citystr + " , " + cty), citystr + " , " + cty, cty))
    expand(cityq)

def main():
    cityq = Q.PriorityQueue()
    thiscity = start
    cityq.put((get_fn(start), start, thiscity))
    expand(cityq)
    print("The A* path with the total is: ")
    print(result)

main()
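Assuming RMP carries the standard AIMA distances and straight-line heuristic, the optimal route is Arad , Sibiu , Rimnicu Vilcea , Pitesti , Bucharest with total cost 140 + 80 + 97 + 101 = 418, so that is the path and total this program should print.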
--------Practical 4----------
# Implement a Machine Learning algorithm using matplotlib, numpy, pandas
It's simple, do it yourself - I'll send a photo in the group
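No code was pasted for this practical, so here is a minimal sketch of the kind of program the title asks for, assuming nothing beyond numpy, pandas, and matplotlib: it builds a small synthetic dataset (the data and column names are made up for illustration), fits a least-squares line with np.polyfit, and plots the result.

#Minimal sketch: simple linear regression with numpy, pandas, matplotlib
#(illustrative only - the synthetic data and column names are assumptions)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt

rng = np.random.default_rng(42)
x = np.linspace(0, 10, 50)
y = 2.5 * x + 1.0 + rng.normal(0, 2, size=x.size)  # noisy straight line
df = pd.DataFrame({'x': x, 'y': y})

# least-squares fit of a degree-1 polynomial: y = slope*x + intercept
slope, intercept = np.polyfit(df['x'], df['y'], 1)
print("slope:", slope, "intercept:", intercept)

plt.scatter(df['x'], df['y'], label='data')
plt.plot(df['x'], slope * df['x'] + intercept, color='red', label='fit')
plt.legend()
plt.show()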
--------Practical 5------------
#Implement RBFS (recursive best first search) algorithm for romanian map
import queue as Q
from RMP import dict_gn
from RMP import dict_hn

start = 'Arad'
goal = 'Bucharest'
result = ''

def get_fn(citystr):
    cities = citystr.split(',')
    hn = gn = 0
    for ctr in range(0, len(cities) - 1):
        gn = gn + dict_gn[cities[ctr]][cities[ctr + 1]]
    hn = dict_hn[cities[len(cities) - 1]]
    return hn + gn

def printout(cityq):
    for i in range(0, cityq.qsize()):
        print(cityq.queue[i])

def expand(cityq):
    global result
    tot, citystr, thiscity = cityq.get()
    nexttot = 999
    if not cityq.empty():
        nexttot, nextcitystr, nextthiscity = cityq.queue[0]
    if thiscity == goal and tot < nexttot:
        result = citystr + '::' + str(tot)
        return
    print("Expanded city---------------------", thiscity)
    print("Second best f(n)------------------", nexttot)
    tempq = Q.PriorityQueue()
    for cty in dict_gn[thiscity]:
        tempq.put((get_fn(citystr + ',' + cty), citystr + ',' + cty, cty))
    for ctr in range(1, 3):
        ctrtot, ctrcitystr, ctrthiscity = tempq.get()
        if ctrtot < nexttot:
            cityq.put((ctrtot, ctrcitystr, ctrthiscity))
        else:
            cityq.put((ctrtot, citystr, thiscity))
            break
    printout(cityq)
    expand(cityq)

def main():
    cityq = Q.PriorityQueue()
    thiscity = start
    cityq.put((999, "NA", "NA"))
    cityq.put((get_fn(start), start, thiscity))
    expand(cityq)
    print(result)

main()
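This is a simplified RBFS: nexttot holds the f(n) of the best alternative path waiting on the queue, and when a successor of the expanded city costs at least that much, the parent is pushed back with the successor's f-value (the backed-up cost) so the search abandons this branch and resumes from the alternative. The (999, "NA", "NA") entry seeds the queue as a stand-in for an infinite f-limit.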
--------Practical 6---------
#Implement the decision tree learning algorithm
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score

#function for importing the dataset
def importdata():
    #the UCI balance-scale file has no header row
    balance_data = pd.read_csv("balance-scale.data", header=None)
    #print the dataset length
    print("Dataset Length : ", len(balance_data))
    #print the first few observations
    print("Dataset : ", balance_data.head())
    return balance_data

#function to split the dataset
def splitdataset(balance_data):
    #separating the target variable (first column) from the four features
    X = balance_data.values[:, 1:5]
    Y = balance_data.values[:, 0]
    #splitting the dataset into train and test
    X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.3, random_state=100)
    return X, Y, X_train, X_test, y_train, y_test

#function to perform training with entropy
def train_using_entropy(X_train, X_test, y_train, y_test):
    #decision tree with entropy as the split criterion
    clf_entropy = DecisionTreeClassifier(criterion="entropy", random_state=100, max_depth=3, min_samples_leaf=5)
    #performing training
    clf_entropy.fit(X_train, y_train)
    return clf_entropy

def prediction(X_test, clf_object):
    y_pred = clf_object.predict(X_test)
    print("Predicted Values : ")
    print(y_pred)
    return y_pred

def cal_accuracy(y_test, y_pred):
    print("Accuracy : ", accuracy_score(y_test, y_pred) * 100)

def main():
    data = importdata()
    X, Y, X_train, X_test, y_train, y_test = splitdataset(data)
    clf_entropy = train_using_entropy(X_train, X_test, y_train, y_test)
    print("Results using entropy : ")
    y_pred_entropy = prediction(X_test, clf_entropy)
    cal_accuracy(y_test, y_pred_entropy)

main()
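The code expects balance-scale.data in the working directory. It is the UCI Balance Scale dataset (625 rows; a class label L, B, or R followed by four integer attributes), which can be fetched from the UCI repository, e.g. https://archive.ics.uci.edu/ml/machine-learning-databases/balance-scale/balance-scale.data.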
--------Practical 7----------
# Implement AdaBoost (Adaptive Boosting)
import pandas
from sklearn import model_selection
from sklearn.ensemble import AdaBoostClassifier

url = "https://raw.githubusercontent.com/jbrownlee/Datasets/master/pima-indians-diabetes.data.csv"
names = ['preg', 'plas', 'pres', 'skin', 'test', 'mass', 'pedi', 'age', 'class']
dataframe = pandas.read_csv(url, names=names)
array = dataframe.values
x = array[:, 0:8]  # the eight input features
y = array[:, 8]    # the class label
seed = 100
num_trees = 30
model = AdaBoostClassifier(n_estimators=num_trees, random_state=seed)
results = model_selection.cross_val_score(model, x, y)
print(results.mean())
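cross_val_score is called with its defaults, which in current scikit-learn means 5-fold cross-validation scored by accuracy, so the printed mean is the average accuracy across five folds; pass cv=10 explicitly if the practical expects 10-fold validation.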