import math

# Task 1: trigonometric functions via Taylor series and Newton's method
class AdvancedCalculus:
    def my_sin(self, theta):
        # Reduce the argument to [-pi, pi); % (unlike math.fmod) keeps the
        # intermediate result non-negative, so negative angles reduce correctly.
        theta = (theta + math.pi) % (2 * math.pi) - math.pi
        result = 0
        termsign = 1
        power = 1
        for _ in range(10):
            result += (math.pow(theta, power) / math.factorial(power)) * termsign
            termsign *= -1
            power += 2
        return result

    def my_cos(self, theta):
        theta = (theta + math.pi) % (2 * math.pi) - math.pi
        result = 0
        termsign = 1
        power = 0
        for _ in range(10):
            result += (math.pow(theta, power) / math.factorial(power)) * termsign
            termsign *= -1
            power += 2
        return result

    def my_tan(self, theta):
        return self.my_sin(theta) / self.my_cos(theta)

    @staticmethod
    def grad_into_rad(grads):
        # Converts degrees to radians ("grad" here abbreviates degrees,
        # not gradians, which would use pi/200)
        return (grads * math.pi) / 180

    def my_acos_newton(self, value):
        # Newton's method for cos(x) = value:
        # x_{n+1} = x_n + (cos(x_n) - value) / sin(x_n)
        delta = 1e-5
        x = math.pi * (1 - value) / 2  # linear initial guess on [0, pi]
        last = x
        x += (self.my_cos(x) - value) / self.my_sin(x)
        while abs(x - last) > delta:
            last = x
            x += (self.my_cos(x) - value) / self.my_sin(x)
        return x

    def my_asin(self, value):
        # Maclaurin series, |value| <= 1:
        # asin(x) = sum_i (2i)! / (4^i * (i!)^2 * (2i+1)) * x^(2i+1)
        approx_value = 0
        for i in range(5):
            coef = value ** (2 * i + 1)
            f_up = math.factorial(2 * i)
            f1_lw = 4 ** i
            f2_lw = math.factorial(i) ** 2
            f3_lw = 2 * i + 1
            approx_value += coef * (f_up / (f1_lw * f2_lw * f3_lw))
        return approx_value

    def my_acos(self, value):
        # acos(x) = pi/2 - asin(x); reuses the series above
        return math.pi / 2 - self.my_asin(value)

    def my_atan(self, value):
        # Leibniz series, valid for |value| <= 1
        approx_value = 0
        for i in range(1, 5):
            coef = value ** (2 * i - 1)
            f_up = (-1) ** (i - 1)
            f_lw = 2 * i - 1
            approx_value += (f_up / f_lw) * coef
        return approx_value
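# A minimal sanity check for the class above (a sketch, not part of the
# original tasks); the tolerances are assumptions based on the series lengths.
_calc = AdvancedCalculus()
for _t in (0.3, 1.0, 2.5):
    assert abs(_calc.my_sin(_t) - math.sin(_t)) < 1e-6
    assert abs(_calc.my_cos(_t) - math.cos(_t)) < 1e-6
# the asin/atan series are truncated to a few terms, so the error is larger
assert abs(_calc.my_asin(0.5) - math.asin(0.5)) < 1e-3
assert abs(_calc.my_atan(0.5) - math.atan(0.5)) < 1e-3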
# Task 2: a binary tree as nested lists: [value, left_subtree, right_subtree]
tree = ['a', ['b', ['d', [], []], ['e', [], []]], ['c', ['f', [], []], []]]
print("Left Subtree: {0}".format(tree[1]),
      "Right Subtree: {0}".format(tree[2]))
# Task 3
# NOT FINISHED (author's note); the in-progress version follows:
class Error(Exception):
    pass

class ExistingValue(Error):
    pass

# A binary search tree that rejects duplicate keys
class Tree:
    def __init__(self, data):
        self.left = None
        self.right = None
        self.data = data

    def insert(self, data):
        if self.data is None:
            self.data = data
            return
        if data < self.data:
            if self.left is None:
                self.left = Tree(data)
            else:
                self.left.insert(data)
        elif data > self.data:
            if self.right is None:
                self.right = Tree(data)
            else:
                self.right.insert(data)
        else:
            raise ExistingValue(f"Tree with data:{data} already exists")

    def print_tree(self, direction='root', level=0):
        # prints each node with its depth and immediate children
        print(f"[{direction}] #{level}-{self} | left-{self.left} | right-{self.right}")
        if self.left is not None:
            self.left.print_tree(direction='left', level=level + 1)
        if self.right is not None:
            self.right.print_tree(direction='right', level=level + 1)

    def __str__(self):
        return f"Tree({self.data})"

tree = Tree(40)
tree.insert(11)
tree.insert(2)
tree.insert(5)
tree.insert(3)
tree.insert(18)
tree.insert(17)
tree.insert(1)
tree.insert(13)
tree.print_tree()
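# An optional check (a sketch, not in the original paste): an in-order walk
# of a binary search tree must yield its keys in ascending order.
def inorder(node):
    if node is None:
        return []
    return inorder(node.left) + [node.data] + inorder(node.right)

values = inorder(tree)
print(values)  # [1, 2, 3, 5, 11, 13, 17, 18, 40]
assert values == sorted(values)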
# Task 4: a decision tree classifier on a toy two-class dataset
import numpy as np
import pandas as pd
import seaborn as sn
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import classification_report, confusion_matrix
from sklearn import tree

x = np.array([[-1, -1],
              [-2, -1],
              [-3, -2],
              [1, 1],
              [2, 1],
              [3, 2]])
# class labels for the six samples, used as the split target below
target = [0, 0, 0, 1, 1, 1]

dataset = pd.DataFrame(data=x)
x_train, x_test, y_train, y_test = train_test_split(
    dataset, target, test_size=0.20
)

classifier = DecisionTreeClassifier()
classifier.fit(x_train, y_train)

tree.plot_tree(classifier)
plt.show()

y_pred = classifier.predict(x_test)
print(confusion_matrix(y_test, y_pred))
print(classification_report(y_test, y_pred))
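# The seaborn import is otherwise unused; a plausible use (a sketch, not
# confirmed by the original paste) is drawing the confusion matrix as a heatmap.
cm = confusion_matrix(y_test, y_pred)
sn.heatmap(pd.DataFrame(cm), annot=True, cmap="Blues")
plt.xlabel("Predicted")
plt.ylabel("Actual")
plt.show()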
# Task 5: a decision tree regressor on the red wine quality dataset
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeRegressor
from sklearn.model_selection import train_test_split
from sklearn import tree
from sklearn import metrics

url = r'https://raw.githubusercontent.com/aniruddhachoudhury/Red-Wine-Quality/master/winequality-red.csv'
dataset = pd.read_csv(url)
print(dataset.head())
print(dataset.shape)
print(dataset.describe())

plt.scatter(dataset['pH'], dataset['quality'], color='b')
plt.xlabel("pH")
plt.ylabel("Quality")
plt.show()

# features: every column except the last; target: the final 'quality' column
x = dataset.iloc[:, :-1].values
y = dataset.iloc[:, -1].values
print(x, y, sep='\n')

x_train, x_test, y_train, y_test = train_test_split(
    x, y, test_size=0.2, random_state=0
)

regressor = DecisionTreeRegressor()
regressor.fit(x_train, y_train)

tree.plot_tree(regressor)
plt.show()

y_pred = regressor.predict(x_test)
print(y_pred)

df = pd.DataFrame({"Actual": y_test, "Predicted": y_pred})
print(df)

print("Mean Squared Error: ", metrics.mean_squared_error(y_test, y_pred))
print("Mean Absolute Error: ", metrics.mean_absolute_error(y_test, y_pred))
# mean absolute error as a percentage of the average quality score
print("Relative MAE, %: ", metrics.mean_absolute_error(y_test, y_pred) / np.average(y) * 100)
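# A closing visual check (a sketch, not in the original paste): actual vs.
# predicted quality; points near the diagonal indicate accurate predictions.
plt.scatter(y_test, y_pred, alpha=0.3)
plt.plot([y.min(), y.max()], [y.min(), y.max()], color='r')  # ideal-fit line
plt.xlabel("Actual quality")
plt.ylabel("Predicted quality")
plt.show()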