Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
- import numpy as np
- import pandas as pd
- from sklearn.model_selection import train_test_split
- import matplotlib.pyplot as plt
- import random
- from sklearn.linear_model import LinearRegression
- from sklearn.preprocessing import PolynomialFeatures
def format_date(date):
    """Convert a 'dd.mm.yyyy' date string into a fractional-year float.

    Each month contributes 1/12 of a year and each day 1/365
    (1/366 for years divisible by 4).
    """
    d, m, y = (int(part) for part in date.split('.'))
    days_in_year = 366 if y % 4 == 0 else 365
    return float(y) + float(m) / 12 + float(d) / days_in_year
def reformate_date(number):
    """Approximate inverse of format_date: fractional year -> 'd.mm.yyyy'.

    The month is zero-padded to two digits; the day is not (this mirrors
    the original output format exactly).
    """
    year = int(number)
    year_length = 365 if year % 4 != 0 else 366
    month_float = (number - year) * year_length / 30
    day_count = int((month_float - int(month_float)) * 30)
    month_text = str(int(month_float)).zfill(2)
    return f'{day_count}.{month_text}.{year}'
def function(x, coefficients):
    """Evaluate the polynomial sum_i coefficients[i] * x**i at x.

    Works for scalars and for NumPy arrays (elementwise).
    """
    total = coefficients[0]
    for power, coef in enumerate(coefficients[1:], start=1):
        total = total + coef * x ** power
    return total
def f(x, coefficients):
    """Predict with a fitted model: intercept coefficients[0] plus the
    dot product of the feature matrix x with coefficients[1:]."""
    intercept = coefficients[0]
    slope_part = x.dot(coefficients[1:])
    return intercept + slope_part
def fun_name(coefficients):
    """Render coefficients as a readable polynomial string.

    Example shape: 'f(x)=c0+c1*x^1-c2*x^2' with each coefficient
    rounded to 2 decimal places; negative terms carry their own sign.
    """
    pieces = [f'f(x)={np.round(coefficients[0], 2)}']
    for power, coef in enumerate(coefficients[1:], start=1):
        sign = '+' if coef >= 0 else ''
        pieces.append(f'{sign}{np.round(coef, 2)}*x^{power}')
    return ''.join(pieces)
def mean_squared_error(actual, predicted):
    """Mean squared error between two equal-sized sequences.

    Accepts plain sequences or NumPy arrays of any matching total size
    (e.g. an (n, 1) column vector vs. a flat (n,) prediction vector);
    both are flattened before comparison.

    Returns a plain float.  Raises ValueError on a length mismatch.

    Fix: the original returned ``mse[0]``, which only worked when each
    element was itself a length-1 array (the (n, 1)-column case) and
    crashed with a TypeError on ordinary scalar sequences.
    """
    a = np.ravel(np.asarray(actual, dtype=float))
    p = np.ravel(np.asarray(predicted, dtype=float))
    if a.shape != p.shape:
        raise ValueError(f'length mismatch: {a.shape[0]} vs {p.shape[0]}')
    return float(np.mean((a - p) ** 2))
def gauss(A, b):
    """Solve the linear system A x = b by Gaussian elimination.

    Parameters
    ----------
    A : (n, n) array-like coefficient matrix.
    b : (n,) or (n, 1) array-like right-hand side.

    Returns
    -------
    list of float — the solution vector (same return shape as before).

    Raises
    ------
    ValueError if the matrix is singular.

    Fixes over the original: (1) partial pivoting — the largest
    remaining |pivot| is swapped onto the diagonal, avoiding division
    by a zero or tiny A[i, i]; (2) the caller's A and b are no longer
    mutated in place (we work on float copies).
    """
    A = np.array(A, dtype=float)               # copy: no caller-visible mutation
    b = np.ravel(np.array(b, dtype=float)).copy()
    n = A.shape[0]
    # Forward elimination with partial pivoting.
    for i in range(n):
        pivot_row = i + int(np.argmax(np.abs(A[i:, i])))
        if A[pivot_row, i] == 0.0:
            raise ValueError('singular matrix')
        if pivot_row != i:
            A[[i, pivot_row]] = A[[pivot_row, i]]
            b[i], b[pivot_row] = b[pivot_row], b[i]
        for j in range(i + 1, n):
            factor = A[j, i] / A[i, i]
            b[j] -= factor * b[i]
            A[j, i:] -= factor * A[i, i:]
    # Back substitution.
    x = np.zeros(n)
    for i in range(n - 1, -1, -1):
        x[i] = (b[i] - A[i, i + 1:].dot(x[i + 1:])) / A[i, i]
    return [float(v) for v in x]
def least_squares_polynomial_regression(X, y):
    """Fit least-squares coefficients via the normal equations.

    A bias column of ones is prepended to X, then the system
    (X^T X) c = X^T y is solved with the hand-rolled gauss() solver.
    Returns the coefficient list [c0, c1, ...] (c0 = intercept).
    """
    design = np.hstack((np.ones((X.shape[0], 1)), X))
    normal_matrix = design.T @ design
    rhs = design.T @ y
    return gauss(normal_matrix, rhs)
# --- Dataset: personal 400 m race results (seconds) and the race date. ---
# Column names are Ukrainian: 'Результат_на_400м' = result, 'Дата' = date.
df = pd.DataFrame(columns=['Результат_на_400м', 'Дата'])
df.loc[0] = [62.48, '22.02.2018']
df.loc[1] = [58.60, '18.01.2019']
df.loc[2] = [58.12, '28.01.2019']
df.loc[3] = [56.88, '18.06.2019']
df.loc[4] = [58.08, '18.01.2020']
df.loc[5] = [57.17, '27.01.2020']
df.loc[6] = [57.07, '03.02.2020']
df.loc[7] = [56.22, '11.02.2020']
df.loc[8] = [54.04, '26.08.2020']
df.loc[9] = [53.65, '19.09.2020']
df.loc[10] = [54.46, '27.01.2021']
df.loc[11] = [54.07, '10.02.2021']
df.loc[12] = [53.34, '16.06.2021']
df.loc[13] = [53.91, '29.06.2021']
df.loc[14] = [53.23, '22.01.2022']
df.loc[15] = [53.54, '26.08.2022']
df.loc[16] = [52.85, '17.09.2022']
df.loc[17] = [53.26, '17.02.2023']
df.loc[18] = [51.67, '04.05.2023']
df.loc[19] = [51.16, '27.05.2023']
df.loc[20] = [51.33, '07.06.2023']
df.loc[21] = [51.15, '15.07.2023']
df.loc[22] = [50.66, '28.07.2023']
# Third column: each 'dd.mm.yyyy' string converted to a fractional-year float.
df['Форматована_дата'] = [format_date( df[df.columns[1]].loc[i] ) for i in range(len(df[df.columns[1]]))]
print(df)
# data = the formatted-date feature (drops date string + result columns);
# target = the 400 m result (drops formatted date + date string columns).
data = df.drop(labels=[df.columns[1], df.columns[0]], axis=1)
target = df.drop(labels=[df.columns[2], df.columns[1]], axis=1)
# Detach from pandas into plain NumPy arrays (each ends up shape (n, 1)).
target = np.copy(target)
data = np.copy(data)
# --- Degree-1 (linear) fit with the hand-rolled normal-equation solver. ---
degree = 1
# Manual polynomial features: columns x^1 .. x^degree (bias column is
# added inside least_squares_polynomial_regression).
data_poly_manual = np.hstack([data ** i for i in range(1, degree + 1)])
X_train, X_test, y_train, y_test = train_test_split(data_poly_manual, target, test_size=0.3, random_state=42)
coefficients = least_squares_polynomial_regression(X_train, y_train)
y_pred = f(X_test, coefficients)
# Scatter plot of the train/test split (labels/title are Ukrainian:
# training sample / test sample).
plt.figure(figsize=(10, 7))
# X: dense grid over the date range, reused for all curve plots below.
X = np.linspace(np.min(data), np.max(data), 100)
plt.scatter(X_train[:, 0], y_train[:, 0], c='blue', label='Навчальна вибірка')
plt.scatter(X_test[:, 0], y_test[:, 0], c='red', label='Тестова вибірка')
plt.legend()
plt.xlabel(f'x')
plt.ylabel(f'y')
plt.title('Побудова точок навчальної та тестової вибірки')
plt.show()
mse = mean_squared_error(y_test, y_pred)
print(f"Mean Squared Error: {mse}\n")
# Side-by-side table: test inputs, true values, predictions.
match_df = pd.DataFrame({f'x_test': X_test[:, 0],
                         f'y_test': y_test[:, 0],
                         f'y_test_pred': y_pred})
print(match_df)
print(f'\n{fun_name(coefficients)} ; p = {degree}')
- plt.figure(figsize=(10, 7))
- plt.scatter(X_train[:, 0], y_train[:, 0], c='blue', label='Навчальна вибірка')
- plt.scatter(X_test[:, 0], y_test[:, 0], c='red', label='Тестова вибірка')
- plt.plot(X, function(X, coefficients), label=f'p={degree}')
- plt.legend()
- plt.xlabel(f'x')
- plt.ylabel(f'y')
- plt.show()
- plt.figure(figsize=(15, 10))
- plt.scatter(X_train[:, 0], y_train[:, 0], c='blue', label='Навчальна вибірка')
- plt.scatter(X_test[:, 0], y_test[:, 0], c='red', label='Тестова вибірка')
- mse_df = pd.DataFrame({'MSE_Train': [], 'MSE_Test': []})
- for p in range(1, 21):
- degree = p
- data_poly_manual = np.hstack([data ** i for i in range(1, degree + 1)])
- X_train, X_test, y_train, y_test = train_test_split(data_poly_manual, target, test_size=0.3, random_state=42)
- coefficients = least_squares_polynomial_regression(X_train, y_train)
- y_pred = f(X_test, coefficients)
- mse_test = mean_squared_error(y_test, y_pred)
- mse_train = mean_squared_error(y_train, f(X_train, coefficients))
- mse_df.loc[p] = [mse_train, mse_test]
- if p < 6:
- plt.plot(X, function(X, coefficients), label=f' p={degree}')
- print(f'{fun_name(coefficients)} ; p = {degree}')
- plt.legend()
- plt.xlabel(f'x')
- plt.ylabel(f'y')
- plt.show()
- mse_df.index.names = ['p']
- print(mse_df)
# --- Plot the MSE curves, then refit at the test-MSE-optimal degree. ---
plt.figure(figsize=(10, 7))
plt.plot(mse_df.index, mse_df['MSE_Train'], label='MSE_Train')
plt.plot(mse_df.index, mse_df['MSE_Test'], label='MSE_Test')
plt.xlabel(f'p')
plt.ylabel(f'MSE')
plt.legend()
plt.show()
# idxmin() yields, per column, the degree p with the smallest MSE.
mse_df_min = mse_df.idxmin()
print(mse_df_min)
degree = mse_df_min.loc['MSE_Test']
data_poly_manual = np.hstack([data ** i for i in range(1, degree + 1)])
X_train, X_test, y_train, y_test = train_test_split(data_poly_manual, target, test_size=0.3, random_state=42)
coefficients = least_squares_polynomial_regression(X_train, y_train)
# Kept aside because `coefficients` is later overwritten by the sklearn fit.
my_coefficients = coefficients.copy()
y_pred = f(X_test, coefficients)
mse = mean_squared_error(y_test, y_pred)
print(f'\n{fun_name(coefficients)} ; p = {degree}')
plt.figure(figsize=(10, 7))
plt.scatter(X_train[:, 0], y_train[:, 0], c='blue', label='Навчальна вибірка')
plt.scatter(X_test[:, 0], y_test[:, 0], c='red', label='Тестова вибірка')
plt.plot(X, function(X, coefficients), label=f'p={degree}')
plt.legend()
plt.xlabel(f'x')
plt.ylabel(f'y')
plt.show()
# --- The same degree sweep done with scikit-learn, for comparison. ---
mse_sk_df = pd.DataFrame({'MSE_Train_sk': [], 'MSE_Test_sk': []})
for p in range(1, 21):
    x = data
    y = target
    # Same split parameters as the manual pipeline, so folds match.
    x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3, random_state=42)
    poly_features = PolynomialFeatures(degree=p)
    x_train_poly = poly_features.fit_transform(x_train)
    x_test_poly = poly_features.transform(x_test)
    model = LinearRegression()
    model.fit(x_train_poly, y_train)
    y_pred = model.predict(x_test_poly)
    test_error1 = mean_squared_error(y_test, y_pred)
    y_train_pred = model.predict(x_train_poly)
    train_error1 = mean_squared_error(y_train, y_train_pred)
    mse_sk_df.loc[p] = [train_error1, test_error1]
mse_sk_df.index.names = ['p']
# Manual and sklearn MSE tables side by side.
mse_concat = pd.concat([mse_df, mse_sk_df], axis=1)
print(mse_concat)
mse_df_min_match = mse_concat.idxmin()
print(mse_df_min_match)
degree_sk = mse_df_min_match.loc['MSE_Test_sk']
# Refit the sklearn model at its best test-MSE degree.
x = data
y = target
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3, random_state=42)
poly_features = PolynomialFeatures(degree=degree_sk)
x_train_poly = poly_features.fit_transform(x_train)
x_test_poly = poly_features.transform(x_test)
model = LinearRegression()
model.fit(x_train_poly, y_train)
y_pred = model.predict(x_test_poly)
# Evaluate the fitted curve on the dense date grid X for plotting.
x_plot = X
x_plot_poly = poly_features.transform(x_plot.reshape(-1, 1))
y_plot = model.predict(x_plot_poly)
coefficients = model.coef_
intercept = model.intercept_
# Presumably coef_[0, 0] belongs to the PolynomialFeatures bias column,
# so it is replaced with the fitted intercept before printing — confirm.
# NOTE(review): intercept_ looks like a length-1 array here; assigning it
# into a scalar slot is deprecated in recent NumPy — verify on the
# NumPy version in use.
coefficients[0, 0] = intercept
coefficients = coefficients[0]
print(f'\n{fun_name(coefficients)} ; p = {degree_sk}')
print(f'{fun_name(my_coefficients)} ; p = {degree}')
# Overlay: sklearn curve vs the manual best-degree curve.
plt.figure(figsize=(10, 7))
plt.title('Порівняння роботи з sklearn')
plt.scatter(X_train[:, 0], y_train[:, 0], c='blue', label='Навчальна вибірка')
plt.scatter(X_test[:, 0], y_test[:, 0], c='red', label='Тестова вибірка')
plt.plot(x_plot, y_plot, label=f"p_sk={degree_sk}")
plt.plot(X, function(X, my_coefficients), label=f'p={degree}')
plt.xlabel("X")
plt.ylabel("Y")
plt.legend()
plt.show()
# All four MSE curves (manual and sklearn, train and test) on one axis.
plt.figure(figsize=(10, 7))
plt.title('Порівняння роботи з sklearn')
plt.plot(mse_concat.index, mse_concat['MSE_Train'], label='MSE_Train')
plt.plot(mse_concat.index, mse_concat['MSE_Test'], label='MSE_Test')
plt.plot(mse_concat.index, mse_concat['MSE_Train_sk'], label='MSE_Train_sk')
plt.plot(mse_concat.index, mse_concat['MSE_Test_sk'], label='MSE_Test_sk')
plt.xlabel(f'p')
plt.ylabel(f'MSE')
plt.legend()
plt.show()
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement