blob_id (string) | language (string) | repo_name (string) | path (string) | src_encoding (string) | length_bytes (int64) | score (float64) | int_score (int64) | detected_licenses (sequence) | license_type (string) | code (string) | used_libs (sequence)
---|---|---|---|---|---|---|---|---|---|---|---|
57a406711383bb9351bc16a5941cf3e316af7926 | Python | TimoBeck/IE598_F19_HW5 | /Assignment5.py | UTF-8 | 3,415 | 3.140625 | 3 | [] | no_license | import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score
from sklearn import svm
from sklearn import metrics
import scipy.stats as stats
from sklearn.metrics import mean_squared_error
df = pd.read_csv("hw5_treasury yield curve data.csv")
#Part 1 - EDA
df = df.dropna()
print('Summary statistic of our dataset')
print(df.describe())
print('Head of our dataset')
print(df.head())
print('Heat map of dataset:')
corMat = pd.DataFrame(df.corr())
plt.pcolor(corMat)
plt.show()
#Linear Regression
X, y = df.iloc[:,1:31].values, df.iloc[:, 31].values
X_train, X_test, y_train, y_test = train_test_split(X, y,test_size=0.25, random_state=42)
reg = LinearRegression()
reg.fit(X_train, y_train)
y_test_pred = reg.predict(X_test)
y_train_pred = reg.predict(X_train)
print('R^2 train: %.3f, test: %.3f' %(r2_score(y_train, y_train_pred),r2_score(y_test, y_test_pred)))
# SVR Regressor
clf_svr = svm.SVR(kernel='linear')
clf_svr.fit(X_train,y_train)
y_pred_train_SVM = clf_svr.predict(X_train)
y_pred_test_SVM = clf_svr.predict(X_test)
print('R^2 train: %.3f, test: %.3f' %(r2_score(y_train, y_pred_train_SVM),r2_score(y_test, y_pred_test_SVM)))
#Part 2 - PCA
sc = StandardScaler()
X_train_std = sc.fit_transform(X_train)
X_test_std = sc.transform(X_test)  # reuse the scaler fitted on the training set; do not refit on the test set
# Compute and display the explained variance ratio for all components
pca = PCA(n_components=None)
X_train_pca = pca.fit_transform(X_train_std)
print(pca.explained_variance_ratio_)
plt.bar(range(1,31), pca.explained_variance_ratio_, alpha=0.5, align='center',label='individual explained variance')
cum_var = np.cumsum(pca.explained_variance_ratio_)
plt.step(range(1,31), cum_var, where='mid',label='cumulative explained variance')
plt.ylabel('Explained variance ratio')
plt.xlabel('Principal components')
plt.legend(loc='best')
plt.show()
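# One common heuristic (not required by this assignment): keep the smallest number
# of components whose cumulative explained variance crosses a threshold, e.g. 90%:
#   n_keep = np.argmax(cum_var >= 0.90) + 1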
# Compute and display the explained variance ratio for n_components = 3
pca_2 = PCA(n_components=3)
X_train_pca_n3 = pca_2.fit_transform(X_train_std)
features = range(pca_2.n_components_)
plt.bar(features, pca_2.explained_variance_ratio_)
plt.xticks(features)
plt.ylabel('variance')
plt.xlabel('PCA feature')
plt.show()
# Fit a linear regression after PCA with n = 3
X_test_pca_n3 = pca_2.transform(X_test_std)
reg.fit(X_train_pca_n3,y_train)
y_test_pred = reg.predict(X_test_pca_n3)
y_train_pred = reg.predict(X_train_pca_n3)
print('R^2 train: %.3f, test: %.3f' %(r2_score(y_train, y_train_pred),r2_score(y_test, y_test_pred)))
print('MSE train: %.3f, test: %.3f' % (mean_squared_error(y_train, y_train_pred),mean_squared_error(y_test, y_test_pred)))
# SVR Regressor after PCA with n = 3
clf_svr = svm.SVR(kernel='linear')
clf_svr.fit(X_train_pca_n3,y_train)
y_pred_train_SVM = clf_svr.predict(X_train_pca_n3)
y_pred_test_SVM = clf_svr.predict(X_test_pca_n3)
print('R^2 train: %.3f, test: %.3f' %(r2_score(y_train, y_pred_train_SVM),r2_score(y_test, y_pred_test_SVM)))
print('MSE train: %.3f, test: %.3f' % (mean_squared_error(y_train, y_pred_train_SVM),mean_squared_error(y_test, y_pred_test_SVM)))
print("My name is Timothee Becker")
print("My NetID is: tbecker5")
print("I hereby certify that I have read the University policy on Academic Integrity and that I am not in violation.")
| ["matplotlib"] |
6c0e37315d79270d8ffae6366a9cdadd18585c2e | Python | samfok/plot_prsim | /plot_prsim/plot_prsim.py | UTF-8 | 4,409 | 3.328125 | 3 | [] | no_license | import numpy as np
import matplotlib.pyplot as plt
class PRSIMPlotter(object):
"""Plots data from a PRSIM run
Parameters
----------
fsim: string
file containing PRSIM outputs
max_events: int or None
maximum number of events to read in
ignore_timing: Boolean
whether to ignore the timing information in the data
if you only care about the transition sequence
"""
def __init__(self, fsim, max_events=500, ignore_timing=False):
self.signals = {}
self.max_time = None
self.plot_rowsize = 1.5
self.read_file(fsim, max_events, ignore_timing)
def read_file(self, fsim, max_events=None, ignore_timing=False):
"""Reads in a file of PRSIM results
Parameters
----------
fsim: string
file containing PRSIM outputs
max_events: int or None
maximum number of events to read in
ignore_timing: Boolean
whether to ignore the timing information in the data
if you only care about the transition sequence
"""
with open(fsim, 'r') as fh:
lines = fh.readlines()
time = 0
prev_time = 0
n_unique_times = 0 # number of unique time stamps - 1
n_events = 1
for line in lines:
if max_events and n_events > max_events:
break
tokens = line.strip().split(' ')
if len(tokens) > 0:
if tokens[0].isnumeric():
time = int(tokens[0])
if time > prev_time:
n_unique_times += 1
prev_time = time
signal = tokens[1]
if signal not in self.signals:
value = tokens[3]
self.signals[signal] = {
"v0":value,
"t0":time,
"transitions":[]}
if ignore_timing:
self.signals[signal]["t0"] = n_unique_times
else:
self.signals[signal]["transitions"].append(time)
if ignore_timing:
self.signals[signal]["transitions"][-1] = n_unique_times
n_events += 1
self.max_time = time
if ignore_timing:
self.max_time = n_unique_times
def get_signals(self):
"""Get the signals available"""
return list(self.signals.keys())
def plot_signal(self, ax, signal):
"""Plots an individual signal"""
transitions = self.signals[signal]["transitions"]
n_transitions = len(transitions)
n_pts = 2 + 2*n_transitions
time = np.zeros(n_pts)
trace = np.zeros(n_pts)
time[0] = self.signals[signal]["t0"]
trace[0] = self.signals[signal]["v0"]
for idx in range(n_transitions):
time[2*idx+1] = transitions[idx]
time[2*idx+2] = transitions[idx]
trace[2*idx+1] = trace[2*idx]
trace[2*idx+2] = -trace[2*idx]+1
time[-1] = self.max_time
trace[-1] = trace[-2]
ax.plot(time, trace)
def plot(self, signals=None):
"""Plots available signals
Parameters
----------
signals: list or None
if None, plots all signals
otherwise a list strings or list of list of strings
strings name signals
list of signals to be plotted on the same axis
"""
if signals == None:
signals = sorted(self.get_signals())
n_signals = len(signals)
fig, axs = plt.subplots(
nrows=n_signals,
figsize=(8, self.plot_rowsize*n_signals),
sharex=True)
for ax, signal in zip(axs, signals):
if isinstance(signal, str):
signal = [signal,]
for sig in signal:
self.plot_signal(ax, sig)
ax.set_ylim((-0.1, 1.1))
ax.set_yticks([0.5])
ax.set_yticklabels([signal])
axs[0].set_xlim((0, self.max_time))
return fig, axs
@staticmethod
def show():
plt.show()
| ["matplotlib"] |
1dd82185e4403f87284a153e4abd638b780ea04a | Python | noahcurtiss/Motor_Tests | /python_scripts3/grapher.py | UTF-8 | 3,230 | 2.609375 | 3 | [] | no_license | """
This code takes data collected from the step tests, cleans it up and compiles it into a
matrix with all the edited data (total_data) and three matricies used for graphing and
interpolation. The output is a .npy and a .txt file for each matrix.
"""
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from scipy.interpolate import griddata
import math
import serial
import string
path = '/home/mbshbn/Documents/Motor_Tests/tiger700/'
def bitfilter(matrix): #filters out all maxPWM values that are not 255
matrix2 = np.zeros(12)
sz = np.shape(matrix)
for i in range(0,sz[0]):
if matrix[i,2] == 255 and matrix[i,1] != 25.5:
matrix2 = np.r_['0,2',matrix2,matrix[i]]
matrix2 = np.delete(matrix2,(0),axis=0)
return matrix2
def rpmfilter(matrix,minrpm): #filters out all rpms less than minrpm
matrix2 = np.zeros(12) #helpful for lower commands when it takes a while to reach peak speed
sz = np.shape(matrix)
for i in range(0,sz[0]):
if matrix[i,4] >= minrpm:
matrix2 = np.r_['0,2',matrix2,matrix[i]]
matrix2 = np.delete(matrix2,(0),axis=0)
return matrix2
def forcefilter(matrix, risetime, cmd_bounds): #only includes data after risetime due to lag on force sensor
matrix2 = np.zeros(12)
for cmd in range(cmd_bounds[0],cmd_bounds[2]+cmd_bounds[1],cmd_bounds[1]):
cmd_matrix = matrix[matrix[:,0] == cmd]
matrix2 = np.r_['0,2',matrix2,cmd_matrix[risetime:]]
matrix2 = np.delete(matrix2,(0),axis=0)
return matrix2
def analysis(volt,cmd_bounds):
#fill in your own file name
A = np.loadtxt(path+('%.1f' % volt)+ '.txt',delimiter='\t')
v1_data = np.loadtxt(path+('%.1f' % volt)+ 'v1.txt')
v2_data = np.loadtxt(path+('%.1f' % volt)+ 'v2.txt')
v1 = np.mean(v1_data)
v2 = np.mean(v2_data) #v1=strain sensor reading with no mass, v2=reading with mass.
force_risetime = 130
sz = np.shape(A)
num = range(sz[0])
numMagnets = 12
rpm = A[:,4]*780/numMagnets
m = 0.5; d = .088; b = .084
#m=added mass(kg), d=distance of mass to pivot(m), b=distance of motor to pivot(m),
thrust = (m*d*9.81)*(A[:,7]-v1)/(v2-v1)/b
v_input = np.full((sz[0],1),volt)
A = np.c_[A,num,rpm,thrust,v_input]
A[:,1] = A[:,1]*0.1
A[:,6] = A[:,6]*.1
#0:cmd 1:current 2:maxPWM 3:temp 4:rawRPM 5:reserved1 6:voltage 7:rawForce
#8:count 9:RPM 10:thrust 11:inputVoltage
data = bitfilter(A)
data = rpmfilter(data,10)
forcedata = forcefilter(data,force_risetime,cmd_bounds)
for cmd in range(10,260,10):
if any(abs(cmd - forcedata[:,0])<0.01):
cmd_matrix = forcedata[abs(forcedata[:,0]-cmd)<0.01]
avg = np.mean(cmd_matrix[:,7])
plt.figure(1)
plt.scatter(cmd_matrix[:,8],cmd_matrix[:,7],marker='.')
plt.axhline(y=avg)
plt.ylabel('force')
plt.title(str(cmd))
plt.show()
# plt.figure(1)
# plt.plot(forcedata[:,6])
# plt.title('current over an entire test')
# plt.ylabel('current')
# plt.show()
return forcedata
cmd_bounds = [10, 10, 250] #[min_cmd, cmd_step, max_cmd]
volt_range = [10.8,0.2,12.6] #[min input_voltage, imput_voltage step, max input_voltage]
total_data = np.zeros(12)
volt = 14.8
forcedata = analysis(volt,cmd_bounds)
total_data = np.r_['0,2',total_data,forcedata]
total_data = np.delete(total_data,(0),axis=0)
| ["matplotlib"] |
b5a3269f5c339923fec9fd6f7c23648d69d13cda | Python | AllenInstitute/bmtk | /bmtk/simulator/filternet/lgnmodel/transferfunction.py | UTF-8 | 2,268 | 2.9375 | 3 | [
"BSD-3-Clause"
] | permissive | from sympy.utilities.lambdify import lambdify
from sympy import Matrix
import sympy.parsing.sympy_parser as symp
import sympy.abc
import numpy as np
class ScalarTransferFunction(object):
def __init__(self, transfer_function_string, symbol=sympy.abc.s):
self.symbol = symbol
self.transfer_function_string = transfer_function_string
# replacing sympy.Heaviside() with np.heaviside() for better performance
modules = [{'Heaviside': lambda x, y=0.5: np.heaviside(x, 0.5)}, 'numpy', 'sympy']
self.closure = lambdify(self.symbol, symp.parse_expr(self.transfer_function_string), modules=modules)
def __call__(self, s):
return self.closure(s)
def to_dict(self):
return {'class': (__name__, self.__class__.__name__), 'function': self.transfer_function_string}
def imshow(self, rates, times=None, show=True):
import matplotlib.pyplot as plt
vals = [self(rate) for rate in rates]
times = np.linspace(0.0, 1.0, len(rates)) if times is None else times
fig, ax1 = plt.subplots()
ax1.set_xlabel('time (seconds)')
ax1.set_ylabel(str(self.symbol), color='b')
ax1.plot(times, rates, '--b')
ax1.tick_params(axis='y', labelcolor='b')
ax2 = ax1.twinx()
ax2.set_ylabel('transform', color='r')
ax2.plot(times, vals, 'r')
ax2.tick_params(axis='y', labelcolor='r')
plt.title(self.transfer_function_string)
if show:
plt.show()
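# Minimal usage sketch (illustrative expression, not taken from bmtk's documentation):
#   tf = ScalarTransferFunction('Heaviside(s)*s')   # half-wave rectification of the input
#   rates = tf(np.linspace(-1.0, 1.0, 5))           # evaluates the lambdified expression pointwise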
class MultiTransferFunction(object):
def __init__(self, symbol_tuple, transfer_function_string):
self.symbol_tuple = symbol_tuple
self.transfer_function_string = transfer_function_string
modules = [{'Heaviside': lambda x, y=0.5: np.heaviside(x, 0.5)}, 'numpy', 'sympy']
self.closure = lambdify(self.symbol_tuple,symp.parse_expr(self.transfer_function_string), modules=modules)
def __call__(self, *s):
if isinstance(s[0], (float,)):
return self.closure(*s)
else:
return np.array(list(map(lambda x: self.closure(*x), zip(*s))))
def to_dict(self):
return {'class': (__name__, self.__class__.__name__), 'function': self.transfer_function_string}
| ["matplotlib"] |
e6279a4f1d6a2accfac8563adb1ddf8500d1df3b | Python | jeffreyong/GA_files | /week3/knn.py | UTF-8 | 3,006 | 3.328125 | 3 | [] | no_license | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 18 10:15:09 2017
@author: Work
"""
# read the iris data into a DataFrame
import pandas as pd
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'
col_names = ['sepal_length', 'sepal_width', 'petal_length', 'petal_width', 'species']
iris = pd.read_csv(url, header=None, names=col_names)
iris.head()
# allow plots to appear in the notebook
%matplotlib inline
import matplotlib.pyplot as plt
# increase default figure and font sizes for easier viewing
plt.rcParams['figure.figsize'] = (6, 4)
plt.rcParams['font.size'] = 14
# create a custom colormap
from matplotlib.colors import ListedColormap
cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])
# map each iris species to a number
iris['species_num'] = iris.species.map({'Iris-setosa':0, 'Iris-versicolor':1, 'Iris-virginica':2})
# create a scatter plot of PETAL LENGTH versus PETAL WIDTH and color by SPECIES
iris.plot(kind='scatter', x='petal_length', y='petal_width', c='species_num', colormap=cmap_bold)
# create a scatter plot of SEPAL LENGTH versus SEPAL WIDTH and color by SPECIES
iris.plot(kind='scatter', x='sepal_length', y='sepal_width', c='species_num', colormap=cmap_bold)
iris.head()
# store feature matrix in "X"
feature_cols = ['sepal_length', 'sepal_width', 'petal_length', 'petal_width']
X = iris[feature_cols]
# alternative ways to create "X"
X = iris.drop(['species', 'species_num'], axis=1)
X = iris.loc[:, 'sepal_length':'petal_width']
X = iris.iloc[:, 0:4]
# store response vector in "y"
y = iris['species_num']
# check X's type
print type(X)
print type(X.values)
# check y's type
print type(y)
print type(y.values)
# check X's shape (n = number of observations, p = number of features)
print X.shape
# check y's shape (single dimension with length n)
print y.shape
# import KNeighborsClassifier from sklearn.
# where is it? Google the documentation
from sklearn.neighbors import KNeighborsClassifier
# make an instance of a KNeighborsClassifier object
knn = KNeighborsClassifier(n_neighbors=1)
type(knn)
print knn
# fit the knn model. What might the function be called? Documentation...
knn = KNeighborsClassifier(n_neighbors=1)
knn.fit(X, y)
# make predictions on this input: [3, 5, 4, 2]
# Again, what might the prediction function be called for knn?
X1 = [[3, 5, 4, 2]]
print(knn.predict(X1))
# now make predictions for [3, 5, 4, 2], [5, 4, 3, 2]
X2 = [[3, 5, 4, 2], [5, 4, 3, 2]]
print(knn.predict(X2))
# confirm prediction is an numpy array
print type(knn.predict(X))
# instantiate the model (using the value K=5)
knn = KNeighborsClassifier(n_neighbors=5, weights='distance')
# fit the model with data
knn.fit(X, y)
X_new = [[3, 5, 4, 2], [5, 4, 3, 2]]
# predict the response for new observations
print(knn.predict(X_new))
# calculate predicted probabilities of class membership
knn.predict_proba(X_new)
print(knn.predict([[5.0, 3.6, 1.4, 0.2]]))
knn.predict_proba([[5.0, 3.6, 1.4, 0.2]])
| ["matplotlib"] |
c2c7e5dc5eced2116eeeea0302e7a70dd8d1ae55 | Python | 700gtk/ML_MNIST | /support.py | UTF-8 | 2,226 | 2.671875 | 3 | [
"MIT"
] | permissive | import pandas as pd
import matplotlib.pyplot as plt
import cv2
import numpy as np
def Plot_digit(row):
first_digit = row.reshape(28, 28)
plt.imshow(first_digit, cmap='binary')
plt.axis("off")
plt.show()
def Test(y_pred, y_answers):
correct = sum(y_pred == y_answers)
return correct/len(y_pred)
def Read_in_data(path, test=False):
data_as_csv = pd.read_csv(path)
if test:
return data_as_csv.iloc[:].to_numpy(), data_as_csv.iloc[0:, 0].to_numpy()
return data_as_csv.iloc[1:, 1:].to_numpy(), data_as_csv.iloc[1:, 0].to_numpy()
def predictions_to_submission(name, predictions):
eval_results_file = open(name + '.txt', "w")
eval_results_file.writelines('ImageId,Label\n')
for i in range(len(predictions)):
eval_results_file.writelines(str(i+1) + ',' + str(predictions[i]) + '\n')
eval_results_file.close()
def skelefy(X):
toRet = []
for x in X:
ret, img = cv2.threshold(x.reshape(28, 28, 1).astype(np.uint8), 127, 255, 0)
size = np.size(img)
skel = np.zeros(img.shape, np.uint8)
element = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))
done = False
while (not done):
eroded = cv2.erode(img, element)
temp = cv2.dilate(eroded, element)
temp = cv2.subtract(img, temp)
skel = cv2.bitwise_or(skel, temp)
img = eroded.copy()
zeros = size - cv2.countNonZero(img)
if zeros == size:
done = True
toRet.append(skel)
# toRet.append(skel.flatten())
return toRet
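# skelefy() above is the classic iterative morphological skeleton: repeatedly erode,
# subtract the opening (erode then dilate) from the current image, OR the residue
# into `skel`, and stop once the image has been eroded away completely.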
def join_count(X):
toRet = []
for x in X:
im_or = x.copy()
x.astype(np.int32)
# create kernel
kernel = np.ones((7, 7))
kernel[2:5, 2:5] = 0
print(kernel)
# apply kernel
res = cv2.filter2D(x, 3, kernel)
# filter results
loc = np.where(res > 2800)
print(len(loc[0]))
# draw circles on found locations
for j in range(len(loc[0])):
cv2.circle(im_or, (loc[1][j], loc[0][j]), 10, (127), 5)
# display result
cv2.imshow('Result', im_or)
cv2.waitKey(0)
cv2.destroyAllWindows()
| ["matplotlib"] |
dfdbc98797542433fc9fbb96e8fbd5984e6fe1e3 | Python | Pyabecedarian/Machine-Learning-Assignment | /ml_ex2/ex2_reg.py | UTF-8 | 3,372 | 3.890625 | 4 | [] | no_license | """
%% Machine Learning Online Class - Exercise 2: Logistic Regression
%
% Instructions
% ------------
%
% This file contains code that helps you get started on the second part
% of the exercise which covers regularization with logistic regression.
%
% You will need to complete the following functions in this exericse:
%
% sigmoid.m
% costFunction.m
% predict.m
% costFunctionReg.m
%
% For this exercise, you will not need to change any code in this file,
% or any other files other than those mentioned above.
%
"""
import numpy as np
import matplotlib.pyplot as plt
import scipy.optimize as opt
from ml_ex2.costFunctionReg import costFunctionReg
from ml_ex2.plotData import plotData
from ml_ex2.mapFeature import mapFeature
from ml_ex2.plotDecisionBoundary import plotDecisionBoundary
# %% Load Data
# % The first two columns contains the X values and the third column
# % contains the label (y).
data = np.loadtxt('./ex2data2.txt', delimiter=',')
X = data[:,:2]
y = data[:, 2]
# plt.figure(figsize=(5,4))
plt.figure()
# plotData(X, y, xlabel='Mircochip Test 1', ylabel='Mircochip Test 2')
plotData(X, y)
plt.xlim(-1, 1.5)
plt.ylim(-0.8, 1.2)
plt.legend(['Admitted', 'Not admitted'], loc='upper right')
plt.show()
# %% =========== Part 1: Regularized Logistic Regression ============
# % In this part, you are given a dataset with data points that are not
# % linearly separable. However, you would still like to use logistic
# % regression to classify the data points.
# %
# % To do so, you introduce more features to use -- in particular, you add
# % polynomial features to our data matrix (similar to polynomial
# % regression).
# %
# Add Polynomial Features.
# Notice that mapFeature also adds a column of 1's
X = mapFeature(X[:,0], X[:,1])
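# In the original Coursera exercise mapFeature expands the two inputs into all
# polynomial terms up to degree 6 (28 columns including the bias); the exact degree
# here depends on this repo's mapFeature implementation.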
m, n = X.shape
# Initializing theta
init_theta = np.zeros(n)
# Set regularization parameter λ to 1
lambd = 1
# Compute and display initial cost and gradient
cost, grad = costFunctionReg(init_theta, X, y, lambd)
print('Cost at initial theta (zeros): %.3f' % cost)
print('Expected cost (approx): 0.693\n')
print('Gradient at initial theta (zeros) - first five values only:\n')
# Set print options which determine the way floating point numbers, arrays
# and other NumPy objects are displayed.
# np.set_printoptions(suppress=True, precision=4)
print(' {} \n'.format(grad[:5]))
print('Expected gradients (approx) - first five values only:\n')
print(' 0.0085 0.0188 0.0001 0.0503 0.0115')
# %% ============= Part 2: Regularization and Accuracies =============
# % Optional Exercise:
# % In this part, you will get to try different values of lambda and
# % see how regularization affects the decision coundart
# %
# % Try the following values of lambda (0, 1, 10, 100).
# %
# % How does the decision boundary change when you vary lambda? How does
# % the training set accuracy vary?
# %
# % Initialize fitting parameters
_, n = X.shape
init_theta = np.zeros(n)
# init_theta = zeros(size(X, 2), 1)
# % Set regularization parameter lambda to 1 (you should vary this)
# lambd = 0 # Overfitting
lambd = 1 # Just good
# lambd = 100 # Underfitting
# ☆☆☆ Use "Newton Conjugate-Gradient" to optimize costFunctionReg(theta, X, y)
result = opt.minimize(fun=costFunctionReg, x0=init_theta, args=(X, y, lambd), method='TNC', jac=True)
plotDecisionBoundary(result.x, X, y)
| ["matplotlib"] |
41f8147ed6dfdf6f9ac368def4ad15560a1093d8 | Python | alexeyshm/ML_Projects | /sax.py | UTF-8 | 4,955 | 3.109375 | 3 | [] | no_license | import numpy as np
import pandas as pd
import itertools
from saxpy.alphabet import cuts_for_asize
from saxpy.sax import ts_to_string
from bokeh.layouts import column, row
from bokeh.io import curdoc
from bokeh.models import DataTable, ColumnDataSource, Slider, BoxAnnotation, HoverTool, TableColumn, Div
from bokeh.plotting import figure
from bokeh.palettes import Category20
class RandomTimeSeries:
'''
Class to generate a random time-series with SAX-representation
To initialize, provide number of observations and cardinality
Observations are generated as a random Gaussian distribution with randomly generated mean and std deviation
'''
def __init__(self, n, cardinality = None):
self.n = n
self.t = np.arange(n) + 1
noise = np.random.randn(n) * np.random.randint(1, 20) + np.random.randint(-10, 10)
trend = self.t * np.random.uniform(-0.05, 0.05)
seasonality = np.sin(self.t) * np.random.randint(1, 20)
self.values = noise + trend + seasonality
self.mean = np.mean(self.values)
self.std = np.std(self.values)
self.norm_values = (self.values - self.mean) / self.std
self.data = {'t': self.t, 'x': self.values}
if cardinality:
self.sax(cardinality)
def sax(self, cardinality):
'''
Creates SAX representation of the time series
:param cardinality: number of symbols to use in SAX representation
'''
self.cardinality = cardinality
self.cuts = cuts_for_asize(self.cardinality)
self.string = ts_to_string(self.norm_values, self.cuts)
#denormalize cuts for correct vizualisaton
self.cuts_den = self.cuts * self.std + self.mean
self.data['symbol'] = list(self.string)
self.sax_freq = self.generate_freq()
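# For cardinality 3, for example, cuts_for_asize(3) gives the standard Gaussian
# breakpoints (roughly -0.43 and +0.43), so each normalized value maps to one of
# the symbols 'a', 'b' or 'c' in the resulting string.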
def generate_freq(self):
'''
Generate frequency table for SAX symbols
:return: pd.DataFrame
'''
freq = pd.Series(self.data['symbol']).value_counts()
freq = freq.to_frame().reset_index()
freq.columns = ['symbol', 'frequency']
return freq
def get_color():
'''
Color generator function
'''
yield from itertools.cycle(Category20[20])
def gen_plot(ts):
'''
Generates plot for given time-series
:param RandomTimeSeries ts:
'''
plot = figure(plot_width=1200, plot_height=400)
plot.ygrid.visible = False
plot.xgrid.visible = False
data = ColumnDataSource(data=ts.data)
plot.line('t', 'x', source=data)
#adding color band for each symbol
color = get_color()
band = BoxAnnotation(top=ts.cuts_den[1], fill_color=next(color), fill_alpha=0.1)
plot.add_layout(band)
for i in range(2, ts.cardinality):
band = BoxAnnotation(bottom=ts.cuts_den[i-1], top=ts.cuts_den[i], fill_color=next(color), fill_alpha=0.1)
plot.add_layout(band)
# Hover tool to check values for a data point
plot.add_tools(HoverTool(
tooltips=[
('value', '@x'),
('symbol', '@symbol'),
]))
# updating SAX string and frequency table
sax_string.text = ts.string
freq_data.data = ts.sax_freq
return plot
def set_cardinality(attr, old, new):
'''
Callback function to change cardinality for the time series by changing slider value
'''
ts.sax(new)
plot = gen_plot(ts)
global plot_row
if plot_row.children:
plot_row.children.pop()
plot_row.children.append(plot)
def set_n(attr, old, new):
'''
Callback function to generate new time series of given length by changing slider value
'''
global ts
ts = RandomTimeSeries(new, card_slider.value)
plot = gen_plot(ts)
global plot_row
if plot_row.children:
plot_row.children.pop()
plot_row.children.append(plot)
### MAIN
# Control sliders
n_slider = Slider(start=2, end=10000, value=10, step=1, title="No. observations", width=900)
n_slider.on_change('value', set_n)
card_slider = Slider(start=2, end=20, value=3, step=1, title="Cardinality", width=250)
card_slider.on_change('value', set_cardinality)
# Default time-series
ts = RandomTimeSeries(n_slider.value, card_slider.value)
# SAX string representation
sax_label = Div(text='<b>SAX representation of Time Series:</b>')
sax_string = Div(style={'overflow-x':'scroll','width':'950px'})
# Frequency table
freq_data = ColumnDataSource()
freq_columns = [TableColumn(field='symbol', title='Symbol'),
TableColumn(field='frequency', title='Frequency')]
freq_table = DataTable(source=freq_data, columns=freq_columns, width=200, index_position=None)
# Plot initialization
plot = gen_plot(ts)
# Setting layout
doc = curdoc()
curdoc().title = 'SAX Time Series'
plot_row = row(plot)
layout = column(
row(n_slider, card_slider),
plot_row,
row(freq_table, column(
sax_label,
sax_string
)
)
)
doc.add_root(layout)
| ["bokeh"] |
9b9d8d6b0f24ddc64b0e952bb4848f234f8cf854 | Python | phate09/SafeDRL | /runnables/tests/try_support_function.py | UTF-8 | 942 | 2.65625 | 3 | [] | no_license | import polyhedra.utils as utils
import matplotlib.pyplot as plt
import numpy as np
from sympy import Point
from sympy.abc import x, y
import polyhedra.utils as utils
p1 = Point(1, 1)
p2 = Point(2, 1)
p3 = Point(1, 0)
p4 = Point(x, y)
# %%
# plt.figure(figsize=(7, 7))
# plt.xlim(-1, 5)
# plt.ylim(-1, 5)
# plt.plot([1],[1],'o')
# plt.plot([2],[1],'o')
# # plt.plot([3*sqrt(2)/2],[3*sqrt(2)/2],'o')
# plt.plot([3/2],[3/2],'o')
# utils.newline((3/2,3/2),((2,1)))
# plt.show()
# %%
# Point(1,0).dot(Point(2,1))/Point(1,0).distance(Point(0,0))
Point.project(p2, Point(1, 0))
a = np.array([[2, 2], [2, 1.5], [1.5, 1.5]])
a1 = np.array([[1, 1]])
a2 = np.array([[2, 1]])
b = np.array([[0, 1]]) # direction
b1 = np.array([[2, 0]])
b2 = np.array([[1, 1]])
c = utils.project(a, b)
plt.figure(figsize=(7, 7))
plt.xlim(-1, 5)
plt.ylim(-1, 5)
plt.plot(a[:, 0], a[:, 1], 'o')
plt.plot(c[:, 0], c[:, 1], 'o')
# plt.plot(b2[:,0],b2[:,1],'o')
plt.show()
| ["matplotlib"] |
9fd72ba38a361a85bf5dfec633ca4b10ae71e0a8 | Python | akshayrana30/mila-courses | /1 - IFT6390 - Foundations of Machine Learning/Labs/Lab 04/solution_lab4.py | UTF-8 | 7,961 | 3.125 | 3 | [] | no_license | import sys
import matplotlib.pyplot as plt
import numpy as np
np.set_printoptions(precision=2)
np.random.seed(2)
##############################################################################
#
# QUESTION 1
#
##############################################################################
def error_counter(z):
return (z < 0).astype(int)
def linearloss(z):
return .5 * (z - 1) ** 2
def logisticloss(z):
return np.log(1 + np.exp(-z))
def perceptronloss(z):
return np.maximum(0, -z)
def svmloss(z):
return np.maximum(0, 1 - z)
zz = np.linspace(-3, 3, 1000)
plt.figure()
for loss in [error_counter, linearloss, logisticloss, perceptronloss, svmloss]:
plt.plot(zz, loss(zz), label=loss.__name__)
plt.ylim(-.5, 4.5)
plt.legend()
plt.grid()
plt.show()
IN_COLAB = 'google.colab' in sys.modules
if IN_COLAB:
iris = np.loadtxt('http://www.iro.umontreal.ca/~dift3395/files/iris.txt')
else:
iris = np.loadtxt('iris.txt')
##############################################################################
#
# PREPROCESSING
#
##############################################################################
def preprocess(data, label_subset, feature_subset, n_train):
"""Randomly split data into a train and test set
with the subset of classes in label_subset and the subset
of features in feature_subset.
"""
# extract only data with class label in label_subset
data = data[np.isin(data[:, -1], label_subset), :]
# remap labels to [-1, 1]
if len(label_subset) != 2:
raise Warning('We are dealing with binary classification.')
data[data[:, -1] == label_subset[0], -1] = -1
data[data[:, -1] == label_subset[1], -1] = 1
# extract chosen features + labels
data = data[:, feature_subset + [-1]]
# insert a column of 1s for the bias
data = np.insert(data, -1, 1, axis=1)
# separate into train and test
inds = np.arange(data.shape[0])
np.random.shuffle(inds)
train_inds = inds[:n_train]
test_inds = inds[n_train:]
trainset = data[train_inds]
testset = data[test_inds]
# normalize train set to mean 0 and standard deviation 1 feature-wise
# apply the same transformation to the test set
mu = trainset[:, :2].mean(axis=0)
sigma = trainset[:, :2].std(axis=0)
trainset[:, :2] = (trainset[:, :2] - mu) / sigma
testset[:, :2] = (testset[:, :2] - mu) / sigma
return trainset, testset
trainset, testset = preprocess(iris, label_subset=[1, 2], feature_subset=[2, 3], n_train=75)
##############################################################################
#
# HELPER FUNCTIONS
#
##############################################################################
def scatter(theset, marker='o'):
d1 = theset[theset[:, -1] > 0]
d2 = theset[theset[:, -1] < 0]
plt.scatter(d1[:, 0], d1[:, 1], c='b', marker=marker, label='class 1', alpha=.7)
plt.scatter(d2[:, 0], d2[:, 1], c='g', marker=marker, label='class 0', alpha=.7)
plt.xlabel('x_0')
plt.ylabel('x_1')
def finalize_plot(title):
plt.title(title)
plt.grid()
plt.legend()
plt.show()
scatter(trainset, marker='x')
scatter(testset, marker='^')
finalize_plot('train and test data')
def decision_boundary(w):
# hack to avoid changing the boundaries
xlim = plt.xlim()
ylim = plt.ylim()
xx = np.linspace(-10, 10, 2)
yy = -(w[2] + w[0] * xx) / w[1]
plt.plot(xx, yy, c='r', lw=2, label='f(x)=0')
# hack to avoid changing the boundaries
plt.xlim(xlim)
plt.ylim(ylim)
w0 = np.array([1, -1, 1])
scatter(trainset)
decision_boundary(w0)
finalize_plot('A random classifier')
##############################################################################
#
# BASE CLASS
#
##############################################################################
class LinearModel:
""""Abstract class for all linear models.
-------
Classe parent pour tous les modèles linéaires.
"""
def __init__(self, w0, reg):
self.w = np.array(w0, dtype=float)
self.reg = reg
def predict(self, X):
return np.dot(X, self.w)
def test(self, X, y):
return np.mean(self.predict(X) * y < 0)
def loss(self, X, y):
return 0
def gradient(self, X, y):
return self.w
def train(self, data, stepsize, n_steps):
X = data[:, :-1]
y = data[:, -1]
losses = []
errors = []
for _ in range(n_steps):
self.w -= stepsize * self.gradient(X, y)
losses += [self.loss(X, y)]
errors += [self.test(X, y)]
print("Training {} completed: the train error is {:.2f}%".format(self.__class__.__name__, errors[-1] * 100))
return np.array(losses), np.array(errors)
def test_model(modelclass, w0=[-1, 1, 1], reg=.1, stepsize=.2):
model = modelclass(w0, reg)
training_loss, training_error = model.train(trainset, stepsize, 100)
print("The test error is {:.2f}%".format(
model.test(testset[:, :-1], testset[:, -1]) * 100))
print('Final weights: ', model.w)
# learning curves
fig, (ax0, ax1) = plt.subplots(ncols=2, figsize=(8, 2))
ax0.plot(training_loss)
ax0.set_title('loss')
ax1.plot(training_error)
ax1.set_title('error rate')
fig.suptitle(modelclass.__name__)
# data plot
plt.figure()
scatter(trainset, marker='x')
scatter(testset, marker='^')
decision_boundary(model.w)
finalize_plot(modelclass.__name__)
test_model(LinearModel)
##############################################################################
#
# QUESTION 2
#
##############################################################################
class LinearRegression(LinearModel):
def __init__(self, w0, reg):
super().__init__(w0, reg)
def loss(self, X, y):
return .5 * np.mean((self.predict(X) - y) ** 2) + .5 * self.reg * np.sum(self.w ** 2)
def gradient(self, X, y):
return ((self.predict(X) - y)[:, np.newaxis] * X).mean(axis=0) + self.reg * self.w
test_model(LinearRegression)
##############################################################################
#
# QUESTION 3
#
##############################################################################
class Perceptron(LinearModel):
def __init__(self, w0, reg):
super().__init__(w0, reg)
def loss(self, X, y):
return .5 * np.mean(np.maximum(0, -y * self.predict(X))) + .5 * self.reg * np.sum(
self.w ** 2)
def gradient(self, X, y):
active = (y * self.predict(X) < 0).astype(float)
return - ((y * active)[:, np.newaxis] * X).mean(axis=0) + self.reg * self.w
test_model(Perceptron, reg=0, stepsize=1)
##############################################################################
#
# QUESTION 4
#
##############################################################################
class SVM(LinearModel):
def __init__(self, w0, reg):
super().__init__(w0, reg)
def loss(self, X, y):
return np.mean(np.maximum(0, 1 - y * self.predict(X))) + .5 * self.reg * np.sum(
self.w ** 2)
def gradient(self, X, y):
active = (y * self.predict(X) < 1).astype(float)
return - ((y * active)[:, np.newaxis] * X).mean(axis=0) + self.reg * self.w
test_model(SVM, reg=.001, stepsize=.5)
##############################################################################
#
# QUESTION 5
#
##############################################################################
class LogisticRegression(LinearModel):
def __init__(self, w0, reg):
super().__init__(w0, reg)
def loss(self, X, y):
return np.mean(np.log(1 + np.exp(-y * self.predict(X)))) + .5 * self.reg * np.sum(
self.w ** 2)
def gradient(self, X, y):
probas = 1 / (1 + np.exp(y * self.predict(X)))
return - ((y * probas)[:, np.newaxis] * X).mean(axis=0) + self.reg * self.w
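# Derivation sketch for the gradient above: with loss mean(log(1 + exp(-y*w.x))),
# d/dw = mean(-y*x * exp(-y*w.x) / (1 + exp(-y*w.x))) = mean(-y*x * sigma(-y*w.x)),
# and sigma(-y*w.x) = 1 / (1 + exp(y*w.x)) is exactly `probas` computed above.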
test_model(LogisticRegression)
| ["matplotlib"] |
a546f97ed1f78d72a4fde935c6ba9d32ecd798e7 | Python | ptracton/LearningDSP | /Python/testing/fft_example.py | UTF-8 | 603 | 3 | 3 | [
"MIT"
] | permissive | #! /usr/bin/env python3
# https://stackoverflow.com/questions/25735153/plotting-a-fast-fourier-transform-in-python
import matplotlib.pyplot as plt
import scipy.fftpack
import numpy as np
import Signals
if __name__ == "__main__":
# Number of samplepoints
N = 1024
# sample spacing
T = 1.0 / N
x = np.linspace(0.0, N*T, N)
y = np.sin(50.0 * 2.0*np.pi*x)# + 0.5*np.sin(80.0 * 2.0*np.pi*x)
yf = scipy.fftpack.fft(y)
xf = np.linspace(0.0, 1.0/(2.0*T), N//2)  # integer bin count (N/2 is a float under Python 3)
fig, ax = plt.subplots()
ax.plot(xf, 2.0/N * np.abs(yf[:N//2]))
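# The 2.0/N factor rescales the one-sided magnitude spectrum so a unit-amplitude
# sine shows up as a peak of roughly 1 at its frequency (here about 50 Hz).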
plt.grid(True)
plt.show()
| ["matplotlib"] |
00d3f8db1b128eb677d5bfcd239fcdd83477b253 | Python | SumantBagri/vision | /plot.py | UTF-8 | 1,478 | 3.234375 | 3 | [] | no_license | from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import numpy as np
from scipy.ndimage.interpolation import shift
def plot_contour(cnt1=None, cnt2=None, axis1='X', axis2='Y', top_axis='Z'):
'''
To plot the contour in 3D with height of the person as the given axis
Inputs:
cnt1: first contour to be plotted
cnt2: second contour to be plotted
axis: axis along which it will be plotted in 3D
Output:
plots the contour in 3D
'''
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
x1 = []
y1 = []
z1 = []
x2 = []
y2 = []
z2 = []
if cnt1 is not None:
for points in cnt1:
x1.append(points[0][0])
z1.append(-points[0][1])
y1.append(0)
x1 = np.array(x1) - np.average(x1) # shifting the plot to the origin
if cnt2 is not None:
for points in cnt2:
y2.append(points[0][0])
z2.append(-points[0][1])
x2.append(0)
y2 = np.array(y2) - np.average(y2) # shifting the plot to origin
ax.scatter(x1, y1, z1, c='r', marker='D')
ax.scatter(x2, y2, z2, c='r', marker='*')
axes = plt.gca()
axes.set_xlim([-300,300])
axes.set_ylim([-300,300])
axes.set_zlim([-1600,100])
plt.gca().set_aspect('equal', adjustable='box')
ax.set_xlabel('X Label')
ax.set_ylabel('Y Label')
ax.set_zlabel('Z Label')
#plt.axis('equal')
plt.show()
| [
"matplotlib"
] |
a1fb701a6ea3f37cbf8cc47d159b19ebf56fc2f8 | Python | imad24/pc_clustering | /src/visualization/visualize.py | UTF-8 | 3,558 | 2.765625 | 3 | [
"MIT"
] | permissive | import pandas as pd
import math
import numpy as np
from sklearn import metrics
import matplotlib.pyplot as plt
import copy as cp
import matplotlib.cm as cm
import seaborn as sns
from math import sqrt
#import statsmodels.api as sm
def cluster_2d_plot(X,labels, inertia = 0,info=["","",""]):
plt.figure(figsize=(16,7))
colors = [str(item/255.) for item in labels]
plt.suptitle("Clustering Method: (%s,%s,%s)"%(info[0],info[1],info[2]),size=14)
plt.subplot(1,2,1)
plt.scatter(X[:,0],X[:,1],cmap ="Paired" ,c=colors)
plt.xlabel("PCA01")
plt.ylabel("PCA02")
plt.subplot(1,2,2)
plt.scatter(X[:,0],X[:,2],cmap ="Paired" ,c=colors)
plt.xlabel("PCA01")
plt.ylabel("PCA03")
def cluster_plot(df,centroid_only= False,tick_frequency = 3,top = None, Normalized = True):
try:
# TODO: Create a validator class ?
for c in ["Cluster","Centroid"]:
if c not in df.columns:
raise ValueError("<<%s>> column not found in dataframe"%c)
clusters = list(set(df.Cluster))
if top:
clusters = np.random.randint(1,len(clusters),size= top)
else:
top = df.shape[0]
nc = min(len(clusters),top)
plt.figure(figsize=(15,nc*4))
for i,c in enumerate(clusters):
plt.subplot(nc,1,i+1)
cdf = df[df.Cluster==c]
medoid = cdf.Centroid.iloc[0]
plt.title("Cluster %d (%s): %d items"%(c,medoid,cdf.shape[0]))
if (centroid_only):
row = cdf.loc[medoid]
values = row.values[:-2]
if (Normalized): values = values/values.std()
plt.plot(values)
else:
for _ , row in cdf.iterrows():
values = row.values[:-2]
if (Normalized): values = values/values.std()
plt.plot(values)
except ValueError as ex:
print(ex)
def decorate_plot(cols,tick_frequency = 3, rotation = 70,color='lightblue'):
weeks = np.arange(len(cols))
for x in weeks:
if (x+1)%12 == 0: plt.axvline(x,c=color,linestyle='--')
plt.xticks(weeks[::tick_frequency], cols[::tick_frequency], rotation = rotation)
def circleOfCorrelations(components, explained_variance, cols):
plt.figure(figsize=(10,10))
plt.Circle((0,0),radius=10, color='k', fill=False)
plt.axis('equal')
circle1=plt.Circle((0,0),radius=1, color='k', fill=False)
fig = plt.gcf()
fig.gca().add_artist(circle1)
plt.axhline(y=0,c='k')
plt.axvline(x=0,c='k')
for idx in range(len(components)):
x = components[idx][0]
y = components[idx][1]
#plt.plot([0.0,x],[0.0,y],'k-')
plt.plot(x, y, 'rx')
month = columnToMonth(cols[idx])
plt.annotate("%02d"%month, xy=(x,y))
plt.xlabel("PC-0 (%s%%)" % str(explained_variance[0])[:4].lstrip("0."))
plt.ylabel("PC-1 (%s%%)" % str(explained_variance[1])[:4].lstrip("0."))
plt.xlim((-1.5,1.5))
plt.ylim((-1.5,1.5))
plt.title("Circle of Correlations")
def columnToMonth(txt):
weekNumber = int(txt[-2:])
month = int((weekNumber*7)/30 + 1)
return month
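# Worked example (assuming week-number column labels ending in two digits, e.g. '...W26'):
# int('26') = 26, month = int((26*7)/30 + 1) = int(7.07) = 7.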
def GetMostCorrelatedTo(X_embedded,component,index,n=10,absl = True ):
ly = X_embedded.shape[1]+1
df_Xpca = pd.DataFrame(X_embedded,index=index,columns = np.arange(1,ly))
return df_Xpca.abs().nlargest(n,component) if absl else df_Xpca.nlargest(n,component)
# from matplotlib.patches import Ellipse
| ["matplotlib", "seaborn"] |
2ab429dfcc300a44f08db01c452af0d6181b71f4 | Python | SirMalamute/CovidXrays | /CovidML.py | UTF-8 | 1,887 | 2.84375 | 3 | [] | no_license | ###########Building the model
from tensorflow.keras import layers
from tensorflow.keras.models import Model
from tensorflow.keras.preprocessing import image
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import numpy as np
from glob import glob
import matplotlib.pyplot as plt
def alexnet(in_shape=(256,256,1), n_classes=3, opt='sgd'):
in_layer = layers.Input(in_shape)
conv1 = layers.Conv2D(96, 11, strides=4, activation='relu')(in_layer)
pool1 = layers.MaxPool2D(3, 2)(conv1)
conv2 = layers.Conv2D(256, 5, strides=1, padding='same', activation='relu')(pool1)
pool2 = layers.MaxPool2D(3, 2)(conv2)
conv3 = layers.Conv2D(384, 3, strides=1, padding='same', activation='relu')(pool2)
conv4 = layers.Conv2D(256, 3, strides=1, padding='same', activation='relu')(conv3)
pool3 = layers.MaxPool2D(3, 2)(conv4)
flattened = layers.Flatten()(pool3)
dense1 = layers.Dense(4096, activation='relu')(flattened)
drop1 = layers.Dropout(0.5)(dense1)
dense2 = layers.Dense(4096, activation='relu')(drop1)
drop2 = layers.Dropout(0.5)(dense2)
preds = layers.Dense(n_classes, activation='softmax')(drop2)
model = Model(in_layer, preds)
model.compile(loss="categorical_crossentropy", optimizer=opt,
metrics=["accuracy"])
return model
####Flowing the data
train_path = './Datasets/Train'
validation_path = './Datasets/Validation'
folders = glob('./Datasets/Train/*')
train_data_gen = ImageDataGenerator(rescale=1./255, zoom_range=0.1)
test_data_gen = ImageDataGenerator(rescale=1./255)
train_set = train_data_gen.flow_from_directory(train_path, target_size=(256,256), batch_size=32, class_mode='categorical')
test_set = test_data_gen.flow_from_directory(validation_path, target_size=(256,256), batch_size=32, class_mode='categorical')
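# Note before calling model.fit: flow_from_directory defaults to color_mode='rgb'
# (3 channels), while alexnet() above was built with in_shape=(256,256,1); either
# pass color_mode='grayscale' here or build the model with in_shape=(256,256,3).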
#### Instantiating the model
model = alexnet()
model.summary()
| ["matplotlib"] |
ff57d017a36d6389e3dab6c05958f50813859b6d | Python | LefanCheng/Technical-Exam-Data-Summarization-of-enron-dataset | /summarize-enron.py | UTF-8 | 4,519 | 3.171875 | 3 | [] | no_license | # Import Modules
import sys
import numpy as np
import pandas as pd
from collections import defaultdict
import matplotlib.pyplot as plt
import seaborn as sns
class summarize_enron():
def __init__(self):
# Import csv file with column names specified
self.column_names = ['time', 'message_id', 'sender', 'recipient', 'topic', 'mode']
# self.df = pd.read_csv('enron-event-history-all.csv', names=self.column_names)
self.df = pd.read_csv(sys.argv[1], names=self.column_names)
# Data Cleaning and Preprocessing:
# Detected 32 and 38 missing values(NaN) in Senders and recipient.
# Fill NaN with 'missing', which makes these rows easier to handle later if needed.
# Assume 'blank' is a legit person name instead of missing value.
self.df['recipient'].fillna('missing', inplace=True)
# Convert Unix time in milliseconds into readable date time
self.df['time'] = pd.to_datetime(self.df['time'], unit='ms')
# Discovered 2244 announcements and 3314 notes in sender
# which are in the top 10 prolific senders. Assuming they were sent by system
# not person, same as 'schedule'(852), 'outlook'(1160), 'arsystem'(299),
# remove these senders as they might be much less informative.
self.df = self.df[~self.df.sender.isin(['announcements', 'notes', 'schedule', 'outlook', 'arsystem'])]
# set time column as index
self.df.set_index('time', inplace=True)
def senders_count(self):
# Use defaultdict to create a dictionary that has name as key and count as value.
# If a key doesn't exist in defaultdict(int), it'll return 0
counter = defaultdict(int)
for name in self.df['sender']:
counter[name] += 1
return counter
def recipient_count(self):
# Same as senders_count, but split mutiple recipients.
counter = defaultdict(int)
for names in self.df['recipient']:
if '|' in names:
for name in names.split('|'):
counter[name] += 1
else:
counter[names] += 1
return counter
def q1_ouput_csv(self):
# Concatenate senders and recipients count into one by unique names
dic = {}
senders = self.senders_count()
recipients = self.recipient_count()
unique_names = set(list(senders) + list(recipients))
for name in unique_names:
dic[name] = {'senders': senders[name], 'recipients': recipients[name]}
# Contruct the dataframe using the dict
df = pd.DataFrame.from_dict(dic, orient='index').sort_values('senders', ascending=False).reset_index()
# Reset column name
df.rename(columns = {'index':'person'}, inplace = True)
# Refine the top 5 senders
self.top_senders = list(df.person[:5])
return df
def q2_prolific_senders(self):
sns.set(style="whitegrid")
plt.figure(figsize=(12,8))
for person in self.top_senders:
sns.lineplot(data=self.df['sender'][self.df['sender'] == person].resample('M').count(), label=person, palette = 'pastel')
plt.xticks(rotation=45)
plt.legend(loc='upper left')
plt.title("Most Prolific Senders - The Number of Emails Sent Over Time")
plt.xlabel("Month")
plt.ylabel("Number of Emails")
return plt
def q3_num_uniq_ppl_contacted(self):
sns.set(style="whitegrid")
plt.figure(figsize=(12,8))
# plot based on the number of unique senders for each top prolific senders as recipient in each month
for person in self.top_senders:
sns.lineplot(data=self.df[self.df['recipient'].str.contains(person)].resample('M').nunique()['sender'], label=person)
plt.xticks(rotation=45)
plt.legend(loc='upper left')
plt.title("Relative Number of Unique People who Contacted the Top 5 Prolific Senders")
plt.xlabel("Month")
plt.ylabel("Number of Unique Contacts")
return plt
if __name__=='__main__':
summarize_enron = summarize_enron()
# Question 1:
summarize_enron.q1_ouput_csv().to_csv('question_1_output.csv')
# Question 2:
summarize_enron.q2_prolific_senders().savefig('question_2_output.png')
# Question 3:
summarize_enron.q3_num_uniq_ppl_contacted().savefig('question_3_output.png')
| ["matplotlib", "seaborn"] |
e779fcea31e3c5b867f01492e071243e78589284 | Python | LucyWilcox/SoftDesSp15 | /toolbox/ml/learning_curve.py | UTF-8 | 1,507 | 3.5625 | 4 | [] | no_license | """ Exploring learning curves for classification of handwritten digits """
import matplotlib.pyplot as plt
import numpy
from sklearn.datasets import *
from sklearn.cross_validation import train_test_split
from sklearn.linear_model import LogisticRegression
data = load_digits()
#print data.DESCR
num_trials = 100
train_percentages = range(5,95,5)
test_accuracies = numpy.zeros(len(train_percentages))
# train a model with training percentages between 5 and 90 (see train_percentages) and evaluate
# the resultant accuracy.
# You should repeat each training percentage num_trials times to smooth out variability
# for consistency with the previous example use model = LogisticRegression(C=10**-10) for your learner
digits = load_digits()
#print digits.DESCR
fig = plt.figure()
for i in range(10):
subplot = fig.add_subplot(5,2,i+1)
subplot.matshow(numpy.reshape(digits.data[i],(8,8)),cmap='gray')
plt.show()
for size in train_percentages:
accumulated_accuracy = 0.0
for i in range(num_trials):
X_train, X_test, y_train, y_test = train_test_split(data.data, data.target, train_size= size)
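# Note: an integer train_size is interpreted by train_test_split as an absolute
# number of samples (5-90 samples here), not a percentage; to match the
# "Percentage of Data Used for Training" label below, use train_size=size/100.0.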
model = LogisticRegression(C=10**-10)
model.fit(X_train, y_train)
accumulated_accuracy += model.score(X_test, y_test)
test_accuracies[size // 5 - 1] = accumulated_accuracy/num_trials  # integer index into the accuracies array
fig = plt.figure()
#print train_percentages
#print test_accuracies
plt.plot(train_percentages, test_accuracies)
plt.xlabel('Percentage of Data Used for Training')
plt.ylabel('Accuracy on Test Set')
plt.show()
| ["matplotlib"] |
c5c1590f6824645d0c251ec333be4cad5feee1a4 | Python | hadam1993/ScientificComputing | /createCubicSpline.py | UTF-8 | 16,594 | 3.84375 | 4 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 20 11:38:25 2020
@author: ahonts
This program for project 1 for the scientific
computing course. The project was to create a program
that takes in four data points and then constructs a
cubic spline that maintains C2 continuity.
The program gives the user the option to view an example
with four data points, constructs a cubic spline from
the data points, and then takes samples in between the
first and last input data points to create a plot.
In the plot there is the original function that was used
to start with 4 data points and there is also the
interpolated function that was created via a cubic spline.
The user of this program also has the capability to
input their own data and view the results from a cubic
spline that is created from their data.
The code for the construction of the cubic splines
can be found in the following methods:
create_banded_mat() this creates the matrix A
create_b_mat() this creates the vector b
spline_func() the function used to get outputs
from the spline
interpolate() decides which slopes to give to
spline_func() based on where the sample was taken
"""
import numpy as np
import matplotlib.pyplot as plt
from numpy.linalg import solve
def create_banded_mat(data):
'''
Args:
data: consists of the (x,y) coordinates that
are used to build the spline.
Returns:
returns the matrix A used to solve
for the unknown slopes of the splines that
will be used for data interpolation.
A is a banded matrix with a bandwith of 2
where the diagonal of the matrix is constructed
by 2*(delta_x_i + delta_x_i+1) where i ranges
from 1 to the number of data points.
The bottom band of the diagonal consists of
delta_x_i where i ranges from 3 to the number of
data points. The upperband of the diagonal
consists of delta_x_i where i ranges from
1 to the number of data points minus 2
'''
A = np.zeros((len(data)-2,len(data)-2))
for i in range(len(data)-2):
A[i][i] = 2*((data[i+1][0] - data[i][0]) +\
(data[i+2][0] - data[i+1][0]))
for i in range(len(data)-3):
A[i][i+1] = (data[i+1][0] - data[i][0])
for i in range(len(data)-3):
A[i+1][i] = data[i+3][0] - data[i+2][0]
return A
def create_b_mat(data,s):
'''
Args:
data: data consists of the (x,y) coordinates that
are used to build the spline.
s: s is an array that contains the initial and
end slopes.
Returns:
returns the vector b that is used to solve
for the unknown slopes of the splines used
in data interpolation. b is constructed by
3*(delta_x_i+1 * y_i_prime) where i ranges from
1 to the number of data points - 1.
If the initial and end slopes are not 0,
then the first element in the vector has
delta_x_2 * initial slope subtracted from it
and the last element in the vectior has
delta_x_end-1 * end slope subtracted from it
'''
b = np.zeros((len(data)-2,1))
for i in range(b.shape[0]):
delta_x_2 = data[i+2][0] - data[i+1][0]
delta_x_1 = data[i+1][0] - data[i][0]
y_prime_1 = (data[i+1][1] - data[i][1])/delta_x_1
y_prime_2 = (data[i+2][1] - data[i+1][1])/delta_x_2
b[i][0] = 3*(delta_x_2*y_prime_1 +\
delta_x_1*y_prime_2)
if i == 0 or i == b.shape[0] - 1:
b[i][0] -= delta_x_2*s[i]
return b
def spline_func(x_in,x1,x2,y1,y2,s1,s2):
'''
Args:
x_in: is the input we wish to interpolate
an output for. This input falls in between
the given inputs x1 and x2
x1: is the left given data point that x_in is
between
x2: is the right given data point that x_in is
between
y1: is the output of the left given data
point that x_in is between
y2: is the output of the right given data
point that x_in is between
s1: is slope of the spline function at x1
s2: is slope of the spline function at x2
Returns:
returns: the interpolation value for the
spline function at the input of x_in.
'''
delta_x = (x2 - x1)
y_prime = (y2 - y1)/delta_x
y_double_prime = (y_prime - s1)/(delta_x)
y_triple_prime = (s1 - 2*(y_prime) + s2)/(delta_x**2)
return (y1 + s1*(x_in - x1) +\
y_double_prime*(x_in - x1)**2 +\
y_triple_prime*((x_in - x1)**2)*(x_in-x2))
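# Sanity check that follows from the form above: at x_in = x1 every term but y1
# vanishes, and at x_in = x2 the cubic term vanishes while
# y1 + s1*delta_x + (y_prime - s1)*delta_x = y2, so each segment interpolates
# both of its endpoints exactly.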
def interpolate(data,samples,s):
'''
Args:
data: consists of the (x,y) coordinates that
are used to build the spline.
samples: are the x coordinates we wish to
have and interpolated output for
s: is a vector of slopes where s_i is the
slope of the spline at x_i where i ranges
from 1 to number of data points
Returns:
returns: a list of outputs corresponding to
each of the the samples that were put
into this function
'''
outputs = []
for sample in samples:
if sample < data[0][0] or sample > data[len(data)-1][0]:
return None
if sample == data[len(data)-1][0]:
i = len(data)-2  # last interval; len(data)-1 would index past the end of data below
outputs.append(
spline_func(sample,data[i][0],
data[i+1][0],data[i][1],
data[i+1][1],s[i],s[i+1]))
else:
for i in range(len(data)-1):
if sample >= data[i][0] and sample < data[i+1][0]:
outputs.append(
spline_func(sample,data[i][0],
data[i+1][0],data[i][1],
data[i+1][1],s[i],s[i+1]))
break
return outputs
def is_number(n):
'''
Args:
n: a string
Returns:
returns True if n is a number or False
if n is not a number
'''
try:
float(n)
except ValueError:
return False
return True
def get_input(xs):
'''
Args:
xs: is a list of inputs that have been
given to the program
Returns:
False if the user did not enter a number
or if the input is already used. Otherwise
it will return the number as a float
'''
x = input("please enter an input number: \n")
if is_number(x) == False:
print("you did not enter a number.\n")
return False
elif float(x) in xs:
print("You have already used the input %s."%x)
return False
return float(x)
def get_output(x):
'''
Args:
x: is the input for the current output that
we want from the user
Returns:
False if the user did not enter a number.
Otherwise it will return the output as a float
'''
y = input("please enter an output number for the input %f: \n"%x)
if is_number(y) == False:
print("you did not enter a number.\n")
return False
return float(y)
def get_num_data_pts():
'''
Args:
None
Returns:
False if the user did not enter a number or if
the user did not enter at least 3. We need
at least 3 data points to be able to construct
the spline. Otherwise it will return the
num_data_pts as an int
'''
num_data_pts = input("please enter how many data"+
"points you will enter: \n")
if is_number(num_data_pts) == False:
print('You did not enter a valid number \n')
return False
if int(num_data_pts) < 3:
print('Please enter a number larger than 3\n')
return False
return(int(num_data_pts))
def get_init_slope():
'''
Args:
None
Returns:
False if the user did not enter a number.
Otherwise it will return the slope given
by the user as a float
'''
s_1 = input("Please enter the slope of the initial point: \n")
if is_number(s_1) == False:
print('You did not enter a valid number \n')
return False
return(float(s_1))
def get_end_slope():
'''
Args:
None
Returns:
False if the user did not enter a number.
Otherwise it will return the slope given
by the user as a float
'''
s_end = input("Please enter the slope of the end point: \n")
if is_number(s_end) == False:
print('You did not enter a valid number \n')
return False
return(float(s_end))
def func(t):
return(5*np.sin(2*t+np.pi)*t + 4)
def four_point_example():
'''
This function will display the results of the
cubic spline interpolation from the 4 data points
(0.5,1.896322537980259) , (0.79,0.05016729632997663),
(1.62,4.795813609335379), (2.48,16.021808673227227)
These data points were collected from the function:
5*sin(2*x + pi)*x + 4
'''
print('\nThis example comes from the four data points\n'+\
'(0.5,1.896322537980259)\n(0.79,0.05016729632997663)\n'+\
'(1.62,4.795813609335379)\n(2.48,16.021808673227227)\n\n'+\
'These data points come from the function\n\n'+\
'5*sin(2*x + pi)*x + 4\n\nThe slopes of the cubic spline '+\
'at the end points are both 0\n'+\
'The graph shows the interpolated '+\
'function plotted against the true function\n'+\
'The red stars are the four data points')
data = [(0.5,1.896322537980259),
(0.79,0.05016729632997663),
(1.62,4.795813609335379),
(2.48,16.021808673227227)]
init_slope = 0.0
end_slope = 0.0
s = np.zeros((len(data),1))
#Create the matix A to solve for the unknown slopes
A = create_banded_mat(data)
#Create the vector b to solve for the unknown slopes
b = create_b_mat(data,s)
#Solve for the unknown slopes using numpy's
#Linear algebra solver
s_2 = solve(A,b)
#Fill the slopes into the s vector
s[1:len(data)-1][:] = s_2
#Set the initial slope
s[0][:] = init_slope
#Set the end slope
s[len(s)-1][:] = end_slope
#Get Samples ranging from the smallest input value
#to the highest input value
samples = np.arange(data[0][0],data[len(data)-1][0],.01)
#Interpolate the sample inputs and store the
#interpolated outputs in y_hat
y_hat = interpolate(data,samples,s)
y_actual = [func(i) for i in samples]
#plot the resulting interpolation
fig, ax = plt.subplots()
stars, = ax.plot([x[0] for x in data],[x[1] for x in data],'r*',label='stars')
interpolated, = ax.plot(samples,y_hat,'g--', label='Line 2')
actual, = ax.plot(samples,y_actual,'b-', label='Line 1')
ax.set_xlabel('input axis')
ax.set_ylabel('output axis')
plt.legend([stars,interpolated, actual], ['Given Data Points','Interpolated Function', 'Actual Function'])
plt.show()
def run_example_or_user():
example = input("input y to see example with 4"+\
" data points\ninput n to create"+\
" your own data:\n")
return example
def while_y_or_n(answer, func):
while answer != 'y' and answer != 'n':
answer = func()
return answer
def random_points_or_own():
answer = input("Would you like to see random data "+\
"or enter your own data?\n"
"input y for random"+\
" data points\ninput n to create"+\
" your own data:\n")
return answer
def show_data(data,init_slope,end_slope):
"""
This function solves for the matricies A and b
and then displays the data on a graph
"""
#Create the vector s for the slopes of the splines
s = np.zeros((len(data),1))
#Create the Matrix A to solve for the unknown slopes
A = create_banded_mat(data)
#Create the vector b to solve for the unknown slopes
b = create_b_mat(data,s)
#Solve for the unknown slopes using numpy's
#Linear algebra solver
s_2 = solve(A,b)
#Fill the slopes into the s vector
s[1:len(data)-1][:] = s_2
#Set the initial slope
s[0][:] = init_slope
#Set the end slope
s[len(s)-1][:] = end_slope
#Get Samples ranging from the smallest input value
#to the highest input value
samples = np.arange(data[0][0],data[len(data)-1][0],.01)
#Interpolate the sample inputs and store the
#interpolated outputs in y_hat
y_hat = interpolate(data,samples,s)
#plot the resulting interpolation
fig, ax = plt.subplots()
stars, = ax.plot([x[0] for x in data],[x[1] for x in data],'r*')
interpolated, = ax.plot(samples,y_hat,'g--',label='interpolated')
ax.set_xlabel('input axis')
ax.set_ylabel('output axis')
plt.legend([stars,interpolated], ['Given Data Points','Interpolated Function'])
plt.show()
def get_num_data_and_slopes():
"""
Asks the user for number of data points
they would like, the initial slope and end
slope.
"""
#Ask the user for the number of data points
#that they will enter
num_data_pts = get_num_data_pts()
#Ensure that the user entered a valid number
while num_data_pts == False:
num_data_pts = get_num_data_pts()
#Ask the user for an initaial slope
init_slope = get_init_slope()
#Ensure that the user entered a valid number
#0.0 is equivalent to False in python
#init_slope != 0.0 allows the user to enter that number
while init_slope == False and init_slope != 0.0:
init_slope = get_init_slope()
#Ask the user for an initaial slope
end_slope = get_end_slope()
#Ensure that the user entered a valid number
while end_slope == False and end_slope != 0.0:
end_slope = get_end_slope()
return (num_data_pts,init_slope,end_slope)
def print_data_points(data):
print("\nThe Following data points were generated"+\
"Randomly:\n\n")
for d in data:
print("(%f,%f)\n"%(d[0],d[1]))
def random_points():
"""
This function Generates random data points
with the domain between -10 and 10
The end of this function will display
the graph of the resulting spline
"""
#List of inputs
xs = []
# List of tuples that represent (x,y) coordinates
data = []
num_data_pts,init_slope,end_slope = get_num_data_and_slopes()
#Get all data points from the user
while len(data) < num_data_pts:
#Get an input value
x = np.random.randint(-10,10)*np.random.random()
while x in xs:
x = np.random.randint(-10,10)*np.random.random()
#Get the corresponding output value
y = np.random.randint(-10,10)*np.random.random()
#add x to the list of used inputs
xs.append(x)
#add the coordiante (x,y) to the data list
data.append((x,y))
#Ensure the data is sorted by input values
#in ascending order
data = sorted(data, key=lambda x: x[0])
print_data_points(data)
show_data(data,init_slope,end_slope)
def own_data():
"""
This function has the user enter their
own data points.
The end of this function will display
the graph of the resulting spline.
"""
#List of inputs
xs = []
# List of tuples that represent (x,y) coordinates
data = []
num_data_pts,init_slope,end_slope = get_num_data_and_slopes()
#Get all data points from the user
while len(data) < num_data_pts:
#Get an input value
x = get_input(xs)
while x == False and x != 0.0:
x = get_input(xs)
#Get the corresponding output value
y = get_output(x)
while y == False and y != 0.0:
y = get_output(x)
#add x to the list of used inputs
xs.append(x)
#add the coordiante (x,y) to the data list
data.append((x,y))
#Ensure the data is sorted by input values
#in ascending order
data = sorted(data, key=lambda x: x[0])
show_data(data,init_slope,end_slope)
def __main__():
example = run_example_or_user()
'''
while example != 'y' and example != 'n':
example = run_example_or_user()
'''
example = while_y_or_n(example, run_example_or_user)
if example == 'y':
four_point_example()
else:
answer = random_points_or_own()
answer = while_y_or_n(answer, random_points_or_own)
if answer == 'y':
random_points()
else:
own_data()
__main__() | [
"matplotlib"
] |
6b85763c76517fe3205a468b73407c11eb61245a | Python | samfaul135/cantera-py | /gasproperties.py | UTF-8 | 2,897 | 3.40625 | 3 | [] | no_license | # Viscosities & Prandtl Number
# Sam Faulk
import cantera as ct
import numpy as np
import matplotlib.pyplot as plt
import time
import math
dVisc = []
kVisc = []
Pr = []
temp = []
dens = []
# Set limits for density
dens_initial = 0.001
dens_now = dens_initial
dens_final = 100.0
dens_delta = 10.0
# Create increment of change to ensure that temp and dens are the same size
mag = np.log10(dens_final / dens_initial)
while dens_now <= dens_final:
# Keeps track of progress, as increments of temperture
print('{}kg/m^3 of {}kg/m^3'.format(dens_now,dens_final))
# Set limits for temperature
temp_initial = 300.0
temp_now = temp_initial
temp_final = 3000.0
temp_delta = (temp_final - temp_initial) / mag # define delta increment that "dens" and "temp" lists are same size
while temp_now <= temp_final:
print(' {}K of {}K'.format(temp_now,temp_final))
# Define empty lists for within the "while loop"
dVisc_within = []
kVisc_within = []
Pr_within = []
# Create gasA
gasA = ct.Solution('gri30.cti','gri30_mix')
gasA.TDX = temp_now , dens_now , 'O2:2.0 , CH4:1.0'
# Dynamic Viscosity
dVisc_within.append(gasA.viscosity)
dVisc = np.append(dVisc , dVisc_within , axis = 0)
# Kinematic Viscosity
kVisc_within.append(gasA.viscosity/gasA.density)
kVisc = np.append(kVisc , kVisc_within , axis = 0)
# Prandtl Number
Pr_within.append(gasA.cp*gasA.viscosity/gasA.thermal_conductivity)
Pr = np.append(Pr , Pr_within , axis = 0)
# Temperature increments
temp_now += temp_delta
# Density list
dens.append(dens_now)
# Density increments
dens_now *= dens_delta
# Temperature list
temp = np.arange(temp_initial , temp_final , temp_delta)
temp = np.append(temp , temp_final)
# Split Viscosity lists
mag = int(mag)
dVisc = np.split(dVisc,mag+1)
kVisc = np.split(kVisc,mag+1)
Pr = np.split(Pr,mag+1)
# Plot values
rnge = np.arange(mag+1)
# Plot Dynamic Viscosity
plt.figure(1)
for i in rnge:
plt.plot(temp, dVisc[i])
plt.xlabel('$Initial$ $Temperature$ (K)')
plt.ylabel('$Dynamic$ $Viscosity$ (Pa-s)')
plt.legend(('0.001 [kg/m^3]' , '0.01' , '0.1' , '1.0' , '10.0' , '100.0') , loc = 'best')
plt.grid()
plt.tight_layout()
plt.show()
# Plot Kinematic Viscosity
plt.figure(2)
for i in rnge:
plt.plot(temp, kVisc[i])
plt.xlabel('$Initial$ $Temperature$ (K)')
plt.ylabel('$Kinematic$ $Viscosity$ (m^2/s)')
plt.legend(('0.001 [kg/m^3]' , '0.01' , '0.1' , '1.0' , '10.0' , '100.0') , loc = 'best')
plt.grid()
plt.tight_layout()
plt.show()
# Plot Prandtl Number
plt.figure(3)
for i in rnge:
plt.plot(temp, Pr[i])
plt.xlabel('$Initial$ $Temperature$ (K)')
plt.ylabel('$Prandtl$ $Number$')
plt.legend(('0.001 [kg/m^3]' , '0.01' , '0.1' , '1.0' , '10.0' , '100.0') , loc = 'best')
plt.grid()
plt.tight_layout()
plt.show() | [
"matplotlib"
] |
da130bcc32ffd7b5cb5db29ff8716dce4998bf0d | Python | renyc432/sentiment-analysis | /cl_log_xgb_for.py | UTF-8 | 8,926 | 2.8125 | 3 | [] | no_license | import re
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import nltk
from wordcloud import WordCloud, ImageColorGenerator
from PIL import Image
import requests
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import f1_score
from sklearn.metrics import roc_auc_score
from sklearn.metrics import recall_score
from sklearn.metrics import precision_score
from sklearn.metrics import accuracy_score
from sklearn.linear_model import LogisticRegression
from xgboost import XGBClassifier
from sklearn.tree import DecisionTreeClassifier
#%matplotlib inline
#path = 'C:\\Users\\rs\\Desktop\\Datasets\\NLP\\twitter_1.6M.csv'
train = pd.read_csv('https://raw.githubusercontent.com/dD2405/Twitter_Sentiment_Analysis/master/train.csv')
train_original=train.copy()
test = pd.read_csv('https://raw.githubusercontent.com/dD2405/Twitter_Sentiment_Analysis/master/test.csv')
test_original=test.copy()
#positive = 0
#negative = 1
# Combine train and test because tweets in test also contain information
combine = train.append(test,ignore_index=True, sort=True)
def remove_pattern(text, pattern):
r = re.findall(pattern, text)
for i in r:
# sub all occurence of i in text for ''
text = re.sub(i,'',text)
return text
# remove tweeter handlers
# np.vectorize is faster than for loop
combine['Tidy_Tweets'] = np.vectorize(remove_pattern)(combine['tweet'],'@[\w]*')
combine.head()
# replace none a-z characters (numbers, special characters, punctuations) with ' '
combine['Tidy_Tweets'] = combine['Tidy_Tweets'].str.replace('^a-zA-Z#',' ')
# remove short words as they usually do not contain significant information (such as 'a', 'an', 'hmm')
# This removes any word 3 characters or shorter
# This joins words with 4+ characters
combine['Tidy_Tweets'] = combine['Tidy_Tweets'].apply(lambda x: ' '.join([w for w in x.split() if len(w)>3]))
# Split each string into words
tokenized_tweet = combine['Tidy_Tweets'].apply(lambda x: x.split())
tokenized_tweet.head()
# Stemming strips the suffixes ('ing', 'ly', 's') from a word
# So 'player', 'played', 'plays', 'playing' all turn into play
# Look up how this works
ps = nltk.PorterStemmer()
# This is costly
tokenized_tweet = tokenized_tweet.apply(lambda x: [ps.stem(i) for i in x])
tokenized_tweet.head()
# Put it back to a string
tokenized_tweet_temp = tokenized_tweet.apply(lambda x: ' '.join(x))
combine['Tidy_Tweets'] = tokenized_tweet_temp
combine.head()
# Store all words from dataset
all_words_positive = ' '.join(w for w in combine['Tidy_Tweets'][combine['label']==0])
all_words_negative = ' '.join(w for w in combine[combine['label']==1]['Tidy_Tweets'])
# Extract hashtags from tweets
def hashtags_extract(x):
hashtags = []
for i in x:
# r: raw string, this resolves the problem that both python and regex uses \ for escape
# \w: matches a-z/A-Z, 0-9,_
# +: 1+ characters
ht = re.findall(r'#(\w+)',i)
hashtags.append(ht)
return hashtags
ht_positive = hashtags_extract(combine['Tidy_Tweets'][combine['label']==0])
# Unnest nested list
ht_positive_unnest = sum(ht_positive,[])
ht_negative = hashtags_extract(combine['Tidy_Tweets'][combine['label']==1])
ht_negative_unnest = sum(ht_negative,[])
# bag-of-words
bag_vectorizer = CountVectorizer(max_df=0.90, min_df=2, max_features=1000, stop_words='english')
bag_matrix = bag_vectorizer.fit_transform(combine['Tidy_Tweets'])
bag_features = bag_vectorizer.get_feature_names()
df_bag = pd.DataFrame(bag_matrix.toarray(),columns=bag_features)
# TF-IDF
#max_df, min_df: ignore word if frequency/count pass the max/min
tfidf_vectorizer = TfidfVectorizer(max_df=0.9, min_df=2, max_features=1000,stop_words='english')
tfidf_matrix = tfidf_vectorizer.fit_transform(combine['Tidy_Tweets'])
tfidf_features = tfidf_vectorizer.get_feature_names()
df_tfidf = pd.DataFrame(tfidf_matrix.toarray(),columns=tfidf_features)
train_bag = bag_matrix[:len(train)]
train_tfidf = tfidf_matrix[:len(train)]
# Train, validation split
x_train_bag,x_valid_bag,y_train_bag,y_valid_bag = train_test_split(train_bag,train['label'],
test_size = 0.3, random_state = 2)
x_train_tfidf,x_valid_tfidf,y_train_tfidf,y_valid_tfidf = train_test_split(train_tfidf,train['label'],
test_size = 0.3, random_state = 17)
# Logistic Regression
#L1 penalty: lasso; L2 penalty: ridge
Log_bag = LogisticRegression(random_state=0,solver='lbfgs')
Log_bag.fit(x_train_bag,y_train_bag)
log_pred_bag = Log_bag.predict_proba(x_valid_bag)
# why 0.3??
# first column = 0 (positive); second column = 1 (negative)
log_pred_int_bag = log_pred_bag[:,1]>=0.3
log_pred_int_bag = log_pred_int_bag.astype(np.int)
# calculate f1 score
f1_log_bag = f1_score(y_valid_bag, log_pred_int_bag)
auc_log_bag = roc_auc_score(y_valid_bag, log_pred_int_bag)
Log_tfidf = LogisticRegression(random_state=0,solver='lbfgs')
Log_tfidf.fit(x_train_tfidf, y_train_tfidf)
log_pred_tfidf = Log_tfidf.predict_proba(x_valid_tfidf)
log_pred_int_tfidf = log_pred_tfidf[:,1]>=0.3
log_pred_int_tfidf = log_pred_int_tfidf.astype(np.int)
f1_log_tfidf = f1_score(y_valid_tfidf, log_pred_int_tfidf)
auc_log_tfidf = roc_auc_score(y_valid_tfidf, log_pred_int_tfidf)
# XGBoost
xgb_bag = XGBClassifier(random_state=22,learning_rate=0.1, n_estimators=1000)
xgb_bag.fit(x_train_bag,y_train_bag)
xgb_pred_bag = xgb_bag.predict_proba(x_valid_bag)
xgb_pred_int_bag = (xgb_pred_bag[:,1]>= 0.3).astype(np.int)
f1_xgb_bag = f1_score(y_valid_bag, xgb_pred_int_bag)
auc_xgb_bag = roc_auc_score(y_valid_bag, xgb_pred_int_bag)
recall_xgb_bag = recall_score(y_valid_bag, xgb_pred_int_bag)
precision_xgb_bag = precision_score(y_valid_bag, xgb_pred_int_bag)
acc_xgb_bag = accuracy_score(y_valid_bag, xgb_pred_int_bag)
xgb_tfidf = XGBClassifier(random_state=22,learning_rate=0.1,n_estimators=1000)
xgb_tfidf.fit(x_train_tfidf,y_train_tfidf)
xgb_pred_tfidf = xgb_tfidf.predict_proba(x_valid_tfidf)
xgb_pred_int_tfidf = (xgb_pred_tfidf[:,1]>=0.3).astype(np.int)
f1_xgb_tfidf = f1_score(y_valid_tfidf, xgb_pred_int_tfidf)
auc_xgb_tfidf = roc_auc_score(y_valid_tfidf, xgb_pred_int_tfidf)
recall_xgb_tfidf = recall_score(y_valid_tfidf, xgb_pred_int_tfidf)
precision_xgb_tfidf = precision_score(y_valid_tfidf, xgb_pred_int_tfidf)
acc_xgb_tfidf = accuracy_score(y_valid_tfidf, xgb_pred_int_tfidf)
# Decision Trees
# criterion: gini or entropy (information gain)
tree_bag = DecisionTreeClassifier(random_state=1, criterion='entropy')
tree_bag.fit(x_train_bag,y_train_bag)
tree_pred_bag = tree_bag.predict_proba(x_valid_bag)
tree_pred_int_bag = (tree_pred_bag[:,1]>=0.3).astype(np.int)
f1_tree_bag = f1_score(y_valid_bag, tree_pred_int_bag)
auc_tree_bag = roc_auc_score(y_valid_bag, tree_pred_int_bag)
tree_tfidf = DecisionTreeClassifier(random_state=1,criterion='entropy')
tree_tfidf.fit(x_train_tfidf,y_train_tfidf)
tree_pred_tfidf = tree_tfidf.predict_proba(x_valid_tfidf)
tree_pred_int_tfidf = (tree_pred_tfidf[:,1]>=0.3).astype(np.int)
f1_tree_tfidf = f1_score(y_valid_tfidf, tree_pred_int_tfidf)
auc_tree_tfidf = roc_auc_score(y_valid_tfidf, tree_pred_int_tfidf)
# Word cloud
Mask = np.array(Image.open(requests.get('http://clipart-library.com/image_gallery2/Twitter-PNG-Image.png', stream=True).raw))
image_colors = ImageColorGenerator(Mask)
wc_0 = WordCloud(background_color='black', height = 1500, width = 4000, mask = Mask).generate(all_words_positive)
# Size of the image generated
plt.figure(figsize=(160,320))
plt.imshow(wc_0.recolor(color_func=image_colors),interpolation='hamming')
wc_1 = WordCloud(background_color='black', height = 1500, width = 4000, mask = Mask).generate(all_words_negative)
plt.figure(figsize=(160,320))
plt.imshow(wc_1.recolor(color_func=image_colors),interpolation='gaussian')
# Plot bar-plot
word_freq_positive = nltk.FreqDist(ht_positive_unnest) # This is similar to a dictionary
word_freq_negative = nltk.FreqDist(ht_negative_unnest) # This is similar to a dictionary
df_positive = pd.DataFrame({'Hashtags':list(word_freq_positive.keys()),'Count':list(word_freq_positive.values())})
df_positive_plot = df_positive.nlargest(20,columns='Count')
df_negative = pd.DataFrame({'Hashtags':list(word_freq_negative.keys()),'Count':list(word_freq_negative.values())})
df_negative_plot = df_negative.nlargest(20,columns='Count')
# seaborn library
sns.barplot(data=df_positive_plot,y='Hashtags',x='Count').set_title('Top 20 Positive Freq')
sns.despine()
sns.barplot(data=df_negative_plot,y='Hashtags',x='Count').set_title('TOP 20 Negative Freq')
sns.despine() | [
"matplotlib",
"seaborn"
] |
647c584cbf96fba0ddd81b8c54a565b26dc7407f | Python | yehongyu/add_model_implement | /tf/text_classification.py | UTF-8 | 2,813 | 2.59375 | 3 | [] | no_license | # coding=utf-8
import tensorflow as tf
from tensorflow import keras
import numpy as np
import matplotlib.pyplot as plt
print(tf.__version__)
imdb = keras.datasets.imdb
(train_data, train_labels), (test_data, test_labels) = imdb.load_data(num_words=10000)
print train_data.shape
print train_labels.shape
print test_data.shape
print test_labels.shape
print(train_data[0])
print(train_labels[0])
print len(train_data[0]), len(train_data[1])
def get_word_index():
## 将整数转换回字词
word_src = imdb.get_word_index()
word_index = {}
count = 0
for k, v in word_src.items():
##print k, v
count += 1
if count < 10:
print k, v
word_index[k] = v+3
word_index["<PAD>"] = 0
word_index["<START>"] = 1
word_index["<UNK>"] = 2
word_index["<UNUSED>"] = 3
reverse_word_index = dict()
for key, value in word_index.items():
reverse_word_index[value] = key
return word_index, reverse_word_index
def decode_review(text_index, reverse_word_index):
res_list = []
for i in text_index:
res_list.append(reverse_word_index.get(i, '?'))
return ' '.join(res_list)
word_index, reverse_word_index = get_word_index()
print decode_review(train_data[0], reverse_word_index)
train_data = keras.preprocessing.sequence.pad_sequences(
train_data, value=word_index['<PAD>'],
padding='post', maxlen=256
)
test_data = keras.preprocessing.sequence.pad_sequences(
test_data, value=word_index['<PAD>'],
padding='post', maxlen=256
)
print len(train_data[0]), len(train_data[1])
print train_data[0]
## 构建模型
## 设计模型使用多少个层?每一层使用多少个隐藏单元
vocab_size = 10000
model = keras.Sequential()
model.add(keras.layers.Embedding(vocab_size, 16))
model.add(keras.layers.GlobalAveragePooling1D())
model.add(keras.layers.Dense(16, activation=tf.nn.relu))
model.add(keras.layers.Dense(1, activation=tf.nn.sigmoid))
model.summary()
## 选择优化器,损失函数,指标
model.compile(optimizer=tf.train.AdamOptimizer(),
loss='binary_crossentropy',
metrics=['accuracy'])
## 训练数据,测试数据
x_val = train_data[:10000]
partial_x_train = train_data[10000:]
y_val = train_labels[:10000]
partial_y_train = train_labels[10000:]
## 训练模型
history = model.fit(partial_x_train,
partial_y_train,
epochs=40,
batch_size=512,
validation_data=(x_val, y_val),
verbose=1)
results = model.evaluate(test_data, test_labels)
print results
history_dict = history.history
history_dict.keys()
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
| [
"matplotlib"
] |
4c9ac9128d217cf051ebbbd5ad146280a9b32bfa | Python | nsnmsak/graphillion_tutorial | /ja/tutorial_util.py | UTF-8 | 3,083 | 3.046875 | 3 | [
"BSD-3-Clause"
] | permissive | #!/usr/bin/env python
# coding: utf-8
from graphillion import GraphSet
from graphviz import Digraph
import networkx as nx
import json
import matplotlib.pyplot as plt
from IPython.display import Image
def zdd_size(graph_set):
zdd = dump2zdd(graph_set.dumps().split("\n"))
return len(zdd)
def draw_zdd(graph_set, universe=None):
if not universe:
universe = GraphSet.universe()
zdd = dump2zdd(graph_set.dumps().split("\n"))
return draw(zdd, universe)
def draw(zdd, labels):
dot_str_lines = []
dot_str_lines.append("digraph top {")
dot_str_lines.append('node[ colorscheme = "rdylgn11", color = 3];')
dot = Digraph()
same_label_nodes = {}
for nid in zdd:
vals = zdd[nid]
label = vals['label']
lo = vals['lo']
hi = vals['hi']
if label not in same_label_nodes:
same_label_nodes[label] = []
same_label_nodes[label].append(nid)
dot.node(nid, str(labels[int(label)-1]))
dot.edge(nid, lo, style='dashed')
dot.edge(nid, hi, style='solid')
dot.node('T', '1', shape='square')
dot.node('B', '0', shape='square')
for labels in same_label_nodes.values():
with dot.subgraph() as c:
c.body.append("{rank= same;" + "; ".join(labels) + ";}")
return dot
def dump2zdd(arr):
nodes = {}
for elem in arr:
elems = elem.split()
if len(elems) != 4:
continue
nid, label, lo, hi = elems
nodes[nid] = {'label': label, 'lo': lo, 'hi': hi}
return nodes
def _encode_digit(val):
if isinstance(val, int):
return '_int' + str(val)
return val
def _decode_digit(val):
if isinstance(val, str) and val.startswith('_int'):
return int(val[4:])
return val
def _graph2nx_layout(graph):
dot = Digraph()
for u, v in graph.edges:
u = _encode_digit(u)
v = _encode_digit(v)
dot.edge(u, v)
json_obj = json.loads(dot.pipe(format='json'))
positions = {}
for node in json_obj['objects']:
name = _decode_digit(node['name'])
pos_pair = tuple(float(x) for x in node['pos'].split(','))
positions[name] = pos_pair
return positions
def draw_universe(universe=None):
draw_subgraph(None, universe)
def draw_subgraph(subgraph=None, universe=None):
if not universe:
universe = GraphSet.universe()
g = nx.Graph(sorted(universe))
if not subgraph:
subgraph = set([])
else:
subgraph = set(subgraph)
pos = _graph2nx_layout(g)
nx.draw_networkx_nodes(g, pos, node_color='#FFFFFF', edgecolors='#000000')
edge_weights = []
edge_colors = []
for edge in g.edges():
if edge in subgraph or (edge[1], edge[0]) in subgraph:
edge_weights.append(5)
edge_colors.append('#FF0000')
else:
edge_weights.append(1)
edge_colors.append('#000000')
nx.draw_networkx_labels(g, pos)
nx.draw_networkx_edges(g, pos, edge_color=edge_colors, width=edge_weights)
plt.show()
| [
"matplotlib"
] |
c9a91c979923230118486010da1341384509f835 | Python | Getrightdelpan/AWS | /Task1.py | UTF-8 | 17,505 | 2.796875 | 3 | [] | no_license | import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import tensorflow as tf
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.manifold import TSNE
from sklearn.decomposition import PCA, TruncatedSVD
import matplotlib.patches as mpatches
import time
import timeit
import pickle
import sys
from scipy.stats import chi2_contingency
# Classifier Libraries
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
import collections
from sklearn.impute import SimpleImputer
# Other Libraries
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline
from imblearn.pipeline import make_pipeline as imbalanced_make_pipeline
from imblearn.over_sampling import SMOTE
from imblearn.under_sampling import NearMiss
from imblearn.metrics import classification_report_imbalanced
from sklearn.metrics import precision_score, recall_score, f1_score, roc_auc_score, accuracy_score, classification_report
from collections import Counter
from sklearn.model_selection import KFold, StratifiedKFold
df1=pd.read_csv(r"C:\Users\Getright\Desktop\Training_part1.csv",sep=";")
df2=pd.read_csv(r"C:\Users\Getright\Desktop\Training_part2.csv",sep=";")
df=pd.merge(df1,df2,on="id")
df.drop(labels=['id'], axis=1, inplace=True)
df.info()
# 检验数据分布,为确保绘制的饼图为圆形,需执行如下代码
colors = ["#0101DF", "#DF0101"]
sns.countplot('Class', data=df, palette=colors)
plt.title('Class Distributions', fontsize=14)
plt.show()
# =============================================================================
# 变量相关性
# =============================================================================
#查看连续变量间的相关性
df.describe().T
sns.heatmap(data=df.select_dtypes(include=('float64','int64')).corr(),
annot=True, cmap='coolwarm')
df[['FAN','NUS']].to_csv(path_or_buf=r"C:\Users\Getright\Desktop\Unique.csv")
#寻找类别变量
def summarize_categoricals(df, show_levels=False):
"""
Display uniqueness in each column
"""
data = [[df[c].unique(), len(df[c].unique()), df[c].isnull().sum()] for c in df.columns]
df_temp = pd.DataFrame(data, index=df.columns,
columns=['Levels', 'No. of Levels', 'No. of Missing Values'])
return df_temp.iloc[:, 0 if show_levels else 1:]
def find_categorical(df, cutoff=20):
"""
Function to find categorical columns in the dataframe.
"""
cat_cols = []
for col in df.columns:
if len(df[col].unique()) <= cutoff:
cat_cols.append(col)
return cat_cols
def to_categorical(columns, df):
"""
Converts the columns passed in `columns` to categorical datatype
"""
for col in columns:
df[col] = df[col].astype('category')
return df
#强制转换为类别变量
summarize_categoricals(df, show_levels=True)
df = to_categorical(find_categorical(df), df)
df.info()
#查看类别变量间的相关性(Cramer's V statistic)
def cramers_corrected_stat(contingency_table):
"""
Computes corrected Cramer's V statistic for categorial-categorial association
"""
chi2 = chi2_contingency(contingency_table)[0]
n = contingency_table.sum().sum()
phi2 = chi2/n
r, k = contingency_table.shape
r_corrected = r - (((r-1)**2)/(n-1))
k_corrected = k - (((k-1)**2)/(n-1))
phi2_corrected = max(0, phi2 - ((k-1)*(r-1))/(n-1))
return (phi2_corrected / min( (k_corrected-1), (r_corrected-1)))**0.5
def categorical_corr_matrix(df):
"""
Computes corrected Cramer's V statistic between
all the categorical variables in the dataframe
"""
df = df.select_dtypes(include='category')
cols = df.columns
n = len(cols)
corr_matrix = pd.DataFrame(np.zeros(shape=(n, n)), index=cols, columns=cols)
for col1 in cols:
for col2 in cols:
if col1 == col2:
corr_matrix.loc[col1, col2] = 1
break
df_crosstab = pd.crosstab(df[col1], df[col2], dropna=False)
corr_matrix.loc[col1, col2] = cramers_corrected_stat(df_crosstab)
# Flip and add to get full correlation matrix
corr_matrix += np.tril(corr_matrix, k=-1).T
return corr_matrix
fig, ax = plt.subplots(figsize=(15, 10))
sns.heatmap(categorical_corr_matrix(df), annot=True, cmap='coolwarm',
cbar_kws={'aspect': 50}, square=True, ax=ax)
plt.xticks(rotation=60)
df[['ERG','GJAH']].to_csv(path_or_buf=r"C:\Users\Getright\Desktop\Unique.csv")
df[['RAS','XIN']].to_csv(path_or_buf=r"C:\Users\Getright\Desktop\Unique.csv")
# =============================================================================
# 构建二分类模型
# =============================================================================
#检查缺失值,用众数填充
print(df.isnull().sum())
features = []
for x in df.columns: # 取特征
features.append(x)
features_mode = {}
for f in features:
features_mode[f] = list(df[f].dropna().mode().values)[0]
df.fillna(features_mode,inplace=True)
#拆分解释变量和目标变量
x = df.drop(labels=['Class','NUS','GJAH','XIN'], axis=1)
y = df['Class']
#拆分连续变量和类别变量
categorical_columns = list(x.select_dtypes(include='category').columns)
numeric_columns = list(x.select_dtypes(exclude='category').columns)
#拆分训练集和验证集
from sklearn.model_selection import train_test_split
data_splits = train_test_split(x, y, test_size=0.25, random_state=0,
shuffle=True, stratify=y)
x_train, x_test, y_train, y_test = data_splits
#利用SMOTENC作Oversample
from imblearn.over_sampling import SMOTENC
smote = SMOTENC(categorical_features=(x_train.dtypes == "category").values,
random_state=42)
x_train, y_train = smote.fit_resample(x_train, y_train)
pd.Series(y_train).value_counts()
sns.countplot(x=y_train)
#独热编码和归一化处理
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OneHotEncoder, StandardScaler, LabelEncoder
categorical_columns = list(x.select_dtypes(include='category').columns)
transformers = [('one_hot_encoder',
OneHotEncoder(drop='first',dtype='int'),
categorical_columns),
('standard_scaler', StandardScaler(), numeric_columns)]
x_trans = ColumnTransformer(transformers, remainder='passthrough')
x_train = x_trans.fit_transform(x_train)
x_test = x_trans.transform(x_test)
y_trans = LabelEncoder()
y_train = y_trans.fit_transform(y_train)
y_test = y_trans.transform(y_test)
feature_names = list(x_trans.named_transformers_['one_hot_encoder'] \
.get_feature_names(input_features=categorical_columns))
feature_names = feature_names + numeric_columns
from sklearn.metrics import confusion_matrix, classification_report, roc_auc_score, \
precision_recall_curve, roc_curve, accuracy_score
from sklearn.exceptions import NotFittedError
#最后用logistic Regression进行分类预测
def confusion_plot(matrix, labels=None):
""" Display binary confusion matrix as a Seaborn heatmap """
labels = labels if labels else ['Negative (y)', 'Positive (n)']
fig, ax = plt.subplots(nrows=1, ncols=1)
sns.heatmap(data=matrix, cmap='Blues', annot=True, fmt='d',
xticklabels=labels, yticklabels=labels, ax=ax)
ax.set_xlabel('PREDICTED')
ax.set_ylabel('ACTUAL')
ax.set_title('Confusion Matrix')
plt.close()
return fig
def roc_plot(y_true, y_probs, label, compare=False, ax=None):
""" Plot Receiver Operating Characteristic (ROC) curve
Set `compare=True` to use this function to compare classifiers. """
fpr, tpr, thresh = roc_curve(y_true, y_probs)
auc = round(roc_auc_score(y_true, y_probs), 2)
fig, axis = (None, ax) if ax else plt.subplots(nrows=1, ncols=1)
label = ' '.join([label, f'({auc})']) if compare else None
sns.lineplot(x=fpr, y=tpr, ax=axis, label=label)
if compare:
axis.legend(title='Classifier (AUC)', loc='lower right')
else:
axis.text(0.72, 0.05, f'AUC = { auc }', fontsize=12,
bbox=dict(facecolor='green', alpha=0.4, pad=5))
# Plot No-Info classifier
axis.fill_between(fpr, fpr, tpr, alpha=0.3, edgecolor='g',
linestyle='--', linewidth=2)
axis.set_xlim(0, 1)
axis.set_ylim(0, 1)
axis.set_title('ROC Curve')
axis.set_xlabel('False Positive Rate [FPR]\n(1 - Specificity)')
axis.set_ylabel('True Positive Rate [TPR]\n(Sensitivity or Recall)')
plt.close()
return axis if ax else fig
def precision_recall_plot(y_true, y_probs, label, compare=False, ax=None):
""" Plot Precision-Recall curve.
Set `compare=True` to use this function to compare classifiers. """
p, r, thresh = precision_recall_curve(y_true, y_probs)
p, r, thresh = list(p), list(r), list(thresh)
p.pop()
r.pop()
fig, axis = (None, ax) if ax else plt.subplots(nrows=1, ncols=1)
if compare:
sns.lineplot(r, p, ax=axis, label=label)
axis.set_xlabel('Recall')
axis.set_ylabel('Precision')
axis.legend(loc='lower left')
else:
sns.lineplot(thresh, p, label='Precision', ax=axis)
axis.set_xlabel('Threshold')
axis.set_ylabel('Precision')
axis.legend(loc='lower left')
axis_twin = axis.twinx()
sns.lineplot(thresh, r, color='limegreen', label='Recall', ax=axis_twin)
axis_twin.set_ylabel('Recall')
axis_twin.set_ylim(0, 1)
axis_twin.legend(bbox_to_anchor=(0.24, 0.18))
axis.set_xlim(0, 1)
axis.set_ylim(0, 1)
axis.set_title('Precision Vs Recall')
plt.close()
return axis if ax else fig
def feature_importance_plot(importances, feature_labels, ax=None):
fig, axis = (None, ax) if ax else plt.subplots(nrows=1, ncols=1, figsize=(5, 10))
sns.barplot(x=importances, y=feature_labels, ax=axis)
axis.set_title('Feature Importance Measures')
plt.close()
return axis if ax else fig
def train_clf(clf, x_train, y_train, sample_weight=None, refit=False):
train_time = 0
try:
if refit:
raise NotFittedError
y_pred_train = clf.predict(x_train)
except NotFittedError:
start = timeit.default_timer()
if sample_weight is not None:
clf.fit(x_train, y_train, sample_weight=sample_weight)
else:
clf.fit(x_train, y_train)
end = timeit.default_timer()
train_time = end - start
y_pred_train = clf.predict(x_train)
train_acc = accuracy_score(y_train, y_pred_train)
return clf, y_pred_train, train_acc, train_time
def report(clf, x_train, y_train, x_test, y_test, sample_weight=None,
refit=False, importance_plot=False, confusion_labels=None,
feature_labels=None, verbose=True):
""" Trains the passed classifier if not already trained and reports
various metrics of the trained classifier """
dump = dict()
## Train if not already trained
clf, train_predictions, \
train_acc, train_time = train_clf(clf, x_train, y_train,
sample_weight=sample_weight,
refit=refit)
## Testing
start = timeit.default_timer()
test_predictions = clf.predict(x_test)
end = timeit.default_timer()
test_time = end - start
test_acc = accuracy_score(y_test, test_predictions)
y_probs = clf.predict_proba(x_test)[:, 1]
roc_auc = roc_auc_score(y_test, y_probs)
## Model Memory
model_mem = round(model_memory_size(clf) / 1024, 2)
print(clf)
print("\n=============================> TRAIN-TEST DETAILS <======================================")
## Metrics
print(f"Train Size: {x_train.shape[0]} samples")
print(f" Test Size: {x_test.shape[0]} samples")
print("------------------------------------------")
print(f"Training Time: {round(train_time, 3)} seconds")
print(f" Testing Time: {round(test_time, 3)} seconds")
print("------------------------------------------")
print("Train Accuracy: ", train_acc)
print(" Test Accuracy: ", test_acc)
print("------------------------------------------")
print(" Area Under ROC: ", roc_auc)
print("------------------------------------------")
print(f"Model Memory Size: {model_mem} kB")
print("\n=============================> CLASSIFICATION REPORT <===================================")
## Classification Report
clf_rep = classification_report(y_test, test_predictions, output_dict=True)
print(classification_report(y_test, test_predictions,
target_names=confusion_labels))
if verbose:
print("\n================================> CONFUSION MATRIX <=====================================")
## Confusion Matrix HeatMap
display(confusion_plot(confusion_matrix(y_test, test_predictions),
labels=confusion_labels))
print("\n=======================================> PLOTS <=========================================")
## Variable importance plot
fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(14, 10))
roc_axes = axes[0, 0]
pr_axes = axes[0, 1]
importances = None
if importance_plot:
if not feature_labels:
raise RuntimeError("'feature_labels' argument not passed "
"when 'importance_plot' is True")
try:
importances = pd.Series(clf.feature_importances_,
index=feature_labels) \
.sort_values(ascending=False)
except AttributeError:
try:
importances = pd.Series(clf.coef_.ravel(),
index=feature_labels) \
.sort_values(ascending=False)
except AttributeError:
pass
if importances is not None:
# Modifying grid
grid_spec = axes[0, 0].get_gridspec()
for ax in axes[:, 0]:
ax.remove() # remove first column axes
large_axs = fig.add_subplot(grid_spec[0:, 0])
# Plot importance curve
feature_importance_plot(importances=importances.values,
feature_labels=importances.index,
ax=large_axs)
large_axs.axvline(x=0)
# Axis for ROC and PR curve
roc_axes = axes[0, 1]
pr_axes = axes[1, 1]
else:
# remove second row axes
for ax in axes[1, :]:
ax.remove()
else:
# remove second row axes
for ax in axes[1, :]:
ax.remove()
## ROC and Precision-Recall curves
clf_name = clf.__class__.__name__
roc_plot(y_test, y_probs, clf_name, ax=roc_axes)
precision_recall_plot(y_test, y_probs, clf_name, ax=pr_axes)
fig.subplots_adjust(wspace=5)
fig.tight_layout()
display(fig)
## Dump to report_dict
dump = dict(clf=clf, train_acc=train_acc, train_time=train_time,
train_predictions=train_predictions, test_acc=test_acc,
test_time=test_time, test_predictions=test_predictions,
test_probs=y_probs, report=clf_rep, roc_auc=roc_auc,
model_memory=model_mem)
return clf, dump
def model_memory_size(clf):
return sys.getsizeof(pickle.dumps(clf))
from sklearn.linear_model import LogisticRegressionCV
confusion_lbs = ['n', 'y']
logit_cv = LogisticRegressionCV(Cs=10, class_weight='balanced', cv=5, dual=False,
fit_intercept=True, intercept_scaling=1.0, l1_ratios=None,
max_iter=500, multi_class='auto', n_jobs=None,
penalty='l1', random_state=0, refit=True,
scoring='f1', solver='liblinear', tol=0.0001,
verbose=0)
logit_cv, logit_report = report(logit_cv, x_train, y_train,
x_test, y_test, refit=True,
importance_plot=True,
feature_labels=feature_names,
confusion_labels=confusion_lbs) | [
"matplotlib",
"seaborn"
] |
ba35abb9be331698acbc93222d6e35fb808e065d | Python | QHCV/spider-projects | /sentiment_classification/cnn.py | UTF-8 | 4,423 | 2.6875 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding:utf-8 _*-
"""
@author:quincyqiang
@license: Apache Licence
@file: cnn.py
@time: 2019-06-18 22:30
@description:
"""
import pandas as pd
import numpy as np
from utils import set_label
from sklearn.preprocessing import LabelBinarizer
from sklearn.metrics import f1_score, accuracy_score
from scipy.stats import pearsonr
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.layers import Dense, Input, Flatten
from keras.layers import Conv1D, MaxPooling1D, Embedding, Dropout
from keras.models import Model
from keras.callbacks import ModelCheckpoint
import matplotlib.pyplot as plt
# plt.switch_backend('agg')
train = pd.read_csv('sina/sinanews.train', sep='\t', header=None)
train.columns = ['datetime', 'sentiment', 'news']
test = pd.read_csv('sina/sinanews.test', sep='\t', header=None)
test.columns = ['datetime', 'sentiment', 'news']
df = pd.concat([train, test], axis=0)
df['label'] = df['sentiment'].apply(lambda x: set_label(x))
tokenizer = Tokenizer()
texts = df['news']
tokenizer.fit_on_texts(texts)
sequences = tokenizer.texts_to_sequences(texts)
word_index = tokenizer.word_index
print('不同词汇的个数', len(word_index))
X = pad_sequences(sequences, maxlen=400)
lb = LabelBinarizer()
y = lb.fit_transform(df['label'].values)
print('Shape of X Tensor:', X.shape)
print('Shape of Label Tensor:', y.shape)
indices = np.arange(X.shape[0])
np.random.shuffle(indices)
X = X[indices]
y = y[indices]
train_size = len(train)
x_train = X[:train_size]
y_train = y[:train_size]
x_test = X[train_size:]
y_test = y[train_size:]
# 加载词向量
embeddings_index = {}
f = open('sgns.weibo.word',encoding='utf8')
for line in f:
values = line.split()
word = values[0]
coefs = np.asarray(values[1:], dtype='float32')
embeddings_index[word] = coefs
f.close()
print('Total %s word vectors in 新浪微博 300维.' % len(embeddings_index))
embedding_matrix = np.random.random((len(word_index) + 1, 300))
# for word, i in word_index.items():
# embedding_vector = embeddings_index.get(word)
# if embedding_vector is not None:
# embedding_matrix[i] = embedding_vector
embedding_layer = Embedding(len(word_index) + 1,
300, weights=[embedding_matrix],
input_length=400, trainable=True)
sequence_input = Input(shape=(400,), dtype='int32')
embedded_sequences = embedding_layer(sequence_input)
l_cov1 = Conv1D(128, 5, activation='relu')(embedded_sequences)
l_pool1 = MaxPooling1D(5)(l_cov1)
l_pool1=Dropout(0.25)(l_pool1)
l_cov2 = Conv1D(64, 5, activation='relu')(l_pool1)
l_pool2 = MaxPooling1D(5)(l_cov2)
l_cov3 = Conv1D(32, 5, activation='relu')(l_pool2)
l_pool3 = MaxPooling1D(3)(l_cov3) # global max pooling
l_flat = Flatten()(l_pool3)
l_dense = Dense(128, activation='relu')(l_flat)
preds = Dense(len(df['label'].value_counts()), activation='softmax')(l_dense)
model = Model(sequence_input, preds)
model.compile(loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['acc'])
print("Simplified convolutional neural network")
model.summary()
cp = ModelCheckpoint('models/model_cnn.hdf5', monitor='val_acc', verbose=1, save_best_only=True)
history = model.fit(x_train, y_train, validation_data=(x_test, y_test), epochs=10, batch_size=32, callbacks=[cp])
fig1 = plt.figure()
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.legend(['Training loss', 'Validation Loss'], fontsize=18)
plt.xlabel('Epochs ', fontsize=16)
plt.ylabel('Loss', fontsize=16)
plt.title('Loss Curves :CNN', fontsize=16)
fig1.savefig('pictures/loss_cnn.png')
plt.show()
fig2=plt.figure()
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.legend(['Training Accuracy', 'Validation Accuracy'],fontsize=18)
plt.xlabel('Epochs ',fontsize=16)
plt.ylabel('Accuracy',fontsize=16)
plt.title('Accuracy Curves : CNN',fontsize=16)
fig2.savefig('pictures/accuracy_cnn.png')
plt.show()
from keras.utils.vis_utils import plot_model
plot_model(model, to_file='pictures/cnn_model.png', show_shapes=True, show_layer_names=True)
y_pred = model.predict(x_test)
y_test,y_pred=np.argmax(y_test,axis=1),np.argmax(y_pred,axis=1)
print("accuracy score:", accuracy_score(y_test, y_pred))
print("micro f1-score:", f1_score(y_test, y_pred, average='micro'))
print(pearsonr(y_test,y_pred)) | [
"matplotlib"
] |
ef7532de2865fa06b52f62cb21bab333655fd350 | Python | owerko/ztpi_batymetria | /main.py | UTF-8 | 2,649 | 2.515625 | 3 | [] | no_license | from funkcje import *
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.tri import Triangulation, TriAnalyzer, UniformTriRefiner
from matplotlib.ticker import (MultipleLocator, FormatStrFormatter)
# -*- coding: utf-8 -*-
if __name__ == '__main__':
sciezka_modelu = 'dane/model.xls'
katalog = 'dane'
punkty_all = []
sciezka = sciezki_plikow(katalog)
for i in sciezka:
for j in i[0]:
punkty = laczenie_czasow(j, i[1], i[2], i[3], sciezka_modelu, 10, 50)
punkty_all += punkty
print('Liczba punktów: ', len(punkty))
save_excel('dane_wyjsciowe/PKT.xls', punkty)
# Tworzenie mapy warstwicowej
x = []
y = []
z = []
for i in punkty_all:
x.append(i.X)
y.append(i.Y)
z.append(i.H)
minH = int(min(z) + 1)
maxH = int(max(z)) + 1
skok = 5
breaks = []
breaks.append(minH)
i = 0
while breaks[i] < maxH:
breaks.append(breaks[i] + skok)
i += 1
init_mask_frac = 20
plt.rcParams['axes.labelsize'] = 16
plt.rcParams['axes.titlesize'] = 20
plt.rcParams['contour.negative_linestyle'] = 'solid'
tri = Triangulation(y, x)
fig, ax = plt.subplots()
fig.set_size_inches(10, 15)
ax.tick_params(labelsize=15)
ax.yaxis.set_major_formatter(FormatStrFormatter('%d'))
ax.xaxis.set_major_formatter(FormatStrFormatter('%d'))
ax.set_aspect('equal')
ax.set_title("Mapa warstwicowa")
CS = ax.tricontour(tri, z, breaks, linewidths=[0.5, 0.25], colors='saddlebrown')
plt.clabel(CS, inline=5, fontsize=8)
ax.set_ylabel('X[m]')
ax.set_xlabel('Y[m]')
ax.triplot(tri, color='0.7')
plt.grid()
plt.savefig('plot.png', dpi=300)
plt.show()
# Tworzenie mapy hipsometrycznej
matplotlib.rcParams['contour.negative_linestyle'] = 'solid'
tri = Triangulation(y, x)
mask = TriAnalyzer(tri).get_flat_tri_mask(0.02)
tri.set_mask(mask)
fig, ax = plt.subplots()
fig.set_size_inches(10, 15)
ax.tick_params(labelsize=15)
ax.yaxis.set_major_formatter(FormatStrFormatter('%d'))
ax.xaxis.set_major_formatter(FormatStrFormatter('%d'))
ax.set_aspect('equal')
ax.set_title("Mapa warstwicowa")
CS = ax.tricontourf(tri, z, cmap='RdBu')
ax.tricontour(tri, z, breaks, linewidths=[0.5, 0.25], colors='saddlebrown')
plt.clabel(CS, inline=1, fontsize=10)
ax.set_ylabel('X[m]')
ax.set_xlabel('Y[m]')
plt.grid()
plt.savefig('plotHipso.png', dpi=300)
plt.show()
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.plot_trisurf(x, y, z, linewidth=0.2, antialiased=True)
plt.show()
| [
"matplotlib"
] |
8763c86fa14e3eb248df5dd11b9d21d22367fed1 | Python | zhenchristopher/machine_learning | /CS156/Code/finalp13.py | UTF-8 | 3,774 | 2.53125 | 3 | [] | no_license | import random
import numpy as np
import copy as cp
from numpy import linalg as LA
from sklearn.svm import SVC
import matplotlib.pyplot as plt
def linreg(x,y): #function to perform linear regression
return np.dot(np.linalg.pinv(x),y)
def find_center(point,centers):
distances = np.array([LA.norm(point-centers[i]) for i in range(len(centers))])
return np.argmin(distances)
def lloyd(points,centers):
closest_centers = [find_center(points[i],centers) for i in range(len(points))]
for i in range(len(centers)):
cluster = [j for j in range(len(closest_centers)) if closest_centers[j] == i]
if len(cluster) == 0:
return "empty"
centers[i,0] = sum([points[cluster[k],0]/len(cluster) for k in range(len(cluster))])
centers[i,1] = sum([points[cluster[k],1]/len(cluster) for k in range(len(cluster))])
return [sum([LA.norm(points[i]-centers[closest_centers[i]])**2 for i in range(len(points))]),centers]
def rbf(points,numcenters,y,gamma):
centers = 2 * np.random.random_sample((numcenters,2)) - 1
phi = np.zeros([len(points),numcenters])
min_center = 0
min_center1 = 200
first = True
while min_center1 < min_center or first:
first = False
min_center = cp.copy(min_center1)
test = lloyd(points,centers)
if test == "empty":
return ["empty","empty"]
min_center1 = cp.copy(test[0])
centers = cp.copy(test[1])
for i in range(len(phi)):
for j in range(len(phi[0])):
phi[i,j] = np.exp((-gamma*LA.norm(points[i]-centers[j])**2))
return [linreg(phi,y),centers]
def inerror(w,centers,gamma,points): #find the in-sample error
error = 0
for i in range(len(points)):
y = np.sign(points[i,1]-points[i,0]+0.25*np.sin(np.pi*points[i,0]))
testy = 0
for j in range(len(w)):
testy += w[j]*np.exp(-gamma*LA.norm(points[i]-centers[j])**2)
if np.sign(testy) != y:
error += 1
#print(error)
return error/len(points)
def outerror(w,centers,gamma,points):
error = 0
for i in range(len(points)):
randx = points[i,0]
randy = points[i,1]
y = np.sign(randy-randx+0.25*np.sin(np.pi*randx))
testy = 0
for j in range(len(w)):
testy += w[j]*np.exp(-gamma*LA.norm(np.array([randx,randy])-centers[j])**2)
if np.sign(testy) != y:
error += 1
#print(error/1000)
return error/len(points)
reg_E_in = 0
counter = 0
a16 = 0
b16 = 0
c16 = 0
d16 = 0
e16 = 0
fail = 0
kern_better = 0
kern_better2 = 0
#parameters
gamma = 1.5
gamma2 = 2
K = 9
N = 100
K2 = 12
for i in range(N):
points = 2 * np.random.random_sample((100,2)) - 1
y = np.array([np.sign(points[i,1]-points[i,0]+0.25*np.sin(np.pi*points[i,0])) for i in range(len(points))])
[w,centers] = rbf(points,K,y,gamma)
[w2,centers2] = rbf(points,K2,y,gamma)
clf = SVC(float('Inf'), 'rbf', gamma=1.5).fit(points,y)
#plt.scatter(points[:,0],points[:,1])
#plt.scatter(centers[:,0],centers[:,1],c='red')
#plt.show()
#print(w)
#if clf.score(points,y) != 1:
# fail += 1
if w == "empty" or w2 == "empty" or clf.score(points,y) != 1:
continue
counter += 1
testpoints = 2 * np.random.random_sample((1000,2)) - 1
#a = outerror(w,centers,gamma,testpoints)
#b = 1-clf.score(testpoints,np.array([np.sign(testpoints[i,1]-testpoints[i,0]+0.25*np.sin(np.pi*testpoints[i,0])) for i in range(len(testpoints))]))
#c = outerror(w2,centers2,gamma,testpoints)
a = inerror(w,centers,gamma,points)
b = outerror(w,centers,gamma,testpoints)
c = inerror(w2,centers2,gamma,points)
d = outerror(w2,centers2,gamma,testpoints)
#if a == 0:
# reg_E_in += 1
if c < a and d > b:
a16 += 1
if c > a and d < b:
b16 += 1
if c > a and d > b:
c16 += 1
if c < a and d < b:
d16 += 1
if c == a and d == b:
e16 += 1
if b < a:
kern_better += 1
if b < c:
kern_better2 += 1
#(a,b,c)
print(a16, b16, c16, d16, e16)
#print(fail/N)
print(kern_better/counter, kern_better2/counter) | [
"matplotlib"
] |
7916788ea4befff956e5bca55c3d1e9bb412e189 | Python | thesaientist/BH-python-scripts | /Rotation 4/Carbon Management/Python Scripts/lifting_cost_LAR_plot.py | UTF-8 | 5,358 | 3.015625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Mon Jun 3 12:31:23 2019
@author: 212566876
"""
#import seaborn as sns
import matplotlib.pyplot as plt
import openpyxl
import pandas as pd
# define any required constants
us_unconv_cost = 50 # $/bbl lifting cost for US Unconventional Production (2016 number, Rystad, as quoted in WSJ)
# load workbook
wb = openpyxl.load_workbook('LAM country data and plots.xlsx', data_only=True)
ws = wb['Carbon Tax Lifting Cost Impact']
# read in needed columns for plto data
countries = []
production = [] # country production rates (MMbbl/d), used for width of columns
base_cost = [] # base lifting cost ($/bbl)
s1_cost = [] # scenario 1 lifting cost (increment over base)
s2_cost = [] # scenario 2 lifting cost (increment over S1)
# collect country data
for i in range(3,11):
# countries.append(ws.cell(row=i, column=1).value) # Country shown
countries.append("Country " + str(i-2)) # Anonymous countries
production.append(ws.cell(row=i, column=2).value)
#cum_prod.append(ws.cell(row=i, column=3).value)
base_cost.append(ws.cell(row=i, column=4).value)
s1_cost.append(ws.cell(row=i, column=17).value)
s2_cost.append(ws.cell(row=i, column=18).value)
# convert lists to pandas dataframe
data = {'Country':countries, 'Production':production, 'Base Cost':base_cost, 'S1 Cost':s1_cost, 'S2 Cost':s2_cost}
lam = pd.DataFrame(data)
lam = lam.set_index('Country')
# lam = lam.drop(index='Peru')
lam = lam.drop(index='Country 6') # Anonymous
#lam = lam.drop(index='Bolivia')
# Calculate S1 and S2 costs as percentages of base cost
lam['S1 Percent'] = lam['S1 Cost']/lam['Base Cost']*100
lam['S2 Percent'] = lam['S2 Cost']/lam['Base Cost']*100
# Sort by base cost and calculate cumulative production column for use in plot
#lam = lam.sort_values(by='Base Cost')
lam = lam.sort_values(by='S1 Percent') # sort by S1 percent ($50/tCO2e carbon price incremental cost of supply)
#lam = lam.sort_values(by='S2 Percent') # sort by S2 percent ($50/tCO2e carbon price incremental cost of supply)
lam['Cumulative Production'] = lam['Production'].cumsum()
#lowest_cost = base_cost[0]
#for i in range(len(base_cost)):
# base_cost[i] -= lowest_cost # convert to relative cost
left = lam.iloc[:-1]['Cumulative Production'].tolist() # exclude last element in cumulative production (for use in plot)
left = [0.0] + left # include 0 at beginning of list for use in plot x-axis
# make lists for use in plot as bases for stacking columns
countries = lam.index.values
production = lam['Production'].tolist()
base_cost = lam['Base Cost'].tolist()
s1_percent = lam['S1 Percent'].tolist()
s2_percent = lam['S2 Percent'].tolist()
#s1_cum_cost = [base_cost[i] + s1_cost[i] for i in range(len(base_cost))]
### PLOT ###
fig, ax = plt.subplots()
#baseCol = ax.bar(left, base_cost, width = production,
# alpha = 0.6, align='edge', edgecolor = 'k', linewidth = 1)
#s1Col = ax.bar(left, s1_cost, width = production, bottom=base_cost,
# alpha = 0.6, align='edge', edgecolor = 'k', linewidth = 1)
s1Col = ax.bar(left, s1_percent, width = production,
alpha = 0.6, align='edge', edgecolor = 'k', linewidth = 1)
s2Col = ax.bar(left, s2_percent, width = production, bottom=s1_percent,
alpha = 0.6, align='edge', edgecolor = 'k', linewidth = 1)
#usUnConv = ax.hlines(us_unconv_cost, 0, cum_prod[-1], colors='maroon', linestyles='dashed', label='U.S. Unconventionals')
ax.set_ylim(0,22)
ax.set_xlabel('Total Crude Oil Production \n million bbl/d')
ax.set_ylabel('Incremental cost \n (%)')
ax.set_title('Carbon Pricing Impact on Crude Supply Costs')
#ax.legend((baseCol[0], s1Col[0], s2Col[0], usUnConv), ('Base', '\$50/tCO2e', '\$150/tCO2e', 'U.S. Unconventionals'))
#ax.legend((baseCol[0], s1Col[0], s2Col[0]), ('Base', '\$50/tCO2e', '\$150/tCO2e'))
ax.legend((s1Col[0], s2Col[0]), ('\$50/tCO2e', '\$150/tCO2e'), loc='upper left')
for i in range(7):
if i == 0:
ax.text((0 + lam.iloc[i]['Cumulative Production'])/2-0.45, 1, countries[i], rotation=0)
elif i==1:
ax.text((lam.iloc[i-1]['Cumulative Production'] + lam.iloc[i]['Cumulative Production'])/2-0.1, 3, countries[i], rotation=90)
elif i==2:
ax.text((lam.iloc[i-1]['Cumulative Production'] + lam.iloc[i]['Cumulative Production'])/2-0.1, 4, countries[i], rotation=90)
elif i==3:
ax.text((lam.iloc[i-1]['Cumulative Production'] + lam.iloc[i]['Cumulative Production'])/2-0.25, 12, countries[i], rotation=90)
elif i==4:
ax.text((lam.iloc[i-1]['Cumulative Production'] + lam.iloc[i]['Cumulative Production'])/2-0.10, 6.5, countries[i], rotation=90)
elif i==5:
ax.text((lam.iloc[i-1]['Cumulative Production'] + lam.iloc[i]['Cumulative Production'])/2-0.45, 1, countries[i], rotation=0)
elif i==6:
ax.text((lam.iloc[i-1]['Cumulative Production'] + lam.iloc[i]['Cumulative Production'])/2-0.65, 1, countries[i], rotation=0)
# peru
# elif i==3:
# ax.text((cum_prod[i] + cum_prod[i+1])/2-0.35, 100, countries[i], rotation=90)
# Shrink current axis by 20%
box = ax.get_position()
ax.set_position([box.x0, box.y0 + box.width*0.1, box.width, box.height])
plt.savefig('cost_curve.png', bbox_inches = 'tight', dpi=300)
| [
"matplotlib",
"seaborn"
] |
d8e5dc67c47935d52e39a1d9f0ae557afda108ea | Python | pablovicente/stock-index-prediction | /helpers/ml_dataset.py | UTF-8 | 12,519 | 2.640625 | 3 | [] | no_license | """
ml_dataset.py
Helper routines that merge the per-instrument price dataframes and turn them
into the train/test feature matrices and target arrays used by the prediction models.
Original author: Pablo Vicente
"""
import calendar
import numpy as np
import pandas as pd
from datetime import datetime
import matplotlib.pyplot as plt
from scipy.interpolate import interp1d
import statsmodels.tsa.stattools as stats
from sklearn import feature_selection
from sklearn import ensemble
def generate_df_dataset(names_list, df_list, dfs_cols_dict):
"""
Parameters
--------------------
    - names_list: a list containing the name of each dataframe as a string
    - df_list: a list with the dataframes, in the same order as the previous list
    - dfs_cols_dict: a dictionary where the key is a df name and the values are the columns to process
"""
    dates = df_list[0]['Date'].tolist()
    dict_values = {'Date': dates}
    col_list = []
    for name, df in zip(names_list, df_list):
        cols = dfs_cols_dict[name]
        new_cols = [name + "_" + x for x in cols]
        col_list.extend(new_cols)
        for c, nc in zip(cols, new_cols):
            dict_values[nc] = df[c]
temp_names_list = list(col_list)
temp_names_list.insert(0, 'Date')
df_dataset = pd.DataFrame(dict_values)
    # Reorder columns so that 'Date' comes first
df_dataset = df_dataset[temp_names_list]
return df_dataset
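# Illustrative usage of generate_df_dataset (a sketch, not part of the original
# pipeline): the dataframe names, column names and prices below are invented for
# the example; the real price feeds use their own column layouts.
#   dates = ['2015-01-02', '2015-01-05']
#   df_gold = pd.DataFrame({'Date': dates, 'USD_AM': [1186.0, 1203.9]})
#   df_ibex = pd.DataFrame({'Date': dates, 'Open': [10279.5, 10222.2], 'Close': [10252.6, 10091.4]})
#   merged = generate_df_dataset(['GOLD', 'IBEX'], [df_gold, df_ibex],
#                                {'GOLD': ['USD_AM'], 'IBEX': ['Open', 'Close']})
#   # merged.columns -> ['Date', 'GOLD_USD_AM', 'IBEX_Open', 'IBEX_Close']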
def remove_columns_from_dataset(dataset, predicting='close', shifted=False):
    """
    Keep only the columns usable for predicting the chosen IBEX price, dropping
    the rest or, optionally, lagging the overlapping same-day columns by one row.
    Parameters
    --------------------
    - dataset: dataframe with all merged columns (output of generate_df_dataset)
    - predicting: 'close' or 'open', the IBEX price being predicted
    - shifted: boolean determining whether same-day columns are shifted by one row instead of removed
    """
###########################
# predicting close price: #
###########################
colsToRemove = []
colsToShift = []
if predicting == 'close':
if not shifted:
colsToRemove.extend([col for col in dataset.columns if 'Date' in col])
colsToRemove.extend([col for col in dataset.columns if 'GOLD' in col and not '_AM' in col])
colsToRemove.extend([col for col in dataset.columns if 'SILVER' in col and not '_USD' in col])
colsToRemove.extend([col for col in dataset.columns if 'OIL_BRENT' in col and not '_USD' in col])
colsToRemove.extend([col for col in dataset.columns if 'PLAT' in col and not '_AM' in col])
colsToRemove.extend([col for col in dataset.columns if 'DJIA' in col and not '_Open' in col])
colsToRemove.extend([col for col in dataset.columns if 'HSI' in col and '_Date' in col])
colsToRemove.extend([col for col in dataset.columns if 'IBEX' in col and not '_Open' in col])
colsToRemove.extend([col for col in dataset.columns if 'N225' in col and'_Date' in col])
colsToRemove.extend([col for col in dataset.columns if 'SP500' in col and not '_Open' in col])
colsToRemove.remove('IBEX_RD_B1_Close')
else:
colsToRemove.extend([col for col in dataset.columns if 'Date' in col])
colsToShift.extend([col for col in dataset.columns if 'GOLD' in col and not '_AM' in col])
colsToShift.extend([col for col in dataset.columns if 'SILVER' in col and not '_USD' in col])
colsToShift.extend([col for col in dataset.columns if 'OIL_BRENT' in col and not '_USD' in col])
colsToShift.extend([col for col in dataset.columns if 'PLAT' in col and not '_AM' in col])
colsToShift.extend([col for col in dataset.columns if 'DJIA' in col and not '_Open' in col])
colsToShift.extend([col for col in dataset.columns if 'HSI' in col and '_Date' in col])
colsToShift.extend([col for col in dataset.columns if 'IBEX' in col and not '_Open' in col])
colsToShift.extend([col for col in dataset.columns if 'N225' in col and'_Date' in col])
colsToShift.extend([col for col in dataset.columns if 'SP500' in col and not '_Open' in col])
colsToShift.remove('IBEX_RD_B1_Close')
###########################
# predicting open price: #
###########################
if predicting == 'open' and not shifted:
colsToRemove.extend([col for col in dataset.columns if 'Date' in col])
colsToRemove.extend([col for col in dataset.columns if 'GOLD' in col])
colsToRemove.extend([col for col in dataset.columns if 'SILVER' in col])
colsToRemove.extend([col for col in dataset.columns if 'PLAT' in col])
colsToRemove.extend([col for col in dataset.columns if 'OIL_BRENT' in col])
colsToRemove.extend([col for col in dataset.columns if 'DJIA' in col])
colsToRemove.extend([col for col in dataset.columns if 'HSI' in col and '_Open' in col])
colsToRemove.extend([col for col in dataset.columns if 'IBEX' in col])
colsToRemove.extend([col for col in dataset.columns if 'N225' in col and'_Date' in col])
colsToRemove.extend([col for col in dataset.columns if 'SP500' in col])
colsToRemove.remove('IBEX_RD_B1_Open')
colsToShift = list(set(colsToShift) - set(colsToRemove))
df = dataset.drop(colsToRemove, axis = 1)
if shifted:
df[colsToShift] = df[colsToShift].shift(1)
df = df[1:]
df = df.reset_index(drop=True)
return df
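# Illustrative usage of remove_columns_from_dataset (a sketch): `merged` stands
# for the dataframe built by generate_df_dataset with the project's real column
# names (e.g. 'IBEX_RD_B1_Close'); the variable names here are assumptions.
#   df_close = remove_columns_from_dataset(merged, predicting='close', shifted=False)
#   df_close_lagged = remove_columns_from_dataset(merged, predicting='close', shifted=True)
#   df_open = remove_columns_from_dataset(merged, predicting='open')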
def dataset_to_train(train_df, test_df, predicting='close', binary = False, shifted = False):
"""
    Parameters
    ---------------------
    - train_df: dataframe containing the training rows and all features
    - test_df: dataframe containing the testing rows and all features
    - predicting: 'close' or 'open', selects the IBEX column used as target
    - binary: boolean determining whether only the binary ('_B') features are kept
    - shifted: boolean determining whether the target is shifted by one row
"""
colY = ''
colsToRemove = []
if predicting == 'close':
colY = 'IBEX_RD_B1_Close'
colsToRemove = ['IBEX_RD_B1_Close']
if predicting == 'open':
colY = 'IBEX_RD_B1_Open'
colsToRemove = ['IBEX_RD_B1_Open']
    if binary:
        colsToRemove.extend([col for col in train_df.columns if '_B' not in col])
    trainX = np.nan_to_num(np.asarray(train_df.drop(colsToRemove, axis=1)))
    testX = np.nan_to_num(np.asarray(test_df.drop(colsToRemove, axis=1)))
if shifted:
trainY = np.nan_to_num(np.asarray(train_df[colY].shift(1)))
testY = np.nan_to_num(np.asarray(test_df[colY].shift(1)))
else:
trainY = np.nan_to_num(np.asarray(train_df[colY]))
testY = np.nan_to_num(np.asarray(test_df[colY]))
return trainX, trainY, testX, testY
def dataset_to_train_using_dates(dataset, trainDates, testDates, predicting = 'close', binary = False, shiftFeatures = False, shiftTarget = False):
"""
    Parameters
    ---------------------
    - dataset: dataframe containing all available columns for a set of dates
    - trainDates: list containing the start training day and end training day
    - testDates: list containing the start testing day and end testing day
    - predicting: 'close' or 'open'; selects which IBEX price column is used as the target
    - binary: boolean that determines whether only the binary ('_B') features are kept
    - shiftFeatures: boolean that determines whether the features are shifted by one day relative to the target
    - shiftTarget: boolean that determines whether the target is shifted by one day relative to the features
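    Example (a minimal sketch; the date ranges and flags below are illustrative
    assumptions, not values taken from this project):
        trainX, trainY, testX, testY, cols = dataset_to_train_using_dates(
            dataset, trainDates=[0, 999], testDates=[1000, 1249],
            predicting='close', binary=True)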
"""
if shiftFeatures==True and shiftTarget==True:
raise ValueError("Features and Target cannot be shifted at the same time")
#dataset = remove_columns_from_dataset(dataset, predicting = predicting, shifted = shiftFeatures)
#dataset = dataset[['GOLD_RD_B1_USD_AM','SILVER_RD_B1_USD','PLAT_RD_B1_USD_AM','OIL_BRENT_RD_B1_USD','DJIA_RD_B1_Open','HSI_RD_B1_Open','IBEX_RD_B1_Open','IBEX_RD_B1_Close','N225_RD_B1_Open','SP500_RD_B1_Open']]
###########################
# predicting close price: #
###########################
colY = ''
colsToRemove = []
if predicting == 'close':
colY = 'IBEX_RD_B1_Close'
colsToRemove = ['Date', 'IBEX_RD_B1_Close']
if predicting == 'open':
colY = 'IBEX_RD_B1_Open'
colsToRemove = ['Date', 'IBEX_RD_B1_Close', 'IBEX_RD_B1_Open']
train_df = dataset.iloc[trainDates[0]:trainDates[1]+1,]
test_df = dataset.iloc[testDates[0]:testDates[1]+1,]
if binary:
colsToRemove.extend([col for col in dataset.columns if '_B' not in col])
colsToRemove = list(set(colsToRemove))
trainX = np.nan_to_num(np.asarray(train_df.drop(colsToRemove, axis = 1)))
testX = np.nan_to_num(np.asarray(test_df.drop(colsToRemove, axis = 1)))
else:
colsToRemove = list(set(colsToRemove))
trainX = np.nan_to_num(np.asarray(train_df.drop(colsToRemove, axis = 1)))
testX = np.nan_to_num(np.asarray(test_df.drop(colsToRemove, axis = 1)))
if shiftTarget:
trainY = np.nan_to_num(np.asarray(train_df[colY].shift(1)))[:]
testY = np.nan_to_num(np.asarray(test_df[colY].shift(1)))[:]
trainX = trainX[1:]
testX = testX[1:]
else:
if shiftFeatures:
trainY = np.nan_to_num(np.asarray(train_df[colY].shift(-1)))
testY = np.nan_to_num(np.asarray(test_df[colY].shift(-1)))
trainX = trainX[1:-1,1:-1]
trainY = trainY[1:-1]
testX = testX[1:-1,1:-1]
testY = testY[1:-1]
else:
trainY = np.nan_to_num(np.asarray(train_df[colY]))
testY = np.nan_to_num(np.asarray(test_df[colY]))
#df = df.drop(dataset.index[-1,], axis=0)
columns_names = dataset.drop(colsToRemove, axis=1).columns.values
return trainX, trainY, testX, testY, columns_names
def train_arrays_experiments(df_x, df_y, trainDates, testDates):
"""
    Parameters
    ---------------------
    - df_x: dataframe containing the feature columns for a set of dates
    - df_y: dataframe (or series) containing the target column for the same dates
    - trainDates: list containing the start training day and end training day
    - testDates: list containing the start testing day and end testing day
"""
train_df_x = df_x.iloc[trainDates[0]:trainDates[1]+1,]
train_df_y = df_y.iloc[trainDates[0]:trainDates[1]+1,]
test_df_x = df_x.iloc[testDates[0]:testDates[1]+1,]
test_df_y = df_y.iloc[testDates[0]:testDates[1]+1,]
trainX = np.nan_to_num(np.asarray(train_df_x))
testX = np.nan_to_num(np.asarray(test_df_x))
trainY = np.nan_to_num(np.asarray(train_df_y))
testY = np.nan_to_num(np.asarray(test_df_y))
return trainX, trainY, testX, testY
def np_train_arrays_experiments(df_x, df_y, trainDates, testDates):
"""
    Parameters
    ---------------------
    - df_x: numpy array containing the feature columns for a set of dates
    - df_y: numpy array containing the target values for the same dates
    - trainDates: list containing the start training day and end training day
    - testDates: list containing the start testing day and end testing day
"""
train_x = df_x[trainDates[0]:trainDates[1]+1,]
train_y = df_y[trainDates[0]:trainDates[1]+1,]
test_x = df_x[testDates[0]:testDates[1]+1,]
test_y = df_y[testDates[0]:testDates[1]+1,]
return train_x, train_y, test_x, test_y
def only_train_array(df_x, df_y, trainDates):
"""
    Parameters
    ---------------------
    - df_x: dataframe containing the feature columns for a set of dates
    - df_y: dataframe (or series) containing the target column for the same dates
    - trainDates: list containing the start training day and end training day
"""
train_df_x = df_x.iloc[trainDates[0]:trainDates[1],]
train_df_y = df_y.iloc[trainDates[0]:trainDates[1],]
trainX = np.nan_to_num(np.asarray(train_df_x))
trainY = np.nan_to_num(np.asarray(train_df_y))
return trainX, trainY
def only_test_array(df_x, df_y, testDates):
"""
    Parameters
    ---------------------
    - df_x: dataframe containing the feature columns for a set of dates
    - df_y: dataframe (or series) containing the target column for the same dates
    - testDates: list containing the start testing day and end testing day
"""
test_df_x = df_x.iloc[testDates[0]:testDates[1],]
test_df_y = df_y.iloc[testDates[0]:testDates[1],]
testX = np.nan_to_num(np.asarray(test_df_x))
testY = np.nan_to_num(np.asarray(test_df_y))
return testX, testY
| [
"matplotlib"
] |
229f5d535307d0ad19c4735544c06b0098efe9b2 | Python | VeraSchild/cp2k_bs_simulation | /plotbands.py | UTF-8 | 6,356 | 2.90625 | 3 | [] | no_license | """
Created Sep 28 2020
@author: Vera Schild
Based on the work of Alex McCab and Bram van der Linden
Creates a bandplot based on a .out file and a .bs file. These files can be specified;
if not, the newest (last-modified) files in the current folder will be used.
How to run example:
python plot_bands.py
- will use newest out- and bsfile
python plot_bands.py -o cp2k.out -b Sibulk.bs
- will use the specified files; the order in which they are given does not matter,
- and you can also specify only one file.
"""
import matplotlib.pyplot as plt
import numpy as np
import argparse
import os
import re
parser = argparse.ArgumentParser(description="optional aguments")
parser.add_argument("--outfile", "-o")
parser.add_argument("--bandstructurefile", "-b")
args = parser.parse_args()
def main():
# Check if files where specified, differently get newest out and/or bs file
if args.outfile:
cp2koutfile = args.outfile
else:
cp2koutfile = max(filter(lambda f: f.endswith('k.out'), os.listdir(os.path.abspath(os.getcwd()))), key=os.path.getctime)
if args.bandstructurefile:
bsfile = args.bandstructurefile
else:
bsfile = max(filter(lambda f: f.endswith('.bs'), os.listdir(os.path.abspath(os.getcwd()))), key=os.path.getctime)
fermi_energy, special_kpoints, dist_special_kpoints, total_kpoints, project_name = readoutfile(cp2koutfile)
energies, n_bands = readbs(bsfile)
print("Plotting the bandstructure of {} for the special kpoints {}\nWith a fermi energy of {:.3f}eV and {} bands.".format(project_name, ' '.join([str(kpoint) for kpoint in special_kpoints]), fermi_energy, n_bands))
if len(total_kpoints) > len(energies):
total_kpoints = total_kpoints[:len(energies)]
elif len(total_kpoints) < len(energies):
energies = energies[:len(total_kpoints)]
plotBands(energies, n_bands, dist_special_kpoints, special_kpoints, fermi_energy, total_kpoints, project_name)
# reading the needed information out of the outfile by searching for a line which
# has some of the words in it of which we know are on the same line as the information
def readoutfile(outfile):
special_kpoints = []
dist_special_kpoints = []
skpoint_pos = []
total_kpoints = 0
with open(outfile) as f:
for line in f:
if "PROGRAM STARTED IN" in line:
project_name = line.split()[-1].split("/")[-1]
if "Number of K-Points in Set" in line:
total_kpoints = float(line.split()[-1]) - 1
if "Fermi energy:" in line:
fermi_energy = float(line.split()[-1]) * 27.2113838565563
if "Special K-Point" in line:
special_kpoints.append(line.split()[4])
skpoint_pos.append([float(i) for i in line.split()[5:]])
                # get the distance between two consecutive special k-points from their kx, ky, kz
if len(dist_special_kpoints) == 0:
dist_special_kpoints.append(0)
else:
dist_special_kpoints.append(np.sqrt((skpoint_pos[-1][0]-skpoint_pos[-2][0])**2 + (skpoint_pos[-1][1]-skpoint_pos[-2][1])**2 + (skpoint_pos[-1][2]-skpoint_pos[-2][2])**2))
special_kpoints = ["$\Gamma$" if KPOINT == "GAMMA" else KPOINT for KPOINT in special_kpoints]
# go from ex. dist special kpoints [0, 0.5, 0.75] to [0, 0.5, 1.25]
for i in range(1, len(dist_special_kpoints)):
dist_special_kpoints[i] = dist_special_kpoints[i] * (total_kpoints/(len(special_kpoints)-1)) + dist_special_kpoints[i-1]
return fermi_energy, special_kpoints, dist_special_kpoints, total_kpoints, project_name
def restart(line):
try:
if int(line[1]) == 1:
return True
except:
return False
# read the bsfile: reading out the energies and number of bands from the correct lines
def readbs(bsfile):
energy = []
energies = []
with open(bsfile) as f:
for line in f:
line = line.split()
if restart(line):
energies = []
if not any(c.isalpha() for c in line):
if len(line) == 1:
if len(energy) != 0:
energies.append(energy)
else:
number_of_bands = int(line[0])
energy = get_energy(f, number_of_bands)
return energies, number_of_bands
def get_energy(file, number_of_bands):
lines_to_read = number_of_bands//4 + 1
energy = []
for lines in range(lines_to_read):
energy += file.readline().split()
return [float(e) for e in energy]
# Creating a list for x and y for the fermi line in the plot
def createFermiLine(x_axis_length,fermi_energy_ev ):
x = np.arange(0,x_axis_length,0.01)
y = np.empty(len(x))
y.fill(fermi_energy_ev)
return x, y
# Creating a scaled x axis
def createXAxis(k_paths, n_specialkpoints, total_kpoints):
x = []
for i in range(len(k_paths)-1):
        x.append(np.linspace(k_paths[i], k_paths[i+1], int(total_kpoints/(n_specialkpoints-1))))
return np.array(x).reshape(1, len(x)*len(x[0]))[0]
def plotBands(energies, number_of_bands, kpoint_dis, special_kpoints, Ef, total_kpoints, project_name):
ymin = 0
ymax = 0
x_axis = createXAxis(kpoint_dis, len(special_kpoints), total_kpoints)
x_fermi, y_fermi = createFermiLine(max(x_axis), 0)
# plot the line of every band
for n in range(number_of_bands):
band_energies = [energy[n] for energy in energies]
re_aligned_energies = [energy - Ef for energy in band_energies]
plt.plot(x_axis,re_aligned_energies, color='b', linewidth=1)
if min(re_aligned_energies) < ymin:
ymin = min(re_aligned_energies)
if max(re_aligned_energies) > ymax:
ymax = max(re_aligned_energies)
plt.title(project_name)
plt.xticks(kpoint_dis, special_kpoints)
plt.ylabel('Energy (eV)')
plt.rc('axes', labelsize=20)
plt.rc('xtick', labelsize=20)
plt.rc('ytick', labelsize=20)
plt.xlim(min(x_axis), max(x_axis))
plt.ylim(ymin, ymax)
plt.plot(x_fermi,y_fermi,'k--', linewidth=1)
plt.savefig('Bandplot_%s.png'%project_name)
plt.show()
main()
| [
"matplotlib"
] |
93bdf1069faa50036e5a4992221d6d2f7aae86d3 | Python | boxside/Jabar-s_Water_Source | /Code/asd.py | UTF-8 | 1,753 | 2.515625 | 3 | [
"MIT"
] | permissive | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import geopandas as gpd
desired_width= 400
pd.set_option('display.width', desired_width)
pd.set_option('display.max_columns',10)
air = pd.read_csv(r"C:\Users\fikri\Desktop\pyproj\air.csv")
air_gdf = gpd.read_file(r'C:\Users\fikri\Desktop\pyproj\PROVINSI JAWA_BARAT\PROVINSI_JAWA_BARAT.shp')
air2 = air_gdf[['KABKOTNO', 'KABKOT']]
air4 = pd.DataFrame({ 'kode' :['04','17','16','01','07','03','09','05','12','15','08','10','18','14','13','02','11','06','79'], 'nama_kabupaten_kota' :
['KABUPATEN BANDUNG','KABUPATEN BANDUNG BARAT','KABUPATEN BEKASI','KABUPATEN BOGOR','KABUPATEN CIAMIS','KABUPATEN CIANJUR',
'KABUPATEN CIREBON','KABUPATEN GARUT','KABUPATEN INDRAMAYU','KABUPATEN KARAWANG',
'KABUPATEN KUNINGAN','KABUPATEN MAJALENGKA','KABUPATEN PANGANDARAN','KABUPATEN PURWAKARTA','KABUPATEN SUBANG','KABUPATEN SUKABUMI',
'KABUPATEN SUMEDANG','KABUPATEN TASIKMALAYA','KOTA BANJAR']})
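# 'kondisi' = condition, 'RUSAK PARAH' = severely damaged, 'jumlah' = count:
# air3 sums the usable water sources per regency, air4 maps regency names to the
# shapefile's KABKOTNO codes, and the join below attaches those counts to the map.
# The plot title translates to 'Distribution of village water-source availability in West Java'.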
air3= air[air['kondisi'] != 'RUSAK PARAH'].groupby('nama_kabupaten_kota')['jumlah'].sum().reset_index()
air5 = air3.merge(air4, left_on = 'nama_kabupaten_kota', right_on = 'nama_kabupaten_kota')
merged = air_gdf.set_index('KABKOTNO').join(air5.set_index('kode'))
merged['jumlah'].fillna('0', inplace = True)
merged['jumlah']=merged['jumlah'].astype(int)
fig, ax = plt.subplots(1, figsize=(20, 20))
ax.axis('off')
ax.set_title('Persebaran Ketersediaan Sumber Air Desa di Jawa Barat',
fontdict={'fontsize': '15', 'fontweight' : '3'})
fig = merged.plot(column='jumlah', cmap='Blues', linewidth=0.5, ax=ax, edgecolor='0.2',legend=True)
plt.show()
| [
"matplotlib",
"seaborn"
] |
72de8d108c6eb705e2a4ace6f33b68df3c682a49 | Python | JohnKeklak/Contagion | /contagion-v2.0.py | UTF-8 | 3,450 | 3 | 3 | [
"MIT"
] | permissive | '''
dN = E * p * N
'''
import random
import matplotlib.pyplot as plt
random.seed()
population_size = 10000
percent_isolated = 90
E = 3
p = 0.1
timesteps = 125 # days
number_of_simulations = 5
time_to_symptoms = 5
time_to_outcome = 10
full_scale = True
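# Rough sanity check on these parameters: each non-isolating infected agent transmits
# to about E * p = 0.3 contacts per day, so roughly E * p * time_to_symptoms = 1.5
# others before symptoms trigger isolation (ignoring immunity and the isolated fraction).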
def run_one_day(population, E, p):
global time_to_symptoms
global time_to_outcome
for person in population:
if person['infected'] == True:
person['duration'] += 1
if person['duration'] == time_to_symptoms:
person['symptomatic'] = True
person['isolating'] = True
elif person['duration'] == time_to_outcome:
person['immune'] = True
person['infected'] = False
person['symptomatic'] = False
person['isolating'] = False
not_isolating = []
for person in population:
if person['isolating'] == False:
not_isolating += [person]
for person in not_isolating:
if person['infected'] == True:
for encounter in range(E):
other = random.choice(not_isolating)
if other['immune'] == False:
transmitted = random.uniform(0.0, 1.0)
if transmitted <= p:
other['infected'] = True
def count_infections(population):
number_infected = 0
number_showing_symptoms = 0
for person in population:
if person['infected'] == True:
number_infected += 1
if person['symptomatic'] == True:
number_showing_symptoms += 1
return [number_infected, number_showing_symptoms]
def run_one_simulation(E, p, timesteps):
population = []
global population_size
global percent_isolated
for i in range(population_size):
person = { 'infected': False, 'duration': 0, 'symptomatic': False,
'immune': False, 'isolating': False}
population += [person]
population[0]['infected'] = True
number_isolated = int(percent_isolated * population_size/100.0)
for i in range(1,number_isolated):
person = population[i]
person['isolating'] = True
infections_by_day = []
for timestep in range(timesteps):
run_one_day(population, E, p)
infections_this_day = count_infections(population)
infections_by_day += [infections_this_day]
#print('Day: {} Infections: {}'.format(timestep, infections_this_day))
return infections_by_day
results_all_runs = []
for i in range(number_of_simulations):
print('Simulation {}'.format(i))
results_this_run = run_one_simulation(E, p, timesteps)
results_all_runs += [results_this_run]
number_of_infections = []
number_of_infections_w_symptoms = []
for i in range(timesteps):
number_of_infections += [0.0]
number_of_infections_w_symptoms += [0.0]
for result in results_all_runs:
for i in range(timesteps):
result_this_day = result[i]
number_of_infections[i] += result_this_day[0]
number_of_infections_w_symptoms[i] += result_this_day[1]
for i in range(timesteps):
number_of_infections_w_symptoms[i] /= number_of_simulations
number_of_infections[i] /= number_of_simulations
number_of_infections[i] -= number_of_infections_w_symptoms[i]
for i in range(timesteps):
print('Day {}: {} {}'.format(i, int(number_of_infections[i]),
int(number_of_infections_w_symptoms[i])))
day_numbers = []
for i in range(timesteps):
day_numbers += [i]
plt.plot(day_numbers, number_of_infections)
plt.plot(day_numbers, number_of_infections_w_symptoms)
plt.title('{}% isolated, probability of transmission: {}'.format(percent_isolated, p))
if full_scale:
plt.ylim(0,10100)
#plt.ylim(0,1000)
plt.show()
| [
"matplotlib"
] |
4559e9aef722bdc6a2b8c3614a2a27223fbb2ed3 | Python | Arwa-Ibrahim/TableScanner | /image processing/pre_processing.py | UTF-8 | 7,042 | 3.015625 | 3 | [] | no_license | ####################################################################################################
# importing useful libs
####################################################################################################
# %matplotlib inline  (IPython/Jupyter magic; invalid syntax in a plain .py script)
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from math import hypot, pi, cos, sin
import numpy as np
import cv2 as cv
####################################################################################################
# define function that used to show the images using plt lib (if needed)
####################################################################################################
def showImg(img):
temp = cv.cvtColor(img, cv.COLOR_BGR2RGB)
testimageplot = plt.imshow(temp,'gray')
return temp
####################################################################################################
# define the pre_processing function
# - it takes input image and return no. of cols and rows and image cells
####################################################################################################
##########################################################################################
def pre_processing (img):
# ####################################################################################################
# # 1-Apply gaussian blurring to reduce noise
# # 2-convert image to grayscale and apply hough transform on it
# # 3-convert hough output to :
# # - vlines array that contain vertical lines
# # - hlines array that contain horizontal lines
# ####################################################################################################
vlines = []
hlines = []
blur = cv.GaussianBlur(img,(3,3),0)
gray = cv.cvtColor(blur,cv.COLOR_BGR2GRAY)
edges = cv.Canny(gray,25,150,apertureSize = 3)
lines = cv.HoughLines(edges,1,pi/180,100)
for i in range(len(lines)):
for rho,theta in lines[i]:
a = np.cos(theta)
b = np.sin(theta)
x0 = a*rho
y0 = b*rho
x1 = int(x0 + 375*(-b))
y1 = int(y0 + 375*(a))
x2 = int(x0 - 500*(-b))
y2 = int(y0 - 500*(a))
if(-70 <= (x1-x2) <= 70):
vlines.append([x1,y1,x2,y2])
if(-70 <= (y1-y2) <= 70):
hlines.append([x1,y1,x2,y2])
# ####################################################################################################
# # remove duplication from vlines array (some of vlines in the table detected by more than one line)
# ####################################################################################################
dup = []
for i in range(len(vlines)) :
for j in range(len(vlines)):
if i >= j :
continue
if ((-15 <= vlines[i][0]-vlines[j][0] <= 15 ) or (-15 <= vlines[i][2]-vlines[j][2] <= 15 )):
dup.append(j)
dup.sort()
dup = list(set(dup))
for i in range(len(dup)):
vlines.pop(dup[i]-i)
# ####################################################################################################
# # remove duplication from hlines array (some of hlines in the table detected by more than one line)
# ####################################################################################################
hdup = []
for i in range(len(hlines)) :
for j in range(len(hlines)):
if i >= j :
continue
if ((-15 <= (hlines[i][3]-hlines[j][3]) <= 15 ) or (-15 <= (hlines[i][1]-hlines[j][1]) <= 15 )):
hdup.append(j)
hdup.sort()
hdup = list(set(hdup))
for i in range(len(hdup)):
z = hdup[i]-i
        hlines.pop(z)
# ####################################################################################################
# # sort vlines and hlines w.r.t. x1 and y1 respectively
# ####################################################################################################
vlines_x1s = [ line[0] for line in vlines ]
vlines = [line for x1, line in sorted(zip(vlines_x1s, vlines)) ]
hlines_y1s = [ line[1] for line in hlines ]
hlines = [line for y1, line in sorted(zip(hlines_y1s, hlines))]
# ####################################################################################################
# # Raise an exception if there is no tables in the input image detected
# ####################################################################################################
n_cols = len(vlines) - 1
n_rows = len(hlines) - 1
if (n_cols == 0) or (n_rows == 0) :
        raise Exception('No table could be found in the input image')
# ####################################################################################################
# # get the intersection points
# ####################################################################################################
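    # each intersection solves y = m_h*x + c_h = m_v*x + c_v for a horizontal/vertical pair,
    # i.e. x = (c_v - c_h)/(m_h - m_v) and y = m_h*x + c_h;
    # eps guards against division by zero for perfectly vertical lines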
eps = 1e-10
inter_pts = []
for h in hlines:
for v in vlines:
x_h1, y_h1, x_h2, y_h2 = h
x_v1, y_v1, x_v2, y_v2 = v
m_h = (y_h2 - y_h1) / (x_h2 - x_h1)
c_h = y_h1 - m_h * x_h1
m_v = (y_v2 - y_v1) / (x_v2 - x_v1 + eps)
c_v = y_v1 - m_v * x_v1
x_inter = int((c_v - c_h) / (m_h - m_v + eps))
y_inter = int(m_h * x_inter + c_h)
inter_pts.append( (x_inter, y_inter) )
# ####################################################################################################
# # 1- segmentation :
    # # using vlines & hlines & inter_pts (intersection points) arrays we can detect the cells in the table and save each cell as a separate image
    # # 2- return no. of columns and rows of the original table
# ####################################################################################################
cells = []
max_cell_width = -1
max_cell_height = -1
for i in range(n_cols * n_rows):
# offset w.r.t. to the cell row
offset = i // n_cols
start = i + offset
cell_coords = [ inter_pts[start], inter_pts[start + 1], inter_pts[start + n_cols + 1], inter_pts[start + n_cols + 2] ]
cells.append(cell_coords)
y1 = cell_coords[0][1] + 4
y2 = cell_coords[2][1] - 4
x1 = cell_coords[0][0] + 4
x2 = cell_coords[1][0] - 4
cell_array = img[y1:y2, x1:x2, :]
cv.imwrite('{}.png'.format(i), cell_array)
return (n_cols,n_rows)
####################################################################################################
# testing on test image
####################################################################################################
img = cv.imread("1.jpg")
x,y=pre_processing(img)
print(x,y)
| [
"matplotlib"
] |
0c374293b21f1061142960a1495042345cc38d43 | Python | akashpardeshi/IGL | /prime_analysis.py | UTF-8 | 5,187 | 3.625 | 4 | [] | no_license | import matplotlib.pyplot as plt
import numpy as np
from scipy.optimize import curve_fit
plt.style.use('ggplot')
######################################################
##### Functions for generating prime number data #####
######################################################
def generatePrimes(N, bit_vector=False):
"""
Returns a list of all primes <= N using sieve of Eratosthenes.
bit_vector: False results in a list of prime numbers
True results in a list where list[n] == 1 if n is prime else 0
"""
is_prime = [0, 0, 1] + [1 if n % 2 else 0 for n in range(3, N + 1)]
for p in range(3, int(N**0.5) + 1):
if is_prime[p]:
for m in range(p**2, N + 1, p):
is_prime[m] = 0
return is_prime if bit_vector else [n for n in range(2, N + 1) if is_prime[n]]
def generateGaps(N):
"""
Returns a list of the prime gaps for primes <= N.
"""
primes = generatePrimes(N)
return [primes[n+1] - primes[n] for n in range(len(primes) - 1)]
def pi(N):
"""
Returns a list where the nth index is pi(n) := number of primes <= n for n = 0..N
"""
primes = generatePrimes(N, bit_vector=True)
pi = [0]
for n in primes[1:]:
pi.append(pi[-1] + n)
return pi
def kthMomentPrimeGaps(N, k):
"""
Returns a list where list[i] = kth moment of [prime gaps for primes <= i] for i = 2..N
Define the kth moment of a sequence [a_1..a_n] := 1/n * sum(a_i**k for i = 1..n).
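    For example, the 2nd moment of [2, 4, 6] is (2**2 + 4**2 + 6**2)/3 = 56/3.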
"""
gaps = generateGaps(N)
gaps_len = len(gaps)
moments = [0 for _ in range(gaps_len)]
for i in range(2, gaps_len):
moments[i] = moments[i-1] + gaps[i]**k
return [moments[i] / i for i in range(2, gaps_len)]
####################################################
##### Functions for plotting prime number data #####
####################################################
def plotPrimeNumberTheorem(N, save_fig=False):
"""
Plots the pi(n) with its asymptotic relation to n / log(n).
"""
pnt_data = np.array(pi(N))
n_data = np.array(range(2, len(pnt_data)))
log_data = n_data / np.log(n_data)
pnt_data = pnt_data[2:]
ratio_data = pnt_data / log_data
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12.8, 4.8))
fig.suptitle('Prime Number Theorem')
ax1.set_title('Prime Counting Function')
ax1.set(xlabel='$n$')
ax1.step(n_data, pnt_data, 'c', where='post', label='$\pi(n)$')
ax1.plot(n_data, log_data, 'r', label=r'$\frac{n}{\log n}$')
ax1.legend(loc='lower right')
ax2.set_title(r'Asymptotic Relationship of $\pi(n)$ and $\frac{n}{\log n}$')
ax2.set(xlabel='$n$')
ax2.plot(n_data, ratio_data, 'y', label=r'$\frac{\pi(n)}{\frac{n}{\log n}}$')
ax2.legend(loc='lower right')
if save_fig:
plt.savefig('figures/prime_number_theorem.png')
plt.show()
def plotPrimeGaps(N, save_fig=False):
"""
Plots the prime gaps for primes <= N.
"""
gap_data = generateGaps(N)
plt.title('Prime Gaps')
plt.xlabel('$n$')
plt.ylabel('$n$th prime gap')
plt.plot(range(1, len(gap_data) + 1), gap_data, 'g.', alpha=0.35, markersize=2)
if save_fig:
plt.savefig('figures/prime_gaps.png')
plt.show()
##########################################################
##### Functions for plotting prime number statistics #####
##########################################################
def plotPrimeGapsMoment(N, k, save_fig=False):
"""
Plots the kth moment data for each n = 1..N
"""
moment_data = kthMomentPrimeGaps(N, k)
moment_data_len = len(moment_data)
n_data = np.array(range(1, moment_data_len+1))
f = lambda x, a, b, c: a * np.log(x+1)**c + b
params = curve_fit(f, n_data, moment_data)
a, b, c = params[0]
model_data = [f(n, a, b, c) for n in n_data]
ratio_data = [d2 / d1 for d1, d2 in zip(model_data, moment_data)]
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12.8, 4.8))
ax1.set_title(f'Moment-{k} for Primes')
ax1.set(xlabel='$n$')
ax1.plot(n_data, moment_data, '.', markersize=7, color='C1', label='moment data')
ax1.plot(n_data, model_data, '-k', label=f'curve fit following $A\log(n+1)^C + B$\n(A = {a:.5}, B = {b:.5}, C = {c:.5})')
ax1.legend(loc='lower right')
ax2.set_title(f'Ratio of Model to Moment-{k} for Primes')
ax2.set(xlabel='$n$', ylabel='ratio')
ax2.plot(n_data, ratio_data, '.', markersize=7, color='C0')
    ax2.plot(n_data, [1 for _ in range(moment_data_len)], '-k', label='1')
ax2.legend(loc='upper right')
if save_fig:
plt.savefig(f'figures/prime_gaps_moment_{k}.png')
plt.show()
def plotPrimeGapsHistogram(N, save_fig=False):
"""
Plots histogram of prime gaps for primes <= N.
"""
gaps = generateGaps(N)
plt.title('Distribution of Prime Gaps')
plt.xlabel('gap size')
plt.ylabel('frequency')
plt.hist(gaps, bins='fd', color='lightseagreen')
if save_fig:
plt.savefig('figures/prime_gaps_histogram.png')
plt.show()
def main():
N, k = 10**7, 2
# plotPrimeNumberTheorem(N)
# plotPrimeGaps(N)
# plotPrimeGapsMoment(N, k)
# plotPrimeGapsHistogram(N)
main()
| [
"matplotlib"
] |
cb4fb061135232c9a15a4123a63383ff36dd41f9 | Python | callumparr/labjournal | /archive/old_scripts/count_s.py | UTF-8 | 2,681 | 2.515625 | 3 | [] | no_license | import pandas as pd
import numpy as np
import time
from multiprocessing import cpu_count, Pool
import functools
import pickle
import matplotlib.pyplot as plt
start_time = time.time()
with open("/dev/datasets/FairWind/_results/bowtie/coverage/merged_table.pickle", 'rb') as f:
large_table = pickle.load(f)
print(f"Pickle load is done [%f sec]" % (time.time() - start_time), end="\n")
plt.plot(list(large_table[:500]))
plt.ylabel('Number of bases')
plt.xlabel('Distance from restriction site, b')
plt.suptitle('Number per distance, 0.5b')
plt.savefig("/dev/datasets/FairWind/_results/bowtie/coverage/distance_count_500.svg")
plt.clf()
plt.plot(list(large_table[:5000]))
plt.ylabel('Number of bases')
plt.xlabel('Distance from restriction site, b')
plt.suptitle('Number per distance, 5kb')
plt.savefig("/dev/datasets/FairWind/_results/bowtie/coverage/distance_count_5000.svg")
exit()
# --------------------------
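# NOTE: the exit() above means everything below is not executed; it is the original
# code that built merged_table.pickle from the per-chromosome restriction-site files
# and is kept for reference.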
def chrom_handler(chrom):
start_time = time.time()
main_table = pd.read_csv('/dev/datasets/FairWind/_results/bowtie/coverage/restrict_gatc_hg19/' + chrom + '.txt', header=None, names=['restrict'])
main_table = main_table.apply(pd.to_numeric, errors='ignore')
main_table = main_table.groupby(['restrict']).size()
print(f"{chrom} is done [%.2f sec]" % (time.time() - start_time), end="\n")
return main_table
THREADS_NUM = cpu_count()
chr_list = []
for line in open('/dev/datasets/FairWind/_results/bowtie/coverage/chr_list', 'rt'):
chr_list += [line[:-1]]
pool = Pool(THREADS_NUM)
results = pool.map(chrom_handler, chr_list)
pool.close()
pool.join()
del pool
start_time = time.time()
large_table = pd.concat(results, axis=1)
del results
print(f"Concat is done [%f sec]" % (time.time() - start_time), end="\n")
large_table = large_table[:1000000]
start_time = time.time()
large_table = large_table.apply(np.nansum, axis=1)
print(f"Merging is done [%.2f sec]" % (time.time() - start_time), end="\n")
start_time = time.time()
with open("/dev/datasets/FairWind/_results/bowtie/coverage/merged_table.pickle", 'wb') as f:
pickle.dump(large_table, f)
print(f"Pickling merged is done [%f sec]" % (time.time() - start_time), end="\n")
plt.plot(list(large_table[:500]))
plt.ylabel('Depth average')
plt.xlabel('Distance from restriction site, b')
plt.suptitle('Distance coverage')
plt.savefig("/dev/datasets/FairWind/_results/bowtie/coverage/distance_count_500.svg")
plt.clf()
plt.plot(list(large_table[:5000]))
plt.ylabel('Depth average')
plt.xlabel('Distance from restriction site, b')
plt.suptitle('Distance coverage')
plt.savefig("/dev/datasets/FairWind/_results/bowtie/coverage/distance_count_5000.svg")
| [
"matplotlib"
] |
078e8229f63760c0454757b4812b5e5dcd407ea2 | Python | felix-clark/nflstats | /passing.py | UTF-8 | 18,498 | 2.734375 | 3 | [] | no_license | #!/usr/bin/env python3
# import prediction_models as pm
from ruleset import *
import dist_fit
import bayes_models as bay
import logging
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
from sys import argv
import scipy.stats as st
# get a dataframe of the relevant positional players
def get_qb_df(years, datadir='./yearly_stats/', keepnames=None):
ls_dfs = []
for year in years:
csvName = '{}/fantasy_{}.csv'.format(datadir,year)
df = pd.read_csv(csvName)
df['year'] = year
valids = df.loc[df['pos'] == 'QB']
if keepnames is not None:
valids = valids[valids['name'].isin(keepnames)]
valids = valids.loc[valids['games_started'].astype(int) > 2]
# somehow there are QBs who started but didn't throw any passes...
valids = valids.loc[valids['passing_att'].astype(int) >= 4]
if valids.size == 0:
logging.warning('no qbs in {}'.format(year))
ls_dfs.append(valids)
allqbs = pd.concat(ls_dfs, ignore_index=True, verify_integrity=True)
allqbs = allqbs.drop(columns=['pos', 'Unnamed: 0'])
return allqbs
def get_qb_list(years, datadir='./yearly_stats/'):
posdf = get_qb_df(years, datadir)
posnames = posdf['name'].drop_duplicates().sort_values()
posnames.reset_index(drop=True,inplace=True)
return posnames
if __name__ == '__main__':
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
logging.getLogger('bayes_models').setLevel(logging.INFO)
logging.info('dummy') # somehow this line is needed for the custom log to work...
# starting at 1999 will let us have all data for everyone selected
years = range(1999, 2018)
posnames = get_qb_list(years)
years = range(1983, 2018)
posdf = get_qb_df(years, keepnames=posnames)
posdf['pass_att_pg'] = posdf['passing_att'] / posdf['games_played']
posdf['pass_cmp_pa'] = posdf['passing_cmp'] / posdf['passing_att']
# since completion percentage is not independent of the yardage, it may be better to consider yds/att.
posdf['pass_yds_pc'] = posdf['passing_yds'] / posdf['passing_cmp']
posdf['pass_yds_pa'] = posdf['passing_yds'] / posdf['passing_att']
# TDs per attempt instead of per game scales out short games properly
# posdf['pass_td_pg'] = posdf['passing_td'] / posdf['games_played'] # this is too highly correlated w/ pass attempts per game
posdf['pass_td_pa'] = posdf['passing_td'] / posdf['passing_att']
posdf['pass_td_pc'] = posdf['passing_td'] / posdf['passing_cmp']
posdf['pass_td_py'] = posdf['passing_td'] / posdf['passing_yds']
# these aren't necessarily rookies, but they represent 1st year playing in the NFL
rookiedf = pd.concat([posdf[posdf['name'] == name].head(1) for name in posnames])
# should be a little slicker to use numpy arrays
data_papg_inc = posdf['pass_att_pg'].values
data_pcpa_inc = posdf['pass_cmp_pa'].values
data_pypc_inc = posdf['pass_yds_pc'].values
data_pypa_inc = posdf['pass_yds_pa'].values
data_ptdpa_inc = posdf['pass_td_pa'].values
data_ptdpc_inc = posdf['pass_td_pc'].values
data_papg_rook = rookiedf['pass_att_pg'].values
data_pcpa_rook = rookiedf['pass_cmp_pa'].values
data_pypc_rook = rookiedf['pass_yds_pc'].values
data_pypa_rook = rookiedf['pass_yds_pa'].values
data_ptdpa_rook = rookiedf['pass_td_pa'].values
data_ptdpc_rook = rookiedf['pass_td_pc'].values
# _,(rrk,prk),cov,llpdf = dist_fit.to_neg_binomial( data_papg_rook )
# log.info('rookie: r = {}, p = {}, LL per dof = {}'.format(rrk, prk, llpdf))
# log.info('covariance:\n' + str(cov))
# _,(rinc,pinc),cov,llpdf = dist_fit.to_neg_binomial( data_papg_inc )
# log.info('all: r = {}, p = {}, LL per dof = {}'.format(rinc, pinc, llpdf))
# log.info('covariance:\n' + str(cov))
# stdf,stloc,stscale = st.t.fit(data_papg_rook)
# log.info('fit to student\'s t distribution:\n{}'.format((stdf,stloc,stscale)))
# weib_rook_res = st.weibull_min.fit(data_papg_rook, floc=0)
# log.info('fit rookies to weibull distribution:\n{}'.format(weib_rook_res))
# weib_inc_res = st.weibull_min.fit(data_papg_inc, floc=0)
# log.info('fit all to weibull distribution:\n{}'.format(weib_inc_res))
n_rookie_seasons = len(data_papg_rook)
n_rookie_games = rookiedf['games_played'].sum()
log.info('{} rookie seasons'.format(n_rookie_seasons))
rk_weight_mean = rookiedf['passing_att'].sum() / n_rookie_games
rk_weight_stddev = np.sqrt(n_rookie_seasons/(n_rookie_seasons-1)*(rookiedf['games_played'] * (rookiedf['pass_att_pg'] - rk_weight_mean)**2).sum()/ n_rookie_games)
log.info('weighted by games played, rookie pass attempt distribution has mean/std: {:.5g} \pm {:.5g}'.format(rk_weight_mean, rk_weight_stddev))
sns.set()
# plt.figure()
# rookplt = sns.pairplot(rookiedf, vars=['pass_att_pg','pass_cmp_pa','pass_yds_pc','pass_td_pc'])
# rookplt.savefig('rookie_qb_corrs.png')
# plt.figure()
# rookplt = sns.pairplot(rookiedf, vars=['pass_yds_pc','pass_td_pa','pass_td_pc','pass_td_py'])
# rookplt = sns.jointplot(['pass_td_pc','pass_td_py'], ['pass_cmp_pa','pass_yds_pc'], data=rookiedf)
# rookplt.figure.savefig('rookie_qb_td_corrs.png')
tdcorr = rookiedf[['pass_att_pg', 'pass_cmp_pa', 'pass_yds_pc', 'pass_td_pa', 'pass_td_pc', 'pass_td_py']].corr()
tdplt = sns.heatmap(tdcorr)
plt.show()
# # drew bledsoe has the most pass attempts per game: 70
# xfvals = np.linspace(-0.5, 80+0.5, 128)
# bins_papg = None # range(0,80)
# plt_gp = sns.distplot(data_papg_rook, bins=bins_papg,
# kde=False, norm_hist=True,
# hist_kws={'log':False, 'align':'left'})
# plt.plot(xfvals, dist_fit.neg_binomial(xfvals, rrk, prk), '--', lw=2, color='violet')
# plt.plot(xfvals, st.t.pdf(xfvals, stdf, stloc, stscale), '-', lw=1, color='blue')
# plt.plot(xfvals, st.norm.pdf(xfvals, rk_weight_mean, rk_weight_stddev), '-', lw=1, color='green')
# plt.plot(xfvals, st.weibull_min.pdf(xfvals, *weib_rook_res), '-', lw=1, color='red')
# plt.title('rookies')
# plt_gp.figure.savefig('pass_att_pg_rookie.png'.format())
# plt_gp.figure.show()
# plt.figure() # create a new figure
# plt_gp = sns.distplot(data_papg_inc, bins=bins_papg,
# kde=False, norm_hist=True,
# hist_kws={'log':False, 'align':'left'})
# plt.plot(xfvals, dist_fit.neg_binomial(xfvals, rinc, pinc), '--', lw=2, color='violet')
# plt.plot(xfvals, st.weibull_min.pdf(xfvals, *weib_inc_res), '-', lw=1, color='red')
# plt.title('all seasons')
# plt_gp.figure.savefig('pass_att_pg.png'.format())
# plt_gp.figure.show()
# plt_gp = sns.pairplot(rookiedf, vars = ['games_played', 'pass_att_pg'])
# plt_gp.savefig('pass_att_gs_qb.png')
# plt.show(block=True)
## completion PCT
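    # moment matching for a beta distribution: with m = mean and v = variance,
    # m = a/(a+b) and v = a*b/((a+b)**2 * (a+b+1)) invert to
    # a = m*(m*(1-m)/v - 1) and b = (1-m)*(m*(1-m)/v - 1), as used below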
pct_mean_rook = np.mean(data_pcpa_rook)
pct_var_rook = np.var(data_pcpa_rook)
alpha_pcpa_rook = pct_mean_rook*( pct_mean_rook*(1-pct_mean_rook)/pct_var_rook - 1 )
beta_pcpa_rook = (1-pct_mean_rook)*( pct_mean_rook*(1-pct_mean_rook)/pct_var_rook - 1 )
log.info('using statistical rookie cmp%: beta dist pars: a = {:.4g}, b = {:.4g}'.format(alpha_pcpa_rook, beta_pcpa_rook))
pct_mean_inc = np.mean(data_pcpa_inc)
pct_var_inc = np.var(data_pcpa_inc)
alpha_pcpa_inc = pct_mean_inc*( pct_mean_inc*(1-pct_mean_inc)/pct_var_inc - 1 )
beta_pcpa_inc = (1-pct_mean_inc)*( pct_mean_inc*(1-pct_mean_inc)/pct_var_inc - 1 )
log.info('using statistical inclusive cmp%: beta dist pars: a = {:.4g}, b = {:.4g}'.format(alpha_pcpa_inc, beta_pcpa_inc))
# xfvals = np.linspace(0.0, 1.0, 128)
# bins_pcpa = np.linspace(0,1,64+1)
# plt_gp = sns.distplot(data_pcpa_rook, bins=bins_pcpa,
# kde=False, norm_hist=True,
# hist_kws={'log':False, 'align':'mid'})
# plt.plot(xfvals, st.beta.pdf(xfvals, alpha_pcpa_rook, beta_pcpa_rook), '--', lw=2, color='violet')
# plt.title('rookie completion percentage')
# plt_gp.figure.savefig('pass_cmp_pa_rookie.png'.format())
# plt_gp.figure.show()
papg_avg_all = data_papg_inc.mean()
papg_var_all = data_papg_inc.var()
log.info('used for const model: average attempts per game (inclusive) = {:.4g} \pm {:.4g}'.format(papg_avg_all, np.sqrt(papg_var_all)))
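    # method of moments for a negative binomial: with m = mean and v = variance,
    # m = r*(1-p)/p and v = r*(1-p)/p**2 invert to p = m/v and r = m**2/(v - m)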
p0stat = papg_avg_all/papg_var_all
r0stat = papg_avg_all**2/(papg_var_all-papg_avg_all)
# the distribution for passing attempts is underdispersed compared to neg. bin.
# it actually works well for pass completions, though
log.info('using the inclusive stats to set r,p would yield {:.4g},{:.4g}'.format(r0stat,p0stat))
p0stat = rk_weight_mean/rk_weight_stddev**2
r0stat = rk_weight_mean**2/(rk_weight_stddev**2 - rk_weight_mean)
log.info('using weighted rookie to set r,p would yield {:.4g},{:.4g}'.format(r0stat,p0stat))
pypc_avg_rook = data_pypc_rook.mean()
pypc_var_rook = data_pypc_rook.var()
xfvals = np.linspace(0, 40, 128)
bins_pypc = np.linspace(5,20,64)
plt.figure()
plt_gp = sns.distplot(data_pypc_rook, bins=bins_pypc,
kde=False, norm_hist=True,
hist_kws={'log':False, 'align':'mid'})
# plt.plot(xfvals, st.beta.pdf(xfvals, alpha_pcpa_rook, beta_pcpa_rook), '--', lw=2, color='violet')
plt.title('rookie yds/cmp')
plt_gp.figure.savefig('pass_yds_pc_rookie.png'.format())
plt_gp.figure.show()
pypa_avg_rook = data_pypa_rook.mean()
pypa_var_rook = data_pypa_rook.var()
log.info('sigma/mu for pass yards per completion: {:.4g}'.format(np.sqrt(pypc_var_rook)/pypc_avg_rook))
log.info('sigma/mu for pass yards per attempt: {:.4g}'.format(np.sqrt(pypa_var_rook)/pypa_avg_rook))
###############################################################
# define a baseline model that is just a constant gaussian w/ the inclusive distribution
cgaussmodel = bay.const_gauss_model(papg_avg_all, papg_var_all)
# a memory of 1-1/N corresponds to an exponentially-falling window w/ length scale N
memory = 1 - 1/2**4
# r0,p0 = rrk,prk
# log.info('using rookie r,p = {},{}'.format(r0,p0))
# r0,p0 = rinc,pinc
# log.info('using inclusive r,p = {},{}'.format(r0,p0))
log.info('using (weighted) statistical mean and stddev')
r0,p0 = r0stat,p0stat
log.info('starting mean = {:.4g}'.format(r0*(1-p0)/p0))
r0 = 1.0*r0
# # if scale down r and beta to be on the per-game level?
# beta0 /= 1 # scaling doesn't seem to help
# p0p = beta0/(1+beta0)
p0 = 1.0*p0
log.info( 'using (possibly reduced) per-game r, b = {:.4g}, {:.4g}'.format(r0, p0))
lrp = 1.0 # why does the bayes model have worse performance
nbmodel = bay.neg_binomial_model(r0, p0, lrp, mem=memory)
# hyperparameters for t model
t_mu0 = rk_weight_mean
t_mu0 = papg_avg_all
t_nu0 = 1 # turning this down low reduces EVSE and MSE but increases the KLD
t_alpha0 = 2.0 # needs to be > 1 to have a well-defined variance.
# if alpha is large, the contribution to the MSE term is small, though in practice it doesn't seem to change it much
# t_alpha0 = n_rookie_seasons # 211 rookie seasons, tho turning this up makes the variance not change much (?)
# t_beta0 = rk_weight_stddev**2 * (t_alpha0-1)/(1+1.0/t_nu0)
t_beta0 = papg_var_all * (t_alpha0-1)/(1+1.0/t_nu0)
tmodel = bay.t_model(t_mu0, t_nu0, t_alpha0, t_beta0, lrp, mem=memory)
## model for cmp %
lr_cmp_pct = 1.0
mem_cmp_pct = 1-1/2**3
# the separated model works if the learn rate is slowed
# the ratio model is decent too if we speed up learning, but the scale is not very dynamic (all widths the same)
# both slightly under-predict the data (positive residuals, on average); possibly because completion % tends to increase w/ time?
# maybe using the full (non-rookie) would help -- there's not much difference. it's more important to use the separated model
pcpa_betamodel_sep = bay.beta_model(alpha_pcpa_rook, beta_pcpa_rook, lr=lr_cmp_pct/16, mem=mem_cmp_pct)
pcpa_betamodel_ratio = bay.beta_model(alpha_pcpa_rook, beta_pcpa_rook, lr=lr_cmp_pct*16, mem=mem_cmp_pct)
## models for ypc
ypc_r0 = pypc_avg_rook**2/(pypc_var_rook - pypc_avg_rook)
ypc_p0 = pypc_avg_rook/pypc_var_rook
pypc_nbmodel = bay.neg_binomial_model(ypc_r0, ypc_p0, lr=1.0, mem=1-1/2**4)
# maybe use inclusive instead of rookie?
ypc_mu0 = pypc_avg_rook
ypc_nu0 = 1
ypc_alpha0 = 2
ypc_beta0 = pypc_var_rook * (ypc_alpha0-1)/(1+1/ypc_nu0)
pypc_tmodel = bay.t_model(ypc_mu0, ypc_nu0, ypc_alpha0, ypc_beta0, lr=1.0, mem=1.0)
# collect models for easy abstraction
papg_struct = {'df':pd.DataFrame(), 'desc':'pass attempts per game', 'models':{'cgauss':cgaussmodel, 'studentt':tmodel}, 'models_sep':{'nbinom':nbmodel}}
pcpa_struct = {'df':pd.DataFrame(), 'desc':'completion percentage', 'models':{'beta_ratio':pcpa_betamodel_ratio}, 'models_sep':{'beta_sep':pcpa_betamodel_sep}}
pypc_struct = {'df':pd.DataFrame(), 'desc':'yards per completion', 'models':{'studentt':pypc_tmodel}, 'models_sep':{'nbinom':pypc_nbmodel}}
for pname in posnames:
pdata = posdf[posdf['name'] == pname]
# explicitly turn into numpy arrays
pdata_gs = pdata['games_played'].values
pdata_pa = pdata['passing_att'].values
pdata_pc = pdata['passing_cmp'].values
pdata_pyds = pdata['passing_yds'].values
# pdata_papg = pdata_pa / pdata_gs
# pdata_pcpa = pdata_pc / pdata_pa
# we should really do an analysis of the covariance between attempts and completion % on a per-player basis
career_length = pdata_gs.size
if career_length < 2: continue
weights = np.full(pdata_gs.shape, 1.0)
# weights = pdata_gs / np.max(pdata_gs)
normkld = False
qlist = [(papg_struct,pdata_pa,pdata_gs)
,(pcpa_struct,pdata_pc,pdata_pa)
,(pypc_struct,pdata_pyds,pdata_pc)]
for qstruct,numq,denq in qlist:
ratioq = numq/denq
df = qstruct['df']
df = df.append(pd.DataFrame([{'name':pname, 'model':'data', 'career_year':iy+1,
'ev':ratioq[iy], 'kld':0,
'weight':weights[iy]} for iy in range(career_length)]), ignore_index=True)
for mname,model in qstruct['models'].items():
mses = model.mse(ratioq, weights=weights)*weights
klds = model.kld(ratioq, weights=weights, normalize=normkld)*weights
evses = model.evse(ratioq, weights=weights)
evs = model.evs(ratioq, weights=weights)
vrs = model.vars(ratioq, weights=weights)
res = model.residuals(ratioq, weights=weights)
df = df.append([{'name':pname, 'model':mname, 'residuals':res[iy],
'ev':evs[iy], 'scale':np.sqrt(vrs[iy]),
'mse':mses[iy], 'kld':klds[iy],
'evse':evses[iy], 'career_year':iy+1,
'weight':weights[iy]} for iy in range(career_length)], ignore_index=True)
for mname,model in qstruct['models_sep'].items():
mses = model.mse((numq,denq), weights=weights)*weights
klds = model.kld((numq,denq), weights=weights, normalize=normkld)*weights
evses = model.evse((numq,denq), weights=weights)
evs = model.evs((numq,denq), weights=weights)
vrs = model.vars((numq,denq), weights=weights)
res = model.residuals((numq,denq), weights=weights)
df = df.append([{'name':pname, 'model':mname, 'residuals':res[iy],
'ev':evs[iy], 'scale':np.sqrt(vrs[iy]),
'mse':mses[iy], 'kld':klds[iy],
'evse':evses[iy], 'career_year':iy+1,
'weight':weights[iy]} for iy in range(career_length)], ignore_index=True)
qstruct['df'] = df # we have to re-assign
papgdf = papg_struct['df']
pcpadf = pcpa_struct['df']
papgdf.reset_index(drop=True, inplace=True)
pcpadf.reset_index(drop=True, inplace=True)
# with all the data stored, add some extra stats to the data "model"
career_long = papgdf['career_year'].max()
for iy in range(1,career_long+1):
mask = (papgdf['model'] == 'data') & (papgdf['career_year'] == iy)
reldf = papgdf[mask]
meanyr = reldf['ev'].mean()
stdyr = reldf['ev'].std()
papgdf.loc[mask,'scale'] = stdyr
papgdf.loc[mask,'residuals'] = (reldf['ev'] - meanyr)/stdyr
for df in [papgdf, pcpadf]:
df['rmse'] = np.sqrt(df['mse'])
df['revse'] = np.sqrt(df['evse'])
df['norm_rmse'] = df['rmse'] / df['scale']
df['norm_revse'] = df['revse'] / df['scale']
log.info('total player-seasons: {}'.format(papgdf[papgdf['model'] == 'data']['weight'].sum()))
for st in [papg_struct, pcpa_struct, pypc_struct]:
log.info(' evaluation of {} models:'.format(st['desc']))
mnames = [mn for mn in st['models'].keys()] + [mn for mn in st['models_sep'].keys()]
for model in mnames:
for stat in ['evse', 'mse', 'kld']:
df = st['df']
thismodel = df[df['model'] == model]
val = (thismodel[stat]*thismodel['weight']).sum()/thismodel['weight'].sum()
if stat in ['evse', 'mse']: val = np.sqrt(val)
log.info('{} for {} model: {:.4g}'.format(stat, model, val))
plot_vars = []
if 'noplt' not in argv: plot_vars += ['ev', 'residuals', 'scale']
if 'all' in argv:
plot_vars += ['norm_rmse', 'norm_revse', 'rmse', 'revse', 'kld']
plt_structs = []
if 'att' in argv: plt_structs.append(papg_struct)
if 'cmp' in argv: plt_structs.append(pcpa_struct)
if 'yds' in argv: plt_structs.append(pypc_struct)
for st in plt_structs:
for var in plot_vars:
plt.figure()
varplt = sns.boxenplot(data=st['df'], x='career_year', y=var, hue='model')
plt.title(st['desc'])
plt.show(block=True)
| [
"matplotlib",
"seaborn"
] |
1fffd857e16adbbf0d07268c044f380ae7dc98dd | Python | AlextheYounga/primordial-ooze | /primordialooze/primordialooze.py | UTF-8 | 26,014 | 3.390625 | 3 | [
"MIT"
] | permissive | """
PrimordialOoze is a genetic algorithm (GA) library for those who want something
very simple and don't want to spend time figuring out the more complicated libraries
that are out there.
See the README or the docstrings in this file for the documentation.
"""
import math
import multiprocessing
import numpy as np
class Simulation:
"""
A GA simulation. The general workflow for this is:
```python
import primordialooze as po
import pandas
import matplotlib.pyplot
sim = po.Simulation(nagents, shape, fitnessfunction)
bestagent, fitness = sim.run()
# Dump and plot
fname = "stats.csv"
sim.dump_history_csv(fname)
df = pandas.read_csv(fname)
df = df.drop(['GenerationIndex'], axis=1)
df.plot()
plt.show()
```
"""
def __init__(self, population, shape, fitnessfunc, *, seedfunc=None, selectionfunc=None,
crossoverfunc=None, mutationfunc=None, elitismfunc=None, nworkers=0,
max_agents_per_generation=None, min_agents_per_generation=None):
"""
## Args
The following list contains the arguments that are needed. These do not have default values
since the values for these will change dramatically depending on the problem.
- **population**: The number of agents in the first generation. We will generate this many agents
in the initial generation, each of which is a Numpy Array of shape=`shape`.
They will be mutated according to `mutationfunc`, and evaluated each generation
by `fitnessfunc`.
- **shape**: The shape of each agent in the population. Must be a list-like. The shape of the agents
must be a 1D array of whatever length like `(7,)`.
- **fitnessfunc**: The function to use to evaluate the fitness of each agent in the generation.
Must have signature: `fitnessfunc(agent) -> scalar float`. This function
will be evaluated on every single agent in the gene pool at each generation.
If this function is slow, it probably makes sense to use multiprocessing, unless the
gene pool is quite small. See `nworkers`.
## Keyword Args
These arguments contain (mostly) sensible defaults, but you should definitely make sure these
defaults work for you. You will almost certainly want to change some of these to fit your problem.
- **seedfunc**: The function to use to create the first generation of agents. The function must have
the signature `seedfunc() -> agent of shape 'shape'`. We call this function
`population` times. When `None`, defaults to uniform random
over the range [-1.0, 1.0) in each dimension.
- **selectionfunc**: The function to use to select the agents that are allowed to breed to create the
next generation. Signature must be `selectionfunc(population, evaluations) -> selected_agents`,
where `population` is an n-dimensional array of shape (nagents, agent_length),
`evaluations` is an array of shape (nagents,); `evaluations[i]` contains
the fitness value for `population[i, :]`; `selected_agents` is an n-dimensional array
of shape (nagents_selected, agent_length), which must contain the selected agents.
                            `population` and `evaluations` are pre-sorted so that `population[0, :]` corresponds
                            to `evaluations[0]` and has the highest evaluation value. Agents which are not selected
are simply discarded, i.e., they will not appear in the next generation (unless randomly
created again as part of crossover/mutation).
If `None`, defaults to selecting the top ten percent.
- **crossoverfunc**: Crossover function to use. Must have signature `crossoverfunc(agents) -> new_agents`,
where `agents` is an n-dimensional array of shape (nselected_agents, agent_length),
and where `new_agents` must be an n-dimensional array of shape (nagents, agent_length).
This function is applied after the selection function is used to determine which
agents will enter the new generation and this function is used exclusively on those
selected agents. Typically, `new_agents` will constitute the entirety of the new generation,
with one exception being if elitism is used (see below) and another exception being
if the mutation function adds new individuals to the gene pool, rather than just mutating
existing ones.
If `None`, defaults to 2-point crossover used on randomly selected pairs from the
breeding agents until `population` agents (or, if `elitismfunc` is None, `0.9 * population`).
- **mutationfunc**: The function to use to apply mutations to the gene pool. The signature must be
`mutationfunc(agents) -> new_agents`, where `agents` is the value returned from
`crossoverfunc` and `new_agents` must be an n-dimensional array of shape (nagents, agent_length).
This function is applied to the result of `crossoverfunc`.
When `None`, defaults to setting each value in 0.05 of the agents to a random value,
where the random value is drawn from a Gaussian distribution of mean = the value being replaced
and stdev = 0.25.
- **elitismfunc**: A function of signature `elitismfunc(generation_index) -> float in range [0.0, 1.0]`.
This function takes the index of the generation (0 for the first generation, 1 for the second, etc.)
and returns the fraction of top-performers to hold over as-is to the next generation.
The elites are duplicated and then, after the new
generation is created via the selectionfunc -> crossoverfunc -> mutationfunc pipeline, they are
reintroduced into the gene pool. This means that if the above pipeline generates 100 agents
and the elitism is set to take 10, the new generation will be composed of 110 agents. If this
is confusing, see `max_agents_per_generation` and `min_agents_per_generation`.
When `None`, defaults to a function that simply returns 0.1 (or 10%) of the gene pool regardless of the
generation.
- **nworkers**: The number of processes to use to parallelize the fitness function. This will default to 0, which will
mean no parallelism at all. `None` will use the number of cores. Otherwise, should be a positive integer.
- **max_agents_per_generation**: The maximum agents to allow into a generation. If the selection, crossover, mutation,
and elitism functions are not handled properly, it is possible for the number of
agents to change per generation. While this may be desired in some circumstances, it
is often not. If this value is negative, we will allow the generations to grow to arbitrary
size. If it is nonzero, after selection, crossover, mutation, and elitism, we will
take all of the candidates as long as they do not number more than this value. If they do,
we take this many at random.
This value defaults to `None`, which means we use `population` as the max.
- **min_agents_per_generation**: The minimum agents to allow making a new generation. If the selection, crossover, mutation,
and elitism functions are not handled properly, it is possible for the number of
agents to change per generation. While this may be desired in some circumstances, it
is often not. If this value is negative or zero, we will allow the generations
to shrink to zero, after which the simulation will stop. If it is nonzero, after selection,
crossover, mutation, and elitism, we will cycle through the candidate agents in random
order, duplicating them until this value is met. Note that we attempt to spread out the
duplication evenly amongst all candidates.
This value defaults to `None`, which means we use `population` as the min.
"""
# Validate population
if population <= 0:
raise ValueError("Population must be > 0 but is {}".format(population))
population = int(population)
# Validate shape
for i, dim in enumerate(shape):
if dim <= 0:
raise ValueError("Shape must contain no negative values, but contains {} at index {}".format(dim, i))
try:
_testagent = np.ndarray(shape=shape)
except Exception:
raise ValueError("There is something wrong with your shape parameter. It must be a list-like of integers greater than zero but is: {}.".format(shape))
# Do not validate functions; may take too long
# Validate nworkers
if nworkers is None:
nworkers = multiprocessing.cpu_count()
if nworkers <= 0:
raise ValueError("Something is wrong with multiprocessing.cpu_count(). Try passing in a number for nworkers instead of None.")
elif nworkers < 0:
raise ValueError("Nworkers must be zero (for no multiprocessing), None, or a positive integer, but is: {}".format(nworkers))
nworkers = int(nworkers)
# If we have negative max_agents, we actually want infinity
if max_agents_per_generation is not None and max_agents_per_generation < 0:
max_agents_per_generation = math.inf
# We allow negative min_agents for compatibility with max_agents, but we just
# interpret it as zero
if min_agents_per_generation is not None and min_agents_per_generation < 0:
min_agents_per_generation = 0
self._initial_population_size = population
self._shape = shape
self._fitnessfunc = fitnessfunc
self._seedfunc = self._default_seedfunc if seedfunc is None else seedfunc
self._selectionfunc = self._default_selectionfunc if selectionfunc is None else selectionfunc
self._crossoverfunc = self._default_crossoverfunc if crossoverfunc is None else crossoverfunc
self._mutationfunc = self._default_mutationfunc if mutationfunc is None else mutationfunc
self._elitismfunc = self._default_elitismfunc if elitismfunc is None else elitismfunc
self._nworkers = nworkers
self._max_agents_per_generation = population if max_agents_per_generation is None else max_agents_per_generation
self._min_agents_per_generation = population if min_agents_per_generation is None else min_agents_per_generation
self.statistics = []
self.best_agents = []
if self._max_agents_per_generation < self._min_agents_per_generation:
raise ValueError("max_agents_per_generation {} is less than min_agents_per_generation {}".format(self._max_agents_per_generation, self._min_agents_per_generation))
def dump_history_csv(self, fpath):
"""
Saves this simulation's statistics as a CSV file at `fpath` in the form:
```
        GenerationIndex, Maximum, Minimum, Average
```
"""
with open(fpath, 'w') as f:
f.write("GenerationIndex, Maximum, Minimum, Average\n")
for s in self.statistics:
f.write("{}, {}, {}, {}\n".format(s.generationidx, s.maxval, s.minval, s.avgval))
def run(self, niterations=100, fitness=None, printprogress=True):
"""
Runs the constructed simulation.
Either runs until `niterations` have passed, or runs until the best fitness is `fitness` or greater.
Returns the best agent along with its fitness.
## Keyword Args
- **niterations**: The number of iterations to run the simulation to. Defaults to 100. If `None`,
`fitness` will be used (and must not be None). If both this and `fitness` is
specified, we will stop as soon as one or the other condition is met.
- **fitness**: The fitness level to converge on. As soon as one or more agents have this fitness level
or higher, the simulation will stop. Defaults to `None`. If `None` (the default),
`niterations` will be used (and must not be None). If both this and `niterations` are
specified, we will stop as soon as one or the other condition is met.
- **printprogress**: If `True` (the default), we will print a progress indication after each generation.
## Returns
- The agent with the highest fitness score after the simulation ends.
- The fitness of this agent.
"""
# Validate args
if niterations is None and fitness is None:
raise ValueError("`niterations` and `fitness` must not both be None.")
# First seed the gene pool
listagents = [self._seedfunc() for _ in range(self._initial_population_size)]
self._agents = np.array(listagents)
self._fitnesses = np.zeros((self._initial_population_size,))
iteridx = 0
while not self._check_if_done(niterations, fitness, iteridx, printprogress):
# Evaluate the gene pool
self._fitnesses = self._evaluate_fitnesses()
# Sort the fitnesses along with the agents and reverse
sorted_indexes = np.argsort(self._fitnesses)[::-1]
self._fitnesses = self._fitnesses[sorted_indexes]
self._agents = self._agents[sorted_indexes]
# Calculate statistics
self._save_stats(iteridx)
# Elitism to duplicate the elites
eliteratio = self._elitismfunc(iteridx)
assert eliteratio <= 1.0, "The elitism function must produce a value between 0.0 and 1.0"
assert eliteratio >= 0.0, "The elitism function must produce a value between 0.0 and 1.0"
nelites = int(eliteratio * self._agents.shape[0])
elites = np.copy(self._agents[0:nelites])
elites = np.reshape(elites, (-1, self._agents.shape[1]))
# Select breeding agents with selection function
self._agents = self._selectionfunc(self._agents, self._fitnesses)
assert len(self._agents.shape) == 2, "Selection function must return an ndarray of shape (nagents, agent_length), but has shape: {}".format(self._agents.shape)
# Breed them using crossover
self._agents = self._crossoverfunc(self._agents)
assert len(self._agents.shape) == 2, "Crossover function must return an ndarray of shape (nagents, agent_length), but has shape: {}".format(self._agents.shape)
# Mutate the results
self._agents = self._mutationfunc(self._agents)
assert len(self._agents.shape) == 2, "Mutation function must return an ndarray of shape (nagents, agent_length), but has shape: {}".format(self._agents.shape)
# Construct the new gene pool from the mutation results and the elites
## Append any elites that were held over
self._agents = np.append(self._agents, elites, axis=0)  # np.append returns a new array, so reassign it
## Take as many as max_agents (but don't take more than we actually have), but randomized
np.random.shuffle(self._agents)
mx = min(self._max_agents_per_generation, self._agents.shape[0])
self._agents = self._agents[0:mx, :]
## Now cycle through the agents, duplicating one at a time until we have at least min_agents
i = 0
while self._agents.shape[0] < self._min_agents_per_generation:
self._agents = np.append(self._agents, np.expand_dims(self._agents[i], 0), axis=0)
i += 1
if i >= self._agents.shape[0]:
i = 0
# Increment the generation index
iteridx += 1
if printprogress:
print()
# Return the fittest agent and its fitness score
return self.best_agents[-1], self.statistics[-1].maxval
def _save_stats(self, iteridx):
"""
Saves the statistics from this generation.
"""
maxval = np.max(self._fitnesses)
minval = np.min(self._fitnesses)
avgval = np.mean(self._fitnesses)
stats = Statistics(maxval, minval, avgval, iteridx)
self.statistics.append(stats)
# Sort the fitnesses along with the agents and reverse
sorted_indexes = np.argsort(self._fitnesses)[::-1]
sorted_agents = self._agents[sorted_indexes]
self.best_agents.append(sorted_agents[0, :])
def _check_if_done(self, niterations, fitness, iteridx, prnt):
"""
Returns `True` if the simulation is complete, `False` if not.
"""
assert not (niterations is None and fitness is None), "niterations and fitness cannot both be None"
if niterations is None:
niterations = math.inf
if fitness is None:
fitness = math.inf
# Check if the max fitness value is >= fitness
finished_by_fitness = np.max(self._fitnesses) >= fitness
# Check if iteridx >= niterations
finished_by_iterations = iteridx >= niterations
# Now print an update if the user wants
if prnt:
maxsigns = 20
if niterations != math.inf:
# We are interested in niterations
fraction_complete = iteridx / niterations
else:
# We are trying to converge on a particular value
fraction_complete = np.max(self._fitnesses) / fitness
npounds = int(fraction_complete * maxsigns)
ndots = maxsigns - npounds
msg = "Progress: [{}{}] Best Fitness: {} Worst Fitness: {}".format(
"#" * npounds, "." * ndots, np.max(self._fitnesses), np.min(self._fitnesses)
)
print(msg, end="\r")
return finished_by_fitness or finished_by_iterations
def _evaluate_fitnesses(self):
"""
Applies the fitness function to every agent currently in the gene pool
and fills in self._fitnesses with this information.
Will use multiprocessing if this class was initialized with it.
"""
# If self._nworkers != 0, we are using multiprocessing, otherwise we aren't
if self._nworkers == 0:
# Don't use multiprocessing
fitnesses = np.apply_along_axis(self._fitnessfunc, axis=1, arr=self._agents)
else:
# Make a pool
# Split up the agents
with multiprocessing.Pool(self._nworkers) as p:
fitnesses = np.array(p.map(self._fitnessfunc, self._agents))
return fitnesses
def _default_seedfunc(self):
"""
Default seed function to create the first generation of agents. Each time this is called, it creates
a new agent drawn uniformly at random with shape `self._shape` over the values [-1.0, 1.0).
"""
return np.random.uniform(low=-1.0, high=1.0, size=self._shape)
def _default_selectionfunc(self, population, fitnesses):
"""
Default selection function for selecting agents allowed to breed to create the next generation.
Simply takes the top 10% of the given population and returns them.
Population and evaluations are pre-sorted so that index 0 is the fittest.
This is guaranteed to take at least one agent.
"""
tenpercent = int(population.shape[0] * 0.1)
if tenpercent < 1:
tenpercent = 1
return population[0:tenpercent, :]
def _default_crossoverfunc(self, agents):
"""
Applies 2-point crossover to the agents to generate children. Mating pairs are chosen at random
without replacement until the next generation has `self._initial_population_size` agents in it
unless self._elitismfunc == self._default_elitismfunc, in which case we only go to
0.9 * `self._initial_population_size` agents (since 0.1 are kept by the elitism function).
Once all agents have been chosen, we do it again. We repeat this process until the next generation
has the right number of agents in it.
Always mates at least one pair, unless the population is currently 1, in which case we simply
return that agent unchanged.
"""
nagents = self._initial_population_size
# Determine how many agents to mate/create (we create one agent per parent - two per pair)
if self._elitismfunc == self._default_elitismfunc:
nagents = int(0.9 * nagents)
if nagents < 2:
nagents = 2
if agents.shape[0] < 2:
# We can't do much with less than 2 agents
return agents
# Create agents by choosing two agents randomly and swapping two parts of them
created_agents = []
so_far_mated = set()
remaining = [i for i in range(agents.shape[0])]
while len(created_agents) < nagents:
# Draw a random index from the remaining indexes
idx1 = np.random.choice(remaining)
remaining.remove(idx1)
# If that was the last one, we need to dump so-far-mated and
# start going through them again
if not remaining:
remaining = list(so_far_mated)
so_far_mated.clear()
# Draw another
idx2 = np.random.choice(remaining)
remaining.remove(idx2)
# Mate the two
newa, newb = self._mate_two_agents(agents[idx1, :], agents[idx2, :])
# Add the result to the list of agents we are going to return
created_agents.append(newa)
created_agents.append(newb)
# Add to the set of so-far-mated
so_far_mated.add(idx1)
so_far_mated.add(idx2)
# If we have run out of items in remaining, dump so-far-mated
# and start cycling back through them
if not remaining:
remaining = list(so_far_mated)
so_far_mated.clear()
return np.array(created_agents)
def _default_mutationfunc(self, agents):
"""
Applies Gaussian noise to each value in 5% of agents, where mean=value and stdev=0.25.
Always mutates at least one individual.
"""
# TODO: Test that we always mutate at least one agent.
# TODO: Test that the underlying distribution for a bunch of mutated points is gaussian, mean=x, stdev=0.25.
nagents = int(0.05 * agents.shape[0])
if nagents < 1:
nagents = 1
idxs = np.random.choice(agents.shape[0], size=nagents, replace=False)
agents[idxs, :] = np.random.normal(agents[idxs, :], 0.25)
return agents
def _default_elitismfunc(self, genindex):
"""
Simply returns 0.1, regardless of `genindex`. This means that 10% of the gene pool (the top
10% specifically) will be re-injected into the next generation unchanged.
"""
return 0.1
def _mate_two_agents(self, a1, a2):
"""
Returns two new agents after mating a1 and a2 via 2-point crossover.
"""
assert len(a1.shape) == 1, "a1 must be a row vector, but has shape {}".format(a1.shape)
assert len(a2.shape) == 1, "a2 must be a row vector, but has shape {}".format(a2.shape)
# Find a random index
i = np.random.choice(a1.shape[0])
# Find another random index
j = np.random.choice(a1.shape[0])
# Sort them
low, high = sorted([i, j])
# Take a1[0:low] and a2[0:low] and swap them
a1_up_to_low = a1[0:low]
a2_up_to_low = a2[0:low]
a1[0:low] = a2_up_to_low
a2[0:low] = a1_up_to_low
# Take a1[high:] and a2 [high:] and swap them
a1_from_high = a1[high:]
a2_from_high = a2[high:]
a1[high:] = a2_from_high
a2[high:] = a1_from_high
return a1, a2
class Statistics:
def __init__(self, maxval, minval, avgval, generationidx):
self.maxval = maxval
self.minval = minval
self.avgval = avgval
self.generationidx = generationidx
if __name__ == "__main__":
import matplotlib.pyplot as plt
import os
import pandas
nagents = 1000
sim = Simulation(nagents, shape=(2,), fitnessfunc=lambda agent: (-1.2* agent[0]**2) - (0.75 * agent[1]**2) + 5.0)
best, value = sim.run(niterations=10000, fitness=4.99999)
msg = "(best, value): ({}, {})".format(best, value)
fname = "stats.csv"
sim.dump_history_csv(fname)
df = pandas.read_csv(fname)
df = df.drop(['GenerationIndex'], axis=1)
df.plot()
plt.show()
| [
"matplotlib"
] |
bc70a551926b186fe8ef76a71b0faeec3b17211c | Python | rjsilmaro/python-for-data-science-ai-and-development | /data_analysis.py | UTF-8 | 1,109 | 3.59375 | 4 | [] | no_license | # Import pandas library
import pandas as pd
path = "https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-PY0101EN-SkillsNetwork/labs/Module%205/data/diabetes.csv"
df = pd.read_csv(path)
# show the first 5 rows using dataframe.head() method
print("The first 5 rows of the dataframe")
df.head(5)
# view the dimensions of the dataframe
df.shape
# Statistical Overview of dataset
df.info()
# prints information about a DataFrame including the index dtype and columns, non-null values and memory usage
df.describe()
# identify these missing values
missing_data = df.isnull()
missing_data.head(5)
# Count missing values in each column
for column in missing_data.columns.values.tolist():
print(column)
print (missing_data[column].value_counts())
print("")
# .dtype() to check the data type
# .astype() to change the data type
df.dtypes
## visualization
# import libraries
import matplotlib.pyplot as plt
import seaborn as sns
labels= 'Diabetic','Not Diabetic'
plt.pie(df['Outcome'].value_counts(),labels=labels,autopct='%0.02f%%')
plt.legend()
plt.show() | [
"matplotlib",
"seaborn"
] |
74b1a44a40bd15570fde4462f1e738562bc4d6fa | Python | blawton/neural_net | /3.2.py | UTF-8 | 9,790 | 2.84375 | 3 | [] | no_license | import numpy as np
import matplotlib.pyplot as plt
import math
import os
import pickle
import tensorflow as tf
import sys
data_path = "/CIFAR-10/"
def one_hot_encoded(class_numbers, num_classes=None):
num_classes = np.max(class_numbers) + 1
return np.eye(num_classes, dtype=float)[class_numbers]
def _get_file_path(filename=""):
return os.path.join(data_path, "cifar-10-batches-py/", filename)
def _unpickle(filename):
file_path = _get_file_path(filename)
print("Loading data: " + file_path)
with open(file_path, mode='rb') as file:
data = pickle.load(file, encoding='bytes')
return data
def _convert_images(raw):
raw_float = np.array(raw, dtype=float) / 255.0
images = raw_float.reshape([-1, 3, 32, 32])
images = images.transpose([0, 2, 3, 1])
return images
def _load_data(filename):
data = _unpickle(filename)
raw_images = data[b'data']
cls = np.array(data[b'labels'])
images = _convert_images(raw_images)
return images, cls
#def load_class_names():
# raw = _unpickle(filename="batches.meta")[b'label_names']
# names = [x.decode('utf-8') for x in raw]
# return names
def load_training_data():
images = np.zeros(shape=[50000, 32, 32, 3], dtype=float)
cls = np.zeros(shape=[50000], dtype=int)
begin = 0
for i in range(5):
images_batch, cls_batch = _load_data(filename="data_batch_" + str(i + 1))
num_images = len(images_batch)
end = begin + num_images
images[begin:end, :] = images_batch
cls[begin:end] = cls_batch
begin = end
return images, cls, one_hot_encoded(class_numbers=cls, num_classes=10)
def load_test_data():
images, cls = _load_data(filename="test_batch")
return images, cls, one_hot_encoded(class_numbers=cls, num_classes=10)
# Global constants describing the CIFAR-10 data set.
IMAGE_SIZE = 32
NUM_CLASSES = 10
NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = 50000
NUM_EXAMPLES_PER_EPOCH_FOR_EVAL = 10000
batch_size = 10
# Constants describing the training process.
MOVING_AVERAGE_DECAY = 0.9999 # The decay to use for the moving average.
NUM_EPOCHS_PER_DECAY = 350.0 # Epochs after which learning rate decays.
LEARNING_RATE_DECAY_FACTOR = 0.1 # Learning rate decay factor.
INITIAL_LEARNING_RATE = 0.001 # Initial learning rate.
X = tf.placeholder(tf.float32, shape=[None, 32, 32, 3], name='X')
kernel = tf.get_variable('weights', [5, 5, 3, 64],
initializer=tf.truncated_normal_initializer(stddev=.01, dtype=tf.float32), dtype=tf.float32)
biases = tf.get_variable('biases', [64], initializer=tf.constant_initializer(0.0), dtype=tf.float32)
kernel2 = tf.get_variable('weights2', [5, 5, 64, 64],
initializer=tf.truncated_normal_initializer(stddev=.01, dtype=tf.float32), dtype=tf.float32)
biases2 = tf.get_variable('biases2', [64], initializer=tf.constant_initializer(0.1), dtype=tf.float32)
weights3 = tf.get_variable('weights3', [4096, 384],
initializer=tf.truncated_normal_initializer(stddev=.04, dtype=tf.float32), dtype=tf.float32)
biases3 = tf.get_variable('biases3', [384], initializer=tf.constant_initializer(0.1), dtype=tf.float32)
weights4 = tf.get_variable('weights4', [384, 192],
initializer=tf.truncated_normal_initializer(stddev=.04, dtype=tf.float32), dtype=tf.float32)
biases4 = tf.get_variable('biases4', [192], initializer=tf.constant_initializer(0.1), dtype=tf.float32)
weights5 = tf.get_variable('weights5', [192, NUM_CLASSES],
initializer=tf.truncated_normal_initializer(stddev=1/192.0, dtype=tf.float32), dtype=tf.float32)
biases5 = tf.get_variable('biases5', [NUM_CLASSES], initializer=tf.constant_initializer(0.0), dtype=tf.float32)
def inference(images):
conv = tf.nn.conv2d(images, kernel, [1, 1, 1, 1], padding='SAME')
pre_activation = tf.nn.bias_add(conv, biases)
conv1 = tf.nn.relu(pre_activation, name='conv1')
pool1 = tf.nn.max_pool(conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
padding='SAME', name='pool1')
norm1 = tf.nn.lrn(pool1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,
name='norm1')
conv2 = tf.nn.conv2d(norm1, kernel2, [1, 1, 1, 1], padding='SAME')
pre_activation2 = tf.nn.bias_add(conv2, biases2)
conv2f = tf.nn.relu(pre_activation2, name='conv2f')
print(conv2f)
norm2 = tf.nn.lrn(conv2f, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75,
name='norm2')
print(norm2)
pool2 = tf.nn.max_pool(norm2, ksize=[1, 3, 3, 1],
strides=[1, 2, 2, 1], padding='SAME', name='pool2')
print(pool2)
reshape = tf.reshape(pool2, [batch_size, -1])
dim = reshape.get_shape()[1].value
local3 = tf.nn.relu(tf.matmul(reshape, weights3) + biases3, name='local3')
local4 = tf.nn.relu(tf.matmul(local3, weights4) + biases4, name='local4')
softmax_linear = tf.add(tf.matmul(local4, weights5), biases5, name='softmax_linear')
return softmax_linear
tpred = inference(X)
Y = tf.placeholder(tf.float32, shape=[None], name='Y')
def loss(logits, labels):
"""Add L2Loss to all the trainable variables.
Add summary for "Loss" and "Loss/avg".
Args:
logits: Logits from inference().
labels: Labels from distorted_inputs or inputs(). 1-D tensor
of shape [batch_size]
Returns:
Loss tensor of type float.
"""
# Calculate the average cross entropy loss across the batch.
labels = tf.cast(labels, tf.int64)
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=labels, logits=logits, name='cross_entropy_per_example')
cross_entropy_mean = tf.reduce_mean(cross_entropy, name='cross_entropy')
tf.add_to_collection('losses', cross_entropy_mean)
# The total loss is defined as the cross entropy loss plus all of the weight
# decay terms (L2 loss).
return tf.add_n(tf.get_collection('losses'), name='total_loss')
tloss = loss(tpred, Y)
def train(total_loss, global_step):
"""Train CIFAR-10 model.
Create an optimizer and apply to all trainable variables. Add moving
average for all trainable variables.
Args:
total_loss: Total loss from loss().
global_step: Integer Variable counting the number of training steps
processed.
Returns:
train_op: op for training.
"""
# Variables that affect learning rate.
num_batches_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN / batch_size
decay_steps = int(num_batches_per_epoch * NUM_EPOCHS_PER_DECAY)
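# With the constants defined above this works out to 50000 / 10 = 5000 batches per epoch, so
# decay_steps = 5000 * 350 = 1,750,000: the staircase schedule multiplies the 0.001 initial
# learning rate by 0.1 after every 1,750,000 global steps.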
# Decay the learning rate exponentially based on the number of steps.
lr = tf.train.exponential_decay(INITIAL_LEARNING_RATE,
global_step,
decay_steps,
LEARNING_RATE_DECAY_FACTOR,
staircase=True)
tf.summary.scalar('learning_rate', lr)
# Generate moving averages of all losses and associated summaries.
loss_sum = tf.summary.scalar("loss", total_loss)
# Compute gradients.
opt = tf.train.GradientDescentOptimizer(lr)
grads = opt.compute_gradients(total_loss)
# Apply gradients.
apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)
# Add histograms for trainable variables.
for var in tf.trainable_variables():
tf.summary.histogram(var.op.name, var)
# Add histograms for gradients.
for grad, var in grads:
if grad is not None:
tf.summary.histogram(var.op.name + '/gradients', grad)
# Track the moving averages of all trainable variables.
variable_averages = tf.train.ExponentialMovingAverage(
MOVING_AVERAGE_DECAY, global_step)
variables_averages_op = variable_averages.apply(tf.trainable_variables())
with tf.control_dependencies([apply_gradient_op, variables_averages_op]):
train_op = tf.no_op(name='train')
return train_op
global_step = tf.Variable(0, name='global_step', trainable=False)
mytrain_op = train(tloss, global_step)
#load data
xtrain = np.empty((50000, 32, 32, 3))
ytrain = np.empty(50000)
xtest = np.empty((10000, 32, 32, 3))
ytest = np.empty(10000)
xtrain, ytrain, _ = load_training_data()
xtest, ytest, _ = load_test_data()
xtrain = np.float32(xtrain) # pixels were already scaled to [0, 1] in _convert_images, so avoid dividing by 255 twice
ytrain = np.float32(ytrain)
xtest = np.float32(xtest) # already scaled to [0, 1] in _convert_images
ytest = np.float32(ytest)
batches = 10
batch = 0
test = 0
tests = 10
correct = 0.0
epoch = 0
epochs = 600
progress_update = 100
lossarray = np.zeros(batches*epochs)
losslabels= np.arange(batches*epochs)
sess = tf.Session()
init = tf.global_variables_initializer()
sess.run(init)
with sess.as_default():
for epoch in range(epochs):
#np.random.shuffle(xtrain)
#np.random.shuffle(ytrain)
for batch in range(batches):
_, currentloss = sess.run([mytrain_op, tloss], feed_dict={X: xtrain[batch*batch_size:batch*batch_size + batch_size, :, :, :],
Y: ytrain[batch*batch_size:batch*batch_size + batch_size]})
lossarray[batches*epoch+batch]=currentloss
if ((epoch*batches+batch+1) % progress_update) == 0:
print('{} % complete'.format((epoch*batches + batch + 1)/(epochs*batches)*100))
print(currentloss)
batch += 1
epoch += 1
for test in range(tests):
testresults = tpred.eval(feed_dict={X: xtrain[test*batch_size:test*batch_size + batch_size, :, :, :]})
matches = (np.argmax(testresults, axis=1) == ytrain[test*batch_size:test*batch_size + batch_size])
print(np.argmax(testresults, axis=1))
print(ytrain[test*batch_size:test*batch_size + batch_size])
correct += int(np.sum(matches))
test += 1
print(correct/(tests*batch_size))
plt.plot(losslabels, lossarray)
plt.show()
| [
"matplotlib"
] |
32d4b2da1c281184190c22945e7cd3ada7ba2e87 | Python | Tatotavella/Workshop-3_PBATEOTW_Chile | /lammps-awsem/tools/deproto.py | UTF-8 | 626 | 2.6875 | 3 | [] | no_license | import os
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
data_dir = os.path.dirname(__file__) #<-- Absolute directory
rel_path = "MC.state"
dire = os.path.join(data_dir, rel_path)
g = open(dire,'r')
res = []
data = {}
for line in g:
inter = [x for x in line.split('\t')]
for r in inter[1:len(inter)-1]:
res.append(r)
data.update({r:[]}) #Charge over steps
break
for line in g:
inter = [x for x in line.split('\t')]
step = inter[0]
for idx,c in enumerate(inter[1:len(inter)-1]):
data[res[idx]].append(float(c))
g.close()
plt.plot(data[res[2]],'bo')
plt.show()
| [
"matplotlib"
] |
e92539ebd251b871b365dee75746f33e1cbee211 | Python | kroschenko/IHSMarkit_NN_course | /code/mlp/iris_dataset_classification.py | UTF-8 | 1,407 | 2.953125 | 3 | [] | no_license | from network import Network
from layer import FullyConnectedLayer
from activate_functions import Logistic
from backpropagation import *
import matplotlib.pyplot as plt
import sklearn.datasets as datasets
from sklearn.model_selection import train_test_split
RANDOM_SEED = 42
def prepareData():
irises_dataset = datasets.load_iris()
data = irises_dataset['data']
labels = irises_dataset['target']
return train_test_split(data, labels, test_size=0.33, random_state=RANDOM_SEED)
def plot(error_curve):
plt.plot([x for x in range(0, len(error_curve))], error_curve)
plt.show()
def testing(net, data, labels):
output = net.activate(data)
answer = output.argmax(1)
percentage = (answer == labels).sum() / float(data.shape[0]) * 100
return percentage
net = Network()
layer_1 = FullyConnectedLayer(Logistic(), 4, 256)
layer_2 = FullyConnectedLayer(Logistic(), 256, 3)
net.append_layer(layer_1)
net.append_layer(layer_2)
params = Backprop_params(100, 1e-5, 1, 0.9, True, [0.01, 0.01], 0)
method = Backpropagation(params, net)
data_all = prepareData()
train_data = data_all[0]
test_data = data_all[1]
train_labels = data_all[2]
test_labels = data_all[3]
error_curve = method.train(train_data, train_labels)
print "Train efficiency: " + str(testing(net, train_data, train_labels))
print "Test efficiency: " + str(testing(net, test_data, test_labels))
plot(error_curve)
| [
"matplotlib"
] |
932f593e3690853f76a328a55a059aad19fb44de | Python | ghoshs/Statistical-Natural-Language-Preocessing | /Ex5/crossvalidation.py | UTF-8 | 5,115 | 2.890625 | 3 | [] | no_license | import nltk
from nltk.tokenize import RegexpTokenizer
import math
from collections import Counter
from itertools import izip
import matplotlib.pyplot as plt
def perplexity_bigram(test_unigram_freq, test_bigram_freq, train_unigram_freq, train_cond_prob, alpha, V, N, reserved, alphah, Ntrain):
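# Perplexity as computed below: PP = exp( - sum over test bigrams of (count / N) * log P(w | h) )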
pp = 0.0
for wh in test_bigram_freq: # for every unique sequence in test data
pw_given_h = 0.0
pw = train_unigram_freq[wh[1]]/float(Ntrain) if wh[1] in train_unigram_freq else reserved
# if the history h of the bigram (wh) is not found in the training corpus, alphah = 1.0 such that P(w|h) is the estimate of P(w)
ah = alphah[wh[0]] if wh[0] in alphah else 1.0
# check if bigram is present in train data, ie, N(w,h) > 0
if wh not in train_cond_prob: # N(w,h) == 0
pw_given_h = ah * pw
else: # N(w,h) > 0
pw_given_h = train_cond_prob[wh]
pp = pp + (test_bigram_freq[wh] / float(N)) * math.log(pw_given_h)
pp = math.exp(-pp)
return pp
# returns the smoothed bigram probability distribution P(w|h)
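# i.e. absolute discounting with backing off: P(w|h) = (N(w,h) - d) / N(h) + a(h) * P(w), where a(h) = d * R(h) / N(h)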
def smooth_bigram(Nwh, d, Pw, Nh, R, alphah):
Pwh = {}
for wh in Nwh:
pw = Pw[wh[1]] # wh[0] = history word; wh[1] = current word; Pw = P(w)
# alphah = a(h) = dR(h)/N(h)
Pwh[wh] = (Nwh[wh] - d) / float(Nh[wh[0]]) + alphah[wh[0]] * pw
return Pwh
# returns the smoothed unigram distribution and the reserved probability for unseen words
def smooth_unigram(alpha, N, d, Nw):
smooth_p = {}
V = len(Nw)
for w in Nw:
smooth_p[w] = (Nw[w] -d) / float(N) + alpha /float(V)
return smooth_p, alpha/float(V)
# returns the backing-off weight
def get_alphah(R, d, N):
alphah = {}
for h in N:
alphah[h] = d * R[h] / float(N[h])
return alphah
# return R for all words such that R(h) = # bigrams such that h is the 1st word of the bigram
def create_R_unigram(unigram_freq, bigram_freq):
R = {}
for h in unigram_freq:
for wh in bigram_freq:
if h == wh[0]: # N(wh)>0; compare string values with ==, not identity
if h not in R:
R[h] = 1
else:
R[h] = R[h] + 1
# when the word is the last word in the corpus and occurs only once, it is not the history for any other word in the vocabulary
if h not in R:
R[h] = 0
return R
# create a bigram dictionary from tokens along with their absolute frequencies
def create_bigram_dict(tokens):
bigram_dict = dict(Counter(izip(tokens, tokens[1:])))
return bigram_dict
# create a unigram dictionary from tokens along with their absolute frequencies
def create_unigram_dict(tokens):
unigram_dict = dict(Counter(tokens))
return unigram_dict
# return the tokenized text
def tokenize(filename):
text = open(filename, 'r').read().decode('utf8')
# tokens = text.split()
# return tokens
# create a word tokenizer
tokenizer = RegexpTokenizer(r'\w+')
# tokenize the text into words
tokens = tokenizer.tokenize(text)
# convert text to lower case and ignore all unconvertable utf8 characters since these are likely punctuation marks
tokens = [x.lower().encode('ascii','ignore') for x in tokens]
return tokens
# perform k cross validation
def k_cross_validate(d, k, filename):
tokens = tokenize(filename)
l = len(tokens)
pp = 0.0
for i in range(k):
# print('Fold = '+str(i+1))
start_idx = (l/k) * i
end_idx = start_idx + (l/k) if i < k-1 else l
test = tokens[start_idx: end_idx]
train = tokens[0:start_idx]
for i in range(l - end_idx):
train.append(tokens[end_idx + i])
# total tokens in test and train corpus
N_test = len(test)
N_train = len(train)
unigram_freq_train = create_unigram_dict(train)
bigram_freq_train = create_bigram_dict(train)
V = len(unigram_freq_train)
alpha = d*V*V / float(N_train)
# return P(w): smoothed unigram distribution
unigram_smooth, uni_reserved_prob = smooth_unigram(alpha, N_train, d, unigram_freq_train)
# create R(h) for all unigrams
R = create_R_unigram(unigram_freq_train, bigram_freq_train)
alphah = get_alphah(R, d, unigram_freq_train)
# return P(w|h) for all train bigrams
bigram_smoothed_cond_train = smooth_bigram(bigram_freq_train, d, unigram_smooth, unigram_freq_train, R, alphah)
unigram_freq_test = create_unigram_dict(test)
bigram_freq_test = create_bigram_dict(test)
pp = pp + perplexity_bigram(unigram_freq_test, bigram_freq_test, unigram_freq_train, bigram_smoothed_cond_train, alpha, V, N_test, uni_reserved_prob, alphah, N_train)
return pp/float(k)
def cross_validation(filename):
d = [x/10.0 for x in list(range(1, 11))]
perplexity = []
k = 5
for i in range(len(d)):
print('Evaluating for d='+str(d[i]))
perplexity.append(k_cross_validate(d[i], k, filename))
print('Perplexity = '+str(perplexity[len(perplexity)-1]))
d_pt = d[perplexity.index(min(perplexity))]
print('Optimal value for discounting parameter, d = '+str(d_pt))
bi, = plt.plot(d, perplexity, '-bo', label = "perplexity", linewidth = 2.0)
plt.axhline(y=min(perplexity), color='g', linestyle='-')
plt.xlabel('Discounting parameter (d)')
plt.ylabel('Cross-Validation Perplexity')
plt.legend(handles = [bi], numpoints = 1)
plt.grid(True)
plt.savefig('cross_validation_perplexity.png')
plt.show()
cross_validation('./materials_ex5/text.txt') | [
"matplotlib"
] |
115172dcc949b501fbee86d629cf95a74bb7f9d4 | Python | Archercober/underwater_robot-practice | /auv.py | UTF-8 | 2,697 | 3.609375 | 4 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Underwater Robotics
Exercise on dead-reckoning positioning
Read a data file, process it, and plot the result
'''
import numpy as np
import matplotlib.pyplot as plt
import csv
def drawVehicle2D(pos, heading, axis):
'''
Draw the AUV in the world coordinate frame. The AUV is drawn as a triangle.
pos: horizontal position of the AUV (x, y) [m]
heading: heading angle of the AUV [deg]
axis: the axis to draw on
'''
length = 2.0 # length of the AUV [m]
width = 0.8 # width of the AUV [m]
p1 = np.array([length/3.0*2, 0]) # vertices of the triangle representing the AUV shape
p2 = np.array([-length/3.0, width/2.0])
p3 = np.array([-length/3.0, -width/2.0])
sina = np.sin(heading*np.pi/180)
cosa = np.cos(heading*np.pi/180)
R = np.array([[cosa, -sina], [sina, cosa]]) # rotation matrix
p1 = pos + np.dot(R, p1) # transform into the world coordinate frame
p2 = pos + np.dot(R, p2)
p3 = pos + np.dot(R, p3)
axis.plot([p1[1], p2[1], p3[1], p1[1]], [p1[0], p2[0], p3[0], p1[0]], 'k') # draw the triangle
axis.plot(pos[1], pos[0], 'ok') # draw the center
filename = 'F:/dataset.csv' # file name
x = np.array([0.0, 0.0]) # initial horizontal position x [m], y [m]
a = 0.0 # initial heading [deg]
t = 0.0 # elapsed time [sec]
f = open(filename, 'r') # open the file
reader = csv.reader(f) # set up the csv reader
header = next(reader) # skip the first line (header)
# print header
fig, ax = plt.subplots() # prepare the track plot
ax.set_xlabel('Y [m]') # swap the X and Y axes so that the X axis points upward
ax.set_ylabel('X [m]')
ax.grid(True) # show the grid
ax.axis('equal') # equalize the axis scales (important!)
#ax.set_xlim(-10, 10) # specify the plotting range
#ax.set_ylim(-10, 10)
drawVehicle2D(x, a, ax) # plot the initial state
for row in reader:
t_new = float(row[0]) # time of the latest data [sec]
v = np.array([float(row[1]), float(row[2])]) # horizontal velocity as a vector (u, v) [m/s]
r = float(row[3]) # yaw rate [deg/s]
dt = t_new - t # time difference from the previous data
t = t_new
sina = np.sin(a*np.pi/180)
cosa = np.cos(a*np.pi/180)
R = np.array([[cosa, -sina], [sina, cosa]]) # rotation matrix R(a)
x = x + np.dot(R, v) * dt # update the horizontal position
a = a + r * dt # update the heading
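# Dead-reckoning update applied above (the heading from the previous step rotates the body-frame velocity):
#   x_{k+1} = x_k + R(a_k) * v_k * dt,   a_{k+1} = a_k + r_k * dt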
print(x, a)
drawVehicle2D(x, a, ax) # plot the current state
# plt.pause(.1) # needed to draw in real time
f.close() # close the file
plt.show() # keep the graph window open | [
"matplotlib"
] |
e4e9889c0051bd6a26c24450b033af8b3ce388cd | Python | ivanchang309/Conway-s-Game-of-Life | /main.py | UTF-8 | 4,975 | 2.8125 | 3 | [] | no_license | import sys, random, math
import pygame
#graphs
'''
import matplotlib.pyplot as plt
plt.xlabel("x axis")
plt.title("title")
'''
#Want to learn more about pygame? Here is a good link:
URL = "http://kidscancode.org/lessons/"
flip = 0
GRAY = (50, 50, 50)
# Pygame template - skeleton for a new pygame project
# set up asset
WIDTH = 800
HEIGHT = 600
FPS = 30
checkingdelete = False
X = 0
Y = 0
xcheck = 0
ycheck = 0
alive = False
neighbors = 0
# define colors
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
RED = (255, 0, 0)
GREEN = (0, 100, 0)
BLUE = (0, 0, 255)
step_size = 10
COLORS = [RED, GREEN, BLUE, BLACK]
'''
def draw_grid():
for x in range(0, WIDTH, step_size):
pygame.draw.line(screen, GRAY, (x, 0), (x, HEIGHT))
for y in range(0, HEIGHT, step_size):
pygame.draw.line(screen, GRAY, (0, y), (WIDTH, y))
'''
def _debug(msg):
print(msg)
def draw_grid():
for x in range(0, WIDTH, step_size):
pygame.draw.line(screen, GRAY, (x, 0), (x, HEIGHT))
for y in range(0, HEIGHT, step_size):
pygame.draw.line(screen, GRAY, (0, y), (WIDTH, y))
def findplace(pos):
x, y = pos
return (x // step_size + 1, y // step_size + 1)
def place(pos):
x, y = pos
return ((x - 1) * step_size, (y - 1) * step_size)
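# Worked example (hypothetical click position): with step_size = 10, a click at pixel (34, 57) maps to
# grid cell (4, 6) via findplace, and place((4, 6)) returns that cell's top-left pixel (30, 50).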
class Player(pygame.sprite.Sprite):
def __init__(self, name, x, y):
global xcheck, ycheck, alive, neighbors
pygame.sprite.Sprite.__init__(self)
lif = pygame.image.load(name).convert_alpha()
self.image = pygame.transform.scale(lif, (step_size, step_size))
self.rect = self.image.get_rect()
self.rect.center = (x,y)
self.rect.x = x
self.rect.y = y
def update(self):
pass
def checkdelete(self, x, y):
if x == self.rect.x:
if y == self.rect.y:
return True
return False
def check(self, alive, neighbors):
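# Conway's rules as applied at the end of this method: a live cell survives only with 2 or 3 live
# neighbours, and a dead cell becomes alive with exactly 3 live neighbours.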
global step_size
_debug('check:' + str(self.rect.x) + ', ' + str(self.rect.y))
# Walk the grid in pixel coordinates: neighbouring cells are step_size pixels apart
for ycheck in range(0, 36 * step_size, step_size):
for xcheck in range(0, 49 * step_size, step_size):
if xcheck == self.rect.x:
if ycheck == self.rect.y:
alive = True
elif xcheck - step_size == self.rect.x:
if ycheck - step_size == self.rect.y:
neighbors += 1
elif xcheck - step_size == self.rect.x:
if ycheck == self.rect.y:
neighbors += 1
elif xcheck - step_size == self.rect.x:
if ycheck + step_size == self.rect.y:
neighbors += 1
elif xcheck == self.rect.x:
if ycheck - step_size == self.rect.y:
neighbors += 1
elif xcheck == self.rect.x:
if ycheck + step_size == self.rect.y:
neighbors += 1
elif xcheck + step_size == self.rect.x:
if ycheck - step_size == self.rect.y:
neighbors += 1
elif xcheck + step_size == self.rect.x:
if ycheck == self.rect.y:
neighbors += 1
elif xcheck + step_size == self.rect.x:
if ycheck + step_size == self.rect.y:
neighbors += 1
if alive == True:
if neighbors < 2 or neighbors > 3:
self.kill()
elif alive == False:
if neighbors == 3:
life = Player('life.jpg', lifex, lifey)
all_sprites.add(life)
# initialize pygame and create window
pygame.init()
screen = pygame.display.set_mode((WIDTH, HEIGHT))
pygame.display.set_caption("LayeredComputer")
#add more layers :)
clock = pygame.time.Clock()
all_sprites = pygame.sprite.Group()
running = True
while running:
# keep loop running at the right
clock.tick(FPS)
# Process input (events)
for event in pygame.event.get():
# check for closing window
if event.type == pygame.QUIT:
running = False
elif event.type == pygame.MOUSEBUTTONDOWN:
lifex, lifey = place(findplace(event.pos))
for x in all_sprites:
checkingdelete = x.checkdelete(lifex, lifey)
if checkingdelete == True:
_debug('remove:' + str(lifex) + ', ' + str(lifey))
x.kill()
break
if checkingdelete == False:
life = Player('life.jpg', lifex, lifey)
all_sprites.add(life)
_debug('adde:' + str(lifex) + ', ' + str(lifey))
checkingdelete = False
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_LEFT:
for gamelife in all_sprites:
gamelife.check(False, 0)
#start
'''
for event in pygame.event.get():
if event.type == pygame.KEYDOWN:
while event.key != pygame.K_RIGHT:
print('hi')
'''
# Update
all_sprites.update()
screen.fill(BLACK)
draw_grid()
# Draw / render
all_sprites.draw(screen)
#player image dot get recked
# *after* drawing everything, flip the display
pygame.display.flip()
pygame.time.delay(1)
pygame.quit()
| [
"matplotlib"
] |
3a4dc9cd4f7c6782821f0e79d88577de52c777bc | Python | antoine-spahr/Label-Efficient-Volumetric-Deep-Semantic-Segmentation-of-ICH | /code/figure_scripts/view_volume.py | UTF-8 | 11,185 | 2.671875 | 3 | [
"MIT"
] | permissive | """
author: Antoine Spahr
date : 30.10.2020
----------
TO DO :
"""
import matplotlib
import matplotlib.pyplot as plt
import pyvista as pv
import nibabel as nib
import numpy as np
import os
import click
import sys
import ast
sys.path.append('../')
from src.utils.plot_utils import imshow_pred
from src.utils.ct_utils import window_ct
@click.command()
@click.argument("vol_fn", type=click.Path(exists=True))
@click.argument("slice", type=str)
@click.option("--pred_fn", type=click.Path(exists=True), default=None, help="The prediction mask to display if provided.")
@click.option("--trgt_fn", type=click.Path(exists=True), default=None, help="The target mask to display if provided.")
@click.option("--pred_color", type=str, default='tomato', help='The color of the prediction mask.')
@click.option("--trgt_color", type=str, default='forestgreen', help='The color of the target mask.')
@click.option("--win", type=str, default='[50, 200]', help="The windowing to apply to the CT-scan as [win_center, win_width]. Default: [50, 200].")
@click.option("--cam_view", type=str, default=None, help="The camera position to be specified at pyvista with shape [(pos1, pos2, pos3), (foc1, foc2, foc3), (viewup1, viewup2, viewup3)]. Default: isotropic view")
@click.option("--isoval", type=float, default=1.0, help="The Isovalue used to generate a mesh form the volume. Default is 1.0.")
@click.option("--vol_alpha", type=float, default=0.3, help="The volume opacity for the 3D rendering. Default 0.3.")
@click.option("--overlap/--no_overlap", default=True, help="Whether the target and prediction are plotteed on the same image or separately. Default True.")
@click.option("--save_fn", type=click.Path(exists=False), default=None, help="Where to save the figure. Default is the current location named as slice1_slice2_slice3.pdf.")
def main(vol_fn, slice, pred_fn, trgt_fn, pred_color, trgt_color, win, cam_view, isoval, vol_alpha, overlap, save_fn):
"""
Provide an axial, sagittal, coronal and 3D view of the Nifti volume at vol_fn. The views are cross sections given by
the integers in slice ([axial, sagittal, coronal]). If a prediction and/or target is provided, the mask(s) are overlaid
on top of the views.
"""
slice = ast.literal_eval(slice)
win = ast.literal_eval(win)
cam_view = ast.literal_eval(cam_view) if cam_view else None
# load volume
vol_nii = nib.load(vol_fn)
aspect_ratio = vol_nii.header['pixdim'][3] / vol_nii.header['pixdim'][2]
vol = np.rot90(vol_nii.get_fdata(), k=1, axes=(0,1))
vol = window_ct(vol, win_center=win[0], win_width=win[1], out_range=(0,1))
# load prediction
if pred_fn:
pred_nii = nib.load(pred_fn)
pred = np.rot90(pred_nii.get_fdata(), k=1, axes=(0,1))
# load prediction
if trgt_fn:
trgt_nii = nib.load(trgt_fn)
trgt = np.rot90(trgt_nii.get_fdata(), k=1, axes=(0,1))
# get 3D rendering
data = pv.wrap(vol)
data.spacing = vol_nii.header['pixdim'][1:4]
surface = data.contour([isoval],)
if pred_fn:
data_pred = pv.wrap(pred)
data_pred.spacing = pred_nii.header['pixdim'][1:4]
surface_pred = data_pred.contour([1],)
if trgt_fn:
data_trgt = pv.wrap(trgt)
data_trgt.spacing = trgt_nii.header['pixdim'][1:4]
surface_trgt = data_trgt.contour([1],)
cpos = cam_view
if not overlap and pred_fn is not None and trgt_fn is not None:
# make 3D pred rendering
p = pv.Plotter(off_screen=True, window_size=[512, 512])
p.background_color = 'black'
p.add_mesh(surface, opacity=vol_alpha, clim=data.get_data_range(), color='lightgray')
p.add_mesh(surface_pred, opacity=1, color=pred_color)
if cpos:
p.camera_position = cpos
else:
p.view_isometric()
_, vol3Drender_pred = p.show(screenshot=True)
# make 3D trgt rendering
p = pv.Plotter(off_screen=True, window_size=[512, 512])
p.background_color = 'black'
p.add_mesh(surface, opacity=vol_alpha, clim=data.get_data_range(), color='lightgray')
p.add_mesh(surface_trgt, opacity=1, color=trgt_color)
if cpos:
p.camera_position = cpos
else:
p.view_isometric()
_, vol3Drender_trgt = p.show(screenshot=True)
# Make figure
if pred_fn is None:
pred = np.zeros_like(vol).astype(bool)
if trgt_fn is None:
trgt = np.zeros_like(vol).astype(bool)
fig, axs = plt.subplots(2,4,figsize=(10,5))
# Axial
imshow_pred(vol[:,:,slice[0]], pred[:,:,slice[0]].astype(bool),
im_cmap='gray', pred_color=pred_color, pred_alpha=0.8, target_color=trgt_color, target_alpha=0.8,
imshow_kwargs=dict(aspect='equal', interpolation='nearest'), legend=False, ax=axs[0,0])
axs[0,0].set_axis_off()
axs[0,0].set_title('Axial', color='white')
imshow_pred(vol[:,:,slice[0]], np.zeros_like(vol)[:,:,slice[0]].astype(bool), trgt[:,:,slice[0]].astype(bool),
im_cmap='gray', pred_color=pred_color, pred_alpha=0.8, target_color=trgt_color, target_alpha=0.8,
imshow_kwargs=dict(aspect='equal', interpolation='nearest'), legend=False, ax=axs[1,0])
axs[1,0].set_axis_off()
# Sagital
legend, legend_kwargs = False, None
if pred_fn is not None or trgt_fn is not None:
legend = True
legend_kwargs = dict(loc='upper center', ncol=2, frameon=False, labelcolor='white',
framealpha=0.0, fontsize=10, bbox_to_anchor=(0.5, -0.2),
bbox_transform=axs[1,1].transAxes)
imshow_pred(np.rot90(vol[:,slice[1],:], axes=(0,1)), np.rot90(pred[:,slice[1],:], axes=(0,1)).astype(bool),
im_cmap='gray', pred_color=pred_color, pred_alpha=0.8, target_color=trgt_color, target_alpha=0.8,
imshow_kwargs=dict(aspect=aspect_ratio, interpolation='nearest'), legend=False, ax=axs[0,1])
axs[0,1].set_axis_off()
axs[0,1].set_title('Sagital', color='white')
imshow_pred(np.rot90(vol[:,slice[1],:], axes=(0,1)), np.rot90(np.zeros_like(vol)[:,slice[1],:], axes=(0,1)).astype(bool),
np.rot90(trgt[:,slice[1],:], axes=(0,1)).astype(bool),
im_cmap='gray', pred_color=pred_color, pred_alpha=0.8, target_color=trgt_color, target_alpha=0.8,
imshow_kwargs=dict(aspect=aspect_ratio, interpolation='nearest'), legend=legend, legend_kwargs=legend_kwargs, ax=axs[1,1])
axs[1,1].set_axis_off()
# Coronal
imshow_pred(np.rot90(vol[slice[2],:,:], axes=(0,1)), np.rot90(pred[slice[2],:,:], axes=(0,1)).astype(bool),
im_cmap='gray', pred_color=pred_color, pred_alpha=0.8, target_color=trgt_color, target_alpha=0.8,
imshow_kwargs=dict(aspect=aspect_ratio, interpolation='nearest'), legend=False, ax=axs[0,2])
axs[0,2].set_axis_off()
axs[0,2].set_title('Coronal', color='white')
imshow_pred(np.rot90(vol[slice[2],:,:], axes=(0,1)), np.rot90(np.zeros_like(vol)[slice[2],:,:], axes=(0,1)).astype(bool),
np.rot90(trgt[slice[2],:,:], axes=(0,1)).astype(bool),
im_cmap='gray', pred_color=pred_color, pred_alpha=0.8, target_color=trgt_color, target_alpha=0.8,
imshow_kwargs=dict(aspect=aspect_ratio, interpolation='nearest'), legend=False, ax=axs[1,2])
axs[1,2].set_axis_off()
# 3D rendering
axs[0,3].imshow(vol3Drender_pred, cmap='gray')
axs[0,3].set_axis_off()
axs[0,3].set_title('3D rendering', color='white')
axs[1,3].imshow(vol3Drender_trgt, cmap='gray')
axs[1,3].set_axis_off()
# save figure
fig.set_facecolor('black')
fig.tight_layout()
save_fn = save_fn if save_fn else f'A{slice[0]}_S{slice[1]}_C{slice[2]}.pdf'
fig.savefig(save_fn, dpi=300, bbox_inches='tight')
else:
# make 3D rendering
p = pv.Plotter(off_screen=True, window_size=[512, 512])
p.background_color = 'black'
p.add_mesh(surface, opacity=vol_alpha, clim=data.get_data_range(), color='lightgray')
if pred_fn:
p.add_mesh(surface_pred, opacity=1, color=pred_color)
if trgt_fn:
p.add_mesh(surface_trgt, opacity=1, color=trgt_color)
if cpos:
p.camera_position = cpos
else:
p.view_isometric()
_, vol3Drender = p.show(screenshot=True)
# Make figure
if pred_fn is None:
pred = np.zeros_like(vol).astype(bool)
if trgt_fn is None:
trgt = np.zeros_like(vol).astype(bool)
fig, axs = plt.subplots(1,4,figsize=(10,6))
# Axial
imshow_pred(vol[:,:,slice[0]], pred[:,:,slice[0]].astype(bool), trgt[:,:,slice[0]].astype(bool),
im_cmap='gray', pred_color=pred_color, pred_alpha=0.8, target_color=trgt_color, target_alpha=0.8,
imshow_kwargs=dict(aspect='equal', interpolation='nearest'), legend=False, ax=axs[0])
axs[0].set_axis_off()
axs[0].set_title('Axial', color='white')
# Sagital
legend, legend_kwargs = False, None
if pred_fn is not None or trgt_fn is not None:
legend = True if trgt_fn is not None and pred_fn is not None else False
legend_kwargs = dict(loc='upper center', ncol=2, frameon=False, labelcolor='white',
framealpha=0.0, fontsize=10, bbox_to_anchor=(0.5, -0.1),
bbox_transform=axs[1].transAxes)
imshow_pred(np.rot90(vol[:,slice[1],:], axes=(0,1)), np.rot90(pred[:,slice[1],:], axes=(0,1)).astype(bool),
np.rot90(trgt[:,slice[1],:], axes=(0,1)).astype(bool),
im_cmap='gray', pred_color=pred_color, pred_alpha=0.8, target_color=trgt_color, target_alpha=0.8,
imshow_kwargs=dict(aspect=aspect_ratio, interpolation='nearest'), legend=legend, legend_kwargs=legend_kwargs, ax=axs[1])
axs[1].set_axis_off()
axs[1].set_title('Sagital', color='white')
# Coronal
imshow_pred(np.rot90(vol[slice[2],:,:], axes=(0,1)), np.rot90(pred[slice[2],:,:], axes=(0,1)).astype(bool),
np.rot90(trgt[slice[2],:,:], axes=(0,1)).astype(bool),
im_cmap='gray', pred_color=pred_color, pred_alpha=0.8, target_color=trgt_color, target_alpha=0.8,
imshow_kwargs=dict(aspect=aspect_ratio, interpolation='nearest'), legend=False, ax=axs[2])
axs[2].set_axis_off()
axs[2].set_title('Coronal', color='white')
# 3D rendering
axs[3].imshow(vol3Drender, cmap='gray')
axs[3].set_axis_off()
axs[3].set_title('3D rendering', color='white')
# save figure
fig.set_facecolor('black')
fig.tight_layout()
save_fn = save_fn if save_fn else f'A{slice[0]}_S{slice[1]}_C{slice[2]}.pdf'
fig.savefig(save_fn, dpi=300, bbox_inches='tight')
if __name__ == '__main__':
main()
| [
"matplotlib",
"pyvista"
] |
59676d8b3533ad90a26e09f26388d234a218990d | Python | DuyPham97/Sentiment-analysis-Hong-Kong | /TextBlob on Hong Kong dataset.py | UTF-8 | 700 | 3.28125 | 3 | [] | no_license | #!/usr/bin/env python
# coding: utf-8
# In[1]:
import pandas as pd
import matplotlib.pyplot as plt
# In[2]:
#Divide the sentiment scores into 8 bins (9 edges from -1 to 1)
sentiment_df = pd.read_csv('HongKong.csv')
fig, ax = plt.subplots(figsize=(8, 6))
# Plot histogram with break at zero
sentiment_df.hist(bins=[-1, -0.75, -0.5, -0.25, 0.0, 0.25, 0.5, 0.75, 1],
ax=ax,
color="purple")
plt.title("Sentiments from Tweets on Hong Kong")
plt.show()
# In[3]:
#Binary result; 0 or 1
fig, ax = plt.subplots(figsize=(8, 6))
# Plot histogram with break at zero
sentiment_df.hist(bins=[-1, 0.0, 1],
ax=ax,
color="purple")
plt.title("Sentiments from Tweets on Hong Kong")
plt.show()
| [
"matplotlib"
] |
ae41213a9abaacc94c155bc944d4bee0a32fea23 | Python | jcvdwlt/PokerPlayer | /display_results.py | UTF-8 | 997 | 2.96875 | 3 | [] | no_license | import matplotlib.pyplot as plt
import numpy as np
def plot_outcomes(outcomes, ax):
n_games, n_players, n_runs = outcomes.shape
outcomes = np.cumsum(outcomes, axis=0)
means = np.mean(outcomes, axis=2)
mins = np.percentile(outcomes, q=10, axis=2)
maxes = np.percentile(outcomes, q=90, axis=2)
# std = np.std(outcomes, axis=2)
# mins = means - std/2
# maxes = means + std/2
cols = ['C0', 'C1', 'C2']
xx = range(n_games)
for i in range(n_players):
ax.plot(xx, means[:, i], c=cols[i])
ax.fill_between(xx, mins[:, i], maxes[:, i], color=cols[i], alpha=0.3)
ax.set_xlabel('Round', fontsize=14)
ax.set_ylabel('Cumulative outcome', fontsize=14)
# outcomes = np.load('mlp_rb_rand_outcomes.npy')
outcomes = np.load('dumps/outcomes.npy')
f, ax = plt.subplots(nrows=1, ncols=1, figsize=(15, 10))
plot_outcomes(outcomes, ax)
ax.legend(['RL Value Function', 'Rational', 'Random'], fontsize=14)
plt.savefig('outcomes.png')
plt.show()
| [
"matplotlib"
] |
b4af3918d6fe7f3a26808bf0891148a594fe3247 | Python | lelechen63/head | /talking-heads/dataset/dataset.py | UTF-8 | 10,540 | 2.640625 | 3 | [] | no_license | """
This package performs the pre-processing of the VoxCeleb dataset in order to have it ready for training, speeding the
process up.
"""
import logging
import os
from datetime import datetime
import pickle as pkl
import random
from multiprocessing import Pool
import PIL
import cv2
import matplotlib
# matplotlib.use('pdf')
import matplotlib.pyplot as plt
import numpy as np
from torch.utils.data import Dataset
import torch
import torchvision.transforms as transforms
import mmcv
from io import BytesIO
from PIL import Image
# region DATASET PREPARATION
# region DATASET RETRIEVAL
class Lmark2rgbDataset(Dataset):
""" Dataset object used to access the pre-processed VoxCelebDataset """
def __init__(self, dataset_dir, resolution = 256, train = 'train'):
"""
Instantiates the Dataset.
:param dataset_dir: Path to the folder where the pre-processed dataset is stored.
:param resolution: Output resolution; frames and landmark images are resized to resolution x resolution.
:param train: Dataset split to load, either 'train' or 'test'.
"""
self.train = train
self.output_shape = tuple([resolution, resolution])
self.num_frames = 4
self.root = dataset_dir
if self.train =='train':
_file = open(os.path.join(dataset_dir, 'txt', "front_rt2.pkl"), "rb")
# self.data = pkl.load(_file)
self.data = pkl._Unpickler(_file)
self.data.encoding = 'latin1'
self.data = self.data.load()
_file.close()
elif self.train =='test':
_file = open(os.path.join(dataset_dir, 'txt', "front_rt2.pkl"), "rb")
self.data = pkl._Unpickler(_file)
self.data.encoding = 'latin1'
self.data = self.data.load()
# self.data = pkl.load(_file)
_file.close()
print (len(self.data))
self.transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5), inplace=True)
])
def __len__(self):
return len(self.data)
def __getitem__(self, index):
v_id = self.data[index][0]
reference_id = self.data[index][1]
video_path = os.path.join(self.root, 'unzip', v_id + '.mp4')
ani_video_path = os.path.join(self.root, 'unzip', v_id + '_ani.mp4')
lmark_path = os.path.join(self.root, 'unzip', v_id + '.npy')
lmark = np.load(lmark_path)[:,:,:-1]
v_length = lmark.shape[0]
real_video = mmcv.VideoReader(video_path)
ani_video = mmcv.VideoReader(ani_video_path)
# sample frames for embedding network
input_indexs = set(random.sample(range(0,64), self.num_frames))
# we randomly choose a target frame
target_id = np.random.choice([0, v_length - 1])
reference_frames = []
for t in input_indexs:
rgb_t = mmcv.bgr2rgb(real_video[t])
lmark_t = lmark[t]
lmark_rgb = plot_landmarks( lmark_t)
# lmark_rgb = np.array(lmark_rgb)
# resize 224 to 256
rgb_t = cv2.resize(rgb_t, self.output_shape)
lmark_rgb = cv2.resize(lmark_rgb, self.output_shape)
# to tensor
rgb_t = self.transform(rgb_t)
lmark_rgb = self.transform(lmark_rgb)
reference_frames.append(torch.cat([rgb_t, lmark_rgb],0)) # (6, 256, 256)
reference_frames = torch.stack(reference_frames)
############################################################################
target_rgb = real_video[target_id]
reference_rgb = real_video[reference_id]
reference_ani = ani_video[reference_id]
target_ani = ani_video[target_id]
target_lmark = lmark[target_id]
target_rgb = mmcv.bgr2rgb(target_rgb)
target_rgb = cv2.resize(target_rgb, self.output_shape)
target_rgb = self.transform(target_rgb)
target_ani = mmcv.bgr2rgb(target_ani)
target_ani = cv2.resize(target_ani, self.output_shape)
target_ani = self.transform(target_ani)
# reference_rgb = mmcv.bgr2rgb(reference_rgb)
# reference_rgb = cv2.resize(reference_rgb, self.output_shape)
# reference_rgb = self.transform(reference_rgb)
# reference_ani = mmcv.bgr2rgb(reference_ani)
# reference_ani = cv2.resize(reference_ani, self.output_shape)
# reference_ani = self.transform(reference_ani)
target_lmark = plot_landmarks(target_lmark)
# target_lmark = np.array(target_lmark)
target_lmark = cv2.resize(target_lmark, self.output_shape)
target_lmark = self.transform(target_lmark)
input_dic = {'v_id' : v_id, 'target_lmark': target_lmark, 'reference_frames': reference_frames,
'target_rgb': target_rgb, 'target_ani': target_ani
}
return input_dic
def plot_landmarks1( landmarks):
"""
Creates an RGB image with the landmarks. The generated image will be of the same size as the frame where the face
matching the landmarks.
The image is created by plotting the coordinates of the landmarks using matplotlib, and then converting the
plot to an image.
Things to watch out for:
* The figure where the landmarks will be plotted must have the same size as the image to create, but matplotlib
only accepts the size in inches, so it must be converted to pixels using the DPI of the screen.
* A white background is printed on the image (an array of ones) in order to keep the figure from being flipped.
* The axis must be turned off and the subplot must be adjusted to remove the space where the axis would normally be.
:param frame: Image with a face matching the landmarks.
:param landmarks: Landmarks of the provided frame,
:return: RGB image with the landmarks as a Pillow Image.
"""
dpi = 100
fig = plt.figure(figsize=(224/ dpi,224 / dpi), dpi=dpi)
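# The docstring's inch/pixel conversion worked out: 224 px / 100 dpi = 2.24 in per side,
# and 2.24 in * 100 dpi = 224 px, so the canvas matches the 224x224 landmark frame.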
ax = fig.add_subplot(111)
ax.axis('off')
plt.imshow(np.ones((224,224)))
plt.subplots_adjust(left=0, right=1, top=1, bottom=0)
# Head
ax.plot(landmarks[0:17, 0], landmarks[0:17, 1], linestyle='-', color='green', lw=2)
# Eyebrows
ax.plot(landmarks[17:22, 0], landmarks[17:22, 1], linestyle='-', color='orange', lw=2)
ax.plot(landmarks[22:27, 0], landmarks[22:27, 1], linestyle='-', color='orange', lw=2)
# Nose
ax.plot(landmarks[27:31, 0], landmarks[27:31, 1], linestyle='-', color='blue', lw=2)
ax.plot(landmarks[31:36, 0], landmarks[31:36, 1], linestyle='-', color='blue', lw=2)
# Eyes
ax.plot(landmarks[36:42, 0], landmarks[36:42, 1], linestyle='-', color='red', lw=2)
ax.plot(landmarks[42:48, 0], landmarks[42:48, 1], linestyle='-', color='red', lw=2)
# Mouth
ax.plot(landmarks[48:60, 0], landmarks[48:60, 1], linestyle='-', color='purple', lw=2)
fig.canvas.draw()
data = PIL.Image.frombuffer('RGB', fig.canvas.get_width_height(), fig.canvas.tostring_rgb(), 'raw', 'RGB', 0, 1)
plt.close(fig)
# print ('++++++++++++++++++++++++++++++++')
return data
def plot_landmarks( landmarks):
# landmarks = np.int32(landmarks)
"""
Creates an RGB image with the landmarks. The generated image will be of the same size as the frame where the face
matching the landmarks.
The image is created by plotting the coordinates of the landmarks using matplotlib, and then converting the
plot to an image.
Things to watch out for:
* The figure where the landmarks will be plotted must have the same size as the image to create, but matplotlib
only accepts the size in inches, so it must be converted to pixels using the DPI of the screen.
* A white background is printed on the image (an array of ones) in order to keep the figure from being flipped.
* The axis must be turned off and the subplot must be adjusted to remove the space where the axis would normally be.
:param frame: Image with a face matching the landmarks.
:param landmarks: Landmarks of the provided frame,
:return: RGB image with the landmarks as a Pillow Image.
"""
# print (landmarks[0:17].shape)
# print(type(landmarks))
# points = np.array([[1, 4], [5, 6], [7, 8], [4, 4]])
# print (points.shape)
blank_image = np.zeros((224,224,3), np.uint8)
# cv2.polylines(blank_image, np.int32([points]), True, (0,255,255), 1)
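# The index ranges below follow the standard 68-point facial landmark layout (same grouping as
# plot_landmarks1 above): 0:17 jaw, 17:22 and 22:27 eyebrows, 27:31 nose bridge, 31:36 lower nose,
# 36:42 and 42:48 eyes, 48:60 outer mouth.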
cv2.polylines(blank_image, np.int32([landmarks[0:17]]) , True, (0,255,255), 2)
cv2.polylines(blank_image, np.int32([landmarks[17:22]]), True, (255,0,255), 2)
cv2.polylines(blank_image, np.int32([landmarks[22:27]]) , True, (255,0,255), 2)
cv2.polylines(blank_image, np.int32([landmarks[27:31]]) , True, (255,255, 0), 2)
cv2.polylines(blank_image, np.int32([landmarks[31:36]]) , True, (255,255, 0), 2)
cv2.polylines(blank_image, np.int32([landmarks[36:42]]) , True, (255,0, 0), 2)
cv2.polylines(blank_image, np.int32([landmarks[42:48]]) , True, (255,0, 0), 2)
cv2.polylines(blank_image, np.int32([landmarks[48:60]]) , True, (0, 0, 255), 2)
return blank_image
# import torchvision
# import time
# dataset = Lmark2rgbDataset('/home/cxu-serve/p1/lchen63/voxceleb/', 256, 'train')
# data_loader = torch.utils.data.DataLoader(dataset,
# batch_size=1,
# num_workers=1,
# shuffle=False, drop_last=True)
# t1 = time.time()
# print (len(data_loader))
# for (step, gg) in enumerate(data_loader):
# print (time.time() - t1)
# print (gg['v_id'])
# print (gg['reference_frames'].shape)
# # 'v_id' : v_id, 'target_lmark': target_lmark, 'reference_frames': reference_frames,
# # 'target_rgb': target_rgb, 'target_ani': target_ani
# print (gg['reference_frames'].shape)
# print (gg['target_lmark'].shape)
# print (gg['reference_frames'].shape)
# inputs = [gg['target_lmark'], gg['target_rgb'], gg['target_ani'], gg['reference_frames'][:,0,1]]
# fake_im = torch.stack(inputs, dim = 1)
# fake_store = fake_im.data.contiguous().view(4*1,3,256,256)
# torchvision.utils.save_image(fake_store,
# "./tmp/vis_%05d.png"%step,normalize=True)
# if step == 1:
# break
| [
"matplotlib"
] |
a27d9e5f6e2352e5979472bc405c68f1b1aa5ab7 | Python | Andromedanita/PHY407 | /independent/trial1.py | UTF-8 | 1,505 | 3.5 | 4 | [] | no_license | import numpy as np
import matplotlib.pylab as plt
from scipy import interpolate
#-----------------------------------------------------------
# Function
#-----------------------------------------------------------
def lin_interp(x,x0,y0,x1,y1):
'''
linear interpolation between points (x0,y0) and (x1,y1)
'''
y = y0 + (y1-y0)*((x-x0)/(x1-x0))
return y
#-----------------------------------------------------------
# Code Starts Here
#-----------------------------------------------------------
def ff(x):
if x<2:
return 0
else:
return 1.
x_array = np.linspace(0.,4,20)
y_array = np.zeros(len(x_array))
for k in range(len(x_array)):
y_array[k] = ff(x_array[k])
xvals = np.linspace(0.01,3.99,30)
n = len(xvals)
yvals = np.zeros(n)
for h in range(len(xvals)):
print(h)
m = 0
while m < len(x_array) - 1: # x_array[m+1] is read below, so stop one short of the last knot
if xvals[h]>x_array[m] and xvals[h]<x_array[m+1]:
yvals[h] = lin_interp(xvals[h],x_array[m],y_array[m],x_array[m+1],y_array[m+1])
m+=1
f = interpolate.interp1d(x_array,y_array,kind='linear')
py_interp = f(xvals)
#plotting
plt.ion()
plt.plot(x_array,y_array,'bo')
plt.plot(x_array,y_array,'b')
plt.plot(xvals,py_interp,'g')
plt.plot(xvals,yvals,'r')
plt.xlabel("x")
plt.ylabel("y")
plt.legend(("knots","Actual function","python interpolated values","my interpolated values"),loc='best')
plt.ylim(-1,2)
plt.title("Step Function")
plt.show()
| [
"matplotlib"
] |
81c32c5302a68fbb91e14ffdc95b431c4ef8a677 | Python | SNBQT/Limited-Data-Rolling-Bearing-Fault-Diagnosis-with-Few-shot-Learning | /utils.py | UTF-8 | 4,526 | 2.828125 | 3 | [] | no_license | import matplotlib.pyplot as plt
import pandas as pd
from imblearn.metrics import classification_report_imbalanced
import seaborn as sns
from sklearn.metrics import f1_score,accuracy_score,confusion_matrix
import itertools
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import cm
def confusion_plot(pred, y_true):
sns.set(rc={'figure.figsize':(5,4)})
fault_labels = np.unique(y_true)
print(fault_labels)
cm_array = confusion_matrix(y_true, pred,labels=fault_labels)
df_cm = pd.DataFrame(cm_array, index = fault_labels,
columns = fault_labels)
sns.heatmap(df_cm,annot=True)
plt.show()
print(classification_report_imbalanced(np.array(y_true), np.array(pred)))
return plt
def plot_confusion_matrix(cm, classes=None,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
mpl.rcParams.update(mpl.rcParamsDefault)
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
print(cm)
plt.figure(figsize=(4, 4))
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar(shrink=0.7)
tick_marks = np.arange(len(list(range(cm.shape[0]))))
# plt.xticks(tick_marks, classes, rotation=45)
plt.xticks(tick_marks, classes)
plt.yticks(tick_marks, classes,rotation=90)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.tight_layout()
return plt
def plot_pairs(pairs,plot_idx=None):
nc,w,h = pairs[0].shape[0:3]
if not plot_idx:
plot_idx = list(range(nc))
fig, ax = plt.subplots(nrows=len(plot_idx),ncols=4, figsize=(16, len(plot_idx)))
for i,v in enumerate(plot_idx):
ax[i][0].plot(pairs[0][v,:,0,0])
ax[i][0].get_yaxis().set_visible(False)
ax[i][0].get_xaxis().set_visible(False)
ax[i][1].plot(pairs[1][v,:,0,0])
ax[i][1].get_yaxis().set_visible(False)
ax[i][1].get_xaxis().set_visible(False)
ax[i][2].plot(pairs[0][v,:,1,0])
ax[i][2].get_yaxis().set_visible(False)
ax[i][2].get_xaxis().set_visible(False)
ax[i][3].plot(pairs[1][v,:,1,0])
ax[i][3].get_yaxis().set_visible(False)
ax[i][3].get_xaxis().set_visible(False)
plt.show()
def noise_rw(x,snr,isplot = False):
snr1 = 10 ** (snr / 10.0)
xpower = np.sum(x ** 2,axis=0) / len(x)
npower = xpower / snr1
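    # SNR is in dB: npower = xpower / 10**(snr/10), so 10*log10(xpower/npower) == snr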
noise = np.random.normal(0, np.sqrt(npower), x.shape)
noise_data=x+noise
    if(isplot):
        # fall back to generic channel names for the subplot titles
        signal_labels = ['channel %d' % i for i in range(x.shape[1])]
        print(snr,snr1,npower)
        print(np.sum(noise ** 2)/len(x))
        fig, axs = plt.subplots(nrows=3,ncols=x.shape[1], figsize=(8*x.shape[1], 6))
for i in range(x.shape[1]):
axs[0][i].plot(x[:,i])
axs[0][i].set_title(signal_labels[i] + ' signal')
axs[0][i].get_xaxis().set_visible(False)
axs[1][i].plot(noise[:,i])
axs[1][i].set_title(signal_labels[i] +' noise')
axs[1][i].get_xaxis().set_visible(False)
axs[2][i].plot(noise_data[:,i])
axs[2][i].set_title(signal_labels[i] +' noise signal')
plt.show()
return noise_data
def plot_with_labels(data):
#loop through labels and plot each cluster
sns.set(rc={'figure.figsize':(5,5)})
plt.figure()
for i, label in enumerate(range(10)):
#add data points
plt.scatter(x=data.loc[data['label']==label, 'x'],
y=data.loc[data['label']==label,'y'],
color=cm.rainbow(int(255 * i / 9)),
alpha=0.20)
#add label
plt.annotate(label,
data.loc[data['label']==label,['x','y']].mean(),
horizontalalignment='center',
verticalalignment='center',
size=14,
weight='bold',
color='black') | [
"matplotlib",
"seaborn"
] |
0aa8724f4bfdf91c4d908f8a15984d0bd6c49b24 | Python | hemengf/my_python_lib | /interference_pattern/red_amber_green/red_amber_8bit.py | UTF-8 | 312 | 3.03125 | 3 | [] | no_license | from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
x = np.arange(0,20, 0.001)
red = 1+np.cos(4*np.pi*(x+0.630/4)/0.630)
amber = 1+ np.cos(4*np.pi*(x+0*0.59/4)/0.590)
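# The two intensity patterns have spatial periods 0.630/2 and 0.590/2, so their sum
# beats with period roughly (0.315*0.295)/(0.315-0.295) ~ 4.6, the slow envelope in the summed trace.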
plt.plot(x, red+amber)
plt.title('red and amber 8bit')
plt.plot(x, red, 'r')
plt.plot(x, amber, 'y')
plt.show()
| [
"matplotlib"
] |
b7883360522d8fb89fb6ec3685345cef6b9e3602 | Python | JacobJeppesen/StreamlitTemplates | /boardgame/headtohead.py | UTF-8 | 11,452 | 3.375 | 3 | [
"MIT"
] | permissive | import numpy as np
import pandas as pd
import altair as alt
import streamlit as st
from typing import List, Tuple
SPACES = ' ' * 10
def load_page(df: pd.DataFrame,
player_list: List[str]) -> None:
""" In this section you can compare two players against each other based on their respective performances.
Please note that the Head to Head section is meant for games that were played with 2 players against each other.
Sections:
* The Winner
* Stats per Game
Parameters:
-----------
df : pandas.core.frame.DataFrame
The data to be used for the analyses of played board game matches.
player_list : list of str
List of players that participated in the board games
"""
player_one, player_two = prepare_layout(player_list)
two_player_matches, matches_df = check_if_two_player_matches_exist(df, player_one, player_two)
if two_player_matches:
sidebar_frequency_graph(matches_df)
extract_winner(df, player_one, player_two)
stats_per_game(matches_df, player_one, player_two)
else:
st.header("🏳️ Error")
st.write("No two player matches were played with **{}** and **{}**. "
"Please select different players".format(player_one, player_two))
def prepare_layout(player_list: List[str]) -> Tuple[str, str]:
""" Create the layout for the page including general selection options
Parameters:
-----------
player_list : list of str
List of players that participated in the board games
"""
# Choose players
st.title("🎲 Head to Head")
st.write("In this section you can compare two players against each other based on their"
"respective performances. Please note that the *Head to Head* section is meant for "
"games that were played with 2 players against each other. ")
st.sidebar.subheader("Please select two players")
player_one = st.sidebar.selectbox("Select player one", player_list, index=0)
player_two = st.sidebar.selectbox("Select player two", player_list, index=1)
return player_one, player_two
def check_if_two_player_matches_exist(df: pd.DataFrame,
player_one: str,
player_two: str) -> Tuple[bool, pd.DataFrame]:
""" Checks if player_one and player_two have played against each other in two player games
Parameters:
-----------
df : pandas.core.frame.DataFrame
The data to be used for the analyses of played board game matches.
player_one : str
One of the players in the game
player_two : str
One of the players in the game
Returns:
--------
boolean
True if there are matches played between player_one and player_two
False otherwise
matches_df: pandas.core.frame.DataFrame
Data with only the two players selected and where two player games have been played
"""
matches_df = df.loc[(df[player_one + "_played"] == 1) &
(df[player_two + "_played"] == 1) &
(df["Nr_players"] == 2), :]
if (len(matches_df) == 0) | (player_one == player_two):
return False, matches_df
else:
return True, matches_df
def sidebar_frequency_graph(matches_df: pd.DataFrame) -> None:
""" Extracts and visualizes the frequency of games
Parameters:
-----------
matches_df: pandas.core.frame.DataFrame
Data with only the two players selected and where two player games have been played
"""
to_plot = matches_df.sort_values("Date").set_index("Date").resample("3D").count().reset_index()
chart = alt.Chart(to_plot).mark_area(
color='goldenrod',
opacity=1
).encode(
x='Date',
y=alt.Y('Players', title='Number of Games'),
).properties(background='transparent')
if len(to_plot) > 0:
st.sidebar.altair_chart(chart)
def extract_winner(df: pd.DataFrame,
player_one: str,
player_two: str) -> None:
""" Extract the winner of the two players
Parameters:
-----------
df : pandas.core.frame.DataFrame
The data to be used for the analyses of played board game matches.
player_one : str
One of the players in the game
player_two : str
One of the players in the game
"""
# Extract common games
games = df.loc[(df[player_one + "_played"] == 1) &
(df[player_two + "_played"] == 1) &
(df["Nr_players"] == 2), :]
player_one_won = len(games[games[player_one + "_winner"] == 1])
player_two_won = len(games[games[player_two + "_winner"] == 1])
to_plot = pd.DataFrame([[player_one_won, player_one],
[player_two_won, player_two]], columns=['Results', 'Player'])
if player_one_won != player_two_won:
if player_one_won > player_two_won:
percentage = round(player_one_won / len(games) * 100, 2)
winner = player_one
else:
percentage = round(player_two_won / len(games) * 100, 2)
winner = player_two
st.header("**♟** The Winner - {}**♟**".format(winner))
st.write("The winner is decided simply by the amount of games won one by either player.")
st.write("{}🔹 Out of {} games, {} games were won by **{}** "
"whereas {} games were won by **{}**".format(SPACES, len(games), player_one_won, player_one,
player_two_won, player_two))
st.write("{}🔹 In other words, {}% of games were won by **{}** who is the clear winner!".format(SPACES,
percentage,
winner))
else:
winner = player_one + " and " + player_two
st.header("**♟** The Winners - {}**♟**".format(winner))
st.write("The winner is decided simply by the amount of games won one by either player.")
st.write("{}🔹 Out of {} games, {} games were won by **{}** "
"whereas {} games were won by **{}**".format(SPACES, len(games), player_one_won, player_one,
player_two_won, player_two))
st.write("{}🔹 In other words, it is a **tie**!".format(SPACES))
bars = alt.Chart(to_plot).mark_bar().encode(
x='Results:Q',
y='Player:O',
color='Player:O'
)
text = bars.mark_text(
align='left',
baseline='middle',
dx=3 # Nudges text to right so it doesn't appear on top of the bar
).encode(
text='Results:Q'
)
st.write(bars + text)
def stats_per_game(matches_df: pd.DataFrame,
player_one: str,
player_two: str) -> None:
""" Show statistics per game
Parameters:
-----------
matches_df : pandas.core.frame.DataFrame
Data with only the two players selected and where two player games have been played
player_one : str
One of the players in the game
player_two : str
One of the players in the game
"""
st.header("**♟** Stats per Game **♟**")
st.write("Please select a game below to see the statistics for both players.")
game_selection_df = game_selection(matches_df)
scores_over_time(player_one, player_two, game_selection_df)
general_stats_game(player_one, player_two, game_selection_df)
def game_selection(matches_df: pd.DataFrame) -> pd.DataFrame:
""" Select game and filter data based on the game
Parameters:
-----------
matches_df: pandas.core.frame.DataFrame
Data with only the two players selected and where two player games have been played
Returns:
--------
game_selection_df : pandas.core.frame.DataFrame
Filtered data based on the selected game
"""
games = list(matches_df.Game.unique())
games.sort()
game = st.selectbox("Select a game", games)
game_selection_df = matches_df.loc[(matches_df.Game == game), :]
return game_selection_df
def scores_over_time(player_one: str,
player_two: str,
game_selection_df: pd.DataFrame) -> None:
""" Visualize scores over time for a specific game for two players
Parameters:
-----------
player_one : str
One of the players in the game
player_two : str
One of the players in the game
game_selection_df : pandas.core.frame.DataFrame
Filtered data based on the selected game
"""
player_one_vals = list(game_selection_df[player_one + '_score'].values)
player_two_vals = list(game_selection_df[player_two + '_score'].values)
vals = player_one_vals + player_two_vals
player_indices = [player_one if i < len(player_one_vals) else player_two for i, _ in enumerate(vals)]
indices = list(np.arange(len(vals) / 2))
indices = indices + indices
to_plot = pd.DataFrame(np.array([indices, vals, player_indices]).T, columns=['Indices', 'Scores', 'Players'])
to_plot.Indices = to_plot.Indices.astype(float)
to_plot.Scores = to_plot.Scores.astype(float)
st.write("Here you can see how games have progressed since the beginning. There is purposefully"
" no time displayed as that might clutter the visualization. All scores on the left hand side"
" were the first matches and scores on the right are the last.")
colors = ['#2196F3', '#FF5722']
chart = alt.Chart(to_plot,
title="Scores over time").mark_line().encode(
alt.X('Indices', axis=None, scale=alt.Scale(domain=(0, max(to_plot.Indices)))),
y='Scores:Q',
color=alt.Color('Players', scale=alt.Scale(range=colors))
).configure_axis(
grid=False
).configure_view(
strokeOpacity=0
)
st.altair_chart(chart)
def general_stats_game(player_one: str,
player_two: str,
game_selection_df: pd.DataFrame) -> None:
""" Show general statistics of a specific game for two players
Parameters:
-----------
player_one : str
One of the players in the game
player_two : str
One of the players in the game
game_selection_df : pandas.core.frame.DataFrame
Filtered data based on the selected game
"""
result = pd.DataFrame(columns=['Player', 'Avg', 'Min', 'Max', 'Number'])
for player in [player_one, player_two]:
values = game_selection_df.loc[(game_selection_df[player + "_played"] == 1), player + "_score"].values
result.loc[len(result), :] = [player, round(np.mean(values)), min(values),
max(values), len(values)]
st.write("You can see the average statistics for each player such that comparison is possible.")
bars = alt.Chart(result).mark_bar().encode(
x='Avg:Q',
y='Player:O',
color='Player:O'
).properties(
title='Statistics'
)
text = bars.mark_text(
align='left',
baseline='middle',
dx=3 # Nudges text to right so it doesn't appear on top of the bar
).encode(
text='Avg:Q'
)
st.write(bars + text)
| [
"altair"
] |
ed571b5b1f42eac5bacad75d6d813185f82f36ec | Python | kunal097/Heart-attack-prevention-system | /src/sanjivini/utils.py | UTF-8 | 727 | 3.0625 | 3 | [] | no_license | import string
import random
import matplotlib.pyplot as plt
import numpy as np
from datetime import datetime, timedelta
import pygal
raw_data = string.ascii_letters + string.digits #+ string.punctuation
def generate_key():
key = ""
for i in range(0,25):
key+=raw_data[random.randint(0,len(raw_data)-1)]
return key
def generate_graph():
# X = np.array([1,2,3,4,5])
# y = np.array([6,7,8,9,10])
date_chart = pygal.Line(x_label_rotation=20)
date_chart.x_labels = map(lambda d: d.strftime('%Y-%m-%d'), [
datetime(2013, 1, 2),
datetime(2013, 1, 12),
datetime(2013, 2, 2),
datetime(2013, 2, 22)])
date_chart.add("Visits", [300, 412, 823, 672])
graph = date_chart.render().decode("utf-8")
return graph
| [
"matplotlib"
] |
3c6bb3b20b380fcf35724d2cd0ccd31d79eefa09 | Python | eastmountyxz/AI-for-TensorFlow | /blog24-LSTM恶意请求分类/LSTM_data.py | UTF-8 | 6,927 | 2.828125 | 3 | [] | no_license | # coding=utf-8
# By:Eastmount CSDN 2020-11-15
import pickle
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn import metrics
from sklearn.preprocessing import LabelEncoder,OneHotEncoder
from keras.models import Model
from keras.layers import LSTM, Activation, Dense, Dropout, Input, Embedding
from keras.optimizers import RMSprop
from keras.preprocessing.text import Tokenizer
from keras.preprocessing import sequence
from keras.callbacks import EarlyStopping
from keras.models import load_model
from load_pj import classification_pj
import time
start = time.perf_counter()
#--------------------------------------- Step 1: Load the data ------------------------------------
# Read the train / validation / test datasets
train_df = pd.read_csv("all_data_url_random_fenci_train.csv")
val_df = pd.read_csv("all_data_url_random_fenci_val.csv")
test_df = pd.read_csv("all_data_url_random_fenci_test.csv")
print(train_df.head())
# Configure matplotlib so that Chinese characters display correctly
plt.rcParams['font.sans-serif'] = ['KaiTi'] # default font (KaiTi / SimHei style)
plt.rcParams['axes.unicode_minus'] = False # render the minus sign correctly in saved figures
#--------------------------------- Step 2: Encode the labels (LabelEncoder + OneHotEncoder) ---------------------------------
# Encode the label column of the dataset
train_y = train_df.label
print("Label:")
print(train_y[:10])
val_y = val_df.label
test_y = test_df.label
le = LabelEncoder()
train_y = le.fit_transform(train_y).reshape(-1,1)
print("LabelEncoder")
print(train_y[:10])
print(len(train_y))
val_y = le.transform(val_y).reshape(-1,1)
test_y = le.transform(test_y).reshape(-1,1)
## One-hot encode the dataset labels
ohe = OneHotEncoder()
train_y = ohe.fit_transform(train_y).toarray()
val_y = ohe.transform(val_y).toarray()
test_y = ohe.transform(test_y).toarray()
print("OneHotEncoder:")
print(train_y[:10])
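# Illustrative example of the two-step encoding above: labels ['bad', 'good', 'good']
# -> LabelEncoder -> [[0], [1], [1]] -> OneHotEncoder -> [[1, 0], [0, 1], [0, 1]]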
#------------------------------- Step 3: Encode the text with Tokenizer -------------------------------
# Encode the tokenised word sequences with a Tokenizer.
# After creating a Tokenizer object, fit_on_texts() splits each text on whitespace to identify words
# and assigns every word an integer index based on frequency: the more frequent the word, the smaller the index.
max_words = 5000
max_len = 600
tok = Tokenizer(num_words=max_words) # keep at most 5000 words
tok.fit_on_texts(train_df.fenci)
print(tok)
# Save the fitted Tokenizer and load it back
with open('tok.pickle', 'wb') as handle:
pickle.dump(tok, handle, protocol=pickle.HIGHEST_PROTOCOL)
# loading
with open('tok.pickle', 'rb') as handle:
tok = pickle.load(handle)
# word_index maps each word to its integer index
# word_counts gives the frequency of each word
for ii,iterm in enumerate(tok.word_index.items()):
if ii < 10:
print(iterm)
else:
break
print("===================")
for ii,iterm in enumerate(tok.word_counts.items()):
if ii < 10:
print(iterm)
else:
break
# tok.texts_to_sequences() converts each text into a sequence of word indices
# sequence.pad_sequences() pads/truncates every sequence to the same length
# After encoding, every word is represented by its index, so each text becomes a fixed-length integer vector
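# Toy illustration (hypothetical data) of the two calls used below:
#   demo = Tokenizer(num_words=10)
#   demo.fit_on_texts(["get index php", "get admin php"])
#   demo.texts_to_sequences(["get admin php"])    # e.g. [[1, 4, 2]] - exact indices follow word frequency
#   sequence.pad_sequences([[1, 4, 2]], maxlen=5) # [[0, 0, 1, 4, 2]] - zero-padded on the left by default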
train_seq = tok.texts_to_sequences(train_df.fenci)
val_seq = tok.texts_to_sequences(val_df.fenci)
test_seq = tok.texts_to_sequences(test_df.fenci)
# Pad every sequence to the same length
train_seq_mat = sequence.pad_sequences(train_seq,maxlen=max_len)
val_seq_mat = sequence.pad_sequences(val_seq,maxlen=max_len)
test_seq_mat = sequence.pad_sequences(test_seq,maxlen=max_len)
print(train_seq_mat.shape) #(10000, 600)
print(val_seq_mat.shape) #(5000, 600)
print(test_seq_mat.shape) #(5000, 600)
print(train_seq_mat[:2])
#------------------------------- Step 4: Build and train the LSTM model -------------------------------
## Define the LSTM model
inputs = Input(name='inputs',shape=[max_len])
## Embedding(vocabulary size, embedding dimension, sequence length per sample)
layer = Embedding(max_words+1, 128, input_length=max_len)(inputs)
layer = LSTM(128)(layer)
layer = Dense(128, activation="relu", name="FC1")(layer)
layer = Dropout(0.3)(layer)
layer = Dense(2, activation="softmax", name="FC2")(layer)
model = Model(inputs=inputs, outputs=layer)
model.summary()
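# Resulting architecture: 600 token ids -> Embedding (600 x 128) -> LSTM(128)
# -> Dense(128, relu) -> Dropout(0.3) -> Dense(2, softmax)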
model.compile(loss="categorical_crossentropy",
optimizer=RMSprop(),
metrics=["accuracy"])
# Guard so the saved model is not re-trained on every run
flag = "test"
if flag == "train":
print("模型训练")
#模型训练
model_fit = model.fit(train_seq_mat, train_y, batch_size=128, epochs=10,
validation_data=(val_seq_mat,val_y),
                          callbacks=[EarlyStopping(monitor='val_loss',min_delta=0.0001)] # stop training when val_loss stops improving
)
    # save the model
model.save('my_model.h5')
del model # deletes the existing model
    # elapsed time
    elapsed = (time.perf_counter() - start)
print("Time used:", elapsed)
else:
print("模型预测")
# 导入已经训练好的模型
model = load_model('my_model.h5')
#-------------------------------------- Step 5: Prediction and evaluation --------------------------------
# predict on the test set
test_pre = model.predict(test_seq_mat)
# evaluate the predictions: confusion matrix (note the argument order: y_true, y_pred)
confm = metrics.confusion_matrix(np.argmax(test_y,axis=1),np.argmax(test_pre,axis=1))
print(confm)
# visualise the confusion matrix
Labname = ['Normal', 'Anomalous']
print(metrics.classification_report(np.argmax(test_y,axis=1),np.argmax(test_pre,axis=1)))
classification_pj(np.argmax(test_pre,axis=1),np.argmax(test_y,axis=1))
plt.figure(figsize=(8,8))
sns.heatmap(confm.T, square=True, annot=True,
fmt='d', cbar=False, linewidths=.6,
cmap="YlGnBu")
plt.xlabel('True label',size = 14)
plt.ylabel('Predicted label', size = 14)
plt.xticks(np.arange(2)+0.8, Labname, size = 12)
plt.yticks(np.arange(2)+0.4, Labname, size = 12)
plt.show()
#-------------------------------------- Step 6: Validate the model --------------------------------
# re-preprocess the validation set with the fitted Tokenizer, then predict with the trained model
val_seq = tok.texts_to_sequences(val_df.fenci)
# pad every sequence to the same length
val_seq_mat = sequence.pad_sequences(val_seq,maxlen=max_len)
# predict on the validation set
val_pre = model.predict(val_seq_mat)
print(metrics.classification_report(np.argmax(val_y,axis=1),np.argmax(val_pre,axis=1)))
classification_pj(np.argmax(val_pre,axis=1),np.argmax(val_y,axis=1))
# elapsed time
elapsed = (time.perf_counter() - start)
print("Time used:", elapsed)
| [
"matplotlib",
"seaborn"
] |
0d2d94b77b7167e46cfac56a66585dacf12146f7 | Python | benrendle/K2_Scale_Heights | /scale_heights.py | UTF-8 | 7,790 | 2.875 | 3 | [] | no_license | ''' Current functions used to calculate the scale height of the different
Milky Way structural components. Includes reading in of current C3/C6
input files (w/ spectro and w/o spectro parameters).
Edited: 22/01/2018; Ben Rendle '''
import pandas as pd
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from scipy import integrate
import scipy.odr.odrpack as odrpack
def space_density(obs):
''' Calculate the space density for a given band above the Galactic plane '''
# obs['Zabs'] = np.abs(obs['Z'])
a = min(obs['Z'])
b = max(obs['Z'])
theta = 0.5*np.sqrt(116)
t_theta = np.tan(theta*(np.pi/180))**2
bins = np.linspace(np.log10(100),np.log10(5000),19)
Z = np.log10(np.abs(obs['Z'])*1000)
hist, bin_edges = np.histogram(Z, bins = bins)
# print(bin_edges)
# print(hist)
volume = []
# volume of square based pyramid section
for i in range(len(bin_edges)-1):
V1 = (4*t_theta*(10**bin_edges[i])**3)/3
V2 = (4*t_theta*(10**bin_edges[i+1])**3)/3
Vol = V2 - V1
volume.append(Vol)
# volume of cube
# for i in range(len(bin_edges)-1):
# V1 = (2*np.sqrt(t_theta)*10**max(bin_edges))**2 * (10**(bin_edges[i+1]) - 10**(bin_edges[i]))
# volume.append(V1)
rho = []
rho10 = np.zeros(len(volume))
for i in range(len(volume)):
density = hist[i]/volume[i]
rho10[i] = density
rho.append(np.log10(density)) # In units number of stars/pc^3
print(rho)
bin_10 = 10**bin_edges[1:]
print( len(bin_10), len(rho10))
''' Least squares fitting '''
def f(Par,z):
return Par[0]*np.exp(-z/Par[1])
mpar, cpar, empar, ecpar = [], [], [], []
linear = odrpack.Model(f)
mydata = odrpack.RealData(bin_10[5:12], rho10[5:12])#, sy=df2['feh_err'])
myodr = odrpack.ODR(mydata, linear, beta0=[1e-3, 500],maxit=20000)
myoutput = myodr.run()
mpar.append(myoutput.beta[0])
cpar.append(myoutput.beta[1])
empar.append(myoutput.sd_beta[0])
ecpar.append(myoutput.sd_beta[1])
# myoutput.pprint()
b = bin_10[14:]
b = np.delete(b,[1])
r = rho10[14:]
r = np.delete(r,[1])
def f1(Par,z):
return Par[0]*np.exp(-z/Par[1])
mpar1, cpar1, empar1, ecpar1 = [], [], [], []
linear1 = odrpack.Model(f1)
mydata1 = odrpack.RealData(b, r)#, sy=df2['feh_err'])
myodr1 = odrpack.ODR(mydata1, linear1, beta0=[5e-4, 1000],maxit=20000)
myoutput1 = myodr1.run()
mpar1.append(myoutput1.beta[0])
cpar1.append(myoutput1.beta[1])
empar1.append(myoutput1.sd_beta[0])
ecpar1.append(myoutput1.sd_beta[1])
# myoutput1.pprint()
# def f2(Par,z):
# return Par[0]*np.exp(-z/Par[1])
# mpar2, cpar2, empar2, ecpar2 = [], [], [], []
# linear2 = odrpack.Model(f2)
# mydata2 = odrpack.RealData(bin_10[4:], rho10[4:])#, sy=df2['feh_err'])
# myodr2 = odrpack.ODR(mydata2, linear2, beta0=[1e-4, 600],maxit=20000)
# myoutput2 = myodr2.run()
# mpar2.append(myoutput2.beta[0])
# cpar2.append(myoutput2.beta[1])
# empar2.append(myoutput2.sd_beta[0])
# ecpar2.append(myoutput2.sd_beta[1])
# myoutput2.pprint()
plt.figure()
plt.scatter(bin_10,rho)
plt.scatter(bin_10[5:12],rho[5:12],color='r')
plt.scatter(bin_10[13:],rho[13:],color='y')
# plt.scatter(b,np.log10(r))
plt.plot(bin_10,np.log10(myoutput.beta[0]*np.exp(-bin_10/myoutput.beta[1])),color='orange', \
label=r'$\rho_{0} =$ %.4g pc$^{-3}$, H$_{\rm{z}} = $ %.4g pc'%(mpar[0],cpar[0]))
plt.plot(bin_10,np.log10(myoutput1.beta[0]*np.exp(-bin_10/myoutput1.beta[1])),color='orange',linestyle='--', \
label=r'$\rho_{0} =$ %.4g pc$^{-3}$, H$_{\rm{z}} = $ %.4g pc'%(mpar1[0],cpar1[0]))
plt.ylim(-7.75,-3.75)
plt.xlim(100, 5100)
plt.tick_params(labelsize=15)
plt.ylabel(r'Log Space Density',fontsize=20)
plt.xlabel(r'Z [pc]',fontsize=20)
plt.legend(prop={'size':10})
plt.tight_layout()
# plt.savefig('/home/bmr135/Dropbox/K2Poles/pop_trends/211117/C3_Gilmore_Reid.png')
# plt.show()
def space_density2(obs):
''' Improved calculation of the volume for determining the space density '''
theta = np.deg2rad(np.sqrt(116))
t1 = theta/2.0
alpha_c3 = np.deg2rad(61.4) # degrees
alpha_c6 = np.deg2rad(50.4) # degrees
bins = np.linspace(np.log10(100),np.log10(5000),19)
Z = np.log10(np.abs(obs['Z'])*1000)
hist, bin_edges = np.histogram(Z, bins = bins)
print(bin_edges)
print(hist)
volume = []
for i in range(len(bin_edges)-1):
x2 = lambda x: ((10**bin_edges[i])**3)*(1-np.cos(theta))*np.cos(t1)*(np.sin(alpha_c3 + t1 - x)**-3)
x3 = lambda xa: ((10**bin_edges[i+1])**3)*(1-np.cos(theta))*np.cos(t1)*(np.sin(alpha_c3 + t1 - xa)**-3)
vol1 = integrate.quad(x2,0,theta)
vol2 = integrate.quad(x3,0,theta)
Vol = vol2[0] - vol1[0]
volume.append(Vol)
print(volume)
rho = []
rho10 = np.zeros(len(volume))
for i in range(len(volume)):
density = hist[i]/volume[i]
rho10[i] = density
rho.append(np.log10(density)) # In units number of stars/pc^3
print(rho)
bin_10 = 10**bin_edges[1:]
# x = pd.DataFrame()
# x['Z'] = bin_10
# x['log_rho'] = np.log10(rho10)
# x['log_sig_rho'] = np.log10(np.sqrt(hist)/volume)
# x['rho'] = 10**np.log10(rho10)
# x['sig_rho'] = 10**np.log10(np.sqrt(hist)/volume)
# print(x)
# x.to_csv('/home/ben/Dropbox/K2Poles/pop_trends/Ioana_Scale_Heights/C6',index=False)
sig_rho = np.sqrt(hist)/volume
''' Least squares fitting '''
def f(Par,z):
return Par[0]*np.exp(-z/Par[1])
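    # Exponential disc model: rho(z) = rho_0 * exp(-z / H_z),
    # with Par[0] = rho_0 (stars pc^-3) and Par[1] = H_z, the scale height in pc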
mpar, cpar, empar, ecpar = [], [], [], []
linear = odrpack.Model(f)
mydata = odrpack.RealData(bin_10[6:11], rho10[6:11], sy=sig_rho[6:11])
myodr = odrpack.ODR(mydata, linear, beta0=[1e-3, 500],maxit=20000)
myoutput = myodr.run()
mpar.append(myoutput.beta[0])
cpar.append(myoutput.beta[1])
empar.append(myoutput.sd_beta[0])
ecpar.append(myoutput.sd_beta[1])
# myoutput.pprint()
b = bin_10[12:]
# b = np.delete(b,[1])
r = rho10[12:]
# r = np.delete(r,[1])
sig = sig_rho[12:]
# sig = np.delete(sig,[1])
def f1(Par,z):
return Par[0]*np.exp(-z/Par[1])
mpar1, cpar1, empar1, ecpar1 = [], [], [], []
linear1 = odrpack.Model(f1)
mydata1 = odrpack.RealData(b, r, sy=sig)
myodr1 = odrpack.ODR(mydata1, linear1, beta0=[5e-4, 1000],maxit=20000)
myoutput1 = myodr1.run()
mpar1.append(myoutput1.beta[0])
cpar1.append(myoutput1.beta[1])
empar1.append(myoutput1.sd_beta[0])
ecpar1.append(myoutput1.sd_beta[1])
# myoutput1.pprint()
plt.figure()
plt.scatter(bin_10,rho)
# plt.scatter(bin_10[6:11],rho[6:11],color='r')
# plt.scatter(bin_10[12:16],rho[12:16],color='y')
# plt.scatter(b,np.log10(r))
plt.plot(bin_10,np.log10(myoutput.beta[0]*np.exp(-bin_10/myoutput.beta[1])),color='orange', \
label=r'$\rho_{0} =$ %.4g pc$^{-3}$, H$_{\rm{z}} = $ %.4g pc'%(mpar[0],cpar[0]))
plt.plot(bin_10,np.log10(myoutput1.beta[0]*np.exp(-bin_10/myoutput1.beta[1])),color='orange',linestyle='--', \
label=r'$\rho_{0} =$ %.4g pc$^{-3}$, H$_{\rm{z}} = $ %.4g pc'%(mpar1[0],cpar1[0]))
plt.ylim(-7.75,-3.5)
plt.xlim(100, 5100)
plt.tick_params(labelsize=15)
plt.ylabel(r'Log Space Density',fontsize=20)
plt.xlabel(r'Z [pc]',fontsize=20)
plt.legend(prop={'size':10})
plt.tight_layout(rect=[0, 0.03, 1, 0.95])
if __name__ == "__main__":
C3_True = pd.read_csv('.../C3')
C3_spec = pd.read_csv('.../C3_spectro')
C6_True = pd.read_csv('.../C6')
C6_spec = pd.read_csv('.../C6_spectro')
space_density2(C6_True)
space_density2(C3_True)
plt.show()
| [
"matplotlib"
] |
5723b2dd056a9bec930b444ed588ddebe69b5710 | Python | ANU-NSC-Strategy-Statecraft-Cyberspace/balkanisation | /balkanisation.py | UTF-8 | 1,964 | 3.015625 | 3 | [
"MIT"
] | permissive | from context import Context
import numpy as np
import random
import matplotlib.pyplot as plt
import matplotlib.animation as animation
np.random.seed(0)
random.seed(0)
class Parameters:
def __init__(self):
# Simulation settings
self.user_prob_block_threat = 0.0 # Probability that an ordinary user will block a threat
self.country_prob_block_threat = 1.0 # Probability that a country will block a threat
self.edges_per_node = 1 # Number of edges per node: 1 is a tree, 2 is a scale-free network. Any higher is too dense to be interesting.
self.num_users = 100
self.num_countries = 10
# Display settings
self.skip_frames = 2 # Frames of animation to run between packet sends (higher numbers makes for smoother (but slower) video)
self.steps = 10000 # How many packet sends to do in total
self.show = True # Whether to render a matplotlib animation on-screen
self.save = False # Whether to save the matplotlib animation as a video
self.fps = 200 # Animation frames-per-second
self.save_file = 'balkanisation.mp4'
self.plot = True # Whether to plot the degree of balkanisation over time after the simulation concludes
param = Parameters()
context = Context(param)
make_animation = param.show or param.save
fig, ax = plt.subplots()
def update(i):
ax.clear()
context.update(i, draw=make_animation, plot=param.plot)
if make_animation:
graph_ani = animation.FuncAnimation(fig, update, param.steps * param.skip_frames, init_func=lambda: None, interval=1000//param.fps, blit=False, repeat=False)
else:
for i in range(param.steps * param.skip_frames):
update(i)
if param.show:
plt.show()
if param.save:
graph_ani.save(param.save_file, fps=param.fps)
plt.close()
if param.plot:
context.plot()
plt.show()
plt.close()
| [
"matplotlib"
] |
33d75d78a4fd5a4f49dbd9ce8073a084b0b76899 | Python | tsuish/bittsui_ever.github.io | /bill/bill-py/bill-2019-08.py | UTF-8 | 1,748 | 3.5625 | 4 | [] | no_license | # -*- coding:utf8 -*-
# Plot August 2019 daily spending as a bar chart
from matplotlib import pyplot as plt
import matplotlib
# Use a font that can render the Chinese labels
matplotlib.rc("font", family="SimHei", weight='bold')
# a holds the dates (August 2019),
# b holds the corresponding daily spending (unit: yuan)
a = ["1日", "2日", "3日", "4日", "5日", "6日", "7日", "8日", "9日", "10日",
"11日", "12日", "13日", "14日", "15日", "16日", "17日", "18日", "19日", "20日",
"21日", "22日", "23日", "24日", "25日", "26日", "27日", "28日", "29日", "30日", "31日"]
b = [65, 21, 26.5, 36, 22.5, 15.9, 23, 20, 23, 8,
20, 18, 18, 8.5, 17.29, 40, 16.68, 15.1, 14, 21,
18, 60.5, 10.1, 24.79, 30.57, 19.5, 40.65, 13, 32.9, 21.79, 14]
# Set the figure size
# (the size must be set before any plotting calls)
plt.figure(figsize=(15, 8), dpi=80)
plt.xlabel('横轴:日期', fontsize=15, color="r")
plt.ylabel('纵轴:金额\n 单位:元', fontsize=15, color="r")
# Draw the bar chart
# the first two arguments are the x positions and the bar heights (both numeric arrays)
# width sets the bar width
# color sets the bar colour
plt.bar(range(len(a)), b, width=0.8, color="orange", label="2019年8月账单:总计736.27")
# Add the legend
# (after setting label= above, legend() must still be called for it to show)
plt.legend()
plt.title("2019年8月账单", fontsize=18)
print(type(int(b[0])))
for i in a:
myi = int(i.split("日")[0])
plt.text(myi-1.3, int(b[myi-1])+0.8, str(b[myi-1]), fontsize=13)
# Set the x-axis ticks
# the first argument gives the tick positions,
# the second gives the labels shown at those positions
plt.xticks(range(len(a)), a)
plt.grid(axis="y", linestyle=":", color="g")
# Show the figure
plt.show()
| [
"matplotlib"
] |
40724c76989c2a4c85d957019591ba2596a26671 | Python | jameszrx/Data-Analysis-Project | /Regression_Validation/Database_backup_dataset/network-3-piecewise.py | UTF-8 | 2,005 | 3 | 3 | [
"Apache-2.0"
] | permissive | # refer:
# http://www.programcreek.com/python/example/83247/sklearn.cross_validation.KFold
# https://onlinecourses.science.psu.edu/stat501/node/310
# http://scikit-learn.org/stable/auto_examples/linear_model/plot_ols.html
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model, cross_validation
def weekday(str):
if(str=='Monday'):
return 1;
elif(str=='Tuesday'):
return 2;
elif(str=='Wednesday'):
return 3;
elif(str=='Thursday'):
return 4;
elif(str=='Friday'):
return 5;
elif(str=='Saturday'):
return 6;
else:
return 7;
def getnumber(str):
p=str.split('_');
return (float)(p[len(p)-1]);
def select(dataset, index):
res=[]
for x in index:
res.append(dataset[x])
return res
f=open('/Users/kay/Downloads/network_backup_dataset.csv','r')
lines=f.readlines()[2:18590]
f.close()
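# Fit a separate linear model for each of the 5 workflows (piecewise over workflow id),
# assessed with 10-fold cross-validation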
for workflow in range(5):
feature_wf=[]
result_wf=[]
for line in lines:
tmp=[]
p=line.split(',');
if(getnumber(p[3])==workflow):
tmp.append(float(p[0]))
tmp.append(weekday(p[1]))
tmp.append(float(p[2]))
tmp.append(getnumber(p[3]))
tmp.append(getnumber(p[4]))
tmp.append(float(p[6]))
feature_wf.append(tmp)
result_wf.append(float(p[5]))
kf = cross_validation.KFold(len(feature_wf), 10, True)
rmse=[]
score=[]
reg=linear_model.LinearRegression()
for train, test in kf:
feature_wf_train=select(feature_wf, train)
feature_wf_test=select(feature_wf, test)
result_wf_train=select(result_wf, train)
result_wf_test=select(result_wf, test)
reg.fit(feature_wf_train, result_wf_train)
predict=reg.predict(feature_wf)
rmse.append(np.sqrt((predict - result_wf) ** 2).mean())
score.append(reg.score(feature_wf, result_wf))
print 'workflow id:', workflow
print rmse
plt.show()
| [
"matplotlib"
] |
2c1bf9abeb707fe4534a4e01159d5cc65a2d577a | Python | tamily-duoy/pyfile | /untitled/00ll/2016.09.10/2016.10.31/kMeans_my.py | UTF-8 | 10,500 | 3.203125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
In the general case, we have a "target function" that we want to minimize,
and we also have its "gradient function". Also, we have chosen a starting
value for the parameters "theta_0".
"""
#### Support functions for k-means clustering
from numpy import *
#import kMeans_my
#from numpy import *
#import kMeans_my
#from numpy import *
### Load the data
def loadDataSet(fileName): #general function to parse tab -delimited floats
dataMat = [] #assume last column is target value
fr = open(fileName)
for line in fr.readlines():
curLine = line.strip().split('\t')
fltLine = map(float,curLine) #map all elements to float()
dataMat.append(fltLine)
return dataMat
#dataMat = mat(kMeans_my.loadDataSet('testSet.txt'))
# Euclidean distance between two vectors.
def distEclud(vecA,vecB):
return sqrt(sum(power(vecA-vecB,2)))
## Build the cluster centroids: randCent initialises k random centroids
# the input data is a NumPy matrix.
def randCent(dataSet,k):
n = shape(dataSet)[1]
centroids = mat(zeros((k,n)))
for j in range(n):
        minJ = min(dataSet[:,j]) # minimum value of column j
        rangeJ = float(max(dataSet[:,j])-minJ) # range (max - min) of column j
        # draw random centroid coordinates that lie within the bounds of the data set
        centroids[:,j] = minJ + rangeJ * random.rand(k,1)
    return centroids # return the matrix of random centroids
import kMeans_my
from numpy import *
dataMat = mat(kMeans_my.loadDataSet('testSet.txt'))
# min(dataMat[:,0])
# min(dataMat[:,1])
# max(dataMat[:,1])
# max(dataMat[:,0])
#kMeans_my.randCent(dataMat,2)
#kMeans_my.distEclud(dataMat[0],dataMat[1])
### The k-means clustering algorithm
def kMeans(dataSet,k,distMeas=distEclud,createCent = randCent):
    m = shape(dataSet)[0] # number of data points m
    clusterAssment = mat(zeros((m,2))) # records (cluster index, squared error) for every point
    centroids = createCent(dataSet,k) # start from a random set of centroids
clusterChanged = True
while clusterChanged:
clusterChanged = False
        for i in range(m): # assign every point to its closest centroid
minDist= inf
minIndex = -1
            for j in range(k): # loop over the centroids to find the nearest one
                distJI = distMeas(centroids[j,:],dataSet[i,:]) # distance between point i and centroid j
if distJI < minDist:
minDist = distJI
minIndex = j
if clusterAssment[i,0] != minIndex:
clusterChanged =True
clusterAssment[i,:] = minIndex, minDist**2
print centroids
        for cent in range(k): # recompute the position of each centroid
ptsInClust = dataSet[nonzero(clusterAssment[:,0].A==cent)[0]]
# print clusterAssment[:,0].A;
#print clusterAssment[:,0].A==cent;
print nonzero(clusterAssment[:,0].A==cent);
print nonzero(clusterAssment[:,0].A==cent)[0];
centroids[cent,:] = mean(ptsInClust,axis=0)
print centroids[cent,:]
return centroids,clusterAssment
reload(kMeans_my)
dataMat = mat(kMeans_my.loadDataSet('testSet.txt'))
# myCentroids, clusteAssing = kMeans_my.kMeans(dataMat, 4)
#kMeans(dataMat,4);
### Bisecting k-means clustering
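# Bisecting k-means: start with a single cluster containing every point, then repeatedly
# run 2-means on the cluster whose split gives the lowest total SSE, until k clusters exist.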
def biKmeans(dataSet,k,distMeas=distEclud):
m = shape(dataSet) [0]
clusterAssment = mat(zeros((m,2)))
    centroid0 = mean(dataSet,axis=0).tolist()[0] # initial centroid: column-wise mean of the whole data set (tolist() gives a plain list)
centList = [centroid0]
    for j in range(m): # for every data point
        clusterAssment[j,1] = distMeas(mat(centroid0),dataSet[j,:])**2 # squared distance to the initial centroid
    ### repeatedly split clusters
while (len(centList) < k):
lowestSSE = inf
for i in range(len(centList)): # len(centList) <= k
            ptsInCurrCluster = dataSet[nonzero(clusterAssment[:,0].A == i)[0],:] # all points currently assigned to cluster i, treated as a small data set
            centroidMat, splitClustAss = kMeans(ptsInCurrCluster,2,distMeas) # split cluster i in two with k-means; returns the centroids and the (index, error) assignments
            sseSplit = sum(splitClustAss[:, 1]) # SSE of the split cluster
            sseNotSplit = sum(clusterAssment[nonzero(clusterAssment[:,0].A != i)[0],1]) # SSE of all the other clusters
print "sseSplit, and notSplit:",sseSplit,sseNotSplit
            # (centroidMat, splitClustAss returned by kMeans above)
            #### remember the split that gives the lowest total SSE
if (sseSplit + sseNotSplit) < lowestSSE:
bestCentToSplit = i
bestNewCents = centroidMat
bestClustAss = splitClustAss.copy()
lowestSSE = sseSplit + sseNotSplit
        bestClustAss[nonzero(bestClustAss[:,0].A==1)[0],0] = len(centList) # relabel the two result clusters (kMeans labelled them 0 and 1)
bestClustAss[nonzero(bestClustAss[:,0].A==0)[0],0] = bestCentToSplit
print 'the bestCentToSplit is:' , bestCentToSplit
print 'the len of bestClustAss is:' , len(bestClustAss)
        centList[bestCentToSplit] = bestNewCents[0,:] # replace the split cluster's centroid with the first new centroid
        centList.append(bestNewCents[1,:]) # append the second new centroid to the centroid list
        clusterAssment[nonzero(clusterAssment[:,0].A == bestCentToSplit)[0],:] = bestClustAss # overwrite the split cluster's assignments with the new ones
    return mat(centList), clusterAssment # return the centroid coordinates and the cluster assignments
reload(kMeans_my)
dataMat3 = mat(kMeans_my.loadDataSet('testSet2.txt'))
centList, myNewAssments = kMeans_my.biKmeans(dataMat3,3)
# # centList
# ###雅虎提供API,给定地址返回地址对应的经度纬度。
# import urllib
# import json
# ###对一个地址进行地理编码
# def geoGrab(stAddress, city): #从雅虎返回一个字典
# apiStem = 'http://where.yahooapis.com/geocode?' #create a dict and constants for the goecoder
# params = {} #创建字典,为字典设置值。
# params['flags'] = 'J'#JSON return type
# params['appid'] = 'aaa0VN6k'
# params['location'] = '%s %s' % (stAddress, city)
# url_params = urllib.urlencode(params) #字典转换为字符串。
# yahooApi = apiStem + url_params #print url_params
# print yahooApi
# c=urllib.urlopen(yahooApi) #打开URL读取,URL传递字符串
# return json.loads(c.read()) #将json格式解码为字典。
#
# from time import sleep
# def massPlaceFind(fileName): #封装信息,并保存到文件。
# fw = open('places.txt', 'w') #打开一个以tab分隔的文本文件
# for line in open(fileName).readlines():
# line = line.strip()
# lineArr = line.split('\t')
# retDict = geoGrab(lineArr[1], lineArr[2]) #获取第2,3列结果
# if retDict['ResultSet']['Error'] == 0: #输出字典看有没有错误
# lat = float(retDict['ResultSet']['Results'][0]['latitude']) #若无错误,则读取经纬度。
# lng = float(retDict['ResultSet']['Results'][0]['longitude'])
# print "%s\t%f\t%f" % (lineArr[0], lat, lng)
# fw.write('%s\t%f\t%f\n' % (line, lat, lng)) #添加到原来对应的行,同时写到新文件中。
# else: print "error fetching"
# sleep(1) #休眠,防止频繁-调用。
# fw.close()
# #import kMeans_my
# #geoResults = kMeans_my.geoGrab('1 VA Center','Augusta,ME')
# #geoResults
#
#
# #####对地理坐标进行聚类
# ###球面距离计算
# def distSLC(vecA, vecB): #球面余弦定理
# a = sin(vecA[0,1]*pi/180) * sin(vecB[0,1]*pi/180) #pi/180转换为弧度 ,pi ,numpy
# b = cos(vecA[0,1]*pi/180) * cos(vecB[0,1]*pi/180) * \
# cos(pi * (vecB[0,0]-vecA[0,0]) /180)
# return arccos(a + b)*6371.0
#
# ####簇绘图函数
# import matplotlib
# import matplotlib.pyplot as plt
# def clusterClubs(numClust=5): #希望得到簇数目为5
# datList = [] # 初始化,创建新列表
# #####将文本文件的解析、聚类及画图都封装在一起
# ##文件解析
# for line in open('places.txt').readlines(): #对于读取文件的行,for循环。
# lineArr = line.split('\t')
# datList.append([float(lineArr[4]), float(lineArr[3])]) #获取文本第5,4列;这两列分别对应着经度和纬度。并添加到创建的新列表datList中。
# dataMat = mat(datList) #将基于经纬度创建的列表datList矩阵化。
# ##聚类
# myCentroids, clustAssing = biKmeans(dataMat, numClust, distMeas=distSLC) #调用Kmeans函数,并使用球面余弦定理计算距离,返回myCentroids, clustAssing。
# fig = plt.figure() #可视化簇和簇质心。
# ####为了画出这幅图,首先创建一幅画,一个矩形
# rect = [0.1, 0.1, 0.8, 0.8] #创建矩形。
# scatterMarkers = ['s', 'o', '^', '8', 'p', \
# 'd', 'v', 'h', '>', '<'] #使用唯一标记来标识每个簇。
# axprops = dict(xticks=[], yticks=[])
# ax0 = fig.add_axes(rect, label='ax0', **axprops) #绘制一幅图,图0
# imgP = plt.imread('Portland.png') #调用 imread 函数,基于一幅图像,来创建矩阵。
# ax0.imshow(imgP) #调用imshow ,绘制(基于图像创建)矩阵的图。
# ax1 = fig.add_axes(rect, label='ax1', frameon=False) #绘制衣服新图,图1。 作用:使用两套坐标系统(不做任何偏移或缩放)。
# ###遍历每个簇,把它画出来。
# for i in range(numClust): # 簇号循环。
# ptsInCurrCluster = dataMat[nonzero(clustAssing[:, 0].A == i)[0], :] #挑选出该簇所有点。
# markerStyle = scatterMarkers[i % len(scatterMarkers)] #从前面创建的标记列表中获得标记。使用索引i % len(scatterMarkers)选择标记形状。 作用:更多的图可以使用这些标记形状。
# ax1.scatter(ptsInCurrCluster[:, 0].flatten().A[0], ptsInCurrCluster[:, 1].flatten().A[0], marker=markerStyle,s=90)
# #每个簇的所有点ptsInCurrCluster,根据标记画出图形。
# ax1.scatter(myCentroids[:, 0].flatten().A[0], myCentroids[:, 1].flatten().A[0], marker='+', s=300) #使用 + 标记来表示簇中心,并在图中显示。
# plt.show()
#
| [
"matplotlib"
] |
2580468492e0c0287d73f8e243121a17831e7eb8 | Python | mattbellis/matts-work-environment | /PyROOT/CoGeNT/summarize_log_files_plot_by_energy_slice.py | UTF-8 | 3,694 | 2.515625 | 3 | [] | no_license | #!/usr/bin/env python
import sys
import numpy as np
import matplotlib.pyplot as plt
import scipy.stats.stats as stats
from math import *
################################################################################
# Parse the file name
################################################################################
################################################################################
# main
################################################################################
def main():
par_names = ['nflat', 'nexp', 'exp_slope', 'flat_mod_amp', 'exp_mod_amp', 'cg_mod_amp', 'flat_mod_phase', 'exp_mod_phase', 'cg_mod_phase']
par_names_for_table = ['$N_{flat}$', '$N_{exp}$', '$\\alpha$', '$A_{flat}$', '$A_{exp}$', '$A_{cg}$', '$\phi_{flat}$', '$\phi_{exp}$', '$\phi_{cg}$']
info_flags = ['e_lo', 'exponential_modulation', 'flat_modulation', 'cosmogenic_modulation', 'add_gc', 'gc_flag']
values = []
nlls = []
file_info = []
for i,file_name in enumerate(sys.argv):
#print file_name
#print len(nlls)
if i>0:
values.append({})
file_info.append({})
infile = open(file_name)
for line in infile:
if 'none' in line:
vals = line.split()
name = vals[0]
#par_names.index(name)
values[i-1][name] = [float(vals[2]),float(vals[4])]
#nlls.append(float(vals[3]))
#print line
elif 'likelihood:' in line:
vals = line.split()
values[i-1]['nll'] = float(vals[3])
elif 'INFO:' in line:
vals = line.split()
#print vals
file_info[i-1][vals[1]] = float(vals[2])
values[i-1][vals[1]] = float(vals[2])
#print "NLLS"
#print nlls
#print file_info
for f in file_info:
print f
x = []
y = []
xerr = []
yerr = []
#print values
for v in values:
#print v
if v['flat_modulation']==0:
elo = v['e_lo']
ehi = v['e_hi']
nll0 = v['nll']
for vv in values:
#print vv
if vv['flat_modulation']==1.0 and elo == vv['e_lo'] and ehi == vv['e_hi']:
nll1 = vv['nll']
width = ehi-elo
width_cutoff = 0.8
if width>width_cutoff-0.05 and width<width_cutoff+0.05:
Dpct = 100.0*stats.chisqprob(2*abs(nll0-nll1),2)
x.append((ehi+elo)/2.0)
xerr.append((ehi-elo)/2.0)
y.append(Dpct)
yerr.append(0.001)
print "%4.1f %4.1f %f %f %f" % (elo,ehi,nll0,nll1,Dpct)
############################################################################
# Plot the data
############################################################################
fig1 = plt.figure(figsize=(12, 8), dpi=90, facecolor='w', edgecolor='k')
subplots = []
for i in range(1,2):
division = 110 + i
subplots.append(fig1.add_subplot(division))
plot = plt.errorbar(x, y, xerr=xerr, yerr=yerr, fmt='o')
subplots[0].set_xlim(0,3.5)
subplots[0].set_ylim(0,100.0)
#exit()
plt.show()
################################################################################
################################################################################
if __name__ == "__main__":
main()
| [
"matplotlib"
] |
52ea30b76e11f87527f3df0f5dc8d7642e2713bf | Python | kongxiangcong/crocodile | /crocodile/bins.py | UTF-8 | 20,863 | 2.671875 | 3 | [
"Apache-2.0"
] | permissive |
import datetime
from itertools import product, combinations
import numpy
import random
from simanneal import Annealer
import sys
import time
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
class Bin:
def __init__(self, iu0, iu1, iv0, iv1, iw0, iw1, bins,
wplanes=1, uchunks=1, vchunks=1):
"""
Constructs a bin. In the process the bin will be reduced as much
as possible, and visibility and cost statistics will be cached.
"""
self.iu0 = iu0
self.iu1 = iu1
self.iv0 = iv0
self.iv1 = iv1
self.iw0 = iw0
self.iw1 = iw1
self.wplanes = wplanes
self.uchunks = uchunks
self.vchunks = vchunks
# Automatically reduce and determine statistics
if not self._reduce(bins.density):
self.iu1 = self.iu0
self.nvis = 0
self.cost = 0
self.cost_direct = 0
else:
uvw = bins.bin_to_uvw(numpy.arange(self.iw0, self.iw1), coords=2)
wsum = self._calc_nvis(bins.density,(1,2))
wcounts = numpy.transpose([uvw, wsum])
self.nvis = self._calc_nvis(bins.density)
self.cost = self._calc_cost(bins, wcounts)
def __repr__(self):
return "Bin(%d, %d, %d, %d, %d, %d, %d)" % \
(self.iu0, self.iu1, self.iv0, self.iv1, self.iw0, self.iw1, self.wplanes)
def is_zero(self):
""" Is this bin empty? """
return self.nvis == 0 or self.iu0 >= self.iu1 or self.iv0 >= self.iv1 or self.iw0 >= self.iw1
def _reduce(self, density):
"""Reduce bin size as much as possible without reducing the number of
visibilities contained."""
while self.iu0 < self.iu1 and numpy.sum(density[self.iw0:self.iw1, self.iv0:self.iv1, self.iu0]) == 0:
self.iu0 += 1
while self.iu0 < self.iu1 and numpy.sum(density[self.iw0:self.iw1, self.iv0:self.iv1, self.iu1-1]) == 0:
self.iu1 -= 1
while self.iv0 < self.iv1 and numpy.sum(density[self.iw0:self.iw1, self.iv0, self.iu0:self.iu1]) == 0:
self.iv0 += 1
while self.iv0 < self.iv1 and numpy.sum(density[self.iw0:self.iw1, self.iv1-1, self.iu0:self.iu1]) == 0:
self.iv1 -= 1
while self.iw0 < self.iw1 and numpy.sum(density[self.iw0, self.iv0:self.iv1, self.iu0:self.iu1]) == 0:
self.iw0 += 1
while self.iw0 < self.iw1 and numpy.sum(density[self.iw1-1, self.iv0:self.iv1, self.iu0:self.iu1]) == 0:
self.iw1 -= 1
# Empty?
return not (self.iu0 >= self.iu1 or self.iv0 >= self.iv1 or self.iw0 >= self.iw1)
def _calc_nvis(self, density, axis=None):
""" Determine how many visibilities lie in this bin """
return numpy.sum(density[self.iw0:self.iw1, self.iv0:self.iv1, self.iu0:self.iu1], axis=axis)
def _calc_cost(self, bins, wcounts):
"""Calculate cost for gridding this bin. Automatically determines
optimal number of w-planes. """
if self.is_zero():
return 0
wplanes = max(1, self.wplanes)
uchunks = self.uchunks
vchunks = self.vchunks
cost = self._calc_cost_wplane(bins, wcounts, wplanes, uchunks, vchunks)
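        # Greedy local search: starting from the current configuration, adjust
        # uchunks, vchunks and wplanes one step at a time while the cost keeps decreasing.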
# Determine number of u/v chunks
cost0 = cost
while uchunks > 1:
ncost = self._calc_cost_wplane(bins, wcounts, wplanes,
uchunks-1, vchunks)
if ncost > cost: break
uchunks -= 1
cost = ncost
if cost == cost0:
while True:
ncost = self._calc_cost_wplane(bins, wcounts, wplanes,
uchunks+1, vchunks)
if ncost > cost: break
uchunks += 1
cost = ncost
cost0 = cost
while vchunks > 1:
ncost = self._calc_cost_wplane(bins, wcounts, wplanes,
uchunks, vchunks-1)
if ncost > cost: break
vchunks -= 1
cost = ncost
if cost == cost0:
while True:
ncost = self._calc_cost_wplane(bins, wcounts, wplanes,
uchunks, vchunks+1)
if ncost > cost: break
vchunks += 1
cost = ncost
# Decrease benificial? Always try up to steps of 2
while wplanes > 1:
ncost = self._calc_cost_wplane(bins, wcounts, wplanes-1,
uchunks, vchunks)
if ncost > cost:
if wplanes <= 2: break
ncost = self._calc_cost_wplane(bins, wcounts, wplanes-2,
uchunks, vchunks)
if ncost > cost: break
wplanes -= 1
wplanes -= 1
cost = ncost
# Increase benificial? Test quite some way out if we have a
# lot of w-planes already
if wplanes == max(1, self.wplanes):
success = True
while success:
success = False
for i in range(2+wplanes//10):
ncost = self._calc_cost_wplane(bins, wcounts, wplanes+1+i,
uchunks, vchunks)
if ncost <= cost:
success = True
wplanes += 1+i
cost = ncost
break
# Check whether it is better to not do w-stacking
self.cost_direct = self._calc_cost_direct(bins, wcounts)
if self.cost_direct < cost:
self.wplanes = 0
self.uchunks = 1
self.vchunks = 1
return self.cost_direct
else:
self.wplanes = wplanes
self.uchunks = uchunks
self.vchunks = vchunks
return cost
def _calc_cost_direct(self, bins, wcounts):
"""Calculate cost for gridding this bin directly (without w-plane
trickery)"""
if self.is_zero():
return 0
# Determine grid coordinates
u0, v0, w0 = bins.bin_to_uvw(numpy.array([self.iu0, self.iv0, self.iw0]))
u1, v1, w1 = bins.bin_to_uvw(numpy.array([self.iu1, self.iv1, self.iw1]))
#print("w: %.f - %.f" % (w0, w1))
# Determine w-kernel size for w-planes and for grid transfer
args = bins.args
ws, nvis = numpy.transpose(wcounts)
# w = max(numpy.abs(w0), numpy.abs(w1))
u_w = numpy.sqrt( (numpy.abs(ws) * args.theta/2)**2 +
(numpy.sqrt(numpy.abs(ws))**3 * args.theta / 2 / numpy.pi / args.epsw) )
# Be rather pessimistic about required w-size
nw = 1 + 2 * numpy.ceil(u_w / args.ustep)
#print("u_w = %f (%d px)" % (u_w, numpy.ceil(u_w / args.wstep)))
# Kernel pixel sizes - we need to account for the w-kernel
# (dependent on w) and the A-kernel (assumed constant)
#print("nwkernel = %d" % numpy.sqrt(numpy.ceil(u_w / args.ustep)**2))
nkernel2 = numpy.ceil(numpy.sqrt(nw**2 + args.asize**2))**2
#print("direct:")
#print(numpy.transpose([ws, nvis, nw, 8 * nkernel2 * nvis]))
#print("nkernel = %d" % numpy.sqrt(nkernel2))
# Determine cost
c_Grid = numpy.sum(8 * nvis * nkernel2)
#print("c_Grid = %.1f kflop" % (c_Grid / 1000))
return c_Grid
def _calc_cost_wplane(self, bins, wcounts, wplanes, uchunks, vchunks):
"""Calculate cost for gridding this bin using a given number of
w-planes and u/v chunks"""
if self.is_zero():
return 0
#print("wplanes = %d:" % wplanes)
# Determine grid coordinates
u0, v0, w0 = bins.bin_to_uvw(numpy.array([self.iu0, self.iv0, self.iw0]))
u1, v1, w1 = bins.bin_to_uvw(numpy.array([self.iu1, self.iv1, self.iw1]))
# Bin coordinate
#print("iu: %.f - %.f" % (self.iu0, self.iu1), end=' ')
#print("iv: %.f - %.f" % (self.iv0, self.iv1), end=' ')
#print("iw: %.f - %.f" % (self.iw0, self.iw1))
# Real coordinates
#print("u: %.f - %.f" % (u0, u1), end=' ')
#print("v: %.f - %.f" % (v0, v1), end=' ')
#print("w: %.f - %.f" % (w0, w1))
# Grid cell coordinates
args = bins.args
#print("cu: %.f - %.f" % (u0/args.ustep, u1/args.ustep), end=' ')
#print("cv: %.f - %.f" % (v0/args.ustep, v1/args.ustep), end=' ')
#print("cw: %.f - %.f" % (w0/args.wstep, w1/args.wstep))
# Determine w-kernel size for w-planes and for grid transfer
args = bins.args
ws, nvis = numpy.transpose(wcounts)
d_iw = self.iw1 - self.iw0
wp = numpy.arange(0, wplanes * d_iw, wplanes)
diwp = (wp - d_iw/2 - numpy.floor(wp/d_iw)*d_iw) / wplanes
dwp = diwp / d_iw * (w1 - w0)
# Determine (grid) size of kernel to bring w-plane to w=0 plane
w = max(numpy.abs(w0), numpy.abs(w1))
u_w = numpy.sqrt( (w * args.theta/2)**2 +
(numpy.sqrt(w)**3 * args.theta / 2 / numpy.pi / args.epsw) )
nw = 1 + 2 * numpy.ceil(u_w / args.ustep)
#print("w = %.1f, u_w = %.1f, nw = %d" % (w, u_w, nw))
# Determine size of kernel to w-project visibilities to their w-plane
d_w = numpy.abs(w1 - w0) / 2 / wplanes
u_d_w = numpy.sqrt( (numpy.abs(dwp) * args.theta/2)**2 +
(numpy.sqrt(numpy.abs(dwp))**3 * args.theta / 2 / numpy.pi / args.epsw) )
ndw = 1 + 2 * numpy.ceil(u_d_w / args.ustep)
#print("u_w = %f (%d px)" % (u_w, numpy.ceil(u_w / args.ustep)))
#print("u_d_w = %f (%d px)" % (u_d_w, numpy.ceil(u_d_w / args.ustep)))
# Kernel pixel sizes - we need to account for the w-kernel
# (dependent on w) and the A-kernel (assumed constant)
nkernel2 = ndw**2 + args.asize**2
nsb_kernel = numpy.sqrt(nw**2 + args.asize**2)
nsubgrid2 = numpy.ceil((u1-u0) / args.ustep / uchunks + nsb_kernel)* \
numpy.ceil((v1-v0) / args.ustep / vchunks + nsb_kernel)
# (not quite the same as with IDG!)
#print("nwkernel = %d" % numpy.sqrt(numpy.ceil(u_d_w / args.ustep)**2))
#print("nkernel = %d" % numpy.sqrt(nkernel2))
#print("nsubgrid = %d" % numpy.sqrt(nsubgrid2))
#print(numpy.transpose([dwp, nvis, ndw, 8 * nvis * nkernel2]))
# Determine cost
c_Grid = numpy.sum(8 * nvis * nkernel2)
c_FFT = uchunks * vchunks * \
5 * numpy.ceil(numpy.log(nsubgrid2)/numpy.log(2))*nsubgrid2 * (wplanes+1)
c_Add = uchunks * vchunks * \
8 * nsubgrid2 * wplanes
#print("nvis = %d" % self.nvis)
# print("wplanes=%d, uchunks=%d, vchunks=%d" % (wplanes, uchunks, vchunks))
# print("nw=%d" % (nw))
# print("c_Grid = %.1f kflop" % (c_Grid / 1000))
# print("c_FFT = %.1f kflop" % (c_FFT / 1000))
# print("c_Add = %.1f kflop" % (c_Add / 1000))
return c_Grid + c_FFT + c_Add
def split_u(self, at_u, bins):
""" Split the bin along a u-plane. Returns two new bins. """
assert(at_u >= self.iu0 and at_u < self.iu1)
bin1 = Bin(at_u, self.iu1, self.iv0, self.iv1, self.iw0, self.iw1, bins, self.wplanes)
bin2 = Bin(self.iu0, at_u, self.iv0, self.iv1, self.iw0, self.iw1, bins, self.wplanes)
return bin1, bin2
def split_v(self, at_v, bins):
""" Split the bin along a v-plane. Returns two new bins. """
assert(at_v >= self.iv0 and at_v < self.iv1)
bin1 = Bin(self.iu0, self.iu1, at_v, self.iv1, self.iw0, self.iw1, bins, self.wplanes)
bin2 = Bin(self.iu0, self.iu1, self.iv0, at_v, self.iw0, self.iw1, bins, self.wplanes)
return bin1, bin2
def split_w(self, at_w, bins):
""" Split the bin along a w-plane. Returns two new bins. """
assert(at_w >= self.iw0 and at_w < self.iw1)
bin1 = Bin(self.iu0, self.iu1, self.iv0, self.iv1, at_w, self.iw1, bins, self.wplanes)
bin2 = Bin(self.iu0, self.iu1, self.iv0, self.iv1, self.iw0, at_w, bins, self.wplanes)
return bin1, bin2
def merge(self, other, bins):
"""
Merge two bins. Note that the new bin might contain more
visibilities than both original bins combined.
"""
return Bin(min(self.iu0, other.iu0),
max(self.iu1, other.iu1),
min(self.iv0, other.iv0),
max(self.iv1, other.iv1),
min(self.iw0, other.iw0),
max(self.iw1, other.iw1),
bins,
self.wplanes)
def overlaps(self, other):
""" Checks whether two bins overlap """
return \
max(self.iu0, other.iu0) < min(self.iu1, other.iu1) and \
max(self.iv0, other.iv0) < min(self.iv1, other.iv1) and \
max(self.iw0, other.iw0) < min(self.iw1, other.iw1)
def distance(self, other):
du = max(0, max(self.iu0, other.iu0) - min(self.iu1, other.iu1))
dv = max(0, max(self.iv0, other.iv0) - min(self.iv1, other.iv1))
dw = max(0, max(self.iw0, other.iw0) - min(self.iw1, other.iw1))
return du*du + dv*dv + dw*dw
class BinSet(Annealer):
def __init__(self, bin_to_uvw, args, density,
initial_state=None,
name = datetime.datetime.now().strftime("%Y-%m-%dT%Hh%Mm%Ss"),
add_cost=0,
pop_method='random', merge_prop=4,
max_merge_distance=100, max_bins=100,
progress_image=None,
**kwargs):
"""Initialises a bin set for optimisation
        :param bin_to_uvw: Coordinate conversion rule
:param args: Gridding parameters
:param density: Array with grid visibility density
:param pop_method: How bins are selected for modification
('random' or 'biased')
:param merge_prop: Probability modifier for merges. Higher
          values mean that the optimisation prefers fewer bins.
:param progress_image: Write a visualisation of the progress
to this file name for every update (slow!)
"""
self.bin_to_uvw = bin_to_uvw
self.args = args
self.density = density
self.name = name
# Parameters
self.add_cost = add_cost
self.pop_method = pop_method
self.merge_prop = merge_prop
self.max_merge_distance = max_merge_distance
self.max_bins = max_bins
self.progress_image = progress_image
self.extra_energy_per_bin = 0.001
# Make bins
if initial_state is not None:
initial_state = [Bin(*coords, bins=self) for coords in initial_state]
initial_state = list(filter(lambda b: b.nvis > 0, initial_state))
super(BinSet, self).__init__(initial_state=initial_state, **kwargs)
self.copy_strategy = 'method'
# Find initial statistics
self.cost0 = sum(b.cost for b in self.state)
self.nvis0 = sum(b.nvis for b in self.state)
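        # Typical usage (sketch; the parameter values here are illustrative):
        #   bs = BinSet(bin_to_uvw, args, density, max_bins=50)
        #   bs.steps = 100000
        #   best_bins, best_energy = bs.anneal()   # anneal() is inherited from simanneal.Annealer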
@property
def bins(self):
return self.state
def pop_bin(self):
""" Remove a random bin from the bin set. """
if self.pop_method == 'random':
# Select entirely randomly - performs okay
return self.state.pop(int(random.uniform(0, len(self.state))))
elif self.pop_method == 'biased':
# Select in a biased fashion. Likely not worth the extra cost
totals = numpy.array([b.nvis for b in self.state])
totals = numpy.log(totals)
totals = numpy.cumsum(totals)
r = random.uniform(0, totals[-1])
i = numpy.searchsorted(totals, r, side='right')
return self.state.pop(i)
assert False, "unknown pop method %s" % self.pop_method
def push_bin(self,b):
""" Add a new bin to our bin set. Skips if empty """
if not b.is_zero():
assert b.nvis > 0
self.state.append(b)
def move(self):
""" Make a random move """
op = random.randint(0,6)
e = self.energy()
if op == 0:
b = self.pop_bin()
b0, b1 = b.split_u(random.randint(b.iu0, b.iu1-1), self)
assert b0.nvis + b1.nvis == b.nvis
self.push_bin(b0)
self.push_bin(b1)
elif op == 1:
b = self.pop_bin()
b0, b1 = b.split_v(random.randint(b.iv0, b.iv1-1), self)
assert b0.nvis + b1.nvis == b.nvis
self.push_bin(b0)
self.push_bin(b1)
elif op == 2:
b = self.pop_bin()
b0, b1 = b.split_w(random.randint(b.iw0, b.iw1-1), self)
assert b0.nvis + b1.nvis == b.nvis
self.push_bin(b0)
self.push_bin(b1)
elif op >= 3 and len(self.state) >= 2:
b0 = self.pop_bin()
# Compile list of merge candidates
candidates = list(filter(lambda b: b0.distance(b) < self.max_merge_distance, self.state))
random.shuffle(candidates)
success = False
for b1 in candidates:
self.state.remove(b1)
b_n = b0.merge(b1, self)
# Visibility sum works out? Then we can proceed
nvis = b0.nvis + b1.nvis
if b_n.nvis == nvis:
self.push_bin(b_n)
success = True
break
# Put bins back
self.push_bin(b1)
success = False
if not success:
self.push_bin(b0)
def energy(self):
return (self.add_cost + sum(b.cost for b in self.state)
) / self.nvis0 + self.extra_energy_per_bin * len(self.state)
def update(self, step, T, E, acceptance, improvement):
# Check consistency
assert sum(b.nvis for b in self.state) == self.nvis0
if acceptance is not None:
# Copied
def time_string(seconds):
"""Returns time in seconds as a string formatted HHHH:MM:SS."""
s = int(round(seconds)) # round to nearest second
h, s = divmod(s, 3600) # get hours and remainder
m, s = divmod(s, 60) # split remainder into minutes and seconds
return '%i:%02i:%02i' % (h, m, s)
# Compose message
elapsed = time.time() - self.start
remain = (self.steps - step) * (elapsed / step)
title = ('T=%.5f E=%.2f Bins=%.d Acc=%.2f%% Imp=%.2f%% Time=%s/%s' %
(T, E-self.extra_energy_per_bin * len(self.state),
len(self.state), 100.0 * acceptance, 100.0 * improvement,
time_string(elapsed), time_string(elapsed+remain)))
print('\r'+title, file=sys.stderr, end='\r')
if self.progress_image is not None:
self.visualize(title, save=self.progress_image)
def visualize(self, title='', save=None):
# Make figure
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.set_title(title)
ax.set_xlabel('u')
ax.set_ylabel('v')
ax.set_zlabel('w')
ax.view_init(elev=90., azim=-0)
# Find non-zero coordinates, convert to uvw
iw,iv,iu = self.density.nonzero()
u,v,w = numpy.transpose(self.bin_to_uvw(numpy.transpose([iu,iv,iw])))
ax.scatter(u, v, w, c= 'red',s=self.density[iw,iv,iu]/self.nvis0*10000,lw=0)
# Helper for drawing a box
def draw_box(x0, x1, y0, y1, z0, z1, *args):
r = [0, 1]
for s, e in combinations(numpy.array(list(product(r, r, r))), 2):
if numpy.sum(numpy.abs(s-e)) == 1:
xyz = numpy.array([s, e])
xyz *= numpy.array([x1-x0, y1-y0, z1-z0])
xyz += numpy.array([x0, y0, z0])
uvw = numpy.transpose(self.bin_to_uvw(xyz))
ax.plot3D(*uvw, color="b", lw=0.5)
for b in self.state:
if b.wplanes > 0:
draw_box(b.iu0, b.iu1, b.iv0, b.iv1, b.iw0, b.iw1)
fig.tight_layout()
if save is not None:
plt.savefig(save, dpi=300)
else:
plt.show()
plt.close()
def save_state(self, fname=None, *args, **kwargs):
if fname is None:
true_energy = self.energy() - self.extra_energy_per_bin * len(self.state)
fname = self.name + "_E%.1f.state" % true_energy
super(BinSet, self).save_state(fname, *args, **kwargs)
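
# --- Editor's illustrative sketch (not part of the original module) ---
# Bin.distance above returns the *squared* gap between two axis-aligned boxes
# (zero when they touch or overlap). The standalone helper below reproduces
# that metric for plain (lo, hi) interval tuples so the rule can be checked in
# isolation; the name box_gap_squared is an illustrative choice, not an API of
# this module.
def box_gap_squared(box_a, box_b):
    """box_a / box_b: sequences of (lo, hi) pairs, one pair per axis."""
    total = 0
    for (a0, a1), (b0, b1) in zip(box_a, box_b):
        gap = max(0, max(a0, b0) - min(a1, b1))
        total += gap * gap
    return total
# e.g. box_gap_squared([(0, 2), (0, 2), (0, 2)], [(5, 6), (0, 2), (0, 2)]) == 9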
| [
"matplotlib"
] |
2d8d82400a19edcd0505aa945a1be0761bdb5f8f | Python | mmichaelzhang/CSMATH | /hw01_test.py | UTF-8 | 545 | 2.96875 | 3 | [] | no_license | import numpy as np
import numpy.polynomial as poly
import matplotlib.pyplot as plt
import hw01_points_estimation
from hw01_points_estimation import *
x=np.linspace(0, 1, 10)
m=len(x)
y=np.sin(2*np.pi*x)
#add noise
sigma=0.05
mu=0.0
x=x+sigma*np.random.randn(m)+mu
y=y+sigma*np.random.randn(m)+mu
#parameters
w=PointsEstimate(x, y, 4, 0)
rp=poly.Polynomial(np.array(w).reshape(-1))
#plot
plt.figure(0);
rp_x=np.linspace(0, 1, 100)
rp_y=rp(rp_x)
plt.plot(rp_x, rp_y, 'b', x, y, 'or', rp_x, np.sin(2*np.pi*rp_x), 'g')
plt.show() | [
"matplotlib"
] |
19a5385f4dbbd0b0dc5a1f7782c16929d33fce3f | Python | arturogzm93/EDA-NBA | /main.py | UTF-8 | 10,651 | 2.71875 | 3 | [] | no_license |
# IMPORTAR LIBRERIAS
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
# %matplotlib inline  # IPython magic; left commented so the file also runs as a plain script
# CARGAR LAS BASES DE DATOS
stats = pd.read_csv('../../BBDD/Seasons_Stats.csv')
players = pd.read_csv('../../BBDD/Players.csv')
# SE JUNTAN AMBAS BASES DE DATOS A TRAVES DE LA COLUMNA JUGADOR
nba = pd.merge(stats, players, on = 'Player')
# SE MUESTRAN LAS 5 PRIMERAS FILAS DEL DATAFRAME
nba.head()
# VISTA GENERAL DEL DATAFRAME
nba.info()
# COMPROBAR LA CANTIDAD DE VALORES NULOS O NAN
nba.isnull().sum()
# SE COMPRUEBA EL TOTAL DE COLUMNAS CON SUS NOMBRES
nba.columns
# BORRAR COLUMNAS INNECESARIAS
nba.drop(columns = ['Unnamed: 0_x', 'Unnamed: 0_y', 'blanl', 'blank2', 'Age', 'Tm', 'GS', 'MP', 'PER', 'TS%', '3PAr', 'FTr',
'ORB%', 'DRB%', 'TRB%', 'AST%', 'STL%','BLK%', 'TOV%', 'USG%', 'OWS', 'DWS', 'WS', 'WS/48', 'OBPM', 'DBPM', 'BPM', 'VORP',
'FG', 'FG%', '3P', '3P%', '2P', 'eFG%', 'FT', 'FTA', 'FT%', 'ORB', 'DRB', 'BLK', 'PF', 'birth_city', 'birth_state',
'collage', 'born'], inplace = True)
# ASIGNAR NUEVO ORDEN A LAS COLUMNAS
nba = nba[['Year', 'Player', 'Pos', 'height', 'weight', 'G', 'FGA', '3PA', '2PA', 'TRB', 'AST', 'STL', 'TOV', 'PTS']]
# COMPROBAR LAS DIFERENTES POSICIONES EXISTENTES
nba['Pos'].unique()
# SE ESTABLECEN Y SE AGRUPAN TODAS LAS POSICIONES EXISTENTES EN 3 POSICIONES NUEVAS
nba.loc[nba['Pos'] == 'SG', 'Pos'] = 'F'
nba.loc[nba['Pos'] == 'PG', 'Pos'] = 'G'
nba.loc[nba['Pos'] == 'PF', 'Pos'] = 'C'
nba.loc[nba['Pos'] == 'SF', 'Pos'] = 'F'
nba.loc[nba['Pos'] == 'SF-SG', 'Pos'] = 'F'
nba.loc[nba['Pos'] == 'PF-C', 'Pos'] = 'C'
nba.loc[nba['Pos'] == 'SG-PG', 'Pos'] = 'G'
nba.loc[nba['Pos'] == 'SG-PF', 'Pos'] = 'F'
nba.loc[nba['Pos'] == 'C-SF', 'Pos'] = 'C'
nba.loc[nba['Pos'] == 'SG-SF', 'Pos'] = 'F'
nba.loc[nba['Pos'] == 'C-PF', 'Pos'] = 'C'
nba.loc[nba['Pos'] == 'PG-SG', 'Pos'] = 'G'
nba.loc[nba['Pos'] == 'SF-PF', 'Pos'] = 'F'
nba.loc[nba['Pos'] == 'PF-SF', 'Pos'] = 'F'
nba.loc[nba['Pos'] == 'PG-SF', 'Pos'] = 'F'
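# Editor's note (illustrative only, never called by this script): the fifteen
# .loc assignments above are equivalent to a single dict-based replace(). The
# helper below just returns that dict so the grouping reads at a glance;
# suggested use would be nba['Pos'] = nba['Pos'].replace(_compact_position_map()).
def _compact_position_map():
    return {'SG': 'F', 'PG': 'G', 'PF': 'C', 'SF': 'F', 'SF-SG': 'F',
            'PF-C': 'C', 'SG-PG': 'G', 'SG-PF': 'F', 'C-SF': 'C',
            'SG-SF': 'F', 'C-PF': 'C', 'PG-SG': 'G', 'SF-PF': 'F',
            'PF-SF': 'F', 'PG-SF': 'F'}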
# RESETEAR LOS VALORES DEL INDICE
nba.reset_index(drop = True, inplace = True)
# CAMBIAR LOS TIPOS DE LAS COLUMNAS
nba = nba.astype({'Year': 'int64',
'FGA': 'int64',
'3PA': 'int64'})
# CREAR NUEVA COLUMNA LLAMADA 'DECADE'
nba['Decade'] = nba['Year']
# CREAR Y EDITAR VALORES EN LA COLUMNA DECADE EN BASE A LOS AÑOS
nba.loc[nba['Decade'] < 1990, 'Decade'] = 1980
nba.loc[(nba['Decade'] > 1989) & (nba['Decade'] < 2000), 'Decade'] = 1990
nba.loc[(nba['Decade'] > 1999) & (nba['Decade'] < 2010), 'Decade'] = 2000
nba.loc[nba['Decade'] > 2009, 'Decade'] = 2010
# ORDENAR COLUMNAS
nba = nba[['Decade', 'Year', 'Player', 'Pos', 'height', 'weight', 'FGA', '3PA', '2PA', 'PTS', 'TRB', 'AST', 'STL', 'TOV']]
# INFORMACION GENERAL DEL DATAFRAME
nba.info()
# ESTADISTICOS DEL DATAFRAME
round(nba.describe(), 2)
# CORRELACIONES ENTRE VARIABLES
plt.figure(figsize=(20,10))
sns.heatmap(nba.corr(),
vmin = -1,
vmax = 1,
cmap=sns.color_palette("coolwarm", as_cmap=True),
square = True,
linewidths = 0.5,);
# SUMATORIOS TOTALES DEL DATAFRAME POR DECADA Y POSICION
round(nba.groupby(['Decade', 'Pos']).sum(), 2)
# SUMATORIOS TOTALES DEL DATAFRAME POR AÑO
round(nba.groupby(['Year']).sum(), 2)
# MEDIAS DE ALTURA Y PESO POR POSICION Y DECADA
plt.figure(figsize=(20,5))
# ALTURA
plt.subplot(1, 2, 1)
sns.barplot(data= nba, x="Decade", y="height", hue='Pos', hue_order=('G', 'F', 'C'))
plt.title('MEDIA DE ALTURAS POR POSICION')
plt.legend().remove()
plt.yticks(np.arange(0, 221, 10))
# PESO
plt.subplot(1, 2, 2)
sns.barplot(data= nba, x="Decade", y="weight", hue='Pos', hue_order=('G', 'F', 'C'))
plt.title('MEDIA DE PESOS POR POSICION')
plt.yticks(np.arange(0, 121, 10));
# CANTIDAD DE TIROS TOTALES
plt.figure(figsize=(20,5))
sns.lineplot(data = nba,
x = 'Year',
y = 'FGA',
linewidth = 3,
estimator= sum,
ci= None,
color='#003FFB')
plt.axvline(1980, color='#000000', linestyle='--')
plt.axvline(1990, color='#000000', linestyle='--')
plt.axvline(2000, color='#000000', linestyle='--')
plt.axvline(2010, color='#000000', linestyle='--')
plt.xticks(np.arange(1980, 2018, 1))
plt.xticks(rotation=-30)
plt.title('TOTAL DE TIROS POR AÑO');
# COMPARATIVA DE 2PA Y 3PA POR AÑO
plt.figure(figsize=(20, 5))
# TOTAL DE 2PA POR AÑO
plt.subplot(1, 2, 1)
sns.lineplot(data = nba,
x = 'Year',
y = '2PA',
linewidth = 3,
estimator= sum,
ci= None,
color='#FF7400')
plt.axvline(1980, color='#000000', linestyle='--')
plt.axvline(1990, color='#000000', linestyle='--')
plt.axvline(2000, color='#000000', linestyle='--')
plt.axvline(2010, color='#000000', linestyle='--')
plt.xticks(np.arange(1980, 2018, 2))
plt.xticks(rotation=-30)
plt.title('TOTAL DE 2PA POR AÑO');
# TOTAL DE 3PA POR AÑO
plt.subplot(1, 2, 2)
sns.lineplot(data = nba,
x = 'Year',
y = '3PA',
linewidth = 3,
estimator= sum,
ci= None,
color='#9500FB')
plt.axvline(1980, color='#000000', linestyle='--')
plt.axvline(1990, color='#000000', linestyle='--')
plt.axvline(2000, color='#000000', linestyle='--')
plt.axvline(2010, color='#000000', linestyle='--')
plt.xticks(np.arange(1980, 2018, 2))
plt.xticks(rotation=-30)
plt.title('TOTAL DE 3PA POR AÑO');
# CANTIDAD DE 2PA Y 3PA POR POSICION Y AÑO
plt.figure(figsize=(20,5))
# FIGURA 2PA
plt.subplot(1, 2, 1)
sns.lineplot(data = nba,
x = 'Year',
y = '2PA',
hue = 'Pos',
hue_order=('G', 'F', 'C'),
linewidth = 3,
estimator= sum,
ci= None);
plt.axvline(1980, color='#000000', linestyle='--')
plt.axvline(1990, color='#000000', linestyle='--')
plt.axvline(2000, color='#000000', linestyle='--')
plt.axvline(2010, color='#000000', linestyle='--')
plt.xticks(np.arange(1980, 2019, 2))
plt.xticks(rotation=-30)
plt.title('TOTAL DE TIROS DE 2 POR POSICION Y AÑO');
# FIGURA 3PA
plt.subplot(1, 2, 2)
sns.lineplot(data = nba,
x = 'Year',
y = '3PA',
hue = 'Pos',
hue_order=('G', 'F', 'C'),
linewidth = 3,
estimator= sum,
ci= None);
plt.axvline(1980, color='#000000', linestyle='--')
plt.axvline(1990, color='#000000', linestyle='--')
plt.axvline(2000, color='#000000', linestyle='--')
plt.axvline(2010, color='#000000', linestyle='--')
plt.xticks(np.arange(1980, 2019, 2))
plt.xticks(rotation=-30)
plt.title('TOTAL DE TRIPLES POR POSICION Y AÑO');
# ROBOS Y PERDIDAS POR AÑO
plt.figure(figsize=(20,5))
# ROBOS
plt.subplot(1, 2, 1)
sns.barplot(x = 'Year',
y = 'STL',
palette = 'viridis',
data = nba,
ci = None,
estimator=sum)
plt.xticks(rotation=-55)
plt.title('TOTAL ROBOS POR AÑO');
# PERDIDAS
plt.subplot(1, 2, 2)
sns.barplot(x = 'Year',
y = 'TOV',
palette = 'rocket_r',
data = nba,
ci = None,
estimator=sum)
plt.xticks(rotation=-55)
plt.title('TOTAL PERDIDAS POR AÑO');
# CREAR DATAFRAME DE PIVOTS
center = nba[nba['Pos'] == 'C']
# EVOLUCION PIVOT 1
plt.figure(figsize=(20,5));
# TRIPLES
plt.subplot(1, 2, 1)
sns.barplot(x = '3PA',
y = 'Decade',
palette = 'ch:start=.2,rot=-.3',
data = center,
ci = None,
estimator=sum,
orient='h')
plt.title('TRIPLES TIRADOS POR DECADA');
# PUNTOS
plt.subplot(1, 2, 2)
sns.lineplot(data = center,
x = 'Year',
y = 'PTS',
linewidth = 3,
estimator= sum,
ci= None,
color='#0AC577')
plt.axvline(1980, color='#000000', linestyle='--')
plt.axvline(1990, color='#000000', linestyle='--')
plt.axvline(2000, color='#000000', linestyle='--')
plt.axvline(2010, color='#000000', linestyle='--')
plt.xticks(np.arange(1980, 2019, 2))
plt.xticks(rotation=-30)
plt.title('TOTAL PUNTOS POR AÑO');
# EVOLUCION PIVOT 2
plt.figure(figsize=(20,5));
# REBOTES
plt.subplot(1, 2, 1)
sns.barplot(x = 'Year',
y = 'TRB',
palette = 'viridis',
data = center,
ci = None,
estimator=sum)
plt.xticks(rotation=-55)
plt.title('TOTAL DE REBOTES POR AÑO');
# ASISTENCIAS
plt.subplot(1, 2, 2)
sns.lineplot(data = center,
x = 'Year',
y = 'AST',
linewidth = 3,
estimator= sum,
ci= None,
color='#DE6B00')
plt.axvline(1980, color='#000000', linestyle='--')
plt.axvline(1990, color='#000000', linestyle='--')
plt.axvline(2000, color='#000000', linestyle='--')
plt.axvline(2010, color='#000000', linestyle='--')
plt.xticks(np.arange(1980, 2019, 2))
plt.xticks(rotation=-55)
plt.title('TOTAL ASISTENCIAS POR AÑO');
# CREAR DATAFRAME DE BASES
guard = nba[nba['Pos'] == 'G']
# EVOLUCION BASE 1
plt.figure(figsize=(20,5));
# TIROS
plt.subplot(1, 2, 1)
sns.barplot(data = guard,
x = 'Year',
y = 'FGA',
linewidth = 3,
estimator= sum,
ci= None,
palette='crest')
plt.xticks(rotation=-55)
plt.title('TIROS TOTALES POR AÑO');
# PUNTOS
plt.subplot(1, 2, 2)
sns.lineplot(data = guard,
x = 'Year',
y = 'PTS',
linewidth = 3,
estimator= sum,
ci= None,
color='#FB0000')
plt.axvline(1980, color='#000000', linestyle='--')
plt.axvline(1990, color='#000000', linestyle='--')
plt.axvline(2000, color='#000000', linestyle='--')
plt.axvline(2010, color='#000000', linestyle='--')
plt.xticks(np.arange(1980, 2019, 2))
plt.xticks(rotation=-30)
plt.title('TOTAL PUNTOS POR AÑO');
# EVOLUCION BASE 2
plt.figure(figsize=(20,5))
# ASISTENCIAS
plt.subplot(1, 2, 1)
sns.lineplot(data = guard,
x = 'Year',
y = 'AST',
linewidth = 3,
estimator= sum,
ci= None,
color='#003FFB')
plt.axvline(1980, color='#000000', linestyle='--')
plt.axvline(1990, color='#000000', linestyle='--')
plt.axvline(2000, color='#000000', linestyle='--')
plt.axvline(2010, color='#000000', linestyle='--')
plt.xticks(np.arange(1980, 2019, 2))
plt.xticks(rotation=-30)
plt.title('TOTAL ASISTENCIAS POR AÑO');
# REBOTES
plt.subplot(1, 2, 2)
sns.barplot(x = 'Year',
y = 'TRB',
palette = 'dark:salmon_r',
data = guard,
ci = None,
estimator=sum)
plt.xticks(rotation=-55)
plt.title('TOTAL DE REBOTES POR AÑO');
| [
"matplotlib",
"seaborn"
] |
4d2546cd06ee18b1d5b2f64749c34710af424944 | Python | liuyuzhou/ai_pre_sourcecode | /chapter6/legend_exp_5.py | UTF-8 | 417 | 3.609375 | 4 | [] | no_license | import matplotlib.pyplot as plt
line1, = plt.plot([1, 2, 3], label="Line 1", linestyle='--')
line2, = plt.plot([3, 2, 1], label="Line 2", linewidth=4)
# 为第一个线条创建图例
first_legend = plt.legend(handles=[line1], loc=1)
# 手动将图例添加到当前轴域
ax = plt.gca().add_artist(first_legend)
# 为第二个线条创建另一个图例
plt.legend(handles=[line2], loc=4)
plt.show()
| [
"matplotlib"
] |
18f16c46a592d4bc2c84b60407ae79f847b53788 | Python | 4dv3ntur3/bit_seoul | /keras/keras49_cp_2_fashion.py | UTF-8 | 5,032 | 2.8125 | 3 | [] | no_license | #2020-11-18 (8일차)
#fashon_mnist -> CNN: checkpoints / model.fit() 이후 model.save() / model.save_weights()
#흑백
from tensorflow.keras.datasets import fashion_mnist
#이미지 분류-> OneHotEncoding
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.layers import Dense, Conv2D, LSTM
from tensorflow.keras.layers import Flatten, MaxPooling2D #maxpooling2d는 들어가도 되고 안 들어가도 됨 필수 아님
import matplotlib.pyplot as plt
import numpy as np
(x_train, y_train), (x_test, y_test) = fashion_mnist.load_data()
# print(x_train[0])
# print("y_train[0]: ", y_train[0])
#데이터 구조 확인
print(x_train.shape, x_test.shape) #(60000, 28, 28) (10000, 28, 28)
print(y_train.shape, y_test.shape) #(60000,) (10000,)
#1. 데이터 전처리
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)
# print(y_test)
x_train = x_train.reshape(x_train.shape[0], x_train.shape[1], x_train.shape[2], 1).astype('float32')/255.
x_test = x_test.reshape(x_test.shape[0], x_test.shape[1], x_test.shape[2], 1).astype('float32')/255.
# x_predict = x_train[:10]
# y_answer = y_train[:10]
#2. 모델
model = Sequential()
model.add(Conv2D(200, (3, 3), padding='same', input_shape=(x_train.shape[1], x_train.shape[2], 1))) #padding 주의!
model.add(Conv2D(180, (2, 2), padding='valid'))
model.add(Conv2D(100, (3, 3), strides=2)) #padding default=valid
model.add(Conv2D(50, (2, 2)))
model.add(Conv2D(30, (2, 2)))
model.add(Conv2D(10, (3, 3)))
model.add(MaxPooling2D(pool_size=2)) #pool_size default=2
model.add(Flatten())
model.add(Dense(10, activation='relu')) #flatten하면서 곱하고 dense에서 또 100 곱함
#Conv2d의 activation default='relu'
#LSTM의 activation default='tanh'
# model.add(Dense(50, activation='relu'))
model.add(Dense(10, activation='softmax')) #label: 0~9 (항상 dataset label 확인)
#3. 컴파일, 훈련
# modelpath = './model/{epoch:02d}-{val_loss:.4f}.hdf5'
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
early_stopping = EarlyStopping(monitor='loss', patience=10, mode='auto')
# cp = ModelCheckpoint(filepath=modelpath,
# monitor='val_loss',
# save_best_only=True,
# mode='auto'
# )
model.compile(loss='categorical_crossentropy',
optimizer='adam',
metrics=['accuracy'])
hist = model.fit(x_train, y_train, epochs=100, batch_size=32, verbose=1,
validation_split=0.2, callbacks=[early_stopping])
#모델+가중치
model.save('./save/fashion_cnn_model_weights.h5')
model.save_weights('./save/fashion_cnn_weights.h5')
#4. 평가, 예측
result = model.evaluate(x_test, y_test, batch_size=32)
print("=======fashion_cnn=======")
model.summary()
print("loss: ", result[0])
print("acc: ", result[1])
# #정답
# y_answer = np.argmax(y_answer, axis=1)
# #예측값
# y_predict = model.predict(x_test)
# y_predict = np.argmax(y_predict, axis=1)
# print("예측값: ", y_predict)
# print("정답: ", y_test)
'''
=======fashion_cnn=======
loss: 0.43879228830337524
acc: 0.850600004196167
예측값: [9 0 0 3 1 2 7 4 5 5]
정답: [9 0 0 3 0 2 7 2 5 5]
=======fashion_cnn=======
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv2d (Conv2D) (None, 28, 28, 200) 2000
_________________________________________________________________
conv2d_1 (Conv2D) (None, 27, 27, 180) 144180
_________________________________________________________________
conv2d_2 (Conv2D) (None, 13, 13, 100) 162100
_________________________________________________________________
conv2d_3 (Conv2D) (None, 12, 12, 50) 20050
_________________________________________________________________
conv2d_4 (Conv2D) (None, 11, 11, 30) 6030
_________________________________________________________________
conv2d_5 (Conv2D) (None, 9, 9, 10) 2710
_________________________________________________________________
max_pooling2d (MaxPooling2D) (None, 4, 4, 10) 0
_________________________________________________________________
flatten (Flatten) (None, 160) 0
_________________________________________________________________
dense (Dense) (None, 10) 1610
_________________________________________________________________
dense_1 (Dense) (None, 10) 110
=================================================================
Total params: 338,790
Trainable params: 338,790
Non-trainable params: 0
_________________________________________________________________
loss: 0.3941965699195862
acc: 0.8639000058174133
''' | [
"matplotlib"
] |
553abed7cafa3eb6ad0ce531dc6288306f027c2c | Python | SiddhantSadangi/cv-tutorial | /utils/contrast.py | UTF-8 | 593 | 3.078125 | 3 | [] | no_license | import numpy as np
import matplotlib.pyplot as plt
def stretch(image, mode = 'linear'):
temp = image.copy()
if(len(temp.shape) == 3):
for i in range(image.shape[2]):
minI = np.min(image[:,:,i])
maxI = np.max(image[:,:,i])
minO = 0
maxO = 255
temp[:,:,i] = (image[:,:,i]-minI)*(((maxO-minO)/(maxI-minI))+minO)
else:
minI = np.min(image)
maxI = np.max(image)
minO = 0
maxO = 255
temp = (image-minI)*(((maxO-minO)/(maxI-minI))+minO)
return temp
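
# Editor's usage sketch: exercises stretch() on a synthetic low-contrast
# gradient so the module can be run directly. Guarded by __main__ so importing
# the module is unaffected; the figure layout and value range are illustrative
# choices only.
if __name__ == '__main__':
    low_contrast = np.tile(np.linspace(100, 150, 256), (64, 1))
    stretched = stretch(low_contrast)
    fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 3))
    ax1.imshow(low_contrast, cmap='gray', vmin=0, vmax=255)
    ax1.set_title('input (values 100-150)')
    ax2.imshow(stretched, cmap='gray', vmin=0, vmax=255)
    ax2.set_title('stretched (values 0-255)')
    plt.show()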
| [
"matplotlib"
] |
57c7c6171e6019c8028be6525d814e023ce7b836 | Python | tsando/hotel-price-disparity | /undercut.py | UTF-8 | 10,625 | 2.765625 | 3 | [] | no_license | import sys
import pandas as pd
import numpy as np
np.random.seed(0)
import pickle
import time
from sklearn.utils import resample
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.linear_model import LogisticRegression
import matplotlib.pyplot as plt
import seaborn as sns
_ = sns.set(style="whitegrid", rc={"figure.figsize": (12, 6),
# "legend.fontsize": "large",
"axes.titlesize": "large",
"xtick.labelsize": "large",
"ytick.labelsize": "large",
})
##############################################################
# HELPER FUNCTIONS
##############################################################
def print_runtime(start_time):
end_time = time.time()
run_time = end_time - start_time
print('Run time: {}h:{}m:{}s'.format(int(run_time / 3600), int(run_time / 60) % 60, int(run_time % 60)))
def save_pickle(object, path_and_name):
# Saves pickle
with open(path_and_name, 'wb') as fp:
pickle.dump(object, fp)
pass
def open_pickle(path_and_name):
# Opens pickle - note required 'rb' inside open given above 'wb'
with open(path_and_name, 'rb') as fp:
object = pickle.load(fp)
return object
def prepare_and_save_pickle(size=None, name='df.p'):
print('Preparing pickle ... took:')
start_time = time.time()
# Read data
df = pd.read_csv('data/ds_test_000000000000')
# Slim data to make it faster
if size:
df = df.iloc[:size]
# Convert datetime col to datetime obj (takes long! :()
df['received_time'] = pd.to_datetime(df['received_time'])
save_pickle(df, 'data/' + name)
print_runtime(start_time)
return df
def get_continent_dict():
df = pd.read_csv('data/country_continent.csv', keep_default_na=False)
continent_dict = df.set_index('iso 3166 country').to_dict()['continent code']
return continent_dict
def map_country(x):
try:
return continent_dict[x]
except KeyError:
return 'other'
def map_currency(x):
# Major currencies
if x in ['USD', 'EUR', 'JPY', 'GBP', 'CAD', 'AUD']:
return x
else:
return 'other'
def remove_price_outliers(df, column_name):
# Using IQR * 1.5 methodology
column = df[column_name]
Q1 = column.quantile(0.25)
Q3 = column.quantile(0.75)
IQR = Q3 - Q1
# Exclude cases when prices are 0
filtered = df.query('0 < ' + column_name + ' <= (@Q3 + 1.5 * @IQR)')
# filtered = filtered[filtered[column_name]>0.0]
mask = df.isin(filtered)[column_name]
return ~mask
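
# Editor's sketch (defined but never called): a tiny self-contained
# illustration of the IQR * 1.5 rule implemented by remove_price_outliers
# above. The column name 'p' and the toy values are arbitrary.
def _iqr_rule_demo():
    toy = pd.DataFrame({'p': [10, 11, 12, 13, 14, 15, 0, 500]})
    outlier_mask = remove_price_outliers(toy, 'p')
    # Expect the zero price and the 500 outlier to be flagged as True.
    return toy[outlier_mask]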
def run_simple(X, y, model):
print('Running model... took:')
start_time = time.time()
# split the data with 80% in train
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0, train_size=0.8)
# fit the model on one set of data
model.fit(X_train, y_train)
# evaluate the model on the second set of data
y_test_model = model.predict(X_test)
print(accuracy_score(y_test, y_test_model))
print_runtime(start_time)
# print('X_train={}, y_train={}'.format(np.sum(X_train.index.tolist()), np.sum(y_train)))
# print('y_test={}, y_test_model={}'.format(np.sum(y_test), np.sum(y_test_model)))
pass
##############################################################
# MAIN
##############################################################
# df = prepare_and_save_pickle()
start_time = time.time()
df = open_pickle('data/df.p')
print('Before anything: ', df.shape)
# #################################
# DATA CLEANING
# #################################
# Note: using the full dataset for this for now, but in practice this should only be done on the train set
# We have missing data, so drop for simplicity drop them now (seems to be in ota_price and user_country)
# NOTE: With more time one could try to impute these
df = df.dropna()
# Get all currencies to upper case so they are not duplicated as cat
df['currency'] = df['currency'].apply(lambda x: x.upper())
# Lots of outliers in the prices on a per currency basis, so will apply a remove procedure based on IQR
grouped = df[['currency', 'direct_price', 'ota_price']].groupby('currency')
for name, group in grouped:
outlier = (remove_price_outliers(group, 'direct_price')) | (remove_price_outliers(group, 'ota_price'))
df.loc[group.index, 'price_outlier'] = np.where(outlier == 1, 1, 0)
# print(name)
df = df[df['price_outlier'] != 1]
# Remove this column as we no longer need it
df = df.drop('price_outlier', axis=1)
print('After cleaning: ', df.shape)
# #################################
# DOWNSAMPLING
# #################################
# This is required as we have serious class imbalance for the undercut class
# Create currency agnostic measure of disparity which can be used for binary target
# (already handled cases of div by zero earlier)
df['price_ratio'] = df['ota_price'] / df['direct_price']
# Remove outliers in price_ratio
outlier = remove_price_outliers(df, 'price_ratio')
df = df[~outlier]
# This will be the target in our classifier
df['undercut'] = np.where(df['price_ratio'] < 1, 1, 0)
# Separate majority and minority classes
df_majority = df[df['undercut'] == 0]
df_minority = df[df['undercut'] == 1]
# Downsample majority class
df_majority_downsampled = resample(df_majority,
replace=False, # sample without replacement
n_samples=df_minority.shape[0], # to match minority class
random_state=0) # reproducible results
# Combine minority class with downsampled majority class
df = pd.concat([df_majority_downsampled, df_minority])
# Reset index since we removed lots of rows
df = df.reset_index(drop=True)
# Display new class counts
print('After downsampling: ', df.shape)
print('New class balance:', df['undercut'].value_counts())
# #################################
# PREPROC & FEATURE ENG
# #################################
# Cast relevant cols as category type
cats = ['client', 'hotel', 'currency', 'ota', 'user_country']
for cat in cats:
df[cat] = df[cat].astype('category')
# Rename long client and hotel cats for just numbers
for col in ['client', 'hotel']:
n_col = np.unique(df[col])
df[col] = df[col].cat.rename_categories(np.arange(1, n_col.shape[0] + 1))
# ------- PRICE -------
# Rescale the direct price (Note again we are doing this on the whole set instead of train, which is cheating)
min_max_scaler = preprocessing.MinMaxScaler()
grouped = df[['currency', 'direct_price']].groupby('currency')
for name, group in grouped:
# print(name)
df.loc[group.index, 'direct_price_scaled'] = min_max_scaler.fit_transform(group[['direct_price']])
# ------- TIME -------
# Re-index to received_time so can groupby time and create time features quickly from this
# Note: this is faster than applying lambda func over all rows to get hour, weekday, month and year
temp = df[['received_time']].copy()
temp.index = temp['received_time']
# Cyclic time - get sine and cosine
df['h'] = temp.index.hour
df['w'] = temp.index.weekday
df['m'] = temp.index.month
# Hours numbered 0-23
df['h_sin'] = np.sin(df['h'] * (2. * np.pi / 24))
df['h_cos'] = np.cos(df['h'] * (2. * np.pi / 24))
# Weeks numbered 0-6
df['w_sin'] = np.sin(df['w'] * (2. * np.pi / 7))
df['w_cos'] = np.cos(df['w'] * (2. * np.pi / 7))
# Months numbered 1-12, hence we subtract 1
df['m_sin'] = np.sin((df['m'] - 1) * (2. * np.pi / 12))
df['m_cos'] = np.cos((df['m'] - 1) * (2. * np.pi / 12))
# Non-cyclic time
# Years
df['yr'] = temp.index.year
# Rebase years
df['yr_r'] = np.max(df['yr']) - df['yr']
# ------- AGGREGATIONS -------
# Note: this should be done on train set only
# so technically we are cheating a bit; assumption is dist doesn't change much
# Is the hotel part of a chain?
hotels_per_client = df[['client', 'hotel']].groupby('client').nunique().sort_values('hotel', ascending=False)
hotels_per_client = hotels_per_client.rename(columns={'hotel': 'n_hotels'})
hotels_per_client = hotels_per_client[['n_hotels']]
df = df.join(hotels_per_client, on='client')
df['chain'] = np.where(df['n_hotels'] > 1, 1, 0)
# nunique number of adults per hotel equates to number of rooms requested
rooms_per_hotel = df[['hotel', 'adults']].groupby('hotel').nunique().sort_values('adults', ascending=False)
rooms_per_hotel = rooms_per_hotel.rename(columns={'adults': 'n_rooms'})
rooms_per_hotel = rooms_per_hotel[['n_rooms']]
df = df.join(rooms_per_hotel, on='hotel')
searches_by_hotel = df[['hotel', 'received_time']].groupby('hotel').nunique().sort_values('received_time',
ascending=False)
searches_by_hotel = searches_by_hotel.rename(columns={'received_time': 'n_searches'})
searches_by_hotel = searches_by_hotel[['n_searches']]
df = df.join(searches_by_hotel, on='hotel')
# ------- CATEGORICAL -------
# Reduce dimensionality of cat features
continent_dict = open_pickle('data/continent_dict.p')
df['user_continent'] = df['user_country'].apply(lambda x: map_country(x))
df['currency_v2'] = df['currency'].apply(lambda x: map_currency(x))
df['major_currency'] = np.where(df['currency_v2'] != 'other', 1, 0)
# #################################
# DEFINE X and y
# #################################
features_dict = {'orig': {
'cat': {'enc': ['ota'], 'bin': []},
'num': ['adults', 'children', 'direct_price_scaled']
},
'eng': {
'cat': {'enc': ['user_continent', 'currency_v2'], 'bin': ['chain', 'major_currency']},
'num': ['h_sin', 'h_cos', 'w_sin', 'w_cos', 'm_sin', 'm_cos', 'yr_r',
'n_hotels', 'n_rooms', 'n_searches']
}
}
f_num = features_dict['orig']['num'] + features_dict['eng']['num']
X = df[f_num].copy()
f_bin = features_dict['orig']['cat']['bin'] + features_dict['eng']['cat']['bin']
X[f_bin] = df[f_bin]
print(X.shape)
f_enc = features_dict['orig']['cat']['enc'] + features_dict['eng']['cat']['enc']
for col in f_enc:
# print(col)
prefix = col[:3] if 'continent' not in col else 'cont'
X = X.join(pd.get_dummies(df[col], prefix=prefix))
# Binary target
y = df['undercut']
print(X.shape, y.shape)
# #################################
# MODEL
# #################################
model = LogisticRegression(random_state=0)
run_simple(X, y, model)
| [
"matplotlib",
"seaborn"
] |
2cfce7941cf9a33735d8dbd2fcc54b4f0efeb448 | Python | andres2631996/Artificial-Neural-Networks-Deep-Architectures-DD2437- | /Lab2_Part2.py | UTF-8 | 13,636 | 3 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Sat Sep 14 17:38:55 2019
@author: Usuario
"""
# Code for Part 2 of Lab 2 in ANNDA
# Import necessary packages
import time
start=time.time()
import numpy as np
import matplotlib.pyplot as plt
import operator
# Function for weight updates in SOM algorithm
def neighborhood(pattern,weights,winner,epoch_num,total_epochs,eta,initial_neighbors,circular=False):
# Get the weight updates for a decreasing neighborhood with the amount of epochs
increment=np.zeros(weights.shape)
#points=np.linspace(0,weights.shape[0]-1,weights.shape[0]).astype(int)
if weights.shape[0]>10:
inferior=winner-(initial_neighbors-((initial_neighbors-1)/total_epochs)*epoch_num)
superior=winner+(initial_neighbors-((initial_neighbors-1)/total_epochs)*epoch_num)
else:
if epoch_num<int(total_epochs/2):
inferior=winner-initial_neighbors
superior=winner+initial_neighbors
elif epoch_num>int(total_epochs/3) and epoch_num<int(2*total_epochs/3):
inferior=winner-int(initial_neighbors/2)
superior=winner+int(initial_neighbors/2)
else:
inferior=winner
superior=winner
if circular==False:
if inferior<0:
inferior=0
elif superior>weights.shape[0]-1:
superior=weights.shape[0]-1
points=np.linspace(inferior,superior-1,superior-inferior).astype(int)
increment[points,:]=eta*(-weights[points,:]+pattern) # All increments
if circular==True:
if superior>=weights.shape[0]:
points2=np.linspace(inferior,weights.shape[0]-1,weights.shape[0]-inferior).astype(int)
points1=np.linspace(0,superior-weights.shape[0],superior-weights.shape[0]+1).astype(int)
points=np.concatenate((points1,points2),axis=0)
#points=np.linspace(inferior,superior,superior-inferior+1).astype(int)
increment[points,:]=eta*(-weights[points,:]+pattern) # All increments
else:
if superior!=inferior:
points=np.linspace(inferior,superior,superior-inferior+1).astype(int)
increment[points,:]=eta*(-weights[points,:]+pattern) # All increments
else:
increment[winner,:]=eta*(-weights[winner,:]+pattern) # All increments
return increment
# SOM function
def SOM_2D(patterns,weights,total_epochs,eta,initial_neighbors,circular=False,replacement=False):
# Patterns is a 2D array where each row represents a separate pattern
# Weights is a 2D array where each row represents the weight vector of one node
# Total_epochs is the total number of epochs to apply
# eta is the learning rate
# Get the order of the nodes
order=[] # List of winner nodes at last iteration
for j in range(total_epochs):
for i in range(patterns.shape[0]): # Iterate through all input patterns
distance=weights-patterns[i,:] # Matrix with all the Euclidean distances
distance_vector=np.linalg.norm(distance,axis=1) # Vector with the scalar distances
winner=np.argmin(distance_vector) # Index of closest node to input pattern
weights+=neighborhood(patterns[i,:],weights,winner,j,total_epochs,eta,initial_neighbors,circular)
#if j==total_epochs-2 and i==patterns.shape[0]-1:
#print(weights,patterns)
if j==total_epochs-1:
if replacement==True:
order.append(winner)
weights[winner,:]=np.ones(2)
else:
order.append(winner)
return order
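
# Editor's sketch (never invoked by the lab code below): minimal end-to-end
# call of SOM_2D on a handful of random 2-D points, mirroring the call pattern
# of the city-ordering task. Node count, epochs and eta are arbitrary
# illustrative values.
def _som2d_demo():
    demo_patterns = np.random.rand(10, 2)
    demo_weights = np.random.rand(10, 2)
    return SOM_2D(demo_patterns, demo_weights, total_epochs=50, eta=0.2,
                  initial_neighbors=2, circular=True)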
def neighborhood3D(pattern,weights,winner_node,epoch_num,total_epochs,eta,initial_neighbors,circular=False):
# Get the weight updates for a decreasing neighborhood with the amount of epochs
increment=np.zeros(weights.shape)
#points=np.linspace(0,weights.shape[0]-1,weights.shape[0]).astype(int)
winner_node=np.array(winner_node)
if epoch_num<int(total_epochs/4):
[inf_h,inf_v]=winner_node-np.array([initial_neighbors,initial_neighbors])
[sup_h,sup_v]=winner_node+np.array([initial_neighbors,initial_neighbors])
elif epoch_num>int(total_epochs/4) and epoch_num<int(total_epochs/2):
[inf_h,inf_v]=winner_node-np.array([int(initial_neighbors)/2,int(initial_neighbors/2)])
[sup_h,sup_v]=winner_node+np.array([int(initial_neighbors)/2,int(initial_neighbors/2)])
elif epoch_num>int(total_epochs/2) and epoch_num<int(3*total_epochs/4):
[inf_h,inf_v]=winner_node-np.array([int(initial_neighbors)/3,int(initial_neighbors/3)])
[sup_h,sup_v]=winner_node+np.array([int(initial_neighbors)/3,int(initial_neighbors/3)])
else:
[inf_h,inf_v]=winner_node
[sup_h,sup_v]=winner_node
if circular==False:
if inf_h<0:
inf_h=0
if inf_v<0:
inf_v=0
if sup_h>=weights.shape[0]:
sup_h=weights.shape[0]-1
if sup_v>=weights.shape[1]:
sup_v=weights.shape[1]-1
#hor_points=np.linspace(inf_h,sup_h,sup_h-inf_h+1).astype(int) # Horizontal neighbor nodes
#ver_points=np.linspace(inf_v,sup_v,sup_v-inf_v+1).astype(int) # Vertical neighbor nodes
increment[int(inf_h):int(sup_h),int(inf_v):int(sup_v),:]=eta*(-weights[int(inf_h):int(sup_h),int(inf_v):int(sup_v),:]+pattern) # All increments
#gauss=np.exp(-(points-winner)**2/((total_epochs-epoch_num+1)*1.2)) # Gaussian weights
#distrib=np.dot(increment.T,gauss) # Gaussian distribution for weight update
return increment
def SOM_3D(patterns,weights,total_epochs,eta,initial_neighbors,circular=False,replacement=False):
# Patterns is a 2D array where each row represents a separate pattern
# Weights is a 3D array where each plane represents one attribute
# Total_epochs is the total number of epochs to apply
# eta is the learning rate
# Get the order of the nodes
order=np.zeros((patterns.shape[0],2)) # List of winner nodes at last iteration
for j in range(total_epochs):
for i in range(patterns.shape[0]): # Iterate through all input patterns
distance=weights-patterns[i,:] # Matrix with all the Euclidean distances
distance_vector=np.linalg.norm(distance,axis=2) # Vector with the scalar distances
winner=np.argmin(distance_vector) # Index of closest node to input pattern
winner_node=[winner//weights.shape[0],np.remainder(winner,weights.shape[1])]
weights+=neighborhood3D(patterns[i,:],weights,winner_node,j,total_epochs,eta,initial_neighbors,circular)
#if j==total_epochs-2 and i==patterns.shape[0]-1:
#print(weights,patterns)
if j==total_epochs-1:
order[i,:]=winner_node
return order
#%%
# 4.1 Topological ordering of animal species
# Import animal data
names=open('animalnames.txt').read().split() # List with animal names
num_animals=len(names) # Number of animals
attributes=open('animalattributes.txt').read().split() # List with attribute names
num_attributes=len(attributes) # Number of attributes
raw_data=open('animals.dat').read() # Raw patterns: string with 0, 1, comas and \n
# Get a vector with just 0s and 1s
patterns=[]
for i in range(len(raw_data)):
if raw_data[i]=='0':
patterns.append(0)
elif raw_data[i]=='1':
patterns.append(1)
patterns=np.array(patterns) # Transform list of patterns into array
patterns=np.reshape(patterns,(num_animals,num_attributes)) # Reshape pattern array into 2D matrix
# SOM hyperparameters
eta=0.2 # Learning rate
epochs=20 # Epochs
num_nodes=100 # Number of nodes for the map
# Initial weight matrix
weights=np.random.rand(num_nodes,num_attributes)
# Call the SOM function for the specified parameters
order=SOM_2D(patterns,weights,epochs,eta,initial_neighbors=50,circular=False) # Winning nodes for each case at the last iteration
# Organize names and scores in a dictionary
relation={names[0]:order[0]}
for i in range(1,len(names)):
relation.update({names[i]:order[i]})
# Sort the dictionary
sorted_relation = sorted(relation.items(), key=operator.itemgetter(1))
print(sorted_relation)
#%%
# 4.2 City ordering
# Obtain the information for the cities
raw_input=open('cities.dat').read().split() # Raw patterns: list of strings and floats
# Keep only those lit elements that can be converted into floats
patterns=[]
# Remove comas from list entries and save floats in a new list
for i in range(len(raw_input)):
if raw_input[i].find(',')!=-1:
#raw_input[i][raw_input[i].find(',')]=""
try:
patterns.append(float(raw_input[i][0:-2]))
except ValueError:
patterns.append(-1)
if raw_input[i].find(';')!=-1:
#raw_input[i][raw_input[i].find(',')]=""
try:
patterns.append(float(raw_input[i][0:-2]))
except ValueError:
patterns.append(-1)
# Remove -1 entries from list
patterns=np.array(patterns) # Convert to array
keep=np.where(patterns!=-1) # Keep non -1 indexes
patterns=patterns[keep]
# Reshape patterns into 10x2 matrix
patterns=np.reshape(patterns,(10,2))
plt.figure()
plt.scatter(patterns[:,0],patterns[:,1])
plt.title('Original cities')
# Node weights
weights=np.random.rand(patterns.shape[0],patterns.shape[1])
# Hyperparameters
epochs=1000
eta=1
city_order=SOM_2D(patterns,weights,epochs,eta,initial_neighbors=2,circular=True,replacement=True)
print(city_order)
# Plotting resulting path
plt.figure()
plt.scatter(patterns[:,0],patterns[:,1])
for i in range(len(city_order)-1):
plt.plot(np.linspace(patterns[city_order[i],0],patterns[city_order[i+1],0],100),np.linspace(patterns[city_order[i],1],patterns[city_order[i+1],1],100),'r')
plt.plot(np.linspace(patterns[city_order[-1],0],patterns[city_order[0],0],100),np.linspace(patterns[city_order[-1],1],patterns[city_order[0],1],100),'r')
plt.title('Shortest path between cities')
#%%
# 4.3 Votes of MPs
# Import necessary files
# File with MP names
with open('mpnames.txt') as f:
mp_names = f.readlines()
num_mps=len(mp_names)
for i in range(num_mps):
mp_names[i]=mp_names[i][0:-2]
# File with MP parties
with open('mpparty.dat') as f:
raw_party = f.readlines()
# File with MP sex
with open('mpsex.dat') as f:
raw_sex = f.readlines()
# File with MP district
with open('mpdistrict.dat') as f:
raw_district = f.readlines()
mp_sex=[]
mp_party=[]
mp_district=[]
mp_party.append(1)
for i in range(len(raw_party)):
if i<len(raw_sex):
if raw_sex[i]=='\t 0\n':
mp_sex.append(0)
elif raw_sex[i]=='\t 1\n':
mp_sex.append(1)
if i<len(raw_district):
mp_district.append(int(raw_district[i][-3:-1]))
if raw_party[i]=='\t 0\n':
mp_party.append(0)
elif raw_party[i]=='\t 1\n':
mp_party.append(1)
elif raw_party[i]=='\t 2\n':
mp_party.append(2)
elif raw_party[i]=='\t 3\n':
mp_party.append(3)
elif raw_party[i]=='\t 4\n':
mp_party.append(4)
elif raw_party[i]=='\t 5\n':
mp_party.append(5)
elif raw_party[i]=='\t 6\n':
mp_party.append(6)
elif raw_party[i]=='\t 7\n':
mp_party.append(7)
# Import patterns of votes
num_votes=31 # Number of votes per MP
raw_data=open('votes.dat').read() # Raw patterns: list of strings and floats
patterns=[]
for i in range(len(raw_data)):
if raw_data[i]=='0':
patterns.append(0)
elif raw_data[i]=='0.5':
patterns.append(0.5)
elif raw_data[i]=='1':
patterns.append(1)
patterns=np.array(patterns)
patterns=np.reshape(patterns,(num_mps,num_votes))
# Define weight matrix and hyperparameters
weights=np.random.rand(10,10,num_votes)
epochs=1000
eta=1
# Call the SOM algorithm
vote_order=SOM_3D(patterns,weights,epochs,eta,initial_neighbors=2,circular=False,replacement=False)
# Plots for different parties in the 10x10 grid
plt.figure()
plt.xlim(0,10)
plt.ylim(0,10)
for j in range(8):
for i in range(vote_order.shape[0]):
if mp_party[i] == j:
plt.plot(vote_order[i,0], vote_order[i,1], color='#000099', marker='o')
if j!=7:
plt.figure()
plt.xlim(0,10)
plt.ylim(0,10)
# Plots for the districts
plt.figure()
plt.xlim(0,10)
plt.ylim(0,10)
for j in range(30):
for i in range(vote_order.shape[0]):
if mp_district[i] == j:
plt.plot(vote_order[i,0], vote_order[i,1], color='r', marker='o')
if j!=29:
plt.figure()
plt.xlim(0,10)
plt.ylim(0,10)
# Plots for the sex
plt.figure()
plt.xlim(0,10)
plt.ylim(0,10)
for j in range(2):
for i in range(vote_order.shape[0]):
if mp_sex[i] == j:
plt.plot(vote_order[i,0], vote_order[i,1], color='g', marker='o')
if j!=1:
plt.figure()
plt.xlim(0,10)
plt.ylim(0,10)
end=time.time()
print(end-start) | [
"matplotlib"
] |
51800afba0a04f859acb85abe03965a270353a1e | Python | janiosl/python.ds | /ml/T1/code/visualizar_J_surface.py | UTF-8 | 795 | 2.96875 | 3 | [] | no_license | import os
import numpy as np
import matplotlib.pyplot as plt
#Usado para plotar 3d em fig.gca
from mpl_toolkits.mplot3d import Axes3D
def plot(J):
# Valores de theta0 e theta1 informados no enunciado do trabalho
theta0 = np.arange(-10, 10, 0.01)
theta1 = np.arange(-1, 4, 0.01)
# Comandos necessarios para o matplotlib plotar em 3D
fig = plt.figure()
ax = fig.gca(projection='3d')
# Plotando o grafico de superficie
theta0, theta1 = np.meshgrid(theta0, theta1)
surf = ax.plot_surface(theta0, theta1, J)
plt.xlabel('theta_0')
plt.ylabel('theta_1')
    filename = 'target/plot1.3.2.png'
    if not os.path.exists(os.path.dirname(filename)):
        os.makedirs(os.path.dirname(filename))
    # Save before plt.show(): once the interactive window is closed the figure
    # may be empty, which would leave a blank image on disk.
    plt.savefig(filename)
    plt.show()
return surf
| [
"matplotlib"
] |
5e60efd5b30cd685f63b21ece62f5e137460a641 | Python | DatLQ95/tue_project | /server_model.py | UTF-8 | 3,194 | 2.625 | 3 | [] | no_license | import random
import simpy
import numpy as np
import matplotlib.pyplot as plt
SEED = 42
average_processing_time = 25
response_times =[]
queue_lengths = []
waiting_times = []
concurrency = 100
num_cores = 4
class Client():
# initial with parameters:
def __init__(self, env, out_pipe, in_pipe, index):
self.env = env
self.out_pipe = out_pipe
self.in_pipe = in_pipe
self.index = index
self.request = dict()
self.action = [env.process(self.run(in_pipe=self.in_pipe[i], out_pipe = self.out_pipe[i], index= i)) for i in range(4)]
#prepare the request to send
def prepare_request(self):
processing_time = random.expovariate(1/average_processing_time)
arrival_time = self.env.now
self.request = {1: processing_time, 2: self.index, 3: arrival_time}
# send the request:
def send_request(self, out_pipe):
out_pipe.put(self.request)
pass
def analyse_response(self, response, index):
response_time = self.env.now - response[3]
response_times.append(response_time)
print("response = " + str(response_time))
print("index = " + str(index))
pass
def run(self, in_pipe, out_pipe, index):
while True:
self.prepare_request()
self.send_request(out_pipe)
#wait for the request to comeback:
response = yield in_pipe.get(filter=lambda x: True if x[2] == self.index else False)
self.analyse_response(response, index)
class Server():
def __init__(self, env, in_pipe, out_pipe, index):
self.env = env
self.out_pipe = out_pipe
self.in_pipe = in_pipe
self.processing_time = 0
self.request = dict()
self.response = dict()
self.index = index
self.action = [env.process(self.run(in_pipe=self.in_pipe[i], out_pipe = self.out_pipe[i])) for i in range(len(in_pipe))]
#prepare the request to send
def process_request(self):
self.processing_time = self.request[1]
arrival_time = self.request[3]
waiting_time = self.env.now - arrival_time
waiting_times.append(waiting_time)
# queue_length = len(self.in_pipe.items)
# queue_lengths.append(queue_length)
# send the request:
# def prepare_response(self):
# yield self.env.timeout(self.processing_time)
def send_response(self, out_pipe):
self.response = self.request
out_pipe.put(self.response)
def run(self, in_pipe, out_pipe):
while True:
#wait for request:
self.request = yield in_pipe.get()
self.process_request()
yield self.env.timeout(self.processing_time)
self.send_response(out_pipe)
random.seed(SEED)
environment = simpy.Environment()
to_server=[simpy.Store(environment) for i in range(4)]
to_client=[simpy.FilterStore(environment) for i in range(4)]
clients = [Client(env=environment, out_pipe=to_server, in_pipe=to_client, index=i) for i in range(concurrency)]
servers = [Server(env=environment, in_pipe=to_server, out_pipe=to_client, index=i) for i in range(num_cores)]
environment.run(100) | [
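
# Editor's addition (illustrative reporting only): the simulation above fills
# response_times and waiting_times but never summarises them, so print two
# headline averages once env.run() returns. No claim is made about which
# queueing statistics the original author intended to report.
if response_times:
    print("mean response time:", np.mean(response_times))
if waiting_times:
    print("mean waiting time:", np.mean(waiting_times))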
"matplotlib"
] |
27031dcf7a7254796d5535e251feb2d3f6f268bf | Python | hasansajedi/StartMachineLerning | /projects/Time Series Analysis and Forecasting/2.py | UTF-8 | 2,536 | 2.8125 | 3 | [] | no_license | # https://datahack.analyticsvidhya.com/contest/practice-problem-time-series-2/
import warnings # `do not disturbe` mode
warnings.filterwarnings('ignore')
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import statsmodels.api as sm
from sklearn.metrics import mean_squared_error
from math import sqrt
RMSE = pd.DataFrame(columns=['method', 'result'])
# Importing data
df_test = pd.read_csv('data/test.csv')
# Subsetting the dataset
# Index 11856 marks the end of year 2013
df = pd.read_csv('data/train.csv')
# Creating train and test set
# Index 10392 marks the end of October 2013
train = df[0:]
# train = df[0:10392]
# test = df[10392:]
# Aggregating the dataset at daily level
df['Timestamp'] = pd.to_datetime(df['Datetime'], format='%d-%m-%Y %H:%M')
df.index = df['Timestamp']
train['Timestamp'] = pd.to_datetime(train['Datetime'], format='%d-%m-%Y %H:%M')
train.index = train['Timestamp']
df_test['Timestamp'] = pd.to_datetime(df_test['Datetime'], format='%d-%m-%Y %H:%M')
df_test.index = df_test['Timestamp']
test = df_test
y_hat_avg = test.copy()
fit1 = sm.tsa.statespace.SARIMAX(train.Count, order=(1, 1, 1),
seasonal_order=(1, 1, 1, 12),
enforce_stationarity=False,
enforce_invertibility=False).fit()
y_hat_avg['Count'] = fit1.predict(start="00:00 26-09-2014", end="00:00 26-04-2015", dynamic=True, full_results=True)
print(y_hat_avg.head())
# RMSE = RMSE.append({"method": 'SARIMA', "result": sqrt(mean_squared_error(test.Count, y_hat_avg.Count))},
# ignore_index=True)
# print("------------------------ RESULT ------------------------")
# print(fit1.summary().tables[1])
# print(y_hat_avg.columns)
y_hat_avg = y_hat_avg.drop(['Timestamp','Datetime'], axis=1)
y_hat_avg.to_csv('result.csv', index=None)
# fit1.plot_diagnostics(figsize=(15, 12))
# plt.show()
# pred = fit1.get_prediction(start=pd.to_datetime('09-2014'), dynamic=False)
# pred_ci = pred.conf_int()
# print(pred.summary)
# pred.predicted_mean.plot(ax=ax, label='One-step ahead Forecast', alpha=.7)
# ax.fill_between(pred_ci.index,
# pred_ci.iloc[:, 0],
# pred_ci.iloc[:, 1], color='k', alpha=.2)
# plt.figure(figsize=(16, 8))
# plt.plot(train['Count'], label='Train')
# plt.plot(test['Count'], label='Test')
# plt.plot(pred.predicted, label='SARIMA')
# plt.legend(loc='best')
# plt.show()
| [
"matplotlib"
] |
a83efdca2557f2b2c4b7924abba3959e7b6d3f7f | Python | nilestate15/EENG699-Project1 | /project1_template.py | UTF-8 | 4,762 | 2.96875 | 3 | [] | no_license | import scipy.io as spio
import numpy as np
import llh2ecef as l2e
import matplotlib.pyplot as plt
import math
# CONSTANTS
A = 6378137.0 # meters
E = 0.0818191908426 # unitless
E2 = 0.00669437999013
# load mat file
mat = spio.loadmat('proj1_flight_trajectory.mat', squeeze_me=True)
trajectory = mat['K']
# Extract variable vectors from struct
t = trajectory['t'].tolist() # [seconds]
lat = trajectory['lat'].tolist() # [radians]
lon = trajectory['lon'].tolist() # [radians]
h = trajectory['h'].tolist() # [meters] (above ellipsoid)
# Stack list vectors into columns of LLH numPy array
LLH = np.vstack((lat, lon, h)).T # LLH in 3 columns
def llh2ecef(llh):
"""
Convert latitude [rad], longitude [rad], and height [m above ellipsoid]
values into Earth Centered Earth Fixed (ECEF) [m] coordinates
:param llh 1x3 numpy array of geodetic coordinates
llh[0] = latitude (radians)
llh[1] = longitude (radians)
llh[2] = height (meters above ellipsoid)
:return ecef 1x3 numpy array of ECEF coordinates [x y z in meters]
"""
# print("llh =", llh)
sin_lat = np.sin(llh[0]) # define for multiple reuse
a_ke = A / np.sqrt(1 - E2 * sin_lat**2)
rho = (a_ke + llh[2]) * np.cos(llh[0])
ecef = np.array([rho * np.cos(llh[1]),
rho * np.sin(llh[1]),
(a_ke * (1 - E2) + llh[2]) * sin_lat])
return ecef
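
# Editor's sketch (defined but never called): quick sanity check for
# llh2ecef(). A point on the equator at the prime meridian with zero height
# should map to roughly (A, 0, 0); the 1-metre tolerance is an arbitrary
# illustrative choice.
def _llh2ecef_sanity_check():
    ecef = llh2ecef(np.array([0.0, 0.0, 0.0]))
    assert np.allclose(ecef, [A, 0.0, 0.0], atol=1.0)
    return ecef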
# Geodetic to Local Level
# YOUR CODE HERE
# Setting origin of local-level frame
origin = LLH[0]
origin[2] = 0.
enuG2L = []
# Conversion Process from Geodetic to local-level frame (starts from index 1 due to index 0 being origin)
coords = LLH[1:]
for i in range(len(coords)):
coord = coords[i]
# differences between coordinates
diff_lat = coord[0] - origin[0]
diff_lon = coord[1] - origin[1]
diff_h = coord[2] - origin[2]
# Equivalent Radius
Rm = (A * (1 - E**2)) / ((1 - E**2 * math.sin(coord[0])**2))**(3/2)
Rp = (A) / ((1 - E**2 * math.sin(coord[0])**2))**(1/2)
# Conversion to meters
Pe = (Rp + coord[2]) * math.cos(coord[0]) * diff_lon
Pn = (Rm + coord[2]) * diff_lat
Pu = diff_h
# Putting local-level coordinates in array
PG = np.array([Pe, Pn, Pu])
enuG2L += [PG]
enuG2L = np.array(enuG2L)
# Geodetic to ECEF to Local Level
# YOUR CODE HERE
# Converting Geodetic origin to ECEF origin
ecef_origin = llh2ecef(origin)
enuE2L = []
# DCM matrix
CGE = np.array([[-(math.sin(origin[1])), math.cos(origin[1]), 0.0],
[-(math.sin(origin[0]) * math.cos(origin[1])), -(math.sin(origin[0]) * math.sin(origin[1])), math.cos(origin[0])],
[(math.cos(origin[0]) * math.cos(origin[1])), (math.cos(origin[0]) * math.sin(origin[1])), math.sin(origin[0])]])
# Conversion Process from ECEF to local-level frame (starts from index 1 due to index 0 being origin)
for n in range(len(coords)):
# Converting Geodetic Coords to ECEF
coord_4e = coords[n]
ecef_coord = llh2ecef(coord_4e)
# Finding deltas of X,Y,Z
diff_x = ecef_coord[0] - ecef_origin[0]
diff_y = ecef_coord[1] - ecef_origin[1]
diff_z = ecef_coord[2] - ecef_origin[2]
# Making PE array
PE = np.array([diff_x, diff_y, diff_z]).T
# Solving to find PG for coord
ecef_PG = np.dot(CGE, PE)
# Adding to array
enuE2L += [ecef_PG]
enuE2L = np.array(enuE2L)
# Plotting
# YOUR CODE HERE
# Plot 1 (Horizontal Position)
plot1 = plt.figure(1)
plt.title('Horizontal Position')
plt.plot(enuG2L[:,0], enuG2L[:,1], marker = '.', markersize = 5)
plt.plot(enuE2L[:,0], enuE2L[:,1], marker = '.', markersize = 5)
plt.legend(['enuG2L', 'enuE2L'])  # only two trajectories are plotted above
plt.xlabel('Easting (m)')
plt.ylabel('Northing (m)')
plt.grid(linestyle = '--', linewidth = 0.5)
# Plot 2 (Altitude vs Time)
t = np.array (range(1, len(enuE2L)+1))
plot2 = plt.figure(2)
plt.title('Altitude vs Time')
plt.plot(t, enuG2L[:,2], marker = '.', markersize = 5)
plt.plot(t, enuE2L[:,2], marker = '.', markersize = 5)
plt.legend(['enuG2L', 'enuE2L'])
plt.xlabel('Time (s)')
plt.ylabel('Altitude (m)')
plt.grid(linestyle = '--', linewidth = 0.5)
# Plot 3 (Delta of enuE2L and enuG2L)
fig1, (ax1, ax2, ax3) = plt.subplots(3)
delta_e = enuE2L[:,0] - enuG2L[:,0]
delta_n = enuE2L[:,1] - enuG2L[:,1]
delta_u = enuE2L[:,2] - enuG2L[:,2]
fig1.suptitle('Deltas of enuE2L and enuEGL vs Time')
ax1.plot(t, delta_e, marker = '.', markersize = 5)
ax1.set_xlabel('Time (s)')
ax1.set_ylabel('Delta Easting (m)')
ax2.plot(t, delta_n, marker = '.', markersize = 5)
ax2.set_xlabel('Time (s)')
ax2.set_ylabel('Delta Northing (m)')
ax3.plot(t, delta_u, marker = '.', markersize = 5)
ax3.set_xlabel('Time (s)')
ax3.set_ylabel('Delta Up (m)')
plt.show() # show all plots
| [
"matplotlib"
] |
f25ff03d4d6243d2fb97a2822584339759815159 | Python | UPTOLIMIT/One-stop-mathematical-modeling-contest | /code/lines/ErrorBandCurve.py | UTF-8 | 1,108 | 2.859375 | 3 | [] | no_license | import numpy as np
from scipy.interpolate import splprep, splev
import matplotlib.pyplot as plt
from matplotlib.path import Path
from matplotlib.patches import PathPatch
N = 400
t = np.linspace(0, 2 * np.pi, N)
r = 0.5 + np.cos(t)
x, y = r * np.cos(t), r * np.sin(t)
# Error amplitudes depending on the curve parameter *t*
# (actual values are arbitrary and only for illustrative purposes):
err = 0.05 * np.sin(2 * t) ** 2 + 0.04 + 0.02 * np.cos(9 * t + 2)
# calculate normals via derivatives of splines
tck, u = splprep([x, y], s=0)
dx, dy = splev(u, tck, der=1)
l = np.hypot(dx, dy)
nx = dy / l
ny = -dx / l
# end points of errors
xp = x + nx * err
yp = y + ny * err
xn = x - nx * err
yn = y - ny * err
vertices = np.block([[xp, xn[::-1]],
[yp, yn[::-1]]]).T
codes = Path.LINETO * np.ones(len(vertices), dtype=Path.code_type)
codes[0] = codes[len(xp)] = Path.MOVETO
path = Path(vertices, codes)
patch = PathPatch(path, facecolor='C0', edgecolor='none', alpha=0.3)
fig, ax = plt.subplots()
ax.plot(x, y)
ax.add_patch(patch)
plt.savefig("./ErrorBandCurve.png",dpi=600)
plt.show() | [
"matplotlib"
] |
0801c914f43ec350b84529a639dbbd4dbc543a69 | Python | fergusbarratt/PhysicsAlgorithms | /MolecularDynamics/md.py | UTF-8 | 3,326 | 2.796875 | 3 | [
"MIT"
] | permissive | import numpy as np
from scipy.integrate import odeint
import matplotlib.pyplot as plt
from numpy.random import rand
import matplotlib.colors as colors
class VectorTools(object):
    def _distance(self, vec_1, vec_2):
        # Note: returns the *squared* Euclidean distance (no square root is taken).
        return np.sum([(x - y)**2 for x, y in zip(vec_1, vec_2)])
def _index_matrix_like(self, A, re_spacing=1):
A = np.asarray(A)
ind_mat = np.zeros_like(A)
for row_ind, col in enumerate(ind_mat):
for col_ind, elem in enumerate(col):
ind_mat[row_ind, col_ind] = np.asarray([row_ind, col_ind])
return ind_mat * re_spacing
def _flip_second(dims):
dims[1] = -dims[1]
return dims
def _flip_first(dims):
dims[0] = -dims[0]
return dims
class Solid(VectorTools):
def __init__(
self,
initial_displacements,
initial_velocities,
masses,
lennard_jones_params,
friction_coefficients=None,
lattice_spacing=0.2):
'''initial_displacements is nxn matrix of tuples, and
initial_velocities are nxn matrices for nxn solids'''
if not friction_coefficients:
self.friction_coefficients = np.zeros_like(initial_displacements)
self.initial_positions = np.add(self._index_matrix_like(
initial_displacements, lattice_spacing), initial_displacements)
self.initial_velocities = np.asarray(initial_velocities)
self.masses = np.asarray(masses)
self.non_bonding_potential = self._non_bonding(lennard_jones_params)
self.position_data = [self.initial_positions[i, j]
for i, j in np.ndindex(
self.initial_positions.shape[:2])]
def _lennard_jones(self, diameter, well_depth):
def potential(r):
return 4 * well_depth * ((diameter / r)**12 - (diameter / r)**6)
return potential
def _non_bonding(self, lennard_jones_params):
def potential(x, y):
r = np.asarray([x, y])
return np.sum([self._lennard_jones(*lennard_jones_params)(
self._distance(r, r1)) for r1 in self.position_data
if (self._distance(r, r1)) != 0])
return potential
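
# Editor's sketch (module-level helper, never called): plots the bare
# Lennard-Jones pair potential used inside Solid._lennard_jones so its shape
# can be inspected on its own. The r-range is an arbitrary illustrative window
# around the diameter.
def _plot_lj_pair_potential(diameter=1.0, well_depth=1.0):
    r = np.linspace(0.9 * diameter, 3.0 * diameter, 300)
    v = 4 * well_depth * ((diameter / r) ** 12 - (diameter / r) ** 6)
    plt.plot(r, v)
    plt.xlabel('r')
    plt.ylabel('V(r)')
    plt.show()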
# rand(*dims, 2) below draws one random 2-D displacement per lattice site in a single call
dims = [2, 2]
initial_displacements = rand(*dims, 2)
initial_velocities = [0, 0, 0]
masses = [1, 1, 1]
lennard_jones_params = (1, 1)
flipFirst = VectorTools._flip_first
testSolid = Solid(initial_displacements, initial_velocities,
masses, lennard_jones_params, lattice_spacing=0.1)
xs = [testSolid.initial_positions[i, j][0]
for i, j in np.ndindex(testSolid.initial_positions.shape[:2])]
ys = [testSolid.initial_positions[i, j][1]
for i, j in np.ndindex(testSolid.initial_positions.shape[:2])]
sizes = [1000 for _ in range(len(initial_displacements))]
print([2*dim for dim in flipFirst(dims)])
X, Y = np.meshgrid(
np.linspace(*[2*dim for dim in flipFirst(dims)], 150),
np.linspace(*[2*dim for dim in flipFirst(dims)], 150))
vfunc = np.vectorize(testSolid.non_bonding_potential)
Z = vfunc(X, Y)
fig, ax = plt.subplots()
# plt.scatter(xs, ys, s=sizes)
pcm = ax.contourf(Z, cmap="seismic")
plt.colorbar(pcm)
plt.show()
| [
"matplotlib"
] |
ecc50694d64403b45cff8f637f6f5f7dff32d603 | Python | Dipesh13/level_zero | /model_kmeans.py | UTF-8 | 1,257 | 2.96875 | 3 | [] | no_license | import matplotlib.pyplot as plt
# import seaborn as sns; sns.set()
import numpy as np
# Generate some data
from sklearn.datasets.samples_generator import make_blobs
X, y_true = make_blobs(n_samples=400, centers=4,
cluster_std=0.60, random_state=0)
X = X[:, ::-1] # flip axes for better plotting
# Plot the data with K Means Labels
# from sklearn.cluster import KMeans
# kmeans = KMeans(4, random_state=0)
# labels = kmeans.fit(X).predict(X)
# plt.scatter(X[:, 0], X[:, 1], c=labels, s=40, cmap='viridis');
# plt.show()
from sklearn.cluster import KMeans
from scipy.spatial.distance import cdist
def plot_kmeans(kmeans, X, n_clusters=4, rseed=0, ax=None):
labels = kmeans.fit_predict(X)
# plot the input data
ax = ax or plt.gca()
ax.axis('equal')
ax.scatter(X[:, 0], X[:, 1], c=labels, s=40, cmap='viridis', zorder=2)
# plot the representation of the KMeans model
centers = kmeans.cluster_centers_
radii = [cdist(X[labels == i], [center]).max()
for i, center in enumerate(centers)]
for c, r in zip(centers, radii):
ax.add_patch(plt.Circle(c, r, fc='#CCCCCC', lw=3, alpha=0.5, zorder=1))
plt.show()
kmeans = KMeans(n_clusters=4, random_state=0)
plot_kmeans(kmeans, X) | [
"matplotlib",
"seaborn"
] |
d87ae9f5cf9dc46e502e29ea2094d67621277bd8 | Python | HoaNguyen55/streamlit_football_manager | /main.py | UTF-8 | 23,997 | 2.578125 | 3 | [] | no_license | import base64
import pandas as pd
import numpy as np
import sqlite3
from sqlite3 import Connection
import streamlit as st
from datetime import date, datetime
import os.path
import copy as cp
import re
import plotly.graph_objects as go
import time
def welcome(enabled):
if enabled:
t = st.empty()
text = "WELCOME TO MY FIRST APPLICATION"
for i in range(len(text) + 1):
t.markdown("# %s" % text[0:i])
time.sleep(0.01)
class main:
def __init__(self):
self.menu = ['Trang Chủ', 'Hỏi Đáp', 'Biểu Đồ', 'Trợ Giúp']
self.pos = ["Thủ Môn", "Hậu Vệ", 'Tiền Vệ', 'Tiền Đạo']
self.club = ['Việt Nam', 'Nhật Bản', 'Saudi Arabia', 'Trung Quốc', 'Úc', 'Oman']
self.removeOpt = ('Xóa tất cả', 'Xóa từng dòng')
self.qaOpt = ('Tìm tên cầu thủ', 'Lọc độ tuổi cầu thủ', 'Vị trí và Câu lạc bộ')
self.saveOpt = ('Lưu Biểu Đồ', 'Lưu Dữ Liệu')
if 'flagOpenFile' not in st.session_state:
st.session_state.flagOpenFile = False
if 'flag' not in st.session_state:
st.session_state.flag = False
if 'welcome' not in st.session_state:
st.session_state.welcome = True
self.home()
def home(self):
# Menu choice
with st.form(key='Form1'):
choice = st.sidebar.selectbox("Menu", self.menu)
# Select menu
if choice == self.menu[0]: # Trang chủ
if st.button('Welcome'):
st.session_state.welcome = True
welcome(st.session_state.welcome)
st.title('Trang chủ')
st.image("football-manager-champion.jpg")
buttonOpenFile = st.file_uploader("Tải file dữ liệu lên", type=["db", "csv", "xlsx"])
st.info("After upload file completely, please click x to work around clear the cache")
with st.expander("Bật tắt hiển thị dữ liệu"):
if buttonOpenFile is not None:
st.info('Dữ liệu được thêm hoàn tất')
_, fileExtension = os.path.splitext(str(buttonOpenFile.name))
if fileExtension in ['.xlsx', '.xls']:
df = pd.read_excel(str(buttonOpenFile.name), engine='openpyxl')
st.session_state.flagOpenFile = False
st.session_state.flag = True
elif fileExtension in ['.csv']:
df = pd.read_csv(str(buttonOpenFile.name), encoding='utf-8')
st.session_state.flagOpenFile = False
st.session_state.flag = True
else: # for *.db file
conn = self.get_connection(str(buttonOpenFile.name))
self.init_db(conn)
split_db_name = str(buttonOpenFile.name).split('.')
db_name = split_db_name[0]
df = pd.DataFrame(self.get_data(conn, db_name))
st.session_state.flagOpenFile = True
st.session_state.flag = True
if st.session_state.flag:
st.session_state.ssDf = df
else:
st.warning('File dữ liệu chưa được thêm mới')
# Input Data
with st.expander('Hiển thị nhập dữ liệu'):
nameValue, yearValue, numValue, clubValue, posValue = self.nhapDuLieu()
if nameValue != '' or len(nameValue) != 0:
if st.button('Thêm'):
newYearValue = datetime.strptime(str(yearValue), '%Y-%m-%d').strftime('%d/%m/%Y')
if st.session_state.flagOpenFile:
lst = np.array([nameValue, newYearValue, posValue, clubValue, numValue])
else:
lst = [nameValue, newYearValue, posValue, clubValue, numValue]
self.importTable(lst)
st.success("Thêm dữ liệu cầu thủ <<< {} >>> hoàn tất".format(nameValue))
else:
st.warning('Người dùng cần nhập đầy đủ thông tin')
# Delete data
col = st.columns(2)
boxRemove = col[0].selectbox('Lựa Chọn', options=self.removeOpt)
buttonRemove = col[0].button('Xóa')
with st.expander('Hiển thị chỉnh sửa dữ liệu'):
nameValue, yearValue, numValue, clubValue, posValue = self.nhapDuLieuEdit()
lineNumEdit = st.number_input("Nhập số dòng", min_value=1, format='%d', help='Nhập số dòng cần chỉnh sửa')
newYearVal = datetime.strptime(str(yearValue), '%Y-%m-%d').strftime('%d/%m/%Y')
if st.button('Sửa'):
if st.session_state.flagOpenFile:
lst = np.array([nameValue, newYearVal, posValue, clubValue, numValue])
else:
lst = [nameValue, newYearVal, posValue, clubValue, numValue]
st.session_state.ssDf.iloc[lineNumEdit] = lst
if boxRemove == 'Xóa từng dòng':
id_row_rmv = col[1].text_input('Nhập số dòng cần xóa')
id_row_rmv = self.randNumInput(id_row_rmv)
if buttonRemove and id_row_rmv is not None:
st.write(st.session_state.ssDf['Họ và Tên'][id_row_rmv])
st.info('Độ dài dữ liệu trước khi xóa: ' + str(len(st.session_state.ssDf)))
st.session_state.ssDf = st.session_state.ssDf.drop(id_row_rmv)
st.session_state.ssDf = st.session_state.ssDf.reset_index(drop=True)
st.info('Độ dài dữ liệu sau khi xóa: ' + str(len(st.session_state.ssDf)))
else:
if buttonRemove:
st.session_state.ssDf = st.session_state.ssDf.drop(index=list(range(len(st.session_state.ssDf))))
if st.checkbox('Show data info', value=False):
st.dataframe(st.session_state.ssDf)
buttonSave = st.button('Lưu Dữ Liệu')
if buttonSave:
st.markdown(self.download_link(st.session_state.ssDf), unsafe_allow_html=True)
elif choice == self.menu[1]: # Hỏi đáp
try:
_newDf = None
_copy_Df = None
split_space_word = []
st.title('Hỏi Đáp')
self.check_database_exist()
boxQa = st.selectbox('Lựa Chọn Câu Hỏi', options=self.qaOpt)
st.markdown('*Lựa chọn cột hiển thị*')
col = st.columns(5)
filter_col1 = col[0].checkbox('Họ và Tên', True)
filter_col2 = col[1].checkbox('Ngày Sinh', True)
filter_col3 = col[2].checkbox('Vị Trí', True)
filter_col4 = col[3].checkbox('Câu Lạc Bộ', True)
filter_col5 = col[4].checkbox('Số Áo', True)
col_filter_list = [filter_col1, filter_col2, filter_col3, filter_col4, filter_col5]
if boxQa == self.qaOpt[0]:
names = st.text_input('Tìm cầu thủ') # Người dùng nhập 1 hoặc nhiều tên
# và cách nhau bằng dấu phẩy
optionSearch = st.radio('Cách tìm kiếm', ('Chính Xác', 'Tương Đối'), index=0)
if st.session_state.ssDf is not None and len(names) > 0:
split_comma = names.split(',')
for word in split_comma:
word_list = re.findall(r"[\w']+", word)
split_space_word.extend(word_list)
_newDf = self.search_string(split_space_word, optionSearch)
elif boxQa == self.qaOpt[1]:
old_lst = st.slider('Nhập Tuổi', min_value=18, max_value=50, value=[18, 20], step=1) # Nhập số tuổi
# của cầu thủ
if st.session_state.ssDf is not None and len(old_lst) > 0:
_newDf = self.search_number(old_lst)
elif boxQa == self.qaOpt[2]:
col1 = st.sidebar.selectbox("Vị Trí", self.pos)
col2 = st.sidebar.selectbox("Câu Lạc Bộ", self.club)
col_lst = [col1, col2]
if st.session_state.ssDf is not None:
_newDf = self.search_col(col_lst)
buttonQa = st.button('Trả Lời')
if buttonQa:
if _newDf is not None:
_newDf = self.filter_col(_newDf, col_filter_list)
import time
latest_iteration = st.empty()
bar = st.progress(0)
num = 10
for i in range(0, num + 1, 1):
latest_iteration.text(f'{num - i} seconds left')
bar.progress((100 // num) * i)
time.sleep(0.1)
st.dataframe(_newDf)
else:
st.error('Không có dữ liệu để trả lời câu hỏi. Vui lòng kiểm tra lại thông tin nhập')
except:
st.error('Chưa có dữ liệu')
elif choice == self.menu[2]: # Biểu đồ
st.title('Biểu Đồ')
try:
cp_df = cp.deepcopy(st.session_state.ssDf)
self.check_database_exist()
if cp_df is None:
return
fig = None
num_club_dict = self.cal_string_club(cp_df['Câu Lạc Bộ'])
num_pos_dict = self.cal_string_pos(cp_df['Câu Lạc Bộ'], num_club_dict, cp_df['Vị Trí'])
chart_visual = st.sidebar.selectbox('Lựa chọn biểu đồ',
('Bar Chart', 'Pie Chart'))
opt_club = self.club[:]
opt_club.insert(0, 'Tất Cả')
list_club_keys = list(num_club_dict.keys())
list_club_val = list(num_club_dict.values())
list_pos_of_club_keys = list(num_pos_dict.keys())
list_pos_of_club_val = list(num_pos_dict.values())
list_pos_of_club_in_keys = list(list_pos_of_club_val[0].keys())
if chart_visual == 'Bar Chart':
detail = st.sidebar.checkbox('Chi Tiết',
help='Thể hiện chi tiết số lượng từng vị trí trong đội bóng')
# create list to append into go.Bar
graph_bar = []
for i in range(len(list_club_keys)):
graph_bar.append(go.Bar(name=list_club_keys[i],
x=[list_club_keys[i]],
y=[list_club_val[i]]))
fig = go.Figure(data=graph_bar)
# Change the bar mode
fig.update_layout(title='Số Lượng Cầu Thủ Của Từng Đội Bóng World Cup 2021',
barmode='group',
xaxis_title="Các Quốc Gia Tham Gia World Cup 2022 Bảng A",
yaxis_title="Số Lượng Cầu Thủ",
font=dict(
family="Courier New, monospace",
size=15))
if detail:
x_axis_i = []
y_axis_0 = [] # position Thu Mon
y_axis_1 = [] # position Hau Ve
y_axis_2 = [] # position Tien Ve
y_axis_3 = [] # position Tien Dao
for y in range(len(list_club_keys)):
x_axis_i.append(list_pos_of_club_keys[y])
x_axis_0 = x_axis_i
for i in range(len(list_pos_of_club_in_keys)):
for y in range(len(list(list_pos_of_club_val))):
if i == 0:
y_axis_0.append(list(list_pos_of_club_val[y].values())[i])
if i == 1:
y_axis_1.append(list(list_pos_of_club_val[y].values())[i])
if i == 2:
y_axis_2.append(list(list_pos_of_club_val[y].values())[i])
if i == 3:
y_axis_3.append(list(list_pos_of_club_val[y].values())[i])
fig = go.Figure(data=[
go.Bar(name=list_pos_of_club_in_keys[0],
x=x_axis_0,
y=y_axis_0),
go.Bar(name=list_pos_of_club_in_keys[1],
x=x_axis_0,
y=y_axis_1),
go.Bar(name=list_pos_of_club_in_keys[2],
x=x_axis_0,
y=y_axis_2),
go.Bar(name=list_pos_of_club_in_keys[3],
x=x_axis_0,
y=y_axis_3)])
# Change the bar mode
fig.update_layout(title='Số Lượng Vị Trí Cầu Thủ Đội Bóng World Cup 2021',
barmode='group',
xaxis_title="Các Quốc Gia Tham Gia World Cup 2022 Bảng A",
yaxis_title="Số Lượng Cầu Thủ",
font=dict(
family="Courier New, monospace",
size=15)
)
elif chart_visual == 'Pie Chart':
fig = go.Figure(data=[go.Pie(labels=list_club_keys,
values=list_club_val,
hovertemplate="%{label} "
"<br>Số lượng cầu thủ: %{value} </br> "
"Tỉ lệ phần trăm: %{percent}")])
st.write(fig)
except:
st.error('Chưa có dữ liệu')
elif choice == self.menu[3]: # Liên hệ
st.title('Liên Hệ')
self.info()
def nhapDuLieu(self):
nameValue = st.text_input("Tên Đầy Đủ", help='Nhập họ và tên cầu thủ')
col1, col2 = st.columns(2)
yearValue = col1.date_input('Ngày Sinh', help='Nhập ngày tháng năm sinh cầu thủ',
min_value=datetime(1950, 1, 1), max_value=datetime.now())
numValue = col2.number_input("Số Áo", min_value=1, format='%d', help='Nhập số áo cầu thủ')
clubValue = col1.selectbox("Câu Lạc Bộ", tuple(self.club), help='Chọn câu lạc bộ cầu thủ đang tham gia')
posValue = col2.selectbox("Vị Trí", tuple(self.pos), help='Chọn vị trí của cầu thủ')
return nameValue, yearValue, numValue, clubValue, posValue
def nhapDuLieuEdit(self):
nameValue = st.text_input("Tên Đầy Đủ", help='Nhập họ và tên cầu thủ', key='name')
col1, col2 = st.columns(2)
yearValue = col1.date_input('Ngày Sinh', help='Nhập ngày tháng năm sinh cầu thủ',
min_value=datetime(1950, 1, 1), max_value=datetime.now(), key='date')
numValue = col2.number_input("Số Áo", min_value=1, format='%d', help='Nhập số áo cầu thủ', key='number')
clubValue = col1.selectbox("Câu Lạc Bộ", tuple(self.club), help='Chọn câu lạc bộ cầu thủ tham gia', key='club')
posValue = col2.selectbox("Vị Trí", tuple(self.pos), help='Chọn vị trí của cầu thủ', key='pos')
return nameValue, yearValue, numValue, clubValue, posValue
@staticmethod
def getList(inputDict):
return list(inputDict.keys())
def cal_string_pos(self, clubDf, clubdict, posLstDf):
out_pos_dict = {}
out_dict = {}
clubdict_keylst = list(clubdict.keys())
clubdict_vallst = list(clubdict.values())
for i in range(len(posLstDf)):
for j in range(len(posLstDf)):
if posLstDf[i] == posLstDf[j]:
out_pos_dict[posLstDf[i]] = 0
else:
pass
pos_dict = self.getList(out_pos_dict)
for i in range(len(clubDf)):
for j in range(len(clubdict_keylst)):
if clubDf[i] == clubdict_keylst[j]:
for k in range(len(pos_dict)):
if pos_dict[k] == posLstDf[i]:
out_pos_dict[pos_dict[k]] = out_pos_dict.get(pos_dict[k]) + 1
out_dict[clubdict_keylst[j]] = out_pos_dict
if sum(out_pos_dict.values()) == clubdict_vallst[j]:
out_pos_dict = out_pos_dict.fromkeys(out_pos_dict, 0)
return out_dict
def cal_string_club(self, inputClubLstDf):
out_club_dict = {}
for i in range(len(inputClubLstDf)):
for j in range(len(inputClubLstDf)):
if inputClubLstDf[i] == inputClubLstDf[j]:
out_club_dict[inputClubLstDf[i]] = 0
else:
pass
pos_dict = self.getList(out_club_dict)
for i in range(len(inputClubLstDf)):
for j in range(len(pos_dict)):
if pos_dict[j] == inputClubLstDf[i]:
out_club_dict[pos_dict[j]] = out_club_dict.get(pos_dict[j]) + 1
return out_club_dict
@staticmethod
def check_database_exist():
if st.session_state.ssDf is not None:
_copy_Df = cp.deepcopy(st.session_state.ssDf)
st.dataframe(_copy_Df)
col1, col2 = st.columns([4, 1])
clearDB = col2.button('Clear dữ liệu')
if clearDB:
_copy_Df = _copy_Df[0:0]
col1.warning('Dữ liệu chưa được nhập')
else:
col1.info('Dữ liệu đã được nhập')
@staticmethod
def search_col(col_list=None):
obj = None
cp_df = cp.deepcopy(st.session_state.ssDf)
if col_list is not None:
obj = cp_df[cp_df['Vị Trí'] == col_list[0]]
obj = obj[obj['Câu Lạc Bộ'] == col_list[1]]
return obj
@staticmethod
def filter_col(df, col_filter_list=None):
show_names_lst = []
if col_filter_list is not None:
for i in range(len(col_filter_list)):
if col_filter_list[i]:
show_names_lst.append(df.columns[i])
obj = df[show_names_lst]
return obj
@staticmethod
def session_state_df(dataframe):
if dataframe is not None:
st.session_state.ssDf = dataframe
return st.dataframe(st.session_state.ssDf)
@staticmethod
def search_number(oldList):
result = []
today = date.today()
currentYear = today.strftime('%Y')
cp_df = cp.deepcopy(st.session_state.ssDf)
for dateIdx in cp_df['Ngày Sinh']:
date_transfer_df = datetime.strptime(dateIdx, '%d/%m/%Y')
year_player = date_transfer_df.strftime('%Y')
old_player = int(currentYear) - int(year_player)
if oldList[0] <= int(old_player) <= oldList[1]:
result.append(True)
else:
result.append(False)
cp_df['result'] = result
tuoiDf = cp_df[cp_df['result'] == True]
obj = tuoiDf.drop(columns='result')
return obj
@staticmethod
def search_string(word_list_name, option=0):
cp_df = cp.deepcopy(st.session_state.ssDf)
if option == 'Chính Xác':
obj = cp_df[np.logical_and.reduce([cp_df['Họ và Tên'].str.contains(word) for word in word_list_name])]
else:
obj = cp_df[cp_df['Họ và Tên'].str.contains('|'.join(word_list_name))]
return obj
@staticmethod
def rmvDuplicateValInLst(list_value):
new_list = list(dict.fromkeys(list_value))
return new_list
def graph(self):
pass
@staticmethod
def init_db(conn: Connection):
conn.commit()
@staticmethod
def get_data(conn: Connection, db_name):
db_select = "SELECT * FROM " + db_name
df = pd.read_sql(db_select, con=conn)
return df
@staticmethod
def get_connection(path: str):
return sqlite3.connect(path, check_same_thread=False)
def download_link(self, df):
from io import BytesIO
time_file = self.date_time()
db_name = 'database' + time_file
output = BytesIO()
writer = pd.ExcelWriter(output, engine='xlsxwriter')
df.to_excel(writer, sheet_name='Sheet1', index=False)
writer.save()
val = output.getvalue()
b64 = base64.b64encode(val) # val looks like b'...'
obj_1 = f'<a href="data:application/octet-stream;base64,{b64.decode()}"'
obj_2 = ' download=' + db_name + '.xlsx' + '>'
obj_3 = '<input type="button" value="Download File"></a>'
obj_download = obj_1 + obj_2 + obj_3
return obj_download
@staticmethod
def info():
st.subheader('FOOTBALL MANAGER\n')
st.code("The Application is in demo phase\n"
"---------------------------------------------"
"\nPlease contact me through out these infomation below"
"\nMember of Football Manager:"
"\n Name : Nguyễn Lê Minh Hòa"
"\n Mobile: 0944 886 896")
@staticmethod
def importTable(lst):
        # Add the new row to the data table
if len(st.session_state.ssDf) > 0:
st.session_state.ssDf.loc[-1] = lst
st.session_state.ssDf.index += 1
else:
st.session_state.ssDf.loc[0] = lst
st.session_state.ssDf.sort_index(inplace=True)
@staticmethod
def randNumInput(numStr):
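        # Parses a row-selection string such as "1,3-5,8" into [1, 3, 4, 5, 8]
        # (comma-separated items, inclusive ranges written with '-').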
if len(numStr) == 0:
return
numStrLst = numStr.split(',')
array = []
for i in range(len(list(numStrLst))):
            if '-' in numStrLst[i]:  # the token is a range such as "3-7"
d = numStrLst[i].split('-')
for y in range(int(d[0]), int(d[1]) + 1):
array.append(y)
else:
array.append(int(numStrLst[i]))
return array
@staticmethod
def getDifferentVal(lst):
res = []
for i in lst:
if i not in res:
res.append(i)
res = sorted(res)
return res
@staticmethod
def date_time():
time_zone = datetime.now()
current_time = (time_zone.strftime("%X")).replace(":", ".")
time_file = time_zone.strftime("_%d-" + "%m-" + "%y_" + current_time)
return time_file
if __name__ == '__main__':
main()
| [
"plotly"
] |
7726daf766525c3657d6e1aee8d0421e81828ccb | Python | ellieshuojin/Python | /ConstructNetwork.py | UTF-8 | 2,352 | 3.578125 | 4 | [] | no_license | import networkx as nx
import matplotlib.pyplot as plt
# a list of emplyees on the same shift
listWorkers = [['Manager1', 1, 3, 4, 5],
['Manager1', 1, 2, 3],
['Manager1', 3, 5, 6, 7],
['Manager1', 1, 7, 8],
['Manager1', 2, 4, 6, 7],
['Manager2', 4, 8, 9, 10, 11],
['Manager2', 8, 10, 11, 12]]
# this dataset shows which employees work together this particular week.
# there is a for loop over different days of the week, and within that for loop is another for loop for employees working on that day.
# employees are added to the network as a node, if they are not already in the network (i.e., iNode not in G).
# when the for loop for employees are done, then edges are added with nested for loops.
# if two employees are not connected (i.e. jNode not in G[iNode]), then an edge is added between them.
# if they are connected, then the edge weight is increased by 1.
# first, creating a graph
G=nx.Graph()
# loop over days
for iDay in range(len(listWorkers)):
# adding nodes. Check if node exists first before adding
for iNode in listWorkers[iDay]:
if iNode not in G:
G.add_node(iNode)
# adding edges. Check if edge exists before adding
for i,iNode in enumerate(listWorkers[iDay]):
        for jNode in listWorkers[iDay][i+1:]:  # visit each unordered pair once
# if iNode and jNode are not connected
if (iNode!=jNode) and (jNode not in G[iNode]):
G.add_edge(iNode,jNode,weight=1.0)
# if iNode and jNode are connected
elif iNode!=jNode:
G[iNode][jNode]['weight'] += 1.0
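# After these loops, an edge's weight grows with the number of shifts the two
# employees shared; heavier edges are drawn with thicker lines below.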
# drawing the graph --- Kamada-Kawai layout
pos = nx.kamada_kawai_layout(G) # positions for all nodes
# nodes
nx.draw_networkx_nodes(G, pos)
# edges
edgeweight = [ d['weight'] for (u,v,d) in G.edges(data=True)]
nx.draw_networkx_edges(G, pos, width=edgeweight)
# labels
nx.draw_networkx_labels(G, pos)
plt.axis('off')
plt.show()
# Saving the graph in different formats
# adjacency list
nx.write_adjlist(G,'CoWorking.adjllist')
# edge list
nx.write_edgelist(G,'CoWorking.edgelist')
# GML
nx.write_gml(G,'CoWorking.gml')
# loading the graph
Gadj = nx.read_adjlist('CoWorking.adjllist')
Gedge = nx.read_edgelist('CoWorking.edgelist')
Ggml = nx.read_gml('CoWorking.gml')
| [
"matplotlib"
] |
0209d43fc1cd6a39cfa9ab8b782fecb19d080599 | Python | afzm4/cs3001hw6 | /main.py | UTF-8 | 8,233 | 2.953125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Sat Nov 10 16:08:37 2018
@author: Andrew
"""
import seaborn as sns
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#from sklearn.ensemble.partial_dependence import plot_partial_dependence
from sklearn.datasets import fetch_california_housing
from sklearn.linear_model import LinearRegression, LassoCV, RidgeCV, ElasticNetCV, Ridge, Lasso, ElasticNet
from sklearn.model_selection import train_test_split, GridSearchCV, validation_curve
from sklearn.preprocessing import StandardScaler
from sklearn.ensemble import GradientBoostingRegressor
from sklearn import svm, datasets
from sklearn.kernel_ridge import KernelRidge
from sklearn.ensemble.partial_dependence import plot_partial_dependence
from sklearn.ensemble.partial_dependence import partial_dependence
def main():
houses = fetch_california_housing()
digits = datasets.load_iris()
data = houses.data
names = houses.feature_names
target = houses.target
#Q1
#DistPlots for all 8 features, individually
#sns.distplot(data[:,0], axlabel=names[0])
#sns.distplot(data[:,1], axlabel=names[1])
#sns.distplot(data[:,2], axlabel=names[2])
#sns.distplot(data[:,3], axlabel=names[3])
#sns.distplot(data[:,4], axlabel=names[4])
#sns.distplot(data[:,5], axlabel=names[5])
#sns.distplot(data[:,6], axlabel=names[6])
#sns.distplot(data[:,7], axlabel=names[7])
#Target DistPlot
#sns.distplot(houses.target, axlabel='Target')
test = max(data[:,2])
test2 = max(data[:,5])
housingDF = pd.DataFrame(data=data, columns=names)
#All 8 DistPlots together
#fig1 = housingDF.hist(bins=40, figsize=(9, 6))
print("")
print("Dependency on Targets: ")
clf = GradientBoostingRegressor(n_estimators=100, max_depth=4, learning_rate=0.1, loss='huber',random_state=1)
clf.fit(data,target)
feat = [0,1,2,3,4,5,6,7]
'''fig, axs = plot_partial_dependence(clf, data, feat, feature_names=names,n_jobs=3, grid_resolution=50)
fig.suptitle('Dependence of the target on each feature: ')
plt.subplots_adjust(top=0.9, wspace=0.6, hspace=0.6)
plt.show()'''
#fig, axs = plot_partial_dependence()
#Q3
X_train, X_test, y_train, y_test = train_test_split(data, target)
#linear regression
lin = LinearRegression().fit(X_train, y_train)
print("Linear Score: ", lin.score(X_test, y_test))
#Ridge regression w/ CV
rid = RidgeCV().fit(X_train, y_train)
print("Ridge Score: ", rid.score(X_test, y_test))
#Lasso regression w/ CV
lasso = LassoCV().fit(X_train, y_train)
print("Lasso Score: ", lasso.score(X_test, y_test))
#Elastic Net regression w/ CV
ela = ElasticNetCV().fit(X_train, y_train)
print("ElasticNet Score: ", ela.score(X_test, y_test))
#Using StandardScaler
scaler = StandardScaler()
dataSTD = scaler.fit_transform(data, target)
X_train2, X_test2, y_train2, y_test2 = train_test_split(dataSTD, target)
print("")
print("With Standardization:")
#linear regression STD
lin = LinearRegression().fit(X_train2, y_train2)
print("Linear Score: ", lin.score(X_test2, y_test2))
#Ridge regression w/ CV STD
rid = RidgeCV().fit(X_train2, y_train2)
print("Ridge Score: ", rid.score(X_test2, y_test2))
#Lasso regression w/ CV STD
lasso = LassoCV().fit(X_train2, y_train2)
print("Lasso Score: ", lasso.score(X_test2, y_test2))
#Elastic Net regression w/ CV STD
ela = ElasticNetCV().fit(X_train2, y_train2)
print("ElasticNet Score: ", ela.score(X_test2, y_test2))
#Q4
print("")
estimator = Ridge()
paramsR = {'alpha': [25,10,4,2,1.0,0.8,0.5,0.3,0.2,0.1,0.05,0.02,0.01],
'fit_intercept': [True, False],
}
gsCVR = GridSearchCV(estimator, paramsR)
param_range = np.logspace(-3,7,200)
train_scores, test_scores = validation_curve(Ridge(), data, target, "alpha", param_range=param_range, cv=5)
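# validation_curve returns score arrays of shape (len(param_range), n_cv_folds);
# averaging over axis=1 below gives the mean cross-validation score per alpha.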
test_scores_mean = np.mean(test_scores, axis=1)
plt.title("Validation Curve with Ridge")
plt.xlabel("$\gamma$")
plt.ylabel("Score")
plt.ylim(0.0, 1.1)
lw = 2
plt.semilogx(param_range, test_scores_mean, label="Cross-validation score",
color="navy", lw=lw)
plt.legend(loc="best")
plt.show()
alphas = np.logspace(-3, 7, 200)
coefs = []
for a in alphas:
ridge = Ridge(alpha=a, fit_intercept=False)
ridge.fit(data, target)
coefs.append(ridge.coef_)
ax = plt.gca()
ax.plot(alphas, coefs)
ax.set_xscale('log')
ax.set_xlim(ax.get_xlim()[::-1])
plt.xlabel('alpha')
plt.ylabel('weights')
plt.title('Ridge coefficients of each feature')
plt.axis('tight')
plt.legend()
plt.show()
gsCVR.fit(X_train, y_train)
#print(gsCVR.best_params_)
rid = Ridge(alpha=25, fit_intercept=True).fit(X_train, y_train)
print("Ridge Score(w/ best parameters): ", rid.score(X_test, y_test))
estimator = LassoCV()
paramsL = {'cv': [3,4,5,6],
'fit_intercept': [True, False],
'normalize': [True, False],
'precompute': [True, False]
}
gsCVL = GridSearchCV(estimator, paramsL)
gsCVL.fit(X_train, y_train)
#print(gsCVL.best_params_)
param_range = np.logspace(-7,3,200)
train_scores, test_scores = validation_curve(Lasso(), data, target, "alpha", param_range=param_range, cv=5)
test_scores_mean = np.mean(test_scores, axis=1)
plt.title("Validation Curve with Lasso")
plt.xlabel("$\gamma$")
plt.ylabel("Score")
plt.ylim(0.0, 1.1)
lw = 2
plt.semilogx(param_range, test_scores_mean, label="Cross-validation score",
color="navy", lw=lw)
plt.legend(loc="best")
plt.show()
'''alphas = np.logspace(-7, 3, 200)
coefs = []
for a in alphas:
lasso1 = Lasso(alpha=a, fit_intercept=False)
lasso1.fit(data, target)
coefs.append(lasso1.coef_)
ax = plt.gca()
ax.plot(alphas, coefs)
ax.set_xscale('log')
ax.set_xlim(ax.get_xlim()[::-1])
plt.xlabel('alpha')
plt.ylabel('weights')
plt.title('Lasso coefficients of each feature')
plt.axis('tight')
plt.legend()
plt.show()'''
las = LassoCV(cv=3, fit_intercept=True, normalize=True, precompute=True).fit(X_train, y_train)
print("Lasso Score(w/ best parameters): ", las.score(X_test, y_test))
estimator = ElasticNetCV()
paramsL = {'cv': [3,4,5,6],
'normalize': [True, False],
'precompute': [True, False]
}
gsCVE = GridSearchCV(estimator, paramsL)
gsCVE.fit(X_train, y_train)
train_scores, test_scores = validation_curve(ElasticNet(), data, target, "alpha", param_range=param_range, cv=3)
test_scores_mean = np.mean(test_scores, axis=1)
plt.title("Validation Curve with ElasticNet")
plt.xlabel("$\gamma$")
plt.ylabel("Score")
plt.ylim(0.0, 1.1)
lw = 2
plt.semilogx(param_range, test_scores_mean, label="Cross-validation score",
color="navy", lw=lw)
plt.legend(loc="best")
plt.show()
'''alphas = np.logspace(-7, 3, 200)
coefs = []
for a in alphas:
eN1 = ElasticNet(alpha=a, fit_intercept=False)
eN1.fit(data, target)
coefs.append(eN1.coef_)
ax = plt.gca()
ax.plot(alphas, coefs)
ax.set_xscale('log')
ax.set_xlim(ax.get_xlim()[::-1])
plt.xlabel('alpha')
plt.ylabel('weights')
plt.title('ElasticNet coefficients of each feature')
plt.axis('tight')
plt.legend()
plt.show()'''
#print(gsCVE.best_params_)
en = ElasticNetCV(cv=3, normalize=False, precompute=True).fit(X_train, y_train)
print("ElasticNet Score(w/ best parameters): ", en.score(X_test, y_test))
if __name__ == '__main__':
main() | [
"matplotlib",
"seaborn"
] |
da0021e0fc866691fdc204450e8578ce37de81d3 | Python | tzaeru/brain-color-cnn-practice | /wavelets.py | UTF-8 | 2,433 | 2.765625 | 3 | [
"Unlicense"
] | permissive | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
import pywt
x = np.linspace(0, 1, num=512)
data = np.sin(250 * np.pi * x**2)
wavelet = 'db2'
level = 4
order = "freq" # other option is "normal"
interpolation = 'nearest'
cmap = plt.cm.cool
# Construct wavelet packet
wp = pywt.WaveletPacket(data, wavelet, 'symmetric', maxlevel=level)
nodes = wp.get_level(level, order=order)
labels = [n.path for n in nodes]
values = np.array([n.data for n in nodes], 'd')
values = abs(values)
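# A full wavelet packet decomposition at level 4 has 2**4 = 16 leaf nodes,
# so `values` is a (16, n_coefficients) array of absolute coefficient values.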
# Show signal and wavelet packet coefficients
fig = plt.figure()
fig.subplots_adjust(hspace=0.2, bottom=.03, left=.07, right=.97, top=.92)
ax = fig.add_subplot(2, 1, 1)
ax.set_title("linchirp signal")
ax.plot(x, data, 'b')
ax.set_xlim(0, x[-1])
ax = fig.add_subplot(2, 1, 2)
ax.set_title("Wavelet packet coefficients at level %d" % level)
ax.imshow(values, interpolation=interpolation, cmap=cmap, aspect="auto",
origin="lower", extent=[0, 1, 0, len(values)])
ax.set_yticks(np.arange(0.5, len(labels) + 0.5), labels)
# Show spectrogram and wavelet packet coefficients
fig2 = plt.figure()
ax2 = fig2.add_subplot(211)
ax2.specgram(data, NFFT=64, noverlap=1, Fs=2, cmap=cmap,
interpolation='bilinear')
ax2.set_title("Spectrogram of signal")
ax3 = fig2.add_subplot(212)
ax3.imshow(values, origin='upper', extent=[-1, 1, -1, 1],
interpolation='nearest')
ax3.set_title("Wavelet packet coefficients")
plt.show()
'''
from scipy import signal
import matplotlib.pyplot as plt
import numpy as np
import pywt
t = np.linspace(-1, 1, 200, endpoint=False)
sig = np.cos(2 * np.pi * 7 * t) + signal.gausspulse(t - 0.4, fc=2)
widths = np.arange(1, 31)
#plt.imshow(cwtmatr, extent=[-1, 1, 1, 31], cmap='PRGn', aspect='auto',
# vmax=abs(cwtmatr).max(), vmin=-abs(cwtmatr).max())
#plt.imshow(cwtmatr, extent=[-1, 1, 1, 31], cmap='PRGn', aspect='auto',
# vmax=abs(cwtmatr).max(), vmin=-abs(cwtmatr).max())
print(pywt.wavelist(kind='discrete'))
(cA, cD) = pywt.dwt(sig, 'db1')
print(cA)
plt.plot(cA, widths)
plt.show()
while i < 1:
temp_y=np.random.random()
x.append(i)
y.append(temp_y)
sig = np.cos(2 * np.pi * 7 * t + i) + signal.gausspulse(t - 0.4, fc=2)
widths = np.arange(1, 31)
(cA, cD) = pywt.dwt(sig, 'db1')
print(cA)
plt.plot(cA, cD)
i+=1
plt.show()
plt.pause(0.001) #Note this correction
plt.clf()''' | [
"matplotlib"
] |
cd29b1d05d0ab73d98371ffe9e71d6204425653d | Python | claudejpschmit/MSc-RW1 | /build/plotRW.py | UTF-8 | 363 | 2.71875 | 3 | [] | no_license | import matplotlib as mpl
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import matplotlib.pyplot as plt
import sys
x,y,z = np.loadtxt(sys.argv[1], unpack = True)
mpl.rcParams['legend.fontsize'] = 10
fig = plt.figure()
ax = fig.gca(projection='3d')
ax.plot(x, y, z, label='parametric curve')
plt.title(r'Random Walk with $N = 100$ steps')
ax.legend()  # show the 'parametric curve' label (legend.fontsize is configured above)
plt.show()
"matplotlib"
] |
5d6109f8a065bb923c27017199c7ef1be90869b7 | Python | atm5tc/rtlsdrwebserver | /playsounds.py | UTF-8 | 1,215 | 2.96875 | 3 | [] | no_license | import numpy as np
import sounddevice as sd
import soundfile as sf
import scipy.io.wavfile as wav
import matplotlib.pyplot as plt
import sounddevice as sd
AUDIO_SAMPLE_RATE = 44100 # Hz
# generate a tone of the given duration (in seconds) at the given frequency
def gen_tone(amplitude,tone_duration, frequency):
x = np.arange(AUDIO_SAMPLE_RATE * tone_duration)
tone = amplitude*np.sin(2 * np.pi * frequency/AUDIO_SAMPLE_RATE * x)
return tone
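# e.g. gen_tone(1, 0.0625, 900) returns a 62.5 ms, 900 Hz sine burst at the 44.1 kHz sample rate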
def play(tone):
#scikits.audiolab.play(fs=AUDIO_SAMPLE_RATE)
sd.play(tone, AUDIO_SAMPLE_RATE)
sd.wait()
sf.write('Tone.wav',tone, AUDIO_SAMPLE_RATE)
#fSK modulation
def modulateFSK(list, tone1, tone2):
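    # Binary FSK: each 1 in the bit list maps to tone1 and each 0 to tone2;
    # the tone bursts are concatenated and played as one waveform.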
sound = np.array([])
for i in list:
if i == 1:
sound = np.append(sound, tone1)
else:
sound = np.append(sound, tone2)
play(sound)
def main():
tone1 = gen_tone(1,.0625, 900)
tone2 = gen_tone(1,.0625, 300)
while True:
print("Please enter Message")
array = [1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1]
print(array)
modulateFSK(array, tone1, tone2)
message = input()
if __name__ == '__main__':
main() | [
"matplotlib"
] |
04e062c28975170e8e3efd711982edcf8ef7e21b | Python | hubertshum/POSE-ID-on | /utils/data_viz.py | UTF-8 | 6,700 | 2.875 | 3 | [
"CC-BY-4.0"
] | permissive | import matplotlib.pyplot as plt
import cv2
import os
def plot_images(dir_im, t = 0, best_worst_cases = {}):
"""
This function plots the selected query with the 5 most similar poses (included the query itself), and the least similar one
Args:
dir_im: rendered image directory
t: the query ID
best_worst_cases: the dictionary which stores the results of the comparison
"""
fig = plt.figure(figsize=(40, 40))
im_list = [a[0] for a in best_worst_cases[list(best_worst_cases.keys())[t]][0]] + [best_worst_cases[list(best_worst_cases.keys())[t]][1][0]]
columns = 6
rows = 1
for i in range(1, columns*rows +1):
im = cv2.imread(dir_im + '/'+im_list[i-1] + '_rendered.png')
im = cv2.resize(im, (200,400))
ax = fig.add_subplot(rows, columns, i)
plt.imshow(im[:,:,::-1])
#plt.axis('off')
ax.tick_params(labelbottom=False, bottom = False, labelleft = False, left = False)
if i == 1:
plt.title("Query", fontsize= 14)
ax.set_xlabel(im_list[i-1], fontsize= 13)
elif i > 1 and i < columns*rows:
plt.title("Closest result " + str(i-1), fontsize= 14)
ax.set_xlabel(im_list[i-1], fontsize= 13)
else:
plt.title("Farthest result " + str(1), fontsize= 14)
ax.set_xlabel(im_list[i-1], fontsize= 13)
plt.show()
print("Query: ",im_list[0], '\n')
print("---------------\n")
print("Closest results: \n")
for i in range(1,5):
print(im_list[i], '\n')
print("---------------\n")
print("Farthest result: ", im_list[5])
def save_images(dir_im, t = 0, method = 1, dictionary = {}):
"""
This function saves the selected query image with the 5 most similar poses (included the query itself), and the least similar one.
Args:
dir_im: rendered image directory
t: the query ID
method: the selected matching class method. [1/2]
dictionary: the dictionary which stores the results of the comparison
"""
assert method == 1 or method == 2, "Invalid method"
dir_res = os.path.join(os.getcwd(), "Results_method_" + str(method))
if not os.path.exists(dir_res):
os.makedirs(dir_res)
fig = plt.figure(figsize=(40, 40))
im_list = [a[0] for a in dictionary[list(dictionary.keys())[t]][0]] + [dictionary[list(dictionary.keys())[t]][1][0]]
columns = 6
rows = 1
for i in range(1, columns*rows +1):
im = cv2.imread(dir_im + '/'+im_list[i-1] + '_rendered.png')
im = cv2.resize(im, (200,400))
ax = fig.add_subplot(rows, columns, i)
plt.imshow(im[:,:,::-1])
#plt.axis('off')
ax.tick_params(labelbottom=False, bottom = False, labelleft = False, left = False)
if i == 1:
plt.title("Query", fontsize= 14)
ax.set_xlabel(im_list[i-1], fontsize= 13)
elif i > 1 and i < columns*rows:
plt.title("Closest result " + str(i-1), fontsize= 14)
ax.set_xlabel(im_list[i-1], fontsize= 13)
else:
plt.title("Farthest result " + str(1), fontsize= 14)
ax.set_xlabel(im_list[i-1], fontsize= 13)
#plt.show()
plt.savefig(dir_res+"/{}.png".format(im_list[0]))
plt.close()
def show_pose(i, dict_joints, dir_im):
"""
Visualize the single pose.
Args:
i: the query ID
dict_joints: the dictionary which stores the joints
dir_im: rendered image directory
"""
punti_prova = dict_joints[list(dict_joints.keys())[i]]
fig, ax = plt.subplots(figsize = (15,15))
im = cv2.imread(dir_im+'/'+list(dict_joints.keys())[i] + "_rendered.png")
print(os.listdir(dir_im)[i])
plt.imshow(im/255.0)
for n in range(len(punti_prova)):
plt.plot(punti_prova[n][0], punti_prova[n][1], 'ro')
ax.annotate(n, (punti_prova[n][0], punti_prova[n][1]))
return
def show_single_pose(i, punti_prova, dict_joints, dir_im):
"""
Visualize the single pose, given the keypoints (even the noisy ones).
Args:
i: the query ID
punti_prova: the list of keypoints of the selected query
dict_joints: the dictionary which stores the joints
dir_im: rendered image directory
"""
fig, ax = plt.subplots(figsize = (15,15))
im = cv2.imread(dir_im+'/'+list(dict_joints.keys())[i] + "_rendered.png")
print(os.listdir(dir_im)[i])
plt.imshow(im/255.0)
for n in range(len(punti_prova)):
plt.plot(punti_prova[n][0], punti_prova[n][1], 'ro')
ax.annotate(n, (punti_prova[n][0], punti_prova[n][1]))
return
def watch_samples(n_cl_show, df_cls, rec_poses, n_cluster_list, dir_im):
"""
Show the poses reconstructed from the centroids resulting from clustering with n_cl_show clusters, and up to five samples for each cluster.
Args:
n_cl_show: the number of clusters of the selected clustering to visualize
df_cls: a dictionary composed by the dataframes (containing the statue names, their features and their cluster labels) for each clustering
rec_poses: a dictionary composed by the reconstructed poses of each clustering
n_cluster_list: a list with the number of clusters for each clustering
dir_im: rendered image directory
"""
links = [[0,1],[1,2],[2,3],[3,4],[1,5],[5,6],[1,8],[7,6],[8,9],[8,12],[9,10],[10,11],[12,13],[13,14]]
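    # Pairs of keypoint indices joined by a line when drawing the reconstructed skeleton
    # (assumed to follow the OpenPose-style keypoint ordering used elsewhere in this project).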
all_samples = {}
for n in n_cluster_list:
samples = []
df_clustering = df_cls[n]
for i in range(n):
a = df_clustering[df_clustering['label'] == i]
samples.append(list(a['label'].sample(min(5, len(a))).index))
all_samples[n] = samples
for j in range(n_cl_show):
rec_pose = rec_poses[n_cl_show][j]
im_list = all_samples[n_cl_show][j]
fig = plt.figure(figsize=(40, 40))
columns = min(5, len(im_list))
rows = 1
for i in range(1, columns*rows +1):
im = cv2.imread(dir_im + '/'+im_list[i-1] + '_rendered.png')
im = cv2.resize(im, (200,400))
ax = fig.add_subplot(rows, columns, i)
#plt.axis('off')
ax.tick_params(labelbottom=False, bottom = False, labelleft = False, left = False)
if i == 1:
for n in range(len(rec_pose)):
plt.plot(rec_pose[n][0], rec_pose[n][1], 'ro')
ax.annotate(n, (rec_pose[n][0], rec_pose[n][1]))
ax.set_aspect(aspect = "equal")
for l in range(len(links)):
p1, p2 = links[l]
plt.plot([rec_pose[p1][0], rec_pose[p2][0]],[rec_pose[p1][1], rec_pose[p2][1]], '-')
else:
plt.imshow(im[:,:,::-1])
plt.title("Random example " + str(i-1), fontsize= 14)
ax.set_xlabel(im_list[i-1], fontsize= 13)
plt.show()
return | [
"matplotlib"
] |
29ff07dc30028ba7b9c7c09ce6f2a492d495e153 | Python | AlexandreAdam/PHY6669 | /homework1/python/numero2.py | UTF-8 | 4,225 | 2.796875 | 3 | [] | no_license | """
=============================================
Title: Two point correlation of BOSS sample (2)
Author(s): Alexandre Adam
Last modified: Feb. 11th 2021
Description: We make a collection of random
catalogue and compute error bars for different
estimators fof the correlation function
=============================================
"""
from count_in_spheres import *
import astropy.units as u
from glob import glob
import matplotlib.pylab as pylab
plt.style.use("science")
params = {'legend.fontsize': 'x-large',
'figure.figsize': (6, 6),
'axes.labelsize': 'x-large',
'axes.titlesize':'x-large',
'xtick.labelsize':'x-large',
'ytick.labelsize':'x-large'
}
pylab.rcParams.update(params)
SIZE = 8000
def create_mocks_population(kde, theta, phi, N, population):
"""
kde: kde instance fitted on radial distance in the survey
N: Number of samples in each mock catalogue
        population: Number of mock catalogues to produce
"""
for i in tqdm(range(population)):
r, _theta, _phi = cone_synthetic_catalogue(kde, theta, phi, N)
x, y, z = spherical_to_cartesian(r, _theta, _phi)
positions = np.column_stack([x, y, z])
np.savetxt(f"mocks/mock_sample_{i:03d}.txt", positions)
def correlation_plot(xi, bins, args, color="b"):
bin_center = (bins[1:] + bins[:-1])/2
xi_mean = xi.mean(axis=0)
xi_std = xi.std(axis=0)
plt.figure()
plt.plot(bin_center, xi.mean(axis=0), color=color, lw=3)
plt.fill_between(bin_center, xi_mean + xi_std, xi_mean - xi_std, color=color, alpha=0.5)
plt.xlabel(r"$r$ [Mpc]")
if args.estimator == "count-in-sphere":
plt.ylabel(r"$\mathcal{N}(<r)$")
plt.axhline(1, color="k", ls="--")
plt.title("Compte des voisins")
else:
plt.ylabel(r"$\xi(r)$")
plt.axhline(0, color="k", ls="--")
plt.title(args.estimator)
plt.savefig(f"../tex/figures/coorelation_{args.estimator}_8000.png")
plt.show()
def main(args):
bins = np.linspace(20, 400, args.bins)
if args.plot_results:
xi = np.loadtxt(f"correlation_{args.estimator}_8000.txt")
correlation_plot(xi, bins, args)
return
h = args.hubble
cosmo = FlatLambdaCDM(100 * h, Omega_m)
data = pd.read_csv(data_path, skiprows=1)
# preprocessing
data = data[(data["Spec_redshift"] > 1e-4) & (data["Spec_redshift"] < 2)]
r = cosmo.comoving_distance(data["Spec_redshift"].to_numpy()).value # Mpc
theta = np.deg2rad(data["dec"]).to_numpy()
phi = np.deg2rad(data["ra"]).to_numpy()
weights, kde = kde_weights(r)
if args.create_mocks:
create_mocks_population(kde, theta, phi, SIZE, args.population)
return
positions = np.column_stack(spherical_to_cartesian(r, theta, phi)).astype(np.float64)
xi = []
for f in tqdm(glob("mocks/mock_sample_*.txt")):
positions_rand = np.loadtxt(f).astype(np.float64)
indexes = np.random.choice(range(len(data)), size=SIZE, replace=False)
_xi = two_point_correlation_estimator(positions[indexes], positions_rand, weights[indexes], bins, estimator=args.estimator)
xi.append(_xi)
xi = np.row_stack(xi)
np.savetxt(f"correlation_{args.estimator}_8000.txt", xi)
if __name__ == "__main__":
from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument("--hubble", required=False, default=0.6774, type=float, help="Hubble parameter")
parser.add_argument("--omega_m", required=False, default=0.3089, help="Matter density parameter")
parser.add_argument("--create_mocks", action="store_true", required=False, help="Creat mocks, required for first use")
parser.add_argument("--population", required=False, default=100, help="Number of mocks to create")
parser.add_argument("--bins", required=False, default=200, help="Number of bins for the distance")
parser.add_argument("--estimator", required=False, default="count-in-sphere", help="Estimator, can be count_in_sphere, Peebles-Davis, Hamilton, Landy-Szalay")
parser.add_argument("--plot_results", required=False, action="store_true", help="Plot results after simulation")
args = parser.parse_args()
main(args)
| [
"matplotlib"
] |
c243ac52a82f2f3b186c9c91a8196ed9ecdffbae | Python | deepak-buddha/WiSe20_Team_10_Main | /models/level2/LogReg_ChampWr.py | UTF-8 | 1,078 | 2.8125 | 3 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.11.1
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib inline
df = pd.read_csv('../../data/processed/LOLOracleDataWr.csv')
df.head()
# ### Lets implement logistic regression model now
from sklearn.model_selection import train_test_split
X = df.drop('Winner',axis=1)
y = df['Winner']
X_train,X_test,y_train,y_test = train_test_split(X,y,test_size=0.3,random_state=101)
from sklearn.linear_model import LogisticRegression
logmodel = LogisticRegression()
logmodel.fit(X_train,y_train)
predictions = logmodel.predict(X_test)
from sklearn.metrics import confusion_matrix,classification_report
print(confusion_matrix(y_test,predictions))
print('\n')
print(classification_report(y_test,predictions))
| [
"matplotlib",
"seaborn"
] |
20e39e98522350ff5e7e1d2e9760974be741c7cf | Python | Ama284/NUMPY-SCIPY-CONCEPT-IN-PYTHON | /Scipy Concept.py | UTF-8 | 7,131 | 3.1875 | 3 | [] | no_license | #!/usr/bin/env python
# coding: utf-8
# # Python-SciPy Library
# IMPORT DATA
# In[5]:
import pandas as pd
#importing data
data = pd.read_excel('C:/Users/AMIT-/OneDrive/Documents/somecars1.xlsx')
#print the dataset
data
# # scipy.cluster
# To divide a dataset into k clusters
# In[6]:
#import libraries
import pandas as pd
from scipy.cluster.vq import kmeans, vq
#importing data
data = pd.read_excel('C:/Users/AMIT-/OneDrive/Documents/somecars1.xlsx')
#find out centroids with the help of kmeans functions
#k, number of clusters required
centroid, _ = kmeans(data,3)
#find out the cluster index for each record with vector quantization function
#vq(data,centroid)
idx, _ = vq(data,centroid)
#print the cluster index array
idx
# In[7]:
#also print the centroids
centroid
# Now perform data whitening
# In[8]:
#import libraries
import pandas as pd
from scipy.cluster.vq import kmeans, vq, whiten
#importing data
data = pd.read_excel('C:/Users/AMIT-/OneDrive/Documents/somecars1.xlsx')
#whiten data
data = whiten(data)
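# whiten() rescales each feature (column) by its standard deviation,
# so every feature has unit variance before k-means is applied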
#find out centroids with the help of kmeans functions
#k, number of clusters required
centroid, _ = kmeans(data,3)
#find out the cluster index for each record with vector quantization function
#vq(data,centroid)
idx, _ = vq(data,centroid)
#print the cluster index array
idx
# In[9]:
#also print the centroids
centroid
# # scipy.stats
# In[8]:
#import numpy
import numpy as np
#create the marks array
coffee = np.array([15,18,20,26,32,38,32,24,21,16,13,11,14])
print(coffee.mean(), coffee.std())
#let us see the data distribution by plotting it
import matplotlib.pyplot as plt
plt.plot(range(13),coffee)
# In[7]:
#import numpy
import numpy as np
#create the marks array
coffee = np.array([15,18,20,26,32,38,32,24,21,16,13,11,14])
from scipy import stats
#find the zscore
print(stats.zscore(coffee))
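# zscore computes (x - mean) / std for each entry, i.e. how many standard
# deviations each value lies above or below the mean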
print(coffee.mean(), coffee.std())
#let us see the data distribution by plotting it
import matplotlib.pyplot as plt
plt.plot(range(13),coffee)
# In[9]:
#import numpy
import numpy as np
#create the marks array
coffee = np.array([15,18,20,26,32,38,32,24,21,16,13,11])
#import scipy stats
from scipy import stats
#find the zscore
print(coffee.mean(), coffee.std())
#let us see the data distribution by plotting it
import matplotlib.pyplot as plt
plt.plot(range(12),coffee)
# In[10]:
#import numpy
import numpy as np
from scipy import stats
#create the numpy array consisting of frequency of people going to gym and frequency of smoking
obs = np.array([[7,1,3],[87,18,84],[12,3,4],[9,1,7]])
#since we are looking for only the p value, ignore the rest
_,p,_,_ = stats.chi2_contingency(obs)
#print p
p
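# a small p-value (e.g. below 0.05) would suggest the two categorical variables
# (gym attendance and smoking frequency) are not independent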
# # scipy signal
# In[12]:
#scipy.signal uses FFT to resample a 1D signal.
import matplotlib.pyplot as plt
import numpy as np
from scipy import signal
#Now let us create a signal with 200 data point
t = np.linspace(-10, 10, 200) #Defining Time Interval
y = np.sin(t)
x_resampled=signal.resample(y, 100) #Number of required samples is 100
plt.plot(t, y)
#for x axis slice t into 2 step size
plt.plot(t[::2], x_resampled, 'o')
plt.show()
# In[13]:
import numpy as np
t = np.linspace(-10, 10, 200)
x = np.sin(t)
from scipy import signal
x_resampled = signal.resample(x, 25) # Number of required samples is 25
plt.plot(t, x)
plt.plot(t[::8], x_resampled, 'o')
plt.show()
# # scipy.optimize
# In[14]:
#generate one function and plot with matplotlib
#import matplotlib
import matplotlib.pyplot as plt
#import numpy
import numpy as np
x= np.arange(0.0,1.0,0.1)
#create function
def f(x):
return -np.exp(-(x-0.7)**2)
#plot function
plt.plot(x,f(x),'o-')
plt.grid()
# In[15]:
#find at which x value we get the minimum function
from scipy import optimize
#generating the function
import numpy as np
def f(x):
return -np.exp(-(x-0.7)**2)
#find the minimum of the function
result = optimize.minimize_scalar(f)
#now find the corresponding x value
x_min = result.x
#print the x value
x_min
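# f(x) = -exp(-(x-0.7)**2) is minimised at x = 0.7, so x_min should be very close to 0.7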
# # scipy.integrate
# In[16]:
#import scipy integrate
import scipy.integrate as intg
#create one function to find the integration
def integrad(x):
return x**2
#apply quad() function, get only the answer, ignore rest
ans,_ = intg.quad(integrad,0,1)
#print ans
ans
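# the exact value of the integral of x**2 over [0, 1] is 1/3, so ans is about 0.3333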
# # scipy.fftpack
# In[18]:
# create one noisy signal
import matplotlib.pyplot as plt
import numpy as np
#create a signal with time_step=0.02
time_step = 0.02
period = 5
time_vec= np.arange(0,20, time_step)
sig = np.sin(2*np.pi/period*time_vec)+ 0.5*np.random.randn(time_vec.size)
plt.plot(time_vec,sig)
plt.show()
# In[20]:
# Apply fft
from scipy import fftpack
#Since we did not know the signal frequency, we only knew the sampling time step of the signal sig.
#The function fftfreq() returns the FFT sample frequency points.
sample_freq = fftpack.fftfreq(sig.size, d = time_step)
#now apply the fft() in the signal to find the discrete fourier transform
sig_fft = fftpack.fft(sig)
#Calculate the absolute value element-wise
power = np.abs(sig_fft)
plt.figure(figsize=(20,5))
#plot the absolute values of each sample_freq
plt.plot(sample_freq, power)
plt.show()
#here at sample_freq = 0.2 and -0.2 we have absolute values of 5.15943859e+02 = 515.9438593147901
#print(sample_freq)
#print(power)
# In[21]:
# Apply inverse fft
#Filter out the sample frequencies that are greater than 0 with numpy.where(condition)
pos_mask = np.where(sample_freq > 0)
#Apply the fiter on smaple_freq and store the +ve sample_freq on freqs
freqs = sample_freq[pos_mask]
#print(power[pos_mask].argmax())
#Find the peak frequency, here we focus on only the positive frequencies
peak_freq = freqs[power[pos_mask].argmax()]
#now get an array copy of the signal where we already applied fft.
high_freq_fft = sig_fft.copy()
#assign the ones greater than peak freq as 0 in order to remove the noise
high_freq_fft[np.abs(sample_freq) > peak_freq] = 0
#print(high_freq_fft)
#Now apply inverese fft on the new high_freq_fft this will be the filtered signal
filtered_sig = fftpack.ifft(high_freq_fft)
#plot
plt.figure(figsize=(6, 5))
#now plot the original signal for reference
plt.plot(time_vec, sig, label='Original signal')
#now plot the filtered signal
plt.plot(time_vec, filtered_sig, linewidth=3, label='Filtered signal')
#add label, legend
plt.xlabel('Time [s]')
plt.ylabel('Amplitude')
plt.legend(loc='best')
#show
plt.show()
# # scipy.linalg
# In[22]:
# Determinant of a square matrix
#import scipy linalg package
from scipy import linalg
#import numpy to the square matrix
import numpy as np
data = np.array([[1,2,3],[3,4,5],[5,6,7]])
#find determinant
linalg.det(data)
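# the rows are linearly dependent (row3 = 2*row2 - row1), so the determinant is 0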
# In[23]:
# Inverse of a square matrix
#import scipy linalg package
from scipy import linalg
#import numpy to the square matrix
import numpy as np
#the matrix [[1,2,3],[3,4,5],[5,6,7]] is singular (determinant 0, as shown above) and has no inverse,
#so a slightly different, non-singular matrix is used here instead
data = np.array([[1,2,3],[3,4,5],[5,6,9]])
#find the inverse
linalg.inv(data)
# In[24]:
# Eigen values of a square matrix
#import scipy linalg package
from scipy import linalg
#import numpy to the square matrix
import numpy as np
data = np.array([[1,2,3],[3,4,5],[5,6,7]])
#find the eigenvalues
linalg.eigvals(data)
# In[ ]:
| [
"matplotlib"
] |
fe7e54c8429449b52a86a48e9753abded76d946b | Python | stbmilu/wscw | /cs_to_csv.py | UTF-8 | 2,356 | 3 | 3 | [] | no_license | import os
import random
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from pymongo import MongoClient
# # # # Database for crowdsourcing # # # #
MONGO_HOST = 'mongodb://localhost:27017/'
client = MongoClient(MONGO_HOST)
db = client.final_tweets
fear = db.fear
happy = db.happy
anger = db.anger
excitement = db.excitement
pleasant = db.pleasant
surprise = db.surprise
# # # # randomly choose 20 tweets for each class # # # #
fear1 = random.sample(list(fear.find()), 20)
happy1 = random.sample(list(happy.find()), 20)
anger1 = random.sample(list(anger.find()), 20)
excitement1 = random.sample(list(excitement.find()), 20)
pleasant1 = random.sample(list(pleasant.find()), 20)
surprise1 = random.sample(list(surprise.find()),20)
tweet_text = []
tweet_id = []
tweet_emotion = []
# # # # store data in csv file # # # #
def get_tweet_text_for_df(tweets):
for tweet in tweets:
tweet_text.append(tweet["full_text"])
tweet_id.append(tweet["id"])
tweet_emotion.append(tweet["emotion"])
# # # # generate csv file for crowdsourcing # # # #
def tweets_to_data_frame(filename):
get_tweet_text_for_df(fear1)
get_tweet_text_for_df(happy1)
get_tweet_text_for_df(anger1)
get_tweet_text_for_df(excitement1)
get_tweet_text_for_df(pleasant1)
get_tweet_text_for_df(surprise1)
df = pd.DataFrame(data=tweet_text, columns=['tweet_text'])
df['tweet_id'] = tweet_id
df['tweet_emotion'] = tweet_emotion
df.to_csv (os.getcwd() + "/Final_results/crowdsourcing_results/" + filename + ".csv", index=False, header=True)
# # # # anothoer way of creating dataframe csv file # # # #
def tweets_to_data_frame2(tweets, filename):
"""fail due to np array, len([tweet["id"] for tweet in tweets]) = 0 """
"""need to fix this issue"""
df = pd.DataFrame(data=[tweet["full_text"] for tweet in tweets], columns=['tweets'])
df['id'] = np.array([tweet["id"] for tweet in tweets])
df['date'] = np.array([tweet["created_at"] for tweet in tweets])
df.to_csv (os.getcwd() + "/Final_results/crowdsourcing_results/" + filename + ".csv", index=False, header=True)
if __name__ == "__main__":
print("The dataset is generated in the path shown as below: ")
print(os.getcwd()+ "/Final_results/crowdsourcing_results/")
tweets_to_data_frame("emotion_dataset")
| [
"matplotlib"
] |
1f66d798aee3563d36fa0b9a5a1bd970563de8ff | Python | tanvi1gupta/Udemy_Machine_Learning | /Machine Learning A-Z/Part 2 - Regression/Section 9 - Random Forest Regression/Random_Forest_Regression/random_forest_2.py | UTF-8 | 1,098 | 3.328125 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Jul 16 14:49:00 2017
@author: tgupta2
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
dataset = pd.read_csv('Position_Salaries.csv')
X = dataset.iloc[:,1:2].values
y = dataset.iloc[:,-1].values
from sklearn.ensemble import RandomForestRegressor
#mse --> mean square error
regressor = RandomForestRegressor(n_estimators=300, criterion="mse", random_state=0)
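# 300 trees are grown on bootstrap samples of the data; the forest's prediction
# is the average of the individual tree predictions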
regressor.fit(X,y)
y_pred = regressor.predict([[6.5]])  # predict expects a 2D array of shape (n_samples, n_features)
# Visualising the Regression results
plt.scatter(X, y, color = 'red')
plt.plot(X, regressor.predict(X), color = 'blue')
plt.title('Truth or Bluff (Regression Model)')
plt.xlabel('Position level')
plt.ylabel('Salary')
plt.show()
# Visualising the Regression results (for higher resolution and smoother curve)
X_grid = np.arange(min(X), max(X), 0.1)
X_grid = X_grid.reshape((len(X_grid), 1))
plt.scatter(X, y, color = 'red')
plt.plot(X_grid, regressor.predict(X_grid), color = 'blue')
plt.title('Truth or Bluff (Regression Model)')
plt.xlabel('Position level')
plt.ylabel('Salary')
plt.show() | [
"matplotlib"
] |
91b9ac3f92983b9010d7978210fafb75cc2b487f | Python | glukicov/EDMTracking | /DB/countSubRuns.py | UTF-8 | 12,519 | 2.59375 | 3 | [
"MIT"
] | permissive | # /*
# * Gleb Lukicov ([email protected])
# * Created: 21 August 2019
# * Estimating golden runs in gm2
# * /
import psycopg2 #db query
import argparse # command line inputs sub
import numpy as np # fast arrays
import matplotlib.dates as mdate # mpl date format
import datetime
import os, sys
from collections import defaultdict
#Define some constants
START_TIME = datetime.datetime(2019,3,18,0,0,0) #Run-2
END_TIME = datetime.datetime(2019,7,6,8,0,0) # Run-2
BNL = 8.6348e9 # e+/e-
CUT_SUBRUN = 10
CUT_DELTA = 0.0 # min
CUT_DELTA_DAY = CUT_DELTA/float(60*24) # days (mpl date format)
CUT_CTAG = 50.0
UNPACK = 9 # MB
TRACK = 170 # MB
#Storage devices
runs_dump=[] # store all runs in the range (duplicates)
all_runs=[] # store all runs in the range (unique)
good_runs=[] # runs with more than CUT_SUBRUN subruns
good_deltas = [ ] # stop-start times for good runs
good_times = [ [], [] ] # start and stop times for good runs
good_subruns = [] # subruns per good run
good_ctags = [] # inst/N
#golden selection:
golden_runs = []
golden_deltas = []
golden_ctags = []
golden_subruns = []
###========================READ FROM FHICL AND WRITE ALIGNMENT CONSTANTS===============================##
def main():
sys.stdout = Logger() ### Duplicate all cout into a log file
print "Starting on:", datetime.datetime.now()
print "Looking for subruns in range:",START_TIME,"to",END_TIME
# ###OPEN CONNECTION (as a writer!)
dsn = "dbname=gm2_online_prod user=gm2_writer host=g2db-priv port=5433"
cnx = psycopg2.connect(dsn)
cur = cnx.cursor()
#count total subruns in range
command = "select count(*) from gm2dq.subrun_time where start_time >= '" + str(START_TIME) + \
"'::timestamp and end_time <= '" + str(END_TIME) + "'::timestamp ;"
cur.execute(command)
rows = cur.fetchall()
total_subruns = 0
for row in rows:
total_subruns = int(row[0])
# store all runs in range
command = "select run from gm2dq.subrun_time where start_time >= '" + str(START_TIME) + \
"'::timestamp and end_time <= '" + str(END_TIME) + "'::timestamp ;"
cur.execute(command)
rows = cur.fetchall()
for row in rows:
runs_dump.append(int(row[0]))
all_runs=set(runs_dump) # unique
print "Total runs:",len(all_runs),"with",total_subruns,"subruns"
#now loop over all runs and get the last and first subruns ordered by time to get the run duration (delta)
for i_run in all_runs:
command = "select start_time, end_time from gm2dq.subrun_time where run = "+ str(i_run) +\
" ORDER BY start_time ASC;"
cur.execute(command)
rows = cur.fetchall()
# for runs with more than N subruns...
if ( len(rows) >= CUT_SUBRUN ):
first = rows[0][0] # 1st element of 1st return
last = rows[-1][1] # 2nd element of last return
# if run crashed on the last subrun, get (n-1)th
if (last.year == 1969):
last = rows[-2][1]
# same check for the first subrun get the 2nd element then
if (first.year == 1969):
first = rows[1][0]
else:
delta = mdate.date2num(last)-mdate.date2num(first)
if (delta >= CUT_DELTA_DAY):
good_runs.append(i_run)
good_deltas.append( delta )
good_times[0].append(first)
good_times[1].append(last)
print 'Found',len(good_runs), "runs with more than",CUT_SUBRUN,"subruns and longer than", CUT_DELTA,"min"
#count ctags for the good runs based on the start and stop time of a run
    ctag_sum=0.0 # instantaneous ctag
    no_ctag_runs=[] # good runs with no gm2ctag_dqm rows in their time window
    for i in range(len(good_runs)):
        start = good_times[0][i]
        stop = good_times[1][i]
        command = "select time, ctags from gm2ctag_dqm where time >= '"+ str(start)+ "'::timestamp and time <= '"+ str(stop) +"'::timestamp ;"
        cur.execute(command)
        rows = cur.fetchall()
        if (len(rows) == 0):
            # flag the run: deleting list entries while looping over the original
            # indices would shift the lists and eventually raise an IndexError
            no_ctag_runs.append(good_runs[i])
        else:
            for row in rows:
                ctag_sum+=float(row[1])
            good_ctags.append(ctag_sum/float(len(rows))) # instantaneous ctag
            ctag_sum=0.0
    # now remove the flagged runs, keeping all per-run lists aligned
    for i_run in no_ctag_runs:
        element_id = good_runs.index(i_run)
        del good_runs[element_id]
        del good_times[0][element_id]
        del good_times[1][element_id]
        del good_deltas[element_id]
# now that we know the good runs, count total good subruns
for i_run in good_runs:
command = "select count(*) from gm2dq.subrun_time where run = "+ str(i_run) +" ;"
cur.execute(command)
rows = cur.fetchall()
for row in rows:
good_subruns.append(int(row[0])) # per good run
#get ctags for good runs
good_CTAGRun, good_ignoreRuns = getNearline(good_runs, cur, cnx)
# remove runs with no nearline info
print "Removed",len(good_ignoreRuns),"runs with no nearline info"
for i_run in good_ignoreRuns:
element_id_array = [i for i,x in enumerate(good_runs) if x == i_run]
element_id=element_id_array[0] # safe for list of unique runs
del good_runs[element_id]
del good_ctags[element_id]
del good_times[0][element_id]
del good_times[1][element_id]
del good_deltas[element_id]
del good_subruns[element_id]
print 'Found',len(good_runs), "runs,",np.sum(good_subruns),"subruns, with ctag information from gm2ctag_dqm"
if ( (len(good_runs) != len(good_deltas)) or len(good_runs) != len(good_ctags) or len(good_runs) != len(good_subruns) or len(good_runs) !=len(good_CTAGRun)):
print "The good run info is not of the same size!"
sys.exit()
print "Total of",sum(good_subruns),"good subruns in",len(good_runs),"runs with more than",CUT_SUBRUN,"subruns and longer than", CUT_DELTA,"min"
#write all the good runs, duration, ctag, and subruns per run
f=open("DBDump_all.csv", "w+")
for i in range(len(good_runs)):
f.write(str(good_runs[i])+ " "+str(good_deltas[i]) + " " + str(good_ctags[i]) + " "\
+ str(good_subruns[i]) + " " + str(good_CTAGRun[i])+ "\n")
print 'Wrote info (DBDump_all.csv) for',len(good_runs),"good runs"
##
## Golden runs
##
#now form a list of golden runs based on ctag
for i in range(len(good_runs)):
if (good_ctags[i] >= CUT_CTAG):
golden_runs.append(good_runs[i])
golden_deltas.append(good_deltas[i])
golden_ctags.append(good_ctags[i])
golden_subruns.append(good_subruns[i])
#get ctags for golden runs
golden_CTAGRun, golden_ignoreRuns = getNearline(golden_runs, cur, cnx)
# remove runs with no nearline info
print "Removed",len(golden_ignoreRuns),"runs with no nearline info"
for i_run in golden_ignoreRuns:
element_id_array = [i for i,x in enumerate(golden_runs) if x == i_run]
element_id=element_id_array[0] # safe for list of unique runs
del golden_runs[element_id]
del golden_ctags[element_id]
del golden_times[0][element_id]
del golden_times[1][element_id]
del golden_deltas[element_id]
del golden_subruns[element_id]
if ( (len(golden_runs) != len(golden_deltas)) or len(golden_runs) != len(golden_ctags) or len(golden_runs) != len(golden_subruns) or len(golden_runs) !=len(golden_CTAGRun)):
print "The gold run info is not of the same size!"
sys.exit()
print "Total of",sum(golden_subruns),"golden subruns in",len(golden_runs),"runs after CTAG cut of",CUT_CTAG
f=open("DBDump_golden.csv", "w+")
for i in range(len(golden_runs)):
f.write(str(golden_runs[i])+ " "+str(golden_deltas[i]) + " " + str(golden_ctags[i])\
+ " " + str(golden_subruns[i]) + " " + str(golden_CTAGRun[i])+ "\n")
print 'Wrote info (DBDump_golden.csv) for',len(golden_runs),"golden runs"
print "Estimation for UK tracking: Unpacked:",round(UNPACK*sum(golden_subruns)*1e-6,2),\
"TB Tracks:",round(TRACK*sum(golden_subruns)*1e-6,2),"TB"
    # close communication with the database before committing
cur.close()
cnx.commit()
cnx.close()
print "Finished on:", datetime.datetime.now()
### Helper functions
# Mark's function
def getNearline(all_runs, cur, cnx):
CTAGRun = []
ignoreRuns=[] # runs with no ctag
print "Getting nearline info (slow) for ctags..."
# Get the nearline ctag for these runs
for i_run in all_runs:
#get the ctag from nearline table
sql = "select nearline_ctag from nearline_processing where run_number = "+str(i_run)+" ;"
cur.execute(sql)
rows = cur.fetchall()
ctags = 0
nsr = 0 # subruns
ncx = 0 # subruns with ctag
for row in rows:
ct = int(row[0]) # per subrun
nsr = nsr + 1
if (ct > 0):
ncx = ncx + 1 # count non-zero ctag subruns
ctags += ct # sum-up
frac = float(ncx)/float(nsr) # fraction with ctag
if (frac == 0):
ignoreRuns.append(i_run)
if (frac != 0):
corrFac = 1.0/frac # correct for non-ctag subruns s
ctagsTotal = ctags*corrFac
if (i_run > 26088 and i_run < 26168): # HACK SINCE NEARLINE IS WRONG (multiply by 2.88: average of previous 50 runs to last 10)
ctagsTotal = ctagsTotal*2.88
if (i_run > 26476 and i_run < 26491): # RATIO OF DQM/NL SLOPES
ctagsTotal = ctagsTotal*1.79
CTAGRun.append(ctagsTotal)
return CTAGRun, ignoreRuns
### Duplicate all cout into a log file
class Logger(object):
def __init__(self):
self.terminal = sys.stdout
self.log = open("countSubRuns.log", "w+")
def write(self, message):
self.terminal.write(message)
self.log.write(message)
def flush(self):
self.terminal.flush()
self.log.flush()
if __name__=="__main__":
main()
# ##### Some psql shortcuts ####
# select * from gm2dq.subrun_time;
# psql -U gm2_writer -h localhost -p 5433 -d gm2_online_prod
# psql -U gm2_reader -h ifdbprod.fnal.gov -d gm2_online_prod -p 5452
# [connects to the Production DB, which is duplicated from the Online DB]
# \dt - list all table
# \dn - list all schemas
# \dt gm2tracker_sc.* - list all tables under the Tracker SC schema
# set schema 'gm2tracker_sc';
# set schema 'gm2dq';
# select * from slow_control_items where name like '%HV%' limit 10;
# select * from slow_control_data where scid=946 limit 10;
#cur.execute("select * from slow_control_data where scid=946 limit 10;")
# fetch all the rows
# rows = cur.fetchall()
# print rows
# CREATE TABLE gm2dq.tracker_hv (
# id SERIAL PRIMARY KEY,
# station smallint,
# hv_status BIT(64),
# run integer,
# subrun integer
# );
#####Correlating Run Sunbrun with timestamp ######
#You could access the content of DAQ ODB in table "gm2daq_odb", like the following
#select json_data->'Experiment'->'Security'->'RPC hosts'->'Allowed hosts' from gm2daq_odb ;
#select run_num, json_data->'Runinfo'->'Start time' from gm2daq_odb where run_num = 8000;
# select Subrun, json_data->'Logger'->'Channels'->'0'->'Settings' from gm2daq_odb
'''
#print ("len(rows)", len(rows))
for i_row in range(len(rows)):
#print ("i_subrun", i_subrun, "i_row", i_row)
i_subrun+=1
subrun = int(rows[i_row][1])
run = int(rows[i_row][0])
# get start time of the first subrun
if (subrun == 0):
start_time = rows[i_row][2]
all_runs.append(run)
#print "start_time", start_time
# exit logic for the last element
if (i_row == len(rows)-1):
#print "reached last row"
subrunCount.append(i_subrun)
end_time = rows[i_row][3]
times[0].append(start_time)
times[1].append(end_time)
break
next_run = int(rows[i_row+1][0]) # (!) this is handled by interrupt exit log
if ( run == next_run):
pass # do nothing
else:
subrunCount.append(i_subrun)
end_time = rows[i_row][3]
times[0].append(start_time)
times[1].append(end_time)
i_subrun=0
print "start_time", start_time
print "end_time", end_time
print ("run", run, "subrun", subrun, "next_run", next_run)
''' | [
"matplotlib"
] |
fd3f3da8bfafcf408297060561539829f0476d91 | Python | VRosi/interview_analysis | /real_tok.py | UTF-8 | 5,029 | 3.046875 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
This script generates JSON files containing the lemmatization and corpus frequency of words, broken down by:
- the studied term
- the studied question
It can also plot the most frequent words above a chosen cutoff frequency.
"""
import json
import pathlib
import numpy as np
import pandas as pd
import nltk
import re
import csv
import matplotlib.pyplot as plt
from nltk.stem.snowball import FrenchStemmer
#%% FUNCTIONS
def lemmatization(word, lemms):
lemmed = ""
for index, lemm in enumerate(lemms):
if word in lemms[lemm]:
lemmed = lemm
if lemmed == word:
break
return lemmed
def n_gram(list, n):
"""
    This function uses the zip function to generate n-grams.
    It concatenates the tokens into n-grams and returns them.
    OUTPUT : - list of n-gram strings
"""
ngrams = zip(*[list[i:] for i in range(n)])
return [" ".join(ngram) for ngram in ngrams]
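# Illustrative usage, not part of the original pipeline: for tokens such as
# ["le", "son", "est", "rugueux"], n_gram(tokens, 2) yields
# ["le son", "son est", "est rugueux"].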
def plot_wordF(plot_list, cutoff):
    plot_list = [word for word in plot_list if word[1] >= cutoff]
indexes, values = list(zip(*plot_list))
bar_width = 0.35
plt.barh(indexes, values)
plt.gca().invert_yaxis()
plt.show()
#%% INIT
term = "rugueux"
question = "Q2"
# cutoff for plot
cutoff = 2
with open("./corpus_rearranged_1/"+ term +".json", encoding="utf-8") as json_file:
data = json.load(json_file)
# load stop words in a file
stopWord_path = '/Users/VictorRosi/Documents/GitHub/interview_analysis/'
file = "new_stopwords_fr.txt"
stopFile = open(stopWord_path+file, 'r', encoding="utf-8")
yourResult = np.array([line.split('\n')
for line in stopFile.readlines()])[:, 0]
stopWord = list(yourResult)
lemm_file = '/Users/VictorRosi/Documents/GitHub/interview_analysis/lemm_file.json'
# Open lemm file
with open(lemm_file, encoding="utf-8") as json_file:
lemms = json.load(json_file)
#%% TOKENIZE
word_list = []
unDup = {}
answers = []
lemm_dic = {}
index_ID = []
tokenizer = nltk.RegexpTokenizer(r'\w+')
for i, k in enumerate(data[question]):
answers.append(k['answer'])
index_ID.append(k['expertID'])
for index, answer in enumerate(answers):
tokenized = []
# kill duplicates
tokenized = tokenizer.tokenize(answer.lower())
tokenized = list(dict.fromkeys(tokenized))
for j, word in enumerate(tokenized):
if word not in stopWord:
res = lemmatization(word, lemms)
if res == '':
res = word
if res not in stopWord:
if res in lemm_dic:
lemm_dic[res].append([index_ID[index], word])
else:
lemm_dic[res] = [[index_ID[index], word]]
#%%
# Only to count number of people using a word related to a lemm
# for i, lemm in enumerate(set(lemm_dic)):
# tmp = []
# for j, word in enumerate(lemm_dic[lemm]):
# tmp.append(word[0])
# tmp = list(dict.fromkeys(tmp))
# print(lemm, tmp, lemm_dic[lemm])
#%% CREATE DICTIONARY
new_dic = {}
for i, lemm in enumerate(set(lemm_dic)):
# Only to count number of people using a word related to a lemm
tmp_freq = []
tmp_word = []
tmp_ID = []
for j, word in enumerate(lemm_dic[lemm]):
tmp_freq.append(word[0])
tmp_word.append(word[1])
tmp_freq = list(dict.fromkeys(tmp_freq))
tmp_word = list(dict.fromkeys(tmp_word))
new_dic[lemm] = {'freq' : len(tmp_freq), 'words' : tmp_word, 'ID' : tmp_freq}
#%% SORT & PLOT
# cutoff frequency of words
# plot_dic = {}
# for index, lemm in enumerate(set(new_dic)):
# plot_dic[lemm] = new_dic[lemm]['freq']
# # sort from most to least used
# word_list = sorted(plot_dic.items(), key=lambda t: t[1], reverse=True)
# plot_wordF(word_list, 3)
#%% GENERATE JSON FILE WITH ARCHITECTURE:
"""
{'lemm':{
'freq' : '',
'words' : '',
'ID' : '',
},
{...}
}
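A hypothetical entry following this layout (values invented for illustration):
{'rugueux': {'freq': 3, 'words': ['rugueux', 'rugosite'], 'ID': ['E01', 'E07', 'E12']}}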
"""
path_j = './corpus_lemm/json/'
#result_dict = dict(zip(lol_word, lol_freq))
jsonfile = json.dumps(new_dic, indent=2)
with open(path_j +term + "_"+question+"_.json", 'w') as f_output:
f_output.write(jsonfile)
# for index, answer in enumerate(answers):
# tokenized = []
# # kill duplicates
# tokenized = tokenizer.tokenize(answer.lower())
# tokenized = list(dict.fromkeys(tokenized))
# for j, word in enumerate(tokenized):
# if word not in stopWord:
# res = lemmatization(word, lemms)
# if res == '':
# #print(word)
# res = word
# if res not in stopWord:
# word_list.append(res)
# if res in lemm_dic and word not in lemm_dic[res]:
# lemm_dic[res].append(word)
# else:
# lemm_dic[res] = [word] | [
"matplotlib"
] |
5c87e009aa5b37b233c93ebb4a4bf1142fc90379 | Python | lahi0008/RepresentationGraphiqueLoisDeProbabilitesEtFonctionDeRepartition | /LoiBinomiale.py | UTF-8 | 439 | 2.71875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Wed Mar 4 16:16:32 2020
@author: papico
"""
# Binomial law (Loi Binomiale)
import numpy as np
import scipy.stats as sps
import matplotlib.pyplot as plt
n, p, N = 20, 0.3, int(1e4)
B = np.random.binomial(n, p, N)
f = sps.binom.pmf(np.arange(n+1), n, p)
plt.hist(B,bins=n+1,density=True,range=(0.5,n+.5),color = "white",label="loi empirique")
plt.stem(np.arange(n+1),f,"r",label="loi theorique")
plt.legend()
plt.grid() | [
"matplotlib"
] |
83f22127426383023c202a57db0db25aa10249bf | Python | Lioscro/Cassiopeia | /scripts/post_process_tree.py | UTF-8 | 6,510 | 2.59375 | 3 | [
"MIT"
] | permissive | from __future__ import division
import sys
import matplotlib.pyplot as plt
import numpy as np
from scipy import stats
import pandas as pd
from tqdm import tqdm
sys.setrecursionlimit(10000)
import pickle as pic
import argparse
import networkx as nx
from collections import defaultdict
from pylab import *
from SingleCellLineageTracing.TreeSolver import convert_network_to_newick_format
def post_process_tree(G):
"""
Given a networkx graph in the form of a tree, assign sample identities to character states.
:param graph: Networkx Graph as a tree
:return: postprocessed tree as a Networkx object
"""
new_nodes = []
new_edges = []
def prune_leaves(G):
nodes_to_remove = []
root = [n for n in G if G.in_degree(n) == 0][0]
# first remove paths to leaves that don't correspond to samples
_leaves = [n for n in G if G.out_degree(n) == 0]
for n in _leaves:
# if we have this case, where the leaf doesn't have a sample label in the name, we need to remove this path
#if "target" not in n:
if "target" not in n:
nodes_to_remove.append(n)
return nodes_to_remove
nodes_to_remove = prune_leaves(G)
while len(nodes_to_remove) > 0:
for n in set(nodes_to_remove):
G.remove_node(n)
nodes_to_remove = prune_leaves(G)
# remove character strings from node name
node_dict = {}
for n in tqdm(G.nodes, desc="removing character strings from sample names"):
spl = n.split("_")
if "|" in spl[0] and "target" in n:
nn = "_".join(spl[1:])
node_dict[n] = nn
G = nx.relabel_nodes(G, node_dict)
node_dict2 = {}
for n in G.nodes:
spl = n.split("_")
if "target" in n:
if spl[-1] == "target":
name = "_".join(spl[:-1])
else:
name = "_".join(spl[:-2])
# if this target is a leaf, just rename it
# else we must add an extra 'redundant' leaf here
if G.out_degree(n) == 0:
node_dict2[n] = name
else:
new_nodes.append(name)
new_edges.append((n, name))
G.add_nodes_from(new_nodes)
G.add_edges_from(new_edges)
G = nx.relabel_nodes(G, node_dict2)
# remove any nodes that are not on the path from the root
#root = [n for n in G if G.in_degree(n) == 0][0]
#nodes_to_remove = []
#desc = nx.descendants(G, root)
#for n in G.nodes:
# if n not in desc:
# nodes_to_remove.append(n)
#for n in set(nodes_to_remove):
# G.remove_node(n)
return G
def assign_samples_to_charstrings(G, cm):
new_nodes = []
new_edges = []
nodes_to_remove = []
root = [n for n in G if G.in_degree(n) == 0][0]
cm["lookup"] = cm.apply(lambda x: "|".join(x), axis=1)
for n in G:
if n in cm['lookup'].values:
_nodes = cm.loc[cm["lookup"] == n].index
_nodes = map(lambda x: x + "_target", _nodes)
for new_node in _nodes:
new_nodes.append(new_node)
new_edges.append((n, new_node))
G.add_nodes_from(new_nodes)
G.add_edges_from(new_edges)
#_leaves = [n for n in G if G.out_degree(n) == 0]
#for n in _leaves:
# if n not in cm.index:
# paths = nx.all_simple_paths(G, root, n)
# for p in paths:
# for n in p:
# if n != root and G.out_degree(n) <= 1:
# nodes_to_remove.append(n)
#for n in set(nodes_to_remove):
# G.remove_node(n)
return G
def tree_collapse(graph):
"""
    Given a networkx graph in the form of a tree, collapse two nodes together if there are no mutations separating the two nodes
:param graph: Networkx Graph as a tree
:return: Collapsed tree as a Networkx object
"""
new_network = nx.DiGraph()
for edge in graph.edges():
if edge[0].split('_')[0] == edge[1].split('_')[0]:
if graph.out_degree(edge[1]) != 0:
for node in graph.successors(edge[1]):
new_network.add_edge(edge[0], node)
else:
new_network.add_edge(edge[0], edge[1])
else:
new_network.add_edge(edge[0], edge[1])
return new_network
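# Illustrative note with hypothetical node names: nodes are named "<charstring>_<label>",
# so for an edge ('1|0|2_n4', '1|0|2_n7') the character strings match, n7 is bypassed,
# and n7's children are re-attached directly under '1|0|2_n4'.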
def add_redundant_leaves(G, cm):
"""
To fairly take into account sample purity, we'll add back in 'redundant' leaves (i.e.
leaves that were removed because of non-unique character strings).
"""
# create lookup value for duplicates
cm["lookup"] = cm.astype('str').apply('|'.join, axis=1)
net_nodes = np.intersect1d(cm.index, [n for n in G])
uniq = cm.loc[net_nodes]
# find all non-unique character states in cm
nonuniq = np.setdiff1d(cm.index, np.array(uniq))
for n in nonuniq:
new_node = str(n)
try:
_leaf = uniq.index[uniq["lookup"] == cm.loc[n]["lookup"]][0]
parents = list(G.predecessors(_leaf))
for p in parents:
G.add_edge(p, new_node)
except:
continue
return G
def main():
parser = argparse.ArgumentParser()
parser.add_argument("netfp", type=str, help="Networkx pickle file")
parser.add_argument("char_fp", type=str, help="Character matrix")
parser.add_argument("out_fp", type=str, help="Output file -- will be written as a newick file!")
parser.add_argument("--map_states", action="store_true", default=False, help="Map character states to sampleID with provided character matrix")
parser.add_argument("--collapse", action="store_true", default=False, help="Collapse unweighted edges")
args = parser.parse_args()
netfp = args.netfp
char_fp = args.char_fp
map_states = args.map_states
collapse = args.collapse
out_fp = args.out_fp
if out_fp.split(".")[-1] != 'txt':
print("Warning! output is a newick file")
G = nx.read_gpickle(netfp)
cm = pd.read_csv(char_fp, sep='\t', index_col = 0)
if map_states:
G = assign_samples_to_charstrings(G, cm)
if collapse:
G = tree_collapse(G)
G = post_process_tree(G)
G = add_redundant_leaves(G, cm)
stem = ".".join(out_fp.split(".")[:-1])
pic.dump(G, open(stem + ".pkl", "wb"))
newick = convert_network_to_newick_format(G)
with open(out_fp, "w") as f:
f.write(newick)
| [
"matplotlib"
] |
7b1dd59c27af37be8bd4174df978922659b79579 | Python | kronenflex/Machine-Learning-Personal-Projects | /Part 1 - Data Preprocessing/data_prepocessing.py | UTF-8 | 1,462 | 2.84375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Sun Jun 14 21:45:19 2020
@author: DiegoIgnacioPavezOla
"""
# Data preprocessing template
# Import the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Import the dataset
dataset = pd.read_csv('Data.csv')
X = dataset.iloc[:, :-1].values
Y = dataset.iloc[:, 3].values
# Handle missing values (NAs)
from sklearn.impute import SimpleImputer
imputer = SimpleImputer(missing_values = np.nan, strategy = "mean")
imputer = imputer.fit(X[:, 1:3])
X[:, 1:3] = imputer.transform(X[:, 1:3])
# Encode categorical data
from sklearn import preprocessing
le_X = preprocessing.LabelEncoder()
X[:, 0] = le_X.fit_transform(X[:,0])
# Transform the categorical variable into dummy variables
from sklearn.preprocessing import OneHotEncoder
from sklearn.compose import ColumnTransformer
#Variable X
ct = ColumnTransformer(
[('one_hot_encoder', OneHotEncoder(categories='auto'),[0])],
remainder='passthrough')
X = np.array(ct.fit_transform(X), dtype = np.float)
#Variable Y
le_Y = preprocessing.LabelEncoder()
Y[:] = le_Y.fit_transform(Y)
# Split the dataset into training and test sets
from sklearn.model_selection import train_test_split
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size = 0.2, random_state = 0)
# Feature scaling
from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)
| [
"matplotlib"
] |
33b66c494aab5adc8d359975587a850e94e36bd0 | Python | mtdysart/spotify_recent_tracks_viz | /spotify_recent_tracks_viz/scatter.py | UTF-8 | 8,500 | 2.921875 | 3 | [] | no_license | from bokeh.plotting import figure
from bokeh.models import ColumnDataSource, Select, TextInput, DateRangeSlider, HoverTool, CheckboxGroup, Label
from bokeh.layouts import row, column
import pandas as pd
import datetime
from datetime import date
import statsmodels.api as sm
import numpy as np
import html
class Scatter:
# X axis choices
AXIS_MAP = {
"Tempo": "tempo",
"Duration (sec)": "duration_s",
"Danceability": "danceability",
"Energy": "energy",
"Loudness": "loudness",
"Speechiness": "speechiness",
"Acousticness": "acousticness",
"Instrumentalness": "instrumentalness",
"Liveness": "liveness",
"Valence": "valence"
}
# Tooltips for circle glyphs
CIRC_TOOLTIPS = [
("Track", "@track_name"),
("Artist", "@artist_name"),
("Times Played", "@count")
]
def __init__(self, df: pd.DataFrame):
# Initialize data sources for scatter plot and regression line
self.backing_df = df
self.circ_source = ColumnDataSource({'x': [], 'y': [], 'track_name': [], 'artist_name': [], 'count': [], 'circle_size': []})
self.line_source = ColumnDataSource({'x': [], 'y_pred': []})
# Initialize widgets
self.x_axis = Select(title="X Axis", options=list(self.AXIS_MAP.keys()), value="Tempo")
self.y_axis = Select(title="Y Axis", options=list(self.AXIS_MAP.keys()), value="Duration (sec)")
time_start = datetime.datetime(1970, 1, 1, hour=0, minute=0, second=0)
time_end = datetime.datetime(1970, 1, 1, hour=23, minute=59, second=59)
start_date = min(self.backing_df['date_played'])
start_dt = datetime.datetime(year=start_date.year, month=start_date.month, day=start_date.day, hour=0, minute=0, second=0)
end_date = max(self.backing_df['date_played'])
end_dt = datetime.datetime(year=end_date.year, month=end_date.month, day=end_date.day, hour=23, minute=59, second=59)
date_step_size = 1000*60*60*24 # Step size of 1 day in ms
self.date_slider = DateRangeSlider(title="Date Range", start=start_dt, end=end_dt, value=(start_dt, end_dt), format="%d %b %Y", step=date_step_size)
time_step_size = 1000*60*30 # 30 minues in ms
self.time_slider = DateRangeSlider(title="Time Range", value=(time_start, time_end), start=time_start, end=time_end, format="%X", step=time_step_size)
self.track_name = TextInput(title="Song name includes")
self.artist_name = TextInput(title="Artist name includes")
self.reg_line_check = CheckboxGroup(labels=["Add Regression Line"], active=[])
# Create the hover tools
self.points_hover = HoverTool(tooltips=self.CIRC_TOOLTIPS, names=["circles"])
self.line_hover = HoverTool(tooltips=[], names=["reg_line"])
# Create the scatter plot and regression line
self.plot = figure(title="Scatter", plot_height=450, plot_width=800, tools=[self.points_hover, self.line_hover])
self.plot.circle(x="x", y="y", source=self.circ_source, size="circle_size", fill_alpha=0.6, name="circles")
self.reg_line = self.plot.line(x='x', y='y_pred', source=self.line_source, color='#FFAF87', name="reg_line")
self.layout = row(column(self.x_axis, self.y_axis, self.date_slider, self.time_slider, self.track_name, self.artist_name, self.reg_line_check),
self.plot)
# Fill data and create events for on change
self.update()
self.on_change()
def on_change(self):
"""
Creates on change events for all widgets in the scatter plot.
"""
widgets = [self.x_axis, self.y_axis, self.date_slider, self.time_slider, self.track_name, self.artist_name]
for control in widgets:
control.on_change("value", lambda attr, old, new : self.update())
self.reg_line_check.on_change("active", lambda attr, old, new : self.update())
def update(self):
"""
Updates the data source and regression line based on current values of all widgets.
"""
new_df = self.get_selected()
# Get number of individual plays and then remove duplicate tracks for plotting
num_plays = len(new_df)
new_df.drop_duplicates(subset='track_id', inplace=True)
# Choose the x and y axis
x_name = self.AXIS_MAP[self.x_axis.value]
y_name = self.AXIS_MAP[self.y_axis.value]
self.plot.xaxis.axis_label = self.x_axis.value
self.plot.yaxis.axis_label = self.y_axis.value
# Calculate correlation coefficient between x and y axis
corr = np.corrcoef(new_df[x_name], new_df[y_name])[0, 1] if not new_df.empty else 0
self.plot.title.text = f"{num_plays} track plays selected, correlation: {round(corr, 2)}"
# Provide the new selected data to the Data Source
data_dict = {
'x': new_df[x_name],
'y': new_df[y_name],
'track_name': new_df['song_name'],
'artist_name': new_df['artist_name'],
'count': new_df['counts'],
'circle_size': new_df['circle_size']
}
self.circ_source.data = data_dict
# Update the regression line if more than one track is selected
if len(new_df) <= 1:
self.reg_line.visible = False
else:
x = sm.add_constant(new_df[x_name])
reg_model = sm.OLS(new_df[y_name], x)
results = reg_model.fit()
y_pred = list(map(lambda x : results.params.iloc[1] * x + results.params.iloc[0], new_df[x_name]))
reg_data_dict = {
'x': new_df[x_name],
'y_pred': y_pred
}
self.line_source.data = reg_data_dict
# Update hover tool for regression line
self.line_hover.tooltips = [
("Y=", f"{round(results.params.iloc[1], 2)}x + {round(results.params.iloc[0], 2)}"),
("R\u00b2", str(round(results.rsquared, 2)))
]
self.reg_line.visible = (len(self.reg_line_check.active) > 0)
def get_selected(self):
"""
Filter data based on widget values. Returns filtered DataFrame
"""
df = self.backing_df
if not self.track_name.value.isspace():
df = df[df['song_name'].str.lower().str.contains(self.track_name.value.strip().lower())]
if not self.artist_name.value.isspace():
df = df[df['artist_name'].str.lower().str.contains(self.artist_name.value.strip().lower())]
# Filter by date played
date_begin = pd.to_datetime(self.date_slider.value[0], unit='ms')
date_end = pd.to_datetime(self.date_slider.value[1], unit='ms')
df = df[(date_begin <= df['date_played']) & (df['date_played'] <= date_end)]
# Filter by time played
time_begin = pd.to_datetime(self.time_slider.value[0], unit='ms').time()
time_end = pd.to_datetime(self.time_slider.value[1], unit='ms').time()
df = df[(time_begin <= df['time_played']) & (df['time_played'] <= time_end)]
# Join the counts and circle size columns to the df
df = self.get_selected_counts(df)
return df
def get_selected_counts(self, df):
"""
If no tracks are selected, simply join empty columns for counts and circle_size.
Otherwise, compute the counts and circle sizes, and join those columns to the df.
        Arguments:
-df : filtered DataFrame
Returns filtered DataFrame with additional columns for counts and circle_size.
"""
if df.empty:
df['counts'] = pd.Series([])
df['circle_size'] = pd.Series([])
return df
df_counts = df.groupby(['song_name', 'artist_name']).size().reset_index(name='counts')
df_counts = df_counts.apply(self.apply_circle_sizes, axis=1)
return pd.merge(df, df_counts, on=['song_name', 'artist_name'], how='left')
def apply_circle_sizes(self, row):
"""
Determines the size of each circle based on the number of times that track has been played.
"""
if row['counts'] == 1:
row['circle_size'] = 5
elif 1 < row['counts'] <= 5:
row['circle_size'] = 7
elif row['counts'] > 5:
row['circle_size'] = 10
return row
| [
"bokeh"
] |
7f8f23d5958ad9f9eb9660690570583e84ff3932 | Python | qks1lver/mass | /src/core.py | UTF-8 | 1,876 | 2.703125 | 3 | [] | no_license | #!/usr/bin/env python3
# Import
import os
import cobra
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from multiprocessing import Pool
# Classes
class Model:
def __init__(self, p_model='', verbose=False):
self.p_model = p_model
self.verbose = verbose
self.model = None
def load(self, p_data=''):
if p_data:
self.p_model = p_data
if not self.p_model:
raise ValueError('Missing data path, self.p_data={}'.format(self.p_model))
if self.p_model.endswith('.xml'):
# Load SBML
if self.verbose:
print('Loading SBML: %s ...' % self.p_model)
self.model = cobra.io.read_sbml_model(self.p_model)
if self.verbose:
print('Loaded SBML: %s' % self.p_model)
else:
raise ValueError('Cannot identify file type for %s' % self.p_model)
return
def analyze(self, fva_fractions=None):
if not fva_fractions:
fva_fractions = [1., 0.95]
if self.verbose:
print('Performing flux balance analysis (pFBA) ...')
df = pd.DataFrame({'flux': cobra.flux_analysis.pfba(self.model).x[:10]})
if self.verbose:
print('Performing flux variability analysis (FVA) ...')
with Pool(processes=os.cpu_count()) as pool:
res = pool.map(self._fva, fva_fractions)
fva_min, fva_max = zip(*res)
df['min'] = fva_min
df['max'] = fva_max
print(df.head())
sns.set()
sns.scatterplot(data=df)
plt.show()
return
def _fva(self, fraction):
fva = cobra.flux_analysis.flux_variability_analysis(self.model, reaction_list=self.model.reactions[:10], fraction_of_optimum=fraction, loopless=True)
return fva['minimum'], fva['maximum']
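# Usage sketch ('model.xml' is a placeholder path, not shipped with this module):
#   m = Model(p_model='model.xml', verbose=True)
#   m.load()
#   m.analyze(fva_fractions=[1.0, 0.9])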
| [
"matplotlib",
"seaborn"
] |
a8ae0d0c745f1c90279892d2d2f82f1802ba1379 | Python | dr-rodriguez/MovingMast | /movingmast/plotting.py | UTF-8 | 6,597 | 2.71875 | 3 | [
"BSD-3-Clause"
] | permissive | # Functions to handle plotting
from .polygon import parse_s_region
from bokeh.plotting import figure, output_file, show, output_notebook
from bokeh.layouts import column
from bokeh.models import Arrow, VeeHead, HoverTool, Slider, ColumnDataSource
from bokeh.palettes import Spectral7 as palette
import matplotlib.pyplot as plt
def polygon_bokeh(stcs, display=True):
patch_xs = parse_s_region(stcs)['ra']
patch_ys = parse_s_region(stcs)['dec']
p = figure(plot_width=700, x_axis_label="RA (deg)", y_axis_label="Dec (deg)")
data = {'x': [patch_xs], 'y': [patch_ys]}
p.patches('x', 'y', source=data, fill_alpha=0.1, line_color="black", line_width=0.5)
p.add_layout(Arrow(end=VeeHead(line_color="black", line_width=1), line_width=2,
x_start=patch_xs[0], y_start=patch_ys[0],
x_end=patch_xs[1], y_end=patch_ys[1]))
p.x_range.flipped = True
if display:
output_notebook()
show(p)
else:
return p
def quick_bokeh(stcs, outfile='test.html'):
patch_xs = parse_s_region(stcs)['ra']
patch_ys = parse_s_region(stcs)['dec']
p = figure(plot_width=700)
data = {'x': [patch_xs], 'y': [patch_ys]}
p.patches('x', 'y', source=data, fill_alpha=0.1, line_color="black", line_width=0.5)
p.y_range.flipped = True
output_file(outfile)
show(p)
def quick_plot(stcs):
patch_xs = parse_s_region(stcs)['ra']
patch_ys = parse_s_region(stcs)['dec']
f, ax = plt.subplots(figsize=(8, 4))
ax.scatter(patch_xs, patch_ys, edgecolors="black", marker='.', linestyle='None', s=50,
facecolors='black')
for i in range(len(patch_xs)):
ax.text(patch_xs[i], patch_ys[i], str(i))
plt.show()
def _individual_plot_data(df):
# Generate individual patch information for each POLYGON of an observation, currently only for Kepler and K2
plot_data = []
for i, row in df.iterrows():
stcs_list = [f'POLYGON {s.strip()}' for s in row['s_region'].split('POLYGON') if s != '']
for stcs in stcs_list:
if 'CIRCLE' in stcs:
# Sometimes circles are in the data and get strings with 'POLYGON CIRCLE'
stcs = stcs.replace('POLYGON ', '')
            # Add patches with the observation footprints
coords = parse_s_region(stcs)
patch_xs = [coords['ra']]
patch_ys = [coords['dec']]
data = ColumnDataSource({'x': patch_xs, 'y': patch_ys,
'obs_collection': [row['obs_collection']],
'instrument_name': [row['instrument_name']],
'obs_id': [row['obs_id']],
'target_name': [row['target_name']],
'proposal_pi': [row['proposal_pi']],
'obs_mid_date': [row['obs_mid_date']],
'filters': [row['filters']]})
plot_data.append(data)
return plot_data
def mast_bokeh(eph, mast_results, stcs=None, display=False):
# Function to produce a Bokeh plot of MAST results with the target path
p = figure(plot_width=700, x_axis_label="RA (deg)", y_axis_label="Dec (deg)")
# Target path
eph_data = {'eph_x': eph['RA'], 'eph_y': eph['DEC'], 'Date': eph['datetime_str']}
eph_plot1 = p.line(x='eph_x', y='eph_y', source=eph_data, line_width=2,
line_color='black', legend=eph['targetname'][0])
eph_plot2 = p.circle(x='eph_x', y='eph_y', source=eph_data, fill_color="black",
size=12, legend=eph['targetname'][0])
p.add_tools(HoverTool(renderers=[eph_plot1, eph_plot2], tooltips=[('Date', "@Date")]))
# Target footprint
patch_xs = parse_s_region(stcs)['ra']
patch_ys = parse_s_region(stcs)['dec']
stcs_data = {'stcs_x': [patch_xs], 'stcs_y': [patch_ys]}
p.patches('stcs_x', 'stcs_y', source=stcs_data, fill_alpha=0., line_color="grey", line_width=0.8,
line_dash='dashed', legend='Search Area')
# Prepare MAST footprints
obsDF = mast_results.to_pandas()
obsDF['coords'] = obsDF.apply(lambda x: parse_s_region(x['s_region']), axis=1)
for col in mast_results.colnames:
if isinstance(obsDF[col][0], bytes):
obsDF[col] = obsDF[col].str.decode('utf-8')
# Loop over missions, coloring each separately
mast_plots = []
for mission, color in zip(obsDF['obs_collection'].unique(), palette):
ind = obsDF['obs_collection'] == mission
# Some missions have very complex STCS and need to be treated separately
if mission in ('Kepler', 'K2', 'K2FFI'):
plot_data = _individual_plot_data(obsDF[ind])
for data in plot_data:
mast_plots.append(p.patches('x', 'y', source=data, legend=mission,
fill_color=color, fill_alpha=0.3, line_color="white", line_width=0.5))
else:
# Add patches with the observation footprings
patch_xs = [c['ra'] for c in obsDF['coords'][ind]]
patch_ys = [c['dec'] for c in obsDF['coords'][ind]]
data = {'x': patch_xs, 'y': patch_ys, 'obs_collection': obsDF['obs_collection'][ind],
'instrument_name': obsDF['instrument_name'][ind], 'obs_id': obsDF['obs_id'][ind],
'target_name': obsDF['target_name'][ind], 'proposal_pi': obsDF['proposal_pi'][ind],
'obs_mid_date': obsDF['obs_mid_date'][ind], 'filters': obsDF['filters'][ind]}
mast_plots.append(p.patches('x', 'y', source=data, legend=mission,
fill_color=color, fill_alpha=0.3, line_color="white", line_width=0.5))
# Add hover tooltip for MAST observations
tooltip = [("obs_id", "@obs_id"),
("target_name", "@target_name"),
("instrument_name", "@instrument_name"),
("filters", "@filters"),
('obs_mid_date', '@obs_mid_date')]
p.add_tools(HoverTool(renderers=mast_plots, tooltips=tooltip))
# Additional settings
p.legend.click_policy = "hide"
p.x_range.flipped = True
# Slider for alpha settings
slider = Slider(start=0, end=1, step=0.01, value=0.3, title="Footprint opacity")
for i in range(len(mast_plots)):
slider.js_link('value', mast_plots[i].glyph, 'fill_alpha')
final = column(p, slider)
if display:
output_notebook()
show(final)
else:
return final
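# Usage sketch: `eph` (an ephemeris table with RA/DEC/datetime_str/targetname columns)
# and `mast_results` (a MAST observation table) are assumed to be built by the caller:
#   layout = mast_bokeh(eph, mast_results, stcs=search_stcs, display=False)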
| [
"matplotlib",
"bokeh"
] |
67605015491e57689a2a7e30782cbb3cf1a97346 | Python | raphael-group/belayer | /src/slideseq_helpers.py | UTF-8 | 8,132 | 2.734375 | 3 | [] | no_license | import os
from pprint import pprint
from csv import reader
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import pandas as pd
import seaborn as sns
from sklearn.decomposition import PCA
from sklearn.cluster import KMeans
from sklearn.metrics import adjusted_rand_score, average_precision_score, precision_recall_curve
from sklearn import linear_model,preprocessing
from scipy.stats import mode, poisson, chi2
from scipy.spatial import ConvexHull, convex_hull_plot_2d, Delaunay
from scipy.optimize import minimize, Bounds
from scipy.linalg import orth, eigh
from scipy import sparse, io
import statsmodels.api
import statsmodels as sm
import anndata
import scanpy as sc
from glmpca import glmpca
import alphashape
import networkx as nx
from shapely.geometry import Point
from shapely.geometry.polygon import Polygon
def alpha_shape(points, alpha, only_outer=True):
"""
Compute the alpha shape (concave hull) of a set of points.
:param points: np.array of shape (n,2) points.
:param alpha: alpha value.
:param only_outer: boolean value to specify if we keep only the outer border
or also inner edges.
:return: set of (i,j) pairs representing edges of the alpha-shape. (i,j) are
the indices in the points array.
"""
assert points.shape[0] > 3, "Need at least four points"
def add_edge(edges, i, j):
"""
Add a line between the i-th and j-th points,
if not in the list already
"""
if (i, j) in edges or (j, i) in edges:
# already added
assert (j, i) in edges, "Can't go twice over same directed edge right?"
if only_outer:
# if both neighboring triangles are in shape, it is not a boundary edge
edges.remove((j, i))
return
edges.add((i, j))
tri = Delaunay(points)
edges = set()
# Loop over triangles:
# ia, ib, ic = indices of corner points of the triangle
for ia, ib, ic in tri.simplices:
pa = points[ia]
pb = points[ib]
pc = points[ic]
# Computing radius of triangle circumcircle
# www.mathalino.com/reviewer/derivation-of-formulas/derivation-of-formula-for-radius-of-circumcircle
a = np.sqrt((pa[0] - pb[0]) ** 2 + (pa[1] - pb[1]) ** 2)
b = np.sqrt((pb[0] - pc[0]) ** 2 + (pb[1] - pc[1]) ** 2)
c = np.sqrt((pc[0] - pa[0]) ** 2 + (pc[1] - pa[1]) ** 2)
s = (a + b + c) / 2.0
area = np.sqrt(s * (s - a) * (s - b) * (s - c))
circum_r = a * b * c / (4.0 * area)
# print(circum_r)
if circum_r < alpha:
add_edge(edges, ia, ib)
add_edge(edges, ib, ic)
add_edge(edges, ic, ia)
return edges
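# Usage sketch (the alpha value below is arbitrary, chosen only for illustration):
#   pts = np.random.rand(200, 2)
#   boundary_edges = alpha_shape(pts, alpha=0.2)  # set of (i, j) index pairs into pts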
# find closest point in grid {xlist X ylist} to a single cell=[x,y]
# can filter to only look at a subset select_grid_pts of grid points
# if there is no such grid point you should try increasing the radius
def dist_to_grid(cell, xlist, ylist, select_grid_pts=None, radius=1):
x,y=cell
xl_ind_topright = np.searchsorted( xlist,x)
yl_ind_topright = np.searchsorted( ylist,y)
cands=[ np.array([xlist[xl_ind_topright],ylist[yl_ind_topright]]) ]
for r in range(1,radius+1):
cands.append( np.array([xlist[xl_ind_topright], ylist[yl_ind_topright-r]]) )
cands.append( np.array([xlist[xl_ind_topright-r], ylist[yl_ind_topright]]) )
cands.append( np.array([xlist[xl_ind_topright-r], ylist[yl_ind_topright-r]]) )
if select_grid_pts is not None:
cands=[cand for cand in cands if tuple(cand) in select_grid_pts]
if len(cands) > 0:
min_pt=cands[ np.argmin( [np.linalg.norm(cell-cand) for cand in cands] ) ]
min_dist=np.linalg.norm(min_pt-cell)
# https://stackoverflow.com/questions/25823608/find-matching-rows-in-2-dimensional-numpy-array
min_ind=np.where((np.array(select_grid_pts) == (min_pt[0],min_pt[1])).all(axis=1))[0][0]
return min_dist,min_pt,min_ind
else:
return -1, -1, -1
#########################################################################################################
# solves harmonic equation on a small grid (finite difference method)
# then rounds each cell to nearest neighbor in grid (using radius variable)
# if something does not work, try making grid_spacing smaller and increasing the radius
# Assumes len(boundary_array) = 2
# TODO: extend to >2 approximate layer boundaries
def harmonic_slideseq(coords, boundary_array, grid_spacing=40, radius=1):
# STEP 1: make grid
minX=np.floor( np.min(coords[:,0]) / grid_spacing ) * grid_spacing
maxX=np.ceil( np.max(coords[:,0]) / grid_spacing ) * grid_spacing + 1
minY=np.floor( np.min(coords[:,1]) / grid_spacing ) * grid_spacing
maxY=np.ceil( np.max(coords[:,1]) / grid_spacing ) * grid_spacing + 1
xlist=np.arange(minX, maxX, grid_spacing)
ylist=np.arange(minY, maxY, grid_spacing)
xv,yv=np.meshgrid(xlist,ylist)
######################################################################
# STEP 2: create alpha-shape of coords
edges = list(alpha_shape(coords, alpha=1000, only_outer=True))
G = nx.DiGraph(edges)
edge_pts=list(nx.simple_cycles(G))[0]
edge_pts=[(coords[e,0],coords[e,1]) for e in edge_pts]
# STEP 2.5: restrict to grid points lying inside alpha-shape
G=nx.generators.lattice.grid_2d_graph(len(xlist), len(ylist))
A=nx.adjacency_matrix(G) # adjacency matrix of grid graph
polygon=Polygon(edge_pts)
grid_pts_in_polygon=[]
grid_pts_in_polygon_inds=[]
for ind,node in enumerate(G.nodes()):
x_old,y_old=node # graph nodes go from (0,0) to (n,m), need to convert to our coords, eg (2400,2200)
x=x_old*grid_spacing+minX
y=y_old*grid_spacing+minY
point=Point((x,y))
if polygon.contains(point):
grid_pts_in_polygon.append((x,y))
grid_pts_in_polygon_inds.append(ind)
all_pts=np.array( grid_pts_in_polygon )
A=A[np.ix_(grid_pts_in_polygon_inds,grid_pts_in_polygon_inds)] # restrict to grid points in polygon
######################################################################
# STEP 3: assign grid points to either left boundary or right boundary (or both/neither)
left_boundary, right_boundary=boundary_array
left_inds=[]
right_inds=[]
lr_inds=[]
for c in np.arange( left_boundary.shape[0] ):
cell=left_boundary[c,:]
_,_,closest_grid_ind=dist_to_grid(cell, xlist, ylist, select_grid_pts=grid_pts_in_polygon)
left_inds.append(closest_grid_ind)
lr_inds.append(closest_grid_ind)
for c in np.arange( right_boundary.shape[0] ):
cell=right_boundary[c,:]
_,_,closest_grid_ind=dist_to_grid(cell, xlist, ylist, select_grid_pts=grid_pts_in_polygon)
right_inds.append(closest_grid_ind)
lr_inds.append(closest_grid_ind)
non_lr_inds=[t for t in range(len(grid_pts_in_polygon)) if t not in lr_inds]
######################################################################
# STEP 4: harmonic interpolation (following http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.69.5962&rep=rep1&type=pdf)
D=np.diag(np.asarray(np.sum(A,0)).flatten())
L=D-A
u=-1 * np.ones(A.shape[0])
u[left_inds]=0
u[right_inds]=100
uB=u[lr_inds]
RT=L[ np.ix_(non_lr_inds, lr_inds) ]
L_U=L[np.ix_(non_lr_inds,non_lr_inds)]
# solve system of eqns (eqn (7) in above textbook)
print('starting harmonic interpolation...')
uothers,_=sparse.linalg.cg(L_U, (-RT@uB).T)
u[non_lr_inds]=uothers
######################################################################
# STEP 5: assign each cell to nearest grid point
N=coords.shape[0]
depth=np.zeros(N)
for i in range(N):
cell=coords[i,:]
_,_,closest_grid_ind=dist_to_grid(cell, xlist, ylist, select_grid_pts=grid_pts_in_polygon)
depth[i]=u[closest_grid_ind]
return depth
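# Usage sketch, assuming coords is an (N, 2) array of bead positions and the two
# boundaries are (M, 2) arrays of user-selected boundary points:
#   depth = harmonic_slideseq(coords, [left_boundary, right_boundary])
# Each bead then receives a relative depth between 0 (left boundary) and 100 (right boundary).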
| [
"matplotlib",
"seaborn"
] |
cee7e9552fe7ec48d0729325f6be34f090e9b130 | Python | BigBigGamer/scdiode | /data/script_heat2.py | UTF-8 | 1,593 | 2.703125 | 3 | [] | no_license | import matplotlib.pyplot as plt
from matplotlib import rc
import matplotlib
from matplotlib.backends.backend_pdf import PdfPages
from scipy.optimize import curve_fit
def UfromJ(J,Js,Rb,n,T=300):
    k = 1.380649*10**(-23) # J/K
e = 1.6*10**(-19)
phiT = k*T/e
# print(J,Js)
U = n*phiT*np.log(J/Js+1)+J*Rb
return U
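# Rough numerical check with illustrative values: for Js=1e-4 A, Rb=2 Ohm, n=2, T=300 K,
# phiT = k*T/e is about 0.0259 V, so UfromJ(0.01, 1e-4, 2, 2) is roughly
# 2*0.0259*ln(101) + 0.01*2, i.e. about 0.26 V.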
plt.rc('text', usetex = True)
plt.rc('font', size=20, family = 'serif')
plt.rc('text.latex',unicode=True)
# plt.rc('legend', fontsize=13)
plt.rc('text.latex', preamble=r'\usepackage[russian]{babel}')
import numpy as np
volt01 = np.array([0,0.2,0.26,0.3,0.34,0.36,0.38,0.4,0.44])
# Aps
curr01 = np.array([0,1,8,14,28,38,54,74,100]) / 1000
# Volts
volt1 = np.array([0,0.1,0.2,0.3,0.36,0.38,0.4,0.42])
# Aps
curr1 = np.array([0,0,4,20,38,64,76,100]) / 1000
curr_t = np.linspace(0,100/1000,200)
popt, pcov = curve_fit(UfromJ,curr1,volt1,p0 = [10**(-4),2,2])
# popt, pcov = curve_fit(UfromJ,curr1,volt1,p0 = [10**(-4),7,1],bounds = ([0.028001*10**(-3),0,0],[np.inf,np.inf,np.inf]))
perr = np.sqrt(np.diag(pcov))
voltsTheory = UfromJ(curr_t,Js=popt[0],Rb=popt[1],n=popt[2])
print('Approximation Done:\nJs = {} Amps\nRb = {} Ohm\nn = {}'.format(popt[0],popt[1],popt[2]))
print(perr)
plt.figure(figsize = (10,7))
plt.plot(voltsTheory,curr_t*1000,'k-',label = 'Approximation, T = 340K')
plt.plot(volt1,curr1*1000,'ro',label = 'Experiment, T = 340K')
plt.plot(volt01,curr01*1000,'bs',label = 'Experiment, T = 298K')
plt.legend()
plt.grid(which = 'both')
plt.xlabel(r'$U, V$')
plt.ylabel(r'$J, mA$')
# plt.savefig('imgs/vah12str.png',dpi=500)
plt.show()
| [
"matplotlib"
] |
41520c962ad2989380b26150d0bbfc5ac837a85c | Python | u20806389/CBT-700-Class-Group | /reproductions/Example/Example_02_08.py | UTF-8 | 1,450 | 2.546875 | 3 | [] | no_license | from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
from utils import tf, feedback, marginsclosedloop
from utilsplot import bode, step_response_plot
# Loop shaping is an iterative procedure where the designer shapes and
# reshapes |L(jw)| after computing:
# 1. the gain margin (GM) and phase margin (PM),
# 2. the peaks of the closed-loop frequency responses (Mt and Ms),
# 3. selected closed-loop time responses,
# 4. the magnitude of the input signal.
#
# 1 to 4 are the important frequency domain measures used to assess
# performance and characterise speed of response
s = tf([1, 0], 1)
Kc = 0.05
# plant model
G = 3*(-2*s + 1)/((10*s + 1)*(5*s + 1))
# Controller model
K = Kc*(10*s + 1)*(5*s + 1)/(s*(2*s + 1)*(0.33*s + 1))
# closed-loop transfer function
L = G*K
# magnitude and phase of L
plt.figure('Figure 2.19')
bode(L, -2, 1)
# From the figure we can calculate w180
# w180 = 0.44
GM, PM, wc, wb, wbt, valid = marginsclosedloop(L)
print('GM:', np.round(GM, 2))
print('PM:', np.round(PM*np.pi/180, 2), "rad or", np.round(PM, 2), "deg")
print('wb:', np.round(wb, 2))
print('wc:', np.round(wc, 2))
print('wbt:', np.round(wbt, 4))
# Response to step in reference for loop shaping design
# y = Tr, r(t) = 1 for t > 0
# u = KSr, r(t) = 1 for t > 0
plt.figure('Figure 2.20')
T = feedback(L, 1)
S = feedback(1, L)
u = K*S
step_response_plot(T, u, 50, 0)
# magnitude and phase of K
plt.figure('Figure 2.21')
bode(K, -2, 1)
plt.show()
| [
"matplotlib"
] |
d7f4e688c0f9da8e63e95cd5520ef8f572a77dd3 | Python | carlosmertens/Flowers-Classifier | /predict.py | UTF-8 | 6,371 | 3.0625 | 3 | [
"MIT"
] | permissive | # PROGRAMMER: Mertens Moreno, Carlos Edgar
# DATE CREATED: 05/19/2018
# REVISED DATE: 06/10/2018 - improved according to reviewer's feedback
# PURPOSE: Predict a image of a flower using the trained network checkpoint.
# Use argparse Expected Call with <> indicating expected user input:
# python predict.py --input <path to a flower image> --checkpoint <checkpoint file to load
# pre-trained model>
# --category_names <json file with the labels> --top_k <number of top predictions>
# --gpu <to use GPU for training and testing>
# Example call:
# >>python train.py --input flowers/test/100/image_07896.jpg --checkpoint save_directory/checkpoint.pth
# --gpu
# Imports
import matplotlib.pyplot as plt
import torch
import numpy as np
from torchvision import models
from PIL import Image
from classifier import ModelClassifier
import json
import argparse
def get_args():
"""
Function to retrieve and parse the command line arguments,
then to return these arguments as an ArgumentParser object.
Parameters:
None.
Returns:
parser.parse_args(): inputed or default argument objects.
"""
parser = argparse.ArgumentParser()
parser.add_argument('--input', type=str, default='flowers/test/100/image_07896.jpg',
help="path to folder of images")
parser.add_argument('--checkpoint', type=str, default='save_directory/checkpoint.pth',
help='file to load the checkpoint')
parser.add_argument('--category_names', type=str, default='cat_to_name.json',
help='file to map the real names')
parser.add_argument('--top_k', type=int, default=3, help='top classes predicted to return')
parser.add_argument('--gpu', action='store_true', help='hyperparameters for GPU')
return parser.parse_args()
# Call function to get command line arguments
in_arg = get_args()
def load_checkpoint(filepath):
"""
Function to load the checkpoint and to rebuild the trained network for prediction.
Parameters:
filepath: Path to the checkpoint file.
Returns:
model: Pre-trained model loaded and classifiers updated to predict image.
        criterion: The criterion used to pre-train the network.
        optimizer: The optimizer used to pre-train the network.
        epochs: The number of epochs used to pre-train the network.
        class_idx: Class-to-index mapping saved during training.
"""
# Load the checkpoint
checkpoint = torch.load(filepath)
# Load the network
if checkpoint['arch'] == "densenet121":
model = models.densenet121(pretrained=True)
elif checkpoint['arch'] == "vgg16":
model = models.vgg16(pretrained=True)
model.classifier = ModelClassifier(checkpoint['in_features'], checkpoint['hidden_units'],
checkpoint['hidden_units2'])
model.load_state_dict(checkpoint['state_dict'])
criterion = checkpoint['criterion']
optimizer = checkpoint['optimizer']
epochs = checkpoint['epochs']
class_idx = checkpoint['model.class_to_idx']
return model, criterion, optimizer, epochs, class_idx
# Call function to load the model and its hyperparameters
model, criterion, optimizer, epochs, model.class_to_idx = load_checkpoint(in_arg.checkpoint)
def process_image(image):
'''
Scales, crops, and normalizes a PIL image for a PyTorch model.
Parameters:
image: Image to be processed
Returns:
        img: a Numpy array ready to pass to the model
'''
# Open image
img = Image.open(image)
# Resize image, keep aspect radio, keep 256 pixels on the shortest side
if img.size[0] >= img.size[1]:
img.thumbnail((1024,256))
else:
img.thumbnail((256,1024))
    # Set variables, calculate box dimensions, crop the image
width = img.size[0] / 2
height = img.size[1] / 2
box = (width - 112, height - 112, width + 112, height + 112)
img = img.crop(box)
# Set mean and standard deviations,
mean = [0.485, 0.456, 0.406]
stdv = [0.229, 0.224, 0.225]
# Convert color chanel from 0-255 to 0-1
np_image = np.array(img)
img = np_image/255
# Normalize and re-order dimension
img = (img-mean)/stdv
img = img.transpose((2,0,1))
return img
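# The array returned above should be channel-first with shape (3, 224, 224): a 224x224
# centre crop, scaled to [0, 1] and normalised with the ImageNet mean/std used here.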
# Called function to load the image and process it to be predicted
test_image = process_image(in_arg.input)
def predict(image, model, topk=in_arg.top_k):
'''
Function to predict the class (or classes) of an image using a trained deep learning model.
Parameters:
image: Image already prepared to pass through the network
model: Model chosen to predict the image
topk: Number of top predictions chosen
Returns:
top_probs: Top probabilities in porcentage
top_classes: Top classes indexed on the network
'''
model.eval()
# Set variables
img = np.expand_dims(image, axis=0)
inputs = torch.from_numpy(img).float()
with torch.no_grad():
# Check for GPU
if in_arg.gpu and torch.cuda.is_available():
model.cuda()
inputs = inputs.cuda()
# Pass input through the network
output = model.forward(inputs)
# Get probabilities
ps = torch.exp(output).data.topk(topk)
# Unpacking the Tensor
probs, classes = ps
# Convert to array
probs = probs.data[0].cpu().numpy()
top_probs = [round(each * 1, 3) for each in probs]
idx_to_class = dict(zip(model.class_to_idx.values(), model.class_to_idx.keys()))
classes = classes.data[0].cpu().numpy()
top_classes = [idx_to_class[each] for each in classes]
return top_probs, top_classes
# Call function to predict the image
probs_k, classes_k = predict(test_image, model)
# Load json file with the names to categorize
with open(in_arg.category_names, 'r') as f:
cat_to_name = json.load(f)
# Label the top classes
classes_k_name = [cat_to_name[each] for each in classes_k]
# Display results
print('\n\n*** PREDICTIONS ***\n')
for i in range(0, in_arg.top_k):
name = classes_k_name[i].title()
porc = round(probs_k[i]*100, )
print(name, porc,"%")
| [
"matplotlib"
] |
bf0cc9f5b745864ccecb9d50e6fdc4b5ee95f049 | Python | SteffenSunde/aoc_2018_py | /aoc_2018_py/day10.py | UTF-8 | 3,447 | 3.625 | 4 | [] | no_license | import numpy as np
import re
from typing import List, Tuple
import math
import matplotlib.pyplot as plt # For debugging
class Point:
def __init__(self, pos_x, pos_y, vel_x=0, vel_y=0):
self.position = (pos_x, pos_y)
self.velocity = (vel_x, vel_y)
def inc(self, t=1):
return Point(self.position[0] + self.velocity[0]*t,
self.position[1] + self.velocity[1]*t,
self.velocity[0], self.velocity[1])
def __repr__(self):
return "Pos: {}, vel: {}".format(self.position, self.velocity)
def read_points(file_path) -> List[Point]:
points = []
with open(file_path) as data_file:
for line in data_file:
numbers = list(map(int, re.findall("(\-?\d+)", line)))
points.append(Point(*numbers))
return points
def parse_sky(stars: List[Point]) -> str:
# Takes a list of points and returns a printable string
# representation.
min_x, min_y, max_x, max_y = bounding_box_corners(stars)
grid = [[' ']*(max_x - min_x + 1) for _ in range(min_y, max_y + 1)]
for p in stars:
grid[p.position[1] - min_y][p.position[0] - min_x] = '#'
message = ""
for row in grid:
message += "".join(row) + '\n'
return message
def bounding_box(stars: List[Point]) -> int:
# Computes the half circumference of the bounding box.
x_min = stars[0].position[0]
x_max = stars[0].position[0]
y_min = stars[0].position[1]
y_max = stars[0].position[1]
for i in range(1, len(stars)):
x_min = min(x_min, stars[i].position[0])
x_max = max(x_max, stars[i].position[0])
y_min = min(y_min, stars[i].position[1])
y_max = max(y_max, stars[i].position[1])
return (x_max - x_min) + (y_max - y_min)
def bounding_box_corners(stars: List[Point]) -> Tuple[int, int, int, int]:
    # Computes the corner coordinates (x_min, y_min, x_max, y_max) of the bounding box.
x_min = stars[0].position[0]
x_max = stars[0].position[0]
y_min = stars[0].position[1]
y_max = stars[0].position[1]
for i in range(1, len(stars)):
x_min = min(x_min, stars[i].position[0])
x_max = max(x_max, stars[i].position[0])
y_min = min(y_min, stars[i].position[1])
y_max = max(y_max, stars[i].position[1])
return x_min, y_min, x_max, y_max
def part_one():
"""
    Reads point positions and velocities from file and integrates
the velocity until a message appears.
Assumptions:
- All points contributes to the message
- Points velocities are constant
- Minimising the bounding box will reveal the message
- Golden section search should do the trick
"""
points = read_points("inputs/day10.input")
# Golden section search, assuming minima will be within [0, 20000]
low = 0
high = 20000
# graph = [high, low] # For debugging (Convergence plot)
while(high - low > 1):
box_low = bounding_box([p.inc(low) for p in points])
box_high = bounding_box([p.inc(high) for p in points])
if box_high > box_low:
high = low + 0.6108*(high - low)
elif box_high < box_low:
low = high - 0.6108*(high - low)
# graph.append(high)
# graph.append(low)
# plt.plot(graph)
# plt.show()
sec = int((high + low) / 2)
message = [p.inc(sec) for p in points]
print(parse_sky(message))
return sec
part_one() | [
"matplotlib"
] |
1a8baa4cfeeacbbd87c99775ec8e38b7175ce13c | Python | rchoudhury7/outliers | /experiments/CRTS/randomforest.py | UTF-8 | 1,555 | 2.75 | 3 | [] | no_license | import sys
sys.path.append("..")
sys.path.append("../../data")
sys.path.append("../../lib/speedynn")
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import generate
import exps_util
from sklearn.preprocessing import StandardScaler
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.metrics import accuracy_score, matthews_corrcoef
# get dataframe
df = generate.get_data("crts_labeled", shuffle=True)
# get labels and patterns
Y = df['class'].values
features = list(df.columns)
features.remove('class')
X = df[features].values
print("All available classes: %s" % str(set(Y)))
# make binary classification task
#X, Y = exps_util.make_binary(X, Y, class1=[13], class2=[8,9,10])
X, Y = exps_util.make_binary(X, Y, class1=[2], class2=None)
print("Number of elements for class %i: %i" % (1, (Y==1).sum()))
print("Number of elements for class %i: %i" % (2, (Y==2).sum()))
# split up into training and test (and scale the patterns)
train_amount = 0.5
Xtrain, Xtest = X[:int(train_amount*len(X))], X[int(train_amount*len(X)):]
ytrain, ytest = Y[:int(train_amount*len(X))], Y[int(train_amount*len(X)):]
scaler = StandardScaler().fit(Xtrain)
Xtrain = scaler.transform(Xtrain)
Xtest = scaler.transform(Xtest)
model = ExtraTreesClassifier(n_estimators=100, bootstrap=True, max_features=10, criterion="gini", n_jobs=-1, random_state=0)
model.fit(Xtrain, ytrain)
preds = model.predict(Xtest)
print("\nClassification error: %f" % (1.0 - accuracy_score(ytest,preds)))
print("MCC: %f" % (matthews_corrcoef(ytest, preds)))
| [
"matplotlib"
] |
eb6db041f3f09798a116c21bb2deef30fb07ab18 | Python | moodycam/MARS-5470-4470 | /Hopkins/Notes/Brady_Notes.py | UTF-8 | 1,761 | 2.8125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
#%%
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
#%%
""" Setting Up the Plot """
#%%
fig, ax = plt.subplots(figsize=(2,2))
#%%
fig, ax = plt.subplots(figsize=(3,3), nrows=2, ncols=2)
ax[0,0].set_facecolor('black')
#%%
""" Line Plots """
#%%
t = np.linspace(-np.pi, np.pi, 64, endpoint=True)
c = np.cos(t)
s = np.sin(t)
#%%
fig, ax = plt.subplots(figsize=(5,5))
ax.plot(t, s, label = 'sine', linewidth=2, linestyle='--', color='red')
ax.plot(t,c, label = 'cosine', linewidth=2, linestyle='-.', color='#A9A9A9')
plt.legend()
#%%
fig, ax = plt.subplots(figsize=(5,5))
ax.plot(t, s, label = 'sine', linewidth=2, linestyle='-', color='red',
marker='o', markersize=5)
ax.plot(t,c, label = 'cosine', linewidth=2, linestyle='-', color='#A9A9A9')
plt.legend()
#%%
fig, ax = plt.subplots(figsize=(6,3), ncols = 2)
ax[0].plot(t,s)
ax[1].plot(t,c)
#%%
fig, ax = plt.subplots(figsize=(6,3), ncols = 2, sharey=True, sharex=True)
ax[0].plot(t,s)
ax[1].plot(t,c)
#%%
fig, ax = plt.subplots(figsize=(5,5))
ax.plot(t, c)
ax.plot(t,s)
########
ax.set_title('Sines and Cosines', fontsize=18)
ax.set_xlabel('Time')
ax.set_ylabel('Magnitude')
ax.set_ylim([-2,2])
ax.set_xlim([-3,3])
#%%
ax.set('Sines and Cosines', xlabel='Time', ylabel='Magnitude', ylim=([-2,2]), xlim=([-3,3]))
#%%
plt.style.available
#%%
plt.style.use('seaborn-bright')
fig,ax = plt.subplots(figsize=(5,5))
ax.plot(t,s)
#%%
""" Meshes """
#%%
import cartopy #Couldn't get to work
import cartopy.crs as ccrs
import cartopy.feature as cfeature
#%%
fig, ax = plt.subplots(figsize=(5,5), subplot_kw=dict(projection=ccrs.Robinson()))
| [
"matplotlib"
] |