blob_id stringlengths 40..40 | language stringclasses 1 value | repo_name stringlengths 5..135 | path stringlengths 2..372 | src_encoding stringclasses 26 values | length_bytes int64 55..3.18M | score float64 2.52..5.19 | int_score int64 3..5 | detected_licenses sequencelengths 0..38 | license_type stringclasses 2 values | code stringlengths 55..3.18M | used_libs sequencelengths 1..5 |
---|---|---|---|---|---|---|---|---|---|---|---|
ee92269ab11111536b7b926e909d4a4c4766270b | Python | PanditRohit/COVID-19-Data-Analysis | /COVID-19 Data Analysis.py | UTF-8 | 2,943 | 3.28125 | 3 | [] | no_license |
# Import Libraries
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
#Import Covid-19 Dataset
corona_dataset_csv = pd.read_csv("C:/Users/pandit/Downloads/covid19_Confirmed_dataset.csv")
corona_dataset_csv.head()
corona_dataset_csv.shape
#Delete the useless columns after Exploratory Data Analysis
corona_dataset_csv.drop(["Lat","Long"],axis=1,inplace=True)
corona_dataset_csv.head(10)
#Aggregating the rows by the country
corona_dataset_aggregated=corona_dataset_csv.groupby("Country/Region").sum()
corona_dataset_aggregated.head()
corona_dataset_aggregated.shape
#Visualizing Data related to Countries
corona_dataset_aggregated.loc["China"].plot()
corona_dataset_aggregated.loc["India"].plot()
corona_dataset_aggregated.loc["Italy"].plot()
plt.legend()
#Calculating a good measure
corona_dataset_aggregated.loc['China'][:3].plot()
corona_dataset_aggregated.loc['China'].diff().plot()
#Finding maximum infection rates for Selected countries
corona_dataset_aggregated.loc['China'].diff().max()
corona_dataset_aggregated.loc['India'].diff().max()
corona_dataset_aggregated.loc['Italy'].diff().max()
#Find maximum infection rate for all the countries
countries = list(corona_dataset_aggregated.index)
max_infection_rates=[]
for c in countries :
max_infection_rates.append(corona_dataset_aggregated.loc[c].diff().max())
corona_dataset_aggregated["max_infection_rates"] = max_infection_rates
corona_dataset_aggregated.head()
#Create a new dataframe with only needed column
corona_data = pd.DataFrame(corona_dataset_aggregated["max_infection_rates"])
corona_data.head()
#Now import world happiness report dataset
happiness_report = pd.read_csv("C:/Users/pandit/Downloads/worldwide_happiness_report.csv")
happiness_report.head()
#Make a list of useless columns and drop the same
useless_cols = ["Overall rank","Score","Generosity","Perceptions of corruption"]
happiness_report.drop(useless_cols,axis=1,inplace=True)
happiness_report.head()
#Change the indices of the dataframe
happiness_report.set_index("Country or region",inplace=True)
happiness_report.head()
#Check the shapes of both the dataframes
corona_data.head()
corona_data.shape
happiness_report.shape
#Join the two datasets using inner join as the shape of the datasets vary
data=corona_data.join(happiness_report,how="inner")
data.head()
#Create a correlation matrix
data.corr()
#Visualization of the results
data.head()
#Plotting GDP vs maximum infection rates
x = data["GDP per capita"]
y = data["max_infection_rates"]
sns.scatterplot(x,np.log(y))
sns.regplot(x,np.log(y))
#Plotting social support vs maximum infection rates
x = data["Social support"]
y = data["max_infection_rates"]
sns.scatterplot(x,np.log(y))
sns.regplot(x,np.log(y))
#Plotting Healthy life expectancy vs maximum infection rates
x = data["Healthy life expectancy"]
y = data["max_infection_rates"]
sns.scatterplot(x,np.log(y))
sns.regplot(x,np.log(y))
| [ "matplotlib", "seaborn" ] |
e435765339472ea9173527d546970d87a0d1c7fc | Python | elofamomo/btTTVT | /sampling.py | UTF-8 | 10,333 | 2.578125 | 3 | [] | no_license |
"""This file contains code used in "Think DSP",
by Allen B. Downey, available from greenteapress.com
Copyright 2015 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function, division
import thinkdsp
import thinkplot
import numpy as np
import matplotlib.pyplot as plt
PI2 = 2 * np.pi
FORMATS = ['pdf', 'eps']
def plot_beeps():
wave = thinkdsp.read_wave('253887__themusicalnomad__positive-beeps.wav')
wave.normalize()
thinkplot.preplot(3)
# top left
ax1 = plt.subplot2grid((4, 2), (0, 0), rowspan=2)
plt.setp(ax1.get_xticklabels(), visible=False)
wave.plot()
thinkplot.config(title='Input waves', legend=False)
# bottom left
imp_sig = thinkdsp.Impulses([0.01, 0.4, 0.8, 1.2],
amps=[1, 0.5, 0.25, 0.1])
impulses = imp_sig.make_wave(start=0, duration=1.3,
framerate=wave.framerate)
ax2 = plt.subplot2grid((4, 2), (2, 0), rowspan=2, sharex=ax1)
impulses.plot()
thinkplot.config(xlabel='Time (s)')
# center right
convolved = wave.convolve(impulses)
ax3 = plt.subplot2grid((4, 2), (1, 1), rowspan=2)
plt.title('Convolution')
convolved.plot()
thinkplot.config(xlabel='Time (s)')
thinkplot.save(root='sampling1',
formats=FORMATS,
legend=False)
XLIM = [-22050, 22050]
def plot_am():
wave = thinkdsp.read_wave('105977__wcfl10__favorite-station.wav')
wave.unbias()
wave.normalize()
# top
ax1 = thinkplot.preplot(6, rows=4)
spectrum = wave.make_spectrum(full=True)
spectrum.plot(label='spectrum')
thinkplot.config(xlim=XLIM, xticklabels='invisible')
#second
carrier_sig = thinkdsp.CosSignal(freq=10000)
carrier_wave = carrier_sig.make_wave(duration=wave.duration,
framerate=wave.framerate)
modulated = wave * carrier_wave
ax2 = thinkplot.subplot(2, sharey=ax1)
modulated.make_spectrum(full=True).plot(label='modulated')
thinkplot.config(xlim=XLIM, xticklabels='invisible')
# third
demodulated = modulated * carrier_wave
demodulated_spectrum = demodulated.make_spectrum(full=True)
ax3 = thinkplot.subplot(3, sharey=ax1)
demodulated_spectrum.plot(label='demodulated')
thinkplot.config(xlim=XLIM, xticklabels='invisible')
#fourth
ax4 = thinkplot.subplot(4, sharey=ax1)
demodulated_spectrum.low_pass(10000)
demodulated_spectrum.plot(label='filtered')
thinkplot.config(xlim=XLIM, xlabel='Frequency (Hz)')
thinkplot.save(root='sampling2',
formats=FORMATS)
#carrier_spectrum = carrier_wave.make_spectrum(full=True)
#carrier_spectrum.plot()
#convolved = spectrum.convolve(carrier_spectrum)
#convolved.plot()
#reconvolved = convolved.convolve(carrier_spectrum)
#reconvolved.plot()
def sample(wave, factor):
"""Simulates sampling of a wave.
wave: Wave object
factor: ratio of the new framerate to the original
"""
ys = np.zeros(len(wave))
ys[::factor] = wave.ys[::factor]
ts = wave.ts[:]
return thinkdsp.Wave(ys, ts, wave.framerate)
def make_impulses(wave, factor):
ys = np.zeros(len(wave))
ys[::factor] = 1
ts = np.arange(len(wave)) / wave.framerate
return thinkdsp.Wave(ys, ts, wave.framerate)
def plot_segments(original, filtered):
start = 1
duration = 0.01
original.segment(start=start, duration=duration).plot(color='gray')
filtered.segment(start=start, duration=duration).plot()
def plot_sampling(wave, root):
ax1 = thinkplot.preplot(2, rows=2)
wave.make_spectrum(full=True).plot(label='spectrum')
thinkplot.config(xlim=XLIM, xticklabels='invisible')
ax2 = thinkplot.subplot(2)
sampled = sample(wave, 4)
sampled.make_spectrum(full=True).plot(label='sampled')
thinkplot.config(xlim=XLIM, xlabel='Frequency (Hz)')
thinkplot.save(root=root,
formats=FORMATS)
def plot_sampling2(wave, root):
ax1 = thinkplot.preplot(6, rows=4)
wave.make_spectrum(full=True).plot(label='spectrum')
thinkplot.config(xlim=XLIM, xticklabels='invisible')
ax2 = thinkplot.subplot(2)
impulses = make_impulses(wave, 4)
impulses.make_spectrum(full=True).plot(label='impulses')
thinkplot.config(xlim=XLIM, xticklabels='invisible')
ax3 = thinkplot.subplot(3)
sampled = wave * impulses
spectrum = sampled.make_spectrum(full=True)
spectrum.plot(label='sampled')
thinkplot.config(xlim=XLIM, xticklabels='invisible')
ax4 = thinkplot.subplot(4)
spectrum.low_pass(5512.5)
spectrum.plot(label='filtered')
thinkplot.config(xlim=XLIM, xlabel='Frequency (Hz)')
thinkplot.save(root=root,
formats=FORMATS)
def plot_sampling3(wave, root):
ax1 = thinkplot.preplot(6, rows=3)
wave.make_spectrum(full=True).plot(label='spectrum')
thinkplot.config(xlim=XLIM, xticklabels='invisible')
impulses = make_impulses(wave, 4)
ax2 = thinkplot.subplot(2)
sampled = wave * impulses
spectrum = sampled.make_spectrum(full=True)
spectrum.plot(label='sampled')
thinkplot.config(xlim=XLIM, xticklabels='invisible')
ax3 = thinkplot.subplot(3)
spectrum.low_pass(5512.5)
spectrum.plot(label='filtered')
thinkplot.config(xlim=XLIM, xlabel='Frequency (Hz)')
thinkplot.save(root=root,
formats=FORMATS)
#filtered = spectrum.make_wave()
#plot_segments(wave, filtered)
def make_boxcar(spectrum, factor):
"""Makes a boxcar filter for the given spectrum.
spectrum: Spectrum to be filtered
factor: sampling factor
"""
fs = np.copy(spectrum.fs)
hs = np.zeros_like(spectrum.hs)
cutoff = spectrum.framerate / 2 / factor
for i, f in enumerate(fs):
if abs(f) <= cutoff:
hs[i] = 1
return thinkdsp.Spectrum(hs, fs, spectrum.framerate, full=spectrum.full)
def plot_sinc_demo(wave, factor, start=None, duration=None):
def make_sinc(t, i, y):
"""Makes a shifted, scaled copy of the sinc function."""
sinc = boxcar.make_wave()
sinc.shift(t)
sinc.roll(i)
sinc.scale(y * factor)
return sinc
def plot_mini_sincs(wave):
"""Plots sinc functions for each sample in wave."""
t0 = wave.ts[0]
for i in range(0, len(wave), factor):
sinc = make_sinc(t0, i, wave.ys[i])
seg = sinc.segment(start, duration)
seg.plot(color='green', linewidth=0.5, alpha=0.3)
if i == 0:
total = sinc
else:
total += sinc
seg = total.segment(start, duration)
seg.plot(color='blue', alpha=0.5)
sampled = sample(wave, factor)
spectrum = sampled.make_spectrum()
boxcar = make_boxcar(spectrum, factor)
start = wave.start if start is None else start
duration = wave.duration if duration is None else duration
sampled.segment(start, duration).plot_vlines(color='gray')
wave.segment(start, duration).plot(color='gray')
plot_mini_sincs(wave)
def plot_sincs(wave):
start = 1.0
duration = 0.01
factor = 4
short = wave.segment(start=start, duration=duration)
#short.plot()
sampled = sample(short, factor)
#sampled.plot_vlines(color='gray')
spectrum = sampled.make_spectrum(full=True)
boxcar = make_boxcar(spectrum, factor)
sinc = boxcar.make_wave()
sinc.shift(sampled.ts[0])
sinc.roll(len(sinc)//2)
thinkplot.preplot(2, cols=2)
sinc.plot()
thinkplot.config(xlabel='Time (s)')
thinkplot.subplot(2)
boxcar.plot()
thinkplot.config(xlabel='Frequency (Hz)',
ylim=[0, 1.05],
xlim=[-boxcar.max_freq, boxcar.max_freq])
thinkplot.save(root='sampling6',
formats=FORMATS)
return
# CAUTION: don't call plot_sinc_demo with a large wave or it will
# fill memory and crash
plot_sinc_demo(short, 4)
thinkplot.config(xlabel='Time (s)')
thinkplot.save(root='sampling7',
formats=FORMATS)
start = short.start + 0.004
duration = 0.00061
plot_sinc_demo(short, 4, start, duration)
thinkplot.config(xlabel='Time (s)',
xlim=[start, start+duration],
ylim=[-0.06, 0.17], legend=False)
thinkplot.save(root='sampling8',
formats=FORMATS)
def kill_yticklabels():
axis = plt.gca()
plt.setp(axis.get_yticklabels(), visible=False)
def show_impulses(wave, factor, i):
thinkplot.subplot(i)
thinkplot.preplot(2)
impulses = make_impulses(wave, factor)
impulses.segment(0, 0.001).plot_vlines(linewidth=2, xfactor=1000)
if i == 1:
thinkplot.config(title='Impulse train',
ylim=[0, 1.05])
else:
thinkplot.config(xlabel='Time (ms)',
ylim=[0, 1.05])
thinkplot.subplot(i+1)
impulses.make_spectrum(full=True).plot()
kill_yticklabels()
if i == 1:
thinkplot.config(title='DFT of impulse train',
xlim=[-22400, 22400])
else:
thinkplot.config(xlabel='Frequency (Hz)',
xlim=[-22400, 22400])
def plot_impulses(wave):
thinkplot.preplot(rows=2, cols=2)
show_impulses(wave, 4, 1)
show_impulses(wave, 8, 3)
thinkplot.save('sampling9',
formats=FORMATS)
def main():
wave = thinkdsp.read_wave('328878__tzurkan__guitar-phrase-tzu.wav')
wave.normalize()
plot_sampling3(wave, 'sampling5')
plot_sincs(wave)
plot_beeps()
plot_am()
wave = thinkdsp.read_wave('263868__kevcio__amen-break-a-160-bpm.wav')
wave.normalize()
plot_impulses(wave)
plot_sampling(wave, 'sampling3')
plot_sampling2(wave, 'sampling4')
if __name__ == '__main__':
main()
| [ "matplotlib" ] |
abc604ebfa2ec99d8d085881172baa783c2b5325 | Python | haidaodao/sdc-term3-p4-capstone-carla | /graph_waypoints.py | UTF-8 | 2,608 | 3.15625 | 3 | [] | no_license |
#!/usr/bin/env python
import matplotlib.pyplot as plt
import csv
from math import cos, sin
wp_x = []
wp_y = []
path = './data/wp_yaw_const.csv'
def closest_waypoint(waypoints, pos):
best_distance = float('inf')
best_wp_index = 0
best_wp = None
for i, wp in enumerate(waypoints):
dist = ((pos[0] - wp[0]) ** 2 + (pos[1] - wp[1]) ** 2) ** 0.5
if dist < best_distance:
best_distance = dist
best_wp, best_wp_index = wp, i
yaw = waypoints[best_wp_index][2]
nx = pos[0] - waypoints[best_wp_index][0]
ny = pos[1] - waypoints[best_wp_index][1]
# Check if waypoint is ahead of stop line
ahead = nx * cos(0 - yaw) - ny * sin(0 - yaw)
if ahead < 0:
best_wp_index -= 1
print (best_wp_index)
return best_wp_index
def all_stop_line_closest_wp(waypoints, traffic_light_posts):
stop_wp = []
stop_x = []
stop_y = []
for pos in traffic_light_posts:
print (pos)
best_index = closest_waypoint(waypoints, pos)
# print (best_index)
stop_x.append(waypoints[best_index][0])
stop_y.append(waypoints[best_index][1])
return stop_x, stop_y
traffic_light_pos = [[1148.56, 1184.65], \
[1559.2, 1158.43], \
[2122.14, 1526.79], \
[2175.237, 1795.71], \
[1493.29, 2947.67], \
[821.96, 2905.8], \
[161.76, 2303.82], \
[351.84, 1574.65]]
# IMPORT WAYPOINTS
waypoints = []
with open(path, 'r') as file:
reader = csv.reader(file, delimiter=',')
for i in reader:
# print (i)
x, y, yaw = float(i[0]), \
float(i[1]), \
float(i[3])
waypoints.append([x, y, yaw])
wp_x.append(x)
wp_y.append(y)
# print (waypoints)
# IMPORT TRAFFIC LIGHT POSITIONS
light_x = []
light_y = []
for pos in traffic_light_pos:
light_x.append(pos[0])
light_y.append(pos[1])
# FIND CLOSET WAYPOINT TO TRAFFIC LIGHT
stop_x, stop_y = all_stop_line_closest_wp(waypoints, traffic_light_pos)
# print ("X stops", stop_x)
# print ("Y stops", stop_y)
# print ("Waypoint Index: ", closest_waypoint(waypoints, traffic_light_pos[0]))
# print ("Waypoint Index: ", closest_waypoint(waypoints, traffic_light_pos[1]))
plt.plot(wp_x, wp_y, label='Waypoints Path')
plt.plot(light_x, light_y, 'ro', label='Traffic Light Posts')
plt.plot(stop_x, stop_y, 'g^', label='Stopping Waypoints')
plt.plot
plt.xlabel('x')
plt.ylabel('y')
plt.title('Simulator path')
plt.legend()
plt.show()
| [ "matplotlib" ] |
d138ddaab06030d1555161e0e06144546ced464c | Python | jhadhiraj1/predictive_analytics | /predictive_analytics.py | UTF-8 | 4,706 | 2.984375 | 3 | [] | no_license |
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 22 14:36:01 2021
@author: RUDRA
"""
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.impute import SimpleImputer
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
from sklearn.svm import SVR
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
data=pd.read_csv("insurance.csv")
count_nan=data.isnull().sum()
print(count_nan[count_nan>0])
data['bmi'].fillna(data['bmi'].mean(),inplace=True)
count_nan=data.isnull().sum()
print(count_nan)
print(data['smoker'])
print(data)
sex=data.iloc[:,1:2].values
smoker=data.iloc[:,4:5].values
le=LabelEncoder()
sex[:,0]=le.fit_transform(sex[:,0])
sex=pd.DataFrame(sex)
sex.columns=['sex']
le_sex_mapping=dict(zip(le.classes_,le.transform(le.classes_)))
print(le_sex_mapping)
le=LabelEncoder()
smoker[:,0]=le.fit_transform(smoker[:,0])
smoker=pd.DataFrame(smoker)
smoker.columns=['smoker']
le_smoker_mapping=dict(zip(le.classes_,le.transform(le.classes_)))
print(le_smoker_mapping)
print(data['smoker'])
region=data.iloc[:,5:6].values
ohe=OneHotEncoder()
region=ohe.fit_transform(region).toarray()
region=pd.DataFrame(region)
region.columns=['northeast','northwest','southeast','southwest']
print(region[:10])
X_num=data[['age','bmi','children']]
X_final=pd.concat([X_num,sex,smoker,region],axis=1)
y_final=data[['expenses']].copy()
X_train,X_test,y_train,y_test=train_test_split(X_final,y_final,test_size=0.33,random_state=0)
## Normalization using MinMax
n_scaler=MinMaxScaler()
X_train=n_scaler.fit_transform(X_train.astype(np.float))
X_test=n_scaler.transform(X_test.astype(np.float))
##Normalization using Standardization
s_scaler=StandardScaler()
X_train=s_scaler.fit_transform(X_train.astype(np.float))
X_test=s_scaler.transform(X_test.astype(np.float))
lr=LinearRegression().fit(X_train,y_train)
y_train_pred=lr.predict(X_train)
y_test_pred=lr.predict(X_test)
print("lr co-efficient is {}".format(lr.coef_))
print("Intercep {}".format(lr.intercept_))
print("y_train Score: %.3f and y_test score: %.3f" % (lr.score(X_train,y_train),lr.score(X_test,y_test)))
##Applying Polynomial Features to the datas
poly_f=PolynomialFeatures(degree=2)
poly_X=poly_f.fit_transform(X_final)
X_train,X_test,y_train,y_test=train_test_split(poly_X,y_final,test_size=0.33,random_state=0)
s_scaler=StandardScaler()
X_train=s_scaler.fit_transform(X_train.astype(np.float))
X_test=s_scaler.transform(X_test.astype(np.float))
poly_lr=LinearRegression().fit(X_train,y_train)
poly_y_train_pred=poly_lr.predict(X_train)
poly_y_test_pred=poly_lr.predict(X_test)
print("Polynomoial lr Co-efficient:{}".format(poly_lr.coef_))
print("Y-intercept is :{}".format(poly_lr.intercept_))
print("y_train score: %.3f and y_test score:%.3f"
% (poly_lr.score(X_train,y_train),poly_lr.score(X_test,y_test)))
## SVR Modelling
svr=SVR(kernel='linear',C=300)
X_train,X_test,y_train,y_test=train_test_split(X_final,y_final,test_size=0.33,random_state=0)
s_scaler=StandardScaler()
X_train=s_scaler.fit_transform(X_train.astype(np.float))
X_test=s_scaler.transform(X_test.astype(np.float))
svr=svr.fit(X_train,y_train.values.ravel())
y_train_pred=svr.predict(X_train)
y_test_pred=svr.predict(X_test)
print("y_train score: %.3f and y_test score: %.3f" %(svr.score(X_train,y_train),svr.score(X_test,y_test)))
dt=DecisionTreeRegressor(random_state=0);
dt=dt.fit(X_train,y_train.values.ravel())
y_train_pred=dt.predict(X_train)
y_test_pred=dt.predict(X_test)
print("y_train Score : %.3f and y_test score :%.3f" %(dt.score(X_train,y_train),dt.score(X_test,y_test)))
## Random Forest Regressor
rf=RandomForestRegressor(n_estimators=100,
criterion='mse',
random_state=1,
n_jobs=-1)
X_train,X_test,y_train,y_test=train_test_split(X_final,y_final,test_size=0.33,random_state=0)
n_scaler=StandardScaler()
X_train=n_scaler.fit_transform(X_train.astype(np.float))
X_test=n_scaler.transform(X_test.astype(np.float))
rf=rf.fit(X_train,y_train.values.ravel())
y_train_pred=rf.predict(X_train)
y_test_pred=rf.predict(X_test)
print("y_train score :%.3f and y_test score: %.3f"%(rf.score(X_train,y_train),rf.score(X_test,y_test)))
| [ "matplotlib" ] |
f2f3772d22c33ab225566c39dd12a142ee862bf9 | Python | Combustion-Zhen/OpenFOAM_py | /SFD/python2.7/plot_scat.py | UTF-8 | 3,049 | 2.828125 | 3 | [] | no_license |
#Zhen Lu, 03/04/2017 <[email protected]>
# plot Sandia Flame results, as title, the scatter at different x/D
import glob
from file_read import csv_read, cm2inch, SF_read
# suppress the display of matplotlib.pyplot
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
# data to plot
# only one var, two columns, left: exp., right: sim. rows for x/D
var = 'T'
# import data
xD=[]
data={}
expr={}
for filename in glob.glob('scat*.csv'):
pos = filename.find('.csv')
z = float('{0}.{1}'.format(filename[7:9],filename[9:pos]))
xD.append(z)
data.update({z:csv_read(filename)})
expr.update({z:SF_read('D.scat',filename[7:pos],'all')})
xD.sort()
# plot
# use TEX for interpreter
plt.rc('text',usetex=True)
# use serif font
plt.rc('font',family='serif')
# figure and axes parameters
# total width is fixed
plot_width =19.0
subplot_h =4.0
margin_left =2.0
margin_right =0.3
margin_bottom =1.5
margin_top =1.0
space_width =0.0
space_height =1.0
ftsize =12
# total height determined by the number of vars
plot_height =(subplot_h+space_height)*float(len(xD)) \
-space_height+margin_top+margin_bottom
# min and max of axis
xmin = 0.0
xmax = 1.0
xtick= (0.0,0.2,0.4,0.6,0.8)
# generate the figure
fig, axes = plt.subplots(len(xD),2,
sharex='col',sharey='all',
figsize=cm2inch(plot_width, plot_height))
# generate the axis
for x in xD:
axes[xD.index(x),0].scatter(expr[x]['Z'],expr[x][var],
marker='.',c='k',edgecolor='none')
axes[xD.index(x),1].scatter(data[x]['Z'],data[x][var],
marker='.',c='k',edgecolor='none')
# ylabel, temperature has a unit
if var == 'T':
axes[xD.index(x),0].set_ylabel(r"$\tilde {0}\;(\mathrm{{K}})$".format(var),
fontsize=ftsize)
else:
axes[xD.index(x),0].set_ylabel(r"$\tilde Y\;{0}$".format(var),
fontsize=ftsize)
# location note
# the text position determined by axes axis
axes[xD.index(x),1].text(0.7,2000,'$x/D={0:.2g}$'.format(x),
fontsize=ftsize)
# ylabel, temperature has a unit
# title and xlabel
axes[0,0].set_title('Exp.',fontsize=ftsize)
axes[0,1].set_title('Sim.',fontsize=ftsize)
for i in range(2):
axes[len(xD)-1,i].set_xlim(xmin,xmax)
axes[len(xD)-1,i].set_xticks(xtick)
axes[len(xD)-1,i].set_xlabel(r'$\tilde Z$',fontsize=ftsize)
axes[len(xD)-1,1].set_xticks(xtick+(xmax,))
# legend
# set margins
plt.subplots_adjust(left =margin_left/plot_width,
bottom =margin_bottom/plot_height,
right =1.0-margin_right/plot_width,
top =1.0-margin_top/plot_height,
wspace =space_width/plot_width,
hspace =space_height/plot_height)
# save plot
plt.savefig('radial_scat.png',dpi=400)
plt.savefig('radial_scat.pdf')
plt.savefig('radial_scat.eps')
| [ "matplotlib" ] |
98c417d6de736140ac9bfe9d0bdf19b56028ec4a | Python | Domdoug/LSTM_Mortality_Table_Actuarial | /4-expectativa_vida_lstm-Bidirecional.py | UTF-8 | 13,592 | 2.984375 | 3 | [] | no_license |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
# importar bibliotecas
import pandas as pd
from pandas.tseries.offsets import DateOffset
import numpy as np
import os
import matplotlib.pyplot as plt
# get_ipython().run_line_magic('matplotlib', 'inline')
#from sklearn.metrics import mean_squared_error
from statsmodels.tools.eval_measures import rmse
from sklearn.preprocessing import MinMaxScaler
from keras.preprocessing.sequence import TimeseriesGenerator
from keras.callbacks import CSVLogger
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers import Dropout
from keras.layers import Bidirectional
import time
import warnings
warnings.filterwarnings("ignore")
# ### Modeling with the embedding package for the Bi-directional approach
# #####
# Humans do not start thinking from scratch every second. As you read this essay, you understand each word based on your understanding of the previous words. You do not throw everything away and start thinking from scratch again. Your thoughts have persistence.
#
# Traditional neural networks cannot do this, and that seems like a major shortcoming. For example, imagine you want to classify what kind of event is happening at every moment of a movie. It is not clear how a traditional neural network could use its reasoning about earlier events in the film to inform later ones.
#
# Recurrent neural networks address this problem. They are networks with loops, allowing information to persist.
#
# Compared with an LSTM, a BLSTM or BiLSTM has two networks: one accesses past information in the forward direction and the other accesses future information in the reverse direction.
#
# LSTMs and their bidirectional variants are popular because they try to learn how and when to forget, and when not to, using gates in their architecture. In earlier RNN architectures, vanishing gradients were a major problem and kept those networks from learning much.
#
# Using Bidirectional LSTMs, you feed the learning algorithm the original data once from beginning to end and once from end to beginning. There is some debate here, but it generally learns faster than the unidirectional approach, although this depends on the task.
#
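# A minimal illustrative sketch of the bidirectional wrapper described above (layer sizes and
# input shape are arbitrary examples, not the configuration used further down in this script):
example_bilstm = Sequential()
# one LSTM reads the sequence forward, a second reads it backward; their outputs are concatenated
example_bilstm.add(Bidirectional(LSTM(32), input_shape=(10, 1)))
example_bilstm.add(Dense(1))
example_bilstm.compile(optimizer='adam', loss='mse')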
# #### 1 - Load the preprocessed dataset
# In[2]:
# Verifica a pasta corrente
pasta = os.getcwd()
# In[3]:
pasta_resultados = os.path.join(pasta, "resultados")
pasta_graficos = os.path.join(pasta, "graficos1")
# In[ ]:
# Regex notation by "\s+". This means a single space, or multiple spaces are all to be treated as a single separator.
# df_dados = pd.read_csv('bltper_1x1.txt', skiprows=2, sep = '\s+')
df_dados = pd.read_csv(os.path.join(pasta, "dados") + "/" + 'bltper_1x1.txt', skiprows=2, sep = '\s+')
# In[ ]:
df_dados.head().append(df_dados.tail())
# #### 2 - Create features, among them logqx, which is the death probability already converted to the logarithmic scale
#
# In[ ]:
# Tratamento da idade 110+ para os anos.
# DataFrame.loc[condition, column_name] = new_value
df_dados.loc[(df_dados.Age == '110+'),'Age'] = 110
# In[ ]:
# Criar a feature log qx
df_dados['logqx'] = np.log(df_dados['qx'])
# Aproveitar e corrigir a tipagem da feature Age
df_dados["Age"] = df_dados["Age"].astype(int)
# In[ ]:
df_dados.head().append(df_dados.tail())
# In[ ]:
df_dados.shape
# #### 3 - Create the time feature t, based on the year feature, which is the temporal element of the series
# In[8]:
# Preparar dataset
#serie = {'t': ano, 'logqx_prob': logqx_prob}
df_lstm = pd.DataFrame(df_dados, columns=['Age','Year','logqx'])
df_lstm['t'] = pd.to_datetime(df_lstm['Year'], format='%Y')
#df_lstm.drop(['ano'], axis=1, inplace=True)
df_lstm.set_index('t', inplace=True)
# In[9]:
#df_lstm[df_lstm['t'].dt.year == 1998]
df_lstm.head()
# In[10]:
df_lstm[df_lstm['Age']==0]
# #### 4 - Split the data into training and test sets for each age x across the years t, i.e., for age 0 over the years 1998 to 2018, for age 1 over the same period, and so on.
# ##### LSTM routine, metrics and plots
# In[12]:
# next tests: n_epochs = 1000 and a 3-year test set. Suggestion. In the script version of the program.
# Also use the code that saves the compile log
predict_res = []
pred_actual_rmse_res = []
w_max = max(df_dados['Age']) # definir maior idade nas tábuas. testes: 3
# inicio do cronometro do processamento
start = time.time()
n_input = 30 # 10 # Length of the output sequences (in number of timesteps). Corresponde ao número de dados
# que usaremos para a rede. No caso, 10 anos na idade = 0, 10 anos na idade=1, etc.Vamos testar com 3 anos??
n_features = 1 # Número de features, variáveis. O modelo é univariavel (qx) para cada idade.
n_epochs = 500 # 1000 #500
n_batch = 2 # Number of timeseries samples in each batch (except maybe the last one).
n_neurons = 50
t_projecao = 30
# (#batch_size,#inputs,#features) 3D
for x in range(0, w_max+1):
# Série para cada idade ao longo dos anos de 1998 a 2018
#serie = df_lstm[df_lstm['idade']==x]['logqx_prob']
serie = df_lstm[df_lstm['Age']==x]
serie.drop(['Age', 'Year'], axis=1, inplace=True)
# Separar base de treino e teste === preparar dados
treino, teste = serie[:-30], serie[-30:]
# Padronizar dados: Normalizar entre 0 e 1
scaler = MinMaxScaler()
scaler.fit(treino)
treino = scaler.transform(treino)
teste = scaler.transform(teste)
#generator = TimeseriesGenerator(treino, treino, length=n_input, batch_size=n_batch)
# length: The number of lag observations to use in the input portion of each sample (e.g. 3)
# That is the desired number of lag observations to use as input = VAmos tentar 21: 2018-1998
# batch_size: The number of samples to return on each iteration (e.g. 32)
# The samples are not shuffled by default. This is useful for some recurrent neural networks
# like LSTMs that maintain state across samples within a batch.
# both the data and target for this generator is “treino”.
generator = TimeseriesGenerator(treino, treino, length=n_input, batch_size=n_batch)
# ============================ CAMADAS =========== CAMADAS =================================
# A camada LSTM já possui, em sua construção, funções default de ativação:
# activation="tanh",recurrent_activation="sigmoid",
# três funções sigmoide e 1 tangente hiperbólica
# Modelo bidirecional
model = Sequential()
model.add(Bidirectional(LSTM(n_neurons, return_sequences=True), input_shape=(n_input, n_features)))
model.add(Bidirectional(LSTM(n_neurons)))
model.add(Dropout(0.20))
model.add(Dense(1))
#model.add(Activation('softmax'))
#model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
model.compile(optimizer='adam', loss='mse', metrics=["mae"])
#model = Sequential()
# #reshape the data into LSTM required (#batch,#timesteps,#features)
#Adding the first LSTM layer and some Dropout regularisation
#model.add(LSTM(n_neurons, activation='relu', input_shape=(n_input, n_features), return_sequences=True))
#model.add(LSTM(n_neurons, activation='relu', input_shape=(n_input, n_features)))
#model.add(Dropout(0.20))
# Adding a second LSTM layer and some Dropout regularisation
#model.add(LSTM(n_neurons))
#model.add(Dropout(0.20))
# Adding a third LSTM layer and some Dropout regularisation
#model.add(LSTM(n_neurons, return_sequences=True))
#model.add(Dropout(0.20))
# Adding a fourth LSTM layer and some Dropout regularisation
#model.add(LSTM(n_neurons))
#model.add(Dropout(0.20))
# Adding the output layer
#model.add(Dense(1))
# ============================ CAMADAS =========== CAMADAS =================================
#model.add(Dense(y.shape[1], activation='sigmoid'))
#model.compile(optimizer='adam', loss='mse', metrics=["mae"])
# fit model
#model.fit_generator(generator, epochs=n_epochs)
# ADAPTADO PARA A ATUALIZAÇÃO DO KERAS (28/11/2020)
csv_logger = CSVLogger('log_modelo_demography_bidirecional.csv', append=True, separator=';')
model.fit(generator, epochs=n_epochs, callbacks=[csv_logger])
# model.fit(X_train, Y_train, callbacks=[csv_logger])
#Previsão
pred_list = []
batch = treino[-n_input:].reshape((1, n_input, n_features))
for i in range(n_input): # n_input
pred_list.append(model.predict(batch)[0])
batch = np.append(batch[:,1:,:], [[pred_list[i]]], axis=1)
#inverse transform forecasts and test. Need to scale them back so we can compare the final results
df_predict = pd.DataFrame(scaler.inverse_transform(pred_list),
index=serie[-n_input:].index, columns=['Prediction'])
df_teste = pd.concat([serie, df_predict], axis=1)
# Gráfico da estimativa, com a base de teste
#plt.figure(figsize=(10,5))
'''
fig = plt.figure(figsize=(10,5))
ax = fig.add_subplot(111)
ax.plot(df_teste.index, df_teste['logqx'])
ax.plot(df_teste.index, df_teste['Prediction'], color='r')
ax.legend(loc='best', fontsize='xx-large', labels=['logqx', 'Estimativa'])
fig.suptitle('logqx Bi-Direcional LSTM dataset teste na idade = %i' %x, fontweight="bold")
plt.savefig(pasta_graficos + '/' + 'prev_Bi_Direcional_LSTM_test_idade'+str(x)+'.png')
'''
pred_actual_rmse = rmse(df_teste.iloc[-n_input:, [0]], df_teste.iloc[-n_input:, [1]])
print("idade:", x, "rmse: ", pred_actual_rmse)
pred_actual_rmse_res.append(pred_actual_rmse)
treino = serie
scaler.fit(treino)
treino = scaler.transform(treino)
#generator = TimeseriesGenerator(treino, treino, length=n_input, batch_size=n_batch)
#model.fit_generator(generator,epochs=n_epochs)
# ADAPTADO PARA A ATUALIZAÇÃO DO KERAS (28/11/2020)
# length: The number of lag observations to use in the input portion of each sample (e.g. 3)
# batch_size: The number of samples to return on each iteration (e.g. 32)
generator = TimeseriesGenerator(treino, treino, length=n_input, batch_size=n_batch)
#model.fit(generator, epochs=n_epochs, batch_size=n_batch)
model.fit(generator, epochs=n_epochs)
pred_list = []
batch = treino[-n_input:].reshape((1, n_input, n_features))
for i in range(n_input):
pred_list.append(model.predict(batch)[0])
batch = np.append(batch[:,1:,:],[[pred_list[i]]],axis=1)
# prever para t_projecao anos
add_dates = [serie.index[-1] + DateOffset(years=x) for x in range(0, t_projecao + 1)]
#add_dates = [serie.index[-1] + pd.offsets.YearBegin(x) for x in range(0,6)]
future_dates = pd.DataFrame(index=add_dates[1:],columns=serie.columns)
#inverse transform forecasts and test. Need to scale them back so we can compare the final results
df_predict = pd.DataFrame(scaler.inverse_transform(pred_list),
index=future_dates[-n_input:].index, columns=['Prediction'])
predict_res.append(df_predict.values.tolist())
df_proj = pd.concat([serie,df_predict], axis=1)
'''
fig = plt.figure(figsize=(10,5))
ax = fig.add_subplot(111) # "111" means "1x1 grid, first subplot"
ax.plot(df_proj.index, df_proj['logqx'])
ax.plot(df_proj.index, df_proj['Prediction'], color='r')
ax.legend(loc='best', fontsize='xx-large', labels=['logqx', 'Predição'])
plt.xticks(fontsize=18)
plt.yticks(fontsize=16)
fig.suptitle('Logqx Bi-Direcional LSTM projetado na idade = %i' %x, fontweight = "bold") # Título parametrizado com a idade
plt.savefig(pasta_graficos + '/' + 'proj_Bi_Direcional_LSTM_log_qx'+str(x)+'.png')
'''
# fim do cronometro do processamento
end = time.time()
hours, rem = divmod(end-start, 3600)
minutes, seconds = divmod(rem, 60)
print()
print('Tempo de processamento:')
print('{:0>2}:{:0>2}:{:05.2f}'.format(int(hours), int(minutes), seconds))
print()
#Tempo de processamento: 22:32:14.93
# Tempo de processamento sem imprimir os gráficos: 18:35:29.37
# #### 5 - RMSE values per age
# In[13]:
pd.DataFrame(pred_actual_rmse_res) # RMSE para cada idade
# In[52]:
#### 5 - Resulting dataset for the years 2019 to 2028, per age
# In[14]:
df_lstm_res = pd.DataFrame(predict_res)
# In[15]:
df_lstm_res.head()
# In[16]:
df_lstm_res[0][0][0]
# In[17]:
df_lstm_res.info()
# In[18]:
# Função para unir as listas em linha
def unirSeries(df, explode):
idx = df.index.repeat(df[explode[0]].str.len())
df1 = pd.concat([
pd.DataFrame({x: np.concatenate(df[x].values)}) for x in explode], axis=1)
df1.index = idx
return df1.join(df.drop(explode, 1), how='left')
# In[19]:
colunas = np.arange(2020, 2050)
df_temp = pd.DataFrame(predict_res, columns=colunas)
df_lstm_res = unirSeries(df_temp,colunas)
df_lstm_res = df_lstm_res.reset_index(drop=True)
# In[20]:
df_lstm_res.head()
# In[21]:
df_forecast_res_exp = pd.DataFrame(np.exp(df_lstm_res))
# In[22]:
df_forecast_res_exp.head()
# In[ ]:
# Gravar resultados
# In[23]:
df_forecast_res_exp.to_csv(pasta_resultados + '/' + 'lstm_previsao_qx_500_Bi_Direcional_demography.csv')
# In[24]:
pd.DataFrame(pred_actual_rmse_res).to_csv(pasta_resultados + '/' + 'pred_actual_rmse_res_500_Bi_Direcional_demography.csv', header=['RMSE'])
# In[ ]:
| [ "matplotlib" ] |
a8e6c9b155b7fea2843ab234592911ffd5c20076 | Python | ideProject/tribo | /matplot_test.py | UTF-8 | 116 | 2.515625 | 3 | [] | no_license |
import numpy as np
import matplotlib.pyplot as plt
data = np.random.randn(1000)
plt.hist(data, bins=30)
plt.show()
| [ "matplotlib" ] |
c5ccdd97c16a369006efa97019f42fc8d3c7c709 | Python | terdenan/cs102 | /homework04/getHistory.py | UTF-8 | 712 | 2.5625 | 3 | [] | no_license |
import requests
import plotly.plotly as py
import plotly.graph_objs as go
from datetime import datetime
from vk_api import get_messages_history
from pprint import pprint as pp
from collections import Counter
def count_dates_from_messages(messages):
def parse(d):
return datetime.fromtimestamp(d).strftime("%Y-%m-%d")
msg_list = [parse(c.get('date')) for c in messages]
counted = Counter(msg_list)
x = []
y = []
for key in counted:
x.append(key)
y.append(counted[key])
return x, y
if __name__ == '__main__':
messages = get_messages_history(223703977)
x, y = count_dates_from_messages(messages)
data = [go.Scatter(x=x, y=y)]
py.plot(data)
| [ "plotly" ] |
a61509497f4b666c0c079910c969cf52ab1dda71 | Python | wubonian/ADSAlgo | /PathPlan/InterpolatingCurve/CubicSpline.py | UTF-8 | 2,337 | 3.40625 | 3 | [] | no_license |
import numpy as np
import matplotlib.pyplot as plt
class cubic_spline:
def __init__(self, x, y):
self.x = x
self.y = y
self.n = len(x)
self.a = []
self.b = []
self.c = []
self.d = []
self.d_mtx = []
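    # calc_d_mtx sets up and solves a linear system A*D = Y for the knot tangents D
    # used by the Hermite-form coefficients below; with a unit parameter per segment,
    # the standard cubic-spline equations are:
    #   2*D[0] + D[1]            = 3*(y[1] - y[0])
    #   D[i-1] + 4*D[i] + D[i+1] = 3*(y[i+1] - y[i-1])   (interior points)
    #   D[n-2] + 2*D[n-1]        = 3*(y[n-1] - y[n-2])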
def calc_d_mtx(self):
A = np.zeros((self.n, self.n))
for i in range(self.n):
            if i == 0:
                A[i][i] = 2
                A[i][i+1] = 1
            elif i == self.n - 1:
                A[i][i-1] = 1
                A[i][i] = 2
            else:
                A[i][i-1] = 1
                A[i][i] = 4
                A[i][i+1] = 1
Y = np.zeros((self.n, 1))
for i in range(self.n):
if i == 0:
Y[i] = 3*(self.y[i+1] - self.y[0])
elif i == self.n-1:
Y[i] = 3*(self.y[i] - self.y[i-1])
else:
Y[i] = 3*(self.y[i+1] - self.y[i-1])
IA = np.linalg.inv(A)
self.d_mtx = np.dot(IA, Y)
def calc_coef(self):
for i in range(self.n-1):
self.a.append(self.y[i])
self.b.append(self.d_mtx[i])
c_tmp = 3*(self.y[i+1] - self.y[i]) - 2*self.d_mtx[i] - self.d_mtx[i+1]
self.c.append(c_tmp)
d_tmp = 2*(self.y[i] - self.y[i+1]) + self.d_mtx[i] + self.d_mtx[i+1]
self.d.append(d_tmp)
def find_index(self, u):
i = 0
while self.x[i] < u:
i = i+1
if u > self.x[0]:
return i - 1
else:
return 0
def calc_prop(self, i, u):
return (u - self.x[i]) / (self.x[i+1] - self.x[i])
def eval_val(self, u):
i = self.find_index(u)
p = self.calc_prop(i, u)
y = self.a[i] + self.b[i]*p + self.c[i]*p*p + self.d[i]*p*p*p
return y
def main():
x = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
y = [2, 3, 5, 4, 8, 9, 4, 2, 3, 5, 7]
cubic = cubic_spline(x, y)
cubic.calc_d_mtx()
cubic.calc_coef()
xs = np.linspace(0, 10, 1000)
ys = []
for x_tmp in xs:
y_tmp = cubic.eval_val(x_tmp)
ys.append(y_tmp)
plt.figure(figsize=(5, 5))
ax = plt.gca()
ax.set_xlim([-5, 20])
ax.set_ylim([-5, 20])
plt.plot(x, y, '-g')
plt.plot(xs, ys, '-y')
plt.show()
if __name__ == "__main__":
    main()
| [ "matplotlib" ] |
750dbfa49f1c96652db147d30fbfceb7005fc2fa | Python | geckotian96/qbb2019-answers | /day4-morning/01-histogram.py | UTF-8 | 1,664 | 3.203125 | 3 | [] | no_license |
#!/usr/bin/env python3
"""
USAGE: ./00-scatter.py <ctab>
plot fpkm
"""
#../results/stringtie/SRR072893/t_data.ctab
import sys
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as stats
fpkms=[]
for i, line in enumerate (open(sys.argv[1])):
if i ==0:
continue
fields = line.rstrip("\n").split("\t")
if float(fields[-1]) > 0:
fpkms.append(float(fields[-1]))
#notice that fpkms field is a string. you need to convert to float first
#the reason to use log is becuase most of the data gather in the 10000, so need to take log to further visualize the sub columns
my_data = np.log2(fpkms)
# mu = 0
# sigma =1
# x = np.linspace (-15, 15, 100) #range of the normal distribution with how many groups
# y = stats.norm.pdf (x, mu, sigma)
#a=-2.1, mu=5.8, signma = 2.8
mu1=0
sigma1=1
x1 = np.linspace (-15, 15, 100) #range of the normal distribution with how many groups
y1= stats.norm.pdf (x1, mu1, sigma1)
a=-2.1
mu=5.8
sigma=2.8
x2 = np.linspace(-15, 15, 100)
y2= stats.skewnorm.pdf(x2, a, mu, sigma)
fig, ax = plt.subplots()
ax.hist(my_data, bins=100, density=True) #bin=100 can divide the range into 100 columns, default is 10. Density??
ax.plot(x1, y1, label="Normal distribution")
ax.legend()
ax.plot(x2, y2, label="Skew curve")
ax.legend()
ax.set_xlabel ("Log2(FPKM)")
ax.set_ylabel ("Probability")
fig.suptitle("Distribution Curve of FPKM")
plt.text(-15, 0.20, "a=-2.1, mu=5.8, signma=2.8")
fig.savefig("fpkms.png")
plt.close(fig)
#x = np.linspace(norm.ppf(0.01), --> x coordinace
#norm.ppf(0.99), 100)
#ax.plot(x, norm.pdf(x),
#'r-', lw=5, alpha=0.6, label='norm pdf')
| [ "matplotlib" ] |
f3f40c5c36a301d20d65a1524362502ed6aa564d | Python | hbates00/12.009-Nonlinear-Dynamics-in-the-Natural-Environment | /PSET 1/PSET4_1.py | UTF-8 | 1,725 | 3.75 | 4 | [] | no_license |
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 16 16:43:08 2017
@author: Haley
"""
#numpy random, generate list of random 1 or neg 1 and sum to get a number -- this is position
import numpy as np
import matplotlib.pyplot as plt
# Gets location after number of steps n and returns a location
def get_location(t):
choice = np.array([-1, 1])
moves = np.random.choice(choice, t)
return np.sum(moves)
# Builds a vector of walker position
def do_the_thing(n, t):
a = map(lambda p: get_location(t), range(n))
return a
# Creates and populates an rxr A matrix
def matrix(r):
mat = np.zeros((r, r))
for i in xrange(r):
for j in xrange(r):
if j == (i + 1) or j == (i - 1):
mat[i][j] = .5
else:
pass
return mat
# Advances a given number of time steps, returns a vector of probabilities
def do_it(r, t):
t_0 = np.array([0] * r)
    t_0[len(t_0) // 2] = 1
A = np.linalg.matrix_power(matrix(r), t)
b = np.dot(A, np.transpose(t_0))
return b
# Plots a histagram of walker locations
def plot(n, t, b):
pos = do_the_thing(n, t)
plt.hist(pos, bins = b, color = 'white', label = 'Simulated Values')
# Plots the probability histogram
def plot_probabilities(n, t, r):
probs = do_it(r, t) * 2 * n
x = range((-r/2), (r/2))
plt.plot(x, probs, label = 'Predicted Values')
plot(50000, 50, 15)
plot_probabilities(50000, 50, 50)
plt.xlabel('Position')
plt.ylabel('Number of Walkers at Position')
plt.title('Number of Random Walkers at Position After 50 Steps')
plt.legend()
plt.show()
| [ "matplotlib" ] |
66a94bfb126ee8ae2c5afe9d7fc4956e7e3617e9 | Python | samithaj/COPDGene | /feature_selection/aim1_redraw_figure.py | UTF-8 | 849 | 3 | 3 | [] | no_license |
""" Draw plots from data in gap_4/features_sel_backward_gap_run_1.csv
Label Y axis
"""
import csv
import matplotlib.pyplot as plt
import numpy as np
# Load GAP values
file_csv = open("gap_4/features_sel_backward_gap_run_1.csv","rb")
reader = csv.reader(file_csv)
lines = [line for line in reader]
file_csv.close()
gap_value = []
fs_index = [55]
fs_name = ['pre_FVC']
for i in range(61):
gap_value.append(lines[62-i][1])
fs_index.append(lines[62-i][2])
fs_name.append(lines[62-i][3])
gap_value.append(lines[1][1])
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(range(1,63),gap_value)
ax.plot((13,13),(-0.05,gap_value[12]),'r--')
ax.annotate('MAX(13, 0.2658)',xy=(13,gap_value[12]))
plt.xlabel("The Number of Features")
plt.ylabel("GAP statistic(Clustering Quality)")
plt.title("Backward Search with GAP statistic")
plt.show()
| [ "matplotlib" ] |
82d86634717983bd6aa6c5fa3c596263ec6996cd | Python | seanwayland/matplotlibpractice | /pymaceuticals_starter.py | UTF-8 | 3,244 | 3.3125 | 3 | [] | no_license |
#!/usr/bin/env python
# coding: utf-8
# ## Observations and Insights
#
# ## Dependencies and starter code
# In[1]:
# Dependencies and Setup
import matplotlib.pyplot as plt
import pandas as pd
import scipy.stats as st
# Study data files
mouse_metadata = "data/Mouse_metadata.csv"
study_results = "data/Study_results.csv"
# Read the mouse data and the study results
mouse_metadata = pd.read_csv(mouse_metadata)
study_results = pd.read_csv(study_results)
print(mouse_metadata.groupby('Mouse ID').count())
# Combine the data into a single dataset
ds = pd.merge(mouse_metadata, study_results, how="left", on=["Mouse ID"])
#print(fourFive)
#print(ds)
# Generate a summary statistics table of mean, median, variance, standard deviation, and SEM of the tumor volume for each regimen
dsMean = pd.DataFrame(ds.groupby("Drug Regimen").mean())
dsMean = dsMean['Tumor Volume (mm3)']
dsMedian = pd.DataFrame(ds.groupby("Drug Regimen").median())
dsMedian = dsMedian['Tumor Volume (mm3)']
dsVar = pd.DataFrame(ds.groupby("Drug Regimen").var())
dsVar = dsVar['Tumor Volume (mm3)']
dsStd = pd.DataFrame(ds.groupby("Drug Regimen").std())
dsStd = dsStd['Tumor Volume (mm3)']
dsSem = pd.DataFrame(ds.groupby("Drug Regimen").sem())
dsSem = dsSem['Tumor Volume (mm3)']
dsStats = pd.DataFrame([dsMean,dsMedian,dsVar,dsStd,dsSem],index=['mean','Median','Var','Std','SEM'])
#print(dsStats)
# ## Summary statistics
# In[2]:
# ## Bar plots
# In[3]:
# Generate a bar plot showing number of data points for each treatment regimen using pandas
dsCount = ds.groupby("Drug Regimen").count()
dsCount['data points'] = dsCount['Mouse ID']
ax = dsCount.plot.bar(y = "data points")
# In[4]:
# Generate a bar plot showing number of data points for each treatment regimen using pyplot
#plt.bar(dsCount, x = 'Drug Regimen', y = 'data points', align='center', alpha=0.5)
labels = dsCount.index.tolist()
#print(labels)
plt.bar(labels, dsCount['Mouse ID'])
plt.title('Data Points by Drug Regimen')
#plt.show()
# ## Pie plots
# In[5]:
# Generate a pie plot showing the distribution of female versus male mice using pandas
SexCount = mouse_metadata.groupby("Sex").count()
SexCount['sex'] = SexCount['Mouse ID']
print(SexCount)
#plot = SexCount.plot.pie(y='sex')
# In[6]:
# Generate a pie plot showing the distribution of female versus male mice using pyplot
pie = plt.pie(SexCount['sex'])
plt.show()
# ## Quartiles, outliers and boxplots
# In[7]:
# Calculate the final tumor volume of each mouse across four of the most promising treatment regimens. Calculate the IQR and quantitatively determine if there are any potential outliers.
fourFive = ds[ds['Timepoint']==45]
# get all mice for each drug
# Capomulin, Ramicane, Infubinol, and Ceftamin
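# One possible sketch for this step, reusing fourFive and the 'Drug Regimen' / 'Tumor Volume (mm3)'
# columns used elsewhere in this notebook:
for drug in ["Capomulin", "Ramicane", "Infubinol", "Ceftamin"]:
    final_vols = fourFive.loc[fourFive["Drug Regimen"] == drug, "Tumor Volume (mm3)"]
    q1, q3 = final_vols.quantile(0.25), final_vols.quantile(0.75)
    iqr = q3 - q1
    lower, upper = q1 - 1.5 * iqr, q3 + 1.5 * iqr
    outliers = final_vols[(final_vols < lower) | (final_vols > upper)]
    print(drug, "IQR:", iqr, "potential outliers:", outliers.values)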
# In[8]:
# Generate a box plot of the final tumor volume of each mouse across four regimens of interest
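# One possible sketch, again based on the final-timepoint volumes in fourFive:
regimens = ["Capomulin", "Ramicane", "Infubinol", "Ceftamin"]
vols_per_regimen = [fourFive.loc[fourFive["Drug Regimen"] == r, "Tumor Volume (mm3)"] for r in regimens]
plt.boxplot(vols_per_regimen, labels=regimens)
plt.ylabel("Final Tumor Volume (mm3)")
plt.show()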
# ## Line and scatter plots
# In[9]:
# Generate a line plot of time point versus tumor volume for a mouse treated with Capomulin
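# One possible sketch: pick an arbitrary Capomulin-treated mouse and plot its tumor volume over time
# (column names as used above):
capomulin = ds[ds["Drug Regimen"] == "Capomulin"]
mouse_id = capomulin["Mouse ID"].iloc[0]  # any Capomulin mouse
one_mouse = capomulin[capomulin["Mouse ID"] == mouse_id]
plt.plot(one_mouse["Timepoint"], one_mouse["Tumor Volume (mm3)"])
plt.xlabel("Timepoint")
plt.ylabel("Tumor Volume (mm3)")
plt.title("Capomulin: mouse " + str(mouse_id))
plt.show()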
# In[10]:
# Generate a scatter plot of mouse weight versus average tumor volume for the Capomulin regimen
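# One possible sketch; assumes mouse weight is stored in a 'Weight (g)' column of the merged data:
capomulin = ds[ds["Drug Regimen"] == "Capomulin"]
avg_per_mouse = capomulin.groupby("Mouse ID")[["Weight (g)", "Tumor Volume (mm3)"]].mean()
plt.scatter(avg_per_mouse["Weight (g)"], avg_per_mouse["Tumor Volume (mm3)"])
plt.xlabel("Weight (g)")
plt.ylabel("Average Tumor Volume (mm3)")
plt.show()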
# In[11]:
# Calculate the correlation coefficient and linear regression model for mouse weight and average tumor volume for the Capomulin regimen
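# One possible sketch using scipy.stats (imported above as st); again assumes a 'Weight (g)' column:
capomulin = ds[ds["Drug Regimen"] == "Capomulin"]
avg_per_mouse = capomulin.groupby("Mouse ID")[["Weight (g)", "Tumor Volume (mm3)"]].mean()
corr = st.pearsonr(avg_per_mouse["Weight (g)"], avg_per_mouse["Tumor Volume (mm3)"])[0]
slope, intercept, rvalue, pvalue, stderr = st.linregress(avg_per_mouse["Weight (g)"], avg_per_mouse["Tumor Volume (mm3)"])
print("Correlation:", corr, " Regression: y =", slope, "* x +", intercept)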
# In[ ]:
| [ "matplotlib" ] |
b0ef290ef16eb0c5e793da6db635d312939f0499 | Python | meck93/UU_ML_Project | /dqn/environment.py | UTF-8 | 4,315 | 2.671875 | 3 | [ "MIT" ] | permissive |
import gym
import numpy as np
import retro
from baselines.common.atari_wrappers import FrameStack
import cv2
from config import HEIGHT, N_FRAMES, WIDTH # hyperparameters
cv2.ocl.setUseOpenCL(False)
class PreprocessFrames(gym.ObservationWrapper):
def __init__(self, env):
"""Preprocess and wrap frames to HEIGHTxWIDTH."""
gym.ObservationWrapper.__init__(self, env)
self.width = WIDTH
self.height = HEIGHT
self.observation_space = gym.spaces.Box(low=0, high=255, shape=(self.height, self.width, 1), dtype=np.uint8)
def observation(self, frame):
# transform color to grayscale
frame_gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
# crop the image top and bottom since it's static
frame_cropped = frame_gray[9:-35, :]
# normalize the values to range [0,1]
frame_normalized = frame_cropped / 255.0
# resize the cropped image to WIDTHxHEIGHT
frame = cv2.resize(frame_normalized, (self.width, self.height), interpolation=cv2.INTER_AREA)
return frame[:, :, None]
class MarioDiscretizer(gym.ActionWrapper):
"""
Wrap a gym-retro environment and make it use discrete
actions for the SuperMarioBros game.
"""
def __init__(self, env):
super(MarioDiscretizer, self).__init__(env)
# All buttons of the NES
buttons = ['B', None, 'SELECT', 'START', 'UP', 'DOWN', 'LEFT', 'RIGHT', 'A']
# Custom discrete actions defined by ourselves
# Limits the number of possible actions and should improve training time
# actions = [[None], ['LEFT'], ['RIGHT'], ['RIGHT', 'A'], ['RIGHT', 'B'], ['RIGHT', 'A', 'B'], ['A'], ['A', 'A']]
actions = [[None], ['LEFT'], ['RIGHT'], ['RIGHT', 'A'], ['A'], ['A', 'A']]
self._actions = []
for action in actions:
arr = np.array([False] * len(buttons))
for button in action:
arr[buttons.index(button)] = True
self._actions.append(arr)
# maps each action to a discrete number
self.action_space = gym.spaces.Discrete(len(self._actions))
def action(self, a): # pylint: disable=W0221
return self._actions[a].copy()
def make_custom_env(disc_acts=True):
"""
Create an environment with some standard wrappers.
"""
env = retro.make(game='SuperMarioBros3-Nes', state="1Player.World1.Level1.state",
scenario="./data/scenario.json", record="./recordings/")
if disc_acts:
# Build the actions array
env = MarioDiscretizer(env)
# PreprocessFrame
env = PreprocessFrames(env)
# Stack N_FRAMES number of frames
env = FrameStack(env, N_FRAMES)
return env
# TODO: code that can be used to plot the preprocessing
# import matplotlib.pyplot as plt
# f, axs = plt.subplots(2, 2, figsize=(15, 15))
# axs[0, 0].set_title("Raw Input Image")
# axs[0, 0].imshow(frame)
# axs[0, 0].set_ylim((224, 0))
# axs[0, 0].set_yticks(np.arange(0, 225, 224//16))
# axs[0, 0].set_xlim((0, 240))
# axs[0, 0].set_xticks(np.arange(0, 241, 240//16))
# # transform color to grayscale
# frame_gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
# axs[0, 1].set_title("Gray-Scale Image")
# axs[0, 1].imshow(frame_gray, cmap="gray", vmin=0, vmax=255)
# axs[0, 1].set_ylim((224, 0))
# axs[0, 1].set_yticks(np.arange(0, 225, 224//16))
# axs[0, 1].set_xlim((0, 240))
# axs[0, 1].set_xticks(np.arange(0, 241, 240//16))
# # crop the image top and bottom since it's static
# frame_cropped = frame_gray[9:-35, :]
# axs[1, 0].set_title("Cropped Image")
# axs[1, 0].imshow(frame_cropped, cmap="gray", vmin=0, vmax=255)
# axs[1, 0].set_ylim((224, 0))
# axs[1, 0].set_yticks(np.arange(0, 225, 224//16))
# axs[1, 0].set_xlim((0, 240))
# axs[1, 0].set_xticks(np.arange(0, 241, 240//16))
# # normalize the values to range [0,1]
# frame_normalized = frame_cropped / 255.0
# # resize the cropped image to WIDTHxHEIGHT
# frame = cv2.resize(frame_normalized, (self.width, self.height), interpolation=cv2.INTER_AREA)
# axs[1, 1].set_title("Downsized Image")
# axs[1, 1].imshow(frame, cmap="gray", vmin=0, vmax=1)
# axs[1, 1].set_ylim((84, 0))
# axs[1, 1].set_yticks(np.arange(0, 85, 84//7))
# axs[1, 1].set_xlim((0, 84))
# axs[1, 1].set_xticks(np.arange(0, 85, 84//7))
# plt.show()
| [ "matplotlib" ] |
b90f4f9d02871e5847d5fc4503074a99cdb46566 | Python | NishanthMHegde/Pandas-practice | /pandasgraphs.py | UTF-8 | 1,042 | 3.421875 | 3 | [] | no_license |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
df1 = pd.read_csv('df1.csv', index_col=0)
df2 = pd.read_csv('df2.csv')
#df1
print("df1")
print(df1.head())
print("\n\n")
#plotting a histogram
print("plotting a histogram")
df1['A'].hist(bins=50)
plt.show()
print("\n\n")
#another way of plotting a histogram
print("another way of plotting a histogram")
df1['B'].plot(kind='hist', bins=50)
plt.show()
print("\n\n")
#yet another way of plotting a histogram
print("yet another way of plotting a histogram")
df1['C'].plot.hist(bins=60)
plt.show()
print("\n\n")
#bar graph
print("bar graph")
df2.plot.bar()
plt.show()
print("\n\n")
#stacked bar graph
print("stacked bar graph")
df2.plot.bar(stacked=True)
plt.show()
print("\n\n")
#line graph
print("line graph")
df2.plot.line(figsize=(10,5))
plt.show()
print("\n\n")
#area graph
print("area graph")
df2.plot.area(alpha=0.75)
plt.show()
print("\n\n")
#scatter graph
print("scatter graph")
df2.plot.scatter(x='a', y='b', s = df2['c'])
plt.show()
print("\n\n")
| [ "matplotlib" ] |
0a271df930365656572b30bd0c305611861ba6a3 | Python | pkozilek/kaggle-titanic | /src/features_analysis.py | UTF-8 | 4,452 | 3.078125 | 3 | [] | no_license |
import plotly as plt
import numpy as np
import pandas as pd
import functions.graphs as g
features = pd.read_csv('datasets/features.csv', index_col=0)
features_survived = features.loc[features.Survived == 1]
features_died = features.loc[features.Survived == 0]
features_ac = features.loc[features.Age != 0] # Age completed
features_ac_survived = features_ac.loc[features_ac.Survived == 1]
features__ac_died = features_ac.loc[features_ac.Survived == 0]
### Bias Analysis
# Age bias
age_bias = g.histogram(
data_list=[features_survived.Age, features_died.Age],
labels=['Survived', 'Died'],
title='Normalized age bias analysis',
x_bins=[-1, 100, 5],
histnorm='percent',
xaxis_title='Age',
yaxis_title='%'
)
# Sex bias
sex_bias = g.histogram(
data_list=[features_survived.Sex, features_died.Sex],
labels=['Survived', 'Died'],
title='Normalized sex bias analysis',
x_bins=[1, 3, 1],
histnorm='percent',
xaxis_title='Sex',
yaxis_title='%'
)
# Pclass bias
pclass = g.histogram(
data_list=[features_survived.Pclass, features_died.Pclass],
labels=['Survived', 'Died'],
title='Normalized Pclass bias analysis',
x_bins=[0, 20, 1],
histnorm='percent',
xaxis_title='Pclass',
yaxis_title='%'
)
# SibSp bias
sibsp = g.histogram(
data_list=[features_survived.SibSp, features_died.SibSp],
labels=['Survived', 'Died'],
title='Normalized SibSp bias analysis',
x_bins=[0, 9, 1],
histnorm='percent',
xaxis_title='SibSp',
yaxis_title='%'
)
# Parch bias
parch = g.histogram(
data_list=[features_survived.Parch, features_died.Parch],
labels=['Survived', 'Died'],
title='Normalized Parch bias analysis',
x_bins=[0, 7, 1],
histnorm='percent',
xaxis_title='Parch',
yaxis_title='%'
)
# Embarked bias
embarked = g.histogram(
data_list=[features_survived.Embarked, features_died.Embarked],
labels=['Survived', 'Died'],
title='Normalized Embarked bias analysis',
x_bins=[0, 4, 1],
histnorm='percent',
xaxis_title='Embarked',
yaxis_title='%'
)
# Fare bias
fare = g.histogram(
data_list=[features_survived.Fare, features_died.Fare],
labels=['Survived', 'Died'],
title='Normalized Fare bias analysis',
x_bins=[0, 100, 5],
histnorm='percent',
xaxis_title='Fare',
yaxis_title='%'
)
# age_bias.show()
# sex_bias.show()
# pclass.show()
# sibsp.show()
# parch.show()
# embarked.show()
# fare.show()
### Age correlation analysis
age_sex_scatter = g.scatterplot(
x=[features_ac_survived.Age, features__ac_died.Age],
y=[features_ac_survived.Sex, features__ac_died.Sex],
labels=['Survived', 'Died'],
x_jitter=0.5,
y_jitter=0.25,
title='Sex x Age',
xaxis_title='Age',
yaxis_title='Sex'
)
age_pclass_scatter = g.scatterplot(
x=[features_ac_survived.Age, features__ac_died.Age],
y=[features_ac_survived.Pclass, features__ac_died.Pclass],
labels=['Survived', 'Died'],
x_jitter=0.5,
y_jitter=0.25,
title='Pclass x Age',
xaxis_title='Age',
yaxis_title='Pclass'
)
age_sibsp_scatter = g.scatterplot(
x=[features_ac_survived.Age, features__ac_died.Age],
y=[features_ac_survived.SibSp, features__ac_died.SibSp],
labels=['Survived', 'Died'],
x_jitter=0.5,
y_jitter=0.25,
title='SibSp x Age',
xaxis_title='Age',
yaxis_title='SibSp'
)
age_parch_scatter = g.scatterplot(
x=[features_ac_survived.Age, features__ac_died.Age],
y=[features_ac_survived.Parch, features__ac_died.Parch],
labels=['Survived', 'Died'],
x_jitter=0.5,
y_jitter=0.25,
title='Parch x Age',
xaxis_title='Age',
yaxis_title='Parch'
)
age_embarked_scatter = g.scatterplot(
x=[features_ac_survived.Age, features__ac_died.Age],
y=[features_ac_survived.Embarked, features__ac_died.Embarked],
labels=['Survived', 'Died'],
x_jitter=0.5,
y_jitter=0.25,
title='Embarked x Age',
xaxis_title='Age',
yaxis_title='Embarked'
)
age_fare_scatter = g.scatterplot(
x=[features_ac_survived.Age, features__ac_died.Age],
y=[features_ac_survived.Fare, features__ac_died.Fare],
labels=['Survived', 'Died'],
x_jitter=0.5,
y_jitter=0.25,
title='Fare x Age',
xaxis_title='Age',
yaxis_title='Fare'
)
# age_sex_scatter.show()
# age_pclass_scatter.show()
# age_sibsp_scatter.show()
# age_parch_scatter.show()
# age_embarked_scatter.show()
# age_fare_scatter.show()
| [ "plotly" ] |
2c0a45344ce5db816a1b9b87232689e56ccbf009 | Python | gauravsaxena1997/pycode | /matplotlib/16.annotations_and_text.py | UTF-8 | 748 | 2.8125 | 3 | [] | no_license |
import matplotlib.pyplot as plt
import csv
from matplotlib import style
import numpy as np
style.use('mystyle')
x,y = np.loadtxt('file.txt', delimiter=',', unpack=True)
plt.plot(x,y,label='Loaded from file')
# -------------------Text---------------------
font_dict = {
'family':'serif',
'color':'darkred',
'size':10
}
plt.text(9,6,'Text',fontdict=font_dict)
# -------------------------------------------
# -----------------Annotation----------------
plt.annotate('Annotation',(10,8),
xytext=(0.4,0.9), textcoords='axes fraction',
arrowprops= dict(facecolor='w',color='m') )
# ------------------------------------------
plt.xlabel('xlabel')
plt.ylabel('ylabel')
plt.title('matplotlib\nfirst graph')
plt.legend()
plt.show()
| [ "matplotlib" ] |
0bd83ee004ef026cf05c529462a0d44e0e2fbc13 | Python | dho619/ImageTransformations | /P06/p06.py | UTF-8 | 572 | 2.859375 | 3 | [] | no_license |
'''
Perspective transformation
'''
import cv2
import numpy as np
import matplotlib.pyplot as plt
img = cv2.imread('senko.jpeg')
rows,cols,ch = img.shape
pts1 = np.float32([[306,665],[618,652],[278,987],[639,990]])
pts2 = np.float32([[0,0],[300,0],[0,300],[300,300]])
M = cv2.getPerspectiveTransform(pts1,pts2)
dst = cv2.warpPerspective(img,M,(300,300))
cv2.imwrite('senko_TransformacaoDePerspectiva.png', dst)
plt.subplot(121),plt.imshow(img),plt.title('Input')
plt.subplot(122),plt.imshow(dst),plt.title('Output')
plt.show()
print('..|imagem salva com sucesso')
| [ "matplotlib" ] |
34f8902b5967546a30d46dee410e4c779b3f2017 | Python | fgurri/kpi | /nautilus/plots.py | UTF-8 | 62,315 | 2.546875 | 3 | [] | no_license |
from django.db import connections
import configparser
import datetime
import pandas as pd
import mysql.connector
from mysql.connector import Error
import plotly as py
import plotly.graph_objs as go
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
import numpy as np
import dateutil.relativedelta
import nautilus.utils as u
import nautilus.queries as q
""" Generates a offline plotly plot with the graph 'total visits per month'.
Return HTML div code that builds the graph. It is necessary to include 'plotly.js'
in your html file.
usage::
>>> import plots
>>> plot = plot_visits_per_month()
>>> render (request, 'someViewWithPlot.html', {'plot_name': plot})
:param:
:rtype: string
"""
def plot_visits_per_month():
try:
connection = connections['datawarehouse']
sql = 'SELECT f_month as Mes, CONCAT(f_year, "-", f_monthname) as MesNom, sum(f_count) as Total FROM datawarehouse.dm1_visits_per_agenda GROUP BY f_month, CONCAT(f_year, "-", f_monthname) ORDER BY f_month ASC'
df = pd.read_sql(sql, connection)
trace_visites = go.Scatter(x=df['MesNom'],
y=df['Total'],
mode='lines+markers',
name='Visites per mes')
        linear_x = np.arange(0, len(df)).reshape(-1, 1)
poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(linear_x)
pol_reg = LinearRegression()
pol_reg.fit(X_poly, df['Total'])
predicted_y = pol_reg.predict(poly_reg.fit_transform(linear_x))
trace_regression_visites = go.Scatter(x=df['MesNom'],
y=predicted_y,
mode='lines',
name='Tendencia')
trace_omi_annotation = go.Scatter(x=["2017-Dec", "2017-Dec"],
y=[0, df['Total'].max()],
mode='lines',
name='Inici odontologia a OMI360',
line=dict(dash='dot'))
data = [trace_visites, trace_regression_visites, trace_omi_annotation]
layout = go.Layout(
title='Evolució del número de visites per mes',
titlefont=dict(family='Arial, sans-serif',
size=24,
color='green'),
xaxis=dict(showticklabels=True,
tickangle=45,
tickfont=dict(family='Old Standard TT, serif',
size=14,
color='black'),
showexponent='none'),
yaxis=dict(titlefont=dict(family='Arial, sans-serif',
size=18,
color='lightgrey'),
showticklabels=True,
tickfont=dict(family='Old Standard TT, serif',
size=14,
color='black'),
showexponent='none'))
fig = go.Figure(data=data, layout=layout)
return py.offline.plot(fig, include_plotlyjs=False, output_type='div')
except Error as e:
print("Error while connecting to MySQL", e)
""" Generates a offline plotly plot with the graph 'Distribution of visits per speciality'.
Return HTML div code that builds the graph. It is necessary to include 'plotly.js'
in your html file.
usage::
>>> import plots
>>> plot = plot_distribution_visits_per_speciality('201801', '201812')
>>> render (request, 'someViewWithPlot.html', {'plot_name': plot})
:param p_first_month: starting month of range values in YYYYMM format
:param p_last_month: ending month of range values in YYYYMM format
:rtype: string
"""
def plot_distribution_visits_per_speciality(p_first_month, p_last_month):
try:
connection = connections['datawarehouse']
sql = 'SELECT f_nomEspecialitat as Spec, sum(f_count) as Total FROM datawarehouse.dm1_visits_per_agenda WHERE f_month >= '+str(p_first_month)+' and f_month <= '+str(p_last_month)+' GROUP BY f_nomEspecialitat ORDER BY sum(f_count) DESC'
df = pd.read_sql(sql, connection)
trace = go.Pie(labels=df['Spec'], values=df['Total'])
graph_title = 'Distribució visites per especialitat (Del ' + u.yyyymmToMonthName(p_first_month) + ' al ' + u.yyyymmToMonthName(p_last_month) + ')'
data = [trace]
layout = go.Layout(
title=graph_title,
titlefont=dict(family='Arial, sans-serif', size=24, color='green'),
autosize=False,
width=1000,
height=700,
xaxis=dict(showticklabels=True,
tickangle=45,
tickfont=dict(family='Old Standard TT, serif',
size=14,
color='black'),
showexponent='none'),
yaxis=dict(titlefont=dict(family='Arial, sans-serif',
size=18,
color='lightgrey'),
showticklabels=True,
tickfont=dict(family='Old Standard TT, serif',
size=14,
color='black'),
showexponent='none'))
fig = go.Figure(data=data, layout=layout)
return py.offline.plot(fig, include_plotlyjs=False, output_type='div')
except Error as e:
print("Error while connecting to MySQL", e)
""" Generates a offline plotly plot with the graph 'Visits per month by speciality'.
You can choose to filter by speciality or by agenda, but one of both must be set.
Return HTML div code that builds the graph. It is necessary to include 'plotly.js'
in your html file.
usage::
>>> import plots
>>> plot = plot_visits_per_month_speciality(p_id_especiality=19)
>>> render (request, 'someViewWithPlot.html', {'plot_name': plot})
:param p_id_especiality: speciality identifier
:param p_id_agenda: agenda identifier
:rtype: string
"""
def plot_visits_per_month_speciality(p_id_especiality=None, p_id_agenda=None):
try:
connection = connections['datawarehouse']
sql = 'SELECT CONCAT(f_year, "-", f_monthname) as MesNom, f_month, sum(f_count) as Total FROM datawarehouse.dm1_visits_per_agenda WHERE '
if (p_id_especiality is None) and (p_id_agenda is None):
            p_id_especiality = 19 # general medicine by default
if (p_id_especiality is not None) and (p_id_especiality != ""):
sql = sql + 'f_idEspecialitat='+str(p_id_especiality)+' '
else:
if (p_id_agenda is not None) and (p_id_agenda != ""):
sql = sql + 'f_idAgenda=\''+str(p_id_agenda)+'\' '
sql = sql + 'GROUP BY CONCAT(f_year, "-", f_monthname), f_month ORDER BY f_month ASC'
df = pd.read_sql(sql, connection)
if df.empty:
return None
        linear_x = np.arange(0, len(df)).reshape(-1, 1)
poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(linear_x)
pol_reg = LinearRegression()
pol_reg.fit(X_poly, df['Total'])
predicted_y = pol_reg.predict(poly_reg.fit_transform(linear_x))
trace_regression = go.Scatter(x=df['MesNom'],
y=predicted_y,
mode='lines',
name='Tendencia Especialitat')
trace = go.Scatter(x=df['MesNom'],
y=df['Total'],
mode='lines+markers',
name='Visites per mes')
graph_title = 'Evolució mensual de visites'
if p_id_especiality is not None:
graph_title = q.get_Spec_Name(p_id_especiality) + ': '+ graph_title
if p_id_agenda is not None:
graph_title = q.get_Agenda_Name(p_id_agenda) + ': '+ graph_title
data = [trace, trace_regression]
layout = go.Layout(
title=graph_title,
titlefont=dict(family='Arial, sans-serif', size=24, color='green'),
xaxis=dict(showticklabels=True,
tickangle=45,
tickfont=dict(family='Old Standard TT, serif',
size=14,
color='black'),
showexponent='none'),
yaxis=dict(titlefont=dict(family='Arial, sans-serif',
size=18,
color='lightgrey'),
showticklabels=True,
tickfont=dict(family='Old Standard TT, serif',
size=14,
color='black'),
showexponent='none'))
fig = go.Figure(data=data, layout=layout)
return py.offline.plot(fig, include_plotlyjs=False, output_type='div')
except Error as e:
print("Error while connecting to MySQL", e)
""" Generates a offline plotly plot with the graph 'Frequency by agenda'.
Return HTML div code that builds the graph. It is necessary to include 'plotly.js'
in your html file.
usage::
>>> import plots
>>> plot = plot_frequency_per_agenda(p_id_agenda='AG100')
>>> render (request, 'someViewWithPlot.html', {'plot_name': plot})
:param p_id_agenda: agenda identifier
:rtype: string
"""
def plot_frequency_per_agenda(p_id_agenda):
try:
connection = connections['datawarehouse']
sql = 'SELECT CONCAT(f_year, "-", f_monthname) as MesNom, f_month as Mes, f_count/f_patients as rep FROM datawarehouse.dm1_visits_per_agenda WHERE f_idAgenda=\''+str(p_id_agenda)+'\' ORDER BY f_month ASC'
df = pd.read_sql(sql, connection)
trace_frequency = go.Scatter(x=df['MesNom'],
y=df['rep'],
mode='lines+markers',
name='repetitivitat')
        linear_x = np.arange(0, len(df)).reshape(-1, 1)
poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(linear_x)
pol_reg = LinearRegression()
pol_reg.fit(X_poly, df['rep'])
predicted_y = pol_reg.predict(poly_reg.fit_transform(linear_x))
trace_regression = go.Scatter(x=df['MesNom'],
y=predicted_y,
mode='lines',
name='regressio repetitivitat')
graph_title = q.get_Agenda_Name(p_id_agenda) + ': repetitivitat'
data = [trace_frequency, trace_regression]
layout = go.Layout(
title=graph_title,
titlefont=dict(family='Arial, sans-serif', size=24, color='green'),
xaxis=dict(showticklabels=True,
tickangle=45,
tickfont=dict(family='Old Standard TT, serif',
size=14,
color='black'),
showexponent='none'),
yaxis=dict(showticklabels=True,
tickfont=dict(family='Old Standard TT, serif',
size=14,
color='black'),
showexponent='none'))
fig = go.Figure(data=data, layout=layout)
return py.offline.plot(fig, include_plotlyjs=False, output_type='div')
except Error as e:
print("Error while connecting to MySQL", e)
""" Generates a offline plotly plot with the graph 'Patients per month'.
Return HTML div code that builds the graph. It is necessary to include 'plotly.js'
in your html file.
usage::
>>> import plots
>>> plot = plot_patients_per_month()
>>> render (request, 'someViewWithPlot.html', {'plot_name': plot})
:param: None
:rtype: string
"""
def plot_patients_per_month():
try:
connection = connections['datawarehouse']
sql = 'SELECT f_month as Mes , CONCAT(LEFT(f_month, 4), "-", f_monthname) as MesNom, f_patients as Patients, f_new_patients FROM datawarehouse.dm2_stats_per_month ORDER BY f_month ASC'
df = pd.read_sql(sql, connection)
trace_patients = go.Scatter(x=df['MesNom'],
y=df['Patients'],
mode='lines+markers',
name='Total pacients')
        linear_x = np.arange(0, len(df)).reshape(-1, 1)
poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(linear_x)
pol_reg = LinearRegression()
pol_reg.fit(X_poly, df['Patients'])
predicted_y = pol_reg.predict(poly_reg.fit_transform(linear_x))
trace_regression_patients = go.Scatter(x=df['MesNom'],
y=predicted_y,
mode='lines',
name='Tendencia total pacients')
trace_omi_annotation = go.Scatter(x=["2017-Dec", "2017-Dec"],
y=[0, df['Patients'].max()],
mode='lines',
name='Inici odontologia a OMI360',
line=dict(dash='dot'))
graph_title = 'Pacients per mes'
data = [trace_patients, trace_regression_patients, trace_omi_annotation]
layout = go.Layout(
title=graph_title,
titlefont=dict(family='Arial, sans-serif', size=24, color='green'),
xaxis=dict(showticklabels=True,
tickangle=45,
tickfont=dict(family='Old Standard TT, serif',
size=14,
color='black'),
showexponent='none'),
yaxis=dict(showticklabels=True,
tickfont=dict(family='Old Standard TT, serif',
size=14,
color='black'),
showexponent='none'))
fig = go.Figure(data=data, layout=layout)
return py.offline.plot(fig, include_plotlyjs=False, output_type='div')
except Error as e:
print("Error while connecting to MySQL", e)
""" Generates a offline plotly plot with the graph 'New patients per month'.
Return HTML div code that builds the graph. It is necessary to include 'plotly.js'
in your html file.
usage::
>>> import plots
>>> plot = plot_new_patients_per_month()
>>> render (request, 'someViewWithPlot.html', {'plot_name': plot})
:param: None
:rtype: string
"""
def plot_new_patients_per_month():
try:
connection = connections['datawarehouse']
sql = 'SELECT f_month as Mes, CONCAT(LEFT(f_month, 4), "-", f_monthname) as MesNom, f_patients as Patients, f_new_patients as NewPatients FROM datawarehouse.dm2_stats_per_month ORDER BY f_month ASC'
df = pd.read_sql(sql, connection)
trace_new_patients = go.Scatter(x=df['MesNom'],
y=df['NewPatients'],
mode='lines+markers',
name='Pacients nous')
        linear_x = np.arange(0, len(df)).reshape(-1, 1)
poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(linear_x)
pol_reg = LinearRegression()
pol_reg.fit(X_poly, df['NewPatients'])
predicted_y = pol_reg.predict(poly_reg.fit_transform(linear_x))
trace_regression_new_patients = go.Scatter(x=df['MesNom'],
y=predicted_y,
mode='lines',
name='Tendencia nous pacients')
trace_omi_annotation = go.Scatter(x=["2017-Dec", "2017-Dec"],
y=[0, df['NewPatients'].max()],
mode='lines',
name='Inici odontologia a OMI360',
line=dict(dash='dot'))
graph_title = 'Pacients nous per mes'
data = [trace_new_patients, trace_regression_new_patients, trace_omi_annotation]
layout = go.Layout(
title=graph_title,
titlefont=dict(family='Arial, sans-serif', size=24, color='green'),
xaxis=dict(showticklabels=True,
tickangle=45,
tickfont=dict(family='Old Standard TT, serif',
size=14,
color='black'),
showexponent='none'),
yaxis=dict(showticklabels=True,
tickfont=dict(family='Old Standard TT, serif',
size=14,
color='black'),
showexponent='none'))
fig = go.Figure(data=data, layout=layout)
return py.offline.plot(fig, include_plotlyjs=False, output_type='div')
except Error as e:
print("Error while connecting to MySQL", e)
""" Generates a offline plotly plot with the graph 'Distribution patients vs new patients'.
Return HTML div code that builds the graph. It is necessary to include 'plotly.js'
in your html file.
usage::
>>> import plots
>>> plot = plot_distribution_new_patients()
>>> render (request, 'someViewWithPlot.html', {'plot_name': plot})
:param: None
:rtype: string
"""
def plot_distribution_new_patients():
try:
connection = connections['datawarehouse']
sql = 'SELECT f_month as Mes, CONCAT(LEFT(f_month, 4), "-", f_monthname) as MesNom, f_patients-f_new_patients as Patients, f_new_patients as NewPatients FROM datawarehouse.dm2_stats_per_month ORDER BY f_month ASC'
df = pd.read_sql(sql, connection)
trace_new_patients = go.Scatter(x=df['MesNom'],
y=df['NewPatients'],
mode='lines',
name='Pacients nous',
stackgroup='one',
groupnorm='percent')
trace_patients = go.Scatter(x=df['MesNom'],
y=df['Patients'],
mode='lines',
name='Pacients vells',
stackgroup='one')
graph_title = 'Distribució nous pacients'
data = [trace_new_patients, trace_patients]
layout = go.Layout(
title=graph_title,
titlefont=dict(family='Arial, sans-serif', size=24, color='green'),
showlegend=True,
xaxis=dict(
showticklabels=True,
tickangle=45,
tickfont=dict(family='Old Standard TT, serif',
size=14,
color='black'),
type='category',
),
yaxis=dict(
type='linear',
range=[1, 100],
dtick=20,
ticksuffix='%'))
fig = go.Figure(data=data, layout=layout)
return py.offline.plot(fig, include_plotlyjs=False, output_type='div')
except Error as e:
print("Error while connecting to MySQL", e)
""" Generates a offline plotly plot with the graph 'month evolution of new Patients per speciality'.
Return HTML div code that builds the graph. It is necessary to include 'plotly.js'
in your html file.
usage::
>>> import plots
>>> plot = plot_new_patients_per_speciality_per_month()
>>> render (request, 'someViewWithPlot.html', {'plot_name': plot})
:param: None
:rtype: string
"""
def plot_new_patients_per_speciality_per_month():
try:
connection = connections['datawarehouse']
sql = 'SELECT f_nomEspecialitat as Spec, CONCAT(LEFT(f_month,4), "-", f_monthname) as MesNom, f_month as Mes, sum(f_newPatients) as NewPatients FROM dm2_newpatient_per_month_agenda GROUP BY f_nomEspecialitat, CONCAT(LEFT(f_month,4), "-", f_monthname), f_month'
df = pd.read_sql(sql, connection)
arraySpecs = df['Spec'].unique()
df = df.set_index('Spec')
data = list()
for spec in arraySpecs:
df_spec = pd.DataFrame(df.loc[df.index == spec, ['MesNom', 'Mes', 'NewPatients']])
df_spec = df_spec.sort_values(['Mes'], ascending=[1])
trace = go.Scatter(x=df_spec['MesNom'],
y=df_spec['NewPatients'],
mode='lines',
name=spec,
stackgroup='one',
groupnorm='percent')
data.append(trace)
layout = go.Layout(
title='Evolució de la distribució de nous pacients per especialitat',
titlefont=dict(family='Arial, sans-serif', size=24, color='green'),
showlegend=True,
xaxis=dict(
showticklabels=True,
tickangle=45,
tickfont=dict(family='Old Standard TT, serif',
size=14,
color='black'),
type='category',
),
yaxis=dict(
type='linear',
range=[1, 100],
dtick=20,
ticksuffix='%'))
fig = go.Figure(data=data, layout=layout)
return py.offline.plot(fig, include_plotlyjs=False, output_type='div')
except Error as e:
print("Error while connecting to MySQL", e)
""" Generates a offline plotly plot with the graph 'New Patients per Speciality or Agenda'.
You can choose to call by spec o agenda, but not both. If you set both values Spec is used.
Return HTML div code that builds the graph. It is necessary to include 'plotly.js'
in your html file.
usage::
>>> import plots
>>> plot = plot_evolution_new_patients_per_spec(p_id_agenda='100')
>>> render (request, 'someViewWithPlot.html', {'plot_name': plot})
:param: None
:rtype: string
"""
def plot_evolution_new_patients_per_spec(p_id_especiality=None, p_id_agenda=None):
try:
connection = connections['datawarehouse']
sql = 'SELECT f_month as Mes, CONCAT(LEFT(f_month,4), "-", f_monthname) as MesNom, sum(f_newPatients) as NewPatients from dm2_newpatient_per_month_agenda WHERE '
if (p_id_especiality is None or p_id_especiality == '') and p_id_agenda is None:
            p_id_especiality = 19 # general medicine by default
if (p_id_especiality is not None) and p_id_especiality != "":
sql = sql + 'f_idEspecialitat=' + str(p_id_especiality) + ' '
else:
if (p_id_agenda is not None) and p_id_agenda != "":
sql = sql + 'f_idAgenda=\'' + str(p_id_agenda) + '\' '
sql = sql + 'GROUP BY f_month, CONCAT(LEFT(f_month,4), "-", f_monthname) '
sql = sql + 'ORDER BY f_month ASC'
df = pd.read_sql(sql, connection)
if df.empty:
return None
trace_new_patients = go.Scatter(x=df['MesNom'],
y=df['NewPatients'],
mode='lines+markers',
name='Pacients nous')
        linear_x = np.arange(0, len(df)).reshape(-1, 1)
poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(linear_x)
pol_reg = LinearRegression()
pol_reg.fit(X_poly, df['NewPatients'])
predicted_y = pol_reg.predict(poly_reg.fit_transform(linear_x))
trace_regression_new_patients = go.Scatter(x=df['MesNom'],
y=predicted_y,
mode='lines',
name='Tendencia nous pacients')
graph_title = 'Evolució del número de pacients nous'
if p_id_especiality is not None:
graph_title = q.get_Spec_Name(p_id_especiality) + ': '+ graph_title
if p_id_agenda is not None:
graph_title = q.get_Agenda_Name(p_id_agenda) + ': '+ graph_title
data = [trace_new_patients, trace_regression_new_patients]
layout = go.Layout(
title=graph_title,
titlefont=dict(family='Arial, sans-serif', size=24, color='green'),
showlegend=True,
xaxis=dict(
showticklabels=True,
tickangle=45,
tickfont=dict(family='Old Standard TT, serif',
size=14,
color='black'),
type='category',
),
yaxis=dict(showticklabels=True,
tickfont=dict(family='Old Standard TT, serif',
size=14,
color='black'),
showexponent='none'))
fig = go.Figure(data=data, layout=layout)
return py.offline.plot(fig, include_plotlyjs=False, output_type='div')
except Error as e:
print("Error while connecting to MySQL", e)
""" Generates a offline plotly plot with the graph 'Distribution of new patients by speciality'.
Return HTML div code that builds the graph. It is necessary to include 'plotly.js'
in your html file.
usage::
>>> import plots
>>> plot = plot_distribution_new_patients_per_spec('201801', '201812')
>>> render (request, 'someViewWithPlot.html', {'plot_name': plot})
:param p_first_month: starting month of range values in YYYYMM format
:param p_last_month: ending month of range values in YYYYMM format
:rtype: string
"""
def plot_distribution_new_patients_per_spec(p_first_month, p_last_month):
try:
connection = connections['datawarehouse']
sql = 'SELECT f_nomEspecialitat as Spec, sum(f_newPatients) as NewPatients FROM datawarehouse.dm2_newpatient_per_month_agenda WHERE f_month >= '+str(p_first_month)+' and f_month <= '+str(p_last_month)+' GROUP BY f_nomEspecialitat ORDER BY sum(f_newPatients) DESC'
df = pd.read_sql(sql, connection)
trace_new_patients = go.Pie(labels=df['Spec'], values=df['NewPatients'])
graph_title = 'Nous pacients per especialitat (Del ' + u.yyyymmToMonthName(p_first_month) + ' al ' + u.yyyymmToMonthName(p_last_month) + ')'
data = [trace_new_patients]
layout = go.Layout(
title=graph_title,
titlefont=dict(family='Arial, sans-serif', size=24, color='green'),
showlegend=True,
xaxis=dict(
showticklabels=True,
tickangle=45,
tickfont=dict(family='Old Standard TT, serif',
size=14,
color='black'),
type='category',
),
yaxis=dict(showticklabels=True,
tickfont=dict(family='Old Standard TT, serif',
size=14,
color='black'),
showexponent='none'))
fig = go.Figure(data=data, layout=layout)
return py.offline.plot(fig, include_plotlyjs=False, output_type='div')
except Error as e:
print("Error while connecting to MySQL", e)
""" Generates a offline plotly plot with the graph 'New Patients per agenda'.
Optionally, you can show by speciallity. If no speciality is set it shows whole data.
Return HTML div code that builds the graph. It is necessary to include 'plotly.js'
in your html file.
usage::
>>> import plots
>>> plot = plot_first_blood_per_agenda('201801', '201812')
>>> render (request, 'someViewWithPlot.html', {'plot_name': plot})
:param p_first_month: starting month of range values in YYYYMM format
:param p_last_month: ending month of range values in YYYYMM format
:param p_id_especiality: optional identifier of speciality
:rtype: string
"""
def plot_first_blood_per_agenda(p_first_month, p_last_month, p_id_especiality=None):
try:
connection = connections['datawarehouse']
sql = 'SELECT f_nomAgenda as nomAgenda, sum(f_totalVisits) as Total, COUNT(*) as Patients, sum(f_totalVisits)/COUNT(*) as PerPatient FROM datawarehouse.dm_first_visit WHERE f_month between '+str(p_first_month)+' and '+str(p_last_month)
if p_id_especiality is not None:
sql = sql + ' AND f_idEspecialitat=' + str(p_id_especiality) + ' '
sql = sql + ' GROUP BY f_nomAgenda'
sql = sql + ' ORDER BY sum(f_totalVisits) DESC'
df = pd.read_sql(sql, connection)
trace_visits = go.Bar(x=df['nomAgenda'],
y=df['Total'],
name='Total visites al centre')
trace_per_patient = go.Bar(x=df['nomAgenda'],
y=df['PerPatient'],
name='Mitjana per pacient')
graph_title = 'Visites per captació en agenda (Del ' + u.yyyymmToMonthName(p_first_month) + ' al ' + u.yyyymmToMonthName(p_last_month) + ')'
if p_id_especiality is not None:
graph_title = q.get_Spec_Name(p_id_especiality) + ': ' + graph_title
data = [trace_visits, trace_per_patient]
layout = go.Layout(
title=graph_title,
titlefont=dict(family='Arial, sans-serif', size=24, color='green'),
showlegend=True,
xaxis=dict(
showticklabels=True,
tickangle=45,
tickfont=dict(family='Old Standard TT, serif',
size=12,
color='black'),
),
yaxis=dict(showticklabels=True,
tickfont=dict(family='Old Standard TT, serif',
size=14,
color='black'),
showexponent='none'))
fig = go.Figure(data=data, layout=layout)
return py.offline.plot(fig, include_plotlyjs=False, output_type='div')
except Error as e:
print("Error while connecting to MySQL", e)
""" Generates a offline plotly plot with the graph 'Last visits per month'.
Return HTML div code that builds the graph. It is necessary to include 'plotly.js'
in your html file.
usage::
>>> import plots
>>> plot = plot_last_visits_per_month()
>>> render (request, 'someViewWithPlot.html', {'plot_name': plot})
:param: None
:rtype: string
"""
def plot_last_visits_per_month():
try:
connection = connections['datawarehouse']
# we don't want to show future months, so we filter till past month
        last_month = datetime.date.today().strftime('%Y%m')
sql = 'SELECT f_lastmonth as Mes, CONCAT(LEFT(f_lastmonth,4), "-", f_lastmonthname) as MesNom, count(*) as Total FROM datawarehouse.dm_first_visit WHERE f_lastmonth < '+ last_month + ' GROUP BY f_lastmonth, CONCAT(LEFT(f_lastmonth,4), "-", f_lastmonthname) ORDER BY f_lastmonth ASC'
df = pd.read_sql(sql, connection)
trace_visits = go.Scatter(x=df['MesNom'],
y=df['Total'],
mode='lines+markers',
name='Ultimes visites per mes')
        linear_x = np.arange(0, len(df)).reshape(-1, 1)
poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(linear_x)
pol_reg = LinearRegression()
pol_reg.fit(X_poly, df['Total'])
predicted_y = pol_reg.predict(poly_reg.fit_transform(linear_x))
trace_regression_visits = go.Scatter(x=df['MesNom'],
y=predicted_y,
mode='lines',
name='Tendencia')
trace_omi_annotation = go.Scatter(x=["2017-Dec", "2017-Dec"],
y=[0, df['Total'].max()],
mode='lines',
name='Inici odontologia a OMI360',
line=dict(dash='dot'))
data = [trace_visits, trace_regression_visits, trace_omi_annotation]
layout = go.Layout(
title='Evolució del número de últimes visites per mes',
titlefont=dict(family='Arial, sans-serif',
size=24,
color='green'),
xaxis=dict(showticklabels=True,
tickangle=45,
tickfont=dict(family='Old Standard TT, serif',
size=14,
color='black'),
showexponent='none'),
yaxis=dict(titlefont=dict(family='Arial, sans-serif',
size=18,
color='lightgrey'),
showticklabels=True,
tickfont=dict(family='Old Standard TT, serif',
size=14,
color='black'),
showexponent='none'))
fig = go.Figure(data=data, layout=layout)
return py.offline.plot(fig, include_plotlyjs=False, output_type='div')
except Error as e:
print("Error while connecting to MySQL", e)
""" Generates a offline plotly plot with the graph 'Visits per patient'.
Return HTML div code that builds the graph. It is necessary to include 'plotly.js'
in your html file.
usage::
>>> import plots
>>> plot = plot_visits_per_patient()
>>> render (request, 'someViewWithPlot.html', {'plot_name': plot})
:param: None
:rtype: string
"""
def plot_visits_per_patient():
try:
connection = connections['datawarehouse']
df = pd.read_sql('SELECT f_numHistoria as Patient, f_totalVisits as Total FROM datawarehouse.dm_first_visit', connection)
        #cap outliers at 50 for better visualization
        df.loc[df['Total'] > 50, 'Total'] = 50
trace_visits = go.Histogram(x=df['Total'],
name='Visites per pacient')
data = [trace_visits]
layout = go.Layout(
title='Distribució del número de visites per pacient',
titlefont=dict(family='Arial, sans-serif',
size=24,
color='green'),
xaxis=dict(showticklabels=True,
tickangle=45,
tickfont=dict(family='Old Standard TT, serif',
size=14,
color='black'),
showexponent='none',
title='Visites'),
yaxis=dict(showticklabels=True,
title='Número de pacients',
tickformat='.0f',
tickfont=dict(family='Old Standard TT, serif',
size=14,
color='black'),
showexponent='none'))
fig = go.Figure(data=data, layout=layout)
return py.offline.plot(fig, include_plotlyjs=False, output_type='div')
except Error as e:
print("Error while connecting to MySQL", e)
""" Generates a offline plotly plot with the graph 'Distribution casual vs fidelizied'.
Return HTML div code that builds the graph. It is necessary to include 'plotly.js'
in your html file.
usage::
>>> import plots
>>> plot = plot_distribution_casual_vs_fidelizied()
>>> render (request, 'someViewWithPlot.html', {'plot_name': plot})
:param: None
:rtype: string
"""
def plot_distribution_casual_vs_fidelizied():
try:
connection = connections['datawarehouse']
sql = 'SELECT f_month as mes, CONCAT(LEFT(f_month,4), "-", f_monthname) as MesNom, f_casuals as casuals, f_fidelitzats as fidelitzats, f_visits_casuals as visitsCasuals, f_visits_fidelitzats as visitsFidelitzats FROM datawarehouse.dm2_stats_per_month ORDER BY f_month ASC'
df = pd.read_sql(sql, connection)
trace_casual = go.Scatter(x=df['MesNom'],
y=df['casuals'],
mode='lines',
name='Pacients casuals',
stackgroup='one',
groupnorm='percent')
trace_fidelizied = go.Scatter(x=df['MesNom'],
y=df['fidelitzats'],
mode='lines',
name='Pacients fidelitzats',
stackgroup='one')
data = [trace_casual, trace_fidelizied]
layout = go.Layout(
title='Distribució pacients casuals vs fidelitzats',
titlefont=dict(family='Arial, sans-serif', size=24, color='green'),
showlegend=True,
xaxis=dict(
showticklabels=True,
tickangle=45,
tickfont=dict(family='Old Standard TT, serif',
size=14,
color='black'),
type='category',
),
yaxis=dict(
type='linear',
range=[1, 100],
dtick=20,
ticksuffix='%'))
fig = go.Figure(data=data, layout=layout)
plotdiv_patients = py.offline.plot(fig, include_plotlyjs=False, output_type='div')
trace_visits_casual = go.Scatter(x=df['MesNom'],
y=df['visitsCasuals'],
mode='lines',
name='Visites casuals',
stackgroup='one',
groupnorm='percent')
trace_visits_fidelizied = go.Scatter(x=df['MesNom'],
y=df['visitsFidelitzats'],
mode='lines',
name='Visites fidelitzats',
stackgroup='one')
data = [trace_visits_casual, trace_visits_fidelizied]
layout = go.Layout(
title='Distribució visites casuals vs fidelitzats',
titlefont=dict(family='Arial, sans-serif', size=24, color='green'),
showlegend=True,
xaxis=dict(
showticklabels=True,
tickangle=45,
tickfont=dict(family='Old Standard TT, serif',
size=14,
color='black'),
type='category',
),
yaxis=dict(
type='linear',
range=[1, 100],
dtick=20,
ticksuffix='%'))
fig = go.Figure(data=data, layout=layout)
plotdiv_visits = py.offline.plot(fig, include_plotlyjs=False, output_type='div')
return plotdiv_patients, plotdiv_visits
except Error as e:
print("Error while connecting to MySQL", e)
""" Generates a offline plotly plot with the graph 'Distance to last visit'.
Return HTML div code that builds the graph. It is necessary to include 'plotly.js'
in your html file.
usage::
>>> import plots
>>> plot = plot_distance_to_lastmonth()
>>> render (request, 'someViewWithPlot.html', {'plot_name': plot})
:param: None
:rtype: string
"""
def plot_distance_to_lastmonth():
try:
connection = connections['datawarehouse']
df = pd.read_sql('SELECT f_numHistoria as Patient, PERIOD_DIFF(IF(f_lastmonth=f_month,EXTRACT(YEAR_MONTH FROM CURRENT_DATE()),f_lastmonth), f_month) as mesos FROM datawarehouse.dm_first_visit', connection)
df = df[df['mesos']>0]
trace_distance = go.Histogram(x=df['mesos'],
name='Mesos des de última visita')
data = [trace_distance]
layout = go.Layout(
title='Conteig dels mesos que fa que no ve cada pacient',
titlefont=dict(family='Arial, sans-serif',
size=24,
color='green'),
xaxis=dict(showticklabels=True,
tickangle=45,
tickfont=dict(family='Old Standard TT, serif',
size=14,
color='black'),
showexponent='none',
title='Mesos des de última visita'),
yaxis=dict(showticklabels=True,
title='Número de pacients',
tickformat='.0f',
tickfont=dict(family='Old Standard TT, serif',
size=14,
color='black'),
showexponent='none'))
fig = go.Figure(data=data, layout=layout)
return py.offline.plot(fig, include_plotlyjs=False, output_type='div')
except Error as e:
print("Error while connecting to MySQL", e)
""" Callcenter plots to analyse performance.
usage::
>>> import plots
        >>> plots = plots_callcenter_period('01/01/2018', '31/01/2018')
        >>> render (request, 'someViewWithPlot.html', {'plot_name': plots[1]})
    :param p_date_ini: string representation of starting date in DD/MM/YYYY format
    :param p_date_fin: string representation of ending date in DD/MM/YYYY format
:rtype: array of plots
"""
def plots_callcenter_period (p_date_ini, p_date_fin):
try:
connection = connections['datawarehouse']
# format date to use in a between condition: YYYYMMDD
date_ini = datetime.datetime.strptime(str(p_date_ini), "%d/%m/%Y").strftime("%Y/%m/%d")
date_fin = datetime.datetime.strptime(str(p_date_fin), "%d/%m/%Y").strftime("%Y/%m/%d")
sql = "SELECT f_hour, sum(f_total) as total, sum(f_answered) as answered, sum(f_not_answered) as not_answered, if(sum(f_answered)>0,sum(f_not_answered)/sum(f_answered),sum(f_not_answered)) as overcall_factor FROM dm3_callcenter_general WHERE f_day BETWEEN \'"+date_ini+"\' AND \'"+date_fin+"\' and f_dst_id='6000' and f_hour between '07' and '23' GROUP BY f_hour ORDER BY f_hour ASC"
df = pd.read_sql(sql, connection)
if df.empty:
return "No hi han dades en el periode triat.", None, None, None, None, None
trace_answered = go.Scatter(x=df['f_hour'],
y=df['answered'],
mode='lines',
name='Contestades',
stackgroup='one',
fillcolor='#81d386',
line = dict(
color = ('#81d386'),),
hovertemplate = '%{y:.2f}%',
groupnorm='percent')
trace_not_answered = go.Scatter(x=df['f_hour'],
y=df['not_answered'],
mode='lines',
name='No contestades',
hovertemplate = '%{y:.2f}%',
line = dict(
color = ('#f28282'),),
fillcolor= '#f28282',
stackgroup='one')
graph_title = 'Distribució contestades vs no contestades'
data = [trace_answered, trace_not_answered]
layout = go.Layout(
title=graph_title,
titlefont=dict(family='Arial, sans-serif', size=24, color='green'),
showlegend=True,
xaxis=dict(
showticklabels=True,
ticksuffix='h',
tickfont=dict(family='Old Standard TT, serif',
size=14,
color='black'),
tickmode='linear',
),
yaxis=dict(type='linear',
range=[1, 100],
dtick=20,
ticksuffix='%')
)
fig = go.Figure(data=data, layout=layout)
plot_distrib = py.offline.plot(fig, include_plotlyjs=False, output_type='div')
trace_abs_lost_calls = go.Bar(x=df['f_hour'],
y=df['not_answered'],
name='total no agafades')
layout = go.Layout(
title='Quantitat trucades no agafades',
titlefont=dict(family='Arial, sans-serif', size=24, color='green'),
showlegend=True,
xaxis=dict(
showticklabels=True,
ticksuffix='h',
tickfont=dict(family='Old Standard TT, serif',
size=14,
color='black'),
tickmode='linear',
),
)
data=[trace_abs_lost_calls]
fig = go.Figure(data=data, layout=layout)
plot_abs_values = py.offline.plot(fig, include_plotlyjs=False, output_type='div')
df = pd.read_sql("select f_week_day_order, f_week_day, f_hour, sum(f_not_answered) as not_answered, sum(f_total) as total from dm3_callcenter_general where f_day BETWEEN \'"+date_ini+"\' AND \'"+date_fin+"\' and f_dst_id='6000' and f_hour between '07' and '22' group by f_week_day_order, f_week_day, f_hour order by f_week_day_order ASC, f_hour ASC", connection)
days = df['f_week_day'].unique()
hours = sorted(df['f_hour'].unique())
values = []
for hour in hours:
line = []
for day in days:
v = df.loc[(df['f_week_day'] == day) & (df['f_hour'] == hour), 'not_answered'].values
if v.size >0:
line.append(v[0])
else:
line.append(0)
values.append(line)
trace_heatmap_no_answer = go.Heatmap(z=values, x=days, y=hours, colorscale='Reds')
data = [trace_heatmap_no_answer]
layout = go.Layout(
title='No contestades (valor absolut)',
titlefont=dict(family='Arial, sans-serif',
size=24,
color='green'),
yaxis=dict(
showticklabels=True,
ticksuffix='h',
tickfont=dict(family='Old Standard TT, serif',
size=14,
color='black'),
tickmode='linear',
),
)
fig = go.Figure(data=data, layout=layout)
plot_heatmap_no_answer = py.offline.plot(fig, include_plotlyjs=False, output_type='div')
# total calls
total_values = []
for hour in hours:
line = []
for day in days:
v = df.loc[(df['f_week_day'] == day) & (df['f_hour'] == hour), 'total'].values
if v.size >0:
line.append(v[0])
else:
line.append(0)
total_values.append(line)
trace_heatmap_total = go.Heatmap(z=total_values, x=days, y=hours, colorscale='Blues', reversescale=True)
data = [trace_heatmap_total]
layout = go.Layout(
title='Rebudes (valor absolut)',
titlefont=dict(family='Arial, sans-serif',
size=24,
color='green'),
yaxis=dict(
showticklabels=True,
ticksuffix='h',
tickfont=dict(family='Old Standard TT, serif',
size=14,
color='black'),
tickmode='linear',
),
)
fig = go.Figure(data=data, layout=layout)
plot_heatmap_total = py.offline.plot(fig, include_plotlyjs=False, output_type='div')
df = pd.read_sql("select f_day, f_hour, sum(f_not_answered) as not_answered, sum(f_total) as total from dm3_callcenter_general where f_day BETWEEN \'"+date_ini+"\' AND \'"+date_fin+"\' and f_dst_id='6000' and f_hour between '07' and '22' group by f_day, f_hour order by f_day ASC, f_hour ASC", connection)
days = df['f_day'].unique()
hours = sorted(df['f_hour'].unique())
values_day = []
values_total = []
for hour in hours:
line = []
line_total = []
for day in days:
v = df.loc[(df['f_day'] == day) & (df['f_hour'] == hour), 'not_answered'].values
v_total = df.loc[(df['f_day'] == day) & (df['f_hour'] == hour), 'total'].values
if v.size >0:
line.append(v[0])
else:
line.append(0)
if v_total.size >0:
line_total.append(v_total[0])
else:
line_total.append(0)
values_day.append(line)
values_total.append(line_total)
trace_heatmap_per_day_no_answer = go.Heatmap(z=values_day, x=days, y=hours, colorscale='Reds')
data = [trace_heatmap_per_day_no_answer]
layout = go.Layout(
title='No contestades per dia (valor absolut)',
titlefont=dict(family='Arial, sans-serif',
size=24,
color='green'),
yaxis=dict(
showticklabels=True,
ticksuffix='h',
tickfont=dict(family='Old Standard TT, serif',
size=14,
color='black'),
tickmode='linear',
),
)
fig = go.Figure(data=data, layout=layout)
plot_heatmap_per_day_no_answer = py.offline.plot(fig, include_plotlyjs=False, output_type='div')
trace_heatmap_per_day_total = go.Heatmap(z=values_total, x=days, y=hours, colorscale='Blues', reversescale=True)
data = [trace_heatmap_per_day_total]
layout = go.Layout(
title='Rebudes per dia (valor absolut)',
titlefont=dict(family='Arial, sans-serif',
size=24,
color='green'),
yaxis=dict(
showticklabels=True,
ticksuffix='h',
tickfont=dict(family='Old Standard TT, serif',
size=14,
color='black'),
tickmode='linear',
),
)
fig = go.Figure(data=data, layout=layout)
plot_heatmap_per_day_total = py.offline.plot(fig, include_plotlyjs=False, output_type='div')
return plot_distrib, plot_abs_values, plot_heatmap_no_answer, plot_heatmap_total, plot_heatmap_per_day_no_answer, plot_heatmap_per_day_total
except Error as e:
print("Error while connecting to MySQL", e)
""" Callcenter plots representing evolution
usage::
>>> import plots
>>> plots = plots_callcenter_evo()
        >>> render (request, 'someViewWithPlot.html', {'plot_name': plots[1]})
:param: None
:rtype: array of plots
"""
def plots_callcenter_evo():
try:
connection = connections['datawarehouse']
current_month = datetime.date.today().strftime('%Y%m')
sql = 'SELECT f_month, sum(f_total) as total, sum(f_answered) as answered, sum(f_not_answered) as not_answered, sum(f_answered)/sum(f_total) as percent_answered, sum(f_not_answered)/sum(f_total) as percent_not_answered FROM datawarehouse.dm3_callcenter_general WHERE f_month < ' + current_month +' GROUP BY f_month ORDER BY f_month ASC'
df = pd.read_sql(sql, connection)
df['f_month_order'] = np.arange(len(df))
trace_total = go.Scatter(x=df['f_month_order'],
y=df['total'],
mode='lines',
name='Rebudes per mes',
line = dict(
color = ('blue'),
dash = 'solid'),)
trace_answered = go.Scatter(x=df['f_month_order'],
y=df['answered'],
mode='lines',
name='Contestades per mes',
line = dict(
color = ('green'),
dash = 'solid',),)
trace_not_answered = go.Scatter(x=df['f_month_order'],
y=df['not_answered'],
mode='lines',
name='No contestades per mes',
line = dict(
color = ('red'),
dash = 'solid',),)
        linear_x = np.arange(0, len(df)).reshape(-1, 1)
poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(linear_x)
pol_reg = LinearRegression()
pol_reg.fit(X_poly, df['total'])
predicted_y_total = pol_reg.predict(poly_reg.fit_transform(linear_x))
pol_reg.fit(X_poly, df['answered'])
predicted_y_answered = pol_reg.predict(poly_reg.fit_transform(linear_x))
pol_reg.fit(X_poly, df['not_answered'])
predicted_y_not_answered = pol_reg.predict(poly_reg.fit_transform(linear_x))
trace_regression_total = go.Scatter(x=df['f_month_order'],
y=predicted_y_total,
mode='lines',
name='Tendencia trucades rebudes',
line = dict(
color = ('blue'),
dash = 'dot'),)
trace_regression_answered = go.Scatter(x=df['f_month_order'],
y=predicted_y_answered,
mode='lines',
name='Tendencia trucades contestades',
line = dict(
color = ('green'),
dash = 'dot'),)
trace_regression_not_answered = go.Scatter(x=df['f_month_order'],
y=predicted_y_not_answered,
mode='lines',
name='Tendencia trucades no contestades',
line = dict(
color = ('red'),
dash = 'dot'),)
data = [trace_total, trace_regression_total, trace_answered, trace_regression_answered, trace_not_answered, trace_regression_not_answered]
layout = go.Layout(
title='Evolució del número de trucades per mes',
titlefont=dict(family='Arial, sans-serif',
size=24,
color='green'),
xaxis=dict(showticklabels=True,
tickangle=60,
tickfont=dict(family='Old Standard TT, serif',
size=10,
color='black'),
showexponent='none',
tickmode = 'array',
tickvals = df['f_month_order'],
ticktext = df['f_month']),
yaxis=dict(titlefont=dict(family='Arial, sans-serif',
size=18,
color='lightgrey'),
showticklabels=True,
tickfont=dict(family='Old Standard TT, serif',
size=14,
color='black'),
showexponent='none'))
fig = go.Figure(data=data, layout=layout)
plot_absolute = py.offline.plot(fig, include_plotlyjs=False, output_type='div')
trace_visits_casual = go.Scatter(x=df['f_month_order'],
y=df['percent_answered'],
mode='lines',
name='Contestades',
stackgroup='one',
groupnorm='percent')
trace_visits_fidelizied = go.Scatter(x=df['f_month_order'],
y=df['percent_not_answered'],
mode='lines',
name='No contestades',
stackgroup='one')
data = [trace_visits_casual, trace_visits_fidelizied]
layout = go.Layout(
title='Distribució contestades vs no contestades mes a mes',
titlefont=dict(family='Arial, sans-serif',
size=24,
color='green'),
xaxis=dict(showticklabels=True,
tickangle=60,
tickfont=dict(family='Old Standard TT, serif',
size=10,
color='black'),
showexponent='none',
tickmode = 'array',
tickvals = df['f_month_order'],
ticktext = df['f_month']),
yaxis=dict(titlefont=dict(family='Arial, sans-serif',
size=18,
color='lightgrey'),
showticklabels=True,
tickfont=dict(family='Old Standard TT, serif',
size=14,
color='black'),
showexponent='none'))
fig = go.Figure(data=data, layout=layout)
plot_distrib_percent = py.offline.plot(fig, include_plotlyjs=False, output_type='div')
return plot_absolute, plot_distrib_percent, plot_absolute
except Error as e:
print("Error while connecting to MySQL", e)
def plots_ext_performance(p_date_ini, p_date_fin):
try:
connection = connections['datawarehouse']
# format date to use in a between condition: YYYYMMDD
date_ini = datetime.datetime.strptime(str(p_date_ini), "%d/%m/%Y").strftime("%Y/%m/%d")
date_fin = datetime.datetime.strptime(str(p_date_fin), "%d/%m/%Y").strftime("%Y/%m/%d")
sql = "SELECT f_extension as extension, sum(f_answered) as answered, sum(f_spoken_time) as spoken_time, sum(f_spoken_time)/sum(f_answered) as time_per_call FROM dm3_callcenter_per_extension WHERE f_day BETWEEN \'"+date_ini+"\' AND \'"+date_fin+"\' and f_extension IN (100, 101, 102, 104, 111, 112) GROUP BY f_extension"
df = pd.read_sql(sql, connection)
lines = []
line = {}
calls = 0
spoken_time = 0
time_per_call = 0
total = 0
for index, row in df.iterrows():
calls += row[1]
spoken_time += row[2]
time_per_call += row[3]
total += 1
line = {'extension': row[0], 'answered': f'{row[1]:10.0f}', 'spoken_time': f'{row[2]/3600:10.2f}', 'time_per_call': f'{row[3]/60:10.2f}'}
lines.append(line)
# add averages
if total > 0:
line = {'extension': 'promig', 'answered': f'{calls/total:10.0f}', 'spoken_time': f'{spoken_time/(3600*total):10.2f}', 'time_per_call': f'{time_per_call/(60*total):10.2f}'}
lines.append(line)
sql = "SELECT f_extension as extension, f_day, f_hour, sum(f_answered) as answered, sum(f_spoken_time) as spoken_time, sum(f_spoken_time)/sum(f_answered) as time_per_call FROM dm3_callcenter_per_extension WHERE f_day BETWEEN \'"+date_ini+"\' AND \'"+date_fin+"\' and f_extension IN (100, 101, 102, 104, 111, 112) GROUP BY f_extension, f_day, f_hour ORDER BY f_extension, f_day, f_hour"
df = pd.read_sql(sql, connection)
extensions = sorted(df['extension'].unique())
days = sorted(df['f_day'].unique())
hours = sorted(df['f_hour'].unique())
plots = []
for extension in extensions:
values_day = []
for hour in hours:
line = []
for day in days:
v = df.loc[(df['extension'] == extension) & (df['f_day'] == day) & (df['f_hour'] == hour), 'answered'].values
if v.size >0:
line.append(v[0])
else:
line.append(0)
values_day.append(line)
trace_heatmap_per_extension = go.Heatmap(z=values_day, x=days, y=hours, colorscale='Reds', name=extension)
graph_title = 'Trucades ateses per l\'extensió ' + extension
layout = go.Layout(
title=graph_title,
titlefont=dict(family='Arial, sans-serif',
size=24,
color='green'),
yaxis=dict(
showticklabels=True,
ticksuffix='h',
tickfont=dict(family='Old Standard TT, serif',
size=14,
color='black'),
tickmode='linear',
),
)
heatmap_data = [trace_heatmap_per_extension]
fig = go.Figure(data=heatmap_data, layout=layout)
plots.append(py.offline.plot(fig, include_plotlyjs=False, output_type='div'))
return lines, plots
except Error as e:
print("Error while connecting to MySQL", e)
| [
"plotly"
] |
8fefca3afd7864bd1ae1df08b3c1e58f494d01b4 | Python | antoniosj/data-science-playground | /machine-learning-az/data_processing_template.py | UTF-8 | 1,923 | 3.609375 | 4 | [] | no_license | # data preprocessing
# importing libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# importing dataset
dataset = pd.read_csv('Data.csv')
# creates matrix of independents variables until last column minus 1
X = dataset.iloc[:, :-1].values
# creates matrix of dependent variable. The last column.
lastColumn = 3
Y = dataset.iloc[:, lastColumn].values
"""
# fill in missing values with the column mean
from sklearn.preprocessing import Imputer
imputer = Imputer(missing_values = 'NaN', strategy = 'mean', axis = 0)
# fit columns 1 and 2 (index-based)
imputer = imputer.fit(X[:, 1:3])
X[:, 1:3] = imputer.transform(X[:, 1:3])"""
"""
# Encoding categorical data
# Columns that share a common pattern (e.g. 10 rows with 3 country values) are first
# encoded as 0, 1 and 2 and then expanded into binary columns such as (0, 0, 1), so the
# model does not assume one category is greater than another just because of its label
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
# X = independent columns and X[:, 0] = all rows of column 0
labelEncoder_X = LabelEncoder()
X[:, 0] = labelEncoder_X.fit_transform(X[:, 0])
# OneHotEncoder turns the categories into a table of 0/0/1 columns
oneHotEncoder = OneHotEncoder(categorical_features = [0])
# remember to format the values as .0f
X = oneHotEncoder.fit_transform(X).toarray()
# Y = dependent column
labelEncoder_Y = LabelEncoder()
# since this column has only two values (0 and 1), OneHotEncoder is not needed.
Y = labelEncoder_Y.fit_transform(Y)
"""
# Splitting the dataset into training set and Test set
from sklearn.model_selection import train_test_split
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size = 0.2, random_state = 0)
# Feature scaling
# Standardization or normalization
from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)
| [
"matplotlib"
] |
4b56ae0997fede4b989912d881f79888d454d73a | Python | qapquiz/TSP_With_GA | /main.py | UTF-8 | 4,965 | 3.296875 | 3 | [] | no_license | from Town import Town
from costMatrix import CostMatrix
from salesMan import SalesMan
import random
#import matplotlib.pyplot as plt
#initial variables for the Genetic Algorithm
MAX_ITERATION = 1000   # number of generations
MAX_POPULATION = 20    # population size
PC = 0.8               # crossover probability
PM = 0.05              # mutation probability
#create list of object town
townList = list()
townData = open("town.txt", 'r')
while townData:
line = townData.readline().split()
if (line == []):
break;
townList.append(Town(line[0], line[1], line[2]))
#createCostMatrix
costMatrixDict = CostMatrix(townList).createCostMatrix()
#create population
populationList = list()
for i in range(20):
populationList.append(SalesMan())
populationList[i].randomFirstPath(townList)
populationList[i].calculateFitness(costMatrixDict, townList)
#start iteration
iteration = 1
while iteration <= MAX_ITERATION:
#roulette selection
#sort populationList
populationList = sorted(populationList, key=lambda population: population.fitness)
#end sort populationList
sumFitness = 0
for population in populationList:
sumFitness = sumFitness + population.getFitness()
probability = 0
sumProbabilities = 0
for population in populationList:
population.setProbability(sumProbabilities + ((float(population.getFitness()) / float(sumFitness))))
sumProbabilities += population.getProbability() - sumProbabilities
population.setProbability(1 - population.getProbability())
#print "fitness: " + str(population.getFitness())
#print "prob: " + str(population.getProbability())
#selection phase
populationList = populationList[::-1]
populationIndex = 0
populationIndexSelectionList = list()
#/2
while (len(populationIndexSelectionList) != MAX_POPULATION):
randNumber = random.uniform(0, 1)
for population in populationList:
if randNumber < population.getProbability():
populationIndexSelectionList.append(populationIndex)
break
#if populationIndex not in populationIndexSelectionList:
# populationIndexSelectionList.append(populationIndex)
# break
populationIndex = populationIndex + 1
populationIndex = 0
populationSelectionList = list()
#/2
for i in range(MAX_POPULATION):
populationSelectionList.append(populationList[populationIndexSelectionList[i]])
#print "SelectionList: " + str(populationIndexSelectionList)
#end selection phase
#crossover phase
crossoverCount = 0
while crossoverCount < (MAX_POPULATION):
parent1 = populationSelectionList[crossoverCount]
parent2 = populationSelectionList[crossoverCount+1]
#occur crossover
if random.uniform(0, 1) < PC:
child1 = SalesMan()
child2 = SalesMan()
#child1
child1.setPath(parent1.getPath()[:100])
while len(child1.getPath()) != len(townList):
parent2Path = parent2.getPath()
for town in parent2Path:
if town not in child1.getPath():
child1.getPath().append(town)
#child2
child2.setPath(parent2.getPath()[:100])
while len(child2.getPath()) != len(townList):
parent1Path = parent1.getPath()
for town in parent1Path:
if town not in child2.getPath():
child2.getPath().append(town)
populationList.append(child1)
populationList.append(child2)
#not occur crossover then copy parent to child
else:
child1 = SalesMan()
child2 = SalesMan()
child1.setPath(parent1.getPath())
child1.setFitness(parent1.getFitness())
child2.setPath(parent2.getPath())
child2.setFitness(parent2.getFitness())
populationList.append(child1)
populationList.append(child2)
crossoverCount = crossoverCount + 2
#print "========================================================================="
#print child1.getPath()
child1.calculateFitness(costMatrixDict, townList)
#print child1.getFitness()
#end crossover phase
#end roulette selection
#print "len of populationList: " + str(len(populationList))
calculateFitnessCount = 20
while calculateFitnessCount < len(populationList):
populationList[calculateFitnessCount].calculateFitness(costMatrixDict, townList)
calculateFitnessCount = calculateFitnessCount + 1
mutationIndex = 0
for population in populationList:
if mutationIndex >= 20:
pathIndex = 0
pathIndexList = list()
for path in population.getPath():
if random.uniform(0, 1) < PM:
pathIndexList.append(pathIndex)
if len(pathIndexList) == 2:
temp = population.getPath()[pathIndexList[0]]
population.getPath()[pathIndexList[0]] = population.getPath()[pathIndexList[1]]
population.getPath()[pathIndexList[1]] = temp
pathIndexList = list()
pathIndex = pathIndex + 1
mutationIndex = mutationIndex + 1
#sort populationList
populationList = sorted(populationList, key=lambda population: population.fitness)
#end sort populationList
#for population in populationList:
# print population.getFitness()
delIndex = 20
while delIndex < len(populationList):
del populationList[delIndex]
iteration = iteration + 1
#end iteration
print(populationList[0].getPath())
print("Total distance: " + str(populationList[0].getFitness()))
#return Answer | [
"matplotlib"
] |
e6075aefd576879dd36f907009922e0cfac9657a | Python | Tusharcoder18/Sea-Level-Predictor | /sea_level_predictor.py | UTF-8 | 1,236 | 3.390625 | 3 | [] | no_license | import pandas as pd
import matplotlib.pyplot as plt
from scipy.stats import linregress
def draw_plot():
# Read data from file
df = pd.read_csv('epa-sea-level.csv')
# Create scatter plot
plt.scatter(x='Year', y='CSIRO Adjusted Sea Level', data=df)
# Create first line of best fit
lin_result = linregress(x=df['Year'], y=df['CSIRO Adjusted Sea Level'])
slope = lin_result.slope
intercept = lin_result.intercept
year_extended = pd.Series([int(i) for i in range(1880, 2050)])
best_fit1 = slope * year_extended + intercept
plt.plot(year_extended, best_fit1, 'r')
# Create second line of best fit
recent = df[df['Year'] >= 2000]
lin_result = linregress(x=recent['Year'], y=recent['CSIRO Adjusted Sea Level'])
slope = lin_result.slope
intercept = lin_result.intercept
year_extended = pd.Series([int(i) for i in range(2000, 2050)])
best_fit2 = intercept + slope * year_extended
plt.plot(year_extended, best_fit2, 'g')
# Add labels and title
plt.xlabel('Year')
plt.ylabel('Sea Level (inches)')
plt.title('Rise in Sea Level')
# Save plot and return data for testing (DO NOT MODIFY)
plt.savefig('sea_level_plot.png')
return plt.gca() | [
"matplotlib"
] |
d20403de3a82124f0bc1b9c065774658c47e245a | Python | Karel-van-de-Plassche/petra-plot | /DSC.py | UTF-8 | 3,853 | 3.09375 | 3 | [
"MIT"
] | permissive | import matplotlib as mpl
from matplotlib import cycler
import matplotlib.pyplot as plt
plt.style.use('./paper.mplstyle') # Choose the settings file to use
import numpy as np
from IPython import embed
import pandas as pd
from itertools import chain
import os
def load_file(filename):
with open(filename, 'r', encoding = "ISO-8859-1") as file_:
lines = file_.readlines() # Read the complete file
dfs = []
while len(lines) > 0: # Keep reading until all chunks are read
        name_ind = lines.index('Curve Name:\n') # Look for the 'Curve Name:' header
        del lines[name_ind]                     # Drop the header line...
        curve_name = lines[name_ind].strip()    # ...so the same index now holds the curve name itself
print(curve_name)
start_ind = lines.index('Curve Values:\n') # Values start after 'Curve Values'
column_names = lines[start_ind+1].split() # The line after that are the column names
column_units = lines[start_ind+2].split() # And after that the units
results_ind = lines.index('Results:\n') # The values stop when we find 'Results:'
df = pd.DataFrame(np.loadtxt(lines[start_ind+3:results_ind]),
columns=column_names) # Now put it in a table
df.set_index('Tr', inplace=True) # The 'x-axis' is Tr
dfs.append(df['Value']) # And we only need the Value column
try:
end_ind = lines.index('Curve Name:\n') # Try to find the next chunk
except ValueError:
end_ind = len(lines) # If we can't find it, we're done!
del lines[:end_ind]
#results = pd.concat(dfs, axis=1) # Now, merge all chuncks together
heating = pd.concat(dfs[1::2], axis=1) # Merge all heating chunks
cooling = pd.concat(dfs[0::2], axis=1) # And all cooling chunks
for set in chain([heating, cooling]): # For both heating and cooling
set.columns = reversed(range(1, len(set.columns) + 1)) # Number them N..1
set.index.name = 'Temperature [$\degree$C]' # And rename the x-axis
heating.columns = ['Heating ' + str(col) for col in heating.columns] # Now prepend Heating to the column names
cooling.columns = ['Cooling ' + str(col) for col in cooling.columns] # And Cooling
#labels = []
#for ii in range(1, len(results.columns) // 2 + 1):
# labels.append('Heating ' + str(ii))
# labels.append('Cooling ' + str(ii))
#labels = list(reversed(labels))
#results.columns = labels
return heating, cooling
def plot_heating_cooling(heating, cooling, shift=0.2, base_shift=0.0):
fig = plt.figure()
ax = fig.add_subplot(111)
for ii in range(1, len(cooling.columns) + 1):
cooling.iloc[:, ii:] = cooling.iloc[:, ii:] + shift # Shift all curves, the 2nd one the most
cooling += base_shift # And shift all columns a set amount
for ii in range(1, len(heating.columns) + 1):
heating.iloc[:, ii:] = heating.iloc[:, ii:] - shift
heating -= base_shift
for ii in range(len(heating.columns)):
for set in chain([heating, cooling]): # Now plot Heating ii and Cooling ii in pairs
ax.plot(set.iloc[:, ii], label=set.iloc[:, ii].name)
ax.legend() # Plot the legend
#cooling.plot(ax=ax)
#heating.plot(ax=ax)
cmap = plt.get_cmap('tab20') # Choose the colors by name: https://matplotlib.org/examples/color/colormaps_reference.html
plt.rc('axes', prop_cycle=(cycler('color', cmap.colors)))
root = 'DSC' # This is the main folder to look for files
for filename in os.listdir(root): # For every folder in the root folder
if filename.endswith('.txt'): # If it ends with .txt
path_to_file = os.path.join(root, filename)
heating, cooling = load_file(path_to_file) # Read the file and put it in a table
plot_heating_cooling(heating, cooling, shift=0.2, base_shift=0.0) # And plot the curves
plt.show()
| [
"matplotlib"
] |
602036d1f09959ff9dbdad8a43cf2c26d17446d1 | Python | marcinu456/Modelowanie-Komputerowe | /Nowy folder (2)/Nowy folder/WykresyPython/zad3.py | UTF-8 | 1,258 | 3.390625 | 3 | [] | no_license | import sys
import numpy as np
from numpy.lib import median
import matplotlib.pyplot as plt
def gauss(data):
    mean = sum([ k*v for k, v in data.items()])/sum(data.values()) # mean
    mse = sum([v*(k - mean)**2 for k, v in data.items()])/sum(data.values()) # variance
    rmse = np.sqrt(mse) # standard deviation
    mx = max([ v for k, v in data.items()])
    med = np.median([ k*v for k, v in data.items()])
    ret = [ (mx * np.exp( -(((x - med))**2)/(2*mse) )) for x in data.keys()] # normal distribution
    print("the children are most likely within ", 3*rmse, " steps")
return ret
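# NOTE (added): the printed bound relies on the 68-95-99.7 rule: for a normal
# distribution about 99.7% of the probability mass lies within mean +/- 3*sigma,
# which is why 3*rmse is reported as the practical maximum distance.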
def main():
x = np.loadtxt('spacery/odlegloscPo1000.txt', unpack=True)
valCount = dict()
for pos in x:
if pos in valCount:
valCount[pos] += 10
else:
valCount[pos] = 0
valCount = {k: v for k, v in sorted(valCount.items(), key=lambda item: item[0])}
plt.plot(valCount.keys(), gauss(valCount))
plt.xlabel("X")
plt.ylabel("dzieci")
if __name__ == "__main__":
main()
plt.legend()
plt.show()
# It is more likely that a child ends up close to distance 1
# The 3-sigma distance can be taken as the boundary within which
# 99.7% of all the children are found
| [
"matplotlib"
] |
69dc68b6961443954bc6396ec43c203db782a761 | Python | edfong/npl | /experiments/LogReg_ARD/run_NPL_logreg.py | UTF-8 | 2,971 | 2.71875 | 3 | [
"BSD-3-Clause"
] | permissive | """
main script for running NPL
"""
import numpy as np
import scipy as sp
import pandas as pd
import matplotlib.pyplot as plt
import copy
import time
from npl import bootstrap_logreg as bbl
import pickle
def load_data(dataset,seed):
#load polish
if dataset == 'Polish':
year = 3
with open('./data/pc_train_y{}_seed{}'.format(year,seed), 'rb') as handle:
pc_train = pickle.load(handle)
#Move into vectors
y = pd.to_numeric(pc_train['y'].values[:,0])
x = pc_train['x'].values
D_data = pc_train['D']
N_data = pc_train['N']
#prior and loss settings from paper
alph_conc = 0 #prior strength
gamma = 1/N_data #loss scaling relative to log-likelihood
#load adult
if dataset == 'Adult':
with open('./data/ad_train_seed{}'.format(seed), 'rb') as handle:
ad_train = pickle.load(handle)
#Move into vectors
y = np.uint8(ad_train['y'])[:,0]
x = ad_train['x'].values
D_data = ad_train['D']
N_data = ad_train['N']
#prior and loss settings from paper
alph_conc = 0
gamma = 1/N_data
#load arcene
if dataset == 'Arcene':
with open('./data/ar_train_seed{}'.format(seed), 'rb') as handle:
ar_train = pickle.load(handle)
N_data = ar_train['N']
D_data = ar_train['D']
y = np.int8(ar_train['y'].values.reshape(N_data,))
x = ar_train['x'].values
#prior and loss settings from paper
alph_conc = 1
gamma = 1/N_data
return y,x,alph_conc,gamma,N_data,D_data
def main(dataset, B_postsamples):
#same parameters between datasets
T_trunc = 100
a=1
b = 1 #rate of gamma hyperprior
for i in range(30):
seed = 100+i
np.random.seed(seed)
y,x,alph_conc,gamma,N_data,D_data = load_data(dataset,seed)
start= time.time()
#carry out posterior bootstrap
beta_bb, ll_b = bbl.bootstrap_logreg(B_postsamples,alph_conc,T_trunc,y,x,N_data,D_data,a,b,gamma)
end = time.time()
print ('Time elapsed = {}'.format(end - start))
#convert to dataframe and save
dict_bb = {'beta': beta_bb, 'll_b': ll_b, 'time': end-start}
par_bb = pd.Series(data = dict_bb)
#Polish
if dataset == 'Polish':
par_bb.to_pickle('./parameters/par_bb_logreg_c{}_a{}_b{}_gN_pol_B{}_seed{}'.format(alph_conc,a,b,B_postsamples,seed))
#Adult
if dataset == 'Adult':
par_bb.to_pickle('./parameters/par_bb_logreg_c{}_a{}_b{}_gN_ad_B{}_seed{}'.format(alph_conc,a,b,B_postsamples,seed))
#Arcene
if dataset == 'Arcene':
par_bb.to_pickle('./parameters/par_bb_logreg_c{}_a{}_b{}_gN_ar_B{}_seed{}'.format(alph_conc,a,b,B_postsamples,seed))
if __name__=='__main__':
main('Polish',2000)
main('Adult',2000)
main('Arcene',2000)
| [
"matplotlib"
] |
fcf7e4efb410c260f3b0ae77d795ab050cb568dc | Python | shadimohagheghi/Generative-Models-for-Multigroup-Connectivity-Structures | /Liaison_model.py | UTF-8 | 5,124 | 2.578125 | 3 | [] | no_license | import networkx as nx
from allfunctions import draw_degdist
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from almost_clique import almost_clique
from numpy import inf
import matplotlib.pyplot as plt
matplotlib.rc('xtick', labelsize=25)
matplotlib.rc('ytick', labelsize=25)
plt.close("all")
##########################################################
##########################################################
def Liaison_model():
clique_size = [3,4,5,6,7,8,9,10]; # clique sizes
clique_edges=np.asarray(clique_size)
clique_edges = np.divide(np.multiply(clique_edges,clique_edges+1),2)
ep = 0.1;
clique_num = np.zeros(len(clique_size)); # number of cliques of given size
for i in range(len(clique_size)):
clique_num[i] = int(2.5321*(50.0/(clique_size[i])**3));
clique_num = map(int,clique_num)
# comment this next line to generate a small sample graph easy to visualize
#clique_num = [0,7,3,1,0,0,0,0]
###########################################################
###########################################################
def li_num(size): # number of liaisons given clique size
maxli = 2; p = float(size)**2/200;
a = np.random.binomial(maxli, p, size=1) +1
return a
###########################################################
###########################################################
# almost cliques are generated
Gclique,li,li_cliquewise,clique_sizes,all_liaisons,clique_lead,nstart = almost_clique(clique_size,clique_num,li_num)
    print len(clique_lead), 'cliques formed'
################################################
#### liaison model #############################
m = 2; ext_li = int(float(sum(clique_num))/m); # deciding number of external liaisons
Gli = nx.barabasi_albert_graph(ext_li,2); #only option for external liaison model
print "Barabasi Albert Graph = ", Gli.edges()
plt.figure(1)
nx.draw(Gli, pos=nx.circular_layout(Gli))
limodel_deglist = np.zeros(len(Gli));
for i in range(len(Gli)):
limodel_deglist[i] = len(Gli[i])
ord_limodel = sorted(range(len(limodel_deglist)),key=lambda x:limodel_deglist[x])
print "ord_limodel = ", ord_limodel
clique_ext_list = np.zeros(sum(clique_num))
for i in range(sum(clique_num)): # randomly assign cliques to external liaisons
clique_ext_list[i] = np.random.randint(ext_li);
print "clique_ext_list = ", clique_ext_list
cliquenodes = len(Gclique);
for i in range(len(Gli)):
for j in range(len(Gli)):
if j in Gli[i]:
Gclique.add_edge(cliquenodes+i,cliquenodes+j)
for i in range(ext_li):
dums = np.where(clique_ext_list==i);
for j in range(len(dums[0])):
for k in range(len(li_cliquewise[dums[0][j]])):
Gclique.add_edge(cliquenodes+i,li_cliquewise[dums[0][j]][k])
degthis,a1,a2 = draw_degdist(Gclique,1,'b',0)
figcolor = 'b'
plt.figure(2)
plt.scatter((a1),(a2),c=figcolor,marker='o',s=400,alpha=0.5)
plt.plot((a1),(a2),linewidth=2,c=figcolor)
plt.xlabel('node degree',fontsize=30)
plt.ylabel('number of nodes',fontsize=30)
plt.axis([-2, 45, -19, 670])
clique_list = [];
for i in range(len(clique_size)):
dum = np.linspace(clique_size[i],clique_size[i], clique_num[i])
clique_list = np.hstack((clique_list,dum ))
colors = []; c = 0;
for i in range(len(clique_list)):
colors.extend(np.linspace(c,c,clique_list[i]))
c = c + 1
for i in range(ext_li):
colors.append(20); c = c + 1
#pos=nx.spring_layout(Gclique,iterations=200)
posx = []; posy = [];
for i in range(len(clique_list)):
centerx = np.cos(2*np.pi*i/len(clique_list))
centery = np.sin(2*np.pi*i/len(clique_list))
x1 = []; y1 = [];
for j in range(int(clique_list[i])):
x1.append(centerx + 0.2*np.cos(2*np.pi*j/clique_list[i]))
y1.append(centery + 0.2*np.sin(2*np.pi*j/clique_list[i]))
posx.extend(x1); posy.extend(y1);
print ext_li
x1 = []; y1 = [];
print "ext_li=",ext_li
for j in range(ext_li):
x1.append(0.5*np.cos(2*np.pi*j/ext_li))
y1.append(0.5*np.sin(2*np.pi*j/ext_li))
posx.extend(x1); posy.extend(y1);
pos = np.transpose(np.vstack((posx,posy)))
plt.figure(3)
nx.draw(Gclique,pos,node_color=colors,node_size=800,cmap=plt.cm.Blues)
plt.show()
print 'diameter of liaison network is', nx.diameter(Gclique)
print 'avg clustering coeff is', nx.average_clustering(Gclique)
print 'avg shortest path length', nx.average_shortest_path_length(Gclique)
plt.show()
return Gclique
Gclique = Liaison_model()
| [
"matplotlib"
] |
6c8f7a3a3b183b25e1439c26e1f4756cea5a3f94 | Python | petercunning/notebook | /pcolor.py | UTF-8 | 1,441 | 2.875 | 3 | [
"GFDL-1.1-only",
"MIT"
] | permissive | # -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <codecell>
import numpy as np
from iris.cube import Cube
from iris.coords import DimCoord
# <codecell>
def create_cube():
lon1d = np.arange(5)
lat1d = np.arange(4)
data = np.random.random((len(lat1d),len(lon1d)))
cube = Cube(data)
lon = DimCoord(lon1d, standard_name='longitude',
units='degrees', circular=False)
lat = DimCoord(lat1d, standard_name='latitude',
units='degrees')
cube.add_dim_coord(lon, 1)
cube.add_dim_coord(lat, 0)
return cube
# <codecell>
cube = create_cube()
# <codecell>
x = cube.coord(axis='X')
x.guess_bounds()
x
# <codecell>
y = cube.coord(axis='Y')
y.guess_bounds()
y
# <codecell>
%matplotlib inline
import matplotlib.pyplot as plt
plt.pcolormesh(x.points, y.points, cube.data)
# <codecell>
import iris.quickplot as qplt
cs = qplt.pcolormesh(cube)
# <codecell>
import cartopy.crs as ccrs
from cartopy.mpl.ticker import LongitudeFormatter, LatitudeFormatter
fig, ax = plt.subplots(subplot_kw=dict(projection=ccrs.PlateCarree()))
cs = qplt.pcolormesh(cube)
ax.set_xticks(x.points, crs=ccrs.PlateCarree())
ax.set_yticks(y.points, crs=ccrs.PlateCarree())
lon_formatter = LongitudeFormatter(zero_direction_label=True)
lat_formatter = LatitudeFormatter()
ax.xaxis.set_major_formatter(lon_formatter)
ax.yaxis.set_major_formatter(lat_formatter)
# <codecell>
# <codecell>
| [
"matplotlib"
] |
7f1e595c478d60f368d15ac1a9695929608497b3 | Python | a2liu/mosquitoSim | /run3.py | UTF-8 | 869 | 3.71875 | 4 | [] | no_license | import time
import matplotlib
from sim import population
#This script simulates the decay of a set of populations that have varying attributes
def getPopAttributes(iteration):
#Determine population attributes based on iteration number
# Initial population size
popSize = 10000
# percentage of initial pop. infected
perInfect = .5
# % chance that offspring of an infected male is male
ratio = .5
# Growth of population per generation. 0 means no growth, 1 means double every year, etc.
growth = 6
return (popSize, perInfect, ratio, growth)
start = time.process_time()
# Amount of populations to simulate
popNum = 1000
# Max iteration count
maxIter = 10000
for x in range(1, popNum):
pop = population(*getPopAttributes(x))
print(x,pop.decay(maxIter))
stop = time.process_time()
print("Time Elapsed:", stop - start)
| [
"matplotlib"
] |
4d33c2137c8938cbaad4e1fc436f8e291d230790 | Python | mmilunovic/bin-genetic-algorithm | /bin-gen-algorithm.py | UTF-8 | 7,422 | 2.96875 | 3 | [] | no_license | # %matplotlib inline
import random
import numpy as np
import matplotlib.pyplot as plt
import math
from mpl_toolkits import mplot3d
pi = 3.1415
def levy_function(chromosome):
x = chromosome[0]
y = chromosome[1]
tmp1 = math.pow(math.sin(3*pi*x), 2)
tmp2 = math.pow((x - 1), 2) * (1 + math.pow(math.sin(3*pi*y), 2))
tmp3 = math.pow((y - 1), 2) * (1 + math.pow(math.sin(2*pi*y), 2))
return tmp1 + tmp2 + tmp3
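# Illustrative sanity check: Levy function N.13 attains its global minimum
# f(1, 1) = 0, which the genetic algorithm below tries to locate.
assert abs(levy_function([1.0, 1.0])) < 1e-6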
def l_show(x, y):
tmp1 = math.pow(math.sin(3*pi*x), 2)
tmp2 = math.pow((x - 1), 2) * (1 + math.pow(math.sin(3*pi*y), 2))
tmp3 = math.pow((y - 1), 2) * (1 + math.pow(math.sin(2*pi*y), 2))
return tmp1 + tmp2 + tmp3
levy_vectorized = np.vectorize(l_show)
x = np.linspace(-13, 13, 30)
y = np.linspace(-13, 13, 30)
X, Y = np.meshgrid(x, y)
Z = levy_vectorized(X, Y)
ax = plt.axes(projection='3d')
ax.plot_surface(X, Y, Z, rstride=1, cstride=1,
cmap='plasma', edgecolor='none')
ax.set_title('Levijeva funkcija br.13');
ax.view_init(50, 35)
def bin_encode(chromosome, bin_val, min_val, precision):
ret = ""
for g in chromosome:
val = round((g - min_val)/bin_val)
ret += bin(val)[2:].rjust(precision,'0')
return ret
def bin_encode_chromosomes(chromosomes, precision, max_val, min_val):
bin_val = (max_val - min_val) / (2**precision-1)
bin_chromosomes = [ bin_encode(c, bin_val, min_val, precision) for c in chromosomes]
return bin_chromosomes
def bin_decode(chromosome, bin_val, min_val, precision):
ret = []
for idx in range(0, len(chromosome), precision):
g = int(chromosome[idx:idx + precision], 2)
ret.append(g * bin_val + min_val)
return ret
def bin_decode_chromosomes(chromosomes, precision, max_val, min_val):
bin_val = (max_val - min_val) / (2**precision-1)
bin_chromosomes = [ bin_decode(c, bin_val, min_val, precision) for c in chromosomes]
return bin_chromosomes
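# Round-trip sketch (illustrative): encoding then decoding recovers a chromosome up
# to the quantization step (max_val - min_val) / (2**precision - 1), e.g.
#   enc = bin_encode_chromosomes([[0.5, -0.5]], 13, 10, -10)
#   dec = bin_decode_chromosomes(enc, 13, 10, -10)    # approximately [[0.5, -0.5]]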
def two_point_crossover(pairs):
    length = len(pairs[0][0])  # chromosome bit-length
children = []
for (a,b) in pairs:
r1 = random.randrange(0, length)
r2 = random.randrange(0, length)
if r1 < r2:
children.append(a[:r1] + b[r1:r2] + a[r2:])
children.append(b[:r1] + a[r1:r2] + b[r2:])
else:
children.append(a[:r2] + b[r2:r1] + a[r1:])
children.append(b[:r2] + a[r2:r1] + b[r1:])
return children
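# Example (illustrative): for the pair ("000000", "111111") with cut points 2 and 5,
# the children swap the middle segment, giving "001110" and "110001".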
def inv_mutation(chromosomes, mutation_rate):
mutated_chromosomes = []
for chromosome in chromosomes:
if random.random() < mutation_rate:
r1 = random.randrange(0, len(chromosome) - 1)
r2 = random.randrange(0, len(chromosome) - 1)
if r1 < r2:
mutated_chromosomes.append(chromosome[:r1] + chromosome[r1:r2][::-1] + chromosome[r2:])
else:
mutated_chromosomes.append(chromosome[:r2] + chromosome[r2:r1][::-1] + chromosome[r1:])
else:
mutated_chromosomes.append(chromosome)
return mutated_chromosomes
def generate_inital_chromosomes(length, max, min, pop_size):
return [ [random.uniform(min,max) for j in range(length)] for i in range(pop_size)]
def population_stats(costs):
return costs[0], sum(costs)/len(costs)
def rank_chromosomes(cost, chromosomes):
costs = list(map(cost, chromosomes))
ranked = sorted( list(zip(chromosomes,costs)), key = lambda c:c[1])
return list(zip(*ranked))
def natural_selection(chromosomes, n_keep):
return chromosomes[:n_keep]
def pairing(parents):
pairs = []
i = 0
for i in range(0, len(parents), 2):
pairs.append([parents[i], parents[i+1]])
return pairs
def genetic(cost_func , extent, population_size, mutation_rate = 0.3, chromosome_length = 2, precision = 13, max_iter = 500):
min_val = extent[0]
max_val = extent[1]
avg_list = []
best_list = []
curr_best = 10000
same_best_count = 0
chromosomes = generate_inital_chromosomes(chromosome_length, max_val, min_val, population_size)
for iter in range(max_iter):
ranked, costs = rank_chromosomes(cost_func, chromosomes)
best, average = population_stats(costs)
parents = natural_selection(ranked, population_size)
parents = bin_encode_chromosomes(parents, precision, max_val, min_val)
pairs = pairing(parents)
children = two_point_crossover(pairs)
chromosomes = parents + children
chromosomes = inv_mutation(chromosomes, mutation_rate)
chromosomes = bin_decode_chromosomes(chromosomes, precision, max_val, min_val)
print("Generation: ",iter+1," Average: {:.3f}".format(average)," Curr best: {:.3f}".format(best),
"[X, Y] = {:.3f} {:.3f}".format(chromosomes[0][0],chromosomes[0][1]))
print("-------------------------")
avg_list.append(average)
if best < curr_best:
best_list.append(best)
curr_best = best
same_best_count = 0
else:
same_best_count += 1
best_list.append(best)
if(cost_func(chromosomes[0]) < 0.05):
avg_list = avg_list[:iter]
best_list = best_list[:iter]
all_avg_list.append(avg_list)
all_best_list.append(best_list)
generations_list.append(iter)
print("\nSolution found ! Chromosome content: [X, Y] = {:.3f} {:.3f}\n".format(chromosomes[0][0],chromosomes[0][1]))
return
if same_best_count > 20:
print("\nStopped due to convergance.Best chromosome [X, Y] = {:.3f} {:.3f}\n".format(chromosomes[0][0],chromosomes[0][1]))
avg_list = avg_list[:iter]
best_list = best_list[:iter]
all_avg_list.append(avg_list)
all_best_list.append(best_list)
generations_list.append(iter)
return
if iter == 499:
avg_list = avg_list[:iter]
best_list = best_list[:iter]
all_avg_list.append(avg_list)
all_best_list.append(best_list)
generations_list.append(iter)
print("\nStopped due to max number of iterations, solution not found. Best chromosome [X, Y] = {:.3f} {:.3f}\n".format(chromosomes[0][0],chromosomes[0][1]))
def display_stats(all_avg_list, all_best_list, generations_list):
c = 0
colors = ['red', 'green', 'blue', 'yellow', 'orange']
for average_list in all_avg_list:
x_axis = list(range(generations_list[c]))
y_axis = average_list
plt.plot(x_axis, y_axis, linewidth=3, color=colors[c], label=str(c + 1))
plt.title('Average cost function value', fontsize=19)
plt.xlabel('Generation', fontsize=10)
plt.ylabel('Cost function')
c += 1
plt.legend(loc='upper right')
plt.show()
c = 0
for best_list in all_best_list:
x_axis = list(range(generations_list[c]))
y_axis = best_list
plt.plot(x_axis, y_axis, color=colors[c], label=str(c + 1))
plt.title('Best cost function value', fontsize=19)
plt.xlabel('Generation')
plt.ylabel('Cost function')
c += 1
plt.legend(loc='upper right')
plt.show()
number_of_chromosomes = [20, 100, 150]
all_avg_list = []
generations_list = []
all_best_list = []
run_number = 5
for x in number_of_chromosomes:
print("==========================")
for k in range(0, run_number):
print("\n", k + 1, ": run of genetic algorithm with ", x ," chromosomes.\n")
genetic(levy_function, [10, -10], x)
display_stats(all_avg_list, all_best_list, generations_list)
all_best_list = []
all_avg_list = []
generations_list = []
| [
"matplotlib"
] |
ef36aa10ece1fcbff5c642451ddcb9595c80b44f | Python | judyliou/CS224W-Analysis-of-Networks | /hw0/hw0_2.py | UTF-8 | 1,224 | 2.65625 | 3 | [] | no_license | import snap
import numpy as np
import matplotlib.pyplot as plt
wiki = snap.LoadEdgeList(snap.PNGraph, "wiki-Vote.txt", 0, 1, '\t')
degree = []
cnt = []
CntV = snap.TIntPr64V()
snap.GetOutDegCnt(wiki, CntV)
for i in CntV:
if i.GetVal1() != 0 and i.GetVal2() != 0:
degree.append(i.GetVal1())
cnt.append(i.GetVal2())
degree = np.array(degree)
cnt = np.array(cnt)
fig = plt.figure()
plt.figure(figsize=(12,8))
ax = plt.gca()
ax.scatter(degree, cnt, c='red', alpha=0.5, edgecolors='none', s=80)
ax.set_yscale('log')
ax.set_xscale('log')
ax.set_xlim([degree.min(), degree.max()])
ax.set_title('Out-degree Distribution', fontsize=30)
ax.set_xlabel('Degree (log)', fontsize=24)
ax.set_ylabel('Count (log)', fontsize=24)
plt.savefig('hw0_2.1.png')
degree = np.log10(degree)
cnt = np.log10(cnt)
a, b = np.polyfit(degree, cnt, 1)
print('a =', a)
print('b =', b)
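# Interpretation: on log-log axes the fitted slope a approximates the power-law
# exponent of the out-degree distribution, and 10**b its intercept.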
# Make theoretical line to plot
x = np.array([degree.min(), degree.max()])
y = a * x + b
plt.figure(figsize=(12,8))
plt.plot(degree, cnt, 'ro')
plt.plot(x, y, 'b', linewidth=3)
plt.xlabel('Degree (log)', fontsize=20)
plt.ylabel('Count (log)', fontsize=20)
plt.title('Out-degree Distribution', fontsize=30)
plt.savefig('hw0_2.2.png')
| [
"matplotlib"
] |
176b6444afaf6905ea4e304eacac8a6577e970d9 | Python | hpqcp/Dryer-Project1 | /base/data_preProcess.py | UTF-8 | 2,343 | 2.671875 | 3 | [] | no_license | import numpy as np
import pandas as pd
from pandas import DataFrame
import matplotlib.pyplot as plt
from functools import reduce
'''
Read an Excel file
_path: file path; _sheetNum: index of the sheet to read; _colNum: columns to select
Return : DataFrame
'''
def readExcel(_path,_sheetNum,_colNum=[]):
if len(_colNum) <= 0 :
dfData = pd.read_excel(_path, sheet_name=_sheetNum)
else:
dfData = pd.read_excel(_path,sheet_name=_sheetNum ,usecols=_colNum)
return dfData
'''
Compute summary statistics (Count, Max, Min, Mean, Std) for each column
_df: input DataFrame
Return : DataFrame
'''
def computeIndex(_df):
count = _df.count()
max = _df.max()
min = _df.min()
mean = _df.mean()
std = _df.std()
dfRtn = DataFrame({'Count':count,'Max':max,'Min':min,'Mean':mean,'Std':std})
return dfRtn
'''Check whether there are any missing values: True if missing values exist, False otherwise
'''
def isContainMissValue(_df):
for i in _df.isnull().any():
if i :
return True
return False
'''
Missing value handling
'''
def FillMissValue(_df):
return 0
'''
'''
def compute_ChangePoint(_series,_mode="first"):
minList = computeIndex(_series).values[:,2]
if _mode == "first" :
minIndexList = [_series[_series[i] == minList[i]].index.values[0] for i in range(0, _series.shape[1], 1)][:]
else:
minIndexList = [_series[_series[i] == minList[i]].index.values[-1] for i in range(0, _series.shape[1], 1)][:]
# minIndexList = _series[_series[0] == minList[0]].index.values[-1]
return minIndexList
# return _series
# return [_series[_series[i]>minList[2]][i] for i in range(1, len(_series) - 1, 1)][:]
# print(minList[0])
# return _series[_series[0]>minList[0] and _series[0].index ][0]
'''
'''
def wave_peakTrough(_series):
minList = computeIndex(_series).values[:, 2]
min1 = [_series[_series[i] == minList[i]].index.values[1] for i in range(0, _series.shape[1], 1)][:]
min2 = [_series[_series[i] == minList[i]].index.values[-1] for i in range(0, _series.shape[1], 1)][:]
    # moving forward from the minimum point
max1 = [_series[_series[i] ==max(_series[i])].index.values[-1] for i in range(0, _series.shape[1], 1)][:]
max2 = [_series[_series[i] == max(_series[i])].index.values[0] for i in range(0, _series.shape[1], 1)][:]
# _series[_series[i] == ]
# print(max0,max1)
    return min1, min2, max1, max2  # trough and peak indices for each column
| [
"matplotlib"
] |
34cf0ab5db0f312f1b57cc5462e535e0a9e87f69 | Python | SabyasachiNITD/DIP-LAB | /DAY2/4.py | UTF-8 | 366 | 2.609375 | 3 | [] | no_license | import cv2
import numpy as np
import matplotlib.pyplot as plt
img =cv2.imread("2nd.tif",0)
img1 = img - (img & 240)
img1 = cv2.equalizeHist(img1)
fig=plt.figure()
ax1= fig.add_subplot(1,2,1)
ax3=fig.add_subplot(1,2,2)
ax1.imshow(img,cmap="gray",interpolation=None)
ax3.imshow(img1,cmap="gray",interpolation=None)
ax1.axis("off")
ax3.axis("off")
plt.show()
| [
"matplotlib"
] |
71f6cc6134c49498124001208552b49107976581 | Python | EParisot/ft_linear_regression | /train.py | UTF-8 | 7,182 | 2.71875 | 3 | [] | no_license | import os
import random
import json
import time
import click
import matplotlib.pyplot as plt
import matplotlib.animation as animation
import time
from threading import Thread
class Trainer(object):
def __init__(self, data_file, sep, plot, model_file, epochs, learning_rate):
self.data_file = data_file
self.sep = sep
self.plot = plot
self.model_file = model_file
self.epochs = epochs
self.learning_rate = learning_rate
self.learning_rate_hist = []
self.model = {"theta_0": 0.0,
"theta_1": 0.0,
"x_min": 0,
"x_max": 0,
"y_min": 0,
"y_max": 0}
self.x_data = []
self.y_data = []
self.labels = []
self.acc = []
self.loss = []
# Read data
self.read_data()
if len(self.x_data) != len(self.y_data) or len(self.x_data) == 0:
print("Error : no valid data found in %s" % self.data_file)
exit(0)
# Read model
if len(self.model_file):
self.read_model()
def read_data(self):
if os.path.exists(self.data_file):
with open(self.data_file) as f:
for line in f:
line = line.replace('\n', '')
line_data = line.split(self.sep)
if len(line_data) == 2 and all([value.isdigit() for value in line_data]):
self.x_data.append(int(line_data[0]))
self.y_data.append(int(line_data[1]))
elif len(line_data) == 2:
self.labels.append(line_data[0])
self.labels.append(line_data[1])
self.normalise()
def normalise(self):
x_min = min(self.x_data)
x_max = max(self.x_data)
y_min = min(self.y_data)
y_max = max(self.y_data)
self.model["x_min"] = x_min
self.model["x_max"] = x_max
self.model["y_min"] = y_min
self.model["y_max"] = y_max
for i, _ in enumerate(self.x_data):
self.x_data[i] -= x_min
self.x_data[i] /= (x_max - x_min)
self.y_data[i] -= y_min
self.y_data[i] /= (y_max - y_min)
def read_model(self):
if os.path.exists(self.model_file):
with open(self.model_file, "r") as f:
check = f.read(2)
f.seek(0)
if len(check) != 0 and check[0] != "\n" and check != "{}":
data = json.load(f)
self.model["theta_0"] = data["theta_0"]
self.model["theta_1"] = data["theta_1"]
self.model["x_min"] = data["x_min"]
self.model["x_max"] = data["x_max"]
self.model["y_min"] = data["y_min"]
self.model["y_max"] = data["y_max"]
def save_model(self):
if not os.path.exists(self.model_file):
mode = "w+"
else:
mode = "w"
with open(self.model_file, mode) as f:
json.dump(self.model, f)
def animate(self):
plt.clf()
x_data, y_data = [list(t) for t in zip(*sorted(zip(self.x_data, self.y_data)))]
plt.scatter(x_data, y_data)
if len(self.labels):
plt.xlabel(self.labels[0])
plt.ylabel(self.labels[1])
# result
x1 = min(x_data)
y1 = self.estimate(x1)
x2 = max(x_data)
y2 = self.estimate(x2)
plt.plot([x1, x2], [y1, y2], c="r")
plt.twinx().twiny()
# plot learning rate history
plt.plot(self.learning_rate_hist, label="Learning Rate")
plt.legend()
plt.draw()
plt.pause(1/self.epochs)
def train(self):
theta_0 = 0.0
theta_1 = 0.0
# read model
if self.model["theta_0"] != theta_0 or self.model["theta_1"] != theta_1:
theta_0 = self.model["theta_0"]
theta_1 = self.model["theta_1"]
# process train
self.train_loop()
# write model file
self.save_model()
# plot result
if self.plot:
plt.figure("Train history")
plt.plot(self.acc, label="acc")
plt.plot(self.loss, label="loss")
plt.legend()
plt.show(block=True)
def train_loop(self):
# shuffle datas
l = list(zip(self.x_data, self.y_data))
random.shuffle(l)
x_data, y_data = zip(*l)
# loop on epochs
for epoch in range(self.epochs):
print("Training... Epoch : %d" % (epoch + 1))
loss, acc = self.train_epoch(x_data, y_data)
self.acc.append(acc)
self.loss.append(loss)
self.learning_rate_hist.append(self.learning_rate)
# print
print("loss : %f ; acc : %f" % (round(loss, 2), round(acc, 2)))
if self.plot:
self.animate()
def train_epoch(self, X, Y):
n = float(len(X))
# cost
b_vect = []
a_vect = []
for i, _ in enumerate(X):
error_b = self.estimate(X[i]) - Y[i]
b_vect.append(error_b)
error_a = error_b * X[i]
a_vect.append(error_a)
loss_b_prime = sum(b_vect)
loss_a_prime = sum(a_vect)
# gradient descent
tmp_theta_0 = self.learning_rate * loss_b_prime / n
tmp_theta_1 = self.learning_rate * loss_a_prime / n
self.model["theta_0"] -= tmp_theta_0
self.model["theta_1"] -= tmp_theta_1
# metrics
new_loss_tab = []
acc_tab = []
for i, _ in enumerate(X):
error = self.estimate(X[i]) - Y[i]
error_sq = error ** 2
new_loss_tab.append(error_sq)
acc_tab.append(1)
new_loss = sum(new_loss_tab) / n
acc = float(sum(acc_tab) / n)
# adjust LR
if len(self.loss) > 0:
if new_loss >= self.loss[-1]:
self.model["theta_0"] += self.learning_rate * tmp_theta_0 / n
self.model["theta_1"] += self.learning_rate * tmp_theta_1 / n
self.learning_rate *= 0.5
else:
self.learning_rate *= 1.05
return new_loss, acc
def estimate(self, x):
y = self.model["theta_0"] + self.model["theta_1"] * x
return y
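    # Illustrative helper (a sketch, assuming the x/y min and max stored by
    # normalise()): predict y in the original units from a raw, un-normalised x.
    def estimate_raw(self, x_raw):
        x = (x_raw - self.model["x_min"]) / (self.model["x_max"] - self.model["x_min"])
        return self.estimate(x) * (self.model["y_max"] - self.model["y_min"]) + self.model["y_min"]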
@click.command()
@click.argument("data_file", type=click.Path(exists=True))
@click.argument("model_file", default="model.json")
@click.option("-sep", "sep", default=",", help="csv separator")
@click.option("-p", "plot", is_flag=True, help="plot data")
@click.option("-e", "epochs", default=1, help="epochs to train")
@click.option("-l", "learning_rate", default=0.1, help="learning rate")
def main(data_file, sep, plot, model_file, epochs, learning_rate):
trainer = Trainer(data_file, sep, plot, model_file, epochs, learning_rate)
if trainer.plot:
plt.ion()
trainer.train()
if __name__ == "__main__":
main()
| [
"matplotlib"
] |
2b371258ebe17251c416b9ebb80255cf1e6774df | Python | Lyuyangdaisy/DS_package | /chinese/output_in_3d_plot/app_3d_graph.py | UTF-8 | 5,021 | 2.78125 | 3 | [
"MIT"
] | permissive | #coding = utf-8
# Author: Hu Baitao
#Function : app on browser drawing 3d graph
#Date : 2020-08-27
#zhuliang3000.xlsx is the sample data file
import xlrd
import plotly.graph_objs as go
import numpy as np
import dash
import dash_core_components as dcc
import dash_html_components as html
import dash_table
from dash.dependencies import Input, Output, State
import pandas as pd
#input function: prompt for the data file path
def para_input():
    print('Please enter the data file path (.xlsx file)') #D:\homework\Research 2020 S\10 cycle old alg\zhuliang3000.xlsx
filename = input()
return filename
#read the spreadsheet and process the data
def read_xlsx(filename):
# file_object = xlrd.open_workbook(filename)
# sheetnames = file_object.sheet_names()
# sheetwork = file_object.sheet_by_name(sheetnames[0])
# nrows = sheetwork.nrows
# ncols = sheetwork.ncols
# data = []
# data_title = []
#
# for i in range(ncols):
# data_title.append(sheetwork.cell_value(0,i))
# data.append(data_title)
#
# for j in range(ncols):
# new_row_data = []
# for k in range(1,nrows):
# new_row_data.append(sheetwork.cell_value(k,j))
# data.append(new_row_data)
data = pd.read_excel(filename)
return data
# display the data
def generate_table(dataframe, max_rows):
return html.Table([
html.Thead(
html.Tr([html.Th(col) for col in dataframe.columns])
),
html.Tbody([
html.Tr([
html.Td(dataframe.iloc[i][col]) for col in dataframe.columns
]) for i in range(min(len(dataframe), max_rows))
])
])
def generate_graph(dataframe, x_value, y_value, z_value):
trace = go.Scatter3d(
x=dataframe[x_value], y=dataframe[y_value], z=dataframe[z_value], mode='markers', marker=dict(
size=5,
color=dataframe[z_value], # set color to an array/list of desired values
colorscale='Viridis'
)
)
    layout = go.Layout(title='Major element analysis',
scene=dict(
xaxis_title=x_value,
yaxis_title=y_value,
zaxis_title=z_value
),
height= 800,
width= 1000
)
fig = go.Figure(data=[trace], layout=layout)
return fig
def main():
filename = para_input()
df = read_xlsx(filename)
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
app.layout = html.Div(children=[
html.H1(
children='3D Graph',
style = {
'textAlign': 'center',
# 'color': colors['text']
}
),
        html.H4(children='Major elements 3000'),
dcc.Dropdown(
id='num_row',
options=[{'label': 'show first 10 rows', 'value': 10},
{'label': 'show first 25 rows', 'value': 25},
{'label': 'show first 50 rows', 'value': 50},
{'label': 'show first 100 rows', 'value': 100}],
value=10
),
dash_table.DataTable(
id='table',
columns=[{"name": i, "id": i} for i in df.columns],
data=df.to_dict('records'),
page_size =10,
),
        html.Label('Select three major elements'),
dcc.Checklist(
id='box-section',
options=[
{'label': 'TRUE VALUE', 'value': 'TRUE VALUE'},
{'label': 'SIO2(WT%)', 'value': 'SIO2(WT%)'},
{'label': 'TIO2(WT%)', 'value': 'TIO2(WT%)'},
{'label': 'AL2O3(WT%)', 'value': 'AL2O3(WT%)'},
{'label': 'CR2O3(WT%)', 'value': 'CR2O3(WT%)'},
{'label': 'FEOT(WT%)', 'value': 'FEOT(WT%)'},
{'label': 'CAO(WT%)', 'value': 'CAO(WT%)'},
{'label': 'MGO(WT%)', 'value': 'MGO(WT%)'},
{'label': 'MNO(WT%)', 'value': 'MNO(WT%)'},
{'label': 'K2O(WT%)', 'value': 'K2O(WT%)'},
{'label': 'NA2O(WT%)', 'value': 'NA2O(WT%)'}
],
value=['TRUE VALUE', 'SIO2(WT%)','TIO2(WT%)']
),
html.Button(id='submit-button-state', n_clicks=0,children='Submit'),
dcc.Graph(
id='graph with main element',
figure= generate_graph(df,'TRUE VALUE','SIO2(WT%)','TIO2(WT%)')
)
])
@app.callback(
Output('table','page_size'),
[Input('num_row', 'value')])
def update_row_num(row_num):
return row_num
@app.callback(
Output('graph with main element', 'figure'),
[Input('submit-button-state', 'n_clicks')],
[State('box-section', 'value')])
def update_figure(n_clicks, box_value):
fig = generate_graph(df, box_value[0], box_value[1], box_value[2])
return fig
app.run_server(debug=True)
if __name__=='__main__':
main() | [
"plotly"
] |
cdf95c826bd71984c2e88dd1a9974efb53573e7e | Python | nihaomiao/PRICAI18_MVF-CasCNN | /MVFCasCNN/SaveHeatmapToFig.py | UTF-8 | 1,743 | 2.796875 | 3 | [
"MIT"
] | permissive | # Transforming heatmap Matrix to Figure and saving them
# Author: Haomiao Ni
import os
import matplotlib.pyplot as plt
from scipy.sparse import load_npz, lil_matrix
from scipy.signal import medfilt2d
import numpy as np
def run(MatPath, FigPath, heatthre, medflag):
dpi = 1000.0
FileList = os.listdir(MatPath)
FileList.sort()
plt.ioff()
fig = plt.figure(frameon=False)
for FileName in FileList:
print FileName
if os.path.splitext(FileName)[1] == '.npz':
file = os.path.join(MatPath, FileName)
heatmap = load_npz(file)
heatmap = lil_matrix(heatmap)
heatmap = np.array(heatmap.todense())
if heatthre:
# threshold 0.5
heatmap[np.logical_and(heatmap<0.5, heatmap>0)] = 0.1
if medflag:
# post processing
heatmap = medfilt2d(heatmap, (3, 3))
heatmap[0, 0] = 1.0
fig.clf()
fig.set_size_inches(heatmap.shape[1]/dpi, heatmap.shape[0]/dpi)
ax = plt.Axes(fig, [0., 0., 1., 1.])
ax.set_axis_off()
fig.add_axes(ax)
cm = plt.cm.get_cmap('jet')
ax.imshow(heatmap, cmap=cm, aspect='auto')
postfix = FileName.split('_')[-1]
FigName = FileName.replace(postfix,"FIG.jpg")
fig.savefig(os.path.join(FigPath, FigName), dpi=int(dpi))
if __name__ == "__main__":
heatthre = False # choose False to show those pixels whose predictions are less than 0.5
medflag = False # choose True to median filter heatmaps
MatPath = ''
FigPath = ""
if not os.path.exists(FigPath):
os.makedirs(FigPath)
run(MatPath, FigPath, heatthre, medflag)
| [
"matplotlib"
] |
53d6cd6a98dbe37ab48d0d21207d35e5f618d751 | Python | robertjankowski/social-media-influence-on-covid-pandemic | /scripts/visualization.py | UTF-8 | 7,338 | 2.6875 | 3 | [
"MIT"
] | permissive | import sys
import matplotlib.pyplot as plt
import matplotlib as mpl
import seaborn as sns
sys.path.append("../")
from scripts.network import degree_node_size, degree_selected_nodes_size
from scripts.virtual_layer import *
DEFAULT_CONFIG = {
'font.size': 16,
'font.family': 'sans-serif',
'font.sans-serif': ['DejaVu Sans']
}
def load_matplotlib():
plt.rcParams.update(DEFAULT_CONFIG)
plt.rc('text', usetex=True)
plt.rc('figure', figsize=(8, 6))
def save_figure(filename: str):
"""
Save matplotlib figure in correct extension
:param filename: Name of output plot
"""
extension = filename.split('.')[-1]
if extension == "png":
plt.savefig(filename, bbox_inches='tight', dpi=300)
elif extension == "pdf" or extension == "svg":
plt.savefig(filename, bbox_inches='tight')
else:
print('Error. Cannot save figure, unsupported extension: [{}]'.format(extension))
def draw_network(g: nx.Graph, ax=None, pos=None, node_size_list=None, node_size_scale=10,
edge_alpha=0.1, node_border_color='black', node_border_width=0.5):
"""
Draw nx.Graph on matplotlib axis
:param g: nx.Graph
:param ax: matplotlib canvas
:param pos: position of nodes (e.g. from nx.spring_layout(g))
:param node_size_list: list of node sizes
:param node_size_scale: float
:param edge_alpha: float
:param node_border_color: float
:param node_border_width: float
"""
if pos is None:
pos = nx.spring_layout(g)
if node_size_list is None:
node_size_list = degree_node_size(g, node_size_scale)
nx.draw_networkx_edges(g, ax=ax, alpha=edge_alpha, pos=pos, connectionstyle='arc3, rad = 0.1')
nx.draw_networkx_nodes(g, node_size=node_size_list, ax=ax, pos=pos,
edgecolors=node_border_color, linewidths=node_border_width)
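# Usage sketch (illustrative, assumes an interactive matplotlib backend):
#   g = nx.erdos_renyi_graph(100, 0.05)
#   fig, ax = plt.subplots()
#   draw_network(g, ax=ax)
#   plt.show()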
def draw_epidemic_layer(g: nx.Graph, ax=None, pos=None, node_size_scale=10, edge_alpha=0.1,
node_border_color='black', node_border_width=0.5):
if pos is None:
pos = nx.spring_layout(g)
susceptible_nodes = []
infected_nodes = []
quarantined_nodes = []
recovered_nodes = []
dead_nodes = []
for node in g.nodes:
node_status = g.nodes[node]['l1_status']
if node_status is None:
print('Node should have `l1_status` field. Exiting...')
return
if node_status == 'S':
susceptible_nodes.append(node)
elif node_status == 'I':
infected_nodes.append(node)
elif node_status == 'Q':
quarantined_nodes.append(node)
elif node_status == 'R':
recovered_nodes.append(node)
elif node_status == 'D':
dead_nodes.append(node)
susceptible_nodes_sizes = degree_selected_nodes_size(g, susceptible_nodes, node_size_scale)
infected_nodes_sizes = degree_selected_nodes_size(g, infected_nodes, node_size_scale)
quarantined_nodes_sizes = degree_selected_nodes_size(g, quarantined_nodes, node_size_scale)
recovered_nodes_sizes = degree_selected_nodes_size(g, recovered_nodes, node_size_scale)
dead_nodes_sizes = degree_selected_nodes_size(g, dead_nodes, node_size_scale)
nx.draw_networkx_edges(g, ax=ax, alpha=edge_alpha, pos=pos, connectionstyle='arc3,rad=0.1',
arrowstyle='<->')
# Susceptible nodes
nx.draw_networkx_nodes(g, nodelist=susceptible_nodes, node_size=susceptible_nodes_sizes,
node_color='orange', ax=ax, pos=pos, edgecolors=node_border_color,
linewidths=node_border_width, label='susceptible')
# Infected nodes
nx.draw_networkx_nodes(g, nodelist=infected_nodes, node_size=infected_nodes_sizes,
node_color='lightblue', ax=ax, pos=pos, edgecolors=node_border_color,
linewidths=node_border_width, label='infected')
# Quarantined nodes
nx.draw_networkx_nodes(g, nodelist=quarantined_nodes, node_size=quarantined_nodes_sizes,
node_color='brown', ax=ax, pos=pos, edgecolors=node_border_color,
linewidths=node_border_width, label='quarantined')
# Recovered nodes
nx.draw_networkx_nodes(g, nodelist=recovered_nodes, node_size=recovered_nodes_sizes,
node_color='green', ax=ax, pos=pos, edgecolors=node_border_color,
linewidths=node_border_width, label='recovered')
# Dead nodes
nx.draw_networkx_nodes(g, nodelist=dead_nodes, node_size=dead_nodes_sizes,
node_color='black', ax=ax, pos=pos, edgecolors=node_border_color,
linewidths=node_border_width, label='dead')
def draw_virtual_layer(g: nx.Graph, ax=None, pos=None, node_size_scale=10, edge_alpha=0.1,
node_border_color='black', node_border_width=0.5):
if pos is None:
pos = nx.spring_layout(g)
positive_nodes = []
negative_nodes = []
for node in g.nodes:
node_opinion = get_opinion(g, node)
if node_opinion == 1:
positive_nodes.append(node)
elif node_opinion == -1:
negative_nodes.append(node)
positive_node_sizes = degree_selected_nodes_size(g, positive_nodes, node_size_scale)
negative_nodes_sizes = degree_selected_nodes_size(g, negative_nodes, node_size_scale)
nx.draw_networkx_edges(g, ax=ax, alpha=edge_alpha, pos=pos, connectionstyle='arc3,rad=0.1',
arrowstyle='<->', edgelist=g.edges)
# Positive opinions
nx.draw_networkx_nodes(g, nodelist=positive_nodes, node_size=positive_node_sizes,
node_color='red', ax=ax, pos=pos, edgecolors=node_border_color,
linewidths=node_border_width, label='+1')
# Negative opinions
nx.draw_networkx_nodes(g, nodelist=negative_nodes, node_size=negative_nodes_sizes,
node_color='blue', ax=ax, pos=pos, edgecolors=node_border_color,
linewidths=node_border_width, label='-1')
def plot_heatmap(array, xtickslabels: list, ytickslabels: list, colorscale_label: str, title_label: str):
"""
Plot heatmap from 2d array with x and y ticks labels
:param array: 2d array
:param xtickslabels:
:param ytickstlabels:
:param colorscale_label:
:param title_label:
"""
xticks_labels = ['{:.2f}'.format(l) for l in xtickslabels]
yticks_labels = ['{:.2f}'.format(b) for b in ytickslabels]
sns.heatmap(array, annot=False, cmap="YlGnBu",
yticklabels=yticks_labels, xticklabels=xticks_labels,
vmin=0, vmax=1, cbar_kws={'label': colorscale_label})
plt.title(title_label)
def plot_imshow(df, xlabel, ylabel, cmap=mpl.cm.Reds):
plt.imshow(df, cmap=cmap)
xticks = [float(x) for x in df.columns]
plt.xticks(range(len(xticks)), xticks)
yticks = [int(x) for x in df.index]
plt.yticks(range(len(yticks)), yticks)
plt.colorbar()
plt.xlabel(xlabel, fontsize=20)
plt.ylabel(ylabel, fontsize=20)
| [
"matplotlib",
"seaborn"
] |
886b4d5566dfbeb3178536b193c41cf34a61c34d | Python | hectorgarbisu/TFRegresion-sine | /TFRegresion1.py | UTF-8 | 2,235 | 2.71875 | 3 | [] | no_license | import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import random
dataLenght = 100
step = 10
alpha = 0.3
nW_hidden = 10
# t = np.arange(0,20.0, 0.1)
# data = np.sin(t)
t = range(dataLenght)
data = [np.sin(2*np.pi*i/dataLenght)/2 for i in range(dataLenght)]
x = tf.placeholder("float", [None, step])
y_ = tf.placeholder("float", [None, 1])
# W = tf.Variable(np.float32(np.random.rand(step, 1))*0.1)
# b = tf.Variable(np.float32(np.random.rand(1))*0.1)
# y = tf.sigmoid(tf.matmul(x, W) + b)
W_hidden = tf.Variable(tf.truncated_normal([step, nW_hidden]))
b_hidden = tf.Variable(tf.truncated_normal([nW_hidden]))
W_output = tf.Variable(tf.truncated_normal([nW_hidden, 1]))
b_output = tf.Variable(tf.truncated_normal([1]))
y_hidden = tf.tanh(tf.matmul(x, W_hidden) + b_hidden)
# y = tf.sigmoid(tf.matmul(y_hidden, W_output) + b_output)
y = tf.tanh(tf.matmul(y_hidden, W_output) + b_output)
error_measure = tf.reduce_sum(tf.square(y_ - y))
# cross_entropy = -tf.reduce_sum(y_*tf.log(y))
train = tf.train.GradientDescentOptimizer(alpha).minimize(error_measure)
init = tf.initialize_all_variables()
sess = tf.Session()
sess.run(init)
print "----------------------"
print " Start training... "
print "----------------------"
for epoch in range(1000):
for jj in range((len(data)-step)):
xs = np.atleast_2d([data[(i+jj)%len(data)] for i in range(step)])
ys = np.atleast_2d(data[(step+jj)%len(data)])
# print xs, ys
sess.run(train, feed_dict={x: xs, y_: ys})
print sess.run(error_measure, feed_dict={x: xs, y_: ys})
"""if epoch % 50 == 0:
print "Iteration #:", epoch, "Error: ", sess.run(error_measure, feed_dict={x: xs, y_: ys})
print sess.run(y, feed_dict={x: xs})
print ys
print "----------------------------------------------------------------------------------"
"""
print "----------------------"
print " Start testing... "
print "----------------------"
outs = data[:step]
for i in range(len(data)):
xs = np.atleast_2d(outs[i:])
#print xs
out = sess.run(y, feed_dict={x: xs})
outs.append(out[0][0])
plt.plot(t, data)
plt.plot(step-1, outs[step-1], 'ro')
plt.plot(t, outs[:len(data)])
plt.show()
| [
"matplotlib"
] |
5c540d0f01bf2aca126d822e09a90d99907c1d71 | Python | JNU-Room/ML | /ML_MH/study/minimizingCost.py | UTF-8 | 589 | 2.921875 | 3 | [] | no_license | import tensorflow as ts
X = [1.,2.,3.]
Y = [1.,2.,3.]
m = len(X)
W = ts.placeholder(ts.float32)
hypothesis = ts.mul(W,X)
cost = ts.reduce_sum(ts.pow(hypothesis-Y,2))/m
init = ts.initialize_all_variables()
sess = ts.Session()
sess.run(init)
W_val, cost_val = [], []
for i in range(-30, 51):
xPos = i*0.1
yPos = sess.run(cost, feed_dict={W: xPos})
print('{:3.1f}, {:3.1f}'.format(xPos, yPos))
W_val.append(xPos)
cost_val.append(yPos)
sess.close()
import matplotlib.pyplot as plt
plt.plot(W_val, cost_val, 'ro')
plt.ylabel('cost')
plt.xlabel('W')
plt.show() | [
"matplotlib"
] |
35947b84944284c29ab4309fa1556bc761266ee3 | Python | nitant200/Analysis_of_Faculty_Participation_Data | /apps/syllabus.py | UTF-8 | 1,811 | 3.21875 | 3 | [
"MIT"
] | permissive | import streamlit as st
import pandas as pd
import matplotlib.pyplot as plt
import datetime
def app():
st.title('Syllabus')
st.write('This page will show the graphs and tables based on the Faculty Particpation in Syllabus')
data = st.file_uploader("Upload your relevant excel file")
df = pd.read_csv(data)
u1 = df['NameofUniversity'].value_counts()
st.write('**Graph of University-Wise Count of Teacher attending Syllabus**')
st.bar_chart(u1)
df['Date'] = df['Date'].astype('datetime64[ns]')
df_1 = df['Date'].dt.year
# df_1
col = []
for i in df_1:
col.append(i)
col=list(set(col))
df1=pd.DataFrame(data=None,columns=['Count'])
for i in col:
mask = (df['Date'] > str(i)+'0615') & (df['Date'] <= str(i)+'1215')
mask1 = (df['Date'] > str(i)+'1215') & (df['Date'] <= str(i+1)+'0615')
test5=df.loc[mask]
test6=df.loc[mask1]
c1=test5['Date']
c2=test6['Date']
t1=c1.shape[0]
t2=c2.shape[0]
t1=pd.DataFrame([t1],columns=['Count'],index=['ODD sem '+str(i)])
df1=df1.append(pd.DataFrame(t1))
t2=pd.DataFrame([t2],columns=['Count'],index=['EVEN sem '+str(i)])
df1=df1.append(pd.DataFrame(t2))
st.write('**Graph of Semester-Wise Count of Teacher attending Syllabus**')
st.bar_chart(df1)
df2=pd.DataFrame(data=None,columns=['Count'])
for i in col:
mask = (df['Date'] > str(i)+'0615') & (df['Date'] <= str(i+1)+'0615')
test5=df.loc[mask]
c1=test5['Date']
t1=c1.shape[0]
t1=pd.DataFrame([t1],columns=['Count'],index=[str(i)+'-'+str(i+1)])
df2=df2.append(pd.DataFrame(t1))
st.write('**Graph of Year-Wise Count of Teacher attending Syllabus**')
st.bar_chart(df2)
| [
"matplotlib"
] |
4a474398d1c436995b0449b02c9b111728aa1fc1 | Python | yling01/analysis_template | /5clusterAnalysis/source/Cluster.py | UTF-8 | 16,102 | 3.140625 | 3 | [] | no_license | '''
Tim Ling
Last update: 2020.07.03
'''
import numpy as np
from scipy.spatial.distance import pdist
import math
import matplotlib.pyplot as plt
import colorsys
from scipy.spatial import distance
from sklearn.cluster import DBSCAN
'''
Parameters:
projection: (np.array [n observations, m dimensions]) projection of data points
radius: (float) cutoff for two data points to be neighbors
min_sample_number: (int) the minimum number of data points that a cluster has to have
Returns:
n_cluster_: (np.array) the cluster information of all the data points
n_noise_: (np.array) the noise information of all the data points
Does:
Using sklearn package to do cluster analysis
Note:
This function is currently not working, it needs to be tuned further for
proper function.
'''
def cluster(projection, radius=0.5, min_sample_number=100):
db = DBSCAN(eps=radius, min_samples=min_sample_number).fit(projection)
core_samples_mask = np.zeros_like(db.labels_, dtype=bool)
core_samples_mask[db.core_sample_indices_] = True
labels = db.labels_
n_clusters_ = len(set(labels)) - (1 if -1 in labels else 0)
n_noise_ = list(labels).count(-1)
return n_clusters_, n_noise_
'''
Parameters:
num_colors: (int) the number of color to return
Returns:
colors: (np.array [n, 3]) the rgb values of colors
Does:
Randomly generate n color rgb's
Note:
This should be replaced with a more stable function where
colors should be as distinct as 'possible'.
'''
def get_colors(num_colors):
colors=[]
for i in np.arange(0., 360., 360. / num_colors):
hue = i/360.
lightness = (50 + np.random.rand() * 10)/100.
saturation = (90 + np.random.rand() * 10)/100.
colors.append(colorsys.hls_to_rgb(hue, lightness, saturation))
return colors
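# A possible alternative (illustrative sketch): draw evenly spaced colors from a
# matplotlib colormap, which tends to be more reproducible than random
# lightness/saturation values.
def get_colors_from_cmap(num_colors, cmap_name='hsv'):
    cmap = plt.get_cmap(cmap_name)
    return [cmap(i / max(num_colors, 1))[:3] for i in range(num_colors)]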
'''
Parameters:
i: (int) the ith index
j: (int) the jth index
n: (int) the number of data points
Returns:
index: (int) the index of the distance in the compact distance matrix
Does:
Finds the index of the distance between data i and j in the
compact distance matrix
'''
def square_to_condensed(i, j, n):
assert i != j, "no diagonal elements in condensed matrix"
if i < j:
i, j = j, i
return int(n*j - j*(j+1)/2 + i - 1 - j)
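# Illustrative check of the mapping above, assuming scipy's condensed pdist layout:
# for n = 4 points the pair (i=2, j=0) maps to condensed index 1, i.e.
#   pdist(pts)[square_to_condensed(2, 0, 4)] == squareform(pdist(pts))[2, 0]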
'''
Parameters:
distance_mtx: (np.array) the compact distance mtx of data points
density: (np.array) the density of all cells
distance_cutoff: (float) distance_cutoff to calculate rho
Returns:
rho: (np.array) the density of the data points
rho_order: (np.array) the array to sort rho
nearest_neighbor: (np.array) the nearest neighbor for all data points
delta: (np.array) the shortest distance to the point
with a higher rho value
Does:
Calculates rho, delta, rho_order and nearest_neighbor
Notes:
The point with the highest density has to be selected as the
cluster center because it does not have a nearest neighbor.
It is a known issue that the program will fail if this point is
not selected as the cluster center.
Reference:
Rodriguez, A., and A. Laio. “Clustering by Fast Search and Find of Density
Peaks.” Science, vol. 344, no. 6191, 2014, pp.1492–1496., doi:10.1126/science.1242072.
'''
def calculate_rho_delta(distance_mtx, density, distance_cutoff):
max_distance = np.amax(distance_mtx)
rho = np.copy(density)
num_datapoint = len(density)
delta = np.full(num_datapoint,max_distance)
nearest_neighbor = np.ones(num_datapoint, dtype=int) * -1
for i in range(num_datapoint-1):
for j in range(i + 1, num_datapoint):
index_distance_mtx = square_to_condensed(i, j, num_datapoint)
ij_distance = distance_mtx[index_distance_mtx]
exponent = ij_distance / distance_cutoff
adder = density[j] * math.exp(-exponent**2)
rho[i] = rho[i] + adder
rho[j] = rho[j] + adder
rho_order = np.flip(np.argsort(rho))
rho_sorted = rho[rho_order]
delta[rho_order[0]] = -1.0
for i in range(1, num_datapoint):
for j in range(0, i):
rho_orderI = rho_order[i]
rho_orderJ = rho_order[j]
index_distance_mtx = square_to_condensed(rho_orderI, rho_orderJ, num_datapoint)
ij_distance = distance_mtx[index_distance_mtx]
if ij_distance < delta[rho_orderI]:
delta[rho_orderI] = ij_distance
nearest_neighbor[rho_orderI] = rho_orderJ
nearest_neighbor[rho_order[0]] = 0
delta[rho_order[0]] = np.amax(delta)
assert(not np.any(nearest_neighbor == -1))
return rho, rho_order, nearest_neighbor, delta
'''
Parameters:
event: a click event
Returns:
None
Does:
Enables the customer selection of cluster center.
The cluster centers are labelled green.
'''
def onpick3(event):
ind = event.ind
current_color = col.get_facecolors()[ind]
if current_color[0][0]: #selecting a new point
col._facecolors[ind,:] = (0, 1, 0, 1) #plots the point green
else: #deselect a old point
col._facecolors[ind,:] = (1, 0, 0, 1) #plots the point green
fig.canvas.draw()
'''
Parameters:
density_clean: (np.array) the density mtx
distance_cutoff_percent: (float) the percent of data points to drop
delta_cutoff: (float) in automated cluster mode, the cutoff for delta
interactive: (bool) True to select cluster center in the interactive mode
Returns:
rho: (np.array) the density of the data points
delta: (np.array) the shortest distance to the point
with a higher rho value
cluster_center_index: (list) stores the cluster center index
distance_mtx_condensed: (np.array) compact distance list
distance_cutoff: (float) distance_cutoff to calculate rho
Does:
Implementation of the density peak based clustering algorithm
Reference:
Rodriguez, A., and A. Laio. “Clustering by Fast Search and Find of Density
Peaks.” Science, vol. 344, no. 6191, 2014, pp.1492–1496., doi:10.1126/science.1242072.
'''
def DB_cluster(density_clean, distance_cutoff_percent=0.02, delta_cutoff=0.5, interactive=False):
distance_mtx_condensed = pdist(density_clean[:,0:-1])
density = density_clean[:,-1]
cluster_center_index = []
num_datapoint = len(density)
cluster = np.full(num_datapoint, -1)
num_cluster = 0
distance_cutoff_index = math.ceil(distance_cutoff_percent * len(distance_mtx_condensed))
distance_cutoff = np.sort(distance_mtx_condensed)[distance_cutoff_index]
rho, rho_order, nearest_neighbor, delta = calculate_rho_delta(distance_mtx_condensed, density, distance_cutoff)
if interactive:
global fig, axis, col
fig, axis = plt.subplots(dpi=200)
mask = delta > delta_cutoff
color = np.array([1, 0, 0, 1] * num_datapoint).reshape(-1, 4) #original poitns: all red
for index, decider in enumerate(mask):
if decider:
color[index] = [0, 1, 0, 1] #color those above threshold gree
col = axis.scatter(rho, delta, c=color, marker='.', picker=True)
axis.set_title("Decision Graph", fontsize='xx-large')
axis.set_ylabel(r"$\delta$", fontsize='x-large')
axis.set_xlabel(r"$\rho$", fontsize='x-large')
fig.canvas.mpl_connect('pick_event', onpick3)
plt.show()
for index, point_color in enumerate(col.get_facecolors()):
point_color = point_color.flatten()
if not point_color[0]: #if green, meaning selected
num_cluster += 1
cluster[index] = num_cluster
cluster_center_index.append(index)
plt.close('all')
else:
for i in range(num_datapoint):
if delta[i] >= delta_cutoff:
num_cluster += 1
cluster[i] = num_cluster
cluster_center_index.append(i)
for i in range(num_datapoint):
index = rho_order[i]
if cluster[index] == -1:
cluster[index] = cluster[nearest_neighbor[index]]
assert(not np.any(cluster == -1))
return rho, delta, cluster, cluster_center_index, distance_mtx_condensed, distance_cutoff
'''
Parameters:
cluster: (np.array) the cluster assignment of the data points
distance_mtx_condensed: (np.array) compact distance list
rho: (np.array) density of the data points on the decision graph
distance_cutoff: (float) distance_cutoff to calculate rho
Returns:
halo: (np.array) the halo of the data points
Does:
Calculates the halo of the data points
Reference:
Rodriguez, A., and A. Laio. “Clustering by Fast Search and Find of Density
Peaks.” Science, vol. 344, no. 6191, 2014, pp.1492–1496., doi:10.1126/science.1242072.
'''
def calculate_halo(cluster, distance_mtx_condensed, distance_cutoff, rho):
num_cluster = len(np.unique(cluster))
num_datapoint = len(cluster)
halo = np.copy(cluster)
if num_cluster > 1:
bord_rho = np.zeros(num_cluster)
for i in range(num_datapoint - 1):
for j in range(i + 1, num_datapoint):
index_distance_mtx = square_to_condensed(i, j, num_datapoint)
if (cluster[i] != cluster[j]) and (distance_mtx_condensed[index_distance_mtx] < distance_cutoff):
rho_ave = 0.5 * (rho[i] + rho[j])
if rho_ave > bord_rho[cluster[i] - 1]:
bord_rho[cluster[i] - 1] = rho_ave
if rho_ave > bord_rho[cluster[j] - 1]:
bord_rho[cluster[j] - 1] = rho_ave
for i in range(num_datapoint):
if rho[i] < bord_rho[cluster[i] - 1]:
halo[i] = 0
for i in range(num_cluster):
nc = 0
nh = 0
for j in range(num_datapoint):
if cluster[j] == i:
nc += 1
if halo[j] == i:
nh += 1
return halo
'''
Parameters:
rho: (np.array) the density of the data points
delta: (np.array) the shortest distance to the point
with a higher rho value
cluster_center_index: (list) stores the cluster center index
file_name: (str) the file name of the decision graph
dir_name: (str) the directory name to store the file
Returns:
None
Does:
Draws the decision grpah
'''
def draw_clustered_decision_graph(rho, delta, cluster_center_index, file_name, dir_name):
file_name = dir_name + '/' + file_name
assert (len(rho) == len(delta))
fig = plt.figure(figsize = (10, 10), dpi = 300)
left, bot, right, top = (0.2, 0.2, 0.8, 0.8)
axis = fig.add_axes([left, bot, right, top])
num_datapoint = len(rho)
color = get_colors(len(cluster_center_index))
mask = np.isin(np.arange(num_datapoint), cluster_center_index)
axis.scatter(rho[~mask], delta[~mask], c="black", marker='.')
axis.scatter(rho[mask], delta[mask], c=color, marker='.')
axis.set_title("Clustered Decision Graph", fontsize='xx-large')
axis.set_ylabel(r"$\delta$", fontsize='x-large')
axis.set_xlabel(r"$\rho$", fontsize='x-large')
fig.savefig(file_name, bbox_inches='tight')
'''
Parameters:
cluster_assignment: (np.array) the cluster assignment for
all data points
Returns:
The population for all clusters identified
Does:
Calculates the population for all clusters
'''
def calculate_population(cluster_assignment):
frame_length = len(cluster_assignment)
population = []
for i in np.unique(cluster_assignment):
if i == 0:
continue
population.append(len(np.where(cluster_assignment == i)[0]) / frame_length * 100)
population.sort(reverse=True)
return population
'''
Parameters:
projection: (np.array) projection of the data points
cluster: (np.array) cluster assignment of the data points
density_cube: (np.array) the density matrix along with the axis
Returns:
projection_cluster_assignment: (np.array) the assignment of cluster of each projection
Does:
Puts the data points into the clusters
Reference:
Rodriguez, A., and A. Laio. “Clustering by Fast Search and Find of Density
Peaks.” Science, vol. 344, no. 6191, 2014, pp.1492–1496., doi:10.1126/science.1242072.
'''
def assign_projection_cluster(projection, cluster, density_cube):
assert len(cluster) == len(density_cube)
cube_matrix = density_cube[:,0:-1]
dimension = cube_matrix.shape[1]
projection_cluster_assignment = np.zeros(len(projection), dtype=int)
for projection_index, data in enumerate(projection):
in_range = True
min_dist_index = np.argmin(distance.cdist([data], cube_matrix, 'euclidean')[0])
for i in range(dimension):
measurement = np.unique(cube_matrix[:,i])
if round(abs(data[i] - cube_matrix[min_dist_index][i]), 16) >= round(0.5 * (measurement[1] - measurement[0]), 16):
in_range = False
break
if in_range:
projection_cluster_assignment[projection_index] = cluster[min_dist_index]
return projection_cluster_assignment
'''
Parameters:
n_clusters: (int) the number of clusters to get
dihedral: (np.array) the dihedral angles of the residues
cluster_assignment: (np.array) cluster assignment of all data points
Returns:
dihedral_clusters: (np.array) the original dihedral angles in the cluster
zero_cluster_dihedral: (np.array) the dihedral angles that are not in any cluter
Does:
Obtain the clusters on the original dihedral angles
'''
def get_top_clusters(n_clusters, dihedral, cluster_assignment):
zero_cluster_indices = np.argwhere(cluster_assignment==0)
zero_cluster_dihedral = dihedral[zero_cluster_indices]
cluster_assignment = np.delete(cluster_assignment, zero_cluster_indices)
dihedral = np.delete(dihedral, zero_cluster_indices, axis=0)
clusters, cluster_point_count = np.unique(cluster_assignment, return_counts=True)
assert n_clusters <= len(clusters)
assert len(dihedral) == len(cluster_assignment)
clusters_sorted = clusters[np.flip(np.argsort(cluster_point_count))]
dihedral_clusters = []
for i in range(n_clusters):
cluster = clusters_sorted[i]
point_indices = np.argwhere(cluster_assignment == cluster)
dihedral_cluster = dihedral[point_indices.flatten()]
dihedral_clusters.append(dihedral_cluster)
return dihedral_clusters, zero_cluster_dihedral
'''
Parameters:
density_clean: (np.array) the clean density mtx
projection: (np.array) projection of the data points
file_name: (str) the file name of the decision graph
dir_name: (str) the directory name to store the file
Returns:
projection_cluster_assignment: (np.array) the assignment of cluster of each projection
Does:
A wrapper that calls multiple functions to get the clustering results
'''
def get_cluster_assignment(density_clean, projection, file_name, interactive, dir_name):
rho, delta, cluster, cluster_center_index, distance_mtx_condensed, distance_cutoff = DB_cluster(density_clean, interactive=interactive)
halo = calculate_halo(cluster, distance_mtx_condensed, distance_cutoff, rho)
draw_clustered_decision_graph(rho, delta, cluster_center_index, file_name, dir_name)
return assign_projection_cluster(projection, cluster, density_clean), cluster
def write_cluster_ndx(fileName, cluster_assignment):
cluster, count = np.unique(cluster_assignment, return_counts=True)
cluster = cluster[1:]
count = count[1:]
cluster = cluster[np.argsort(count)]
with open(fileName, "wb+") as fo:
fo.write(b"[ state 0 ]\n")
cluster_to_write = np.where(cluster_assignment == 0)[0] + 1
np.savetxt(fo, cluster_to_write, fmt="%10d")
with open(fileName, "ab") as fo:
for i in range(len(cluster)):
fo.write(b"[ state %d ]\n" % (i + 1))
cluster_to_write = np.where(cluster_assignment == cluster[i])[0] + 1
np.savetxt(fo, cluster_to_write, fmt="%10d")
| [
"matplotlib"
] |
6b6f0dc4ad2b4bdfc4ad36969ee632da1258b142 | Python | yilunh98/DQN-OpenAI | /q3_dqn.py | UTF-8 | 8,383 | 2.9375 | 3 | [] | no_license | # EECS545 HW6: DQN.
import gym
import math
import random
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from collections import namedtuple
from itertools import count
from PIL import Image
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torchvision.transforms as T
random.seed(403)
np.random.seed(403)
torch.manual_seed(403)
env = gym.make('CartPole-v1')
env.seed(403)
env = env.unwrapped
Transition = namedtuple('Transition',
('state', 'action', 'next_state', 'reward'))
class ReplayMemory(object):
def __init__(self, capacity):
self.capacity = capacity
self.memory = []
self.position = 0
def push(self, *args):
"""Saves a transition."""
if len(self.memory) < self.capacity:
self.memory.append(None)
self.memory[self.position] = Transition(*args)
self.position = (self.position + 1) % self.capacity
def sample(self, batch_size):
return random.sample(self.memory, batch_size)
def __len__(self):
return len(self.memory)
class DQN(nn.Module):
def __init__(self):
super(DQN, self).__init__()
self.fc1 = nn.Linear(env.observation_space.shape[0], 50)
self.fc1.weight.data.normal_(0, 0.1)
self.fc2 = nn.Linear(50, env.action_space.n,)
self.fc2.weight.data.normal_(0, 0.1)
def forward(self, x):
x = self.fc1(x)
x = F.relu(x)
x = self.fc2(x)
out = F.softmax(x, dim=1)
return out
def select_action(state, policy_net, eps_end, eps_start, eps_decay, steps_done, device):
sample = random.random()
eps_threshold = eps_end + (eps_start - eps_end) * math.exp(-1. * steps_done / eps_decay)
########### TODO: Epsilon-greedy action selection ##############
### with probability eps_threshold, take random action ###
### with probability 1-eps_threshold, take the greedy action ###
################################################################
if sample > eps_threshold:
actval = policy_net(state)
action = torch.max(actval,1)[1].view(1,1)
else:
action = torch.tensor([[np.random.randint(0, env.action_space.n)]],device=device)
return action
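# --- Added illustration (not part of the original assignment) ----------------
# With the hyper-parameters set later in main() (EPS_START=0.3, EPS_END=0.01,
# EPS_DECAY=10000), the exploration threshold used above decays smoothly with
# the global step count: roughly 0.30 at step 0, 0.12 at step 10000 and 0.01
# at step 50000.  A minimal standalone sketch of the same schedule:
def _eps_schedule_sketch(step, eps_start=0.3, eps_end=0.01, eps_decay=10000):
    """Exponentially decaying exploration rate, mirroring select_action above."""
    return eps_end + (eps_start - eps_end) * math.exp(-step / eps_decay)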
def optimize(policy_net, target_net, optimizer, memory, batch_size, gamma, device):
"""Learning step of DQN."""
if len(memory) < batch_size:
return
# Converts batch-array of transitions to Transition of batch-arrays.
# (see https://stackoverflow.com/a/19343/3343043 for detailed explanation)
transitions = memory.sample(batch_size)
batch = Transition(*zip(*transitions))
# Compute a mask of non-final states and concatenate the batch elements
# (a final state would've been the one after which simulation ended)
non_final_mask = torch.tensor(tuple(map(lambda s: s is not None,
batch.next_state)), device=device, dtype=torch.bool)
non_final_next_states = torch.cat([s for s in batch.next_state
if s is not None])
state_batch = torch.cat(batch.state)
action_batch = torch.cat(batch.action)
reward_batch = torch.cat(batch.reward)
########### TODO: Compute the current Q(s_t, a_t) #########
### Q-value computed using the policy network ###
### (i.e., the current q-network) ###
### then we select the columns of actions taken. ###
### These are the actions which would've been taken ###
### for each batch state according to policy_net ###
###########################################################
state_action_values = policy_net(state_batch).gather(1, action_batch)
########### TODO: Compute the target Q(s_t, a_t) for non-final state s_t ######
### Q-value computed using the target network ###
### (i.e., the older q-network) using Bellman equation. ###
### Hint: Select the best q-value using max(1)[0]. ###
### Hint2: Use "non_final_mask" and "non_final_next_states" so that ###
### resulting target value is either target Q value or 0 (final state) ###
###############################################################################
next_state_values = torch.zeros(batch_size, device=device)
next_state_values[non_final_mask] = target_net(non_final_next_states).max(1)[0]
target_state_action_values = reward_batch + gamma*next_state_values
########### TODO: Compute loss using either l2 or smooth l1 #######
### Note: you can use pytorch loss functions (e.g. F)
###################################################################
    loss = F.smooth_l1_loss(state_action_values, target_state_action_values)  # Huber loss, per the TODO above (l2 or smooth l1)
# Update parameters
optimizer.zero_grad()
loss.backward()
for param in policy_net.parameters():
param.grad.data.clamp_(-1, 1)
optimizer.step()
def plot_durations(episode_durations, save_path):
plt.figure(2)
plt.clf()
durations_t = torch.tensor(episode_durations, dtype=torch.float)
plt.title('Training...')
plt.xlabel('Episode')
plt.ylabel('Duration')
plt.plot(durations_t.numpy())
# Plot 100-episode running averages
if len(durations_t) >= 100:
means = durations_t.unfold(0, 100, 1).mean(1).view(-1)
means = torch.cat((torch.zeros(99), means))
plt.plot(means.numpy())
plt.savefig(save_path)
plt.show()
plt.close()
def main():
# env = gym.make('CartPole-v1')
# env.seed(403) # Do not change the seed
# Env info
print("Action_space", env.action_space)
print("Observation_space", env.observation_space)
# Use GPU when available (optional)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# Hyper-parameters
BATCH_SIZE = 256
GAMMA = 0.99
EPS_START = 0.3
EPS_END = 0.01
EPS_DECAY = 10000
TARGET_UPDATE = 20 # The target network update frequency
num_episodes = 1000
# Build the network and the optimizer
policy_net = DQN().to(device)
target_net = DQN().to(device)
target_net.load_state_dict(policy_net.state_dict())
target_net.eval()
optimizer = optim.Adam(policy_net.parameters(), lr=0.002)
memory = ReplayMemory(10000)
steps_done = 0
episode_durations = []
for i_episode in range(num_episodes):
# Initialize the environment and state
state = env.reset()
state = torch.tensor([state], device=device, dtype=torch.float32)
env.render(mode='rgb_array')
for t in count():
# Select and perform an action
action = select_action(state, policy_net, eps_end=EPS_END,
eps_start=EPS_START, eps_decay=EPS_DECAY,
steps_done=steps_done, device=device)
next_state, reward, done, _ = env.step(action.item())
next_state = torch.tensor([next_state], device=device, dtype=torch.float32)
reward = torch.tensor([reward], device=device)
if done:
next_state=None
steps_done += 1
# env.render(mode='rgb_array')
########### TODO: Store the transition in memory ###########
memory.push(state, action, next_state, reward)
# ----------------------------------------------------------
# Move to the next state
state = next_state
# Perform one step of the optimization (on the target network)
optimize(policy_net=policy_net, target_net=target_net, optimizer=optimizer,
memory=memory, batch_size=BATCH_SIZE, gamma=GAMMA, device=device)
if done:
episode_durations.append(t + 1)
print('episode', i_episode, 'duration', episode_durations[-1])
break
# Update the target network, copying all weights and biases from the policy network
if i_episode % TARGET_UPDATE == 0:
target_net.load_state_dict(policy_net.state_dict())
print('Complete')
env.close()
plot_durations(episode_durations, 'dqn_reward.png')
if __name__ == '__main__':
main()
| [
"matplotlib"
] |
4dc2ffaa244621d325edfb594846d160c91c2a9f | Python | Bluestone47/multi-armed-bandits | /offline_evaluate.py | UTF-8 | 3,028 | 3.015625 | 3 | [] | no_license | import numpy as np
import matplotlib.pyplot as plt
from epsilon_greedy import EpsGreedy
from ucb import UCB
from beta_thompson import BetaThompson
from linucb import LinUCB
from lin_thompson import LinThompson
def offlineEvaluate(mab, arms, rewards, contexts, nrounds=None):
"""
Offline evaluation of a multi-armed bandit
Arguments
=========
mab : instance of MAB
arms : 1D int array, shape (nevents,)
integer arm id for each event
rewards : 1D float array, shape (nevents,)
reward received for each event
contexts : 2D float array, shape (nevents, mab.narms*nfeatures)
contexts presented to the arms (stacked horizontally)
for each event.
nrounds : int, optional
number of matching events to evaluate `mab` on.
Returns
=======
out : 1D float array
rewards for the matching events
"""
history = [] # history of arms
out = [] # history of payoffs
count = 0 # count of events
for t in range(nrounds):
while True:
arm = mab.play(len(history) + 1, contexts[count])
if count >= len(arms):
return out # reach the end of the logged dataset
if count < len(arms) and arms[count] - 1 == arm:
break
count += 1
mab.update(arm, rewards[count], contexts[count]) # arm (0-9), arms (1-10)
history.append(arm)
out.append(rewards[count])
count += 1
# print(mab.total_rewards)
print(mab.action_attempts)
print(mab.estimate_value)
cum_mean = np.cumsum(out) / np.arange(1, len(out) + 1)
plt.plot(cum_mean, label=mab.__class__.__name__)
return out
if __name__ == '__main__':
arms = []
rewards = []
contexts = []
dataset_file = open('dataset.txt', 'r')
for line in dataset_file:
event = line.split(' ')[:-1]
event = list(map(int, event))
arms.append(event[0])
rewards.append(event[1])
contexts.append(event[2:])
dataset_file.close()
# mab = EpsGreedy(10, 0.05)
# results_EpsGreedy = offlineEvaluate(mab, arms, rewards, contexts, 800)
# print('EpsGreedy average reward', np.mean(results_EpsGreedy))
# mab = UCB(10, 1.0)
# results_UCB = offlineEvaluate(mab, arms, rewards, contexts, 800)
# print('UCB average reward', np.mean(results_UCB))
# mab = BetaThompson(10, 1.0, 1.0)
# results_BetaThompson = offlineEvaluate(mab, arms, rewards, contexts, 800)
# print('BetaThompson average reward', np.mean(results_BetaThompson))
mab = LinUCB(10, 10, 1.0)
results_LinUCB = offlineEvaluate(mab, arms, rewards, contexts, 800)
print('LinUCB average reward', np.mean(results_LinUCB))
# mab = LinThompson(10, 10, 1.0)
# results_LinThompson = offlineEvaluate(mab, arms, rewards, contexts, 800)
# print('LinThompson average reward', np.mean(results_LinThompson))
plt.xlabel('Rounds')
plt.ylabel('Mean Cumulative Reward')
plt.legend()
plt.show()
| [
"matplotlib"
] |
7b48fa788fef8ae04e801e9b961bef6f79fddc97 | Python | suiup/pybullet | /self_learning/terrain/terrain_create_formula05.py | UTF-8 | 423 | 2.796875 | 3 | [] | no_license | import numpy as np
import plotly.graph_objs as go
a = np.linspace(-1, 1, 50)
b = np.linspace(-1, 1, 50)
x, y = np.meshgrid(a, b)
z = x ** 2 + y ** 2 + x * y + x + y - 3
list = [i for i in z]
dataStr = ""
for i in list:
dataStr += ",\t".join(('%.5f' % j) for j in i) + "\n"
print(dataStr)
with open("test.txt", "w") as f:
f.write(dataStr)
fig = go.Figure(data=[
go.Surface(x=x, y=y, z=z),
])
fig.show()
| [
"plotly"
] |
12d0778cbf0df73ee2c19d52dbb0d1d0798c0c67 | Python | arkadiusz-jagodzinski/Backend_SAPMAway | /spamchecker/spamcheck.py | UTF-8 | 1,671 | 3.03125 | 3 | [] | no_license | import os
import matplotlib.pyplot as plt
from joblib import dump
from joblib import load
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.neural_network import MLPClassifier
from sklearn.pipeline import make_pipeline
from wordcloud import WordCloud
clf = load(os.getcwd() + "/res/mlp_clf")
def isSpam(text):
return clf.predict_proba([text])[0, 1]
def retrain_and_save_model(spam_data):
"""
    Trains the model from scratch on the data from the database, then saves the weights to the /res/ folder. Reloading the saved model is not required.
    :param spam_data: numpy array holding the SMS texts and a boolean flag marking whether each SMS is spam (the function reads spam_data[0, :] as the texts and spam_data[1, :] as the labels).
:type spam_data: numpy array
:return:
"""
    clf = make_pipeline(TfidfVectorizer(stop_words="english"),  # pass "english" as stop_words; as a bare positional argument it would set input= instead
MLPClassifier(activation='logistic', hidden_layer_sizes=10, learning_rate='constant',
solver='adam'))
clf.fit(spam_data[0, :], spam_data[1, :])
dump(clf, os.getcwd() + os.path.sep + 'res' + os.path.sep + 'mlp_clf')
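# Illustrative only (hypothetical data, not from the project): the function above
# reads row 0 of spam_data as the message texts and row 1 as the spam flags, so a
# call could look like this (assuming numpy were imported as np):
# texts = ["free prize, click now", "see you at lunch?"]
# labels = [True, False]
# retrain_and_save_model(np.array([texts, labels], dtype=object))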
def generate_word_cloud():
"""
    Generates, returns and saves to the file /res/cloud.png a word cloud of the main spam words.
    :return: image with the spam word cloud
"""
wc = WordCloud(background_color="white", width=1000, height=1000, normalize_plurals=True).generate_from_frequencies(
clf[0].vocabulary_)
fig = plt.imshow(wc)
fig.axes.get_xaxis().set_visible(False)
fig.axes.get_yaxis().set_visible(False)
    plt.savefig(os.getcwd() + os.path.sep + 'res' + os.path.sep + 'cloud.png', bbox_inches='tight')
return fig
| [
"matplotlib"
] |
8558f4d93300f844d9b6049590b5ef328c3ea1d5 | Python | MarianoDel/emacs_dexel_boost36v | /boost_ccm_digital_med_03.py | UTF-8 | 7,610 | 2.6875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
# use python3
import numpy as np
import matplotlib.pyplot as plt
from sympy import *
from math import log, sqrt, exp
from scipy.signal import lti, bode, lsim, TransferFunction, step, step2
from scipy.signal import cont2discrete, dbode
from tc_udemm import sympy_to_lti, lti_to_sympy
"""
Boost Voltage-Mode, this is an alternative model.
Large signal - derived from a step response of the Boost.
The small-signal model does not represent the converter start-up well,
because the gain around the operating point is too large and is not
representative of start-up. What I do here is run a step response of the
boost in CCM mode and derive a second-order equation from the result.
"""
##########################################################
# Step response of the pulse by pulse converter model. #
# Always on CCM. Step from 0.17 to 0.67 without feedback #
##########################################################
# From the simulation results:
fn = 136.4
Max_peak_value = 56
Final_value = 35.6
Input_step_value = 0.53
sense_probe_alpha = 1.8 / (1.8 + 22)
# Auxiliary calcs
wn = fn * 2 * np.pi
Mp = Max_peak_value / Final_value - 1
log_mp_2 = (log(Mp))**2
psi = sqrt(log_mp_2/(np.pi**2+log_mp_2))
Mp2 = exp((-psi*np.pi)/sqrt(1-psi**2))
# print(f'Mp: {Mp}, psi: {psi}, Mp check: {Mp2}')
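# --- Added note (not in the original script) --------------------------------
# The block above is the usual identification of an under-damped second-order
# system from its step response:
#   overshoot       Mp  = (peak - final) / final
#   damping ratio   psi = -ln(Mp) / sqrt(pi**2 + ln(Mp)**2)
#   natural freq.   wn  = 2*pi*fn   (the ringing frequency fn is used directly,
#                                    ignoring the sqrt(1 - psi**2) correction)
#   DC gain             = Final_value / Input_step_value
# Mp2 simply re-computes the overshoot from psi as a consistency check.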
#TF without constant
s = Symbol('s')
Plant_num = Final_value * wn**2 / Input_step_value
Plant_den = s**2 + 2 * psi * wn * s + wn**2
Plant_out = Plant_num/Plant_den
Plant_out_sim = Plant_out.simplify()
# print ('Plant_out: ')
# print (Plant_out_sim)
#####################################################
# From here on I use the zeros and poles returned by sympy #
#####################################################
# planta = sympy_to_lti(Plant_out_sim)
sensado = sympy_to_lti(Plant_out_sim * sense_probe_alpha / 3.3)
# print ("planta con sympy:")
# print (planta)
##########################################################
# Convert the plant to discrete time (actually the sensed output) #
# using the Tustin method                                         #
##########################################################
Fsampling = 24000
Tsampling = 1 / Fsampling
planta_dig_tustin_n, planta_dig_tustin_d, td = cont2discrete((sensado.num, sensado.den), Tsampling, method='tustin')
# normalize with TransferFunction
print ("Digital plant:")
planta_dig_tustin = TransferFunction(planta_dig_tustin_n, planta_dig_tustin_d, dt=td)
print (planta_dig_tustin)
################################################
# Step response of the plant, point by point   #
# driving with the proposed Duty as the step   #
################################################
tiempo_de_simulacion = 0.2
print('td:')
print (td)
t = np.arange(0, tiempo_de_simulacion, td)
# Digital plant via Tustin
b_planta = np.transpose(planta_dig_tustin_n)
a_planta = np.transpose(planta_dig_tustin_d)
Duty = Input_step_value
vin_plant = np.ones(t.size) * Duty
vout_plant = np.zeros(t.size)
# for i in range(2, len(vin_plant)):
# ########################################
#     # apply the plant transfer function  #
# ########################################
# vout_plant[i] = b_planta[0]*vin_plant[i] \
# + b_planta[1]*vin_plant[i-1] \
# + b_planta[2]*vin_plant[i-2] \
# - a_planta[1]*vout_plant[i-1] \
# - a_planta[2]*vout_plant[i-2]
# fig, ax = plt.subplots()
# ax.set_title('Respuesta de la Planta Open Loop')
# ax.set_ylabel('Vout')
# ax.set_xlabel('Tiempo en muestras')
# ax.grid()
# ax.plot(t, vin_plant, 'r')
# ax.plot(t, vout_plant, 'c')
# plt.tight_layout()
# plt.show()
############################
# PID Digital #
# ki_dig = ki / Fsampling #
# kp_dig = kp - ki_dig / 2 #
# kd_dig = kd * Fsampling #
############################
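# Added example (made-up analog gains, purely to illustrate the mapping above):
#   with kp = 0.5, ki = 120, kd = 1e-4 and Fsampling = 24000,
#   ki_dig = ki / Fsampling  = 120 / 24000  = 0.005
#   kp_dig = kp - ki_dig / 2 = 0.5 - 0.0025 = 0.4975
#   kd_dig = kd * Fsampling  = 1e-4 * 24000 = 2.4
# The gains below are instead set directly as digital values.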
# with undersampling = 100 and 50
ki_dig = 6 / 128
kp_dig = 1 / 128
kd_dig = 2
# with undersampling = 100 and 50
# ki_dig = 6 / 128
# kp_dig = 1
# kd_dig = 0
# with undersampling = 10
# ki_dig = 1 / 128
# kp_dig = 42 / 128
# kd_dig = 3
k1 = kp_dig + ki_dig + kd_dig
k2 = -kp_dig - 2*kd_dig
k3 = kd_dig
## this is the digital PID
b_pid = [k1, k2, k3]
a_pid = [1, -1]
print ("")
print (f"kp_dig: {kp_dig} ki_dig: {ki_dig} kd_dig: {kd_dig}")
print ("")
pid_dig = TransferFunction(b_pid, a_pid, dt=td)
print ("PID Digital:")
print (pid_dig)
#########################################
# Close the loop point by point with the setpoint #
#########################################
# Step response of the plant, point by point
tiempo_de_simulacion = 0.5
print('td:')
print (td)
t = np.arange(0, tiempo_de_simulacion, td)
# Digital plant via Tustin
b_planta = np.transpose(planta_dig_tustin_n)
a_planta = np.transpose(planta_dig_tustin_d)
vout_plant = np.zeros(t.size)
vin_plant = np.zeros(t.size)
############################################
# Build the signal I want at the SETPOINT  #
############################################
vin_setpoint = np.ones(t.size) * 36 * sense_probe_alpha * 1000 / 3.3
vin_setpoint.astype(int)
vin_setpoint = vin_setpoint / 1000
d = np.zeros(t.size)
error = np.zeros(t.size)
max_d_pwm = 0.85
under_roof = 49
undersampling = 0
integral_term = np.zeros(t.size)
for i in range(2, len(vout_plant)):
###################################################
    # first compute the error, always point by point #
###################################################
dummy_adc_out = int(vout_plant[i-1] * 1000)
dummy_adc_out = dummy_adc_out / 1000
# vout_plant[i - 1] = dummy_adc_out / 1000
# error[i] = vin_setpoint[i] - vout_plant[i-1]
error[i] = vin_setpoint[i] - dummy_adc_out
#############################################################
    # apply the PID loop and clamp to the allowed maximum and minimum #
#############################################################
if undersampling < under_roof:
        # nothing to do this sample
undersampling = undersampling + 1
d[i] = d[i-1]
integral_term[i] = integral_term[i-1]
else:
undersampling = 0
        # split out k1 and the integral term to get better resolution
integral_term[i] = integral_term[i-1] + ki_dig * error[i]
k1 = kp_dig * error[i] + integral_term[i] + kd_dig * error[i]
if integral_term[i] > 0.001:
integral_term[i] = 0
d[i] = k1 + b_pid[1] * error[i-1] + b_pid[2] * error[i-2] - a_pid[1] * d[i-1]
# d[i] = b_pid[0] * error[i] + b_pid[1] * error[i-1] + b_pid[2] * error[i-2] - a_pid[1] * d[i-1]
dummy_d = int(d[i] * 1000)
d[i] = dummy_d / 1000
if d[i] > max_d_pwm:
d[i] = max_d_pwm
if d[i] < 0:
d[i] = 0
########################################
    # apply the plant transfer function    #
########################################
vin_plant[i] = d[i]
vout_plant[i] = b_planta[0]*vin_plant[i] \
+ b_planta[1]*vin_plant[i-1] \
+ b_planta[2]*vin_plant[i-2] \
- a_planta[1]*vout_plant[i-1] \
- a_planta[2]*vout_plant[i-2]
fig, ax = plt.subplots()
ax.set_title('Closed-loop response, point by point')
ax.set_ylabel('Vout')
ax.set_xlabel('Time in samples')
ax.grid()
ax.plot(t, d, 'r')
ax.plot(t, error, 'g')
ax.plot(t, vin_setpoint, 'y')
ax.plot(t, integral_term, 'b')
# ax.stem(t, vout_plant / sense_probe_alpha)
# ax.plot(t, vout_plant / sense_probe_alpha, 'c')
ax.plot(t, vout_plant, 'c')
plt.tight_layout()
plt.show()
| [
"matplotlib"
] |
9b4482f896befb42812cb1ee5423cce5e8812ba1 | Python | llwyd/pink | /models/eq.py | UTF-8 | 1,527 | 3 | 3 | [] | no_license | # cross over filter experiments
import matplotlib.pyplot as plt
import numpy as np
from scipy import signal
def fft(x,fs,fft_len):
F = np.fft.fft(x,fft_len,norm='ortho')
F = np.abs(F)
F = norm(F)
Ff = (fs/2)*np.linspace(0,1,int(fft_len/2))
Fdb = 20*np.log10(F[:int(len(F)/2)]);
return F, Ff, Fdb
def norm( n ):
return n/np.max(np.abs(n))
fs = 44100
sig_len = 8192 * 8
bands = 5
step = (np.log(fs/2) - np.log(20)) / (bands-1)
cutoff = np.zeros(bands-1)
cutoff[0] = np.exp(step)*20
for i in range(1,bands-1):
cutoff[i] = np.exp(step) * cutoff[i-1]
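# Added note (not in the original): with the defaults above (fs=44100, bands=5,
# 20 Hz lower edge) these log-spaced crossover frequencies come out at roughly
# 115 Hz, 664 Hz, 3.8 kHz and 22.05 kHz (the last one being Nyquist).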
total_bands = bands - 1
gains = [1, 0.5, 1.2,1]
# input signal
h = signal.unit_impulse(sig_len)
sos = []
# build a low-pass / high-pass pair at each crossover frequency
for i in range(0, bands-2):
sos_coeff = signal.butter(1,cutoff[i],'lp',fs=fs,output='sos')
sos.append(sos_coeff)
sos_coeff = signal.butter(1,cutoff[i],'hp',fs=fs,output='sos')
sos.append(sos_coeff)
print(cutoff[i])
y = []
num_filters = len(sos)
for i in range(num_filters):
x = signal.sosfilt(sos[i],h)
y.append(x)
y_sum = 0
Ydb_bands = []
Yf_bands = []
for i in range(num_filters):
y_sum += y[i]
Y,Yf,Ydb = fft(y[i],fs,sig_len)
Ydb_bands.append(Ydb)
Yf_bands.append(Yf)
Y,Yf,Ydb = fft(y_sum,fs,sig_len)
#plt.figure(1)
#plt.plot(y_sum)
plt.figure(2)
for i in range(num_filters):
plt.semilogx(Yf_bands[i],Ydb_bands[i])
plt.semilogx(Yf,Ydb)
plt.hlines(-3,0,max(Yf))
plt.ylim(-30,5)
plt.xlabel('Frequency (Hz)')
plt.ylabel('Magnitude (dB)')
plt.show()
| [
"matplotlib"
] |
7d7a769a80ccabd6909a46a80bc97ea0fba6525a | Python | HosseinlGholami/ML-clustering | /hw5-2-1.py | UTF-8 | 1,373 | 2.59375 | 3 | [] | no_license | import pandas as pd
import numpy as np
path='F:\Data\data2-x.csv'
df=pd.read_csv(path)
path='F:\Data\data2-y.csv'
df1=pd.read_csv(path)
from sklearn.cluster import KMeans
score=list()
for i in [10]:
kmeans = KMeans(
        n_clusters=i, # number of clusters
        init='k-means++', # seeds chosen as the farthest points;
        # an ndarray of shape (n_clusters, n_features) can also be passed
        n_init=10, # number of times the algorithm is run
        max_iter=300, # maximum iterations of the algorithm
        tol=0.0001, # convergence tolerance
        precompute_distances='auto', # if active: faster but uses more memory
        copy_x=True,
algorithm='full'
)
Learned = kmeans.fit(df)
labels = Learned.predict(df)
unique, counts = np.unique(labels, return_counts=True)
unique1, counts1 = np.unique(df1, return_counts=True)
# import matplotlib.pyplot as plt
# plt.plot(unique,counts,'ro')
# plt.xlabel(i)
# plt.show()
print('____________clustering____________')
for i in range(10):
print( unique[i],':',counts[i],'\n')
print('____________labeled data____________')
for i in range(10):
print( unique1[i],':',counts1[i],'\n')
# Getting the cluster labels
# Centroid values
#centroids = kmeans.cluster_centers_ | [
"matplotlib"
] |
ff63bfcacc6d030c6c9842f9428b3361f89b369c | Python | lorransr/axyz-dashboard | /app/sdk.py | UTF-8 | 5,640 | 2.8125 | 3 | [] | no_license | import pandas as pd
import plotly.express as px
import streamlit as st
color_dict = {
"correct": "green",
"generic_position": "gray",
"no_calibration": "goldenrod",
"incorrect": "red",
}
def get_daily_overview_plot(data):
# set datetime
df_daily = pd.DataFrame(data["daily_overview"])
df_daily.ride_end = pd.to_datetime(df_daily.ride_end)
# select columns without "pct"
selected_columns = []
for column in df_daily.columns.to_list():
if "pct" in column:
continue
else:
selected_columns.append(column)
    # resample in year_week
    df_daily_filtered = df_daily[selected_columns].copy()
year_week = df_daily_filtered.ride_end.dt.strftime("%Y-%U")
df_daily_filtered.insert(0,"year_week",year_week)
df_daily_reindexed = (df_daily_filtered
.groupby("year_week")[selected_columns[1:]]
.sum()
.reset_index())
# prepare df to plot
df_daily_melted = df_daily_reindexed.melt(id_vars="year_week")
df_daily_melted.columns = ["year_week","calibration_result","count"]
#calculating percentage
df_perct = df_daily_reindexed.iloc[:,1:]
df_perct = df_perct.div(df_perct.sum(axis=1), axis=0)
df_perct.loc[:,"year_week"] = df_daily_reindexed.year_week
df_perct_melted = df_perct.melt(id_vars="year_week")
df_daily_melted.loc[:,"percentage"] = df_perct_melted["value"]
# generate figure
fig = px.bar(
df_daily_melted,
x="year_week",
y="count",
color="calibration_result",
color_discrete_map=color_dict,
hover_data= {
"percentage":":.1%"},
width=350,
height=400
)
fig.update_xaxes(title = "Year - Week")
fig.update_yaxes(title = "Number of Trips")
fig.update_layout(
title="Calibrations Results per Week of Year",
xaxis = dict(
tickmode = 'linear',
fixedrange = True
),
yaxis = dict(
fixedrange = True
)
)
for trace in fig.data:
trace.name = trace.name.replace("_"," ")
return fig
def get_version_overview_plot(data):
df_version = pd.DataFrame(data["version_overview"])
selected_columns = []
for column in df_version.columns.to_list():
if "pct" in column:
continue
else:
selected_columns.append(column)
selected_columns.remove("sdk_version")
df_version_melted = df_version.melt(
id_vars="sdk_version",
value_vars=selected_columns,
var_name="calibration_result",
value_name="count",
)
#calculating percentage
df_perct = df_version[selected_columns]
df_perct = df_perct.div(df_perct.sum(axis=1), axis=0)
df_perct.loc[:,"sdk_version"] = df_version.sdk_version
df_perct_melted = df_perct.melt(id_vars="sdk_version")
df_version_melted.loc[:,"percentage"] = df_perct_melted["value"]
#ordering data
df_version_melted.loc[:,"lvl1"] = df_version_melted.sdk_version.apply(lambda x: int(x.split(".")[0]))
df_version_melted.loc[:,"lvl2"] = df_version_melted.sdk_version.apply(lambda x: int(x.split(".")[1]))
df_version_melted.loc[:,"lvl3"] = df_version_melted.sdk_version.apply(lambda x: int(x.split(".")[2]))
df_version_melted.sort_values(by=["lvl1","lvl2","lvl3"],ascending=True,inplace=True)
fig = px.bar(
df_version_melted,
x="sdk_version",
y="percentage",
color="calibration_result",
color_discrete_map=color_dict,
hover_data={"percentage":":.1%",
"count":True},
width=350,
height=400
)
fig.update_xaxes(title="SDK version")
fig.update_yaxes(title="Number of Trips")
fig.update_layout(
title="Calibrations Results per SDK Versions",
xaxis = dict(
tickmode = 'linear',
fixedrange = True
),
yaxis = dict(
fixedrange = True
),
yaxis_tickformat = '%'
)
for trace in fig.data:
trace.name = trace.name.replace("_"," ")
return fig
def path_to_image(pose):
path = "https://raw.githubusercontent.com/lorransr/axyz-dashboard/master/app/assets/images/positions/{}.png".format(pose)
return '<img src="'+ path + '" width="60" >'
def get_driver_summary(data):
df_driver = pd.DataFrame(data["driver_summary"])
df_driver.loc[:,"user_response_pose"] = df_driver.user_response_pose.astype("int")
df_driver.loc[:,"position_image"] = (df_driver
.user_response_pose
.apply(lambda x: path_to_image(x)))
df_driver = df_driver[df_driver.user_response_pose != 0]
new_columns = []
for column in df_driver.columns:
new_columns.append(column.replace("_"," "))
df_driver.columns = new_columns
float_frmt = lambda x: '{:,.0f}'.format(x) if x > 1e3 else '{:,.1f}'.format(x)
frmt = {"accuracy":float_frmt}
return df_driver.to_html(escape=False,formatters = frmt)
def load_page(data):
daily_overview = get_daily_overview_plot(data)
version_overview = get_version_overview_plot(data)
st.plotly_chart(
daily_overview,
config=dict(displayModeBar=False))
st.plotly_chart(
version_overview,
config=dict(displayModeBar=False))
st.markdown("### %Correct x User x Position")
df_driver = get_driver_summary(data)
# st_ms = st.multiselect("Columns", df_driver.columns.tolist(),df_driver.columns.tolist())
st.markdown(df_driver, unsafe_allow_html=True)
| [
"plotly"
] |
9cc3a5f2c2cf40a0792d6377d6697eb5fe0bb9f5 | Python | q2806060/python-note | /numpydemo/05/day05/demo04_poly.py | UTF-8 | 363 | 3.1875 | 3 | [] | no_license | """
demo04_poly.py - polynomial functions
"""
import numpy as np
import matplotlib.pyplot as mp
x = np.linspace(-20, 20, 1000)
y = 4*x**3 + 3*x**2 -1000*x + 1
# find the stationary point coordinates
P = np.array([4, 3, -1000, 1])
Q = np.polyder(P)
xs = np.roots(Q)
ys = np.polyval(P, xs)
mp.plot(x, y, color='dodgerblue')
mp.scatter(xs, ys, s=60, marker='s', c='red',
zorder=3)
mp.show()
| [
"matplotlib"
] |
e31df015868d7eefd10d2ff6d1f50266f5e34d61 | Python | silviu20/fzwork | /PythonWork/kaggle/iris/submission/predict_iris_type_v1d.py | UTF-8 | 5,190 | 2.9375 | 3 | [] | no_license | #!/usr/bin/python
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import train_test_split
from sklearn import linear_model
from sklearn import svm
import xgboost as xgb
from sklearn.grid_search import GridSearchCV
import matplotlib.pyplot as plt
iris = pd.read_csv ('../input/Iris.csv')
print iris.head()
print iris.tail()
print iris.isnull().sum()
print iris.info()
print iris.shape
# Pair plot for two columns
sns.pairplot(iris.drop('Id', axis=1), hue='Species')
# Scatter plot for two columns (numeric feature)
# iris.plot.scatter(x='SepalLengthCm', y='PetalLengthCm')
# sns.boxplot(x='SepalLengthCm', y='Species', data=iris)
features=['SepalLengthCm', 'SepalWidthCm', 'PetalLengthCm', 'PetalWidthCm']
# print iris.corr(features)
# sepalLengthCm= iris[['SepalLengthCm', 'SepalWidthCm']]
# print sepalLengthCm
# print sepalLengthCm.shape, type(sepalLengthCm)
# The distribution of one column
# sns.distplot(iris['SepalLengthCm'])
iris_features= iris[features]
print iris_features
print iris_features.shape, type(iris_features)
print iris_features.corr()
print '#'*80
plt.scatter(iris['SepalLengthCm'], iris['SepalWidthCm'])
plt.show()
print '#################################################'
X=iris[features]
y=iris['Species']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
knc_model =KNeighborsClassifier(n_neighbors=5)
knc_model.fit(X_train,y_train)
print 'KNN: Accuracy with a single train/test split', knc_model.score(X_test, y_test)
# scores = cross_val_score(knc_model, X, y, cv=5)
scores = cross_val_score(knc_model, X_train, y_train, cv=5)
print 'KNN: the mean of Accuracy with a cross value train/test split is: ', scores.mean()
print 'KNN:The std of Accuracy with a cross value train/test split is', scores.std()
test_data=[[4.6, 3.1, 1.5,0.2]]
test_data2=[[6.5,3.0,5.2,2.0 ]]
res=knc_model.predict(test_data)
print res
res2=knc_model.predict(test_data2)
print res2
print '#'*50
log_reg_model = linear_model.LogisticRegression()
log_reg_model.fit(X_train, y_train)
print 'Linear regression: Accuracy with a single train/test split', log_reg_model.score(X_test, y_test)
scores_log_reg_model = cross_val_score(log_reg_model, X_train, y_train, cv=5)
print 'Linear regression: the mean of Accuracy with a cross value train/test split is: ', scores_log_reg_model.mean()
print 'Linear regression: The std of Accuracy with a cross value train/test split is', scores_log_reg_model.std()
print '#'*50
clf_model = svm.SVC()
clf_model.fit(X_train, y_train)
print 'SVM: Accuracy with a single train/test split', clf_model.score(X_test, y_test)
scores_clf_model = cross_val_score(clf_model, X_train, y_train, cv=5)
print 'SVM: the mean of Accuracy with a cross value train/test split is: ', scores_clf_model.mean()
print 'SVM: The std of Accuracy with a cross value train/test split is', scores_clf_model.std()
print '#'*50
xgb_model = xgb.XGBClassifier()
xgb_model.fit(X_train, y_train)
print 'XGBoost: Accuracy with a single train/test split', xgb_model.score(X_test, y_test)
scores_xgb_model = cross_val_score(xgb_model, X_train, y_train, cv=5)
print 'XGBoost: the mean of Accuracy with a cross value train/test split is: ', scores_xgb_model.mean()
print 'XGBoost: The std of Accuracy with a cross value train/test split is', scores_xgb_model.std()
print '#'*50
parameter_candidates = [ {'C':[1, 10, 100], 'kernel':['linear']},{ 'C':[1, 10, 100],'gamma':[0.001, 0.0001], 'kernel':['rbf'] }]
clf_model2 = GridSearchCV(estimator =svm.SVC(), param_grid = parameter_candidates, n_jobs=-1)
clf_model2.fit(X_train, y_train)
print 'Best score: ', clf_model2.best_score_
print 'Best C: ', clf_model2.best_estimator_.C
print 'Best kernel: ', clf_model2.best_estimator_.kernel
print 'Best gamma: ', clf_model2.best_estimator_.gamma
print '#'*50
clf_model3 = svm.SVC(C=1, gamma='auto', kernel='linear')
clf_model3.fit(X_train, y_train)
print 'SVM: Accuracy with a single train/test split', clf_model3.score(X_test, y_test)
scores_clf_model3 = cross_val_score(clf_model3, X_train, y_train, cv=5)
print 'SVM: the mean of Accuracy with a cross value train/test split is: ', scores_clf_model3.mean()
print 'SVM: The std of Accuracy with a cross value train/test split is', scores_clf_model3.std()
print '#'*50
# plt.scatter(iris[:50]['SepalLengthCm'], iris[:50]['SepalWidthCm'], label='Iris-virginica')
# plt.scatter(iris[51:101]['SepalLengthCm'], iris[51:101]['SepalWidthCm'], label='Iris-setosa')
# plt.show()
# f, ax = plt.subplots()
sns.lmplot('SepalLengthCm', 'SepalWidthCm', data=iris, hue='Species')
plt.show()
# clf_model = svm.SVC()
# clf_model.fit(X_train, y_train)
# print 'SVM: Accuracy with a single train/test split', clf_model.score(X_test, y_test)
# scores_clf_model = cross_val_score(clf_model, X_train, y_train, cv=5)
# print 'SVM: the mean of Accuracy with a cross value train/test split is: ', scores_clf_model.mean()
# print 'SVM: The std of Accuracy with a cross value train/test split is', scores_clf_model.std()
| [
"matplotlib",
"seaborn"
] |
4c095c08f282d388fd64de00604354b620ec2649 | Python | xepoo/vnpy | /vnpy_slim/strategies/MLP_data_analysis.py | UTF-8 | 19,239 | 2.828125 | 3 | [
"MIT"
] | permissive | # encoding: UTF-8
import warnings
warnings.filterwarnings("ignore")
from pymongo import MongoClient, ASCENDING
import pymysql
import pandas as pd
import numpy as np
from datetime import datetime
import talib
import matplotlib.pyplot as plt
import scipy.stats as st
from sklearn.model_selection import train_test_split
# LogisticRegression (logistic regression)
from sklearn.linear_model import LogisticRegression
# DecisionTreeClassifier (decision tree)
from sklearn.tree import DecisionTreeClassifier
# SVC (support vector classification)
from sklearn.svm import SVC
# MLP (neural network)
from sklearn.neural_network import MLPClassifier
from sklearn.model_selection import GridSearchCV
class DataAnalyzerforSklearn(object):
"""
    This class prepares summary data for the SVM analysis; whether a classification
    is correct is judged by a linear regression of the slope over the next n bars.
    Instead of analysing HLOC directly, it uses the following non-linear features
    (i.e. independent of the absolute price level):
    1. Percentage
    2. std
    3. MACD
    4. CCI
    5. ATR
    6. slope of the moving average before the bar
    7. RSI
"""
def __init__(
self,
exportpath="D:\\SynologyDrive\\future_data\\",
datformat=['datetime', 'high', 'low', 'open', 'close', 'volume']):
self.collection = None
self.df = pd.DataFrame()
self.exportpath = exportpath
self.datformat = datformat
self.startBar = 2
self.endBar = 12
self.step = 2
self.pValue = 0.05
    #----------------------------------------- data import -------------------------------------------------
def db2df(self,
symbol,
start,
end,
mysqlhost="localhost",
mysqlport=3306,
user="root",
password="root",
database="",
              export2csv=False):
        """Read bar records from the MySQL database and return them as a DataFrame"""
self.collection = symbol
conn = pymysql.connect(host=mysqlhost,
port=mysqlport,
user=user,
password=password,
database=database,
charset='utf8',
use_unicode=True)
sql = "SELECT `datetime`, `high_price` as `high`, `low_price` as `low`, `open_price` as `open`, `close_price` as `close`, `volume` " \
"FROM dbbardata where symbol='%s' and `datetime`>str_to_date('%s','%%Y-%%m-%%d') and `datetime`<str_to_date('%s','%%Y-%%m-%%d')" % (symbol, start, end)
self.df = pd.read_sql(sql=sql, con=conn)
self.df = self.df[self.datformat]
self.df = self.df.reset_index(drop=True)
print(sql)
print(self.df.shape)
path = self.exportpath + self.collection + ".csv"
if export2csv == True:
self.df.to_csv(path, index=True, header=True)
return self.df
    def csv2df(self, csvpath, dataname="csv_data", export2csv=False):
        """Read bar data from a csv file into a DataFrame"""
csv_df = pd.read_csv(csvpath)
self.df = csv_df[self.datformat]
self.df["datetime"] = pd.to_datetime(self.df['datetime'])
self.df = self.df.reset_index(drop=True)
path = self.exportpath + dataname + ".csv"
if export2csv == True:
self.df.to_csv(path, index=True, header=True)
return self
    def df2Barmin(self, inputdf, barmins, crossmin=1, export2csv=False):
        """Merge 1-minute bar DataFrame data into larger bars, e.g. 3-minute / 5-minute bars. If the first bar starts at 9:01, crossmin = 0; if it starts at 9:00, crossmin is 1"""
dfbarmin = pd.DataFrame()
highBarMin = 0
lowBarMin = 0
openBarMin = 0
volumeBarmin = 0
datetime = 0
for i in range(0, len(inputdf) - 1):
bar = inputdf.iloc[i, :].to_dict()
if openBarMin == 0:
openBarmin = bar["open"]
if highBarMin == 0:
highBarMin = bar["high"]
else:
highBarMin = max(bar["high"], highBarMin)
if lowBarMin == 0:
lowBarMin = bar["low"]
else:
lowBarMin = min(bar["low"], lowBarMin)
closeBarMin = bar["close"]
datetime = bar["datetime"]
volumeBarmin += int(bar["volume"])
            # the X-minute window is complete
            if not (bar["datetime"].minute + crossmin) % barmins: # evenly divisible by X
                # generate the bar for the last X-minute window
barMin = {
'datetime': datetime,
'high': highBarMin,
'low': lowBarMin,
'open': openBarmin,
'close': closeBarMin,
'volume': volumeBarmin
}
dfbarmin = dfbarmin.append(barMin, ignore_index=True)
highBarMin = 0
lowBarMin = 0
openBarMin = 0
volumeBarmin = 0
if export2csv == True:
dfbarmin.to_csv(self.exportpath + "bar" + str(barmins) +
str(self.collection) + ".csv",
index=True,
header=True)
return dfbarmin
    #----------------------------------------- indicator calculations -------------------------------------------------
    def dfcci(self, inputdf, n, export2csv=True):
        """Compute the CCI indicator with talib, write it into the df and return it"""
dfcci = inputdf
dfcci["cci"] = None
for i in range(n, len(inputdf)):
df_ne = inputdf.loc[i - n + 1:i, :]
cci = talib.CCI(np.array(df_ne["high"]), np.array(df_ne["low"]),
np.array(df_ne["close"]), n)
dfcci.loc[i, "cci"] = cci[-1]
dfcci = dfcci.fillna(0)
dfcci = dfcci.replace(np.inf, 0)
if export2csv == True:
dfcci.to_csv(self.exportpath + "dfcci" + str(self.collection) +
".csv",
index=True,
header=True)
return dfcci
    def dfatr(self, inputdf, n, export2csv=True):
        """Compute the ATR indicator with talib, write it into the df and return it"""
dfatr = inputdf
for i in range((n + 1), len(inputdf)):
df_ne = inputdf.loc[i - n:i, :]
atr = talib.ATR(np.array(df_ne["high"]), np.array(df_ne["low"]),
np.array(df_ne["close"]), n)
dfatr.loc[i, "atr"] = atr[-1]
dfatr = dfatr.fillna(0)
dfatr = dfatr.replace(np.inf, 0)
if export2csv == True:
dfatr.to_csv(self.exportpath + "dfatr" + str(self.collection) +
".csv",
index=True,
header=True)
return dfatr
    def dfrsi(self, inputdf, n, export2csv=True):
        """Compute the RSI indicator with talib, write it into the df and return it"""
dfrsi = inputdf
dfrsi["rsi"] = None
for i in range(n + 1, len(inputdf)):
df_ne = inputdf.loc[i - n:i, :]
rsi = talib.RSI(np.array(df_ne["close"]), n)
dfrsi.loc[i, "rsi"] = rsi[-1]
dfrsi = dfrsi.fillna(0)
dfrsi = dfrsi.replace(np.inf, 0)
if export2csv == True:
dfrsi.to_csv(self.exportpath + "dfrsi" + str(self.collection) +
".csv",
index=True,
header=True)
return dfrsi
    def Percentage(self, inputdf, export2csv=True):
        """Compute the bar-to-bar percentage change of the close, write it into the df and return it"""
dfPercentage = inputdf
# dfPercentage["Percentage"] = None
for i in range(1, len(inputdf)):
# if dfPercentage.loc[i,"close"]>dfPercentage.loc[i,"open"]:
# percentage = ((dfPercentage.loc[i,"high"] - dfPercentage.loc[i-1,"close"])/ dfPercentage.loc[i-1,"close"])*100
# else:
# percentage = (( dfPercentage.loc[i,"low"] - dfPercentage.loc[i-1,"close"] )/ dfPercentage.loc[i-1,"close"])*100
if dfPercentage.loc[i - 1, "close"] == 0.0:
percentage = 0
else:
percentage = ((dfPercentage.loc[i, "close"] -
dfPercentage.loc[i - 1, "close"]) /
dfPercentage.loc[i - 1, "close"]) * 100.0
dfPercentage.loc[i, "Perentage"] = percentage
dfPercentage = dfPercentage.fillna(0)
dfPercentage = dfPercentage.replace(np.inf, 0)
if export2csv == True:
dfPercentage.to_csv(self.exportpath + "Percentage_" +
str(self.collection) + ".csv",
index=True,
header=True)
return dfPercentage
    def dfMACD(self, inputdf, n, export2csv=False):
        """Compute the MACD indicator with talib, write it into the df and return it"""
dfMACD = inputdf
for i in range(n, len(inputdf)):
df_ne = inputdf.loc[i - n + 1:i, :]
macd, signal, hist = talib.MACD(np.array(df_ne["close"]), 12, 26,
9)
#dfMACD.loc[i, "macd"] = macd[-1]
#dfMACD.loc[i, "signal"] = signal[-1]
dfMACD.loc[i, "hist"] = hist[-1]
dfMACD = dfMACD.fillna(0)
dfMACD = dfMACD.replace(np.inf, 0)
if export2csv == True:
dfMACD.to_csv(self.exportpath + "macd" + str(self.collection) +
".csv",
index=True,
header=True)
return dfMACD
    def dfSTD(self, inputdf, n, export2csv=False):
        """Compute the rolling standard deviation with talib, write it into the df and return it"""
dfSTD = inputdf
for i in range(n, len(inputdf)):
df_ne = inputdf.loc[i - n + 1:i, :]
std = talib.STDDEV(np.array(df_ne["close"]), n)
dfSTD.loc[i, "std"] = std[-1]
dfSTD = dfSTD.fillna(0)
dfSTD = dfSTD.replace(np.inf, 0)
if export2csv == True:
dfSTD.to_csv(self.exportpath + "dfSTD" + str(self.collection) +
".csv",
index=True,
header=True)
return dfSTD
    #----------------------------------------- add trend labels -------------------------------------------------
    def addTrend(self, inputdf, n=30, export2csv=False):
        """Label the trend using a linear regression of the slope over the next n bars"""
dfTrend = inputdf
for i in range(1, len(dfTrend) - n - 1):
histRe = np.array(dfTrend["close"])[i:i + n]
xAixs = np.arange(n) + 1
res = st.linregress(y=histRe, x=xAixs)
if res.pvalue < self.pValue + 0.02:
if res.slope > 0.5:
dfTrend.loc[i, "tradeindictor"] = 1
elif res.slope < -0.5:
dfTrend.loc[i, "tradeindictor"] = -1
dfTrend = dfTrend.fillna(0)
dfTrend = dfTrend.replace(np.inf, 0)
if export2csv == True:
dfTrend.to_csv(self.exportpath + "addTrend" +
str(self.collection) + ".csv",
index=True,
header=True)
return dfTrend
    def addTrend2(self, inputdf, n=30, export2csv=False):
        """Label the trend by comparing the mean of the next n closes with the current close"""
dfTrend = inputdf
for i in range(1, len(dfTrend) - n - 1):
histRe = np.array(dfTrend["close"])[i:i + n]
m = histRe.mean()
if m > 1.001 * dfTrend.loc[i, "close"]:
dfTrend.loc[i, "tradeindictor"] = 1
elif m < 0.999 * dfTrend.loc[i, "close"]:
dfTrend.loc[i, "tradeindictor"] = -1
else:
dfTrend.loc[i, "tradeindictor"] = 0
dfTrend = dfTrend.fillna(0)
dfTrend = dfTrend.replace(np.inf, 0)
if export2csv == True:
dfTrend.to_csv(self.exportpath + "addTrend" +
str(self.collection) + ".csv",
index=True,
header=True)
return dfTrend
def GirdValuate(X_train, y_train):
    """Grid-search over four classifiers:
    1) LogisticRegression - logistic regression
    2) DecisionTreeClassifier - decision tree
    3) SVC - support vector classification
    4) MLP - neural network"""
clf_DT = DecisionTreeClassifier()
param_grid_DT = {'max_depth': [1, 2, 3, 4, 5, 6]}
clf_Logit = LogisticRegression()
param_grid_logit = {'solver': ['liblinear', 'lbfgs', 'newton-cg', 'sag']}
clf_svc = SVC()
param_grid_svc = {
'kernel': ('linear', 'poly', 'rbf', 'sigmoid'),
'C': [1, 2, 4],
'gamma': [0.125, 0.25, 0.5, 1, 2, 4]
}
clf_mlp = MLPClassifier()
param_grid_mlp = {
"hidden_layer_sizes": [(100, ), (100, 30)],
"solver": ['adam', 'sgd', 'lbfgs'],
"max_iter": [20],
"verbose": [False]
}
    # bundle the models and their parameter grids
clf = [clf_DT, clf_Logit, clf_mlp, clf_svc]
param_grid = [
param_grid_DT, param_grid_logit, param_grid_mlp, param_grid_svc
]
    from sklearn.model_selection import StratifiedKFold # cross-validation
    kflod = StratifiedKFold(n_splits=10, shuffle=True,
                            random_state=7) # split the training data into 10 disjoint folds, which also makes multi-process testing easier
    # grid search
print("begin GridSearchCV")
for i in range(0, 4):
grid = GridSearchCV(clf[i],
param_grid[i],
scoring='accuracy',
n_jobs=-1,
cv=kflod,
verbose=1)
grid.fit(X_train, y_train)
print(grid.best_params_, ': ', grid.best_score_)
if __name__ == '__main__':
    # load the data
# exportpath = "C:\\Users\shui0\OneDrive\Documents\Project\\"
exportpath = "D:\\SynologyDrive\\future_data\\"
DA = DataAnalyzerforSklearn(exportpath)
    # import from the database
# start = datetime.strptime("20160501", '%Y%m%d')
# end = datetime.strptime("20170501", '%Y%m%d')
start = "2016-05-01"
end = "2018-12-20"
# df = DA.db2df(symbol="cmain", start=start, end=end, mysqlhost="walkright.synology.me",
# mysqlport=3307, user="root", password="zaq1xsw2CDE",database="future_schema")
df = DA.db2df(symbol="cmain",
start=start,
end=end,
mysqlhost="localhost",
mysqlport=3306,
user="root",
password="root",
database="futures_schema")
df5min = DA.df2Barmin(df, 5)
df5minAdd = DA.addTrend2(df5min, export2csv=True)
df5minAdd = DA.dfMACD(df5minAdd, n=34, export2csv=True)
df5minAdd = DA.dfatr(df5minAdd, n=25, export2csv=True)
df5minAdd = DA.dfrsi(df5minAdd, n=35, export2csv=True)
df5minAdd = DA.dfcci(df5minAdd, n=30, export2csv=True) # no use
df5minAdd = DA.dfSTD(df5minAdd, n=30, export2csv=True)
df5minAdd = DA.Percentage(df5minAdd, export2csv=True)
    # split into train / test sets
    df_test = df5minAdd.loc[60:, :] # only analyse from the 60th bar on, since many earlier values are empty
    y = np.array(df_test["tradeindictor"]) # keep only the trend label, converted to an array
    X = df_test.drop([
        "tradeindictor", "close", "datetime", "high", "low", "open", "volume"
    ], axis=1).values # don't analyse HLOC directly, keep only the feature columns, converted to an array
X_train, X_test, y_train, y_test = train_test_split(X,
y,
test_size=0.3,
                                                        random_state=0) # 70/30 split
# X_train = X[:8689]
# y_train = y[:8689]
# X_test = X[-8690:]
# y_test = y[-8690:]
    print("training set size: %s, test set size: %s" % (len(X_train), len(X_test)))
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import SelectPercentile
from sklearn.feature_selection import mutual_info_classif
    # Feature selection: keep the top-scoring features by percentile (best 70%); SelectKBest could be used instead to pick a fixed number of features.
selectPer = SelectPercentile(mutual_info_classif, percentile=70)
#selectPer = SelectKBest(mutual_info_classif, k=7)
    print("x_train.shape:", X_train.shape)
    print("y_train.shape:", y_train.shape)
X_train = selectPer.fit_transform(X_train, y_train)
# pddf = pd.DataFrame(X_train)
# pddf.to_csv(exportpath + "after_fit_transform" + ".csv",
# index=True,
# header=True)
# pddf = pd.DataFrame(X_test)
# pddf.to_csv(exportpath + "test_before_transform" + ".csv",
# index=True,
# header=True)
X_test = selectPer.transform(X_test)
# pddf = pd.DataFrame(X_test)
# pddf.to_csv(exportpath + "after_before_transform" + ".csv",
# index=True,
# header=True)
    # Fpr-based selection could also be used
# selectFea=SelectFpr(alpha=0.01)
# X_train_new = selectFea.fit_transform(X_train, y_train)
# X_test_new = selectFea.transform(X_test)
    # the pattern below was used for the analysis, followed by grid-search tuning
#GirdValuate(X_train, y_train)
    # use the best model selected above and test it
    # • model prediction: model.predict()
# • Accuracy:metrics.accuracy_score()
# • Presicion:metrics.precision_score()
# • Recall:metrics.recall_score()
from sklearn import metrics
clf_selected = MLPClassifier(hidden_layer_sizes=(100, 30),
max_iter=20,
                                 solver='adam') # fill in the best model and parameters from the grid search here,
# {'hidden_layer_sizes': (100, 30), 'max_iter': 20, 'solver': 'adam', 'verbose': False} : 0.9897016507648039
clf_selected.fit(X_train, y_train)
#print("X_test:", X_test[-1:])
print("X_test.shape:", X_test.shape)
y_pred = clf_selected.predict(X_test)
#accuracy
accuracy = metrics.accuracy_score(y_true=y_test, y_pred=y_pred)
print('accuracy:', accuracy)
#precision
precision = metrics.precision_score(y_true=y_test,
y_pred=y_pred,
average="micro")
print('precision:', precision)
#recall
recall = metrics.recall_score(y_true=y_test,
y_pred=y_pred,
average="micro")
print('recall:', recall)
    # actual vs. predicted values
print("y_test.shape:", y_test.shape)
print("y_pred.shape:", y_pred.shape)
dfresult = pd.DataFrame({'Actual': y_test, 'Predict': y_pred})
dfresult.to_csv(exportpath + "result" + ".csv", index=True, header=True)
import joblib
    # save the model locally
joblib.dump(clf_selected, 'clf_selected.m')
    # restore the model
clf_tmp = joblib.load('clf_selected.m')
| [
"matplotlib"
] |
ad33a889bb9110345b125fb450edac787a1e4235 | Python | kalekundert/ligrna | /notebook/20170329_test_multiple_spacers/n17_spacer_uniformity_test.py | UTF-8 | 3,884 | 2.71875 | 3 | [] | no_license | #!/usr/bin/env python3
"""
Create a plot showing the likelihood of the distribution of nucleotides observed
at each position in the spacer.
"""
import random
import numpy as np
import matplotlib.pyplot as plt
from debugtools import p, pp, pv
from pprint import pprint
from color_me import ucsf
positions = np.arange(18)
j_from_nuc = dict(A=0, T=1, G=2, C=3)
nuc_from_j = {v:k for k, v in j_from_nuc.items()}
def calc_frequencies(spacers):
frequencies = [np.zeros(4) for i in positions]
for spacer in spacers:
for i, nuc in enumerate(spacer):
frequencies[i][j_from_nuc[nuc]] += 1
return frequencies
def calc_pvals(frequencies):
from scipy.stats import chisquare as chi2
pvals = np.array([chi2(f).pvalue for f in frequencies])
# Apply the Bonferroni multiple testing correction. This correction seems to
    # be generally frowned upon, but it's appropriate in this case because I'm
    # interested in the global null hypothesis (e.g. are all the null hypotheses
# rejected/accepted). It's also known to be conservative, which means that if
# the null hypothesis is rejected, I can be pretty confident that something
# else is going on.
return pvals * len(pvals)
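# Added worked example (illustrative numbers only): with the 18 positions tested
# here, a raw chi-square p-value of 0.0001 becomes 0.0001 * 18 = 0.0018 after the
# correction and still clears the 0.01 line plotted below, while a raw p of 0.001
# becomes 0.018 and does not.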
if __name__ == '__main__':
# Load the spacer sequences.
with open('n17_spacers.tsv') as file:
sequenced_spacers = [x.strip() for x in file.readlines()]
with open('doench_spacers.tsv') as file:
doench_spacers = [x.split()[1][7:25] for x in file.readlines()]
random_spacers = [
''.join(random.choice('ATCG') for _ in positions)
for _ in sequenced_spacers
]
# Calculate χ² p-values for both sets of spacers.
sequenced_freqs = calc_frequencies(sequenced_spacers)
sequenced_pvals = calc_pvals(sequenced_freqs)
doench_freqs = calc_frequencies(doench_spacers)
doench_pvals = calc_pvals(doench_freqs)
random_freqs = calc_frequencies(random_spacers)
random_pvals = calc_pvals(random_freqs)
# Plot the p-values on the left axis.
fig, ax1 = plt.subplots()
fig.patch.set_color('white')
ax1.plot(positions, -np.log10(sequenced_pvals), 'ko', mec='none', label='Sequenced spacers')
ax1.plot(positions, -np.log10(doench_pvals), 'k^', mec='none', label='Doench spacers')
ax1.plot(positions, -np.log10(random_pvals), 'ko', mfc='none', label='Uniform random spacers')
ax1.axhline(-np.log10(0.01), color='k', linestyle='--', label='99% confidence level')
ax1.set_xlabel('position')
ax1.set_ylabel('χ² uniformity test\n-log(Bonferonni-corrected p)')
ax1.set_xticks(positions)
ticklabels = list(17 - positions)
ticklabels[-1] = -1
ax1.set_xticklabels(ticklabels)
ax1.set_xlim(min(positions) - 0.5, max(positions) + 0.5)
ax1.legend(loc='best')
# Plot the nucleotide frequencies on the right axis.
ax2 = ax1.twinx()
ax1.set_zorder(ax2.get_zorder()+1)
ax1.patch.set_visible(False)
color = ucsf.dark_grey[0]
color_from_nuc = dict(
A=ucsf.blue[2],
T=ucsf.orange[2],
G=ucsf.olive[2],
C=ucsf.red[2],
)
for i in positions:
for j in range(4):
nuc = nuc_from_j[j]
freq = sequenced_freqs[i][j] / sum(sequenced_freqs[i])
ax2.text(i, freq, nuc,
color=color_from_nuc[nuc],
family='monospace',
horizontalalignment='center',
verticalalignment='center',
zorder=10,
)
ax2.tick_params('y', colors=color)
ax2.set_ylim(0, 1)
ax2.set_ylabel('nucleotide frequencies\n(sequenced spacers)',
rotation=270,
verticalalignment='baseline',
color=color)
fig.tight_layout()
plt.savefig('n17_spacer_uniformity_test.svg')
plt.show()
| [
"matplotlib"
] |
0d2862015fe0dade05662751ddedd2469b363200 | Python | HoriaGiuvelca/Image-Search-Engine | /search.py | UTF-8 | 1,009 | 2.546875 | 3 | [] | no_license | from colordescriptor import ColorDescriptor
from searcher import Searcher
import argparse
import cv2
import matplotlib.pyplot as mpl
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--index", required = True, help = "Path to the 'database' index file")
ap.add_argument("-q", "--query", required = True, help = "Path to the image to search for")
ap.add_argument("-r", "--result-path", required = True, help = "Path to the folder of images")
args = vars(ap.parse_args())
cd = ColorDescriptor((8, 12, 3))
query = cv2.imread(args["query"])
features = cd.describe(query)
searcher = Searcher(args["index"])
results = searcher.search(features)
# display the query
query = cv2.resize(query, (1366, 768))
cv2.imshow("Query", query)
# display the results
for(score, resultID) in results:
result = cv2.imread("./" + resultID)
result = cv2.resize(result, (1366, 768))
mpl.figure()
cv2.imshow("Result", result)
cv2.waitKey(0) | [
"matplotlib"
] |
b4fe0daba365c8bd98070f5211141d8f1705ea31 | Python | mherkhachatryan/python-codes | /visaulization/Exponential Cut .py | UTF-8 | 4,922 | 2.875 | 3 | [] | no_license |
# coding: utf-8
# ## Importing Packages
# In[19]:
import matplotlib.pyplot as plt
import numpy as np
import matplotlib as mlp
mlp.style.use("ggplot")
# ## Constants
# In[20]:
n_0 = 10**11
k = 100
e = np.linspace(1,10**6,100)
cutoff = 10**4
# ## Plotting the figure
# In[38]:
fig = plt.figure(figsize=(15,12))
#plot alpha = 2.2
ax1 = fig.add_subplot(331)
ax1.plot(e, n_0 *(e/k)**(-2.2),ls = "--", color = "#2413dd",label = "n(e)" )
ax1.plot(e,n_0 *(e/k)**(-2.2)*np.exp(-e/(cutoff)), color = "r",label = "f(e)")
ax1.set_xscale("log")
ax1.set_yscale("log")
ax1.set_title(r'$\alpha = 2.20 $', fontsize = 14)
ax1.set_ylim(10**-45,10**18)
plt.setp(ax1.get_xticklabels(), visible=False)
#plot alpha = 2.92
ax2 = fig.add_subplot(332)
ax2.plot(e, n_0 * (e/k)**(-2.92),ls = "--", color = "#2413dd",label = "n(e)" )
ax2.plot(e,n_0 *(e/k)**(-2.92)*np.exp(-e/(cutoff)), color = "r",label = "f(e)")
ax2.set_xscale("log")
ax2.set_yscale("log")
ax2.set_title("Exponential Cut" +"\n"+r'$\alpha = 2.92 $', fontsize = 14)
plt.setp(ax2.get_xticklabels(), visible=False)
plt.setp(ax2.get_yticklabels(), visible=False)
ax2.set_ylim(10**-45,10**18)
#plot alpha = 3.65
ax3 = fig.add_subplot(333)
ax3.plot(e, n_0 * (e/k)**(-3.65),ls = "--", color = "#2413dd",label = "n(e)" )
ax3.plot(e,n_0 *(e/k)**(-3.65)*np.exp(-e/(cutoff)), color = "r",label = "f(e)")
ax3.set_xscale("log")
ax3.set_yscale("log")
ax3.set_title(r'$\alpha = 3.65 $', fontsize = 14)
plt.setp(ax3.get_xticklabels(), visible=False)
plt.setp(ax3.get_yticklabels(), visible=False)
ax3.set_ylim(10**-45,10**18)
#plot alpha = 4.38
ax4 = fig.add_subplot(334)
ax4.plot(e, n_0 * (e/k)**(-4.38),ls = "--", color = "#2413dd",label = "n(e)" )
ax4.plot(e,n_0 *(e/k)**(-4.38)*np.exp(-e/(cutoff)), color = "r",label = "f(e)")
ax4.set_ylabel(r'$ n(e) $'+","+ r'$ f(e) $', fontsize = 20)
ax4.set_xscale("log")
ax4.set_yscale("log")
ax4.set_title(r'$\alpha = 4.38 $', fontsize = 14)
plt.setp(ax4.get_xticklabels(), visible=False)
ax4.set_ylim(10**-45,10**18)
#adding text, constants of formulas
constants = r'$n_0 = 10^{11} $' +"\n" + r'$ k=100 $' +"\n" + r'$ \alpha $' +" is the title"
boxprop = dict(boxstyle = "round", facecolor = "#ceaf40", alpha = 0.56)
ax4.text(1.432, 4*10**-22, constants, fontsize = 15, bbox = boxprop)
# plot alpha = 5.1
ax5 = fig.add_subplot(335)
ax5.plot(e, n_0 * (e/k)**(-5.1),ls = "--", color = "#2413dd",label = "n(e)" )
ax5.plot(e,n_0 *(e/k)**(-5.1)*np.exp(-e/(cutoff)), color = "r",label = "f(e)")
ax5.set_xscale("log")
ax5.set_yscale("log")
ax5.set_title(r'$\alpha = 5.1 $', fontsize = 14)
ax5.set_ylim(10**-34,10**14)
plt.setp(ax5.get_xticklabels(), visible=False)
plt.setp(ax5.get_yticklabels(), visible=False)
ax5.set_ylim(10**-45,10**18)
#plot alpha = 5.82
ax6 = fig.add_subplot(336)
ax6.plot(e, n_0 * (e/k)**(-5.82),ls = "--", color = "#2413dd",label = "n(e)" )
ax6.plot(e,n_0 *(e/k)**(-5.82)*np.exp(-e/(cutoff)), color = "r",label = "f(e)")
ax6.set_xscale("log")
ax6.set_yscale("log")
ax6.set_title(r'$\alpha = 5.82 $', fontsize = 14)
plt.setp(ax6.get_xticklabels(), visible=False)
plt.setp(ax6.get_yticklabels(), visible=False)
ax6.set_ylim(10**-45,10**18)
# plot alpha = 6.55
ax7 = fig.add_subplot(337)
ax7.plot(e, n_0 * (e/k)**(-6.55),ls = "--", color = "#2413dd",label = "n(e)" )
ax7.plot(e,n_0 *(e/k)**(-6.55)*np.exp(-e/(cutoff)), color = "r",label = "f(e)")
ax7.set_xscale("log")
ax7.set_yscale("log")
ax7.set_title(r'$\alpha = 6.55 $', fontsize = 14)
ax7.set_ylim(10**-45,10**18)
#adding text
#find boxprops at ax4 , text section
text = "\n" +"Formulas" + "\n" + r'$n \left( e \right) = n_0 * \left( \frac{e}{k} \right) ^ {-\alpha} $' + "\n" + r'$f \left( e \right) = n_0 * \left( \frac{e}{k} \right) ^ {-\alpha} *Exp \left( \frac{e}{cutoff} \right) $'
ax7.text(1.432,1*10**-29,text, fontsize =16 , bbox = boxprop)
#plot alpha = 7.28
ax8 = fig.add_subplot(338)
ax8.plot(e, n_0 * (e/k)**(-7.28),ls = "--", color = "#2413dd",label = "n(e)" )
ax8.plot(e,n_0 *(e/k)**(-7.28)*np.exp(-e/(cutoff)), color = "r",label = "f(e)")
ax8.set_xlabel("e", fontsize = 20)
ax8.set_xscale("log")
ax8.set_yscale("log")
ax8.set_title(r'$\alpha = 7.28 $', fontsize = 14)
plt.setp(ax8.get_yticklabels(), visible=False)
ax8.set_ylim(10**-45,10**18)
# plot alpha = 8.0
ax9 = fig.add_subplot(339)
ax9.plot(e, n_0 * (e/k)**(-8),ls = "--", color = "#2413dd",label = "n(e)" )
ax9.plot(e,n_0 *(e/k)**(-8)*np.exp(-e/(cutoff)), color = "r",label = "f(e)")
ax9.set_xscale("log")
ax9.set_yscale("log")
ax9.set_title(r'$\alpha = 8.00 $', fontsize = 14)
plt.setp(ax9.get_yticklabels(), visible=False)
ax9.set_ylim(10**-45,10**18)
plt.legend()
plt.tight_layout(pad = 0.1, h_pad = 0.8, w_pad = -0.2)
plt.savefig("C:/Users/user/Desktop/Mher/Exponential Cut.pdf", dpi =350 )
plt.show()
# #### This is the exponential cut for the given formulas (shown in the plot); all axes share the same logarithmic scale and range, and _alpha_ varies from 2.2 to 8.
| [
"matplotlib"
] |
01503377c8038b44ea3d26551e08a1e379e51ffb | Python | anuragmakineni/bag_utils | /scripts/error_plot.py | UTF-8 | 2,133 | 2.6875 | 3 | [] | no_license | #!/usr/bin/env python
import rosbag
import matplotlib
matplotlib.use('Qt4Agg')
import rospy
import math
import numpy as np
import matplotlib.pyplot as plt
#get bag data
robots = ['/pico03/', '/pico04/', '/pico05/']
path = '/home/anuragmakineni/Desktop/cc_1.bag'
start_index = 1433
end_index = 2604
fig = plt.figure()
for robot in robots:
odom_x = np.empty((0))
odom_y = np.empty((0))
odom_z = np.empty((0))
setpoint_x = np.empty((0))
setpoint_y = np.empty((0))
setpoint_z = np.empty((0))
error = np.empty((0))
time = np.empty((0))
time_pos = np.empty((0))
bag = rosbag.Bag(path)
topic = robot + 'odom'
for topic, msg, t in bag.read_messages(topics=[topic]):
odom_x = np.append(odom_x, msg.pose.pose.position.x)
odom_y = np.append(odom_y, msg.pose.pose.position.y)
odom_z = np.append(odom_z, msg.pose.pose.position.z)
time = np.append(time, msg.header.stamp.to_sec())
topic = robot + 'position_cmd'
for topic, msg, t in bag.read_messages(topics=[topic]):
setpoint_x = np.append(setpoint_x, msg.position.x)
setpoint_y = np.append(setpoint_y, msg.position.y)
setpoint_z = np.append(setpoint_z, msg.position.z)
time_pos = np.append(time_pos, msg.header.stamp.to_sec())
i=0
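# For every position command, pick the odometry sample closest in time and use it to
# form the instantaneous Euclidean tracking error (a nearest-timestamp matching step).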
for t in time_pos:
delta_t = abs(time - t)
o_i = np.argmin(delta_t)
error_x = setpoint_x[i] - odom_x[o_i]
error_y = setpoint_y[i] - odom_y[o_i]
error_z = setpoint_z[i] - odom_z[o_i]
total_error = np.sqrt(error_x * error_x + error_y * error_y + error_z * error_z)
error = np.append(error, total_error)
i = i+1
time_pos_cropped = time_pos[start_index:end_index]
error_cropped = error[start_index:end_index]
plt.plot(time_pos_cropped - time_pos_cropped[0], error_cropped)
print('\n' + robot + " max Error: " + str(np.amax(error_cropped)))
print(robot + " std dev: " + str(np.std(error_cropped)))
plt.title('Total Error vs. Time')
plt.grid(1)
plt.legend(robots)
plt.xlabel('Time (s)')
plt.ylabel('Error (m)')
plt.show()
bag.close()
| [
"matplotlib"
] |
b550758222b3170d8469d2849d6b4cb6acdd14c6 | Python | MoritzMoeller/reinforcement_learning | /tabular_q_learning.py | UTF-8 | 3,296 | 2.734375 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 16 10:44:00 2017
@author: mo
"""
import gym
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
SHOW_MOVIE = True
SHOW_RETURN = False
# pick grid_size = 8 for 'FrozenLake8x8-v0', grid_size = 4 for 'FrozenLake-v0'
grid_size = 8
# functions that convert the Q table to displayed objects
def Q_to_V(Q):
V = np.amax(Q,1)
return V.reshape((grid_size,grid_size))
def Q_to_greedy_quiver(Q):
a_greedy = np.argmax(Q,1)
vert = np.zeros(np.shape(a_greedy))
hori = np.zeros(np.shape(a_greedy))
for i in range(len(a_greedy)):
a = a_greedy[i]
#down: 0 left: 1 up: 2 right:3
if a == 0:
vert[i] = -1
hori[i] = 0
if a == 1:
vert[i] = 0
hori[i] = -1
if a == 2:
vert[i] = 1
hori[i] = 0
if a == 3:
vert[i] = 0
hori[i] = 1
return (hori.reshape(grid_size,grid_size), vert.reshape(grid_size,grid_size))
# set up visualisation
fig, ax = plt.subplots()
im_list = []
quiv_list = []
X, Y = np.meshgrid(np.arange(0, grid_size), np.arange(0, grid_size))
# initialise environment
env = gym.make('FrozenLake8x8-v0')
# parameters of the learning algorithm
gamma = .99
alpha = .85
eps = .05
number_of_episodes = 20000
Q = np.zeros((env.observation_space.n, env.action_space.n))
rets = []
ret = 0
s = env.reset()
i = 0
j = 0
FOUNDGOAL = False
# loop over episodes
while i < number_of_episodes:
# eps-greedy strategy with decreasing eps
a = np.argmax(Q[s,:] + np.random.randn(1,env.action_space.n)*(1./(i+1)))
(s_new, rew, done, _) = env.step(a)
# first encounter of goal state gets reported:
if rew != 0 and not FOUNDGOAL:
FOUNDGOAL = True
print("found goal! in episode %i " %j)
if done:
Q[s,a] = Q[s,a] + alpha*(rew - Q[s,a])
# paint a picture
if FOUNDGOAL:
i = i + 1
quiv_list.append(Q_to_greedy_quiver(Q))
im_list.append(Q_to_V(Q))
if i%1000 == 0:
print("Episode count: %i" %i)
s = env.reset()
rets.append(ret + rew)
ret = 0
j = j + 1
else:
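# Standard tabular Q-learning update for non-terminal steps:
# Q(s,a) <- Q(s,a) + alpha * (r + gamma * max_a' Q(s',a') - Q(s,a))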
Q[s,a] = Q[s,a] + alpha*(rew + gamma*np.amax(Q[s_new,:]) - Q[s,a])
s = s_new
ret = ret + rew
(U,V) = quiv_list[0]
im = ax.imshow(im_list[0], cmap=plt.get_cmap('CMRmap'), vmin=0, vmax=1)
quiv = ax.quiver(X,Y,U,V)
# making it into an animation
def updatefig(j):
# set the data in the axesimage object
(U,V) = quiv_list[j]
quiv.set_UVC(U,V)
im.set_array(im_list[j])
return im,quiv,
# kick off the animation
ani = animation.FuncAnimation(fig, updatefig, frames=range(1000), interval=50, blit=False)
if SHOW_MOVIE:
# Does not work in IPython console!
plt.show()
# calc ret avg over k epidodes:
k = 50
avg_rets = np.zeros(len(rets) - k)
for j in range(len(avg_rets)):
avg_rets[j] = np.mean(rets[j:j+k])
if SHOW_RETURN:
plt.plot(avg_rets)
| [
"matplotlib"
] |
f379de497711edd598fc7cbd51c4e53643d5f02d | Python | UselessOldQian/Airvis | /hourly_graph.py | UTF-8 | 3,271 | 2.953125 | 3 | [] | no_license | # -*- coding:utf-8 -*-
from collections import defaultdict
import itertools
import numpy as np
import matplotlib.pyplot as plt
import json
class HourlyGraph(object):
"""Graph class."""
def __init__(self, gid):
"""Initialize Graph instance.
Args:
gid: id of this graph.
"""
self.gid = gid
self.vertices = dict()
self.edge = defaultdict(list)
self.vertices = defaultdict(list)
self.counter = itertools.count()
self.vid_dict = {}
self.tid_dict = {}
self.xy_dict = {}
def get_vidandtid_dict(self,vid_dict,tid_dict,xy_dict):
self.vid_dict = vid_dict
self.tid_dict = tid_dict
self.xy_dict = xy_dict
def add_vertex(self, vid, ti):
"""Add a vertex to the graph."""
if vid in self.vertices[ti]:
return self
self.vertices[ti].append(vid)
return self
def add_edge(self, frm, to, ts, te):
'''
:param frm:
:param to:
:param ts: start time
:param te: end time
:return:
'''
if (frm in self.vertices[ts] and
to in self.vertices[te] and
(frm, to) in self.edge[(ts,te)]):
return self
self.edge[(ts,te)].append((frm, to))
return self
def is_contain(self, name):
for t in self.vertices.keys():
for v in self.vertices[t]:
if v == name:
return True
return False
def get_vertex_num(self):
ret_len = 0
for var in self.vertices.keys():
ret_len += len(self.vertices[var])
return ret_len
def set_gid(self, gid):
self.gid = gid
def merge(self, other):
for tid in other.vertices.keys():
for vid in other.vertices[tid]:
self.add_vertex(vid, tid)
for estart_end in other.edge.keys():
for edge_list in other.edge[estart_end]:
self.add_edge(edge_list[0], edge_list[1], estart_end[0], estart_end[1])
return self
def getGraph(self):
print('gid:', self.gid)
print('edge:', self.edge)
print('vertices:',self.vertices)
print('-------------------')
def plot(self):
"""Visualize the graph."""
try:
import networkx as nx
import matplotlib.pyplot as plt
except Exception as e:
print('Can not plot graph: {}'.format(e))
return
gnx = nx.DiGraph()
vlbs = {}
for tid, vid_list in self.vertices.items():
for vid in vid_list:
vlbs[vid] = tid
for tid, vid in self.vertices.items():
gnx.add_nodes_from(vid, label=tid)
for tid, edges in self.edge.items():
gnx.add_edges_from(edges, label=tid)
plt.figure(self.gid)
nx.draw(gnx, arrows=True, with_labels=True)
for i in self.vid_dict.keys():
print(str(i)+':'+self.vid_dict[i])
#plt.show()
plt.savefig('subgraph_result/'+ str(self.gid) +'.png')
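# Minimal usage sketch (not part of the original module): build a tiny two-snapshot
# graph, merge in a second one and print its contents; vertex ids and timestamps
# below are made-up examples.
if __name__ == '__main__':
    g = HourlyGraph(gid=0)
    g.add_vertex('a', 0).add_vertex('b', 1)
    g.add_edge('a', 'b', 0, 1)
    other = HourlyGraph(gid=1)
    other.add_vertex('b', 1).add_vertex('c', 2)
    other.add_edge('b', 'c', 1, 2)
    g.merge(other)
    g.getGraph()
    print(g.get_vertex_num(), g.is_contain('c'))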
| [
"matplotlib"
] |
530c260422756b4f20611fad322b5337024429c1 | Python | kacperChwialkowski/mcmc | /2dimNormal/2dimNormal.py | UTF-8 | 1,154 | 2.546875 | 3 | [] | no_license | from pandas import DataFrame
import seaborn
from sampplers.MetropolisHastings import metropolis_hastings
from stat_test.quadratic_time import GaussianQuadraticTest, QuadraticMultiple, QuadraticMultiple2
__author__ = 'kcx'
import numpy as np
def logg(c):
def log_normal(x):
return -np.dot(x,x)/c
return log_normal
def grad_log_dens(x):
return -x
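# grad_log_dens is the score (gradient of the log-density) of the null model N(0, I).
# The MH target above has log-density -x.x/c, whose gradient is -2x/c, so it agrees
# with this score only for c = 2; the sweep over c below probes how the test's
# p-values behave as the chain's target drifts away from the null.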
arr = np.empty((0,2))
arr2 = np.empty((0,2))
for c in [1.0,1.3,2.0,3.0]:
print('c',c)
log_normal = logg(c)
for i in range(23):
print(i)
x= metropolis_hastings(log_normal, chain_size=500, thinning=15,x_prev=np.random.randn(2))
me = GaussianQuadraticTest(grad_log_dens)
qm = QuadraticMultiple(me)
qm2 = QuadraticMultiple2(me)
accept_null,p_val = qm.is_from_null(0.05, x, 0.1)
p_val2 = qm2.is_from_null(0.05, x, 0.1)
print(p_val2)
arr = np.vstack((arr, np.array([c,min(p_val)])))
arr2 = np.vstack((arr2, np.array([c,p_val2])))
df = DataFrame(arr)
pr = seaborn.boxplot(x=0,y=1,data=df)
seaborn.plt.show()
df = DataFrame(arr2)
pr = seaborn.boxplot(x=0,y=1,data=df)
seaborn.plt.show()
| [
"seaborn"
] |
4a7f9b09022b0798e7d0a131c5661099b6ca1997 | Python | SandeshJIT/TheSparkFoundationTask1 | /task1.py | UTF-8 | 1,381 | 3.953125 | 4 | [] | no_license | #--- Program to predict the Students score based on the numbers of hours the student studies --#
#-- Spark Foundation Data Science and Business Analyst Internship Task 1 --#
#importing necessary packages for Linear Regression
import csv
import numpy as np
from sklearn.linear_model import LinearRegression
import matplotlib.pyplot as plt
#Creating an empty list to take read CSV file input
x = list()
#Reading and storing the values in CSV FILE
with open('task1.csv','r') as fp:
read = csv.reader(fp)
for r in read:
x.append(r)
#Printing the list
print(x)
#Taking Hours in "a" and Score in "b"
a = np.array(x[1][0],dtype="float64")
b = np.array(x[1][1],dtype="float64")
for i in x[1:]:
a =np.insert(a , 0 ,float(i[0]))
b =np.insert(b , 0 ,float(i[1]))
#Making them 2-dimensional arrays
a=a.reshape(-1,1)
b=b.reshape(-1,1)
#Creating a linearRegression model
model = LinearRegression().fit(a,b)
#Printing the R^2 score of the fitted model
r_sq = model.score(a, b)
print(r_sq)
#Plotting the data and the fitted line to inspect the fit visually
plt.scatter(a,b)
y_pred = model.predict(a)
print(y_pred)
plt.plot(a,y_pred,color="red")
plt.show()
#Predicting the score when the student studies for 9.25 hrs every day
y_pred = model.predict([[9.25]])
print("\nIf the student studies for 9.25hrs/day this model predicts that he will score ",y_pred[0,0])
| [
"matplotlib"
] |
4eadd3e1fd81a6de31c13bfa9d6c47bd93d3e373 | Python | zub1984/CIFAR-10-Multi-Class-Classification-using-Pytorch | /MLP.py | UTF-8 | 7,211 | 2.875 | 3 | [] | no_license | """
Multilayer Perceptron approach to CIFAR-10 Multi-class classification
Using Pytorch and Cross Entropy Loss
"""
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import numpy as np
import matplotlib.pyplot as plt
import torch.utils.data as td
import random, time
import torchvision
class SevenFullyConnectedNet(nn.Module):
def __init__(self):
super(SevenFullyConnectedNet, self).__init__()
self.fc1 = nn.Linear(3 * 32 * 32, 2634) # equal spacing between in/out variables
self.fc2 = nn.Linear(2634, 2196) # equal spacing between in/out variables
self.fc3 = nn.Linear(2196, 1758) # equal spacing between in/out variables
self.fc4 = nn.Linear(1758, 1320) # equal spacing between in/out variables
self.fc5 = nn.Linear(1320, 882) # equal spacing between in/out variables
self.fc6 = nn.Linear(882, 444) # equal spacing between in/out variables
self.fc7 = nn.Linear(444, 10) # equal spacing between in/out variables
def forward(self, x):
x = x.view(-1, 3 * 32 * 32)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = F.relu(self.fc3(x))
x = F.relu(self.fc4(x))
x = F.relu(self.fc5(x))
x = F.relu(self.fc6(x))
"""
x = self.fc1(x)
x = self.fc2(x)
x = self.fc3(x)
x = self.fc4(x)
x = self.fc5(x)
x = self.fc6(x)
"""
x = self.fc7(x)
return x
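# Shape sanity check (illustrative): a CIFAR-10 batch of shape (B, 3, 32, 32) is
# flattened to (B, 3072) by forward() and mapped through the seven Linear layers
# to (B, 10) class scores, e.g. SevenFullyConnectedNet()(torch.randn(4, 3, 32, 32))
# has shape (4, 10).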
def cifar_loaders(batch_size, shuffle_test=False):
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.225, 0.225, 0.225])
train = datasets.CIFAR10('./', train=True, download=True,
transform=transforms.Compose([
transforms.RandomHorizontalFlip(),
transforms.RandomCrop(32, 4),
transforms.ToTensor(),
normalize,
]))
test = datasets.CIFAR10('./', train=False,
transform=transforms.Compose([transforms.ToTensor(), normalize]))
train_loader = torch.utils.data.DataLoader(train, batch_size=batch_size,
shuffle=True, pin_memory=True)
test_loader = torch.utils.data.DataLoader(test, batch_size=batch_size,
shuffle=shuffle_test, pin_memory=True)
return train_loader, test_loader
batch_size = 64
test_batch_size = 64
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
train_loader, _ = cifar_loaders(batch_size)
_, test_loader = cifar_loaders(test_batch_size)
classes = ('plane', 'car', 'bird', 'cat',
'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
# ----------------------------------------------------------------------------------------------------------------------
# neural net initialization
# ----------------------------------------------------------------------------------------------------------------------
learning_rate = 1e-2
num_epochs = 1
net = SevenFullyConnectedNet()
net.to(device)
# ----------------------------------------------------------------------------------------------------------------------
# Loss function and optimizer
# ----------------------------------------------------------------------------------------------------------------------
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=learning_rate, momentum=0.9, weight_decay=5e-4)
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='max', patience=5, factor=0.1, verbose=True, cooldown=10)
# ----------------------------------------------------------------------------------------------------------------------
# Train the network
# ----------------------------------------------------------------------------------------------------------------------
print('Run Start Time: ', time.ctime())
begin_time = time.time()
filename = 'Results_' + str(learning_rate) + '_MLP_' + str(time.time()) + '.txt'
f = open(filename, 'w')
f.write('Run Start Time: ' + str(time.ctime()))
print('Learning Rate: %f' % learning_rate)
f.write('Learning Rate\t%f\n' % learning_rate)
max_accuracy = 0
for epoch in range(num_epochs): # loop over the dataset multiple times
start_time = time.time()
running_loss = 0.0
for i, data in enumerate(train_loader, 0):
# get the inputs
inputs, labels = data
inputs, labels = inputs.to(device), labels.to(device)
# zero the parameter gradients
optimizer.zero_grad()
# Forward pass
outputs = net(inputs)
loss = criterion(outputs, labels)
# Backward pass + Optimize
loss.backward()
optimizer.step()
# print statistics
running_loss += loss.item()
print('Epoch[% d/% d]: Loss: %.4f' % (epoch + 1, num_epochs, running_loss / (i + 1)))
f.write("Epoch\t%d\tLoss\t%f\t" % (epoch + 1, running_loss / (i + 1)))
end_time = time.time()
print("Epoch[%d] total time taken: %f" % (epoch+1, end_time - start_time))
# ------------------------------------------------------------------------------------------------------------------
# Test the network
# ------------------------------------------------------------------------------------------------------------------
class_correct = list(0. for i in range(10))
class_total = list(0. for i in range(10))
correct = 0
total = 0
with torch.no_grad():
for data in test_loader:
images, labels = data
outputs = net(images)
_, predicted = torch.max(outputs.data, 1)
correct_matrix = (predicted == labels)
print('===============================================')
print('out ', outputs.data)
print('pred ', predicted)
print('labels ', labels)
print('correct', correct_matrix)
c = correct_matrix.squeeze()
for i in range(len(labels)):
label = labels[i]
class_correct[label] += c[i].item()
class_total[label] += 1
total += labels.size(0)
correct += correct_matrix.sum().item()
max_accuracy = max(max_accuracy, int(100 * correct / total))
scheduler.step(max_accuracy)
for i in range(10):
print('Accuracy of %5s [%d/%d]: %2f %%' % (classes[i], class_correct[i], class_total[i],
100 * class_correct[i] / class_total[i]))
# f.write('Accuracy of %5s [%d/%d]\t%2f %%\n' % (classes[i], class_correct[i], class_total[i],
# 100 * class_correct[i] / class_total[i]))
print('Accuracy of the network [%d/%d]: %f %%' % (correct, total, 100 * correct / total))
f.write('Accuracy of the network [%d/%d]\t%f %%\n' % (correct, total, 100 * correct / total))
print('Finished Training: ', time.ctime())
f.write('Finished Training: ' + str(time.ctime()) + '\n')
run_time = time.time() - begin_time
print('Total Runtime: %f' % run_time)
f.write('Total Runtime\t%f\n' % run_time)
| [
"matplotlib"
] |
b3438f65063a25ae30f593d4e357a0fdc3e2bb91 | Python | rodosara/dsp_nilm | /combined_ground_truth.py | UTF-8 | 6,185 | 2.546875 | 3 | [] | no_license | #!/usr/bin/python
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.ticker as mtick
from tqdm import tqdm
from sklearn.metrics import confusion_matrix
import datetime as dt
import re, os
import parameters
pd.options.display.float_format = '{:f}'.format
def channels_name(folder):
names = np.empty((0,1), dtype='S20')
files = np.array(os.listdir(folder))
for a in range(0, len(files), 1):
if files[a][0] == '.':
files = np.delete(files, a, axis=0)
for a in range(0, len(files), 1):
filename, file_extension = os.path.splitext(files[a])
names = np.append(names, filename)
files = files.reshape(len(files),1)
names = names.reshape(len(names),1)
channels = np.column_stack((files, names))
return channels
def calc_ground_truth(folder, power_1hz):
channels = channels_name(folder)
ground_truth = np.empty((0,6), dtype=float)
#pbar1 = tqdm(total=len(channels))
for a in range(0, len(channels), 1):
appliance_events = np.loadtxt(folder+channels[a,0], dtype=float, delimiter=' ')
# Select the time interval
start = np.abs((appliance_events[:,2]*1000).astype(int)-power_1hz[0,0]).argmin()
if (appliance_events[start,2]*1000).astype(int) < power_1hz[0,0]:
start += 1
#print "START:", start
end = np.abs((appliance_events[:,2]*1000).astype(int)-power_1hz[-1,0]).argmin()
if (appliance_events[end,2]*1000).astype(int) > power_1hz[-1,0]:
end -= 1
#print "END:", end
appliance_events = appliance_events[start:end+1,:]
#pbar1.update(a)
ground_truth = np.append(ground_truth, appliance_events, axis=0)
#pbar1.close()
ground_truth = ground_truth[np.argsort(ground_truth[:,0])]
ground_truth[:,2] *= 1000
ground_truth = ground_truth.astype(int)
return ground_truth
def label_calc(gt):
if gt[4] == 1:
label_gt = str(parameters.appliance(gt[5])+'_ON')
elif gt[4] == 0:
label_gt = str(parameters.appliance(gt[5])+'_OFF')
return label_gt
def debug(ground_truth, combined, decision_nan, uncombined):
print " "
print "COMBINED GROUND_TRUTH DEBUG"
print "COMBINED:"
print combined
print combined.shape, combined.dtype, "|", combined.nbytes, "bytes"
print " "
print "GROUND_TRUTH:"
print ground_truth
print ground_truth.shape, ground_truth.dtype, "|", ground_truth.nbytes, "bytes"
print " "
print "DECISION_NAN:"
print decision_nan
print decision_nan.shape, decision_nan.dtype, "|", decision_nan.nbytes, "bytes"
print " "
count = 0
for a in range(0, len(combined), 1):
if combined[a,2][-1] != combined[a,4][-1]:
count += 1
print int(combined[a,3].astype(float))/1000, combined[a,4], count
print " "
print "DIFFERENCE GROUND_TRUTH-COMBINED:", ground_truth.shape[0] - combined.shape[0]
print "UNCOMBINED:"
print uncombined
print uncombined.shape, uncombined.dtype, "|", uncombined.nbytes, "bytes"
def save_array(array, name, train_test):
folder = str('./'+train_test+'/results_'+train_test+'/')
if not os.path.exists(folder):
os.makedirs(folder)
filename = str(folder+name+'.dat')
np.savetxt(filename, array, delimiter=' ', fmt='%s')
# MAIN
def main(power_1hz, decision, events, folder, train_test):
print "COMBINED GOUND_TRUTH..."
combined_buffer = np.empty((0,7), dtype=float)
combined = np.empty((0,6), dtype=float)
decision_nan = np.empty((0,6), dtype=float)
graph, ax = plt.subplots()
plt.title("OUTPUT PLOT")
ground_truth = calc_ground_truth(folder, power_1hz)
a = 0
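# Matching step: each detected event is paired with the ground-truth event closest in
# time; when several detections map to the same ground-truth event only the closest
# one is kept (the matching is made injective) and the others are labelled NAN_NAN.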
while a < len(decision):
pos = abs(decision[a,0].astype(float)-(ground_truth[:,2]).astype(int)).argmin()
label_decision = str(decision[a,3]+"_"+decision[a,4])
label_gt = label_calc(ground_truth[pos,:])
if a+1 == len(decision) or pos != abs(decision[a+1,0].astype(float)-(ground_truth[:,2])).argmin():
combined_element = np.concatenate(([int(decision[a,0].astype(float))],[decision[a,1].astype(float)],[label_decision],[ground_truth[pos,2]],[label_gt],[decision[a,2].astype(float)])).reshape(1,6)
combined = np.append(combined, combined_element, axis=0)
else:
# Make the matching injective (one detection per ground-truth event)
while a+1 != len(decision) and pos == abs(decision[a+1,0].astype(float)-(ground_truth[:,2])).argmin():
min_buffer = np.amin(abs(decision[a,0].astype(float)-(ground_truth[:,2])))
buffer_element = np.concatenate(([int(decision[a,0].astype(float))],[decision[a,1].astype(float)],[label_decision],[ground_truth[pos,2]],[label_gt],[decision[a,2].astype(float)],[min_buffer])).reshape(1,7)
combined_buffer = np.append(combined_buffer, buffer_element, axis=0)
a += 1
pos = abs(decision[a,0].astype(float)-(ground_truth[:,2])).argmin()
label_decision = str(decision[a,3]+"_"+decision[a,4])
label_gt = label_calc(ground_truth[pos,:])
# Also consider the last element of the buffer
if a+1 != len(decision) and pos != abs(decision[a+1,0].astype(float)-(ground_truth[:,2])).argmin() and pos == abs(decision[a-1,0].astype(float)-(ground_truth[:,2])).argmin():
min_buffer = np.amin(abs(decision[a,0].astype(float)-(ground_truth[:,2])))
buffer_element = np.concatenate(([int(decision[a,0].astype(float))],[decision[a,1].astype(float)],[label_decision],[ground_truth[pos,2]],[label_gt],[decision[a,2].astype(float)],[min_buffer])).reshape(1,7)
combined_buffer = np.append(combined_buffer, buffer_element, axis=0)
pos_buffer = (combined_buffer[:,6].astype(float)).argmin()
combined_element = combined_buffer[pos_buffer,0:6].reshape(1,6)
combined = np.append(combined, combined_element, axis=0)
decision_nan = np.append(decision_nan, np.delete(combined_buffer[:,0:6], pos_buffer, axis=0), axis=0)
decision_nan[:,4] = "NAN_NAN"
combined_buffer = np.empty((0,7), dtype=float)
a += 1
uncombined = ground_truth
for a in range(0, len(combined), 1):
pos = np.where(combined[a,3].astype(float) == uncombined[:,2])
uncombined = np.delete(uncombined, pos[0][0], axis=0)
save_array(decision, "decision", train_test)
save_array(ground_truth, "ground_truth", train_test)
save_array(combined, "combined", train_test)
save_array(decision_nan, "decision_nan", train_test)
save_array(uncombined, "uncombined", train_test)
#debug(ground_truth, combined, decision_nan, uncombined)
print "... DONE"
| [
"matplotlib"
] |
7246a294ad01e8e442bf969607dc645335d488ff | Python | jyotijangid/people-counter-improved-algorithm | /noise_image.py | UTF-8 | 2,440 | 2.84375 | 3 | [] | no_license | import numpy as np
import os
import cv2
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
'''def noisy(noise_typ,image):
if noise_typ == "gauss":
row,col,ch= image.shape
mean = 0
var = 0.1
sigma = var**0.5
gauss = np.random.normal(mean,sigma,(row,col,ch))
gauss = gauss.reshape(row,col,ch)
noisy = image + gauss
return noisy
elif noise_typ == "s&p":
row,col,ch = image.shape
s_vs_p = 0.5
amount = 0.004
out = np.copy(image)
# Salt mode
num_salt = np.ceil(amount * image.size * s_vs_p)
coords = [np.random.randint(0, i - 1, int(num_salt))
for i in image.shape]
out[coords] = 1
# Pepper mode
num_pepper = np.ceil(amount* image.size * (1. - s_vs_p))
coords = [np.random.randint(0, i - 1, int(num_pepper))
for i in image.shape]
out[coords] = 0
return out
elif noise_typ == "poisson":
vals = len(np.unique(image))
vals = 2 ** np.ceil(np.log2(vals))
noisy = np.random.poisson(image * vals) / float(vals)
return noisy
elif noise_typ == "speckle":
row,col,ch = image.shape
gauss = np.random.randn(row,col,ch)
gauss = gauss.reshape(row,col,ch)
noisy = image + image * gauss
return noisy
image=cv2.imread("flower.jpg")
plt.imshow(image)
plt.show()'''
image = mpimg.imread('flower.jpg')
plt.imshow(image)
row,col,ch = image.shape
gauss = np.random.randn(row,col,ch)
gauss = gauss.reshape(row,col,ch)
speckled = image + image * gauss  # renamed so it does not shadow the noisy() helper
plt.imshow(speckled)
# the calls below expect the noisy() function from the commented-out block above
out_image = noisy("gauss", image)
plt.imshow(out_image)
out_image = noisy("speckle", image)
plt.imshow(out_image)
out_image = noisy("poisson", image)
plt.imshow(out_image)
out_image = noisy("s&p", image)
plt.imshow(out_image)
gray = cv2.cvtColor(out_image, cv2.COLOR_BGR2GRAY)
plt.imshow(gray)
pixel = image[10,10]
print(pixel)
blue=image[:,:,0]
print(blue)
from PIL import Image
pix_value = list(Image.fromarray(np.uint8(out_image)).getdata())  # getdata() needs a PIL Image, not a NumPy array
import numpy as np
import cv2
cap = cv2.VideoCapture(0)
while(True):
# Capture frame-by-frame
ret, frame = cap.read()
# Our operations on the frame come here
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# Display the resulting frame
cv2.imshow('frame',gray)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()
| [
"matplotlib"
] |
b49aa2815a6dc2b9064d4e4c4e7c1e3eb3302b23 | Python | nadeemqwerty/DL_assignments | /Assignment1/line_class.py | UTF-8 | 3,194 | 2.5625 | 3 | [] | no_license | from __future__ import print_function
import numpy as np
np.random.seed(37)
from neauron import *
import cv2
import matplotlib.pyplot as plt
import os
def load_data(folder):
images = []
clss = []
for filename in os.listdir(folder):
if(filename.endswith(".jpg")):
img = cv2.imread(os.path.join(folder,filename))
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
img = np.array(img)
img = img.ravel()
images.append(img)
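# The class id is decoded from a file name of the form "i_j.jpg": the loop below
# maps it to (i+1)*(j+1) - 1.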
cl =1
filename = filename[:-4]
for w in filename.split("_"):
cl = cl*(int(w)+1)
cl = cl - 1
clss.append(cl)
return images,clss
def split_data():
images, clss = load_data("Line")
x_train = []
y_train = []
x_val = []
y_val = []
for i in range(96):
x_train = x_train + images[i*1000:i*1000+800]
y_train = y_train + clss[i*1000:i*1000+800]
x_val = x_val + images[i*1000+801:i*1000+1000]
y_val = y_val + clss[i*1000+801:i*1000+1000]
x_train = np.array(x_train,dtype = np.float32)/255.
x_val = np.array(x_val,dtype = np.float32)/255.
return x_train, y_train, x_val, y_val
epochs = 32
X_train, y_train, X_val, y_val = split_data()
model = []
model.append(Dense(X_train.shape[1],64,0.4))
model.append(sigmoid())
model.append(Dense(64,10,0.2))
def forward(model, X):
activations = []
input = X
for l in model:
activations.append(l.forward(input))
input = activations[-1]
return activations
def predict(model,X):
logits = forward(model,X)[-1]
return logits.argmax(axis=-1)
def train(model,X,y):
layer_activations = forward(model,X)
layer_inputs = [X]+layer_activations
logits = layer_activations[-1]
loss = softmax_crossentropy_with_logits(logits,y)
loss_grad = grad_softmax_crossentropy_with_logits(logits,y)
for layer_index in range(len(model))[::-1]:
layer = model[layer_index]
loss_grad = layer.backward(layer_inputs[layer_index],loss_grad)
return np.mean(loss)
from tqdm import trange
def iterate_minibatches(inputs, targets, batchsize, shuffle=False):
assert len(inputs) == len(targets)
if shuffle:
indices = np.random.permutation(len(inputs))
for start_idx in trange(0, len(inputs) - batchsize + 1, batchsize):
if shuffle:
excerpt = indices[start_idx:start_idx + batchsize]
else:
excerpt = slice(start_idx, start_idx + batchsize)
yield inputs[excerpt], targets[excerpt]
train_log = []
val_log = []
for epoch in range(epochs):
for x_batch,y_batch in iterate_minibatches(X_train,y_train,batchsize=32,shuffle=True):
train(model,x_batch,y_batch)
train_log.append(np.mean(predict(model,X_train)==y_train))
val_log.append(np.mean(predict(model,X_val)==y_val))
print("Epoch",epoch)
print("Train accuracy:",train_log[-1])
print("Val accuracy:",val_log[-1])
plt.plot(train_log,label='train accuracy')
plt.plot(val_log,label='val accuracy')
plt.legend(loc='best')
plt.grid()
plt.show()
| [
"matplotlib"
] |
d438b0d23cfcc6e462f1ab457abc4c98af772914 | Python | lion-x/FinancialEngineering | /code/histogram.py | UTF-8 | 2,265 | 3.359375 | 3 | [] | no_license | import pandas as pd
import os
import matplotlib.pyplot as plt
def symbol_to_path(symbol, base_dir="../data"):
return os.path.join(base_dir, "{}.csv".format(symbol))
def get_data(symbols, dates):
df = pd.DataFrame(index=dates)
if 'SPY' not in symbols:
symbols.insert(0, 'SPY')
for symbol in symbols:
df_temp = pd.read_csv(symbol_to_path(symbol), index_col='Date', parse_dates=True,
usecols=['Date', 'Adj Close'], na_values=['nan'])
df_temp = df_temp.rename(columns={'Adj Close': symbol})
df = df.join(df_temp)
if symbol == 'SPY':
df = df.dropna(subset=["SPY"])
return df
def plot_data(df, title="Stock Price", xlabel="Date", ylabel="Price"):
ax = df.plot(title=title, fontsize=2)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
plt.show()
def compute_daily_returns(df):
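# Daily return: r_t = p_t / p_{t-1} - 1 for each column; the first row is set to 0
# because it has no previous price.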
daily_returns = df.copy()
daily_returns.iloc[0, :] = 0
daily_returns[1:] = (df[1:] / df[:-1].values) - 1
return daily_returns
def test_run():
start_date = '2010-01-01'
end_date = '2020-12-31'
dates = pd.date_range(start_date, end_date)
print(dates)
symbols = ['SPY', 'XOM']
df = get_data(symbols, dates)
plot_data(df['SPY'])
daily_returns = compute_daily_returns(df)
plot_data(daily_returns['SPY'], title="Daily returns", ylabel='Daily return')
print(daily_returns)
daily_returns_SPY_mean = daily_returns['SPY'].mean()
print("mean = ", daily_returns_SPY_mean)
daily_returns_SPY_std = daily_returns['SPY'].std()
print("std = ", daily_returns_SPY_std)
daily_returns['SPY'].hist(bins=40)
plt.axvline(daily_returns_SPY_mean, color='w', linestyle='dashed', linewidth=2)
plt.axvline(daily_returns_SPY_mean + daily_returns_SPY_std, color='r', linestyle='dashed', linewidth=2)
plt.axvline(daily_returns_SPY_mean - daily_returns_SPY_std, color='r', linestyle='dashed', linewidth=2)
plt.show()
daily_returns_SPY_kurtosis = daily_returns['SPY'].kurtosis()
print("kurtosis = ", daily_returns_SPY_kurtosis)
daily_returns['XOM'].hist(bins=40, label='XOM')
daily_returns['SPY'].hist(bins=40, label='SPY')
plt.show()
if __name__ == "__main__":
test_run()
| [
"matplotlib"
] |
76a59a1bbd044e4af1ae94dc8dec43fddcb44f63 | Python | WANGPeisheng1997/HandwrittenTextRecognition | /selective_search+cnn/main.py | UTF-8 | 2,196 | 2.671875 | 3 | [] | no_license | import torch
from PIL import Image, ImageOps
import numpy as np
from skimage import io
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from model import LeNet
from predict import predict, preprocessing, grayscale
from detect import NMS, selective_search
def load_network(network, save_path):
network.load_state_dict(torch.load(save_path))
return network
# model = LeNet()
# model = load_network(model, "mnist_lenet.pt")
#
# for group_name in ['a','b','c','d']:
# print("Group:" + group_name)
# for i in range(10):
# test(model, "test/%d%s.png" % (i, group_name), i)
def detect_and_predict(image, model):
# preprocess
image = ImageOps.invert(image)
image = grayscale(image)
image.convert("RGB").save("test/crop.png")
# detect
skimage = io.imread("test/crop.png")
candidates = selective_search(skimage)
candidates_boxs = []
for x, y, w, h in candidates:
candidates_boxs.append([x, y, x + w, y + h, w * h])
candidates_boxs = np.array(candidates_boxs)
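# Boxes are stored as [x1, y1, x2, y2, area]; non-maximum suppression below collapses
# heavily overlapping proposals so each detected region is kept only once (0.7 is the
# overlap threshold handed to NMS).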
keep = NMS(candidates_boxs, 0.7)
# predict
fig, ax = plt.subplots(ncols=1, nrows=1, figsize=(6, 6))
ax.imshow(skimage)
predict_dict = {}
for x1, y1, x2, y2, _ in candidates_boxs[keep]:
cropped_image = image.crop((x1, y1, x2, y2))
cropped_image = ImageOps.invert(cropped_image)
cropped_image = preprocessing(cropped_image)
pred, confidence = predict(model, cropped_image)
predict_dict[x1] = pred
plt.text(x1, y1, "%d %.3f" % (pred, confidence), color='r')
sorted_dict = sorted(predict_dict.items(), key=lambda x: x[0])
predict_string = ""
for x1, char in sorted_dict:
predict_string += str(char)
plt.text(0, 0, predict_string, color='r')
for x1, y1, x2, y2, area in candidates_boxs[keep]:
print(x1, y1, x2, y2, area)
rect = mpatches.Rectangle(
(x1, y1), x2 - x1, y2 - y1, fill=False, edgecolor='red', linewidth=1)
ax.add_patch(rect)
plt.show()
model = LeNet()
model = load_network(model, "mnist_lenet.pt")
image = Image.open("test/string3.png").convert('L')
detect_and_predict(image, model)
| [
"matplotlib"
] |
499b029283b7a96d1e7ce9b06c8259174465397d | Python | pschafran/Notes | /scripts/plotCovAlongContigs.py | UTF-8 | 5,626 | 2.609375 | 3 | [] | no_license | #! /home/ps997/miniconda3/bin/python
# Needs to run with python 3 on our server!!!
# Usage: plotCovAlongContigs.py depth_output_from_samtools.txt SmoothingFactor (% of data to average across, must be between 1-100. Suggested setting from 1-15)
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import sys
from matplotlib import colors
from numpy import log
import matplotlib.patches as patches
from mpl_toolkits.mplot3d import Axes3D
def running_mean(x, N):
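# Sliding-window mean via cumulative sums: after prepending a 0,
# (cumsum[i+N] - cumsum[i]) / N is the mean of window i, computed in O(len(x)).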
cumsum = np.cumsum(np.insert(x, 0, 0))
return (cumsum[N:] - cumsum[:-N]) / float(N)
file = open(sys.argv[1],"r")
smoothingFactor = float(sys.argv[2])
if smoothingFactor < 1:
print("WARNING: Smoothing was set to less than 1. Now set to 1.")
smoothingFactor = 1
if smoothingFactor > 100:
print("WARNING: Smooting was set to greater than 100. Now set to 100.")
smoothingFactor = 100
contigList = []
posList = []
depthList = []
contigDict = {}
assemblySize = 0
contigLengthList = []
indexer = 0
print('Parsing depth file...')
for line in file:
splitline = line.strip("\n").split("\t")
contig = splitline[0]
pos = splitline[1]
depth = splitline[2]
try:
contigDict[contig][0].append(int(pos))
contigDict[contig][1].append(float(depth))
except:
contigDict[contig] = [[],[]]
contigDict[contig][0].append(int(pos))
contigDict[contig][1].append(float(depth))
print('Finding high coverage outlier regions...')
for key in contigDict.keys():
assemblySize += len(contigDict[key][1])
contigLengthList.append(len(contigDict[key][1]))
contigDict[key].append(np.median(contigDict[key][1])) # contigDict[key][2]
contigDict[key].append(np.std(contigDict[key][1])) # contigDict[key][3]
contigDict[key].append(np.min(contigDict[key][1])) # contigDict[key][4]
contigDict[key].append(np.max(contigDict[key][1])) # contigDict[key[5]
if any(x > contigDict[key][2]+(3*contigDict[key][3]) for x in contigDict[key][1]):
print(' %s' %(key))
outfile = open("%s_hiCovRegions.txt" %(key), "w")
outfile.write("position\tcoverage\n")
for i in range(1, len(contigDict[key][0])):
if contigDict[key][1][i-1] > contigDict[key][2]+(3*contigDict[key][3]):
outfile.write("%d\t%f\n" %(contigDict[key][0][i-1], contigDict[key][1][i-1]))
outfile.close()
n50size = 0
n50list = []
for length in sorted(contigLengthList, reverse=True):
if n50size < assemblySize*0.5:
n50list.append(length)
n50size += length
else:
n50list.append(length)
n50size += length
n50 = np.min(n50list)
n50 = 0
print("Assembly Size: %s" %(assemblySize))
movAvgDict = {}
print('Smoothing data...')
for key in contigDict.keys():
smoothingBases = int(len(contigDict[key][1]) * (smoothingFactor/100))
movAvgDict[key] = [np.array(running_mean(contigDict[key][0], smoothingBases)).tolist(), np.array(running_mean(contigDict[key][1], smoothingBases)).tolist()]
print('Plotting charts...')
for key in movAvgDict.keys():
fig, ax = plt.subplots(1,1)
ax.plot(movAvgDict[key][0], movAvgDict[key][1])
ax.set_xlabel("Position")
ax.set_ylabel("Read Depth")
ax.set_title("%s" %(key))
plt.grid(which='major',axis='both',linestyle='dashed')
#ax.set_ylim(ymin=(contigDict[key][4] - 100), ymax = (contigDict[key][5] + 100))
ax.set_ylim(ymin=0, ymax=10000)
ax.set_axisbelow(True)
ax.text(0.7, 1.1, "Median Depth: %d" %(contigDict[key][2]), horizontalalignment='left', verticalalignment='center', transform=ax.transAxes, bbox=dict(facecolor='white', edgecolor='white', alpha=0.5))
ax.text(0.7, 1.05, "Standard Deviation: %d" %(contigDict[key][3]), horizontalalignment='left', verticalalignment='center', transform=ax.transAxes, bbox=dict(facecolor='white', edgecolor='white', alpha=0.5))
plt.savefig("%s_%s.pdf" %(key,smoothingFactor),format = "pdf")
plt.close()
fig, ax = plt.subplots(len(n50list), 1, sharex=True, sharey=True, tight_layout=True, figsize = (8,len(n50list)))
index = 0
for key in movAvgDict.keys():
if len(contigDict[key][0]) >= n50:
ax[index].plot(movAvgDict[key][0], movAvgDict[key][1])
ax[index].text(1.01, 0.5, key, horizontalalignment='left', verticalalignment='center',transform=ax[index].transAxes, bbox=dict(facecolor='white', edgecolor='white', alpha=0.5))
ax[index].spines['right'].set_visible(False)
ax[index].spines['top'].set_visible(False)
ax[index].set_ylabel("Read Depth")
index += 1
ax[index-1].set_xlabel("Position")
plt.savefig("%s_n50contigs_%s.pdf" %((sys.argv[1].split(".txt")[0]), smoothingFactor),format = "pdf")
plt.close()
fig, ax = plt.subplots(len(n50list), 1, sharex=True, sharey=True, tight_layout=True, figsize = (8,len(n50list)))
index = 0
for key in movAvgDict.keys():
halfway = int(len(n50list)/2)
if len(contigDict[key][0]) >= n50:
ax[index].plot(movAvgDict[key][0], movAvgDict[key][1])
ax[index].text(1.01, 0.5, key, horizontalalignment='left', verticalalignment='center',transform=ax[index].transAxes, bbox=dict(facecolor='white', edgecolor='white', alpha=0.5))
ax[index].spines['right'].set_visible(False)
ax[index].spines['top'].set_visible(False)
ax[index].set_yscale('log')
if index == halfway:
ax[index].set_ylabel("Read Depth")
index += 1
ax[index-1].set_xlabel("Position")
plt.savefig("%s_n50contigs_logCov_%s.pdf" %((sys.argv[1].split(".txt")[0]), smoothingFactor),format = "pdf")
plt.close()
fig = plt.figure()
ax = fig.gca(projection='3d')
index = 0
for key in movAvgDict.keys():
if len(contigDict[key][0]) >= n50:
ax.plot(movAvgDict[key][0], movAvgDict[key][1], zs=index, zdir='y', label= "%s" %(key))
index -= 1
plt.savefig("%s_3Dn50contigs_%s.pdf" %(sys.argv[1].split(".txt")[0], smoothingFactor),format = "pdf")
plt.close()
| [
"matplotlib"
] |
ad71e2debcf58de03ff1a695bd1a3d01346d8de5 | Python | banacorn/Hakaru-FLOLAC16 | /plot.py | UTF-8 | 570 | 3.046875 | 3 | [] | no_license | #! /usr/bin/env python3
import argparse
import numpy as np
import matplotlib.pyplot as plt
parser = argparse.ArgumentParser(description='plotting data')
parser.add_argument('filename')
def plot(filename):
data = []
with open(filename) as f:
next(f)
for l in f:
x, y = [float(d) for d in l.strip().split()]
data.append((x, y))
data = np.array(data)
plt.plot(data[:,0], data[:,1], drawstyle='steps', linewidth=2)
plt.show()
if __name__ == '__main__':
args = parser.parse_args()
plot(args.filename)
| [
"matplotlib"
] |
126fe8396cb0aa26e4202413fbb18444a2a61e6d | Python | AntoineDarveau/jwst-mtl | /AMI/NRM_pipeline_Xara/Python_tools.py | UTF-8 | 6,682 | 2.875 | 3 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-
"""
Created on Mon Dec 7 14:49:19 2015
@author: asoulain
"""
import pickle, warnings
import numpy as np
import matplotlib.pyplot as plt
from astropy.nddata import Cutout2D
from scipy.signal import medfilt2d
warnings.filterwarnings("ignore", module='astropy.io.votable.tree')
warnings.filterwarnings("ignore", module='astropy.io.votable.xmlutil')
def mastorad(mas):
"""
Short Summary
-------------
Convert angle in milli arc-sec to radians
Parameters
----------
mas: float
angle in milli arc-sec
Returns
-------
rad: float
angle in radians
"""
rad = mas * (10**(-3)) / (3600 * 180 / np.pi)
return rad
def radtomas(rad):
"""
Short Summary
-------------
Convert input angle in radians to milli arc sec
Parameters
----------
rad: float
input angle in radians
Returns
-------
mas: float
input angle in milli arc sec
"""
mas = rad * (3600. * 180 / np.pi) * 10.**3
return mas
def crop_max(img, dim, filtmed = True, f = 3):
"""
Short Summary
-------------
Resize an image on the brightest pixel.
Parameters
----------
img : numpy.array
input image.
dim : int
resized dimension.
filtmed : boolean
True to apply a median filter to the image (to blur bad pixels).
f : float
if filtmed is True, kernel size of the median filter.
Returns
-------
cutout: numpy.array
Resized image.
"""
if filtmed:
im_med = medfilt2d(img, f)
else:
im_med = img.copy()
pos_max = np.where(im_med == im_med.max())
X = pos_max[1][0]+1
Y = pos_max[0][0]+1
position = (X, Y)
cutout = Cutout2D(img, position, dim)
return cutout.data, position
def norm_max(tab):
"""
Short Summary
-------------
Normalize an array or a list by the maximum.
Parameters
----------
tab : numpy.array, list
input array or list.
Returns
-------
tab_norm : numpy.array, list
Normalized array.
"""
tab_norm = tab/np.max(tab)
return tab_norm
def crop_center(img, dim):
"""
Short Summary
-------------
Resize an image on the center.
Parameters
----------
img : numpy.array
input image.
dim : int
resized dimension.
Returns
-------
cutout: numpy.array
Resized image.
"""
b = img.shape[0]
position = (b//2, b//2)
cutout = Cutout2D(img, position, dim)
return cutout.data
def crop_position(img, X, Y, dim):
"""
Short Summary
-------------
Resize an image on a defined position.
Parameters
----------
img : numpy.array
input image.
X, Y : int
Position to resize (new center of the image).
dim : int
resized dimension.
Returns
-------
cutout: numpy.array
Resized image.
"""
position = (X, Y)
cutout = Cutout2D(img, position, dim)
return cutout.data
def plot_JWST_ins_limit(inst):
"""
Plot JWST instrument limits (sensitivity and saturation) for different filters.
"""
file = open('/Users/asoulain/Documents/Add_Python_PATH/save_limit_JWST.dpy', 'rb')
dic_JWST = pickle.load(file)
file.close()
if inst == 'NIRCAM':
color = 'royalblue'
elif inst == 'NIRISS':
color = 'orange'
elif inst == 'MIRI':
color = 'crimson'
i = 1
l_filt = list(dic_JWST[inst].keys())
for filt in l_filt:
wl1 = dic_JWST[inst][filt]['wl0'] - dic_JWST[inst][filt]['bw']/2.
wl2 = dic_JWST[inst][filt]['wl0'] + dic_JWST[inst][filt]['bw']/2.
fmax = dic_JWST[inst][filt]['fmax']
fmin = dic_JWST[inst][filt]['fmin']
if i == 1:
plt.fill_between([wl1, wl2], fmin, fmax, color = color, alpha = .2, label = 'JWST/'+inst)
else:
plt.fill_between([wl1, wl2], fmin, fmax, color = color, alpha = .2)
i += 1
return None
def plot_JWST_limit():
plot_JWST_ins_limit('NIRCAM')
plot_JWST_ins_limit('NIRISS')
plot_JWST_ins_limit('MIRI')
return None
def gauss_2d_asym(X, param):
"""
Short Summary
-------------
Creates 2D oriented gaussian with an asymmetrical grid.
Parameters
----------
X : list.
Input values :
- X[0] : x coordinates [pixels]
- X[1] : y coordinates [pixels]
- X[2] : pixels scale [mas]
param : dict.
Input parameters, with the present keys =
- A : amplitude.
- x0 : x offset from the center [mas].
- y0 : y offset from the center [mas].
- fwhm_x : width in x direction [mas].
- fwhm_y : width in y direction [mas].
- theta : orientation [deg].
Returns
-------
im: numpy.array
image of a 2D gaussian function.
"""
x_1d = X[0]
y_1d = X[1]
pixel_scale = X[2]
dim = len(x_1d)
x, y = np.meshgrid(x_1d, y_1d)
fwhmx = param['fwhm_x']/pixel_scale
fwhmy = param['fwhm_y']/pixel_scale
sigma_x = (fwhmx / np.sqrt(8 * np.log(2)))
sigma_y = (fwhmy / np.sqrt(8 * np.log(2)))
amplitude = param['A']
x0 = dim//2 + param['x0']/pixel_scale
y0 = dim//2 + param['y0']/pixel_scale
theta = np.deg2rad(param['theta'])
size_x = len(x)
size_y = len(y)
im = np.zeros([size_y, size_x])
x0 = float(x0)
y0 = float(y0)
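# a, b, c are the coefficients of the rotated 2-D Gaussian quadratic form, so that
# im = A * exp(-(a*(x-x0)**2 + 2*b*(x-x0)*(y-y0) + c*(y-y0)**2)).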
a = (np.cos(theta)**2)/(2*sigma_x**2) + (np.sin(theta)**2)/(2*sigma_y**2)
b = -(np.sin(2*theta))/(4*sigma_x**2) + (np.sin(2*theta))/(4*sigma_y**2)
c = (np.sin(theta)**2)/(2*sigma_x**2) + (np.cos(theta)**2)/(2*sigma_y**2)
im = amplitude*np.exp( - (a*((x-x0)**2) + 2*b*(x-x0)*(y-y0)
+ c*((y-y0)**2)))
return im
def conv_fft(image, psf):
"""
Compute 2D convolution with the PSF, passing through Fourier space.
"""
fft_im = np.fft.fft2(image)
fft_psf = np.fft.fft2(psf)
fft_conv = fft_im*fft_psf
conv = abs(np.fft.fftshift(np.fft.ifft2(fft_conv)))
return conv
class A(object):
pass
class AllMyFields:
def __init__(self, dictionary):
for k, v in dictionary.items():
if type(v) == dict:
a = A()
for key in v.keys():
a.__dict__[key] = v[key]
setattr(self, k, a)
else:
setattr(self, k, v)
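# Usage sketch (illustrative values only): AllMyFields turns nested dictionaries into
# attribute-style access, one level deep.
#   cfg = AllMyFields({'instrument': 'NIRISS', 'gauss': {'fwhm_x': 120.0}})
#   cfg.instrument   -> 'NIRISS'
#   cfg.gauss.fwhm_x -> 120.0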
| [
"matplotlib"
] |
346986d590ad29b07746bb6cb1fec2d835681609 | Python | GaiYu0/vertex-cut | /sbm.py | UTF-8 | 4,501 | 2.984375 | 3 | [] | no_license | from __future__ import division
import math
import numpy as np
import scipy.sparse as sp
import networkx as nx
import matplotlib.pyplot as plt
class SSBM:
def __init__(self, n, k, a=10.0, b=2.0, regime='constant', rng=None):
"""Symmetric Stochastic Block Model.
n - number of nodes
k - number of communities
a - probability scale for intra-community edge
b - probability scale for inter-community edge
regime - If "logarithm", this generates SSBM(n, k, a*log(n)/n, b*log(n)/n)
If "constant", this generates SSBM(n, k, a/n, b/n)
If "mixed", this generates SSBM(n, k, a*log(n)/n, b/n)
"""
self.n = n
self.k = k
if regime == 'logarithm':
if math.sqrt(a) - math.sqrt(b) >= math.sqrt(k):
print('SSBM model with possible exact recovery.')
else:
print('SSBM model with impossible exact recovery.')
self.a = a * math.log(n) / n
self.b = b * math.log(n) / n
elif regime == 'constant':
snr = (a - b) ** 2 / (k * (a + (k - 1) * b))
if snr > 1:
print('SSBM model with possible detection.')
else:
print('SSBM model that may not have detection (snr=%.5f).' % snr)
self.a = a / n
self.b = b / n
elif regime == 'mixed':
self.a = a * math.log(n) / n
self.b = b / n
else:
raise ValueError('Unknown regime: %s' % regime)
if rng is None:
self.rng = np.random.RandomState()
else:
self.rng = rng
self._graph = None
def generate(self):
self.generate_communities()
print('Finished generating communities.')
self.generate_edges()
print('Finished generating edges.')
def generate_communities(self):
nodes = list(range(self.n))
size = self.n // self.k
self.block_size = size
self.comm2node = [nodes[i*size:(i+1)*size] for i in range(self.k)]
self.node2comm = [nid // size for nid in range(self.n)]
def generate_edges(self):
# TODO: dedup edges
us = []
vs = []
# generate intra-comm edges
for i in range(self.k):
sp_mat = sp.random(self.block_size, self.block_size,
density=self.a,
random_state=self.rng,
data_rvs=lambda l: np.ones(l))
u = sp_mat.row + i * self.block_size
v = sp_mat.col + i * self.block_size
us.append(u)
vs.append(v)
# generate inter-comm edges
for i in range(self.k):
for j in range(self.k):
if i == j:
continue
sp_mat = sp.random(self.block_size, self.block_size,
density=self.b,
random_state=self.rng,
data_rvs=lambda l: np.ones(l))
u = sp_mat.row + i * self.block_size
v = sp_mat.col + j * self.block_size
us.append(u)
vs.append(v)
us = np.hstack(us)
vs = np.hstack(vs)
self.sp_mat = sp.coo_matrix((np.ones(us.shape[0]), (us, vs)), shape=(self.n, self.n))
@property
def graph(self):
if self._graph is None:
self._graph = nx.from_scipy_sparse_matrix(self.sp_mat, create_using=nx.DiGraph())
return self._graph
def plot(self):
x = self.sp_mat.row
y = self.sp_mat.col
plt.scatter(x, y, s=0.5, marker='.', c='k')
plt.savefig('ssbm-%d-%d.pdf' % (self.n, self.k))
plt.clf()
# plot out degree distribution
out_degree = [d for _, d in self.graph.out_degree().items()]
plt.hist(out_degree, 100, normed=True)
plt.savefig('ssbm-%d-%d_out_degree.pdf' % (self.n, self.k))
plt.clf()
if __name__ == '__main__':
n = 1000
k = 10
ssbm = SSBM(n, k, regime='mixed', a=4, b=1)
ssbm.generate()
g = ssbm.graph
print('#nodes:', g.number_of_nodes())
print('#edges:', g.number_of_edges())
ssbm.plot()
#lg = nx.line_graph(g)
# plot degree distribution
#degree = [d for _, d in lg.degree().items()]
#plt.hist(degree, 100, normed=True)
#plt.savefig('lg<ssbm-%d-%d>_degree.pdf' % (n, k))
#plt.clf()
| [
"matplotlib"
] |
abcc14b8e2781a7ffd8bdfded41340bf9f4cee05 | Python | wangzizhao/VRNN | /transformation/lorenz.py | UTF-8 | 1,066 | 2.90625 | 3 | [] | no_license | import numpy as np
from scipy.integrate import odeint
from transformation.base import transformation
class lorenz_transformation(transformation):
def transform(self, Z_prev):
'''
Integrates the lorenz ODEs
'''
sigma, rho, beta, dt = self.params
def lorenz_equation(Z, t, sigma, rho, beta):
x, y, z = Z
xd = sigma * (y - x)
yd = (rho - z) * x - y
zd = x * y - beta * z
return [xd, yd, zd]
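# odeint integrates over the grid [0, dt] and only the final state is kept, so each
# call to transform() advances the trajectory by a single step of length dt.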
t = np.arange(0, 2 * dt, dt)
Z = odeint(lorenz_equation, Z_prev, t, args=(sigma, rho, beta))[1, :]
return Z
# test code
if __name__ == "__main__":
import matplotlib.pyplot as plt
lorenz_params = (10.0, 28.0, 8.0 / 3.0, 0.01)
Dz = 3
T = 1500
batch_size = 10
lorenz = lorenz_transformation(lorenz_params)
Z = np.zeros((T, Dz))
Z[0] = np.random.uniform(low=0, high=1, size=Dz)
for t in range(1, T):
Z[t] = lorenz.transform(Z[t - 1])
plt.figure()
plt.plot(Z[:, 0], Z[:, 1])
plt.show()
| [
"matplotlib"
] |
3f580c447b70a90eabacd520118cf31267f9ef22 | Python | diegopetrola/bar_chart_race | /utils/hist.py | UTF-8 | 627 | 3.421875 | 3 | [] | no_license | import matplotlib.pyplot as plt
import numpy as np
t = np.linspace(0.0, 2.0, 201)
s = np.sin(2 * np.pi * t)
# 1) RGB tuple:
fig, ax = plt.subplots(facecolor=(.18, .31, .31))
# 2) hex string:
# ax.set_facecolor('#eafff5')
# 3) gray level string:
ax.set_title('Voltage vs. time chart', color='0.7')
# 4) single letter color string
ax.set_xlabel('time (s)', color='c')
# 5) a named color:
ax.set_ylabel('voltage (mV)', color='peachpuff')
# 6) a named xkcd color:
ax.plot(t, s, 'xkcd:crimson')
# 7) Cn notation:
ax.plot(t, .7*s, color='C4', linestyle='--')
# 8) tab notation:
ax.tick_params(labelcolor='tab:orange')
plt.show()
| [
"matplotlib"
] |
382161416c61bd1e46828d7bc80d85370a27ff84 | Python | csaftoiu/deeplearning-udacity | /assignments/main-assignment1.py | UTF-8 | 2,084 | 2.578125 | 3 | [
"Unlicense"
] | permissive | from __future__ import print_function
import random
import matplotlib.pyplot as plt
import numpy as np
from assignments import loading, dataset, classification
def main():
# initialize
np.random.seed(133)
train_datasets, test_datasets = loading.load_datasets()
train_sizes = (
# 50,
# 100,
1000,
# 5000,
# 10000, 50000, 200000
)
for train_size in train_sizes:
training_sets = dataset.get_training_sets(
train_datasets, test_datasets,
train_size=train_size, valid_size=train_size, test_size=10000,
store_pickle=True)
import pprint; pprint.pprint(dataset.measure_overlap(training_sets))
training_sets = dataset.sanitize_sets(training_sets)
print(training_sets['train']['data'].shape)
training_sets = dataset.mapsets(dataset.flatten, training_sets)
print(training_sets['train']['data'].shape)
lr = classification.fit_sklearn_logisic_regression(training_sets['train'])
# lr = classification.fit_sklearn_sgd(training_data['train'])
print("Accuracy trained from %d samples on %d 'valid' samples is %.2f%%" % (
train_size, train_size // 20, classification.get_accuracy(lr, training_sets['valid']) * 100,
))
# # check some from test
# test = training_data['test']
# for _ in xrange(100):
# i = random.randint(0, len(test['data']) - 1)
# guess = lr.predict(classification.flatten_image_arrays(np.array([test['data'][i]])))
# if guess == test['labels'][i]:
# print("Correctly guessed %s!" % (letter_for(guess),))
# else:
# print("Incorrectly guessed %s for %s" % (
# letter_for(guess), letter_for(test['labels'][i])))
#
# plt.imshow(test['data'][i])
# plt.show()
# sanitized_data = sanitize_training_data(training_data)
if __name__ == "__main__":
main()
# predictions = lr.predict(flatten_image_arrays(data['data']))
| [
"matplotlib"
] |
ccf4d030816fc27cd66b76c262fc6218e8bb785a | Python | masih68kh/spark | /PageRank/myPageRank.py | UTF-8 | 1,798 | 2.859375 | 3 | [] | no_license | from pyspark import SparkContext
import time
import os
if os.path.exists("result.txt"):
os.system('rm -rf result.txt')
os.system('rm fig.png')
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
error_list = []
def add(x,y):
return x+y
def is_close(rdd1, rdd2, epsilon=1e-5):
"""
checks if two rdds are close enough
rdds should be key-value pairs whose elements are of the form (int, float)
"""
error = rdd1.join(rdd2).mapValues(lambda score_tuple: (score_tuple[0]-score_tuple[1])**2)\
.aggregate(0, lambda flt, pair: flt+pair[1], lambda flt1,flt2: flt1+flt2)
error_list.append(error)
return error < epsilon
startTime = time.time()
sc = SparkContext('local[20]',"myPageRank")
originalRDD = sc.textFile('graph').map(eval).mapValues(lambda target: [target])\
.reduceByKey(lambda x,y: x+y, numPartitions=20).cache()
# originalRDD contains (w, [outDegreeW' ...])
N = originalRDD.count()
scoresRDD = originalRDD.keys().map(lambda w: (w,1./N)).partitionBy(20).cache()
gamma = 0.15
for it in range(40):
print("********")
print("interation # %d"%it)
joined = originalRDD.join(scoresRDD)
old_scoresRDD = scoresRDD
scoresRDD = joined.flatMap(lambda pair: [(w,pair[1][1]/len(pair[1][0])) for w in pair[1][0]])\
.reduceByKey(add, numPartitions=20).mapValues(lambda s: gamma/N + (1-gamma)*s).cache()
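# PageRank update with teleportation: new_score(w) = gamma/N + (1 - gamma) *
# sum(score(u) / outdegree(u) for every in-neighbour u of w); gamma = 0.15 is the
# probability of jumping to a uniformly random node.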
if is_close(scoresRDD, old_scoresRDD, epsilon=-1e-8):
break
old_scoresRDD.unpersist()
scoresRDD = scoresRDD.sortBy(lambda pair: -pair[1])
scoresRDD.saveAsTextFile('result.txt')
endTime = time.time()
print("************")
print("time : ", endTime-startTime, " s")
fig = plt.figure(figsize=(5,5))
ax = fig.add_axes((0.2,0.2,0.7,0.7))
ax.plot(error_list)
fig.savefig("fig.png")
| [
"matplotlib"
] |
ead11471bc476f90025201353529d1d7f8ee1192 | Python | jan-pfr/MachineLearning_python | /exercises/exercise3/exercise3_1.py | UTF-8 | 5,063 | 2.765625 | 3 | [] | no_license |
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout, Flatten,\
Conv2D, MaxPooling2D
from keras.layers.normalization import BatchNormalization
import numpy as np
import glob
import cv2
import os
from keras.preprocessing.image import ImageDataGenerator
from sklearn.model_selection import train_test_split
#from keras.utils import to_categorical
from sklearn.metrics import confusion_matrix
import random
import matplotlib.pyplot as plt
filelist_A = 'saved_images/A/'
filelist_B = 'saved_images/B/'
IMG_HEIGHT = 500
IMG_WIDTH = 500
x_A_images = [x for x in sorted(os.listdir(filelist_A)) if x[-4:] == '.jpg']
x_A = np.empty((len(x_A_images), IMG_HEIGHT, IMG_WIDTH, 3), dtype='float32') # empty array
y_A = np.ones((x_A.shape[0],1)) # y filled with ones
for i, name in enumerate(x_A_images):
im = cv2.imread(filelist_A + name, cv2.IMREAD_UNCHANGED)
#im = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY).astype('float32')
im = (im - np.min(im)) / (np.max(im) - np.min(im))
x_A[i] = im # fill the array
x_B_images = [x for x in sorted(os.listdir(filelist_B)) if x[-4:] == '.jpg']
x_B = np.empty((len(x_B_images), IMG_HEIGHT, IMG_WIDTH, 3), dtype='float32') # empty array
y_B = np.zeros((x_B.shape[0], 1)) # y filled with zeros
for i, name in enumerate(x_B_images):
im = cv2.imread(filelist_B + name, cv2.IMREAD_UNCHANGED)
#im = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY).astype('float32')
im = (im - np.min(im)) / (np.max(im) - np.min(im))
x_B[i] = im # fill the array
fig, ax = plt.subplots(1,2, figsize = (8,4))
ax[0].imshow(x_A[3], cmap='gray')
ax[1].imshow(x_B[3], cmap='gray')
ax[0].imshow(cv2.cvtColor(x_A[40], cv2.COLOR_BGR2GRAY), cmap='gray')
ax[1].imshow(cv2.cvtColor(x_B[40], cv2.COLOR_BGR2GRAY), cmap='gray')
plt.show()
#concatenate the two classes for training and validation, x contains the image, y contains the labels (0 or 1)
# combine the data from A and B
x = np.concatenate((x_B, x_A))
y = np.concatenate((y_B, y_A))
x.shape
# attempt to create more training data via data augmentation
imageDataGenerator = ImageDataGenerator(width_shift_range=0.1, rotation_range=40, shear_range= 0.2, zoom_range=0.2, fill_mode='nearest', horizontal_flip=True, vertical_flip=True)
#divide the data into training and test data
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.1, random_state=1)
#x_train1, x_train2, y_train1, y_train2 = train_test_split(x_train, y_train, test_size=0.1, random_state=40)
print(x_train.shape)
#print(x_train1.shape)
#print(x_train2.shape)
# Create a sequential model, Alexnet
model = Sequential()
# 1st Convolutional Layer
model.add(Conv2D(filters=96, input_shape=(500, 500,3), kernel_size=(11,11),\
strides=(4,4), padding='valid'))
model.add(Activation('relu'))
# Pooling
model.add(MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='valid'))
# Batch Normalisation before passing it to the next layer
model.add(BatchNormalization())
# 2nd Convolutional Layer
model.add(Conv2D(filters=256, kernel_size=(11,11), strides=(1,1), padding='valid'))
model.add(Activation('relu'))
# Pooling
model.add(MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='valid'))
# Batch Normalisation
model.add(BatchNormalization())
# 3rd Convolutional Layer
model.add(Conv2D(filters=384, kernel_size=(3,3), strides=(1,1), padding='valid'))
model.add(Activation('relu'))
# Batch Normalisation
model.add(BatchNormalization())
# 4th Convolutional Layer
model.add(Conv2D(filters=384, kernel_size=(3,3), strides=(1,1), padding='valid'))
model.add(Activation('relu'))
# Batch Normalisation
model.add(BatchNormalization())
# 5th Convolutional Layer
model.add(Conv2D(filters=256, kernel_size=(3,3), strides=(1,1), padding='valid'))
model.add(Activation('relu'))
# Pooling
model.add(MaxPooling2D(pool_size=(2,2), strides=(2,2), padding='valid'))
# Batch Normalisation
model.add(BatchNormalization())
# Passing it to a dense layer
model.add(Flatten())
# 1st Dense Layer
model.add(Dense(4096, input_shape=(500*500*3,)))
model.add(Activation('relu'))
# Add Dropout to prevent overfitting
model.add(Dropout(0.5))
# Batch Normalisation
model.add(BatchNormalization())
# 2nd Dense Layer
model.add(Dense(4096))
model.add(Activation('relu'))
# Add Dropout
model.add(Dropout(0.5))
# Batch Normalisation
model.add(BatchNormalization())
# 3rd Dense Layer
model.add(Dense(1000))
model.add(Activation('relu'))
# Add Dropout
model.add(Dropout(0.5))
# Batch Normalisation
model.add(BatchNormalization())
# Output Layer
model.add(Dense(2))
model.add(Activation('softmax'))#davor: softmax
model.summary()
# Compile
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
# Train
model.fit(
imageDataGenerator.flow(x_train, y_train, batch_size=32),
validation_data=imageDataGenerator.flow(x_test, y_test, batch_size=32),
steps_per_epoch=len(x_train) // 32, epochs=10, shuffle=True)
# Test
y_pred = np.argmax(model.predict(x_test), axis=-1)
cmat = confusion_matrix(y_test, y_pred)
print(cmat)
print(y_test.ravel())
print(y_pred.ravel()) | [
"matplotlib"
] |
b437b5f315a7274f95d31a8d91c8b4f5f940ba22 | Python | yashwanthguguloth24/control | /ketan/codes/ee18btech11049.py | UTF-8 | 1,269 | 2.984375 | 3 | [] | no_license | # License
'''
Code by Laxman Reddy
April 22,2020
Released under GNU GPL
'''
#Bode plot using scipy in python
from scipy import signal
import matplotlib.pyplot as plt
from pylab import*
#if using termux
import subprocess
import shlex
#end if
#Defining the transfer function
s1 = signal.lti([75,0.2*75], [1, 16 ,100,0]) #Transfer Function = 75(1+0.2s)/s(s^2+16s+100)
#signal.bode takes transfer function as input and returns frequency,magnitude and phase arrays
w,mag,phase = signal.bode(s1)
subplot(2,1,1)
#plt.xlabel('Frequency(rad/s)')
plt.ylabel('Magnitude(deg)')
# plt.title('Magnitude plot')
plt.semilogx(w, mag,'b') # Bode Magnitude plot
plt.axhline(y = 0,xmin=0,xmax= .35,color = 'r',linestyle='dashed')
plt.axvline(x = .22,ymin=0,color='k',linestyle='dashed')
plt.plot(.22,0,'o')
plt.text(0.757,0, '({}, {})'.format(0.757,-220.15))
plt.grid()
subplot(2,1,2)
plt.xlabel('Frequency(rad/s)')
plt.ylabel('Phase(deg)')
# plt.title('Phase plot')
plt.semilogx(w,phase) # Bode phase plot
plt.axhline(y = -180,xmin=0,color = 'r',linestyle='dashed')
plt.grid()
#if using termux
plt.savefig('./figs/ee18btech11049.pdf')
plt.savefig('./figs/ee18btech11049.eps')
subprocess.run(shlex.split("termux-open ./figs/ee18btech11049.pdf"))
#else
#plt.show()
| [
"matplotlib"
] |
b99ee4df6a8a9b06ef03e20d54e703afc2d66e71 | Python | chaitanya0906/ANGinPython | /week1.py | UTF-8 | 1,181 | 3.34375 | 3 | [] | no_license | import sys
import json
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
def computeCost(X, y, theta):
temp = np.dot(X, theta) - y
return np.sum(np.power(temp, 2)) / (2*m)
def gradientDescent(X,y,theta,alpha,iteration):
for i in range(iteration):
temp= np.dot(X,theta)-y
temp=np.dot(X.T,temp)
theta = theta -(alpha/m)*temp
return theta
data = pd.read_csv('ex1data1.txt', header = None)
#reading from dataset
X= data.iloc[:,0] # read first column
y = data.iloc[:,1] # read second column
m=len(y)
data.head()
plt.scatter(X, y)
plt.xlabel('Population of City in 10,000s')
plt.ylabel('Profit in $10,000s')
plt.show()
X=X[:,np.newaxis] #code to convert to 2 Ranked array
y=y[:,np.newaxis] #code to convert to 2 Ranked array
theta= np.zeros([2,1])
iteration=1500
alpha=0.01
ones=np.ones((m,1))
X = np.hstack((ones, X)) # adding the intercept term
J = computeCost(X, y, theta)
print(J)
theta= gradientDescent(X,y,theta,alpha,iteration)
print(theta)
J = computeCost(X, y, theta)
print(J)
plt.scatter(X[:,1], y)
plt.xlabel('Population of City in 10,000s')
plt.ylabel('Profit in $10,000s')
plt.plot(X[:,1], np.dot(X, theta))
plt.show()
| [
"matplotlib"
] |
768b6b57832f006b1bdc974f083740e5014be7e0 | Python | BeiHuiFeng/PaperPic | /paper/drawSpectralChinesePaper.py | UTF-8 | 4,784 | 2.71875 | 3 | [] | no_license | # 根据数据画出所需要的光谱图象
import matplotlib.pyplot as plt
from matplotlib import gridspec
from astropy.io import fits
import os
import copy
import numpy as np
import random
#对单个光谱数据的处理
def readfits(path,fileName):
dfu = fits.open(path + '/'+fileName)
#初始波长
beginWave = dfu[0].header['COEFF0']
#步长
step = dfu[0].header['CD1_1']
#光谱中的流量
flux = dfu[0].data[0]
#求出波长,求出与流量对应的波长
wave = np.array([10**(beginWave + step*j) for j in range(len(flux))])
data = [wave,flux]
#-------------------------------------------
# return data,poistion
return data
# 取其中一部分波长和流量
def chooseSpectralData(data):
# 截取的范围
choose_wave = []
choose_flux = []
wave, flux = data[0], data[1]
length = len(wave)
for index in range(length):
if 3900 < wave[index] < 6800:
choose_wave.append(wave[index])
choose_flux.append(flux[index])
# print(choose_wave[:35])
return [choose_wave, choose_flux]
#数据文件中的光谱数据
def exractData(fileName):
listFile = os.listdir(fileName)
dataSet = []
allPoistion = []
for file in listFile:
dfu = fits.open(fileName + '/'+file)
#读出数据并且保存
data,poistion = readfits(fileName,file)
dataSet.append(data)
allPoistion.append(poistion)
#os.chdir(os.pardir)
return dataSet,np.array(allPoistion)
def drawPic(data, word):
wave,flux = data[0],data[1]
gap = (max(flux) - min(flux)) / 8
max_y = max(flux) + gap
min_y = min(flux) - gap
text_pos = max(flux)
# print('wave:', wave)
# print('flux:', flux)
fig = plt.figure(figsize=(9,7))
plt.ylabel('flux (relative)', fontsize=15)
# 画4105的标识线
plt.vlines(4101.734, min_y, max_y, colors = 'gray', linewidth=1.0, linestyles = ':')
plt.text(4105, text_pos, r'$H\delta$')
# Hbeta的标识线
plt.vlines(4861.325, min_y, max_y, colors = 'gray', linewidth=1.0, linestyles = ':')
plt.text(4866, text_pos, r'$H\beta$')
# OIII的标识线
# 适合A5
# plt.vlines(4958.911, min_y, max_y, colors = 'gray', linewidth=1.0, linestyles = ':')
# plt.text(4962, text_pos-1500, r'OIII')
# 适合K5
plt.vlines(4958.911, min_y, max_y, colors = 'gray', linewidth=1.0, linestyles = ':')
plt.text(4962, text_pos-4500, r'OIII')
plt.vlines(5006.843, min_y, max_y, colors = 'gray', linewidth=1.0, linestyles = ':')
plt.text(5010, text_pos, r'OIII')
# 画5875.67的标识线
plt.vlines(5875.67, min_y, max_y, colors = 'gray', linewidth=1.0, linestyles = ':')
plt.text(5780, text_pos, r'Hel')
# 画OI的标识线
plt.vlines(6300.304, min_y, max_y, colors = 'gray', linewidth=1.0, linestyles = ':')
plt.text(6305, text_pos, r'OI')
# 下面是NII
plt.vlines(6548.040, min_y, max_y, colors = 'gray', linewidth=1.0, linestyles = ':')
plt.text(6453, text_pos, r'NII')
# 画6564的标识线
# 适合A5
# plt.vlines(6562.800, min_y, max_y, colors = 'gray', linewidth=1.0, linestyles = ':')
# plt.text(6568, text_pos-1500, r'$H\alpha$')
# 适合K5
plt.vlines(6562.800, min_y, max_y, colors = 'gray', linewidth=1.0, linestyles = ':')
plt.text(6568, text_pos-4500, r'$H\alpha$')
plt.vlines(6583.460, min_y, max_y, colors = 'gray', linewidth=1.0, linestyles = ':')
plt.text(6590, text_pos, r'NII')
# 下面是SII
plt.vlines(6716.440, min_y, max_y, colors = 'gray', linewidth=1.0, linestyles = ':')
plt.text(6721, text_pos, r'SII')
# 下面是Ca
plt.vlines(3933.66, min_y, max_y, colors = 'gray', linewidth=1.0, linestyles = ':')
plt.text(3800, text_pos, r'CaK')
plt.vlines(3968.45, min_y, max_y, colors = 'gray', linewidth=1.0, linestyles = ':')
plt.text(3972, text_pos, r'CaH')
# 下面是Na
plt.vlines(5891.94, min_y, max_y, colors = 'gray', linewidth=1.0, linestyles = ':')
plt.text(5897, text_pos, r'NaD')
plt.xlabel('wavelength ('+word+')', fontsize=15)
plt.plot(wave, flux, c='black', linewidth=0.8)
plt.show()
if __name__ == '__main__':
# data = readfits('newSpectral/A5V','spec-55889-F8906_sp01-122.fits.gz')
# data = readfits('newSpectral/A5V','spec-55903-B90304_sp03-132.fits.gz')
# data = readfits('newSpectral/K5','spec-56199-EG000313N173308V_1_sp02-001.fits.gz')
data = readfits('newSpectral/K5','spec-56200-EG004228N273834V_1_sp06-148.fits.gz')
word = 'A'
with open('word.txt', 'r', encoding='utf-8') as f:
word = f.read()
# print(word)
choose_data = chooseSpectralData(data)
drawPic(choose_data,word)
print()
| [
"matplotlib"
] |
7593e80d98ea873cf2dc62c55c78d333be1d630b | Python | nlmarc98/Thesis | /AdaptiveEntropy/RiF.py | UTF-8 | 4,767 | 2.59375 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 1 14:24:57 2019
@author: Luc
"""
import numpy as np
from scipy.stats import vonmises, norm
import scipy.interpolate as interp
import matplotlib.pyplot as plt
from numpy.matlib import repmat
def generativeModel(a_oto,b_oto,sigma_prior,kappa_ver, kappa_hor,tau,frames,rods):
# this is actually also a stimulus input (so similar to frames and rods)
theta_head = [0*np.pi/180, 30*np.pi/180]
# you might want to run a loop over head-angles
j=1
# Aocr is normally a free parameter (the uncompensated ocular counterroll)
Aocr = 14.6*np.pi/180 # convert to radians and fixed across subjects
# compute the number of stimuli
frame_num = len(frames)
rod_num = len(rods)
# head_num = len(head)
# the theta_rod I need at high density for the cumulative density function
theta_rod=np.linspace(-np.pi,np.pi,10000)
# allocate memory for the lookup table (P) and for the MAP estimate
P = np.zeros([rod_num,frame_num])
MAP = np.zeros([frame_num])
mu = np.zeros([frame_num])
#P = np.zeros([rod_num,frame_num,head_num])
# move through the frame vector
for i in range(0,frame_num):
# the frame in retinal coordinates
frame_retinal = -(frames[i]-theta_head[j])-Aocr*np.sin(theta_head[j]);
# make sure we stay in the -45 to 45 deg range
if frame_retinal > np.pi/4:
frame_retinal = frame_retinal - np.pi/2;
elif frame_retinal < -np.pi/4:
frame_retinal = frame_retinal + np.pi/2;
# compute how the kappa's changes with frame angle
kappa1 = kappa_ver-(1-np.cos(np.abs(2*frame_retinal)))*tau*(kappa_ver-kappa_hor)
kappa2 = kappa_hor+(1-np.cos(np.abs(2*frame_retinal)))*(1-tau)*(kappa_ver-kappa_hor)
# probability distributions for the four von-mises
P_frame1 = vonmises.pdf(-theta_rod+frame_retinal,kappa1)
P_frame2 = vonmises.pdf(-theta_rod+np.pi/2+frame_retinal,kappa2)
P_frame3 = vonmises.pdf(-theta_rod+np.pi+frame_retinal,kappa1)
P_frame4 = vonmises.pdf(-theta_rod+3*np.pi/2+frame_retinal,kappa2)
# add the probability distributions
P_frame = (P_frame1+P_frame2+P_frame3+P_frame4)
P_frame = P_frame/np.sum(P_frame) # normalize to one
# the otoliths have head tilt dependent noise (note a and b switched from CLemens et a;. 2009)
#print(a_oto+b_oto*theta_head[j])
P_oto = norm.pdf(theta_rod,theta_head[j],a_oto+b_oto*theta_head[j])
# the prior is always oriented with gravity
P_prior = norm.pdf(theta_rod,0,sigma_prior)
# compute the (cumulative) density of all distributions convolved
# NOTE THIS IS THE HEAD ORIENTATION IN SPACE!
M=np.multiply(np.multiply(P_oto, P_frame),P_prior)
cdf=np.cumsum(M)/np.sum(M)
# now shift the x-axis, to make it rod specific
E_s_cumd = theta_rod-theta_head[j]+Aocr*np.sin(theta_head[j]);
# now use a spline interpolation to get a continuous P(theta)
spline_coefs=interp.splrep(E_s_cumd,cdf, s = 0)
P[:,i] = interp.splev(rods,spline_coefs, der = 0)
# find the MAP
index = np.argmax(M)
MAP[i]=-E_s_cumd[index] # negative sign to convert to 'on retina'
index = np.argmax(cdf>0.5)
mu[i] =-E_s_cumd[index]
# construct the P(right) matrix
#for k in range(0,rod_num):
# index = np.argmax(E_s_cumd>rods[k])
# P[k][i]=cdf[index]
return P, MAP, mu
# stimuli and generative parameters
nframes = 251
nrods = 11111
# stimuli should be all in radians
rods= np.linspace(-15,15.0,nrods)*np.pi/180
frames = np.linspace(-45,45,nframes)*np.pi/180.0
# parameters (in radians, but not for b_oto)
a_oto = 3.2*np.pi/180
b_oto = 0.12
sigma_prior = 10.0*np.pi/180
kappa_ver = 45.0
kappa_hor = 1.45
tau =0.9
P, MAP, mu =generativeModel(a_oto,b_oto,sigma_prior,kappa_ver,kappa_hor,tau,frames,rods)
plt.plot(rods*180/np.pi,P)
plt.xlabel('rod [deg]')
plt.ylabel('P(right)')
plt.figure()
frames_new=frames[:,np.newaxis]
rods_new=rods[:,np.newaxis]
plt.contourf(repmat(rods_new*180/np.pi,1,nframes),repmat(frames_new.transpose()*180/np.pi,nrods,1),P)
plt.xlabel('rod [deg]')
plt.ylabel('frame [deg]')
PSE = np.zeros(len(frames))
for k in range(0,len(frames)):
index = np.argmax(P[:,k]>0.5)
print(index)
PSE[k]=-rods[index]
plt.figure()
plt.plot(frames*180/np.pi,MAP*180/np.pi,frames*180/np.pi,mu*180/np.pi)
| [
"matplotlib"
] |
d4495324c3e00840884d5b08146f1793ad93bb6e | Python | ubombar/CVPR-MultiPoseEstimation | /CVPR-MultiPoseEstimation/datasetdemo.py | UTF-8 | 3,276 | 2.625 | 3 | [] | no_license | import torch
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
import json
import cv2
def load_pair(image_path, anno_path):
with open(anno_path, 'r')as f:
annotation = json.loads(f.read())
image = np.array(Image.open(image_path))
name = image_path.split('/')[-1]
return image, annotation[name]
def plot_image(image_original, image_boxed, image_pose, i, dataset, add_title=True, height=3):
plt.subplot(height, 3, 3 * i + 1)
plt.imshow(image_original)
plt.axis('off')
if add_title: plt.title(dataset + ' original')
plt.subplot(height, 3, 3 * i + 2)
plt.imshow(image_boxed)
plt.axis('off')
if add_title: plt.title(dataset + ' bounding boxes')
plt.subplot(height, 3, 3 * i + 3)
plt.imshow(image_pose)
plt.axis('off')
if add_title: plt.title(dataset + ' pose')
def draw_bbox(image, annotation, dxy=(0, 0)):
size = (image.shape[0] + image.shape[1]) // 250
image_boxed = np.copy(image)
for box in annotation['boxes']:
x1, y1, x2, y2 = box
image_boxed = cv2.rectangle(image_boxed, (int(x1 + dxy[0]), int(y1 + dxy[1])), (int(x2 + dxy[0]), int(y2 + dxy[1])), (0,255,0), size)
return image_boxed
def draw_pose(image, annotation, dxy=(0, 0)):
size = (image.shape[0] + image.shape[1]) // 200
result = np.copy(image)
for kps in annotation['keypoints']:
for i in range(0, len(kps), 2):
x, y = int(kps[i] + dxy[0]), int(kps[i + 1] + dxy[1])
result = cv2.circle(result, (x,y), size, (255,0,0), -1)
return result
coco_image, coco_anno = load_pair('./demods/COCO_val2014_000000001700.jpg', './datasets/annotations/coco_processed_val2014.json')
crowd_image, crowd_anno = load_pair('./demods/100481.jpg', './datasets/annotations/crowdpose_processed_set.json')
lsp_image, lsp_anno = load_pair('./demods/im0116.jpg', './datasets/annotations/lsp_processed_set.json')
coco_boxed = draw_bbox(coco_image, coco_anno)
crowd_boxed = draw_bbox(crowd_image, crowd_anno)
lsp_boxed = draw_bbox(lsp_image, lsp_anno)
coco_pose = draw_pose(coco_image, coco_anno)
crowd_pose = draw_pose(crowd_image, crowd_anno)
lsp_pose = draw_pose(lsp_image, lsp_anno)
# crop
coco_image = coco_image[:450, 150:450, :]
crowd_image = crowd_image
lsp_image = lsp_image[180:, 160:650, :]
coco_boxed = coco_boxed[:450, 150:450, :]
crowd_boxed = crowd_boxed
lsp_boxed = lsp_boxed[180:, 160:650, :]
coco_pose = coco_pose[:450, 150:450, :]
crowd_pose = crowd_pose
lsp_pose = lsp_pose[180:, 160:650, :]
plot_image(coco_image, coco_boxed, coco_pose, 0, 'Coco14')
plot_image(crowd_image, crowd_boxed, crowd_pose, 1, 'Crowdpose')
plot_image(lsp_image, lsp_boxed, lsp_pose, 2, 'LSP')
plt.show()
'''
print(coco_image.shape)
with plt.style.context("seaborn-white"):
plt.figure(figsize=(15, 15))
plt.subplot(3, 3, 1)
plt.imshow(coco_image[:450, 150:450, :]); plt.title('COCO 2014'); plt.axis('off')
plt.subplot(3, 2, 3)
plt.imshow(lsp_image[180:, 160:650, :]); plt.title('LSP'); plt.axis('off')
plt.subplot(3, 2, 5)
plt.imshow(crowd_image); plt.title('Crowdpose'); plt.axis('off')
plt.show()
''' | [
"matplotlib"
] |
d432943aae7eee533663b754341480423b07e000 | Python | Vegctrp/lang100knock | /Ch08/079.py | UTF-8 | 7,853 | 2.75 | 3 | [] | no_license | import re
import codecs
from stemming.porter2 import stem
import numpy as np
import pickle
import matplotlib.pyplot as plt
negfile="../data/rt-polaritydata/rt-polarity.neg"
posfile="../data/rt-polaritydata/rt-polarity.pos"
sentiment_file="./sentiment.txt"
outfile="./073result.txt"
use_stopword=["./stopword1.txt","stopword2.txt"]
pattern=re.compile(r"[a-zA-Z']+")
def make_StopWordList(swlist):
stopword_list=[]
for file in use_stopword:
with open(file) as s:
swlist=pattern.findall(s.read())
swlist_lower=[stopword.lower() for stopword in swlist]
stopword_list.extend(swlist_lower)
stopword_list=list(set(stopword_list))
return stopword_list
stopword_list=make_StopWordList(use_stopword)
def isStop(word,stopwordlist=stopword_list):
return word.lower() in stopword_list
def make_reviewWordList():
with codecs.open(negfile,"r",encoding="utf-8",errors='ignore') as negtxt,codecs.open(posfile,"r",encoding="utf-8",errors='ignore') as postxt:
neg=negtxt.read()
pos=postxt.read()
sentences=neg+" "+pos
match=pattern.findall(sentences)
return match
def make_featureWordList(reviewWordList):
wordlist_ws=[]
for word in reviewWordList:
if not (isStop(word) or word==""):
wordlist_ws.append(stem(word))
featureWordList=list(set(wordlist_ws))
return featureWordList
def make_matrix(featureWordList,i):
feature_vecs=[]
point_vec=[]
test_feature_vec=[]
test_point_vec=[]
with open(sentiment_file,"r") as sent:
length=len(sent.readlines())
with open(sentiment_file,"r") as sent:
for k,line in enumerate(sent.readlines()):
#if length/5*i<=k and length/5*(i+1)>k:
# test_sent.append(line)
#else:
point=line[0]
word_list=pattern.findall(line)
stem_list=[stem(word) if not isStop(word) else "." for word in word_list]
stem_set=set(stem_list)
line_vector=[1 if feature in stem_set else 0 for feature in featureWordList]
if length/5*i<=k and length/5*(i+1)>k:
test_feature_vec.append(line_vector)
if point=="+":
test_point_vec.append("1")
else:
test_point_vec.append("0")
else:
feature_vecs.append(line_vector)
if point=="+":
point_vec.append("1")
else:
point_vec.append("0")
feature_mat=np.array(feature_vecs,dtype=float)
point_mat=np.array(point_vec,dtype=float)
test_feature_mat=np.array(test_feature_vec,dtype=float)
test_point_mat=np.array(test_point_vec,dtype=float)
print(np.shape(feature_mat))
print(np.shape(test_feature_mat))
return feature_mat,point_mat,test_feature_mat,test_point_mat
class LogisticRegression():
def __init__(self,matx,maty,learning_rate,epoch):
self.trainx=matx
self.trainy=maty
#self.theta=np.random.rand(self.trainx.shape[1]+1)
self.theta=np.zeros(self.trainx.shape[1]+1)
self.learning_rate=learning_rate
self.epoch=epoch
def set_testdata(self,matx,maty):
self.testx=matx
self.testy=maty
def standardize(self):
mu=self.trainx.mean()
sigma=self.trainx.std()
self.trainx=(self.trainx-mu)/sigma
def add_x0(self):
x0=np.ones([self.trainx.shape[0],1])
self.trainx=np.hstack([x0,self.trainx])
xx0=np.ones([self.testx.shape[0],1])
self.testx=np.hstack([xx0,self.testx])
def shapingx(self):
self.standardize()
self.add_x0()
def sigmoid(self,x):
return 1.0/(1.0+np.exp(-np.dot(x,self.theta)))
def update_theta(self):
grad=np.dot(self.trainy-self.sigmoid(self.trainx),self.trainx)/int(np.shape(self.trainy)[0])
new_theta=self.theta+self.learning_rate*grad
self.theta=new_theta
def cost(self):
m=self.trainx.shape[0]
h=self.sigmoid(self.trainx)
j=1/m*np.sum(-self.trainy*np.log(h)-(np.ones(m)-self.trainy)*np.log(np.ones(m)-h))
return j
def test_cost(self):
m=self.testx.shape[0]
h=self.sigmoid(self.testx)
j=1/m*np.sum(-self.testy*np.log(h)-(np.ones(m)-self.testy)*np.log(np.ones(m)-h))
return j
def learning(self,num):
outx=[]
outy_test=[]
outy_train=[]
for i in range(self.epoch):
self.update_theta()
traincost=self.cost()
testcost=self.test_cost()
outx.append(i+1)
outy_train.append(traincost)
outy_test.append(testcost)
print(str(i)+" train :"+str(traincost)+", test : "+str(testcost))
#plt.plot(outx,outy_train,color="r",label="train")
#plt.plot(outx,outy_test,color="b",label="test")
#plt.legend()
#plt.savefig("./figure"+str(num)+".png")
#########################################################################################
featureWordList=make_featureWordList(make_reviewWordList())
prec_N=[]
prec_num=[]
rec_N=[]
rec_num=[]
qrat_N=[]
qrat_num=[]
rat=[x for x in range(1,10,1)]
rat2=[x*0.1 for x in range(1,10,1)]
for i in range(5):
feature_mat,point_mat,test_feature_mat,test_point_mat=make_matrix(featureWordList,i)
lll=LogisticRegression(feature_mat,point_mat,10,300)
#lll.shapingx()
lll.set_testdata(test_feature_mat,test_point_mat)
lll.add_x0()
lll.learning(i)
theta=lll.theta
for ratio in rat:
precision_N=0
precision_num=0
recall_N=0
recall_num=0
qrate_N=0
qrate_num=0
for x,y in zip(test_feature_mat,test_point_mat):
x=np.insert(x,0,1)
ans_label="1" if y==1.0 else "-1"
fv=np.array(x).reshape(1,12088)
predict=lll.sigmoid(fv)
if predict>ratio/10:
predict_label="1"
else:
predict_label="-1"
p=predict[0]
if predict_label=="1":
precision_N+=1
if ans_label=="1":
precision_num+=1
if ans_label=="1":
recall_N+=1
if predict_label=="1":
recall_num+=1
if ans_label==predict_label:
qrate_num+=1
qrate_N+=1
if i==0:
prec_N.append(precision_N)
prec_num.append(precision_num)
rec_N.append(recall_N)
rec_num.append(recall_num)
qrat_N.append(qrate_N)
qrat_num.append(qrate_num)
else:
prec_N[ratio-1]+=precision_N
prec_num[ratio-1]+=precision_num
rec_N[ratio-1]+=recall_N
rec_num[ratio-1]+=recall_num
qrat_N[ratio-1]+=qrate_N
qrat_num[ratio-1]+=qrate_num
prec=[]
rec=[]
qrat=[]
f1rat=[]
for i in range(9):
answer_rate=qrat_num[i]/qrat_N[i]
precision=prec_num[i]/prec_N[i]
recall=rec_num[i]/rec_N[i]
F1score=2*(precision*recall)/(precision+recall)
print("precision : "+str(precision)+", recall : "+str(recall)+", F1score : "+str(F1score))
prec.append(precision)
rec.append(recall)
qrat.append(answer_rate)
f1rat.append(F1score)
plt.plot(rat2,prec,color="green",label="precision")
plt.plot(rat2,rec,color="blue",label="recall")
plt.plot(rat2,qrat,color="red",label="正解率")
plt.plot(rat2,f1rat,color="black",label="F1score")
plt.legend()
plt.grid(axis='both')
plt.show() | [
"matplotlib"
] |
196c990237ec8392614f9235aa2110e139995b10 | Python | treverhines/HinesHetland2016-ElMayor | /JGRSubmission/Supplementary/effective_viscosity.py | UTF-8 | 2,537 | 3.4375 | 3 | [] | no_license | #!/usr/bin/env python
import numpy as np
import sympy as sp
import matplotlib.pyplot as plt
def ivt(uhat,s,n):
'''
Extension of the initial value theorem which computes the n'th
derivative of u(t) evaluated at t=0 from the Laplace transform of
u(t), uhat(s).
This is eq. (A.5) in Appendix A
PARAMETERS
----------
uhat: Laplace transform of u. This is a symbolic expression
containing s
s: Laplace domain variable
n: the derivative order
RETURNS
-------
u_n: symbolic expression for the nth derivative of u evaluated at
t=0
'''
if n == 0:
expr = s*uhat
u_0 = expr.limit(s,np.inf)
return u_0
elif n > 0:
expr = s**(n+1)*uhat - sum(s**m*ivt(uhat,s,n-m) for m in range(1,n+1))
u_n = expr.limit(s,np.inf)
return u_n
def ilt(uhat,s,t,N):
'''
Evaluates the inverse Laplace transform of uhat(s) through a Taylor
series expansion
This is a combination of eqs. (A.7) and (A.5) in Appendix A
PARAMETERS
----------
uhat: Laplace transform of u. This is a symbolic expression
containing s
s: Laplace domain variable
t: time domain variable
N: order of the Taylor series expansion
RETURNS
-------
series: symbolic expression for the series expansion of the u(x)
about x=0
'''
series = sum((ivt(uhat,s,n)*t**n)/sp.factorial(n) for n in range(N+1))
return series
s = sp.symbols('s')
t = sp.symbols('t')
mu = sp.symbols('mu')
muk = sp.symbols('mu_K')
muk1 = sp.symbols('mu_K1')
muk2 = sp.symbols('mu_K2')
etam = sp.symbols('eta_M')
etak = sp.symbols('eta_K')
etak1 = sp.symbols('eta_K1')
etak2 = sp.symbols('eta_K2')
sigma = sp.symbols('sigma_0')
# COMPUTE CREEP COMPLIANCE FOR MAXWELL MATERIAL
eps_hat = sigma*(1/mu + 1/(etam*s))/s
# COMPUTE CREEP COMPLIANCE FOR KELVIN MATERIAL
eps_hat = sigma/((etak*s + muk)*s)
# COMPUTE CREEP COMPLIANCE FOR ZENER MATERIAL
eps_hat = sigma/(mu*s) + sigma/((etak*s + muk)*s)
# COMPUTE CREEP COMPLIANCE FOR BURGERS MATERIAL
eps_hat = sigma*(1/mu + 1/(etam*s))/s + sigma/((etak*s + muk)*s)
# COMPUTE CREEP COMPLIANCE FOR GENERAL KELVIN MATERIAL
eps_hat = sigma*(1/mu + 1/(etam*s))/s + sigma/((etak1*s + muk1)*s) + sigma/((etak2*s + muk2)*s)
eps_t = ilt(eps_hat,s,t,5)
#numeps = eps_t.subs(muk,1)
#numeps = numeps.subs(etak,1)
#numeps = numeps.subs(sigma,1)
#numeps = sp.lambdify(t,numeps)
#times = np.linspace(0,10,100)
#u = numeps(times)
#plt.plot(times,u)
#plt.show()
eta_eff = (sigma/eps_t.diff(t)).subs(t,0).simplify().expand()
sp.pprint(eta_eff)
sp.pprint(eps_t)
| [
"matplotlib"
] |
271e320d2674f494db1d1219f3d055a38bd3d4ed | Python | Danlowe95/projects | /rectdrawer.py | UTF-8 | 4,013 | 2.734375 | 3 | [] | no_license | #dan lowe/ jonathan wrona
from xml.dom import minidom
from scipy import misc
import xml.etree.ElementTree as et
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import math
import cv2
##############################################################
#
# ClickTracker2.py is a snippet of code for tracking the
# clicks that a user has made on an image and writing that
# data to an xml file. To test that xml file is named
# test.xml in the same folder as ClickTracker.py.
#
# This version of ClickTracker can update the file after
# each event or live.
#
# Author: Dan Lowe/Jonathan Wrona
#
##############################################################
class ClickTracker(object):
file_ = 'test.xml'
previous_pos_x = -1.0
previous_pos_y = -1.0
button_pressed = False
root = et.Element('data')
curr_event = et.Element('event')
brush_size = 10
def __init__(self, ax, file_):
self.showverts = True
self.figure = plt.figure(1)
self.file_ = file_
canvas = self.figure.canvas
canvas.mpl_connect('button_press_event', self.button_press_callback)
canvas.mpl_connect('button_release_event', self.button_release_callback)
#canvas.mpl_connect('motion_notify_event', self.on_move)
def button_press_callback(self, event):
if(event.button == 1 or event.button == 3):
self.button_pressed = True
self.curr_event = et.SubElement(self.root, 'event')
self.curr_event.set('button', get_mouse_button(event.button))
self.curr_event.set('size', str(self.brush_size))
current_pos_x = int(math.floor(event.xdata))
current_pos_y = int(math.floor(event.ydata))
coord = et.SubElement(self.curr_event, 'coord')
coord.set('x', str(current_pos_x))
coord.set('y', str(current_pos_y))
#UNCOMMENT IF UPDATING LIVE#
tree = et.ElementTree(self.root)
tree.write(self.file_)
has_two_points()
def button_release_callback(self, event):
self.button_pressed = False
previous_pos_x = -1.0
previous_pos_y = -1.0
if(len(self.curr_event.findall('coord')) <= 0):
self.root.remove(self.root[-1])
#UNCOMMENT IF UPDATING AFTER EACH EVENT$
#tree = et.ElementTree(self.root)
#tree.write(self.file_)
# def on_move(self, event):
# if(self.button_pressed):
# current_pos_x = int(math.floor(event.xdata))
# current_pos_y = int(math.floor(event.ydata))
# if(not(current_pos_x == self.previous_pos_x and current_pos_y == self.previous_pos_y)):
# print 'updating path'
# self.previous_pos_x = current_pos_x
# self.previous_pos_y = current_pos_y
# coord = et.SubElement(self.curr_event, 'coord')
# coord.set('x', str(current_pos_x))
# coord.set('y', str(current_pos_y))
# #UNCOMMENT IF UPDATING LIVE#
# tree = et.ElementTree(self.root)
# tree.write(self.file_)
# has_two_points()
def get_mouse_button(button):
if(button == 1):
return "left"
elif(button == 3):
return "right"
def has_two_points():
# xmldoc = minidom.parse('test.xml')
# itemlist = xmldoc.getElementsByTagName('event')
# if(itemlist.length > 1):
tree = et.parse('test.xml')
root = tree.getroot()
print len(list(root))
if(len(list(root)) %2 == 0):
pts = []
for neighbor in root.iter('coord'):
x = int(neighbor.get('x'))
y = int(neighbor.get('y'))
pts.append((x,y))
print 'img draw'
cv2.rectangle(img, pts[len(pts)-2], pts[len(pts)-1], (255), 8)
ax.images.pop()
ax.imshow(img)
plt.draw()
def click_demo():
f = open('test.xml', 'w')
f.close()
print 'img assign'
global img
#img = misc.lena()
origin = ''
img = mpimg.imread('zebra.jpg')
#img = np.random.uniform(255, 0, size=(100, 100))
global ax
ax = plt.subplot(111)
ax.imshow(img)
ct = ClickTracker(ax, 'test.xml')
plt.title('Choose two points for your rectange by left clicking')
plt.show()
# itemlist = xmldoc.getElementsByTagName('event')
# print len(itemList)
# print itemlist[0].attributes['left'].value
# for s in itemlist :
# print s.attributes['name'].value
####create rectangle
if __name__ == '__main__':
click_demo()
| [
"matplotlib"
] |
bd81830252cd318cbfe51e3d709867dad15e051a | Python | jgliss/pydoas | /pydoas/helpers.py | UTF-8 | 3,775 | 3.09375 | 3 | [
"BSD-3-Clause",
"LicenseRef-scancode-philippe-de-muyter"
] | permissive | # -*- coding: utf-8 -*-
#
# Pydoas is a Python library for the post-analysis of DOAS result data
# Copyright (C) 2017 Jonas Gliß ([email protected])
#
# This program is free software: you can redistribute it and/or
# modify it under the terms of the BSD 3-Clause License
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See BSD 3-Clause License for more details
# (https://opensource.org/licenses/BSD-3-Clause)
"""
Module containing all sorts of helper methods
"""
import matplotlib.cm as colormaps
import matplotlib.colors as colors
from datetime import datetime, time, date
from matplotlib.pyplot import draw
from numpy import linspace, hstack, vectorize, int, floor, log10, isnan
exponent = lambda num: int(floor(log10(abs(num))))
time_delta_to_seconds = vectorize(lambda x: x.total_seconds())
def to_datetime(value):
"""Method to evaluate time and / or date input and convert to datetime"""
if isinstance(value, datetime):
return value
elif isinstance(value, date):
return datetime.combine(value, time())
elif isinstance(value, time):
return datetime.combine(date(1900,1,1), value)
else:
raise ValueError("Conversion into datetime object failed for input: "
"%s (type: %s)" %(value, type(value)))
def isnum(val):
"""Checks if input is number (int or float) and not nan
:returns: bool, True or False
"""
if isinstance(val, (int, float)) and not isnan(val):
return True
return False
def shifted_color_map(vmin, vmax, cmap = None):
"""Shift center of a diverging colormap to value 0
.. note::
This method was found `here <http://stackoverflow.com/questions/
7404116/defining-the-midpoint-of-a-colormap-in-matplotlib>`_
(last access: 17/01/2017). Thanks to `Paul H <http://stackoverflow.com/
users/1552748/paul-h>`_ who provided it.
Function to offset the "center" of a colormap. Useful for
data with a negative min and positive max and if you want the
middle of the colormap's dynamic range to be at zero level
:param vmin: lower end of data value range
:param vmax: upper end of data value range
:param cmap: colormap (if None, use default cmap: seismic)
:return:
- shifted colormap
"""
if cmap is None:
cmap = colormaps.seismic
midpoint = 1 - abs(vmax)/(abs(vmax) + abs(vmin))
cdict = {
'red': [],
'green': [],
'blue': [],
'alpha': []
}
# regular index to compute the colors
reg_index = linspace(0, 1, 257)
# shifted index to match the data
shift_index = hstack([
linspace(0.0, midpoint, 128, endpoint=False),
linspace(midpoint, 1.0, 129, endpoint=True)
])
for ri, si in zip(reg_index, shift_index):
r, g, b, a = cmap(ri)
cdict['red'].append((si, r, r))
cdict['green'].append((si, g, g))
cdict['blue'].append((si, b, b))
cdict['alpha'].append((si, a, a))
return colors.LinearSegmentedColormap('shiftedcmap', cdict)
def _print_list(lst):
"""Print a list rowwise"""
for item in lst:
print(item)
def rotate_xtick_labels(ax, deg=30, ha="right"):
"""Rotate xtick labels in matplotlib axes object"""
draw()
lbls = ax.get_xticklabels()
lbls = [lbl.get_text() for lbl in lbls]
ax.set_xticklabels(lbls, rotation = 30, ha = "right")
draw()
return ax
def find_fitted_species_doasis_header(file_name):
"""Search all fitted species in header of DOASIS resultfile"""
raise NotImplementedError
| [
"matplotlib"
] |
8c7f320b43a73c0bea505cb3e65c7c6634d7f1b7 | Python | shravanc/tensorflow_certification | /timeseries/week2/exp/prepare_data.py | UTF-8 | 969 | 2.578125 | 3 | [] | no_license | import pandas as pd
import tensorflow as tf
from lib.utils import plot_series
import numpy as np
import matplotlib.pyplot as plt
path = "/home/shravan/python_programs/time_series/test_time_series.csv"
new_file = "/home/shravan/python_programs/time_series/f1.csv"
new_file = "/home/shravan/python_programs/time_series/f2.csv"
df = pd.read_csv(path, names=["series"])
series = df['series'].values
time = list(range(0, len(series)))
values = df['series'].values
dataset = tf.data.Dataset.range(1000)
#dataset = tf.data.Dataset.from_tensor_slices(values)
dataset = dataset.window(25, shift=1, drop_remainder=True)
dataset = dataset.flat_map(lambda window: window.batch(25))
dataset = dataset.map(lambda window: (window[:-1], window[-1: ]))
fp = open(new_file, 'w+')
for x, y in dataset:
a = x.numpy().tolist()
a = ','.join([str(i) for i in a])
a += f",{str(y.numpy()[0])}"
print(a)
print(x.numpy(), y.numpy())
fp.write(a + '\r\n')
#break
fp.close()
| [
"matplotlib"
] |
18e12e469e49b56deb7f4e35bd044467ffd464b0 | Python | CooperCao/megabot | /settings.py | UTF-8 | 2,124 | 2.578125 | 3 | [] | no_license | ## =================================================================================
## Packages
## =================================================================================
import os# to use mail in sh
import time
import random
import numpy as np
import matplotlib.pyplot as plt
#from scipy import signal# using correlation for pattern matching
from PIL import Image
import pyautogui# keyboard, mouse control using python also takes screenshots
import cv2# template matching to find a pattern in an image
import pytesseract# text recognition
import pyperclip# to get clipboard content
## =================================================================================
## Variables definition
## =================================================================================
dofusRoot = "/home/aurelien/git/megabot.git/trunk/"
picRoot = dofusRoot + "pictures/"
player = "[email protected]"
## =================================================================================
## Fetching Dofus window and screen size
## =================================================================================
## For now I only got the up/left and down/right corners postitions using
## pyautogui.position()
mapBbox = (241, 76), (1169, 615)
screenBbox = (), ()
inventoryFirstCellBbox = (976, 268), (999, 285)#(968, 250), (1015, 300)
bankInventoryFirstCellBbox = (933, 277), (966, 308)
## =================================================================================
## Sending mail to user
## =================================================================================
def mail(subject, text = "", adress = player, attachments = []):
command = 'mail -s "%s" %s < %smail.txt' % (subject, adress, dofusRoot)
os.system(command)
return command
## =================================================================================
## Middle of a bounding box
## =================================================================================
def bBoxMiddle(bBox):
"""return the point in the middle of a bounding box"""
x0, y0 = bBox[0]
x1, y1 = bBox[1]
return (x0 + x1) / 2 , (y0 + y1) /2
| [
"matplotlib"
] |
3bed5341de83583fe11458fad41ad561c29673c5 | Python | jackhamel16/MSU-EMresearch-Misc | /sz_plotter.py | UTF-8 | 1,370 | 3.515625 | 4 | [] | no_license | import matplotlib.pyplot as plt
import numpy as np
def get_bloch_data(file_name):
"""
Opens a bloch.dat file and extracts the time data and s data from it
file_name is the name of the path to the file / file name
returns list of the time at each step and a list of s data
"""
time_list = []
s_list = []
bloch_file = open(file_name)
for line in bloch_file:
line_list = line.split()
time_list.append(line_list[0])
s_list.append(line_list[1:])
bloch_file.close()
return time_list, s_list
def plot_sz(s_list):
"""
Plots sz comp of pseduospin of each dot at each timestep
Also prints the dot count
"""
figure = plt.figure()
axes = figure.add_subplot(111)
axes.set_title("Pseudospin-Z of all system dots")
axes.set_xlabel("Timestep")
axes.set_ylabel("Pseudospin-Z")
count = 0
for dot in range(len(s_list[0])//3):
dot1 = [float(time[2+dot*3]) for time in s_list]
axes.plot(dot1)
count += 1
print("Dot count: "+str(count))
def main():
"""
Runs two functions above and produces a plot of the sz component of each\
dot throughout all simulation time
"""
bloch_file_name = "../sim_results/run8/bloch.dat"
time_list, s_list = get_bloch_data(bloch_file_name)
plot_sz(s_list) | [
"matplotlib"
] |
4563678e2105d375191e055563498bbbde94ecfe | Python | caramaria2/masterExperiments | /ID63.py | UTF-8 | 2,400 | 2.640625 | 3 | [] | no_license | import pandas as pd
import time
import re
import string
import nltk
import seaborn as sns
import matplotlib.pyplot as plt
import neattext.functions as nfx
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import SVC
from sklearn.model_selection import train_test_split
from sklearn.metrics import multilabel_confusion_matrix, classification_report
from skmultilearn.problem_transform import BinaryRelevance, ClassifierChain, LabelPowerset
def build_model(model, mlb_estimator, xtrain, ytrain, xtest, ytest):
clf = mlb_estimator(model)
clf.fit(xtrain, ytrain)
clf_predict = clf.predict(xtest)
r = classification_report(ytest, clf_predict)
print(r)
writeLog(r)
def lowerCase(text):
text_low = text.str.lower()
return text_low
def removePunctuation(text):
text = "".join([c for c in text if c not in string.punctuation])
return text
def tokenizeText(text):
tokenized_text = re.split('\W+', text)
return tokenized_text
def removeStopwords(tokenized_text):
text = [word for word in tokenized_text if word not in stopwords]
return text
def writeLog(log):
f = open(filename, "a")
logN = str(log)
f.write(logN + '\n') # Ensures linebreak for automated analysis
f.close()
#---------- End of functions--------
#Create Log File
path = "./logfiles/"
timestamp = int(round(time.time() * 1000))
filename = path+str(timestamp)+'.csv';
f = open(filename, "x")
#Read Data
data = pd.read_csv('../SentimentData63.csv' , index_col=0)
stopwords = nltk.corpus.stopwords.words('english')
#Prerocessing
corpus = data['comment'].apply(nfx.remove_stopwords)
corpus = corpus.apply(nfx.remove_punctuations)
corpus = corpus.apply(nfx.remove_urls)
corpus = corpus.apply(nfx.remove_hashtags)
corpus = corpus.apply(nfx.remove_emails)
#feature extraction
tfidf = TfidfVectorizer().fit_transform(corpus)
all_features = tfidf
plot = data[['NA','BUG', 'FUNC', 'NON_FUNC']]
#Multilabel
all_lablesML = data[['NA','BUG', 'FUNC', 'NON_FUNC']]
X_train, X_test, y_train, y_test = train_test_split(all_features, all_lablesML, test_size=0.2, random_state=42)
writeLog("DT")
clf_chain_model =build_model(DecisionTreeClassifier(), LabelPowerset, X_train, y_train, X_test, y_test)
writeLog("SVC")
clf_chain_model =build_model(SVC(), LabelPowerset, X_train, y_train, X_test, y_test)
| [
"matplotlib",
"seaborn"
] |
91275254e8ffd81763f7a27885f850e8acb39e9c | Python | DavidPits/BookBuster | /plotAnalyzer.py | UTF-8 | 8,775 | 2.515625 | 3 | [] | no_license | import os
import pickle
import random
import numpy as np
import goodreads as gr
from goodreads import client
import re
from string import punctuation
import nltk
import wikipedia as wk
import re
import os
from gensim import corpora, models, similarities
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
import json
import plotly.express as px
EUCLIDEAN = 2
COSINE = 1
nltk.download('punkt')
def text_to_tokens(text):
book_punctuation = set(punctuation).union({'‘', '’', '“', '”'}).union('0-9')
if "<br />" in text:
text = text.replace("<br />", ' ')
for char in book_punctuation:
text = text.replace(char, '')
tokens = word_tokenize(text.lower())
stop_words = stopwords.words('english')
list_of_words = [word for word in tokens if word not in stop_words]
# stemmer = PorterStemmer()
# list_of_words = [stemmer.stem(word) for word in list_of_words]
return list_of_words
def load_word2vec():
""" Load Word2Vec Vectors
Return:
wv_from_bin: All 3 million embeddings, each lengh 300
"""
import gensim.downloader as api
wv_from_bin = api.load("word2vec-google-news-300")
vocab = list(wv_from_bin.vocab.keys())
print(wv_from_bin.vocab[vocab[0]])
print("Loaded vocab size %i" % len(vocab))
return wv_from_bin
def create_or_load_slim_w2v(words_list, cache_w2v=False):
"""
returns word2vec dict only for words which appear in the dataset.
:param words_list: list of words to use for the w2v dict
:param cache_w2v: whether to save locally the small w2v dictionary
:return: dictionary which maps the known words to their vectors
"""
w2v_path = "w2v_dict.pkl"
if not os.path.exists(w2v_path):
full_w2v = load_word2vec()
w2v_emb_dict = {k: full_w2v[k] for k in words_list if k in full_w2v}
if cache_w2v:
save_pickle(full_w2v, w2v_path)
else:
w2v_emb_dict = load_pickle(w2v_path)
return w2v_emb_dict
def get_w2v_average(sent, word_to_vec: dict, embedding_dim):
"""
This method gets a sentence and returns the average word embedding of the words consisting
the sentence.
:param sent: the sentence object
:param word_to_vec: a dictionary mapping words to their vector embeddings
:param embedding_dim: the dimension of the word embedding vectors
:return The average embedding vector as numpy ndarray.
"""
words_in_dict = 0
avg_vec = np.zeros(embedding_dim)
normalizing_factor = 1
bottom_of_harominc_seris = 1
words_in_sent_counter = 0
for word in sent:
words_in_sent_counter += 1
if type(word_to_vec.get(word, 0)) == int:
continue
else:
words_in_dict += 1
avg_vec += word_to_vec[word] * normalizing_factor
if words_in_dict == 0:
return np.zeros(embedding_dim)
else:
return avg_vec / words_in_sent_counter
def save_pickle(obj, path):
with open(path, "wb") as f:
pickle.dump(obj, f)
def load_pickle(path):
with open(path, "rb") as f:
return pickle.load(f)
def LDA(plots):
num_words = 50
num_topics = 1
dictionary = corpora.Dictionary(plots)
# dictionary.filter_extremes(no_below=1, no_above=0.8)
corpus = [dictionary.doc2bow(text) for text in plots]
lda = models.LdaModel(corpus, num_topics=num_topics, id2word=dictionary, update_every=5,
chunksize=100, passes=1)
# topics = lda.print_topics(num_topics, num_words=num_words)
# lda = models.LdaModel(corpus, num_topics=1, id2word=dictionary, update_every=5,
# chunksize=10000, passes=100)
# topics = lda.print_topics(1, num_words=20)
topics_matrix = lda.show_topics(num_topics=num_topics, formatted=False, num_words=num_words)
topics_matrix = np.array([topic[1] for topic in topics_matrix])
topic_words = topics_matrix[:, :, 0]
return [str(word) for word in topic_words[0]]
def book_embedding(book_disc, word2vec_dict):
most_promenent_words = LDA(([text_to_tokens(book_disc)]))
embedding = get_w2v_average(most_promenent_words, word2vec_dict, 300)
return embedding
import pandas as pd
def find_sim(book1_title, book2_title, book2emd):
"""
Finding the euclidean similitry between 2 books(books objects are from good reads api) based on their summary.
:param book1: gr book object
:param book2: gr book object
:param word2vec_dict: trained word2vec embeddings.
:return: euclidean distance
"""
return np.linalg.norm(book2emd[book1_title] - book2emd[book2_title])
def calcaulte_cosine(bk1_features_emb, bk2_features_emb):
norm1 = np.linalg.norm(bk1_features_emb)
norm2 = np.linalg.norm(bk2_features_emb)
point_wise_mult = bk1_features_emb * bk2_features_emb
return point_wise_mult / (norm1 * norm2)
# Usefull dont delete
import pandas as pd
def create_word_voc(despcretion_list):
set_words = set()
i = 0
for desc in despcretion_list:
book_words = LDA([text_to_tokens(str(desc))])
set_words = set_words.union(book_words)
i += 1
print(i)
return set_words
def get_org_score(books, chosen_book, word2vec_dict):
scores = np.zeros(30000)
# TODO maybe theere should be a range and not the highest since books could be too much (man) sometimes.
for book, i in zip(books, range(30000)):
scores[i] = find_sim(chosen_book, book, word2vec_dict, COSINE)
originality_score = scores.sort()[-10:].mean() # TODO TRY LESHLEL
return originality_score
from requests import get
from bs4 import BeautifulSoup
def find_all(string, substring):
"""
Function: Returning all the index of substring in a string
Arguments: String and the search string
Return:Returning a list
"""
length = len(substring)
c = 0
indexes = []
while c < len(string):
if string[c:c + length] == substring:
indexes.append(c)
c = c + 1
return indexes
def find_total_amount_of_generes(urls):
genres_set = {}
for url in urls:
get_book_genres(genres_set, url)
save_pickle(genres_set, "genres_dict.pickle")
import matplotlib.pyplot as plt
def get_book_genres(genres_set, url):
page = get(url)
content = BeautifulSoup(page.content, "html.parser")
keys = '<a class="actionLinkLite bookPageGenreLink" href="/genres/'
str_cont = str(content)
genre_index = (find_all(str_cont, keys))
curr_book_vector = []
for ind in genre_index:
ind = ind + len(keys)
string_that_contain_genre = str_cont[ind:ind + 50]
current_genr = string_that_contain_genre.split("\"")[0]
if current_genr in genres_set.keys():
curr_book_vector.append(genres_set[current_genr])
return curr_book_vector
def get_books_genres_by_ff(books_url):
genres = load_pickle("genre2num.pickle")
all_books_vec = []
for url in books_url:
all_books_vec.append(get_book_genres(genres, url))
return all_books_vec
def dist_from_book(book_title, title2emb, all, Graphs=False):
dist = []
for book in (all):
dist.append(find_sim(book_title, str(book), title2emb))
dist.sort()
if Graphs == True:
x = np.arange(1000)
y = dist[:1000]
lst = []
names = []
for k in y:
lst.append(k[0])
names.append(k[1])
print(len(x), len(y))
print(y)
print(x)
print(lst)
fig, ax = plt.subplots()
lst = np.log(lst) / np.log(50)
ax.scatter(x, lst)
for i in range(len(names)):
names[i] = names[i].split('(')[0]
print(names[i])
for i, txt in enumerate(names):
ax.annotate(txt, (x[i], lst[i]))
ax.annotate("Harry Potter", (0, 0))
plt.show()
return np.array(dist[:15]).mean()
def get_all_books_embeddings():
all = exctract_data_csv("book-description", True)
all2 = exctract_data_csv("title", True)
w2v = load_pickle("word2vecDict.pickle")
all_books_emb = {}
i = 0
for book_disc, title in zip(all, all2):
i += 1
print(i)
all_books_emb[title] = book_embedding(str(book_disc), w2v)
save_pickle(all_books_emb, "all_books_emb.pickle")
def get_average_orig():
orig_scores = []
w2v = load_pickle("/cs/usr/deven14423/Desktop/CoursesExcerises/BookBuster11/pickles/all_books_emb.pickle")
all = exctract_data_csv("title", True)
i = 0
for title in all:
plt = str(title)
orig_scores.append(dist_from_book(plt, w2v, all))
i += 1
print(i)
return (np.array(orig_scores).mean())
| [
"matplotlib",
"plotly"
] |
89c99a4da48a70ae9f525c1547935d37defed4c0 | Python | Anxidote/Anxidote.github.io | /final.py | UTF-8 | 1,279 | 3.015625 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 21 23:22:35 2020
@author: aditisaxena
"""
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Dec 19 00:01:15 2020
@author: aditisaxena
"""
# Random Forest Regression
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import pickle
# Importing the dataset
dataset = pd.read_csv('DASS.csv')
X = dataset.iloc[:,0:21].values
y = dataset.iloc[:, 22:25].values
# Taking care of missing data
from sklearn.preprocessing import Imputer
imputer = Imputer(missing_values = 'NaN', strategy = 'mean', axis = 0)
imputer = imputer.fit(X[:, 0:21])
X[:, 0:21] = imputer.transform(X[:, 0:21])
# Splitting the dataset into the Training set and Test seT
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)
# Fitting Random Forest Regression to the dataset
from sklearn.ensemble import RandomForestRegressor
regressor = RandomForestRegressor(n_estimators = 10, random_state = 0)
regressor.fit(X, y)
# Predicting a new result
y_pred = regressor.predict(X_test)
pickle.dump(regressor,open('Predictor1.pkl','wb'))
model = pickle.load(open('Predictor1.pkl','rb'))
| [
"matplotlib"
] |
1d65e8ab68d9c005d23856993373f9e8f2cc3e5a | Python | Romulus83/python | /iq_size_Backward_elimination_OLS.py | UTF-8 | 3,240 | 3.921875 | 4 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Thu Mar 19 16:52:20 2020
@author: user
"""
"""
Q. (Create a program that fulfills the following specification.)
iq_size.csv
Are a person's brain size and body size (Height and weight) predictive of his or her intelligence?
Import the iq_size.csv file
It Contains the details of 38 students, where
Column 1: The intelligence (PIQ) of students
Column 2: The brain size (MRI) of students (given as count/10,000).
Column 3: The height (Height) of students (inches)
Column 4: The weight (Weight) of student (pounds)
What is the IQ of an individual with a given brain size of 90, height of 70 inches, and weight 150 pounds ?
Build an optimal model and conclude which is more useful in predicting intelligence Height, Weight or brain size.
"""
# Importing the libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# Importing the dataset
dataset = pd.read_csv('iq_size.csv')
dataset.ndim
dataset.shape
# Check for categorical data
dataset.dtypes
# Check for missing data
dataset.isnull().any(axis=0)
# Seperate features and labels
features = dataset.iloc[:, 1:].values
print(features)
labels = dataset.iloc[:, [0]].values
print(labels)
#it is to remove they columns which are not effect on our model.
import statsmodels.api as sm
import numpy as np
features_obj = features[:, [0,1,2]]
features_obj = sm.add_constant(features_obj)
while (True):
regressor_OLS = sm.OLS(endog = labels,exog =features_obj).fit()
p_values = regressor_OLS.pvalues
if p_values.max() > 0.05 :
features_obj = np.delete(features_obj, p_values.argmax(),1)
else:
break
#In Features_obj we get only one column(Brain) which is most important for our dataset.
# NOTE: - Using Polynomial algorithm.
#so now we will take Polinomial algorithm and we do solve now.
features = dataset.iloc[:,[1]].values #In Features_obj we get only one column(Brain) which is most important for our dataset.
print(features)
labels = dataset.iloc[:, [0]].values
print(labels)
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
poly_object = PolynomialFeatures(degree = 5)
print(features.shape)
features_poly = poly_object.fit_transform(features)
print(features_poly)
print(features_poly.shape) # x0 x1 x2 x3 x4 x5
# Algo is same for Polynomial Regression, its only the data format is changed
lin_reg_2 = LinearRegression()
lin_reg_2.fit(features_poly, labels)
# This will give error dim1 != dim 6
# We need to convert the data into polynomial format
#print (lin_reg_2.predict([[1981]]))
print ("Predicting result with Polynomial Regression")
print (lin_reg_2.predict(poly_object.transform([[90]]))) #if our Brain Size =90
#Output:- [[100.32445269]]
# This value has huge difference from the Linear Regression
# But if we visualize it, we will come to know that Poly is better predictions
# Visualising the Polynomial Regression results
plt.scatter(features, labels, color = 'red')
plt.plot(features, lin_reg_2.predict(poly_object.fit_transform(features)), color = 'blue')
plt.title('Polynomial Regression')
plt.xlabel('Year')
plt.ylabel('Claims Paid')
plt.show()
| [
"matplotlib"
] |
fac1049547c08f3f993b67a9422de90094d8fa8d | Python | Afrophysics/p133lab | /impedance_lb.py | UTF-8 | 2,668 | 3.53125 | 4 | [] | no_license | '''This code is dedicated to the DC circuit observations for a battery with an uknown internal resistance. The goal of this code
is to calculate the potential within the battery and the resistance of the internal resistor using a linear fit model for the IV
diagram of this'''
import numpy as np
import matplotlib.pyplot as plt
#Set up recorded parameters for applied external resistance from variable resistor box and
#the observed potential for the given resistance
R = np.array([10.2, 40.1, 200, 990, 40000])
err_r = np.repeat(0.3, len(R))
V = np.array([0.111, 0.420, 1.760, 4.91, 8.72])
err_v = np.array([0.001, 0.001, 0.001, 0.01, 0.01])
I = V/R
err_cur = ((err_v**2 + (I * err_r)**2)**0.5)/R
print("Part (a) is meant to build the algorithm")
#Expectation value of the nth-power of the x-array weighted by the inverse of the y-variance
def u(x, sig, n):
tp = x**n
bt = sig**2
ary = tp/bt
return np.sum(ary)
#Expectation value of the inner product between the the nth-power of the x-array and the y-array weighted by the inverse of the y-variance
def w(x, y, sig, n):
tp = y*(x**n)
bt = sig**2
ary = tp/bt
return np.sum(ary)
#Linear-fit algorithm using least-squares method
def linfit(x_data, y_data, y_error):
u_0 = u(x_data, y_error, 0)
u_1 = u(x_data, y_error, 1)
u_2 = u(x_data, y_error, 2)
w_0 = w(x_data, y_data, y_error, 0)
w_1 = w(x_data, y_data, y_error, 1)
w_2 = w(x_data, y_data, y_error, 2)
D = (u_0 * u_2) - (u_1 ** 2)
stnd_1 = (u_0 * w_1) - (w_0 * u_1)
stnd_2 = (u_2 * w_0) - (w_1 * u_1)
slope = stnd_1/D
var_sl = u_0/D
y_intr = stnd_2/D
var_yt = u_2/D
return np.array([slope, y_intr, var_sl, var_yt])
print("Part (a) complete")
print("Part (b) is shall test the algorithm using the sample data below:")
x = I
y = V
yerr = err_v
print('x data points: %s'%x)
print('y data points: %s'%y)
print('y-error data points: %s'%yerr)
rs = linfit(x, y, yerr)
print("results")
print("slope = %.3f +/- %.3f"%(rs[0], rs[2]**0.5))
print("intercept = %.3f +/- %.3f"%(rs[1], rs[3]**0.5))
print("Part (b) complete")
print('')
print("Part (c) we shall plot our results")
import matplotlib.pyplot as plt
fig = plt.figure()
plt.errorbar(x, y, yerr = yerr, fmt='.', label='measured volts')
smpl_x = np.linspace(0, 0.0109, 31)
#plot the linear approximation
f_x = rs[1] + (rs[0] * smpl_x)
plt.plot(smpl_x, f_x, '--', label='%.2f + %.2f I'%(rs[1], rs[0]))
plt.grid(True)
plt.title('DC Lab')
plt.legend()
plt.xlabel('Current')
plt.ylabel('Potential')
plt.show()
#fig.savefig('Linear_fit_ex.png')
print("Problem 2 complete")
| [
"matplotlib"
] |
33305447a16ef79d6b8a87507bd7e2e507616372 | Python | WenHui-Zhou/Django-Example | /MartM/Login/views.py | UTF-8 | 11,529 | 2.671875 | 3 | [] | no_license | #coding utf-8
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# encoding=utf-8
from django.shortcuts import render
from django.http import HttpResponse
from django.views.decorators.csrf import csrf_exempt
from Login.models import Users,SaleD,ShopList
import os
import xlrd
import numpy as np
import matplotlib as mpl
mpl.use('Agg')
import pandas as pd
import matplotlib.pyplot as plt
import time
from time import strftime,gmtime
from pylab import *
from pandas.core.frame import DataFrame
mpl.rcParams['font.sans-serif'] = ['SimHei']
def FirstPage(request):
# return HttpResponse('111111')
return render(request,'LoginPage.html')
def HomePage(request):
# return HttpResponse('111111')
return render(request,'index.html')
@csrf_exempt #保证表单可以使用
def loginCheck(request):
Uname = request.POST['username']
Upassword = request.POST['password']
a = list(Users.objects.values_list().filter(name = Uname))
if a.__len__() != 0: #找到记录 a = [(1,'name','password')]
print('找到记录')
if a[0][2] == Upassword:
print('登入成功')
return render(request, 'index.html')
else:
print('登入失败')
err = '密码错误!'
return render(request, 'LoginPage.html',{'err':err})
else:
print('登入失败')
err = '账号错误!'
return render(request, 'LoginPage.html', {'err':err})
def aSum(SaleDStruct):
a = SaleDStruct.Gapple+SaleDStruct.Gorange+SaleDStruct.Gbowl+\
SaleDStruct.Gchopstick+SaleDStruct.Grag+SaleDStruct.Gtissue+SaleDStruct.Gnoddle+SaleDStruct.Gham
return a
def DrawHisgram(request):
print('柱状图')
# os.chdir('C:\\Users\\ZHOU\\Desktop\\Django')
# salesdata = pd.DataFrame(pd.read_excel('1.xlsx', sheet_name=1))
LineData = list(SaleD.objects.all().order_by('Gdate'))
apple = list()
orange = list()
bowl = list()
chopstick = list()
rag = list()
tissue = list()
noddle = list()
ham = list()
date = list()
count = list()
for i in range(0,12):
apple.append(LineData[i].Gapple)
orange.append(LineData[i].Gorange)
bowl.append(LineData[i].Gbowl)
chopstick.append(LineData[i].Gchopstick)
rag.append(LineData[i].Grag)
tissue.append(LineData[i].Gtissue)
noddle.append(LineData[i].Gnoddle)
ham.append(LineData[i].Gham)
date.append(LineData[i].Gdate)
count.append(aSum(LineData[i]))
GdateSet = {"日期": date, "苹果": apple, "橘子": orange, "碗": bowl, "筷子": chopstick, "抹布": rag, "纸巾": tissue,
'方便面': noddle, '火腿肠': ham,'count':count}
salesdata = DataFrame(GdateSet)
salesdata = salesdata.groupby('日期')['count'].agg(sum)
a = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12])
plt.xticks(a, ('1月', '2月', '3月', '4月', '5月', '6月', '7月', '8月', '9月', '10月', '11月', '12月'))
plt.bar(a, salesdata, color='#99CC01')
plt.xlabel(u'月份')
plt.ylabel(u'金额/元')
plt.title(u'2016年各月销售额统计')
plt.legend([u'销售额'], loc='upper right')
plt.grid(color='#95a5a6', linestyle='--', linewidth=1, axis='y', alpha=0.4)
plt.plot()
picName = time.strftime("%Y_%m_%d_%H_%M_%S")
path = 'C:/Users/ZHOU/Desktop/Django/MartM/Login/static/img/' + picName + 'His.png'
plt.savefig(path)
path = '../static/img/' + picName + 'His.png'
plt.close('all')
return render(request,'Hisgram.html',{'path':path})
def DrawLine(request):
print('Line chart')
# os.chdir('C:\\Users\\ZHOU\\Desktop\\Django')
# salesdata = pd.DataFrame(pd.read_excel('1.xlsx'))
LineData = list(SaleD.objects.all().order_by('Gdate'))
apple = list()
orange = list()
bowl = list()
chopstick = list()
rag = list()
tissue = list()
noddle = list()
ham = list()
date = list()
for i in range(0,12):
apple.append(LineData[i].Gapple)
orange.append(LineData[i].Gorange)
bowl.append(LineData[i].Gbowl)
chopstick.append(LineData[i].Gchopstick)
rag.append(LineData[i].Grag)
tissue.append(LineData[i].Gtissue)
noddle.append(LineData[i].Gnoddle)
ham.append(LineData[i].Gham)
date.append(LineData[i].Gdate)
GdateSet = {"日期":date,"苹果":apple,"橘子":orange,"碗":bowl,"筷子":chopstick,"抹布":rag,"纸巾":tissue,'方便面':noddle,'火腿肠':ham}
salesdata = DataFrame(GdateSet)
# plot
salesdata = salesdata.set_index('日期')
names = ['1月', '2月', '3月', '4月', '5月', '6月', '7月', '8月', '9月', '10月', '11月', '12月']
x = range(len(names))
y1 = salesdata['苹果']
y2 = salesdata['橘子']
y3 = salesdata['碗']
y4 = salesdata['筷子']
y5 = salesdata['抹布']
y6 = salesdata['纸巾']
y7 = salesdata['方便面']
y8 = salesdata['火腿肠']
plt.xticks(x, names)
plt.plot(x, y1, marker='*', ms=5, mec='r', label=u'苹果')
plt.plot(x, y2, marker='*', ms=5, label=u'橘子')
plt.plot(x, y3, marker='*', ms=5, label=u'碗')
plt.plot(x, y4, marker='*', ms=5, label=u'筷子')
plt.plot(x, y5, marker='*', ms=5, label=u'抹布')
plt.plot(x, y6, marker='*', ms=5, label=u'纸巾')
plt.plot(x, y7, marker='*', ms=5, label=u'方便面')
plt.plot(x, y8, marker='*', ms=5, label=u'火腿肠')
plt.legend()
plt.xlabel(u"月份")
plt.ylabel(u"销售额")
plt.title("超市2016年度销售走势图")
plt.plot()
picName = time.strftime("%Y_%m_%d_%H_%M_%S")
path = 'C:/Users/ZHOU/Desktop/Django/MartM/Login/static/img/' + picName + 'Line.png'
plt.savefig(path)
path = '../static/img/' + picName + 'Line.png'
plt.close('all')
return render(request, 'LineGraph.html', {'path': path})
def DrawPai(request):
print('Pie chart')
# data = xlrd.open_workbook('C:\\Users\\ZHOU\\Desktop\\Django\\1.xlsx')
# sheet1 = data.sheet_by_name(u'Sheet3')
X = []
Gtitle = ['Gapple','Gorange','Gbowl','Gchopstick','Grag','Gtissue','Gnoddle','Gham']
for i in range(0, 8):
temp = Gtitle[i]
X.append(sum(list(SaleD.objects.values_list(temp,flat=True))))
print(X)
Y = ['苹果', '橘子', '碗', '筷子', '抹布', '纸巾', '方便面', '火腿肠']
print(Y)
fig = plt.figure()
plt.pie(X, labels=Y, autopct='%1.2f%%')
plt.title("2016年超市销售额饼图")
plt.plot()
picName = time.strftime("%Y_%m_%d_%H_%M_%S")
path = 'C:/Users/ZHOU/Desktop/Django/MartM/Login/static/img/' + picName + 'Pai.png'
plt.savefig(path)
path = '../static/img/' + picName + 'Pai.png'
plt.close('all')
return render(request, 'PaiGraph.html', {'path': path})
def load_data_set(): # read the shopping lists from the database
# F1 = open(r"C:\Users\Administrator\Desktop\1.txt", "r")
# List_row = F1.readlines()
Glist = list(ShopList.objects.all())
list_source = []
for i in range(Glist.__len__()):
column_list = Glist[i].ListContent.strip().split("|")
list_source.append(column_list)
return list_source
# generate the candidate 1-itemsets C1
def create_C1(data_set):
C1 = set()
for t in data_set:
for item in t:
item_set = frozenset([item])
C1.add(item_set)
return C1
# check whether a candidate satisfies the Apriori (anti-monotone) property
def is_apriori(Ck_item, Lksub1):
for item in Ck_item:
sub_Ck = Ck_item - frozenset([item])
if sub_Ck not in Lksub1:
return False
return True
# join Lk-1 with itself to generate Ck, then prune
def create_Ck(Lksub1, k):
Ck = set()
len_Lksub1 = len(Lksub1)
list_Lksub1 = list(Lksub1)
for i in range(len_Lksub1):
for j in range(1, len_Lksub1):
l1 = list(list_Lksub1[i])
l2 = list(list_Lksub1[j])
l1.sort()
l2.sort()
if l1[0:k - 2] == l2[0:k - 2]:
Ck_item = list_Lksub1[i] | list_Lksub1[j]
# pruning
if is_apriori(Ck_item, Lksub1):
Ck.add(Ck_item)
return Ck
# scan the database and keep candidates meeting min_support to get the frequent k-itemsets Lk
def generate_Lk_by_Ck(data_set, Ck, min_support, support_data):
Lk = set()
item_count = {}
for t in data_set:
for item in Ck:
if item.issubset(t):
if item not in item_count:
item_count[item] = 1
else:
item_count[item] += 1
t_num = float(len(data_set))
for item in item_count:
if (item_count[item] / t_num) >= min_support:
Lk.add(item)
support_data[item] = item_count[item] / t_num
return Lk
# generate all frequent itemsets L
def generate_L(data_set, k, min_support):
support_data = {}
C1 = create_C1(data_set)
L1 = generate_Lk_by_Ck(data_set, C1, min_support, support_data)
Lksub1 = L1.copy()
L = []
L.append(Lksub1)
for i in range(2, k + 1):
Ci = create_Ck(Lksub1, i)
Li = generate_Lk_by_Ck(data_set, Ci, min_support, support_data)
Lksub1 = Li.copy()
L.append(Lksub1)
return L, support_data
# generate strong association rules
def generate_big_rules(L, support_data, min_conf):
big_rule_list = []
sub_set_list = []
for i in range(0, len(L)):
for freq_set in L[i]:
for sub_set in sub_set_list:
if sub_set.issubset(freq_set):
conf = support_data[freq_set] / support_data[freq_set - sub_set]
big_rule = (freq_set - sub_set, sub_set, conf)
if conf >= min_conf and big_rule not in big_rule_list:
big_rule_list.append(big_rule)
sub_set_list.append(freq_set)
return big_rule_list
def Apriori(request):
data_set = load_data_set()
L, support_data = generate_L(data_set, k=3, min_support=0.2)
big_rules_list = generate_big_rules(L, support_data, min_conf=0.7)
print("=" * 50)
print("Frequent itemsets")
print('**' * 30)
freqList = []
for Lk in list(L):
for freq_set in Lk:
print(list(freq_set), "support:", support_data[freq_set])
if support_data[freq_set]>=0.4:
freqList.append(' and '.join(list(freq_set)) + " support: " + repr(support_data[freq_set])+' - consider increasing stock')
else:
freqList.append(' and '.join(list(freq_set)) + " support: " + repr(support_data[freq_set]))
freqList.append('*************************************')
print("=" * 50)
ContactList = []
print("Strong association rules")
for item in big_rules_list:
print(list(item[0]), "=>", list(item[1]), "confidence: ", item[2])
ContactList.append(' and '.join(list(item[0])) + " => "+' and '.join(list(item[1])) + " confidence: " + repr(item[2]))
return render(request,'Apriori.html',{"freqList":freqList,"ContactList":ContactList})
def DataDisp(request):
DataDispSet = list(SaleD.objects.all().order_by('Gdate'))
ShoppingList = list(ShopList.objects.all())
return render(request,'DataDisplay.html',{"cols":DataDispSet,"shopList":ShoppingList}) | [
"matplotlib"
] |
f5c5dd8e543db5c7cdb3426578cc7c4a7b01aaa2 | Python | rohithsrinivaas/HPCE-AM5080 | /13-03-2020/LUDecomp.py | UTF-8 | 8,234 | 2.59375 | 3 | [] | no_license | # Program to decompose a matrix to LU form
# Importing the required libraries
import math
import random
import numpy as np
from mpi4py import MPI
import seaborn as sns
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
nProcs = comm.Get_size()
size = nProcs
np.set_printoptions(precision=3)
np.set_printoptions(suppress=True)
random.seed(1)
m = None
k = None
n = None
A = None
if rank == 0:
A = np.array([[1.00,0.00,1,2],[0,1,-2,0],[1,2,-1,0],[2,1,3,-2.00]])
m,n = np.shape(A)
print(A)
for i in range(m):
col_max = A[i,i]
ind_max = i
for j in range(i+1,n):
if A[j,i]>=col_max:
col_max = A[j,i]
ind_max = j
A[[i,ind_max]]=A[[ind_max,i]]
print(A)
m = comm.bcast(m,root = 0)
n = comm.bcast(n,root = 0)
B = np.full(n,1.00)
sub_row = np.full(n,1.00)
comm.Scatter(A,B,root = 0)
# print B
iter = 0
while iter < m:
if rank == iter:
sub_row = B
sub_row = comm.bcast(sub_row,root=iter)
comm.barrier()
if rank > iter:
alpha = (B[iter]/sub_row[iter])
for i in range(n):
B[i] = B[i] - alpha*sub_row[i]
iter+=1
comm.barrier()
comm.Gather(B,A,root=0)
if rank ==0:
print(A)
# A = np.random.rand(4,6)
# B = np.random.rand(6,6)
# print A
# print B
# m,k = np.shape(A)
# k,n = np.shape(B)
# C = np.full((m,n),0.00)
# # To factorize nProc so that iProcs/jProcs ~ iLength/jLength
# k = comm.bcast(k,root = 0)
# if nProcs == 4:
# iProcs = 2
# jProcs = 2
# elif nProcs == 6:
# if m>n:
# jProcs = 3
# iProcs = 2
# else:
# iProcs = 2
# jProcs = 3
# iLength = m/iProcs
# jLength = n/jProcs
# # print("iProcs = {}, jProcs = {}").format(iProcs,jProcs)
# if rank == 0:
# k = 0
# for i in range(0,iProcs):
# for j in range(0,jProcs):
# rows = A[i*iLength:(i+1)*iLength,:]
# rows.tolist()
# cols = B[:,j*jLength:(j+1)*jLength]
# comm.send(rows, dest = i*jProcs+j, tag = 0)
# comm.send(cols, dest = i*jProcs+j, tag = 0)
# rows = None
# cols = None
# rows = comm.recv(source = 0,tag = 0)
# cols = comm.recv(source = 0,tag = 0)
# # print("Rows - {}, cols - {} for Rank - {}").format(rows,cols,rank)
# ans = np.dot(rows,cols)
# # print ans
# comm.send(ans, dest = 0, tag = 0)
# if rank == 0:
# final_ans = None
# for i in range(iProcs):
# row_ans = None
# for j in range(jProcs):
# segment = comm.recv(source = i*jProcs+j, tag = 0)
# segment = np.asarray(segment)
# # print segment
# if j == 0:
# row_ans = segment
# else:
# row_ans = np.hstack((row_ans,segment))
# # print("RowAns - {} ").format(row_ans)
# if i ==0:
# final_ans = row_ans
# else:
# final_ans = np.vstack((final_ans,row_ans))
# print final_ans
# print np.dot(A,B)
# print("Rank - {}, Data - {}").format(rank,data)
# shift_x = rank%jProcs
# shift_y = rank/jProcs
# if rank <iProcs:
# for j in range(jProcs):
# comm.send(data,dest = shift_x*jProcs+j,tag = shift_x)
# else:
# for i in range(iProcs):
# comm.send(data,dest = i*jProcs+shift_y,tag = shift_y)
# comm.recv()
# # iProc =
# Declaring the array and setting the seed for random
# del_x = 0.025
# del_y = 0.025
# del_x = 0.0025
# del_y = 0.0025
# del_t = 0.1
# length = 0.2
# time_max = 1000
# alpha = 10**(-4)
# r = alpha * del_t /(del_x**2)
# size_list = int(length/(del_x*(nProcs)**0.5))
# side_size = int(length/(del_x))
# block_size = size_list*size_list
# local_temp = np.full((size_list,size_list),0.00)
# temp_1 = local_temp
# temp_2 = local_temp
# if rank == 0:
# local_temp[:,0]= 300.00
# local_temp[0,:] = 400.00
# elif rank == 1:
# local_temp[:,size_list-1] = 100
# local_temp[0,:] = 400.00
# elif rank == 2:
# local_temp[:,0] = 300.00
# else :
# local_temp[:,size_list-1] = 100.00
# global_temp = None
# local_temp = np.reshape(local_temp,(1,block_size))
# # print(local_temp)
# local_temp.tolist()
# comm.send(local_temp, dest=0, tag=rank**2)
# open('Temperature_Profile.txt', 'w').close()
# # print(local_temp)
# comm.barrier()
# if rank == 0:
# # print "Printing the Received buffer"
# local_temp_0 = None
# local_temp_1 = None
# local_temp_2 = None
# local_temp_3 = None
# local_temp_0 = comm.recv(source=0, tag=0)
# local_temp_1 = comm.recv(source=1, tag=1)
# local_temp_2 = comm.recv(source=2, tag=4)
# local_temp_3 = comm.recv(source=3, tag=9)
# local_temp_0 = np.asarray(local_temp_0)
# local_temp_1 = np.asarray(local_temp_1)
# local_temp_2 = np.asarray(local_temp_2)
# local_temp_3 = np.asarray(local_temp_3)
# local_temp_0 = np.reshape(local_temp_0,(size_list,size_list))
# local_temp_1 = np.reshape(local_temp_1,(size_list,size_list))
# local_temp_2 = np.reshape(local_temp_2,(size_list,size_list))
# local_temp_3 = np.reshape(local_temp_3,(size_list,size_list))
# # print(local_temp_0)
# # print(local_temp_1)
# # print(local_temp_2)
# # print(local_temp_3)
# global_hor_1 = np.hstack((local_temp_0,local_temp_1))
# global_hor_2 = np.hstack((local_temp_2,local_temp_3))
# global_temp = np.vstack((global_hor_1,global_hor_2))
# # print global_temp
# global_temp.tolist()
# global_temp = comm.bcast(global_temp,root=0)
# global_temp = np.asarray(global_temp)
# global_temp = np.reshape(global_temp,(size_list*2,size_list*2))
# if rank ==0:
# print global_temp
# comm.barrier()
# origin_shift_x = 0
# origin_shift_y = 0
# if rank == 1:
# origin_shift_y = size_list
# elif rank == 2:
# origin_shift_x = size_list
# elif rank == 3:
# origin_shift_x = size_list
# origin_shift_y = size_list
# else:
# origin_shift_y = 0
# origin_shift_x = 0
# local_temp = np.reshape(local_temp,(size_list,size_list))
# # print("Printing local Temp - {}").format(local_temp)
# # print np.shape(global_temp)
# time_iter = 1
# while time_iter <= 1:
# diff_temp = np.full((size_list,size_list),0.00)
# for i in range(size_list):
# for j in range(size_list):
# x = origin_shift_x + i
# y = origin_shift_y + j
# if x == 0 or y == 0 or y == side_size-1:
# diff_temp[i,j] = 0
# elif x == side_size-1:
# diff_temp[i,j] = r*(global_temp[x,y+1] + global_temp[x,y-1] + 2*global_temp[x-1,y] - 4 * global_temp[x,y])/4
# elif x>=0 and y>=0 and x<side_size and y<side_size:
# diff_temp[i,j] = r*(global_temp[x+1,y] + global_temp[x,y+1] + global_temp[x,y-1] + global_temp[x-1,y] - 4 * global_temp[x,y])/4
# else:
# print("Index out of bounds - x - {} y - {} i - {} j - {} rank - {}").format(x,y,i,j,rank)
# local_temp = np.add(local_temp,diff_temp)
# # print("Rank = {}, local temp - {}").format(rank,local_temp)
# local_temp.tolist()
# comm.send(local_temp, dest=0, tag=rank**2)
# # print(local_temp)
# comm.barrier()
# if rank == 0:
# # print "Printing the Received buffer"
# local_temp_0 = None
# local_temp_1 = None
# local_temp_2 = None
# local_temp_3 = None
# local_temp_0 = comm.recv(source=0, tag=0)
# local_temp_1 = comm.recv(source=1, tag=1)
# local_temp_2 = comm.recv(source=2, tag=4)
# local_temp_3 = comm.recv(source=3, tag=9)
# local_temp_0 = np.asarray(local_temp_0)
# local_temp_1 = np.asarray(local_temp_1)
# local_temp_2 = np.asarray(local_temp_2)
# local_temp_3 = np.asarray(local_temp_3)
# local_temp_0 = np.reshape(local_temp_0,(size_list,size_list))
# local_temp_1 = np.reshape(local_temp_1,(size_list,size_list))
# local_temp_2 = np.reshape(local_temp_2,(size_list,size_list))
# local_temp_3 = np.reshape(local_temp_3,(size_list,size_list))
# # print(local_temp_0)
# # print(local_temp_1)
# # print(local_temp_2)
# # print(local_temp_3)
# global_hor_1 = np.hstack((local_temp_0,local_temp_1))
# global_hor_2 = np.hstack((local_temp_2,local_temp_3))
# global_temp = np.vstack((global_hor_1,global_hor_2))
# # print global_temp
# global_temp.tolist()
# global_temp = comm.bcast(global_temp,root=0)
# global_temp = np.asarray(global_temp)
# global_temp = np.reshape(global_temp,(size_list*2,size_list*2))
# local_temp = np.asarray(local_temp)
# local_temp = np.reshape(local_temp,(size_list,size_list))
# time_iter += 1
# if rank ==0:
# print global_temp
# plot_temp = sns.heatmap(global_temp)
# fig = plot_temp.get_figure()
# fig.savefig("time_iter.png")
# fig.clf()
# temp_csv = np.reshape(global_temp,(1,side_size**2))
# with open("Temperature_Profile.txt", "a") as myfile:
# np.savetxt(myfile, temp_csv, fmt='%1.4e', delimiter=",") | [
"seaborn"
] |
82d75ace9d14b9c79070a2c5956236c202af0366 | Python | Anusha-Kokkinti/Linear-Regression--Analysis | /BCD5.py | UTF-8 | 2811 | 2.84375 | 3 | [] | no_license | # %matplotlib qt
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import numpy as np
import pandas as pt
def cost_computation(x1,x2,x3,y,t,n,m,b):
sum1=0
z=0
for i in range(x1.size):
sum1=sum1+(((t*x3[i]+n*x2[i]+m*x1[i]+b)-y[i])**2)
z=sum1/(2*(x1.size))
return z
def stepGradient(b_current, m_current,n_current,t_current,x3,x2,x1,y, learningRate):
b_gradient = 0
m_gradient,n_gradient,t_gradient = 0, 0,0
#n_gradient = 0
N = float(x1.size)
for i in range(x1.size):
b_gradient -= (2/N)*(y[i]-((n_current*x2[i])+(m_current*x1[i])+b_current))
t_gradient -= (2/N)*x3[i]*(y[i]-((t_current*x3[i])+(n_current*x2[i])+(m_current*x1[i])+b_current))
m_gradient -= (2/N)*x1[i]*(y[i]-((n_current*x2[i])+(m_current*x1[i])+b_current))
n_gradient -= (2/N)*x2[i]*(y[i]-((n_current*x2[i])+(m_current*x1[i])+b_current))
new_b = b_current-(learningRate*b_gradient)
new_t = t_current-(learningRate*t_gradient)
new_m = m_current-(learningRate*m_gradient)
new_n = n_current-(learningRate*n_gradient)
return (new_b, new_m, new_n,new_t)
def main():
#initializing the data set
data=pt.read_excel('BreastCancerData.xlsx')
x1=np.array(data[u'Perimeter'],dtype=np.float64)
x2=np.array(data[u'Area'],dtype=np.float64);x3=np.array(data[u'FractalDimension'],dtype=np.float64);y=np.array(data[u'Compactness'],dtype=np.float64)
mvalues,tvalues=np.zeros(x1.size+1),np.zeros(x1.size+1)
bvalues,nvalues=np.zeros(x1.size+1),np.zeros(x1.size+1)
learningRate=0.00000092
mvalue,nvalue,tvalue=1,1,1
bvalue,error=1,[]
z,newz=1000000,100000
i=0
while(z>0.009):
#print mvalue,bvalue,z
newbvalue, newmvalue, newnvalue, newtvalue = stepGradient(bvalue,mvalue,nvalue,tvalue,x3,x2,x1,y,learningRate)
z = newz
newz = cost_computation(x1,x2,x3,y,tvalue,nvalue,mvalue,bvalue)
error.append(newz)
mvalue, bvalue, nvalue, tvalue = newmvalue, newbvalue, newnvalue, newtvalue
print(tvalue, nvalue, mvalue, bvalue, z, i)
i = i + 1
plt.figure(1)
plt.title('Cost Over Iterations')
plt.xlabel('No. Of Iterations')
plt.ylabel('Cost Function')
plt.scatter(i, newz, color='r')
'''xaxis=np.linspace(60,200,num=50);yaxis=np.linspace(250,2500,num=50);taxis=np.linspace(60,200,num=50)
newx1=xaxis;zaxis=[tvalue*taxis[i]+nvalue*yaxis[i]+mvalue*xaxis[i]+bvalue for i in range(50)]
fig=plt.figure()
ax=fig.add_subplot(111,projection='3d')
ax.scatter(x1, x2, y,c='r',marker='o'),ax.scatter(xaxis, yaxis, zaxis,marker='*'),ax.set_xlabel('Perimeter'),ax.set_ylabel('Area'),ax.set_zlabel('Compactness'),ax.set_title('Performance of Model when Area is added')
#plt.scatter(y,x,color='r')
#plt.scatter(y,x) '''
plt.show()
if __name__=="__main__":
main()
| [
"matplotlib"
] |
63a1fb3e0201946a6ebe02f8054cba7a89d91a50 | Python | zietzm/modifiers-analysis | /data/computed/severity_modifier_normalization.py | UTF-8 | 17,395 | 2.75 | 3 | [] | no_license | import sys
import pandas as pd
import numpy as np
import os
from collections import Counter
import matplotlib.pyplot as plt
# %matplotlib inline
from gensim.models import Word2Vec
import nltk
def clean_string_list(input_list, list_output = False, chars_to_ignore = None, min_char_count = None) :
output_list = []
for s in input_list:
if chars_to_ignore :
for char in chars_to_ignore :
s = s.replace(char, ' ')
s = s.split(' ')
s = [str(x).lower() for x in s]
s = [str(x).strip() for x in s]
s = [x for x in s if (x != '-')]
if min_char_count : s = [x for x in s if (len(x) >= min_char_count)]
s = [x for x in s if not (x.isdigit())]
s = [x for x in s if x]
if list_output == False : s = ' '.join(s)
output_list.append(s)
return output_list
def get_dataframes_from_csv() :
# Dump extracted_parents.csv into pandas DataFrame
extracted = pd.read_csv('extracted_parents.csv', sep = ',')
# Remove missing criteria strings in df
extracted = extracted.dropna(how = 'any')
# Drop duplicates
extracted = extracted.drop_duplicates()
# Reset index after drop
extracted = extracted.reset_index(drop = True)
# REMOVE ROWS WITH CRITERIA STRINGS THAT DO NOT INCLUDE MATCHED STRING
indices = [] ; matches = [] ; criteria = []
for idx, criteria_string in enumerate(list(extracted['criteria_string'])) :
if (extracted.loc[idx]['matched_string'] in criteria_string) == False :
indices.append(idx)
matches.append(extracted.loc[idx]['matched_string'])
criteria.append(criteria_string)
# print('Number rows missing relevant matched_string value: {}'.format(len(indices)))
extracted = extracted.drop(indices)
extracted = extracted.reset_index(drop = True)
confirmation_bool = (extracted.shape[0] == (292320 - 62395))
# NORMALIZE WHITESPACE AND LOWER ALL CASES IN CRITERIA STRING
cleaned_criteria = clean_string_list(list(extracted['criteria_string']))
extracted.drop('criteria_string', axis = 1, inplace = True)
extracted['criteria_string'] = cleaned_criteria
# SET FILE PATHS
FILE_ROOT_PATH = 'computed/'
CONCEPTS_PATH = FILE_ROOT_PATH + 'concepts_with_modifiers.tsv'
PARENT_PATH = FILE_ROOT_PATH + 'parent_to_descendant_synonyms.tsv'
PARENT_SYNONYM_PATH = FILE_ROOT_PATH + 'parents_synonyms.tsv'
PARENT_MODIFIED_CHILDREN_PATH = FILE_ROOT_PATH + 'parents_with_modified_children.tsv'
# DUMP RELEVANT FILES TO DATAFRAMES
concepts = pd.read_csv(CONCEPTS_PATH, sep = '\t')
parents = pd.read_csv(PARENT_PATH, sep = '\t')
synonyms = pd.read_csv(PARENT_SYNONYM_PATH, sep = '\t')
modified = pd.read_csv(PARENT_MODIFIED_CHILDREN_PATH, sep = '\t')
# REMOVE DUPLICATES FROM DATAFRAMES
concepts = concepts.drop_duplicates()
concepts = concepts.reset_index(drop = True)
parents = parents.drop_duplicates()
parents = parents.reset_index(drop = True)
synonyms = synonyms.drop_duplicates()
synonyms = synonyms.dropna(how = 'all')
synonyms.reset_index(drop = True)
modified = modified.drop_duplicates()
modified = modified.dropna(how = 'any')
modified = modified.reset_index(drop = True)
return extracted, concepts, parents, synonyms, modified
# Function to split top n_rows into the validation set
def split_train_val(n_rows, save = 0) :
# Top 100 rows serve as validation set
validation_extracted = extracted.head(n = n_rows).copy()
# Remaining rows serve as training set
train_extracted = extracted.tail(n = extracted.shape[0] - (n_rows + 1)).copy()
# Reset index after drop
train_extracted = train_extracted.reset_index(drop = True)
if save == 1 :
# Export validation df to .csv file
validation_extracted.to_csv('validation_extracted_parents.csv', encoding = 'utf-8', index = False)
# Export training df to .csv file
train_extracted.to_csv('train_extracted_parents.csv', encoding = 'utf-8', index = False)
return train_extracted, validation_extracted
# FUNCTIONS TO QUERY AND NAVIGATE DATAFRAMES
def get_synonyms(parent) :
parent_list = list(synonyms['parent_concept_name'])
id_list = list(synonyms['parent_concept_id'])
if parent in parent_list :
return_df = synonyms[synonyms['parent_concept_name'] == parent]
return list(return_df['concept_synonym_name'])
elif parent in id_list :
return_df = synonyms[synonyms['parent_concept_id'] == parent]
return list(return_df['concept_synonym_name'])
else :
parent = str(parent)
parent_list_lower = [str(x).lower() for x in parent_list]
if parent.lower() in parent_list_lower :
idx = parent_list_lower.index(parent.lower())
return_df = synonyms[synonyms['parent_concept_name'] == parent_list[idx]]
return list(return_df['concept_synonym_name'])
else :
print('Parent concept {} does not have synonyms.'.format(parent))
return None
def get_parent_concept_id(parent) :
parent_list = list(synonyms['parent_concept_name'])
if parent in parent_list :
return list(synonyms[synonyms['parent_concept_name'] == parent]['parent_concept_id'])[0]
else :
parent_list_lower = [str(x).lower() for x in parent_list]
if parent.lower() in parent_list_lower :
idx = parent_list_lower.index(parent.lower())
return list(synonyms[synonyms['parent_concept_name'] == parent_list[idx]]['parent_concept_id'])[0]
else :
print('Parent concept {} not found.'.format(parent))
return None
def get_train_criteria(train) :
train_criteria = []
for s in train['criteria_string'] :
chars_to_ignore = [',', '.', ':', ';', '(', ')', '[', ']', '#', '%', '<', '>', '/', '"', '*', '-', '―']
s = s.split(' ')
s = clean_string_list(s, list_output = False, chars_to_ignore = chars_to_ignore)
train_criteria.append(s)
return train_criteria
def get_embeddings_model(split) :
train, val = split_train_val(split)
train_criteria = get_train_criteria(train)
return train, val, Word2Vec(train_criteria, min_count = 1)
def get_closest_string(query, sentence, model, return_index = 0) :
max_sim = -10
most_similar = None
if model == None : model = Word2Vec(train_criteria, min_count=1)
for word in sentence :
simularity = model.wv.similarity(query, word)
if max_sim < simularity :
max_sim = simularity
most_similar = word
if return_index == 0 :
return most_similar
else :
return sentence.index(word)
# INPUT:
# pandas.DataFrame WITH COLUMNS'criteria_string', 'matched_string', 'parent_concept_id'
# OUTPUT:
# return_ids: LIST OF OMOP CDM CODES ASSOCIATED WITH DESCENDANTS TO GIVEN parent_concept_id
# return_names: LIST OF DESCENDANTS TO GIVEN parent_concept_id
# matches: LIST OF BEST STRING MATCHES TO manual_string GIVEN matched_string
# match_indices: LIST OF FIRST INDEX OF MATCHED STRING IN CLEANED criteria_string
def get_indices_and_potential_synonyms(df, window = 0, back = False, threshold = 10) :
return_ids = []
return_names = []
matches = []
match_indices = []
closest_descendents = []
model = None
required_columns = set(['criteria_string', 'matched_string', 'parent_concept_id'])
if required_columns.issubset(set(df.columns)) :
detected_string = []
for i, query in enumerate(list(df['criteria_string'])) :
chars_to_ignore = [',', '.', ':', ';', '(', ')', '[', ']', '#', '%', '<', '>', '/', '"', '*', '-', '―']
query = query.split(' ')
query = clean_string_list(query, chars_to_ignore = chars_to_ignore)
parent_concept_id = list(df['parent_concept_id'])[i]
match_candidates = get_descendents_df(parent_concept_id)
candidate_ids = set(match_candidates['descendant_concept_id'])
candidate_names = set(match_candidates['descendant_synonym_name'])
return_names.append(candidate_names)
return_ids.append(candidate_ids)
candidate_words = list(df['matched_string'])[i].split(' ')
match = [x for x in query if candidate_words[0] in x]
match_index = 100
if match and len(candidate_words) == 1 :
matches.append(match[0])
match_index = query.index(match[0])
match_indices.append(match_index)
elif match and len(candidate_words) > 1 :
match_index = query.index(match[0])
match_indices.append(match_index)
matches.append(' '.join(query[match_index:match_index+len(candidate_words)]))
elif len(candidate_words) > 1 :
match = [x for x in query if candidate_words[1] in x]
match_index = query.index(match[0])
matches.append(match[0])
match_indices.append(match_index)
else :
match_index = get_index_of_closest(candidate_words[0], query)
if match_index is None :
print('Term {} was not found in criteria string.'.format(' '.join(candidate_words)))
elif match_index > threshold :
match_index = get_closest_string(candidate_words[0], query, model = model, return_index = 1)
match_indices.append(match_index)
else :
if len(candidate_words) == 1 :
match_indices.append(match_index)
match = query[match_index]
matches.append(match)
else :
match_indices.append(match_index)
match = ' '.join(query[match_index:match_index+len(candidate_words)])
matches.append(match)
if window == 0 :
search_string = match[0]
elif match_index - window > 0 and match_index + window < len(query) and back == True :
search_string = query[match_index - window : match_index + window]
elif match_index - window > 0 :
search_string = query[match_index - window : match_index]
elif back == True :
search_string = query[match_index : match_index + window]
else :
search_string = match[0]
closest_descendent = get_closest_in_list(search_string, list(match_candidates['descendant_synonym_name']))
closest_descendents.append(closest_descendent)
return matches, match_indices, closest_descendents
def check_modifiers(df, window = 3, severity_modifiers = None, threshold = 100) :
matches, match_indices, closest_descendents = get_indices_and_potential_synonyms(df,
window = 0,
back = True,
threshold = threshold)
required_columns = set(['criteria_string', 'matched_string', 'parent_concept_name'])
if required_columns.issubset(set(df.columns)) :
modified = []
detected_string = []
for idx, s in enumerate(list(df['criteria_string'])) :
# CLEAN criteria_string
chars_to_ignore = [',', '.', ':', ';', '(', ')', '[', ']',
'#', '%', '<', '>', '/', '"', '*', '-', '―']
s = s.split(' ')
s = clean_string_list(s, list_output = False, chars_to_ignore = chars_to_ignore)
if severity_modifiers == None : severity_modifiers = set(['severe', 'significant', 'major'])
else :
severity_modifiers = list(severity_modifiers)
severity_modifiers = set([str(x).lower() for x in severity_modifiers])
# CHECK IF PREDEFINED SEVERITY MODIFIERS ARE IN criteria_string
matched_modifier_set = severity_modifiers.intersection(set(s))
if len(matched_modifier_set) >= 1:
matched_modifier = list(matched_modifier_set)[0]
modifier_index = s.index(matched_modifier)
# CHECK EXACT STRING MATCH BETWEEN matched_string AND criteria_string
matched_string = list(df['matched_string'])[idx].split(' ')
parent_concept_name = list(df['parent_concept_name'])[idx].split(' ')
parent_concept_name = set([str(x).lower() for x in parent_concept_name])
concept_matched_string_set = set(parent_concept_name).union(set(matched_string))
concept_matched_string_set = set(s).intersection(concept_matched_string_set)
if len(concept_matched_string_set) >= 1 :
concept_match = list(concept_matched_string_set)[0]
concept_index = s.index(concept_match)
# MODIFIER IN FRONT OF CONCEPT
if concept_index >= modifier_index and abs(concept_index-modifier_index) < window:
modified.append(1)
detected_string.append(' '.join(s[modifier_index:concept_index+1]))
else :
modified.append(0)
detected_string.append('')
else:
if set(s).issubset(set(model.wv.vocab)) :
closest_index = get_closest_string(matched_modifier, s, model = model, return_index = 1)
if closest_index >= modifier_index and abs(closest_index-modifier_index) < window:
modified.append(1)
detected_string.append(' '.join(s[modifier_index:concept_index+1]))
else :
modified.append(0)
detected_string.append('')
else :
modified.append(0)
detected_string.append('')
else :
modified.append(0)
detected_string.append('')
temp = []
for x in df['severity_modifier'] :
if x != 'unmarked' :
temp.append(1)
else :
temp.append(0)
df['truth_label'] = temp
if 'modified' in df.columns : df.drop('modified', axis = 1)
df['MODIFIED_GUESS'] = modified
df['CONCEPT_GUESS'] = matches
df['DESCENDANT_GUESS'] = closest_descendents
return df
else :
print('Check if input DataFrame columns contain {}'.format(required_columns))
return None
def get_parent_concept_id(parent) :
if parent in list(hr['parent_concept_name']) :
return hr[hr['parent_concept_name'] == parent]['parent_concept_id'][0]
else :
parent = str(parent).lower()
parent_list = list(hr['parent_concept_name'])
parent_list_lower = [str(x).lower() for x in parent_list]
if parent in parent_list_lower :
idx = parent_list_lower.index(parent.lower())
return hr[hr['parent_concept_name'] == list(hr['parent_concept_name'])[idx]]['parent_concept_id'][0]
else :
print('Parent concept {} does not found.'.format(parent))
return None
def get_descendents_df(parent, return_id = 1) :
if parent in list(parents['parent_concept_id']) :
return parents[parents['parent_concept_id'] == parent]
elif get_parent_concept_id(parent) != None :
parent = get_parent_concept_id(parent)
return parents[parents['parent_concept_id'] == parent]
else :
print('Parent concept {} does not found.'.format(parent))
return None
def edit_distance_list(search, search_space) :
return_list = []
for s in search_space:
return_list.append(nltk.edit_distance(s, search))
return return_list
def get_index_of_closest(search, search_space) :
distances = edit_distance_list(search, search_space)
return distances.index(min(distances))
def get_closest_in_list(search, search_space) :
return search_space[get_index_of_closest(search, search_space)]
def main() :
if len(sys.argv) > 1 : csv_import = sys.argv[1]
else : csv_import = 'annotate_notes_hr2479.csv'
extracted, concepts, parents, synonyms, modified = get_dataframes_from_csv()
df = pd.read_csv(csv_import, sep = ',').head().copy()
df = df.dropna(subset=['snomed_id'])
df = df.reset_index(drop = True)
df = check_modifiers(df, window = 3, severity_modifiers = None)
df = df[['NCT_id', 'matched_string', 'criteria_string', 'parent_concept_id',
'parent_concept_name', 'CONCEPT_GUESS', 'DESCENDANT_GUESS']]
df.to_csv('method_output.csv', sep = ',')
if __name__== "__main__":
main()
| [
"matplotlib"
] |
537232a4e76f407f93e0f5f6a629055ba8f3e9ac | Python | alan-yjzhang/AIProjectExamples1 | /reinforcement_learning/agent_linear.py | UTF-8 | 8,146 | 3.734375 | 4 | [] | no_license | """Linear QL agent
Simple Policy-learning algorithm
In this project, we address the task of learning control policies for text-based games using reinforcement learning.
In these games, all interactions between players and the virtual world are through text.
The current world state is described by elaborate text, and the underlying state is not directly observable.
Players read descriptions of the state and respond with natural language commands to take actions.
For this project you will conduct experiments on a small Home World, which mimics the environment of a typical house. The world consists of a few rooms, and each room contains a representative object that the player can interact with.
For instance, the kitchen has an apple that the player can eat. The goal of the player is to finish some quest. An example of a quest given to the player in text is "You are hungry now".
To complete this quest, the player has to navigate through the house to reach the kitchen and eat the apple.
In this game, the room is hidden from the player, who only receives a description of the underlying room.
At each step, the player reads the text describing the current room and the quest, and responds with some command (e.g., "eat apple").
The player then receives some reward that depends on the state and his/her command.
In order to design an autonomous game player, we will employ a reinforcement learning framework to learn command policies using game rewards as feedback.
Since the state observable to the player is described in text, we have to choose a mechanism that maps text descriptions into vector representations.
A naive approach is to create a map that assigns a unique index for each text description. -- agent_tabular_ql.py
However, such an approach becomes difficult to implement when the number of textual state descriptions is huge.
An alternative method is to use a bag-of-words representation derived from the text description. -- agent_linear.py
Deep-learning approach -- agent_dqn.py
"""
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
import framework
import utils
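# Illustrative sketch (not part of the original template): the docstring above
# contrasts a per-description index map with a bag-of-words representation.
# The real extractor used below is utils.bag_of_words / utils.extract_bow_feature_vector;
# this hedged example only shows the general idea, and the helper name is made up.
def _example_bow_vector(state_text, dictionary):
    """Map a text description to a fixed-length binary bag-of-words vector."""
    vector = [0.0] * len(dictionary)  # dictionary: word -> column index
    for word in state_text.lower().split():
        if word in dictionary:
            vector[dictionary[word]] = 1.0  # mark each known word as present
    return vector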
DEBUG = False
GAMMA = 0.5 # discounted factor
TRAINING_EP = 0.5 # epsilon-greedy parameter for training
TESTING_EP = 0.05 # epsilon-greedy parameter for testing
NUM_RUNS = 10
NUM_EPOCHS = 600
NUM_EPIS_TRAIN = 25 # number of episodes for training at each epoch
NUM_EPIS_TEST = 50 # number of episodes for testing
ALPHA = 0.001 # learning rate for training
ACTIONS = framework.get_actions()
OBJECTS = framework.get_objects()
NUM_ACTIONS = len(ACTIONS)
NUM_OBJECTS = len(OBJECTS)
def tuple2index(action_index, object_index):
"""Converts a tuple (a,b) to an index c"""
return action_index * NUM_OBJECTS + object_index
def index2tuple(index):
"""Converts an index c to a tuple (a,b)"""
return index // NUM_OBJECTS, index % NUM_OBJECTS
# pragma: coderesponse template name="linear_epsilon_greedy"
def epsilon_greedy(state_vector, theta, epsilon):
"""Returns an action selected by an epsilon-greedy exploration policy
Note that the Q-learning algorithm does not specify how we should interact in the world so as to learn quickly.
It merely updates the values based on the experience collected. If we explore randomly, i.e., always select actions at random, we would most likely not get anywhere.
A better option is to exploit what we have already learned, as summarized by current Q-values.
A typical exploration strategy is to follow a so-called epsilon-greedy policy: with probability epsilon take a random action out of the command set C; with probability 1−epsilon follow the greedy policy given by the current Q-values.
The value of 𝜀 here balances exploration vs exploitation. A large value of 𝜀 means exploring more (randomly), not using much of what we have learned.
A small 𝜀, on the other hand, will generate experience consistent with the current estimates of Q-values.
Args:
state_vector (np.ndarray): extracted vector representation
theta (np.ndarray): current weight matrix
epsilon (float): the probability of choosing a random command
Returns:
(int, int): the indices describing the action/object to take
"""
# TODO Your code here
action_index, object_index = None, None
return (action_index, object_index)
# pragma: coderesponse end
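# Illustrative sketch (not the official solution; the TODO above is left as is).
# One possible epsilon-greedy choice, assuming Q-values come from theta @ state_vector
# and are indexed with tuple2index / index2tuple as noted in linear_q_learning below.
def _example_epsilon_greedy(state_vector, theta, epsilon):
    if np.random.rand() < epsilon:
        # explore: pick a uniformly random (action, object) command
        return np.random.randint(NUM_ACTIONS), np.random.randint(NUM_OBJECTS)
    # exploit: pick the command with the largest current Q-value
    q_values = theta @ state_vector
    return index2tuple(int(np.argmax(q_values)))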
# pragma: coderesponse template
def linear_q_learning(theta, current_state_vector, action_index, object_index,
reward, next_state_vector, terminal):
"""Update theta for a given transition
Note: Q(s,c,theta) can be accessed through q_value = (theta @ state_vector)[tuple2index(action_index, object_index)]
Args:
theta (np.ndarray): current weight matrix
current_state_vector (np.ndarray): vector representation of current state
action_index (int): index of the current action
object_index (int): index of the current object
reward (float): the immediate reward the agent receives from playing the current command
next_state_vector (np.ndarray): vector representation of next state
terminal (bool): True if this episode is over
Returns:
None
"""
# TODO Your code here
theta = None # TODO Your update here
# pragma: coderesponse end
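# Illustrative sketch (not the official solution). A hedged version of the update
# described above: move the row of theta for the chosen (action, object) toward the
# one-step target reward + GAMMA * max_c' Q(s', c').
def _example_linear_q_update(theta, current_state_vector, action_index,
                             object_index, reward, next_state_vector, terminal):
    index = tuple2index(action_index, object_index)
    q_current = (theta @ current_state_vector)[index]
    max_q_next = 0.0 if terminal else np.max(theta @ next_state_vector)
    target = reward + GAMMA * max_q_next
    # gradient step on 0.5 * (target - Q(s, c; theta))**2 with respect to theta[index]
    theta[index] = theta[index] + ALPHA * (target - q_current) * current_state_vector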
def run_episode(for_training):
""" Runs one episode
If for training, update Q function
If for testing, computes and returns the cumulative discounted reward
Args:
for_training (bool): True if for training
Returns:
None if for training, otherwise the cumulative discounted reward (float)
"""
epsilon = TRAINING_EP if for_training else TESTING_EP
epi_reward = None
# initialize for each episode
# TODO Your code here
(current_room_desc, current_quest_desc, terminal) = framework.newGame()
while not terminal:
# Choose next action and execute
current_state = current_room_desc + current_quest_desc
current_state_vector = utils.extract_bow_feature_vector(
current_state, dictionary)
# TODO Your code here
if for_training:
# update Q-function.
# TODO Your code here
pass
if not for_training:
# update reward
# TODO Your code here
pass
# prepare next step
# TODO Your code here
if not for_training:
return epi_reward
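# Illustrative sketch (not the official solution). One way the loop above could be
# completed; framework.step_game is an ASSUMED name/signature for the environment
# step function and should be checked against framework.py before use.
def _example_run_episode(for_training):
    epsilon = TRAINING_EP if for_training else TESTING_EP
    epi_reward, step = 0.0, 0
    current_room_desc, current_quest_desc, terminal = framework.newGame()
    while not terminal:
        state_vector = utils.extract_bow_feature_vector(
            current_room_desc + current_quest_desc, dictionary)
        action_index, object_index = epsilon_greedy(state_vector, theta, epsilon)
        next_room_desc, next_quest_desc, reward, terminal = framework.step_game(
            current_room_desc, current_quest_desc, action_index, object_index)
        next_state_vector = utils.extract_bow_feature_vector(
            next_room_desc + next_quest_desc, dictionary)
        if for_training:
            # update the Q-function approximation from this transition
            linear_q_learning(theta, state_vector, action_index, object_index,
                              reward, next_state_vector, terminal)
        else:
            epi_reward += (GAMMA ** step) * reward  # cumulative discounted reward
        step += 1
        current_room_desc, current_quest_desc = next_room_desc, next_quest_desc
    if not for_training:
        return epi_reward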
def run_epoch():
"""Runs one epoch and returns reward averaged over test episodes"""
rewards = []
for _ in range(NUM_EPIS_TRAIN):
run_episode(for_training=True)
for _ in range(NUM_EPIS_TEST):
rewards.append(run_episode(for_training=False))
return np.mean(np.array(rewards))
def run():
"""Returns array of test reward per epoch for one run"""
global theta
theta = np.zeros([action_dim, state_dim])
single_run_epoch_rewards_test = []
pbar = tqdm(range(NUM_EPOCHS), ncols=80)
for _ in pbar:
single_run_epoch_rewards_test.append(run_epoch())
pbar.set_description(
"Avg reward: {:0.6f} | Ewma reward: {:0.6f}".format(
np.mean(single_run_epoch_rewards_test),
utils.ewma(single_run_epoch_rewards_test)))
return single_run_epoch_rewards_test
if __name__ == '__main__':
state_texts = utils.load_data('game.tsv')
dictionary = utils.bag_of_words(state_texts)
state_dim = len(dictionary)
action_dim = NUM_ACTIONS * NUM_OBJECTS
# set up the game
framework.load_game_data()
epoch_rewards_test = [] # shape NUM_RUNS * NUM_EPOCHS
for _ in range(NUM_RUNS):
epoch_rewards_test.append(run())
epoch_rewards_test = np.array(epoch_rewards_test)
x = np.arange(NUM_EPOCHS)
fig, axis = plt.subplots()
axis.plot(x, np.mean(epoch_rewards_test,
axis=0)) # plot reward per epoch averaged per run
axis.set_xlabel('Epochs')
axis.set_ylabel('reward')
axis.set_title(('Linear: nRuns=%d, Epsilon=%.2f, Epi=%d, alpha=%.4f' %
(NUM_RUNS, TRAINING_EP, NUM_EPIS_TRAIN, ALPHA)))
| [
"matplotlib"
] |
9187898cffbae5acf8f0b129f653aa458938106a | Python | furkanoruc/furkanoruc.github.io | /Ship Domain Violation Prediction on AIS Data via ML Applications.py | UTF-8 | 6,166 | 2.515625 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 14 03:40:03 2021
@author: furkanoruc
"""
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 26 17:10:47 2020
@author: furkanoruc
"""
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
import seaborn as sns
from sklearn.metrics import (confusion_matrix, recall_score, precision_score, roc_auc_score,
accuracy_score, f1_score, plot_confusion_matrix, confusion_matrix,
roc_curve, auc)
"""
dataset = pd.read_csv('RESULTS-Area1_Mart_Nisan_Mayis_2014.csv',sep=',')
filenames = ['RESULTS-Area1_Aralik_2014_Ocak_Subat_2015.csv',
'RESULTS-Area2_Aralik_2014_Ocak_Subat_2015.csv',
'RESULTS-Area3_Aralik_2014_Ocak_Subat_2015.csv',
'RESULTS-Area4_Aralik_2014_Ocak_Subat_2015.csv',
'RESULTS-Area5_Aralik_2014_Ocak_Subat_2015.csv',
'RESULTS-Area6_Aralik_2014_Ocak_Subat_2015.csv',
'RESULTS-Area7_Aralik_2014_Ocak_Subat_2015.csv',
'RESULTS-Area8_Aralik_2014_Ocak_Subat_2015.csv',
'RESULTS-Area9_Aralik_2014_Ocak_Subat_2015.csv',
'RESULTS-Area10_Aralik_2014_Ocak_Subat_2015.csv',
'RESULTS-Area11_Aralik_2014_Ocak_Subat_2015.csv',
'RESULTS-Area12_Aralik_2014_Ocak_Subat_2015.csv',
'RESULTS-Area13_Aralik_2014_Ocak_Subat_2015.csv']
for file in filenames:
df = pd.read_csv(file,sep=',')
dataset = dataset.append(df)
"""
backuup_dataset = dataset
backup_dataset = backuup_dataset
dataset = dataset.drop_duplicates()
dataset = dataset.dropna()
dataset.head(10)
X = dataset.iloc[:, lambda dataset: [1,3]].values
R = dataset.iloc[:, 10].values
X_train, X_test, Y_train, Y_test = train_test_split(X, R, test_size = 0.25, random_state = 14)
X_test.shape
#KNN Implement
def KNeighbors(X, y, X_test, y_test):
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import roc_curve, roc_auc_score, accuracy_score, plot_confusion_matrix, auc
import matplotlib.pyplot as plt
accuracy_score_holder = []
misclassified_sample_count = []
precision_score_holder = []
roc_auc_score_holder = []
for i in range (7,8):
knn=KNeighborsClassifier(n_neighbors=i, p=2, metric='minkowski')
knn.fit(X, y)
myprediction_knn = knn.predict(X_test)
accuracy_score_holder.append(accuracy_score(y_test, myprediction_knn))
misclassified_sample_count.append((y_test != myprediction_knn).sum())
roc_auc_score_holder.append(roc_auc_score(y_test, myprediction_knn))
k = knn.predict(X_test)
precision = precision_score(y_test, myprediction_knn, average = 'macro')
recall = recall_score(y_test, myprediction_knn, average = 'macro')
i = i + 1
#print("Accuracy Score:", accuracy_score_holder)
#print("Precision Score:", precision_score_holder)
#print("Misclassified samples:", misclassified_sample_count)
#print("ROC - AUC Score:", roc_auc_score_holder)
plot_confusion_matrix(knn, X_test, y_test)
plt.show()
return accuracy_score_holder[0], roc_auc_score_holder[0], misclassified_sample_count[0], precision, recall
knn_accuracy, knn_roc_auc, knn_misclassified_count, knn_precision, knn_recall = KNeighbors(X_train, Y_train, X_test, Y_test)
violation_check_distribution = dataset["Violation Check"].value_counts()
def perceptron(X, y, X_test, y_test):
from sklearn.linear_model import Perceptron
from sklearn.metrics import accuracy_score, roc_auc_score, roc_curve, plot_confusion_matrix, auc
import matplotlib.pyplot as plt
for i in range (30,31):
myperceptron = Perceptron(penalty = 'elasticnet', max_iter = i, eta0 = 0.001, random_state = 60)
myperceptron.fit(X, y)
myprediction_perceptron = myperceptron.predict(X_test)
precision = precision_score(y_test, myprediction_perceptron, average = 'macro')
recall = recall_score(y_test, myprediction_perceptron, average = 'macro')
#print(accuracy_score(y_test,myprediction_perceptron))
#print("Accuracy Score: ", accuracy_score(y_test,myprediction_perceptron))
#print("Number of Misclassified Samples: ", (y_test != myprediction_perceptron).sum())
#print("ROC - AUC Score:", roc_auc_score(y_test, myprediction_perceptron))
plot_confusion_matrix(myperceptron, X_test, y_test)
plt.show()
return accuracy_score(y_test,myprediction_perceptron), roc_auc_score(y_test, myprediction_perceptron), (y_test != myprediction_perceptron).sum(), precision, recall
perceptron_accuracy, perceptron_roc_auc, perceptron_misclassified_count, perceptron_precision, perceptron_recall = perceptron(X_train, Y_train, X_test, Y_test)
from package import (data_balancer, MultiLayerPerceptron, softmax, sigmoid, Loss_Perceptron,Weight_Initialization,
KNeighbors, forest, svm, perceptron)
mlp_accuracy_train, mlp_accuracy_test, w1,w2,b1,b2,classes_train, classes_test = MultiLayerPerceptron(X_train, Y_train, X_test, Y_test, hidden_layer_units=100, alpha=0.03, epoch=10)
mlp_recall = recall_score(Y_test, classes_test, average = 'macro')
mlp_precision = precision_score(Y_test, classes_test, average = 'macro')
mlp_roc_auc = roc_auc_score(Y_test, classes_test)
f, ax = plt.subplots(1,2,figsize=(12,6))
sns.heatmap(confusion_matrix(Y_test, classes_test));
sns.heatmap(confusion_matrix(Y_train, classes_train), fmt='.0f', annot=True,ax=ax[0],
xticklabels=[0,1], yticklabels=[0,1], cmap = 'Oranges');
plt.show()
random_forest_accuracy, random_forest_Roc_Auc, random_forest_Misclassified_Count, random_forest_precision = forest(X_train, Y_train, X_test, Y_test)
#Some Desc Stat
length_mean = dataset.iloc[:,0].mean()
speed_mean = dataset.iloc[:,2].mean()
plt.hist(dataset.iloc[:,2], bins = 0.6);
plt.show()
plt.hist2d(dataset.iloc[:,0], dataset.iloc[:,2], bins=30, cmap='Blues')
| [
"matplotlib",
"seaborn"
] |
a62ced7bc90efba1ce419a81fd020cd94a19442f | Python | julescarpentier/classifii | /notebooks/rationales.py | UTF-8 | 4,947 | 2.625 | 3 | [] | no_license | from __future__ import absolute_import, division, print_function, unicode_literals
import os
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.utils import to_categorical
from justifii.database import db_session
from justifii.models import Text, Label
from models import fully_conv_with_rationales, fully_conv_without_rationales
from utilities.embedding import get_embedding_matrix, get_pre_trained_trainable_embedding_layer
tf.keras.backend.clear_session() # For easy reset of notebook state.
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
# The GPU id to use, usually either "0" or "1"
os.environ["CUDA_VISIBLE_DEVICES"] = "3"
MAX_SEQUENCE_LENGTH = 1000
MAX_NUM_WORDS = 20000
VALIDATION_SPLIT = 0.2
# ensure the output folder exists
try:
os.makedirs('output')
except OSError:
pass
# Prepare text samples and their labels
print('Retrieving texts from database')
texts = []
labels = []
nb_labels = Label.query.count()
rationales = []
for text in Text.query.filter(Text.rationales.any()):
texts.append(text.get_content())
labels.append(text.label.target)
rationales.append(text.get_r(nb_labels, MAX_SEQUENCE_LENGTH))
rationales = np.asarray(rationales)
db_session.remove()
print('Retrieved {} texts.'.format(len(texts)))
# Vectorize the text samples into a 2D integer tensor
tokenizer = Tokenizer(num_words=MAX_NUM_WORDS)
tokenizer.fit_on_texts(texts)
sequences = tokenizer.texts_to_sequences(texts)
word_index = tokenizer.word_index
print('Found {} unique tokens.'.format(len(word_index)))
data = pad_sequences(sequences, maxlen=MAX_SEQUENCE_LENGTH)
labels = to_categorical(np.asarray(labels))
print('Shape of data tensor:', data.shape)
print('Shape of label tensor:', labels.shape)
indices = np.arange(data.shape[0])
np.random.shuffle(indices)
data = data[indices]
labels = labels[indices]
rationales = rationales[indices]
num_validation_samples = int(VALIDATION_SPLIT * data.shape[0])
# split the data into a training set and a validation set
x_train = data[:-num_validation_samples]
y_train = labels[:-num_validation_samples]
r_train = rationales[:-num_validation_samples]
x_val = data[-num_validation_samples:]
y_val = labels[-num_validation_samples:]
r_val = rationales[-num_validation_samples:]
print('Preparing embedding matrix.')
# prepare embedding matrix
num_words = min(MAX_NUM_WORDS, len(word_index) + 1)
embedding_matrix = get_embedding_matrix(num_words, MAX_NUM_WORDS, word_index)
# Embedding layer
embedding_layer = get_pre_trained_trainable_embedding_layer(num_words, embedding_matrix, MAX_SEQUENCE_LENGTH)
print('Training models.')
model_with_rationales = fully_conv_with_rationales.get_compiled_model(embedding_layer, MAX_SEQUENCE_LENGTH, nb_labels)
model_without_rationales = fully_conv_without_rationales.get_compiled_model(embedding_layer, MAX_SEQUENCE_LENGTH,
nb_labels)
history_with_rationales = model_with_rationales.fit(x_train, (y_train, r_train), batch_size=16, epochs=10,
validation_data=(x_val, (y_val, r_val)))
history_without_rationales = model_without_rationales.fit(x_train, y_train, batch_size=16, epochs=10,
validation_data=(x_val, y_val))
# model_with_rationales.save('output/fully_conv_with_rationales.h5')
# model_without_rationales.save('output/fully_conv_without_rationales.h5')
# print('Saved models')
# Plot accuracy
plt.figure()
plt.plot(history_with_rationales.history['val_topic_acc'])
plt.plot(history_without_rationales.history['val_acc'])
plt.plot(history_with_rationales.history['topic_acc'], '--')
plt.plot(history_without_rationales.history['acc'], '--')
plt.title('Compared accuracies')
plt.ylabel('Accuracy')
plt.xlabel('Epoch')
plt.legend(['R', 'No R', 'R', 'No R'], loc='upper left')
plt.savefig('output/fully_conv_rationales_acc.png')
plt.close()
# Plot losses with and without rationales
plt.figure()
plt.plot(history_with_rationales.history['val_topic_loss'])
plt.plot(history_without_rationales.history['val_loss'])
plt.plot(history_with_rationales.history['topic_loss'], '--')
plt.plot(history_without_rationales.history['loss'], '--')
plt.title('Compared losses')
plt.ylabel('Loss')
plt.xlabel('Epoch')
plt.legend(['R', 'No R', 'R', 'No R'], loc='upper left')
plt.savefig('output/fully_conv_rationales_loss.png')
plt.close()
# Plot everything
plt.figure()
for metric, values in history_with_rationales.history.items():
plt.plot(values, '-' if 'val' in metric else '--', label=metric)
plt.title('Everything to show with rationales')
plt.ylabel('Metrics')
plt.xlabel('Epoch')
plt.legend(loc='upper left')
plt.savefig('output/fully_conv_rationales_everything.png')
plt.close()
| [
"matplotlib"
] |
cec39195f8eae96bb1258cba684c9c3baf55bcf8 | Python | alireidacr/speed_test | /speed_test.py | UTF-8 | 6036 | 2.984375 | 3 | [] | no_license | # python script to regularly test internet speed and perform analytics
import math
import os
import time
import statistics as stats
import matplotlib.pyplot as plt
import datetime
from shutil import copyfile
# read in measurement parameters
configFile = open("./config.txt", 'r')
for line in configFile.readlines():
if line[0] != '#':
vals = line.split()
configFile.close()
interval = float(vals[0])
measurements = int(vals[1])
repeats = int(vals[2])
def measure():
# set file paths for data, dump and temp files
start_time = time.asctime()
start_time = start_time.replace(' ', '_')
start_time = start_time.replace(':', '_')
os.mkdir(start_time)
# copy config file into output directory
config_str = os.path.join(start_time, 'config.txt')
copyfile("./config.txt", config_str)
out_str = os.path.join(start_time, 'data.txt')
dump_str = os.path.join(start_time, 'dump.txt')
temp_str = os.path.join(start_time, 'temp.txt')
summ_str = os.path.join(start_time, 'summary_stats.txt')
for loop in range(measurements):
distances = []
latencies = []
downloads = []
uploads = []
measurement_time = time.time()
for counter in range(repeats):
os.system("speedtest-cli > " + temp_str)
distances.append(getServerDistance(temp_str))
latencies.append(getLatency(temp_str))
downloads.append(getDownloadSpeed(temp_str))
uploads.append(getUploadSpeed(temp_str))
appendDumpFile(temp_str, dump_str)
mean_dist = sum(distances)/repeats
mean_latency = sum(latencies)/repeats
mean_download = sum(downloads)/repeats
mean_upload = sum(uploads)/repeats
std_download = stats.stdev(downloads)/math.sqrt(repeats)
std_upload = stats.stdev(uploads)/math.sqrt(repeats)
std_latency = stats.stdev(latencies)/math.sqrt(repeats)
datafile = open(out_str, 'a+')
format_str = "{0} {1} {2} {3} {4} {5} {6} {7}\n"
datafile.write(format_str.format(measurement_time, mean_dist, mean_latency, mean_download, mean_upload, std_download, std_upload, std_latency))
datafile.close()
time.sleep(interval*60)
return start_time
def analyse(start_time):
data_str = os.path.join(start_time, 'data.txt')
summ_str = os.path.join(start_time, 'summary_stats.txt')
# populate lists with download, upload, time data
downloads = []
downloads_error = []
uploads = []
uploads_error = []
times = []
latencies = []
latencies_error = []
datafile = open(data_str, 'r')
for line in datafile.readlines():
data = line.split()
downloads.append(float(data[3]))
downloads_error.append(float(data[5]))
uploads.append(float(data[4]))
uploads_error.append(float(data[6]))
latencies.append(float(data[2]))
latencies_error.append(float(data[7]))
times.append(float(data[0]))
datafile.close()
times = [datetime.datetime.fromtimestamp(measurement) for measurement in times]
plt.errorbar(times, downloads, yerr=downloads_error, label=r"Download Speed")
plt.errorbar(times, uploads, yerr=uploads_error, label=r"Upload Speed")
plt.xlabel("Time of Measurement")
plt.xticks(rotation = (30))
plt.ylabel(r"Bandwidth $(Mbs^{-1})$")
plt.title("Variation of Bandwidth Over Time")
plt.legend()
bandwidth_str = os.path.join(start_time, 'bandwidth.png')
plt.savefig(bandwidth_str)
plt.close()
plt.errorbar(times, latencies, yerr=latencies_error)
plt.xlabel("Time of Measurement")
plt.xticks(rotation = (30))
plt.ylabel("Latency (ms)")
plt.title("Variation of Latency Over Time")
latency_str = os.path.join(start_time, 'latency.png')
plt.savefig(latency_str)
# generate summary statistics
mean_download = sum(downloads)/len(downloads)
mean_upload = sum(uploads)/len(uploads)
mean_latency = sum(latencies)/len(latencies)
download_std = stats.stdev(downloads)
upload_std = stats.stdev(uploads)
latency_std = stats.stdev(latencies)
summFile = open(summ_str, 'w')
download_str = "Mean Download Speed: {0} Mb/s, Standard Deviation: {1} \n".format(format(mean_download, '.3f'), format(download_std, '.3f'))
summFile.write(download_str)
upload_str = "Mean Upload Speed: {0} Mb/s, Standard Deviation: {1} \n".format(format(mean_upload, '.3f'), format(upload_std, '.3f'))
summFile.write(upload_str)
latency_str = "Mean Latency: {0} ms, Standard Deviation: {1} \n".format(format(mean_latency, '.1f'), format(latency_std, '.1f'))
summFile.write(latency_str)
summFile.close()
print("Run Completed Successfully")
def main():
start_time = measure()
analyse(start_time)
def getServerDistance(temp_str):
tempFile = open(temp_str, 'r')
lines = tempFile.readlines()
start_pos = lines[4].find('[')
end_pos = lines[4].find(']')
tempFile.close()
distance = lines[4][start_pos+1:end_pos-2]
return float(distance)
def getLatency(temp_str):
tempFile = open(temp_str, 'r')
lines = tempFile.readlines()
start_pos = lines[4].find(':')
tempFile.close()
latency = lines[4][start_pos+2:-3]
return float(latency)
def appendDumpFile(temp_str, dump_str):
tempFile = open(temp_str, 'r')
dumpfile = open(dump_str, 'a+')
for line in tempFile:
dumpfile.write(line)
dumpfile.write('\n')
tempFile.close()
dumpfile.close()
def getDownloadSpeed(temp_str):
tempFile = open(temp_str, 'r')
lines = tempFile.readlines()
start_pos = lines[6].find(':')
download = lines[6][start_pos+2:-7]
return float(download)
def getUploadSpeed(temp_str):
tempFile = open(temp_str, 'r')
lines = tempFile.readlines()
start_pos = lines[8].find(':')
upload = lines[8][start_pos+2:-7]
return float(upload)
main()
| [
"matplotlib"
] |
52c737e90a86d4a33e0f02a235ea43b252601673 | Python | dehbi16/FloorPlan | /Pix2Pix_test.py | UTF-8 | 1,397 | 3.0625 | 3 | [] | no_license | from keras.models import load_model
from numpy import load
from numpy import vstack
import matplotlib.pyplot as plt
from numpy.random import randint
# load and prepare training images
def load_real_samples(filename):
# load compressed arrays
data = load(filename)
# unpack arrays
X1, X2 = data['arr_0'], data['arr_1']
# scale to [-1,1] (X1 from [0,255], X2 from [0,13])
for i in range(len(X1)):
X1[i] = (X1[i] - 127.5) / 127.5
X2[i] = (X2[i] - 6.5) / 6.5
return [X1, X2]
# plot source, generated and target images
def plot_images(src_img, gen_img, tar_img):
# scale from [-1,1] to [0,1]
src_img = (src_img + 1) / 2.0
gen_img = (gen_img + 1) / 2.0
tar_img = (tar_img + 1) / 2.0
images = vstack((src_img, gen_img, tar_img))
titles = ['Source', 'Generated', 'Expected']
# plot images row by row
for i in range(len(images)):
# define subplot
plt.subplot(1, 3, 1 + i)
# turn off axis
plt.axis('off')
# plot raw pixel data
plt.imshow(images[i], cmap="Greys")
# show title
plt.title(titles[i])
plt.show()
[X1, X2] = load_real_samples('floor_plan.npz')
print('Loaded', X1.shape, X2.shape)
model = load_model('model_000010.h5')
ix = randint(0, len(X1), 1)
src_image, tar_image = X1[ix], X2[ix]
gen_image = model.predict(src_image)
plot_images(src_image, gen_image, tar_image) | [
"matplotlib"
] |