Dataset Viewer (First 5GB)

Schema: blob_id (string, length 40) | language (string, 1 class) | repo_name (string, length 5-135) | path (string, length 2-372) | src_encoding (string, 26 classes) | length_bytes (int64, 55-3.18M) | score (float64, 2.52-5.19) | int_score (int64, 3-5) | detected_licenses (sequence, length 0-38) | license_type (string, 2 classes) | code (string, length 55-3.18M) | used_libs (sequence, length 1-5)

blob_id | language | repo_name | path | src_encoding | length_bytes | score | int_score | detected_licenses | license_type | code | used_libs
---|---|---|---|---|---|---|---|---|---|---|---
ee92269ab11111536b7b926e909d4a4c4766270b | Python | PanditRohit/COVID-19-Data-Analysis | /COVID-19 Data Analysis.py | UTF-8 | 2,943 | 3.28125 | 3 | [] | no_license | # Import Libraries
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
#Import Covid-19 Dataset
corona_dataset_csv = pd.read_csv("C:/Users/pandit/Downloads/covid19_Confirmed_dataset.csv")
corona_dataset_csv.head()
corona_dataset_csv.shape
#Delete the useless columns after Exploratory Data Analysis
corona_dataset_csv.drop(["Lat","Long"],axis=1,inplace=True)
corona_dataset_csv.head(10)
#Aggregating the rows by the country
corona_dataset_aggregated=corona_dataset_csv.groupby("Country/Region").sum()
corona_dataset_aggregated.head()
corona_dataset_aggregated.shape
#Visualizing Data related to Countries
corona_dataset_aggregated.loc["China"].plot()
corona_dataset_aggregated.loc["India"].plot()
corona_dataset_aggregated.loc["Italy"].plot()
plt.legend()
#Calculating a good measure
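# diff() turns the cumulative case counts into daily new cases; the maximum of
# that first derivative is what this notebook uses as the "maximum infection rate".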
corona_dataset_aggregated.loc['China'][:3].plot()
corona_dataset_aggregated.loc['China'].diff().plot()
#Finding maximum infection rates for Selected countries
corona_dataset_aggregated.loc['China'].diff().max()
corona_dataset_aggregated.loc['India'].diff().max()
corona_dataset_aggregated.loc['Italy'].diff().max()
#Find maximum infection rate for all the countries
countries = list(corona_dataset_aggregated.index)
max_infection_rates=[]
for c in countries :
max_infection_rates.append(corona_dataset_aggregated.loc[c].diff().max())
corona_dataset_aggregated["max_infection_rates"] = max_infection_rates
corona_dataset_aggregated.head()
#Create a new dataframe with only needed column
corona_data = pd.DataFrame(corona_dataset_aggregated["max_infection_rates"])
corona_data.head()
#Now import world happiness report dataset
happiness_report = pd.read_csv("C:/Users/pandit/Downloads/worldwide_happiness_report.csv")
happiness_report.head()
#Make a list of useless columns and drop the same
useless_cols = ["Overall rank","Score","Generosity","Perceptions of corruption"]
happiness_report.drop(useless_cols,axis=1,inplace=True)
happiness_report.head()
#Change the indices of the dataframe
happiness_report.set_index("Country or region",inplace=True)
happiness_report.head()
#Check the shapes of both the dataframes
corona_data.head()
corona_data.shape
happiness_report.shape
#Join the two datasets using inner join as the shape of the datasets vary
data=corona_data.join(happiness_report,how="inner")
data.head()
#Create a correlation matrix
data.corr()
#Visualization of the results
data.head()
#Plotting GDP vs maximum infection rates
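# max_infection_rates is heavily right-skewed across countries, so the y values
# are log-transformed below to make the relationship with GDP readable.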
x = data["GDP per capita"]
y = data["max_infection_rates"]
sns.scatterplot(x=x, y=np.log(y))
sns.regplot(x=x, y=np.log(y))
#Plotting social support vs maximum infection rates
x = data["Social support"]
y = data["max_infection_rates"]
sns.scatterplot(x=x, y=np.log(y))
sns.regplot(x=x, y=np.log(y))
#Plotting Healthy life expectancy vs maximum infection rates
x = data["Healthy life expectancy"]
y = data["max_infection_rates"]
sns.scatterplot(x=x, y=np.log(y))
sns.regplot(x=x, y=np.log(y)) | [
"matplotlib",
"seaborn"
] |
e435765339472ea9173527d546970d87a0d1c7fc | Python | elofamomo/btTTVT | /sampling.py | UTF-8 | 10,333 | 2.578125 | 3 | [] | no_license | """This file contains code used in "Think DSP",
by Allen B. Downey, available from greenteapress.com
Copyright 2015 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function, division
import thinkdsp
import thinkplot
import numpy as np
import matplotlib.pyplot as plt
PI2 = 2 * np.pi
FORMATS = ['pdf', 'eps']
def plot_beeps():
wave = thinkdsp.read_wave('253887__themusicalnomad__positive-beeps.wav')
wave.normalize()
thinkplot.preplot(3)
# top left
ax1 = plt.subplot2grid((4, 2), (0, 0), rowspan=2)
plt.setp(ax1.get_xticklabels(), visible=False)
wave.plot()
thinkplot.config(title='Input waves', legend=False)
# bottom left
imp_sig = thinkdsp.Impulses([0.01, 0.4, 0.8, 1.2],
amps=[1, 0.5, 0.25, 0.1])
impulses = imp_sig.make_wave(start=0, duration=1.3,
framerate=wave.framerate)
ax2 = plt.subplot2grid((4, 2), (2, 0), rowspan=2, sharex=ax1)
impulses.plot()
thinkplot.config(xlabel='Time (s)')
# center right
convolved = wave.convolve(impulses)
ax3 = plt.subplot2grid((4, 2), (1, 1), rowspan=2)
plt.title('Convolution')
convolved.plot()
thinkplot.config(xlabel='Time (s)')
thinkplot.save(root='sampling1',
formats=FORMATS,
legend=False)
XLIM = [-22050, 22050]
def plot_am():
wave = thinkdsp.read_wave('105977__wcfl10__favorite-station.wav')
wave.unbias()
wave.normalize()
# top
ax1 = thinkplot.preplot(6, rows=4)
spectrum = wave.make_spectrum(full=True)
spectrum.plot(label='spectrum')
thinkplot.config(xlim=XLIM, xticklabels='invisible')
#second
carrier_sig = thinkdsp.CosSignal(freq=10000)
carrier_wave = carrier_sig.make_wave(duration=wave.duration,
framerate=wave.framerate)
modulated = wave * carrier_wave
ax2 = thinkplot.subplot(2, sharey=ax1)
modulated.make_spectrum(full=True).plot(label='modulated')
thinkplot.config(xlim=XLIM, xticklabels='invisible')
# third
demodulated = modulated * carrier_wave
demodulated_spectrum = demodulated.make_spectrum(full=True)
ax3 = thinkplot.subplot(3, sharey=ax1)
demodulated_spectrum.plot(label='demodulated')
thinkplot.config(xlim=XLIM, xticklabels='invisible')
#fourth
ax4 = thinkplot.subplot(4, sharey=ax1)
demodulated_spectrum.low_pass(10000)
demodulated_spectrum.plot(label='filtered')
thinkplot.config(xlim=XLIM, xlabel='Frequency (Hz)')
thinkplot.save(root='sampling2',
formats=FORMATS)
#carrier_spectrum = carrier_wave.make_spectrum(full=True)
#carrier_spectrum.plot()
#convolved = spectrum.convolve(carrier_spectrum)
#convolved.plot()
#reconvolved = convolved.convolve(carrier_spectrum)
#reconvolved.plot()
def sample(wave, factor):
"""Simulates sampling of a wave.
wave: Wave object
factor: ratio of the new framerate to the original
"""
ys = np.zeros(len(wave))
ys[::factor] = wave.ys[::factor]
ts = wave.ts[:]
return thinkdsp.Wave(ys, ts, wave.framerate)
def make_impulses(wave, factor):
ys = np.zeros(len(wave))
ys[::factor] = 1
ts = np.arange(len(wave)) / wave.framerate
return thinkdsp.Wave(ys, ts, wave.framerate)
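# Multiplying a wave by this impulse train is equivalent to sampling it: in the
# frequency domain the spectrum is convolved with the periodic spectrum of the
# impulse train, producing the shifted spectral copies that cause aliasing.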
def plot_segments(original, filtered):
start = 1
duration = 0.01
original.segment(start=start, duration=duration).plot(color='gray')
filtered.segment(start=start, duration=duration).plot()
def plot_sampling(wave, root):
ax1 = thinkplot.preplot(2, rows=2)
wave.make_spectrum(full=True).plot(label='spectrum')
thinkplot.config(xlim=XLIM, xticklabels='invisible')
ax2 = thinkplot.subplot(2)
sampled = sample(wave, 4)
sampled.make_spectrum(full=True).plot(label='sampled')
thinkplot.config(xlim=XLIM, xlabel='Frequency (Hz)')
thinkplot.save(root=root,
formats=FORMATS)
def plot_sampling2(wave, root):
ax1 = thinkplot.preplot(6, rows=4)
wave.make_spectrum(full=True).plot(label='spectrum')
thinkplot.config(xlim=XLIM, xticklabels='invisible')
ax2 = thinkplot.subplot(2)
impulses = make_impulses(wave, 4)
impulses.make_spectrum(full=True).plot(label='impulses')
thinkplot.config(xlim=XLIM, xticklabels='invisible')
ax3 = thinkplot.subplot(3)
sampled = wave * impulses
spectrum = sampled.make_spectrum(full=True)
spectrum.plot(label='sampled')
thinkplot.config(xlim=XLIM, xticklabels='invisible')
ax4 = thinkplot.subplot(4)
spectrum.low_pass(5512.5)
spectrum.plot(label='filtered')
thinkplot.config(xlim=XLIM, xlabel='Frequency (Hz)')
thinkplot.save(root=root,
formats=FORMATS)
def plot_sampling3(wave, root):
ax1 = thinkplot.preplot(6, rows=3)
wave.make_spectrum(full=True).plot(label='spectrum')
thinkplot.config(xlim=XLIM, xticklabels='invisible')
impulses = make_impulses(wave, 4)
ax2 = thinkplot.subplot(2)
sampled = wave * impulses
spectrum = sampled.make_spectrum(full=True)
spectrum.plot(label='sampled')
thinkplot.config(xlim=XLIM, xticklabels='invisible')
ax3 = thinkplot.subplot(3)
spectrum.low_pass(5512.5)
spectrum.plot(label='filtered')
thinkplot.config(xlim=XLIM, xlabel='Frequency (Hz)')
thinkplot.save(root=root,
formats=FORMATS)
#filtered = spectrum.make_wave()
#plot_segments(wave, filtered)
def make_boxcar(spectrum, factor):
"""Makes a boxcar filter for the given spectrum.
spectrum: Spectrum to be filtered
factor: sampling factor
"""
fs = np.copy(spectrum.fs)
hs = np.zeros_like(spectrum.hs)
cutoff = spectrum.framerate / 2 / factor
for i, f in enumerate(fs):
if abs(f) <= cutoff:
hs[i] = 1
return thinkdsp.Spectrum(hs, fs, spectrum.framerate, full=spectrum.full)
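# The boxcar is an ideal low-pass filter: its impulse response is a sinc function,
# which is why make_wave() on this spectrum yields the interpolation kernel used
# for reconstruction in plot_sinc_demo below.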
def plot_sinc_demo(wave, factor, start=None, duration=None):
def make_sinc(t, i, y):
"""Makes a shifted, scaled copy of the sinc function."""
sinc = boxcar.make_wave()
sinc.shift(t)
sinc.roll(i)
sinc.scale(y * factor)
return sinc
def plot_mini_sincs(wave):
"""Plots sinc functions for each sample in wave."""
t0 = wave.ts[0]
for i in range(0, len(wave), factor):
sinc = make_sinc(t0, i, wave.ys[i])
seg = sinc.segment(start, duration)
seg.plot(color='green', linewidth=0.5, alpha=0.3)
if i == 0:
total = sinc
else:
total += sinc
seg = total.segment(start, duration)
seg.plot(color='blue', alpha=0.5)
sampled = sample(wave, factor)
spectrum = sampled.make_spectrum()
boxcar = make_boxcar(spectrum, factor)
start = wave.start if start is None else start
duration = wave.duration if duration is None else duration
sampled.segment(start, duration).plot_vlines(color='gray')
wave.segment(start, duration).plot(color='gray')
plot_mini_sincs(wave)
def plot_sincs(wave):
start = 1.0
duration = 0.01
factor = 4
short = wave.segment(start=start, duration=duration)
#short.plot()
sampled = sample(short, factor)
#sampled.plot_vlines(color='gray')
spectrum = sampled.make_spectrum(full=True)
boxcar = make_boxcar(spectrum, factor)
sinc = boxcar.make_wave()
sinc.shift(sampled.ts[0])
sinc.roll(len(sinc)//2)
thinkplot.preplot(2, cols=2)
sinc.plot()
thinkplot.config(xlabel='Time (s)')
thinkplot.subplot(2)
boxcar.plot()
thinkplot.config(xlabel='Frequency (Hz)',
ylim=[0, 1.05],
xlim=[-boxcar.max_freq, boxcar.max_freq])
thinkplot.save(root='sampling6',
formats=FORMATS)
return
# CAUTION: don't call plot_sinc_demo with a large wave or it will
# fill memory and crash
plot_sinc_demo(short, 4)
thinkplot.config(xlabel='Time (s)')
thinkplot.save(root='sampling7',
formats=FORMATS)
start = short.start + 0.004
duration = 0.00061
plot_sinc_demo(short, 4, start, duration)
thinkplot.config(xlabel='Time (s)',
xlim=[start, start+duration],
ylim=[-0.06, 0.17], legend=False)
thinkplot.save(root='sampling8',
formats=FORMATS)
def kill_yticklabels():
axis = plt.gca()
plt.setp(axis.get_yticklabels(), visible=False)
def show_impulses(wave, factor, i):
thinkplot.subplot(i)
thinkplot.preplot(2)
impulses = make_impulses(wave, factor)
impulses.segment(0, 0.001).plot_vlines(linewidth=2, xfactor=1000)
if i == 1:
thinkplot.config(title='Impulse train',
ylim=[0, 1.05])
else:
thinkplot.config(xlabel='Time (ms)',
ylim=[0, 1.05])
thinkplot.subplot(i+1)
impulses.make_spectrum(full=True).plot()
kill_yticklabels()
if i == 1:
thinkplot.config(title='DFT of impulse train',
xlim=[-22400, 22400])
else:
thinkplot.config(xlabel='Frequency (Hz)',
xlim=[-22400, 22400])
def plot_impulses(wave):
thinkplot.preplot(rows=2, cols=2)
show_impulses(wave, 4, 1)
show_impulses(wave, 8, 3)
thinkplot.save('sampling9',
formats=FORMATS)
def main():
wave = thinkdsp.read_wave('328878__tzurkan__guitar-phrase-tzu.wav')
wave.normalize()
plot_sampling3(wave, 'sampling5')
plot_sincs(wave)
plot_beeps()
plot_am()
wave = thinkdsp.read_wave('263868__kevcio__amen-break-a-160-bpm.wav')
wave.normalize()
plot_impulses(wave)
plot_sampling(wave, 'sampling3')
plot_sampling2(wave, 'sampling4')
if __name__ == '__main__':
main()
| [
"matplotlib"
] |
abc604ebfa2ec99d8d085881172baa783c2b5325 | Python | haidaodao/sdc-term3-p4-capstone-carla | /graph_waypoints.py | UTF-8 | 2,608 | 3.15625 | 3 | [] | no_license | #!/usr/bin/env python
import matplotlib.pyplot as plt
import csv
from math import cos, sin
wp_x = []
wp_y = []
path = './data/wp_yaw_const.csv'
def closest_waypoint(waypoints, pos):
best_distance = float('inf')
best_wp_index = 0
best_wp = None
for i, wp in enumerate(waypoints):
dist = ((pos[0] - wp[0]) ** 2 + (pos[1] - wp[1]) ** 2) ** 0.5
if dist < best_distance:
best_distance = dist
best_wp, best_wp_index = wp, i
yaw = waypoints[best_wp_index][2]
nx = pos[0] - waypoints[best_wp_index][0]
ny = pos[1] - waypoints[best_wp_index][1]
# Check if waypoint is ahead of stop line
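# Rotating the offset (nx, ny) into the waypoint's heading frame projects it onto
# the direction of travel; a negative component means the position is behind the waypoint.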
ahead = nx * cos(0 - yaw) - ny * sin(0 - yaw)
if ahead < 0:
best_wp_index -= 1
print (best_wp_index)
return best_wp_index
def all_stop_line_closest_wp(waypoints, traffic_light_posts):
stop_wp = []
stop_x = []
stop_y = []
for pos in traffic_light_posts:
print (pos)
best_index = closest_waypoint(waypoints, pos)
# print (best_index)
stop_x.append(waypoints[best_index][0])
stop_y.append(waypoints[best_index][1])
return stop_x, stop_y
traffic_light_pos = [[1148.56, 1184.65], \
[1559.2, 1158.43], \
[2122.14, 1526.79], \
[2175.237, 1795.71], \
[1493.29, 2947.67], \
[821.96, 2905.8], \
[161.76, 2303.82], \
[351.84, 1574.65]]
# IMPORT WAYPOINTS
waypoints = []
with open(path, 'r') as file:
reader = csv.reader(file, delimiter=',')
for i in reader:
# print (i)
x, y, yaw = float(i[0]), \
float(i[1]), \
float(i[3])
waypoints.append([x, y, yaw])
wp_x.append(x)
wp_y.append(y)
# print (waypoints)
# IMPORT TRAFFIC LIGHT POSITIONS
light_x = []
light_y = []
for pos in traffic_light_pos:
light_x.append(pos[0])
light_y.append(pos[1])
# FIND CLOSET WAYPOINT TO TRAFFIC LIGHT
stop_x, stop_y = all_stop_line_closest_wp(waypoints, traffic_light_pos)
# print ("X stops", stop_x)
# print ("Y stops", stop_y)
# print ("Waypoint Index: ", closest_waypoint(waypoints, traffic_light_pos[0]))
# print ("Waypoint Index: ", closest_waypoint(waypoints, traffic_light_pos[1]))
plt.plot(wp_x, wp_y, label='Waypoints Path')
plt.plot(light_x, light_y, 'ro', label='Traffic Light Posts')
plt.plot(stop_x, stop_y, 'g^', label='Stopping Waypoints')
plt.xlabel('x')
plt.ylabel('y')
plt.title('Simulator path')
plt.legend()
plt.show()
| [
"matplotlib"
] |
d138ddaab06030d1555161e0e06144546ced464c | Python | jhadhiraj1/predictive_analytics | /predictive_analytics.py | UTF-8 | 4,706 | 2.984375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Thu Jul 22 14:36:01 2021
@author: RUDRA
"""
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.impute import SimpleImputer
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
from sklearn.svm import SVR
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
data=pd.read_csv("insurance.csv")
count_nan=data.isnull().sum()
print(count_nan[count_nan>0])
data['bmi'].fillna(data['bmi'].mean(),inplace=True)
count_nan=data.isnull().sum()
print(count_nan)
print(data['smoker'])
print(data)
sex=data.iloc[:,1:2].values
smoker=data.iloc[:,4:5].values
le=LabelEncoder()
sex[:,0]=le.fit_transform(sex[:,0])
sex=pd.DataFrame(sex)
sex.columns=['sex']
le_sex_mapping=dict(zip(le.classes_,le.transform(le.classes_)))
print(le_sex_mapping)
le=LabelEncoder()
smoker[:,0]=le.fit_transform(smoker[:,0])
smoker=pd.DataFrame(smoker)
smoker.columns=['smoker']
le_smoker_mapping=dict(zip(le.classes_,le.transform(le.classes_)))
print(le_smoker_mapping)
print(data['smoker'])
region=data.iloc[:,5:6].values
ohe=OneHotEncoder()
region=ohe.fit_transform(region).toarray()
region=pd.DataFrame(region)
region.columns=['northeast','northwest','southeast','southwest']
print(region[:10])
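# Assemble the design matrix: numeric columns plus the encoded categorical features,
# concatenated column-wise (axis=1) since all frames share the same row order.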
X_num=data[['age','bmi','children']]
X_final=pd.concat([X_num,sex,smoker,region],axis=1)
y_final=data[['expenses']].copy()
X_train,X_test,y_train,y_test=train_test_split(X_final,y_final,test_size=0.33,random_state=0)
##NOrmalization using MinMax
n_scaler=MinMaxScaler()
X_train=n_scaler.fit_transform(X_train.astype(float)) # np.float was removed from NumPy; plain float is equivalent
X_test=n_scaler.transform(X_test.astype(float))
##Normalization using Standardization
s_scaler=StandardScaler()
X_train=s_scaler.fit_transform(X_train.astype(float))
X_test=s_scaler.transform(X_test.astype(float))
lr=LinearRegression().fit(X_train,y_train)
y_train_pred=lr.predict(X_train)
y_test_pred=lr.predict(X_test)
print("lr co-efficient is {}".format(lr.coef_))
print("Intercep {}".format(lr.intercept_))
print("y_train Score: %.3f and y_test score: %.3f" % (lr.score(X_train,y_train),lr.score(X_test,y_test)))
##Applying Polynomial Features to the data
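# PolynomialFeatures(degree=2) expands the inputs with squares and pairwise products,
# letting an otherwise linear model capture simple feature interactions.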
poly_f=PolynomialFeatures(degree=2)
poly_X=poly_f.fit_transform(X_final)
X_train,X_test,y_train,y_test=train_test_split(poly_X,y_final,test_size=0.33,random_state=0)
s_scaler=StandardScaler()
X_train=s_scaler.fit_transform(X_train.astype(float))
X_test=s_scaler.transform(X_test.astype(float))
poly_lr=LinearRegression().fit(X_train,y_train)
poly_y_train_pred=poly_lr.predict(X_train)
poly_y_test_pred=poly_lr.predict(X_test)
print("Polynomoial lr Co-efficient:{}".format(poly_lr.coef_))
print("Y-intercept is :{}".format(poly_lr.intercept_))
print("y_train score: %.3f and y_test score:%.3f"
% (poly_lr.score(X_train,y_train),poly_lr.score(X_test,y_test)))
## SVR Modelling
svr=SVR(kernel='linear',C=300)
X_train,X_test,y_train,y_test=train_test_split(X_final,y_final,test_size=0.33,random_state=0)
s_scaler=StandardScaler()
X_train=s_scaler.fit_transform(X_train.astype(float))
X_test=s_scaler.transform(X_test.astype(float))
svr=svr.fit(X_train,y_train.values.ravel())
y_train_pred=svr.predict(X_train)
y_test_pred=svr.predict(X_test)
print("y_train score: %.3f and y_test score: %.3f" %(svr.score(X_train,y_train),svr.score(X_test,y_test)))
dt=DecisionTreeRegressor(random_state=0);
dt=dt.fit(X_train,y_train.values.ravel())
y_train_pred=dt.predict(X_train)
y_test_pred=dt.predict(X_test)
print("y_train Score : %.3f and y_test score :%.3f" %(dt.score(X_train,y_train),dt.score(X_test,y_test)))
## Random Forest Regressor
rf=RandomForestRegressor(n_estimators=100,
criterion='squared_error', # 'mse' was renamed to 'squared_error' in newer scikit-learn
random_state=1,
n_jobs=-1)
X_train,X_test,y_train,y_test=train_test_split(X_final,y_final,test_size=0.33,random_state=0)
n_scaler=StandardScaler()
X_train=n_scaler.fit_transform(X_train.astype(float))
X_test=n_scaler.transform(X_test.astype(float))
rf=rf.fit(X_train,y_train.values.ravel())
y_train_pred=rf.predict(X_train)
y_test_pred=rf.predict(X_test)
print("y_train score :%.3f and y_test score: %.3f"%(rf.score(X_train,y_train),rf.score(X_test,y_test))) | [
"matplotlib"
] |
f2f3772d22c33ab225566c39dd12a142ee862bf9 | Python | Combustion-Zhen/OpenFOAM_py | /SFD/python2.7/plot_scat.py | UTF-8 | 3,049 | 2.828125 | 3 | [] | no_license | #Zhen Lu, 03/04/2017 <[email protected]>
# plot Sandia Flame results: as in the title, the scatter at different x/D
import glob
from file_read import csv_read, cm2inch, SF_read
# suppress the display of matplotlib.pyplot
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
# data to plot
# only one var, two columns, left: exp., right: sim. rows for x/D
var = 'T'
# import data
xD=[]
data={}
expr={}
for filename in glob.glob('scat*.csv'):
pos = filename.find('.csv')
z = float('{0}.{1}'.format(filename[7:9],filename[9:pos]))
xD.append(z)
data.update({z:csv_read(filename)})
expr.update({z:SF_read('D.scat',filename[7:pos],'all')})
xD.sort()
# plot
# use TEX for interpreter
plt.rc('text',usetex=True)
# use serif font
plt.rc('font',family='serif')
# figure and axes parameters
# total width is fixed
plot_width =19.0
subplot_h =4.0
margin_left =2.0
margin_right =0.3
margin_bottom =1.5
margin_top =1.0
space_width =0.0
space_height =1.0
ftsize =12
# total height determined by the number of vars
plot_height =(subplot_h+space_height)*float(len(xD)) \
-space_height+margin_top+margin_bottom
# min and max of axis
xmin = 0.0
xmax = 1.0
xtick= (0.0,0.2,0.4,0.6,0.8)
# generate the figure
fig, axes = plt.subplots(len(xD),2,
sharex='col',sharey='all',
figsize=cm2inch(plot_width, plot_height))
# generate the axis
for x in xD:
axes[xD.index(x),0].scatter(expr[x]['Z'],expr[x][var],
marker='.',c='k',edgecolor='none')
axes[xD.index(x),1].scatter(data[x]['Z'],data[x][var],
marker='.',c='k',edgecolor='none')
# ylabel, temperature has a unit
if var == 'T':
axes[xD.index(x),0].set_ylabel(r"$\tilde {0}\;(\mathrm{{K}})$".format(var),
fontsize=ftsize)
else:
axes[xD.index(x),0].set_ylabel(r"$\tilde Y\;{0}$".format(var),
fontsize=ftsize)
# location note
# the text position determined by axes axis
axes[xD.index(x),1].text(0.7,2000,'$x/D={0:.2g}$'.format(x),
fontsize=ftsize)
# ylabel, temperature has a unit
# title and xlabel
axes[0,0].set_title('Exp.',fontsize=ftsize)
axes[0,1].set_title('Sim.',fontsize=ftsize)
for i in range(2):
axes[len(xD)-1,i].set_xlim(xmin,xmax)
axes[len(xD)-1,i].set_xticks(xtick)
axes[len(xD)-1,i].set_xlabel(r'$\tilde Z$',fontsize=ftsize)
axes[len(xD)-1,1].set_xticks(xtick+(xmax,))
# legend
# set margins
plt.subplots_adjust(left =margin_left/plot_width,
bottom =margin_bottom/plot_height,
right =1.0-margin_right/plot_width,
top =1.0-margin_top/plot_height,
wspace =space_width/plot_width,
hspace =space_height/plot_height)
# save plot
plt.savefig('radial_scat.png',dpi=400)
plt.savefig('radial_scat.pdf')
plt.savefig('radial_scat.eps')
| [
"matplotlib"
] |
98c417d6de736140ac9bfe9d0bdf19b56028ec4a | Python | Domdoug/LSTM_Mortality_Table_Actuarial | /4-expectativa_vida_lstm-Bidirecional.py | UTF-8 | 13,592 | 2.984375 | 3 | [] | no_license | #!/usr/bin/env python
# coding: utf-8
# In[1]:
# import libraries
import pandas as pd
from pandas.tseries.offsets import DateOffset
import numpy as np
import os
import matplotlib.pyplot as plt
# get_ipython().run_line_magic('matplotlib', 'inline')
#from sklearn.metrics import mean_squared_error
from statsmodels.tools.eval_measures import rmse
from sklearn.preprocessing import MinMaxScaler
from keras.preprocessing.sequence import TimeseriesGenerator
from keras.callbacks import CSVLogger
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers import Dropout
from keras.layers import Bidirectional
import time
import warnings
warnings.filterwarnings("ignore")
# ### Modeling using the embedding package for the Bidirectional network
# #####
# Humans don't start thinking from scratch every second. As you read this essay, you understand each word based on your understanding of the previous words. You don't throw everything away and start thinking from scratch again. Your thoughts have persistence.
#
# Traditional neural networks cannot do this, and it seems like a major shortcoming. For example, imagine you want to classify what kind of event is happening at every moment of a movie. It is not clear how a traditional neural network could use its reasoning about earlier events in the movie to inform the later ones.
#
# Recurrent neural networks address this problem. They are networks with loops, allowing information to persist.
#
# Compared to an LSTM, a BLSTM (or BiLSTM) has two networks: one accesses past information in the forward direction and the other accesses future information in the reverse direction.
#
# LSTMs and their bidirectional variants are popular because they try to learn how and when to forget, and when not to, using gates in their architecture. In earlier RNN architectures, vanishing gradients were a big problem and kept those networks from learning much.
#
# Using Bidirectional LSTMs, you feed the learning algorithm the original data once from beginning to end and once from end to beginning. There is some debate here, but it generally learns faster than the unidirectional approach, although this depends on the task.
#
# #### 1 - load the preprocessed dataset
# In[2]:
# Check the current working directory
pasta = os.getcwd()
# In[3]:
pasta_resultados = os.path.join(pasta, "resultados")
pasta_graficos = os.path.join(pasta, "graficos1")
# In[ ]:
# Regex notation by "\s+". This means a single space, or multiple spaces are all to be treated as a single separator.
# df_dados = pd.read_csv('bltper_1x1.txt', skiprows=2, sep = '\s+')
df_dados = pd.read_csv(os.path.join(pasta, "dados") + "/" + 'bltper_1x1.txt', skiprows=2, sep = '\s+')
# In[ ]:
df_dados.head().append(df_dados.tail())
# #### 2 - create features, among them logqx, which is the death probability already converted to logarithmic scale
#
# In[ ]:
# Handle the 110+ age value across the years.
# DataFrame.loc[condition, column_name] = new_value
df_dados.loc[(df_dados.Age == '110+'),'Age'] = 110
# In[ ]:
# Create the log qx feature
df_dados['logqx'] = np.log(df_dados['qx'])
# While at it, fix the dtype of the Age feature
df_dados["Age"] = df_dados["Age"].astype(int)
# In[ ]:
df_dados.head().append(df_dados.tail())
# In[ ]:
df_dados.shape
# #### 3 - Create the time feature t from the year feature; it is the temporal element of the series
# In[8]:
# Prepare dataset
#serie = {'t': ano, 'logqx_prob': logqx_prob}
df_lstm = pd.DataFrame(df_dados, columns=['Age','Year','logqx'])
df_lstm['t'] = pd.to_datetime(df_lstm['Year'], format='%Y')
#df_lstm.drop(['ano'], axis=1, inplace=True)
df_lstm.set_index('t', inplace=True)
# In[9]:
#df_lstm[df_lstm['t'].dt.year == 1998]
df_lstm.head()
# In[10]:
df_lstm[df_lstm['Age']==0]
# #### 4 - Split the data into training and test sets for each age x across the years t, i.e. for age 0 over the years 1998 to 2018, for age 1 over the same period, and so on.
# ##### LSTM routine, metrics and plots
# In[12]:
# next tests: n_epochs = 1000 and a 3-year test set. Suggestion: in the script version.
# Also use the code to save the training (compile/fit) log
predict_res = []
pred_actual_rmse_res = []
w_max = max(df_dados['Age']) # highest age in the mortality tables. tests: 3
# start the processing timer
start = time.time()
n_input = 30 # 10 # Length of the output sequences (in number of timesteps). Corresponds to the number of data points
# we will use for the network; here, 10 years at age = 0, 10 years at age = 1, etc. Shall we test with 3 years??
n_features = 1 # Number of features (variables). The model is univariate (qx) for each age.
n_epochs = 500 # 1000 #500
n_batch = 2 # Number of timeseries samples in each batch (except maybe the last one).
n_neurons = 50
t_projecao = 30
# (#batch_size,#inputs,#features) 3D
for x in range(0, w_max+1):
# Series for each age across the years 1998 to 2018
#serie = df_lstm[df_lstm['idade']==x]['logqx_prob']
serie = df_lstm[df_lstm['Age']==x].copy() # copy() avoids SettingWithCopyWarning from the inplace drop below
serie.drop(['Age', 'Year'], axis=1, inplace=True)
# Split into training and test sets === prepare data
treino, teste = serie[:-30], serie[-30:]
# Standardize data: normalize between 0 and 1
scaler = MinMaxScaler()
scaler.fit(treino)
treino = scaler.transform(treino)
teste = scaler.transform(teste)
#generator = TimeseriesGenerator(treino, treino, length=n_input, batch_size=n_batch)
# length: The number of lag observations to use in the input portion of each sample (e.g. 3)
# That is the desired number of lag observations to use as input = VAmos tentar 21: 2018-1998
# batch_size: The number of samples to return on each iteration (e.g. 32)
# The samples are not shuffled by default. This is useful for some recurrent neural networks
# like LSTMs that maintain state across samples within a batch.
# both the data and target for this generator is “treino”.
generator = TimeseriesGenerator(treino, treino, length=n_input, batch_size=n_batch)
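# TimeseriesGenerator yields sliding windows: each sample is n_input consecutive
# (scaled) values as input and the immediately following value as the target.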
# ============================ LAYERS =========== LAYERS =================================
# A camada LSTM já possui, em sua construção, funções default de ativação:
# activation="tanh",recurrent_activation="sigmoid",
# três funções sigmoide e 1 tangente hiperbólica
# Modelo bidirecional
model = Sequential()
model.add(Bidirectional(LSTM(n_neurons, return_sequences=True), input_shape=(n_input, n_features)))
model.add(Bidirectional(LSTM(n_neurons)))
model.add(Dropout(0.20))
model.add(Dense(1))
#model.add(Activation('softmax'))
#model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
model.compile(optimizer='adam', loss='mse', metrics=["mae"])
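# The first Bidirectional layer uses return_sequences=True so the full sequence of
# hidden states can feed the second (stacked) Bidirectional layer; Dropout then
# regularizes before the single-unit output that predicts the next scaled logqx.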
#model = Sequential()
# #reshape the data into LSTM required (#batch,#timesteps,#features)
#Adding the first LSTM layer and some Dropout regularisation
#model.add(LSTM(n_neurons, activation='relu', input_shape=(n_input, n_features), return_sequences=True))
#model.add(LSTM(n_neurons, activation='relu', input_shape=(n_input, n_features)))
#model.add(Dropout(0.20))
# Adding a second LSTM layer and some Dropout regularisation
#model.add(LSTM(n_neurons))
#model.add(Dropout(0.20))
# Adding a third LSTM layer and some Dropout regularisation
#model.add(LSTM(n_neurons, return_sequences=True))
#model.add(Dropout(0.20))
# Adding a fourth LSTM layer and some Dropout regularisation
#model.add(LSTM(n_neurons))
#model.add(Dropout(0.20))
# Adding the output layer
#model.add(Dense(1))
# ============================ LAYERS =========== LAYERS =================================
#model.add(Dense(y.shape[1], activation='sigmoid'))
#model.compile(optimizer='adam', loss='mse', metrics=["mae"])
# fit model
#model.fit_generator(generator, epochs=n_epochs)
# ADAPTED FOR THE KERAS UPDATE (2020-11-28)
csv_logger = CSVLogger('log_modelo_demography_bidirecional.csv', append=True, separator=';')
model.fit(generator, epochs=n_epochs, callbacks=[csv_logger])
# model.fit(X_train, Y_train, callbacks=[csv_logger])
# Forecast
pred_list = []
batch = treino[-n_input:].reshape((1, n_input, n_features))
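# Recursive multi-step forecast: each prediction is appended to the input window
# (oldest value dropped), so later steps are conditioned on earlier predictions.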
for i in range(n_input): # n_input
pred_list.append(model.predict(batch)[0])
batch = np.append(batch[:,1:,:], [[pred_list[i]]], axis=1)
#inverse transform forecasts and test. Need to scale them back so we can compare the final results
df_predict = pd.DataFrame(scaler.inverse_transform(pred_list),
index=serie[-n_input:].index, columns=['Prediction'])
df_teste = pd.concat([serie, df_predict], axis=1)
# Plot of the estimate against the test set
#plt.figure(figsize=(10,5))
'''
fig = plt.figure(figsize=(10,5))
ax = fig.add_subplot(111)
ax.plot(df_teste.index, df_teste['logqx'])
ax.plot(df_teste.index, df_teste['Prediction'], color='r')
ax.legend(loc='best', fontsize='xx-large', labels=['logqx', 'Estimativa'])
fig.suptitle('logqx Bi-Direcional LSTM dataset teste na idade = %i' %x, fontweight="bold")
plt.savefig(pasta_graficos + '/' + 'prev_Bi_Direcional_LSTM_test_idade'+str(x)+'.png')
'''
pred_actual_rmse = rmse(df_teste.iloc[-n_input:, [0]], df_teste.iloc[-n_input:, [1]])
print("idade:", x, "rmse: ", pred_actual_rmse)
pred_actual_rmse_res.append(pred_actual_rmse)
treino = serie
scaler.fit(treino)
treino = scaler.transform(treino)
#generator = TimeseriesGenerator(treino, treino, length=n_input, batch_size=n_batch)
#model.fit_generator(generator,epochs=n_epochs)
# ADAPTED FOR THE KERAS UPDATE (2020-11-28)
# length: The number of lag observations to use in the input portion of each sample (e.g. 3)
# batch_size: The number of samples to return on each iteration (e.g. 32)
generator = TimeseriesGenerator(treino, treino, length=n_input, batch_size=n_batch)
#model.fit(generator, epochs=n_epochs, batch_size=n_batch)
model.fit(generator, epochs=n_epochs)
pred_list = []
batch = treino[-n_input:].reshape((1, n_input, n_features))
for i in range(n_input):
pred_list.append(model.predict(batch)[0])
batch = np.append(batch[:,1:,:],[[pred_list[i]]],axis=1)
# forecast t_projecao years ahead
add_dates = [serie.index[-1] + DateOffset(years=x) for x in range(0, t_projecao + 1)]
#add_dates = [serie.index[-1] + pd.offsets.YearBegin(x) for x in range(0,6)]
future_dates = pd.DataFrame(index=add_dates[1:],columns=serie.columns)
#inverse transform forecasts and test. Need to scale them back so we can compare the final results
df_predict = pd.DataFrame(scaler.inverse_transform(pred_list),
index=future_dates[-n_input:].index, columns=['Prediction'])
predict_res.append(df_predict.values.tolist())
df_proj = pd.concat([serie,df_predict], axis=1)
'''
fig = plt.figure(figsize=(10,5))
ax = fig.add_subplot(111) # "111" means "1x1 grid, first subplot"
ax.plot(df_proj.index, df_proj['logqx'])
ax.plot(df_proj.index, df_proj['Prediction'], color='r')
ax.legend(loc='best', fontsize='xx-large', labels=['logqx', 'Predição'])
plt.xticks(fontsize=18)
plt.yticks(fontsize=16)
fig.suptitle('Logqx Bi-Direcional LSTM projetado na idade = %i' %x, fontweight = "bold") # Título parametrizado com a idade
plt.savefig(pasta_graficos + '/' + 'proj_Bi_Direcional_LSTM_log_qx'+str(x)+'.png')
'''
# stop the processing timer
end = time.time()
hours, rem = divmod(end-start, 3600)
minutes, seconds = divmod(rem, 60)
print()
print('Tempo de processamento:')
print('{:0>2}:{:0>2}:{:05.2f}'.format(int(hours), int(minutes), seconds))
print()
# Processing time: 22:32:14.93
# Processing time without printing the plots: 18:35:29.37
# #### 5 - RMSE values per age
# In[13]:
pd.DataFrame(pred_actual_rmse_res) # RMSE for each age
# In[52]:
#### 5 - Resulting dataset for the years 2019 to 2028, per age
# In[14]:
df_lstm_res = pd.DataFrame(predict_res)
# In[15]:
df_lstm_res.head()
# In[16]:
df_lstm_res[0][0][0]
# In[17]:
df_lstm_res.info()
# In[18]:
# Function to explode the lists into rows
def unirSeries(df, explode):
idx = df.index.repeat(df[explode[0]].str.len())
df1 = pd.concat([
pd.DataFrame({x: np.concatenate(df[x].values)}) for x in explode], axis=1)
df1.index = idx
return df1.join(df.drop(explode, axis=1), how='left')
# In[19]:
colunas = np.arange(2020, 2050)
df_temp = pd.DataFrame(predict_res, columns=colunas)
df_lstm_res = unirSeries(df_temp,colunas)
df_lstm_res = df_lstm_res.reset_index(drop=True)
# In[20]:
df_lstm_res.head()
# In[21]:
df_forecast_res_exp = pd.DataFrame(np.exp(df_lstm_res))
# In[22]:
df_forecast_res_exp.head()
# In[ ]:
# Save results
# In[23]:
df_forecast_res_exp.to_csv(pasta_resultados + '/' + 'lstm_previsao_qx_500_Bi_Direcional_demography.csv')
# In[24]:
pd.DataFrame(pred_actual_rmse_res).to_csv(pasta_resultados + '/' + 'pred_actual_rmse_res_500_Bi_Direcional_demography.csv', header=['RMSE'])
# In[ ]:
| [
"matplotlib"
] |
a8e6c9b155b7fea2843ab234592911ffd5c20076 | Python | ideProject/tribo | /matplot_test.py | UTF-8 | 116 | 2.515625 | 3 | [] | no_license | import numpy as np
import matplotlib.pyplot as plt
data = np.random.randn(1000)
plt.hist(data, bins=30)
plt.show() | [
"matplotlib"
] |
c5ccdd97c16a369006efa97019f42fc8d3c7c709 | Python | terdenan/cs102 | /homework04/getHistory.py | UTF-8 | 712 | 2.5625 | 3 | [] | no_license | import requests
import plotly.plotly as py
import plotly.graph_objs as go
from datetime import datetime
from vk_api import get_messages_history
from pprint import pprint as pp
from collections import Counter
def count_dates_from_messages(messages):
def parse(d):
return datetime.fromtimestamp(d).strftime("%Y-%m-%d")
msg_list = [parse(c.get('date')) for c in messages]
counted = Counter(msg_list)
x = []
y = []
for key in counted:
x.append(key)
y.append(counted[key])
return x, y
if __name__ == '__main__':
messages = get_messages_history(223703977)
x, y = count_dates_from_messages(messages)
data = [go.Scatter(x=x, y=y)]
py.plot(data)
| [
"plotly"
] |
a61509497f4b666c0c079910c969cf52ab1dda71 | Python | wubonian/ADSAlgo | /PathPlan/InterpolatingCurve/CubicSpline.py | UTF-8 | 2,337 | 3.40625 | 3 | [] | no_license | import numpy as np
import matplotlib.pyplot as plt
class cubic_spline:
def __init__(self, x, y):
self.x = x
self.y = y
self.n = len(x)
self.a = []
self.b = []
self.c = []
self.d = []
self.d_mtx = []
def calc_d_mtx(self):
A = np.zeros((self.n, self.n))
# Tridiagonal system for the knot derivatives of a cubic spline with unit spacing:
# first row 2*D0 + D1, interior rows D(i-1) + 4*Di + D(i+1), last row D(n-2) + 2*D(n-1)
for i in range(self.n):
if i == 0:
A[i][i] = 2
A[i][i+1] = 1
elif i == self.n - 1:
A[i][i-1] = 1
A[i][i] = 2
else:
A[i][i-1] = 1
A[i][i] = 4
A[i][i+1] = 1
Y = np.zeros((self.n, 1))
for i in range(self.n):
if i == 0:
Y[i] = 3*(self.y[i+1] - self.y[0])
elif i == self.n-1:
Y[i] = 3*(self.y[i] - self.y[i-1])
else:
Y[i] = 3*(self.y[i+1] - self.y[i-1])
IA = np.linalg.inv(A)
self.d_mtx = np.dot(IA, Y)
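# d_mtx now holds the spline's first derivatives at each knot. Note that
# np.linalg.solve(A, Y) would be numerically preferable to forming the explicit inverse.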
def calc_coef(self):
for i in range(self.n-1):
self.a.append(self.y[i])
self.b.append(self.d_mtx[i])
c_tmp = 3*(self.y[i+1] - self.y[i]) - 2*self.d_mtx[i] - self.d_mtx[i+1]
self.c.append(c_tmp)
d_tmp = 2*(self.y[i] - self.y[i+1]) + self.d_mtx[i] + self.d_mtx[i+1]
self.d.append(d_tmp)
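# a, b, c, d are the cubic coefficients on each unit interval, i.e. the Hermite form
# S_i(p) = a_i + b_i*p + c_i*p^2 + d_i*p^3 with p in [0, 1], as evaluated in eval_val below.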
def find_index(self, u):
i = 0
while self.x[i] < u:
i = i+1
if u > self.x[0]:
return i - 1
else:
return 0
def calc_prop(self, i, u):
return (u - self.x[i]) / (self.x[i+1] - self.x[i])
def eval_val(self, u):
i = self.find_index(u)
p = self.calc_prop(i, u)
y = self.a[i] + self.b[i]*p + self.c[i]*p*p + self.d[i]*p*p*p
return y
def main():
x = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
y = [2, 3, 5, 4, 8, 9, 4, 2, 3, 5, 7]
cubic = cubic_spline(x, y)
cubic.calc_d_mtx()
cubic.calc_coef()
xs = np.linspace(0, 10, 1000)
ys = []
for x_tmp in xs:
y_tmp = cubic.eval_val(x_tmp)
ys.append(y_tmp)
plt.figure(figsize=(5, 5))
ax = plt.gca()
ax.set_xlim([-5, 20])
ax.set_ylim([-5, 20])
plt.plot(x, y, '-g')
plt.plot(xs, ys, '-y')
plt.show()
if __name__ == "__main__":
main() | [
"matplotlib"
] |
750dbfa49f1c96652db147d30fbfceb7005fc2fa | Python | geckotian96/qbb2019-answers | /day4-morning/01-histogram.py | UTF-8 | 1,664 | 3.203125 | 3 | [] | no_license | #!/usr/bin/env python3
"""
USAGE: ./00-scatter.py <ctab>
plot fpkm
"""
#../results/stringtie/SRR072893/t_data.ctab
import sys
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as stats
fpkms=[]
for i, line in enumerate (open(sys.argv[1])):
if i ==0:
continue
fields = line.rstrip("\n").split("\t")
if float(fields[-1]) > 0:
fpkms.append(float(fields[-1]))
#notice that fpkms field is a string. you need to convert to float first
#the reason to use log is becuase most of the data gather in the 10000, so need to take log to further visualize the sub columns
my_data = np.log2(fpkms)
# mu = 0
# sigma =1
# x = np.linspace (-15, 15, 100) #range of the normal distribution with how many groups
# y = stats.norm.pdf (x, mu, sigma)
#a=-2.1, mu=5.8, signma = 2.8
mu1=0
sigma1=1
x1 = np.linspace (-15, 15, 100) #range of the normal distribution with how many groups
y1= stats.norm.pdf (x1, mu1, sigma1)
a=-2.1
mu=5.8
sigma=2.8
x2 = np.linspace(-15, 15, 100)
y2= stats.skewnorm.pdf(x2, a, mu, sigma)
fig, ax = plt.subplots()
ax.hist(my_data, bins=100, density=True) #bin=100 can divide the range into 100 columns, default is 10. Density??
ax.plot(x1, y1, label="Normal distribution")
ax.legend()
ax.plot(x2, y2, label="Skew curve")
ax.legend()
ax.set_xlabel ("Log2(FPKM)")
ax.set_ylabel ("Probability")
fig.suptitle("Distribution Curve of FPKM")
plt.text(-15, 0.20, "a=-2.1, mu=5.8, signma=2.8")
fig.savefig("fpkms.png")
plt.close(fig)
#x = np.linspace(norm.ppf(0.01), --> x coordinace
#norm.ppf(0.99), 100)
#ax.plot(x, norm.pdf(x),
#'r-', lw=5, alpha=0.6, label='norm pdf') | [
"matplotlib"
] |
f3f40c5c36a301d20d65a1524362502ed6aa564d | Python | hbates00/12.009-Nonlinear-Dynamics-in-the-Natural-Environment | /PSET 1/PSET4_1.py | UTF-8 | 1,725 | 3.75 | 4 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Thu Mar 16 16:43:08 2017
@author: Haley
"""
#numpy random, generate list of random 1 or neg 1 and sum to get a number -- this is position
import numpy as np
import matplotlib.pyplot as plt
# Gets location after number of steps n and returns a location
def get_location(t):
choice = np.array([-1, 1])
moves = np.random.choice(choice, t)
return np.sum(moves)
# Builds a vector of walker position
def do_the_thing(n, t):
a = list(map(lambda p: get_location(t), range(n))) # list() so the result can be plotted (Python 3 map is lazy)
return a
# Creates and populates an rxr A matrix
def matrix(r):
mat = np.zeros((r, r))
for i in range(r):
for j in range(r):
if j == (i + 1) or j == (i - 1):
mat[i][j] = .5
else:
pass
return mat
# Advances a given number of time steps, returns a vector of probabilities
def do_it(r, t):
t_0 = np.array([0] * r)
t_0[len(t_0)//2] = 1
A = np.linalg.matrix_power(matrix(r), t)
b = np.dot(A, np.transpose(t_0))
return b
# Plots a histagram of walker locations
def plot(n, t, b):
pos = do_the_thing(n, t)
plt.hist(pos, bins = b, color = 'white', label = 'Simulated Values')
# Plots the probability histogram
def plot_probabilities(n, t, r):
probs = do_it(r, t) * 2 * n
x = range((-r/2), (r/2))
plt.plot(x, probs, label = 'Predicted Values')
plot(50000, 50, 15)
plot_probabilities(50000, 50, 50)
plt.xlabel('Position')
plt.ylabel('Number of Walkers at Position')
plt.title('Number of Random Walkers at Position After 50 Steps')
plt.legend()
plt.show() | [
"matplotlib"
] |
66a94bfb126ee8ae2c5afe9d7fc4956e7e3617e9 | Python | samithaj/COPDGene | /feature_selection/aim1_redraw_figure.py | UTF-8 | 849 | 3 | 3 | [] | no_license | """ Draw plots from data in gap_4/features_sel_backward_gap_run_1.csv
Label Y axis
"""
import csv
import matplotlib.pyplot as plt
import numpy as np
# Load GAP values
file_csv = open("gap_4/features_sel_backward_gap_run_1.csv","r")
reader = csv.reader(file_csv)
lines = [line for line in reader]
file_csv.close()
gap_value = []
fs_index = [55]
fs_name = ['pre_FVC']
for i in range(61):
gap_value.append(lines[62-i][1])
fs_index.append(lines[62-i][2])
fs_name.append(lines[62-i][3])
gap_value.append(lines[1][1])
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(range(1,63),gap_value)
ax.plot((13,13),(-0.05,gap_value[12]),'r--')
ax.annotate('MAX(13, 0.2658)',xy=(13,gap_value[12]))
plt.xlabel("The Number of Features")
plt.ylabel("GAP statistic(Clustering Quality)")
plt.title("Backward Search with GAP statistic")
plt.show()
| [
"matplotlib"
] |
82d86634717983bd6aa6c5fa3c596263ec6996cd | Python | seanwayland/matplotlibpractice | /pymaceuticals_starter.py | UTF-8 | 3,244 | 3.3125 | 3 | [] | no_license | #!/usr/bin/env python
# coding: utf-8
# ## Observations and Insights
#
# ## Dependencies and starter code
# In[1]:
# Dependencies and Setup
import matplotlib.pyplot as plt
import pandas as pd
import scipy.stats as st
# Study data files
mouse_metadata = "data/Mouse_metadata.csv"
study_results = "data/Study_results.csv"
# Read the mouse data and the study results
mouse_metadata = pd.read_csv(mouse_metadata)
study_results = pd.read_csv(study_results)
print(mouse_metadata.groupby('Mouse ID').count())
# Combine the data into a single dataset
ds = pd.merge(mouse_metadata, study_results, how="left", on=["Mouse ID"])
#print(ds)
# Generate a summary statistics table of mean, median, variance, standard deviation, and SEM of the tumor volume for each regimen
dsMean = pd.DataFrame(ds.groupby("Drug Regimen").mean())
dsMean = dsMean['Tumor Volume (mm3)']
dsMedian = pd.DataFrame(ds.groupby("Drug Regimen").median())
dsMedian = dsMedian['Tumor Volume (mm3)']
dsVar = pd.DataFrame(ds.groupby("Drug Regimen").var())
dsVar = dsVar['Tumor Volume (mm3)']
dsStd = pd.DataFrame(ds.groupby("Drug Regimen").std())
dsStd = dsStd['Tumor Volume (mm3)']
dsSem = pd.DataFrame(ds.groupby("Drug Regimen").sem())
dsSem = dsSem['Tumor Volume (mm3)']
dsStats = pd.DataFrame([dsMean,dsMedian,dsVar,dsStd,dsSem],index=['mean','Median','Var','Std','SEM'])
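# Alternative sketch (transposed orientation, regimens as rows):
# ds.groupby("Drug Regimen")["Tumor Volume (mm3)"].agg(["mean", "median", "var", "std", "sem"])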
#print(dsStats)
# ## Summary statistics
# In[2]:
# ## Bar plots
# In[3]:
# Generate a bar plot showing number of data points for each treatment regimen using pandas
dsCount = ds.groupby("Drug Regimen").count()
dsCount['data points'] = dsCount['Mouse ID']
ax = dsCount.plot.bar(y = "data points")
# In[4]:
# Generate a bar plot showing number of data points for each treatment regimen using pyplot
#plt.bar(dsCount, x = 'Drug Regimen', y = 'data points', align='center', alpha=0.5)
labels = dsCount.index.tolist()
#print(labels)
plt.bar(labels, dsCount['Mouse ID'])
plt.title('Data Points by Drug Regimen')
#plt.show()
# ## Pie plots
# In[5]:
# Generate a pie plot showing the distribution of female versus male mice using pandas
SexCount = mouse_metadata.groupby("Sex").count()
SexCount['sex'] = SexCount['Mouse ID']
print(SexCount)
#plot = SexCount.plot.pie(y='sex')
# In[6]:
# Generate a pie plot showing the distribution of female versus male mice using pyplot
pie = plt.pie(SexCount['sex'])
plt.show()
# ## Quartiles, outliers and boxplots
# In[7]:
# Calculate the final tumor volume of each mouse across four of the most promising treatment regimens. Calculate the IQR and quantitatively determine if there are any potential outliers.
fourFive = ds[ds['Timepoint']==45]
# get all mice for each drug
# Capomulin, Ramicane, Infubinol, and Ceftamin
# In[8]:
# Generate a box plot of the final tumor volume of each mouse across four regimens of interest
# ## Line and scatter plots
# In[9]:
# Generate a line plot of time point versus tumor volume for a mouse treated with Capomulin
# In[10]:
# Generate a scatter plot of mouse weight versus average tumor volume for the Capomulin regimen
# In[11]:
# Calculate the correlation coefficient and linear regression model for mouse weight and average tumor volume for the Capomulin regimen
# In[ ]:
| [
"matplotlib"
] |
b0ef290ef16eb0c5e793da6db635d312939f0499 | Python | meck93/UU_ML_Project | /dqn/environment.py | UTF-8 | 4,315 | 2.671875 | 3 | [
"MIT"
] | permissive | import gym
import numpy as np
import retro
from baselines.common.atari_wrappers import FrameStack
import cv2
from config import HEIGHT, N_FRAMES, WIDTH # hyperparameters
cv2.ocl.setUseOpenCL(False)
class PreprocessFrames(gym.ObservationWrapper):
def __init__(self, env):
"""Preprocess and wrap frames to HEIGHTxWIDTH."""
gym.ObservationWrapper.__init__(self, env)
self.width = WIDTH
self.height = HEIGHT
self.observation_space = gym.spaces.Box(low=0, high=255, shape=(self.height, self.width, 1), dtype=np.uint8)
def observation(self, frame):
# transform color to grayscale
frame_gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
# crop the image top and bottom since it's static
frame_cropped = frame_gray[9:-35, :]
# normalize the values to range [0,1]
frame_normalized = frame_cropped / 255.0
# resize the cropped image to WIDTHxHEIGHT
frame = cv2.resize(frame_normalized, (self.width, self.height), interpolation=cv2.INTER_AREA)
return frame[:, :, None]
class MarioDiscretizer(gym.ActionWrapper):
"""
Wrap a gym-retro environment and make it use discrete
actions for the SuperMarioBros game.
"""
def __init__(self, env):
super(MarioDiscretizer, self).__init__(env)
# All buttons of the NES
buttons = ['B', None, 'SELECT', 'START', 'UP', 'DOWN', 'LEFT', 'RIGHT', 'A']
# Custom discrete actions defined by ourselves
# Limits the number of possible actions and should improve training time
# actions = [[None], ['LEFT'], ['RIGHT'], ['RIGHT', 'A'], ['RIGHT', 'B'], ['RIGHT', 'A', 'B'], ['A'], ['A', 'A']]
actions = [[None], ['LEFT'], ['RIGHT'], ['RIGHT', 'A'], ['A'], ['A', 'A']]
self._actions = []
for action in actions:
arr = np.array([False] * len(buttons))
for button in action:
arr[buttons.index(button)] = True
self._actions.append(arr)
# maps each action to a discrete number
self.action_space = gym.spaces.Discrete(len(self._actions))
def action(self, a): # pylint: disable=W0221
return self._actions[a].copy()
def make_custom_env(disc_acts=True):
"""
Create an environment with some standard wrappers.
"""
env = retro.make(game='SuperMarioBros3-Nes', state="1Player.World1.Level1.state",
scenario="./data/scenario.json", record="./recordings/")
if disc_acts:
# Build the actions array
env = MarioDiscretizer(env)
# PreprocessFrame
env = PreprocessFrames(env)
# Stack N_FRAMES number of frames
env = FrameStack(env, N_FRAMES)
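# Stacking the last N_FRAMES observations gives the agent short-term motion
# information (velocity/direction) that a single frame cannot provide.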
return env
# TODO: code that can be used to plot the preprocessing
# import matplotlib.pyplot as plt
# f, axs = plt.subplots(2, 2, figsize=(15, 15))
# axs[0, 0].set_title("Raw Input Image")
# axs[0, 0].imshow(frame)
# axs[0, 0].set_ylim((224, 0))
# axs[0, 0].set_yticks(np.arange(0, 225, 224//16))
# axs[0, 0].set_xlim((0, 240))
# axs[0, 0].set_xticks(np.arange(0, 241, 240//16))
# # transform color to grayscale
# frame_gray = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
# axs[0, 1].set_title("Gray-Scale Image")
# axs[0, 1].imshow(frame_gray, cmap="gray", vmin=0, vmax=255)
# axs[0, 1].set_ylim((224, 0))
# axs[0, 1].set_yticks(np.arange(0, 225, 224//16))
# axs[0, 1].set_xlim((0, 240))
# axs[0, 1].set_xticks(np.arange(0, 241, 240//16))
# # crop the image top and bottom since it's static
# frame_cropped = frame_gray[9:-35, :]
# axs[1, 0].set_title("Cropped Image")
# axs[1, 0].imshow(frame_cropped, cmap="gray", vmin=0, vmax=255)
# axs[1, 0].set_ylim((224, 0))
# axs[1, 0].set_yticks(np.arange(0, 225, 224//16))
# axs[1, 0].set_xlim((0, 240))
# axs[1, 0].set_xticks(np.arange(0, 241, 240//16))
# # normalize the values to range [0,1]
# frame_normalized = frame_cropped / 255.0
# # resize the cropped image to WIDTHxHEIGHT
# frame = cv2.resize(frame_normalized, (self.width, self.height), interpolation=cv2.INTER_AREA)
# axs[1, 1].set_title("Downsized Image")
# axs[1, 1].imshow(frame, cmap="gray", vmin=0, vmax=1)
# axs[1, 1].set_ylim((84, 0))
# axs[1, 1].set_yticks(np.arange(0, 85, 84//7))
# axs[1, 1].set_xlim((0, 84))
# axs[1, 1].set_xticks(np.arange(0, 85, 84//7))
# plt.show()
| [
"matplotlib"
] |
b90f4f9d02871e5847d5fc4503074a99cdb46566 | Python | NishanthMHegde/Pandas-practice | /pandasgraphs.py | UTF-8 | 1,042 | 3.421875 | 3 | [] | no_license | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
df1 = pd.read_csv('df1.csv', index_col=0)
df2 = pd.read_csv('df2.csv')
#df1
print("df1")
print(df1.head())
print("\n\n")
#plotting a histogram
print("plotting a histogram")
df1['A'].hist(bins=50)
plt.show()
print("\n\n")
#another way of plotting a histogram
print("another way of plotting a histogram")
df1['B'].plot(kind='hist', bins=50)
plt.show()
print("\n\n")
#yet another way of plotting a histogram
print("yet another way of plotting a histogram")
df1['C'].plot.hist(bins=60)
plt.show()
print("\n\n")
#bar graph
print("bar graph")
df2.plot.bar()
plt.show()
print("\n\n")
#stacked bar graph
print("stacked bar graph")
df2.plot.bar(stacked=True)
plt.show()
print("\n\n")
#line graph
print("line graph")
df2.plot.line(figsize=(10,5))
plt.show()
print("\n\n")
#area graph
print("area graph")
df2.plot.area(alpha=0.75)
plt.show()
print("\n\n")
#scatter graph
print("scatter graph")
df2.plot.scatter(x='a', y='b', s = df2['c'])
plt.show()
print("\n\n") | [
"matplotlib"
] |
0a271df930365656572b30bd0c305611861ba6a3 | Python | pkozilek/kaggle-titanic | /src/features_analysis.py | UTF-8 | 4,452 | 3.078125 | 3 | [] | no_license | import plotly as plt
import numpy as np
import pandas as pd
import functions.graphs as g
features = pd.read_csv('datasets/features.csv', index_col=0)
features_survived = features.loc[features.Survived == 1]
features_died = features.loc[features.Survived == 0]
features_ac = features.loc[features.Age != 0] # Age completed
features_ac_survived = features_ac.loc[features_ac.Survived == 1]
features__ac_died = features_ac.loc[features_ac.Survived == 0]
### Bias Analysis
# Age bias
age_bias = g.histogram(
data_list=[features_survived.Age, features_died.Age],
labels=['Survived', 'Died'],
title='Normalized age bias analysis',
x_bins=[-1, 100, 5],
histnorm='percent',
xaxis_title='Age',
yaxis_title='%'
)
# Sex bias
sex_bias = g.histogram(
data_list=[features_survived.Sex, features_died.Sex],
labels=['Survived', 'Died'],
title='Normalized sex bias analysis',
x_bins=[1, 3, 1],
histnorm='percent',
xaxis_title='Sex',
yaxis_title='%'
)
# Pclass bias
pclass = g.histogram(
data_list=[features_survived.Pclass, features_died.Pclass],
labels=['Survived', 'Died'],
title='Normalized Pclass bias analysis',
x_bins=[0, 20, 1],
histnorm='percent',
xaxis_title='Pclass',
yaxis_title='%'
)
# SibSp bias
sibsp = g.histogram(
data_list=[features_survived.SibSp, features_died.SibSp],
labels=['Survived', 'Died'],
title='Normalized SibSp bias analysis',
x_bins=[0, 9, 1],
histnorm='percent',
xaxis_title='SibSp',
yaxis_title='%'
)
# Parch bias
parch = g.histogram(
data_list=[features_survived.Parch, features_died.Parch],
labels=['Survived', 'Died'],
title='Normalized Parch bias analysis',
x_bins=[0, 7, 1],
histnorm='percent',
xaxis_title='Parch',
yaxis_title='%'
)
# Embarked bias
embarked = g.histogram(
data_list=[features_survived.Embarked, features_died.Embarked],
labels=['Survived', 'Died'],
title='Normalized Embarked bias analysis',
x_bins=[0, 4, 1],
histnorm='percent',
xaxis_title='Embarked',
yaxis_title='%'
)
# Fare bias
fare = g.histogram(
data_list=[features_survived.Fare, features_died.Fare],
labels=['Survived', 'Died'],
title='Normalized Fare bias analysis',
x_bins=[0, 100, 5],
histnorm='percent',
xaxis_title='Fare',
yaxis_title='%'
)
# age_bias.show()
# sex_bias.show()
# pclass.show()
# sibsp.show()
# parch.show()
# embarked.show()
# fare.show()
### Age correlation analysis
age_sex_scatter = g.scatterplot(
x=[features_ac_survived.Age, features__ac_died.Age],
y=[features_ac_survived.Sex, features__ac_died.Sex],
labels=['Survived', 'Died'],
x_jitter=0.5,
y_jitter=0.25,
title='Sex x Age',
xaxis_title='Age',
yaxis_title='Sex'
)
age_pclass_scatter = g.scatterplot(
x=[features_ac_survived.Age, features__ac_died.Age],
y=[features_ac_survived.Pclass, features__ac_died.Pclass],
labels=['Survived', 'Died'],
x_jitter=0.5,
y_jitter=0.25,
title='Pclass x Age',
xaxis_title='Age',
yaxis_title='Pclass'
)
age_sibsp_scatter = g.scatterplot(
x=[features_ac_survived.Age, features__ac_died.Age],
y=[features_ac_survived.SibSp, features__ac_died.SibSp],
labels=['Survived', 'Died'],
x_jitter=0.5,
y_jitter=0.25,
title='SibSp x Age',
xaxis_title='Age',
yaxis_title='SibSp'
)
age_parch_scatter = g.scatterplot(
x=[features_ac_survived.Age, features__ac_died.Age],
y=[features_ac_survived.Parch, features__ac_died.Parch],
labels=['Survived', 'Died'],
x_jitter=0.5,
y_jitter=0.25,
title='Parch x Age',
xaxis_title='Age',
yaxis_title='Parch'
)
age_embarked_scatter = g.scatterplot(
x=[features_ac_survived.Age, features__ac_died.Age],
y=[features_ac_survived.Embarked, features__ac_died.Embarked],
labels=['Survived', 'Died'],
x_jitter=0.5,
y_jitter=0.25,
title='Embarked x Age',
xaxis_title='Age',
yaxis_title='Embarked'
)
age_fare_scatter = g.scatterplot(
x=[features_ac_survived.Age, features__ac_died.Age],
y=[features_ac_survived.Fare, features__ac_died.Fare],
labels=['Survived', 'Died'],
x_jitter=0.5,
y_jitter=0.25,
title='Fare x Age',
xaxis_title='Age',
yaxis_title='Fare'
)
# age_sex_scatter.show()
# age_pclass_scatter.show()
# age_sibsp_scatter.show()
# age_parch_scatter.show()
# age_embarked_scatter.show()
# age_fare_scatter.show() | [
"plotly"
] |
2c0a45344ce5db816a1b9b87232689e56ccbf009 | Python | gauravsaxena1997/pycode | /matplotlib/16.annotations_and_text.py | UTF-8 | 748 | 2.8125 | 3 | [] | no_license | import matplotlib.pyplot as plt
import csv
from matplotlib import style
import numpy as np
style.use('mystyle')
x,y = np.loadtxt('file.txt', delimiter=',', unpack=True)
plt.plot(x,y,label='Loaded from file')
# -------------------Text---------------------
font_dict = {
'family':'serif',
'color':'darkred',
'size':10
}
plt.text(9,6,'Text',fontdict=font_dict)
# -------------------------------------------
# -----------------Annotation----------------
plt.annotate('Annotation',(10,8),
xytext=(0.4,0.9), textcoords='axes fraction',
arrowprops= dict(facecolor='w',color='m') )
# ------------------------------------------
plt.xlabel('xlabel')
plt.ylabel('ylabel')
plt.title('matplotlib\nfirst graph')
plt.legend()
plt.show()
| [
"matplotlib"
] |
0bd83ee004ef026cf05c529462a0d44e0e2fbc13 | Python | dho619/ImageTransformations | /P06/p06.py | UTF-8 | 572 | 2.859375 | 3 | [] | no_license | '''
Perspective transformation
'''
import cv2
import numpy as np
import matplotlib.pyplot as plt
img = cv2.imread('senko.jpeg')
rows,cols,ch = img.shape
pts1 = np.float32([[306,665],[618,652],[278,987],[639,990]])
pts2 = np.float32([[0,0],[300,0],[0,300],[300,300]])
M = cv2.getPerspectiveTransform(pts1,pts2)
dst = cv2.warpPerspective(img,M,(300,300))
cv2.imwrite('senko_TransformacaoDePerspectiva.png', dst)
plt.subplot(121),plt.imshow(img),plt.title('Input')
plt.subplot(122),plt.imshow(dst),plt.title('Output')
plt.show()
print('..|imagem salva com sucesso')
| [
"matplotlib"
] |
34f8902b5967546a30d46dee410e4c779b3f2017 | Python | fgurri/kpi | /nautilus/plots.py | UTF-8 | 62,315 | 2.546875 | 3 | [] | no_license | from django.db import connections
import configparser
import datetime
import pandas as pd
import mysql.connector
from mysql.connector import Error
import plotly as py
import plotly.graph_objs as go
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
import numpy as np
import dateutil.relativedelta
import nautilus.utils as u
import nautilus.queries as q
""" Generates a offline plotly plot with the graph 'total visits per month'.
Return HTML div code that builds the graph. It is necessary to include 'plotly.js'
in your html file.
usage::
>>> import plots
>>> plot = plot_visits_per_month()
>>> render (request, 'someViewWithPlot.html', {'plot_name': plot})
:param:
:rtype: string
"""
def plot_visits_per_month():
try:
connection = connections['datawarehouse']
sql = 'SELECT f_month as Mes, CONCAT(f_year, "-", f_monthname) as MesNom, sum(f_count) as Total FROM datawarehouse.dm1_visits_per_agenda GROUP BY f_month, CONCAT(f_year, "-", f_monthname) ORDER BY f_month ASC'
df = pd.read_sql(sql, connection)
trace_visites = go.Scatter(x=df['MesNom'],
y=df['Total'],
mode='lines+markers',
name='Visites per mes')
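        # Trend line: fit a degree-4 polynomial regression on the month index
        # (0..n-1) and evaluate it at the same points; this smoothing pattern
        # is reused by most plots in this module.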
        linear_x = np.arange(0, len(df)).reshape(-1, 1)
poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(linear_x)
pol_reg = LinearRegression()
pol_reg.fit(X_poly, df['Total'])
predicted_y = pol_reg.predict(poly_reg.fit_transform(linear_x))
trace_regression_visites = go.Scatter(x=df['MesNom'],
y=predicted_y,
mode='lines',
name='Tendencia')
trace_omi_annotation = go.Scatter(x=["2017-Dec", "2017-Dec"],
y=[0, df['Total'].max()],
mode='lines',
name='Inici odontologia a OMI360',
line=dict(dash='dot'))
data = [trace_visites, trace_regression_visites, trace_omi_annotation]
layout = go.Layout(
title='Evolució del número de visites per mes',
titlefont=dict(family='Arial, sans-serif',
size=24,
color='green'),
xaxis=dict(showticklabels=True,
tickangle=45,
tickfont=dict(family='Old Standard TT, serif',
size=14,
color='black'),
showexponent='none'),
yaxis=dict(titlefont=dict(family='Arial, sans-serif',
size=18,
color='lightgrey'),
showticklabels=True,
tickfont=dict(family='Old Standard TT, serif',
size=14,
color='black'),
showexponent='none'))
fig = go.Figure(data=data, layout=layout)
return py.offline.plot(fig, include_plotlyjs=False, output_type='div')
except Error as e:
print("Error while connecting to MySQL", e)
""" Generates a offline plotly plot with the graph 'Distribution of visits per speciality'.
Return HTML div code that builds the graph. It is necessary to include 'plotly.js'
in your html file.
usage::
>>> import plots
>>> plot = plot_distribution_visits_per_speciality('201801', '201812')
>>> render (request, 'someViewWithPlot.html', {'plot_name': plot})
:param p_first_month: starting month of range values in YYYYMM format
:param p_last_month: ending month of range values in YYYYMM format
:rtype: string
"""
def plot_distribution_visits_per_speciality(p_first_month, p_last_month):
try:
connection = connections['datawarehouse']
sql = 'SELECT f_nomEspecialitat as Spec, sum(f_count) as Total FROM datawarehouse.dm1_visits_per_agenda WHERE f_month >= '+str(p_first_month)+' and f_month <= '+str(p_last_month)+' GROUP BY f_nomEspecialitat ORDER BY sum(f_count) DESC'
df = pd.read_sql(sql, connection)
trace = go.Pie(labels=df['Spec'], values=df['Total'])
graph_title = 'Distribució visites per especialitat (Del ' + u.yyyymmToMonthName(p_first_month) + ' al ' + u.yyyymmToMonthName(p_last_month) + ')'
data = [trace]
layout = go.Layout(
title=graph_title,
titlefont=dict(family='Arial, sans-serif', size=24, color='green'),
autosize=False,
width=1000,
height=700,
xaxis=dict(showticklabels=True,
tickangle=45,
tickfont=dict(family='Old Standard TT, serif',
size=14,
color='black'),
showexponent='none'),
yaxis=dict(titlefont=dict(family='Arial, sans-serif',
size=18,
color='lightgrey'),
showticklabels=True,
tickfont=dict(family='Old Standard TT, serif',
size=14,
color='black'),
showexponent='none'))
fig = go.Figure(data=data, layout=layout)
return py.offline.plot(fig, include_plotlyjs=False, output_type='div')
except Error as e:
print("Error while connecting to MySQL", e)
""" Generates a offline plotly plot with the graph 'Visits per month by speciality'.
You can choose to filter by speciality or by agenda, but one of both must be set.
Return HTML div code that builds the graph. It is necessary to include 'plotly.js'
in your html file.
usage::
>>> import plots
>>> plot = plot_visits_per_month_speciality(p_id_especiality=19)
>>> render (request, 'someViewWithPlot.html', {'plot_name': plot})
:param p_id_especiality: speciality identifier
:param p_id_agenda: agenda identifier
:rtype: string
"""
def plot_visits_per_month_speciality(p_id_especiality=None, p_id_agenda=None):
try:
connection = connections['datawarehouse']
sql = 'SELECT CONCAT(f_year, "-", f_monthname) as MesNom, f_month, sum(f_count) as Total FROM datawarehouse.dm1_visits_per_agenda WHERE '
if (p_id_especiality is None) and (p_id_agenda is None):
            p_id_especiality = 19  # general medicine by default
if (p_id_especiality is not None) and (p_id_especiality != ""):
sql = sql + 'f_idEspecialitat='+str(p_id_especiality)+' '
else:
if (p_id_agenda is not None) and (p_id_agenda != ""):
sql = sql + 'f_idAgenda=\''+str(p_id_agenda)+'\' '
sql = sql + 'GROUP BY CONCAT(f_year, "-", f_monthname), f_month ORDER BY f_month ASC'
df = pd.read_sql(sql, connection)
if df.empty:
return None
        linear_x = np.arange(0, len(df)).reshape(-1, 1)
poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(linear_x)
pol_reg = LinearRegression()
pol_reg.fit(X_poly, df['Total'])
predicted_y = pol_reg.predict(poly_reg.fit_transform(linear_x))
trace_regression = go.Scatter(x=df['MesNom'],
y=predicted_y,
mode='lines',
name='Tendencia Especialitat')
trace = go.Scatter(x=df['MesNom'],
y=df['Total'],
mode='lines+markers',
name='Visites per mes')
graph_title = 'Evolució mensual de visites'
if p_id_especiality is not None:
graph_title = q.get_Spec_Name(p_id_especiality) + ': '+ graph_title
if p_id_agenda is not None:
graph_title = q.get_Agenda_Name(p_id_agenda) + ': '+ graph_title
data = [trace, trace_regression]
layout = go.Layout(
title=graph_title,
titlefont=dict(family='Arial, sans-serif', size=24, color='green'),
xaxis=dict(showticklabels=True,
tickangle=45,
tickfont=dict(family='Old Standard TT, serif',
size=14,
color='black'),
showexponent='none'),
yaxis=dict(titlefont=dict(family='Arial, sans-serif',
size=18,
color='lightgrey'),
showticklabels=True,
tickfont=dict(family='Old Standard TT, serif',
size=14,
color='black'),
showexponent='none'))
fig = go.Figure(data=data, layout=layout)
return py.offline.plot(fig, include_plotlyjs=False, output_type='div')
except Error as e:
print("Error while connecting to MySQL", e)
""" Generates a offline plotly plot with the graph 'Frequency by agenda'.
Return HTML div code that builds the graph. It is necessary to include 'plotly.js'
in your html file.
usage::
>>> import plots
>>> plot = plot_frequency_per_agenda(p_id_agenda='AG100')
>>> render (request, 'someViewWithPlot.html', {'plot_name': plot})
:param p_id_agenda: agenda identifier
:rtype: string
"""
def plot_frequency_per_agenda(p_id_agenda):
try:
connection = connections['datawarehouse']
sql = 'SELECT CONCAT(f_year, "-", f_monthname) as MesNom, f_month as Mes, f_count/f_patients as rep FROM datawarehouse.dm1_visits_per_agenda WHERE f_idAgenda=\''+str(p_id_agenda)+'\' ORDER BY f_month ASC'
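        # 'rep' = visits divided by distinct patients in the month, i.e. how
        # often the average patient repeats a visit on this agenda.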
df = pd.read_sql(sql, connection)
trace_frequency = go.Scatter(x=df['MesNom'],
y=df['rep'],
mode='lines+markers',
name='repetitivitat')
        linear_x = np.arange(0, len(df)).reshape(-1, 1)
poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(linear_x)
pol_reg = LinearRegression()
pol_reg.fit(X_poly, df['rep'])
predicted_y = pol_reg.predict(poly_reg.fit_transform(linear_x))
trace_regression = go.Scatter(x=df['MesNom'],
y=predicted_y,
mode='lines',
name='regressio repetitivitat')
graph_title = q.get_Agenda_Name(p_id_agenda) + ': repetitivitat'
data = [trace_frequency, trace_regression]
layout = go.Layout(
title=graph_title,
titlefont=dict(family='Arial, sans-serif', size=24, color='green'),
xaxis=dict(showticklabels=True,
tickangle=45,
tickfont=dict(family='Old Standard TT, serif',
size=14,
color='black'),
showexponent='none'),
yaxis=dict(showticklabels=True,
tickfont=dict(family='Old Standard TT, serif',
size=14,
color='black'),
showexponent='none'))
fig = go.Figure(data=data, layout=layout)
return py.offline.plot(fig, include_plotlyjs=False, output_type='div')
except Error as e:
print("Error while connecting to MySQL", e)
""" Generates a offline plotly plot with the graph 'Patients per month'.
Return HTML div code that builds the graph. It is necessary to include 'plotly.js'
in your html file.
usage::
>>> import plots
>>> plot = plot_patients_per_month()
>>> render (request, 'someViewWithPlot.html', {'plot_name': plot})
:param: None
:rtype: string
"""
def plot_patients_per_month():
try:
connection = connections['datawarehouse']
sql = 'SELECT f_month as Mes , CONCAT(LEFT(f_month, 4), "-", f_monthname) as MesNom, f_patients as Patients, f_new_patients FROM datawarehouse.dm2_stats_per_month ORDER BY f_month ASC'
df = pd.read_sql(sql, connection)
trace_patients = go.Scatter(x=df['MesNom'],
y=df['Patients'],
mode='lines+markers',
name='Total pacients')
        linear_x = np.arange(0, len(df)).reshape(-1, 1)
poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(linear_x)
pol_reg = LinearRegression()
pol_reg.fit(X_poly, df['Patients'])
predicted_y = pol_reg.predict(poly_reg.fit_transform(linear_x))
trace_regression_patients = go.Scatter(x=df['MesNom'],
y=predicted_y,
mode='lines',
name='Tendencia total pacients')
trace_omi_annotation = go.Scatter(x=["2017-Dec", "2017-Dec"],
y=[0, df['Patients'].max()],
mode='lines',
name='Inici odontologia a OMI360',
line=dict(dash='dot'))
graph_title = 'Pacients per mes'
data = [trace_patients, trace_regression_patients, trace_omi_annotation]
layout = go.Layout(
title=graph_title,
titlefont=dict(family='Arial, sans-serif', size=24, color='green'),
xaxis=dict(showticklabels=True,
tickangle=45,
tickfont=dict(family='Old Standard TT, serif',
size=14,
color='black'),
showexponent='none'),
yaxis=dict(showticklabels=True,
tickfont=dict(family='Old Standard TT, serif',
size=14,
color='black'),
showexponent='none'))
fig = go.Figure(data=data, layout=layout)
return py.offline.plot(fig, include_plotlyjs=False, output_type='div')
except Error as e:
print("Error while connecting to MySQL", e)
""" Generates a offline plotly plot with the graph 'New patients per month'.
Return HTML div code that builds the graph. It is necessary to include 'plotly.js'
in your html file.
usage::
>>> import plots
>>> plot = plot_new_patients_per_month()
>>> render (request, 'someViewWithPlot.html', {'plot_name': plot})
:param: None
:rtype: string
"""
def plot_new_patients_per_month():
try:
connection = connections['datawarehouse']
sql = 'SELECT f_month as Mes, CONCAT(LEFT(f_month, 4), "-", f_monthname) as MesNom, f_patients as Patients, f_new_patients as NewPatients FROM datawarehouse.dm2_stats_per_month ORDER BY f_month ASC'
df = pd.read_sql(sql, connection)
trace_new_patients = go.Scatter(x=df['MesNom'],
y=df['NewPatients'],
mode='lines+markers',
name='Pacients nous')
        linear_x = np.arange(0, len(df)).reshape(-1, 1)
poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(linear_x)
pol_reg = LinearRegression()
pol_reg.fit(X_poly, df['NewPatients'])
predicted_y = pol_reg.predict(poly_reg.fit_transform(linear_x))
trace_regression_new_patients = go.Scatter(x=df['MesNom'],
y=predicted_y,
mode='lines',
name='Tendencia nous pacients')
trace_omi_annotation = go.Scatter(x=["2017-Dec", "2017-Dec"],
y=[0, df['NewPatients'].max()],
mode='lines',
name='Inici odontologia a OMI360',
line=dict(dash='dot'))
graph_title = 'Pacients nous per mes'
data = [trace_new_patients, trace_regression_new_patients, trace_omi_annotation]
layout = go.Layout(
title=graph_title,
titlefont=dict(family='Arial, sans-serif', size=24, color='green'),
xaxis=dict(showticklabels=True,
tickangle=45,
tickfont=dict(family='Old Standard TT, serif',
size=14,
color='black'),
showexponent='none'),
yaxis=dict(showticklabels=True,
tickfont=dict(family='Old Standard TT, serif',
size=14,
color='black'),
showexponent='none'))
fig = go.Figure(data=data, layout=layout)
return py.offline.plot(fig, include_plotlyjs=False, output_type='div')
except Error as e:
print("Error while connecting to MySQL", e)
""" Generates a offline plotly plot with the graph 'Distribution patients vs new patients'.
Return HTML div code that builds the graph. It is necessary to include 'plotly.js'
in your html file.
usage::
>>> import plots
>>> plot = plot_distribution_new_patients()
>>> render (request, 'someViewWithPlot.html', {'plot_name': plot})
:param: None
:rtype: string
"""
def plot_distribution_new_patients():
try:
connection = connections['datawarehouse']
sql = 'SELECT f_month as Mes, CONCAT(LEFT(f_month, 4), "-", f_monthname) as MesNom, f_patients-f_new_patients as Patients, f_new_patients as NewPatients FROM datawarehouse.dm2_stats_per_month ORDER BY f_month ASC'
df = pd.read_sql(sql, connection)
trace_new_patients = go.Scatter(x=df['MesNom'],
y=df['NewPatients'],
mode='lines',
name='Pacients nous',
stackgroup='one',
groupnorm='percent')
trace_patients = go.Scatter(x=df['MesNom'],
y=df['Patients'],
mode='lines',
name='Pacients vells',
stackgroup='one')
graph_title = 'Distribució nous pacients'
data = [trace_new_patients, trace_patients]
layout = go.Layout(
title=graph_title,
titlefont=dict(family='Arial, sans-serif', size=24, color='green'),
showlegend=True,
xaxis=dict(
showticklabels=True,
tickangle=45,
tickfont=dict(family='Old Standard TT, serif',
size=14,
color='black'),
type='category',
),
yaxis=dict(
type='linear',
range=[1, 100],
dtick=20,
ticksuffix='%'))
fig = go.Figure(data=data, layout=layout)
return py.offline.plot(fig, include_plotlyjs=False, output_type='div')
except Error as e:
print("Error while connecting to MySQL", e)
""" Generates a offline plotly plot with the graph 'month evolution of new Patients per speciality'.
Return HTML div code that builds the graph. It is necessary to include 'plotly.js'
in your html file.
usage::
>>> import plots
>>> plot = plot_new_patients_per_speciality_per_month()
>>> render (request, 'someViewWithPlot.html', {'plot_name': plot})
:param: None
:rtype: string
"""
def plot_new_patients_per_speciality_per_month():
try:
connection = connections['datawarehouse']
sql = 'SELECT f_nomEspecialitat as Spec, CONCAT(LEFT(f_month,4), "-", f_monthname) as MesNom, f_month as Mes, sum(f_newPatients) as NewPatients FROM dm2_newpatient_per_month_agenda GROUP BY f_nomEspecialitat, CONCAT(LEFT(f_month,4), "-", f_monthname), f_month'
df = pd.read_sql(sql, connection)
arraySpecs = df['Spec'].unique()
df = df.set_index('Spec')
data = list()
for spec in arraySpecs:
df_spec = pd.DataFrame(df.loc[df.index == spec, ['MesNom', 'Mes', 'NewPatients']])
df_spec = df_spec.sort_values(['Mes'], ascending=[1])
trace = go.Scatter(x=df_spec['MesNom'],
y=df_spec['NewPatients'],
mode='lines',
name=spec,
stackgroup='one',
groupnorm='percent')
data.append(trace)
layout = go.Layout(
title='Evolució de la distribució de nous pacients per especialitat',
titlefont=dict(family='Arial, sans-serif', size=24, color='green'),
showlegend=True,
xaxis=dict(
showticklabels=True,
tickangle=45,
tickfont=dict(family='Old Standard TT, serif',
size=14,
color='black'),
type='category',
),
yaxis=dict(
type='linear',
range=[1, 100],
dtick=20,
ticksuffix='%'))
fig = go.Figure(data=data, layout=layout)
return py.offline.plot(fig, include_plotlyjs=False, output_type='div')
except Error as e:
print("Error while connecting to MySQL", e)
""" Generates a offline plotly plot with the graph 'New Patients per Speciality or Agenda'.
You can choose to call by spec o agenda, but not both. If you set both values Spec is used.
Return HTML div code that builds the graph. It is necessary to include 'plotly.js'
in your html file.
usage::
>>> import plots
>>> plot = plot_evolution_new_patients_per_spec(p_id_agenda='100')
>>> render (request, 'someViewWithPlot.html', {'plot_name': plot})
:param: None
:rtype: string
"""
def plot_evolution_new_patients_per_spec(p_id_especiality=None, p_id_agenda=None):
try:
connection = connections['datawarehouse']
sql = 'SELECT f_month as Mes, CONCAT(LEFT(f_month,4), "-", f_monthname) as MesNom, sum(f_newPatients) as NewPatients from dm2_newpatient_per_month_agenda WHERE '
if (p_id_especiality is None or p_id_especiality == '') and p_id_agenda is None:
            p_id_especiality = 19  # general medicine by default
if (p_id_especiality is not None) and p_id_especiality != "":
sql = sql + 'f_idEspecialitat=' + str(p_id_especiality) + ' '
else:
if (p_id_agenda is not None) and p_id_agenda != "":
sql = sql + 'f_idAgenda=\'' + str(p_id_agenda) + '\' '
sql = sql + 'GROUP BY f_month, CONCAT(LEFT(f_month,4), "-", f_monthname) '
sql = sql + 'ORDER BY f_month ASC'
df = pd.read_sql(sql, connection)
if df.empty:
return None
trace_new_patients = go.Scatter(x=df['MesNom'],
y=df['NewPatients'],
mode='lines+markers',
name='Pacients nous')
        linear_x = np.arange(0, len(df)).reshape(-1, 1)
poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(linear_x)
pol_reg = LinearRegression()
pol_reg.fit(X_poly, df['NewPatients'])
predicted_y = pol_reg.predict(poly_reg.fit_transform(linear_x))
trace_regression_new_patients = go.Scatter(x=df['MesNom'],
y=predicted_y,
mode='lines',
name='Tendencia nous pacients')
graph_title = 'Evolució del número de pacients nous'
if p_id_especiality is not None:
graph_title = q.get_Spec_Name(p_id_especiality) + ': '+ graph_title
if p_id_agenda is not None:
graph_title = q.get_Agenda_Name(p_id_agenda) + ': '+ graph_title
data = [trace_new_patients, trace_regression_new_patients]
layout = go.Layout(
title=graph_title,
titlefont=dict(family='Arial, sans-serif', size=24, color='green'),
showlegend=True,
xaxis=dict(
showticklabels=True,
tickangle=45,
tickfont=dict(family='Old Standard TT, serif',
size=14,
color='black'),
type='category',
),
yaxis=dict(showticklabels=True,
tickfont=dict(family='Old Standard TT, serif',
size=14,
color='black'),
showexponent='none'))
fig = go.Figure(data=data, layout=layout)
return py.offline.plot(fig, include_plotlyjs=False, output_type='div')
except Error as e:
print("Error while connecting to MySQL", e)
""" Generates a offline plotly plot with the graph 'Distribution of new patients by speciality'.
Return HTML div code that builds the graph. It is necessary to include 'plotly.js'
in your html file.
usage::
>>> import plots
>>> plot = plot_distribution_new_patients_per_spec('201801', '201812')
>>> render (request, 'someViewWithPlot.html', {'plot_name': plot})
:param p_first_month: starting month of range values in YYYYMM format
:param p_last_month: ending month of range values in YYYYMM format
:rtype: string
"""
def plot_distribution_new_patients_per_spec(p_first_month, p_last_month):
try:
connection = connections['datawarehouse']
sql = 'SELECT f_nomEspecialitat as Spec, sum(f_newPatients) as NewPatients FROM datawarehouse.dm2_newpatient_per_month_agenda WHERE f_month >= '+str(p_first_month)+' and f_month <= '+str(p_last_month)+' GROUP BY f_nomEspecialitat ORDER BY sum(f_newPatients) DESC'
df = pd.read_sql(sql, connection)
trace_new_patients = go.Pie(labels=df['Spec'], values=df['NewPatients'])
graph_title = 'Nous pacients per especialitat (Del ' + u.yyyymmToMonthName(p_first_month) + ' al ' + u.yyyymmToMonthName(p_last_month) + ')'
data = [trace_new_patients]
layout = go.Layout(
title=graph_title,
titlefont=dict(family='Arial, sans-serif', size=24, color='green'),
showlegend=True,
xaxis=dict(
showticklabels=True,
tickangle=45,
tickfont=dict(family='Old Standard TT, serif',
size=14,
color='black'),
type='category',
),
yaxis=dict(showticklabels=True,
tickfont=dict(family='Old Standard TT, serif',
size=14,
color='black'),
showexponent='none'))
fig = go.Figure(data=data, layout=layout)
return py.offline.plot(fig, include_plotlyjs=False, output_type='div')
except Error as e:
print("Error while connecting to MySQL", e)
""" Generates a offline plotly plot with the graph 'New Patients per agenda'.
Optionally, you can show by speciallity. If no speciality is set it shows whole data.
Return HTML div code that builds the graph. It is necessary to include 'plotly.js'
in your html file.
usage::
>>> import plots
>>> plot = plot_first_blood_per_agenda('201801', '201812')
>>> render (request, 'someViewWithPlot.html', {'plot_name': plot})
:param p_first_month: starting month of range values in YYYYMM format
:param p_last_month: ending month of range values in YYYYMM format
:param p_id_especiality: optional identifier of speciality
:rtype: string
"""
def plot_first_blood_per_agenda(p_first_month, p_last_month, p_id_especiality=None):
try:
connection = connections['datawarehouse']
sql = 'SELECT f_nomAgenda as nomAgenda, sum(f_totalVisits) as Total, COUNT(*) as Patients, sum(f_totalVisits)/COUNT(*) as PerPatient FROM datawarehouse.dm_first_visit WHERE f_month between '+str(p_first_month)+' and '+str(p_last_month)
if p_id_especiality is not None:
sql = sql + ' AND f_idEspecialitat=' + str(p_id_especiality) + ' '
sql = sql + ' GROUP BY f_nomAgenda'
sql = sql + ' ORDER BY sum(f_totalVisits) DESC'
df = pd.read_sql(sql, connection)
trace_visits = go.Bar(x=df['nomAgenda'],
y=df['Total'],
name='Total visites al centre')
trace_per_patient = go.Bar(x=df['nomAgenda'],
y=df['PerPatient'],
name='Mitjana per pacient')
graph_title = 'Visites per captació en agenda (Del ' + u.yyyymmToMonthName(p_first_month) + ' al ' + u.yyyymmToMonthName(p_last_month) + ')'
if p_id_especiality is not None:
graph_title = q.get_Spec_Name(p_id_especiality) + ': ' + graph_title
data = [trace_visits, trace_per_patient]
layout = go.Layout(
title=graph_title,
titlefont=dict(family='Arial, sans-serif', size=24, color='green'),
showlegend=True,
xaxis=dict(
showticklabels=True,
tickangle=45,
tickfont=dict(family='Old Standard TT, serif',
size=12,
color='black'),
),
yaxis=dict(showticklabels=True,
tickfont=dict(family='Old Standard TT, serif',
size=14,
color='black'),
showexponent='none'))
fig = go.Figure(data=data, layout=layout)
return py.offline.plot(fig, include_plotlyjs=False, output_type='div')
except Error as e:
print("Error while connecting to MySQL", e)
""" Generates a offline plotly plot with the graph 'Last visits per month'.
Return HTML div code that builds the graph. It is necessary to include 'plotly.js'
in your html file.
usage::
>>> import plots
>>> plot = plot_last_visits_per_month()
>>> render (request, 'someViewWithPlot.html', {'plot_name': plot})
:param: None
:rtype: string
"""
def plot_last_visits_per_month():
try:
connection = connections['datawarehouse']
# we don't want to show future months, so we filter till past month
        last_month = datetime.date.today().strftime('%Y%m')
sql = 'SELECT f_lastmonth as Mes, CONCAT(LEFT(f_lastmonth,4), "-", f_lastmonthname) as MesNom, count(*) as Total FROM datawarehouse.dm_first_visit WHERE f_lastmonth < '+ last_month + ' GROUP BY f_lastmonth, CONCAT(LEFT(f_lastmonth,4), "-", f_lastmonthname) ORDER BY f_lastmonth ASC'
df = pd.read_sql(sql, connection)
trace_visits = go.Scatter(x=df['MesNom'],
y=df['Total'],
mode='lines+markers',
name='Ultimes visites per mes')
        linear_x = np.arange(0, len(df)).reshape(-1, 1)
poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(linear_x)
pol_reg = LinearRegression()
pol_reg.fit(X_poly, df['Total'])
predicted_y = pol_reg.predict(poly_reg.fit_transform(linear_x))
trace_regression_visits = go.Scatter(x=df['MesNom'],
y=predicted_y,
mode='lines',
name='Tendencia')
trace_omi_annotation = go.Scatter(x=["2017-Dec", "2017-Dec"],
y=[0, df['Total'].max()],
mode='lines',
name='Inici odontologia a OMI360',
line=dict(dash='dot'))
data = [trace_visits, trace_regression_visits, trace_omi_annotation]
layout = go.Layout(
title='Evolució del número de últimes visites per mes',
titlefont=dict(family='Arial, sans-serif',
size=24,
color='green'),
xaxis=dict(showticklabels=True,
tickangle=45,
tickfont=dict(family='Old Standard TT, serif',
size=14,
color='black'),
showexponent='none'),
yaxis=dict(titlefont=dict(family='Arial, sans-serif',
size=18,
color='lightgrey'),
showticklabels=True,
tickfont=dict(family='Old Standard TT, serif',
size=14,
color='black'),
showexponent='none'))
fig = go.Figure(data=data, layout=layout)
return py.offline.plot(fig, include_plotlyjs=False, output_type='div')
except Error as e:
print("Error while connecting to MySQL", e)
""" Generates a offline plotly plot with the graph 'Visits per patient'.
Return HTML div code that builds the graph. It is necessary to include 'plotly.js'
in your html file.
usage::
>>> import plots
>>> plot = plot_visits_per_patient()
>>> render (request, 'someViewWithPlot.html', {'plot_name': plot})
:param: None
:rtype: string
"""
def plot_visits_per_patient():
try:
connection = connections['datawarehouse']
df = pd.read_sql('SELECT f_numHistoria as Patient, f_totalVisits as Total FROM datawarehouse.dm_first_visit', connection)
        # cap outliers at 50 for better visualization
        df.loc[df['Total'] > 50, 'Total'] = 50
trace_visits = go.Histogram(x=df['Total'],
name='Visites per pacient')
data = [trace_visits]
layout = go.Layout(
title='Distribució del número de visites per pacient',
titlefont=dict(family='Arial, sans-serif',
size=24,
color='green'),
xaxis=dict(showticklabels=True,
tickangle=45,
tickfont=dict(family='Old Standard TT, serif',
size=14,
color='black'),
showexponent='none',
title='Visites'),
yaxis=dict(showticklabels=True,
title='Número de pacients',
tickformat='.0f',
tickfont=dict(family='Old Standard TT, serif',
size=14,
color='black'),
showexponent='none'))
fig = go.Figure(data=data, layout=layout)
return py.offline.plot(fig, include_plotlyjs=False, output_type='div')
except Error as e:
print("Error while connecting to MySQL", e)
""" Generates a offline plotly plot with the graph 'Distribution casual vs fidelizied'.
Return HTML div code that builds the graph. It is necessary to include 'plotly.js'
in your html file.
usage::
>>> import plots
>>> plot = plot_distribution_casual_vs_fidelizied()
>>> render (request, 'someViewWithPlot.html', {'plot_name': plot})
:param: None
:rtype: string
"""
def plot_distribution_casual_vs_fidelizied():
try:
connection = connections['datawarehouse']
sql = 'SELECT f_month as mes, CONCAT(LEFT(f_month,4), "-", f_monthname) as MesNom, f_casuals as casuals, f_fidelitzats as fidelitzats, f_visits_casuals as visitsCasuals, f_visits_fidelitzats as visitsFidelitzats FROM datawarehouse.dm2_stats_per_month ORDER BY f_month ASC'
df = pd.read_sql(sql, connection)
trace_casual = go.Scatter(x=df['MesNom'],
y=df['casuals'],
mode='lines',
name='Pacients casuals',
stackgroup='one',
groupnorm='percent')
trace_fidelizied = go.Scatter(x=df['MesNom'],
y=df['fidelitzats'],
mode='lines',
name='Pacients fidelitzats',
stackgroup='one')
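        # groupnorm='percent' on the first trace normalizes the whole
        # stackgroup, so both bands are drawn as shares of 100%.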
data = [trace_casual, trace_fidelizied]
layout = go.Layout(
title='Distribució pacients casuals vs fidelitzats',
titlefont=dict(family='Arial, sans-serif', size=24, color='green'),
showlegend=True,
xaxis=dict(
showticklabels=True,
tickangle=45,
tickfont=dict(family='Old Standard TT, serif',
size=14,
color='black'),
type='category',
),
yaxis=dict(
type='linear',
range=[1, 100],
dtick=20,
ticksuffix='%'))
fig = go.Figure(data=data, layout=layout)
plotdiv_patients = py.offline.plot(fig, include_plotlyjs=False, output_type='div')
trace_visits_casual = go.Scatter(x=df['MesNom'],
y=df['visitsCasuals'],
mode='lines',
name='Visites casuals',
stackgroup='one',
groupnorm='percent')
trace_visits_fidelizied = go.Scatter(x=df['MesNom'],
y=df['visitsFidelitzats'],
mode='lines',
name='Visites fidelitzats',
stackgroup='one')
data = [trace_visits_casual, trace_visits_fidelizied]
layout = go.Layout(
title='Distribució visites casuals vs fidelitzats',
titlefont=dict(family='Arial, sans-serif', size=24, color='green'),
showlegend=True,
xaxis=dict(
showticklabels=True,
tickangle=45,
tickfont=dict(family='Old Standard TT, serif',
size=14,
color='black'),
type='category',
),
yaxis=dict(
type='linear',
range=[1, 100],
dtick=20,
ticksuffix='%'))
fig = go.Figure(data=data, layout=layout)
plotdiv_visits = py.offline.plot(fig, include_plotlyjs=False, output_type='div')
return plotdiv_patients, plotdiv_visits
except Error as e:
print("Error while connecting to MySQL", e)
""" Generates a offline plotly plot with the graph 'Distance to last visit'.
Return HTML div code that builds the graph. It is necessary to include 'plotly.js'
in your html file.
usage::
>>> import plots
>>> plot = plot_distance_to_lastmonth()
>>> render (request, 'someViewWithPlot.html', {'plot_name': plot})
:param: None
:rtype: string
"""
def plot_distance_to_lastmonth():
try:
connection = connections['datawarehouse']
df = pd.read_sql('SELECT f_numHistoria as Patient, PERIOD_DIFF(IF(f_lastmonth=f_month,EXTRACT(YEAR_MONTH FROM CURRENT_DATE()),f_lastmonth), f_month) as mesos FROM datawarehouse.dm_first_visit', connection)
df = df[df['mesos']>0]
trace_distance = go.Histogram(x=df['mesos'],
name='Mesos des de última visita')
data = [trace_distance]
layout = go.Layout(
title='Conteig dels mesos que fa que no ve cada pacient',
titlefont=dict(family='Arial, sans-serif',
size=24,
color='green'),
xaxis=dict(showticklabels=True,
tickangle=45,
tickfont=dict(family='Old Standard TT, serif',
size=14,
color='black'),
showexponent='none',
title='Mesos des de última visita'),
yaxis=dict(showticklabels=True,
title='Número de pacients',
tickformat='.0f',
tickfont=dict(family='Old Standard TT, serif',
size=14,
color='black'),
showexponent='none'))
fig = go.Figure(data=data, layout=layout)
return py.offline.plot(fig, include_plotlyjs=False, output_type='div')
except Error as e:
print("Error while connecting to MySQL", e)
""" Callcenter plots to analyse performance.
usage::
>>> import plots
    >>> plots = plots_callcenter_period('01/01/2018', '31/01/2018')
>>> render (request, 'someViewWithPlot.html', {'plot_name': plot[1]})
    :param p_date_ini: string representation of starting date in DD/MM/YYYY format
    :param p_date_fin: string representation of ending date in DD/MM/YYYY format
:rtype: array of plots
"""
def plots_callcenter_period(p_date_ini, p_date_fin):
try:
connection = connections['datawarehouse']
        # format dates for the BETWEEN condition: YYYY/MM/DD
date_ini = datetime.datetime.strptime(str(p_date_ini), "%d/%m/%Y").strftime("%Y/%m/%d")
date_fin = datetime.datetime.strptime(str(p_date_fin), "%d/%m/%Y").strftime("%Y/%m/%d")
sql = "SELECT f_hour, sum(f_total) as total, sum(f_answered) as answered, sum(f_not_answered) as not_answered, if(sum(f_answered)>0,sum(f_not_answered)/sum(f_answered),sum(f_not_answered)) as overcall_factor FROM dm3_callcenter_general WHERE f_day BETWEEN \'"+date_ini+"\' AND \'"+date_fin+"\' and f_dst_id='6000' and f_hour between '07' and '23' GROUP BY f_hour ORDER BY f_hour ASC"
df = pd.read_sql(sql, connection)
if df.empty:
return "No hi han dades en el periode triat.", None, None, None, None, None
trace_answered = go.Scatter(x=df['f_hour'],
y=df['answered'],
mode='lines',
name='Contestades',
stackgroup='one',
fillcolor='#81d386',
line = dict(
color = ('#81d386'),),
hovertemplate = '%{y:.2f}%',
groupnorm='percent')
trace_not_answered = go.Scatter(x=df['f_hour'],
y=df['not_answered'],
mode='lines',
name='No contestades',
hovertemplate = '%{y:.2f}%',
line = dict(
color = ('#f28282'),),
fillcolor= '#f28282',
stackgroup='one')
graph_title = 'Distribució contestades vs no contestades'
data = [trace_answered, trace_not_answered]
layout = go.Layout(
title=graph_title,
titlefont=dict(family='Arial, sans-serif', size=24, color='green'),
showlegend=True,
xaxis=dict(
showticklabels=True,
ticksuffix='h',
tickfont=dict(family='Old Standard TT, serif',
size=14,
color='black'),
tickmode='linear',
),
yaxis=dict(type='linear',
range=[1, 100],
dtick=20,
ticksuffix='%')
)
fig = go.Figure(data=data, layout=layout)
plot_distrib = py.offline.plot(fig, include_plotlyjs=False, output_type='div')
trace_abs_lost_calls = go.Bar(x=df['f_hour'],
y=df['not_answered'],
name='total no agafades')
layout = go.Layout(
title='Quantitat trucades no agafades',
titlefont=dict(family='Arial, sans-serif', size=24, color='green'),
showlegend=True,
xaxis=dict(
showticklabels=True,
ticksuffix='h',
tickfont=dict(family='Old Standard TT, serif',
size=14,
color='black'),
tickmode='linear',
),
)
data=[trace_abs_lost_calls]
fig = go.Figure(data=data, layout=layout)
plot_abs_values = py.offline.plot(fig, include_plotlyjs=False, output_type='div')
df = pd.read_sql("select f_week_day_order, f_week_day, f_hour, sum(f_not_answered) as not_answered, sum(f_total) as total from dm3_callcenter_general where f_day BETWEEN \'"+date_ini+"\' AND \'"+date_fin+"\' and f_dst_id='6000' and f_hour between '07' and '22' group by f_week_day_order, f_week_day, f_hour order by f_week_day_order ASC, f_hour ASC", connection)
days = df['f_week_day'].unique()
hours = sorted(df['f_hour'].unique())
values = []
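        # Build the heatmap z-matrix: one row per hour, one column per weekday;
        # (day, hour) combinations with no calls are filled with 0.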
for hour in hours:
line = []
for day in days:
v = df.loc[(df['f_week_day'] == day) & (df['f_hour'] == hour), 'not_answered'].values
if v.size >0:
line.append(v[0])
else:
line.append(0)
values.append(line)
trace_heatmap_no_answer = go.Heatmap(z=values, x=days, y=hours, colorscale='Reds')
data = [trace_heatmap_no_answer]
layout = go.Layout(
title='No contestades (valor absolut)',
titlefont=dict(family='Arial, sans-serif',
size=24,
color='green'),
yaxis=dict(
showticklabels=True,
ticksuffix='h',
tickfont=dict(family='Old Standard TT, serif',
size=14,
color='black'),
tickmode='linear',
),
)
fig = go.Figure(data=data, layout=layout)
plot_heatmap_no_answer = py.offline.plot(fig, include_plotlyjs=False, output_type='div')
# total calls
total_values = []
for hour in hours:
line = []
for day in days:
v = df.loc[(df['f_week_day'] == day) & (df['f_hour'] == hour), 'total'].values
if v.size >0:
line.append(v[0])
else:
line.append(0)
total_values.append(line)
trace_heatmap_total = go.Heatmap(z=total_values, x=days, y=hours, colorscale='Blues', reversescale=True)
data = [trace_heatmap_total]
layout = go.Layout(
title='Rebudes (valor absolut)',
titlefont=dict(family='Arial, sans-serif',
size=24,
color='green'),
yaxis=dict(
showticklabels=True,
ticksuffix='h',
tickfont=dict(family='Old Standard TT, serif',
size=14,
color='black'),
tickmode='linear',
),
)
fig = go.Figure(data=data, layout=layout)
plot_heatmap_total = py.offline.plot(fig, include_plotlyjs=False, output_type='div')
df = pd.read_sql("select f_day, f_hour, sum(f_not_answered) as not_answered, sum(f_total) as total from dm3_callcenter_general where f_day BETWEEN \'"+date_ini+"\' AND \'"+date_fin+"\' and f_dst_id='6000' and f_hour between '07' and '22' group by f_day, f_hour order by f_day ASC, f_hour ASC", connection)
days = df['f_day'].unique()
hours = sorted(df['f_hour'].unique())
values_day = []
values_total = []
for hour in hours:
line = []
line_total = []
for day in days:
v = df.loc[(df['f_day'] == day) & (df['f_hour'] == hour), 'not_answered'].values
v_total = df.loc[(df['f_day'] == day) & (df['f_hour'] == hour), 'total'].values
if v.size >0:
line.append(v[0])
else:
line.append(0)
if v_total.size >0:
line_total.append(v_total[0])
else:
line_total.append(0)
values_day.append(line)
values_total.append(line_total)
trace_heatmap_per_day_no_answer = go.Heatmap(z=values_day, x=days, y=hours, colorscale='Reds')
data = [trace_heatmap_per_day_no_answer]
layout = go.Layout(
title='No contestades per dia (valor absolut)',
titlefont=dict(family='Arial, sans-serif',
size=24,
color='green'),
yaxis=dict(
showticklabels=True,
ticksuffix='h',
tickfont=dict(family='Old Standard TT, serif',
size=14,
color='black'),
tickmode='linear',
),
)
fig = go.Figure(data=data, layout=layout)
plot_heatmap_per_day_no_answer = py.offline.plot(fig, include_plotlyjs=False, output_type='div')
trace_heatmap_per_day_total = go.Heatmap(z=values_total, x=days, y=hours, colorscale='Blues', reversescale=True)
data = [trace_heatmap_per_day_total]
layout = go.Layout(
title='Rebudes per dia (valor absolut)',
titlefont=dict(family='Arial, sans-serif',
size=24,
color='green'),
yaxis=dict(
showticklabels=True,
ticksuffix='h',
tickfont=dict(family='Old Standard TT, serif',
size=14,
color='black'),
tickmode='linear',
),
)
fig = go.Figure(data=data, layout=layout)
plot_heatmap_per_day_total = py.offline.plot(fig, include_plotlyjs=False, output_type='div')
return plot_distrib, plot_abs_values, plot_heatmap_no_answer, plot_heatmap_total, plot_heatmap_per_day_no_answer, plot_heatmap_per_day_total
except Error as e:
print("Error while connecting to MySQL", e)
""" Callcenter plots representing evolution
usage::
>>> import plots
>>> plots = plots_callcenter_evo()
>>> render (request, 'someViewWithPlot.html', {'plot_name': plot[1]})
:param: None
:rtype: array of plots
"""
def plots_callcenter_evo():
try:
connection = connections['datawarehouse']
current_month = datetime.date.today().strftime('%Y%m')
sql = 'SELECT f_month, sum(f_total) as total, sum(f_answered) as answered, sum(f_not_answered) as not_answered, sum(f_answered)/sum(f_total) as percent_answered, sum(f_not_answered)/sum(f_total) as percent_not_answered FROM datawarehouse.dm3_callcenter_general WHERE f_month < ' + current_month +' GROUP BY f_month ORDER BY f_month ASC'
df = pd.read_sql(sql, connection)
df['f_month_order'] = np.arange(len(df))
trace_total = go.Scatter(x=df['f_month_order'],
y=df['total'],
mode='lines',
name='Rebudes per mes',
line = dict(
color = ('blue'),
dash = 'solid'),)
trace_answered = go.Scatter(x=df['f_month_order'],
y=df['answered'],
mode='lines',
name='Contestades per mes',
line = dict(
color = ('green'),
dash = 'solid',),)
trace_not_answered = go.Scatter(x=df['f_month_order'],
y=df['not_answered'],
mode='lines',
name='No contestades per mes',
line = dict(
color = ('red'),
dash = 'solid',),)
        linear_x = np.arange(0, len(df)).reshape(-1, 1)
poly_reg = PolynomialFeatures(degree=4)
X_poly = poly_reg.fit_transform(linear_x)
pol_reg = LinearRegression()
pol_reg.fit(X_poly, df['total'])
predicted_y_total = pol_reg.predict(poly_reg.fit_transform(linear_x))
pol_reg.fit(X_poly, df['answered'])
predicted_y_answered = pol_reg.predict(poly_reg.fit_transform(linear_x))
pol_reg.fit(X_poly, df['not_answered'])
predicted_y_not_answered = pol_reg.predict(poly_reg.fit_transform(linear_x))
trace_regression_total = go.Scatter(x=df['f_month_order'],
y=predicted_y_total,
mode='lines',
name='Tendencia trucades rebudes',
line = dict(
color = ('blue'),
dash = 'dot'),)
trace_regression_answered = go.Scatter(x=df['f_month_order'],
y=predicted_y_answered,
mode='lines',
name='Tendencia trucades contestades',
line = dict(
color = ('green'),
dash = 'dot'),)
trace_regression_not_answered = go.Scatter(x=df['f_month_order'],
y=predicted_y_not_answered,
mode='lines',
name='Tendencia trucades no contestades',
line = dict(
color = ('red'),
dash = 'dot'),)
data = [trace_total, trace_regression_total, trace_answered, trace_regression_answered, trace_not_answered, trace_regression_not_answered]
layout = go.Layout(
title='Evolució del número de trucades per mes',
titlefont=dict(family='Arial, sans-serif',
size=24,
color='green'),
xaxis=dict(showticklabels=True,
tickangle=60,
tickfont=dict(family='Old Standard TT, serif',
size=10,
color='black'),
showexponent='none',
tickmode = 'array',
tickvals = df['f_month_order'],
ticktext = df['f_month']),
yaxis=dict(titlefont=dict(family='Arial, sans-serif',
size=18,
color='lightgrey'),
showticklabels=True,
tickfont=dict(family='Old Standard TT, serif',
size=14,
color='black'),
showexponent='none'))
fig = go.Figure(data=data, layout=layout)
plot_absolute = py.offline.plot(fig, include_plotlyjs=False, output_type='div')
trace_visits_casual = go.Scatter(x=df['f_month_order'],
y=df['percent_answered'],
mode='lines',
name='Contestades',
stackgroup='one',
groupnorm='percent')
trace_visits_fidelizied = go.Scatter(x=df['f_month_order'],
y=df['percent_not_answered'],
mode='lines',
name='No contestades',
stackgroup='one')
data = [trace_visits_casual, trace_visits_fidelizied]
layout = go.Layout(
title='Distribució contestades vs no contestades mes a mes',
titlefont=dict(family='Arial, sans-serif',
size=24,
color='green'),
xaxis=dict(showticklabels=True,
tickangle=60,
tickfont=dict(family='Old Standard TT, serif',
size=10,
color='black'),
showexponent='none',
tickmode = 'array',
tickvals = df['f_month_order'],
ticktext = df['f_month']),
yaxis=dict(titlefont=dict(family='Arial, sans-serif',
size=18,
color='lightgrey'),
showticklabels=True,
tickfont=dict(family='Old Standard TT, serif',
size=14,
color='black'),
showexponent='none'))
fig = go.Figure(data=data, layout=layout)
plot_distrib_percent = py.offline.plot(fig, include_plotlyjs=False, output_type='div')
return plot_absolute, plot_distrib_percent, plot_absolute
except Error as e:
print("Error while connecting to MySQL", e)
def plots_ext_performance(p_date_ini, p_date_fin):
try:
connection = connections['datawarehouse']
        # format dates for the BETWEEN condition: YYYY/MM/DD
date_ini = datetime.datetime.strptime(str(p_date_ini), "%d/%m/%Y").strftime("%Y/%m/%d")
date_fin = datetime.datetime.strptime(str(p_date_fin), "%d/%m/%Y").strftime("%Y/%m/%d")
sql = "SELECT f_extension as extension, sum(f_answered) as answered, sum(f_spoken_time) as spoken_time, sum(f_spoken_time)/sum(f_answered) as time_per_call FROM dm3_callcenter_per_extension WHERE f_day BETWEEN \'"+date_ini+"\' AND \'"+date_fin+"\' and f_extension IN (100, 101, 102, 104, 111, 112) GROUP BY f_extension"
df = pd.read_sql(sql, connection)
lines = []
line = {}
calls = 0
spoken_time = 0
time_per_call = 0
total = 0
for index, row in df.iterrows():
calls += row[1]
spoken_time += row[2]
time_per_call += row[3]
total += 1
line = {'extension': row[0], 'answered': f'{row[1]:10.0f}', 'spoken_time': f'{row[2]/3600:10.2f}', 'time_per_call': f'{row[3]/60:10.2f}'}
lines.append(line)
# add averages
if total > 0:
line = {'extension': 'promig', 'answered': f'{calls/total:10.0f}', 'spoken_time': f'{spoken_time/(3600*total):10.2f}', 'time_per_call': f'{time_per_call/(60*total):10.2f}'}
lines.append(line)
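        # Rows are pre-formatted with f-strings (fixed width, fixed decimals)
        # so they can be rendered as aligned columns downstream.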
sql = "SELECT f_extension as extension, f_day, f_hour, sum(f_answered) as answered, sum(f_spoken_time) as spoken_time, sum(f_spoken_time)/sum(f_answered) as time_per_call FROM dm3_callcenter_per_extension WHERE f_day BETWEEN \'"+date_ini+"\' AND \'"+date_fin+"\' and f_extension IN (100, 101, 102, 104, 111, 112) GROUP BY f_extension, f_day, f_hour ORDER BY f_extension, f_day, f_hour"
df = pd.read_sql(sql, connection)
extensions = sorted(df['extension'].unique())
days = sorted(df['f_day'].unique())
hours = sorted(df['f_hour'].unique())
plots = []
for extension in extensions:
values_day = []
for hour in hours:
line = []
for day in days:
v = df.loc[(df['extension'] == extension) & (df['f_day'] == day) & (df['f_hour'] == hour), 'answered'].values
if v.size >0:
line.append(v[0])
else:
line.append(0)
values_day.append(line)
trace_heatmap_per_extension = go.Heatmap(z=values_day, x=days, y=hours, colorscale='Reds', name=extension)
graph_title = 'Trucades ateses per l\'extensió ' + extension
layout = go.Layout(
title=graph_title,
titlefont=dict(family='Arial, sans-serif',
size=24,
color='green'),
yaxis=dict(
showticklabels=True,
ticksuffix='h',
tickfont=dict(family='Old Standard TT, serif',
size=14,
color='black'),
tickmode='linear',
),
)
heatmap_data = [trace_heatmap_per_extension]
fig = go.Figure(data=heatmap_data, layout=layout)
plots.append(py.offline.plot(fig, include_plotlyjs=False, output_type='div'))
return lines, plots
except Error as e:
print("Error while connecting to MySQL", e)
| [
"plotly"
] |
8fefca3afd7864bd1ae1df08b3c1e58f494d01b4 | Python | antoniosj/data-science-playground | /machine-learning-az/data_processing_template.py | UTF-8 | 1,923 | 3.609375 | 4 | [] | no_license | # data preprocessing
# importing libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# importing dataset
dataset = pd.read_csv('Data.csv')
# creates matrix of independent variables: every column except the last
X = dataset.iloc[:, :-1].values
# creates matrix of dependent variable. The last column.
lastColumn = 3
Y = dataset.iloc[:, lastColumn].values
"""
# fill missing values with the column mean
from sklearn.preprocessing import Imputer
imputer = Imputer(missing_values = 'NaN', strategy = 'mean', axis = 0)
# fit columns at index 1 and 2
imputer = imputer.fit(X[:, 1:3])
X[:, 1:3] = imputer.transform(X[:, 1:3])"""
"""
# Encoding categorical data
# Columns that share a category pattern (e.g. 10 rows with 3 country values are
# encoded as 0, 1 and 2, then expanded into 3 indicator values such as (0, 0, 1))
# so the ML model does not assume one category is greater than another
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
# X = independent columns; X[:, 0] = all rows of column 0
labelEncoder_X = LabelEncoder()
X[:, 0] = labelEncoder_X.fit_transform(X[:, 0])
# OneHotEncoder turns the categories into a table of 0/1 indicators
oneHotEncoder = OneHotEncoder(categorical_features = [0])
# remember to format the values as .0f
X = oneHotEncoder.fit_transform(X).toarray()
# Y = dependent column
labelEncoder_Y = LabelEncoder()
# since this column has only two values (0 and 1), OneHotEncoder is not needed.
Y = labelEncoder_Y.fit_transform(Y)
"""
# Splitting the dataset into training set and Test set
from sklearn.model_selection import train_test_split
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size = 0.2, random_state = 0)
# Feature scaling
# Standard or Normalization
from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)
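# Note: the scaler is fit on the training set only and then reused to transform
# the test set, which avoids leaking test-set statistics into training.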
| [
"matplotlib"
] |
4b56ae0997fede4b989912d881f79888d454d73a | Python | qapquiz/TSP_With_GA | /main.py | UTF-8 | 4,965 | 3.296875 | 3 | [] | no_license | from Town import Town
from costMatrix import CostMatrix
from salesMan import SalesMan
import random
#import matplotlib.pyplot as plt
#initial variables for the Genetic Algorithm
MAX_ITERATION = 1000
MAX_POPULATION = 20
PC = 0.8
PM = 0.05
#create list of object town
townList = list()
with open("town.txt", 'r') as townData:
    for rawLine in townData:
        line = rawLine.split()
        if not line:
            break
        townList.append(Town(line[0], line[1], line[2]))
#createCostMatrix
costMatrixDict = CostMatrix(townList).createCostMatrix()
#create population
populationList = list()
for i in range(20):
populationList.append(SalesMan())
populationList[i].randomFirstPath(townList)
populationList[i].calculateFitness(costMatrixDict, townList)
#start iteration
iteration = 1
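# Main GA loop: each generation applies roulette-wheel selection, a cut-and-fill
# crossover (the first 100 towns come from one parent, the remaining towns keep
# the other parent's order), swap mutation on the children, and truncation of
# the re-sorted population back to MAX_POPULATION.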
while iteration <= MAX_ITERATION:
#roulette selection
#sort populationList
populationList = sorted(populationList, key=lambda population: population.fitness)
#end sort populationList
sumFitness = 0
for population in populationList:
sumFitness = sumFitness + population.getFitness()
probability = 0
sumProbabilities = 0
for population in populationList:
population.setProbability(sumProbabilities + ((float(population.getFitness()) / float(sumFitness))))
sumProbabilities += population.getProbability() - sumProbabilities
population.setProbability(1 - population.getProbability())
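        #note: the complemented cumulative shares become monotonically growing
        #selection thresholds once the list is reversed below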
#print "fitness: " + str(population.getFitness())
#print "prob: " + str(population.getProbability())
#selection phase
populationList = populationList[::-1]
populationIndex = 0
populationIndexSelectionList = list()
#/2
while (len(populationIndexSelectionList) != MAX_POPULATION):
randNumber = random.uniform(0, 1)
for population in populationList:
if randNumber < population.getProbability():
populationIndexSelectionList.append(populationIndex)
break
#if populationIndex not in populationIndexSelectionList:
# populationIndexSelectionList.append(populationIndex)
# break
populationIndex = populationIndex + 1
populationIndex = 0
populationSelectionList = list()
#/2
for i in range(MAX_POPULATION):
populationSelectionList.append(populationList[populationIndexSelectionList[i]])
#print "SelectionList: " + str(populationIndexSelectionList)
#end selection phase
#crossover phase
crossoverCount = 0
while crossoverCount < (MAX_POPULATION):
parent1 = populationSelectionList[crossoverCount]
parent2 = populationSelectionList[crossoverCount+1]
#occur crossover
if random.uniform(0, 1) < PC:
child1 = SalesMan()
child2 = SalesMan()
#child1
child1.setPath(parent1.getPath()[:100])
while len(child1.getPath()) != len(townList):
parent2Path = parent2.getPath()
for town in parent2Path:
if town not in child1.getPath():
child1.getPath().append(town)
#child2
child2.setPath(parent2.getPath()[:100])
while len(child2.getPath()) != len(townList):
parent1Path = parent1.getPath()
for town in parent1Path:
if town not in child2.getPath():
child2.getPath().append(town)
populationList.append(child1)
populationList.append(child2)
#not occur crossover then copy parent to child
else:
child1 = SalesMan()
child2 = SalesMan()
child1.setPath(parent1.getPath())
child1.setFitness(parent1.getFitness())
child2.setPath(parent2.getPath())
child2.setFitness(parent2.getFitness())
populationList.append(child1)
populationList.append(child2)
crossoverCount = crossoverCount + 2
#print "========================================================================="
#print child1.getPath()
child1.calculateFitness(costMatrixDict, townList)
#print child1.getFitness()
#end crossover phase
#end roulette selection
#print "len of populationList: " + str(len(populationList))
calculateFitnessCount = 20
while calculateFitnessCount < len(populationList):
populationList[calculateFitnessCount].calculateFitness(costMatrixDict, townList)
calculateFitnessCount = calculateFitnessCount + 1
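    #mutation phase: for each child (index 20 and up), positions flagged with
    #probability PM are paired up and the two towns at each pair are swapped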
mutationIndex = 0
for population in populationList:
if mutationIndex >= 20:
pathIndex = 0
pathIndexList = list()
for path in population.getPath():
if random.uniform(0, 1) < PM:
pathIndexList.append(pathIndex)
if len(pathIndexList) == 2:
temp = population.getPath()[pathIndexList[0]]
population.getPath()[pathIndexList[0]] = population.getPath()[pathIndexList[1]]
population.getPath()[pathIndexList[1]] = temp
pathIndexList = list()
pathIndex = pathIndex + 1
mutationIndex = mutationIndex + 1
#sort populationList
populationList = sorted(populationList, key=lambda population: population.fitness)
#end sort populationList
#for population in populationList:
# print population.getFitness()
    # truncate the population back to MAX_POPULATION survivors
    del populationList[MAX_POPULATION:]
iteration = iteration + 1
#end iteration
print(populationList[0].getPath())
print("Total distance: " + str(populationList[0].getFitness()))
#return Answer | [
"matplotlib"
] |
e6075aefd576879dd36f907009922e0cfac9657a | Python | Tusharcoder18/Sea-Level-Predictor | /sea_level_predictor.py | UTF-8 | 1,236 | 3.390625 | 3 | [] | no_license | import pandas as pd
import matplotlib.pyplot as plt
from scipy.stats import linregress
def draw_plot():
# Read data from file
df = pd.read_csv('epa-sea-level.csv')
# Create scatter plot
plt.scatter(x='Year', y='CSIRO Adjusted Sea Level', data=df)
# Create first line of best fit
lin_result = linregress(x=df['Year'], y=df['CSIRO Adjusted Sea Level'])
slope = lin_result.slope
intercept = lin_result.intercept
year_extended = pd.Series([int(i) for i in range(1880, 2050)])
best_fit1 = slope * year_extended + intercept
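    # best fit line: y = slope * year + intercept, extrapolated through 2049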
plt.plot(year_extended, best_fit1, 'r')
# Create second line of best fit
recent = df[df['Year'] >= 2000]
lin_result = linregress(x=recent['Year'], y=recent['CSIRO Adjusted Sea Level'])
slope = lin_result.slope
intercept = lin_result.intercept
year_extended = pd.Series([int(i) for i in range(2000, 2050)])
best_fit2 = intercept + slope * year_extended
plt.plot(year_extended, best_fit2, 'g')
# Add labels and title
plt.xlabel('Year')
plt.ylabel('Sea Level (inches)')
plt.title('Rise in Sea Level')
# Save plot and return data for testing (DO NOT MODIFY)
plt.savefig('sea_level_plot.png')
return plt.gca() | [
"matplotlib"
] |
d20403de3a82124f0bc1b9c065774658c47e245a | Python | Karel-van-de-Plassche/petra-plot | /DSC.py | UTF-8 | 3,853 | 3.09375 | 3 | [
"MIT"
] | permissive | import matplotlib as mpl
from matplotlib import cycler
import matplotlib.pyplot as plt
plt.style.use('./paper.mplstyle') # Choose the settings file to use
import numpy as np
from IPython import embed
import pandas as pd
from itertools import chain
import os
def load_file(filename):
with open(filename, 'r', encoding = "ISO-8859-1") as file_:
lines = file_.readlines() # Read the complete file
dfs = []
while len(lines) > 0: # Keep reading until all chunks are read
name_ind = lines.index('Curve Name:\n') # Look for 'Curve Name:'
del lines[name_ind]
curve_name = lines[name_ind].strip()
print(curve_name)
start_ind = lines.index('Curve Values:\n') # Values start after 'Curve Values'
column_names = lines[start_ind+1].split() # The line after that are the column names
column_units = lines[start_ind+2].split() # And after that the units
results_ind = lines.index('Results:\n') # The values stop when we find 'Results:'
df = pd.DataFrame(np.loadtxt(lines[start_ind+3:results_ind]),
columns=column_names) # Now put it in a table
df.set_index('Tr', inplace=True) # The 'x-axis' is Tr
dfs.append(df['Value']) # And we only need the Value column
try:
end_ind = lines.index('Curve Name:\n') # Try to find the next chunk
except ValueError:
end_ind = len(lines) # If we can't find it, we're done!
del lines[:end_ind]
#results = pd.concat(dfs, axis=1) # Now, merge all chuncks together
heating = pd.concat(dfs[1::2], axis=1) # Merge all heating chunks
cooling = pd.concat(dfs[0::2], axis=1) # And all cooling chunks
for set in chain([heating, cooling]): # For both heating and cooling
set.columns = reversed(range(1, len(set.columns) + 1)) # Number them N..1
set.index.name = 'Temperature [$\degree$C]' # And rename the x-axis
heating.columns = ['Heating ' + str(col) for col in heating.columns] # Now prepend Heating to the column names
cooling.columns = ['Cooling ' + str(col) for col in cooling.columns] # And Cooling
#labels = []
#for ii in range(1, len(results.columns) // 2 + 1):
# labels.append('Heating ' + str(ii))
# labels.append('Cooling ' + str(ii))
#labels = list(reversed(labels))
#results.columns = labels
return heating, cooling
def plot_heating_cooling(heating, cooling, shift=0.2, base_shift=0.0):
fig = plt.figure()
ax = fig.add_subplot(111)
for ii in range(1, len(cooling.columns) + 1):
cooling.iloc[:, ii:] = cooling.iloc[:, ii:] + shift # Shift all curves, the 2nd one the most
cooling += base_shift # And shift all columns a set amount
for ii in range(1, len(heating.columns) + 1):
heating.iloc[:, ii:] = heating.iloc[:, ii:] - shift
heating -= base_shift
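    # The cumulative shifts fan the curves out vertically (a "waterfall"
    # layout) so repeated heating/cooling runs do not overlap on the plot.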
for ii in range(len(heating.columns)):
for set in chain([heating, cooling]): # Now plot Heating ii and Cooling ii in pairs
ax.plot(set.iloc[:, ii], label=set.iloc[:, ii].name)
ax.legend() # Plot the legend
#cooling.plot(ax=ax)
#heating.plot(ax=ax)
cmap = plt.get_cmap('tab20') # Choose the colors by name: https://matplotlib.org/examples/color/colormaps_reference.html
plt.rc('axes', prop_cycle=(cycler('color', cmap.colors)))
root = 'DSC' # This is the main folder to look for files
for filename in os.listdir(root): # For every folder in the root folder
if filename.endswith('.txt'): # If it ends with .txt
path_to_file = os.path.join(root, filename)
heating, cooling = load_file(path_to_file) # Read the file and put it in a table
plot_heating_cooling(heating, cooling, shift=0.2, base_shift=0.0) # And plot the curves
plt.show()
| [
"matplotlib"
] |
602036d1f09959ff9dbdad8a43cf2c26d17446d1 | Python | marcinu456/Modelowanie-Komputerowe | /Nowy folder (2)/Nowy folder/WykresyPython/zad3.py | UTF-8 | 1,258 | 3.390625 | 3 | [] | no_license | import sys
import numpy as np
from numpy.lib import median
import matplotlib.pyplot as plt
def gauss(data):
mean = sum([ k*v for k, v in data.items()])/sum(data.values())#średnia
mse = sum([v*(k - mean)**2 for k, v in data.items()])/sum(data.values())#wariacia
rmse = np.sqrt(mse)#odchylenie
mx = max([ v for k, v in data.items()])
med = np.median([ k*v for k, v in data.items()])
ret = [ (mx * np.exp( -(((x - med))**2)/(2*mse) )) for x in data.keys()]#rozklad normalny
print("dzieci najprawdopodbniej są w odlgełośći ",3*rmse, "kroków")
return ret
def main():
x = np.loadtxt('spacery/odlegloscPo1000.txt', unpack=True)
valCount = dict()
for pos in x:
if pos in valCount:
valCount[pos] += 10
else:
valCount[pos] = 0
valCount = {k: v for k, v in sorted(valCount.items(), key=lambda item: item[0])}
plt.plot(valCount.keys(), gauss(valCount))
plt.xlabel("X")
plt.ylabel("dzieci")
if __name__ == "__main__":
main()
plt.legend()
plt.show()
# It is more likely that a child ends up close to distance 1.
# The 3-sigma distance can be treated as the cutoff within which
# 99.7% of all children are found.
| [
"matplotlib"
] |
69dc68b6961443954bc6396ec43c203db782a761 | Python | edfong/npl | /experiments/LogReg_ARD/run_NPL_logreg.py | UTF-8 | 2,971 | 2.71875 | 3 | [
"BSD-3-Clause"
] | permissive | """
main script for running NPL
"""
import numpy as np
import scipy as sp
import pandas as pd
import matplotlib.pyplot as plt
import copy
import time
from npl import bootstrap_logreg as bbl
import pickle
def load_data(dataset,seed):
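# Load one of three preprocessed datasets (Polish, Adult, Arcene) for the given seed and
# return targets, features, the paper's prior/loss settings, and the data dimensions.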
#load polish
if dataset == 'Polish':
year = 3
with open('./data/pc_train_y{}_seed{}'.format(year,seed), 'rb') as handle:
pc_train = pickle.load(handle)
#Move into vectors
y = pd.to_numeric(pc_train['y'].values[:,0])
x = pc_train['x'].values
D_data = pc_train['D']
N_data = pc_train['N']
#prior and loss settings from paper
alph_conc = 0 #prior strength
gamma = 1/N_data #loss scaling relative to log-likelihood
#load adult
if dataset == 'Adult':
with open('./data/ad_train_seed{}'.format(seed), 'rb') as handle:
ad_train = pickle.load(handle)
#Move into vectors
y = np.uint8(ad_train['y'])[:,0]
x = ad_train['x'].values
D_data = ad_train['D']
N_data = ad_train['N']
#prior and loss settings from paper
alph_conc = 0
gamma = 1/N_data
#load arcene
if dataset == 'Arcene':
with open('./data/ar_train_seed{}'.format(seed), 'rb') as handle:
ar_train = pickle.load(handle)
N_data = ar_train['N']
D_data = ar_train['D']
y = np.int8(ar_train['y'].values.reshape(N_data,))
x = ar_train['x'].values
#prior and loss settings from paper
alph_conc = 1
gamma = 1/N_data
return y,x,alph_conc,gamma,N_data,D_data
def main(dataset, B_postsamples):
#same parameters between datasets
T_trunc = 100
a = 1 # presumably the shape of the gamma hyperprior
b = 1 # rate of gamma hyperprior
for i in range(30):
seed = 100+i
np.random.seed(seed)
y,x,alph_conc,gamma,N_data,D_data = load_data(dataset,seed)
start= time.time()
#carry out posterior bootstrap
beta_bb, ll_b = bbl.bootstrap_logreg(B_postsamples,alph_conc,T_trunc,y,x,N_data,D_data,a,b,gamma)
end = time.time()
print ('Time elapsed = {}'.format(end - start))
#convert to dataframe and save
dict_bb = {'beta': beta_bb, 'll_b': ll_b, 'time': end-start}
par_bb = pd.Series(data = dict_bb)
#Polish
if dataset == 'Polish':
par_bb.to_pickle('./parameters/par_bb_logreg_c{}_a{}_b{}_gN_pol_B{}_seed{}'.format(alph_conc,a,b,B_postsamples,seed))
#Adult
if dataset == 'Adult':
par_bb.to_pickle('./parameters/par_bb_logreg_c{}_a{}_b{}_gN_ad_B{}_seed{}'.format(alph_conc,a,b,B_postsamples,seed))
#Arcene
if dataset == 'Arcene':
par_bb.to_pickle('./parameters/par_bb_logreg_c{}_a{}_b{}_gN_ar_B{}_seed{}'.format(alph_conc,a,b,B_postsamples,seed))
if __name__=='__main__':
main('Polish',2000)
main('Adult',2000)
main('Arcene',2000)
| [
"matplotlib"
] |
fcf7e4efb410c260f3b0ae77d795ab050cb568dc | Python | shadimohagheghi/Generative-Models-for-Multigroup-Connectivity-Structures | /Liaison_model.py | UTF-8 | 5,124 | 2.578125 | 3 | [] | no_license | import networkx as nx
from allfunctions import draw_degdist
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from almost_clique import almost_clique
matplotlib.rc('xtick', labelsize=25)
matplotlib.rc('ytick', labelsize=25)
plt.close("all")
##########################################################
##########################################################
def Liaison_model():
clique_size = [3,4,5,6,7,8,9,10]; # clique sizes
clique_edges=np.asarray(clique_size)
clique_edges = np.divide(np.multiply(clique_edges,clique_edges+1),2)
ep = 0.1;
clique_num = np.zeros(len(clique_size)); # number of cliques of given size
for i in range(len(clique_size)):
clique_num[i] = int(2.5321*(50.0/(clique_size[i])**3));
clique_num = list(map(int,clique_num)) # list() so the counts can be reused below
# comment this next line to generate a small sample graph easy to visualize
#clique_num = [0,7,3,1,0,0,0,0]
###########################################################
###########################################################
def li_num(size): # number of liaisons given clique size
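# Draws 1 + Binomial(2, size^2/200): the chance of extra liaisons grows quadratically with clique size, capped at 3.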
maxli = 2; p = float(size)**2/200;
a = np.random.binomial(maxli, p, size=1) +1
return a
###########################################################
###########################################################
# almost cliques are generated
Gclique,li,li_cliquewise,clique_sizes,all_liaisons,clique_lead,nstart = almost_clique(clique_size,clique_num,li_num)
print(len(clique_lead), 'cliques formed')
################################################
#### liaison model #############################
m = 2; ext_li = int(float(sum(clique_num))/m); # deciding number of external liaisons
Gli = nx.barabasi_albert_graph(ext_li,2); #only option for external liaison model
print("Barabasi Albert Graph =", Gli.edges())
plt.figure(1)
nx.draw(Gli, pos=nx.circular_layout(Gli))
limodel_deglist = np.zeros(len(Gli));
for i in range(len(Gli)):
limodel_deglist[i] = len(Gli[i])
ord_limodel = sorted(range(len(limodel_deglist)),key=lambda x:limodel_deglist[x])
print("ord_limodel =", ord_limodel)
clique_ext_list = np.zeros(sum(clique_num))
for i in range(sum(clique_num)): # randomly assign cliques to external liaisons
clique_ext_list[i] = np.random.randint(ext_li);
print("clique_ext_list =", clique_ext_list)
cliquenodes = len(Gclique);
for i in range(len(Gli)):
for j in range(len(Gli)):
if j in Gli[i]:
Gclique.add_edge(cliquenodes+i,cliquenodes+j)
for i in range(ext_li):
dums = np.where(clique_ext_list==i);
for j in range(len(dums[0])):
for k in range(len(li_cliquewise[dums[0][j]])):
Gclique.add_edge(cliquenodes+i,li_cliquewise[dums[0][j]][k])
degthis,a1,a2 = draw_degdist(Gclique,1,'b',0)
figcolor = 'b'
plt.figure(2)
plt.scatter((a1),(a2),c=figcolor,marker='o',s=400,alpha=0.5)
plt.plot((a1),(a2),linewidth=2,c=figcolor)
plt.xlabel('node degree',fontsize=30)
plt.ylabel('number of nodes',fontsize=30)
plt.axis([-2, 45, -19, 670])
clique_list = [];
for i in range(len(clique_size)):
dum = np.linspace(clique_size[i],clique_size[i], clique_num[i])
clique_list = np.hstack((clique_list,dum ))
colors = []; c = 0;
for i in range(len(clique_list)):
colors.extend(np.linspace(c,c,clique_list[i]))
c = c + 1
for i in range(ext_li):
colors.append(20); c = c + 1
#pos=nx.spring_layout(Gclique,iterations=200)
posx = []; posy = [];
for i in range(len(clique_list)):
centerx = np.cos(2*np.pi*i/len(clique_list))
centery = np.sin(2*np.pi*i/len(clique_list))
x1 = []; y1 = [];
for j in range(int(clique_list[i])):
x1.append(centerx + 0.2*np.cos(2*np.pi*j/clique_list[i]))
y1.append(centery + 0.2*np.sin(2*np.pi*j/clique_list[i]))
posx.extend(x1); posy.extend(y1);
x1 = []; y1 = [];
print("ext_li =", ext_li)
for j in range(ext_li):
x1.append(0.5*np.cos(2*np.pi*j/ext_li))
y1.append(0.5*np.sin(2*np.pi*j/ext_li))
posx.extend(x1); posy.extend(y1);
pos = np.transpose(np.vstack((posx,posy)))
plt.figure(3)
nx.draw(Gclique,pos,node_color=colors,node_size=800,cmap=plt.cm.Blues)
plt.show()
print('diameter of liaison network is', nx.diameter(Gclique))
print('avg clustering coeff is', nx.average_clustering(Gclique))
print('avg shortest path length', nx.average_shortest_path_length(Gclique))
plt.show()
return Gclique
Gclique = Liaison_model()
| [
"matplotlib"
] |
6c8f7a3a3b183b25e1439c26e1f4756cea5a3f94 | Python | petercunning/notebook | /pcolor.py | UTF-8 | 1,441 | 2.875 | 3 | [
"GFDL-1.1-only",
"MIT"
] | permissive | # -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <codecell>
import numpy as np
from iris.cube import Cube
from iris.coords import DimCoord
# <codecell>
def create_cube():
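# Build a small 4x5 (lat x lon) iris cube of random data with latitude/longitude dimension coordinates.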
lon1d = np.arange(5)
lat1d = np.arange(4)
data = np.random.random((len(lat1d),len(lon1d)))
cube = Cube(data)
lon = DimCoord(lon1d, standard_name='longitude',
units='degrees', circular=False)
lat = DimCoord(lat1d, standard_name='latitude',
units='degrees')
cube.add_dim_coord(lon, 1)
cube.add_dim_coord(lat, 0)
return cube
# <codecell>
cube = create_cube()
# <codecell>
x = cube.coord(axis='X')
x.guess_bounds()
x
# <codecell>
y = cube.coord(axis='Y')
y.guess_bounds()
y
# <codecell>
%matplotlib inline
import matplotlib.pyplot as plt
plt.pcolormesh(x.points, y.points, cube.data)
# <codecell>
import iris.quickplot as qplt
cs = qplt.pcolormesh(cube)
# <codecell>
import cartopy.crs as ccrs
from cartopy.mpl.ticker import LongitudeFormatter, LatitudeFormatter
fig, ax = plt.subplots(subplot_kw=dict(projection=ccrs.PlateCarree()))
cs = qplt.pcolormesh(cube)
ax.set_xticks(x.points, crs=ccrs.PlateCarree())
ax.set_yticks(y.points, crs=ccrs.PlateCarree())
lon_formatter = LongitudeFormatter(zero_direction_label=True)
lat_formatter = LatitudeFormatter()
ax.xaxis.set_major_formatter(lon_formatter)
ax.yaxis.set_major_formatter(lat_formatter)
| [
"matplotlib"
] |
7f1e595c478d60f368d15ac1a9695929608497b3 | Python | a2liu/mosquitoSim | /run3.py | UTF-8 | 869 | 3.71875 | 4 | [] | no_license | import time
import matplotlib
from sim import population
#This script simulates the decay of a set of populations; the population attributes are set in getPopAttributes
def getPopAttributes(iteration):
#Determine population attributes (fixed here; the iteration number is accepted for per-run variation)
# Initial population size
popSize = 10000
# percentage of initial pop. infected
perInfect = .5
# % chance that offspring of an infected male is male
ratio = .5
# Growth of population per generation. 0 means no growth, 1 means double every year, etc.
growth = 6
return (popSize, perInfect, ratio, growth)
start = time.process_time()
# Amount of populations to simulate
popNum = 1000
# Max iteration count
maxIter = 10000
for x in range(1, popNum + 1): # simulate popNum populations, numbered from 1
pop = population(*getPopAttributes(x))
print(x,pop.decay(maxIter))
stop = time.process_time()
print("Time Elapsed:", stop - start)
| [
"matplotlib"
] |
4d33c2137c8938cbaad4e1fc436f8e291d230790 | Python | mmilunovic/bin-genetic-algorithm | /bin-gen-algorithm.py | UTF-8 | 7,422 | 2.96875 | 3 | [] | no_license | # %matplotlib inline
import random
import numpy as np
import matplotlib.pyplot as plt
import math
from mpl_toolkits import mplot3d
pi = math.pi
def levy_function(chromosome):
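# Levy function N.13: f(x,y) = sin^2(3*pi*x) + (x-1)^2*(1 + sin^2(3*pi*y)) + (y-1)^2*(1 + sin^2(2*pi*y)),
# whose global minimum is f(1,1) = 0.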
x = chromosome[0]
y = chromosome[1]
tmp1 = math.pow(math.sin(3*pi*x), 2)
tmp2 = math.pow((x - 1), 2) * (1 + math.pow(math.sin(3*pi*y), 2))
tmp3 = math.pow((y - 1), 2) * (1 + math.pow(math.sin(2*pi*y), 2))
return tmp1 + tmp2 + tmp3
def l_show(x, y):
tmp1 = math.pow(math.sin(3*pi*x), 2)
tmp2 = math.pow((x - 1), 2) * (1 + math.pow(math.sin(3*pi*y), 2))
tmp3 = math.pow((y - 1), 2) * (1 + math.pow(math.sin(2*pi*y), 2))
return tmp1 + tmp2 + tmp3
levy_vectorized = np.vectorize(l_show)
x = np.linspace(-13, 13, 30)
y = np.linspace(-13, 13, 30)
X, Y = np.meshgrid(x, y)
Z = levy_vectorized(X, Y)
ax = plt.axes(projection='3d')
ax.plot_surface(X, Y, Z, rstride=1, cstride=1,
cmap='plasma', edgecolor='none')
ax.set_title('Levy function N.13')
ax.view_init(50, 35)
def bin_encode(chromosome, bin_val, min_val, precision):
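# Fixed-point encoding: map each real gene to an integer in [0, 2^precision - 1] and concatenate the bit strings.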
ret = ""
for g in chromosome:
val = round((g - min_val)/bin_val)
ret += bin(val)[2:].rjust(precision,'0')
return ret
def bin_encode_chromosomes(chromosomes, precision, max_val, min_val):
bin_val = (max_val - min_val) / (2**precision-1)
bin_chromosomes = [ bin_encode(c, bin_val, min_val, precision) for c in chromosomes]
return bin_chromosomes
def bin_decode(chromosome, bin_val, min_val, precision):
ret = []
for idx in range(0, len(chromosome), precision):
g = int(chromosome[idx:idx + precision], 2)
ret.append(g * bin_val + min_val)
return ret
def bin_decode_chromosomes(chromosomes, precision, max_val, min_val):
bin_val = (max_val - min_val) / (2**precision-1)
bin_chromosomes = [ bin_decode(c, bin_val, min_val, precision) for c in chromosomes]
return bin_chromosomes
def two_point_crossover(pairs):
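# Pick two random cut points and swap the middle segment between the parents, producing two children.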
length = len(pairs[0][0]) # bit-string length of one chromosome, not of the pair
children = []
for (a,b) in pairs:
r1 = random.randrange(0, length)
r2 = random.randrange(0, length)
if r1 < r2:
children.append(a[:r1] + b[r1:r2] + a[r2:])
children.append(b[:r1] + a[r1:r2] + b[r2:])
else:
children.append(a[:r2] + b[r2:r1] + a[r1:])
children.append(b[:r2] + a[r2:r1] + b[r1:])
return children
def inv_mutation(chromosomes, mutation_rate):
mutated_chromosomes = []
for chromosome in chromosomes:
if random.random() < mutation_rate:
r1 = random.randrange(0, len(chromosome) - 1)
r2 = random.randrange(0, len(chromosome) - 1)
if r1 < r2:
mutated_chromosomes.append(chromosome[:r1] + chromosome[r1:r2][::-1] + chromosome[r2:])
else:
mutated_chromosomes.append(chromosome[:r2] + chromosome[r2:r1][::-1] + chromosome[r1:])
else:
mutated_chromosomes.append(chromosome)
return mutated_chromosomes
def generate_inital_chromosomes(length, max, min, pop_size):
return [ [random.uniform(min,max) for j in range(length)] for i in range(pop_size)]
def population_stats(costs):
return costs[0], sum(costs)/len(costs)
def rank_chromosomes(cost, chromosomes):
costs = list(map(cost, chromosomes))
ranked = sorted( list(zip(chromosomes,costs)), key = lambda c:c[1])
return list(zip(*ranked))
def natural_selection(chromosomes, n_keep):
return chromosomes[:n_keep]
def pairing(parents):
pairs = []
for i in range(0, len(parents) - 1, 2): # pair parents two at a time; a trailing unpaired parent is dropped
pairs.append([parents[i], parents[i+1]])
return pairs
def genetic(cost_func , extent, population_size, mutation_rate = 0.3, chromosome_length = 2, precision = 13, max_iter = 500):
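# Binary-coded GA: rank by cost, select parents, then apply two-point crossover and inversion mutation.
# Stops early when the best cost drops below 0.05 or the best chromosome stalls for 20 generations.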
min_val = extent[0]
max_val = extent[1]
avg_list = []
best_list = []
curr_best = float('inf')
same_best_count = 0
chromosomes = generate_inital_chromosomes(chromosome_length, max_val, min_val, population_size)
for iter in range(max_iter):
ranked, costs = rank_chromosomes(cost_func, chromosomes)
best, average = population_stats(costs)
parents = natural_selection(ranked, population_size)
parents = bin_encode_chromosomes(parents, precision, max_val, min_val)
pairs = pairing(parents)
children = two_point_crossover(pairs)
chromosomes = parents + children
chromosomes = inv_mutation(chromosomes, mutation_rate)
chromosomes = bin_decode_chromosomes(chromosomes, precision, max_val, min_val)
print("Generation: ",iter+1," Average: {:.3f}".format(average)," Curr best: {:.3f}".format(best),
"[X, Y] = {:.3f} {:.3f}".format(chromosomes[0][0],chromosomes[0][1]))
print("-------------------------")
avg_list.append(average)
if best < curr_best:
best_list.append(best)
curr_best = best
same_best_count = 0
else:
same_best_count += 1
best_list.append(best)
if(cost_func(chromosomes[0]) < 0.05):
avg_list = avg_list[:iter]
best_list = best_list[:iter]
all_avg_list.append(avg_list)
all_best_list.append(best_list)
generations_list.append(iter)
print("\nSolution found ! Chromosome content: [X, Y] = {:.3f} {:.3f}\n".format(chromosomes[0][0],chromosomes[0][1]))
return
if same_best_count > 20:
print("\nStopped due to convergence. Best chromosome [X, Y] = {:.3f} {:.3f}\n".format(chromosomes[0][0],chromosomes[0][1]))
avg_list = avg_list[:iter]
best_list = best_list[:iter]
all_avg_list.append(avg_list)
all_best_list.append(best_list)
generations_list.append(iter)
return
if iter == max_iter - 1:
avg_list = avg_list[:iter]
best_list = best_list[:iter]
all_avg_list.append(avg_list)
all_best_list.append(best_list)
generations_list.append(iter)
print("\nStopped due to max number of iterations, solution not found. Best chromosome [X, Y] = {:.3f} {:.3f}\n".format(chromosomes[0][0],chromosomes[0][1]))
def display_stats(all_avg_list, all_best_list, generations_list):
c = 0
colors = ['red', 'green', 'blue', 'yellow', 'orange']
for average_list in all_avg_list:
x_axis = list(range(generations_list[c]))
y_axis = average_list
plt.plot(x_axis, y_axis, linewidth=3, color=colors[c], label=str(c + 1))
plt.title('Average cost function value', fontsize=19)
plt.xlabel('Generation', fontsize=10)
plt.ylabel('Cost function')
c += 1
plt.legend(loc='upper right')
plt.show()
c = 0
for best_list in all_best_list:
x_axis = list(range(generations_list[c]))
y_axis = best_list
plt.plot(x_axis, y_axis, color=colors[c], label=str(c + 1))
plt.title('Best cost function value', fontsize=19)
plt.xlabel('Generation')
plt.ylabel('Cost function')
c += 1
plt.legend(loc='upper right')
plt.show()
number_of_chromosomes = [20, 100, 150]
all_avg_list = []
generations_list = []
all_best_list = []
run_number = 5
for x in number_of_chromosomes:
print("==========================")
for k in range(0, run_number):
print("\n", k + 1, ": run of genetic algorithm with ", x ," chromosomes.\n")
genetic(levy_function, [-10, 10], x) # extent is [min, max]
display_stats(all_avg_list, all_best_list, generations_list)
all_best_list = []
all_avg_list = []
generations_list = []
| [
"matplotlib"
] |
ef36aa10ece1fcbff5c642451ddcb9595c80b44f | Python | judyliou/CS224W-Analysis-of-Networks | /hw0/hw0_2.py | UTF-8 | 1,224 | 2.65625 | 3 | [] | no_license | import snap
import numpy as np
import matplotlib.pyplot as plt
wiki = snap.LoadEdgeList(snap.PNGraph, "wiki-Vote.txt", 0, 1, '\t')
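# wiki-Vote is a directed who-votes-for-whom network; collect its out-degree histogram below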
degree = []
cnt = []
CntV = snap.TIntPr64V()
snap.GetOutDegCnt(wiki, CntV)
for i in CntV:
if i.GetVal1() != 0 and i.GetVal2() != 0:
degree.append(i.GetVal1())
cnt.append(i.GetVal2())
degree = np.array(degree)
cnt = np.array(cnt)
plt.figure(figsize=(12,8))
ax = plt.gca()
ax.scatter(degree, cnt, c='red', alpha=0.5, edgecolors='none', s=80)
ax.set_yscale('log')
ax.set_xscale('log')
ax.set_xlim([degree.min(), degree.max()])
ax.set_title('Out-degree Distribution', fontsize=30)
ax.set_xlabel('Degree (log)', fontsize=24)
ax.set_ylabel('Count (log)', fontsize=24)
plt.savefig('hw0_2.1.png')
degree = np.log10(degree)
cnt = np.log10(cnt)
a, b = np.polyfit(degree, cnt, 1)
print('a =', a)
print('b =', b)
# Build the fitted line from the log-log least-squares coefficients
x = np.array([degree.min(), degree.max()])
y = a * x + b
plt.figure(figsize=(12,8))
plt.plot(degree, cnt, 'ro')
plt.plot(x, y, 'b', linewidth=3)
plt.xlabel('Degree (log)', fontsize=20)
plt.ylabel('Count (log)', fontsize=20)
plt.title('Out-degree Distribution', fontsize=30)
plt.savefig('hw0_2.2.png')
| [
"matplotlib"
] |
176b6444afaf6905ea4e304eacac8a6577e970d9 | Python | hpqcp/Dryer-Project1 | /base/data_preProcess.py | UTF-8 | 2,343 | 2.671875 | 3 | [] | no_license | import numpy as np
import pandas as pd
from pandas import DataFrame
import matplotlib.pyplot as plt
from functools import reduce
'''
Read an Excel file
_path: file path; _sheetNum: sheet index; _colNum: columns to select
Return : DataFrame
'''
def readExcel(_path,_sheetNum,_colNum=[]):
if len(_colNum) <= 0 :
dfData = pd.read_excel(_path, sheet_name=_sheetNum)
else:
dfData = pd.read_excel(_path,sheet_name=_sheetNum ,usecols=_colNum)
return dfData
'''
Compute summary statistics (count, max, min, mean, std)
_df: input DataFrame
Return : DataFrame
'''
def computeIndex(_df):
count = _df.count()
max = _df.max()
min = _df.min()
mean = _df.mean()
std = _df.std()
dfRtn = DataFrame({'Count':count,'Max':max,'Min':min,'Mean':mean,'Std':std})
return dfRtn
'''Check for missing values: True if any value is missing, False otherwise
'''
def isContainMissValue(_df):
for i in _df.isnull().any():
if i :
return True
return False
'''
Missing-value handling (not implemented yet)
'''
def FillMissValue(_df):
return 0 # TODO: not implemented
'''
'''
def compute_ChangePoint(_series,_mode="first"):
minList = computeIndex(_series).values[:,2]
if _mode == "first" :
minIndexList = [_series[_series[i] == minList[i]].index.values[0] for i in range(0, _series.shape[1], 1)][:]
else:
minIndexList = [_series[_series[i] == minList[i]].index.values[-1] for i in range(0, _series.shape[1], 1)][:]
return minIndexList
'''
'''
def wave_peakTrough(_series):
minList = computeIndex(_series).values[:, 2]
min1 = [_series[_series[i] == minList[i]].index.values[1] for i in range(0, _series.shape[1], 1)][:]
min2 = [_series[_series[i] == minList[i]].index.values[-1] for i in range(0, _series.shape[1], 1)][:]
#searching onwards from the minimum point
max1 = [_series[_series[i] ==max(_series[i])].index.values[-1] for i in range(0, _series.shape[1], 1)][:]
max2 = [_series[_series[i] == max(_series[i])].index.values[0] for i in range(0, _series.shape[1], 1)][:]
return min1, min2, max1, max2 # trough and peak indices
| [
"matplotlib"
] |
34cf0ab5db0f312f1b57cc5462e535e0a9e87f69 | Python | SabyasachiNITD/DIP-LAB | /DAY2/4.py | UTF-8 | 366 | 2.609375 | 3 | [] | no_license | import cv2
import numpy as np
import matplotlib.pyplot as plt
img =cv2.imread("2nd.tif",0)
img1 = img - (img & 240) # keep only the 4 least-significant bit-planes
img1 = cv2.equalizeHist(img1) # then stretch their contrast
fig=plt.figure()
ax1= fig.add_subplot(1,2,1)
ax3=fig.add_subplot(1,2,2)
ax1.imshow(img,cmap="gray",interpolation=None)
ax3.imshow(img1,cmap="gray",interpolation=None)
ax1.axis("off")
ax3.axis("off")
plt.show()
| [
"matplotlib"
] |
71f6cc6134c49498124001208552b49107976581 | Python | EParisot/ft_linear_regression | /train.py | UTF-8 | 7,182 | 2.71875 | 3 | [] | no_license | import os
import random
import json
import time
import click
import matplotlib.pyplot as plt
class Trainer(object):
def __init__(self, data_file, sep, plot, model_file, epochs, learning_rate):
self.data_file = data_file
self.sep = sep
self.plot = plot
self.model_file = model_file
self.epochs = epochs
self.learning_rate = learning_rate
self.learning_rate_hist = []
self.model = {"theta_0": 0.0,
"theta_1": 0.0,
"x_min": 0,
"x_max": 0,
"y_min": 0,
"y_max": 0}
self.x_data = []
self.y_data = []
self.labels = []
self.acc = []
self.loss = []
# Read data
self.read_data()
if len(self.x_data) != len(self.y_data) or len(self.x_data) == 0:
print("Error : no valid data found in %s" % self.data_file)
exit(0)
# Read model
if len(self.model_file):
self.read_model()
def read_data(self):
if os.path.exists(self.data_file):
with open(self.data_file) as f:
for line in f:
line = line.replace('\n', '')
line_data = line.split(self.sep)
if len(line_data) == 2 and all([value.isdigit() for value in line_data]):
self.x_data.append(int(line_data[0]))
self.y_data.append(int(line_data[1]))
elif len(line_data) == 2:
self.labels.append(line_data[0])
self.labels.append(line_data[1])
self.normalise()
def normalise(self):
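# Min-max scale both columns to [0, 1]; the extrema are stored in the model so predictions can later be mapped back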
x_min = min(self.x_data)
x_max = max(self.x_data)
y_min = min(self.y_data)
y_max = max(self.y_data)
self.model["x_min"] = x_min
self.model["x_max"] = x_max
self.model["y_min"] = y_min
self.model["y_max"] = y_max
for i, _ in enumerate(self.x_data):
self.x_data[i] -= x_min
self.x_data[i] /= (x_max - x_min)
self.y_data[i] -= y_min
self.y_data[i] /= (y_max - y_min)
def read_model(self):
if os.path.exists(self.model_file):
with open(self.model_file, "r") as f:
check = f.read(2)
f.seek(0)
if len(check) != 0 and check[0] != "\n" and check != "{}":
data = json.load(f)
self.model["theta_0"] = data["theta_0"]
self.model["theta_1"] = data["theta_1"]
self.model["x_min"] = data["x_min"]
self.model["x_max"] = data["x_max"]
self.model["y_min"] = data["y_min"]
self.model["y_max"] = data["y_max"]
def save_model(self):
if not os.path.exists(self.model_file):
mode = "w+"
else:
mode = "w"
with open(self.model_file, mode) as f:
json.dump(self.model, f)
def animate(self):
plt.clf()
x_data, y_data = [list(t) for t in zip(*sorted(zip(self.x_data, self.y_data)))]
plt.scatter(x_data, y_data)
if len(self.labels):
plt.xlabel(self.labels[0])
plt.ylabel(self.labels[1])
# result
x1 = min(x_data)
y1 = self.estimate(x1)
x2 = max(x_data)
y2 = self.estimate(x2)
plt.plot([x1, x2], [y1, y2], c="r")
plt.twinx().twiny()
# plot learning rate history
plt.plot(self.learning_rate_hist, label="Learning Rate")
plt.legend()
plt.draw()
plt.pause(1/self.epochs)
def train(self):
# process train
self.train_loop()
# write model file
self.save_model()
# plot result
if self.plot:
plt.figure("Train history")
plt.plot(self.acc, label="acc")
plt.plot(self.loss, label="loss")
plt.legend()
plt.show(block=True)
def train_loop(self):
# shuffle datas
l = list(zip(self.x_data, self.y_data))
random.shuffle(l)
x_data, y_data = zip(*l)
# loop on epochs
for epoch in range(self.epochs):
print("Training... Epoch : %d" % (epoch + 1))
loss, acc = self.train_epoch(x_data, y_data)
self.acc.append(acc)
self.loss.append(loss)
self.learning_rate_hist.append(self.learning_rate)
# print
print("loss : %f ; acc : %f" % (round(loss, 2), round(acc, 2)))
if self.plot:
self.animate()
def train_epoch(self, X, Y):
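# One full-batch gradient-descent step on the MSE of y = theta_0 + theta_1 * x.
# If the loss worsens, the step is undone and the learning rate halved; otherwise the rate grows by 5%.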
n = float(len(X))
# cost
b_vect = []
a_vect = []
for i, _ in enumerate(X):
error_b = self.estimate(X[i]) - Y[i]
b_vect.append(error_b)
error_a = error_b * X[i]
a_vect.append(error_a)
loss_b_prime = sum(b_vect)
loss_a_prime = sum(a_vect)
# gradient descent
tmp_theta_0 = self.learning_rate * loss_b_prime / n
tmp_theta_1 = self.learning_rate * loss_a_prime / n
self.model["theta_0"] -= tmp_theta_0
self.model["theta_1"] -= tmp_theta_1
# metrics
new_loss_tab = []
acc_tab = []
for i, _ in enumerate(X):
error = self.estimate(X[i]) - Y[i]
error_sq = error ** 2
new_loss_tab.append(error_sq)
acc_tab.append(1) # placeholder "accuracy": every sample counts as correct, so acc is always 1
new_loss = sum(new_loss_tab) / n
acc = float(sum(acc_tab) / n)
# adjust LR
if len(self.loss) > 0:
if new_loss >= self.loss[-1]:
self.model["theta_0"] += tmp_theta_0 # undo the full step that made the loss worse
self.model["theta_1"] += tmp_theta_1
self.learning_rate *= 0.5
else:
self.learning_rate *= 1.05
return new_loss, acc
def estimate(self, x):
y = self.model["theta_0"] + self.model["theta_1"] * x
return y
@click.command()
@click.argument("data_file", type=click.Path(exists=True))
@click.argument("model_file", default="model.json")
@click.option("-sep", "sep", default=",", help="csv separator")
@click.option("-p", "plot", is_flag=True, help="plot data")
@click.option("-e", "epochs", default=1, help="epochs to train")
@click.option("-l", "learning_rate", default=0.1, help="learning rate")
def main(data_file, sep, plot, model_file, epochs, learning_rate):
trainer = Trainer(data_file, sep, plot, model_file, epochs, learning_rate)
if trainer.plot:
plt.ion()
trainer.train()
if __name__ == "__main__":
main()
| [
"matplotlib"
] |
2b371258ebe17251c416b9ebb80255cf1e6774df | Python | Lyuyangdaisy/DS_package | /chinese/output_in_3d_plot/app_3d_graph.py | UTF-8 | 5,021 | 2.78125 | 3 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-
# Author: Hu Baitao
# Function: browser app that draws a 3D scatter plot
# Date: 2020-08-27
# zhuliang3000.xlsx is the example workbook
import plotly.graph_objs as go
import dash
import dash_core_components as dcc
import dash_html_components as html
import dash_table
from dash.dependencies import Input, Output, State
import pandas as pd
#input function: prompt for the data file path
def para_input():
print('Please enter the data file path (an .xlsx file)') # e.g. D:\homework\Research 2020 S\10 cycle old alg\zhuliang3000.xlsx
filename = input()
return filename
#读取表格,处理数据
def read_xlsx(filename):
data = pd.read_excel(filename)
return data
# display data as a static HTML table
def generate_table(dataframe, max_rows):
return html.Table([
html.Thead(
html.Tr([html.Th(col) for col in dataframe.columns])
),
html.Tbody([
html.Tr([
html.Td(dataframe.iloc[i][col]) for col in dataframe.columns
]) for i in range(min(len(dataframe), max_rows))
])
])
def generate_graph(dataframe, x_value, y_value, z_value):
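# Build a 3D scatter figure from three DataFrame columns, colouring markers by the z values.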
trace = go.Scatter3d(
x=dataframe[x_value], y=dataframe[y_value], z=dataframe[z_value], mode='markers', marker=dict(
size=5,
color=dataframe[z_value], # set color to an array/list of desired values
colorscale='Viridis'
)
)
layout = go.Layout(title='Major element analysis',
scene=dict(
xaxis_title=x_value,
yaxis_title=y_value,
zaxis_title=z_value
),
height= 800,
width= 1000
)
fig = go.Figure(data=[trace], layout=layout)
return fig
def main():
filename = para_input()
df = read_xlsx(filename)
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
app.layout = html.Div(children=[
html.H1(
children='3D Graph',
style = {
'textAlign': 'center',
# 'color': colors['text']
}
),
html.H4(children='zhuliang3000'),
dcc.Dropdown(
id='num_row',
options=[{'label': 'show first 10 rows', 'value': 10},
{'label': 'show first 25 rows', 'value': 25},
{'label': 'show first 50 rows', 'value': 50},
{'label': 'show first 100 rows', 'value': 100}],
value=10
),
dash_table.DataTable(
id='table',
columns=[{"name": i, "id": i} for i in df.columns],
data=df.to_dict('records'),
page_size =10,
),
html.Label('Select three major elements'),
dcc.Checklist(
id='box-section',
options=[
{'label': 'TRUE VALUE', 'value': 'TRUE VALUE'},
{'label': 'SIO2(WT%)', 'value': 'SIO2(WT%)'},
{'label': 'TIO2(WT%)', 'value': 'TIO2(WT%)'},
{'label': 'AL2O3(WT%)', 'value': 'AL2O3(WT%)'},
{'label': 'CR2O3(WT%)', 'value': 'CR2O3(WT%)'},
{'label': 'FEOT(WT%)', 'value': 'FEOT(WT%)'},
{'label': 'CAO(WT%)', 'value': 'CAO(WT%)'},
{'label': 'MGO(WT%)', 'value': 'MGO(WT%)'},
{'label': 'MNO(WT%)', 'value': 'MNO(WT%)'},
{'label': 'K2O(WT%)', 'value': 'K2O(WT%)'},
{'label': 'NA2O(WT%)', 'value': 'NA2O(WT%)'}
],
value=['TRUE VALUE', 'SIO2(WT%)','TIO2(WT%)']
),
html.Button(id='submit-button-state', n_clicks=0,children='Submit'),
dcc.Graph(
id='graph with main element',
figure= generate_graph(df,'TRUE VALUE','SIO2(WT%)','TIO2(WT%)')
)
])
@app.callback(
Output('table','page_size'),
[Input('num_row', 'value')])
def update_row_num(row_num):
return row_num
@app.callback(
Output('graph with main element', 'figure'),
[Input('submit-button-state', 'n_clicks')],
[State('box-section', 'value')])
def update_figure(n_clicks, box_value):
fig = generate_graph(df, box_value[0], box_value[1], box_value[2])
return fig
app.run_server(debug=True)
if __name__=='__main__':
main() | [
"plotly"
] |
cdf95c826bd71984c2e88dd1a9974efb53573e7e | Python | nihaomiao/PRICAI18_MVF-CasCNN | /MVFCasCNN/SaveHeatmapToFig.py | UTF-8 | 1,743 | 2.796875 | 3 | [
"MIT"
] | permissive | # Transforming heatmap Matrix to Figure and saving them
# Author: Haomiao Ni
import os
import matplotlib.pyplot as plt
from scipy.sparse import load_npz, lil_matrix
from scipy.signal import medfilt2d
import numpy as np
def run(MatPath, FigPath, heatthre, medflag):
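# Render every sparse .npz heatmap in MatPath as a JPEG figure in FigPath,
# optionally suppressing sub-0.5 predictions and median-filtering first.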
dpi = 1000.0
FileList = os.listdir(MatPath)
FileList.sort()
plt.ioff()
fig = plt.figure(frameon=False)
for FileName in FileList:
print(FileName)
if os.path.splitext(FileName)[1] == '.npz':
file = os.path.join(MatPath, FileName)
heatmap = load_npz(file)
heatmap = lil_matrix(heatmap)
heatmap = np.array(heatmap.todense())
if heatthre:
# threshold 0.5
heatmap[np.logical_and(heatmap<0.5, heatmap>0)] = 0.1
if medflag:
# post processing
heatmap = medfilt2d(heatmap, (3, 3))
heatmap[0, 0] = 1.0 # pin one pixel at 1.0, presumably to keep the colour scale consistent across figures
fig.clf()
fig.set_size_inches(heatmap.shape[1]/dpi, heatmap.shape[0]/dpi)
ax = plt.Axes(fig, [0., 0., 1., 1.])
ax.set_axis_off()
fig.add_axes(ax)
cm = plt.cm.get_cmap('jet')
ax.imshow(heatmap, cmap=cm, aspect='auto')
postfix = FileName.split('_')[-1]
FigName = FileName.replace(postfix,"FIG.jpg")
fig.savefig(os.path.join(FigPath, FigName), dpi=int(dpi))
if __name__ == "__main__":
heatthre = False # choose False to show those pixels whose predictions are less than 0.5
medflag = False # choose True to median filter heatmaps
MatPath = ''
FigPath = ""
if not os.path.exists(FigPath):
os.makedirs(FigPath)
run(MatPath, FigPath, heatthre, medflag)
| [
"matplotlib"
] |
53d6cd6a98dbe37ab48d0d21207d35e5f618d751 | Python | robertjankowski/social-media-influence-on-covid-pandemic | /scripts/visualization.py | UTF-8 | 7,338 | 2.6875 | 3 | [
"MIT"
] | permissive | import sys
import matplotlib.pyplot as plt
import matplotlib as mpl
import seaborn as sns
sys.path.append("../")
from scripts.network import degree_node_size, degree_selected_nodes_size
from scripts.virtual_layer import *
DEFAULT_CONFIG = {
'font.size': 16,
'font.family': 'sans-serif',
'font.sans-serif': ['DejaVu Sans']
}
def load_matplotlib():
plt.rcParams.update(DEFAULT_CONFIG)
plt.rc('text', usetex=True)
plt.rc('figure', figsize=(8, 6))
def save_figure(filename: str):
"""
Save matplotlib figure in correct extension
:param filename: Name of output plot
"""
extension = filename.split('.')[-1]
if extension == "png":
plt.savefig(filename, bbox_inches='tight', dpi=300)
elif extension == "pdf" or extension == "svg":
plt.savefig(filename, bbox_inches='tight')
else:
print('Error. Cannot save figure, unsupported extension: [{}]'.format(extension))
def draw_network(g: nx.Graph, ax=None, pos=None, node_size_list=None, node_size_scale=10,
edge_alpha=0.1, node_border_color='black', node_border_width=0.5):
"""
Draw nx.Graph on matplotlib axis
:param g: nx.Graph
:param ax: matplotlib canvas
:param pos: position of nodes (e.g. from nx.spring_layout(g))
:param node_size_list: list of node sizes
:param node_size_scale: float
:param edge_alpha: float
:param node_border_color: float
:param node_border_width: float
"""
if pos is None:
pos = nx.spring_layout(g)
if node_size_list is None:
node_size_list = degree_node_size(g, node_size_scale)
nx.draw_networkx_edges(g, ax=ax, alpha=edge_alpha, pos=pos, connectionstyle='arc3, rad = 0.1')
nx.draw_networkx_nodes(g, node_size=node_size_list, ax=ax, pos=pos,
edgecolors=node_border_color, linewidths=node_border_width)
def draw_epidemic_layer(g: nx.Graph, ax=None, pos=None, node_size_scale=10, edge_alpha=0.1,
node_border_color='black', node_border_width=0.5):
if pos is None:
pos = nx.spring_layout(g)
susceptible_nodes = []
infected_nodes = []
quarantined_nodes = []
recovered_nodes = []
dead_nodes = []
for node in g.nodes:
node_status = g.nodes[node]['l1_status']
if node_status is None:
print('Node should have `l1_status` field. Exiting...')
return
if node_status == 'S':
susceptible_nodes.append(node)
elif node_status == 'I':
infected_nodes.append(node)
elif node_status == 'Q':
quarantined_nodes.append(node)
elif node_status == 'R':
recovered_nodes.append(node)
elif node_status == 'D':
dead_nodes.append(node)
susceptible_nodes_sizes = degree_selected_nodes_size(g, susceptible_nodes, node_size_scale)
infected_nodes_sizes = degree_selected_nodes_size(g, infected_nodes, node_size_scale)
quarantined_nodes_sizes = degree_selected_nodes_size(g, quarantined_nodes, node_size_scale)
recovered_nodes_sizes = degree_selected_nodes_size(g, recovered_nodes, node_size_scale)
dead_nodes_sizes = degree_selected_nodes_size(g, dead_nodes, node_size_scale)
nx.draw_networkx_edges(g, ax=ax, alpha=edge_alpha, pos=pos, connectionstyle='arc3,rad=0.1',
arrowstyle='<->')
# Susceptible nodes
nx.draw_networkx_nodes(g, nodelist=susceptible_nodes, node_size=susceptible_nodes_sizes,
node_color='orange', ax=ax, pos=pos, edgecolors=node_border_color,
linewidths=node_border_width, label='susceptible')
# Infected nodes
nx.draw_networkx_nodes(g, nodelist=infected_nodes, node_size=infected_nodes_sizes,
node_color='lightblue', ax=ax, pos=pos, edgecolors=node_border_color,
linewidths=node_border_width, label='infected')
# Quarantined nodes
nx.draw_networkx_nodes(g, nodelist=quarantined_nodes, node_size=quarantined_nodes_sizes,
node_color='brown', ax=ax, pos=pos, edgecolors=node_border_color,
linewidths=node_border_width, label='quarantined')
# Recovered nodes
nx.draw_networkx_nodes(g, nodelist=recovered_nodes, node_size=recovered_nodes_sizes,
node_color='green', ax=ax, pos=pos, edgecolors=node_border_color,
linewidths=node_border_width, label='recovered')
# Dead nodes
nx.draw_networkx_nodes(g, nodelist=dead_nodes, node_size=dead_nodes_sizes,
node_color='black', ax=ax, pos=pos, edgecolors=node_border_color,
linewidths=node_border_width, label='dead')
def draw_virtual_layer(g: nx.Graph, ax=None, pos=None, node_size_scale=10, edge_alpha=0.1,
node_border_color='black', node_border_width=0.5):
if pos is None:
pos = nx.spring_layout(g)
positive_nodes = []
negative_nodes = []
for node in g.nodes:
node_opinion = get_opinion(g, node)
if node_opinion == 1:
positive_nodes.append(node)
elif node_opinion == -1:
negative_nodes.append(node)
positive_node_sizes = degree_selected_nodes_size(g, positive_nodes, node_size_scale)
negative_nodes_sizes = degree_selected_nodes_size(g, negative_nodes, node_size_scale)
nx.draw_networkx_edges(g, ax=ax, alpha=edge_alpha, pos=pos, connectionstyle='arc3,rad=0.1',
arrowstyle='<->', edgelist=g.edges)
# Positive opinions
nx.draw_networkx_nodes(g, nodelist=positive_nodes, node_size=positive_node_sizes,
node_color='red', ax=ax, pos=pos, edgecolors=node_border_color,
linewidths=node_border_width, label='+1')
# Negative opinions
nx.draw_networkx_nodes(g, nodelist=negative_nodes, node_size=negative_nodes_sizes,
node_color='blue', ax=ax, pos=pos, edgecolors=node_border_color,
linewidths=node_border_width, label='-1')
def plot_heatmap(array, xtickslabels: list, ytickslabels: list, colorscale_label: str, title_label: str):
"""
Plot heatmap from 2d array with x and y ticks labels
:param array: 2d array
:param xtickslabels:
:param ytickslabels:
:param colorscale_label:
:param title_label:
"""
xticks_labels = ['{:.2f}'.format(l) for l in xtickslabels]
yticks_labels = ['{:.2f}'.format(b) for b in ytickslabels]
sns.heatmap(array, annot=False, cmap="YlGnBu",
yticklabels=yticks_labels, xticklabels=xticks_labels,
vmin=0, vmax=1, cbar_kws={'label': colorscale_label})
plt.title(title_label)
def plot_imshow(df, xlabel, ylabel, cmap=mpl.cm.Reds):
plt.imshow(df, cmap=cmap)
xticks = [float(x) for x in df.columns]
plt.xticks(range(len(xticks)), xticks)
yticks = [int(x) for x in df.index]
plt.yticks(range(len(yticks)), yticks)
plt.colorbar()
plt.xlabel(xlabel, fontsize=20)
plt.ylabel(ylabel, fontsize=20)
| [
"matplotlib",
"seaborn"
] |