blob_id | language | repo_name | path | src_encoding | length_bytes | score | int_score | detected_licenses | license_type | code | used_libs |
---|---|---|---|---|---|---|---|---|---|---|---|
5363d87fc2f8c5901cdab491290edac5201656db | Python | gugajung/RP-II | /cadernos/ExecutandoUmaRegressaoMultivariada.py | UTF-8 | 869 | 2.671875 | 3 | [] | no_license | import numpy as np
import pandas as pd
from scipy import stats
import statsmodels.api as sm
import matplotlib.pyplot as plt
data = pd.read_excel('C:/Python/Data_Files/Housing.xlsx')
print(data)
X = data[['House Size (sq.ft.)', 'Number of Rooms', 'Year of Construction']]
Y = data['House Price']
X1 = sm.add_constant(X)
reg = sm.OLS(Y, X1).fit()
print(reg.summary())
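# Illustrative aside (not part of the original script): individual statistics can also be
# read straight off the fitted results object, e.g.
# print(reg.rsquared)   # coefficient of determination
# print(reg.params)     # estimated intercept and coefficients
# print(reg.pvalues)    # coefficient p-values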
X = data[['House Size (sq.ft.)', 'Number of Rooms']]
Y = data['House Price']
X1 = sm.add_constant(X)
reg = sm.OLS(Y, X1).fit()
print(reg.summary())
X = data[['House Size (sq.ft.)', 'Year of Construction']]
Y = data['House Price']
X1 = sm.add_constant(X)
reg = sm.OLS(Y, X1).fit()
print(reg.summary())
X = data[['Number of Rooms', 'Year of Construction']]
Y = data['House Price']
X1 = sm.add_constant(X)
reg = sm.OLS(Y, X1).fit()
print(reg.summary()) | [
"matplotlib"
] |
6b04740b0c10694eff015dedfd18e18e9189f323 | Python | deepweaver/ravens | /Solver/utils.py | UTF-8 | 6,399 | 2.734375 | 3 | [] | no_license | from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
import datetime,time
import os
import cv2
def visualize(data, answer=0, title=None, save=False, show=True):
# input a list of sub images
# e.g.
# dataset["4cells"][23]
# dataset["9cells"][47]
# output an image presentation
fig = plt.figure(figsize=(7,15))
if title:
fig.suptitle(title, fontsize=14, y=0.97, fontweight='semibold')
if len(data) == 10:
columns = 3
rows = 4
elif len(data) == 17:
columns = 4
rows = 5
else:
print("Data error")
return
arrIdx = 0
for i in range(1,columns*rows+1):
if i == columns or i == 2*columns or len(data) == 17 and i == 3*columns:
continue
img = data[arrIdx]
arrIdx += 1
tmp = fig.add_subplot(rows, columns, i)
if len(data) == 10 and 0 < answer <= 6:
if i == 6 + answer:
tmp.title.set_text('Answer:')
if len(data) == 17 and 0 < answer <= 8:
if i == 12 + answer:
tmp.title.set_text('Answer:')
plt.setp(tmp.get_xticklabels(), visible=False)
plt.setp(tmp.get_yticklabels(), visible=False)
tmp.tick_params(axis='both', which='both', length=0)
plt.imshow(img,cmap='gray')
if save == True:
st = datetime.datetime.fromtimestamp(time.time()).strftime('%Y_%m_%d_%H_%M_%S')
if not os.path.exists("./output/"):
os.mkdir("./output/")
print("Made new dir for output")
plt.savefig("./output/{}.png".format(st))
if show == True:
plt.show()
def get_one_hot(targets, nb_classes):
res = np.eye(nb_classes)[np.array(targets).reshape(-1)]
return res.reshape(list(targets.shape)+[nb_classes])
def sigmoid(x, derivative=False):
return x*(1-x) if derivative else 1/(1+np.exp(-x))
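# Note: with derivative=True the argument is expected to already be a sigmoid output s,
# since d(sigmoid)/dx = s*(1-s); passing a raw x here would give the wrong value.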
class ContourManager(object):
def __init__(self, mat):
self.mat = mat # input image should be binary
_, self._contours, self._hierarchy = cv2.findContours(255-self.mat, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
# hierarchy is a numpy array of shape (1, self.numOfContours, 4) 4 being [Next, Previous, First_Child, Parent]
# contours is a list of numpy arrays with shape (number of points, 1, x and y position)
self.numOfContours = len(self._contours)
self._computePerimeters()
def getAllContours(self, ):
return self._contours
def getLv1Contours(self, ):
self._computeLv1Contours()
return self.lv1Cnt
def showContours(self, contours, cntIdx=-1, save=False):
# contours is a list of numpy arrays with shape (number of points, 1, x and y position)
# cntIdx being the index, -1 if choose all of them
# (instance method rather than @staticmethod because it reads self.mat)
img = self.mat.copy()
cv2.drawContours(img, contours, cntIdx, (128,255,0), 2)
plt.imshow(img)
plt.show()
def getPerimeters(self):
return self._perimeters
def _computePerimeters(self,):
self._perimeters = []
for i in range(self.numOfContours):
self._perimeters.append(cv2.arcLength(self._contours[i], True))
def _computeLv1Contours(self, ):
self.lv1Cnt = []
for i in range(self.numOfContours):
if self._hierarchy[0,0,i] == -1:
if self._perimeters[i] > 10:
self.lv1Cnt.append(self._contours[i])
def main():
import pickle
# with open("data.pkl", 'rb') as f:
# dataset = pickle.load(f)
# visualize(dataset["4cells"][23],save=True,answer=6)
print("Hello world")
with open("../data/data_a_b_570x900_py3.pkl", "rb") as file:
data = pickle.load(file)
testdata = data[18][2]
# cv2.imwrite("tmp.png", data[18][2])
mgr = ContourManager(testdata)
mgr.showContours(mgr.getLv1Contours())
class FeatureMatcher(object):
@staticmethod
def SIFTdetectorAndBFmatcher(img1, img2): # correct answer 8
# Initiate SIFT detector
sift = cv2.xfeatures2d.SIFT_create()
# find the keypoints and descriptors with SIFT
kp1, des1 = sift.detectAndCompute(img1,None)
kp2, des2 = sift.detectAndCompute(img2,None)
# BFMatcher with default params
bf = cv2.BFMatcher()
matches = bf.knnMatch(des1,des2,k=2)
# Apply ratio test
good = []
for m,n in matches:
if m.distance < 0.75*n.distance:
good.append([m])
return len(good)
@staticmethod
def SIFTdetectorAndFLANNmatcher(img1, img2):
sift = cv2.xfeatures2d.SIFT_create()
# find the keypoints and descriptors with SIFT
kp1, des1 = sift.detectAndCompute(img1,None)
kp2, des2 = sift.detectAndCompute(img2,None)
# FLANN parameters
FLANN_INDEX_KDTREE = 1
index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
search_params = dict(checks=10) # or pass empty dictionary
flann = cv2.FlannBasedMatcher(index_params,search_params)
matches = flann.knnMatch(des1,des2,k=1)
total_distance = 0
# Need to draw only good matches, so create a mask
# matchesMask = [[0,0] for i in range(len(matches))]
# ratio test as per Lowe's paper
for i,(m,) in enumerate(matches):
# if m.distance < 0.7*n.distance:
# matchesMask[i]=[1,0]
total_distance += m.distance
return total_distance
# img3 = cv2.drawMatchesKnn(img1,kp1,img2,kp2,good,None,flags=cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)
# plt.imshow(img3),plt.show()
@staticmethod
def ORBdetectorAndBFmatcher(img1, img2):
orb = cv2.ORB_create()
# find the keypoints and descriptors with ORB
kp1, des1 = orb.detectAndCompute(img1,None)
kp2, des2 = orb.detectAndCompute(img2,None)
# create BFMatcher object
bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
# Match descriptors.
matches = bf.match(des1,des2)
# Sort them in the order of their distance.
matches = sorted(matches, key = lambda x:x.distance)
return sum([matches[i].distance for i in range(10)])
if __name__ == "__main__":
main()
| [
"matplotlib"
] |
28c3c18cd0bec8d2ab4428a45e171551f36dfc1a | Python | SameeraSuhail1/ANN-Organic-Photovoltaic-Cells | /Python Code and Data/Transmittance/utility.py | UTF-8 | 4,890 | 2.5625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Wed May 22 21:11:45 2019
@author: Depp Pc
"""
import pandas as pd
import numpy as np
import math
import matplotlib.pyplot as plt
import random
def plot_true_vs_pred(ytest, yhat, ytrain, ytrain_out, norm_lims, exp_str,
j,which_conc_feature, optimizer = ""):
ytest_df=pd.DataFrame({'target'+ which_conc_feature[7:]:ytest})
yhat_df=pd.DataFrame({'target'+ which_conc_feature[7:]:yhat})
ytrain_df=pd.DataFrame({'target'+ which_conc_feature[7:]:ytrain})
ytrain_out_df=pd.DataFrame({'target'+ which_conc_feature[7:]:ytrain_out})
ytest_unnormed=make_unnorm(ytest_df,norm_lims)
yhat_unnormed=make_unnorm(yhat_df,norm_lims)
ytrain_unnormed=make_unnorm(ytrain_df,norm_lims)
ytrain_out_unnormed=make_unnorm(ytrain_out_df,norm_lims)
plt.plot(ytest_unnormed, 'bs', label='True Value')
plt.plot(yhat_unnormed, 'r^', label='Predicted Value')
plt.legend()
# t = np.arange(0, len(ytest_unnormed), 1)
#plt.xticks(t)
plt.grid(which='both', axis = 'both')
plt.xlabel('Test Samples',fontsize=20)
plt.ylabel('T%', fontsize=20)
plt.suptitle('Transmittance Prediction of Test Samples',fontsize=20)
plt.savefig(fname=exp_str + "/figures/test_learned_nh_" + str(j) +
which_conc_feature + optimizer + ".pdf")
plt.show()
plt.plot(ytrain_unnormed, 'bs', label='True Value')
plt.plot(ytrain_out_unnormed, 'r^',label='Predicted Value')
plt.legend()
# t = np.arange(0, len(ytrain_unnormed), 1)
#plt.xticks(t)
plt.grid(which='both', axis = 'both')
plt.xlabel('Training Samples',fontsize=20)
plt.ylabel('T%', fontsize=20)
plt.suptitle('Transmittance Prediction of Training Samples',fontsize=15)
plt.savefig(fname=exp_str + "/figures/train_learned_nh_" + str(j)
+ which_conc_feature + optimizer + ".pdf")
plt.show()
def make_norm(data,*restcol):
from copy import deepcopy
data_n = deepcopy(data)
norm_limits = {}
for colname in restcol:
meanthis= (np.mean(data[colname]))
stdthis= (np.std(data[colname]))
norm_limits[colname] = {'mean': meanthis, 'std': stdthis}
data_n[colname] = (data[colname] - meanthis)/stdthis
# maxthis = max(data[colname])
# minthis = min(data[colname])
#norm_limits[colname] = {'max':maxthis, 'min':minthis}
#data_n[colname]=(2*((data[colname]-minthis)/(maxthis-minthis))) - 1;
return data_n, norm_limits
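# make_norm() z-scores each requested column and returns the per-column mean/std in
# norm_limits, so make_norm_given_lim() and make_unnorm() below can reuse the same scaling.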
def make_norm_given_lim(data, norm_lims, *restcol):
from copy import deepcopy
data_n = deepcopy(data)
for colname in norm_lims.keys():
if colname not in data.columns.tolist():
continue
else:
data_n[colname] = (data[colname] -
norm_lims[colname]['mean'])/norm_lims[colname]['std']
return data_n
def make_unnorm(data, normlims):
from copy import deepcopy
data_unnorm = deepcopy(data)
for colname in normlims.keys():
if colname not in data.columns.tolist():
continue
else:
data_unnorm[colname] = data[colname]*normlims[colname]['std'] + normlims[colname]['mean']
return data_unnorm
def my_r2_score(v_true, v_pred):
ssres = np.sum(np.square(v_true - v_pred))
sstot = np.sum(np.square(v_true - np.mean(v_true)))
return 1 - ssres / sstot
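# Sanity check (illustrative): perfect predictions give my_r2_score(v, v) == 1.0, while
# predicting the mean of v_true everywhere gives 0.0.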
def split_tt(df_normed, train_percent,iteration=3, *featurename):
n=train_percent/100
dftrain = df_normed.sample(frac =n, random_state=iteration)
dftrain.sort_index(inplace=True)
dftest=df_normed.drop(dftrain.index)
actual_perc=(len(dftrain)/len(df_normed))*100
print(actual_perc)
tempp=[]
for j in featurename:
temp_list = []
for k in featurename:
temp_list.append(k)
tempp.append(temp_list)
for j in temp_list:
if j[0:7] == 'feature':
target_name= 'target' + j[7:]
else:
target_name= 'target'
xdftrain=dftrain
ydftrain=dftrain
xdftest=dftest
ydftest=dftest
xdftrain=xdftrain.filter(items=featurename, axis=1)
ydftrain=ydftrain.filter(items=[target_name], axis=1)
xdftest=xdftest.filter(items=featurename,axis=1)
ydftest=ydftest.filter(items=[target_name],axis=1)
xdftrain=xdftrain.dropna(axis=1)
ydftrain=ydftrain.dropna(axis=1)
xdftest=xdftest.dropna(axis=1)
ydftest=ydftest.dropna(axis=1)
xtrain = xdftrain.values
xtrain=xtrain.reshape((len(xtrain),len(featurename)))
ytrain = ydftrain.values
ytrain=ytrain.reshape(len(ytrain),)
xtest = xdftest.values
xtest=xtest.reshape((len(xtest),len(featurename)))
ytest = ydftest.values
ytest=ytest.reshape(len(ytest),)
return xdftrain, ydftrain, xdftest, ydftest, xtrain, ytrain, xtest, ytest,
| [
"matplotlib"
] |
1f817093cb7a750c935e82df91830ae8b6f370eb | Python | gportella/Miscelanious_QM | /Parameterize_hmC_rotation/do_parse_g09_plumed_plot_maps_and_projections.py | UTF-8 | 5,660 | 2.53125 | 3 | [] | no_license | #! /usr/bin/env python
import numpy as np
import itertools
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib import cm
import math
EV2H = 1./27.211385050
H2KJMOL = 2625.5
def lastscf(fn):
opt_energy = 0.0
with open(fn) as f:
for l in f:
if "SCF Done:" in l.rstrip():
opt_energy = float(l.rstrip("\n").\
split("=")[1].split()[0])
if opt_energy >= 0.0:
return None
else:
return opt_energy
def change_range(ang_degrees):
new_range_deg = ang_degrees if ang_degrees <= 180 else ang_degrees - 360
return new_range_deg
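# e.g. change_range(350) -> -10, change_range(90) -> 90 (maps [0, 360) onto (-180, 180]).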
def e_vs_d(dih_freq):
d1 = []
d2 = []
Edd = []
dih_values = []
for i, u in enumerate(xrange(len(dih_freq))):
dih_values.append([x for x in xrange(-180, 180, dih_freq[i])])
for dih_comb in itertools.product(*dih_values):
# I want the order switched wrt itertools product, a bit hacky
# this is due to the way the plotting works
# data remains the same
new_d1 = dih_comb[1] % 360
new_d2 = dih_comb[0] % 360
fn = "dh" + "_" + str(new_d1) +\
"_" + str(new_d2) + ".out"
E = lastscf(fn)*H2KJMOL
d1.append(dih_comb[1])
d2.append(dih_comb[0])
Edd.append(E)
d1d2 = np.column_stack((np.array(d1), np.array(d2)))
g = np.column_stack((d1d2, (np.array(Edd))- min(Edd)))
return g
def heat_plot(data, title_plot="DFT", do_write=True, outpng="dih_hmC.png"):
vecfunc = np.vectorize(change_range)
d1 = vecfunc(data[:,0]).reshape(len(data[:,0]),1)
d2 = vecfunc(data[:,1]).reshape(len(data[:,0]),1)
g = np.hstack((d1, d2, data[:,2].reshape((len(data[:,0]),1))))
N = int(len(data[:,2])**.5)
z = g[:,2].reshape(N, N)
fig, ax = plt.subplots(figsize=(9,9), dpi=120)
im = plt.imshow(z, extent=(min(d1)[0], max(d1)[0], min(d2)[0], max(d2)[0]), origin="lower", cmap=cm.CMRmap)
ax.set_xlabel("C4-C5-C55-OH5 (deg)")
ax.set_ylabel("C5-C55-OH5-HO5 (deg)")
ax.set_title(title_plot, fontsize=14)
plt.clim(0,45)
cb = fig.colorbar(im, fraction=0.046, pad=0.04, ax=ax)
cb.set_label("Energy (kJ/mol)")
#cs = plt.contour(z, 25, extent=(-180, 180, -180, 180), origin="lower", cmap=cm.CMRmap)
cs = plt.contour(z, 25, extent=(min(d1)[0], max(d1)[0], min(d2)[0], max(d2)[0]),
origin="lower", linewidths=.5, cmap=cm.CMRmap)
plt.clim(0,1000)
if do_write:
plt.savefig(outpng)
else:
plt.show()
def project(data, axis=1):
gg_tup = tuple(map(tuple, data))
d = {}
if axis != 0 and axis != 1:
print "Axis can only be 0 or 1"
return None
for el in gg_tup:
d.setdefault(el[axis], []).append((el[2]))
deg = []
ener = []
for k in d:
norm = 0
for e_s in d[k]:
norm += math.exp(-e_s)
s = 0
for e_s in d[k]:
s += e_s*math.exp(-e_s)/norm
deg.append(k)
ener.append(s)
emin = min(ener)
ener = [x - emin for x in ener]
deg = np.array(deg)
ener = np.array(ener)
projected = np.column_stack((deg, ener))
return projected
def do_plot(data, do_write=False, outpng="ff_vs_qm.png"):
count = 0
f, ax = plt.subplots(len(data),1, sharex=True,
figsize=(9,9), dpi=120)
f.subplots_adjust(hspace=0.2)
labels = ["FF", "DFT"]
for datasets in data:
count += 1
ax[count-1].grid(b=True, which='major',
color='gray', linestyle='--', linewidth=0.5)
ax[count-1].set_axisbelow(True)
for c_counter, dataset in enumerate(datasets[0]):
thelabel = labels[c_counter]
dihedral = datasets[1]
ax[count-1].set_title(dihedral)
for j in range(dataset.shape[1]-1):
ax[count-1].plot(dataset[:, 0],dataset[:,j+1],
linewidth=1.2, label=thelabel)
ax[count-1].legend(ncol=4).get_frame().set_linewidth(0.1)
plt.xlabel("Dihedral (deg)")
ax[count-1].set_ylabel("Energy (kJ/mol)")
if do_write:
plt.savefig(outpng)
else:
plt.show()
def mod_360(val):
return val % 360
def print_xy(p, label, fname="out.dat"):
p[:, 0] = map(mod_360, p[:, 0])
p.view("f8, f8").sort(axis=0)
with open(fname, "w") as fh:
np.savetxt(fh, p, fmt="%f")
dfreq = [10, 10]
gg = e_vs_d(dfreq)
gg[:,0] = map(mod_360, gg[:, 0])
gg[:,1] = map(mod_360, gg[:, 1])
gg.view("f8, f8, f8").sort(axis=0)
np.savetxt("2d_dft_deg.dat", gg, fmt="%f")
# heat_plot(gg, title_plot="DFT", do_write=True, outpng="dih_hmC_dft.png")
ff_2d_rad = np.genfromtxt("fe_2d.dat", usecols=(0, 1, 2))
d1 = np.rad2deg(ff_2d_rad[:, 0])
d1[:] = map(mod_360, d1[:])
d2 = np.rad2deg(ff_2d_rad[:, 1])
d2[:] = map(mod_360, d2[:])
d1d2 = np.column_stack((d1, d2))
ff_2d = np.column_stack((d1d2, ff_2d_rad[:, 2]))
ff_2d.view("f8, f8, f8").sort(axis=0)
np.savetxt("2d_fe_deg.dat", ff_2d, fmt="%f")
p0 = project(gg, axis=0)
p1 = project(gg, axis=1)
p0.view("f8, f8").sort(axis=0)
p1.view("f8, f8").sort(axis=0)
fe_one = np.genfromtxt("fe_one.dat")
fe_one[:,0] *= 57.295779513
fe_two = np.genfromtxt("fe_two.dat")
fe_two[:,0] *= 57.295779513
print_xy(p0, "DFT C4-C5-C55-OH5", "dft_cc.dat")
print_xy(p1, "DFT C5-C55-OH5-HO5", "dft_co.dat")
print_xy(fe_one, "FF C4-C5-C55-OH5", "ff_cc.dat")
print_xy(fe_two, "FF C5-C55-OH5-HO5", "ff_co.dat")
# data = [([fe_one, p0], "C4-C5-C55-OH5"),
# ([fe_two, p1], "C5-C55-OH5-HO5")]
# do_plot(data, do_write=True, outpng="ff_vs_qm.png")
| [
"matplotlib"
] |
d0901e11ba24a2c5eb5641d83cbfdd4a3565fc01 | Python | InnovAnon-Inc/HafrenHaver | /src/HafrenHaver/moon_app.py | UTF-8 | 14,538 | 2.734375 | 3 | [
"Unlicense",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-public-domain"
] | permissive | #! /usr/bin/env python3
from app import App
from constants import DEFAULT_BACKGROUND, SECONDARY_BACKGROUND
from cropping_app import CroppingApp
from constants import OPAQUE
import pygame
from gui import GUI, BLACK
from constants import ORIGIN
from circle_app import CircleApp
from client import Client
from server import Server
from math import radians as rad,degrees as deg
"""
import numpy as np
def filled_arc (center, r, theta1, theta2): # https://stackoverflow.com/questions/30642391/how-to-draw-a-filled-arc-in-matplotlib
# Range of angles
phi = np.linspace (theta1, theta2, 100)
# x values
x = center[0] + r * np.sin (np.radians (phi))
# y values. need to correct for negative values in range theta=90--270
yy = np.sqrt (r - x ** 2)
yy = [-yy[i] if phi[i] > 90 and phi[i] < 270 else yy[i] for i in range (len (yy))]
y = center[1] + np.array (yy)
# Equation of the chord
m = (y[-1] - y[0]) / (x[-1] -x[0])
c = y[0] - m * x[0]
y2 = m * x + c
# Plot the filled arc
ax.fill_between (x, y, y2, color=col[theta1 / 45])
"""
import math
from math import pi
import matplotlib.pyplot as plt
import matplotlib as mpl
from constants import ORIGIN
"""
def dual_half_circle(center, radius, angle=0, ax=None, colors=('w','k'),
**kwargs):
""
Add two half circles to the axes *ax* (or the current axes) with the
specified facecolors *colors* rotated at *angle* (in degrees).
""
if ax is None:
ax = plt.gca()
theta1, theta2 = angle, angle + 180
w1 = Wedge(center, radius, theta1, theta2, fc=colors[0], **kwargs)
w2 = Wedge(center, radius, theta2, theta1, fc=colors[1], **kwargs)
for wedge in [w1, w2]:
ax.add_artist(wedge)
return [w1, w2]
"""
# TODO alpha values suck
def arc_patch (ax, lunacity): # https://stackoverflow.com/questions/58263608/fill-between-arc-patches-matplotlib
ax.grid (False)
xmin = -85
xmax = +85
xrng = xmax - xmin
ymin = -85
ymax = +85
yrng = ymax - ymin
ax.set_xlim (xmin, xmax)
ax.set_ylim (ymin, ymax)
# Use a predefined colormap
colormap = []
# Draw multiple ellipses with different colors and style. All are perfectly superposed
ellipse = mpl.patches.Ellipse ( # Base one, with big black line for reference
ORIGIN, xrng, yrng,
color='k', fill=False, zorder=3) # TODO what is zorder ?
# Define some clipping paths
# One for each area
#clips = [
# mpl.patches.Arc ( # One covering right half of your ellipse
# ORIGIN, xrng, yrng, theta1=0, theta2=360,
# visible=False # We do not need to display it, just to use it for clipping
# ),
#]
clips = []
#colormap.append ('purple')
X, Y = ORIGIN
ll = xmin, ymin
w, h = xrng / 2, yrng
lrect = ll, w, h
ll = xmin + w, ymin
rrect = ll, w, h
if lunacity < .25: # 0 , .25 => new moon, first quarter moon
print ("q0-q1")
lun = lunacity * 4 # 0 , 1.
lun = 1 - lun # 1 , 0.
rx, ry = xrng * lun, yrng
# dark left, dark middle, light right
light_patch = mpl.patches.Rectangle ( # right half
*rrect, visible=False)
dark_patch = mpl.patches.Ellipse ( # center
ORIGIN, rx, ry, visible=False)
dark_patch2 = mpl.patches.Rectangle ( # left half
*lrect, visible=False)
colormap.append ('black')
colormap.append ('white')
colormap.append ('black')
clips.append ( dark_patch2)
clips.append (light_patch)
clips.append ( dark_patch)
elif lunacity < .5: # .25, .5 => first quarter moon, full moon
print ("q1-q2")
assert .25 <= lunacity
lun = lunacity - .25 # 0. , .25
assert lun >= 0
assert lun < .25
lun = lun * 4 # 0. , 1.
assert lun >= 0
assert lun < 1
rx, ry = xrng * lun, yrng
# dark left, light middle, light right
light_patch2 = mpl.patches.Rectangle ( # right
*rrect, visible=False)
dark_patch = mpl.patches.Rectangle ( # left
*lrect, visible=False)
light_patch = mpl.patches.Ellipse ( # middle
ORIGIN, rx, ry, visible=False)
colormap.append ('white')
colormap.append ('black')
colormap.append ('white')
clips.append (light_patch2)
clips.append ( dark_patch)
clips.append (light_patch)
elif lunacity < .75: # .5 , .75 => full moon, third quarter moon
print ("q2-q3")
assert .5 <= lunacity
lun = lunacity - .5 # 0. , .25
assert lun >= 0
assert lun < .25
lun = lun * 4 # 0. , 1.
assert lun >= 0
assert lun < 1
lun = 1 - lun # 1. , 0.
assert lun > 0
assert lun <= 1
rx, ry = xrng * lun, yrng
# light left, light middle, dark right
light_patch2 = mpl.patches.Rectangle ( # left
*lrect, visible=False)
dark_patch = mpl.patches.Rectangle ( # right
*rrect, visible=False)
light_patch = mpl.patches.Ellipse ( # middle
ORIGIN, rx, ry, visible=False)
colormap.append ('white')
colormap.append ('black')
colormap.append ('white')
clips.append (light_patch2)
clips.append ( dark_patch)
clips.append (light_patch)
elif lunacity < 1.0: # .75, 1. => third quarter moon, new moon
print ("q3-q4")
assert .75 <= lunacity
lun = lunacity - .75 # 0. , .25
assert lun >= 0
assert lun < .25
lun = lun * 4 # 0. , 1.
assert lun >= 0
assert lun < 1
rx, ry = xrng * lun , yrng
# light left, dark middle, dark right
dark_patch2 = mpl.patches.Rectangle ( # right
*rrect, visible=False)
light_patch = mpl.patches.Rectangle ( # left
# apparently +90 to +270 is the same as +0 to +360 in matplotlib.
*lrect, visible=False)
# because matplotlib doesn't understand angles, drawing patch3 will hide light_patch
#dark_patch3 = mpl.patches.Arc ( # right
# ORIGIN, xrng, yrng, theta1=-90, theta2=+90, visible=False)
dark_patch = mpl.patches.Ellipse ( # middle
ORIGIN, rx, ry, visible=False)
colormap.append ('black')
colormap.append ('white')
#colormap.append ('black')
colormap.append ('black')
clips.append ( dark_patch2)
clips.append (light_patch)
#clips.append ( dark_patch3)
clips.append ( dark_patch)
n = len (clips)
# Ellipses for your sub-areas.
# Add more if you want more areas
# Apply the style of your areas here (colors, alpha, hatch, etc.)
areas = [
mpl.patches.Ellipse (
ORIGIN, xrng, yrng, # Perfectly fit your base ellipse
color=colormap [i], fill=True, alpha=1.0, # Add some style, fill, color, alpha
zorder=i)
for i in range (n) # Here, we have 3 areas
]
# Add all your components to your axe
ax.add_patch (ellipse)
for area, clip in zip (areas, clips):
ax.add_patch (area)
ax.add_patch (clip)
area.set_clip_path (clip) # Use clipping paths to clip you areas
from pygameplotlib import PyGamePlotLib
import datetime
from datetime import timedelta
import ephem
moon_name_db = ('wolf', 'snow', 'worm', 'pink', 'flower', 'strawberry', 'buck', 'sturgeon', 'corn', 'harvest', "hunter's", 'beaver', 'cold', 'blue')
def get_moon_name_helper (moon_no):
if moon_no is None: name = 'blue'
else: name = moon_name_db[moon_no]
name = "%s moon" % (name,)
return name
class MoonApp (PyGamePlotLib):
def __init__ (self, observer=None, *args, **kwargs):
PyGamePlotLib.__init__ (self, *args, **kwargs)
self.name = None
self.set_time (compute=False)
def notify (self, time=None): self.set_time (time)
def set_time (self, time=None, compute=True):
if time is None: time = datetime.datetime.utcnow ()
self.time = time
if compute: self.compute ()
def compute_helper (self, fig):
PyGamePlotLib.compute_helper (self, fig)
time = self.time
if time is None: return
self.phase = self.get_moon_phase ()
#ax = fig.add_subplot (facecolor='black')
ax = fig.add_subplot ()
ax.get_xaxis ().set_visible (False)
ax.get_yaxis ().set_visible (False)
ax.patch.set_visible (False)
arc_patch (ax, self.phase)
"""
if self.phase < .25:
# TODO waxing crescent
x = w / 2
y = h / 2
start_angle = SOUTH.radians ()
stop_angle = NORTH.radians ()
color = (200, 200, 200)
for r in range (1, inf):
pygame.gfxdraw.arc (self.ss, x, y, r, start_angle, stop_angle, color)
pass
elif self.phase == .25:
# TODO first quarter
pass
elif self.phase < .5:
# TODO waxing gibbous
pass
elif self.phase == .5:
# TODO full moon
pass
elif self.phase < .75:
# TODO waning gibbous
pass
elif self.phase == .75:
# TODO third quarter
pass
elif self.phase < 1:
# TODO waning crescent
pass
else:
# TODO new moon
pass
"""
def get_moon_phase (self): # https://michelanders.blogspot.com/2011/01/moon-phases-with-pyephem.html
# g = self.observer
time = self.time
time = ephem.Date (time)
nnm = ephem.next_new_moon (time)
pnm = ephem.previous_new_moon (time)
# for use w. moon_phases.ttf A -> just past newmoon,
# Z just before newmoon
# '0' is full, '1' is new
# note that we cannot use m.phase as this is the percentage of the moon
# that is illuminated which is not the same as the phase!
lunation = (time - pnm) / (nnm - pnm)
return lunation
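# Lunation runs from 0.0 at new moon through ~0.5 at full moon and back to 1.0 at the
# next new moon; arc_patch() above reads 0.25 and 0.75 as the first and third quarter.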
"""
def set_subsurface (self, ss):
CircleApp.set_subsurface (self, ss)
self.compute ()
def draw_scene (self, temp=None):
CircleApp.draw_scene (self, temp)
if temp is None: temp = self.ss
if self.computed_image is None: self.compute ()
if self.computed_image is None: return
temp.blit (self.computed_image, ORIGIN)
"""
"""
def run_loop (self, events, keys): # TODO move this to the GUI ?
if isinstance (self.gps, Client): self.gps.Loop ()
if isinstance (self.gps, Server): self.gps.Pump ()
self.set_time ()
PyGamePlotLib.run_loop (self, events, keys)
"""
"""
def run_loop (self, events, keys): # TODO move this to the GUI ?
#if isinstance (self.gps, Client): self.gps.Loop ()
#if isinstance (self.gps, Server): self.gps.Pump ()
time = self.time
time = time + timedelta (hours=9)
self.set_time (time)
PyGamePlotLib.run_loop (self, events, keys)
"""
#def set_gps (self, gps):
# self.gps = gps
# self.set_observer (gps.observer)
def get_moon_name (self): return self.get_moon_names ()[-1]
def get_moon_names (self):
times = self.get_full_moon_times ()
moon_no = 0
name = get_moon_name_helper (moon_no)
names = [name]
time = times[0]
month = time.month
for time in times[1:]:
month2 = time.month
if month == month2: name = get_moon_name_helper (None)
else:
moon_no = moon_no + 1
name = get_moon_name_helper (moon_no)
names.append (name)
month = month2
names = tuple (names)
print ("names: %s" % (tuple (zip (names, times)),))
return names
def get_full_moon_times (self):
time = self.time
ny = time.replace (month=1, day=1, hour=0, minute=0, second=0)
assert time > ny
#time = ephem.Date (time)
#ny = ephem.Date (ny)
nfm = ephem. next_full_moon (time).datetime ()
times = [nfm]
while True: # >= ?
pfm = ephem.previous_full_moon (time).datetime ()
if pfm <= ny: break
time = pfm
times.append (time)
assert pfm <= ny
assert time >= ny
times = times[::-1]
times = tuple (times)
return times
from circle_app import CircleApp
def blit_alpha (target, source, location, opacity): # https://nerdparadise.com/programming/pygameblitopacity
x, y = location
temp = pygame.Surface ((source.get_width (), source.get_height ())).convert ()
temp.blit (target, (-x, -y))
temp.blit (source, ORIGIN)
temp.set_alpha (opacity)
target.blit (temp, location)
#def get_pic_name (name): # use name as query param, return URL of resource ?
# return "shiva.png"
class CircleMoonApp (CircleApp, MoonApp):
# TODO compute moon name, fetch background
def __init__ (self, notify_art=None, rotation=None, *args, **kwargs):
CircleApp.__init__ (self, rotation, *args, **kwargs)
MoonApp .__init__ (self, *args, **kwargs)
self.pic_name = None
self.raw_pic = None
self.pic = None
self.notify_art = notify_art
def start_running (self):
CircleApp.start_running (self)
MoonApp .start_running (self)
def stop_running (self):
CircleApp.stop_running (self)
MoonApp .stop_running (self)
def set_subsurface (self, ss):
CircleApp.set_subsurface (self, ss)
# TODO handle geometries&rotations here
MoonApp .set_subsurface (self, None, True)
def compute (self):
MoonApp.compute (self)
name = self.name
self.name = self.get_moon_name ()
#pic_name = self.pic_name
if name != self.name:
if self.notify_art is not None: self.notify_art (self.name) # notify troller
#self.pic_name = get_pic_name (self.name) # use self.name as query param to get resource
#raw_pic = self.raw_pic
#if pic_name != self.pic_name:
# self.raw_pic = pygame.image.load (self.pic_name) # load resource
# self.raw_pic = self.raw_pic.convert_alpha ()
# self.set_background (self.pic_name)
#pic = self.pic
#if raw_pic != self.raw_pic:
# self.pic = pygame.transform.scale (self.raw_pic, (w, h)) # prepare resource for blitting
def notify_bg (self, pic_name):
if pic_name == self.pic_name: return
self.pic_name = pic_name
self.set_background (self.pic_name)
#def draw_cropped_scene (self, temp):
# #MapApp .draw_scene (self, temp)
# CircleApp.draw_cropped_scene (self, temp)
#
# size = temp.get_size ()
# moon = pygame.Surface (size)
# MoonApp .draw_scene (self, moon)
# opacity = 100
# blit_alpha (temp, moon, ORIGIN, opacity)
def draw_foreground (self, temp):
CircleApp.draw_foreground (self, temp)
size = temp.get_size ()
moon = pygame.Surface (size)
MoonApp .draw_foreground (self, moon)
opacity = 255 / 2
blit_alpha (temp, moon, ORIGIN, opacity)
def positive_space (self, is_root=True): raise Exception ()
def negative_space (self, is_root=True): raise Exception ()
def minsz (self): raise Exception ()
if __name__ == "__main__":
#from gps_client import GPSClient
from artwork_client import ArtworkClient
from hal import HAL9000
def main ():
a = CircleMoonApp ()
n = a.notify_bg # cb for artwork client to set background of moon app
b = ArtworkClient (n)
a.notify_art = b.notify_request # cb for moon app to request background from artwork client
def new_loop (events, keys):
self = a
time = self.time
time = time + timedelta (hours=9)
self.set_time (time)
PyGamePlotLib.run_loop (self, events, keys)
a.run_loop = new_loop
with HAL9000 (app=a) as G:
#h = "localhost"
#p = 1717
#n = a.set_observer
#g = GPSClient (h, p, n)
#a.set_gps (g)
G.run ()
main ()
quit ()
| [
"matplotlib"
] |
4f964e5f99967a55b6246f244ae847fd5c7735bc | Python | CuriousKomodo/insta_graph | /graphnoods/data_processing/insta_scraper.py | UTF-8 | 3,229 | 2.78125 | 3 | [] | no_license | import json
import os
import urllib.request
import PIL
import requests
from bs4 import BeautifulSoup as bs
from PIL import Image
import matplotlib.pyplot as plt
from natebbcommon.logger import initialise_logger
from natebbwebcapture.webdriver_firefox import WebdriverFirefox
from webdriver_utils.common import timed_scroll
from config.webdriver_config import WebdriverConfig
class InstagramScraper:
def __init__(self, config):
self.config = config
self.webdriver = WebdriverFirefox(
page_load_timeout=config.page_load_timeout,
browser_emulation=config.browser_emulation,
viewport_size_h_w=config.viewport_size_h_w,
headless=config.headless,
)
self.scroll_time = config.scroll_time
self.scroll_pause_time = config.scroll_pause_time
self.ins_explore_url = 'https://www.instagram.com/explore'
self.image_save_path = './'
def extract_links_of_posts_by_tag(self, hashtag='food'):
self.webdriver.get(os.path.join(self.ins_explore_url, 'tags', hashtag))
if self.scroll_time:
timed_scroll(self.webdriver.driver, self.scroll_time, self.scroll_pause_time)
url_shortcodes = []
dom_string = self.webdriver.driver.page_source # obtain DOM
soup = bs(dom_string, 'html.parser')
body = soup.find('body')
script = body.find('script', text=lambda t: t.startswith('window._sharedData'))
page_json = script.contents[0].split(' = ', 1)[1].replace(';', '')
soup = json.loads(page_json)
# Find elements by x-path maybe?
for url in soup['entry_data']['TagPage'][0]['graphql']['hashtag']['edge_hashtag_to_media']['edges']:
url_shortcodes.append(url['node']['shortcode'])
print('Extracted {} post shortcodes for hashtag:{}'.format(len(url_shortcodes), hashtag))
return url_shortcodes
def save_images_by_url(self, list_url_shortcodes, image_save_path='./'):
for short_code in list_url_shortcodes:
image_url = os.path.join(self.ins_explore_url, 'p', short_code, 'media/?size=m')
try:
image = Image.open(requests.get(image_url, stream=True).raw)
image.save(os.path.join(image_save_path + "{}.jpg".format(short_code)))
except PIL.UnidentifiedImageError:
print('Cannot load image for shortcode:{}'.format(short_code))
def scrap_caption_by_url(self):
pass
def scrap_hashtags_by_url(self):
pass
def display_images_from_short_code(self, short_code, size='m'):
assert size in ['s','m','l']
image_url = os.path.join(self.ins_explore_url, 'p', short_code, 'media/?size={}'.format(size))
image = Image.open(urllib.request.urlopen(image_url))
image.show()
def finish_extraction(self):
# Maybe produce some sort of summary
self.webdriver.driver.close()
if __name__ == '__main__':
root_logger = initialise_logger()
hashtag = 'food'
ins_scraper = InstagramScraper(config=WebdriverConfig())
url_shortcodes = ins_scraper.extract_links_of_posts_by_tag(hashtag)
ins_scraper.display_images_from_short_code(url_shortcodes[0])
| [
"matplotlib"
] |
26838f68cfc8e3865b1338626201081541f083ac | Python | aniket3331/mlprojects | /titanicml.py | UTF-8 | 3,190 | 2.859375 | 3 | [] | no_license |
import numpy as numpy
import pandas as pdd
import seaborn as sns
import matplotlib.pyplot as plt
data=pdd.read_csv("train.csv",header=0,sep=',',quotechar='"')
#a=data.info()
#b=data.describe()
#c=data.pivot_table(index='Sex',values='Survived')
#d=data.pivot_table(index='Pclass',values='Survived')
#e=d.plot.bar()
#f=plt.show()
#g=sns.FacetGrid(data,col='Survived')
#l=g.map(plt.hist,'Age',bins=20)
#grid=sns.FacetGrid(data,row='Pclass',col='Sex',height=2.2,aspect=1.6)
#grid.map(plt.hist,'Age',alpha=.5,bins=20)
#print(plt.show())
def process_age(df,cut_points,label_names):
df["Age"] = df["Age"].fillna(-0.5)
df["Age_categories"] = pdd.cut(df["Age"],cut_points,labels=label_names)
return df
cut_points = [-1,0, 5, 12, 18, 35, 60, 100]
label_names = ["Missing", 'Infant', "Child", 'Teenager', "Young Adult", 'Adult', 'Senior']
data_new = process_age(data,cut_points,label_names)
age_cat_pivot = data_new.pivot_table(index="Age_categories",values="Survived")
age_cat_pivot.plot.bar()
#plt.show()
def create_dummies(df,column_name):
dummies = pdd.get_dummies(df[column_name],prefix=column_name)
df = pdd.concat([df,dummies],axis=1)
return df
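# e.g. create_dummies(data_new, "Sex") appends one-hot indicator columns "Sex_female"
# and "Sex_male" alongside the original column.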
data_new = create_dummies(data_new,"Pclass")
#print(data_new.head())
data_new = create_dummies(data_new,"Sex")
data_new = create_dummies(data_new,"Age_categories")
columns = ['Pclass_1', 'Pclass_2', 'Pclass_3', 'Sex_female', 'Sex_male',
'Age_categories_Missing','Age_categories_Infant',
'Age_categories_Child', 'Age_categories_Teenager',
'Age_categories_Young Adult', 'Age_categories_Adult',
'Age_categories_Senior']
X_data = data_new[columns]
Y_data = data_new['Survived']
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC, LinearSVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
train_X, test_X, train_y, test_y = train_test_split(X_data,Y_data, test_size=0.2,random_state=0)
lr = LogisticRegression()
lr.fit(train_X, train_y)
predictions = lr.predict(test_X)
acc_log = accuracy_score(test_y, predictions)
knn = KNeighborsClassifier(n_neighbors = 3)
knn.fit(train_X, train_y)
predictions = knn.predict(test_X)
acc_knn = accuracy_score(test_y, predictions)
decision_tree = DecisionTreeClassifier()
decision_tree.fit(train_X, train_y)
predictions = decision_tree.predict(test_X)
acc_decision_tree = accuracy_score(test_y, predictions)
svc = SVC(gamma='scale')
svc.fit(train_X, train_y)
predictions = svc.predict(test_X)
acc_svc = accuracy_score(test_y, predictions)
random_forest = RandomForestClassifier(n_estimators=100)
random_forest.fit(train_X, train_y)
predictions = random_forest.predict(test_X)
acc_random_forest = accuracy_score(test_y, predictions)
models = pdd.DataFrame({
'Model': ['Support Vector Machines', 'KNN', 'Logistic Regression',
'Random Forest', 'Decision Tree'],
'Score': [acc_svc, acc_knn, acc_log,
acc_random_forest, acc_decision_tree]})
print(models.sort_values(by='Score', ascending=False)) | [
"matplotlib",
"seaborn"
] |
782c054b2e8d42c9d3a1890ff95603dbe4847ad4 | Python | jorenretel/phd_thesis | /python_plots/beta_sheet_distance_table/beta_sheet_distances.py | UTF-8 | 3,281 | 2.53125 | 3 | [] | no_license |
from Bio.PDB import PDBParser
from numpy import std, average
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
OmpA_beta_sheet_pairs = [(8, 42), (10, 40), (12, 38), (14, 36), (16, 34),
(52, 80), (54, 78), (75, 103), (77, 101), (79, 99),
(81, 97), (83, 95), (85, 93)]
parser = PDBParser()
#structure = parser.get_structure('OmpA', '/homes/retel/qj/pdb1qjp.ent')
structure = parser.get_structure('OmpA', 'pdb2ge4.ent')
#nuclei = ['CA', 'CB', 'C', 'HA', 'H']
nuclei = ['H']
def calculate_distances():
#nuclei = ['CA', 'CB', 'C']
#nuclei = ['CA', 'CB', 'C', 'N', 'H', 'HA', 'HB']
#nuclei = ['H', 'HA', 'HB']
intra = {}
sequential = {}
longrange1 = {}
longrange2 = {}
for chain in structure.get_chains():
residues = list(chain.get_residues())
for one, two in OmpA_beta_sheet_pairs:
resA = residues[one]
resB = residues[one+1]
resC = residues[two-1]
resD = residues[two]
for nucleus1 in nuclei:
for nucleus2 in nuclei:
if nucleus1 in resA and nucleus2 in resA:
intra[(nucleus1, nucleus2)] = intra.get((nucleus1, nucleus2), [])
intra[(nucleus1, nucleus2)].append(resA[nucleus1] - resA[nucleus2])
if nucleus1 in resA and nucleus2 in resB:
sequential[(nucleus1, nucleus2)] = sequential.get((nucleus1, nucleus2), [])
sequential[(nucleus1, nucleus2)].append(resA[nucleus1] - resB[nucleus2])
if nucleus1 in resA and nucleus2 in resD:
longrange1[(nucleus1, nucleus2)] = longrange1.get((nucleus1, nucleus2), [])
longrange1[(nucleus1, nucleus2)].append(resA[nucleus1] - resD[nucleus2])
if nucleus1 in resB and nucleus2 in resC:
longrange2[(nucleus1, nucleus2)] = longrange2.get((nucleus1, nucleus2), [])
longrange2[(nucleus1, nucleus2)].append(resB[nucleus1] - resC[nucleus2])
replace_by_stats(intra)
replace_by_stats(sequential)
replace_by_stats(longrange1)
replace_by_stats(longrange2)
return intra, sequential, longrange1, longrange2
def nucleus_in_residue(residue, nucleus):
for name in residue.child_dict.keys():
if nucleus in name:
return residue[name]
def replace_by_stats(dictionary):
for nuclei, distance in dictionary.items():
dictionary[nuclei] = (average(distance), std(distance))
def create_correlation_plot(distances):
xaxis_labels = nuclei
yaxis_labels = nuclei * 4
data = []
for combination in distances:
for nucleus2 in xaxis_labels:
row = []
data.append(row)
for nucleus1 in xaxis_labels:
if (nucleus1, nucleus2) in combination:
row.append(combination[nucleus1, nucleus2][0])
else:
row.append(None)
data = pd.DataFrame(data=data, index=yaxis_labels, columns=xaxis_labels)
sns.heatmap(data, cmap="YlGn_r", annot=True)
plt.show()
create_correlation_plot(calculate_distances())
| [
"matplotlib",
"seaborn"
] |
92f0f7730344068a04ef0a4843d8e1183d7694d2 | Python | KKalem/toolbox | /dynamic_point.py | UTF-8 | 4,036 | 2.703125 | 3 | [] | no_license | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Author: Ozer Ozkahraman ([email protected])
# Date: 2018-05-24
import numpy as np
try:
# explicit relative imports with dots in front.
from .Pid import PID
from .Quaternion import Quat
from . import geometry as geom
except SystemError:
# absolute imports when we want to run this by itself
from Pid import PID
from Quaternion import Quat
import geometry as geom
RADTODEG = 360 / (np.pi * 2)
class DynamicPoint:
def __init__(self,
mass=1,
init_pos=(0,0,0),
init_vel = (0,0,0),
init_acc = (0,0,0),
damping=None,
max_vel=None,
max_acc=None
):
self.mass = mass
self.pos = np.array(init_pos, dtype='float64')
self.vel = np.array(init_vel, dtype='float64')
self.acc = np.array(init_acc, dtype='float64')
self.damping = damping
self.max_vel = max_vel
self.max_acc = max_acc
def _limit_vel(self, vel):
if self.max_vel is not None:
vel = geom.vec_limit_len(vel, self.max_vel)
return vel
def _limit_acc(self, acc):
if self.max_acc is not None:
acc = geom.vec_limit_len(acc, self.max_acc)
return acc
def _apply_damping(self):
if self.damping is not None:
self.vel -= self.vel*self.damping
def update(self, dt):
self.vel += self.acc * dt
self.vel = self._limit_vel(self.vel)
self.pos += self.vel * dt
def get_position(self):
return self.pos
def get_orientation_quat(self):
# use the velocity as orientation
# ra=heading/yaw, dec=pitch, roll=roll
yaw,pitch = geom.vec3_to_yaw_pitch(self.vel)
roll = 0
yaw*=RADTODEG
pitch*=RADTODEG
return Quat([yaw,pitch,roll]).q
class VelocityPoint(DynamicPoint):
def __init__(self, speed=None, **kwargs):
super().__init__(**kwargs)
self.target = DynamicPoint()
if speed is None:
self.speed = 999999
else:
self.speed = speed
def set_target(self, target):
self.target.pos = np.array(target, dtype='float64')
def update(self, dt):
if self.target is not None:
target_dist, target_vec = geom.vec_normalize(self.target.get_position() - self.pos)
if target_dist > 0.1:
self.vel = target_vec * self.speed
else:
self.vel = np.zeros(3)
self.vel = self._limit_vel(self.vel)
self.pos += self.vel * dt
class PIDPoint(DynamicPoint):
def __init__(self,
pid=None,
**kwargs):
super().__init__(**kwargs)
self.target = DynamicPoint()
if pid is None:
self.pid = PID(P=0.2, D=0.5)
else:
self.pid = PID(*pid)
def set_target(self, target):
self.target.pos = np.array(target, dtype='float64')
self.pid.clear()
def update(self, dt):
self.acc = np.array((0.,0.,0.))
if self.target is not None:
target_dist, target_vec = geom.vec_normalize(self.target.get_position() - self.pos)
if target_dist > 0:
forward_acc = self.pid.update(target_dist, dt)
self.acc -= forward_acc * target_vec
self._apply_damping()
self.acc = self._limit_acc(self.acc)
self.vel += self.acc * dt
self.vel = self._limit_vel(self.vel)
self.pos += self.vel * dt
if __name__ == '__main__':
import matplotlib.pyplot as plt
plt.ion()
p = PIDPoint()
p.set_target((5,5,5))
trace = []
for i in range(10000):
p.update(0.01)
trace.append(p.pos.copy())
p.set_target((0,0,0))
for i in range(10000):
p.update(0.01)
trace.append(p.pos.copy())
trace = np.array(trace)
plt.plot(range(len(trace)),trace[:,0])
| [
"matplotlib"
] |
c583f8ee9dcded37d054e7c4b5bf1ce8ece4f8da | Python | ulmefors/CarND-Vehicle-Detection | /lib/feature_extractor.py | UTF-8 | 4,975 | 3 | 3 | [] | no_license | import numpy as np
import matplotlib.image as mpimg
import cv2
from skimage.feature import hog
class FeatureExtractor:
"""
Extract features from images. Code developed with inspiration from Udacity Self-Driving Car Nanodegree.
"""
def __init__(self, feature_config, color_space):
self.feature_config = feature_config
self.color_space = color_space
SPATIAL_SIZE = (64, 64)
HIST_BINS = 32
def bin_spatial(self, image, size=SPATIAL_SIZE):
# Create feature vector using pixel values
features = cv2.resize(image, size).ravel()
return features
# Define a function to compute color histogram features
def color_hist(self, image, nbins=HIST_BINS, bins_range=(0, 256)):
channel1_hist = np.histogram(image[:, :, 0], bins=nbins, range=bins_range)
channel2_hist = np.histogram(image[:, :, 1], bins=nbins, range=bins_range)
channel3_hist = np.histogram(image[:, :, 2], bins=nbins, range=bins_range)
hist_features = np.concatenate((channel1_hist[0], channel2_hist[0], channel3_hist[0]))
return hist_features
def get_hog_features(self, image, orient, pix_per_cell, cell_per_block, vis=False, feature_vec=True):
args = {
'orientations': orient,
'pixels_per_cell': (pix_per_cell,) * 2,
'cells_per_block': (cell_per_block,) * 2,
'transform_sqrt': True,
'visualise': vis,
'feature_vector': feature_vec
}
if vis:
features, hog_image = hog(image, **args)
return features, hog_image
else:
features = hog(image, **args)
return features
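# Illustrative arithmetic (assuming the 64x64 training images and the defaults used in
# this project: orient=9, pix_per_cell=8, cell_per_block=2): blocks per axis = 64/8 - 2 + 1 = 7,
# so the flattened HOG vector has 7*7*2*2*9 = 1764 values per image channel.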
def __extract_features(self, feature_image, spatial_size=SPATIAL_SIZE,
hist_bins=HIST_BINS, orient=9, pix_per_cell=8, cell_per_block=2,
hog_channel=0, spatial_feat=True, hist_feat=True, hog_feat=True):
file_features = []
if spatial_feat:
spatial_features = self.bin_spatial(feature_image, size=spatial_size)
file_features.append(spatial_features)
if hist_feat:
hist_features = self.color_hist(feature_image, nbins=hist_bins)
file_features.append(hist_features)
if hog_feat:
args_dict = {
'vis': False,
'feature_vec': True
}
args_list = [orient, pix_per_cell, cell_per_block]
if hog_channel == 'ALL':
channels = range(feature_image.shape[2])
else:
channels = [hog_channel]
hog_features = []
for channel in channels:
hog_features.append(self.get_hog_features(feature_image[:, :, channel], *args_list, **args_dict))
# Unroll features if more than 1 dimension
# if len(np.array(hog_features).shape) > 1:
if np.ndim(hog_features) > 1:
hog_features = np.ravel(hog_features)
file_features.append(hog_features)
return np.concatenate(file_features)
def extract_features_from_image(self, image):
# Convert color space from RGB to configuration color space (if required)
feature_image = self.convert_color_space(image)
features = self.__extract_features(feature_image, **self.feature_config)
return features
def extract_features_from_files(self, image_files):
features = []
for img_file in image_files:
# Read RGB version from disk
image = mpimg.imread(img_file)
# Scale values to make png and jpeg compatible
if img_file.endswith('png'):
image = (image * 255).astype(np.uint8)
file_features = self.extract_features_from_image(image)
features.append(file_features)
return features
def convert_color_space(self, image):
""" Converts color space in accordance with configuration
:param image: image in RGB color space
:return: image in configured color space
"""
# Load color space
color_space = self.color_space
# Convert from RBG to chosen color space
if color_space != 'RGB':
if color_space == 'HSV':
feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
elif color_space == 'LUV':
feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2LUV)
elif color_space == 'HLS':
feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2HLS)
elif color_space == 'YUV':
feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2YUV)
elif color_space == 'YCrCb':
feature_image = cv2.cvtColor(image, cv2.COLOR_RGB2YCrCb)
else:
feature_image = np.copy(image)
return feature_image
def main():
pass
if __name__ == '__main__':
main()
| [
"matplotlib"
] |
4e3bf57c639652f83ff5924c8a3f3e3f39c62cd8 | Python | jlan84/numpy-assignment | /src/jl_basic.py | UTF-8 | 1,106 | 3.484375 | 3 | [] | no_license | import numpy as np
import matplotlib.pyplot as plt
def column_vector(n):
return np.arange(n)
def random_array(r, c):
return np.random.rand(r,c)
def color_replace(v):
return np.where(v,'blue', 'red')
def compute_true_false_sums(x,b):
dic = {}
dic[True] = np.sum(x[b==True])
dic[False] = np.sum(x[b==False])
return dic
def select_from_two_arrays(x,y,b):
return np.where(b,x,y)
def sum_of_squred_diff(x,y):
return np.sum((x-y)**2)
if __name__ == "__main__":
v = column_vector(3)
print(v.shape)
v2 = random_array(4,3)
print(v2)
x = np.array([0,0,1,0])
print(color_replace(x))
x = np.array([0, 1, 2, 3, 4, 5])
b = np.array([True, True, False, True, False, False])
print(compute_true_false_sums(x,b))
x = np.array([1, 2, 3, 4, 5, 6])
y = np.array([10, 20, 30, 40, 50, 60])
b = np.array([True, True, False, True, False, True])
print(select_from_two_arrays(x,y,b))
x = np.array([0, 1, 0, 1, 0, 1])
y = np.array([0, 1, 2, 3, 4, 5])
print(sum_of_squred_diff(x,y)) | [
"matplotlib"
] |
5a5d709f27630a40f7a596634af70de5be70b6ae | Python | Manukhurana97/Machine-Learning1 | /12 Mean shift.py | UTF-8 | 1,119 | 2.953125 | 3 | [] | no_license | # Machine automatically detects the numbers of clusters
# It Take all the feature -center as a cluster center
# Its a hierarchical clustering algo
import numpy as np
from sklearn.cluster import MeanShift
import pandas as pd
import matplotlib.pylab as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import style
from sklearn.datasets.samples_generator import make_blobs
style.use('ggplot')
# initial centers
center = [[1, 1, 1], [5, 5, 5], [3, 10, 10]]
X, _ = make_blobs(n_samples=1000, centers=center, cluster_std=1.5)
ms = MeanShift()
ms.fit(X)
lables = ms.labels_
# final centers
cluster_center = ms.cluster_centers_
n_cluster = len(np.unique(lables))
color = 10*['r', 'g', 'c', 'k', 'y', 'm']
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
for i in range(len(X)):
ax.scatter(X[i][0], X[i][1], X[i][2], c=color[lables[i]], marker='o')
ax.scatter(cluster_center[:, 0], cluster_center[:, 1], cluster_center[:, 2], marker='X', color='k', zorder=10)
plt.show()
if len(center) == len(cluster_center):
for i in range(len(center)):
print(cluster_center[i]-center[i])
| [
"matplotlib"
] |
90dc533e7acee73be7627c125f6e8cafcef73a29 | Python | Mengjiao926/Network-Analysis-of-Titanic-Facebook-Page | /Json-Pajek/Code for answers.py | UTF-8 | 1,155 | 3.65625 | 4 | [] | no_license | import networkx as net
import matplotlib.pyplot as plt
g = net.read_pajek('chenchupajek.net')
print("Find the number of nodes in the graph?")
print ("Nodes: ",len(g))
print("Find the number of components (connected subgraphs)?")
print ("Number of components (Connected Subgraphs): ",len(list(net.connected_component_subgraphs(g))))
print("Find the node with the maximum degree? What is that degree?")
deg = net.degree(g)
print("Node with Maximum Degree: ", max(deg, key=deg.get))
print("Maximum Degree: ", max(deg.values()))
undirected = g.to_undirected()
print ('Number of edges for undirected graph: ',len(undirected.edges()))
print("What is the density of the graph?")
print ("Density of the Graph: ",net.density(g))
print("Create a degree distribution plot?")
h = g.to_undirected()
print("Number of edges in the converted undirected graph is: %s"%h.number_of_edges())
print("Density of the undirected graph is: %s"%net.density(h))
degree_sequence=sorted(net.degree(g).values(),reverse=True)
dmax = max(degree_sequence)
plt.hist(degree_sequence,bins=dmax)
plt.title("Degree Distribution Plot")
plt.ylabel("count")
plt.xlabel("degree")
plt.show() | [
"matplotlib"
] |
9db7a03d65679c6566ff252908fe6b312c0579b1 | Python | Andreasksalk/ReinforcementLearning | /Chapter-02/2-5.py | UTF-8 | 1,867 | 3.046875 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 12 21:17:46 2019
@author: a.k.salk
"""
import numpy as np
import matplotlib.pyplot as plt
num_bandits = 10
steps = 10000
alpha = 0.1
epsilon = 0.1
mean=0
std=0.01
# Random walk paramters
plus=0.01
minus=-plus
num_actions = 1
actions = []
# Creating bandits
bandits = []
for i in range(num_bandits):
bandits.append([np.random.normal(mean, std), std])
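# Each bandit is stored as [true mean, std]. The true means drift by a small random walk
# inside the main loop, making the problem nonstationary; that is why the value estimates
# use a constant step size alpha rather than plain sample averages. (The plus/minus
# parameters above are defined but not used by this version of the walk.)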
# h loop defines the number of re-runs to generate a more fluent distribution on the problem.
for h in range(100):
opt_a = []
# Collecting max value for and position
m_bandit = 0
b_bandit = 0
for i in range(num_bandits):
if bandits[i][0] > m_bandit:
m_bandit = bandits[i][0]
b_bandit = i
q = np.zeros(num_bandits)
n = np.zeros(num_bandits)
for i in range(steps):
# Creating the random walk
for bandit in bandits:
if np.random.uniform() < 0.5:
bandit[0] += np.random.normal(mean, std)
else:
bandit[0] -= np.random.normal(mean, std)
selected_bandit = np.argmax(q)
if np.random.uniform() < epsilon:
selected_bandit = np.random.choice(len(q))
reward = np.random.normal(bandits[selected_bandit][0], bandits[selected_bandit][1])
n[selected_bandit] += 1
q[selected_bandit] += alpha * (reward - q[selected_bandit])
opt_a.append(int(selected_bandit == b_bandit))
if i <= num_actions:
action = sum(opt_a)/len(opt_a)
else:
action = sum(opt_a[-num_actions:])/num_actions
if h == 0:
actions.append(action)
else:
actions[i] += (action - actions[i]) / (h + 1)
plt.axis([0, steps, -1, 1])
plt.plot(range(steps), actions)
plt.show() | [
"matplotlib"
] |
c50efe487d8a5f643b105629ebaf57c1e254e88b | Python | atwilson0729/vislab | /Plots/Covid plots/Stack Bar Chart.py | UTF-8 | 4,308 | 3.1875 | 3 | [] | no_license | <<<<<<< HEAD
import pandas as pd
import plotly.offline as pyo
import plotly.graph_objs as go
# Load CSV file from Datasets folders
df = pd.read_csv('../../Datasets/CoronavirusTotal.csv')
# Removing empty spaces from State column to avoid errors
df = df.apply(lambda x: x.str.strip() if x.dtype == "object" else x)
# Creating unrecovered column
df['Unrecovered'] = df['Confirmed'] - df['Deaths'] - df['Recovered']
# Removing China and others from data frame
df = df[(df['Country'] != 'China')]
# Creating sum of number of cases group by Country Column
new_df = df.groupby(['Country']).agg(
{'Confirmed': 'sum', 'Deaths': 'sum', 'Recovered' : 'sum', 'Unrecovered': 'sum'}).reset_index()
# Sorting values and select 20 first value
new_df = new_df.sort_values(by=['Confirmed'], ascending=[False]).head(20).reset_index()
# Preparing data
# Creates three plotly graph objects for bar chart data, each with an x-axis of 'Country' in a pandas df
# Each of the three plotly objects is a different color on the stacked chart
# The first object has a y-axis of Unrecovered data in pandas df
# The second object has a y-axis of recovered data in pandas df
# The third object has a y-axis of deaths data in pandas df
# The graph objects are then all stored in a list named data
trace1 = go.Bar(x=new_df['Country'], y=new_df['Unrecovered'], name='Unrecovered', marker={'color': '#CD7F32'})
trace2 = go.Bar(x=new_df['Country'], y=new_df['Recovered'],name='Recovered', marker={'color': '#9EA0A1'})
trace3= go.Bar(x=new_df['Country'], y=new_df['Deaths'], name='Deaths', marker={'color': '#FFD700'})
data= [trace1, trace2, trace3]
# Preparing layout
layout = go.Layout(title='CoronaVirus Cases in the first 20 countries except China',
xaxis_title="Country",
yaxis_title="Number of Cases", barmode='stack')
# Plot the figure and saving in an HTML file
fig = go.Figure(data=data, layout=layout)
pyo.plot(fig, filename='stackbarchart.html')
#data = [
# go.Scatter(x=new_df['Recovered'],
# y=new_df['Unrecovered'],
# text=new_df['Country'],
# mode='markers',
# marker=dict(size=new_df['Confirmed'] /
#100,color=new_df['Confirmed'] /100, showscale=True))
#]
#layout = go.Layout(title='Corona Virus Confirmed Cases', xaxis_title="Recovered Cases",
# yaxis_title="Unrecovered Cases", hovermode='closest')
#fig = go.Figure(data=data, layout=layout)
#pyo.plot(fig, filename='bubblechart.html') | [
"plotly"
] |
3a623b1e1ec56f5b64bab8ed3e056bfcd947f7ef | Python | austin-psu-phases/atomPython | /python stuff/pathdistort.py | UTF-8 | 2,533 | 3.328125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Fri Sep 11 19:21:43 2015
@author: Austin
"""
import math
import random as rnd
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from math import sqrt, asin, cos, sin
print "hello"
##take a list of vetors from a text file
## format of file: space seperated list
## read in vector lists
twopi=6.2831853
def rotate (theta,vec):
# print(vec)
W1 = np.array([[0,-vec[2],vec[1]],[vec[2],0,-vec[0]],[-vec[1],vec[0],0]])
W2 = np.dot(W1,W1)
E = np.array([[1,0,0],[0,1,0],[0,0,1]])
a = sin(theta)
b = 1-cos(theta)
transform = E + a*W1 + b*W2
# print(W1)
# print(transform)
return transform
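# Note: rotate() above is Rodrigues' rotation formula R = I + sin(theta)*W + (1-cos(theta))*W^2,
# where W is the skew-symmetric cross-product matrix of the axis vector (assumed unit length).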
imgs=5
df1 = pd.read_csv('file1.txt', sep=' ')
print(df1)
coords1 = df1.values
df2 = pd.read_csv('file2.txt', sep=' ')
print(df2)
coords2 = df2.values
# list of difference between vectors
diff1=coords1-coords2
print(diff1)
# angle about z= arcsine of y/sqrt(x^2 + y^2)
# angle about y= arcsine of z/sqrt(z^2 + H^2)
# gives transform matrix to point x in direction of vector
# transform normal vector by angle1, angle2 and then randangle about x axis
#list of angles for transformation obtained below:
# get list of cross products (normal vectors) to diff1
#print((np.cross(coords1[3],coords2[3])))
unormvecs=[]
for i in range(len(diff1)):
    if diff1[i,1] == 0 and diff1[i,0] == 0:
        unormvecs.append([1, 0, 0])
        continue
norm=sqrt(pow(diff1[i,1],2)+pow(diff1[i,0],2))
# print(norm)
unormvecs.append([diff1[i,1]/norm, -diff1[i,0]/norm, 0])
print("this should be zero...if it worked")
k1=np.dot(rotate(rnd.random()*twopi,diff1[1]),unormvecs[1])
print(np.dot(k1,diff1[1]))
#for i in range(len(diff1)):
# n_place=[]
# norm=None
# print(n_place)
# print(norm)
# norm=np.linalg.norm(np.cross(coords1[i],coords2[i]))
# n_place=np.cross(coords1[i],coords2[i])*(1/norm)
# print(n_place)
# print(norm)
# unormvecs.append(n_place)
#
## interpolate by some odd number of points between coordinates
# define normRndRot
# takes a normal unit vector
# coordinate transform by anglez and angley
# apply a random rotation about x''
# unto each image add normRndRot
# make a imgs+2 by len(diff1) array
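# linear interpolation: for each pair i, generate imgs+2 evenly spaced points from coords1[i] to coords2[i]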
images=[[[coords1[i]+((j/float(imgs+1)))*(coords2[i]-coords1[i])]for j in range(0,imgs+2)] for i in range(len(diff1)) ]
images = np.array(images)
##
#vectors=np.random.RandomState(42).rand(100,3)
#print vectors
#print "space"
#np.save("output1",vectors)
#newvector = np.load("output1.npy")
#print newvector
## | [
"matplotlib"
] |
88c1508ddbbc7e89d6a191a2f9d6df066279c05f | Python | dillon-duncando/MSA-Discord-Bot | /runBot.py | UTF-8 | 11,128 | 3.09375 | 3 | [
"MIT"
] | permissive | import discord
import matplotlib.pyplot as plt
import numpy as np
import sympy as sp
import re, os, sys, math, time
from configparser import *
import pickle
from PIL import Image
import botFunctions as bf
from pnglatex import pnglatex
config = ConfigParser()
config.read('config.ini')
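# 'primelist' is a pickled list of precomputed primes, consumed lazily by primeFromList() in the $factor command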
with open ('primelist', 'rb') as fp:
primelist = pickle.load(fp)
def primeFromList():
for prime in primelist:
yield prime
class TimerError(Exception):
    # raised when Timer.start()/.time()/.stop() are called in the wrong order
    pass

#used to stop the bot from running a calculation that goes on for too long
class Timer:
def __init__(self):
self._start_time = None
def start(self):
"""Start a new timer"""
if self._start_time is not None:
raise TimerError(f"Timer is running. Use .stop() to stop it")
self._start_time = time.perf_counter()
def time(self):
if self._start_time is None:
raise TimerError(f"Timer is not running. Use .start() to start it")
elapsed_time = time.perf_counter() - self._start_time
return(elapsed_time)
def stop(self):
"""Stop the timer, and report the elapsed time"""
if self._start_time is None:
raise TimerError(f"Timer is not running. Use .start() to start it")
elapsed_time = time.perf_counter() - self._start_time
self._start_time = None
print(f"Elapsed time: {elapsed_time:0.4f} seconds")
#an allow list for what functions can be called when plotting to avoid arbitrary code execution
safe_list = ['acos', 'asin', 'atan', 'atan2', 'ceil', 'cos', 'cosh', 'degrees',
'e', 'exp', 'fabs', 'floor', 'fmod', 'frexp', 'hypot', 'ldexp',
'log', 'log10', 'modf', 'pi', 'pow', 'radians', 'sin', 'sinh',
'sqrt', 'tan', 'tanh']
safe_dict = {
"sin" : np.sin,
"cos" : np.cos,
"tan" : np.tan,
"arcsin" : np.arcsin,
"arccos" : np.arccos,
"arctan" : np.arctan,
"sinh" : np.sinh,
"cosh" : np.cosh,
"tanh" : np.tanh,
"arcsinh" : np.arcsinh,
"arccosh" : np.arccosh,
"arctanh" : np.arctanh,
"floor" : np.floor,
"ceil" : np.ceil,
"ln" : np.log,
"log" : np.log10,
"sinc" : np.sinc,
"sqrt" : np.sqrt,
"pi" : math.pi,
"tau" : 2*math.pi,
"e" : np.exp(1)
}
#declare discord client
client = discord.Client()
@client.event
async def on_ready():
print('We have logged in as {0.user}'.format(client))
@client.event
async def on_message(message):
#ignore messages from oneself
if message.author == client.user:
return
#Hello world for discord
if message.content.startswith('$hello'):
await message.channel.send('Hello!')
#return list of commands to give the bot
if(message.content.startswith('$commands') or message.content.startswith('$help')):
await message.channel.send(
'List of commands: \n'\
'$commands: Provide a list of commands \n'\
'$factor x: Factor a positive integer x \n'\
'$isPrime x: Check if integer x is prime \n'\
'$plot f(x), min, max: Plot a function of x from the min to the max \n'\
'$polar f(x), min, max: Plot a function of x from the min to the max in polar coordinates \n'\
'$roll xdy: Roll x number of y sided dice \n'\
'$tex or $Tex LaTeX: Render LaTeX code and return as an image'
)
#roll dice, like in d&d
if message.content.startswith('$roll '):
text = message.content[6:]
index = text.find("d")
if(index >= 0):
await message.channel.send(bf.roll(text, index))
#draw a 2d graph in cartesian coordinates
#only uses arithmetic and functions defined in the safe dict
if message.content.startswith('$plot '):
text = message.content[5:]
try:
split = re.split(",", text.replace("^", "**"), 2)
x = np.linspace(float(split[1]),float(split[2]), 100)
y = eval(split[0], {**safe_dict, 'x': x})
fig = plt.figure()
plot = plt.plot(x,y)
fig.savefig("newplot.png")
await message.channel.send(file=discord.File("newplot.png"))
except Exception as e:
print(e)
await message.channel.send('beep boop. something went wrong.')
#draw a 2d graph in polar coordinates
#only uses arithmetic and functions defined in the safe dict
if message.content.startswith('$polar '):
text = message.content[6:]
try:
split = re.split(",", text.replace("^", "**"), 2)
x = np.linspace(float(split[1]),float(split[2]), 100)
y = eval(split[0], {**safe_dict, 'x': x})
fig = plt.figure()
plot = plt.polar(x,y)
fig.savefig("newplot.png")
await message.channel.send(file=discord.File("newplot.png"))
except Exception as e:
await message.channel.send('beep boop. something went wrong.')
await message.channel.send(e)
#render latex
if('$tex ' in message.content):
command = message.content[(message.content.find('$tex ')+4):]
try:
sp.preview(command, viewer='file', filename='latex.png', euler=False)
old_im = Image.open('latex.png')
old_size = old_im.size
new_size = (old_im.size[0]+20, old_im.size[1]+20)
new_im = Image.new("RGB", new_size, color = (255, 255, 255))
new_im.paste(old_im, (int((new_size[0]-old_size[0])/2),
int((new_size[1]-old_size[1])/2)))
new_im.save('latex.png')
await message.channel.send(file=discord.File("latex.png"))
except Exception as e:
await message.channel.send('beep boop. something went wrong.')
await message.channel.send(e)
#render latex
if('$Tex ' in message.content):
command = message.content[(message.content.find('$Tex ')+4):]
try:
sp.preview(command, viewer='file', filename='latex.png', euler=False)
old_im = Image.open('latex.png')
old_size = old_im.size
new_size = (old_im.size[0]+20, old_im.size[1]+20)
new_im = Image.new("RGB", new_size, color = (255, 255, 255))
new_im.paste(old_im, (int((new_size[0]-old_size[0])/2),
int((new_size[1]-old_size[1])/2)))
new_im.save('latex.png')
await message.channel.send(file=discord.File("latex.png"))
except Exception as e:
print(e)
await message.channel.send('beep boop. something went wrong.')
#determine if a number is prime
if message.content.startswith('$isPrime '):
text = message.content[9:]
try:
x = int(text)
if(bf.isPrime(x)):
await message.channel.send(str(x) + ' is prime.')
else:
await message.channel.send(str(x) + ' is not prime.')
except Exception as e:
await message.channel.send('beep boop. something went wrong.')
await message.channel.send(e)
#factor a composite number, if possible within a reasonable amount of time
if message.content.startswith('$factor '):
text = message.content[8:]
try:
x = int(text)
if(x <= 0):
await message.channel.send("I don't know how to factor that.")
elif(x == 1):
await message.channel.send('1 = 1^1')
elif(bf.isPrime(x)):
await message.channel.send(str(x) + ' is a prime number.')
else:
factors = []
exponents = []
primes = primeFromList()
index = 0
timer = Timer()
timer.start()
time = timer.time()
pFactor = bf.powerFactor(x)
x = pFactor[0]
expIncrement = pFactor[1]
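                # bf.powerFactor appears to return (root, k) with root**k == x (inferred from usage),
                # so every prime exponent found below is scaled by expIncrement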
#jexpIncrement = 1
while not bf.isPrime(x) and x != 1 and time < 60:
try:
p = next(primes)
if(x%p == 0):
factors.append(p)
exponents.append(0)
while(x%p == 0):
exponents[index] = exponents[index] + expIncrement
x = x//p
index = index + 1
if(math.sqrt(x) == math.floor(math.sqrt(x))):
x = math.floor(math.sqrt(x))
expIncrement = expIncrement * 2
except Exception as e:
                        await message.channel.send(e)
factors.append(x)
exponents.append(1)
x = x//x
time = timer.time()
timer.stop()
if(bf.isPrime(x)):
factors.append(x)
exponents.append(expIncrement)
factorization = []
for i in range(len(factors)):
factorization.append(str(factors[i]) + "^" + str(exponents[i]))
if time >= 60:
await message.channel.send("Computation took too long.")
                    msg = str(int(text)) + ' = ' + "*".join(factorization) + " * a potential prime or semiprime " + str(x)
                else:
                    msg = str(int(text)) + ' = ' + "*".join(factorization)
await message.channel.send(msg)
except Exception as e:
print(e)
await message.channel.send('beep boop. something went wrong.')
#returns pi in terms of the superior circle constant
if message.content.startswith('$pi'):
await message.channel.send('π = τ/2')
#restart the bot, mostly useful to update the code of the bot
#it should open itself, then close itself, but sometimes it just opens itself.
if message.content.startswith('$restart'):
#check the message author is authorized to restart the bot
if message.author.name == config.get("main", "owner"):
await message.channel.send('brb')
os.startfile(__file__)
sys.exit()
#shut down the bot
if message.content.startswith('$shutdown'):
#check the message author is authorized to shut down the bot
if message.author.name == config.get("main", "owner"):
await message.channel.send('Goodbye.')
print("Terminating bot")
sys.exit()
#config is in the git ignore to avoid publishing the bot id in a public repository
#the bot id can be acquired by creating a bot application at https://discord.com/developers/applications
client.run(config.get("main", "id"))
| [
"matplotlib"
] |
47658ae30e2c23c3859589f04dc738e4198cd904 | Python | ggravanis/AdvancedML | /src/machine_learning/part23.py | UTF-8 | 8,263 | 2.96875 | 3 | [] | no_license | #!/usr/bin/env python
# coding: utf8
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.tight_bbox import adjust_bbox
from sklearn.multiclass import OneVsRestClassifier
from sklearn.svm import SVC
from sklearn.model_selection import train_test_split
from sklearn import metrics
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
import time
from sklearn.metrics import precision_recall_curve, average_precision_score, hamming_loss, \
label_ranking_average_precision_score, roc_auc_score, accuracy_score
from itertools import cycle
import random
def binary_to_text(df):
y_labels = []
labels_cumsum = {}
for index, item in df.iterrows():
labels = []
for label_index, value in item.iteritems():
if np.isnan(value):
continue
if int(value) == 1:
labels.append(label_index)
if labels_cumsum.get(label_index) is None:
labels_cumsum[label_index] = 0
else:
labels_cumsum[label_index] = labels_cumsum.get(label_index) + 1
y_labels.append(labels)
return y_labels, labels_cumsum
def load_data(filename):
load_path = "../../data/DeliciousMIL/"
data = pd.read_csv(load_path + filename)
data = data.set_index(['Unnamed: 0'])
return data
def bag_of_words(df):
d = {}
for row, items in df.iterrows():
my_dict = {}
for item in items:
if np.isnan(item):
continue
if int(item) in my_dict:
my_dict[int(item)] += 1
else:
my_dict[item] = 1
d[row] = my_dict
df = pd.DataFrame.from_dict(d)
df = df.transpose()
df = pd.DataFrame(df).fillna(0)
return df
def get_n_most_significant(df, n):
temp_dict = {}
for index, column in df.items():
temp_dict[index] = int(sum(column))
df = pd.DataFrame(temp_dict, index=[0])
df = df.transpose()
df = df.reset_index(drop=False)
df = df.rename({"index": "label", 0: "value"}, axis='columns')
# df['value'].plot.kde()
# plt.show()
df.label = df.label.astype(int)
df = df.sort_values(by="value", ascending=False)
x = []
y = []
label = []
counter = 0
for index, item in df.iterrows():
x.append(counter)
y.append(item['value'])
label.append(item['label'])
counter += 1
# plt.scatter(x=x, y=y, s=1)
# plt.show()
df = df.head(n)
return df['label']
if __name__ == "__main__":
# Set the paths
save_path = "../../results/part23/"
print "Loading the data..."
# Load the data
X_test = load_data('part1_test.csv') # 3980
y_test = load_data('test_labels.csv')
print "Data loaded."
print "creating labeled arrays"
X = bag_of_words(X_test)
keys = get_n_most_significant(X, 3500)
X = X[keys]
y = y_test['grammar']
# y = y_test['language']
X = np.array(X)
y = np.array(y)
fig, axes = plt.subplots(ncols=5, nrows=2, sharey='all', sharex='row', figsize=(14, 8))
fig.suptitle("Learning Curves")
fig.text(0.5, 0.04, 'Iteration', ha='center')
fig.text(0.04, 0.5, 'Accuracy %', va='center', rotation='vertical')
fig.tight_layout()
fig.subplots_adjust(top=0.95, left=0.11, bottom=0.08)
for k in range(0, 10):
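        # map test index k to column j and row m of the 2x5 subplot grid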
if k < 5:
j = k
m = 0
else:
j = k-5
m = 1
# split test set in two sets equally distributed
X_test, X_pool, y_test, y_pool = train_test_split(X, y, test_size=0.50, random_state=7)
        X_train = []
        y_train = []
        picked = []  # indices moved from the pool into the initial labeled set
        for i in range(len(y_pool)):
            if y_pool[i] == 0:
                if len(y_train) < 4:
                    X_train.append(X_pool[i])
                    y_train.append(y_pool[i])
                    picked.append(i)
                else:
                    continue
            if y_pool[i] == 1:
                if len(y_train) < 8:
                    X_train.append(X_pool[i])
                    y_train.append(y_pool[i])
                    picked.append(i)
                else:
                    break
        # np.delete is not in-place, so drop the picked samples from the pool in one call
        X_pool = np.delete(X_pool, picked, axis=0)
        y_pool = np.delete(y_pool, picked)
# Uncertainty Sampling Initialization
X_train_US = np.array(X_train)
y_train_US = np.array(y_train)
X_pool_US = np.array(X_pool)
y_pool_US = np.array(y_pool)
# Random Sampling Initialization
X_train_RS = np.array(X_train)
y_train_RS = np.array(y_train)
X_pool_RS = np.array(X_pool)
y_pool_RS = np.array(y_pool)
# Initialize storage arrays
us_acc = []
rs_acc = []
for iteration in range(10):
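            # one active-learning step: retrain both classifiers, score them on the test set,
            # then move a single sample from pool to train (most uncertain vs. randomly picked)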
clf_US = SVC(kernel='linear', C=1, probability=True)
# Train classifier with Uncertainty Sampling Train set
clf_US.fit(X_train_US, y_train_US)
prob_US = clf_US.predict_proba(X_pool_US)
y_pred_US = clf_US.predict(X_test)
temp = [np.abs(item[1] - 0.5) for item in prob_US] # we focus on minority class item[1]
u_sample = np.argmin(temp)
# Train classifier with Random Sampling Train set
clf_RS = SVC(kernel='linear', C=1, probability=True)
clf_RS.fit(X_train_RS, y_train_RS)
prob_RS = clf_RS.predict_proba(X_pool_RS)
y_pred_RS = clf_RS.predict(X_test)
            r_sample = random.randint(0, len(y_pool_RS) - 1)
# Store scores in arrays
us_acc.append(accuracy_score(y_pred=y_pred_US, y_true=y_test))
rs_acc.append(accuracy_score(y_pred=y_pred_RS, y_true=y_test))
print "Iteration {}".format(iteration + 1)
print "Uncertainty Sampling AUC: ", roc_auc_score(y_score=y_pred_US, y_true=y_test)
print "Uncertainty Sampling Accuracy: ", accuracy_score(y_pred=y_pred_US, y_true=y_test)
print "Uncertainty Sampling Pool size", X_pool_US.shape
print "Uncertainty Sampling Train size", X_train_US.shape
print "The Uncertainty sample number is {} and its value is {}".format(u_sample, temp[u_sample])
print "---------------------------------------------"
print "Random Sampling AUC: ", roc_auc_score(y_score=y_pred_RS, y_true=y_test)
print "Random Sampling Accuracy: ", accuracy_score(y_pred=y_pred_RS, y_true=y_test)
print "Random Sampling Pool size", X_pool_RS.shape
print "Random Sampling Train size", X_train_RS.shape
print "The Random sample number is {} and its value is {}".format(r_sample, temp[r_sample])
# Update train and pool set for Uncertainty Sampling
X_train_US = np.vstack((X_train_US, X_pool_US[u_sample, :]))
y_train_US = np.hstack((y_train_US, y_pool_US[u_sample]))
X_pool_US = np.delete(X_pool_US, u_sample, 0)
y_pool_US = np.delete(y_pool_US, u_sample, 0)
# Update train and pool set for Random Sampling
X_train_RS = np.vstack((X_train_RS, X_pool_RS[r_sample, :]))
y_train_RS = np.hstack((y_train_RS, y_pool_RS[r_sample]))
X_pool_RS = np.delete(X_pool_RS, r_sample, 0)
y_pool_RS = np.delete(y_pool_RS, r_sample, 0)
axes[m, j].plot(us_acc)
axes[m, j].plot(rs_acc)
# axes[i, 0].set_ylabel("General Iteration {}".format(k))
axes[m, j].set_title("Test {}".format(k+1))
plt.setp([a.get_xticklabels() for a in axes[0, :]], visible=False)
plt.setp([a.get_yticklabels() for a in axes[:, 1]], visible=False)
fig.legend(("Uncertainty Sampling", "Random Sampling"))
fig.savefig(save_path + 'ALearningCurves.png')
# plt.figure()
# plt.plot(us_acc, label="Uncertainty Sampling")
# plt.plot(rs_acc, label="Random Sampling")
# plt.legend()
# plt.title("Learning Curves")
# plt.ylabel("Accuracy %")
# plt.ylim((0, 1))
# plt.xlabel("Iteration")
# plt.savefig(save_path + "ALearningCurves_{}.png".format(k))
# plt.show()
| [
"matplotlib"
] |
b4a609846c9b423aca72e19e337b95e02f5371f5 | Python | strongnine/Series2Image | /src/generate_by_files.py | UTF-8 | 2,016 | 3.109375 | 3 | [] | no_license | #!/usr/bin/python
#copyright(c) strongnine
# 批量生成 GAF 图片的代码 (Generate GAF images in batches)
# 将文件夹中的每个文件都生成一个 GAF 图 (...)
import os
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import image
from pyts.image import GramianAngularField
# 根据自己的需求修改一下变量的参数
dirname = "./data/dataset" # path of the folder to process
savepath = "./data" # path where the GAF images are saved
img_sz = 28 # size of the generated GAF images
method = 'summation' # GAF type: 'summation' (default) or 'difference'
# GAF generation code below
print("GAF 生成方法:%s,图片大小:%d * %d" % (method, img_sz, img_sz))
img_path = "%s/images" % savepath # folder where the visualization images are saved
data_path = "%s/data_mat" % savepath # folder where the data files are saved
if not os.path.exists(img_path):
    os.makedirs(img_path) # create the folder if it does not exist
if not os.path.exists(data_path):
    os.makedirs(data_path) # create the folder if it does not exist
print("开始生成...")
print("可视化图片保存在文件夹 %s 中,数据文件保存在文件夹 %s 中。" % (img_path, data_path))
gaf = GramianAngularField(image_size=img_sz, method=method)
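# GramianAngularField rescales each series to [-1, 1], maps values to angles, and builds the
# Gram-like matrix of cos(phi_i + phi_j) ('summation') or sin(phi_i - phi_j) ('difference')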
img_num = 0 # count of generated images
for fname in os.listdir(dirname):
    filename, ext = os.path.splitext(fname)
    if ext != '.csv': continue # skip files that are not csv
    img_num += 1
    src_data = np.loadtxt("{}/{}".format(dirname, fname), delimiter=",") # load the source data
    x = src_data.reshape(1, -1)
    img_gaf = gaf.fit_transform(x)
    img_save_path = "%s/%s.png" % (img_path, filename)
    image.imsave(img_save_path, img_gaf[0]) # save the image
    data_save_path = "%s/%s.csv" % (data_path, filename)
    np.savetxt(data_save_path, img_gaf[0], delimiter=',') # save the data as a csv file
print("生成完成,共处理 %d 个图片。" % img_num) | [
"matplotlib"
] |
6e776125f04408c00f6e2fed6f162f706a04a6bf | Python | gaurangrai/Youtube-data-mining | /scripts/Wordcount/word_count_category.py | UTF-8 | 2,015 | 3.390625 | 3 | [] | no_license | #!/usr/bin/env python3
import csv
import math
import matplotlib.pyplot as plt
from wordcloud import STOPWORDS
stopwords = set(STOPWORDS)
csv_filepath = "../../csv_files/"
files = {"Tech": "comments_tech.csv","Comedy": "comments_comedy.csv","News":"comments_news.csv","TV":"comments_TV.csv"}
average_words_category = []
def count_words_with_file(filepath, category):
totalRows = 0
totalWords = 0
comments = []
with open(filepath, "r") as commentFile:
commentReader = csv.reader(commentFile, delimiter=',', quoting=csv.QUOTE_MINIMAL)
for row in commentReader:
comments.append(row[4].split())
totalRows += 1
for comment in comments:
for word in comment:
if word not in stopwords:
totalWords += 1
return (category, int(math.floor(totalWords/totalRows)))
def plot_graph():
xCoordinates = []
yCoordinates = []
xLabels = []
counter = 1
for val in average_words_category:
xCoordinates.append(counter)
yCoordinates.append(val[1])
xLabels.append(val[0])
counter += 1
fig, ax = plt.subplots()
rects = ax.bar(xCoordinates, yCoordinates, 0.75, color=['blue','green','red','yellow'])
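    # write the value of each bar (average words per comment) above it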
for rect in rects:
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width()/2., 1.0*height,
'%d' % int(height),
ha='center', va='bottom')
ax.set_title("Words Per Comment")
ax.set_xticks(xCoordinates)
ax.set_xticklabels(xLabels)
ax.set_xlabel('Category')
ax.set_ylabel('Words / Comment')
plt.savefig('wordspercomment.png')
if __name__ == "__main__":
# Calculating average words per comment for various categories
for category,filename in files.items():
average_words_category.append(count_words_with_file(csv_filepath+filename, category))
# ploting a graph for the statistics collected
plot_graph()
| [
"matplotlib"
] |
5361ada6ba3864bab2697a0dd9699cfea1e4cf22 | Python | Eddie-MG/covid-transmission | /simulate.py | UTF-8 | 9,481 | 2.65625 | 3 | [] | no_license | import numpy as np
import time
import random
import pandas as pd
import congif as cgf
from people import Dude
from networks import Group
from tools import build_df
import plotly.express as px
class Simulate:
def __init__(self, comm_size):
self.comm_size = comm_size
self.houses = []
self.outer_circles = []
self.open_networks = []
self.peeps = []
self.networks = []
self.cumulative_cases = {'day': [0, 0, 0], 'cases': [0, 0, 0], 'type': ['total', 'infected-a', 'infected-s']}
self.total_cases = 0
self.total_asymp = 0
self.total_symp = 0
self.total_deaths = 0
self.total_recoveries = 0
self.built = False
def create_houses(self):
for i in range(0, self.comm_size):
rate = random.random()
size = random.randint(1, 4)
grp = Group(size, rate, i, 'inner')
self.houses.append(grp)
return self.houses
def init_peeps(self):
for i in range(0, len(self.houses)):
job = random.choice(cgf.job_type)
age = random.randint(18, 80)
person = Dude(job, age, i, network=[self.houses[i]])
self.houses[i].members.append(person)
self.peeps.append(person)
return self.peeps
def populate_houses(self):
pep_cnt = len(self.peeps)
for house in self.houses:
job = house.members[0].job
for new_member in range(0, house.size - 1):
age = abs(house.members[0].age + random.randint(-5, 5))
person = Dude(job, age, pep_cnt, network=[house])
house.members.append(person)
self.peeps.append(person)
pep_cnt += 1
return
def genesis(self):
self.houses = self.create_houses()
self.peeps = self.init_peeps()
self.populate_houses()
return self.houses, self.peeps
def outer_circle_gen(self):
# Number of outer circles, dependent on inner circle count
ocircle_count = int(len(self.peeps) * 0.0675)
sze = 0
for i in range(0, ocircle_count):
# Randomly create a large or small group
if round(random.random()) == 1:
# Lower rates for larger outer circles
rate = random.uniform(0.01, 0.4)
size = int(random.gauss(40, 30))
else:
rate = random.uniform(0.2, 0.65)
size = int(random.gauss(15, 12))
grp = Group(size, rate, i, 'outer')
self.outer_circles.append(grp)
sze += size
# print(sze)
self.open_networks = self.outer_circles.copy()
for pepe in self.peeps:
if len(self.open_networks) <= 3:
break
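            # attach each person to 1-3 randomly chosen outer networks that still have room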
nets = random.sample(self.open_networks, random.randint(1, 3))
for net in nets:
cnt = 0
solo = False
# If network is full, find another one!
while len(net.members) >= net.size:
try:
self.open_networks.remove(net)
except ValueError:
break
cnt += 1
if cnt > len(self.open_networks): # This needs to be verified as could try multiple times
solo = True
break
else:
net = random.choice(self.open_networks)
if solo:
break
else:
net.members.append(pepe)
pepe.network.append(net)
return self.outer_circles
def get_open_networks(self):
circles = [net for net in self.outer_circles if len(net.members) < net.size]
# for net in self.outer_circles:
# if len(net.members) < net.size:
# circles.append(net)
return circles
def begin_sim(self):
# Begin by infecting one person
r_pep = random.choice(self.peeps)
print("Person {} was infected first\n".format(r_pep.name))
r_pep.infect()
def day(self):
# Iterate over all people and decide whether they will transmit to their repsective groups
# And if they have reached their recovery time what whill happen to them.
for person in self.peeps:
if (person.state == cgf.states[1]) or (person.state == cgf.states[2]):
person.elapsed_infected += 1
for net in person.network:
net.plastic()
if person.elapsed_infected >= person.r_time:
person.recover(np.random.choice([3, 4], p=[0.95, 0.05]))
# Using the new infected count of each Group transmit it accordingly
new_cases = 0
for n in self.networks:
new_cases += n.infected_count
if n.infected_count > 0:
n.transmit()
ss = build_df([self.peeps], 'p')['state'].value_counts()
for s in cgf.states:
if s not in ss:
ss[s] = 0
self.total_cases += new_cases
self.total_asymp += ss['infected-a']
self.total_symp += ss['infected-s']
print("----------Virus Report----------\n")
print("Today's new cases: {}".format(new_cases))
prop = ((ss['infected-s'] + ss['infected-a']) / len(self.peeps)) * 100
print("Total Infected: {} ({:.1f}%)".format(ss['infected-s'] + ss['infected-a'], prop))
print("Asymptomatic Cases:", ss['infected-a'])
print("Symptomatic Cases:", ss['infected-s'])
print("Total Recoveries:", ss['recovered'])
print("Total Deaths:", ss['dead'])
print("Stay Home, Save Lives\n")
def remove_network(self, person, networks):
# Remove person from networks and networks from person
person.network = [net for net in person.network if net not in networks]
for net in networks:
net.members = [mem for mem in net.members if mem != person]
def isolate_phase_one(self):
# First set of isolation and social distancing
print("\n-----------SELF ISOLATION PHASE ONE-----------\n")
total_members = 0
for net in self.outer_circles:
total_members += len(net.members)
print("Original Average Network Size:", total_members / len(self.outer_circles))
for person in self.peeps:
if len(person.network) == 0:
print("Do nothing")
                continue
elif len(person.network) <= 2:
isolated_networks = [person.network.pop(-1)]
else:
isolated_networks = [person.network.pop(-1), person.network.pop(-1)]
self.remove_network(person, isolated_networks)
total_members = 0
for net in self.outer_circles:
total_members += len(net.members)
print("New Average Network Size: {}\n".format(total_members / len(self.outer_circles)))
def setup(self):
# Create Houses
print("Creating dwellings and populating them")
start = time.time()
houses, peeps = self.genesis()
d = time.time() - start
if d > 60:
print("Houses populated. It took {:.2f}mins".format(d/60))
else:
print("Houses populated. It took {:.1f}seconds".format(d))
# Create Outer Circles
start = time.time()
outer_circles = self.outer_circle_gen()
self.networks = houses + outer_circles
d = time.time() - start
if d > 60:
print("Total Population Size: {} Total Outer Networks: {}. Time taken: {:.2f}mins".format(len(self.peeps), len(self.outer_circles), (time.time()-start)/60))
else:
print("Total Population Size: {} Total Outer Networks: {}. Time taken: {:.1f}secs".format(len(self.peeps), len(self.outer_circles), time.time()-start))
self.built = True
def day_iterate(self, duration):
# Simulate daily transmissions for a month
for i in range(0, duration):
print("Day", i)
if i == 0:
self.begin_sim()
else:
if i == int(duration*0.75):
                    # When we reach the three-quarter mark begin basic self isolation
self.isolate_phase_one()
self.day()
# Add cumulative stats
# Total
self.cumulative_cases['day'].append(i)
self.cumulative_cases['cases'].append(self.total_cases)
self.cumulative_cases['type'].append('total')
# Asymptomatic
self.cumulative_cases['day'].append(i)
self.cumulative_cases['cases'].append(self.total_asymp)
self.cumulative_cases['type'].append('infected-a')
# Symptomatic
self.cumulative_cases['day'].append(i)
self.cumulative_cases['cases'].append(self.total_symp)
self.cumulative_cases['type'].append('infected-s')
def run_simulation(self, duration):
if not self.built:
self.setup()
print("Running simulation!!\n")
self.day_iterate(duration)
def report(self):
fig = px.line(pd.DataFrame(self.cumulative_cases), x='day', y='cases', color='type')
fig.show()
| [
"plotly"
] |
10140ada00958fbd5362271045a952306aa462b5 | Python | zhangjiahui56/leaf_square | /count_area.py | UTF-8 | 6,771 | 3.046875 | 3 | [] | no_license | import os
from skimage import io
from skimage.color import rgb2hsv
from skimage.filters import threshold_otsu
from skimage.filters.rank import median
from skimage.morphology import disk
import matplotlib.pyplot as plt
import cv2
import csv
import pandas as pd
import itertools
import shutil
import scipy.misc
def is_blurred(img):
'''
Detects if the image is blurred
:param img: np.ndarray
:return: boolean, True if blurred
'''
variance = cv2.Laplacian(img, cv2.CV_64F).var()
return variance < 40
def remove_blurred(areas, images):
'''
Replaces area value of blurred images with average of its neighbors
:param areas: array of (7) arrays for every plant. Each contains areas measurements
:param images: generator object
:return: areas
'''
areas_to_remove = []
desk_number = 0
for desk in images:
if is_blurred(desk['image']):
areas_to_remove.append(desk_number)
desk_number += 1
for plant in range(len(areas)):
for area in list(reversed(sorted(areas_to_remove))):
if area == 0 or area == (len(areas[plant]) - 1):
del areas[plant][area]
else:
areas[plant][area]['area'] = (areas[plant][area - 1]['area'] + areas[plant][area + 1]['area']) / 2
return areas
def split_into_9_parts(image):
'''
Splits image into 9 parts
|0|1|2|
|3|4|5|
|6|7|8|
:param image: numpy.ndarray
:return: list of 9 numpy.ndarrays
'''
vert = image.shape[0] // 3
hor = image.shape[1] // 3
ls = []
for col in range(3):
for row in range(3):
ls.append(image[vert * col: vert * (col + 1),
hor * row: hor * (row + 1)])
return ls
def count_green_pixels(img):
'''
Counts pixels in the image with high saturation using Otsu threshold
Asserts that image background if white!
:param img: np.ndarray
:return: int
'''
hsv_image = rgb2hsv(img)
saturation = hsv_image[:, :, 1]
threshold = threshold_otsu(saturation)
saturation = saturation > threshold * 0.8
saturation = median(saturation, disk(2)) # filtering
pixels_count = saturation.mean() * saturation.shape[0] * saturation.shape[1] / saturation.max()
return pixels_count
def count_pixel_square(img):
'''
:param img: ndarray, - image with 2 reference squares
:return: one pixel square (cm^2)
'''
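    # the reference cell holds two squares; assuming each covers 1 cm^2, half the green pixel
    # count is the pixel area of a single square, so its inverse is cm^2 per pixel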
one_square = count_green_pixels(img) // 2
return 1 / one_square
def write_areas_to_file(array, plant_number):
'''
Writes plant area series to [plant_number].csv file
:param array: array of areas
:param plant_number:
:return: filename
'''
file = str(plant_number) + '_plant_areas.csv'
with open(file, 'w', newline='') as csvfile:
time = ['time']
area = ['area']
for measurement in range(len(array)):
time.append(array[measurement]['time'])
area.append(array[measurement]['area'])
rows = zip(time, area)
writer = csv.writer(csvfile)
for row in rows:
writer.writerow(row)
return file
def read_areas_from_file(plant_number, type='pandas_series'):
file = str(plant_number) + '_plant_areas.csv'
if type == 'array':
areas_array = []
with open(file, newline='') as csvfile:
reader = csv.reader(csvfile)
for row in reader:
areas_array.append(row[1])
areas_array.pop(0)
return [float(s) for s in areas_array]
# return pd.read_csv(file, header=0)
return pd.Series.from_csv(file, header=0)
def calculate_squares(images, write_to_file=True):
'''
Adds calculated squares of plants' leafs from the desk to the storage
|0|1|2|
|3| | |
|4|5|6| i.e. excluding reference square and empty part
:param images: generator object, series of images of a desk with plants
:param write_to_file: boolean, if True data would be written to file
:return: array of areas
'''
init_images, images = itertools.tee(images)
plants_growth = [[] for i in range(7)]
for desk in images:
parts = split_into_9_parts(desk['image'])
pixels_count = []
pixel_square = count_pixel_square(parts[4])
for n in range(9):
if (n == 4) or (n == 5): # skipping reference square and empty part
continue
pixels_count.append(count_green_pixels(parts[n]))
# # saving images and masks to file
# m = n
# if n >= 4:
# m -= 1
#
# if n >= 5:
# m -= 1
#
# mask = make_mask(parts[n])
#
# scipy.misc.imsave(os.path.join('camera_emulator', str(n + 1) + '_cell', desk['time'] + 'im.jpg'), parts[n])
# scipy.misc.imsave(os.path.join('camera_emulator', str(n + 1) + '_cell', desk['time'] + 'mask.jpg'), mask)
squares = [c * pixel_square for c in pixels_count]
for plant in range(len(squares)):
plants_growth[plant].append({
'time': desk['time'],
'area': squares[plant]
})
plants_growth = remove_blurred(plants_growth, init_images)
if write_to_file:
for plant in range(len(plants_growth)):
write_areas_to_file(plants_growth[plant], plant)
return plants_growth
def calculate_single_plant(images, plant_number, write_to_file=True):
'''
Calculates square of plants' leafs from one of the desks' parts[0...3, 6..8]
:param images: generator object, series of one desk images
:param plant_number:
:param write_to_file: boolean, if True data would be written to file
:return: series of plants' area
'''
area_series = []
for desk in images:
parts = split_into_9_parts(desk['image'])
pixel_square = count_pixel_square(parts[4])
# area_series.append(count_green_pixels(parts[plant_number]) * pixel_square)
area_series.append({
'time': desk['time'],
'area': (count_green_pixels(parts[plant_number]) * pixel_square)
})
if write_to_file:
write_areas_to_file(area_series, plant_number)
return area_series
def make_mask(img):
'''
Makes a mask that contains green pixels
:param img: np.ndarray
:return: np.ndarray
'''
hsv_image = rgb2hsv(img)
saturation = hsv_image[:, :, 1]
threshold = threshold_otsu(saturation)
saturation = saturation > threshold * 0.8
saturation = median(saturation, disk(2)) # filtering
binary = [[[255, 255, 255] if pxl == 0 else [0, 0, 0] for pxl in row] for row in saturation]
return binary
| [
"matplotlib"
] |
0ea2ce6a0f937e1d6617d7e3a078df950da74f15 | Python | git-sthg/mktreview | /show_industry.py | UTF-8 | 3,274 | 2.875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Tue Nov 10 09:10:20 2020
@author: FTAsset
"""
import os
import datetime
import akshare as ak
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
today = datetime.datetime.today() - datetime.timedelta(days=0)
yearsb4 = today - datetime.timedelta(days=40*30+1) # per Huatai financial-engineering research, the A-share cycle is roughly 40 months
start_str = yearsb4.strftime('%Y-%m-%d')
end_str = today.strftime('%Y-%m-%d')
def get_index_daily(index_code, start_date, end_date):
    """ index_code: str, 6-digit Shenwan level-1 industry index code, e.g. 801010
        start_date: str, start date yyyy-mm-dd
        end_date: str, end date yyyy-mm-dd
    ----------------------------------------------
    return: pd.DataFrame, index name, change (%), float market value, turnover-rate percentile (%), PE percentile (%)
    """
print('\r正在获取指数代码:%s' % index_code, end="")
sw_index_df = ak.sw_index_daily_indicator(index_code=index_code, start_date=start_date, end_date=end_date, data_type="Day")
pct = pd.concat([
sw_index_df.loc[sw_index_df['date']==end_str, ['index_name']],
sw_index_df.loc[sw_index_df['date']==end_str, ['chg_pct']].astype(float),
        sw_index_df.loc[sw_index_df['date']==end_str, 'float_mv'].apply(lambda x: float(x.replace(',',''))), # float market value
        sw_index_df[['turn_rate', 'pe']].rank(method='min', pct=True).loc[sw_index_df['date']==end_str]*100 # turnover-rate and PE percentiles
], axis='columns')
return(pct)
sw_index_spot_df = ak.sw_index_spot()
index_pct = pd.concat(
[get_index_daily(code, start_str, end_str) for row, code in enumerate(sw_index_spot_df['指数代码'])],
ignore_index=True)
print()
if index_pct.shape[0] > 0:
# 作图
x = index_pct['chg_pct']
y = index_pct['pe']
c = index_pct['turn_rate']
s = (index_pct['float_mv']/min(index_pct['float_mv']))*10
a = index_pct['index_name']
fig, ax = plt.subplots(figsize=(10, 6))
scatter = ax.scatter(x, y, c=c, s=s, alpha=0.5, cmap='cool')
    # add a label to each data point
for i in range(index_pct.shape[0]):
plt.annotate(a.iloc[i], xy = (x[i], y[i]), xytext = (x[i]+0.1, y[i]+0.1))
    # add the color legend
legend1 = ax.legend(*scatter.legend_elements(num=5),
loc="upper right", bbox_to_anchor=(1.2, 1), title='换手率3年分位数(%)')
ax.add_artist(legend1)
    # add the size legend
kw = dict(prop="sizes", num=6, color=scatter.cmap(0.5),
func=lambda s: (s/10)*min(index_pct['float_mv']))
legend2 = ax.legend(*scatter.legend_elements(**kw),
loc="lower right", bbox_to_anchor=(1.2, 0), title="行业流通市值(亿元)")
ax.set_xlabel('行业涨跌幅(%)', fontsize=15)
ax.set_ylabel("PE 3年分位数(%)", fontsize=15)
ax.set_title('申万一级行业收益分布%s' % today.strftime('%Y%m%d'))
ax.grid(True)
fig.tight_layout()
save_dir = './report_daily/%s/' % today.strftime('%Y%m%d')
if not os.path.exists(save_dir):
os.makedirs(save_dir)
plt.savefig('%sindustry_%s.jpg' % (save_dir, today.strftime('%Y%m%d')))
# plt.show()
plt.close()
else:
raise ValueError('无当日行情数据')
| [
"matplotlib"
] |
302d8fce7ee7e058f0da8a340249df781dc5891d | Python | jortvangorkum/Network-Science-Hierarchical-Dendrogram | /src/results/dendrogram.py | UTF-8 | 4,243 | 2.890625 | 3 | [
"MIT"
] | permissive | import networkx as nx
from itertools import chain, combinations
import matplotlib.pyplot as plt
from scipy.cluster.hierarchy import dendrogram
def scipy_to_dendrogram(Z):
dendrogram(Z, count_sort=True, leaf_font_size=11)
plt.show()
# Code by: https://stackoverflow.com/questions/59821151/plot-the-dendrogram-of-communities-found-by-networkx-girvan-newman-algorithm
def networkx_to_dendrogram(G, communities):
# building initial dict of node_id to each possible subset:
node_id = 0
init_node2community_dict = {node_id: communities[0][0].union(communities[0][1])}
for comm in communities:
for subset in list(comm):
if subset not in init_node2community_dict.values():
node_id += 1
init_node2community_dict[node_id] = subset
# turning this dictionary to the desired format in @mdml's answer
node_id_to_children = {e: [] for e in init_node2community_dict.keys()}
for node_id1, node_id2 in combinations(init_node2community_dict.keys(), 2):
for node_id_parent, group in init_node2community_dict.items():
if len(init_node2community_dict[node_id1].intersection(init_node2community_dict[node_id2])) == 0 and group == init_node2community_dict[node_id1].union(init_node2community_dict[node_id2]):
node_id_to_children[node_id_parent].append(node_id1)
node_id_to_children[node_id_parent].append(node_id2)
# also recording node_labels dict for the correct label for dendrogram leaves
node_labels = dict()
for node_id, group in init_node2community_dict.items():
if len(group) == 1:
node_labels[node_id] = list(group)[0]
else:
node_labels[node_id] = ''
# also needing a subset to rank dict to later know within all k-length merges which came first
subset_rank_dict = dict()
rank = 0
for e in communities[::-1]:
for p in list(e):
if tuple(p) not in subset_rank_dict:
subset_rank_dict[tuple(sorted(p))] = rank
rank += 1
subset_rank_dict[tuple(sorted(chain.from_iterable(communities[-1])))] = rank
# my function to get a merge height so that it is unique (probably not that efficient)
def get_merge_height(sub):
sub_tuple = tuple(sorted([node_labels[i] for i in sub]))
n = len(sub_tuple)
other_same_len_merges = {k: v for k, v in subset_rank_dict.items() if len(k) == n}
min_rank, max_rank = min(other_same_len_merges.values()), max(other_same_len_merges.values())
range = (max_rank-min_rank) if max_rank > min_rank else 1
return float(len(sub)) + 0.8 * (subset_rank_dict[sub_tuple] - min_rank) / range
# finally using @mdml's magic, slightly modified:
G = nx.DiGraph(node_id_to_children)
nodes = G.nodes()
leaves = set( n for n in nodes if G.out_degree(n) == 0 )
inner_nodes = [ n for n in nodes if G.out_degree(n) > 0 ]
# Compute the size of each subtree
subtree = dict( (n, [n]) for n in leaves )
for u in inner_nodes:
children = set()
node_list = list(node_id_to_children[u])
while len(node_list) > 0:
v = node_list.pop(0)
children.add( v )
node_list += node_id_to_children[v]
subtree[u] = sorted(children & leaves)
inner_nodes.sort(key=lambda n: len(subtree[n])) # <-- order inner nodes ascending by subtree size, root is last
# Construct the linkage matrix
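    # each linkage row is [left cluster id, right cluster id, merge height, leaf count],
    # the format expected by scipy.cluster.hierarchy.dendrogram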
leaves = sorted(leaves)
index = dict( (tuple([n]), i) for i, n in enumerate(leaves) )
Z = []
k = len(leaves)
for i, n in enumerate(inner_nodes):
children = node_id_to_children[n]
x = children[0]
for y in children[1:]:
z = tuple(sorted(subtree[x] + subtree[y]))
i, j = index[tuple(sorted(subtree[x]))], index[tuple(sorted(subtree[y]))]
Z.append([i, j, get_merge_height(subtree[n]), len(z)]) # <-- float is required by the dendrogram function
index[z] = k
subtree[z] = list(z)
x = z
k += 1
dendrogram(Z, count_sort=True, leaf_font_size=11, labels=[node_labels[node_id] for node_id in leaves])
plt.show()
| [
"matplotlib"
] |
c080322a574f0c787c1b6911c8ed949294059d7a | Python | sung3r/Machine-Learining-Security | /web安全之机器学习入门/8-2-LogisticRegression-demo.py | UTF-8 | 1,070 | 3.078125 | 3 | [] | no_license | from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
import numpy as np
import matplotlib.pyplot as plt
def main():
    # load the iris dataset, using only the first two features
iris = load_iris()
x = iris.data[:, :2]
y = iris.target
h = .02
    # fit logistic regression and predict
lr = LogisticRegression(C=1e5)
lr.fit(x, y)
    # build the coordinate grid
x_min, x_max = x[:, 0].min() - .5, x[:, 0].max() + .5
y_min, y_max = x[:, 1].min() - .5, x[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
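    # np.c_[xx.ravel(), yy.ravel()] flattens the grid into an (N, 2) array of points,
    # so the classifier returns a predicted class for every grid cell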
Z = lr.predict(np.c_[xx.ravel(), yy.ravel()])
    # visualize the result
Z = Z.reshape(xx.shape)
plt.figure(1, figsize=(4, 3))
plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)
plt.scatter(x[:, 0], x[:, 1], c=y, edgecolors='k', cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xticks(())
plt.yticks(())
plt.show()
if __name__ == "__main__":
main()
| [
"matplotlib"
] |
b98ba331bb807a987c8e268b856ac2d001e18a5e | Python | kiteloopdesign/kiteloop | /kiteloop/parse_html.py | UTF-8 | 2,362 | 2.609375 | 3 | [] | no_license | #!/usr/bin/env python
# import numpy as np
import sys
sys.path.append("/home/pmora/bin/python_libs/beautifulsoup4-4.3.2/build/lib")
import bs4
from bs4 import BeautifulSoup
import subprocess
import matplotlib.pyplot as plt
from time import strftime
# TODO: specify name in wget command rather than here, as the default name might change
filename = 'VAL.html'
#filename = 'TOR.html'
# change made with pycharm
dates = []
number = []
alert = []
wind = []
dir = []
hours = []
# date and time
today_str = strftime("%d_%m_%Y@%Hh_%Mm")
# Mapa viento. HIRLAM-AEMET 0.05
# url = 'http://www.aemet.es/es/eltiempo/prediccion/modelosnumericos/hirlam005?opc2=val&opc3=vi'
# url = 'http://static.puertos.es/pred_simplificada/Predolas/Tablas/Med/TOR.html'
# url = 'http://static.puertos.es/pred_simplificada/Predolas/Tablas/Med/VAL.html'
url = 'http://static.puertos.es/pred_simplificada/Predolas/Tablas/?p=622028053&name=Valencia'
cmd = ['wget',url]
process = subprocess.Popen(cmd,True,stdout=subprocess.PIPE)
process.wait()
status = process.returncode
if status != 0:
print('*E, ERROR: Data not downloaded. Wrong URL?')
sys.exit()
f = open(filename,'r')
parsed_html = BeautifulSoup(f)
table_body = parsed_html.findAll('table')[2] #TODO: There are two tables on the page ... This might change in the future and script will break
rows = table_body.find_all('tr')
location = rows[0].find('th').text.strip().lower()
# Scrape the brains out of this MOFO! :D
for row in rows:
cols = row.find_all('td')
if cols: # avoid empty rows
dates.append ( cols[0].text.strip() )
number.append( cols[1].text.strip() )
alert.append ( cols[2].text.strip() )
wind.append ( cols[3].text.strip() )
dir.append ( cols[4].text.strip() )
#yyyymmddhh
days = []
days.append(dates[0][6:8])
#hour = time[0][8:10]
# how many days are there so we can plot per day
for index,date in enumerate(dates):
if days[-1] not in date[6:8]:
days.append(date[6:8])
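# convert wind speed to knots (1 m/s ~ 1.94384 kn)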
wind = [int(round(float(i)*1.94384,0)) for i in wind ]
line, = plt.plot(number, wind, label='wind',color='green',linewidth=2)
plt.grid()
plt.hold(True)
plt.legend(loc='best')
plt.title('{0}, {1}'.format(location, today_str))
plt.ylabel('Knots')
plt.xlabel('Time(hours)')
plt.savefig('{0}_{1}.png'.format(location,today_str),bbox_inches='tight') # TODO: save inside db ?
plt.show()
| [
"matplotlib"
] |
d4bf3718844296a23b48a3bc37e99fdf9f944511 | Python | Tommy-somen/coursera_tesorflow | /2_Convolutional Neural Networks in TensorFlow/004_Exercise_4_Multi_class_classifier_Question-FINAL.py | UTF-8 | 5,352 | 3.328125 | 3 | [] | no_license | ########################マルチクラス分類_CNN#########################
# ATTENTION: Please do not alter any of the provided code in the exercise. Only add your own code where indicated
# ATTENTION: Please do not add or remove any cells in the exercise. The grader will check specific cells based on the cell position.
# ATTENTION: Please use the provided epoch values when training.
# Import modules
import csv
import numpy as np
import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from os import getcwd
# Function that builds the dataset from a csv file
def get_data(filename):
with open(filename) as training_file:
reader = csv.reader(training_file,delimiter=',')
imgs = []
labels = []
        # the first row holds the column titles, so skip it
next(reader,None)
        # read the label and the image data row by row
for row in reader:
label = row[0]
data = row[1:]
            # the raw pixel list cannot be fed to the CNN as-is, so convert it to numpy with np.array() and reshape to the desired image size
img = np.array(data).reshape((28,28))
imgs.append(img)
labels.append(label)
        # convert the labels and image data to numpy arrays of floats
images = np.array(imgs).astype(float)
labels = np.array(labels).astype(float)
return images, labels
print(training_images.shape)
print(training_labels.shape)
print(testing_images.shape)
print(testing_labels.shape)
# Their output should be:
# (27455, 28, 28)
# (27455,)
# (7172, 28, 28)
# (7172,)
# In this section you will have to add another dimension to the data
# So, for example, if your array is (10000, 28, 28)
# You will need to make it (10000, 28, 28, 1)
# Hint: np.expand_dims
# use np.expand_dims to add an extra channel dimension to the image data
training_images = np.expand_dims(training_images,axis=3)
testing_images = np.expand_dims(testing_images,axis=3)
# rescale and augment the training images with ImageDataGenerator
# Create an ImageDataGenerator and do Image Augmentation
train_datagen = ImageDataGenerator(rescale=1.0/255.0,
rotation_range=40,
width_shift_range = 0.2,
height_shift_range = 0.2,
shear_range = 0.2,
zoom_range = 0.2,
horizontal_flip = True,
fill_mode = "nearest"
)
# same for the test data (rescaling only)
validation_datagen = ImageDataGenerator(rescale=1.0/255.0)
# Keep These
print(training_images.shape)
print(testing_images.shape)
# Their output should be:
# (27455, 28, 28, 1)
# (7172, 28, 28, 1)
# Build the CNN model
# Define the model
# Use no more than 2 Conv2D and 2 MaxPooling2D
model = tf.keras.models.Sequential([tf.keras.layers.Conv2D(32,(3,3),activation="relu",input_shape=(28,28,1)), # must match the reshaped input size
tf.keras.layers.MaxPooling2D(2,2),
tf.keras.layers.Conv2D(32,(3,3),activation="relu"),
tf.keras.layers.MaxPooling2D(2,2),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(1024,activation="relu"),
                                    tf.keras.layers.Dense(26,activation="softmax")]) # softmax for multi-class classification
# Compile the model
# Compile Model.
model.compile(optimizer="adam",
              loss="sparse_categorical_crossentropy", # use a categorical loss since this is multi-class
metrics = ["accuracy"])
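# sparse_categorical_crossentropy takes integer class labels directly, so no one-hot encoding is needed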
train_generator = train_datagen.flow(training_images,
training_labels,
batch_size = 64)
validation_generator = validation_datagen.flow(testing_images,
testing_labels,
batch_size = 64)
# Train the Model
# Fit the model
history = model.fit_generator(train_generator,
epochs = 20,
validation_data = validation_generator)#validation_steps=50)
model.evaluate(testing_images, testing_labels, verbose=0)
##################### Visualizing model evaluation #########################
# Plot the chart for accuracy and loss on both training and validation
%matplotlib inline
import matplotlib.pyplot as plt
acc = history.history['accuracy']# Your Code Here
val_acc = history.history['val_accuracy']# Your Code Here
loss = history.history['loss']# Your Code Here
val_loss = history.history['val_loss']# Your Code Here
epochs = range(len(acc))
plt.plot(epochs, acc, 'r', label='Training accuracy')
plt.plot(epochs, val_acc, 'b', label='Validation accuracy')
plt.title('Training and validation accuracy')
plt.legend()
plt.figure()
plt.plot(epochs, loss, 'r', label='Training Loss')
plt.plot(epochs, val_loss, 'b', label='Validation Loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()
| [
"matplotlib"
] |
ccdf5159de9cbe2a3c0523d0bf970f4f1f5f674c | Python | PadmaGnanapriya/A_Basic_Python | /Mathpotlab_DataVisualization/plot05_barGraph.py | UTF-8 | 81 | 2.578125 | 3 | [] | no_license | import matplotlib.pyplot as plt
plt.bar([1.5, 2.5,4.5],[40,30,89])
plt.show()
| [
"matplotlib"
] |
ce699469dd8343d15db0977d0761354cddf497d9 | Python | zoharmot2/GIScience-CV-old-drawings | /process.py | UTF-8 | 17,528 | 2.734375 | 3 | [] | no_license | import numpy as np
import cv2
import matplotlib.pyplot as plt
import csv
import glob
import math
# **********
# Calculate true and pixel distances between features
# **********
def correlate_features(features, depth_val):
result = ['id', 'sym_s', 'x_s', 'y_s', 'pixel_x_s', 'pixel_y_s', 'calc_pixel_x_s', 'calc_pixel_y_s',
'sym_t', 'x_t', 'y_t', 'pixel_x_t', 'pixel_y_t', 'calc_pixel_x_t', 'calc_pixel_y_t',
'dis_m_x', 'dis_m_y', 'dis_m', 'dis_pix_x', 'dis_pix_y', 'dis_pix', 'dis_c_pix_x', 'dis_c_pix_y',
'dis_c_pix', 'bear_pix', 'dis_depth_pix', 'bear_c_pix', 'dis_depth_c_pix']
results = []
results.append(result)
count = 1
i = 0
j = 0
features.remove(features[0]) # remove the headers
features.sort() # sort alphabethically
for f1 in features:
i = j
while i < len(features):
if f1[1] != features[i][1]:
dis_m_x = int(features[i][3]) - int(f1[3])
dis_m_y = int(features[i][4]) - int(f1[4])
dis_m = math.sqrt(math.pow(dis_m_x,2) + math.pow(dis_m_y,2))
if f1[5] != 0 and features[i][5] != 0:
dis_pix_x = int(features[i][5]) - int(f1[5])
dis_pix_y = int(features[i][6]) - int(f1[6])
else:
dis_pix_x = 0
dis_pix_y = 0
dis_pix = math.sqrt(math.pow(dis_pix_x, 2) + math.pow(dis_pix_y, 2))
if features[i][7] != 0 and f1[7] != 0:
dis_c_pix_x = int(features[i][7]) - int(f1[7])
dis_c_pix_y = int(features[i][8]) - int(f1[8])
else:
dis_c_pix_x = 0
dis_c_pix_y = 0
dis_c_pix = math.sqrt(math.pow(dis_c_pix_x,2) + math.pow(dis_c_pix_y,2))
bear_pix = calc_bearing(f1[5], f1[6], features[i][5], features[i][6])
if bear_pix != 0 and bear_pix <= 180:
dis_depth_pix = (abs(bear_pix-90)/90 + depth_val) * dis_pix
elif bear_pix != 0 and bear_pix > 180:
dis_depth_pix = (abs(bear_pix - 270) / 90 + depth_val) * dis_pix
else:
dis_depth_pix = 0
bear_c_pix = calc_bearing(f1[7], f1[8], features[i][7], features[i][8])
if bear_c_pix != 0 and bear_c_pix <= 180:
dis_depth_c_pix = (abs(bear_c_pix - 90) / 90 + depth_val) * dis_c_pix
elif bear_c_pix != 0 and bear_c_pix > 180:
dis_depth_c_pix = (abs(bear_c_pix - 270) / 90 + depth_val) * dis_c_pix
else:
dis_depth_c_pix = 0
result = [str(count), f1[1], f1[3], f1[4], f1[5], f1[6], f1[7], f1[8],features[i][1], features[i][3],
features[i][4], features[i][5], features[i][6], features[i][7], features[i][8],
dis_m_x, dis_m_y, dis_m, dis_pix_x, dis_pix_y ,dis_pix, dis_c_pix_x, dis_c_pix_y, dis_c_pix,
bear_pix, dis_depth_pix, bear_c_pix, dis_depth_c_pix]
results.append(result)
count += 1
i += 1
j += 1
return results
# **********
# Calculation of the bearing from point 1 to point 2
# **********
def calc_bearing (x1, y1, x2, y2):
if x1 == 0 or x2 == 0 or y1 == 0 or y2 == 0:
degrees_final = 0
else:
deltaX = x2 - x1
deltaY = y2 - y1
degrees_temp = math.atan2(deltaX, deltaY) / math.pi * 180
if degrees_temp < 0:
degrees_final = 360 + degrees_temp
else:
degrees_final = degrees_temp
if degrees_final < 180:
degrees_final = 180 - degrees_final
else:
degrees_final = 360 + 180 - degrees_final
return degrees_final
# **********
# Camera calibration process
# **********
def calibrate_camera(size):
CHECKERBOARD = (6, 9)
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, size, 0.001) # was 30
objpoints = [] # Creating vector to store vectors of 3D points for each checkerboard image
imgpoints = [] # Creating vector to store vectors of 2D points for each checkerboard image
# Defining the world coordinates for 3D points
objp = np.zeros((1, CHECKERBOARD[0] * CHECKERBOARD[1], 3), np.float32)
objp[0, :, :2] = np.mgrid[0:CHECKERBOARD[0], 0:CHECKERBOARD[1]].T.reshape(-1, 2)
prev_img_shape = None
images = glob.glob('.\camera_calibration\images\*.jpg') # TODO: change the path according to the path in your environmrnt
for fname in images:
img = cv2.imread(fname)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# Find the chess board corners
# If desired number of corners are found in the image then ret = true
ret, corners = cv2.findChessboardCorners(gray, CHECKERBOARD,
cv2.CALIB_CB_ADAPTIVE_THRESH + cv2.CALIB_CB_FAST_CHECK + cv2.CALIB_CB_NORMALIZE_IMAGE)
if ret == True:
objpoints.append(objp)
# refining pixel coordinates for given 2d points.
corners2 = cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1), criteria)
imgpoints.append(corners2)
# Draw and display the corners
img = cv2.drawChessboardCorners(img, CHECKERBOARD, corners2, ret)
print(fname)
cv2.waitKey(0)
cv2.destroyAllWindows()
h, w = img.shape[:2]
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)
return ret, mtx, dist, rvecs, tvecs
# **********
# Find homographies function
# **********
def find_homographies(recs, camera_locations, im, show, ransacbound, outputfile):
pixels = []
pos3ds = []
symbols = []
for r in recs:
pixels.append(r['pixel'])
pos3ds.append(r['pos3d'])
symbols.append(r['symbol'])
pixels = np.array(pixels)
pos3ds = np.array(pos3ds)
symbols = np.array(symbols)
loc3ds = []
grids = []
for cl in camera_locations:
grids.append(cl['grid_code'])
loc3ds.append(cl['pos3d'])
grids = np.array(grids)
loc3ds = np.array(loc3ds)
num_matches = np.zeros((loc3ds.shape[0],2))
scores = []
for i in range(0, grids.shape[0], 1): # 50
if grids[i] >= grid_code_min:
if show:
print(i,grids[i],loc3ds[i])
num_matches[i, 0], num_matches[i, 1] = find_homography(recs, pixels, pos3ds,
symbols, loc3ds[i], im, show, ransacbound, outputfile)
else:
num_matches[i, :] = 0
score = [i+1, num_matches[i, 0], num_matches[i, 1], grids[i], loc3ds[i][0], loc3ds[i][1], loc3ds[i][2]]
scores.append(score)
if show is False:
outputCsv = output.replace(".png","_location.csv")
csvFile = open(outputCsv, 'w', newline='', encoding='utf-8')
csvWriter = csv.writer(csvFile)
csvWriter.writerow(['location_id', 'min_score', 'max_score', 'grid_code', 'Z', 'X', 'Y'])
for s in scores:
csvWriter.writerow(s)
return num_matches
# **********
# Find homography function
# **********
def find_homography(recs, pixels, pos3ds, symbols, camera_location, im, show, ransacbound, outputfile):
pos2 = np.zeros((pixels.shape[0],2))
good = np.zeros(pixels.shape[0])
for i in range(pixels.shape[0]):
good[i] = pixels[i,0]!=0 or pixels[i,1]!=0
p = pos3ds[i,:] - camera_location
p = np.array([p[2],p[1],p[0]])
p = p/p[2]
pos2[i,:]=p[0:2]
M, mask = cv2.findHomography(pos2[good==1],pixels[good==1], cv2.RANSAC,ransacbound)
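    # RANSAC estimate of the homography from normalized camera coordinates to image pixels;
    # mask flags the correspondences that fit within ransacbound pixels (the inliers)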
M = np.linalg.inv(M)
if show:
print('M',M,np.sum(mask))
if show:
plt.figure(figsize=(40, 20))
plt.imshow(im)
for rec in recs:
symbol = rec['symbol']
pixel = rec['pixel']
if pixel[0]!= 0 or pixel[1]!=0:
plt.text(pixel[0],pixel[1],symbol, color='yellow',fontsize=38, weight ='bold')
#plt.text(pixel[0],pixel[1],symbol, style='italic',fontsize=30, weight ='bold', bbox=dict(boxstyle="round", ec=(1., 0.5, 0.5), fc=(1., 0.8, 0.8),))
err1 = 0
err2 = 0
feature = ['id', 'symbol', 'name', 'x', 'y', 'pixel_x', 'pixel_y', 'calc_pixel_x', 'calc_pixel_y']
features = []
features.append(feature)
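    # for each annotated feature: pp2 = inv(M) @ normalized point is its predicted pixel position,
    # PP2 = M @ pixel is the reverse mapping; err1/err2 accumulate the pixel-space and
    # normalized-space reprojection errors over the RANSAC inliers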
for i in range(pos2[good == 1].shape[0]):
p1 = pixels[good == 1][i, :]
pp = np.array([pos2[good == 1][i,0],pos2[good == 1][i, 1], 1.0])
pp2 = np.matmul(np.linalg.inv(M),pp)
pp2 = pp2/pp2[2]
P1 = np.array([p1[0],p1[1],1.0])
PP2 = np.matmul(M,P1)
PP2 = PP2/PP2[2]
P2 = pos2[good==1][i,:]
if show and good[i]:
print(i)
print(mask[i]==1,p1,pp2[0:2],np.linalg.norm(p1-pp2[0:2]))
print(mask[i]==1,P2,PP2[0:2],np.linalg.norm(P2-PP2[0:2]))
if mask[i] == 1:
err1 += np.linalg.norm(p1-pp2[0:2])
err2 += np.linalg.norm(P2-PP2[0:2])
if show:
color = 'green' if mask[i] == 1 else 'red'
plt.plot([p1[0],pp2[0]],[p1[1],pp2[1]],color = color, linewidth=6)
plt.plot(p1[0], p1[1], marker = 'X', color=color, markersize=10)
plt.plot(pp2[0], pp2[1], marker='o', color=color, markersize=10)
sym = ''
name = ''
for r in recs:
px = r['pixel'].tolist()
if px[0] == p1[0] and px[1] == p1[1]:
sym = r['symbol']
name = r['name']
x = r['pos3d'][0]
y = r['pos3d'][1]
break
feature = [i, sym, name, x, y, p1[0], p1[1], pp2[0], pp2[1]]
features.append(feature)
i = -1
for r in recs: # Extracting features that were not noted on the image (pixel_x and pixel_y are 0)
i += 1
p1 = pixels[i,:]
if p1[0] == 0 and p1[1] == 0:
pp = np.array([pos2[i,0],pos2[i,1],1.0])
pp2 = np.matmul(np.linalg.inv(M),pp)
pp2 = pp2/pp2[2]
if show:
plt.text(pp2[0],pp2[1],r['symbol'],color='black',fontsize=38, style='italic',
weight='bold')
plt.plot(pp2[0],pp2[1],marker='s', markersize=10, color='black')
x = r['pos3d'][0]
y = r['pos3d'][1]
feature = [i, recs[i]['symbol'], recs[i]['name'], x, y, 0, 0, pp2[0], pp2[1]]
features.append(feature)
if show:
outputCsv = output.replace(".png", "_accuracies.csv")
csvFile = open(outputCsv, 'w', newline='', encoding='utf-8')
csvWriter = csv.writer(csvFile)
for f in features:
csvWriter.writerow(f)
        # send the features to the function that correlates the features among themselves
results = correlate_features(features, 1)
        # get the results and write them to another CSV file
outputCsv = output.replace(".png", "_correlations.csv")
csvFile = open(outputCsv, 'w', newline='', encoding='utf-8')
csvWriter = csv.writer(csvFile)
for r in results:
csvWriter.writerow(r)
print('Output file: ',outputfile)
plt.savefig(outputfile, dpi=300)
plt.show()
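    # Each RANSAC outlier adds the maximum allowed reprojection error
    # (ransacbound) to err2, so camera locations with many outliers are penalized.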
err2 += np.sum(1-mask)*ransacbound
if show:
print ('err',err1,err1/np.sum(mask),err2,err2/np.sum(mask))
return err1,err2
# **********
# read data from the features file
# **********
def read_points_data(filename,pixel_x,pixel_y,scale):
with open(filename) as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
line_count = 0
recs = []
for row in csv_reader:
if line_count == 0:
print(f'Column names are {", ".join(row)}')
line_count += 1
names = row
indx = names.index(pixel_x)
indy = names.index(pixel_y)
else:
line_count += 1
symbol = row[6]
pixel = np.array([int(row[indx]),int(row[indy])])/scale
height = float(row[5]) + float(row[2])
pos3d = np.array([float(row[3]),float(row[4]),height])
name = row[1]
rec = {'symbol' : symbol,
'pixel' : pixel,
'pos3d' : pos3d,
'name' : name}
recs.append(rec)
print(f'Processed {line_count} lines.')
return recs
# **********
# read data from the potential camera locations file
# **********
def read_camera_locations():
with open(camera_locations) as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
line_count = 0
recs = []
for row in csv_reader:
if line_count == 0:
print(f'Column names are {", ".join(row)}')
line_count += 1
names = row
else:
line_count += 1
grid_code = int(row[2])
height = float(row[5]) + 2.0 # addition of 2 meters as the observer height
pos3d = np.array([height,float(row[3]),float(row[4])])
rec = {'grid_code' : grid_code,
'pos3d' : pos3d}
recs.append(rec)
print(f'Processed {line_count} lines.')
return recs
# **********
# Main function
# **********
def do_it(image_name, features, pixel_x, pixel_y, output, scale):
im = cv2.imread(image_name)
im2 = np.copy(im)
im[:,:,0] = im2[:,:,2]
im[:,:,1] = im2[:,:,1]
im[:,:,2] = im2[:,:,0]
plt.figure(figsize=(11.69, 8.27)) # 40,20
plt.imshow(im)
recs = read_points_data(features,pixel_x,pixel_y,scale)
locations = read_camera_locations()
pixels = []
for rec in recs:
symbol = rec['symbol']
pixel = rec['pixel']
if pixel[0] != 0 or pixel[1] != 0:
plt.text(pixel[0],pixel[1],symbol,color='red',fontsize=38)
pixels.append(pixel)
num_matches12 = find_homographies(recs, locations, im, False, 120.0, output)
num_matches2 = num_matches12[:, 1]
#print(np.min(num_matches2[num_matches2 > 0]))
#print(np.max(num_matches2[num_matches2 > 0]))
num_matches2[num_matches2 == 0] = 1000000
print(np.min(num_matches2))
theloci = np.argmin(num_matches2) # theloci contains the best location for the camera
print('location id: ' + str(theloci) + ' - ' + str(locations[theloci]))
find_homographies(recs, [locations[theloci]], im, True, 120.0, output) # Orig = 120.0
#img = '0539'
#img = '0518'
img = 'Henn'
#img = 'Broyn'
#img = 'Tirion'
#img = 'Laboard_b'
camera_locations = ''
grid_code_min = 7
if img == '0539':
ret, mtx, dist, rvecs, tvecs = calibrate_camera(23)
img = cv2.imread('DSC_0539.tif')
h, w = img.shape[:2]
newcameramtx, roi = cv2.getOptimalNewCameraMatrix(mtx, dist, (w, h), 1, (w, h))
dst = cv2.undistort(img, mtx, dist, None, newcameramtx) # un-distort
cv2.imwrite('tmpDSC_0539.png', dst)
image_name = 'tmpDSC_0539.png'
features = 'features.csv'
camera_locations = 'potential_camera_locations_3D.csv'
pixel_x = 'Pixel_x_DSC_0539'
pixel_y = 'Pixel_y_DSC_0539'
output = 'zOutput_DSC_0539.png'
scale = 1.0
elif img == '0518':
ret, mtx, dist, rvecs, tvecs = calibrate_camera(23)
img = cv2.imread('DSC_0518.tif')
h, w = img.shape[:2]
newcameramtx, roi = cv2.getOptimalNewCameraMatrix(mtx, dist, (w, h), 1, (w, h))
dst = cv2.undistort(img, mtx, dist, None, newcameramtx) # un-distort
cv2.imwrite('tmpDSC_0518.png', dst)
image_name = 'tmpDSC_0518.png'
features = 'features.csv'
camera_locations = 'potential_camera_locations_3D.csv'
pixel_x = 'Pixel_x_DSC_0518'
pixel_y = 'Pixel_y_DSC_0518'
output = 'zOutput_DSC_0518.png'
scale = 1.0
elif img == 'Henn':
image_name = 'NNL_Henniker.jpg'
features = 'features.csv'
camera_locations = 'potential_camera_locations_3D.csv'
pixel_x = 'Pixel_x_Henniker'
pixel_y = 'Pixel_y_Henniker'
output = 'zOutput_Henniker.png'
scale = 1.0
elif img == 'Broyn':
image_name = 'de-broyn-1698.tif'
features = 'features.csv'
camera_locations = 'potential_camera_locations_3D.csv'
pixel_x = 'Pixel_x_Broyin'
pixel_y = 'Pixel_y_Broyin'
output = 'zOutput_Broyin.png'
scale = 1.0
elif img == 'Tirion':
image_name = 'Tirion-1732.tif'
features = 'features.csv'
camera_locations = 'potential_camera_locations_3D.csv'
pixel_x = 'Pixel_x_Tirion'
pixel_y = 'Pixel_y_Tirion'
output = 'zOutput_Tirion.png'
scale = 1.0
elif img == 'Laboard_b':
image_name = 'laboard_before.tif'
features = 'features_tiberias.csv'
camera_locations = 'potential_camera_locations_tiberias_3D.csv'
pixel_x = 'Pixel_x_Laboard_b'
pixel_y = 'Pixel_y_Laboard_b'
output = 'zOutput_Laboard_b.png'
scale = 1.0
else:
print('No file was selected')
do_it(image_name,features,pixel_x,pixel_y,output,scale)
print ('**********************')
# print ('ret: ')
# print (ret)
# print ('mtx: ')
# print (mtx)
# print ('dist: ')
# print (dist)
# print('rvecs: ')
# print(rvecs)
# print ('tvecs: ')
# print(tvecs)
print ('Done!')
| [
"matplotlib"
] |
052d66b16ed175c1dd982e8f0cee348fa9a5e72a | Python | philipdongfei/ML_TW | /DL_Math_Map/ch02-2.py | UTF-8 | 967 | 3.125 | 3 | [] | no_license | import numpy as np
import matplotlib.pyplot as plt
from IPython.display import set_matplotlib_formats
set_matplotlib_formats('png', 'pdf')
def f(x):
return x**2 + 1
x = np.linspace(-3, 3, 601)
y = f(x)
x1 = np.linspace(-3, 3, 7)
y1 = f(x1)
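# The three figures below show the same parabola sampled at 7 points, then at
# 31 points, and finally drawn from 601 points as a near-continuous curve.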
plt.figure(figsize=(6,6))
plt.ylim(-2, 10)
plt.plot([-3, 3], [0,0], c='k')
plt.plot([0, 0], [-2,10], c='k')
plt.scatter(x1,y1,c='k',s=50)
plt.grid()
plt.xlabel('x', fontsize=14)
plt.ylabel('y',fontsize=14)
plt.show()
x2 = np.linspace(-3, 3, 31)
y2 = f(x2)
plt.figure(figsize=(6,6))
plt.ylim(-2, 10)
plt.plot([-3, 3], [0,0], c='k')
plt.plot([0, 0], [-2,10], c='k')
plt.scatter(x2,y2,c='k',s=50)
plt.grid()
plt.xlabel('x', fontsize=14)
plt.ylabel('y',fontsize=14)
plt.show()
plt.figure(figsize=(6,6))
plt.plot(x,y,c='k')
plt.ylim(-2,10)
plt.plot([-3, 3], [0,0], c='k')
plt.plot([0, 0], [-2,10], c='k')
plt.scatter([1,2],[2,5],c='k',s=50)
plt.grid()
plt.xlabel('x', fontsize=14)
plt.ylabel('y',fontsize=14)
plt.show()
| [
"matplotlib"
] |
7ceb77e35dd668656135c5b6a7f8ade4220024ac | Python | maxhenderson23/MODpy | /plot_dat.py | UTF-8 | 7,656 | 2.609375 | 3 | [
"MIT"
] | permissive | # This file produces the data plot
# You need the proper effective_luminosity_by_trigger.csv under the MODpy folder, with entries for the .dat files to be processed
# You can put the plot input in plot_input.csv under the MODpy folder
import numpy as np
import matplotlib.pyplot as pl
import os
from operator import add
import csv
import math
# default_range = {"hardest_pT": (0, 2000), "mul_pre_SD": (0, 100), "hardest_eta": (-5, 5), "hardest_phi": (0, 2*math.pi), "hardest_area": (0.68, 1.1)}
# default_title = {"hardest_pT": "hardest jet $p_T$", "mul_pre_SD": "mul. of the hardest jet", "hardest_eta": "hardest jet $\eta$", "hardest_phi": "hardest jet $\phi$", "hardest_area": "hardest jet area"}
# default_axis_labels = {"hardest_pT": ("hardest jet $p_T$ $[GeV]$", "diff. cross-section $[\mu b/GeV]$"), "mul_pre_SD": ("multiplicity", "diff. cross-section $[\mu b]$"), "hardest_eta": ("hardest jet $\eta$", "diff. cross-section $[\mu b]$"), "hardest_phi": ("hardest jet $\phi$", "diff. cross-section $[\mu b]$"), "hardest_area": ("hardest jet area", "diff. cross-section $[\mu b]$")}
get_symbol = {"hardest_pT": "$p_T^\mathrm{hardest\; jet}$", "jet_quality": "JQ", "hardest_eta": "$\eta^\mathrm{hardest\; jet}$"}
get_unit = {"hardest_pT": " GeV", "jet_quality": "", "hardest_eta": ""}
#This function loads the effective luminosity info and calculates the weights that convert data counts to diff. cross sections, stored in scaling_factors
def load_effective_lumi(effective_lumi_file_dir, data_files):
data_file_list = [data_file.replace(".dat", ".mod") for data_file in data_files]
total_effective_lumi = {"HLT_Jet30": 0.0, "HLT_Jet60": 0.0, "HLT_Jet80": 0.0, "HLT_Jet110": 0.0, "HLT_Jet150": 0.0, "HLT_Jet190": 0.0, "HLT_Jet240": 0.0, "HLT_Jet300": 0.0, "HLT_Jet370": 0.0}
effective_lumi_file = csv.reader(open(effective_lumi_file_dir), delimiter=',')
#We look for effective lumi for each key, and add them up into total effective lumi
for row in effective_lumi_file:
if len(row) == 3:
if row[0] in data_file_list:
total_effective_lumi[row[1]] += float(row[2])
print(total_effective_lumi)
#We scale by inverse the total lumi
scaling_factors = {}
for x in total_effective_lumi:
scaling_factors[x] = 1.0/total_effective_lumi[x]
return scaling_factors
#This function produces a list of data values, and another list of scaling weights to scale from event count to diff. cross section
def read_dat_to_list(var_name, effective_lumi_dic_for_DAT_file, DAT_file, constraints):
var_list = []
scale_list = []
column_keys = {}
for row in DAT_file:
if row[0]=="#":
for i, key in enumerate(row[1:]):
column_keys[key] = i
else:
constraints_satisfied = True
for key in constraints:
if not constraints[key][0] <= float(row[column_keys[key]]) <= constraints[key][1]:
constraints_satisfied = False
break
if constraints_satisfied:
var_list.append(float(row[column_keys[var_name]]))
scale_list.append(scaling_factors[row[column_keys["trigger_fired"]]])
return (var_list, scale_list)
##################################################################################################################################################################################
eta_range_check_2point4 = False
hardest_pT_lower_bound = 0.0
#plot_data is a list of datasets to be plotted; each entry is a dict {<data directory>, <label>, <format>}
plot_data = []
# constraints is a dictionary of constraints, with ranges as entries
constraints ={}
plot_input = csv.reader(open("./plot_input.csv"), delimiter=',')
for row in plot_input:
try:
if row[0] == "Settings":
var_name = row[1]
plot_name = row[2]
plot_title = row[3]
x_axis = row[4]
y_axis = row[5]
x_range = (float(row[6].split(":")[0][1:]), float(row[6].split(":")[1][:-1]))
no_of_bins = int(row[7])
if row[8] == "True":
y_scale_log = True
else:
y_scale_log = False
for constraint in row[9:]:
constraints[constraint.split(":")[0]] = (float(constraint.split(":")[1]), float(constraint.split(":")[2]))
elif row[0] == "Data":
plot_data.append({"dir":row[1], "label":row[2], "fmt":row[3]})
except:
pass
#######################################################################################################################################################
pl.figure(plot_name)
pl.title(plot_title)
###################################################################################################################
for data in plot_data:
data_files = os.listdir(data["dir"])
data_files = [os.path.split(data_file)[1] for data_file in data_files if ".dat" in os.path.split(data_file)[1]]
scaling_factors = load_effective_lumi("./effective_luminosity_by_trigger.csv", data_files)
hist_data = []
bin_edges = []
sum_squared_weights = []
count = 0
for i in range(no_of_bins):
hist_data.append(0.0)
sum_squared_weights.append(0.0)
for data_file in data_files:
DAT_file = csv.reader(open(data["dir"] + data_file), delimiter=' ', skipinitialspace = 1)
(var_list, scale_list) = read_dat_to_list(var_name, scaling_factors, DAT_file, constraints)
(current_hist_data, bin_edges) = np.histogram(var_list, bins=no_of_bins, range = x_range, weights = [x*no_of_bins/(x_range[1]-x_range[0]) for x in scale_list])
hist_data = list(map(add, current_hist_data, hist_data))
(current_sum_squared_weights, whatever) = np.histogram(var_list, bins=no_of_bins, range = x_range, weights = [(x*no_of_bins/(x_range[1]-x_range[0])) * (x*no_of_bins/(x_range[1]-x_range[0])) for x in scale_list])
sum_squared_weights = list(map(add, sum_squared_weights, current_sum_squared_weights))
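        # For weighted histograms the per-bin statistical uncertainty is
        # sqrt(sum of squared weights); it is used for the error bars below.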
count += len(var_list)
pl.errorbar((bin_edges[:-1] + bin_edges[1:])/2, hist_data, yerr=np.sqrt(sum_squared_weights), label=data["label"], fmt = data["fmt"])
print("the total cross section is " + str(sum(hist_data)*(x_range[1]-x_range[0])/no_of_bins))
print("the number of events plotted for files in " + data["dir"] + " is " + str(count))
######################################################################################################################################################
def stringify_number(number):
bound_for_float = 7.0
if -bound_for_float<number<bound_for_float and number!=0.0:
return str(number)
return str(int(number))
pl.xlabel(x_axis)
pl.ylabel(y_axis)
if y_scale_log:
pl.yscale('log')
pl.legend(loc='best')
constraint_text_horizontal_position = 0.94 - len(plot_data)*0.07
for key in constraints:
constraint_text = get_symbol[key]
if math.isinf(constraints[key][1]):
if math.isinf(constraints[key][0]):
continue
else:
constraint_text += "$\geq$" + stringify_number(constraints[key][0]) + get_unit[key]
else:
if not math.isinf(constraints[key][0]):
constraint_text = stringify_number(constraints[key][0]) + get_unit[key] + "$\leq$" + constraint_text
constraint_text += "$\leq$" + stringify_number(constraints[key][1]) + get_unit[key]
pl.text(0.97, constraint_text_horizontal_position, constraint_text, horizontalalignment='right', transform=pl.gca().transAxes)
constraint_text_horizontal_position -= 0.07
pl.grid()
pl.show()
| [
"matplotlib"
] |
be2274d5b9257f532c24a5b684b754f016dc7987 | Python | wuqiling97/ANN-Course-hw1 | /plot.py | UTF-8 | 894 | 3 | 3 | [] | no_license | import matplotlib.pyplot as plt
import numpy as np
def __plot(x, y, xlabel, ylabel, title):
plt.plot(x, y, '-')
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.title(title)
plt.show()
def plot_loss(x, title=''):
    plt.plot(x, '-')
plt.xlabel('iterations over epoch')
plt.ylabel('loss')
plt.title(title)
plt.ylim(ymin=0)
plt.show()
def plot_2loss(xtrain, ytrain, xtest, ytest, title=''):
plt.plot(xtrain, ytrain, '-', label='Training')
plt.plot(xtest, ytest, '.-', label='Testing')
plt.xlabel('iterations over epoch')
plt.ylabel('loss')
plt.legend(loc='upper center', framealpha=0.5, ncol=3)
plt.title(title)
plt.ylim(ymin=0)
plt.show()
def plot_acc(x, y, title=''):
plt.plot(x, y, '.-')
plt.xlabel('iterations over epoch')
plt.ylabel('accuracy')
plt.title(title)
plt.ylim(ymax=1)
plt.show()
| [
"matplotlib"
] |
06a13be8424ccb43462c438771c9866ec413bde5 | Python | DidiMilikina/DataCamp | /Data Scientist with Python - Career Track /15. Interactive Data Visualization with Bokeh/02. Layouts, Interactions, and Annotations/11. Adding a hover tooltip.py | UTF-8 | 1,230 | 4.15625 | 4 | [] | no_license | '''
Adding a hover tooltip
Working with the HoverTool is easy for data stored in a ColumnDataSource.
In this exercise, you will create a HoverTool object and display the country for each circle glyph in the figure that you created in the last exercise. This is done by assigning the tooltips keyword argument to a list-of-tuples specifying the label and the column of values from the ColumnDataSource using the @ operator.
The figure object has been prepared for you as p.
After you have added the hover tooltip to the figure, be sure to interact with it by hovering your mouse over each point to see which country it represents.
Instructions
100 XP
Import the HoverTool class from bokeh.models.
Use the HoverTool() function to create a HoverTool object called hover and set the tooltips argument to be [('Country','@Country')].
Use p.add_tools() with your HoverTool object to add it to the figure.
'''
SOLUTION
# Import HoverTool from bokeh.models
from bokeh.models import HoverTool
# Create a HoverTool object: hover
hover = HoverTool(tooltips=[('Country', '@Country')])
# Add the HoverTool object to figure p
p.add_tools(hover)
# Specify the name of the output_file and show the result
output_file('hover.html')
show(p)
| [
"bokeh"
] |
7fdf83bd679921fcaa371554bd52decd1b6ac541 | Python | SynProboszcza/Wizualizacja-Danych | /Zadania/matplotlib2.py | UTF-8 | 1,542 | 2.984375 | 3 | [] | no_license | import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import LinearLocator, FormatStrFormatter
from matplotlib import cm
#Zad1
# fig = plt.figure()
# ax = fig.gca(projection='3d')
#
# z = np.linspace(0, 2*np.pi, 100)
# x = np.sin(z)
# y = 2*np.cos(z)
#
# ax.plot(x, y, z)
# plt.show()
#Zad2
# np.random.seed(18912837)
#
# def randrange(n, vmin, vmax):
# return (vmax - vmin) * np.random.rand(n) + vmin
#
# fig = plt.figure()
# ax = fig.add_subplot(projection='3d')
# n = 100
#
# for c, m, zlow, zhigh in [('tan', '.', 1, 2),('indigo', '1', 3, 4),('mediumturquoise', '*', 5, 6),('aquamarine', '*', 7, 8),('lime', '*', 9, 10)]:
# xs = randrange(n, 23, 32)
# ys = randrange(n, 0, 100)
# zs = randrange(n, zlow, zhigh)
# ax.scatter(xs, ys, zs, c=c, marker=m)
#
# ax.set_xlabel('x')
# ax.set_ylabel('y')
# ax.set_zlabel('z')
#
# plt.show()
#Zad3
# fig = plt.figure()
# ax = fig.gca(projection='3d')
#
# X = np.arange(-5, 5, 0.25)
# Y = np.arange(-5, 5, 0.25)
# X, Y = np.meshgrid(X, Y)
# R = np.sqrt(X**2 + Y**2)
# Z = np.sin(R)
#
# # list of colours to choose from
# # drawn at random so the code is not repeated 5 times
# kolorki = [cm.prism, cm.ocean, cm.terrain, cm.brg, cm.jet]
#
# surf = ax.plot_surface(X, Y, Z, cmap=kolorki[np.random.randint(0,len(kolorki)-1)], linewidth=0, antialiased=False)
#
# ax.set_zlim(-1.01, 1.01)
# ax.zaxis.set_major_locator(LinearLocator(10))
# ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))
#
# fig.colorbar(surf, shrink=0.5, aspect=5)
# plt.show()
| [
"matplotlib"
] |
9694c2b1b2fbb4657e187b5bd637f04a537b8a15 | Python | xico2001pt/feup-fpro | /ch02.py | UTF-8 | 1,125 | 2.65625 | 3 | [] | no_license | import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
def update( frameNum, img, world, N ) :
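    # Conway's Game of Life step on a torus: neighbour indices wrap with % N,
    # a live cell (255) survives with 2 or 3 live neighbours, and a dead cell
    # becomes alive with exactly 3 live neighbours.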
newWorld = world.copy( )
for i in range (N):
for j in range (N):
total= (world[(i-1)%N][(j-1)%N]+world[(i-1)%N][j]+world[(i-1)%N][(j+1)%N]+
world[i][(j-1)%N]+world[i][(j+1)%N]+ world[(i+1)%N][(j-1)%N]+
world[(i+1)%N][j]+ world[(i+1)%N][(j+1)%N])/255
if world[i][j]== 255:
if total>3 or total<2:
newWorld[i][j]= 0
else:
if total == 3:
newWorld[i][j] = 255
img.set_data( newWorld )
world[:] = newWorld[:]
return img
N = 50
SPEED = 100
PROB_LIFE = 40
world= np.random.choice([0,255], N*N, p=[1-((PROB_LIFE)/100),(PROB_LIFE)/100]).reshape(N,N)
fig, ax = plt.subplots( )
img = ax.imshow( world, interpolation='nearest' )
ani = animation.FuncAnimation( fig, update, fargs = ( img, world, N ),
frames = 10, interval = SPEED,
save_count = 50 )
plt.show( ) | [
"matplotlib"
] |
ec9e37df43433ceb839330c0ba61e9c498d8f69d | Python | sallllly0307/RL_repo | /bandit_problem/bandit.py | UTF-8 | 2,449 | 2.875 | 3 | [] | no_license | import random
import numpy as np
import pandas as pd
import numpy.random as rd
import matplotlib.pyplot as plt
import datetime
from tqdm import tqdm
from utils import EpsilonGreedy, UCB1, ThompsonSampling
from utils import BernoulliArm
from testdata import exponential
def test_algorithm(algo, arms, num_sims, horizon):
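    # Run `num_sims` independent simulations of `horizon` pulls each and record,
    # for every step: the simulation index, time step, chosen arm, reward and
    # cumulative reward.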
chosen_arms = np.zeros(num_sims * horizon)
rewards = np.zeros(num_sims * horizon)
cumulative_rewards = np.zeros(num_sims * horizon)
sim_nums = np.zeros(num_sims * horizon)
times = np.zeros(num_sims * horizon)
for sim in tqdm(range(num_sims)):
sim = sim + 1
algo.initialize(len(arms))
for t in range(horizon):
t = t + 1
index = (sim - 1) * horizon + t - 1
sim_nums[index] = sim
times[index] = t
chosen_arm = algo.select_arm()
chosen_arms[index] = chosen_arm
reward = arms[chosen_arm].draw()
rewards[index] = reward
if t == 1:
cumulative_rewards[index] = reward
else:
cumulative_rewards[index] = cumulative_rewards[index - 1] + reward
algo.update(chosen_arm, reward)
return [sim_nums, times, chosen_arms, rewards, cumulative_rewards]
def run(algo, label):
print(label)
algo.initialize(n_arms)
results = test_algorithm(algo, arms, num_sims=NUM_SIMS, horizon=HORIZON)
df = pd.DataFrame({"times": results[1], "rewards": results[3]})
grouped = df["rewards"].groupby(df["times"])
plt.plot(grouped.mean(), label=label)
plt.legend(loc="best")
if __name__ == '__main__':
NUM_SIMS = 100
    # set the number of selections
HORIZON = 1000
    # set the number of trials
    # problem setup: arms: of the 100 arms, exactly one is a winner
theta = np.array(exponential)
n_arms = len(theta)
arms = map(lambda x: BernoulliArm(x), theta)
arms = list(arms)
    # epsilon values for EpsilonGreedy
algos = {
'random': EpsilonGreedy([], [], epsilon=1),
'Eps0.3': EpsilonGreedy([], [], epsilon=0.3),
'Eps0.6': EpsilonGreedy([], [], epsilon=0.6),
'UCB1': UCB1([], []),
'TS': ThompsonSampling([], [])}
for key, algo in algos.items():
run(algo, label=key)
    # save the figure with the current timestamp in the filename
now = datetime.datetime.now()
plt.xlim(0, 1000)
plt.ylim(-1, 1)
plt.savefig('bandit' + str(now) + '.png')
| [
"matplotlib"
] |
a9346e0c4ad9c3aad37ba117c3ac80123dd2229e | Python | Takatsuki0204/python_test | /Graph_plot/pandas_test.py | UTF-8 | 1,300 | 2.875 | 3 | [] | no_license | #! /usr/bin/python
# encoding: utf-8
import datetime, sqlite3
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
data = pd.read_json("http://192.168.10.121:3000/monitoring/122?date=20160523")
#connector = sqlite3.connect("C:\Users\TAKATSUKI\Documents\環境BOX\sqlite_db\environment.db")
#data = pd.read_sql("select * from test", con=connector)
print data.head(10) # show the first 10 rows
print data.tail(20) # show the last 20 rows
print data["light"] # show only a single column
print data[["temperature", "humidity"]] # show multiple columns
print data["temperature"].sum() # show the sum of the temperature column (mean: average, median: middle value)
print data.describe() # mean, variance, etc.
print data["temperature"].mean() # average temperature
print data.ix[10:100, ["light", "temperature"]] # ix: both column names and numbers can be used; show light and temperature from row 10 to row 100
print data.query("light == 193 & humidity == 50") # conditional expression (&: and, |: or)
print data.query("index in [10, 20]") # show specific rows
print data[["temperature", "humidity"]].sum(axis=0) # axis=1: compute horizontally (across columns)
print data.sort("humidity", ascending=False) # sort in descending order
# data.to_csv("log.csv") # save in CSV format | [
"matplotlib"
] |
864eb5623d9799817f4ac69d9852f3d052102eee | Python | xavier-rp/Betti_scm | /exact_chi_square.py | UTF-8 | 23,719 | 2.828125 | 3 | [] | no_license | import numpy as np
from scipy.stats import chi2
import scipy as sp
from loglin_model import *
import time
import itertools
import json
import os
import csv
import matplotlib.pyplot as plt
######### preliminary tests
def mle_multinomial_from_table(cont_table):
n = np.sum(cont_table)
p_list = []
for element in cont_table.flatten():
p_list.append(element/n)
return p_list
def multinomial_cont_table(nb_trials, nb_categories):
probabilities = [1 / float(nb_categories)] * nb_categories
return np.random.multinomial(nb_trials, probabilities, 1).reshape(2, 2)
def multinomial_problist_cont_table(nb_trials, prob_list):
return np.random.multinomial(nb_trials, prob_list, 1).reshape(2, 2)
def multinomial_problist_cont_cube(nb_trials, prob_list):
return np.random.multinomial(nb_trials, prob_list, 1).reshape(2, 2, 2)
def multinomial_cont_cube(nb_trials, nb_categories):
probabilities = [1 / float(nb_categories)] * nb_categories
return np.random.multinomial(nb_trials, probabilities, 1).reshape(2, 2, 2)
def sample_mult_cont_table(nb_samples, nb_trials, nb_categories):
samples = []
for i in range(nb_samples):
samples.append(multinomial_cont_table(nb_trials, nb_categories))
return samples
def sample_mult_cont_table_prob(nb_samples, nb_trials, problist):
samples = []
for i in range(nb_samples):
samples.append(multinomial_problist_cont_table(nb_trials, problist))
return samples
def sample_mult_cont_cube(nb_samples, nb_trials, nb_categories):
samples = []
for i in range(nb_samples):
samples.append(multinomial_cont_cube(nb_trials, nb_categories))
return samples
def chisq_stats(sample_list, observed):
chisqlist = []
for sample in sample_list:
chisqlist.append(chisq_formula(observed, sample))
return chisqlist
def chisq_formula(cont_tab, expected):
#Computes the chisquare statistics and its p-value for a contingency table and the expected values obtained
#via MLE or iterative proportional fitting.
if float(0) in expected:
test_stat = 0
else:
test_stat = np.sum((cont_tab - expected) ** 2 / expected)
return test_stat
def sampled_chisq_test(cont_table, expected_table, sampled_array):
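    # Empirical test: the p-value is one minus the fraction of statistics in
    # `sampled_array` (the sampled null distribution) that fall below the
    # observed statistic.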
if float(0) in expected_table:
test_stat = 0
pval = 1
else:
test_stat = np.sum((cont_table - expected_table) ** 2 / expected_table)
cdf = np.sum((sampled_array < test_stat) * 1) / len(sampled_array)
pval = 1 - cdf
return test_stat, pval
def chisq_test(cont_tab, expected):
#Computes the chisquare statistics and its p-value for a contingency table and the expected values obtained
#via MLE or iterative proportional fitting.
df = 3
test_stat = np.sum((cont_tab-expected)**2/expected)
p_val = chi2.sf(test_stat, df)
return test_stat, p_val
if __name__ == '__main__':
##################### Find all the pvalues with exact distribution for all
##################### tables in the data :
def to_occurrence_matrix(matrix, savepath=None):
"""
Transform a matrix into a binary matrix where entries are 1 if the original entry was different from 0.
Parameters
----------
matrix (np.array)
savepath (string) : path and filename under which to save the file
Returns
-------
The binary matrix or None if a savepath is specified.
"""
if savepath is None:
return (matrix > 0) * 1
else:
np.save(savepath, (matrix > 0) * 1)
def get_cont_table(u_idx, v_idx, matrix):
#Computes the 2X2 contingency table for the occurrence matrix
row_u_present = matrix[u_idx, :]
row_v_present = matrix[v_idx, :]
row_u_not_present = 1 - row_u_present
row_v_not_present = 1 - row_v_present
# u present, v present
table00 = np.dot(row_u_present, row_v_present)
# u present, v NOT present
table01 = np.dot(row_u_present, row_v_not_present)
# u NOT present, v present
table10 = np.dot(row_u_not_present, row_v_present)
# u NOT present, v NOT present
table11 = np.dot(row_u_not_present, row_v_not_present)
return np.array([[table00, table01], [table10, table11]])
###### TO COUNT THE NUMBER OF DIFFERENT TABLES
matrix1 = np.loadtxt('final_OTU.txt', skiprows=0, usecols=range(1, 39))
matrix1 = to_occurrence_matrix(matrix1, savepath=None)
table_set = set()
########### Count number of different tables :
#for one_simplex in itertools.combinations(range(matrix1.shape[0]), 2):
# computed_cont_table = get_cont_table(one_simplex[0], one_simplex[1], matrix1)
# table_str = str(computed_cont_table[0, 0]) + '_' + str(computed_cont_table[0, 1]) + '_' + str(
# computed_cont_table[1, 0]) + '_' + str(computed_cont_table[1, 1])
# if table_str not in table_set:
# table_set.add(table_str)
#table_set = list(table_set)
#print('How many different tables : ', len(table_set))
#print(table_set)
#json.dump(table_set, open("table_list.json", 'w'))
#exit()
########### Count number of different cubes. NEED TO HAVE A CSV FILE
def get_cont_cube(u_idx, v_idx, w_idx, matrix):
# Computes the 2X2X2 contingency table for the occurrence matrix
row_u_present = matrix[u_idx, :]
row_v_present = matrix[v_idx, :]
row_w_present = matrix[w_idx, :]
row_u_not = 1 - row_u_present
row_v_not = 1 - row_v_present
row_w_not = 1 - row_w_present
#All present :
table000 =np.sum(row_u_present*row_v_present*row_w_present)
# v absent
table010 = np.sum(row_u_present*row_v_not*row_w_present)
# u absent
table100 = np.sum(row_u_not*row_v_present*row_w_present)
# u absent, v absent
table110 = np.sum(row_u_not*row_v_not*row_w_present)
# w absent
table001 = np.sum(row_u_present*row_v_present*row_w_not)
# v absent, w absent
table011 = np.sum(row_u_present*row_v_not*row_w_not)
# u absent, w absent
table101 = np.sum(row_u_not*row_v_present*row_w_not)
# all absent
table111 = np.sum(row_u_not*row_v_not*row_w_not)
return np.array([[[table000, table010], [table100, table110]], [[table001, table011], [table101, table111]]], dtype=np.float64)
#with open('exact_chi1_triangles_001_final_otu.csv', 'r') as csvfile:
# reader = csv.reader(csvfile)
# next(reader)
# for row in reader:
# computed_cont_table = get_cont_cube(int(row[0]), int(row[1]), int(row[2]), matrix1)
# table_str = str(int(computed_cont_table[0, 0, 0])) + '_' + str(int(computed_cont_table[0, 0, 1])) + '_' + str(int(computed_cont_table[0, 1, 0])) + '_' + str(int(computed_cont_table[0, 1, 1])) + '_' + str(int(computed_cont_table[1, 0, 0])) + '_' + str(int(computed_cont_table[1, 0, 1])) + '_' + str(int(computed_cont_table[1, 1, 0])) + '_' + str(int(computed_cont_table[1, 1, 1]))
# if table_str not in table_set:
# table_set.add(table_str)
#table_set = list(table_set)
#print('How many different tables : ', len(table_set))
#print(table_set)
#json.dump(table_set, open("exact_chi1_cube_list.json", 'w'))
#exit()
#with open('table_list.json') as json_file:
# table_set = json.load(json_file)
# #### From the different tables : generate the chisqdist :
# pvaldictio = {}
# #TODO iterate over all the tables
# # Max index used in range() :
# lastrange = 1950
# maxrange = 2000
# for it in range(lastrange, maxrange):
# table_id = table_set[it]
# table = np.random.rand(2,2)
# table_id_list = str.split(table_id, '_')
# table[0, 0] = int(table_id_list[0])
# table[0, 1] = int(table_id_list[1])
# table[1, 0] = int(table_id_list[2])
# table[1, 1] = int(table_id_list[3])
# N = np.sum(table)
# expected = mle_2x2_ind(table)
# problist = mle_multinomial_from_table(expected)
# chisqlist = []
# start = time.clock()
# for it in range(1000000):
# #print(it)
# sample = multinomial_problist_cont_table(N, problist)
# chisqlist.append(chisq_formula(sample, expected))
# pvaldictio[table_id] = sampled_chisq_test(table, expected, chisqlist)
# print('Time for one it : ', time.clock()-start)
# json.dump(pvaldictio, open("exact_chisq\pvaldictio_" + str(lastrange) + "_" + str(maxrange) + ".json", 'w'))
def pvalue_AB_AC_BC(cont_cube):
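        # Fit the no-three-factor-interaction model [AB][AC][BC] by iterative
        # proportional fitting and return the asymptotic chi-square p-value
        # (df = 1), or None when the fit cannot be computed.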
expected = iterative_proportional_fitting_AB_AC_BC_no_zeros(cont_cube)
if expected is not None:
return chisq_test(cont_cube, expected)[1]
else:
return expected
def chisq_test(cont_tab, expected):
# Computes the chisquare statistics and its p-value for a contingency table and the expected values obtained
# via MLE or iterative proportional fitting.
df = 1
test_stat = np.sum((cont_tab - expected) ** 2 / expected)
p_val = chi2.sf(test_stat, df)
return test_stat, p_val
with open('exact_chi1_cube_list.json') as json_file:
table_set = json.load(json_file)
#### From the different tables : generate the chisqdist :
pvaldictio = {}
# TODO iterate over all the tables
# Max index used in range() :
no_mle_table_count = 0
for it in range(len(table_set)):
table_id = table_set[it]
table = np.random.rand(2, 2, 2)
table_id_list = str.split(table_id, '_')
table[0, 0, 0] = int(table_id_list[0])
table[0, 0, 1] = int(table_id_list[1])
table[0, 1, 0] = int(table_id_list[2])
table[0, 1, 1] = int(table_id_list[3])
table[1, 0, 0] = int(table_id_list[4])
table[1, 0, 1] = int(table_id_list[5])
table[1, 1, 0] = int(table_id_list[6])
table[1, 1, 1] = int(table_id_list[7])
pval = pvalue_AB_AC_BC(table)
if pval is not None:
if pval < 0.001:
print(pvalue_AB_AC_BC(table))
no_mle_table_count +=1
#N = np.sum(table)
#expected_original = iterative_proportional_fitting_AB_AC_BC_no_zeros(table)
N = np.sum(table)
expected_original = iterative_proportional_fitting_AB_AC_BC_no_zeros(table)
if expected_original is not None:
print(it)
problist = mle_multinomial_from_table(expected_original)
chisqlist = []
start = time.clock()
for it in range(1000000):
# print(it)
sample = multinomial_problist_cont_cube(N, problist)
expected = iterative_proportional_fitting_AB_AC_BC_no_zeros(sample)
if expected is not None:
chisqlist.append(chisq_formula(sample, expected))
else:
print('Shyte')
pvaldictio[table_id] = sampled_chisq_test(table, expected_original, chisqlist)
print('Time for one it : ', time.clock() - start)
print(no_mle_table_count)
exit()
json.dump(pvaldictio, open("exact_chisq_1deg_cc\cube_pvaldictio.json", 'w'))
#np.save(os.path.join('exact_chisq', table_id), chisqlist_prob)
exit()
####################################################
############## Build the entire dictionary
#import os
#with open(r'C:\Users\Xavier\Desktop\Notes de cours\Maîtrise\Projet OTU\Betti_scm-master (1)\Betti_scm-master\exact_chisq\pvaldictio_0_200.json') as jsonfile:
# data = json.load(jsonfile)
# print(data)
#complete_pval_dictionary = {}
#directory = 'exact_chisq'
#for filename in os.listdir(directory):
# if filename.endswith(".json"):
# with open(os.path.join(directory, filename)) as json_file:
# print(os.path.join(directory, filename))
# data = json.load(json_file)
# complete_pval_dictionary.update(data)
#json.dump(complete_pval_dictionary, open("exact_chisq\complete_pval_dictionary.json", 'w'))
#exit()
##### Load dictio
#with open(r'exact_chisq/complete_pval_dictionary.json') as jsonfile:
# data = json.load(jsonfile)
# print(len(data))
#exit()
    ##### Method to extract the exact distribution:
#computed_cont_table = np.array([[2, 0], [ 0, 36]])
#N = np.sum(computed_cont_table)
#expected = mle_2x2_ind(computed_cont_table)
#problist = mle_multinomial_from_table(expected)
#chisqlist_prob = []
#for it in range(1000000):
# print(it)
# sample = multinomial_problist_cont_table(N, problist)
# chisqlist_prob.append(chisq_formula(sample, expected))
#np.save(str(computed_cont_table[0,0]) + '_' + str(computed_cont_table[0,1]) + '_' + str(computed_cont_table[1,0]) + '_' + str(computed_cont_table[1,1]), chisqlist_prob)
    ###### To count the number of different tables:
#computed_cont_table = np.array([[2, 0], [0, 36]])
#table_set = set()
#table_str = str(computed_cont_table[0, 0]) + '_' + str(computed_cont_table[0, 1]) + '_' + str(
# computed_cont_table[1, 0]) + '_' + str(computed_cont_table[1, 1])
#if table_str not in table_set:
# table_set.add(table_str)
#####
#exit()
import matplotlib.pyplot as plt
#fig, ax = plt.subplots(1, 1)
#chisqlist = []
##for i in range(1000000):
## print(i)
## sample = multinomial_problist_cont_table(38, [1/4, 1/4, 1/4, 1/4])
## expected = mle_2x2_ind(sample)
## chisqlist.append(chisq_formula(sample, expected))
##np.save('chisq38', np.array(chisqlist))
#chisqlist = np.load('chisq38.npy')
#print(chisqlist)
#bins6 = np.arange(0, max(chisqlist), 0.1)
## print(max(chisqlist))
#counts, bins = np.histogram(chisqlist, len(np.arange(0, max(chisqlist), 0.5)))
#print(bins)
#plt.hist(bins[:-1], bins, weights=counts/np.sum(counts))
##plt.show()
##hist = plt.hist(chisqlist, bins=bins6, alpha=0.5, label='10mil')
#x = np.arange(0, 100, 0.01)
## ax.plot(x, chi2.pdf(x, df), 'r-', label='chi2 pdf')
##ax.plot(x, chi2.pdf(x, df), label='Asympt degree 3')
#ax.plot(x, chi2.pdf(x, 1), label='degree 1')
## bins2 = np.arange(0, max(chisqlist) + 5, 1)
## print(max(chisqlist))
## plt.hist(chisqlist, bins=bins2, alpha=0.5)
#plt.legend()
#plt.xlim([0, 20])
#plt.ylim([0, 1])
#plt.show()
#exit()
#plist = []
#for i in range(1000):
# contable = multinomial_cont_table(38, 4)
# plist.append(mle_multinomial_from_table(contable)[0])
#print(np.sum(plist)/1000)
#contable = multinomial_cont_table(38, 4)
#print(contable)
#plist = mle_multinomial_from_table(contable)
#print(plist)
#exit()
#fig, ax = plt.subplots(1, 1)
#x = np.linspace(chi2.ppf(0.01, 1),
# chi2.ppf(0.99, 1), 100)
#ax.plot(x, chi2.pdf(x, 1),
# 'r-', lw=5, alpha=0.6, label='chi2 pdf')
#plt.show()
    ######### Linear time:
#timelist = []
#itlist = []
#for it in [1, 10, 100, 1000, 10000, 100000, 1000000, 5000000]:
# print(it)
# start = time.time()
# sample_mult_cont_table(int(it), 38, 4)
# timelist.append(time.time()-start)
# itlist.append(int(it))
#plt.plot(itlist, timelist)
#plt.show()
#exit()
    ######## Difference between the real chi2, the multinomial-1/4 chi2 and the MLE-multinomial chi2
#samples = sample_mult_cont_table(1000000, 38, 4)
#sam = np.array([[2, 0], [ 0, 36]])
#expected = mle_2x2_ind(sam)
#problist = mle_multinomial_from_table(expected)
##chisqlist = []
#chisqlist_prob = []
#for it in range(1000000):
# print(it)
# sample = multinomial_problist_cont_table(38, problist)
# chisqlist_prob.append(chisq_formula(sample, expected))
#for sample in samples:
# expected = mle_2x2_ind(sample)
# if expected is not None:
# chisqlist.append(chisq_formula(sample, expected))
#for sample in samplesprob:
# chisqlist_prob.append(chisq_formula(sample, expected))
#np.save('exact_chi3_forpval', chisqlist_prob)
#np.save('exact_chisqlist380000_10millions', chisqlist_prob)
#exit()
df = 3
    # TODO: df = 3 seems closer to the asymptotic distribution
#fig, ax = plt.subplots(1, 1)
#chisqlist_1000 = np.load('exact_chi3.npy')
#bins = np.arange(0, max(chisqlist_1000) + 5, 0.1)
## print(max(chisqlist))
#plt.hist(chisqlist_1000, bins=bins, alpha=0.5, density=True, label='un quart')
#plt.show()
    ##### Difference between the p-values
    # np.array([[21, 9], [3, 5]]) # prob
    # np.array([[512, 234], [103, 151]]) # when 1000
    # np.array([[5120, 2340], [1030, 1510]]) # when 10000
    # np.array([[1, 5], [5, 27]]) # probanother
    # np.array([[ 2, 0], [ 0, 36]]) problowpval with 0 and 753, pval of e-10, phi = 1
#exact_chisqlist10mil = np.load('exact_chisqlist380000_10millions.npy')
#exact_chisqlist = np.load('exact_chisqlist.npy')
#exact_chisqlist380000 = np.load('exact_chisqlist380000.npy')
#exact_chisqlist38000000 = np.load('exact_chisqlist38000000.npy')
#chisqlist_unquart = np.load('chisqlist_unquart.npy')
#chisqlist_prob = np.load('chisqlist_prob.npy')
#chisqlist_probanother = np.load('chisqlist_probanother.npy')
chisqlist_chi3 = np.load('exact_chi3.npy')
chisqlist_chi3_pre = np.load('exact_chi3_precise.npy')
chisqlist_chi3_forpval = np.load('exact_chi3_forpval.npy')
cont = np.array([[ 2, 0], [ 0, 36]]) # problowval
expec = mle_2x2_ind(cont)
#print(expec)
#pvalue_exact = sampled_chisq_test(cont, expec, exact_chisqlist)
#pvalunquart = sampled_chisq_test(cont, expec, chisqlist_unquart)
#pvalueprob = sampled_chisq_test(cont, expec, chisqlist_prob)
#pval = chisq_test(cont, expec)
print(sampled_chisq_test(cont, expec, chisqlist_chi3), sampled_chisq_test(cont, expec, chisqlist_chi3_forpval))
#print(cont, expec, chisq_test(cont, expec))
#print(pval, pvalunquart, pvalueprob)
#print('EXACT : ', pvalue_exact)
#print('WRONG DIST : ', sampled_chisq_test(cont, expec, chisqlist_problowpval))
print('ASYMPT : ', chisq_test(cont, expec))
#exit()
    ##### differences in the shape of the curves
fig, ax = plt.subplots(1, 1)
#bins = np.arange(0, max(chisqlist_unquart) + 5, 0.1)
## print(max(chisqlist))
#plt.hist(chisqlist_unquart, bins=bins, alpha=0.5, density=True, label='un quart')
bins2 = np.arange(0, max(chisqlist_chi3) + 1, 0.5)
bins3 = np.arange(0, max(chisqlist_chi3_pre), 0.05)
# print(max(chisqlist))
plt.hist(chisqlist_chi3, bins=bins2, weights=np.repeat(1/len(chisqlist_chi3), len(chisqlist_chi3)), alpha=0.5, label='EXACT')
plt.hist(chisqlist_chi3_forpval, bins=bins2, weights=np.repeat(1 / len(chisqlist_chi3_forpval), len(chisqlist_chi3_forpval)), alpha=0.5,
label='EXACT_less')
#plt.hist(chisqlist_chi3_pre, bins=bins3, alpha=0.5, density=True, label='Precise')
#bins3 = np.arange(0, max(chisqlist_prob) + 5, 0.1)
## print(max(chisqlist))
#plt.hist(chisqlist_probanother, bins=bins3, alpha=0.5, density=True, label='probanother')
#bins4 = np.arange(0, max(chisqlist_problowpval) + 5, 0.5)
# print(max(chisqlist))
#plt.hist(chisqlist_problowpval, bins=bins4, alpha=0.5, density=True, label='WRONG')
#bins4 = np.arange(0, max(exact_chisqlist380000) + 5, 0.1)
# print(max(chisqlist))
#plt.hist(exact_chisqlist380000, bins=bins4, alpha=0.5, density=True, label='380000')
#bins5 = np.arange(0, max(exact_chisqlist38000000) + 5, 0.01)
# print(max(chisqlist))
#plt.hist(exact_chisqlist38000000, bins=bins5, alpha=0.5, density=True, label='380000')
#bins6 = np.arange(0, max(exact_chisqlist10mil) + 5, 0.01)
# print(max(chisqlist))
#plt.hist(exact_chisqlist10mil, bins=bins6, alpha=0.5, density=True, label='10mil')
x = np.arange(0, 100, 0.01)
# ax.plot(x, chi2.pdf(x, df), 'r-', label='chi2 pdf')
ax.plot(x, chi2.pdf(x, df), label='Asympt degree 3')
#ax.plot(x, chi2.pdf(x, 1), label='degree 1')
# bins2 = np.arange(0, max(chisqlist) + 5, 1)
# print(max(chisqlist))
# plt.hist(chisqlist, bins=bins2, alpha=0.5)
plt.legend()
plt.xlim([0, 20])
plt.ylim([0, 1])
plt.show()
exit()
#chisqlist = []
#for sample in samples:
# expected = iterative_proportional_fitting_AB_AC_BC_no_zeros(sample, delta=0.000001)
# if expected is not None :
# chisqlist.append(chisq_formula(sample, expected))
#print('Sampling time : ', time.time() - start)
#start = time.time()
#chisqlist = np.array(chisqlist)
#np.save('chisqlist2x2x2100000', chisqlist)
#print('To array time : ', time.time() - start)
#print(max(chisqlist))
#exit()
chisqlist = np.load('chisqlist.npy')
#stat = 1
#start = time.time()
#difflist = []
#cdflist = []
#print(len(chisqlist))
#for stat in np.arange(0.1, 100, 0.1):
# cdflist.append(np.sum((chisqlist < stat)*1)/len(chisqlist))
# #difflist.append(np.abs(chi2.cdf(stat, 1) - np.sum((chisqlist < stat)*1)/1000000))
# #difflist.append(chi2.cdf(stat, 1) - np.sum((chisqlist < stat) * 1) / 1000000)
# #print('Sampled cumulative prob for chi = ' + str(stat) + 'is :', np.sum((chisqlist < stat)*1)/1000000, 'CHI2 cumulative prob : ', chi2.cdf(stat, 1))
#print('Probability time : ', time.time() - start)
##plt.plot(np.arange(0.1, 100, 0.1), difflist)
#plt.plot(np.arange(0.1, 100, 0.1), cdflist)
#plt.plot(np.arange(0.1, 100, 0.1), chi2.cdf(np.arange(0.1, 100, 0.1), 1))
#print(cdflist[-1])
#plt.show()
print(chisq_formula(np.array([[38,0], [0,0]]),np.array([[38,0], [0,0]])))
#chisqlist = chisq_stats(samples, np.array([[38,0], [0,0]]))
#for chi in chisqlist:
# print(chi)
exit()
#print('HERE')
#for chi in chisqlist:
# if type(chi) != np.float64:
# print(chi)
df = 1
fig, ax = plt.subplots(1, 1)
bins = np.arange(0, max(chisqlist)+5, 0.1)
#print(max(chisqlist))
plt.hist(chisqlist, bins=bins, alpha=0.5, density=True)
x = np.arange(min(chisqlist), max(chisqlist), 0.01)
#ax.plot(x, chi2.pdf(x, df), 'r-', label='chi2 pdf')
ax.plot(x, chi2.pdf(x, df))
ax.plot(x, chi2.pdf(x, 1))
#bins2 = np.arange(0, max(chisqlist) + 5, 1)
#print(max(chisqlist))
#plt.hist(chisqlist, bins=bins2, alpha=0.5)
plt.show()
| [
"matplotlib"
] |
00c66cb800d3f89dc0c2fc1b74acbb963cc05ea4 | Python | johndpope/animeface | /implementations/gan_utils/utils.py | UTF-8 | 5,252 | 2.78125 | 3 | [
"MIT"
] | permissive |
import os
import warnings
import torch
import torchvision as tv
from PIL import Image
# device specification
def get_device(): return torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
# noise sampler
# normal distribution
def sample_nnoise(size, mean=0., std=1., device=None):
if device == None:
device = get_device()
return torch.empty(size, device=device).normal_(mean, std)
# uniform distribution
def sample_unoise(size, from_=0., to=1., device=None):
if device == None:
device = get_device()
return torch.empty(size, device=device).uniform_(from_, to)
# exponential moving average
@torch.no_grad()
def update_ema(G, G_ema, decay=0.999):
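    # Exponential moving average of the generator weights:
    # ema <- decay * ema + (1 - decay) * param, computed without tracking gradients.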
G.eval()
param_ema = dict(G_ema.named_parameters())
param = dict(G.named_parameters())
for key in param_ema.keys():
param_ema[key].data.mul_(decay).add_(param[key].data, alpha=(1 - decay))
G.train()
class GANTrainingStatus:
'''GAN training status helper'''
def __init__(self):
self.losses = {'G' : [], 'D' : []}
self.batches_done = 0
def append_g_loss(self, g_loss):
'''append generator loss'''
if isinstance(g_loss, torch.Tensor):
g_loss = g_loss.item()
self.losses['G'].append(g_loss)
def append_d_loss(self, d_loss):
'''append discriminator loss'''
if isinstance(d_loss, torch.Tensor):
d_loss = d_loss.item()
self.losses['D'].append(d_loss)
self.batches_done += 1
def add_loss(self, key):
'''define additional loss'''
if not isinstance(key, str):
raise Exception('Input a String object as the key. Got type {}'.format(type(key)))
self.losses[key] = []
def append_additional_loss(self, **kwargs):
'''append additional loss'''
for key, value in kwargs.items():
try:
self.losses[key].append(value)
except KeyError as ke:
warnings.warn('You have tried to append a loss keyed as \'{}\' that is not defined. Please call add_loss() or check the spelling.'.format(key))
def append(self, g_loss, d_loss, **kwargs):
'''append loss at once'''
self.append_g_loss(g_loss)
self.append_d_loss(d_loss)
self.append_additional_loss(**kwargs)
def plot_loss(self, filename='./loss.png'):
'''plot loss'''
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
add_loss = [key for key in self.losses if key not in ['G', 'D']]
G_loss = self.losses['G']
D_loss = self.losses['D']
plt.figure(figsize=(12, 8))
for key in add_loss:
plt.plot(self.losses[key])
plt.plot(G_loss)
plt.plot(D_loss)
plt.title('Model Loss')
plt.xlabel('iter')
plt.ylabel('loss')
plt.legend([key for key in add_loss] + ['Generator', 'Discriminator'], loc='upper left')
plt.tight_layout()
plt.savefig(filename)
plt.close()
def save_image(self, folder, image_tensor, filename=None, nrow=5, normalize=True, range=(-1, 1)):
'''simple save image
save_image func with
sampling images, and only args that I use frequently
'''
if filename == None:
filename = '{}.png'.format(self.batches_done)
tv.utils.save_image(
image_tensor, os.path.join(folder, filename), nrow=nrow, normalize=normalize, value_range=range
)
def __str__(self):
'''print the latest losses when calling print() on the object'''
partial_msg = []
partial_msg += [
'{:6}'.format(self.batches_done),
'[D Loss : {:.5f}]'.format(self.losses['D'][-1]),
'[G Loss : {:.5f}]'.format(self.losses['G'][-1]),
]
        # verbose additional loss
add_loss = [key for key in self.losses if key not in ['D', 'G']]
if len(add_loss) > 0:
for key in add_loss:
if self.losses[key] == []: # skip when no entry
continue
partial_msg.append(
'[{} : {:.5f}]'.format(key, self.losses[key][-1])
)
return '\t'.join(partial_msg)
def gif_from_files(image_paths, filename='out.gif', optimize=False, duration=500, loop=0):
images = []
for path in image_paths:
images.append(Image.open(str(path)))
images[0].save(filename, save_all=True, append_images=images[1:], optimize=optimize, duration=duration, loop=loop)
if __name__ == "__main__":
'''TEST'''
# device = get_device()
# print(sample_nnoise((3, 64), 0, 0.02).size())
# print(sample_nnoise((3, 64), 0, 0.02, torch.device('cpu')).size())
# print(sample_nnoise((3, 64), 0, 0.02, device).size())
# print(sample_unoise((3, 64), 0, 3).size())
# print(sample_unoise((3, 64), 0, 3, torch.device('cpu')).size())
# print(sample_unoise((3, 64), 0, 3, device).size())
status = GANTrainingStatus()
status.add_loss('real')
status.add_loss('fake')
import math
for i in range(100):
status.append(math.sin(i), math.cos(i), real=-i/10, fake=i/10)
print(status)
status.plot_loss()
| [
"matplotlib"
] |
0cf5475a86e73b165096d60798a7c67896da9bbb | Python | amRsching/kaggle-jane-street-market-prediction | /pytorch_tutorials/pytorch_nn_hello_world.py | UTF-8 | 1,621 | 3.015625 | 3 | [] | no_license | import torch
import torch.nn as nn
from torch import optim
import matplotlib.pyplot as plt
t_c = [0.5, 14.0, 15.0, 28.0, 11.0, 8.0, 3.0, -4.0, 6.0, 13.0, 21.0]
t_u = [35.7, 55.9, 58.2, 81.9, 56.3, 48.9, 33.9, 21.8, 48.4, 60.4, 68.4]
t_c = torch.tensor(t_c).unsqueeze(1)
t_u = torch.tensor(t_u).unsqueeze(1)
plt.scatter(x=t_u, y=t_c)
def reshape(x):
return x.reshape(x.shape[0], 1)
def training_loop(n_epochs, optimizer, model, loss_fn, t_u_train, t_u_val,
t_c_train, t_c_val):
for epoch in range(1, n_epochs + 1):
t_p_train = model(t_u_train)
loss_train = loss_fn(t_p_train, t_c_train)
t_p_val = model(t_u_val)
loss_val = loss_fn(t_p_val, t_c_val)
optimizer.zero_grad()
loss_train.backward()
optimizer.step()
print(f"Epoch {epoch}, Training loss {loss_train.item():.4f},"
f" Validation loss {loss_val.item():.4f}")
t_un = 0.1 * t_u
t_cn = 0.1 * t_c
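# Scaling inputs and targets by 0.1 keeps the values around O(1), so plain SGD
# with lr=1e-2 trains the single linear layer without diverging.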
linear_model = nn.Linear(1, 1)
optimizer = optim.SGD(linear_model.parameters(), lr=1e-2)
training_loop(
n_epochs=1000,
optimizer=optimizer,
model=linear_model,
loss_fn=nn.MSELoss(),
t_u_train=t_un,
t_u_val=t_un,
t_c_train=t_cn,
t_c_val=t_cn)
print()
print(linear_model.weight)
print(linear_model.bias)
t_range = torch.arange(20., 90.).unsqueeze(1)
fig = plt.figure(dpi=600)
plt.xlabel("Fahrenheit")
plt.ylabel("Celsius")
plt.plot(t_u.numpy(), t_c.numpy(), 'o')
plt.plot(t_range.numpy(), linear_model(t_range * 0.1).detach().numpy(), 'c-')
plt.plot(t_u.numpy(), linear_model(t_u * 0.1).detach().numpy(), 'kx')
plt.show()
| [
"matplotlib"
] |
b0831bfed9318879ab8b66a7a126bfb51ba11d87 | Python | JOThurgood/SimpleCFD | /linear_advection/2D/plot_advect_2d.py | UTF-8 | 719 | 2.765625 | 3 | [
"MIT"
] | permissive | import numpy as np
import matplotlib.pyplot as plt
xc = np.fromfile('xc.dat', dtype = np.float64)
yc = np.fromfile('yc.dat', dtype = np.float64)
a = np.fromfile('a.dat', dtype = np.float64)
a = a.reshape(yc.size, xc.size)
plt.clf()
plt.contour(xc, yc, a, 10)
plt.grid(True)
plt.xlabel('x')
plt.ylabel('y')
plt.title('a(x,y,t_end)')
#plt.show()
plt.savefig('output_contour.png')
plt.clf()
plt.contourf(xc, yc, a, 255,
cmap = plt.cm.bone)
plt.colorbar()
plt.grid(False)
plt.xlabel('x')
plt.ylabel('y')
plt.title('a(x,y,t_end)')
#plt.show()
plt.savefig('output_contourf.png')
plt.clf()
plt.imshow(a)
plt.xlabel('x')
plt.ylabel('y')
plt.title('a(x,y,t_end)')
plt.colorbar()
plt.savefig('output_pixels.png')
| [
"matplotlib"
] |
7657039eee772dff0f8416b7b08c00873dfc1321 | Python | egg-west/random-search-for-control-problem | /misc/es.py | UTF-8 | 1,315 | 3.1875 | 3 | [
"MIT"
] | permissive | import numpy as np
import gym
import numpy.random as random
import matplotlib.pyplot as plt
'''
try to generate a good policy with evolution strategy
'''
npop = 50 # population size
sigma = 0.1 # noise standard deviation
alpha = 0.001 # learning rate
MAX_EPOTCH = 10000
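# Note: npop/sigma/alpha above are not used in the loop below; it is effectively
# a hill-climbing random search that keeps a perturbed weight vector only when
# its episode reward improves on the last accepted one (and stays below 150).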
env = gym.make('CartPole-v0')
w_size = env.observation_space.shape[0]
w = random.random(w_size)
epoch_reward = 0.
all_reward = [0]
for ep in range(MAX_EPOTCH):
observation = env.reset()
### generate weight
candidate_weight = random.random(w_size) * 0.01 + w
for t in range(1000):
# env.render()
weighted_sum = np.dot(candidate_weight, observation)
if weighted_sum >= 0:
action = 1
else:
action = 0
observation, reward, done, info = env.step(action)
epoch_reward += reward
if done:
            ### if the reward is satisfying, update the weight (policy)
if epoch_reward < 150 and epoch_reward > all_reward[-1]:
w = candidate_weight
all_reward.append(epoch_reward)
print('epoch: ', ep+1 , ', total reward: ', epoch_reward)
epoch_reward = 0
break
plt.title('CartPole-v0-evolution-strategy')
plt.plot(all_reward)
plt.show() | [
"matplotlib"
] |
3fa94f60ed9790b576387c2f02b6dd189ecc8fff | Python | sandialabs/sibl | /geo/examples/pixlet.py | UTF-8 | 5,893 | 2.5625 | 3 | [
"MIT",
"LicenseRef-scancode-other-permissive",
"Python-2.0",
"BSD-3-Clause",
"LGPL-2.0-or-later",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rc
from mpl_toolkits.mplot3d import Axes3D
from skimage.morphology import skeletonize_3d
from pathlib import Path
from typing import NamedTuple, Tuple
from ptg.pixel_shape import PixelSphere as ps
# utilities
serialize = False
latex = False
if latex:
rc("font", **{"family": "serif", "serif": ["Computer Modern Roman"]})
rc("text", usetex=True)
# alphabet
class Letter(NamedTuple):
"""Create the letter index (x=0, y, z) position, in units of inkdrop, as a
namedtuple, with the following attributes:
"""
name: str
path_x: Tuple[int, ...]
path_y: Tuple[int, ...]
path_z: Tuple[int, ...]
letter_I = Letter(
name="I",
path_x=(0, 1, 2, 3, 4, 5, 6),
path_y=(4, 4, 4, 4, 4, 4, 4),
path_z=(4, 4, 4, 4, 4, 4, 4),
)
letter_J = Letter(
name="J",
path_x=(4, 3, 2, 1, 2, 3, 4, 5, 6),
path_y=(6, 6, 5, 4, 3, 2, 2, 2, 2),
path_z=(4, 4, 4, 4, 4, 4, 4, 4, 4),
)
letter = letter_J
assert len(letter.path_x) == len(letter.path_y) == len(letter.path_z)
# letters made with a 5x5 grid of possible inkdrops
# droplet_grid_len = 5
# shape marching cadence
diam = 3 # pixels, diameter
ppl = 1 # pixels per length
stride = 1 # pixels, distance between sequential anchors
# n_shapes = 5 # int, number of sequential shapes added to world
n_shapes = len(letter.path_x) # int, number of sequential shapes added to world
# world_bounds = (n_shapes - 1) * stride + diam * ppl
world_bounds = (
max(map(lambda i: max(i), (letter.path_x, letter.path_y, letter.path_z))) + diam
) # pixels
nsd = 3 # number of space dimensions
# world
(n_layers_x, n_cols_y, n_rows_z) = tuple(map(lambda x: world_bounds, range(nsd)))
world = np.zeros([n_layers_x, n_cols_y, n_rows_z], dtype=np.uint8)
inx, iny, inz = np.indices([n_layers_x, n_cols_y, n_rows_z])
# fountain pen metaphor, ink in the shapes to the world
# for i in range(n_shapes):
for i, (xi, yi, zi) in enumerate(zip(letter.path_x, letter.path_y, letter.path_z)):
# anchor_x_i = i * stride
# item = ps(anchor_x=anchor_x_i, diameter=diam, pixels_per_len=ppl, verbose=True)
item = ps(
anchor_x=xi,
anchor_y=yi,
anchor_z=zi,
diameter=diam,
pixels_per_len=ppl,
verbose=True,
)
mask = item.mask
ox, oy, oz = item.anchor.x, item.anchor.y, item.anchor.z # offsets
(npix_x, npix_y, npix_z) = item.mask.shape
for xx in range(npix_x):
for yy in range(npix_y):
for zz in range(npix_z):
world[xx + ox, yy + oy, zz + oz] = (
world[xx + ox, yy + oy, zz + oz] + mask[xx, yy, zz]
)
# visualization of world + shapes
# camera_elevation, camera_azimuth = -160, 160 # degrees
n_fig_rows, n_fig_cols = 1, 2
# fig = plt.figure(figsize=(8, 8))
fig = plt.figure()
# view_center = (h, r, r)
# view_radius = r + 1
view_radius = int(np.max(world.shape) // 2 + 1)
view_center = (view_radius, view_radius, view_radius)
ix, iy, iz = 0, 1, 2
xlim = (view_center[ix] + view_radius, view_center[ix] - view_radius)
ylim = (view_center[iy] + view_radius, view_center[iy] - view_radius)
zlim = (view_center[iz] + view_radius, view_center[iz] - view_radius)
index = 1
ax = fig.add_subplot(n_fig_rows, n_fig_cols, index, projection=Axes3D.name)
# ax.view_init(elev=camera_elevation, azim=camera_azimuth)
ax.voxels(world, linewidth=0.25, edgecolor="black", alpha=0.9)
ax.set_title("(a)")
ax.set_xlabel(r"$x$")
ax.set_ylabel(r"$y$")
ax.set_zlabel(r"$z$")
ax.set_xlim(xlim)
ax.set_ylim(ylim)
ax.set_zlim(zlim)
# fig.tight_layout()
# plt.show()
# skeletonize
# https://scikit-image.org/docs/dev/auto_examples/edges/plot_skeleton.html
# Use the Lee 1994 algorithm for 3D shapes
# References
# [Lee94] T.-C. Lee, R.L. Kashyap and C.-N. Chu, Building skeleton models
# via 3-D medial surface/axis thinning algorithms.
# Computer Vision, Graphics, and Image Processing, 56(6):462-478, 1994.
# [Zha84] A fast parallel algorithm for thinning digital patterns,
# T. Y. Zhang and C. Y. Suen, Communications of the ACM,
# March 1984, Volume 27, Number 3.
skeleton = skeletonize_3d(world)
# fig = plt.figure(figsize=(8, 8))
index += 1
ax = fig.add_subplot(n_fig_rows, n_fig_cols, index, projection=Axes3D.name)
# ax.view_init(elev=camera_elevation, azim=camera_azimuth)
# ax.voxels(world, linewidth=0.25, edgecolor="black", alpha=0.9)
ax.voxels(skeleton, linewidth=0.25, edgecolor="black", alpha=0.9)
ax.set_title("(b)")
ax.set_xlabel(r"$x$")
ax.set_ylabel(r"$y$")
ax.set_zlabel(r"$z$")
ax.set_xlim(xlim)
ax.set_ylim(ylim)
ax.set_zlim(zlim)
fig.tight_layout()
plt.show()
# plt.show(block=False)
if serialize:
extension = ".pdf"
filename = (
Path(__file__).stem
+ "_"
+ letter.name
+ "_drop_"
+ str(n_shapes)
+ "_diam_"
+ str(diam)
+ "_stri_"
+ str(stride)
+ extension
)
fig.savefig(filename, bbox_inches="tight", pad_inches=0)
print(f"Serialized to {filename}")
"""
Copyright 2023 Sandia National Laboratories
Notice: This computer software was prepared by National Technology and Engineering Solutions of
Sandia, LLC, hereinafter the Contractor, under Contract DE-NA0003525 with the Department of Energy
(DOE). All rights in the computer software are reserved by DOE on behalf of the United States
Government and the Contractor as provided in the Contract. You are authorized to use this computer
software for Governmental purposes but it is not to be released or distributed to the public.
NEITHER THE U.S. GOVERNMENT NOR THE CONTRACTOR MAKES ANY WARRANTY, EXPRESS OR IMPLIED, OR ASSUMES
ANY LIABILITY FOR THE USE OF THIS SOFTWARE. This notice including this sentence must appear on any
copies of this computer software. Export of this data may require a license from the United States
Government.
"""
| [
"matplotlib"
] |
5ac88f2b85246055ae79227b5f134743f3c10fcd | Python | shjwjj/test | /project6.py | UTF-8 | 29,735 | 2.8125 | 3 | [] | no_license |
# coding: utf-8
# In[89]:
# import packages
import matplotlib.pyplot as plt
import matplotlib.image as mping
import numpy as np
#import cv2
import cv2
import glob
import pickle
get_ipython().magic('matplotlib inline')
# In[90]:
# Read in an imgae
image = mping.imread('./test_images/test1.jpg')#RGB格式
plt.imshow(image)
# image2 = cv2.imread('./test_images/test1.jpg')  # BGR format
# plt.figure()
# plt.subplot(1,2,1)
# plt.imshow(image1)
# plt.subplot(1,2,2)
# plt.imshow(image2)
# In[91]:
## camera calibration
# prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
objp = np.zeros((6*9,3), np.float32)
objp[:,:2] = np.mgrid[0:9,0:6].T.reshape(-1,2)
# Arrays to store object points and image points from all the images.
objpoints = [] # 3d points in real world space
imgpoints = [] # 2d points in image plane.
# Make a list of calibration images
image_cal = glob.glob('./camera_cal/calibration*.jpg')
# images = mping.imread('./camera_cal/calibration*.jpg')
#Step through the list and search for chessboard corners
for fname in image_cal:
img = cv2.imread(fname)
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
# Find the chessboard corners
ret, corners = cv2.findChessboardCorners(gray, (9,6),None)
# If found, add object points, image points
if ret == True:
objpoints.append(objp)
imgpoints.append(corners)
# Draw and display the corners
img = cv2.drawChessboardCorners(img, (9,6), corners, ret)
plt.imshow(img)
cv2.imshow('img',img)
cv2.waitKey(500)
cv2.destroyAllWindows()  # destroy all the windows we created
# In[92]:
# Distortion correction
import pickle
get_ipython().magic('matplotlib inline')
# Test undistortion on an image
img = cv2.imread('camera_cal/calibration5.jpg')
img_size = (img.shape[1], img.shape[0])
# Do camera calibration given object points and image points
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, img_size,None,None)
dst = cv2.undistort(img, mtx, dist, None, mtx)
cv2.imwrite('camera_cal/test_undist.jpg',dst)
# Save the camera calibration result for later use (we won't worry about rvecs / tvecs)
dist_pickle = {}
dist_pickle["mtx"] = mtx
dist_pickle["dist"] = dist
# Open the file in binary write-only mode: if it already exists it is truncated
# (its previous contents are discarded); if it does not exist, a new file is created.
pickle.dump( dist_pickle, open( "camera_cal/wide_dist_pickle.p", "wb" ) )
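# (Not part of the original notebook) The saved calibration could be reloaded later with, e.g.:
#   with open("camera_cal/wide_dist_pickle.p", "rb") as f:
#       calib = pickle.load(f)
#   mtx, dist = calib["mtx"], calib["dist"]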
#dst = cv2.cvtColor(dst, cv2.COLOR_BGR2RGB)
# Visualize undistortion
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(20,10))
ax1.imshow(img)
ax1.set_title('Original Image', fontsize=30)
ax2.imshow(dst)
ax2.set_title('Undistorted Image', fontsize=30)
# In[ ]:
# In[93]:
image_undistorted = cv2.undistort(image, mtx, dist, None, mtx)
plt.imshow(image_undistorted)
# In[94]:
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(20,10))
ax1.imshow(image)
ax1.set_title('Original Image', fontsize=30)
ax2.imshow(image_undistorted)
ax2.set_title('Undistorted Image', fontsize=30)
# In[95]:
# Helper to plot two images side by side
def plot2images(image1, image2, title1, title2, image1cmap=None, image2cmap='gray', save_filename=None):
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(24, 9))
f.tight_layout()
ax1.imshow(image1, cmap=image1cmap)
ax1.set_title(title1, fontsize= 30)
ax2.imshow(image2, cmap=image2cmap)
ax2.set_title(title2, fontsize= 30)
plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)
if save_filename:
plt.savefig(save_filename)
plt.show()
# In[96]:
# RGB channel thresholding
def rgb_select(img, r_thresh, g_thresh, b_thresh):
r_channel = img[:,:,0]
g_channel=img[:,:,1]
b_channel = img[:,:,2]
r_binary = np.zeros_like(r_channel)
r_binary[(r_channel > r_thresh[0]) & (r_channel <= r_thresh[1])] = 1
g_binary = np.zeros_like(g_channel)
g_binary[(r_channel > g_thresh[0]) & (r_channel <= g_thresh[1])] = 1
b_binary = np.zeros_like(b_channel)
b_binary[(r_channel > b_thresh[0]) & (r_channel <= b_thresh[1])] = 1
combined = np.zeros_like(r_channel)
combined[((r_binary == 1) & (g_binary == 1) & (b_binary == 1))] = 1
return combined
# In[97]:
plt.imshow(image_undistorted)
# In[98]:
luv= cv2.cvtColor(image_undistorted, cv2.COLOR_RGB2LUV)
hls = cv2.cvtColor(image_undistorted, cv2.COLOR_RGB2HLS)
hsv = cv2.cvtColor(image_undistorted,cv2.COLOR_RGB2HSV)
lab=cv2.cvtColor(image_undistorted, cv2.COLOR_RGB2LAB)
s_channel = hsv[:,:,1]
b_channel=lab[:,:,2]
l_channel = luv[:,:,0]
v_channel= hsv[:,:,2]
print(s_channel.shape)
# In[99]:
def mag_thresh(image, sobel_kernel=3, mag_thresh=(0, 255)):
# Calculate gradient magnitude
# Convert to grayscale
gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
# Take both Sobel x and y gradients
sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
# Calculate the gradient magnitude
gradmag = np.sqrt(sobelx**2 + sobely**2)
# Rescale to 8 bit
scale_factor = np.max(gradmag)/255
gradmag = (gradmag/scale_factor).astype(np.uint8)
# Create a binary image of ones where threshold is met, zeros otherwise
mag_binary = np.zeros_like(gradmag)
mag_binary[(gradmag >= mag_thresh[0]) & (gradmag <= mag_thresh[1])] = 1
#plot2images(image1=image, image2=mag_binary, title1='Original Image', title2='Thresholded Magnitude', save_filename='output_images/thresholded_magnitude.png')
# Return the binary image
return mag_binary
# In[100]:
def abs_sobel_thresh(image, orient='x', sobel_kernel=3, thresh=(0, 255)):
# Calculate directional gradient
# Convert to grayscale
gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
# Apply x or y gradient with the OpenCV Sobel() function
# and take the absolute value
if orient == 'x':
abs_sobel = np.absolute(cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel))
if orient == 'y':
abs_sobel = np.absolute(cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel))
# Rescale back to 8 bit integer
scaled_sobel = np.uint8(255*abs_sobel/np.max(abs_sobel))
# Create a copy and apply the threshold
grad_binary = np.zeros_like(scaled_sobel)
# Here I'm using inclusive (>=, <=) thresholds, but exclusive is ok too
grad_binary[(scaled_sobel >= thresh[0]) & (scaled_sobel <= thresh[1])] = 1
#plot2images(image1=image, image2=grad_binary, title1='Original Image', title2='thresholded y-derivative', save_filename='output_images/thresholdedy-derivative.png')
# Return the result
return grad_binary
# In[101]:
def color_thresh(img, s_thresh, l_thresh, b_thresh, v_thresh):
luv= cv2.cvtColor(image, cv2.COLOR_RGB2LUV)
hls = cv2.cvtColor(image, cv2.COLOR_RGB2HLS)
hsv = cv2.cvtColor(image,cv2.COLOR_RGB2HSV)
lab=cv2.cvtColor(image, cv2.COLOR_RGB2LAB)
s_channel = hsv[:,:,1]
b_channel=lab[:,:,2]
l_channel = luv[:,:,0]
v_channel= hsv[:,:,2]
s_binary = np.zeros_like(s_channel)
s_binary[(s_channel > s_thresh[0]) & (s_channel <= s_thresh[1])] = 1
b_binary = np.zeros_like(b_channel)
b_binary[(s_channel > b_thresh[0]) & (s_channel <= b_thresh[1])] = 1
l_binary = np.zeros_like(l_channel)
l_binary[(s_channel > l_thresh[0]) & (s_channel <= l_thresh[1])] = 1
v_binary = np.zeros_like(v_channel)
v_binary[(s_channel > v_thresh[0]) & (s_channel <= v_thresh[1])] = 1
combined = np.zeros_like(s_channel)
combined[((s_binary == 1) & (b_binary == 1) & (l_binary == 1) & (v_binary == 1))] = 1
return combined
# In[102]:
def color_gradient_threshold(image_undistorted):
ksize = 15
luv= cv2.cvtColor(image_undistorted, cv2.COLOR_RGB2LUV)
hls = cv2.cvtColor(image_undistorted, cv2.COLOR_RGB2HLS)
hsv = cv2.cvtColor(image_undistorted,cv2.COLOR_RGB2HSV)
lab=cv2.cvtColor(image_undistorted, cv2.COLOR_RGB2LAB)
# s_channel = hsv[:,:,1]
# b_channel=lab[:,:,2]
# l_channel = luv[:,:,0]
# v_channel= hsv[:,:,2]
# mag_binary = mag_thresh(image_undistorted, sobel_kernel=ksize, mag_thresh=(150, 255))
gradx=abs_sobel_thresh(image_undistorted,orient='x',sobel_kernel=ksize,thresh=(50,90))
grady=abs_sobel_thresh(image_undistorted,orient='y',sobel_kernel=ksize,thresh=(30,90))
c_binary=color_thresh(image_undistorted,s_thresh=(70,100),l_thresh=(60,255),b_thresh=(50,255),v_thresh=(150,255))
rgb_binary=rgb_select(image_undistorted,r_thresh=(225,255),g_thresh=(225,255),b_thresh=(0,255))
    combined_binary = np.zeros_like(gradx)  # same shape as the gradient masks (s_channel is only defined outside this function)
# preprocessImage[((gradx==1) & (grady==1)) | (c_binary==1) | (rgb_binary==1)] =255
# combined_binary[((gradx == 1) & (grady == 1) | (c_binary == 1) | (mag_binary == 1))] = 255
combined_binary[((gradx == 1) & (grady == 1) | (c_binary == 1) | (rgb_binary==1))] = 255
color_binary = combined_binary
return color_binary, combined_binary
color_binary, combined_binary = color_gradient_threshold(image_undistorted )
plot2images(image1 =image , image2 = combined_binary, title1='Original Image', title2='color_gradient_threshold Image')
# In[103]:
R = image[:,:,0]
G = image[:,:,1]
B = image[:,:,2]
# In[104]:
hls = cv2.cvtColor(image, cv2.COLOR_RGB2HLS)
H = hls[:,:,0]
L = hls[:,:,1]
S = hls[:,:,2]
# In[105]:
plt.imshow(S, cmap='gray')
# In[106]:
# Perspective transform
def perspective_transform(image_undistorted, combined_binary):
top_left = [560, 470]
top_right = [730, 470]
bottom_right = [1080, 720]
bottom_left = [200, 720]
top_left_dst = [200,0]
top_right_dst = [1100,0]
bottom_right_dst = [1100,720]
bottom_left_dst = [200,720]
# gray = cv2.cvtColor(image_undistorted, cv2.COLOR_RGB2GRAY)
img_size = (image_undistorted.shape[1], image_undistorted.shape[0])
src = np.float32([top_left,top_right, bottom_right, bottom_left] )
dst = np.float32([top_left_dst, top_right_dst, bottom_right_dst, bottom_left_dst])
M = cv2.getPerspectiveTransform(src, dst)
Minv = cv2.getPerspectiveTransform(dst, src)
# warped = warped = cv2.warpPerspective(image_undistorted, M, img_size)
warped = cv2.warpPerspective(combined_binary, M, img_size)
return warped, Minv
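# Minv (the inverse perspective matrix) is also returned so the detected lane area can
# later be warped back from the bird's-eye view onto the original undistorted image.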
warped, Minv = perspective_transform(image_undistorted, combined_binary)
plot2images(image1 = image, image2 = warped, title1='Original Image', title2='perspective_Imag')
# In[107]:
histogram = np.sum(warped[:,:], axis=0)
# sum rows 360 through 720 of warped (the bottom half)
#histogram = np.sum(warped[warped.shape[0]//2:,:], axis=0)
plt.plot(histogram)
# In[108]:
## test
print(warped.shape[0])
#print([warped.shape[0]//2:,:])
print(np.sum(warped[warped.shape[0]//2:,:], axis=0).shape)
out_img = np.dstack((warped, warped, warped))*255
print(out_img.shape)
print(histogram.shape)
print(warped.shape)
warped2 = np.dstack((warped,warped,warped))*0.002
print(warped2.nonzero()[1])
# In[109]:
# sum rows 360 through 720 of warped (the bottom half)
histogram2 = np.sum(warped[warped.shape[0]//2:,:], axis=0)
out_img = np.dstack((warped, warped, warped))*255
midpoint = np.int(histogram2.shape[0]/2)
leftx_base = np.argmax(histogram2[:midpoint])
rightx_base = np.argmax(histogram2[midpoint:])+midpoint
nwindows = 1
window_height = np.int(warped.shape[0]/nwindows)
nonzero = warped.nonzero()
nonzeroy = np.array(nonzero[0])
nonzerox = np.array(nonzero[1])
leftx_current = leftx_base
rightx_current = rightx_base
margin = 100
minpix = 50
left_lane_inds = []
right_lane_inds = []
for window in range(nwindows):
win_y_low = warped.shape[0]-(window+1)*window_height
win_y_high = warped.shape[0]-window*window_height
win_xleft_low = leftx_current-margin
win_xleft_high = leftx_current+margin
win_xright_low = rightx_current - margin
win_xright_high = rightx_current + margin
cv2.rectangle(out_img,(win_xleft_low,win_y_low),(win_xleft_high,win_y_high),
(0,255,0), 2)
cv2.rectangle(out_img,(win_xright_low,win_y_low),(win_xright_high,win_y_high),
(0,255,0), 2)
good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &
(nonzerox >= win_xleft_low) & (nonzerox < win_xleft_high)).nonzero()[0]
print(good_left_inds)
good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &
(nonzerox >= win_xright_low) & (nonzerox < win_xright_high)).nonzero()[0]
print(good_right_inds)
left_lane_inds.append(good_left_inds)
right_lane_inds.append(good_right_inds)
print(nonzero[0])
left_lane_inds = np.concatenate(left_lane_inds)
right_lane_inds = np.concatenate(right_lane_inds)
leftx = nonzerox[left_lane_inds]
lefty = nonzeroy[left_lane_inds]
rightx = nonzerox[right_lane_inds]
righty = nonzeroy[right_lane_inds]
left_fit = np.polyfit(lefty, leftx, 2)
right_fit = np.polyfit(righty, rightx, 2)
ploty = np.linspace(0, warped.shape[0]-1, warped.shape[0] )
left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]
right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]
# out_img[nonzeroy[left_lane_inds], nonzerox[left_lane_inds]] = [255, 0, 0]
out_img[nonzeroy[right_lane_inds], nonzerox[right_lane_inds]] = [0, 0, 255]
# In[110]:
good_left_inds.shape
#good_right_inds.shape
# In[111]:
def finding_line(warped):
    # sum rows 360 through 720 of warped (the bottom half)
histogram2 = np.sum(warped[warped.shape[0]//2:,:], axis=0)
out_img = np.dstack((warped, warped, warped))*255
midpoint = np.int(histogram2.shape[0]/2)
leftx_base = np.argmax(histogram2[:midpoint])
rightx_base = np.argmax(histogram2[midpoint:])+midpoint
nwindows = 5
window_height = np.int(warped.shape[0]/nwindows)
nonzero = warped.nonzero()
nonzeroy = np.array(nonzero[0])
nonzerox = np.array(nonzero[1])
leftx_current = leftx_base
rightx_current = rightx_base
margin = 100
minpix = 50
left_lane_inds = []
right_lane_inds = []
for window in range(nwindows):
win_y_low = warped.shape[0]-(window+1)*window_height
win_y_high = warped.shape[0]-window*window_height
win_xleft_low = leftx_current-margin
win_xleft_high = leftx_current+margin
win_xright_low = rightx_current - margin
win_xright_high = rightx_current + margin
cv2.rectangle(out_img,(win_xleft_low,win_y_low),(win_xleft_high,win_y_high),
(0,255,0), 2)
cv2.rectangle(out_img,(win_xright_low,win_y_low),(win_xright_high,win_y_high),
(0,255,0), 2)
good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &
(nonzerox >= win_xleft_low) & (nonzerox < win_xleft_high)).nonzero()[0]
good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &
(nonzerox >= win_xright_low) & (nonzerox < win_xright_high)).nonzero()[0]
left_lane_inds.append(good_left_inds)
right_lane_inds.append(good_right_inds)
if len(good_left_inds) > minpix:
leftx_current = np.int(np.mean(nonzerox[good_left_inds]))
if len(good_right_inds) > minpix:
rightx_current = np.int(np.mean(nonzerox[good_right_inds]))
left_lane_inds = np.concatenate(left_lane_inds)
right_lane_inds = np.concatenate(right_lane_inds)
leftx = nonzerox[left_lane_inds]
lefty = nonzeroy[left_lane_inds]
rightx = nonzerox[right_lane_inds]
righty = nonzeroy[right_lane_inds]
left_fit = np.polyfit(lefty, leftx, 2)
right_fit = np.polyfit(righty, rightx, 2)
ploty = np.linspace(0, warped.shape[0]-1, warped.shape[0] )
left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]
right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]
# out_img[nonzeroy[left_lane_inds], nonzerox[left_lane_inds]] = [255, 0, 0]
out_img[nonzeroy[right_lane_inds], nonzerox[right_lane_inds]] = [0, 0, 255]
    # find the indices of the nonzero pixels near the left lane line
left_lane_inds = ((nonzerox > (left_fit[0]*(nonzeroy**2) + left_fit[1]*nonzeroy +
left_fit[2] - margin)) & (nonzerox < (left_fit[0]*(nonzeroy**2) +
left_fit[1]*nonzeroy + left_fit[2] + margin)))
    # find the indices of the nonzero pixels near the right lane line
right_lane_inds = ((nonzerox > (right_fit[0]*(nonzeroy**2) + right_fit[1]*nonzeroy +
right_fit[2] - margin)) & (nonzerox < (right_fit[0]*(nonzeroy**2) +
right_fit[1]*nonzeroy + right_fit[2] + margin)))
print(left_lane_inds)
print(right_lane_inds)
return left_fitx, right_fitx,out_img, left_fit, right_fit,left_lane_inds,right_lane_inds
# In[112]:
plt.imshow(warped)
# In[113]:
left_fitx, right_fitx, out_img,left_fit, right_fit,left_lane_inds,right_lane_inds = finding_line(warped)
ploty = np.linspace(0, warped.shape[0]-1, warped.shape[0] )
plt.imshow(out_img)
plt.plot(left_fitx, ploty, color='yellow')
plt.plot(right_fitx, ploty, color='yellow')
# set the axis display ranges
# plt.xlim(0, 1280)
# plt.ylim(720, 0)
# In[114]:
plt.imshow(warped)
# In[115]:
left_fit
right_fit
# In[116]:
binary_warped = warped
# In[117]:
nonzero = binary_warped.nonzero()
nonzeroy = np.array(nonzero[0])
nonzerox = np.array(nonzero[1])
margin = 100
# find the indices of the nonzero pixels near the left lane line
left_lane_inds = ((nonzerox > (left_fit[0]*(nonzeroy**2) + left_fit[1]*nonzeroy +
left_fit[2] - margin)) & (nonzerox < (left_fit[0]*(nonzeroy**2) +
left_fit[1]*nonzeroy + left_fit[2] + margin)))
# find the indices of the nonzero pixels near the right lane line
right_lane_inds = ((nonzerox > (right_fit[0]*(nonzeroy**2) + right_fit[1]*nonzeroy +
right_fit[2] - margin)) & (nonzerox < (right_fit[0]*(nonzeroy**2) +
right_fit[1]*nonzeroy + right_fit[2] + margin)))
# positions of the lane-line pixels
leftx = nonzerox[left_lane_inds]
lefty = nonzeroy[left_lane_inds]
rightx = nonzerox[right_lane_inds]
righty = nonzeroy[right_lane_inds]
ploty = np.linspace(0, binary_warped.shape[0]-1, binary_warped.shape[0] )
left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]
right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]
# create a blank output image
out_img = np.dstack((binary_warped, binary_warped, binary_warped))*255
window_img = np.zeros_like(out_img)
# colors used to mark the lane-line pixels
out_img[nonzeroy[left_lane_inds], nonzerox[left_lane_inds]] = [255, 0, 0]
out_img[nonzeroy[right_lane_inds], nonzerox[right_lane_inds]] = [0, 0, 255]
# Generate a polygon to illustrate the search window area
# And recast the x and y points into usable format for cv2.fillPoly()
left_line_window1 = np.array([np.transpose(np.vstack([left_fitx-margin, ploty]))])
left_line_window2 = np.array([np.flipud(np.transpose(np.vstack([left_fitx+margin,
ploty])))])
left_line_pts = np.hstack((left_line_window1, left_line_window2))
right_line_window1 = np.array([np.transpose(np.vstack([right_fitx-margin, ploty]))])
right_line_window2 = np.array([np.flipud(np.transpose(np.vstack([right_fitx+margin,
ploty])))])
right_line_pts = np.hstack((right_line_window1, right_line_window2))
# In[118]:
def sliding_window(binary_warped,left_fit,right_fit):
nonzero = binary_warped.nonzero()
nonzeroy = np.array(nonzero[0])
nonzerox = np.array(nonzero[1])
margin = 100
    # find the indices of the nonzero pixels near the left lane line
left_lane_inds = ((nonzerox > (left_fit[0]*(nonzeroy**2) + left_fit[1]*nonzeroy +
left_fit[2] - margin)) & (nonzerox < (left_fit[0]*(nonzeroy**2) +
left_fit[1]*nonzeroy + left_fit[2] + margin)))
    # find the indices of the nonzero pixels near the right lane line
right_lane_inds = ((nonzerox > (right_fit[0]*(nonzeroy**2) + right_fit[1]*nonzeroy +
right_fit[2] - margin)) & (nonzerox < (right_fit[0]*(nonzeroy**2) +
right_fit[1]*nonzeroy + right_fit[2] + margin)))
    # positions of the lane-line pixels
leftx = nonzerox[left_lane_inds]
lefty = nonzeroy[left_lane_inds]
rightx = nonzerox[right_lane_inds]
righty = nonzeroy[right_lane_inds]
    # re-fit a second-order polynomial to these pixels
#left_fit = np.polyfit(lefty, leftx, 2)
#right_fit = np.polyfit(righty, rightx, 2)
    # generate the x/y points used for plotting
ploty = np.linspace(0, binary_warped.shape[0]-1, binary_warped.shape[0] )
left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]
right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]
    # create a blank output image
out_img = np.dstack((binary_warped, binary_warped, binary_warped))*255
window_img = np.zeros_like(out_img)
    # colors used to mark the lane-line pixels
out_img[nonzeroy[left_lane_inds], nonzerox[left_lane_inds]] = [255, 0, 0]
out_img[nonzeroy[right_lane_inds], nonzerox[right_lane_inds]] = [0, 0, 255]
# Generate a polygon to illustrate the search window area
# And recast the x and y points into usable format for cv2.fillPoly()
left_line_window1 = np.array([np.transpose(np.vstack([left_fitx-margin, ploty]))])
left_line_window2 = np.array([np.flipud(np.transpose(np.vstack([left_fitx+margin,
ploty])))])
left_line_pts = np.hstack((left_line_window1, left_line_window2))
right_line_window1 = np.array([np.transpose(np.vstack([right_fitx-margin, ploty]))])
right_line_window2 = np.array([np.flipud(np.transpose(np.vstack([right_fitx+margin,
ploty])))])
right_line_pts = np.hstack((right_line_window1, right_line_window2))
return left_fitx, right_fitx, left_line_pts,right_line_pts,window_img, out_img,left_lane_inds,right_lane_inds
# Draw the lane onto the warped blank image
left_fitx, right_fitx, left_line_pts,right_line_pts,window_img, out_img, left_lane_inds, right_lane_inds =sliding_window(binary_warped,left_fit,right_fit)
cv2.fillPoly(window_img, np.int_([left_line_pts]), (0,255, 0))
cv2.fillPoly(window_img, np.int_([right_line_pts]), (0,255, 0))
result = cv2.addWeighted(out_img, 1, window_img, 0.3, 0)
plt.imshow(result)
# plt.plot(left_fitx, ploty, color='yellow')
# plt.plot(right_fitx, ploty, color='yellow')
# plt.xlim(0, 1280)
# plt.ylim(720, 0)
print(left_lane_inds)
print(right_lane_inds)
# In[119]:
left_fit
right_fit
# In[120]:
def CalculateCurvature(binary_image, left_fit, right_fit, l_lane_inds, r_lane_inds):
# Define y-value where we want radius of curvature
# I'll choose the maximum y-value, corresponding to the bottom of the image
img_size = (binary_image.shape[1], binary_image.shape[0])
#h = binary_image.shape[0]
ploty = np.linspace(0, img_size[1]-1, img_size[1])
y_eval = np.max(ploty)
# left_curverad = ((1 + (2*left_fit[0]*y_eval + left_fit[1])**2)**1.5) / np.absolute(2*left_fit[0])
# right_curverad = ((1 + (2*right_fit[0]*y_eval + right_fit[1])**2)**1.5) / np.absolute(2*right_fit[0])
#print(left_curverad, right_curverad)
# Example values: 1926.74 1908.48
# Define conversions in x and y from pixels space to meters
###RESUBMIT
# 70ft dashed space + dashed line + dashed space
ym_per_pix = 30/720
# ym_per_pix = 21.34/385 # meters per pixel in y dimension
# 12ft lane in 500 pixels
xm_per_pix = 3.7/960 # meters per pixel in y dimension
### ym_per_pix = 30/720 # # meters per pixel in y dimension
### xm_per_pix = 3.7/700 # meters per pixel in x dimension
###RESUBMIT - END
    # pixel coordinates of all nonzero pixels in the image
nonzero = binary_image.nonzero()
nonzeroy = np.array(nonzero[0])
nonzerox = np.array(nonzero[1])
    # split the nonzero pixel coordinates into left- and right-lane sets
leftx = nonzerox[l_lane_inds]
lefty = nonzeroy[l_lane_inds]
rightx = nonzerox[r_lane_inds]
righty = nonzeroy[r_lane_inds]
    # map these pixels into world coordinates (meters) and fit second-order polynomials
left_fit_cr = np.polyfit(lefty*ym_per_pix, leftx*xm_per_pix, 2)
right_fit_cr = np.polyfit(righty*ym_per_pix, rightx*xm_per_pix, 2)
    # curvature of each fit: R = (1 + (2*A*y + B)**2)**1.5 / |2*A|
left_curverad = ((1 + (2*left_fit_cr[0]*y_eval*ym_per_pix + left_fit_cr[1])**2)**1.5) / np.absolute(2*left_fit_cr[0])
right_curverad = ((1 + (2*right_fit_cr[0]*y_eval*ym_per_pix + right_fit_cr[1])**2)**1.5) / np.absolute(2*right_fit_cr[0])
    # average of the left and right curvatures
avg_curverad = (left_curverad + right_curverad) / 2
    ## below: compute the vehicle's offset from the lane center
dist_from_center = 0.0
# assume the camera is centered in the vehicle
###camera_pos = img_size[1] / 2
if right_fit is not None:
if left_fit is not None:
            # the camera sits at the middle of the image, which is also the vehicle center
camera_pos = img_size[0] / 2
###RESUBMIT - END
# find where the right and left lanes intersect the bottom of the frame
# left_lane_pix = np.polyval(left_fit, img_size[1])
# right_lane_pix = np.polyval(right_fit, img_size[1])
            # x coordinates of the left/right lane fits at the bottom of the image
left_lane_pix = np.polyval(left_fit, binary_image.shape[0])
right_lane_pix = np.polyval(right_fit, binary_image.shape[0])
            # x coordinate of the midpoint between the two lane lines
center_of_lane_pix = (left_lane_pix + right_lane_pix) / 2
            # distance between the camera (vehicle center) and the lane center
dist_from_center = (camera_pos - center_of_lane_pix) * 3.7/960
#print(dist_from_center, 'm')
return avg_curverad, dist_from_center
# CalculateCurvature(binary_image, left_fit, right_fit, l_lane_inds, r_lane_inds)
avg_curverad, dist_from_center = CalculateCurvature(binary_warped,left_fit, right_fit, left_lane_inds, right_lane_inds)
# In[121]:
def region_of_interest(img, vertices):
"""
Applies an image mask.
Only keeps the region of the image defined by the polygon
formed from `vertices`. The rest of the image is set to black.
"""
#defining a blank mask to start with
mask = np.zeros_like(img)
#defining a 3 channel or 1 channel color to fill the mask with depending on the input image
if len(img.shape) > 2:
channel_count = img.shape[2] # i.e. 3 or 4 depending on your image
ignore_mask_color = (255,) * channel_count
else:
ignore_mask_color = 255
#filling pixels inside the polygon defined by "vertices" with the fill color
cv2.fillPoly(mask, vertices, ignore_mask_color)
#returning the image only where mask pixels are nonzero
masked_image = cv2.bitwise_and(img, mask)
return masked_image
def apply_region_of_interest_mask(image):
x_factor = 40
y_factor = 60
vertices = np.array([[
(0,image.shape[0]),
(((image.shape[1]/2)- x_factor), (image.shape[0]/2)+ y_factor),
(((image.shape[1]/2) + x_factor), (image.shape[0]/2)+ y_factor),
(image.shape[1],image.shape[0])]], dtype=np.int32)
#print (vertices)
return region_of_interest(image, vertices)
# In[122]:
avg_curverad, dist_from_center
# In[123]:
def overlay_text_on_image (image, avg_curverad, dist_from_center):
new_img = np.copy(image)
#h = new_img.shape[0]
font = cv2.FONT_HERSHEY_SIMPLEX
font_color = (255,255,255)
num_format = '{:04.2f}'
text = 'Radius of Curvature: ' + num_format.format(avg_curverad) + 'm'
cv2.putText(new_img, text, (40,70), font, 1.5, font_color, 2, cv2.LINE_AA)
direction = 'left'
if dist_from_center > 0:
direction = 'right'
abs_dist = abs(dist_from_center)
text = 'Vehicle is ' + num_format.format(abs_dist) + ' m ' + direction + ' of center'
cv2.putText(new_img, text, (40,120), font, 1.5, font_color, 2, cv2.LINE_AA)
return new_img
new_img = overlay_text_on_image (image, avg_curverad, dist_from_center)
plt.imshow(new_img)
# # pipline
# In[125]:
import os
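# The notebook cell that defined main_pipline is missing from this export, although the calls
# below depend on it. A minimal reconstruction (an assumption, chaining only the helpers
# defined above) could look like this:
def main_pipline(image):
    image_undistorted = cv2.undistort(image, mtx, dist, None, mtx)
    color_binary, combined_binary = color_gradient_threshold(image_undistorted)
    warped, Minv = perspective_transform(image_undistorted, combined_binary)
    left_fitx, right_fitx, out_img, left_fit, right_fit, left_lane_inds, right_lane_inds = finding_line(warped)
    avg_curverad, dist_from_center = CalculateCurvature(warped, left_fit, right_fit,
                                                        left_lane_inds, right_lane_inds)
    # draw the detected lane area and warp it back onto the undistorted image using Minv
    ploty = np.linspace(0, warped.shape[0] - 1, warped.shape[0])
    warp_zero = np.zeros_like(warped).astype(np.uint8)
    color_warp = np.dstack((warp_zero, warp_zero, warp_zero))
    pts_left = np.array([np.transpose(np.vstack([left_fitx, ploty]))])
    pts_right = np.array([np.flipud(np.transpose(np.vstack([right_fitx, ploty])))])
    pts = np.hstack((pts_left, pts_right))
    cv2.fillPoly(color_warp, np.int_([pts]), (0, 255, 0))
    newwarp = cv2.warpPerspective(color_warp, Minv, (image.shape[1], image.shape[0]))
    result = cv2.addWeighted(image_undistorted, 1, newwarp, 0.3, 0)
    result = overlay_text_on_image(result, avg_curverad, dist_from_center)
    return result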
# # test pipline
# In[126]:
image = mping.imread('./test_images/test2.jpg')
result = main_pipline(image)
plt.imshow(result)
# In[127]:
def read_all_images2():
input = "test_images/"
output = "output_images/"
all_files = os.listdir(input)
for file_name in all_files:
#read
images = mping.imread(input+file_name)
result2 = main_pipline(images)
plt.figure()
plt.imshow(result2)
cv2.imwrite(output + file_name, result2)
read_all_images2()
# result2 = pipline(images)
# # Test on video
#
# In[36]:
from moviepy.editor import VideoFileClip
from IPython.display import HTML
# In[37]:
def process_image(image):
# NOTE: The output you return should be a color image (3 channel) for processing video below
# TODO: put your pipeline here,
# you should return the final output (image where lines are drawn on lanes)
result = main_pipline(image)
return result
# In[38]:
white_output = './project_video_output.mp4'
## To speed up the testing process you may want to try your pipeline on a shorter subclip of the video
## To do so add .subclip(start_second,end_second) to the end of the line below
## Where start_second and end_second are integer values representing the start and end of the subclip
## You may also uncomment the following line for a subclip of the first 5 seconds
#clip1 = VideoFileClip("test_videos/solidWhiteRight.mp4").subclip(0,5)
clip1 = VideoFileClip("project_video.mp4")
white_clip = clip1.fl_image(process_image) #NOTE: this function expects color images!!
get_ipython().magic('time white_clip.write_videofile(white_output, audio=False)')
# In[ ]:
# In[ ]:
| [
"matplotlib"
] |
c5a3da0c7ce414c0bbf3e7eb1497008385dc0c90 | Python | wsgan001/Thesis-11 | /oligos/over_representation_check.py | UTF-8 | 2,722 | 2.625 | 3 | [] | no_license | #!/usr/bin/python
import sys
import matplotlib.pyplot as plt
import math
def get_total(string):
args = string.strip().split("\t")
if args[0] == "@total":
return int(args[1])
return -1
args = sys.argv[1:]
if len(args) < 3:
print "usage: [--plot] input_file control_file output_file"
sys.exit(1)
plot = False
if args[0] == "--plot":
plot = True
del args[0]
input_file_path = args[0]
del args[0]
control_file_path = args[0]
del args[0]
output_file_path = args[0]
del args[0]
with open(control_file_path, "r") as control_file:
dummy = control_file.readline() #skip header
ctrl_tot = get_total(control_file.readline())
if ctrl_tot == -1:
print "Error: @total not found"
sys.exit(1)
#Create lookup table
lookup = {}
for line in control_file:
(kmer, count, freq) = line.strip().split("\t")
lookup[kmer] = (count, freq)
differential_kmer = {}
differential_kmer_log = {}
kmer_total = 0
diff_total = 0.0
with open(input_file_path, "r") as input_file:
dummy = input_file.readline() #skip header
input_tot = get_total(input_file.readline())
if input_tot == -1:
print "Error: @total not found"
sys.exit(1)
for line in input_file:
(kmer, count, freq) = line.strip().split("\t")
if lookup.has_key(kmer):
(ctrl_count, ctrl_freq) = lookup[kmer]
else:
            (ctrl_count, ctrl_freq) = (1, float(1) / ctrl_tot)  # calculate pseudo-frequency
diff = float(freq) / float(ctrl_freq)
kmer_total = kmer_total + 1
diff_total = diff_total + math.log(diff,2)
differential_kmer[kmer] = (count, freq, ctrl_count, ctrl_freq, diff)
differential_kmer_log[kmer] = math.log(diff, 2)
diff_mean = diff_total / kmer_total
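# diff_mean is the mean log2(input/control) enrichment over all k-mers; it is drawn
# as the horizontal reference line in the plot below.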
if plot:
fig, ax = plt.subplots()
ax.bar(range(len(differential_kmer_log)), differential_kmer_log.values(), align='center', color='#cc3300', edgecolor='#cc3300')
#Mean
ax.plot((0,len(differential_kmer_log)), (diff_mean,diff_mean), color='#3366ff')
ax.plot((0,len(differential_kmer_log)), (0,0), color='white')
ax.axes.get_xaxis().set_visible(False)
fig.savefig(output_file_path + ".pdf", format='pdf')
output_file = open(output_file_path + ".txt", "w")
output_file.write("k-mer\tcount\tfreq\tcontrol_count\tcontrol_freq\tdifference\tdifference_log2\n")
for kmer, log_diff in sorted(differential_kmer_log.items(), key=lambda x: x[1], reverse = True):
(count, freq, ctrl_count, ctrl_freq, diff) = differential_kmer[kmer]
output_file.write(str(kmer) + "\t" + str(count) + "\t" + str(freq) + "\t" + str(ctrl_count) + "\t" + str(ctrl_freq) + "\t" + str(diff) + "\t" + str(log_diff) + "\n")
output_file.close()
| [
"matplotlib"
] |
5230ffd1375203e42523d38a7275582a91a8425e | Python | thkim0211/MachineLearning | /HW1.py | UTF-8 | 3,293 | 2.90625 | 3 | [] | no_license | import numpy as np
import pandas as pd
from sklearn.datasets import load_iris
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
import matplotlib.pyplot as plt
from matplotlib.axes._axes import _log as matplotlib_axes_logger
matplotlib_axes_logger.setLevel('ERROR')
#Tae Hwan Kim
#I pledge my honor that I have abided by the Stevens Honor System.
iris = load_iris()
df= pd.DataFrame(data= np.c_[iris['data'], iris['target']], columns= iris['feature_names'] + ['target'])
df['species'] = pd.Categorical.from_codes(iris.target, iris.target_names)
X = df.iloc[0:150, [1, 2]].values
y = df.iloc[0:150, 5].values
# set output lable value to 1 if it is Virginca and 0 if Other.
y = np.where(y == 'virginica', 1, 0)
X_std = np.copy(X)
X_std[:,0] = (X_std[:,0] - X_std[:,0].mean()) / X_std[:,0].std()
X_std[:,1] = (X_std[:,1] - X_std[:,1].mean()) / X_std[:,1].std()
print(y)
X_train,X_test,y_train,y_test = train_test_split(X_std,y,test_size = 0.2)
def sigmoid(X, theta):
z = np.dot(X, theta[1:]) + theta[0]
return 1.0 / ( 1.0 + np.exp(-z))
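# The cost used below is the binary cross-entropy, J = -[y*log(h) + (1 - y)*log(1 - h)], summed over the samples.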
def lrCostFunction(y, hx):
j = -y.dot(np.log(hx)) - ((1 - y).dot(np.log(1-hx)))
return j
def error(X_std,theta,y):
hx = sigmoid(X_std,theta)
c = lrCostFunction(y, hx)
e = hx - y
return e, c
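# lrGradient below performs batch gradient descent: each iteration moves theta against the gradient,
#   theta_j := theta_j - alpha * sum((h(x) - y) * x_j),
# with the bias term theta[0] updated from the plain error sum.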
def lrGradient(X_std, y, theta, alpha, num_iter):
# empty list to store the value of the cost function over number of iterations
cost = []
for i in range(num_iter):
e,c = error(X_std,theta,y)
grad = X_std.T.dot(e)
theta[0] = theta[0] - alpha * e.sum()
theta[1:] = theta[1:] - alpha * grad
cost.append(c)
return cost,theta
theta = np.zeros(3)
alpha = 0.01
num_iter = 5000
cost,theta = lrGradient(X_train,y_train, theta, alpha, num_iter)
plt.plot(range(1, len(cost) + 1), cost)
plt.xlabel('Iterations')
plt.ylabel('Cost')
print ('\n Logisitc Regression bias(intercept) term :', theta[0])
print ('\n Logisitc Regression estimated coefficients :', theta[1:])
plt.show()
def lrPredict(X_std,theta):
return np.where(sigmoid(X_std,theta) >= 0.5,1, 0)
from matplotlib.colors import ListedColormap
def pdb(x,y,theta):
ps = x
label = y
figure = plt.figure()
graph = figure.add_subplot(1, 1, 1)
x_a = []
y_a = []
for index, labelValue in enumerate(label):
pltx = ps[index][0]
x_a.append(pltx)
plty = ps[index][1]
y_a.append(plty)
if labelValue == 0:
graph.scatter(pltx, plty, c='b', marker="x", label='X')
else:
graph.scatter(pltx, plty, c='r', marker="o", label='O')
pdb(X_test,y_test,theta)
plt.title('Decision Boundary')
plt.xlabel('sepal width (standardized)')
plt.ylabel('petal length (standardized)')
plt.show()
def accuracy(x,theta,y):
n_correct = 0
m = len(x)
pred = lrPredict(x,theta)
for i in range(m):
if y[i] == pred[i]:
n_correct += 1
print(f"Accuracy:{n_correct/len(y)}")
accuracy(X_test,theta,y_test)
accuracy(X_train,theta,y_train)
| [
"matplotlib"
] |
ae5b2d5a672ba85d121288618382efb1eede5e8a | Python | ysun996/SinoatrialPacemaker | /old files/HyperPolarizedPhaseResponseCurve.py | UTF-8 | 10,710 | 2.78125 | 3 | [] | no_license | import numpy as np
import matplotlib.pyplot as plt
import bisect
from scipy.integrate import odeint
#####################################################################################################
# This Input file was set to create a graph of the hyperpolarized phase-response curve.
# To see the depolarized phase-resposne curve simply set HyperPolarYN = 1
#####################################################################################################
#####Gating variable equations and individual current equations
def dzdt(a,b,z):
hill=a/(a+b)
timeC=1/(a+b)
rVal=(hill-z)/timeC
return rVal
def am(Vm):
max_a_m = 1
a_m = max_a_m * ((Vm + 47) / (1 - np.exp(-(Vm + 47) / 10)))
return a_m
def an(Vm):
    a_n = 0.01*(Vm+55)/(1-np.exp(-(Vm+55)/10))
return a_n
def As(v):
a_s=0.05*(v+28)/(1-np.exp(-(v+28)/5))
return a_s
def bs(v):
b_s=0.00025*np.exp(-(v+28)/14.93)
return b_s
def bn(Vm):
    bn = 0.125*np.exp(-(Vm+65)/80)
return bn
def bm(Vm):
max_b_m = 40
b_m = max_b_m * np.exp(-(Vm + 72) / 17.86)
return b_m
def ah(Vm):
max_a_h = 0.0085
a_h = max_a_h * np.exp(-(Vm + 71) / 5.43)
return a_h
def bh(Vm):
max_b_h = 2.5
b_h = max_b_h / (1 + np.exp(-(Vm + 10) / 12.2))
return b_h
def ad(Vm):
max_a_d = 0.005
a_d = max_a_d * ((Vm + 34) / (1 - np.exp(-(Vm + 34) / 10)))
return a_d
def bd(Vm):
max_b_d = 0.05
b_d = max_b_d * (np.exp(-(Vm + 34) / 6.67))
return b_d
def not_d(Vm):
### d' variable needed for the slow inward current
not_d = 1/(1+np.exp(-(Vm+15)/6.67))
return not_d
def af(Vm):
max_a_f = 0.002468
a_f = max_a_f * np.exp(-(Vm + 47) / 20)
return a_f
def bf(Vm):
max_b_f = 0.05
b_f = max_b_f / (1 + np.exp(-(Vm + 13) / 11.49))
return b_f
def ax1(Vm):
max_a_x1 = 0.0025
a_x1 = (max_a_x1 * np.exp((Vm + 30) / 12.11)) / (1 + np.exp((Vm + 30) / 50))
return a_x1
def bx1(Vm):
max_b_x1 = 0.0065
b_x1 = max_b_x1 * (np.exp(-(Vm - 20) / 16.67)) / (1 + np.exp(-(Vm - 20) / 25))
return b_x1
def l_bar_x1(Vm):
l_bar = 2.25 * ((np.exp((Vm + 95)/25)-1)/(np.exp((Vm + 45)/25)))
return l_bar
def ay(Vm):
#Polynomial Regression
w=0.03375000000000002
if isinstance(Vm,float)==True:
a = [1.4557319134e-12,4.0945641782e-10,4.6549818992e-08,2.4903140216e-06,6.1460577425e-05,4.7453248494e-04,\
2.5019715465e-03]
a_y = a[0]*Vm**6+a[1]*Vm**5+a[2]*Vm**4+a[3]*Vm**3+a[4]*Vm**2+a[5]*Vm+a[6]
if a_y <= 0:
a_y = 0.00001
else:
a = [1.4557319134e-12,4.0945641782e-10,4.6549818992e-08,2.4903140216e-06,6.1460577425e-05,4.7453248494e-04,\
2.5019715465e-03]
a_y = a[0]*Vm**6+a[1]*Vm**5+a[2]*Vm**4+a[3]*Vm**3+a[4]*Vm**2+a[5]*Vm+a[6]
for i in range(len(Vm)):
if a_y[i] <= 0:
a_y[i] = 0.00001
a_y = a_y*w
#Modified version of the original equation
#max_a_y = 0.00005
#a_y = (max_a_y * np.exp(-(Vm + 14.25) / 14.93))/3
return a_y
def by(Vm):
#Polynomial Regression
w=0.03375000000000002
if isinstance(Vm,float)==True:
a = [3.5607174324e-13,3.9587887660e-11,-6.9345321240e-09,-8.8541673551e-07,4.5605591007e-05,9.4190808268e-03,\
3.3771510156e-01]
b_y = a[0]*Vm**6+a[1]*Vm**5+a[2]*Vm**4+a[3]*Vm**3+a[4]*Vm**2+a[5]*Vm+a[6]
if b_y <= 0:
b_y = 0.00001
else:
a = [3.5607174324e-13,3.9587887660e-11,-6.9345321240e-09,-8.8541673551e-07,4.5605591007e-05,9.4190808268e-03,\
3.3771510156e-01]
b_y = a[0]*Vm**6+a[1]*Vm**5+a[2]*Vm**4+a[3]*Vm**3+a[4]*Vm**2+a[5]*Vm+a[6]
for i in range(len(Vm)):
if b_y[i] <= 0:
b_y[i] = 0.00001
b_y = b_y*w
#Modified version of the original equation
#max_b_y = 0.001
#b_y = (max_b_y * (Vm + 14.25) / (1 - np.exp(-(Vm + 14.25) / 5)))/3
return b_y
#####Time-independent background potassium current (I_k1)
def i_k1(Vm):
IPot=1.3*(np.exp((Vm+110)/25)-1)/(np.exp((Vm+60)/12.5)+np.exp((Vm+60)/25))+\
0.26*(Vm+30)/(1-np.exp(-(Vm+30)/25))
return IPot
#Parameters
gNa = 4.4
gnNa = 0.066
gSi = 0.5175
gnSi = 0.161
gfNa = 1.2
ENa = 40
ESi = 70
EK = -93
Cm = 6
I = 0
parameters = [gNa, gnNa, gSi, gnSi, gfNa, ENa, ESi, EK, Cm, I]
###Pacemaker Function
def PacemakerODE(state, t, parameters):
gNa = parameters[0]
gnNa = parameters[1]
gSi = parameters[2]
gnSi = parameters[3]
gfNa = parameters[4]
ENa = parameters[5]
ESi = parameters[6]
EK = parameters[7]
Cm = parameters[8]
I = parameters[9]
v = state[0]
d = state[1]
f = state[2]
m = state[3]
h = state[4]
x1 = state[5]
y = state[6]
Isi = (gSi * d * f + gnSi * not_d(v)) * (v - ESi)
Ina = (gNa * (m**3)*h + gnNa) * (v - ENa)
Ix1 = x1 * l_bar_x1(v)
Ik1 = i_k1(v)
If = (y**2 * gfNa) * (v - ENa) + (v-EK) * y**2 * (-120/EK)
Ii = Isi + Ina + Ix1 + Ik1 + If + I
dv = -Ii/Cm
dd = (ad(v) * (1-d)) - (bd(v) * d)
df = (af(v) * (1-f)) - (bf(v) * f)
dm = (am(v) * (1-m)) - (bm(v) * m)
dh = (ah(v) * (1-h)) - (bh(v) * h)
dx1 = (ax1(v) * (1-x1)) - (bx1(v) * x1)
dy = (ay(v) * (1-y)) - (by(v) * y)
rstate = [dv, dd, df, dm, dh, dx1, dy]
return rstate
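# PacemakerODE integrates the membrane equation Cm*dVm/dt = -(Isi + INa + Ix1 + IK1 + If + Istim)
# together with first-order gating kinetics dz/dt = alpha_z*(1 - z) - beta_z*z for d, f, m, h, x1 and y.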
#Function to find individual current values of PacemakerODE
def currentVals(state, t, parameters):
gNa = parameters[0]
gnNa = parameters[1]
gSi = parameters[2]
gnSi = parameters[3]
gfNa = parameters[4]
ENa = parameters[5]
ESi = parameters[6]
EK = parameters[7]
Cm = parameters[8]
I = parameters[9]
v = state[0]
d = state[1]
f = state[2]
m = state[3]
h = state[4]
x1 = state[5]
y = state[6]
Isi = (gSi * d * f + gnSi * not_d(v)) * (v - ESi)
Ina = (gNa * (m**3)*h + gnNa) * (v - ENa)
Ix1 = x1 * l_bar_x1(v)
Ik1 = i_k1(v)
If = y**2*(gfNa * (v - ENa) + (v-EK) * (-120/EK))
Ii = Isi + Ina + Ix1 + Ik1 + If + I
return [Isi,Ina,Ix1,Ik1,If,Ii]
#Function to find local maxima and cycle length
def cycle_len(t,s):
logic1 = True
localMax = [0]
for i in range(2500,5000):
if logic1 and s[i,0] > -20 and s[i,0] < s[(i-1),0]:
localMax.append(i - 1)
logic1=False
elif s[i,0] > -20 and s[(i-1),0] < -20:
logic1=True
cyclelen = t[localMax[2]]-t[localMax[1]]
return cyclelen,localMax
#Pulse function
def pulseFnc(s0,t,p,phase,stim):
for x in [0,1]:
if x == 0:
tIndexL=bisect.bisect_left(t,phase)
t1=t[0:tIndexL]
tplot=t1
s = odeint(PacemakerODE, s0, t1, args=(parameters,))
stateFinal = s
else:
CycleValue=len(t)
stimCycleValue=phase + 50
tIndexR=bisect.bisect_left(t,CycleValue)
tIndexStim=bisect.bisect_left(t,stimCycleValue)
p[9] = stim
t1=t[tIndexL:tIndexStim]
tplot=np.append(tplot,t1)
state1=s[-1,:]
s = odeint(PacemakerODE, state1, t1, args=(parameters,))
stateFinal = np.concatenate((stateFinal,s),axis=0)
p[9]=0
t1=t[tIndexStim:tIndexR]
tplot=np.append(tplot,t1)
state1=s[-1,:]
s = odeint(PacemakerODE, state1, t1, args=(parameters,))
stateFinal = np.concatenate((stateFinal,s),axis=0)
tIndexL=tIndexR
return stateFinal
#Function to find Time Constant
def timeC(a,b):
r = 1/(a+b)
return r
#Initial state
state0=[-1.26312652e+01, 6.73685294e-01, 5.28447665e-01, 9.60815694e-01,
4.83891944e-07, 3.70080101e-02, 1.24208141e-01]
parameters[9] = 0
#Time
tmax = 2019
dt = 0.4
t = np.arange(0, tmax, dt)
#########################################################################################
# Phase Response Curves. Set HyperPolarYN to 1 for hyperpolarized phase-response curve or 0 for depolarized phase response
# Curve.
PhaseResetYN = 1
HyperPolarYN = 1
if HyperPolarYN == 1:
PolPulse = [0.5,2.0,3.5]
elif HyperPolarYN == 0:
PolPulse = [-0.6,-1.3,-1.9,-2.8]
if PhaseResetYN == 1:
state = odeint(PacemakerODE, state0, t, args=(parameters,))
cycleLength, localMx = cycle_len(t,state)
print(cycleLength)
phasePercent = np.arange(0.05,1,0.025)
for w in PolPulse:
print('now on '+str(w))
phaseChange = np.zeros(len(phasePercent))
for i in range(len(phasePercent)):
phase = t[localMx[1]]+cycleLength*phasePercent[i]
statePulsed = pulseFnc(state0,t,parameters,phase,w)
cyclePulsedLength, localPulsedMx = cycle_len(t,statePulsed)
phaseChange[i] = (cyclePulsedLength-cycleLength)/cycleLength*100
plt.plot(phasePercent*100,phaseChange, 'o',label=str(w) + '\u03BC'+'A/c$m^2$')
plt.legend()
plt.ylabel('\u0394'+'$\phi$'+' (%)')
plt.xlabel('% of Cycle ($\phi$)')
#############################################################################################
#############################################################################################
#v vs. dv
vdvYN = 0
if vdvYN == 1:
state = odeint(PacemakerODE, state0, t, args=(parameters,))
dstate=[[0,0,0,0,0,0,0]]
for w in range(len(state[:,0])):
d1state=[PacemakerODE(state[w,:],t,parameters)]
dstate=np.concatenate((dstate,d1state),axis=0)
plt.plot(state[:,0],dstate[1:,0])
plt.ylabel('dV/dt')
plt.xlabel('V')
#############################################################################################
############################################################
# Currents
CurrentYN = 0
if CurrentYN == 1:
parameters[9] = 0
state = odeint(PacemakerODE, state0, t, args=(parameters,))
dstate=[[0,0,0,0,0,0]]
for w in range(len(state[:,0])):
d1state=[currentVals(state[w,:],t,parameters)]
dstate=np.concatenate((dstate,d1state),axis=0)
Ilabels=['Isi','Ina','Ix1','Ik1','If','Ii']
for w in range(0,5):
plt.plot(t,dstate[1:,w],label=Ilabels[w])
plt.legend()
plt.ylabel('Current')
plt.xlabel('t')
plt.xlim(250,1250)
############################################################
############################################################
#Action potential
ActionPotentialYN = 0
if ActionPotentialYN == 1:
state = odeint(PacemakerODE, state0, t, args=(parameters,))
plt.plot(t,state[:,0])
plt.ylabel('V')
plt.xlabel('t')
plt.xlim(250,1250)
#############################################################
plt.show()
| [
"matplotlib"
] |
0d1597c881c35e2e677eeb34a9da0457c107bf16 | Python | l-johnston/unit_system | /doc/examples/labels_graph.py | UTF-8 | 456 | 3.03125 | 3 | [
"MIT"
] | permissive | """Example showing matplotlib unit interface
Generate a graph showing default labels
"""
import matplotlib.pyplot as plt
from unit_system.predefined_units import m, s
plt.style.use("./doc/examples/report.mplstyle")
def main():
"""Generate graph"""
x = [1, 2, 3] * s
y = [4, 5, 6] * m / s
fig, ax = plt.subplots()
ax.plot(x, y, "k-")
fig.savefig("./doc/examples/default_labels_graph.png")
if __name__ == "__main__":
main()
| [
"matplotlib"
] |
58d5e7cb17bf24ecd28fff2e566dce034cc61eb8 | Python | ee2110/Reinforcement-Learning | /RL_Comparison_Performances_Gym/Taxi-v3/SARSA.py | UTF-8 | 4,915 | 2.921875 | 3 | [] | no_license | import numpy as np
import matplotlib.pyplot as plt
import gym
import pandas as pd
import argparse
import time
import os
parser = argparse.ArgumentParser(description='Hyperparameter for SARSA agent')
parser.add_argument('--lr', type=float, help="learning rate", default=1e-4)
parser.add_argument('--gamma', type=float, help="discount factor", default=0.9)
parser.add_argument('--eps', type=float, help="initial epsilon", default=1.0)
parser.add_argument('--eps_decay', type=float, help="epsilon decay rate", default=0.999999)
parser.add_argument('--eps_min', type=float, help="minimum epsilon", default=0.1)
parser.add_argument('--num_games', type=int, help="train on how many games?", default=20000)
parser.add_argument('-v', '--verbose', type=int, default=2, help="print out episode")
args = parser.parse_args()
class SARSA_Agent():
def __init__(self, alpha, gamma, n_actions, n_states, eps_start, eps_min, eps_dec):
self.learning_rate = alpha
self.discount_factor = gamma
self.n_actions = n_actions
self.n_states = n_states
self.epsilon = eps_start
self.min_epsilon = eps_min
self.eps_dec = eps_dec
self.Q = {}
# initialize q
self.init_Q()
def init_Q(self):
for state in range(self.n_states):
for action in range(self.n_actions):
self.Q[(state, action)] = 0.0
def choose_action(self, state):
if np.random.random() < self.epsilon:
action = np.random.choice([i for i in range(self.n_actions)])
else:
actions = np.array([self.Q[(state, a)] for a in range(self.n_actions)]) #if can, better reindex the action before choose
action = np.argmax(actions)
return action
def decrement_epsilon(self):
self.epsilon = self.epsilon * self.eps_dec if self.epsilon > self.min_epsilon else self.min_epsilon
def learn(self, state, action, reward, state_, action_):
self.Q[(state, action)] += self.learning_rate * (reward + self.discount_factor*self.Q[(state_, action_)]-self.Q[(state, action)])
self.decrement_epsilon()
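    # The update above is the on-policy SARSA rule:
    #   Q(s, a) <- Q(s, a) + alpha * [r + gamma * Q(s', a') - Q(s, a)]
    # where a' is the action actually taken in the next state s'.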
def plot_result(r, avg_r):
plt.plot(r, color='b')
plt.plot(avg_r, color='r')
plt.title('Scores obtained by SARSA agent')
plt.ylabel('Score')
plt.xlabel('Games')
plt.legend(['Scores per game', 'Average Scores'], loc='upper left')
plt.show()
def export_result(avg_r):
if os.path.exists('./agent_scores.csv'):
print('csv file exists')
result = pd.read_csv('agent_scores.csv')
result['SARSA'] = pd.Series(avg_r)
result.to_csv('agent_scores.csv', index=False)
print('results saved!')
else:
print('csv file not exists, creating new file.')
column = {'SARSA':pd.Series(avg_r)}
table = pd.DataFrame(column)
table.to_csv('agent_scores.csv')
print('results saved!')
if __name__ == '__main__':
env = gym.make('Taxi-v3')
agent = SARSA_Agent(alpha=args.lr, gamma=args.gamma, n_actions=env.nA, n_states=env.nS, eps_start=args.eps, eps_min=args.eps_min, eps_dec=args.eps_decay)
scores = []
avg_scores_list = []
n_games = args.num_games
start_time = time.time()
for i in range(n_games):
done = False
observation = env.reset()
score = 0
action = agent.choose_action(observation)
while not done:
#env.render()
# the info may be replaced with _
observation_, reward, done, info = env.step(action)
# In SARSA as on-policy, agent choose next action based on next state
            action_ = agent.choose_action(observation_)
# SARSA will learn from the transition state, action, reward, state_, action_
agent.learn(observation, action, reward, observation_, action_)
score += reward
observation = observation_
# Agent is confirm to choose that action
action = action_
scores.append(score)
avg_scores = np.mean(scores[-100:])
avg_scores_list.append(avg_scores)
if i % 100 == 0:
if args.verbose == 2:
print('episode',i, 'average scores on last 100 games %.2f' % avg_scores, 'epsilon %.2f' % agent.epsilon)
elif args.verbose == 1:
print('Training episode',i, 'win percentage %.2f' % avg_scores)
else:
print('Overall Average reward: ',np.mean(avg_scores))
end_time = time.time()
duration = end_time - start_time
plot_result(scores, avg_scores_list)
print('Overall mean reward: ',np.mean(scores))
print('Time Taken: ',duration)
export_result(avg_scores_list)
| [
"matplotlib"
] |
0410ce77edd9953920ea8445837638a9040330c7 | Python | Ikusaba-san/MystBot | /cogs/statistics.py | UTF-8 | 13,850 | 2.84375 | 3 | [
"MIT"
] | permissive | import discord
from discord.ext import commands
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.ticker import MultipleLocator
import numpy as np
import itertools
import datetime
from concurrent.futures import ThreadPoolExecutor
import functools
class Plots:
"""Commands which make graphs and other pretties."""
def __init__(self, bot):
self.bot = bot
self.threadex = ThreadPoolExecutor(max_workers=2)
def pager(self, entries, chunk: int):
for x in range(0, len(entries), chunk):
yield entries[x:x + chunk]
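    # hilo returns the index positions of the maximum and minimum values in `numbers`
    # (optionally scaled by indexm); these are used as marker positions on the plots.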
def hilo(self, numbers, indexm: int=1):
highest = [index * indexm for index, val in enumerate(numbers) if val == max(numbers)]
lowest = [index * indexm for index, val in enumerate(numbers) if val == min(numbers)]
return highest, lowest
def datetime_range(self, start, end, delta):
current = start
while current < end:
yield current
current += delta
def get_times(self):
# todo this is really bad so fix soon pls thanks kk weeeew
fmt = '%H%M'
current = datetime.datetime.utcnow()
times = []
times2 = []
times3 = []
tcount = 0
rcurrent = current - datetime.timedelta(minutes=60)
rcurrent2 = current - datetime.timedelta(minutes=30)
for x in range(7):
times.append(rcurrent + datetime.timedelta(minutes=tcount))
tcount += 10
tcount = 0
for x in range(7):
times2.append(rcurrent2 + datetime.timedelta(minutes=tcount))
tcount += 5
tcount = 0
for t3 in range(26):
times3.append(rcurrent + datetime.timedelta(minutes=tcount))
tcount += 60/25
times = [t.strftime(fmt) for t in times]
times2 = [t.strftime(fmt) for t in times2]
times3 = [t.strftime(fmt) for t in times3]
return times, times2, times3, current
def ping_plotter(self, data: (tuple, list)=None):
# Base Data
if data is None:
numbers = list(self.bot._pings)
else:
numbers = data
long_num = list(itertools.chain.from_iterable(itertools.repeat(num, 2) for num in numbers))
chunks = tuple(self.pager(numbers, 4))
avg = list(itertools.chain.from_iterable(itertools.repeat(np.average(x), 8) for x in chunks))
mean = [np.mean(numbers)] * 60
prange = int(max(numbers)) - int(min(numbers))
plog = np.log(numbers)
t = np.sin(np.array(numbers) * np.pi*2 / 180.)
xnp = np.linspace(-np.pi, np.pi, 60)
# tmean = [np.mean(t)] * 60
# Spacing/Figure/Subs
plt.style.use('ggplot')
fig = plt.figure(figsize=(15, 7.5))
ax = fig.add_subplot(2, 2, 2, axisbg='aliceblue', alpha=0.3) # Right
ax2 = fig.add_subplot(2, 2, 1, axisbg='thistle', alpha=0.2) # Left
ax3 = fig.add_subplot(2, 1, 2, axisbg='aliceblue', alpha=0.3) # Bottom
ml = MultipleLocator(5)
ml2 = MultipleLocator(1)
# Times
times, times2, times3, current = self.get_times()
# Axis's/Labels
plt.title(f'Latency over Time (WebSocket) | {current} UTC')
ax.set_xlabel(' ')
ax.set_ylabel('Network Stability')
ax2.set_xlabel(' ')
ax2.set_ylabel('Milliseconds(ms)')
ax3.set_xlabel('Time(HHMM)')
ax3.set_ylabel('Latency(ms)')
if min(numbers) > 100:
ax3.set_yticks(np.arange(min(int(min(numbers)), 2000) - 100,
max(range(0, int(max(numbers)) + 100)) + 50, max(numbers) / 12))
else:
ax3.set_yticks(np.arange(min(0, 1), max(range(0, int(max(numbers)) + 100)) + 50, max(numbers) / 12))
# Labels
ax.yaxis.set_minor_locator(ml2)
ax2.xaxis.set_minor_locator(ml2)
ax3.yaxis.set_minor_locator(ml)
ax3.xaxis.set_major_locator(ml)
ax.set_ylim([-1, 1])
ax.set_xlim([0, np.pi])
ax.yaxis.set_ticks_position('right')
ax.set_xticklabels(times2)
ax.set_xticks(np.linspace(0, np.pi, 7))
ax2.set_ylim([min(numbers) - prange/4, max(numbers) + prange/4])
ax2.set_xlim([0, 60])
ax2.set_xticklabels(times)
ax3.set_xlim([0, 120])
ax3.set_xticklabels(times3, rotation=45)
plt.minorticks_on()
ax3.tick_params()
highest, lowest = self.hilo(numbers, 2)
mup = []
mdw = []
count = 0
p10 = mean[0] * (1 + 0.5)
m10 = mean[0] * (1 - 0.5)
for x in numbers:
if x > p10:
mup.append(count)
elif x < m10:
mdw.append(count)
count += 1
# Axis 2 - Left
ax2.plot(range(0, 60), list(itertools.repeat(p10, 60)), '--', c='indianred',
linewidth=1.0,
markevery=highest,
label='+10%')
ax2.plot(range(0, 60), list(itertools.repeat(m10, 60)), '--', c='indianred',
linewidth=1.0,
markevery=highest,
label='+-10%')
ax2.plot(range(0, 60), numbers, '-', c='blue',
linewidth=1.0,
label='Mark Up',
alpha=.8,
drawstyle='steps-post')
ax2.plot(range(0, 60), numbers, ' ', c='red',
linewidth=1.0,
markevery=mup,
label='Mark Up',
marker='^')
"""ax2.plot(range(0, 60), numbers, ' ', c='green',
linewidth=1.0, markevery=mdw,
label='Mark Down',
marker='v')"""
ax2.plot(range(0, 60), mean, label='Mean', c='blue',
linestyle='--',
linewidth=.75)
ax2.plot(list(range(0, 60)), plog, 'darkorchid',
alpha=.9,
linewidth=1,
drawstyle='default',
label='Ping')
# Axis 3 - Bottom
ax3.plot(list(range(0, 120)), long_num, 'darkorchid',
alpha=.9,
linewidth=1.25,
drawstyle='default',
label='Ping')
ax3.fill_between(list(range(0, 120)), long_num, 0, facecolors='darkorchid', alpha=0.3)
ax3.plot(range(0, 120), long_num, ' ', c='indianred',
linewidth=1.0,
markevery=highest,
marker='^',
markersize=12)
ax3.text(highest[0], max(long_num) - 10, f'{round(max(numbers))}ms', fontsize=12)
ax3.plot(range(0, 120), long_num, ' ', c='lime',
linewidth=1.0,
markevery=lowest,
marker='v',
markersize=12)
ax3.text(lowest[0], min(long_num) - 10, f'{round(min(numbers))}ms', fontsize=12)
ax3.plot(list(range(0, 120)), long_num, 'darkorchid',
alpha=.5,
linewidth=.75,
drawstyle='steps-pre',
label='Steps')
ax3.plot(range(0, 120), avg, c='forestgreen',
linewidth=1.25,
markevery=.5,
label='Average')
# Axis - Right
"""ax.plot(list(range(0, 60)), plog1, 'darkorchid',
alpha=.9,
linewidth=1,
drawstyle='default',
label='Ping')
ax.plot(list(range(0, 60)), plog2, 'darkorchid',
alpha=.9,
linewidth=1,
drawstyle='default',
label='Ping')
ax.plot(list(range(0, 60)), plog10, 'darkorchid',
alpha=.9,
linewidth=1,
drawstyle='default',
label='Ping')"""
ax.fill_between(list(range(0, 120)), .25, 1, facecolors='lime', alpha=0.2)
ax.fill_between(list(range(0, 120)), .25, -.25, facecolors='dodgerblue', alpha=0.2)
ax.fill_between(list(range(0, 120)), -.25, -1, facecolors='crimson', alpha=0.2)
ax.fill_between(xnp, t, 1, facecolors='darkred')
"""ax.plot(list(range(0, 60)), t, 'darkred',
linewidth=1.0,
alpha=1,
label='Stability')
ax.plot(list(range(0, 60)), tmean, 'purple',
linewidth=1.0,
alpha=1,
linestyle=' ')
ax.plot(list(range(0, 60)), tp10, 'limegreen',
linewidth=1.0,
alpha=1,
linestyle=' ')
ax.plot(list(range(0, 60)), tm10, 'limegreen',
linewidth=1.0,
alpha=1,
linestyle=' ')"""
# Legend
ax.legend(bbox_to_anchor=(.905, .97), bbox_transform=plt.gcf().transFigure)
ax3.legend(loc='best', bbox_transform=plt.gcf().transFigure)
# Grid
ax.grid(which='minor')
ax2.grid(which='both')
ax3.grid(which='both')
plt.grid(True, alpha=0.25)
# Inverts
ax.invert_yaxis()
# File
current = datetime.datetime.utcnow()
save = current.strftime("%Y-%m-%d%H%M")
plt.savefig(f'/home/myst/mystbot/pings/{save}', bbox_inches='tight') # !!!VPS!!!
self.bot._latest_ping[save] = f'/home/myst/mystbot/pings/{save}.png' # !!!VPS!!!
plt.clf()
plt.close()
return save
def ram_plotter(self, data: (list, tuple)=None):
current = datetime.datetime.utcnow()
# Time Labels
dts = [dt.strftime('%H%M') for dt in self.datetime_range(current - datetime.timedelta(minutes=30),
current, datetime.timedelta(minutes=1))]
test = list(self.bot._ram)
chunks = tuple(self.pager(test, 2))
mind = min(test)
maxd = max(test)
mean = [np.mean(test)] * 120
avg = list(itertools.chain.from_iterable(itertools.repeat(np.average(x), 4) for x in chunks))
highest, lowest = self.hilo(test, 1)
fig = plt.figure(figsize=(15, 7.5))
ax = fig.add_subplot(1, 1, 1, axisbg='whitesmoke', alpha=0.3) # Main
plt.style.use('ggplot')
plt.title(f'Usage over Time (RAM) | {current} UTC')
ax.set_xlabel('Time (HHMM)')
ax.set_ylabel('Usage (MiB)')
minylim = mind - 15 if mind - 15 > 0 else 0
ax.set_xlim([0, 120])
ax.set_xticks(np.linspace(0, 120, 30))
ax.set_ylim([0, 120])
ax.set_yticks(np.linspace(mind, maxd + 15, 12))
ax.set_xticklabels(dts)
ax.grid(which='both')
plt.grid(True, alpha=0.25)
# Plots
ax.plot(list(range(0, 120)), test)
ax.plot(list(range(0, 120)), test, '-', c='darkslategrey',
linewidth=0.5,
label='Usage')
ax.fill_between(list(range(0, 240)), avg, facecolors='cyan', alpha=0.6)
ax.fill_between(list(range(0, 120)), test, facecolors='teal', alpha=1)
ax.plot(list(range(0, 120)), mean, '--', c='limegreen', label='Mean')
ax.plot(range(0, 120), test, ' ', c='indianred',
linewidth=1.0,
markevery=highest,
marker='^',
markersize=12)
ax.text(highest[0], max(test) - 10, f'{round(max(test))} MiB', fontsize=12)
ax.plot(range(0, 120), test, ' ', c='lime',
linewidth=1.0,
markevery=lowest,
marker='v',
markersize=12)
ax.plot(list(range(0, 120)), test, 'darkorchid',
alpha=.5,
linewidth=.75,
drawstyle='steps-pre',
label='Steps')
ax.plot(range(0, 240), avg, c='cyan',
linewidth=1.5,
markevery=1,
label='Average',
alpha=0.5)
ax.legend(loc='best', bbox_transform=plt.gcf().transFigure)
save = current.strftime("%Y-%m-%d%H%M")
plt.savefig(f'/home/myst/mystbot/rams/{save}', bbox_inches='tight') # !!!VPS!!!
self.bot._latest_ram[save] = f'/home/myst/mystbot/rams/{save}.png' # !!!VPS!!!
plt.clf()
plt.close()
return save
@commands.command(name='wsping')
async def _ping(self, ctx):
"""Ping. Shown as a pretty graph."""
current = datetime.datetime.utcnow().strftime('%Y-%m-%d%H%M')
if len(self.bot._pings) < 60:
return await ctx.send(f'Latency: **`{self.bot.latency * 1000}`**')
await ctx.channel.trigger_typing()
try:
pfile = self.bot._latest_ping[current]
return await ctx.send(file=discord.File(pfile))
except:
pass
getfile = functools.partial(self.ping_plotter)
pfile = await self.bot.loop.run_in_executor(self.threadex, getfile)
await ctx.send(file=discord.File(f'/home/myst/mystbot/pings/{pfile}.png')) # !!!VPS!!!
@commands.command(name='ram')
async def _ram(self, ctx):
"""Ram usage. Shown as a pretty graph."""
current = datetime.datetime.utcnow().strftime('%Y-%m-%d%H%M')
if len(self.bot._ram) < 60:
return await ctx.send(f'Ram Usage: **`{self.bot._ram[-1]}`**')
await ctx.channel.trigger_typing()
try:
pfile = self.bot._latest_ram[current]
return await ctx.send(file=discord.File(pfile))
except:
pass
getfile = functools.partial(self.ram_plotter)
pfile = await self.bot.loop.run_in_executor(self.threadex, getfile)
await ctx.send(file=discord.File(f'/home/myst/mystbot/rams/{pfile}.png')) # !!!VPS!!!
async def sick(self, ctx, name: str=None):
pass
def setup(bot):
bot.add_cog(Plots(bot))
| [
"matplotlib"
] |
320c458311fecae57c07d8f88d3e1509002e3989 | Python | bhillmann/cluny | /cluny/augmented_dendrogram.py | UTF-8 | 533 | 2.546875 | 3 | [] | no_license | from scipy.cluster.hierarchy import dendrogram
import matplotlib.pyplot as plt
def augmented_dendrogram(*args, **kwargs):
ddata = dendrogram(*args, **kwargs)
if not kwargs.get('no_plot', False):
for i, d in zip(ddata['icoord'], ddata['dcoord']):
x = 0.5 * sum(i[1:3])
y = d[1]
plt.plot(x, y, 'ro')
plt.annotate("%.3g" % y, (x, y), xytext=(0, -8),
textcoords='offset points',
va='top', ha='center')
return ddata | [
"matplotlib"
] |
a7d326dd18b72908d42524d7efb64a06bb3466a3 | Python | prachichouksey/Project-Group-16 | /bill-forecast/model.py | UTF-8 | 438 | 3.125 | 3 | [] | no_license | import pandas as pd
from sklearn import linear_model
import matplotlib.pyplot as plt
import pickle
# Getdataset
dataframe = pd.read_csv('data.csv')
x_values = dataframe[['units']]
y_values = dataframe[['price']]
#train model on data
model = linear_model.LinearRegression()
model.fit(x_values, y_values)
# save model
with open('price-forecast-model.pickle','wb') as f:
pickle.dump(model, f)
print("model created") | [
"matplotlib"
] |
64c14874b82c7299fccacd3edb311db992741c0e | Python | RyanLBuchanan/Multiple_Linear_Regression | /Multiple_Linear_Regression.py | UTF-8 | 3,303 | 4.03125 | 4 | [] | no_license | # Multiple Linear Regression from Machine Learning A-Z - SuperDataScience
# Input by Ryan L Buchanan 11SEP20
# Import the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Import the dataset
dataset = pd.read_csv("50_Startups.csv")
X = dataset.iloc[: , :-1].values # matrix of features -> predictors
y = dataset.iloc[:, -1].values # dependent variable (target)
print(X)
# Encode categories
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OneHotEncoder
ct = ColumnTransformer(transformers = [('encoder', OneHotEncoder(), [3])], remainder = 'passthrough')
X = np.array(ct.fit_transform(X))
print(X)
# No need to apply feature scaling in multiple linear regression as the coefficients on each
# independent variable will compensate to put everything on the same scale
# Split dataset into training and test sets
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)
# Train the Multiple Linear Regression model on the Training Set
from sklearn.linear_model import LinearRegression
regressor = LinearRegression()
regressor.fit(X_train, y_train)
# Predict the Test set results
y_pred = regressor.predict(X_test)
np.set_printoptions(precision=2)
print(np.concatenate((y_pred.reshape(len(y_pred), 1), y_test.reshape(len(y_test), 1)), 1))
# Making a single prediction (for example the profit of a startup with R&D Spend = 160000, Administration Spend = 130000, Marketing Spend = 300000 and State = 'California')
print("Predicted startup profit = $ regressor.predict([[1,0,0,160000, 130000, 300000]]))
# Therefore, our model predicts that the profit of a Californian startup which spent 160000 in R&D, 130000 in Administration and 300000 in Marketing is $ 181566,92.
# Important note 1: Notice that the values of the features were all input in a double pair of square brackets. That's because the "predict" method always expects a 2D array as the format of its inputs. And putting our values into a double pair of square brackets makes the input exactly a 2D array. Simply put:
# 1,0,0,160000,130000,300000→scalars
# [1,0,0,160000,130000,300000]→1D array
# [[1,0,0,160000,130000,300000]]→2D array
# Important note 2: Notice also that the "California" state was not input as a string in the last column but as "1, 0, 0" in the first three columns. That's because of course the predict method expects the one-hot-encoded values of the state, and as we see in the second row of the matrix of features X, "California" was encoded as "1, 0, 0". And be careful to include these values in the first three columns, not the last three ones, because the dummy variables are always created in the first columns.
print(regressor.coef_)
print("Y-intercept or start profit = $", round(regressor.intercept_, 2))
# Therefore, the equation of our multiple linear regression model is:
# Profit=86.6×Dummy State 1−873×Dummy State 2+786×Dummy State 3−0.773×R&D Spend+0.0329×Administration+0.0366×Marketing Spend+42467.53
# Important Note: To get these coefficients we called the "coef_" and "intercept_" attributes from our regressor object. Attributes in Python are different than methods and usually return a simple value or an array of values.
| [
"matplotlib"
] |
b7de338f6d270b15915aa994646117dea10009a3 | Python | alihallal-DATA8/othellov2.1 | /board.py | UTF-8 | 1,447 | 3.34375 | 3 | [] | no_license | import time
import numpy as np
import matplotlib.pyplot as plt
class Board:
def __init__(self, arr = np.zeros((8,8), dtype='int'), message = 'Welcome! TO Othello....Red player Starts', **kwargs):
self.arr = arr
self.message = message
def initialize(self):
self.arr[3,3] = 1
self.arr[4,4] = 1
self.arr[3,4] = -1
self.arr[4,3] = -1
def count_score(self):
rcount, _ = np.where(self.arr == 1)
bcount, _ = np.where(self.arr == -1)
return rcount, bcount
def tellme(self):
print(self.message)
plt.xlabel(self.message, fontsize=10)
rcount, bcount = self.count_score()
plt.title(f"score \n Red : {len(rcount)}, Blue : {len(bcount)}")
plt.draw()
def createboard(self):
plt.figure(figsize=(4,4), facecolor='green')
plt.grid(True, which = 'major', lw = 2 , color = 'k')
#plt.tick_params(axis ='both',which='minor')
plt.xlim(-0.5,7.5)
plt.xticks(np.arange(-0.5,8.5,1), labels=[])
plt.ylim(-0.5,7.5)
plt.yticks(np.arange(-0.5,8.5,1), labels=[])
self.initialize()
self.tellme()
w_x, w_y = np.where(self.arr == 1)
b_x, b_y = np.where(self.arr == -1)
ph = plt.plot(w_x, w_y, 'r', lw ='0', marker = 'o', markersize=20)
ph = plt.plot(b_x, b_y, 'b', lw ='0', marker = 'o', markersize=20)
| [
"matplotlib"
] |
63fc8904e2cc2274a0cd456c3dad4bcf883186e2 | Python | jonborchardt/hack18 | /gender_analysis/smooth.py | UTF-8 | 1,295 | 3.046875 | 3 | [] | no_license | """ Usage:
<file-name> --in=IN_FILE --out=OUT_FILE --window=WINDOW --poly=POLY [--debug]
"""
# External imports
import logging
import pdb
from pprint import pprint
from pprint import pformat
from docopt import docopt
from collections import defaultdict
from operator import itemgetter
from tqdm import tqdm
from scipy.signal import savgol_filter
import matplotlib.pyplot as plt
# Local imports
#=-----
if __name__ == "__main__":
# Parse command line arguments
args = docopt(__doc__)
inp_fn = args["--in"]
poly = int(args["--poly"])
window = int(args["--window"])
out_fn = args["--out"]
debug = args["--debug"]
if debug:
logging.basicConfig(level = logging.DEBUG)
else:
logging.basicConfig(level = logging.INFO)
y = []
for line in open(inp_fn):
try:
y.append(float(line.strip()))
except:
pass
logging.info("Read {} float values from {}".format(len(y), inp_fn))
y_smoothed = savgol_filter(y, window, poly)
x = range(1970, 2018)
plt.plot(x, y)
plt.plot(x, y_smoothed, color="red")
plt.show()
logging.info("Writing output to {}".format(out_fn))
with open(out_fn, "w") as fout:
fout.write("\n".join(map(str, y_smoothed)))
logging.info("DONE")
| [
"matplotlib"
] |
b61ba8678212041fb02f52da4e5490fdbb223f38 | Python | melissande/dhi-segmentation-buildings | /Data_Handle/patches_extraction_ghana.py | UTF-8 | 6,436 | 2.703125 | 3 | [] | no_license | import tensorflow as tf
import h5py
import numpy as np
from numpy import newaxis
import cv2
import sys
import os
from image_utils import read_images,write_data_h5,read_labels
#import matplotlib.pyplot as plt
'''
patche_extraction.py is used to generate patches from the satellite image of Accra provided by DHI Gras. This image is around 12000x12000 pixels and the MS bands are also upsampled in this script. The patches, stored in .h5 format, are shaped as number_of_patches x width x height x number_of_bands for panchro, pansharp, ms and groundtruth
Arguments:
python patche_extraction.py ../DATA_GHANA/RAW_DATA/ ../DATA_GHANA/RAW_PATCHES/120_x_120/ width_patch=120
The first two arguments are mandatory. '../DATA_GHANA/RAW_DATA/' is the input folder where the files corresponding to the bands of the image should be stored under these names: 'panchro.tif' for the Panchromatic image, 'pansharp.tif' for the Pansharpened image, 'ms.tif' for the Multi Spectral Bands image and 'groundtruth.png' for the image containing the mask with building footprints (1 where there is a building, 0 otherwise). The second argument '../DATA_GHANA/RAW_PATCHES/120_x_120/' is the path of the folder where the patches are stored. width_patch is the size of the patches to extract.
'''
NAME_PANCHRO='panchro.tif'
NAME_PANSHARP='pansharp.tif'
NAME_MS='ms.tif'
NAME_LABELS='groundtruth.png'
WIDTH=128
STRIDE=128#needs to be equal to the width as we don't want overlapping patches
'''
All the lines with display have been commented out because the scripts were launched from the command line on a remote server, where no display was available
'''
def prepare_ms_hr(ms_lr,size_hr):
'''
Prepares the upsampled MS image
:ms_lr input image to upsample
:size_hr to upsample the Low Resolution MS image to the dimension of the High Resolution panchromatic image
'''
tf.reset_default_graph()
ms_ph=tf.placeholder(tf.float64, [ms_lr.shape[0],ms_lr.shape[1],ms_lr.shape[2],ms_lr.shape[3]], name='ms_placeholder')
ms_hr=tf.image.resize_images(ms_ph, [size_hr[0], size_hr[1]])
ms_hr=tf.cast(ms_hr,tf.float64,name='cast_ms_hr')
with tf.Session() as sess:
ms_hr= sess.run(ms_hr,feed_dict={ms_ph: ms_lr})
return ms_hr
def extract_patches(data,width,stride):
'''
Extract patches from images
:data input image
:width dimensiton of the patch
:stride stride of patch selection on the image
'''
tf.reset_default_graph()
print('Patch extraction with stride=%d and width=%d begins'%(stride,width) )
data_pl=tf.placeholder(tf.float64, [data.shape[0],data.shape[1],data.shape[2],data.shape[3]], name='data_placeholder')
data_o=tf.extract_image_patches(images=data_pl,ksizes=[1,width,width,1],strides=[1,stride,stride,1],rates=[1,1,1,1],padding='VALID')
print('Patch extraction done')
size_tot=data_o.get_shape().as_list()
data_o=tf.reshape(data_o,[size_tot[1]*size_tot[2],width,width,data.shape[3]])
with tf.Session() as sess:
Data_o= sess.run(data_o,feed_dict={data_pl: data})
print('%d patches of size %d x %d created as list'%(Data_o.shape[0],Data_o.shape[1],Data_o.shape[2]))
return Data_o
def save_patches(data,path_out):
'''
Write the patches list to .h5 file format
:data patches list
:path_out where to save the patches
'''
write_data_h5(path_out,data)
if __name__ == '__main__':
if len(sys.argv)<3:
print('Specify all the paths of folder input and output')
exit()
#path='../DATA_GHANA/RAW_DATA/'
path=sys.argv[1]
#path_patches='../DATA_GHANA/RAW_PATCHES/120_x_120/'
path_patches=sys.argv[2]
if not os.path.exists(path_patches):
os.makedirs(path_patches)
for i in range(3, len(sys.argv)):
arg = sys.argv[i]
if arg.startswith('--width_patch'):
WIDTH=int(arg[len('--width_patch='):])
STRIDE=WIDTH
#patch_test_number=300
## Panchromatic
panchromatic_file=path+NAME_PANCHRO
panchromatic=read_images(panchromatic_file)
hr_size=panchromatic.shape
panchromatic=panchromatic[newaxis,:,:,newaxis]
print('\n PANCHROMATIC \n\n')
panchromatic=extract_patches(panchromatic,WIDTH,STRIDE)
## Find patches to discard
keep=np.arange(len(panchromatic))
discard=[]
print('Original size of the dataset is %d'%len(panchromatic))
for i in range(len(panchromatic)):
if (np.sum(panchromatic[i,:,:,:])).astype(int)==0:
discard.append(i)
discard=np.asarray(discard)
keep=np.delete(keep,discard)
print('Final size of the dataset is %d'%len(keep))
## Save Panchromatic
panchromatic=panchromatic[keep]
save_patches(panchromatic,path_patches+'panchro.h5')
# plt.imshow(panchromatic[patch_test_number,:,:,0])
# plt.show()
##MS bands
ms_file=path+NAME_MS
ms=read_images(ms_file)
ms=np.transpose(ms,(1,2,0))
print('\n MS BANDS\n\n')
for i in range(ms.shape[2]):
print('\n Band %d \n'%i)
ms_i=ms[:,:,i]
print('Upscale')
ms_hr=prepare_ms_hr(ms_i[newaxis,:,:,newaxis],hr_size)
ms_hr=extract_patches(ms_hr,WIDTH,STRIDE)
ms_hr=ms_hr[keep]
save_patches(ms_hr,path_patches+'ms_hr_'+str(i)+'.h5')
# plt.imshow(ms_hr[patch_test_number,:,:,0])
# plt.show()
##Pansharpened bands
pansharpened_file=path+NAME_PANSHARP
pansharpened=read_images(pansharpened_file)
pansharpened=np.transpose(pansharpened,(1,2,0))
print('\n BANDS\n\n')
for i in range(pansharpened.shape[2]):
print('\n Band %d \n'%i)
pansharpened_i=pansharpened[:,:,i]
pansharpened_i=extract_patches(pansharpened_i[newaxis,:,:,newaxis],WIDTH,STRIDE)
pansharpened_i=pansharpened_i[keep]
save_patches(pansharpened_i,path_patches+'pansharpened_'+str(i)+'.h5')
# plt.imshow(pansharpened_i[patch_test_number,:,:,0])
# plt.show()
## Label patches
labels_file=path+NAME_LABELS
labels=read_labels(labels_file)
labels=labels[newaxis,:,:,newaxis]
print('\n LABELS \n\n')
labels=extract_patches(labels,WIDTH,STRIDE)
labels=labels[keep]
save_patches(labels,path_patches+'groundtruth.h5')
# plt.imshow(labels[patch_test_number,:,:,0])
# plt.show()
| [
"matplotlib"
] |
18a620068310c856f7bc7ffeab0feaf76b11a4f4 | Python | MarkRegalla27/TemperatureData | /temperature.py | UTF-8 | 6,359 | 2.90625 | 3 | [] | no_license | import pandas as pd
import matplotlib.pyplot as plt
from pandas.tools.plotting import scatter_matrix
from scipy import stats
import numpy as np
import statsmodels.api as sm
import math
import requests
from pandas.io.json import json_normalize
import sqlite3 as lite
import time # a package with datetime objects
from dateutil.parser import parse # a package for parsing a string into a Python datetime object
import collections
import datetime
import pandas.io.sql as psql
#from odict import odict #imports ordered dictionary library
cities = { "Atlanta": '33.762909,-84.422675',
"Austin": '30.303936,-97.754355',
"Boston": '42.331960,-71.020173',
"Chicago": '41.837551,-87.681844',
"Cleveland": '41.478462,-81.679435'
}
base_url = 'https://api.forecast.io/forecast/ed5384712a6c42598d505ff33c0bd2aa/'
start_date = datetime.datetime.now() - datetime.timedelta(days=30)
end_date = datetime.datetime.now()
readable_date = start_date
query_date = end_date - datetime.timedelta(days=30) #the current value being processed
con = lite.connect('weather.db')
cur = con.cursor()
#Drop city_max_temp table if it exists already
with con:
#cur.execute('DROP TABLE city_max_temp') #Grid table as lesson gives
cur.execute('DROP TABLE tidy_max_temp') #Tidy version of table
#make a table of city, date, max temp
with con:
cur.execute('CREATE TABLE tidy_max_temp (city TEXT, todays_date TEXT, max_temp TEXT)')
cur.execute('CREATE TABLE city_max_temp (day_of_reading INT, Atlanta REAL, Austin REAL, Boston REAL, Chicago REAL, Cleveland REAL)')
#Insert timestamps into each row of matrix style database table
with con:
while query_date < end_date:
cur.execute("INSERT INTO city_max_temp(day_of_reading) VALUES (?)", (int(query_date.strftime('%s')),))
query_date += datetime.timedelta(days=1)
#sql = "INSERT INTO city_max_temp (city, todays_date, max_temp) VALUES (?,?,?)"
#data_list[]
p = 1
#with con:
#Put temperatures in matrix style database
for k,v in cities.iteritems():
#print k
query_date = end_date - datetime.timedelta(days=31) #set value each time through the loop of cities
#print query_date
while query_date < end_date:
query_date += datetime.timedelta(days=1)
#print query_date
r = requests.get(base_url + v + ',' + query_date.strftime('%Y-%m-%dT12:00:00'))
print r.status_code #prints url status. If 400, then invalid request was made and will not fetch the data
with con:
cur.execute('UPDATE city_max_temp SET ' + k + ' = ' + str(r.json()['daily']['data'][0]['temperatureMax']) + ' WHERE day_of_reading = ' + query_date.strftime('%s'))
if p == 1:
print 'UPDATE city_max_temp SET ' + str(k) + ' = ' + str(r.json()['daily']['data'][0]['temperatureMax']) + ' WHERE day_of_reading = ' + str(query_date.strftime('%s'))
p = 2
#increment query_date to the next day for next operation of loop, BUT IT WON'T right here
#query_date = query_date + datetime.timedelta(days=1)
#put temperatures in tidy database
cities = pd.DataFrame(cities.items(), columns=['city', 'coordinates'])
cities.sort(columns=['city'], ascending=True, inplace=True)
cities.reset_index(inplace=True)
cities.drop('index', axis=1, inplace=True)
print cities['city']
#Insert timestamps and cities into rows of tidy database
query_date = end_date - datetime.timedelta(days=31)
with con:
while query_date < end_date:
query_date += datetime.timedelta(days=1)
for row in cities.iterrows():
cur.execute("INSERT INTO tidy_max_temp(city, todays_date) VALUES (?,?)", (str(row[1]['city']), int(query_date.strftime('%s')),))
query_date = end_date - datetime.timedelta(days=31) #set value each time through the loop of temperature collections
while query_date < end_date:
query_date += datetime.timedelta(days=1)
i = 0
for row in cities.iterrows():
url = base_url + row[1]['coordinates'] + ',' + query_date.strftime('%Y-%m-%dT12:00:00') #row is a 2-dimensional variable, containing city and coordinates for each row
r = requests.get(url)
print r.status_code
with con:
update_sql = """UPDATE tidy_max_temp SET max_temp = """ + str(r.json()['daily']['data'][0]['temperatureMax']) + """ WHERE city = '""" + str(row[1]['city']) + """' AND todays_date = """ + str(query_date.strftime('%s'))
print update_sql
cur.execute(update_sql)
i += 1
#Select and print range of temperatures for each city
Tempdf = pd.read_sql_query("SELECT * from tidy_max_temp", con)
cities_list = Tempdf['city'].unique()
tempString = '{0} Temperature Range is: {1}'
lastRange = 0
for city in cities_list:
    tempRange = Tempdf.ix[Tempdf['city'] == city, 'max_temp'].astype(float).max() - Tempdf.ix[Tempdf['city'] == city, 'max_temp'].astype(float).min()
    if tempRange > lastRange:  # compare numeric ranges, not their string forms
        lastRange = tempRange
        maxCity = city
    print tempString.format(city, tempRange)
print 'The city with the greatest change is ' + str(maxCity) + ' with a range of ' + str(lastRange)
tempString = '{0} Mean Temperature is: {1}'
for city in cities_list:
meanTemp = Tempdf.ix[Tempdf['city'] == city, 'max_temp']
meanTemp = meanTemp.astype(float)
meanTemp = str(meanTemp.mean())
print tempString.format(city, meanTemp)
tempString = '{0} Temperature Variance is: {1}'
for city in cities_list:
varTemp = Tempdf.ix[Tempdf['city'] == city, 'max_temp']
varTemp = varTemp.astype(float)
varTemp = str(varTemp.var())
print tempString.format(city, varTemp)
varTemp = 0
#Find change between days and variance of collected differences
tempString = '{0} Greatest 1 day Temperature Change is {1}'
tempString2 = '{0} Variance in temperature changes is {1}'
for city in cities_list:
cityTemp = Tempdf.ix[Tempdf['city'] == city, 'max_temp']
cityTemp = cityTemp.tolist()
maxDelta = 0
cityDiff = []
for i in range(len(cityTemp)):
if i > 0:
tmp = abs(float(cityTemp[i]) - float(cityTemp[i-1]))
cityDiff.append(tmp)
if tmp > maxDelta:
maxDelta = tmp
cityDiffdf = pd.DataFrame(cityDiff)
print tempString.format(city, maxDelta)
print tempString2.format(city, cityDiffdf[0].var())
| [
"matplotlib"
] |
94f4a019271893855a962515c0f63b9d026272db | Python | jamsheerkutty/ga-learner-dsmp-repo | /superhero-statistics/code.py | UTF-8 | 1,500 | 3.25 | 3 | [
"MIT"
] | permissive | # --------------
#Code starts here
sc_df=data[['Strength','Combat']]
sc_covariance=sc_df.cov().iloc[0,1]
sc_strength=sc_df['Strength'].std()
sc_combat=sc_df['Combat'].std()
sc_pearson=sc_covariance/(sc_strength*sc_combat)
ic_df=data[['Intelligence','Combat']]
ic_covariance=ic_df.cov().iloc[0,1]
ic_intelligence=ic_df['Intelligence'].std()
ic_combat=ic_df['Combat'].std()
ic_pearson=ic_covariance/(ic_intelligence*ic_combat)
# --------------
#Code starts here
total_high=data['Total'].quantile(0.99)
super_best=data[data['Total'] > total_high]
super_best_names=list(super_best['Name'])
print(super_best_names)
# --------------
#Code starts here
import matplotlib.pyplot as plt
import pandas as pd
fig=plt.figure()
ax_1 = fig.add_subplot(1,1,1)
ax_1.boxplot(data['Intelligence'])
ax_1.set_title('Intelligence')
plt.show()
ax_2 = fig.add_subplot(2,1,1)
ax_2.boxplot(data['Speed'])
ax_2.set_title('Speed')
plt.show()
ax_3 = fig.add_subplot(1,1,1)
ax_3.boxplot(data['Power'])
ax_3.set_title('Power')
plt.show()
# --------------
#Header files
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#path of the data file- path
data=pd.read_csv(path)
data['Gender'].replace('-','Agender',inplace=True)
gender_count=data['Gender'].value_counts()
plt.bar(gender_count,height=10)
#Code starts here
# --------------
#Code starts here
alignment=data['Alignment'].value_counts()
label='Alignment'
plt.pie(alignment)
| [
"matplotlib"
] |
e822eb52c4d06646eb51195dfe841f23273d2641 | Python | CAU-AI/SCIKIT | /data_plot.py | UTF-8 | 1,956 | 2.546875 | 3 | [] | no_license | from sklearn.model_selection import train_test_split
import numpy as np
from sklearn.naive_bayes import GaussianNB
import pandas as pd
from sklearn import preprocessing
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
xl = pd.ExcelFile("Music_style_train.xlsx")
data_excel = pd.read_excel(io=xl, sheetname=0, header=None)
answer_excel = pd.read_excel(io=xl, sheetname=1, header=None)
data = np.array(data_excel.values)
answer = np.array(answer_excel.values).flatten().transpose()
new_data = data.T
for k in range(0,376):
print(k)
y = []
a = []
b = []
aa = k
t = np.array(range(1,71))
for i in range(70):
y.append(data[i][aa])
z = np.array(y)
q = np.array(range(71,141))
for i in range(71,141):
a.append(data[i][aa])
w = np.array(a)
p = np.array(range(141,210))
for i in range(140,210):
b.append(data[i][aa])
i = np.array(b)
plt.figure()
plt.plot(t, z, "r.")
plt.plot(t, w, "b.")
plt.plot(t, i, "g.")
plt.show()
"""
scaler = preprocessing.Imputer(missing_values='NaN', strategy='mean', axis=0).fit(data.astype(float))
data = scaler.transform(data.astype(float))
aaa = [4, 7, 10, 21, 22, 27, 33, 37, 39, 45, 51, 52, 57, 61, 63, 67, 69, 73,72, 75, 79, 78, 93, 223,224, 225, 226, 227,228,229,230,231,232,233,234,235,263,264,265,266,267,302,303,304,305,306,307,308,309,310,311,312,313,327,338,
370,374,344]
aaa.sort()
for k in aaa:
print(k)
y = []
a = []
b = []
aa = k
t = np.array(range(1,71))
for i in range(70):
y.append(data[i][aa])
z = np.array(y)
q = np.array(range(71, 141))
for i in range(71, 141):
a.append(data[i][aa])
w = np.array(a)
p = np.array(range(141, 210))
for i in range(140, 210):
b.append(data[i][aa])
i = np.array(b)
plt.figure()
plt.plot(t,z,"r.")
plt.plot(t,w,"b.")
plt.plot(t,i,"g.")
plt.show()
"""
| [
"matplotlib"
] |
df4c57dab56b843ee1c76c59f171a3353c2c965c | Python | gavinxie7/CSV_Project | /csv_temps_1.py | UTF-8 | 617 | 3.15625 | 3 | [] | no_license | import matplotlib.pyplot as plt
import csv
open_file=open("sitka_weather_07-2018_simple.csv","r")
csv_file=csv.reader(open_file,delimiter=",")
header_row=next(csv_file)
#print(type(header_row))
for index,column_header in enumerate (header_row):
print(index,column_header)
highs=[]
for row in csv_file:
highs.append(int(row[5]))
#print(highs[:10])
plt.plot(highs,color='red')
plt.title("daily high temps, july 2018", fontsize=16)
plt.xlabel("",fontsize=16)
plt.ylabel("Temperature(F)",fontsize=16)
plt.tick_params(axis='both',which='major',labelsize=16)
#plt.plot([1,2,3,4,5],color='red')
plt.show() | [
"matplotlib"
] |
f1065cf3792ff346e0bd5c3a59ac6c71795efabb | Python | Sapphire0912/Embedded | /HW2 LBP/hw2.py | UTF-8 | 757 | 2.59375 | 3 | [] | no_license | from skimage.feature import local_binary_pattern
import matplotlib.pyplot as plt
import cv2
import numpy as np
path = "./road/road.jpg"
road = cv2.imread(path)
road_gray = cv2.cvtColor(road, cv2.COLOR_BGR2GRAY)
# use scikit-image module lbp
# use matplotlib imshow lbp image
radius = 1
n_points = 8 * radius
# lbp method returns the dtype and value of the image(current only the image)
# default: dtype float64, value 0 to 255
# ror: dtype float64, value 0 to 255
# nri_uniform: dtype float64, value 0 to 58
# uniform: dtype float64, value 0 to 9
# var: dtype float64, value has NaN
lbp = local_binary_pattern(road_gray, n_points, radius, method='var')
print(lbp[30:40, 790:800])
print(np.max(lbp), np.min(lbp))
plt.imshow(lbp, cmap='gray')
plt.show()
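# A common follow-up step (a sketch, not part of the original homework): summarise the LBP
# image as a normalized histogram of pattern codes before feeding it to a classifier.
# With method='uniform' the codes run from 0 to n_points + 1, i.e. 10 bins here.
lbp_uniform = local_binary_pattern(road_gray, n_points, radius, method='uniform')
hist, _ = np.histogram(lbp_uniform.ravel(), bins=np.arange(0, n_points + 3), density=True)
print(hist)  # 10-value LBP feature vector for the whole image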
| [
"matplotlib"
] |
5a41b12a5404655799825add102c94c092522155 | Python | enova47/shotchart | /app.py | UTF-8 | 18,180 | 2.640625 | 3 | [] | no_license | import dash
import dash_core_components as dcc
import dash_html_components as html
import io
from io import StringIO
import numpy as np
import pandas as pd
import plotly
import plotly.graph_objs as go
import requests
# Set the Dash App
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
server = app.server
# Import CSV into pandas
url = 'https://raw.githubusercontent.com/enova47/shotchart/master/shotchart.csv'
s=requests.get(url).content
shotchart=pd.read_csv(io.StringIO(s.decode('utf-8')),dtype={'GAME_ID': str,'GAME_EVENT_ID':str,'PLAYER_ID': str,'TEAM_NAME': str,'TEAM_ID':str,'PERIOD': str,'MINUTES_REMAINING':str,'SECONDS_REMAINING': str,'SHOT_DISTANCE':str})
# Create the court dimensions in plotly from: http://savvastjortjoglou.com/nba-shot-sharts.html
# and from: https://moderndata.plot.ly/nba-shots-analysis-using-plotly-shapes/
court_shapes = []
# Outer lines of the court
outer_lines_shape = dict(
type='rect',
xref='x',
yref='y',
# Bottom left edge of outer lines rectangle (x0,y0)
x0='-250',
y0='-47.5',
# Bottom right edge of outer lines rectangle (x1,y1)
x1='250',
y1='422.5',
line=dict(
color='rgba(10, 10, 10, 1)',
width=1
)
)
court_shapes.append(outer_lines_shape)
# Basketball hoop - use Circle shape with the center of the hoop at 0,0. Radius of 7.5.
hoop_shape = dict(
type='circle',
# All coordinate (x and y) references are in relation to the plot axis
xref='x',
yref='y',
x0='7.5',
y0='7.5',
x1='-7.5',
y1='-7.5',
# Set the color and style of circle
line=dict(
color='rgba(10,10,10,1)',
width=1
)
)
court_shapes.append(hoop_shape)
# Backboard
backboard_shape = dict(
type='rect',
xref='x',
yref='y',
x0='-30',
y0='-7.5',
x1='30',
y1='-6.5',
line=dict(
color='rgba(10, 10, 10, 1)',
width=1
),
fillcolor='rgba(10, 10, 10, 1)'
)
court_shapes.append(backboard_shape)
# The paint (outer and inner boxes as rectangles)
outer_three_sec_shape = dict(
type='rect',
xref='x',
yref='y',
x0='-80',
y0='-47.5',
x1='80',
y1='143.5',
line=dict(
color='rgba(10, 10, 10, 1)',
width=1
)
)
court_shapes.append(outer_three_sec_shape)
inner_three_sec_shape = dict(
type='rect',
xref='x',
yref='y',
x0='-60',
y0='-47.5',
x1='60',
y1='143.5',
line=dict(
color='rgba(10, 10, 10, 1)',
width=1
)
)
court_shapes.append(inner_three_sec_shape)
# Three point line areas
left_line_shape = dict(
type='line',
xref='x',
yref='y',
x0='-220',
y0='-47.5',
x1='-220',
y1='92.5',
line=dict(
color='rgba(10, 10, 10, 1)',
width=1
)
)
court_shapes.append(left_line_shape)
right_line_shape = dict(
type='line',
xref='x',
yref='y',
x0='220',
y0='-47.5',
x1='220',
y1='92.5',
line=dict(
color='rgba(10, 10, 10, 1)',
width=1
)
)
court_shapes.append(right_line_shape)
# Using the path type to draw the arc using a Curve (C) command
three_point_arc_shape = dict(
type='path',
xref='x',
yref='y',
path='M -220 92.5 C -70 300, 70 300, 220 92.5',
line=dict(
color='rgba(10, 10, 10, 1)',
width=1
)
)
court_shapes.append(three_point_arc_shape)
# Center circle (area surrounding halfcourt - logo lines)
center_circle_shape = dict(
type='circle',
xref='x',
yref='y',
x0='60',
y0='482.5',
x1='-60',
y1='362.5',
line=dict(
color='rgba(10, 10, 10, 1)',
width=1
)
)
court_shapes.append(center_circle_shape)
# Restraining circle (halfcourt small circle)
res_circle_shape = dict(
type='circle',
xref='x',
yref='y',
x0='20',
y0='442.5',
x1='-20',
y1='402.5',
line=dict(
color='rgba(10, 10, 10, 1)',
width=1
)
)
court_shapes.append(res_circle_shape)
# Free throw circle
free_throw_circle_shape = dict(
type='circle',
xref='x',
yref='y',
x0='60',
y0='200',
x1='-60',
y1='80',
line=dict(
color='rgba(10, 10, 10, 1)',
width=1
)
)
court_shapes.append(free_throw_circle_shape)
# Restricted area, using dash property to style the circle
res_area_shape = dict(
type='circle',
xref='x',
yref='y',
x0='40',
y0='40',
x1='-40',
y1='-40',
line=dict(
color='rgba(10, 10, 10, 1)',
width=1,
dash='dot'
)
)
court_shapes.append(res_area_shape)
# Initialize shotchart
name = 'Kawhi Leonard'
player = shotchart[shotchart.PLAYER_NAME == name]
missed_shot_trace = go.Scattergl(
# Missed shots are 0, Made shots are 1
x=player[player.SHOT_MADE_FLAG == 0]['LOC_X'],
y=player[player.SHOT_MADE_FLAG == 0]['LOC_Y'],
mode='markers',
name='Miss',
marker={'color': 'blue', 'size': 5},
text=[str(htm) + str(vtm) + str(at) + str(st) + str(sz) + str(pr) + str(mr) + str(sr) + str(gd) for
htm, vtm, at, st, sz, pr, mr, sr, gd in zip(player[player.SHOT_MADE_FLAG == 0]['HTM'] + ' vs ',
player[player.SHOT_MADE_FLAG == 0]['VTM'] + '<br>',
player[player.SHOT_MADE_FLAG == 0]['ACTION_TYPE'] + '<br>',
player[player.SHOT_MADE_FLAG == 0]['SHOT_TYPE'] + ', ',
player[player.SHOT_MADE_FLAG == 0]['SHOT_ZONE_RANGE'] + '<br>',
'Q' + player[player.SHOT_MADE_FLAG == 0]['PERIOD'] + ', ',
player[player.SHOT_MADE_FLAG == 0]['MINUTES_REMAINING'] + ':',
player[player.SHOT_MADE_FLAG == 0]['SECONDS_REMAINING'] + ' remaining' + '<br>',
player[player.SHOT_MADE_FLAG == 0]['GAME_DATE'])],
hoverinfo='text',
)
made_shot_trace = go.Scattergl(
x=player[player.SHOT_MADE_FLAG == 1]['LOC_X'],
y=player[player.SHOT_MADE_FLAG == 1]['LOC_Y'],
mode='markers',
name='Make',
marker={'color': 'red', 'size': 5},
text=[str(htm) + str(vtm) + str(at) + str(st) + str(sz) + str(pr) + str(mr) + str(sr) + str(gd) for
htm, vtm, at, st, sz, pr, mr, sr, gd in zip(player[player.SHOT_MADE_FLAG == 1]['HTM'] + ' vs ',
player[player.SHOT_MADE_FLAG == 1]['VTM'] + '<br>',
player[player.SHOT_MADE_FLAG == 1]['ACTION_TYPE'] + '<br>',
player[player.SHOT_MADE_FLAG == 1]['SHOT_TYPE'] + ', ',
player[player.SHOT_MADE_FLAG == 1]['SHOT_ZONE_RANGE'] + '<br>',
'Q' + player[player.SHOT_MADE_FLAG == 1]['PERIOD'] + ', ',
player[player.SHOT_MADE_FLAG == 1]['MINUTES_REMAINING'] + ':',
player[player.SHOT_MADE_FLAG == 1]['SECONDS_REMAINING'] + ' remaining' + '<br>',
player[player.SHOT_MADE_FLAG == 1]['GAME_DATE'])],
hoverinfo='text',
)
# Set the dropdown menus for shotchart
app.layout = html.Div([
html.Div([
html.Label('Player'),
dcc.Dropdown(
id='PLAYER_NAME',
options=[{'label': i, 'value': i} for i in shotchart['PLAYER_NAME'].unique()],
value = 'Kawhi Leonard',
)
],
style={'width': '49%', 'display': 'inline-block'}),
html.Div([
html.Label('Season Type'),
dcc.Dropdown(
id='SEASON_TYPE',
options=[{'label': i, 'value': i} for i in shotchart['SEASON_TYPE'].unique()],
value = 'Regular Season',
)
],
style={'width': '49%', 'float': 'right', 'display': 'inline-block'}),
html.Div([
html.Label('Shot Type'),
dcc.Dropdown(
id='ACTION_TYPE',
options=[{'label': i, 'value': i} for i in shotchart['ACTION_TYPE'].unique()],
)
],
style={'width': '49%', 'display': 'inline-block'}),
html.Div([
html.Label('Shot Distance'),
dcc.Dropdown(
id='SHOT_ZONE_RANGE',
options=[{'label': i, 'value': i} for i in shotchart['SHOT_ZONE_RANGE'].unique()],
)
],
style={'width': '49%', 'float': 'right', 'display': 'inline-block'}),
html.Div([
html.Label('Shot Zone'),
dcc.Dropdown(
id='SHOT_ZONE_AREA',
options=[{'label': i, 'value': i} for i in shotchart['SHOT_ZONE_AREA'].unique()],
)
],
style={'width': '49%', 'display': 'inline-block'}),
html.Div([
html.Label('Game Date'),
dcc.Dropdown(
id='GAME_DATE',
options=[{'label': i, 'value': i} for i in shotchart['GAME_DATE'].unique()],
)
],
style={'width': '49%', 'float': 'right', 'display': 'inline-block'}),
html.Div([
html.Label('Quarter'),
dcc.Dropdown(
id='PERIOD',
options=[{'label': i, 'value': i} for i in shotchart['PERIOD'].unique()],
)
],
style={'width': '49%', 'display': 'inline-block'}),
html.Div([
html.Label('Opponent'),
dcc.Dropdown(
id='OPPONENT',
options=[{'label': i, 'value': i} for i in shotchart['OPPONENT'].unique()],
)
],
style={'width': '49%', 'float': 'right', 'display': 'inline-block'}),
html.Div([
html.Label('Time Remaining'),
dcc.Dropdown(
id='TIME_GROUP',
options=[{'label': i, 'value': i} for i in shotchart['TIME_GROUP'].unique()],
)
],
style={'width': '49%', 'display': 'inline-block'}),
html.Div([
html.Label('Score Margin'),
dcc.Dropdown(
id='POINTS_DIFFERENCE',
options=[{'label': i, 'value': i} for i in shotchart['POINTS_DIFFERENCE'].unique()],
)
],
style={'width': '49%', 'float': 'right', 'display': 'inline-block'}),
html.Div([
html.Label("Player's Team"),
dcc.Dropdown(
id='TEAM_NAME',
options=[{'label': i, 'value': i} for i in shotchart['TEAM_NAME'].unique()],
value = 'Toronto Raptors',
)
],
style={'width': '49%', 'float': 'right', 'display': 'inline-block'}),
html.Div([
dcc.Graph(
id='shot_chart',
figure={
'data': [missed_shot_trace,made_shot_trace],
'layout': go.Layout(
title = name + ' - All Shots, 2018-19 ' + 'Regular Season',
showlegend = True,
xaxis = {'showgrid':False, 'range':[-300,300]},
yaxis = {'showgrid':False, 'range':[500,-100]},
height = 600,
width= 650,
shapes = court_shapes
)
}
)
])
])
# Need to add:
# V2: Filter by season (set season for shotchartdetail, check if can import from CSV)
# V2: Filter by assisted or not (merge play by play v2 into shotchart to get assisted data)
# V2: Return game score in hovertext (Play by Play)
# V2: Player picture (see Sam Liebman's documentation)
# V2: Graph or table showing shooting percentages, etc.
# V2: Video link (get from NBA.com - 2016 season+) - Example: https://stats.nba.com/events/?flag=1&GameID=0021800001&GameEventID=7&Season=2018-19&title=MISS%20Covington%2027%27%203PT%20Jump%20Shot&sct=plot
# V2: Reset button - See David Comfort Dash
# V2: Print to PDF button - See David Comfort Dash
@app.callback(
dash.dependencies.Output('shot_chart', 'figure'),
[dash.dependencies.Input('PLAYER_NAME','value'),
dash.dependencies.Input('SEASON_TYPE','value'),
dash.dependencies.Input('ACTION_TYPE','value'),
dash.dependencies.Input('SHOT_ZONE_RANGE','value'),
dash.dependencies.Input('SHOT_ZONE_AREA','value'),
dash.dependencies.Input('GAME_DATE','value'),
dash.dependencies.Input('PERIOD','value'),
dash.dependencies.Input('OPPONENT','value'),
dash.dependencies.Input('TIME_GROUP','value'),
dash.dependencies.Input('POINTS_DIFFERENCE','value'),
dash.dependencies.Input('TEAM_NAME','value')])
def update_graph(player_name, season_type, action_type, shot_zone_range, shot_zone_area, game_date, period, opponent, time_group, points_difference, team_name):
    # For each dropdown: if the selected value exists in the corresponding column, filter the player dataframe by it; otherwise ignore that filter.
player = shotchart
if player_name in list(player.PLAYER_NAME):
player = player[player.PLAYER_NAME == player_name]
# Season Type
if season_type in list(player.SEASON_TYPE):
player = player[player.SEASON_TYPE == season_type]
# Shot Type
if action_type in list(player.ACTION_TYPE):
player = player[player.ACTION_TYPE == action_type]
# Shot Distance
if shot_zone_range in list(player.SHOT_ZONE_RANGE):
player = player[player.SHOT_ZONE_RANGE == shot_zone_range]
# Shot Zone
if shot_zone_area in list(player.SHOT_ZONE_AREA):
player = player[player.SHOT_ZONE_AREA == shot_zone_area]
# Game Date
if game_date in list(player.GAME_DATE):
player = player[player.GAME_DATE == game_date]
# Quarter
if period in list(player.PERIOD):
player = player[player.PERIOD == period]
# Opponent
if opponent in list(player.OPPONENT):
player = player[player.OPPONENT == opponent]
# Time Remaining
if time_group in list(player.TIME_GROUP):
player = player[player.TIME_GROUP == time_group]
# Scoring Margin
if points_difference in list(player.POINTS_DIFFERENCE):
player = player[player.POINTS_DIFFERENCE == points_difference]
# Player's Team
if team_name in list(player.TEAM_NAME):
player = player[player.TEAM_NAME == team_name]
missed_shot_trace = go.Scattergl(
# Missed shots are 0, Made shots are 1
x=player[player.SHOT_MADE_FLAG == 0]['LOC_X'],
y=player[player.SHOT_MADE_FLAG == 0]['LOC_Y'],
mode='markers',
name='Miss',
marker={'color': 'blue', 'size': 5},
text=[str(htm) + str(vtm) + str(at) + str(st) + str(sz) + str(pr) + str(mr) + str(sr) + str(gd) for
htm, vtm, at, st, sz, pr, mr, sr, gd in zip(player[player.SHOT_MADE_FLAG == 0]['HTM'] + ' vs ',
player[player.SHOT_MADE_FLAG == 0]['VTM'] + '<br>',
player[player.SHOT_MADE_FLAG == 0]['ACTION_TYPE'] + '<br>',
player[player.SHOT_MADE_FLAG == 0]['SHOT_TYPE'] + ', ',
player[player.SHOT_MADE_FLAG == 0]['SHOT_ZONE_RANGE'] + '<br>',
'Q' + player[player.SHOT_MADE_FLAG == 0]['PERIOD'] + ', ',
player[player.SHOT_MADE_FLAG == 0]['MINUTES_REMAINING'] + ':',
player[player.SHOT_MADE_FLAG == 0]['SECONDS_REMAINING'] + ' remaining' + '<br>',
player[player.SHOT_MADE_FLAG == 0]['GAME_DATE'])],
hoverinfo='text',
)
made_shot_trace = go.Scattergl(
x=player[player.SHOT_MADE_FLAG == 1]['LOC_X'],
y=player[player.SHOT_MADE_FLAG == 1]['LOC_Y'],
mode='markers',
name='Make',
marker={'color': 'red', 'size': 5},
text=[str(htm) + str(vtm) + str(at) + str(st) + str(sz) + str(pr) + str(mr) + str(sr) + str(gd) for
htm, vtm, at, st, sz, pr, mr, sr, gd in zip(player[player.SHOT_MADE_FLAG == 1]['HTM'] + ' vs ',
player[player.SHOT_MADE_FLAG == 1]['VTM'] + '<br>',
player[player.SHOT_MADE_FLAG == 1]['ACTION_TYPE'] + '<br>',
player[player.SHOT_MADE_FLAG == 1]['SHOT_TYPE'] + ', ',
player[player.SHOT_MADE_FLAG == 1]['SHOT_ZONE_RANGE'] + '<br>',
'Q' + player[player.SHOT_MADE_FLAG == 1]['PERIOD'] + ', ',
player[player.SHOT_MADE_FLAG == 1]['MINUTES_REMAINING'] + ':',
player[player.SHOT_MADE_FLAG == 1]['SECONDS_REMAINING'] + ' remaining' + '<br>',
player[player.SHOT_MADE_FLAG == 1]['GAME_DATE'])],
hoverinfo='text',
)
return{
'data': [missed_shot_trace,made_shot_trace],
'layout': go.Layout(
title = player_name + ' - All Shots, 2018-19 ' + season_type,
showlegend = True,
xaxis = {'showgrid':False, 'range':[-300,300]},
yaxis = {'showgrid':False, 'range':[500,-100]},
height = 600,
width= 650,
shapes = court_shapes
)
}
if __name__ == '__main__':
app.run_server(debug=True) | [
"plotly"
] |
109dd0d434c67a9ae559d5af420992ac63148453 | Python | feifanrensheng/dmp_writing | /code/test_kernel_shape.py | UTF-8 | 2,553 | 3.0625 | 3 | [] | no_license | import numpy as np
import math
import matplotlib.pyplot as plt
from matplotlib import rc
def kernel_shape():
import seaborn as sns
sns.set()
# sns.axes_style("white")
# plt.set_figheight(15)
# plt.set_figwidth(15)
f = plt.figure(figsize=(10,7))
plt.ylabel(r"$\phi (x) $")
plt.xticks([])
plt.yticks([])
ax = f.add_subplot(221)
ax2 = f.add_subplot(222)
ax3 = f.add_subplot(223)
ax4 = f.add_subplot(224)
x = np.arange(-0.5, 1.5, 1e-4)
y1 = gaussian(x, 0.03, 0.2)
y2 = gaussian(x, 0.03, 0.5)
y3 = gaussian(x, 0.03, 0.8)
ax.plot(x, y1)
ax.plot(x, y2)
ax.plot(x, y3)
ax.set_xticks(np.arange(0, 1.2, 0.2))
ax.set_yticks(np.arange(0, 1.2, 0.2))
ax.set_xlabel("(a)")
# fig.suptitle('Gaussian Kernel', fontsize=6)
yy1 = tr_gaussian(x, 0.03, 0.2)
yy2 = tr_gaussian(x, 0.03, 0.5)
yy3 = tr_gaussian(x, 0.03, 0.8)
ax2.plot(x, yy1)
ax2.plot(x, yy2)
ax2.plot(x, yy3)
ax2.set_xticks(np.arange(0, 1.2, 0.2))
ax2.set_yticks(np.arange(0, 1.2, 0.2))
ax2.set_xlabel("(b)")
# ax2.ylabel(r"$\phi (x) $")
# fig.suptitle('Truncated Gaussian Kernel', fontsize=6)
yy1 = tr_gaussian1(x, 0.03, 0.2)
yy2 = tr_gaussian1(x, 0.03, 0.5)
yy3 = tr_gaussian1(x, 0.03, 0.8)
ax3.plot(x, yy1)
ax3.plot(x, yy2)
ax3.plot(x, yy3)
ax3.set_xticks(np.arange(0, 1.2, 0.2))
ax3.set_yticks(np.arange(0, 1.2, 0.2))
ax3.set_xlabel("(c)")
# ax3.ylabel(r"$\phi (x) $")
# fig.suptitle('Truncated Gaussian Kernel', fontsize=6)
yy1 = tr_gaussian1(x, 0.03, 0.4)
yy2 = tr_gaussian1(x, 0.03, 0.5)
yy3 = tr_gaussian1(x, 0.03, 0.6)
ax4.plot(x, yy1)
ax4.plot(x, yy2)
ax4.plot(x, yy3)
ax4.set_xticks(np.arange(0, 1.2, 0.2))
ax4.set_yticks(np.arange(0, 1.2, 0.2))
ax4.set_xlabel("(d)")
# ax4.ylabel(r"$\phi (x) $")
# fig.suptitle('Truncated Gaussian Kernel', fontsize=6)
plt.show()
def gaussian(x, sigma, mu):
# a = 1/(math.sqrt(2 * math.pi * sigma))
a = 1.0
b = -1/(2 * sigma)
x = b * (x - mu)**2
x = np.exp(x)
y = a * x
return y
def tr_gaussian(x, sigma, mu):
a = 1.0
b = -1/(2 * sigma)
xx = b * (x - mu)**2
xx = np.exp(xx)
y = a * xx
yy = np.where(abs(x-mu)<0.15, y, 0)
return yy
def tr_gaussian1(x, sigma, mu):
a = 1.0
b = -1/(2 * sigma)
xx = b * (x - mu)**2
xx = np.exp(xx)
y = a * xx
yy = np.where(abs(x-mu)<0.10, y, 0)
return yy
if __name__ == "__main__":
kernel_shape() | [
"matplotlib",
"seaborn"
] |
0a99c8a3e6062127548973de74f981ae5da5a54a | Python | alaalial/relancer-artifact | /relancer-exp/original_notebooks/pavansubhasht_ibm-hr-analytics-attrition-dataset/baseline-eda-and-attrition-prediction-pipeline.py | UTF-8 | 26,692 | 2.90625 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env python
# coding: utf-8
# ## WELCOME TO IBM HR ATTRITION EXPLORATION AND PREDICTION KERNEL
# NOTE: I'm not a native English Speaker, so sorry for any english mistakes
#
# The main objective is to explore the data and create a model to predict the Attrition of IBM workers.
#
# ### The problem
# We will explore and try to predict the Attrition of IBM HR Analytics data. <br>
#
# What Is Attrition?<br>
# Attrition in business describes a gradual but deliberate reduction in staff numbers that occurs as employees retire or resign and are <b>not replaced</b>. The term is also sometimes used to describe the loss of customers or clients as they mature beyond a product or company's target market without being replaced by a younger generation.
#
# Important: Attrition is one way a company can decrease labor costs without the disruption of layoffs.
#
#
# ### Questions
# I will start with some questions that may help me in the exploration:
# - What's the % of Attrition in the IBM HR dataset?
# - What's the distribution of Ages?
# - What's the difference between Genders?
# - Are years of experience important to Attrition?
# - Do the performance or job satisfaction distributions say anything about Attrition?
# - Are people who live far from the job more prone to Attrition?
# - And many more questions that could help us to understand the data and get some insights.
#
# ### After EDA:
# I will create a Pipeline to find the model that best fits the data;
# Also, I will create a Hyperopt model to find the best parameters to predict the Attrition of workers;
# ______________________________________
# <br>
# - I hope you enjoy the Kernel. <br>
# - If you think it is useful for you, please vote and give me your feedback =)
# ## Importing libraries
# In[ ]:
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from scipy import stats
import os
import matplotlib.pyplot as plt
import seaborn as sns
# Standard plotly imports
import plotly.plotly as py
import plotly.graph_objs as go
import plotly.tools as tls
from plotly.offline import iplot, init_notebook_mode
# Using plotly + cufflinks in offline mode
init_notebook_mode(connected=True)
#Importing the auxiliary and preprocessing libraries
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report
from sklearn.model_selection import cross_val_score
from sklearn.preprocessing import LabelEncoder, StandardScaler
from sklearn.pipeline import Pipeline
from sklearn.utils.multiclass import unique_labels
from sklearn.model_selection import train_test_split, KFold, cross_validate
#Models
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from sklearn.svm import SVC
from sklearn.linear_model import RidgeClassifier, SGDClassifier, LogisticRegression
from sklearn.svm import SVC, LinearSVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
from xgboost import XGBClassifier
from sklearn.naive_bayes import MultinomialNB
from sklearn.feature_selection import SelectFromModel
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier, ExtraTreesClassifier
from sklearn.ensemble import BaggingClassifier, VotingClassifier, RandomTreesEmbedding
# In[ ]:
df_train = pd.read_csv("../../../input/pavansubhasht_ibm-hr-analytics-attrition-dataset/WA_Fn-UseC_-HR-Employee-Attrition.csv")
# In[ ]:
def resumetable(df):
print(f"Dataset Shape: {df.shape}")
summary = pd.DataFrame(df.dtypes,columns=['dtypes'])
summary = summary.reset_index()
summary['Name'] = summary['index']
summary = summary[['Name','dtypes']]
summary['Missing'] = df.isnull().sum().values
summary['Uniques'] = df.nunique().values
summary['First Value'] = df.loc[0].values
summary['Second Value'] = df.loc[1].values
summary['Third Value'] = df.loc[2].values
for name in summary['Name'].value_counts().index:
summary.loc[summary['Name'] == name, 'Entropy'] = round(stats.entropy(df[name].value_counts(normalize=True), base=2),2)
return summary
def dummies(df, list_cols):
for col in list_cols:
df_dummies = pd.get_dummies(df[col], drop_first=True, prefix=(str(col)))
df = pd.concat([df, df_dummies], axis=1)
df.drop(col, axis=1, inplace=True)
return df
def get_ratio(df, cat_col):
attr_temp = pd.DataFrame(df.groupby([cat_col, 'Attrition'])['EmployeeNumber'].count().unstack('Attrition').reset_index())
attr_temp['ratio'] = round(attr_temp['Yes'] / (attr_temp['Yes'] + attr_temp['No']) * 100,2)
attr_temp = attr_temp[[cat_col, 'ratio']]
return attr_temp
# ## Getting the summary of our data
# In[ ]:
summary = resumetable(df_train)
summary
# Based on this first summary of the data, we can see that we don't have any missing values and 3 columns have constant values.<br>
# Before we continue, I will drop the constant features; <br>
# It's very cool to see that we have a lot of categorical features!!! So we could get some interesting insights <br>
# The shape of our data is (1470, 35) and EmployeeNumber is the Id of our dataset. <br>
# It is also important to note that entropy measures the disorder of each feature, i.e. how much information the feature contains.
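# A quick illustration of the Entropy column above (small sketch, same call as in resumetable):
# Shannon entropy, base 2, of the normalized value counts of a single feature.
print(round(stats.entropy(df_train['Attrition'].value_counts(normalize=True), base=2), 2))
# ~0.64 bits for the ~84/16 Yes/No split, against 1.0 for a perfectly balanced binary feature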
# ## Categorical features with maximum 10 unique values
# In[ ]:
# Categorical features
cat_cols = ['Over18', 'StandardHours', 'EmployeeCount', 'Gender', 'PerformanceRating', 'OverTime', 'MaritalStatus', 'Department', 'BusinessTravel', 'StockOptionLevel', 'EnvironmentSatisfaction', 'JobInvolvement', 'JobSatisfaction', 'RelationshipSatisfaction', 'WorkLifeBalance', 'Education', 'JobLevel', 'EducationField', 'TrainingTimesLastYear', 'JobRole', 'NumCompaniesWorked']
# constant features
const_list = ['EmployeeCount', 'Over18', 'StandardHours']
# ## Dropping constant features
# In[ ]:
df_train.drop(const_list,axis=1, inplace=True)
# ## Visualizing the distribution of Data Types
# In[ ]:
print("DATA TYPES: ")
print(summary['dtypes'].value_counts())
# - Nice. Now we will start exploring the features
# ## I will take a look at the Attrition feature, which is our Target
# In[ ]:
print("The % distribution of Attrition features is: ")
print(round(df_train['Attrition'].value_counts(normalize=True),2)*100)
plt.figure(figsize=(10,7))
g = sns.countplot(df_train["Attrition"], color='green')
g.set_title("Attrition Distribution", fontsize=22)
g.set_ylabel('Count', fontsize=18)
g.set_xlabel('Attrition True or False', fontsize=18)
print()
# Cool, we have 16% of true values in our target. It's imbalanced data, but nothing too terrible. <br>
# Let's keep exploring the features to see whether we can get some insights about the IBM workers
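# A hedged aside (not something this kernel does below): with only ~16% positive rows, one
# simple option at modelling time is to stratify the train/test split and let the estimator
# re-weight the classes. Minimal sketch using two raw numeric columns only:
y_tmp = (df_train['Attrition'] == 'Yes').astype(int)
X_tmp = df_train[['Age', 'MonthlyIncome']]
X_tr, X_te, y_tr, y_te = train_test_split(X_tmp, y_tmp, test_size=.25, stratify=y_tmp, random_state=42)
print(LogisticRegression(class_weight='balanced', solver='liblinear').fit(X_tr, y_tr).score(X_te, y_te))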
# ## Plotting categorical features
# - First, I will plot all features that have fewer than 11 values; I will do this because categories with few values are the easiest to explore.
# - I will drop the constant columns and the Attrition feature.
#
# In[ ]:
## Filtering the constant features and the target
cat_cols = [col for col in cat_cols if col not in (const_list +['Attrition'])]
# ## Categoricals by ATTRITION
# - Just features with maximum 10 values
# In[ ]:
print("UNDERSTANDING THE CATEGORICAL DISTRIBUTION BY THE TARGET (ATTRITION)")
print("NOTE: - It's a plot just about the columns with maximum 10 values.")
fig, axes = plt.subplots(nrows=8, ncols=2, figsize=(18,35))
fig.subplots_adjust(hspace=0.5, bottom=0)
# fig.suptitle('BINARY FEATURES by the TARGET feature', fontsize=22)
for ax, catplot in zip(axes.flatten(), cat_cols):
sns.countplot(x=catplot, data=df_train, hue='Attrition', ax=ax, )
    ## Getting the ratio of Years with current manager, just to test in the graphs
ax.set_title(catplot.upper(), fontsize=18)
ax.set_ylabel('Count', fontsize=16)
ax.set_xlabel(f'{catplot} Values', fontsize=15)
ax.legend(title='Attrition', fontsize=12)
# plt.tight_layout()
# Cool!!! We can see that some features are associated with a higher chance of Attrition. <br>
# Some features in which Attrition shows different patterns are: stockoptionlevel, performancerating, overtime, department, jobinvolvement
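# To put numbers on that observation, the get_ratio helper defined above returns the attrition %
# per category value - e.g. for OverTime (a small sketch, not part of the original flow):
print(get_ratio(df_train, 'OverTime'))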
# ## Continuous and large categorical features
# - Now I will explore the features with more than 10 values and try to get some insights, using Attrition to see the different patterns
# In[ ]:
# PercentSalaryHike, DistanceFromHome, Age
print(f'Minimum age on dataset is {df_train["Age"].min()} and the maximum age is {df_train["Age"].max()}')
plt.figure(figsize=(16,22))
plt.subplot(3,1,1)
g = sns.distplot(df_train[df_train['Attrition'] == 'Yes']['Age'], label='Yes')
g = sns.distplot(df_train[df_train['Attrition'] == 'No']['Age'], label="No")
g.set_xticklabels(g.get_xticklabels(),rotation=0)
g.legend(title='Attrition')
g.set_title("Age Distribution by Attrition", fontsize=22)
g.set_xlabel("Age Distribution", fontsize=18)
g.set_ylabel("Probability", fontsize=18)
plt.subplot(3,1,2)
g1 = sns.violinplot(x='DistanceFromHome', y='Age', hue='Attrition', split=True, data=df_train, size=3)
g1.set_xticklabels(g1.get_xticklabels(),rotation=0)
g1.set_title("Distance From Home Distribution by Attrition and Age", fontsize=22)
g1.set_xlabel("Distance From Home", fontsize=18)
g1.set_ylabel("Age Distribution", fontsize=18)
plt.subplot(3,1,3)
g2 = sns.violinplot(x='PercentSalaryHike', y='Age', split=True, hue='Attrition',data=df_train)
g2.set_xticklabels(g2.get_xticklabels(),rotation=0)
g2.set_title("Percent Salary Hike Distribution by Attrition and Age", fontsize=22)
g2.set_xlabel("Percent Salary Hike", fontsize=18)
g2.set_ylabel("Age Distribution", fontsize=18)
plt.subplots_adjust(hspace = 0.4)
print()
# Very insightful information!! <br>
# Based on the charts we can see that attrition is more probable among younger people;
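# A quick check of that reading of the chart (small sketch, not in the original notebook):
print(df_train.groupby('Attrition')['Age'].mean())  # mean age of leavers vs. stayers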
# In[ ]:
medimum_feats = ['PercentSalaryHike', 'YearsSinceLastPromotion', 'YearsWithCurrManager', 'YearsInCurrentRole', 'DistanceFromHome']
big_feats = [ 'YearsAtCompany', 'TotalWorkingYears', 'Age','HourlyRate']
# ## Job Features
# In[ ]:
plt.figure(figsize=(16,22))
plt.subplot(3,1,1)
g = sns.violinplot(x='YearsInCurrentRole', y= 'Age', split=True, hue='Attrition', data=df_train)
g.set_xticklabels(g.get_xticklabels(),rotation=0)
g.set_title("Years In Current Role by Attrition and Age", fontsize=22)
g.set_xlabel("Years In Current Role", fontsize=18)
g.set_ylabel("Age Distribution", fontsize=18)
plt.subplot(3,1,2)
g1 = sns.violinplot(x='YearsSinceLastPromotion', y= 'Age', split=True, hue='Attrition', data=df_train)
g1.set_xticklabels(g1.get_xticklabels(),rotation=0)
g1.set_title("Years Since Last Promotion Distribution by Attrition and Age", fontsize=22)
g1.set_xlabel("Years since last Promotion", fontsize=18)
g1.set_ylabel("Age Distribution", fontsize=18)
plt.subplot(3,1,3)
g2 = sns.violinplot(x='YearsAtCompany', y= 'Age', split=True, hue='Attrition', data=df_train)
g2.set_xticklabels(g2.get_xticklabels(),rotation=0)
g2.set_title("Years At Company by Attrition and Age", fontsize=22)
g2.set_xlabel("Years In Current Role", fontsize=18)
g2.set_ylabel("Age Distribution", fontsize=18)
plt.subplots_adjust(hspace = 0.4)
print()
# Cool. On Years at Company we can see that people with more than 12 years are less likely to leave the company.<br>
# We can see the same pattern on Years in Current Role... After 15 years in the current role, we don't see a significant number of attritions;
#
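# The 12-year remark can be checked directly (sketch): attrition share above vs. below that tenure.
print(df_train.groupby(df_train['YearsAtCompany'] > 12)['Attrition'].value_counts(normalize=True))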
# ## I will try some visuals with the Monthly Income
# In[ ]:
print(f"The minimum value Income in dataset is {df_train['MonthlyIncome'].min()} and maximum {df_train['MonthlyIncome'].max()}" )
# In[ ]:
print("Monthly Income Quantiles Distribution: ")
print(df_train['MonthlyIncome'].quantile([.01, .25, .5, .75, .99]))
# In[ ]:
def get_ratio(df, cat_col):
attr_temp = df.groupby([cat_col, 'Attrition'])['EmployeeNumber'].count().unstack('Attrition').reset_index()
attr_temp['ratio'] = round(attr_temp['Yes'] / (attr_temp['Yes'] + attr_temp['No']) * 100,2)
attr_temp = attr_temp[[cat_col, 'ratio']]
return attr_temp
# In[ ]:
plt.figure(figsize=(16,22))
plt.subplot(3,1,1)
g = sns.violinplot(x='YearsWithCurrManager', y= 'MonthlyIncome', hue='Attrition',data=df_train, split=True)
g.set_xticklabels(g.get_xticklabels(),rotation=0)
g.set_title("Years With Current Manager Distribution by Attrition and Monthly Income", fontsize=22)
g.set_xlabel("Years with current Manager", fontsize=18)
g.set_ylabel("Monthly Income Distribution", fontsize=18)
print()
attr_temp = get_ratio(df_train, 'YearsWithCurrManager')
ax2 = g.twinx()  # assumed: secondary y-axis for the attrition % line (ax2 was not defined in the source)
gg = sns.lineplot(x='YearsWithCurrManager', y= 'ratio', ax=ax2, lw=3, markers='o', label="Attrition %", color='black', data=attr_temp)
gg.legend( loc = (.85, .05), frameon = False)
plt.subplot(3,1,2)
g1 = sns.swarmplot(x='TotalWorkingYears', y= 'MonthlyIncome', dodge=True, hue='Attrition', data=df_train)
print()
attr_temp = get_ratio(df_train, 'TotalWorkingYears')
ax3 = g1.twinx()  # assumed: secondary y-axis for the attrition % line (ax3 was not defined in the source)
gg = sns.lineplot(x='TotalWorkingYears', y= 'ratio', ax=ax3, lw=3, markers='o', label="Attrition %", color='black', data=attr_temp)
gg.legend( loc = (.85, .05), frameon = False)
g1.set_xticklabels(g1.get_xticklabels(),rotation=0)
g1.set_title("Total Working Years Distribution by Attrition and Age", fontsize=22)
g1.set_xlabel("Total Working Years", fontsize=18)
g1.set_ylabel("Age Distribution", fontsize=18)
plt.subplot(3,1,3)
g2 = sns.swarmplot(x='HourlyRate', y='TotalWorkingYears', hue='Attrition', data=df_train)
g2.set_title("Hourly Rate Distribution by Attrition and Age", fontsize=22)
g2.set_xlabel("Hourly Rate", fontsize=18)
g2.set_ylabel("Age Distribution", fontsize=18)
g2.set_xticklabels(g2.get_xticklabels(),rotation=0)
print()
attr_temp = get_ratio(df_train, 'HourlyRate')
ax4 = g2.twinx()  # assumed: secondary y-axis for the attrition % line (ax4 was not defined in the source)
gg = sns.lineplot(x='HourlyRate', y= 'ratio', ax=ax4, lw=3, markers='o', label="Attrition %", color='black', data=attr_temp)
gg.legend( loc = (.85, .05), frameon = False)
plt.subplots_adjust(hspace = 0.4)
print()
# Very cool... These features are very meaningful and show some patterns in Attrition; <br>
# We can see a clear pattern in HourlyRate.
# ## Monthly Income x Age by Attrition
# In[ ]:
plt.figure(figsize=(14,7))
ax= sns.scatterplot(x='MonthlyIncome', y='Age', data=df_train, hue='Attrition', alpha=0.8, size=df_train['NumCompaniesWorked'])
ax.set_title("Age distribution by Monthly Income separated by Attrition", fontsize=22)
ax.set_xlabel("Monthly Income", fontsize=18)
ax.set_ylabel("Age Distribution", fontsize=18)
print()
# In[ ]:
# ## Feature selection and Preprocessing
# In[ ]:
def LongDisWL1(data) :
if data['DistanceFromHome'] > 11 and data['WorkLifeBalance'] == 1 :
return 1
else :
return 0
def LongDisJobS1(data) :
if data['DistanceFromHome'] > 11 and data['JobSatisfaction'] == 1 :
return 1
else :
return 0
def LongDisJL1(data) :
if data['DistanceFromHome'] > 11 and data['JobLevel'] == 1 :
return 1
else :
return 0
def ShortDisNotSingle(data) :
if data['MaritalStatus'] != 'Single' and data['DistanceFromHome'] < 5:
return 1
else :
return 0
def LongDisSingle(data) :
if data['MaritalStatus'] == 'Single' and data['DistanceFromHome'] > 11:
return 1
else :
return 0
def Engaged(data) :
if data['Age'] > 35 and data['MaritalStatus'] != 'Single':
return 1
else :
return 0
def YoungAndBadPaid(data) :
if data['Age'] < 35 and data['Age'] > 23 and (data['MonthlyIncome'] < 3500):
return 1
else :
return 0
def YoungNeverEngaged(data) :
if data['Age'] < 24 and data['MaritalStatus'] == 'Single' :
return 1
else :
return 0
# ## Feature engineering
# In[ ]:
## These features I took from the amazing kernel of Vicent Lugat
## https://www.kaggle.com/kernels/scriptcontent/10006574/notebook
df_train['sales_dep'] = [1 if val == 'Sales' else 0 for val in df_train['Department']]
df_train['JobInvolvCut'] = [1 if val < 2.5 else 0 for val in df_train['JobInvolvement']]
df_train['MiddleTraining'] = [1 if (val >= 3 and val <= 6) else 0 for val in df_train['TrainingTimesLastYear']]
df_train['MoovingPeople'] = [1 if (val >4) else 0 for val in df_train['NumCompaniesWorked']]
df_train['MiddleTraining'] = [1 if (val >= 3 and val <= 6) else 0 for val in df_train['TrainingTimesLastYear']]
df_train['TotalSatisfaction_mean'] = (df_train['RelationshipSatisfaction'] + df_train['EnvironmentSatisfaction'] + df_train['JobSatisfaction'] + df_train['JobInvolvement'] + df_train['WorkLifeBalance']) / 5
df_train['NotSatif'] = [1 if val < 2.35 else 0 for val in df_train['TotalSatisfaction_mean']]
df_train['LongDisWL1'] = df_train.apply(lambda data:LongDisWL1(data) ,axis = 1)
df_train['LongDis'] = [1 if val > 11 else 0 for val in df_train['DistanceFromHome']]
df_train['LongDisJobS1'] = df_train.apply(lambda data: LongDisJobS1(data) ,axis = 1)
df_train['LongDisJL1'] = df_train.apply(lambda data:LongDisJL1(data) ,axis = 1)
df_train['ShortDisNotSingle'] = df_train.apply(lambda data:ShortDisNotSingle(data) ,axis = 1)
df_train['LongDisSingle'] = df_train.apply(lambda data:LongDisSingle(data) ,axis = 1)
df_train['Engaged'] = df_train.apply(lambda data:Engaged(data) ,axis = 1)
df_train['YoungAndBadPaid'] = df_train.apply(lambda data:YoungAndBadPaid(data) ,axis = 1)
df_train['YoungNeverEngaged'] = df_train.apply(lambda data:YoungNeverEngaged(data) ,axis = 1)
df_train['Time_in_each_comp'] = (df_train['Age'] - 20) / ((df_train)['NumCompaniesWorked'] + 1)
df_train['RelSatisf_mean'] = (df_train['RelationshipSatisfaction'] + df_train['EnvironmentSatisfaction']) / 2
df_train['JobSatisf_mean'] = (df_train['JobSatisfaction'] + df_train['JobInvolvement']) / 2
df_train['Income_Distance'] = df_train['MonthlyIncome'] / df_train['DistanceFromHome']
df_train['Hrate_Mrate'] = df_train['HourlyRate'] / df_train['MonthlyRate']
df_train['Stability'] = df_train['YearsInCurrentRole'] / df_train['YearsAtCompany']
df_train['Stability'].fillna((df_train['Stability'].mean()), inplace=True)
df_train['Income_YearsComp'] = df_train['MonthlyIncome'] / df_train['YearsAtCompany']
df_train['Income_YearsComp'] = df_train['Income_YearsComp'].replace(np.Inf, 0)
df_train['Fidelity'] = (df_train['NumCompaniesWorked']) / df_train['TotalWorkingYears']
df_train['Fidelity'] = df_train['Fidelity'].replace(np.Inf, 0)
# def attr_ratio(df, col):
# attr = df.groupby([col, 'Attrition'])['EmployeeNumber'].nunique().unstack('Attrition').reset_index()
# attr['ratio'] = attr['Yes'] / (attr['No'] + attr['Yes'])
#
# return attr
# ## Preprocessing
# In[ ]:
#customer id col
Id_col = ['EmployeeNumber']
#Target columns
target_col = ["Attrition"]
#categorical columns
cat_cols = df_train.nunique()[df_train.nunique() <= 10].keys().tolist()
cat_cols = [x for x in cat_cols if x not in target_col]
#numerical columns
num_cols = [x for x in df_train.columns if x not in cat_cols + target_col + Id_col]
#Binary columns with 2 values
bin_cols = df_train.nunique()[df_train.nunique() == 2].keys().tolist()
#Columns more than 2 values
multi_cols = [i for i in cat_cols if i not in bin_cols]
# In[ ]:
dict_binary_cols = {'Attrition':{'Yes':1, 'No':0}, 'Gender':{'Female':0, 'Male':1}, 'OverTime':{'Yes':1,'No':0}}
df_train.replace(dict_binary_cols, inplace=True)
# In[ ]:
nom_cats = ['BusinessTravel', 'Department', 'EducationField', 'JobRole', 'MaritalStatus']
df_train = dummies(df_train, nom_cats)
# In[ ]:
#Finally, let's look at the correlation of df_train
plt.figure(figsize=(20,15))
plt.title('Correlation of Features for Train Set', fontsize=25)
print()
print()
#
# In[ ]:
# Threshold for removing correlated variables
threshold = 0.80
# Absolute value correlation matrix
corr_matrix = df_train.corr().abs()
# Getting the upper triangle of correlations
upper = corr_matrix.where(np.triu(np.ones(corr_matrix.shape), k=1).astype(bool))
# In[ ]:
# Select columns with correlations above threshold
to_drop = [column for column in upper.columns if any(upper[column] > threshold)]
print('There are %d columns to remove.' % (len(to_drop)))
print(list(to_drop))
# In[ ]:
df_train = df_train.drop(columns = to_drop)
print('Training shape: ', df_train.shape)
# In[ ]:
X_train, X_val, y_train, y_val = train_test_split(df_train.drop('Attrition', axis=1), df_train['Attrition'], test_size=.25)
# ## Creating the pipeline to compare classification algorithmns
# In[ ]:
clfs = []
seed = 3
clfs.append(("LogReg", Pipeline([("Scaler", StandardScaler()), ("LogReg", LogisticRegression())])))
clfs.append(("XGBClassifier", Pipeline([("Scaler", StandardScaler()), ("XGB", XGBClassifier())])))
clfs.append(("KNN", Pipeline([("Scaler", StandardScaler()), ("KNN", KNeighborsClassifier())])))
clfs.append(("DecisionTreeClassifier", Pipeline([("Scaler", StandardScaler()), ("DecisionTrees", DecisionTreeClassifier())])))
clfs.append(("RandomForestClassifier", Pipeline([("Scaler", StandardScaler()), ("RandomForest", RandomForestClassifier())])))
clfs.append(("GradientBoostingClassifier", Pipeline([("Scaler", StandardScaler()), ("GradientBoosting", GradientBoostingClassifier(max_features=15, n_estimators=600))])))
clfs.append(("RidgeClassifier", Pipeline([("Scaler", StandardScaler()), ("RidgeClassifier", RidgeClassifier())])))
clfs.append(("BaggingRidgeClassifier", Pipeline([("Scaler", StandardScaler()), ("BaggingClassifier", BaggingClassifier())])))
clfs.append(("ExtraTreesClassifier", Pipeline([("Scaler", StandardScaler()), ("ExtraTrees", ExtraTreesClassifier())])))
#'neg_mean_absolute_error', 'neg_mean_squared_error','r2'
scoring = 'accuracy'
n_folds = 10
results, names = [], []
for name, model in clfs:
kfold = KFold(n_splits=n_folds, random_state=seed)
cv_results = cross_val_score(model, X_train, y_train, cv=kfold, scoring=scoring, n_jobs=-1)
names.append(name)
results.append(cv_results)
msg = "%s: %f (+/- %f)" % (name, cv_results.mean(), cv_results.std())
print(msg)
# boxplot algorithm comparison
fig = plt.figure(figsize=(15,6))
fig.suptitle('Classifier Algorithm Comparison', fontsize=22)
ax = fig.add_subplot(111)
sns.boxplot(data=results)
ax.set_xticklabels(names)
ax.set_xlabel("Algorithmn", fontsize=20)
ax.set_ylabel("Accuracy of Models", fontsize=18)
ax.set_xticklabels(ax.get_xticklabels(),rotation=45)
print()
# - Cool!!! Logistic Regression has the best result to predict Attrition. I will use a GLM algo to get some insights and see the feature importances.
# In[ ]:
# Create regularization penalty space
penalty = ['l1', 'l2']
# Create regularization hyperparameter space
C = np.logspace(0, 5, 25)
# Create hyperparameter options
hyperparameters = dict(C=C, penalty=penalty)
# In[ ]:
import scipy as sp
from hyperopt import fmin, hp, tpe, Trials, space_eval, STATUS_OK, STATUS_RUNNING
from functools import partial
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import confusion_matrix
def objective(params):
clf = LogisticRegression(**params, solver='liblinear' )
score = cross_val_score(clf, X_train, y_train, scoring='accuracy', cv=StratifiedKFold()).mean()
print("Accuracy {:.8f} params {}".format(-score, params))
return -score
space = { 'penalty': hp.choice('penalty', ['l1', 'l2']), 'C': hp.choice('C', np.logspace(5, 10, 50))}
best = fmin(fn=objective, space=space, algo=tpe.suggest, max_evals=150)
# In[ ]:
best = {'C': 13894954.94373136, 'penalty': 'l2'}  # re-specified manually: fmin with hp.choice returns option indices rather than the values themselves
# In[ ]:
logreg = LogisticRegression(**best, solver='liblinear')
# In[ ]:
logreg.fit(X_train, y_train, )
# ## Testing our model in a unseen data
# In[ ]:
accuracy_score(y_val, logreg.predict(X_val))
# It's slightly better than the standard model that we used on Pipeline.
# Let's see the Auc and Confusion matrix to understand the classification
# In[ ]:
from sklearn.metrics import classification_report
from sklearn.utils.multiclass import unique_labels
target_names = ['Yes', 'No']
print(classification_report(y_val, logreg.predict(X_val), target_names=target_names))
# ## Confusion matrix
# In[ ]:
class_names = df_train['Attrition'].unique()
def plot_confusion_matrix(y_true, y_pred, classes,
normalize=False,
title=None,
cmap=plt.cm.Blues):
""" This function prints and plots the confusion matrix. Normalization can be applied by setting `normalize=True`. """
if not title:
if normalize:
title = 'Normalized confusion matrix'
else:
title = 'Confusion matrix, without normalization'
# Compute confusion matrix
cm = confusion_matrix(y_true, y_pred)
# Only use the labels that appear in the data
classes = classes[unique_labels(y_true, y_pred)]
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix: ")
else:
print('Confusion matrix, without normalization: ')
print(cm)
fig, ax = plt.subplots()
im = ax.imshow(cm, interpolation='nearest', cmap=cmap)
print()
# We want to show all ticks...
ax.set(xticks=np.arange(cm.shape[1]), yticks=np.arange(cm.shape[0]), xticklabels=classes, yticklabels=classes, title=title, ylabel='True label', xlabel='Predicted label')
# Rotate the tick labels and set their alignment.
plt.setp(ax.get_xticklabels(), rotation=45, ha="right", rotation_mode="anchor")
# Loop over data dimensions and create text annotations.
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i in range(cm.shape[0]):
for j in range(cm.shape[1]):
ax.text(j, i, format(cm[i, j], fmt), ha="center", va="center", color="white" if cm[i, j] > thresh else "black")
fig.tight_layout()
return ax
np.set_printoptions(precision=2)
# Plot non-normalized confusion matrix
plot_confusion_matrix(y_val, logreg.predict(X_val), classes=class_names, title='Confusion matrix, without normalization')
# Plot normalized confusion matrix
plot_confusion_matrix(y_val, logreg.predict(X_val), classes=class_names, normalize=True, title='Normalized confusion matrix')
print()
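# ## Feature importances from the GLM coefficients
# A minimal sketch: the logistic regression coefficient magnitudes give a rough view of which features drive the prediction.
# Note this final model was fit on unscaled features, so the magnitudes are only indicative.
# In[ ]:
coef = pd.Series(logreg.coef_[0], index=X_train.columns).sort_values()
plt.figure(figsize=(10, 12))
coef.plot(kind='barh')
plt.title("Logistic Regression coefficients (rough feature importance)", fontsize=18)
print()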
# In[ ]:
# ## NOTE: I am working on this kernel, it's not finished yet.
# If you liked it, don't forget to vote up the kernel !!! =)
| [
"matplotlib",
"seaborn",
"plotly"
] |
d085020ae4d3aacac202bb2501446e2a76e796f0 | Python | fuad3501/Optimisation | /univariate_optimisation.py | UTF-8 | 1,128 | 3.765625 | 4 | [] | no_license | import numpy as np
from scipy import optimize
import matplotlib.pyplot as plt
# Define Univariate Function
def poly(x):
return x * (x - 1) * (x + 1) * (x - 4)
x = np.linspace(-2, 4.5, 100)
y = poly(x)
plt.figure()
plt.plot(x, y)
plt.show()
# univariate optimisation to find lowest value of the function
x_opt = optimize.minimize_scalar(poly)
print(x_opt)
# Output:
# fun: -24.05727870023589
# nfev: 16
# nit: 11
# success: True
# x: 3.0565452919448806
# multivariate optimisation method using a start value
x_opt = optimize.fmin(poly, -2)
print(x_opt)
# Output:
# Optimization terminated successfully.
# Current function value: -1.766408
# Iterations: 17
# Function evaluations: 34
# [-0.60097656]
# => finds local minima but not global minimum
# the same optimiser started from a better initial guess
x_opt = optimize.fmin(poly, 1)
print(x_opt)
# Output:
# Optimization terminated successfully.
# Current function value: -24.057279
# Iterations: 19
# Function evaluations: 38
# [3.05654297]
# => finds correct global minimum as shown in first optimiser
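# A minimal follow-up sketch: restart the local optimiser from several initial guesses
# and keep the best candidate, which avoids getting stuck in the local minimum above.
# (scipy also provides global routines such as optimize.basinhopping.)
starts = [-2, 0, 1, 4]
candidates = [optimize.fmin(poly, s, disp=False) for s in starts]
x_best = min(candidates, key=lambda c: poly(c[0]))
print(x_best)
# Output (approximately): [3.05654297]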
| [
"matplotlib"
] |
a034a692c892c0a9feeb1756a6d149f48356d17e | Python | afraniofilho/Arduino-Python-Integration | /serial oximetro.py | UTF-8 | 1,059 | 3.453125 | 3 | [] | no_license | #Importar bibliotecas
import serial
import numpy as np
import matplotlib.pyplot as plt
from drawnow import *
oxF = []
arduinoData = serial.Serial(port='com8', baudrate = 9600) # create a serial object (arduinoData) on communication port 'com8'
plt.ion() # put matplotlib in interactive mode so the plot updates automatically
cnt = 0
def makeFig(): # function that draws the plot
plt.ylim(70, 100)
plt.title('Saturação do Oxigênio %')
plt.grid(True)
plt.ylabel('%')
plt.plot(oxF, 'ro-')
while True:
    arduinoString = arduinoData.readline() # read one line from the serial port
    dataArray = arduinoString[:2] # take the first 2 characters (the oxygen value)
    ox = float(dataArray) # convert to float and store in ox
    oxF.append(ox) # append the ox reading to the list
    drawnow(makeFig) # redraw the plot
    cnt += 1
    print(oxF)
    # if(cnt>20):
    #     oxF = []
    #     cnt=0
    #     arduinoData.reset_input_buffer()
| [
"matplotlib"
] |
a7cbadd97aaf7716b55b3c07c018f77d11c62995 | Python | kotsky/ai-studies | /Simple AI/Test.py | UTF-8 | 2,754 | 2.9375 | 3 | [] | no_license | import numpy as np
import matplotlib.pyplot as plt
class Data:  # placeholder dataset class (unused in this sketch)
    pass
class plot_error_surfaces(object):
# Constructor
def __init__(self, w_range, b_range, X, Y, n_samples=30, go=True):
W = np.linspace(-w_range, w_range, n_samples)
B = np.linspace(-b_range, b_range, n_samples)
w, b = np.meshgrid(W, B)
Z = np.zeros((30, 30))
count1 = 0
self.y = Y.numpy()
self.x = X.numpy()
for w1, b1 in zip(w, b):
count2 = 0
for w2, b2 in zip(w1, b1):
Z[count1, count2] = np.mean((self.y - w2 * self.x + b2) ** 2)
count2 += 1
count1 += 1
self.Z = Z
self.w = w
self.b = b
self.W = []
self.B = []
self.LOSS = []
self.n = 0
if go == True:
plt.figure()
plt.figure(figsize=(7.5, 5))
plt.axes(projection='3d').plot_surface(self.w, self.b, self.Z, rstride=1, cstride=1, cmap='viridis',
edgecolor='none')
plt.title('Cost/Total Loss Surface')
plt.xlabel('w')
plt.ylabel('b')
plt.show()
plt.figure()
plt.title('Cost/Total Loss Surface Contour')
plt.xlabel('w')
plt.ylabel('b')
plt.contour(self.w, self.b, self.Z)
plt.show()
# Setter
def set_para_loss(self, W, B, loss):
self.n = self.n + 1
self.W.append(W)
self.B.append(B)
self.LOSS.append(loss)
# Plot diagram
def final_plot(self):
ax = plt.axes(projection='3d')
ax.plot_wireframe(self.w, self.b, self.Z)
ax.scatter(self.W, self.B, self.LOSS, c='r', marker='x', s=200, alpha=1)
plt.figure()
plt.contour(self.w, self.b, self.Z)
plt.scatter(self.W, self.B, c='r', marker='x')
plt.xlabel('w')
plt.ylabel('b')
plt.show()
# Plot diagram
def plot_ps(self):
plt.subplot(121)
plt.plot(self.x, self.y, 'ro', label="training points")
plt.plot(self.x, self.W[-1] * self.x + self.B[-1], label="estimated line")
plt.xlabel('x')
plt.ylabel('y')
plt.ylim((-10, 15))
plt.title('Data Space Iteration: ' + str(self.n))
plt.subplot(122)
plt.contour(self.w, self.b, self.Z)
plt.scatter(self.W, self.B, c='r', marker='x')
plt.title('Total Loss Surface Contour Iteration' + str(self.n))
plt.xlabel('w')
plt.ylabel('b')
plt.show()
# Minimal usage sketch (assumes PyTorch is available, since the class calls .numpy() on its X/Y inputs):
import torch
X = torch.arange(-3, 3, 0.1).view(-1, 1)
Y = 1 * X - 1 + 0.1 * torch.randn(X.size())
surface = plot_error_surfaces(15, 15, X, Y, go=False)
surface.set_para_loss(W=1.0, B=-1.0, loss=0.5)  # record one (w, b, loss) point so plot_ps() has data to show
surface.plot_ps()
| [
"matplotlib"
] |
c868e0b221a333764b6ece595cdf5f0e993165d0 | Python | shahid-peel/Self-driving-cars_term1_project3 | /model.py | UTF-8 | 5,890 | 2.65625 | 3 | [] | no_license | import csv
import cv2
import numpy as np
from random import shuffle
import sklearn
def generator(samples, batch_size=32):
num_samples = len(samples)
while 1: # Loop forever so the generator never terminates
shuffle(samples)
for offset in range(0, num_samples, batch_size):
batch_samples = samples[offset:offset+batch_size]
images = []
measurements = []
# angles = []
for batch_sample in batch_samples:
# name = './IMG/'+batch_sample[0].split('/')[-1]
# center_image = cv2.imread(name)
# center_angle = float(batch_sample[3])
# images.append(center_image)
# angles.append(center_angle)
source_path = batch_sample[0]
filename = source_path.split('/')[-1]
current_path = './data/IMG/' + filename
image = cv2.imread(current_path)
#gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
images.append(image)
measurement = float(batch_sample[3])
measurements.append(measurement)
source_path = batch_sample[1]
filename = source_path.split('/')[-1]
current_path = './data/IMG/' + filename
image = cv2.imread(current_path)
# gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
images.append(image)
measurement = float(batch_sample[3]) + 0.20
measurements.append(measurement)
source_path = batch_sample[2]
filename = source_path.split('/')[-1]
current_path = './data/IMG/' + filename
image = cv2.imread(current_path)
# gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
images.append(image)
measurement = float(batch_sample[3]) - 0.20
measurements.append(measurement)
aug_images = []
aug_measurements = []
for image, measurement in zip(images, measurements):
image_small = image #cv2.resize(image, (0,0), fx=0.5, fy=0.5)
aug_images.append(image_small)
aug_measurements.append(measurement)
aug_images.append(cv2.flip(image_small,1))
aug_measurements.append(-measurement)
# trim image to only see section with road
X_train = np.array(aug_images)
y_train = np.array(aug_measurements)
# y_train = np.array(angles)
yield sklearn.utils.shuffle(X_train, y_train)
lines = []
with open('./data/driving_log.csv') as csvfile:
reader = csv.reader(csvfile)
for line in reader:
try:
float(line[3])
except ValueError:
print("Skipping header line")
continue
lines.append(line)
# images = []
# measurements = []
# for line in lines:
# try:
# float(line[3])
# except ValueError:
# print("Skipping header line")
# continue
#
# source_path = line[0]
# filename = source_path.split('/')[-1]
# current_path = './data/IMG/' + filename
# image = cv2.imread(current_path)
# #gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# images.append(image)
# measurement = float(line[3])
# measurements.append(measurement)
# images.append(np.fliplr(image))
# measurements.append(-measurement)
#
#
# X_train = np.array(images)
# y_train = np.array(measurements)
#shuffle(lines)
#shuffle(lines)
#shuffle(lines)
lines = lines[:int(len(lines)*1.0)]
num_of_samples = len(lines)
train_samples = lines[:int(num_of_samples*0.8)]
validation_samples = lines[int(num_of_samples*0.8):]
print('total samples = ' + str(6*len(lines)))
print('training samples = ' + str(6*len(train_samples)))
print('validation samples = ' + str(6*len(validation_samples)))
# compile and train the model using the generator function
train_generator = generator(train_samples, batch_size=100)
validation_generator = generator(validation_samples, batch_size=100)
from keras.models import Sequential, Model
from keras.layers import Flatten, Dense, Dropout
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Lambda
from keras.layers import Cropping2D
from keras.layers.convolutional import Convolution2D
import matplotlib.pyplot as plt
model = Sequential()
model.add(Lambda(lambda x: x/255.0 - 0.5, input_shape=(160,320,3)))
model.add(Cropping2D(cropping=((70,25), (0,0))))
model.add(Convolution2D(24, 5, 5, subsample=(2,2), activation='relu'))
model.add(Convolution2D(36, 5, 5, subsample=(2,2), activation='relu'))
model.add(Convolution2D(48, 5, 5, subsample=(2,2), activation='relu'))
model.add(Convolution2D(64, 3, 3, activation='relu'))
model.add(Convolution2D(64, 3, 3, activation='relu'))
#model.add(MaxPooling2D(pool_size=(2, 2)))
#model.add(Conv2D(36, 5, 5, activation='relu'))
#model.add(MaxPooling2D(pool_size=(2, 2)))
#model.add(Conv2D(48, 5, 5, activation='relu'))
#model.add(Dropout(0.25))
#model.add(Conv2D(64, 3, 3, activation='relu'))
#model.add(MaxPooling2D(pool_size=(2, 2)))
# model.add(Conv2D(24, 5, 5, subsample=(2,2), activation='relu'))
# model.add(Conv2D(36, 5, 5, subsample=(2,2), activation='relu'))
# model.add(Conv2D(48, 5, 5, activation='relu'))
# model.add(Conv2D(64, 3, 3, activation='relu'))
# model.add(Conv2D(64, 3, 3, activation='relu'))
#model.add(MaxPooling2D(pool_size=(2, 2)))
# model.add(Conv2D(64, 3, 3, activation='relu'))
model.add(Flatten())
#model.add(Dense(1000))
# model.add(Dropout(0.5))
model.add(Dense(100))
model.add(Dense(50))
model.add(Dense(10))
model.add(Dense(1))
model.compile(loss='mse', optimizer='adam')
#model.fit(X_train, y_train, validation_split=0.2, shuffle=True, nb_epoch=2)
history_object = model.fit_generator(train_generator, samples_per_epoch =
6*len(train_samples), validation_data =
validation_generator,
nb_val_samples = 6*len(validation_samples),
nb_epoch=5, verbose=1)
### print the keys contained in the history object
print(history_object.history.keys())
model.save('model.h5')
### plot the training and validation loss for each epoch
plt.plot(history_object.history['loss'])
plt.plot(history_object.history['val_loss'])
plt.title('model mean squared error loss')
plt.ylabel('mean squared error loss')
plt.xlabel('epoch')
plt.legend(['training set', 'validation set'], loc='upper right')
plt.show()
| [
"matplotlib"
] |
d7652acf62618dbdd1b5476fc95d72e033c67f7b | Python | emedvedev97/python.lab1 | /lab1woFor.py | UTF-8 | 381 | 2.515625 | 3 | [] | no_license | import numpy as np
import matplotlib.pyplot as plt
from keras.datasets import mnist
X_size = 5
Y_size = 3
(X_train, y_train), (X_test, y_test) = mnist.load_data()
# pick X_size*Y_size random digit indices and gather those 28x28 images
nummas = np.random.randint(0, 60000, X_size*Y_size)
vvv = X_train[nummas]
# tile the images into a Y_size x X_size grid without loops:
# reshape to (rows, cols, 28, 28), swap the row-pixel and column axes, then flatten
vvv = vvv.reshape(Y_size,X_size,28,28)
vvv = vvv.transpose(0,2,1,3).reshape(Y_size*28,X_size*28)
plt.imshow(vvv , cmap='Greys')
plt.show()
| [
"matplotlib"
] |
1cdd359fdf0792f34f5766958eeb8825954d9d94 | Python | S-John-S/MAT | /tem_plot.py | UTF-8 | 252 | 2.609375 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from numpy import genfromtxt
data=genfromtxt("Temp_plot2",names=['x','y','z'])
ax=plt.subplot(111)
ax.plot(data['y'],data['z'])
plt.savefig('Temp_plot2.png')
| [
"matplotlib"
] |
da7d734c7a021b2432a29142903dc12eb720e2d7 | Python | saiharsha-22/Pattern-Recognition | /Measure Performance of K-Nearest Neighbors (KNN) Classifier/KNN.py | UTF-8 | 2,770 | 3.09375 | 3 | [] | no_license | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
df = pd.read_csv('train.txt', sep=",", header=None, dtype='int64')
arr = df.values
df2 = pd.read_csv('test.txt', sep=",", header=None, dtype='int64')
test = df2.values
w1x = []
w1y = []
w2x = []
w2y = []
for i in range(len(arr)):
if arr[i][2] == 1:
w1x.append(arr[i][0])
w1y.append(arr[i][1])
else:
w2x.append(arr[i][0])
w2y.append(arr[i][1])
list_f = np.empty((0, 4), int)
for i in range(len(test)):
list2 = []
class_1 = []
for j in range(len(arr)):
val1 = np.power((test[i][0] - arr[j][0]), 2)
val2 = np.power((test[i][1] - arr[j][1]), 2)
distance = np.sqrt(val1 + val2)
list2.append(distance)
class_1.append(arr[j][2])
list_f = np.append(list_f, np.array([[test[i][0], test[i][1], list2, class_1]]), 0)
predicted_list = []
k = int(input("Value of K: "))
def sort(arr, arr2):
n = len(arr)
for i in range(n):
for j in range(0, n-i-1):
if arr[j] > arr[j+1]:
arr[j], arr[j+1] = arr[j+1], arr[j]
arr2[j], arr2[j+1] = arr2[j+1], arr2[j]
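# A minimal alternative sketch: np.argsort can co-sort the distance and class lists
# in one step instead of the bubble sort above.
def sort_by_distance(distances, classes):
    order = np.argsort(distances)
    return list(np.asarray(distances)[order]), list(np.asarray(classes)[order])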
list_f2 = np.empty((0, 3), int)
for i in range(len(list_f)):
sort(list_f[i][2], list_f[i][3])
sort_dis = list_f[i][2]
sort_class = list_f[i][3]
c1, c2 = 0, 0
for j in range(k):
if sort_class[j] == 1:
c1 = c1+1
else:
c2 = c2+1
if c1 > c2:
list_f2 = np.append(list_f2, np.array([[list_f[i][0], list_f[i][1], 1]]), 0)
predicted_list.append(1)
else:
list_f2 = np.append(list_f2, np.array([[list_f[i][0], list_f[i][1], 2]]), 0)
predicted_list.append(2)
f = open("prediction.txt", "w")
for i in range(len(list_f)):
f.write("Test point: %d, %d\n" % (list_f[i][0], list_f[i][1]))
dis = list_f[i][2]
cls = list_f[i][3]
for j in range(k):
f.write("Distance %d: %f \t Class: %d \n" % ((j+1), dis[j], cls[j]))
f.write("Predicted class:%d \n\n" % (predicted_list[i]))
f.close()
w1x_new = []
w1y_new = []
w2x_new = []
w2y_new = []
for i in range(len(list_f2)):
if list_f2[i][2] == 1:
w1x_new.append(list_f2[i][0])
w1y_new.append(list_f2[i][1])
elif list_f2[i][2] == 2:
w2x_new.append(list_f2[i][0])
w2y_new.append(list_f2[i][1])
plt.plot(w1x, w1y, '+r', label='Class1 train')
plt.plot(w2x, w2y, 'ob', label='Class2 train')
plt.plot(w1x_new, w1y_new, '*g', label='Class1 classified')
plt.plot(w2x_new, w2y_new, '^y', label='Class2 classified')
plt.title('K-Nearest Neighbors(KNN)')
plt.legend()
plt.show()
| [
"matplotlib"
] |
dc0c2c6f5922a6903e3909222c625250932e1dbd | Python | KathyHuancayo/Modelo-de-detecci-n | /Dataset ISOT - Clase Mayoritaria Inicial/k-nearest neighbours.py | UTF-8 | 6,153 | 2.5625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Fri Apr 17 20:20:00 2020
@author: Sony
"""
import time
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
import matplotlib.patches as mpatches
import seaborn as sns
# %matplotlib inline  # IPython magic, only valid inside a notebook
plt.rcParams['figure.figsize'] = (16, 9)
plt.style.use('ggplot')
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score, cross_val_predict, KFold
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
import sklearn.metrics
from sklearn import metrics
# LOAD THE DATA
datos = pd.read_csv('C://Users//Sony//Desktop//TESIS 2//isot_app_and_botnet_dataset//botnet_data//Clase_Mayoritaria.csv')
df=pd.DataFrame(datos)
X = datos[['Src_Port','Dst_Port','Protocol','Flow_Duration','Tot_Fwd_Pkts','Tot_Bwd_Pkts',
'TotLen_Fwd_Pkts','TotLen_Bwd_Pkts','Fwd_Pkt_Len_Mean','Fwd Pkt Len Max','Fwd Pkt Len Min',
'Bwd_Pkt_Len_Mean','Flow_Byts/s','Flow_Pkts/s','Fwd_Pkts/s','Bwd_Pkts/s','Subflow_Fwd_Byts','Subflow_Bwd_Byts','Subflow_Bwd_Pkts','Subflow_Fwd_Pkts']]
y=datos['Output']
X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.2, random_state=0)
X_train.shape
X_test.shape
y_train.shape
y_test.shape
k_range = list(range(1,20))
scores = []
for k in k_range:
knn = KNeighborsClassifier(n_neighbors = k,algorithm='kd_tree',
leaf_size=30,
metric='minkowski',
metric_params=None,
n_jobs=1,
p=2,
weights='uniform')
knn.fit(X_train, y_train)
scores.append(knn.score(X_test, y_test))
df = pd.DataFrame({"Max K": k_range, "Average Accuracy":scores })
df = df[["Max K", "Average Accuracy"]]
print(df.to_string(index=False))
plt.figure()
plt.xlabel('k')
plt.ylabel('accuracy')
plt.scatter(k_range, scores)
plt.xticks([0,5,10,15,20])
start = time.time()
knn = KNeighborsClassifier(algorithm='kd_tree', leaf_size=30, metric='minkowski',
metric_params=None, n_jobs=None, n_neighbors=1, p=2,
weights='uniform')
knn.fit(X_train, y_train)
end = time.time()
print ("K-Nearest Neighbors", end - start)
predictions=knn.predict(X_test)
# VALIDATE THE MODEL
results = cross_val_score(knn, X_train, y_train,scoring='accuracy', cv=5)
print("Accuracy: %.3f%% (%.3f%%)" % (results.mean()*100, results.std()*100))
print ('CROSS-VALIDATION SCORES:')
print(results)
#------------------------------PER-CLASS RESULTS REPORT-------------------------------
print(classification_report(y_test,predictions))
print("PRECISIÓN PARA DETECTAR DIFERENTES MUESTRAS DE BOTNET ", metrics.precision_score(y_test, predictions,average=None)*100)
print("RECALL PARA DETECTAR DIFERENTES MUESTRAS DE BOTNET: ", metrics.recall_score(y_test, predictions, average=None)*100)
print ("EXACTITUD DEL MODELO: ", sklearn.metrics.accuracy_score(y_test, predictions, normalize=True)*100)
print(knn.score(X_test,y_test))
#------------------------------CONFUSION MATRIX FOR VALIDATION-------------------------------
x_axis_labels = ['Zeus','Citadel','Citadel2'] # labels for x-axis
y_axis_labels = ['Zeus','Citadel','Citadel2'] # labels for y-axis
print("MATRIZ DE CONFUSIÓN PARA VALIDACION: ")
cm1=confusion_matrix(y_test, predictions)
print(cm1)
plt.figure(figsize=(12, 12))
sns.heatmap(cm1,xticklabels=x_axis_labels, yticklabels=y_axis_labels ,annot=True, fmt="d");
plt.title("Confusion matrix")
plt.ylabel('True class')
plt.xlabel('Predicted class')
plt.show()
# PREDICT ZEUS BOTNET:
X_test = pd.DataFrame(columns=('Src_Port','Dst_Port','Protocol','Flow_Duration','Tot_Fwd_Pkts','Tot_Bwd_Pkts',
'TotLen_Fwd_Pkts','TotLen_Bwd_Pkts','Fwd_Pkt_Len_Mean','Fwd Pkt Len Max','Fwd Pkt Len Min',
'Bwd_Pkt_Len_Mean','Flow_Byts/s','Flow_Pkts/s','Fwd_Pkts/s','Bwd_Pkts/s','Subflow_Fwd_Byts','Subflow_Bwd_Byts','Subflow_Bwd_Pkts','Subflow_Fwd_Pkts','Output'))
X_test.loc[0] = (57366,53,17,216,1,1,34,118,34,34,34,118,703703.7037,9259.259259,4629.62963,4629.62963,17,59,0,0,4)
y_pred = knn.predict(X_test.drop(['Output'], axis = 1))
print("Prediccion: " + str(y_pred))
y_proba = knn.predict_proba(X_test.drop(['Output'], axis = 1))
print("Probabilidad de Acierto: " + str(np.round(np.asarray(y_proba[0][y_pred])* 100, 2)))
# PREDICT CITADEL BOTNET:
X_test = pd.DataFrame(columns=('Src_Port','Dst_Port','Protocol','Flow_Duration','Tot_Fwd_Pkts','Tot_Bwd_Pkts',
'TotLen_Fwd_Pkts','TotLen_Bwd_Pkts','Fwd_Pkt_Len_Mean','Fwd Pkt Len Max','Fwd Pkt Len Min',
'Bwd_Pkt_Len_Mean','Flow_Byts/s','Flow_Pkts/s','Fwd_Pkts/s','Bwd_Pkts/s','Subflow_Fwd_Byts','Subflow_Bwd_Byts','Subflow_Bwd_Pkts','Subflow_Fwd_Pkts','Output'))
X_test.loc[0] = (63587,53,17,370,1,1,37,121,37,37,37,121,427027.027,5405.405405,2702.702703,2702.702703,18,60,0,0,6)
y_pred = knn.predict(X_test.drop(['Output'], axis = 1))
print("Prediccion: " + str(y_pred))
y_proba = knn.predict_proba(X_test.drop(['Output'], axis = 1))
print("Probabilidad de Acierto: " + str(np.round(np.asarray(y_proba[0][y_pred])* 100, 2)))
# PREDICT CITADEL2 BOTNET:
X_test = pd.DataFrame(columns=('Src_Port','Dst_Port','Protocol','Flow_Duration','Tot_Fwd_Pkts','Tot_Bwd_Pkts',
'TotLen_Fwd_Pkts','TotLen_Bwd_Pkts','Fwd_Pkt_Len_Mean','Fwd Pkt Len Max','Fwd Pkt Len Min',
'Bwd_Pkt_Len_Mean','Flow_Byts/s','Flow_Pkts/s','Fwd_Pkts/s','Bwd_Pkts/s','Subflow_Fwd_Byts','Subflow_Bwd_Byts','Subflow_Bwd_Pkts','Subflow_Fwd_Pkts','Output'))
X_test.loc[0] = (55399,53,17,85,1,1,37,83,37,37,37,83,1411764.706,23529.41176,11764.70588,11764.70588,18,41,0,0,7)
y_pred = knn.predict(X_test.drop(['Output'], axis = 1))
print("Prediccion: " + str(y_pred))
y_proba = knn.predict_proba(X_test.drop(['Output'], axis = 1))
print("Probabilidad de Acierto: " + str(np.round(np.asarray(y_proba[0][y_pred])* 100, 2)))
| [
"matplotlib",
"seaborn"
] |
2fedce402fb0147f0ee34c8a80e03219d400d057 | Python | gabrevaya/HO-python | /ej3.py | UTF-8 | 423 | 2.96875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
import scipy
coefs = [1,1,-4,4]
p = np.poly1d(coefs)
xi = -10
xf = 10
dx = 0.1
num = (xf - xi)/dx
x = np.linspace(xi, xf, num)
y = p(x)
y_grad = np.gradient(y, dx, edge_order=2)
plt.figure()
plt.plot(x,y)
plt.plot(x,y_grad)
plt.xlabel('X')
plt.ylabel('Y')
plt.legend(['Función','Derivada'])
plt.grid()
plt.show()
np.savetxt('function.txt', y) | [
"matplotlib"
] |
7a8cb0d69f2ed0cb6c9cd04987a121a0e1372815 | Python | HeinrichHartmann/UrlAnalysis | /UrlAnalysis.py | UTF-8 | 1,861 | 2.8125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
# <nbformat>3.0</nbformat>
# <codecell>
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# <codecell>
# Update Logs from Polipo
import subprocess
subprocess.call("zcat /var/log/polipo/*.gz | cat - /var/log/polipo/*.log > proxy.log", shell=True)
# Import Logs
df = pd.read_csv("proxy.log", sep=" - ", names=["date", "method", "url"])
# clean log file
subprocess.call("rm proxy.log", shell=True)
# Print Stats
df = df[df.url.notnull()] # discard rows without url
# df = df[df.method == "GET"]
df.date = pd.to_datetime(df.date)
df.index = df.date
print df.index.size
df.head()
# <codecell>
def get_host(url):
    # str.strip() removes characters, not a prefix, so cut the scheme off explicitly
    if url.startswith("http://"):
        url = url[len("http://"):]
    url = url.split("/")[0]
    url = url.split(":")[0]
    return url
classes = {
"news" : { 'patterns': ["zeit.de", "welt.de"], 'value' : 10 },
"social" : { 'patterns': ["facebook.com", "twitter.com"], 'value' : 15 },
"status" : { 'patterns': ["dropbox.com", "ghostery"], 'value' : 0 },
"learn" : { 'patterns': ["wikipedia.org", "stackoverflow", "wikibooks", "wikimedia"], 'value' : -5 },
"work" : { 'patterns': ["github.com", "calendar.google.com"], 'value' : -10 },
"search" : { 'patterns': ["google"], 'value' : 1 },
"unknown": { 'patterns': [], 'value' : 0 },
}
def classify(host):
for host_class, ob in classes.items():
if any([ (pattern in host) for pattern in ob['patterns'] ]):
return host_class
return "unknown"
def n_classify(host):
return classes[classify(host)]['value']
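# Quick sanity check of the classifier (hosts here are just illustrative examples):
print classify(get_host("http://www.zeit.de/politik/article"))  # -> news
print classify(get_host("http://github.com/user/repo"))         # -> work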
# <codecell>
df['host'] = df.url.apply(get_host)
df['host_type'] = df.host.apply(n_classify)
# plt.plot(df.host_type,df.date,'o')
# <codecell>
from datetime import datetime
plt.figure(figsize=(20,5))
gf = df# [df.date > datetime(2014,9,7,0,0,0)]
plt.plot(gf.date,gf.host_type,'o')
# <codecell>
| [
"matplotlib"
] |
9fd81b5682622aca3fd1574df93f26dc0ca1e195 | Python | kaixin999/wcloud | /yunci.py | UTF-8 | 716 | 2.578125 | 3 | [] | no_license | import jieba
from PIL import Image
import numpy as np
from wordcloud import WordCloud,ImageColorGenerator
fn = open('love.txt','r',encoding='utf-8') # open the file
jb = fn.read() # read the whole file
fn.close() # close the file
jb = jieba.lcut(jb)
jb =' '.join(jb)
maskcover = np.array(Image.open('love.png'))
img_color = ImageColorGenerator(maskcover)
w = WordCloud(
background_color='white',
max_words=160,
random_state=42,
width=1000,
height=800,
margin=1,
mask=maskcover,
max_font_size=180,
font_path='./simhei.ttf',
).generate(jb)
import matplotlib.pyplot as plt
plt.imshow(w)
plt.imshow(w.recolor(color_func=img_color))
plt.axis('off')
plt.show() | [
"matplotlib"
] |
153975a4221b21f96b11a158bfa95775b560b4b9 | Python | r-b-g-b/Lab | /toolbox/misc.py | UTF-8 | 14,553 | 2.546875 | 3 | [] | no_license | import numpy as np
import matplotlib.pyplot as plt
from PIL import Image
import pandas as pd
import os, h5py
import scipy.stats as st
import itertools
import Tkinter
import tkFileDialog
import datetime
def get_subplot_grid(n):
if n<4:
nrows = n; ncols = 1
else:
nrows = ncols = np.ceil(np.sqrt(n))
return nrows, ncols
def str2date(s, delimiter = '_', format = 'MMDDYYYY'):
'''
Input:
s: a string representation of a date MO_DA_YR
Output:
date: datetime formatted date
'''
if delimiter=='':
if format == 'YYYYMMDD':
yr = s[:4]; mo = s[4:6]; da = s[6:]
else:
if format == 'MMDDYYYY':
mo, da, yr = s.split(delimiter)
elif format == 'YYYYMMDD':
yr, mo, da = s.split(delimiter)
return datetime.date(int(yr), int(mo), int(da))
def pd_sem(ds):
n = len(ds)
ds_mean = np.mean(ds)
runningsum = 0
for i in ds:
runningsum = runningsum + (i-ds_mean)**2
std = np.sqrt(runningsum / (n-1))
sem = std / np.sqrt(n)
return sem
def pd_sem_2d(ds):
n = len(ds)
nsamp = len(ds[ds.index[0]])
ds_mean = np.mean(ds)
runningsum = np.zeros(nsamp)
for i in ds:
runningsum = runningsum + (i-ds_mean)**2
std = np.sqrt(runningsum / (n-1))
sem = std / np.sqrt(n)
return sem
def get_first(df):
x = df.iloc[0]
return x
def samexaxis(ax, xlim = None):
minx = +np.inf
maxx = -np.inf
if xlim is None:
for ax_ in ax:
xlim = ax_.get_xlim()
minx = np.min([minx, xlim[0]])
maxx = np.max([maxx, xlim[1]])
else:
(minx, maxx) = xlim
[a.set_xlim([minx, maxx]) for a in ax]
def sameyaxis(ax, ylim = None):
minx = +np.inf
maxx = -np.inf
if ylim is None:
for ax_ in ax:
xlim = ax_.get_ylim()
minx = np.min([minx, xlim[0]])
maxx = np.max([maxx, xlim[1]])
else:
(minx, maxx) = ylim
[a.set_ylim([minx, maxx]) for a in ax]
def closest(vec, x, log = False):
if log:
orig_vec = vec.copy()
vec = np.log2(orig_vec)
x = np.log2(x)
differ = np.abs(vec - x)
min_differ = differ.min()
ix = differ.argmin()
if log:
val = orig_vec[ix]
error = vec[ix] - x
else:
val = vec[ix]
error = val - x
return val, ix, error
def vline(ax, xloc, **kwargs):
    if np.isscalar(xloc):
        xloc = [xloc]
    for ax_ in ax:
        for xloc_ in xloc:
            ax_.axvline(xloc_, **kwargs)
def hline(ax, yloc, **kwargs):
    if np.isscalar(yloc):
        yloc = [yloc]
    for ax_ in ax:
        for yloc_ in yloc:
            ax_.axhline(yloc_, **kwargs)
def nansem(x, axis = None):
return st.sem(x[~np.isnan(x)], axis = axis)
def isbetween(x, xmin, xmax):
x = np.array(x)
ix = np.logical_and(xmin <= x, x < xmax)
return ix
def octave_diff(f1, f2):
return np.log2(f1)-np.log2(f2)
def bar2(y, x = None, yerr = None, width = None, ax = None, groups = None, grouplabels = None):
if x is None:
x = np.arange(y.shape[0])
if width is None:
width = y.shape[0]
x = np.asarray(x)
ax, fig = axis_check(ax)
ndim = len(y.shape)
ax.bar(x, y[:, 0], yerr = yerr[:, 0], ecolor = 'k')
ax.bar(x+width, y[:, 1], yerr = yerr[:, 1], color = 'g', ecolor = 'k')
if groups is not None:
labels = []
for i in itertools.product(groups[0], groups[1]):
labels.append(str(i[0]) + '.' + str(i[1]))
ax.set_xticks(np.arange(np.prod(y.shape))+0.5)
ax.set_xticklabels(labels)
return ax
def jitter(x, amp):
'''
returns a Gaussian jittered version of the input. Only good for 1-D vectors.
'''
return x + amp*np.random.randn(np.asarray(x).size)
def order(x):
x_ = np.empty_like(x)
ux = np.unique(x)
for i, ux_ in enumerate(ux):
x_[x==ux_] = i
return x_
def plot_spread_y(x):
npts, nobs = x.shape
y_ = 2*np.abs(x).max(1).mean()
y = np.arange(0, y_*nobs, y_)
return x + np.tile(y, (npts, 1))
def errorfill(x, y, yerr = None, ax = None, color = None, err_type = 'sem', **kwargs):
ax, fig = axis_check(ax)
if color is None:
color = ax._get_lines.color_cycle.next()
if len(y.shape)==1:
y_mean = y
else:
y_mean = y.mean(0)
if yerr is None:
if err_type == 'sem':
yerr = 0.5*st.sem(y, 0)
elif err_type == 'std':
yerr = 0.5*st.std(y, 0)
x_hi = y_mean + yerr
x_lo = y_mean - yerr
l = ax.plot(x, y_mean, color = color, **kwargs)
l_up = ax.plot(x, y_mean+yerr, color = color, alpha = 0.2)
l_lo = ax.plot(x, y_mean-yerr, color = color, alpha = 0.2)
ax.fill_between(x, x_hi, x_lo, alpha = 0.2, color = color)
return ax
def bin(x, bins, ordered_x = False):
# returns a 'rounded down' version of data (i.e. the cloeset bin value below the raw value)
bins = np.asarray(bins)
nbins = bins.size - 1
x_bin = np.empty(x.size) * np.nan
for i in range(nbins):
x_bin[isbetween(x, bins[i], bins[i+1])] = i
if not ordered_x:
x_bin = np.asarray([bins[x_] for x_ in x_bin])
return x_bin
def slopeintercept(m, b, ax, **kwargs):
(xi, xf) = ax.get_xlim()
yi = m*xi + b
yf = m*xf + b
ax.plot([xi, xf], [yi, yf], **kwargs)
def scatterfit(x, y, ax = None, **kwargs):
if ax is None:
fig = plt.figure();
ax = fig.add_subplot(111);
ax.scatter(x, y)
(slope, inter) = np.polyfit(x, y, 1)
slopeintercept(slope, inter, ax, color = 'r')
def axis_check(ax, **kwargs):
'''
Returns ax, fig
'''
if ax is None:
fig = plt.figure();
ax = fig.add_subplot(111, **kwargs);
else:
fig = ax.get_figure();
return ax, fig
def target(x, y, ax, color = 'r'):
ax.plot(x, y, marker = 'o', color = color, ms = 12)
ax.plot(x, y, marker = 'o', color = 'w', ms = 10)
ax.plot(x, y, marker = 'o', color = color, ms = 6)
ax.plot(x, y, marker = 'o', color = 'w', ms = 3)
def find_on_off(x):
x = np.concatenate((np.array([0]), x, np.array([x[-1]+10])))
d = np.diff(x)
ix = (d>10).nonzero()[0]
offset = x[ix][1:]+1000
onset = x[ix+1][:-1]-1000
return onset, offset
def find_consec_middles(Q):
'''
Input:
Q : list of integers
Output
Q : same list except culled down to not contain consecutive ranges
(new list only contains the middle number of that range of consecutive numbers)
'''
X = [True]
while sum(X) > 0:
L = [l+1 == Q[i+1] for i, l in enumerate(Q[:-1])] # check left
L.extend([False])
R = [l-1 == Q[i] for i, l in enumerate(Q[1:])] # check right
R.insert(0, False)
M = [l and r for (l, r) in zip(L, R)] # middle values
L2 = [l if not m else False for (l, m) in zip(L, M)]
R2 = [r if not m else False for (r, m) in zip(R, M)]
Z = [False if not l else l and r for (l, r) in zip(L2[:-1], R2[1:])] # no LR pairs
Z.extend([False])
Y = [l ^ r for (l, r) in zip(L, R)] # left or right
X = [y if not z else False for (y, z) in zip(Y[:-1], Z)] # what to remove (L/R but not LR pairs)
X.extend([Y[-1]])
Q = [q for (q, x) in zip(Q, X) if not x]
return Q
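# Example: find_consec_middles([1, 2, 3, 7, 9, 10, 11]) -> [2, 7, 10]
# (each run of consecutive integers collapses to its middle value; isolated values are kept)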
def find_edges(Q):
dQ = np.diff(Q)
return (dQ == 1).nonzero()[0], (dQ == -1).nonzero()[0]
def find_peaks(Q, pplot = False):
dQ = np.diff(Q)
w = dQ[:-1] * dQ[1:]
peaks_x = np.vstack((w<0, dQ[:-1]>0)).all(0).nonzero()[0]+1
peaks_y = Q[peaks_x]
if pplot:
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(Q, 'b')
ax.plot(dQ, 'r')
ax.plot(w*1E12, 'g')
ax.plot(peaks_x, peaks_y, '*m')
return peaks_x, peaks_y
def cos2ramp(x, ramplen):
ramplen = np.int32(ramplen)
rampoff = np.cos(np.pi/(2*ramplen) * np.arange(ramplen))**2
rampon = rampoff[::-1]
x[:ramplen] = x[:ramplen] * rampon
x[-ramplen:] = x[-ramplen:] * rampoff
return x
def medfilt1(x=None,L=None):
'''
a simple median filter for 1d numpy arrays.
performs a discrete one-dimensional median filter with window
length L to input vector x. produces a vector the same size
as x. boundaries handled by shrinking L at edges; no data
outside of x used in producing the median filtered output.
(upon error or exception, returns None.)
inputs:
x, Python 1d list or tuple or Numpy array
L, median filter window length
output:
xout, Numpy 1d array of median filtered result; same size as x
bdj, 5-jun-2009
'''
# input checks and adjustments
try:
N = len(x)
if N < 2:
print 'Error: input sequence too short: length =',N
return None
elif L < 2:
print 'Error: input filter window length too short: L =',L
return None
elif L > N:
print 'Error: input filter window length too long: L = %d, len(x) = %d'%(L,N)
return None
except:
print 'Exception: input data must be a sequence'
return None
xin = np.array(x)
if xin.ndim != 1:
print 'Error: input sequence has to be 1d: ndim =',xin.ndim
return None
xout = np.zeros(xin.size)
# ensure L is odd integer so median requires no interpolation
L = int(L)
if L%2 == 0: # if even, make odd
L += 1
else: # already odd
pass
Lwing = (L-1)/2
for i,xi in enumerate(xin):
# left boundary (Lwing terms)
if i < Lwing:
xout[i] = np.median(xin[0:i+Lwing+1]) # (0 to i+Lwing)
# right boundary (Lwing terms)
elif i >= N - Lwing:
xout[i] = np.median(xin[i-Lwing:N]) # (i-Lwing to N-1)
# middle (N - 2*Lwing terms; input vector and filter window overlap completely)
else:
xout[i] = np.median(xin[i-Lwing:i+Lwing+1]) # (i-Lwing to i+Lwing)
return xout
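# Minimal usage sketch for medfilt1 (illustrative values only):
# >>> medfilt1([1, 1, 9, 1, 1, 1], 5)
# array([ 1.,  1.,  1.,  1.,  1.,  1.])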
def get_path():
root = Tkinter.Tk()
root.withdraw()
path = tkFileDialog.askopenfilename()
root.quit()
return path
def export_h5(f, fname = 'OUT'):
fout = open(os.path.join('/Users/robert/Desktop/', fname+'.txt'), 'w')
header = ['gen', 'exp', 'sess']
for i, sess in enumerate(f.keys()):
fsess = f[sess]
gen, exp, _ = sess.split('_')
for j, unit in enumerate(fsess.keys()):
funit = fsess[unit]
newline = [gen, exp, sess]
for m, field in enumerate(np.sort(funit.keys())):
ffield = funit[field]
fieldsizes = np.array(ffield.shape)
if fieldsizes.size == 0:
if i==0 and j==0:
header.append(field)
newline.append('%4.4f' % ffield.value)
elif fieldsizes.size == 1 and fieldsizes[0]<10:
for n in range(fieldsizes[0]):
if i==0 and j==0:
header.append('%s-%2.2u' % (field, n))
newline.append('%4.4f' % ffield.value[n])
if i==0 and j==0:
fout.write('\t'.join(header) + '\n')
fout.write('\t'.join(newline) + '\n')
fout.close()
def export_DB(DB, fname = 'DB'):
nunits = DB.shape[0]
txt = np.empty((nunits+1, 0), dtype = 'str')
for name in DB.dtype.names:
if len(DB[name].shape) == 1: # save out 1D DB entries
txt = np.hstack((txt, np.vstack((name, DB[name][:, np.newaxis]))))
elif len(DB[name].shape) == 2: # save out 2D DB entries
if DB[name].shape[1] < 10:
for i in range(DB[name].shape[1]):
headername = '%s%i' % (name, i)
txt = np.hstack((txt, np.vstack((headername, DB[name][:, i, np.newaxis]))))
np.savetxt(os.path.join('/Users/robert/Desktop/', fname+'.txt'), txt, fmt = '%s')
def export_DB2(data, fname = 'DB'):
savepath = os.path.join('/Users/robert/Desktop', fname+'.txt')
assert not os.path.exists(savepath)
f = open(savepath, 'w')
line = ''
dat = data[0]
for key_ in dat.dtype.descr:
key = key_[0]
newfield = dat[key]
if newfield.size==1:
newfield = [newfield]
for i, newfield_ in enumerate(newfield):
header = key
if len(newfield)>1:
header = '%s%2.2i' % (header, i+1)
line += '%s\t' % header
line += '\n'
f.write(line)
for dat in data:
line = ''
for key_ in dat.dtype.descr:
key = key_[0]
newfield = dat[key]
if newfield.size==1:
newfield = [newfield]
for newfield_ in newfield:
if not type(newfield_) in [str, np.string_]:
newfield_ = '%.5e' % newfield_
line += '%s\t' % newfield_
line += '\n'
f.write(line)
f.close()
def make_combinations(x):
ndim = x.shape[1]
nlevels = np.empty(ndim, dtype = np.int32)
ulevels = []
for i in range(ndim):
ulevels.append(list(np.unique(x[:, i])))
nlevels[i] = len(ulevels[-1])
for i in itertools.product(*ulevels):
print i
def np_to_pd(x):
df = pd.DataFrame(index = np.arange(x.size))
for i in x.dtype.descr:
if len(i)==2:
name, dtype = i
df[name] = x[name]
elif len(i)==3:
name, dtype, shape = i
if len(shape)==1 and shape[0]<100:
for j in xrange(shape[0]):
df['%s%2.2i' % (name, j+1)] = x[name][:, j]
return df
def get_rast_stimparams_from_df(df_, studyprefix = 'rr', unitprefix = 'RR', basedir = '/Volumes/BOB_SAGET/Fmr1_RR'):
path = os.path.join(basedir, 'Sessions', '%s_%s_%s_%s' % (studyprefix, df_['gen'], df_['exp'], df_['sess']), 'fileconversion', '%s%3.3i.h5' % (unitprefix, df_['unit']))
if os.path.exists(path):
print 'Loading %s' % path
f = h5py.File(path, 'r')
rast = f['rast'].value
stimparams = f['stimID'].value
f.close()
else:
print 'Could not find %s' % path
return None, None
return rast, stimparams
def add_unity_line(ax = None, **kwargs):
if ax is None:
ax = plt.gca()
xlim = ax.get_xlim()
ylim = ax.get_ylim()
minlim = min((xlim[0], ylim[0]))
maxlim = max((xlim[1], ylim[1]))
ax.plot([minlim, maxlim], [minlim, maxlim], **kwargs)
ax.set_xlim(xlim)
ax.set_ylim(ylim)
plt.draw()
def plot_matrix(x, N = 10, ax = None, **kwargs):
'''
plots the mean trace of a matrix and transparent sample traces
'''
ax, fig = axis_check(ax)
l, = ax.plot(x.mean(1), lw = 2, **kwargs)
color = l.get_color()
ax.plot(x[:, np.linspace(0, x.shape[1]-1, N).astype(int)], alpha = 0.1, color = color, **kwargs)
return ax
def pd_errorbar(y, yerr, ax = None, **kwds):
ax, fig = axis_check(ax)
colors = kwds.pop('color', 'brgyk')
ax_pos = np.arange(len(y)) + 0.25
tickoffset = 0.375
K = y.columns.size
rects = []
labels = []
for i, label in enumerate(y.columns):
y_ = y[label]
yerr_ = yerr[label]
kwds['color'] = colors[i % len(colors)]
rect = ax.bar(ax_pos + i*0.75/K, y_, 0.75/K, label = label, **kwds)
ax.errorbar(ax_pos + (i*0.75/K) + (0.75/K)/2, y_, yerr_, fmt = None, ecolor = 'k')
rects.append(rect)
labels.append(label)
patches = [r[0] for r in rects]
ax.legend(patches, labels, loc = 'best')
ax.set_xticks(ax_pos + tickoffset)
ax.set_xticklabels(y.index.values, rotation = 90)
plt.show();
return ax
def objectarray2floatarray(x):
x_ = np.empty((len(x), len(x[x.index[0]])))
for i, (k, v) in enumerate(x.iterkv()):
x_[i, :] = v
return x_
def facet_wrap(df, df_err = None, fig = None):
if fig is None:
fig = plt.figure();
index = zip(*df.index)
nindex = len(index)
ulevels = np.unique(index[-1])
nlevels = ulevels.size
ax = []
for i, lev in enumerate(ulevels):
ax.append(fig.add_subplot(nlevels, 1, i+1))
df.xs(lev, level=nindex-1).transpose().plot(kind = 'line', ax = ax[-1])
# df.xs(lev, level=nindex-1)
# df_err.xs(lev, level=nindex-1)
# pd_errorbar(df, df_err, ax = ax[-1])
sameyaxis(ax)
| [
"matplotlib"
] |
2d8f66e71f666856f0f894e46e4d06362b970217 | Python | altdillon/deeplearningfinal | /LeNet.py | UTF-8 | 1,386 | 2.625 | 3 | [] | no_license | import keras
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout, Flatten,Conv2D, MaxPooling2D
from keras.layers.normalization import BatchNormalization
from keras.models import load_model # added
import matplotlib.pyplot as plt
from tensorflow.python.keras.preprocessing.image import ImageDataGenerator
from keras.regularizers import l2
# lenet, from Chao's minst hand writing exsample
def LeNet(width=1, height=1, depth=1, classes=1):
# initialize the model
model = Sequential()
inputShape = (height, width, depth)
# if we are using "channels first", update the input shape
#if K.image_data_format() == "channels_first":
# inputShape = (depth, height, width)
# first set of CONV => RELU => POOL layers
model.add(Conv2D(20, (5, 5), padding="same", input_shape=inputShape))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
# second set of CONV => RELU => POOL layers
model.add(Conv2D(50, (5, 5), padding="same"))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2)))
# first (and only) set of FC => RELU layers
model.add(Flatten())
model.add(Dense(500))
model.add(Activation("relu"))
# softmax classifier
model.add(Dense(classes))
model.add(Activation("softmax"))
# return the constructed network architecture
return model
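# Minimal usage sketch (assumes 28x28 grayscale inputs and 10 classes, e.g. MNIST):
if __name__ == "__main__":
    lenet = LeNet(width=28, height=28, depth=1, classes=10)
    lenet.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
    lenet.summary()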
| [
"matplotlib"
] |
00d09f41c9dd436f83924fa92cc9dcf037ce613d | Python | VincentSidot/projet_euler | /projet_euler.py | UTF-8 | 9,550 | 3.515625 | 4 | [] | no_license | ## Probleme 12
def list_divisor(n):
divisor = [1]
divisor2 = [n]
for i in range(2,int(n**0.5)+1):
if n%i==0:
divisor += [i]
if i !=int(n**0.5):
divisor2 += [int(n/i)]
return divisor + list(reversed(divisor2))
def count_divisor(n):
divisor = 2
for i in range(2,int(n**0.5)+1):
if n%i==0:
divisor += 1
if i !=int(n**0.5):
divisor += 1
return divisor
def problem12():
divisor = 0
i,triangular = 0,0
while divisor < 500:
i+=1
triangular+=i
divisor = count_divisor(triangular)
return i
## Problem 38
def D2B(n,B=10):
L = []
while n > 0:
L = [n%B] + L
n//=B
return L
def fastpow(x,n):
if n==0:
return 1
elif n%2==0:
return fastpow(x,n//2)*fastpow(x,n//2)
else:
return fastpow(x,n//2)*fastpow(x,n//2)*x
pow5 = [fastpow(i,5) for i in range(10)]
def fifthDigitSum(n):
rep = 0
while n>0:
rep += pow5[n%10]
n//=10
return rep
def problem38():
rep = 0
for i in range(10,10**6):
if i == fifthDigitSum(i):
rep += i
return rep
## Problem 31
p = [1,2,5,10,20,50,100,200]
def problem31():
rep = 0
for x1 in range(201):
if x1>200:
break
for x2 in range(101):
if x1+2*x2>200:
break
for x3 in range(41):
if x1+2*x2+5*x3>200:
break
for x4 in range(21):
if x1+2*x2+5*x3+10*x4>200:
break
for x5 in range(11):
if x1+2*x2+5*x3+10*x4+20*x5>200:
break
for x6 in range(5):
if x1+2*x2+5*x3+10*x4+20*x5+50*x6>200:
break
for x7 in range(3):
if x1+2*x2+5*x3+10*x4+20*x5+50*x6+100*x7>200:
break
for x8 in range(2):
if x1+2*x2+5*x3+10*x4+20*x5+50*x6+100*x7+200*x8>200:
break
if x1+2*x2+5*x3+10*x4+20*x5+50*x6+100*x7+200*x8==200:
rep+=1
return rep
## Problem 27
""" b as to be prime"""
def crible(n):
P = [0,0] + list(range(2,n))
for i in range(2,int(n**0.35)+1):
if P[i] != 0:
for j in range(2*i,n,i):
P[j] = 0
return [p for p in P if p!=0]
def isPrime(n):
if n <= 0 or n==1:
return False
for i in range(2,int(n**0.5)+1):
if n%i==0:
return False
return True
B = crible(1000)
A = range(-1000,1001)
def problem27():
max,amax,bmax = 0,0,0
for a in A:
for b in B:
n = 0
while isPrime(n*n+a*n+b):
n+=1
if n > max:
amax,bmax=a,b
max = n
return amax*bmax
## Problem 21
def properDivisor(n,extremum=False):
    if extremum:
        L = [1,n]
    else:
        L = []
    for i in range(2,int(n**0.5)+1):
        if n%i==0:
            L+=[i]
            if i != n//i:  # avoid adding the square root twice for perfect squares
                L+=[n//i]
    #L.sort()
    return L
def d(n):
return sum(properDivisor(n))+1
def problem21():
rep = 0
for a in range(1,10000):
b = d(a)
if d(b)==a and b!=a:
rep += a
return rep
## Problem 24
L = [0,1,2]
def fact(n):
    rep = 1
    for i in range(2,n+1):
        rep *= i
    return rep
def Permutation(L):
return
## Problem 22
file = open("problem22.txt",'r')
def strToList(str):
L = []
word = ""
for i in str:
if ord(i)>=ord('A') and ord(i) <= ord('Z'):
word += i
else:
if len(word)>0:
L += [word]
word = ""
if len(word)>0:
L += [word]
return L
def pound(word):
rep = 0
for i in word:
rep += ord(i)-ord('A')+1
return rep
rep = 0
L = strToList(file.read())
L.sort()
for i in range(len(L)):
rep += pound(L[i])*(i+1)
print(rep)
## Problem 17
usual = { 1:'one',2:'two',3:'three',4:'four',5:'five',6:'six',7:'seven',8:'eight',9:'nine',10:'ten',11:'eleven',12:'twelve',13:'thirteen',14:'fourteen',15:'fifteen',16:'sixteen',17:'seventeen',18:'eighteen',19:'nineteen',20:'twenty',30:'thirty',40:'forty',50:'fifty',60:'sixty',70:'seventy',80:'eighty',90:'ninety',100:'hundred',1000:'thousand',1000000:'million'}
def number_to_str(n):
if n <= 0:
return "error"
if n <= 20:
return usual[n]
if n/10 == n//10 and n<100:
return usual[n]
if n > 20 and n < 100:
dec = n//10
unit = n%10
return number_to_str(10*dec) + '-' + number_to_str(unit)
if n/100 == n//100 and n<1000:
return number_to_str(n//100) + " " + usual[100]
if n > 100 and n < 1000:
cent = n//100
rest = n%100
return number_to_str(cent*100) + " and " + number_to_str(rest)
if n/1000 == n//1000 and n<1000000:
return number_to_str(n//1000) + " " + usual[1000]
if n > 1000 and n < 1000000:
mille = n//1000
rest = n%1000
return number_to_str(mille*1000) + ' ' + number_to_str(rest)
if n/1000000 == n//1000000 and n<1000000000:
return number_to_str(n//1000000) + " " + usual[1000000]
if n > 1000000 and n < 1000000000:
million = n//1000000
rest = n%1000000
return number_to_str(million*1000000) + ' ' + number_to_str(rest)
return 'error'
def count_number(word):
rep = 0
for i in word:
if ord(i) >= ord('a') and ord(i) <= ord('z'):
rep+=1
return rep
def problem17():
rep = 0
for i in range(1,1001):
rep += count_number(number_to_str(i))
print(rep)
## Problem 15
def fact(n):
for i in range(1,n):
n*=i
return n
def binom(k,n): # crude implementation
return fact(n)//(fact(k)*fact(n-k))
## Problem 18
M = [3,7,4,2,4,6,9,5,9,3]
def string_to_list(str):
L = []
word = ""
for i in str:
if ord(i)>=ord('0') and ord(i)<=ord('9'):
word += i
else:
if len(word)>0:
L+=[int(word)]
word = ""
if len(word)>0:
L+=[int(word)]
return L
str = "75 95 64 17 47 82 18 35 87 10 20 04 82 47 65 19 01 23 75 03 34 88 02 77 73 07 63 67 99 65 04 28 06 16 70 92 41 41 26 56 83 40 80 70 33 41 48 72 33 47 32 37 16 94 29 53 71 44 65 25 43 91 52 97 51 14 70 11 33 28 77 73 17 78 39 68 17 57 91 71 52 38 17 14 91 43 58 50 27 29 48 63 66 04 68 89 53 67 30 73 16 69 87 40 31 04 62 98 27 23 09 70 98 73 93 38 53 60 04 23"
def triangular(n):
return (n*(n+1))//2
def trouver_hauteur(x):
i = 0
while x>triangular(i):
i+=1
return i
def reduce_path(L):
h = trouver_hauteur(len(L))
newL = L[:len(L)-h]
a = triangular(h-2)
b = triangular(h-1)
for i in range(h-1):
newL[a+i] += max(L[b+i],L[b+i+1])
return newL
def find_max_path(L):
while len(L) > 1:
L = reduce_path(L)
return L[0]
## Problem 29
def fastpow(x,n):
if n == 0:
return 1
if n%2==0:
return pow(x,n//2)*pow(x,n//2)
else:
return pow(x,n//2)*pow(x,n//2)*x
def gen(a,b):
L = []
for i in range(2,a+1):
for j in range(2,b+1):
L += [fastpow(i,j),fastpow(j,i)]
L.sort()
return L
def singularise(L):
rep = []
for i in L:
if i not in rep:
rep += [i]
return rep
## Problem 69
def crible(n):
P = [0,0] + list(range(2,n))
for i in range(2,int(n**0.5)+1):
if P[i] != 0:
for j in range(2*i,n,i):
P[j] = 0
return [p for p in P if p!=0]
Prime = crible(1000000)
def PrimeFactor(n):
L = []
for p in Prime:
while n%p == 0:
L+=[p]
n//=p
return L
def f(n):
rep = 1
for p in PrimeFactor(n):
rep *= 1-(1/p)
return 1/rep
## Problem 26
import matplotlib.pyplot as plt
def fastpow(x,n):
if n == 0:
return 1
if n%2==0:
return pow(x,n//2)*pow(x,n//2)
else:
return pow(x,n//2)*pow(x,n//2)*x
def checkOccurence(n,L):
for i in range(len(L)):
if L[i]==n:
return i
# NOT FUNCTIONAL except for prime numbers
def lenCycle(n):
L = []
i = 0
while True:
p = int(fastpow(10,i)//n)
if p!=0:
if p%10 == 0:
return 0
if p%10 in L:
return i-checkOccurence(p%10,L)-1
L += [p%10]
i+=1
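# A minimal alternative sketch: compute the repeating-decimal length of 1/n directly
# by tracking remainders (works for any n, not just primes).
def lenCycle2(n):
    while n % 2 == 0:
        n //= 2
    while n % 5 == 0:
        n //= 5
    if n <= 1:
        return 0
    r, length = 10 % n, 1
    while r != 1:
        r = (r * 10) % n
        length += 1
    return length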
def problem26():
max,imax = 0,0
for i in range(1,1000):
p = lenCycle(i)
if p > max:
max,imax = p,i
return max,imax
def plot():
X = range(1,1000)
Y = [lenCycle(i) for i in X]
plt.plot(X,Y)
plt.grid(True)
plt.show()
## Problem 32
def D2B(n,B=10):
L = []
while n > 0:
L = [n%B] + L
n//=B
return L
def isPandigital(n,onlyOnce = True):
Appear = [False]*9
L = D2B(n)
if len(L)<9:
return False
for i in L:
if i != 0:
if onlyOnce and Appear[i-1]:
return False
Appear[i-1] = True
for j in Appear:
if not j:
return False
return True | [
"matplotlib"
] |
fefb767f3d08ce9b8da7696e2b46b937c53cf03b | Python | pyvista/pyvista-docs | /version/0.40/api/core/_autosummary/pyvista-Light-1.py | UTF-8 | 397 | 3.109375 | 3 | [] | no_license | # Create a light at (10, 10, 10) and set its diffuse color to red.
#
import pyvista as pv
light = pv.Light(position=(10, 10, 10))
light.diffuse_color = 1.0, 0.0, 0.0
#
# Create a positional light at (0, 0, 3) with a cone angle of
# 30, exponent of 20, and a visible actor.
#
light = pv.Light(
position=(0, 0, 3),
show_actor=True,
positional=True,
cone_angle=30,
exponent=20,
)
| [
"pyvista"
] |
4beb3a281a7ee741259b6c1c76b878acbee557f2 | Python | jatinkumar762/IIT-Gandhinagar | /Operating System/Module-3/PageReplacement.py | UTF-8 | 10,162 | 3.1875 | 3 | [] | no_license | import random
import matplotlib.pyplot as plt
class PageReplacement:
def __init__(self):
self.frame_seq = [] #Frames which are referenced by program
self.frame_num = 0 #Total No. of Frames
self.Page_size = 0 #No of pages in memoy
self.Page_mem = [] #Currently residing frames in pages of memory
self.hit = 0 #No. of hits
self.miss = 0 #No. of miss
self.total_ref = 0 #No of times memory referenced
def getFrames(self): #Initialize list of reference frames
self.frame_num=int(random.random()*100)
for i in range(self.frame_num):
self.frame_seq.append(int(random.random()*100))
print("Reference String ",self.frame_seq)
def reint_data(self): #Reinitialize data
self.hit = 0
self.miss = 0
self.total_ref = 0
self.Page_mem = []
#Least Recently Used
def LRU(self):
self.reint_data()
for frame in self.frame_seq:
if frame in self.Page_mem: #frame already present in memory
index = self.Page_mem.index(frame)
del self.Page_mem[index] #delete from current place
self.Page_mem.insert(0, frame) #reagain insert at begining
self.total_ref += 1
self.hit += 1
elif len(self.Page_mem)<self.Page_size: #space available in memory
self.Page_mem.insert(0, frame)
self.total_ref += 1
self.miss += 1
elif len(self.Page_mem)>=self.Page_size: #No Space availabe in memory
del self.Page_mem[len(self.Page_mem)-1] #delete last page bcz least recently will be always in last
self.Page_mem.insert(0, frame) #insert new frame at the begining
self.total_ref += 1
self.miss += 1
#First-In First-Out
def FIFO(self):
self.reint_data()
for frame in self.frame_seq:
if frame in self.Page_mem: #frame aleady present in memory
self.total_ref += 1
self.hit += 1
elif len(self.Page_mem)<self.Page_size: #space avaiable
self.Page_mem.insert(0, frame) #insert at begining
self.total_ref += 1
self.miss += 1
else:
del self.Page_mem[len(self.Page_mem)-1] #delete last page
self.Page_mem.insert(0, frame) #insert at begining
self.total_ref += 1
self.miss += 1
#Least Frequenty Used
def LFU(self): #Use 2-d Array Concept
self.reint_data() #for each page corresponding frequency also there
for frame in self.frame_seq:
flag = 0
for page in self.Page_mem:
if page[0]== frame: #frame already present in memory
self.Page_mem[self.Page_mem.index(page)][1]+= 1 #increase it's freq. count
flag = 1
self.total_ref += 1
self.hit += 1
break
if flag == 0:
if len(self.Page_mem) < self.Page_size: #space availabel
self.Page_mem.insert(0,[frame,1]) #insert at the begining
self.total_ref += 1
self.miss += 1
else:
minCount = [0,10**5]
for page in self.Page_mem: #find page with min frequency
if minCount[1] > page[1]:
minCount = page
del self.Page_mem[self.Page_mem.index(minCount)]
self.Page_mem.insert(0,[frame,1])
self.total_ref += 1
self.miss += 1
#Random Page Replacement
def Random(self):
self.reint_data()
for frame in self.frame_seq: #pick one by one frame from reference array
if frame in self.Page_mem: #if page already present in memory
self.total_ref += 1
self.hit += 1
elif len(self.Page_mem)<self.Page_size: #if space available in memory
self.Page_mem.insert(0, frame)
self.total_ref += 1
self.miss += 1
else: #if memory already full
del self.Page_mem[(int(random.random()*100))%self.Page_size] #select any random page location in memory
self.Page_mem.insert(0, frame)
self.total_ref += 1
self.miss += 1
#Oracle - Predict Next Replacement Page
def Oracle(self):
self.reint_data()
for i,frame in enumerate(self.frame_seq): #frame with it's position
if frame in self.Page_mem: #check if page already prsent in memory
self.total_ref += 1
self.hit += 1
elif len(self.Page_mem)<self.Page_size: #if space available in memory
self.Page_mem.insert(0, frame)
self.total_ref += 1
self.miss += 1
else:
far = 0 #if memory already full
del_page = 0
for page in self.Page_mem: #choose farthest page in memory according to next access position in frame reference
try: #ValueError Possible
if far < (self.frame_seq[i:]).index(page):
far = (self.frame_seq[i:]).index(page)
del_page = page
except:
del_page = page
break
del self.Page_mem[self.Page_mem.index(del_page)] #delete selected location page from memory
self.Page_mem.insert(0, frame) #insert new page in memory
self.total_ref += 1
self.miss += 1
#Approx Least Recenty Used Algorithm
#Theory concept - https://www.geeksforgeeks.org/lru-approximation-second-chance-algorithm/
def ALRU(self):
self.reint_data()
loop = 0 #circular list counter initially starts from 0
flag = 0
del_page = 0 #page to be deleted fro memory
for frame in self.frame_seq:
flag = 0
for page in self.Page_mem: #page already present in memory
if page[0] == frame:
page[1] = 1 #set use/reference bit 1
self.hit += 1
self.total_ref += 1
flag = 1
break
if flag == 0:
if len(self.Page_mem)<self.Page_size:
self.Page_mem.insert(0, [frame,1]) #Newly inserted fresh page
self.total_ref += 1
self.miss += 1
else:
for index, page in enumerate(self.Page_mem,start=loop): #start from previous selected point
if page[1] == 0:
flag=1
del_page=page
if index+1 < self.Page_size:
loop= index+1
else:
loop = 0
break
else:
page[1]=0
if flag==0: #still not found page then again start from begining
for index, page in enumerate(self.Page_mem):
if page[1] == 0:
flag=1
del_page=page
if index+1 < self.Page_size:
loop= index+1
else:
loop=0
break
else:
page[1]=0
del self.Page_mem[self.Page_mem.index(del_page)]
self.Page_mem.insert(0, [frame,1])
self.total_ref += 1
self.miss += 1
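# Minimal usage sketch (illustrative only; it mirrors the benchmark loop below):
# run a single policy for one page size and report its hit rate.
def demo_single_policy(page_size=10):
    mem = PageReplacement()
    mem.getFrames()  # build a random reference string
    mem.Page_size = page_size
    mem.LRU()
    print("LRU hit rate: %.2f%%" % (mem.hit * 100 / mem.total_ref))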
if __name__=="__main__":
memory = PageReplacement()
page_list = list(range(1,101))
memory.getFrames()
lru_hit = []
fifo_hit = []
lfu_hit = []
rndom_hit = []
oracle_hit = []
alru_hit = []
rate = 0;
for i in range(1,101):
memory.Page_size = i
memory.LRU()
rate=((memory.hit*100)/memory.total_ref)
lru_hit.append(rate)
memory.FIFO()
rate=((memory.hit*100)/memory.total_ref)
fifo_hit.append(rate)
memory.LFU()
rate=((memory.hit*100)/memory.total_ref)
lfu_hit.append(rate)
memory.Random()
rate=((memory.hit*100)/memory.total_ref)
rndom_hit.append(rate)
memory.Oracle()
rate=((memory.hit*100)/memory.total_ref)
oracle_hit.append(rate)
memory.ALRU()
rate = ((memory.hit*100)/memory.total_ref)
alru_hit.append(rate)
plt.xlabel('Page Size (Blocks)')
plt.ylabel('Page Hit Rate (%)')
plt.plot(page_list,lru_hit,c='r',label='LRU')
plt.plot(page_list,fifo_hit,c='g',label='FIFO')
plt.plot(page_list,lfu_hit,c='k',label='LFU')
plt.plot(page_list,rndom_hit,c='b',label='Random')
plt.plot(page_list,oracle_hit,c='y',label='Oracle')
plt.plot(page_list,alru_hit,c='m',label='ALRU')
plt.legend()
plt.show() | [
"matplotlib"
] |
05dc655c4253ac576b326a80ada32eb619b80f55 | Python | thaije/Natural-Computing | /week6/Tjalling/adrem-v2/src/deep_features_keras/dataset.py | UTF-8 | 3,166 | 2.59375 | 3 | [] | no_license | # The datasets
import matplotlib.pyplot as plt
import urllib.request
import io
import glob
import numpy as np
import scipy.io
import scipy.ndimage
import re
from os.path import expanduser
def load_image(filename):
return scipy.ndimage.imread(filename, mode='RGB')
def load_image_url(url):
ext = url.split('.')[-1]
return plt.imread(io.BytesIO(urllib.request.urlopen(url).read()), ext)
def load_data(dataset):
out = dict()
if dataset=='office-31':
for domain in ['amazon','dslr','webcam']:
out[domain] = load_office31_domain(domain)
elif dataset=='office-caltech':
for domain in ['amazon','Caltech','dslr','webcam']:
out[domain] = load_office_caltech_domain(domain)
return out
def load_data(dataset, domain):
if dataset=='office-31':
return load_office31_domain(domain)
elif dataset=='office-caltech':
return load_office_caltech_domain(domain)
else:
raise Exception("Unknown dataset")
def dataset_domains(dataset):
if dataset=='office-31':
return ['amazon','dslr','webcam']
elif dataset=='office-caltech':
return ['amazon','Caltech','dslr','webcam']
else:
raise Exception("Unknown dataset")
def load_office31_domain(domain):
dirs = sorted(glob.glob(expanduser('~/data/office31/{}/images/*').format(domain)))
x = []
y = []
for i,dir in enumerate(dirs):
for file in sorted(glob.glob(dir+'/*.jpg')):
x.append(load_image(file))
y.append(i)
if len(x) == 0:
raise Exception("No images found")
return x,y
def load_office_caltech_domain(domain):
# Load matlab files
mat_suffix = 'Caltech10' if domain == 'Caltech' else domain
# labels
surf_file = '../../data/office10/{}_SURF_L10.mat'.format(mat_suffix)
y = scipy.io.loadmat(surf_file)['labels'] # 1..10
y = y[:,0] - 1
# caltech uses different category names
caltech_cat_names = {'003':'backpack', '041':'coffee-mug', '045':'computer-keyboard',
'046':'computer-monitor', '047':'computer-mouse', '101':'head-phones',
'127':'laptop-101', '224':'touring-bike', '238':'video-projector'}
# images
index_file = '../../data/office10/{}_SURF_L10_imgs.mat'.format(mat_suffix)
img_names = scipy.io.loadmat(index_file)['imgNames'][:,0]
x = []
for img_name in img_names:
img_name = img_name[0]
# map names:
if domain == 'Caltech':
# example: Caltech256_projector_238_0089
# --> data/caltech256/256_ObjectCategories/238.video-projector/238_0089.jpg
cat_name, cat_id, img_id = re.match(r'Caltech256_(.*)_([^_]*)_([^_]*)$', img_name).groups()
if cat_id in caltech_cat_names:
cat_name = caltech_cat_names[cat_id]
file = '~/data/caltech256/256_ObjectCategories/{}.{}/{}_{}.jpg'.format(cat_id, cat_name, cat_id, img_id)
else:
# example: amazon_projector_frame_0076 --> data/office31/amazon/projector/frame_0076.jpg
dom_name, cat_name, img_id = re.match(r'([^_]*)_(.*)_(frame_[^_]*)', img_name).groups()
file = '~/data/office31/{}/images/{}/{}.jpg'.format(domain, cat_name, img_id)
x.append(load_image(expanduser(file)))
return x,y
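if __name__ == '__main__':
    # Hedged usage sketch: assumes the office-31 images are present under
    # ~/data/office31 as the loaders above expect.
    xs, ys = load_data('office-31', 'amazon')
    print('{} images, {} classes'.format(len(xs), len(set(ys))))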
| [
"matplotlib"
] |
7732c62c5bd782b17453935c9c78a7def10e4d08 | Python | MartinBCN/FlowerClassification | /flower_classification/flower_inference.py | UTF-8 | 4,810 | 3 | 3 | [] | no_license | from typing import Dict
from PIL import Image
from matplotlib.figure import Figure
from torch import Tensor
import torch.nn as nn
from torchvision.transforms import transforms
import torch
import numpy as np
import matplotlib.pyplot as plt
from flower_classification.flower_training import FlowerTrainer
class FlowerInference(FlowerTrainer):
"""
Inference for Flower Classification Model
"""
transform = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
def __init__(self, model_type: str, num_classes: int):
super(FlowerInference, self).__init__(model_type=model_type, num_classes=num_classes)
self.label_dictionary = None
def set_label_dictionary(self, label_dictionary: Dict[int, str]) -> None:
"""
Add a label dictionary
Parameters
----------
label_dictionary: Dict[int: str]
Returns
-------
None
"""
self.label_dictionary = label_dictionary
    def image_to_label(self, image: Image) -> int:
"""
Calculate the most likely label for a Pillow image
Parameters
----------
image: Image
Returns
-------
        int
            Index of the predicted class (map it to a name via ``label_dictionary``)
"""
image = self.transform(image)
return self.tensor_to_label(image)
    def tensor_to_label(self, image: Tensor) -> int:
"""
Calculate the most likely label for a Tensor
Parameters
----------
image: Tensor
Returns
-------
        int
            Index of the predicted class
"""
self.model.eval()
image = image.to(self.device)
image = image.unsqueeze(0)
outputs = self.model(image)
batch_predicted_labels = outputs.detach().cpu().numpy()
batch_predicted_labels = np.argmax(batch_predicted_labels, axis=1)
return batch_predicted_labels[0]
def image_to_probability(self, image: Image, num_results: int = 5) -> Dict[str, float]:
"""
Calculate the probabilities for the different labels and return the five most likely ones
Parameters
----------
image: Image
num_results: int, default = 5
Returns
-------
Dict[str, float]
Dictionary of the form {name: probability}
"""
image = self.transform(image)
return self.tensor_to_probability(image, num_results)
def tensor_to_probability(self, image: Tensor, num_results: int = 5) -> Dict[str, float]:
"""
Calculate the probabilities for the different labels and return the five most likely ones
Parameters
----------
image: Tensor
num_results: int, default = 5
Returns
-------
Dict[str, float]
Dictionary of the form {name: probability}
"""
self.model.eval()
image = image.to(self.device)
image = image.unsqueeze(0)
prediction = self.model(image)
softmax = nn.Softmax(dim=1)
prediction = softmax(prediction)
top_predictions = torch.topk(prediction, num_results)
top_indices = top_predictions.indices.detach().cpu().numpy()[0]
top_values = top_predictions.values.detach().cpu().numpy()[0]
if self.label_dictionary is None:
prediction = {value: idx + 1 for idx, value in zip(top_indices, top_values)}
else:
prediction = {value: self.label_dictionary[f'{idx + 1}'] for idx, value in zip(top_indices, top_values)}
return prediction
def plot_topk(self, image: Image, true_label: int = None) -> Figure:
"""
Create a Figure of the image together with a visualisation of the most likely predictions
Parameters
----------
image: Image
true_label: int, default = None
Returns
-------
Figure
"""
predictions = self.image_to_probability(image)
fig, axes = plt.subplots(1, 2, figsize=(20, 10))
axes[0].imshow(image)
# Example data
names = tuple(predictions.values())
y_pos = np.arange(len(names))
probabilities = list(predictions.keys())
axes[1].barh(y_pos, probabilities, align='center')
axes[1].set_yticks(y_pos)
axes[1].set_yticklabels(names)
axes[1].invert_yaxis() # labels read top-to-bottom
axes[1].set_xlabel('Probability')
if (true_label is not None) and (self.label_dictionary is not None):
axes[0].set_title(f'True class: {self.label_dictionary[str(true_label)]}')
return fig
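# Usage sketch (hedged: 'resnet50', the label file and the image path are
# placeholders; model and weight loading are handled by the FlowerTrainer base
# class and are not shown here):
#
#   inference = FlowerInference(model_type='resnet50', num_classes=102)
#   inference.set_label_dictionary(json.load(open('cat_to_name.json')))
#   fig = inference.plot_topk(Image.open('example_flower.jpg'))
#   fig.savefig('topk_prediction.png')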
| [
"matplotlib"
] |
14da6dd74833a472937873fa01e210289e329d0d | Python | xiaoyiou/histone | /vis.py | UTF-8 | 1,929 | 2.84375 | 3 | [] | no_license | # This module is used to visualize the results from rule learning
# algorithms
import matplotlib.pyplot as plt
from sklearn.metrics import precision_recall_curve as pr
from sklearn.metrics import average_precision_score as apr
from sklearn import metrics
import seaborn as sns
import pandas as pd
def prCurve(y_test,y_score,ratios,classes):
"""
    :param y_test: list of binary ground-truth label arrays, one per class
    :param y_score: list of score arrays, one per class
    :param ratios: unused
    :param classes: list of class names
"""
N = len(y_test)
plt.figure()
for i in range(N):
y = y_test[i]
yy = y_score[i]
name = classes[i]
prec, recall,_ = pr(y,yy)
avpr = apr(y,yy)
plt.plot(recall,prec,label='%s: APR=%.2f'%(name,avpr))
plt.xlim([0,1])
plt.ylim([0,1.05])
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.title("PR curve for multi-class")
plt.legend(loc='top right')
plt.show()
def roc(fprs,tprs,names):
"""
    Plot ROC curves, one per entry in names,
    using matplotlib
"""
for i in range(len(names)):
auc = metrics.auc(fprs[i],tprs[i])
plt.plot(fprs[i], tprs[i], label=\
'%dth iter %s with AUC=%.2f'%(i,names[i],auc))
plt.plot([0, 1], [0, 1], 'k--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic example')
plt.legend(loc="upper left")
plt.show()
def visPtns(scores,binary,glst,target,step=100):
temp = scores.ix[glst].sort_values(by=target,ascending=False).index
data = pd.DataFrame(columns=binary.columns)
i = 0
N = temp.shape[0]
nData = binary.ix[temp]
while i<N:
x = nData.iloc[range(i,min(i+step,N))].mean()
data.ix[str(i)]=x
i+=step
plt.clf()
sns.heatmap(data)
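def _demo_prCurve():
    # Illustrative sketch only (synthetic data, not part of the original
    # analysis pipeline): two fake classifiers scored on random binary labels.
    import numpy as np
    rng = np.random.RandomState(0)
    y = [rng.randint(0, 2, 200) for _ in range(2)]
    scores = [yy * 0.5 + rng.rand(200) * 0.5 for yy in y]
    prCurve(y, scores, None, ['clsA', 'clsB'])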
| [
"matplotlib",
"seaborn"
] |
112a2f056c34e5a0de528fc7bf71de9a11ac1107 | Python | lichencan/pyhrf | /python/pyhrf/ndarray.py | UTF-8 | 75,647 | 2.6875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
This module provides classes and functions to handle multi-dimensionnal numpy
array (ndarray) objects and extend them with some semantics (axes labels and
axes domains). See xndarray class.
(TODO: make xndarray inherit numpy.ndarray?)
"""
import os.path as op
import numpy as np
import logging
from pprint import pformat
from pkg_resources import parse_version
import pyhrf
from pyhrf.tools import (treeBranches, rescale_values, has_ext, tree_items,
html_cell, html_list_to_row, html_row, html_table,
html_img, html_div)
from collections import OrderedDict
logger = logging.getLogger(__name__)
debug = False
MRI3Daxes = ['sagittal', 'coronal', 'axial']
MRI4Daxes = MRI3Daxes + ['time']
TIME_AXIS = 3
class ArrayMappingError(Exception):
pass
class xndarray:
""" Handles a multidimensional numpy array with axes that are labeled and mapped to domain values.
Examples
--------
>>> c = xndarray( [ [4,5,6],[8,10,12] ], ['time','position'], {'time':[0.1,0.2]} )
Will represent the following situation:
.. code::
position
------->
4 5 6 | t=0.1 |time
8 10 12 | t=0.2 v
"""
def __init__(self, narray, axes_names=None, axes_domains=None,
value_label="value", meta_data=None):
"""
Initialize a new xndarray object from 'narray' with 'axes_names'
as axes labels and 'axes_domains' as domains mapped to integer slices.
Args:
- narray (numpy.ndarray): the wrapped numpy array
- axes_names (listof str):
labels of array axes
if None: then axes_names = [\"dim0\", \"dim1\", ...]
- axes_domains (dictof <str>:<numpy array>):
domains associated to axes.
If a domain is not specified then it defaults to
range(size(axis))
Return: a xndarray instance
"""
logger.debug('xndarray.__init__ ...')
narray = np.asarray(narray)
self.data = narray
self.value_label = value_label
self.meta_data = meta_data
self.has_deprecated_xml_header = True
nbDims = self.data.ndim
if axes_names is None:
self.axes_names = ['dim' + str(i) for i in xrange(nbDims)]
else:
assert type(axes_names) == list
if len(axes_names) != nbDims:
raise Exception("length of axes_names (%d) is different "
"from nb of dimensions (%d).\n"
"Got axes names: %s"
% (len(axes_names), nbDims, str(axes_names)))
self.axes_names = axes_names[:]
self.axes_ids = dict([(self.axes_names[i], i) for i in xrange(nbDims)])
# By default: domain of axis = array of slice indexes
sh = self.data.shape
self.axes_domains = dict([(axis, np.arange(sh[i]))
for i, axis in enumerate(self.axes_names)])
if axes_domains is not None:
assert isinstance(axes_domains, dict)
for an, dom in axes_domains.iteritems():
if an not in self.axes_names:
raise Exception('Axis "%s" defined in domains not '
'found in axes (%s)'
% (an, ','.join(self.axes_names)))
ia = self.axes_names.index(an)
l = self.data.shape[ia]
if len(dom) != l:
raise Exception('Length of domain for axis "%s" (%d) '
'does not match length of data '
'axis %d (%d) ' % (an, len(dom), ia, l))
if len(set(dom)) != len(dom):
raise Exception('Domain of axis "%s" does not contain '
'unique values' % an)
axes_domains[an] = np.asarray(dom)
self.axes_domains.update(axes_domains)
logger.debug('Axes names: %s', str(self.axes_names))
logger.debug('Axes domains: %s', str(self.axes_domains))
@staticmethod
def xndarray_like(c, data=None):
"""
Return a new cuboid from data with axes, domains and value label
copied from 'c'. If 'data' is provided then set it as new cuboid's data,
else a zero array like c.data is used.
TODO: test
"""
if data is None:
data = np.zeros_like(c.data)
return xndarray(data, c.axes_names, c.axes_domains.copy(),
c.value_label, c.meta_data)
def get_axes_ids(self, axes_names):
""" Return the index of all axes in given axes_names
"""
assert set(axes_names).issubset(self.axes_names)
return [self.get_axis_id(an) for an in axes_names]
def len(self, axis):
return self.data.shape[self.get_axis_id(axis)]
def __repr__(self):
return 'axes: ' + str(self.axes_names) + ', ' + repr(self.data)
def get_axis_id(self, axis_name):
""" Return the id of an axis from the given name.
"""
logger.debug('core.cuboid ... getting id of %s', axis_name)
logger.debug('from : %s', str(self.axes_ids))
if isinstance(axis_name, str): # axis_name is a string axis name
if axis_name in self.axes_ids.keys():
return self.axes_ids[axis_name]
else:
return None
else: # axis_name is an integer axis index
if axis_name >= 0 and axis_name < self.get_ndims():
return axis_name
else:
return None
# get_axis_id
def set_axis_domain(self, axis_id, domain):
"""
Set the value domain mapped to *axis_id* as *domain*
Args:
- axis_id (str): label of the axis
- domain (numpy.ndarray): value domain
Return:
None
"""
assert axis_id in self.axes_domains
if axis_id is not None:
logger.debug('setting domain of axis %s with %s', str(axis_id),
str(domain))
if len(domain) != self.data.shape[axis_id]:
raise Exception('length of domain values (%d) does not '
' match length of data (%d) for axis %s'
% (len(domain), self.data.shape[axis_id],
self.get_axis_name(axis_id)))
self.axes_domains[axis_id] = np.array(domain)
# set_axis_domain
def get_domain(self, axis_id):
"""Return the domain of the axis `axis_id`
Examples
--------
>>> from pyhrf.ndarray import xndarray
>>> c = xndarray(np.random.randn(10,2), axes_names=['x','y'], \
axes_domains={'y' : ['plop','plip']})
>>> (c.get_domain('y') == np.array(['plop', 'plip'], dtype='|S4')).all()
True
>>> c.get_domain('x') #default domain made of slice indexes
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
if axis_id in self.axes_domains:
return self.axes_domains[axis_id]
else:
raise Exception('Unknow axis %s' % axis_id)
# get_domain
def swapaxes(self, a1, a2):
"""
Swap axes *a1* and *a2*
Args:
- a1 (str|int): identifier of the 1st axis
- a2 (str|int): identifier of the 2nd axis
Return:
A new cuboid wrapping a swapped view of the numpy array
"""
an = self.axes_names[:]
ia1, ia2 = self.get_axis_id(a1), self.get_axis_id(a2)
an[ia2], an[ia1] = an[ia1], an[ia2]
return xndarray(np.swapaxes(self.data, ia1, ia2), an, self.axes_domains,
self.value_label, self.meta_data)
def roll(self, axis, pos=-1):
"""
Roll xndarray by making 'axis' the last axis.
'pos' is either 0 or -1 (first or last, respectively)
TODO: handle all pos.
"""
i = self.get_axis_id(axis)
if (i == 0 and pos == 0) or (i == self.get_ndims() - 1 and pos == -1):
return self
if pos == 0:
raise NotImplementedError(' pos=0 not coded yet. TODO')
self.data = np.rollaxis(self.data, i, len(self.axes_names))
self.axes_names = self.axes_names[:i] + self.axes_names[i + 1:] + \
[axis]
self.axes_ids = dict([(a, i) for i, a in enumerate(self.axes_names)])
return self
def repeat(self, n, new_axis, domain=None):
"""
Return a new cuboid with self's data repeated 'n' times along a new
axis labelled 'new_axis'. Associated 'domain' can be provided.
"""
if new_axis is None:
new_axis = self.get_new_axis_name()
return stack_cuboids([self.copy() for i in xrange(n)], new_axis, domain)
def squeeze_all_but(self, axes):
to_squeeze = [a for i, a in enumerate(self.axes_names)
if self.data.shape[i] == 1 and a not in axes]
if len(to_squeeze) == 0:
return self
else:
return self.squeeze(axis=to_squeeze)
def squeeze(self, axis=None):
"""
Remove all dims which have length=1.
'axis' selects a subset of the single-dimensional axes.
"""
# print 'input axis:', axis
sh = self.data.shape
if axis is None:
axis = [a for i, a in enumerate(self.axes_names) if sh[i] == 1]
else:
assert self.has_axes(axis)
ssh = np.array([sh[self.get_axis_id(a)] for a in axis])
if (ssh != 1).all():
raise Exception('Subset axes to squeeze (%s) '
'are not all one-length: %s'
% (str(axis), str(ssh)))
axis_ids = tuple(self.get_axis_id(a) for a in axis)
# print 'axes to squeeze', axis
# print 'ids :', axis_ids
# select axes to keep:
axes_names = [a for a in self.axes_names if a not in axis]
axes_domains = dict((a, self.axes_domains[a]) for a in axes_names)
if parse_version(np.__version__) >= parse_version('1.7'):
data = self.data.squeeze(axis=axis_ids)
else:
sm = [':'] * len(sh)
for i in axis_ids:
sm[i] = '0'
# print 'sm:', sm
data = eval('self.data[%s]' % ','.join(sm))
return xndarray(data, axes_names, axes_domains,
self.value_label, self.meta_data)
def descrip(self):
""" Return a printable string describing the cuboid.
"""
s = ''
s += '* shape : %s\n' % str(self.data.shape)
s += '* dtype : %s\n' % str(self.data.dtype)
s += '* orientation: %s\n' % str(self.axes_names)
s += '* value label: %s\n' % self.value_label
s += '* axes domains:\n'
for dname in self.axes_names:
dvalues = self.axes_domains[dname]
if isinstance(dvalues, np.ndarray) and \
not np.issubdtype(dvalues.dtype, str) and \
not np.issubdtype(dvalues.dtype, unicode) and \
not dvalues.dtype == bool and \
not dvalues.dtype == np.bool_ and \
len(dvalues) > 1:
delta = np.diff(dvalues)
if (delta == delta[0]).all():
s += " '%s': " % dname + 'arange(%s,%s,%s)\n' \
% (str(dvalues[0]), str(dvalues[-1]), str(delta[0]))
else:
s += " '%s': " % dname + pformat(dvalues) + '\n'
else:
s += " '%s': " % dname + pformat(dvalues) + '\n'
return s.rstrip('\n')
def _html_table_headers(self, row_axes, col_axes):
"""
Build table row and column headers corresponding to axes in *row_axes* and
*col_axes* respectively. Headers comprises axis names and domain values.
Return:
tuple(list of str, list of str)
-> tuple(list of html code for the row header (without <tr> tags),
list of html code for the col header (with <tr> tags))
"""
dsh = self.get_dshape()
        nb_blank_cols = len(row_axes) * 2  # nb of blank cols prepended to
# each line of the column header
nb_rows = int(np.prod([dsh[a] for a in row_axes]))
nb_cols = int(np.prod([dsh[a] for a in col_axes]))
# col header
if nb_blank_cols > 0:
blank_cells = ['']
blank_cells_attrs = [{'colspan': str(nb_blank_cols)}]
else:
blank_cells = []
blank_cells_attrs = []
col_header = []
nb_repets = 1
span = nb_cols
for a in col_axes:
dom = [str(v)
for v in self.get_domain(a)] # TODO: better dv format
span /= len(dom)
# row showing the axis label
col_header.append(html_list_to_row(blank_cells + [a], 'h',
blank_cells_attrs +
[{'colspan': nb_cols}]))
# row showing domain values
col_header.append(html_list_to_row(blank_cells + dom * nb_repets, 'h',
blank_cells_attrs +
[{'colspan': str(span)}] *
len(dom) * nb_repets))
nb_repets *= len(dom)
# row header
# initialization of all rows because row filling wont be sequential:
row_header = [[] for i in range(nb_rows)]
nb_repets = 1
span = nb_rows
for a in row_axes:
# 1st row contains all axis labels:
row_header[0].append(html_cell(html_div(a, {'class': 'rotate'}), 'h',
{'rowspan': nb_rows}))
# dispatch domain values across corresponding rows:
dom = [str(v)
for v in self.get_domain(a)] # TODO: better dv format
span /= len(dom)
for idv, dv in enumerate(dom * nb_repets):
row_header[
idv * span].append(html_cell(dv, 'h', {'rowspan': span}))
nb_repets *= len(dom)
return [''.join(r) for r in row_header], col_header
def to_html_table(self, row_axes, col_axes, inner_axes, cell_format='txt',
plot_dir=None, rel_plot_dir=None, plot_fig_prefix='xarray_',
plot_style='image', plot_args=None, tooltip=False,
border=None):
"""
Render the array as an html table whose column headers correspond
to domain values and axis names defined by *col_axes*, row headers
defined by *row_axes* and inner cell axes defined by *inner_axes*
Data within a cell can be render as text or as a plot figure (image
files are produced)
        Args:
            - row_axes (list of str): axes mapped to table row headers
            - col_axes (list of str): axes mapped to table column headers
            - inner_axes (list of str): axes rendered inside each cell
            - cell_format (str): 'txt' or 'plot'
            - plot_dir (str): where figure files are written (default: pyhrf tmp path)
            - rel_plot_dir (str): figure path used in the html <img> tags
            - plot_fig_prefix (str): prefix of generated figure file names
            - plot_style (str): 'image' for heatmap-like rendering, otherwise curve plot
            - plot_args (dict): extra arguments passed to the plot function
            - tooltip (bool): add a per-cell tooltip describing the slice
            - border (str): html table border attribute
        Return:
            html code (str)
"""
import matplotlib.pyplot as plt
import pyhrf.plot as pplot
plot_dir = plot_dir or pyhrf.get_tmp_path()
outer_axes = row_axes + col_axes
plot_args = plot_args or {}
norm = plt.Normalize(self.min(), self.max())
def protect_html_fn(fn):
base, ext = op.splitext(fn)
return base.replace('.', '-') + ext
def format_cell(slice_info, cell_val):
attrs = {}
if tooltip:
stooltip = '|| '.join(['%s=%s' % (a, str(s))
for a, s in zip(outer_axes, slice_info)])
attrs['title'] = stooltip
if cell_format == 'txt':
return html_cell(str(cell_val), attrs=attrs)
elif cell_format == 'plot':
# Forge figure filename
suffix = '_'.join(['%s_%s' % (a, str(s))
for a, s in zip(outer_axes, slice_info)])
fig_fn = op.join(plot_dir, plot_fig_prefix + suffix + '.png')
fig_fn = protect_html_fn(fig_fn)
# Render figure
# TODO: expose these parameters
plt.figure(figsize=(4, 3), dpi=40)
if plot_style == 'image':
pplot.plot_cub_as_image(cell_val, norm=norm, **plot_args)
else:
pplot.plot_cub_as_curve(cell_val, **plot_args)
plt.savefig(fig_fn)
# Create html code
html_fig_fn = fig_fn
if rel_plot_dir is not None:
html_fig_fn = op.join(rel_plot_dir, op.basename(fig_fn))
return html_cell(html_img(html_fig_fn), attrs=attrs)
else:
                raise Exception('Wrong cell_format "%s"' % cell_format)
logger.info('Generate html table headers ...')
row_header, col_header = self._html_table_headers(row_axes, col_axes)
logger.info('Convert xarray to tree ...')
cell_vals = tree_items(self.to_tree(row_axes + col_axes, inner_axes))
dsh = self.get_dshape()
nb_cols = int(np.prod([dsh[a] for a in col_axes]))
content = []
logger.info('Generate cells ...')
for i, r in enumerate(row_header):
# at each row, concatenate row header + table content
content += html_row(r + ''.join([format_cell(*cell_vals.next())
for c in range(nb_cols)]))
return html_table(''.join(col_header + content), border=border)
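    # Usage sketch for to_html_table (hedged: the axis names below are
    # placeholders and must exist in the array):
    #   html = carray.to_html_table(row_axes=['condition'], col_axes=['time'],
    #                               inner_axes=[], cell_format='txt')
    #   open('table.html', 'w').write(html)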
def _combine_domains(self, axes):
"""
Hierarchically combine domains of axes
"""
def stack_dvalues(prev, x):
if 0:
print 'stack_dvalues ...'
print 'prev:', prev
print 'x:', x
res = prev + [list(np.tile(x, len(prev) == 0 or len(prev[-1])))]
if 0:
print 'result:', res
print ''
return res
return reduce(stack_dvalues, [self.axes_domains[a] for a in axes], [])
def to_latex(self, row_axes=None, col_axes=None, inner_axes=None,
inner_separator=' | ', header_styles=None, hval_fmt=None,
val_fmt='%1.2f', col_align=None):
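        """
        Render the array as a LaTeX tabular (description inferred from the code
        below, so treat it as a sketch): *row_axes* and *col_axes* define the
        row/column header levels, values along *inner_axes* are joined inside
        each cell with *inner_separator*, and *header_styles* / *col_align*
        tune per-axis header rendering and column alignment. The output relies
        on the LaTeX packages multirow and graphicx (for rotated labels).
        """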
def multicol(n, s, align='c'):
return '\\multicolumn{%d}{%s}{%s}' % (n, align, s)
def multirow(n, s, position='*'):
return '\\multirow{%d}{%s}{%s}' % (n, position, s)
if hval_fmt is None:
hval_fmt = {}
for a in self.axes_names:
if np.issubdtype(self.axes_domains[a].dtype, float):
fmt = '%1.1f'
elif np.issubdtype(self.axes_domains[a].dtype, int):
fmt = '%d'
elif np.issubdtype(self.axes_domains[a].dtype, str):
fmt = '%s'
else:
fmt = val_fmt
hval_fmt[a] = fmt
if row_axes is None:
row_axes = []
if col_axes is None:
col_axes = []
if inner_axes is None:
inner_axes = []
if header_styles is None:
header_styles = {}
hstyles = dict((a, ['split']) for a in self.axes_names)
hstyles.update(header_styles)
logger.debug('hstyles: %s', str(hstyles))
if col_align is None:
col_align = {}
calign = dict((a, 'c') for a in self.axes_names)
calign.update(col_align)
def fmt_val(val):
if isinstance(val, str):
return val
else:
return val_fmt % val
def fmt_hval(val, axis):
if isinstance(val, str):
return val
else:
return hval_fmt[axis] % val
def fmt_hcell(axis, val):
style = hstyles[axis]
# print 'fmt_hcell for axis %s (style=%s), val=' %(axis,style), val
if ('split' in style) or ('hide_name' in style):
r = fmt_hval(val, axis)
elif 'join' in style:
r = a + "=" + fmt_hval(val, axis)
if 'vertical' in style:
return r'\rotatebox{90}{{%s}}' % r
else:
return r
table_line_end = '\\\\\n'
c_to_print = self.reorient(row_axes + col_axes + inner_axes)
dsh = c_to_print.get_dshape()
data = c_to_print.data.reshape(int(np.prod([dsh[a] for a in row_axes])),
int(np.prod([dsh[a]
for a in col_axes])),
int(np.prod([dsh[a] for a in inner_axes])))
#data = np.atleast_2d(self.unstack(row_axes + col_axes, inner_axes).data)
nb_rows, nb_cols = data.shape[:2]
doms_for_col_header = self._combine_domains(col_axes)
# print 'doms_for_col_header:'
# print doms_for_col_header
if len(row_axes) > 0:
doms_for_row_header = self._combine_domains(row_axes)
# print 'doms_for_row_header:'
# print doms_for_row_header
assert len(doms_for_row_header[-1]) == nb_rows
row_header = [[] for i in range(nb_rows)]
for a, d in zip(row_axes, doms_for_row_header):
# print 'a:', a
# print 'd:', d
if 'split' in hstyles[a]:
row_header[0].append(multirow(nb_rows, a))
for i in range(1, nb_rows):
row_header[i].append('')
len_d = len(d)
# print 'row_header @start:'
# print row_header
# print ''
# print 'loop over lines ...'
j = 0
for i in range(nb_rows):
# print 'i=', i
if i % (nb_rows / len_d) == 0:
if nb_rows / len_d > 1:
row_header[i].append(multirow(nb_rows / len_d,
fmt_hcell(a, d[j])))
else:
row_header[i].append(fmt_hcell(a, d[j]))
j += 1
else:
row_header[i].extend([''])
# print 'row_header:'
# print row_header
# print ''
for irow, row in enumerate(row_header):
# print 'row:', row
row_header[irow] = ' & '.join(row) + ' & '
else:
row_header = [''] * nb_rows
assert len(doms_for_col_header[-1]) == nb_cols
hprefix = ''.join([['&', '&&']['split' in hstyles[a]]
for a in row_axes])
header = ''
for a, d in zip(col_axes[:-1], doms_for_col_header[:-1]):
# print 'format header for axis', a
if 'split' in hstyles[a]:
header += hprefix + multicol(nb_cols, a) + table_line_end
header += hprefix + ' & '.join([multicol(nb_cols / len(d),
fmt_hcell(a, e))
for e in d]) + table_line_end
a, d = col_axes[-1], doms_for_col_header[-1]
if 'split' in hstyles[col_axes[-1]]:
header += hprefix + \
multicol(nb_cols, col_axes[-1]) + table_line_end
header += hprefix + ' & '.join([fmt_hcell(a, e) for e in d]) + \
table_line_end
# nb_cols = len(self.axes_domains.get(col_axes[-1], '1'))
all_col_align = [calign[col_axes[-1]]] * nb_cols
for a, d in zip(col_axes[:-1], doms_for_col_header[:-1]):
len_d = len(d)
# print 'a:', a
# print 'd:', d
# print 'range(nb_cols/len_d, nb_cols, len_d):', \
# range(nb_cols/len_d-1, nb_cols, nb_cols/len_d-1)
for i in range(nb_cols / len_d - 1, nb_cols, nb_cols / len_d):
all_col_align[i] = calign[a]
table_align = ' '.join([['c', 'c c']['split' in hstyles[a]]
for a in row_axes] +
['|'] * (len(row_axes) > 0) +
all_col_align)
s = '\\begin{tabular}{%s}\n' % table_align
s += header
def fmt_cell(c):
if isinstance(c, xndarray):
return inner_separator.join(map(fmt_val, c.data))
elif isinstance(c, np.ndarray):
return inner_separator.join(map(fmt_val, c))
else: # scalar
return fmt_val(c)
s += table_line_end.join([lh + " & ".join(map(fmt_cell, l))
for lh, l in zip(row_header, data)]) + \
table_line_end
s += '\\end{tabular}'
return s
def get_orientation(self):
return self.axes_names
def set_MRI_orientation(self):
""" Set orientation to sagittal,coronal,axial,[time|iteration|condition]
Priority for the 4th axis: time > condition > iteration.
The remaining axes are sorted in alphatical order
"""
if self.has_axes(MRI3Daxes):
orientation = MRI3Daxes[:]
if self.has_axis('time'):
orientation += ['time']
if self.has_axis('iteration'):
orientation += ['iteration']
if self.has_axis('condition'):
orientation += ['condition']
orientation += sorted(set(self.axes_names).difference(orientation))
self.set_orientation(orientation)
def set_orientation(self, axes):
""" Set the cuboid orientation (inplace) according to input axes labels
"""
if debug:
logger.debug('set_orientation ...')
logger.debug('%s -> %s', str(self.axes_names), str(axes))
if set(axes) != set(self.axes_names):
raise Exception('Required orientation %s does not contain '
'all axes %s' % (str(axes), str(self.axes_names)))
if axes == self.axes_names: # already in the asked orientation
return
for i, axis in enumerate(axes):
logger.debug('Rolling axis %s, cur pos=%d -> dest pos=%d',
axis, self.axes_names.index(axis), i)
logger.debug('Shape: %s', str(self.data.shape))
cur_i = self.axes_names.index(axis)
self.data = np.rollaxis(self.data, cur_i, i)
self.axes_names.pop(cur_i)
self.axes_names.insert(i, axis)
logger.debug('After rolling. Shape: %s, new axes: %s',
str(self.data.shape), str(self.axes_names))
logger.debug('')
self.axes_ids = dict([(a, i) for i, a in enumerate(self.axes_names)])
def reorient(self, orientation):
""" Return a cuboid with new orientation.
If cuboid is already in the right orientation, then return the current
cuboid. Else, create a new one.
"""
if orientation == self.axes_names:
return self
else:
new_c = self.copy()
new_c.set_orientation(orientation)
return new_c
def cexpand(self, cmask, axis, dest=None):
""" Same as expand but mask is a cuboid
TODO: + unit test
"""
return self.expand(cmask.data, axis, cmask.axes_names,
cmask.axes_domains, dest=dest)
def expand(self, mask, axis, target_axes=None, target_domains=None, dest=None, do_checks=True, m=None):
""" Create a new xndarray instance (or store into an existing `dest` cuboid) where `axis` is expanded and
values are mapped according to `mask`.
- `target_axes` is a list of the names of the new axes replacing `axis`.
- `target_domains` is a dict of domains for the new axes.
Examples
--------
>>> import numpy as np
>>> from pyhrf.ndarray import xndarray
>>> c_flat = xndarray(np.arange(2*6).reshape(2,6).astype(np.int64), \
['condition', 'voxel'], \
{'condition' : ['audio','video']})
>>> print c_flat.descrip() # doctest: +NORMALIZE_WHITESPACE
* shape : (2, 6)
* dtype : int64
* orientation: ['condition', 'voxel']
* value label: value
* axes domains:
'condition': array(['audio', 'video'],
dtype='|S5')
'voxel': arange(0,5,1)
>>> mask = np.zeros((4,4,4), dtype=int)
>>> mask[:3,:2,0] = 1
>>> c_expanded = c_flat.expand(mask, 'voxel', ['x','y','z'])
>>> print c_expanded.descrip() # doctest: +NORMALIZE_WHITESPACE
* shape : (2, 4, 4, 4)
* dtype : int64
* orientation: ['condition', 'x', 'y', 'z']
* value label: value
* axes domains:
'condition': array(['audio', 'video'],
dtype='|S5')
'x': arange(0,3,1)
'y': arange(0,3,1)
'z': arange(0,3,1)
"""
logger.debug('expand ... mask: %s -> region size=%d, '
'axis: %s, target_axes: %s, target_domains: %s',
str(mask.shape), mask.sum(dtype=int), axis,
str(target_axes), str(target_domains))
if do_checks:
if not ((mask.min() == 0 and mask.max() == 1) or
(mask.min() == 1 and mask.max() == 1) or
(mask.min() == 0 and mask.max() == 0)):
raise Exception("Input mask is not binary (%s)"
% str(np.unique(mask)))
if axis not in self.axes_names:
raise Exception('Axes %s not found in cuboid\'s axes.' % axis)
if target_axes is None:
target_axes = []
for i in xrange(mask.ndim):
d = 0
while 'dim%d' % d in self.axes_names + target_axes:
d += 1
target_axes.append('dim%d' % d)
else:
target_axes = target_axes
logger.debug('target_axes: %s', str(target_axes))
if do_checks and len(target_axes) != 1 and \
len(set(target_axes).intersection(self.axes_names)) != 0:
# if len(target_axes) == 1 & target_axes[0] already in current axes
# -> OK, axis is mapped to itself.
raise Exception('Error while expanding xndarray, intersection btwn'
' targer axes (%s) and current axes (%s) is '
'not empty.'
% (str(target_axes), str(self.axes_names)))
assert len(target_axes) == mask.ndim
if target_domains is None:
target_domains = dict([(a, range(mask.shape[i]))
for i, a in enumerate(target_axes)])
assert set(target_domains.keys()).issubset(target_axes)
# target_domains = [target_domains.get(a,range(mask.shape[i])) \
# for i,a in enumerate(target_axes)]
logger.debug('target_domains: %s', str(target_domains))
assert len(target_domains) == len(target_axes)
flat_axis_idx = self.get_axis_id(axis)
if dest is not None:
dest_data = dest.data
else:
dest_data = None
new_data = expand_array_in_mask(self.data, mask,
flat_axis=flat_axis_idx, dest=dest_data,
m=m)
new_axes = self.axes_names[:flat_axis_idx] + target_axes + \
self.axes_names[flat_axis_idx + 1:]
new_domains = self.axes_domains.copy()
new_domains.pop(axis)
new_domains.update(target_domains)
# new_domains = dict([(new_axes[i], new_domains[i]) \
# for i in xrange(len(new_axes))])
return xndarray(new_data, new_axes, new_domains, self.value_label,
meta_data=self.meta_data)
def map_onto(self, xmapping):
"""Reshape the array by mapping the axis corresponding to xmapping.value_label onto the shape of xmapping.
Parameters
----------
xmapping : xndarray
array whose attribute value_label matches an axis of the current array
Returns
-------
a new array (xndarray) where values from the current array have been mapped according to xmapping
Examples
--------
>>> from pyhrf.ndarray import xndarray
>>> import numpy as np
>>> # data with a region axis:
>>> data = xndarray(np.arange(2*4).reshape(2,4).T * .1, \
['time', 'region'], \
{'time':np.arange(4)*.5, 'region':[2, 6]})
>>> data
axes: ['time', 'region'], array([[ 0. , 0.4],
[ 0.1, 0.5],
[ 0.2, 0.6],
[ 0.3, 0.7]])
>>> # 2D spatial mask of regions:
>>> region_map = xndarray(np.array([[2,2,2,6], [6,6,6,0], [6,6,0,0]]), \
['x','y'], value_label='region')
>>> # expand region-specific data into region mask
>>> # (duplicate values)
>>> data.map_onto(region_map)
axes: ['x', 'y', 'time'], array([[[ 0. , 0.1, 0.2, 0.3],
[ 0. , 0.1, 0.2, 0.3],
[ 0. , 0.1, 0.2, 0.3],
[ 0.4, 0.5, 0.6, 0.7]],
<BLANKLINE>
[[ 0.4, 0.5, 0.6, 0.7],
[ 0.4, 0.5, 0.6, 0.7],
[ 0.4, 0.5, 0.6, 0.7],
[ 0. , 0. , 0. , 0. ]],
<BLANKLINE>
[[ 0.4, 0.5, 0.6, 0.7],
[ 0.4, 0.5, 0.6, 0.7],
[ 0. , 0. , 0. , 0. ],
[ 0. , 0. , 0. , 0. ]]])
"""
mapped_axis = xmapping.value_label
if not self.has_axis(mapped_axis):
raise ArrayMappingError('Value label "%s" of xmapping not found '
'in array axes (%s)'
% (mapped_axis,
', '.join(self.axes_names)))
if not set(xmapping.data.flat).issuperset(self.get_domain(mapped_axis)):
raise ArrayMappingError('Domain of axis "%s" to be mapped is not a '
'subset of values in the mapping array.'
% mapped_axis)
dest = None
for mval in self.get_domain(mapped_axis):
sub_a = self.sub_cuboid(**{mapped_axis: mval})
sub_mapping = self.xndarray_like(
xmapping, data=xmapping.data == mval)
rsub_a = sub_a.repeat(sub_mapping.sum(), '__mapped_axis__')
dest = rsub_a.cexpand(sub_mapping, '__mapped_axis__', dest=dest)
return dest
def flatten(self, mask, axes, new_axis):
""" flatten cudoid.
TODO: +unit test
"""
if not set(axes).issubset(self.axes_names):
raise Exception('Axes to flat (%s) are not a subset of '
'current axes (%s)'
% (str(axes), str(self.axes_names)))
m = np.where(mask)
# print 'mask:', mask.sum()
# print 'flat_data:', flat_data.shape
sm = [':'] * self.data.ndim
for i, a in enumerate(axes):
j = self.get_axis_id(a)
sm[j] = 'm[%d]' % i
new_axes = [a for a in self.axes_names if a not in axes]
new_axes.insert(self.get_axis_id(axes[0]), new_axis)
flat_data = eval('self.data[%s]' % ','.join(sm))
new_domains = dict((a, self.axes_domains[a])
for a in new_axes if a != new_axis)
return xndarray(flat_data, new_axes, new_domains, self.value_label,
self.meta_data)
def explode_a(self, mask, axes, new_axis):
"""
        Explode array according to given n-ary *mask* so that *axes* are flattened
into *new_axis*.
Args:
- mask (numpy.ndarray[int]): n-ary mask that defines "regions" used
to split data
- axes (list of str): list of axes in the current object that are
mapped onto the mask
- new_axis (str): target flat axis
Return:
dict of xndarray that maps a mask value to a xndarray.
"""
return dict((i, self.flatten(mask == i, axes, new_axis))
for i in np.unique(mask))
def explode(self, cmask, new_axis='position'):
"""
        Explode array according to the given n-ary *mask* so that axes matching
        those of *mask* are flattened into *new_axis*.
Args:
- mask (xndarray[int]): n-ary mask that defines "regions" used
to split data
- new_axis (str): target flat axis
Return:
dict of xndarray that maps a mask value to a xndarray.
"""
return dict((i, self.flatten(cmask.data == i, cmask.axes_names, new_axis))
for i in np.unique(cmask.data))
def cflatten(self, cmask, new_axis):
return self.flatten(cmask.data, cmask.axes_names, new_axis)
def split(self, axis):
""" Split a cuboid along given axis.
Return an OrderedDict of cuboids.
"""
if axis not in self.axes_names:
raise Exception('Axis %s not found. Available axes: %s'
% (axis, self.axes_names))
return OrderedDict((dv, self.sub_cuboid(**{axis: dv}))
for dv in self.axes_domains[axis])
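    # e.g. (sketch): one sub-cuboid per domain value of `axis`:
    #   for cond, sub in c.split('condition').iteritems():
    #       print cond, sub.descrip_shape()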
def unstack(self, outer_axes, inner_axes):
"""
Unstack the array along outer_axes and produce a xndarray of xndarrays
Args:
- outer_axes (list of str): list of axis names defining the target
unstacked xndarray
- inner_axes (list of str): list of axis names of any given sub-array
of the target unstacked xndarray
Return:
xndarray object
Example:
>>> from pyhrf.ndarray import xndarray
>>> import numpy as np
>>> c = xndarray(np.arange(4).reshape(2,2), axes_names=['a1','ia'], \
axes_domains={'a1':['out_dv1', 'out_dv2'], \
'ia':['in_dv1', 'in_dv2']})
>>> c.unstack(['a1'], ['ia'])
axes: ['a1'], array([axes: ['ia'], array([0, 1]), axes: ['ia'], array([2, 3])], dtype=object)
"""
def _unstack(xa, ans):
if len(ans) > 0:
return [_unstack(suba, ans[1:])
for suba in xa.split(ans[0]).itervalues()]
else:
return xa
xarray = self.reorient(outer_axes + inner_axes)
return xndarray(_unstack(xarray, outer_axes), axes_names=outer_axes,
axes_domains=dict((a, self.axes_domains[a])
for a in outer_axes))
def to_tree(self, level_axes, leaf_axes):
"""
Convert nested dictionary mapping where each key is a domain value
and each leaf is an array or a scalar value if *leaf_axes* is empty.
Return:
OrderedDict such as:
{dv_axis1 : {dv_axis2 : {... : xndarray|scalar_type}
Example:
>>> from pyhrf.ndarray import xndarray
>>> import numpy as np
>>> c = xndarray(np.arange(4).reshape(2,2), axes_names=['a1','ia'], \
axes_domains={'a1':['out_dv1', 'out_dv2'], \
'ia':['in_dv1', 'in_dv2']})
>>> c.to_tree(['a1'], ['ia'])
OrderedDict([('out_dv1', axes: ['ia'], array([0, 1])), ('out_dv2', axes: ['ia'], array([2, 3]))])
"""
def _to_tree(xa, ans):
if len(ans) != len(leaf_axes):
return OrderedDict((dv, _to_tree(suba, ans[1:]))
for dv, suba in xa.split(ans[0]).iteritems())
else:
return xa
xarray = self.reorient(level_axes + leaf_axes)
return _to_tree(xarray, xarray.axes_names)
def _format_dvalues(self, axis):
if (np.diff(self.axes_domains[axis]) == 1).all():
ndigits = len(str(self.axes_domains[axis].max()))
return [str(d).zfill(ndigits) for d in self.axes_domains[axis]]
else:
return self.axes_domains[axis]
def get_ndims(self):
return self.data.ndim
def get_axes_domains(self):
""" Return domains associated to axes as a dict (axis_name:domain array)
"""
return self.axes_domains
def get_axis_name(self, axis_id):
""" Return the name of an axis from the given index 'axis_id'.
"""
if isinstance(axis_id, str):
if axis_id in self.axes_names:
return axis_id
else:
return None
assert np.isreal(axis_id) and np.round(axis_id) == axis_id
if axis_id >= 0 and axis_id < self.get_ndims():
return self.axes_names[axis_id]
else:
return None
def sub_cuboid(self, orientation=None, **kwargs):
""" Return a sub cuboid. 'kwargs' allows argument in the form:
axis=slice_value.
"""
if not set(kwargs.keys()).issubset(self.axes_names):
raise Exception('Axes to slice (%s) mismatch current axes (%s)'
% (','.join(kwargs.keys()),
','.join(self.axes_names)))
if orientation is not None:
assert set(orientation) == set(self.axes_names).difference(kwargs)
new_kwargs = {}
for axis, i in kwargs.iteritems():
new_kwargs[axis] = self.get_domain_idx(axis, i)
return self.sub_cuboid_from_slices(orientation, **new_kwargs)
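    # e.g. (sketch): slice by domain value rather than by slice index:
    #   sub = c.sub_cuboid(condition='audio')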
def sub_cuboid_from_slices(self, orientation=None, **kwargs):
""" Return a sub cuboid. 'kwargs' allows argument in the form:
axis=slice_index.
"""
mask = [':'] * self.data.ndim
for axis, i in kwargs.iteritems():
mask[self.axes_names.index(axis)] = str(i)
# print 'mask:', mask
sub_data = eval('self.data[%s]' % ','.join(mask))
sub_domains = self.axes_domains.copy()
for a in kwargs:
sub_domains.pop(a)
sub_axes = [a for a in self.axes_names if a not in kwargs]
if orientation is None:
orientation = sub_axes
assert set(orientation) == set(self.axes_names).difference(kwargs)
if self.meta_data is not None:
meta_data = (self.meta_data[0].copy(), self.meta_data[1].copy())
else:
meta_data = None
if np.isscalar(sub_data):
return sub_data
sub_c = xndarray(sub_data, sub_axes, sub_domains, self.value_label,
meta_data)
# set orientation
sub_c.set_orientation(orientation)
return sub_c
def fill(self, c):
"""
"""
sm = []
for a in self.axes_names:
if c.has_axis(a):
sm.append(':')
else:
sm.append('np.newaxis')
self.data[:] = eval('c.data[%s]' % ','.join(sm))
return self
def copy(self, copy_meta_data=False):
""" Return copy of the current cuboid. Domains are copied with a shallow
dictionnary copy.
"""
if self.meta_data is not None:
if copy_meta_data:
new_meta_data = (self.meta_data[0].copy(),
self.meta_data[1].copy())
else:
new_meta_data = self.meta_data
else:
new_meta_data = None
return xndarray(self.data.copy(), self.axes_names[:],
self.axes_domains.copy(),
self.value_label, new_meta_data)
def get_new_axis_name(self):
""" Return an axis label not already in use. Format is: dim%d
"""
i = 0
while 'dim%d' % i in self.axes_names:
i += 1
return 'dim%d' % i
def get_domain_idx(self, axis, value):
""" Get slice index from domain value for axis 'axis'.
"""
if debug:
print 'get_domain_idx ... axis=%s, value=%s' % (axis, str(value))
print 'axes_domains:', self.axes_domains
print 'self.axes_domains[axis]:', self.axes_domains[axis]
print 'self.axes_domains[axis] == value:', \
self.axes_domains[axis] == value
print type(np.where(self.axes_domains[axis] == value)[0][0])
where_res = np.where(self.axes_domains[axis] == value)[0]
if len(where_res) == 0:
raise Exception('Value "%s" not found in domain of axis "%s"'
% (str(value), self.get_axis_name(axis)))
return where_res[0]
def has_axis(self, axis):
return axis in self.axes_names
def has_axes(self, axes):
return set(axes).issubset(self.axes_names)
def __eq__(self, other):
""" Return true if other cuboid contains the same data.
TODO: should it be irrespective of the orientation ?
"""
for k, v in self.axes_domains.iteritems():
if k not in other.axes_domains:
# print '%s not in domains %s'
# %(str(k),str(other.axes_domains))
return False
if isinstance(v, np.ndarray):
if not np.allclose(v, other.axes_domains[k]):
# print 'numpy array differ for %s' %str(k)
return False
else:
if v != other.axes_domains[k]:
# print 'domain value differ for %s' %str(k)
return False
return (self.data == other.data).all() and \
self.axes_names == other.axes_names and \
self.value_label == other.value_label
def astype(self, t):
c = self.copy()
c.data = c.data.astype(t)
return c
def descrip_shape(self):
sh = self.data.shape
axes_info = ['%s:%d' % (a, sh[i])
for i, a in enumerate(self.axes_names)]
return '(' + ','.join(axes_info) + ')'
def get_voxel_size(self, axis):
""" Return the size of a voxel along 'axis', only if meta data is
available.
"""
assert axis in MRI3Daxes
if self.meta_data is not None:
affine, header = self.meta_data
return header['pixdim'][1:4][MRI3Daxes.index(axis)]
else:
            raise Exception('xndarray does not have any meta data to get '
                            'voxel size')
def get_dshape(self):
"""
Return the shape of the array as dict mapping an axis name to the
corresponding size
"""
return dict(zip(self.axes_names, self.data.shape))
def _prepare_for_operation(self, op_name, c):
""" Make some checks before performing an operation between self and c.
If c is a cuboid then it must have the same axes,
same domains and value labels.
If c is np.ndarray, it must have the same shape as self.data
Return a cuboid in the same orientation as self.
TODO: allow broadcasting
"""
if isinstance(c, np.ndarray):
if self.data.shape != c.shape:
raise Exception('Cannot %s cuboid and ndarray. Shape of self'
' %s different from ndarray shape %s.'
% (op_name, str(self.data.shape),
str(c.shape)))
c = xndarray.xndarray_like(self, data=c)
elif np.isscalar(c):
class Dummy:
def __init__(self, val):
self.data = val
return Dummy(c)
if set(self.axes_names) != set(c.axes_names):
raise Exception('Cannot %s cuboids with different axes' % op_name)
# TODO: check axes domains ...
if self.axes_names != c.axes_names:
c = c.reorient(self.axes_names)
for i, a in enumerate(self.axes_names):
if self.data.shape[i] != c.data.shape[i]:
raise Exception('Cannot %s cuboids, shape mismatch.'
' self has shape: %s and operand has '
' shape: %s'
% (op_name, self.descrip_shape(),
c.descrip_shape()))
return c
def add(self, c, dest=None):
c = self._prepare_for_operation('add', c)
if dest is None:
return xndarray(self.data + c.data, self.axes_names, self.axes_domains,
self.value_label, self.meta_data)
else:
dest.data += c.data
return dest
def multiply(self, c, dest=None):
c = self._prepare_for_operation('multiply', c)
if dest is None:
return xndarray(self.data * c.data, self.axes_names, self.axes_domains,
self.value_label, self.meta_data)
else:
dest.data *= c.data
return dest
def divide(self, c, dest=None):
c = self._prepare_for_operation('divide', c)
if dest is None:
return xndarray(self.data / c.data, self.axes_names, self.axes_domains,
self.value_label, self.meta_data)
else:
dest.data /= c.data
return dest
def substract(self, c, dest=None):
c = self._prepare_for_operation('substract', c)
if dest is None:
return xndarray(self.data - c.data, self.axes_names, self.axes_domains,
self.value_label, self.meta_data)
else:
dest.data -= c.data
return dest
def __iadd__(self, c):
return self.add(c, dest=self)
def __add__(self, c):
return self.add(c)
def __radd__(self, c):
return self.add(c)
def __imul__(self, c):
return self.multiply(c, dest=self)
def __rmul__(self, c):
return self.multiply(c)
def __mul__(self, c):
return self.multiply(c)
def __idiv__(self, c):
return self.divide(c, dest=self)
def __div__(self, c):
return self.divide(c)
def __rdiv__(self, c):
if np.isscalar(c):
return xndarray_like(self, data=c / self.data)
else:
            c = self._prepare_for_operation('divide', c)
return c.divide(self)
def __isub__(self, c):
return self.substract(c, dest=self)
def __sub__(self, c):
return self.substract(c)
def __rsub__(self, c):
return (self * -1).add(c)
def __pow__(self, c):
if np.isscalar(c):
return xndarray_like(self, data=self.data ** c)
else:
raise NotImplementedError(
'Broadcast for pow operation not available')
def min(self, axis=None):
if axis is None:
return self.data.min()
else:
ia = self.get_axis_id(axis)
na = self.get_axis_name(axis)
if ia is None or na is None:
raise Exception('Wrong axis %s (%d available axes: %s)'
                                % (str(axis), self.get_ndims(),
','.join(self.axes_names)))
an = self.axes_names[:ia] + self.axes_names[ia + 1:]
ad = self.axes_domains.copy()
ad.pop(na)
m = self.data.min(axis=ia)
if np.isscalar(m):
return m
else:
return xndarray(m, an, ad, self.value_label, self.meta_data)
def max(self, axis=None):
if axis is None:
return self.data.max()
else:
ia = self.get_axis_id(axis)
na = self.get_axis_name(axis)
if ia is None or na is None:
raise Exception('Wrong axis %s (%d available axes: %s)'
                                % (str(axis), self.get_ndims(),
','.join(self.axes_names)))
an = self.axes_names[:ia] + self.axes_names[ia + 1:]
ad = self.axes_domains.copy()
ad.pop(na)
m = self.data.max(axis=ia)
if np.isscalar(m):
return m
else:
return xndarray(m, an, ad, self.value_label, self.meta_data)
def ptp(self, axis=None):
if axis is None:
return self.data.ptp()
else:
ia = self.get_axis_id(axis)
na = self.get_axis_name(axis)
if ia is None or na is None:
raise Exception('Wrong axis %s (%d available axes: %s)'
                                % (str(axis), self.get_ndims(),
','.join(self.axes_names)))
an = self.axes_names[:ia] + self.axes_names[ia + 1:]
ad = self.axes_domains.copy()
ad.pop(na)
r = self.data.ptp(axis=ia)
if np.isscalar(r):
return r
else:
return xndarray(r, an, ad, self.value_label, self.meta_data)
def mean(self, axis=None):
if axis is None:
return self.data.mean()
else:
ia = self.get_axis_id(axis)
na = self.get_axis_name(axis)
if ia is None or na is None:
raise Exception('Wrong axis %s (%d available axes: %s)'
                                % (str(axis), self.get_ndims(),
','.join(self.axes_names)))
an = self.axes_names[:ia] + self.axes_names[ia + 1:]
ad = self.axes_domains.copy()
ad.pop(na)
return xndarray(self.data.mean(axis=ia), an, ad,
self.value_label, self.meta_data)
def std(self, axis=None):
if axis is None:
            return self.data.std()
else:
ia = self.get_axis_id(axis)
na = self.get_axis_name(axis)
if ia is None or na is None:
raise Exception('Wrong axis %s (%d available axes: %s)'
                                % (str(axis), self.get_ndims(),
','.join(self.axes_names)))
an = self.axes_names[:ia] + self.axes_names[ia + 1:]
ad = self.axes_domains.copy()
ad.pop(na)
return xndarray(self.data.std(axis=ia), an, ad,
self.value_label + '_std', self.meta_data)
def var(self, axis=None):
if axis is None:
            return self.data.var()
else:
ia = self.get_axis_id(axis)
na = self.get_axis_name(axis)
if ia is None or na is None:
raise Exception('Wrong axis %s (%d available axes: %s)'
                                % (str(axis), self.get_ndims(),
','.join(self.axes_names)))
an = self.axes_names[:ia] + self.axes_names[ia + 1:]
ad = self.axes_domains.copy()
ad.pop(na)
return xndarray(self.data.var(axis=ia), an, ad,
self.value_label + '_var', self.meta_data)
def sum(self, axis=None):
if axis is None:
return self.data.sum()
else:
ia = self.get_axis_id(axis)
na = self.get_axis_name(axis)
if ia is None or na is None:
raise Exception('Wrong axis %s (%d available axes: %s)'
                                % (str(axis), self.get_ndims(),
','.join(self.axes_names)))
an = self.axes_names[:ia] + self.axes_names[ia + 1:]
ad = self.axes_domains.copy()
ad.pop(na)
return xndarray(self.data.sum(axis=ia), an, ad,
self.value_label, self.meta_data)
def rescale_values(self, v_min=0., v_max=1., axis=None):
if axis is not None:
axis = self.get_axis_id(axis)
new_data = rescale_values(self.data, v_min, v_max, axis)
return xndarray(new_data, self.axes_names, self.axes_domains,
self.value_label, self.meta_data)
def get_extra_info(self, fmt='dict'):
from pyhrf.xmlio import to_xml
info = {
'axes_names': self.axes_names,
'axes_domains': self.axes_domains,
'value_label': self.value_label,
}
if fmt == 'dict':
return info
elif fmt == 'xml':
return to_xml(info)
def save(self, file_name, meta_data=None, set_MRI_orientation=False):
""" Save cuboid to a file. Supported format: Nifti1.
'meta_data' shoud be a 2-elements tuple:
(affine matrix, Nifti1Header instance). If provided, the meta_data
attribute of the cuboid is ignored.
All extra axis information is stored as an extension.
"""
from pyhrf.xmlio import from_xml, to_xml, DeprecatedXMLFormatException
logger.info('xndarray.save(%s)', file_name)
ext = op.splitext(file_name)[1]
c_to_save = self
if has_ext(file_name, 'nii'):
if set_MRI_orientation:
self.set_MRI_orientation()
extra_info = c_to_save.get_extra_info()
logger.debug('Extra info:')
logger.debug(extra_info)
from nibabel.nifti1 import Nifti1Extension, Nifti1Header, \
Nifti1Image, extension_codes
if self.data.ndim > 7:
raise Exception("Nifti format can not handle more than "
"7 dims. xndarray has %d dims"
% self.data.ndim)
if meta_data is None:
if c_to_save.meta_data is not None:
affine, header = c_to_save.meta_data
else:
affine = np.eye(4)
header = Nifti1Header()
else:
affine, header = meta_data
header = Nifti1Header()
header = header.copy()
ecodes = header.extensions.get_codes()
if extension_codes['comment'] in ecodes:
ic = ecodes.index(extension_codes['comment'])
econtent = header.extensions[ic].get_content()
# Check if existing extension can be safely overwritten
try:
prev_extra_info = from_xml(econtent)
except DeprecatedXMLFormatException, e:
from pyhrf.xmliobak import from_xml as from_xml_bak
prev_extra_info = from_xml_bak(econtent)
except Exception:
raise IOError("Cannot safely overwrite Extension in "
"Header. It already has a 'comment' "
"extension "
"with the following content:\n" +
str(econtent))
if not isinstance(prev_extra_info, dict) and \
prev_extra_info.has_key('axes_names'):
raise IOError("Cannot safely overwrite Extension in "
"Header. It already has a readable "
"XML 'comment' extension, but it's "
"not related to xndarray meta info.")
# Checks are OK, remove the previous extension:
prev_ext = header.extensions.pop(ic).get_content()
logger.debug('Extensions after popping previous ext:')
logger.debug(str(header.extensions))
else:
prev_ext = ""
ext_str = to_xml(extra_info)
if len(ext_str) < len(prev_ext):
extra_info['dummy'] = '#' * (len(prev_ext) - len(ext_str))
ext_str = to_xml(extra_info)
logger.debug('Length of extension string: %s', len(ext_str))
logger.debug('Extension: \n %s', ext_str)
e = Nifti1Extension('comment', ext_str)
header.extensions.append(e)
logger.info('Extensions after appending new ext:')
header.set_data_dtype(c_to_save.data.dtype)
i = Nifti1Image(c_to_save.data, affine, header=header)
i.update_header()
logger.debug('Save Nifti image to %s ...', file_name)
i.to_filename(file_name)
logger.debug('Save Nifti image, done!')
elif ext == '.csv':
np.savetxt(file_name, c_to_save.data, fmt="%12.9G")
elif has_ext(file_name, 'gii'):
from pyhrf.tools._io import write_texture
logger.info('Save Gifti image (dim=%d) ...', c_to_save.get_ndims())
logger.info('axes names: %s', str(c_to_save.axes_names))
if c_to_save.get_ndims() == 1:
write_texture(c_to_save.data, file_name,
meta_data=c_to_save.get_extra_info(fmt='xml'))
elif c_to_save.get_ndims() == 2 and \
c_to_save.has_axes(['voxel', 'time']):
saxes = ['voxel'] + \
list(set(c_to_save.axes_names).difference(['voxel']))
# make sure spatial axis is the 1st one
logger.info('reorient as %s', str(saxes))
c_to_save = c_to_save.reorient(saxes)
write_texture(c_to_save.data, file_name,
meta_data=c_to_save.get_extra_info(fmt='xml'))
else:
if c_to_save.has_axes(['voxel']):
saxes = list(set(c_to_save.axes_names).difference(['voxel',
'time']))
split_c = c_to_save.split(saxes[0])
for dval, subc in split_c.iteritems():
subc.save(add_suffix(file_name,
'_%s_%s' % (saxes[0], str(dval))))
else:
write_texture(c_to_save.data, file_name,
meta_data=c_to_save.get_extra_info(fmt='xml'))
else:
raise Exception('Unsupported file format (ext: "%s")' % ext)
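        # Usage sketch (file name and axis names are illustrative, not from this module;
        # assumes nibabel is installed):
        #   c = xndarray(np.zeros((4, 4, 3)), axes_names=['sagittal', 'coronal', 'axial'])
        #   c.save('/tmp/example.nii')  # axis names/domains are embedded as a nifti 'comment' extension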
@staticmethod
def load(file_name):
""" Load cuboid from file. Supported format: nifti1.
Extra axis information is retrieved from a nifti extension if available.
If it's not available, label the axes as:
(sagittal, coronal, axial[, time]).
TODO: gifti.
"""
from pyhrf.xmlio import from_xml, DeprecatedXMLFormatException
has_deprecated_xml_header = False
logger.info('xndarray.load(%s)', file_name)
ext = op.splitext(file_name)[1]
if ext == '.nii' or \
(ext == '.gz' and op.splitext(file_name[:-3])[1] == '.nii') or \
ext == '.img' or \
(ext == '.gz' and op.splitext(file_name[:-3])[1] == '.img'):
import nibabel
i = nibabel.load(file_name)
h = i.get_header()
data = np.array(i.get_data()) # avoid memmapping
cuboid_info = {}
cuboid_info['axes_names'] = MRI4Daxes[:min(4, data.ndim)]
# TODO: fill spatial domains with position in mm, and time axis
# according TR value.
cuboid_info['value_label'] = 'intensity'
# print 'extensions:', h.extensions
if hasattr(h, 'extensions') and len(h.extensions) > 0:
ecodes = h.extensions.get_codes()
# print 'ecodes:', ecodes
ccode = nibabel.nifti1.extension_codes['comment']
if ccode in ecodes:
ic = ecodes.index(ccode)
ext_content = h.extensions[ic].get_content()
try:
cuboid_info = from_xml(ext_content)
except DeprecatedXMLFormatException, e:
has_deprecated_xml_header = True
try:
from pyhrf.xmliobak import from_xml as from_xml_bak
cuboid_info = from_xml_bak(ext_content)
except:
# Can't load xml -> ignore it
# TODO: warn?
cuboid_info = {}
except Exception, e:
raise IOError('Extension for xndarray meta info can not '
'be read from "comment" extension. '
'Content is:\n%s\n Exception was:\n%s'
% (ext_content, str(e)))
cuboid_info.pop('dummy', None)
logger.info('Extra info loaded from extension:')
logger.info(cuboid_info)
meta_data = (i.get_affine(), h)
cuboid_info = dict((str(k), v) for k, v in cuboid_info.iteritems())
data[np.where(np.isnan(data))] = 0
a = xndarray(data, meta_data=meta_data, **cuboid_info)
a.has_deprecated_xml_header = has_deprecated_xml_header
return a
elif ext == '.gii' or \
(ext == '.gz' and op.splitext(file_name[:-3])[1] == '.gii'):
from pyhrf.tools._io import read_texture
data, gii = read_texture(file_name)
md = gii.get_metadata().get_metadata()
# print 'meta data loaded from gii:'
# print md
if md.has_key('pyhrf_cuboid_data'):
cuboid_info = from_xml(md['pyhrf_cuboid_data'])
else:
cuboid_info = {}
return xndarray(data, **cuboid_info)
else:
raise Exception('Unrecognised file format (ext: %s)' % ext)
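        # Round-trip sketch (path is illustrative only):
        #   c = xndarray.load('/tmp/example.nii')
        #   print c.descrip()  # axes/domains written by xndarray.save() are restored from the extension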
def xndarray_like(c, data=None):
return xndarray.xndarray_like(c, data)
def stack_cuboids(c_list, axis, domain=None, axis_pos='first'):
""" Stack xndarray instances in list `c_list` along a new axis label `axis`. If `domain` (numpy array or list) is
provided, it is associated to the new axis. All cuboids in `c_list` must have the same orientation and domains.
`axis_pos` defines the position of the new axis: either `first` or `last`.
Examples
--------
>>> import numpy as np
>>> from pyhrf.ndarray import xndarray, stack_cuboids
>>> c1 = xndarray(np.arange(4*3).reshape(4,3), ['x','y'])
>>> c1
axes: ['x', 'y'], array([[ 0, 1, 2],
[ 3, 4, 5],
[ 6, 7, 8],
[ 9, 10, 11]])
>>> c2 = xndarray(np.arange(4*3).reshape(4,3)*2, ['x','y'])
>>> c2
axes: ['x', 'y'], array([[ 0, 2, 4],
[ 6, 8, 10],
[12, 14, 16],
[18, 20, 22]])
>>> c_stacked = stack_cuboids([c1,c2], 'stack_axis', ['c1','c2'])
>>> print c_stacked.descrip() # doctest: +NORMALIZE_WHITESPACE
* shape : (2, 4, 3)
* dtype : int64
* orientation: ['stack_axis', 'x', 'y']
* value label: value
* axes domains:
'stack_axis': array(['c1', 'c2'],
dtype='|S2')
'x': arange(0,3,1)
'y': arange(0,2,1)
TODO: enable broadcasting (?)
"""
assert isinstance(axis, str)
size = len(c_list)
cub0 = c_list[0]
axes = cub0.axes_names
# print 'axes:', axes
sh = (size,) + cub0.data.shape
stackedData = np.zeros(sh, cub0.data.dtype)
newDomains = cub0.axes_domains.copy()
if domain is not None:
newDomains[axis] = domain
targetCub = xndarray(stackedData,
axes_names=[axis] + cub0.axes_names,
axes_domains=newDomains, value_label=cub0.value_label)
# print 'c_list', c_list, c_list[0], c_list[1]
for i, cuboid in enumerate(c_list):
if debug:
print 'targetCub.data[i] :', targetCub.data[i].shape
if debug:
print 'cuboid', cuboid.descrip()
# print 'number:', i
# print 'cuboid.axes:', cuboid.axes_names
if axes != cuboid.axes_names:
raise Exception('%dth cuboid in list does not match other cuboids'
'found axes: %s, should be: %s'
% (i, str(cuboid.axes_names), str(axes)))
# TODO: better use numpy stacking functions (faster)
targetCub.data[i] = cuboid.data
if axis_pos == 'last':
targetCub.roll(axis)
return targetCub
def expand_array_in_mask(flat_data, mask, flat_axis=0, dest=None, m=None):
""" Map the `flat_axis` of `flat_data` onto the region within mask. `flat_data` is then reshaped so that flat_axis
is replaced with `mask.shape`.
Notes
-----
`m` is the result of `np.where(mask)` -> can be passed to speed up if already done before.
Examples
--------
>>> a = np.array([1,2,3])
>>> m = np.array([[0,1,0], [0,1,1]] )
>>> expand_array_in_mask(a,m)
array([[0, 1, 0],
[0, 2, 3]])
>>> a = np.array([[1,2,3],[4,5,6]])
>>> m = np.array([[0,1,0], [0,1,1]] )
>>> expand_array_in_mask(a,m,flat_axis=1)
array([[[0, 1, 0],
[0, 2, 3]],
<BLANKLINE>
[[0, 4, 0],
[0, 5, 6]]])
"""
flat_sh = flat_data.shape
mask_sh = mask.shape
target_shape = flat_sh[:flat_axis] + mask_sh + flat_sh[flat_axis + 1:]
logger.debug('expand_array_in_mask ... %s -> %s', str(flat_sh),
str(target_shape))
if dest is None:
dest = np.zeros(target_shape, dtype=flat_data.dtype)
assert dest.shape == target_shape
assert dest.dtype == flat_data.dtype
if m is None:
m = np.where(mask)
n = len(m[0])
if n != flat_data.shape[flat_axis]:
raise Exception('Nb positions in mask (%d) different from length of '
'flat_data (%d)' % (n, flat_data.shape[flat_axis]))
sm = ([':'] * len(flat_sh[:flat_axis]) +
['m[%d]' % i for i in range(len(mask_sh))] +
[':'] * len(flat_sh[flat_axis + 1:]))
exec('dest[%s] = flat_data' % ','.join(sm))
return dest
def merge(arrays, mask, axis, fill_value=0):
"""
Merge the given arrays into a single array according to the given
mask, with the given axis being mapped to those of mask.
Assume that arrays[id] corresponds to mask==id and that all arrays are in
the same orientation.
Arg:
    - arrays (dict of xndarrays): arrays to merge, keyed by the mask value
      they correspond to.
- mask (xndarray): defines the mapping between the flat axis in the
arrays to merge and the target expanded axes.
    - axis (str): name of the flat axis in the arrays to merge.
"""
if len(arrays) == 0:
raise Exception('Empty list of arrays')
dest_c = None
for i, a in arrays.iteritems():
dest_c = a.expand(mask.data == i, axis, mask.axes_names,
dest=dest_c, do_checks=False)
return dest_c
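# Usage sketch (shapes and labels are made up for illustration):
#   m = xndarray(np.array([[0, 1, 1], [0, 2, 2]]), axes_names=['x', 'y'])
#   arrays = {1: xndarray(np.array([1., 1.]), axes_names=['position']),
#             2: xndarray(np.array([2., 2.]), axes_names=['position'])}
#   merged = merge(arrays, m, axis='position')  # values written back where mask==1 and mask==2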
def tree_to_xndarray(tree, level_labels=None):
"""Stack all arrays within input tree into a single array.
Parameters
----------
tree : dict
nested dictionaries of xndarray objects. Each level of the tree correspond to a target axis, each key of the
tree correspond to an element of the domain associated to that axis.
level_labels : list of str
axis labels corresponding to each level of the tree
Returns
-------
xndarray object
Examples
--------
>>> from pyhrf.ndarray import xndarray, tree_to_xndarray
>>> d = { 1 : { .1 : xndarray([1,2], axes_names=['inner_axis']), \
.2 : xndarray([3,4], axes_names=['inner_axis']), \
}, \
2 : { .1 : xndarray([1,2], axes_names=['inner_axis']), \
.2 : xndarray([3,4], axes_names=['inner_axis']), \
} \
}
>>> tree_to_xndarray(d, ['level_1', 'level_2'])
axes: ['level_1', 'level_2', 'inner_axis'], array([[[1, 2],
[3, 4]],
<BLANKLINE>
[[1, 2],
[3, 4]]])
"""
tree_depth = len(treeBranches(tree).next())
level_labels = level_labels or ['axis_%d' % i for i in xrange(tree_depth)]
assert len(level_labels) == tree_depth
def _reduce(node, level):
if isinstance(node, xndarray): # leaf node
return node
else:
domain = sorted(node.keys())
return stack_cuboids([_reduce(node[c], level + 1) for c in domain],
level_labels[level], domain)
return _reduce(tree, level=0)
from pyhrf.tools import add_suffix
def split_and_save(cub, axes, fn, meta_data=None, set_MRI_orientation=False,
output_dir=None, format_dvalues=False):
if len(axes) == 0:
# print 'fn:', fn
# print 'sub_c:'
# print cub.descrip()
if output_dir is None:
output_dir = op.dirname(fn)
cub.save(op.join(output_dir, op.basename(fn)), meta_data=meta_data,
set_MRI_orientation=set_MRI_orientation)
return
axis = axes[0]
split_res = cub.split(axis)
split_dvals = np.array(split_res.keys())
if format_dvalues and np.issubdtype(split_dvals.dtype, np.number) and \
(np.diff(split_dvals) == 1).all():
ndigits = len(str(max(split_dvals)))
if min(split_dvals) == 0:
o = 1
else:
o = 0
split_dvals = [str(d + o).zfill(ndigits) for d in split_dvals]
for dvalue, sub_c in zip(split_dvals, split_res.values()):
if axis != 'error':
newfn = add_suffix(fn, '_%s_%s' % (axis, str(dvalue)))
else:
if dvalue == 'error':
newfn = add_suffix(fn, '_error')
else:
newfn = fn[:]
split_and_save(sub_c, axes[1:], newfn, meta_data=meta_data,
set_MRI_orientation=set_MRI_orientation,
output_dir=output_dir)
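# Usage sketch: split a (condition, voxel) cuboid into one file per condition
# (names are illustrative, not from this module):
#   split_and_save(my_cuboid, ['condition'], './outputs/my_map.nii')
#   -> writes ./outputs/my_map_condition_<value>.nii for every value of the 'condition' axis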
# HTML output helpers
# Define the style of cells in html table output, especially the "rotate" class to
# show axis labels in vertical orientation
# This should be put in the <head> section of the final html doc
ndarray_html_style = """<!-- td {
border-collapse:collapse;
border: 1px black solid;
}
.rotate {
-moz-transform: rotate(-90.0deg); /* FF3.5+ */
-o-transform: rotate(-90.0deg); /* Opera 10.5 */
-webkit-transform: rotate(-90.0deg); /* Saf3.1+, Chrome */
filter: progid:DXImageTransform.Microsoft.BasicImage(rotation=0.083); /* IE6,IE7 */
-ms-filter: "progid:DXImageTransform.Microsoft.BasicImage(rotation=0.083)"; /* IE8 */
} -->"""
| [
"matplotlib"
] |
f1f5f1339139a568c1fe2edc85c84fa707e8869f | Python | jakubsob/QuantumParticle | /Data/plots_full.py | UTF-8 | 1,684 | 2.75 | 3 | [] | no_license | #!/usr/bin/python3
import os
from os import path
import sys
import matplotlib.pyplot as plt
import numpy as np
import csv
import pandas as pd
import glob
from scipy import stats
from matplotlib.pyplot import cm
def plot_subplots(n):
height = 20
width = 20
t_label = "t[ps]"
fig, ax = plt.subplots(2, 2, figsize=(width,height))
ax[0,0] = plot_on_ax(ax[0,0], t_label, "E", "e_n={:d}.dat".format(n))
ax[0,1] = plot_on_ax(ax[0,1], t_label, "X", "x_n={:d}.dat".format(n))
ax[1,0] = plot_on_ax(ax[1,0], t_label, "N", "n_n={:d}.dat".format(n))
ax[1,1] = plot_rho_on_ax(ax[1,1], "rho_n={:d}.dat".format(n))
plt.savefig("plots_n={:d}.png".format(n))
def plot_on_ax(ax, x_label, y_label, file):
file_path = path.relpath(file)
opacity = 1
data = pd.read_table(file, sep=" ")
X = data[data.columns[0]].values
Y = data[data.columns[1]].values
name = file[:file.find(".dat")]
ax.plot(X,Y,"-b", alpha=opacity, label=name)
ax.set_title(name)
ax.set_xlabel(x_label)
ax.set_ylabel(y_label)
return ax
def plot_rho_on_ax(ax, file):
file_path = path.relpath(file)
opacity = 1
data = pd.read_table(file, sep=" ")
X = data[data.columns[0]].values
Y = data[data.columns[1]].values
length = len(X)
chunk = 100
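    # Assumption: the rho_*.dat file stores rho(x) in consecutive 100-point blocks,
    # one block per saved time step, so each `chunk`-sized slice below is a single snapshot.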
name = file[:file.find(".dat")]
ax.plot(X[:chunk],Y[:chunk],"-r", alpha=opacity, label="t=0")
ax.plot(X[int(length*0.5):int(length*0.5) + 100],Y[int(length*0.5):int(length*0.5) + 100],"-g", alpha=opacity, label="t=0.5T")
ax.plot(X[int(length*0.8):int(length*0.8) + 100],Y[int(length*0.8):int(length*0.8) + 100],"-b", alpha=opacity, label="t=0.8T")
ax.set_title(name)
ax.set_ylabel("rho")
ax.legend()
return ax
plot_subplots(1)
plot_subplots(2)
plot_subplots(4) | [
"matplotlib"
] |
93157cd393a9fd5bf5229daf731cf81a6a86d918 | Python | heltonlucas/MapadeControle-AlgoritmoGenetico | /plotargrafico.py | UTF-8 | 816 | 2.921875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Sat Oct 28 12:16:49 2017
@author: helto
"""
import xlrd
import matplotlib as mpl
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import matplotlib.pyplot as plt
dataset = "ParametrosAG.xlsx"
workbook = xlrd.open_workbook(dataset)
sheet = workbook.sheet_by_index(0)
x=[]
y=[]
z=[]
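# Each sheet row is assumed to hold one GA configuration: column 0 = population size,
# column 1 = crossover rate, column 2 = selection pressure (matching the axis labels below).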
#for line in dataset:
# line = line.strip()
# X,Y,Z = line.split(',')
# x.append(X)
# y.append(Y)
# z.append(Z)
for row in range(sheet.nrows):
x.append(sheet.cell_value(row, 0))
y.append(sheet.cell_value(row, 1))
z.append(sheet.cell_value(row, 2))
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(x, y, z, c='blue')
ax.set_xlabel('Populaçao')
ax.set_ylabel('Crossover')
ax.set_zlabel('Pressão de Seleção')
plt.show()
| [
"matplotlib"
] |
78c8a5590f22dde4c4fd6715aceb3a51418afb84 | Python | LIWEI233/computationalphysics_N2014301020157 | /12代码.py | UTF-8 | 2,475 | 3 | 3 | [] | no_license | import matplotlib
import numpy as np
import matplotlib.cm as cm
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
import time
from math import *
print '''Exercise 5.7
Designed by roach
'''
def printCV(CV):
print "------"
for n1 in range(len(CV)):
for n2 in range(len(CV[0])):
print CV[n1][n2],
print ""
print "------"
return 0
steps=2000
timebegin=time.time()
#initial CV
CV=[[0 for i in range(31)] for j in range(31)]
for n1 in range(31):
for n2 in range(31):
CV[n1][n2]=0.0
for n1 in range(10,21):
CV[n1][10]=-1.0
CV[n1][20]=1.0
for n1 in range(31):
CV[n1][0]=0.0
CV[n1][30]=0.0
CV[0][n1]=0.0
CV[30][n1]=0.0
#printCV(CV)
CV2=[]
n=0
maxdeltaV=[]
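#Jacobi relaxation: every sweep replaces each interior point by the average of its
#four neighbours, re-imposes the plate potentials, and records the largest change
#as a convergence measure (plotted at the end).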
for itotal in range(steps):
CV2=None
CV2=[[0 for i in range(31)] for j in range(31)]
CVbegin=None
CVbegin=[[0 for i in range(31)] for j in range(31)]
for n1 in range(31):
for n2 in range(31):
CVbegin[n1][n2]=CV[n1][n2]
CV2[n1][n2]=CV[n1][n2]
#calculate CV2
for n1 in range(1,len(CV)-1):
for n2 in range(1,len(CV[0])-1):
CV2[n1][n2]=(CV[n1-1][n2]+CV[n1+1][n2]+CV[n1][n2-1]+CV[n1][n2+1])/4.0
for n3 in range(10,21):
CV2[n3][10]=-1.0
CV2[n3][20]=1.0
CVdelta=None
CVdelta=[]
for n1 in range(31):
for n2 in range(31):
CVdelta.append(abs(CVbegin[n1][n2]-CV2[n1][n2]))
CV=None
CV=[[0 for i in range(31)] for j in range(31)]
for n1 in range(31):
for n2 in range(31):
CV[n1][n2]=CV2[n1][n2]
maxdeltaV.append(max(CVdelta))
print "V",itotal,max(CVdelta)
ntotal=itotal
strn=str(ntotal)
print "Total V",ntotal
print "Time used",time.time()-timebegin
CX=[]
CY=[]
for i in range(31):
CX.append(-3.0+6*i/30.0)
CY.append(-3.0+6*i/30.0)
plt.figure(figsize=(20,20))
CS = plt.contour(CX, CY, CV, 20,colors='brown')
plt.clabel(CS, fontsize=9, inline=1)
plt.title("Exercise5.7 V"+strn)
plt.xlabel("x")
plt.ylabel("y")
plt.xlim(-3.1,3.1)
plt.ylim(-3.1,3.1)
plt.plot([-1,-1],[-1,1],color = "blue",linewidth=3)
plt.plot([1,1],[1,-1],color = "red",linewidth=3)
plt.plot([3,-3,-3,3,3],[3,3,-3,-3,3],color = "black",linewidth=3)
plt.show()
xcv=[]
for i in range(steps):
xcv.append(i)
plt.figure(figsize=(24,18))
plt.title("Exercise5.7 Max deltaV - Update times")
plt.xlabel("Update times")
plt.ylabel("Max deltaV")
plt.plot(xcv,maxdeltaV)
plt.show()
| [
"matplotlib"
] |
86da608e1bd7d8a600e9699313c33f4c84780ab4 | Python | angeloreale/tse-candidatesxgoods-script | /tse-candidatesxgoods-script.py | UTF-8 | 3,666 | 2.640625 | 3 | [] | no_license | #!/usr/bin/env python
# coding: utf-8
# In[2] importing libs:
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as stats
import seaborn as sns
from matplotlib import rcParams
import glob
get_ipython().run_line_magic('matplotlib', 'inline')
get_ipython().run_line_magic('pylab', 'inline')
# In[3] fetching and merging data:
dfConsulta = pd.read_csv('./data/candidatos-consulta/consulta_cand_2018_BRASIL.csv', sep=";", encoding = 'latin1', error_bad_lines=False)
dfBens = pd.read_csv('./data/candidatos-bens/bem_candidato_2018_BRASIL.csv', sep=";", encoding = 'latin1', error_bad_lines=False)
dfMerged = dfConsulta.merge(dfBens, on='SQ_CANDIDATO')
dfMerged.head()
# In[4] filtering data:
dfMergedFilteredBens = dfMerged.filter(items=['NM_URNA_CANDIDATO','NR_PARTIDO','SG_PARTIDO','SQ_CANDIDATO','VR_BEM_CANDIDATO','CD_COR_RACA','DS_COR_RACA','CD_GRAU_INSTRUCAO','DS_GRAU_INSTRUCAO','CD_GENERO','DS_GENERO','NR_IDADE_DATA_POSSE','CD_CARGO','DS_CARGO','SG_UE_y'])
# In[5] formatting currency:
dfMergedFilteredBens["VR_BEM_CANDIDATO"] = dfMergedFilteredBens["VR_BEM_CANDIDATO"].str.replace(',','.').astype(float)
# In[6] summing goods:
dfMergedFilteredBensSum = dfMergedFilteredBens.groupby(['NM_URNA_CANDIDATO','NR_PARTIDO','SG_PARTIDO','SQ_CANDIDATO','CD_COR_RACA','DS_COR_RACA','CD_GRAU_INSTRUCAO','DS_GRAU_INSTRUCAO','CD_GENERO','DS_GENERO','NR_IDADE_DATA_POSSE','CD_CARGO','DS_CARGO','SG_UE_y'])['VR_BEM_CANDIDATO'].sum().reset_index()
# In[7] exporting to csv:
dfMergedFilteredBensSum.to_csv('/home/angeloreale/candidatura-redebahia/teste-python/dados/dfMergedFilteredBensSum.csv', encoding = 'latin1')
# In[8] previewing processed data:
dfMergedFilteredBensSum.describe()
# In[9] drawing histograms:
fig = plt.figure(figsize=(12, 6))
age = fig.add_subplot(121)
goods = fig.add_subplot(122)
age.hist(dfMergedFilteredBensSum.NR_IDADE_DATA_POSSE, bins=80)
age.set_xlabel('Age')
age.set_title("Histogram of Age")
goods.hist(dfMergedFilteredBensSum.VR_BEM_CANDIDATO, bins=20)
goods.set_xlabel('Goods')
goods.set_title("Histogram of Candidate's Goods")
plt.show()
# In[10] processing statistics and displaying them:
import statsmodels.api as sm
from statsmodels.formula.api import ols
m = ols('VR_BEM_CANDIDATO ~ NR_IDADE_DATA_POSSE + CD_COR_RACA + CD_GENERO + CD_GRAU_INSTRUCAO + CD_GENERO + CD_CARGO', dfMergedFilteredBensSum).fit()
print (m.summary())
# In[11] drawing plot:
sns.jointplot(x="NR_IDADE_DATA_POSSE", y="VR_BEM_CANDIDATO", data=dfMergedFilteredBensSum, kind = 'reg',fit_reg= True, size = 7)
plt.show()
# In[13] drawing plot:
sns.jointplot(x="CD_GENERO", y="VR_BEM_CANDIDATO", data=dfMergedFilteredBensSum, kind = 'reg',fit_reg= True, size = 7)
plt.show()
# In[14] drawing plot:
sns.jointplot(x="CD_COR_RACA", y="VR_BEM_CANDIDATO", data=dfMergedFilteredBensSum, kind = 'reg',fit_reg= True, size = 7)
plt.show()
# In[15] drawing plot:
sns.jointplot(x="CD_GRAU_INSTRUCAO", y="VR_BEM_CANDIDATO", data=dfMergedFilteredBensSum, kind = 'reg',fit_reg= True, size = 7)
plt.show()
# In[16] drawing plot:
sns.jointplot(x="CD_CARGO", y="VR_BEM_CANDIDATO", data=dfMergedFilteredBensSum, kind = 'reg',fit_reg= True, size = 7)
plt.show()
# In[17] drawing plot:
sns.jointplot(x="NR_PARTIDO", y="VR_BEM_CANDIDATO", data=dfMergedFilteredBensSum, kind = 'reg',fit_reg= True, size = 7)
plt.show()
# In[147] drawing and saving plot:
with sns.axes_style(style='ticks'):
g = sns.factorplot("CD_COR_RACA", "VR_BEM_CANDIDATO", "DS_GENERO", data=dfMergedFilteredBensSum, size=10, kind="box")
g.set_axis_labels("Etnia", "Bens");
g.savefig('factorplot01.png')
| [
"matplotlib",
"seaborn"
] |
695de4e0a51bae346f87b65508916c20513fe5c8 | Python | rohit1607/TimeOpt_VI_det_Test | /grid_world_multistate_index_exactpos.py | UTF-8 | 13,653 | 3 | 3 | [] | no_license | import numpy as np
import itertools
import math
from custom_functions import *
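# custom_functions is an external helper module from this repo; it must provide
# calculate_reward(), which Grid.move() below uses to score each transition.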
#import matplotlib.pyplot as plt
class Grid:
#start and end are indices
#srelx and srely range from (-0.5,0.5)
def __init__(self,tlist, xs, ys, start_ind, srelx, srely, end_ind):
self.nj=len(xs)
self.ni=len(ys)
self.nt=len(tlist)
print("shapes=",len(xs),len(ys),len(tlist))
self.dj=np.abs(xs[1] - xs[0])
self.di=np.abs(ys[1] - ys[0])
self.dt=tlist[1] - tlist[0]
print("diffs=", self.dt, self.di, self.dj)
self.xs=xs
self.ys=ys
self.tlist=tlist
self.x=xs[start_ind[1]]
self.y=ys[self.ni - 1 - start_ind[0]]
self.x=self.x + srelx*self.dj
self.y=self.y + srely*self.di
#i, j, t , start and end store indices!!
self.t = int(0)
self.pos_to_state_index(self.x, self.y)
        self.endpos = end_ind  # end position stored as (i, j) indices; read as self.endpos throughout the class
self.start_state=(0, start_ind[0], start_ind[1])
def pos_to_state_index(self, x, y):
if x > self.xs[-1]:
x = self.xs[-1]
elif x < self.xs[0]:
x = self.xs[0]
if y > self.ys[-1]:
y= self.ys[-1]
elif y < self.ys[0]:
y = self.ys[0]
remx = (x - self.xs[0]) % self.dj
remy = -(y - self.ys[-1]) % self.di
xind = (x - self.xs[0]) // self.dj
yind = -(y - self.ys[-1]) // self.di
# print("rex,remy,xind,yind", remx,remy,xind,yind)
if remx >= 0.5 * self.dj and remy >= 0.5 * self.di:
xind += 1
yind += 1
elif remx >= 0.5 * self.dj and remy < 0.5 * self.di:
xind += 1
elif remx < 0.5 * self.dj and remy >= 0.5 * self.di:
yind += 1
self.i = int(yind)
self.j = int(xind)
return self.i, self.j
    #Rewards and Actions to be dictionaries
def set_AR(self, Actions):
self.actions= Actions
# self.rewards= Rewards
#explicitly set state. state is a tuple of indices(m,n,p)
def set_state(self, state):
self.t = state[0]
self.i = state[1]
self.j = state[2]
def current_state(self):
return (int(self.t), int(self.i), int(self.j))
def current_pos(self):
return (int(self.i), int(self.j))
#MAY NEED TO CHANGE DEFINITION
def is_terminal(self):
#return self.actions[state]==None
return (self.current_pos()==self.endpos)
def move(self, action, Vx, Vy):
r=0
so=self.current_state()
if self.is_terminal()==False:
thrust,angle=action
#x0.01 for cm/s to m/s
#x0.0091 for m to degrees
vnetx= (thrust*math.cos(angle)+(Vx))
vnety= (thrust*math.sin(angle)+(Vy))
xnew=(self.xs[int(self.j)]) + (vnetx*self.dt)
ynew=(self.ys[int(self.ni-1-self.i)]) + (vnety*self.dt)
# print("xnew, ynew",xnew,ynew)
#if state happens to go out of of grid, bring it back inside
if xnew>self.xs[-1]:
xnew=self.xs[-1]
elif xnew<self.xs[0]:
xnew=self.xs[0]
if ynew>self.ys[-1]:
ynew=self.ys[-1]
elif ynew<self.ys[0]:
ynew=self.ys[0]
# print("xnew, ynew after boundingbox", xnew, ynew)
# rounding to prevent invalid keys
remx = (xnew - self.xs[0]) % self.dj
remy = -(ynew - self.ys[-1]) % self.di
xind = (xnew - self.xs[0]) // self.dj
yind = -(ynew - self.ys[-1]) // self.di
# print("rex,remy,xind,yind", remx,remy,xind,yind)
if remx >= 0.5 * self.dj and remy >= 0.5 * self.di:
xind+=1
yind+=1
elif remx >= 0.5 * self.dj and remy < 0.5 * self.di:
xind+=1
elif remx < 0.5 * self.dj and remy >= 0.5 * self.di:
yind+=1
# print("rex,remy,xind,yind after upate", remx, remy, xind, yind)
# print("(i,j)", (yind,xind))
self.i=int(yind)
self.j=int(xind)
self.t=self.t + 1
sn=(self.t, self.i, self.j)
# Pi=math.pi
# if angle in [0, 0.5*Pi, Pi, 1.5*Pi]:
# r=-1
# elif angle in [0.25*Pi, 0.75*Pi, 1.25*Pi, 1.75*Pi]:
# r=-1.414
r=calculate_reward(self.dt, self.xs, self.ys, so, sn, xnew, ynew, vnetx, vnety )
if self.is_terminal():
r+=10
return r
def move_exact(self, action, Vx, Vy):
r=0
if self.is_terminal()==False:
thrust,angle=action
#x0.01 for cm/s to m/s
#x0.0091 for m to degrees
vnetx= (thrust*math.cos(angle)+(Vx))
vnety= (thrust*math.sin(angle)+(Vy))
xnew= self.x + (vnetx*self.dt)
ynew= self.y + (vnety*self.dt)
# print("xnew, ynew",xnew,ynew)
#if state happens to go out of of grid, bring it back inside
if xnew>self.xs[-1]:
xnew=self.xs[-1]
elif xnew<self.xs[0]:
xnew=self.xs[0]
if ynew>self.ys[-1]:
ynew=self.ys[-1]
elif ynew<self.ys[0]:
ynew=self.ys[0]
# print("xnew, ynew after boundingbox", xnew, ynew)
# rounding to prevent invalid keys
self.x=xnew
self.y=ynew
remx = (xnew - self.xs[0]) % self.dj
remy = -(ynew - self.ys[-1]) % self.di
xind = (xnew - self.xs[0]) // self.dj
yind = -(ynew - self.ys[-1]) // self.di
# print("rex,remy,xind,yind", remx,remy,xind,yind)
if remx >= 0.5 * self.dj and remy >= 0.5 * self.di:
xind+=1
yind+=1
elif remx >= 0.5 * self.dj and remy < 0.5 * self.di:
xind+=1
elif remx < 0.5 * self.dj and remy >= 0.5 * self.di:
yind+=1
# print("rex,remy,xind,yind after upate", remx, remy, xind, yind)
# print("(i,j)", (yind,xind))
self.i=int(yind)
self.j=int(xind)
self.t=self.t + 1
sn=(self.t, self.i, self.j)
# Pi=math.pi
# if angle in [0, 0.5*Pi, Pi, 1.5*Pi]:
# r=-1
# elif angle in [0.25*Pi, 0.75*Pi, 1.25*Pi, 1.75*Pi]:
# r=-1.414
r=-self.dt
if self.is_terminal():
r+=10
return r
# !! time to mentioned by index !!
def ac_state_space(self, time=None):
a=set()
if time==None:
for t in range((self.nt)-1): #does not include the states in the last time stice.
for i in range(self.ni):
for j in range(self.nj):
if ((i,j)!=self.endpos):# does not include states with pos as endpos
a.add((t,i,j))
else:
for i in range(self.ni):
for j in range(self.nj):
if ((i,j)!=self.endpos):
a.add((time,i,j))
return sorted(a)
def state_space(self,edge=None):
a=set()
if edge==None:
for t in range(self.nt):
for i in range(self.ni):
for j in range(self.nj):
a.add((t,i,j))
elif edge=='l':
j=0
for t in range(self.nt):
for i in range(self.ni):
a.add((t,i,j))
elif edge=='d':
i=self.ni -1
for t in range(self.nt):
for j in range(self.nj):
a.add((t,i,j))
elif edge=='r':
j=(self.nj)-1
for t in range(self.nt):
for i in range(self.ni):
a.add((t,i,j))
elif edge=='u':
i=0
for t in range(self.nt):
for j in range(self.nj):
a.add((t,i,j))
elif edge=='m':
for t in range(self.nt):
for i in range(1, (self.ni)-1):
for j in range(1, (self.nj)-1):
a.add((t,i,j))
elif edge=='llc':
j=0
i=self.ni - 1
for t in range(self.nt):
a.add((t,i,j))
elif edge=='ulc':
j=0
i=0
for t in range(self.nt):
a.add((t,i,j))
elif edge=='lrc':
j=(self.nj)-1
i=self.ni - 1
for t in range(self.nt):
a.add((t,i,j))
elif edge=='urc':
j=(self.nj)-1
i=0
for t in range(self.nt):
a.add((t,i,j))
elif edge=='end':
i=self.endpos[0]
j=self.endpos[1]
for t in range(self.nt):
a.add((t,i,j))
return sorted(a)
def if_within_grid(self):
return self.t>=0 and self.t<self.nt
def intersection(lst1, lst2):
lst3 = [value for value in lst1 if value in lst2]
return lst3
def timeOpt_grid(tlist, xs, ys, startpos, endpos):
    # Grid expects (tlist, xs, ys, start_ind, srelx, srely, end_ind); the source starts at the
    # cell centre here since timeOpt_grid takes no sub-cell offset arguments.
    g = Grid(tlist, xs, ys, startpos, 0, 0, endpos)
#define actions and rewards
Pi=math.pi
#speeds in m/s
speed_list=[1] #speeds except zero
# angle_list_l=[0.0*Pi, 0.125*Pi, 0.25*Pi, 0.375*Pi, 0.5*Pi,
# 1.5 * Pi, 1.625 * Pi, 1.75 * Pi, 1.875 * Pi]
# angle_list_d=[0.0*Pi, 0.125*Pi, 0.25*Pi, 0.375*Pi, 0.5*Pi, 0.625*Pi, 0.75*Pi,
# 0.875*Pi, 1.0*Pi]
# angle_list_u=[ 1.0*Pi, 1.125*Pi, 1.25*Pi, 1.375*Pi, 1.5*Pi, 1.625*Pi, 1.75*Pi, 1.875*Pi, 0]
# angle_list_r=[0.5*Pi, 0.625*Pi, 0.75*Pi, 0.875*Pi, 1.0*Pi, 1.125*Pi, 1.25*Pi, 1.375*Pi, 1.5*Pi]
# angle_list_m=[0.0*Pi, 0.125*Pi, 0.25*Pi, 0.375*Pi, 0.5*Pi, 0.625*Pi, 0.75*Pi, 0.875*Pi,
# 1.0*Pi, 1.125*Pi, 1.25*Pi, 1.375*Pi, 1.5*Pi, 1.625*Pi, 1.75*Pi, 1.875*Pi]
angle_list_l = [0, 0.25 * Pi, 0.5 * Pi, 1.5 * Pi, 1.75 * Pi]
angle_list_d = [0, 0.25 * Pi, 0.5 * Pi, 0.75 * Pi, Pi]
angle_list_u = [0, Pi, 1.25 * Pi, 1.5 * Pi, 1.75 * Pi]
angle_list_r = [0.5 * Pi, 0.75 * Pi, Pi, 1.25 * Pi, 1.5 * Pi]
angle_list_m = [0, 0.25 * Pi, 0.5 * Pi, 0.75 * Pi, Pi, 1.25 * Pi, 1.5 * Pi, 1.75 * Pi]
action_list_l= list(itertools.product(speed_list,angle_list_l))
action_list_l.append((0,0))
action_list_d= list(itertools.product(speed_list,angle_list_d))
action_list_d.append((0,0))
action_list_u= list(itertools.product(speed_list,angle_list_u))
action_list_u.append((0,0))
action_list_r= list(itertools.product(speed_list,angle_list_r))
action_list_r.append((0,0))
action_list_m= list(itertools.product(speed_list,angle_list_m))
action_list_m.append((0,0))
actions={}
for s in g.state_space():
actions[s]=action_list_m
#update action set for grid edges
for s in g.state_space('l'):
actions[s]=action_list_l
for s in g.state_space('u'):
actions[s]=action_list_u
for s in g.state_space('r'):
actions[s]=action_list_r
for s in g.state_space('d'):
actions[s]=action_list_d
#update action set for grid corners
for s in g.state_space('llc'):
actions[s]=intersection(action_list_d,action_list_l)
for s in g.state_space('ulc'):
actions[s]=intersection(action_list_u,action_list_l)
for s in g.state_space('lrc'):
actions[s]=intersection(action_list_d,action_list_r)
for s in g.state_space('urc'):
actions[s]=intersection(action_list_u,action_list_r)
#update action for terminal state
for s in g.state_space('end'):
actions[s]=None
#set action set for states in last time step to none
for s in g.ac_state_space((g.nt)-1):
actions[s]=None
#set ations for grid
g.set_AR(actions)
return g
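# Usage sketch (parameters are illustrative; requires custom_functions to be importable):
#   xs = np.linspace(0, 1, 11); ys = np.linspace(0, 1, 11); tlist = np.linspace(0, 1, 21)
#   g = timeOpt_grid(tlist, xs, ys, (5, 0), (5, 10))
#   print_test(g)   # dump the (t, i, j) index space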
def print_values(V, g):
for t in range(g.nt):
print('t=',t)
print("")
for j in reversed(range(g.ni)):
print("---------------------------")
for i in range(g.nj):
v = V[t,i,j]
print(v, end=" ")
print("")
print("")
def print_policy(P, g):
for t in range(g.nt-1):
print('t=',t)
print("")
for j in reversed(range(g.ni)):
print("---------------------------")
for i in range(g.nj):
if (i,j)!=g.endpos:
a = P[(t,i,j)]
if a[0]!=0:
if a[1]==0:
a=(a[0],' R')
elif a[1]==math.pi*0.5:
a=(a[0],' U')
elif a[1]==math.pi:
a=(a[0],' L')
elif a[1]==math.pi*1.5:
a=(a[0],' D')
elif a[1]==math.pi*0.25:
a=(a[0],'UR')
elif a[1]==math.pi*0.75:
a=(a[0],'UL')
elif a[1]==math.pi*1.25:
a=(a[0],'DL')
elif a[1]==math.pi*1.75:
a=(a[0],'DR')
else:
a='None'
print(a,end=" ")
print("")
print("")
def print_test(g):
for t in range(g.nt):
for j in reversed(range(g.ni)):
for i in range(g.nj):
print(t,i,j),
print("") | [
"matplotlib"
] |
848a89efa53cb81f91a26ac17adda9f3b05d3b53 | Python | forkkr/Data-Mining-Lab | /DecisionTree/CHECK.py | UTF-8 | 667 | 2.703125 | 3 | [] | no_license | # from sklearn.datasets import load_iris
# from sklearn import tree
# from sklearn.tree.export import export_text
# import matplotlib.pyplot as plt
#
# iris = load_iris()
# clf = tree.DecisionTreeClassifier(criterion="entropy")
# clf = clf.fit(iris.txt, iris.target)
# print(iris.txt)
# print(iris.target)
# tree.plot_tree(clf.fit(iris.txt, iris.target))
#
# plt.show()
# r = export_text(clf, feature_names=iris['feature_names'])
# print(r)
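# Convert the semicolon-separated UCI white wine quality CSV into a comma-separated
# .data file so it can be read with a comma delimiter downstream.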
file = open('Dataset/WineQuality/winequality-white.csv', 'r')
wfile = open('Dataset/WineQuality/winequality.data', 'w')
for tp in file:
tp = tp.replace(';', ',')
print(tp)
wfile.write(tp)
wfile.close()
file.close() | [
"matplotlib"
] |
090d3babc1298cc9e243ea69bbd1dbd755f8e36e | Python | mohdazfar/t20-cricket-analytics | /cricket_stats.py | UTF-8 | 11,952 | 2.859375 | 3 | [] | no_license | import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import operator
import collections
from pandas.tools.plotting import parallel_coordinates
df = pd.read_csv('D:/kaggle/T20_matches_ball_by_ball_data.csv', parse_dates=["date"], low_memory=False)
def get_batting_stats(df):
'''
    This method includes the complete process of calculating
batting statistics of each batsman available in the
data set
:param df: Dataframe of complete ball by ball data from ICC
:return: batting statistics dataframe
'''
    # Accumulate each batsman's score in each match
runs_scored = df.groupby(["Match_Id", "Batting_Team", "Striker"], as_index=False)["Run_Scored"].sum()
# Count all the balls each batsman faced in a match
balls_faced = df.groupby(["Match_Id", "Batting_Team", "Striker"], as_index=False)["Run_Scored"].count()
balls_faced.columns = ["Match_Id", "Batting_Team", "Striker", "Balls_Faced"]
# Merging the two dataframes to make a complete batting scoreboard
batting_scoreboard = pd.merge(runs_scored, balls_faced,
on=["Match_Id", "Batting_Team", "Striker"], how="left")
t20_dismissal = df[["Match_Id", "Batting_Team", "Striker", "Dismissal"]]
t20_dismissal["concat_key"] = t20_dismissal["Match_Id"].map(str) + ":" + t20_dismissal["Striker"]
t20_dismissal = t20_dismissal.drop_duplicates(subset=["concat_key"], keep="last")
t20_dismissal = t20_dismissal.drop(labels="concat_key", axis=1)
t20_dismissal = t20_dismissal.sort_values(["Match_Id", "Batting_Team"])
t20_dismissal.Dismissal.fillna("not out", inplace=True)
batting_scoreboard = pd.merge(batting_scoreboard, t20_dismissal,
on=["Match_Id", "Batting_Team", "Striker"], how="left")
# Get a unique list of batsman from the scoreboard dataframe
batsman_statistics = pd.DataFrame({"Batsman": batting_scoreboard.Striker.unique()})
# Compute "Innings" information for each batsman from the scoreboard dataframe
Innings = pd.DataFrame(batting_scoreboard.Striker.value_counts())
Innings.reset_index(inplace=True)
Innings.columns = ["Batsman", "Innings"]
# Compute "Not outs" information for each batsman from the scoreboard dataframe
Not_out = batting_scoreboard.Dismissal == "not out"
batting_scoreboard["Not_out"] = Not_out.map({True: 1, False: 0})
Not_out = pd.DataFrame(batting_scoreboard.groupby(["Striker"])["Not_out"].sum())
Not_out.reset_index(inplace=True)
Not_out.columns = ["Batsman", "Not_out"]
# Compute "Balls" information for each batsman from the scoreboard dataframe
Balls = pd.DataFrame(batting_scoreboard.groupby(["Striker"])["Balls_Faced"].sum())
Balls.reset_index(inplace=True)
Balls.columns = ["Batsman", "Balls_Faced"]
# Compute "Runs" information for each batsman from the scoreboard dataframe
Run_Scored = pd.DataFrame(batting_scoreboard.groupby(["Striker"])["Run_Scored"].sum())
Run_Scored.reset_index(inplace=True)
Run_Scored.columns = ["Batsman", "Run_Scored"]
# Compute "Highest score" information for each batsman from the scoreboard dataframe
Highest_Score = pd.DataFrame(batting_scoreboard.groupby(["Striker"])["Run_Scored"].max())
Highest_Score.reset_index(inplace=True)
Highest_Score.columns = ["Batsman", "Highest_Score"]
# Compute "Centuries " information for each batsman from the scoreboard dataframe
Centuries = pd.DataFrame(
batting_scoreboard.loc[batting_scoreboard.Run_Scored >= 100,].groupby(["Striker"])["Run_Scored"].count())
Centuries.reset_index(inplace=True)
Centuries.columns = ["Batsman", "Centuries"]
# Compute "Half Centuries " information for each batsman from the scoreboard dataframe
Half_Centuries = pd.DataFrame(batting_scoreboard.loc[(batting_scoreboard.Run_Scored >= 50) &
(batting_scoreboard.Run_Scored < 100),].groupby(["Striker"])[
"Run_Scored"].count())
Half_Centuries.reset_index(inplace=True)
Half_Centuries.columns = ["Batsman", "Half_Centuries"]
    # Merge all the metrics into the batsman statistics dataframe
batsman_statistics = pd.merge(batsman_statistics, Innings, on=["Batsman"], how="left")
batsman_statistics = pd.merge(batsman_statistics, Not_out, on=["Batsman"], how="left")
batsman_statistics = pd.merge(batsman_statistics, Balls, on=["Batsman"], how="left")
batsman_statistics = pd.merge(batsman_statistics, Run_Scored, on=["Batsman"], how="left")
batsman_statistics = pd.merge(batsman_statistics, Highest_Score, on=["Batsman"], how="left")
batsman_statistics = pd.merge(batsman_statistics, Centuries, on=["Batsman"], how="left")
batsman_statistics.Centuries.fillna(0, inplace=True)
batsman_statistics.Centuries = batsman_statistics.Centuries.astype("int")
batsman_statistics = pd.merge(batsman_statistics, Half_Centuries, on=["Batsman"], how="left")
batsman_statistics.Half_Centuries.fillna(0, inplace=True)
batsman_statistics.Half_Centuries = batsman_statistics.Half_Centuries.astype("int")
# Compute "Batting average" for each batsman from the scoreboard dataframe
batsman_statistics["Batting_Average"] = batsman_statistics.Run_Scored / (
batsman_statistics.Innings - batsman_statistics.Not_out)
batsman_statistics.loc[batsman_statistics["Batting_Average"] == np.inf, "Batting_Average"] = 0
batsman_statistics.loc[batsman_statistics["Batting_Average"].isnull(), "Batting_Average"] = 0
# Compute "Strike rate for each batsman from the scoreboard dataframe
batsman_statistics["Strike_Rate"] = (batsman_statistics.Run_Scored * 100) / batsman_statistics.Balls_Faced
batsman_statistics = batsman_statistics.round({"Batting_Average": 2, "Strike_Rate": 2})
batsman_statistics = batsman_statistics.sort_values(['Run_Scored'], ascending=False)
return batsman_statistics
def get_bowling_statistics(df):
'''
    :param df: ball-by-ball dataframe for all T20 matches
    :return: bowling statistics dataframe indexed by bowler
'''
# Creating a column to determine bowling team for each ball bowled
df["Bowling_Team"] = pd.DataFrame(
np.where(df.Batting_Team == df.team, df.team2, df.team))
# Balls bowled by each bowler
balls_bowled = pd.DataFrame(df["Bowler"].value_counts())
balls_bowled.index.name = 'Bowler'
balls_bowled.columns = ["Total_Ball_Bowled"]
# Calculate Runs given by each bowler
df["runs_plus_extras"] = df["Run_Scored"] + df["Extras"]
runs_given = df.groupby(["Bowler"])["runs_plus_extras"].sum()
runs_given = pd.DataFrame(runs_given)
runs_given.reset_index()
# Wickets taken by each bowler
df["wickets_taken"] = df["Dismissal"].isnull().map({True: 0, False: 1})
wickets_taken = pd.DataFrame(df.groupby(["Bowler"])["wickets_taken"].sum())
wickets_taken.reset_index()
# Calculating major bowling statistics here including
# bowling average, economy, strike rate, total wickets etc
bowling_statistics = pd.merge(balls_bowled, runs_given, how="left", left_index=True, right_index=True)
bowling_statistics = pd.merge(bowling_statistics, wickets_taken, how="left", left_index=True, right_index=True)
bowling_statistics["Economy"] = bowling_statistics["runs_plus_extras"] / (
bowling_statistics["Total_Ball_Bowled"] / 6)
bowling_statistics["Average"] = bowling_statistics["runs_plus_extras"] / bowling_statistics["wickets_taken"]
bowling_statistics["Overs"] = bowling_statistics["Total_Ball_Bowled"] / 6
bowling_statistics = bowling_statistics.round({"Economy": 2, "Average": 2, "Overs": 0})
bowling_statistics.columns = ["Total_Ball_Bowled", "Total Runs", "Total Wickets", "Economy", "Average", "Overs"]
bowling_statistics = bowling_statistics[["Overs", "Total Runs", "Total Wickets", "Economy", "Average"]]
return bowling_statistics
def get_net_run_rate(df):
# Now let's calculate the Net Run Rate (NRR) for each team as a whole
# Batting rate per over for each team as a whole
df["Total_Runs"] = df["Run_Scored"] + df["Extras"] # Adding runs scored + extras to get total runs col
df["Bowling_Team"] = pd.DataFrame(
np.where(df.Batting_Team == df.team, df.team2, df.team))
runs_scored_by_X_team = pd.DataFrame(
df.groupby(["Batting_Team"])["Total_Runs"].sum()) # Runs scored by each team in total
balls_played_by_X_team = pd.DataFrame(
df.groupby(["Bowling_Team"])["Bowling_Team"].count() / 6) # Overs played by each team in total
balls_played_by_X_team.columns = ["Overs_Played"]
batting_rpo = pd.merge(runs_scored_by_X_team, balls_played_by_X_team, left_index=True,
right_index=True) # RATE PER OVER (RPO) in batting
batting_rpo = pd.DataFrame(batting_rpo["Total_Runs"] / batting_rpo["Overs_Played"])
batting_rpo.columns = ["Batting_RPO"]
# Bowling rate per over of each team as a whole
runs_scored_by_rest_of_the_teams = pd.DataFrame(
df.groupby(["Bowling_Team"])["Total_Runs"].sum()) # Runs scored against each team in total
balls_played_by_rest_of_the_teams = pd.DataFrame(df.groupby(["Bowling_Team"])[
"Match_Id"].unique().str.len()) * 20 # To calculate bowling rate per over total ball (120) are considered instead of balls played
balls_played_by_rest_of_the_teams.columns = ['Overs_Played']
bowling_rpo = pd.merge(runs_scored_by_rest_of_the_teams, balls_played_by_rest_of_the_teams, left_index=True,
right_index=True) # RATE PER OVER (RPO) in bowling
bowling_rpo = pd.DataFrame(bowling_rpo["Total_Runs"] / (bowling_rpo["Overs_Played"]))
bowling_rpo.columns = ["Bowling_RPO"]
# NET RUN RATE CALCULATION: RPO of (REQUIRED TEAM) - RPO of all against teams
    # RPO of (REQUIRED TEAM) = TOTAL RUNS SCORED AGAINST ALL TEAMS / TOTAL BALLS PLAYED TO SCORE THOSE RUNS
# RPO of all against teams = TOTAL RUNS SCORED BY ALL OTHER TEAMS AGAINST REQUIRED TEAM / 20*MATCHES
net_run_rate = pd.merge(batting_rpo, bowling_rpo, left_index=True, right_index=True)
net_run_rate["Net_Run_Rate"] = net_run_rate["Batting_RPO"] - net_run_rate["Bowling_RPO"] # Calc of net run rate
net_run_rate = net_run_rate.sort_values(["Net_Run_Rate"], ascending=False)
return net_run_rate
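# Usage sketch (assumes the kaggle ball-by-ball CSV loaded into `df` above):
#   batting = get_batting_stats(df)
#   bowling = get_bowling_statistics(df)
#   print(get_net_run_rate(df).head())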
def plot_teams_win_pct(df):
all_teams = df['team'].unique().tolist()
winning_pct = {}
for team in all_teams:
team_matches = df[(df['team'] == team) | (df['team2'] == team)]
team_match_ids = team_matches['Match_Id'].unique()
total_matches = len(team_match_ids)
df_winnings = df[df['winner'] == team]
total_wins = len(df_winnings['Match_Id'].unique())
winning_pct[team] = int(total_wins) / int(total_matches) * 100
winning_pct_sorted = sorted(winning_pct.items(), key=operator.itemgetter(1))
winning_pct_sorted = collections.OrderedDict(winning_pct_sorted)
plt.barh(range(len(winning_pct_sorted)), list(winning_pct_sorted.values()), align='center')
plt.yticks(range(len(winning_pct_sorted)), list(winning_pct_sorted.keys()))
plt.xlabel('Percentage Wins (%)')
plt.ylabel('Teams')
plt.show()
def plot_top5_batsman(df):
# Top 5 Batsman of T20 cricket in each team
df_sub = df[['Striker', 'Run_Scored', 'Batting_Team']]
df_sub['Run_Scored'] = df_sub['Run_Scored'].astype(int)
x = df_sub.pivot_table(index='Striker', columns='Batting_Team', aggfunc=sum)
all_teams = df['team'].unique().tolist()
top5players = {}
for team in all_teams:
y = x['Run_Scored'][team]
y = y.dropna()
top5players[team] = dict(y.sort_values(ascending=False)[:5])
df_plot = pd.DataFrame(top5players).stack().reset_index()
df_plot.columns = ['Player Name', 'Country', 'Total Score']
parallel_coordinates(df_plot, class_column='Country')
plt.show() | [
"matplotlib"
] |
ff184b6840e521b6c7a60c02e8317e10340acbb6 | Python | gcvalderrama/Jarvis | /graphview/Program.py | UTF-8 | 2,591 | 3.21875 | 3 | [
"Apache-2.0"
] | permissive | import networkx as nx
import matplotlib.pyplot as plt
import xml.etree.ElementTree as ET
def hierarchy_pos(G, root, width=2000, vert_gap = 200, vert_loc = 0, xcenter = 0.5,
pos = None, parent = None):
'''If there is a cycle that is reachable from root, then this will see infinite recursion.
G: the graph
root: the root node of current branch
width: horizontal space allocated for this branch - avoids overlap with other branches
vert_gap: gap between levels of hierarchy
vert_loc: vertical location of root
xcenter: horizontal location of root
pos: a dict saying where all nodes go if they have been assigned
parent: parent of this branch.'''
if pos == None:
pos = {root:(xcenter,vert_loc)}
else:
pos[root] = (xcenter, vert_loc)
    neighbors = list(G.neighbors(root))  # list() so removal below works whether neighbors() returns a list or an iterator
if parent != None:
neighbors.remove(parent)
if len(neighbors)!=0:
dx = width/len(neighbors)
nextx = xcenter - width/2 - dx/2
for neighbor in neighbors:
nextx += dx
pos = hierarchy_pos(G,neighbor, width = dx, vert_gap = vert_gap,
vert_loc = vert_loc-vert_gap, xcenter=nextx, pos=pos,
parent = root)
return pos
class RSTNode:
def __init__(self):
self.id = None
self.label = None
def __str__(self):
return self.label
class RSTRelation:
def __init__(self):
self.start = None
self.end = None
self.label = None
tree = ET.parse('LA091790-0041.txt.xml.xml')
root = tree.getroot()
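# The RST export is expected to contain <node id="..." label="..."/> elements and
# <relation start="..." end="..." label="..."/> elements; they are collected below
# and turned into a networkx graph.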
nodes = list()
relations = list()
for token_xml in root.iter('node'):
node = RSTNode()
node.id = int(token_xml.get('id'))
node.label = token_xml.get('label')
nodes.append(node)
for token_xml in root.iter('relation'):
relation = RSTRelation()
relation.start = int(token_xml.get('start'))
relation.end= int(token_xml.get('end'))
relation.label = token_xml.get('label')
relations.append(relation)
labels = {}
g = nx.Graph()
for node in nodes:
g.add_node(node.id)
labels[node.id] = node.label
for relation in relations:
g.add_edge(relation.start, relation.end)
#g.add_edge('a','b', weight=0.1)
pos = hierarchy_pos(g,1,vert_gap = 20)
#pos = nx.spring_layout(g)
plt.figure(figsize=(80,80))
plt.axis('equal')
nx.draw(g,pos=pos, with_labels=False, node_size=200, node_color='#A0CBE2')
nx.draw_networkx_labels(g,pos, labels)
nx.draw_networkx_edge_labels(g, pos=pos)
plt.savefig('circular_tree.png')
| [
"matplotlib"
] |
48a08031d737221e166e572cea2c8bb38f2a18c8 | Python | Kamesh-K/ML-SKLearn | /SVM_Parameters.py | UTF-8 | 591 | 2.515625 | 3 | [
"MIT"
] | permissive | import sklearn
import mglearn
import matplotlib.pyplot as plt
from sklearn.svm import SVC
X,y=mglearn.tools.make_handcrafted_dataset()
svm=SVC(kernel='rbf',C=10,gamma=0.1).fit(X,y)
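# C=10 keeps the margin fairly hard (little regularisation); gamma=0.1 sets how far a single
# training point's influence reaches in the RBF kernel.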
plt.scatter(X[:,0],X[:,1],s=60,c=y,cmap=mglearn.cm2)
sv = svm.support_vectors_
plt.scatter(sv[:, 0], sv[:, 1], s=200, facecolors='none', zorder=10, linewidth=3)
plt.show()
plt.scatter(X[:, 0], X[:, 1], s=60, c=y, cmap=mglearn.cm2)
# plot support vectors
sv = svm.support_vectors_
plt.scatter(sv[:, 0], sv[:, 1], s=200, facecolors='none',edgecolors='black', zorder=5, linewidth=2)
plt.show()
| [
"matplotlib"
] |
f37f3cbc7726d9d7358b5a0eae12f9d5956b7ed9 | Python | johny322/lab8 | /anim.py | UTF-8 | 1,288 | 2.8125 | 3 | [] | no_license | import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.animation as animation
import numpy as np
import pylab
#data1 = np.loadtxt("u.txt")
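# Animate the analytic heat-equation mode u(x,t) = exp(-4*pi^2*t) * sin(2*pi*x):
# fig1 sweeps over time (profile in x), fig2 sweeps over x (decay in t).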
fig1, ax1 = plt.subplots()
fig2, ax2 = plt.subplots()
#plt.close()
ax1.set_xlim((0,1))
ax1.set_ylim((-2,2))
ax2.set_xlim((0,1))
ax2.set_ylim((-2,2))
line1, = ax1.plot([],[], lw=2, label='U(x)')
line2, = ax2.plot([],[], lw=2, label='U(t)')
ax1.grid()
ax1.legend(loc=1)
ax2.grid()
ax2.legend(loc=1)
def init1():
line1.set_data([],[])
return line1,
def animate1(i):
x = np.linspace(0,1,50)
y = np.linspace(0,1,50)
z = np.exp(-4.0 * np.pi * np.pi * y[i]) * np.sin(2.0*np.pi * x)
line1.set_data(x, z)
ax1.set_title(str(i))
return line1,
def init2():
line2.set_data([],[])
return line2,
def animate2(i):
x = np.linspace(0,1,50)
y = np.linspace(0,1,50)
z = np.exp(-4.0 * np.pi * np.pi * y) * np.sin(2.0*np.pi * x[i])
line2.set_data(x, z)
ax2.set_title(str(i))
return line2,
# keep separate references to both animations so neither is garbage-collected while the window is open
anim1 = animation.FuncAnimation(fig1, animate1, init_func=init1, frames=50, interval=100,
                                blit=True)
anim2 = animation.FuncAnimation(fig2, animate2, init_func=init2, frames=50, interval=100,
                                blit=True)
plt.show()
| [
"matplotlib"
] |
9e3a4a208a5bf8b63523e97f611adfc95d19b707 | Python | cxzhangqi/MastersProject | /error_model.py | UTF-8 | 4,724 | 2.953125 | 3 | [] | no_license | import numpy as np
import matplotlib.pyplot as plt
import sys
def R(a, H, model='3D'):
"""
    Calculates the distance from the source to each of the four microphones.
:param a: 3x1 np.darray, co-ordinates of source
:param H: 4x3 np.darray, co-ordinates of microphones
:return: 1x4 ndarray, distance from microphones to source a
"""
if model == '3D':
z = H - a
return np.sqrt(np.sum(np.square(z), axis=1))
else:
z = H[:][:-1] - a[:-1]
return np.sqrt(np.sum(np.square(z), axis=1)) # Could also dot Z with Z.T, don't know which is better
def M(a, H):
    r = R(a, H, model="3D")[:, None]  # column vector so each row of (H - a) is scaled by its own source-microphone range
    m = np.divide(H - a, r, out=np.zeros_like(H - a), where=r != 0)  # When R returns 0 this will just put 0 in array, rather than a divide by 0 error
return np.concatenate((m, np.ones((4, 1))), axis=1)
def D(a, H, v_sound):
    r = v_sound * R(a, H)[:, None]
    d = np.divide(H - a, r, out=np.zeros_like(H - a), where=r != 0)  # When R returns 0 this will just put 0 in array, rather than a divide by 0 error
    return np.concatenate((d, np.ones((4, 1))), axis=1)
def d(i, j, a, H, v_sound):
"""
:param i: integer, denotes microphone
:param j: integer, denotes co-ordinate x=0, y=1 or z=2
:param a: 3x1 np.darray, co-ordinates of source
:param H: 4x3 np.darray, co-ordinates of microphones
:param v_sound: float, speed of sound
    :return: Returns element i,j of matrix D
"""
    a = np.asarray(a, dtype=float)
    if R(a, H)[i] == 0:
        return 0
    else:
        return np.divide(np.subtract(H[i][j], a[j]), np.multiply(v_sound, R(a, H)[i]))
def dDt_dv_ij(i, j, a, H, v_sound):
"""
:param i:
:param j:
:param a:
:param H:
:param v_sound:
:return:
"""
    a_x, a_y, a_z = a
    h_i = np.sqrt(np.power((a_x - H[i][0]), 2) + np.power((a_y - H[i][1]), 2))
h_j = np.sqrt(np.power((a_x - H[j][0]), 2) + np.power((a_y - H[j][1]), 2))
return np.multiply(np.divide(a_z, v_sound), np.subtract(np.divide(1, np.sqrt(np.power(a_z, 2) + np.power(h_i, 2))),
np.divide(1, np.sqrt(np.power(a_z, 2) + np.power(h_j, 2)))))
def E_sos(a, H, dc, v_sound):
"""
Returns a 4x1 matrix containing the position estimate errors for error in speed of sound, dc
First index is error in x
Second index is error in y
Third index is error in z
:param a: 3x1 ndarray, co-ordinates of source
:param H: 4x3 ndarray, co-ordinates of microphones
:param dc: float, error in speed of sound estimate
:return: 4x1 ndarray containing position estimate errors DAx, DAy, DAz and ...
"""
M_matrix = M(a, H)
try:
M_inv = np.linalg.inv(M_matrix)
except:
M_inv = np.zeros((4, 4))
T = R(a, H, model="3D") / v_sound
print(T)
return np.matmul(M_inv, T) * dc # This is a 4x1 array with all the errors in position and that final Rm
def E_time(a_x, a_y, a_z, H, DT, v_sound):
"""
Returns a 4x1 matrix containing the position estimate errors for error in time of arrivals to each microphone, 4x1 matrix ToA
First index is error in x
Second index is error in y
Third index is error in z
:param a: 3x1 ndarray, co-ordinates of source
:param H: 4x3 ndarray, co-ordinates of microphones
:param DT: 4x1 ndarray, error in speed of sound estimate
:return: 4x1 ndarray containing position estimate errors DAx, DAy, DAz and ...
"""
D = np.zeros((4, 4))
for i in range(4):
D[i][3] = 1
for j in range(3):
            D[i][j] = d(i, j, np.array([a_x, a_y, a_z]), H, v_sound)
if np.linalg.det(D) == 0: # Need to figure out a better way of handling this. For now just do this?
print("\n We got here!")
D_inv = np.linalg.inv(D)
return np.matmul(D_inv, DT) # Should be a 3 x 1 array
# plot hyperbola(a,)
# For now, just do it between two microphones rather than getting into a matrix.
def E_2D(a_x, a_y, a_z, H, v_sound, i, j):
    dDt_dv = dDt_dv_ij(i, j, np.array([a_x, a_y, a_z]), H, v_sound)
return np.multiply(a_z, dDt_dv) # Returns the change in arrival time between microphones i and j
def calculate_error_indicator(error_matrix,xrange,yrange):
# Integrate
sum = 0
for i in range(5, 15):
for j in range(5, 15):
sum += error_matrix[j][i]
# Return error square divided by area of the box
return np.divide(np.power(sum, 2), 100)
h1 = [0, 0, 0]
h2 = [0, 10, 0]
h3 = [10, 10, 0]
h4 = [10, 0, 0]
H = np.array([h1, h2, h3, h4], dtype=float)
x = np.linspace(-20, 20, 100)
y = np.linspace(-20, 20, 100)
X, Y = np.meshgrid(x, y, indexing='xy')
a = np.array([X, Y, np.zeros_like(X)])
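# Example sketch: position error induced by a 1 m/s sound-speed bias for a single
# source point (values are illustrative; c ~ 343 m/s in air):
#   a_point = np.array([5.0, 5.0, 2.0])
#   print(E_sos(a_point, H, dc=1.0, v_sound=343.0))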
#print(X)
#print(Y)
#print(M(a, H)) | [
"matplotlib"
] |
fc852a982b33e1abf22c80694eb14ed2d4dcd184 | Python | guidocioni/eumetsat-python | /plot_eumetsat.py | UTF-8 | 3,515 | 2.640625 | 3 | [] | no_license | # Required libraries
import matplotlib.pyplot as plt
# %matplotlib
from netCDF4 import Dataset
from mpl_toolkits.basemap import Basemap # Import the Basemap toolkit
import numpy as np # Import the Numpy package
from cpt_convert import loadCPT # Import the CPT convert function
from matplotlib.colors import LinearSegmentedColormap # Linear interpolation for color maps
from datetime import datetime
from glob import glob
import os
# Converts the CPT file to be used in Python
cpt = loadCPT('IR4AVHRR6.cpt')
# Makes a linear interpolation with the CPT file
cpt_convert = LinearSegmentedColormap('cpt', cpt)
folder = '/Users/thd5tt/Downloads/sat/'
first=True
for fname in glob(folder+"*.nc"):
# Search for the Scan Start in the file name
time = (fname[fname.find("MG_")+3:fname.find(".nc")])
# Format the "Observation Start" string
date = datetime.strptime(time,'%Y%m%d%H%M%S')
# Check if we already created the image
image_string = folder + 'images/' + datetime.strftime(date,'%Y%m%d%H%M%S') + '.png'
if os.path.isfile(image_string):
print('Skipping '+fname)
continue
print('Using '+fname)
# Open the file using the NetCDF4 library
nc = Dataset(fname)
# Extract the Brightness Temperature values from the NetCDF
ir_10p8 = np.ma.masked_less(nc.variables['ch9'][:], 10)
nu_c=930.659
alpha=0.9983
beta=0.627
C1=1.19104E-5
C2=1.43877
temp_b=( (C2*nu_c) / (alpha*np.ma.log((C1*nu_c**3/ir_10p8)+1)) )- ( beta/alpha )
temp_b=temp_b-273.15
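    # Inverse Planck conversion: nu_c, alpha and beta are EUMETSAT's band coefficients for the
    # SEVIRI 10.8 um window channel (ch9); temp_b is brightness temperature, converted to deg C above.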
if first:
lons = np.ma.masked_less(np.array(nc.variables['lon']), -180)
lats = np.ma.masked_less(np.array(nc.variables['lat']), -90)
# bmap = Basemap(projection='cyl', llcrnrlon=15, llcrnrlat=37,\
# urcrnrlon=22, urcrnrlat=42, resolution='i')
# bmap = Basemap(projection='cyl', llcrnrlon=-50, llcrnrlat=20, urcrnrlon=-5, urcrnrlat=60, resolution='l')
# bmap = Basemap(projection='stere', llcrnrlon=lons[0,0], llcrnrlat=lats[0,0], urcrnrlon=lons[-1,-1], urcrnrlat=lats[-1,-1],\
# lon_0=0, lat_0=35, resolution='l')
bmap = Basemap(projection='stere', llcrnrlon=7, llcrnrlat=30, urcrnrlon=30, urcrnrlat=44,\
lon_0=15, lat_0=38, resolution='i')
# Draw the coastlines, countries, parallels and meridians
first=False
x,y=bmap(lons,lats)
print(temp_b.min())
print(temp_b.max())
bmap.contourf(x,y,temp_b,np.arange(-60,40,0.1),cmap=cpt_convert,extend="both")
#bmap.contourf(x,y,ir_10p8,np.arange(0,120,1),cmap="gist_gray_r",extend="both")
bmap.drawcoastlines(linewidth=1, linestyle='solid', color='white')
bmap.drawcountries(linewidth=1, linestyle='solid', color='white')
bmap.drawparallels(np.arange(-90.0, 90.0, 10.), linewidth=0.5, color='white', labels=[False, False, False, False])
bmap.drawmeridians(np.arange(0.0, 360.0, 10.), linewidth=0.5, color='white', labels=[False, False, False, False])
# Insert the legend
# bmap.colorbar(location='right', label='Brightness Temperature [K]')
date_formatted = datetime.strftime(date,'%H:%MZ %a %d %b %Y')
# plt.title(date_formatted+" | "+u"\N{COPYRIGHT SIGN}"+'EUMETSAT - prepared by Guido Cioni (www.guidocioni.it)',fontsize=10 )
# plt.title(date_formatted+" | "+u"\N{COPYRIGHT SIGN}"+'EUMETSAT',fontsize=10 )
# plt.show()
    plt.savefig(image_string, bbox_inches='tight', dpi=150)  # save where the skip-check above looks
plt.clf()
plt.close('all')
| [
"matplotlib"
] |
bcd5d9aea00a3d658cf1529ab9b9b10819141d21 | Python | Oscarlsson/RL-competition | /src/lambdaplot.py | UTF-8 | 674 | 2.671875 | 3 | [] | no_license | #!/usr/bin/env python
import pandas as pd
import matplotlib.pyplot as plt
import sys
# ../outputs/131218-12-27-39-LibRLAgent-Episod100/finalOutput
csvfile = sys.argv[1]
data = pd.read_table(csvfile, sep=' ', header=None, index_col=[0,1])
indexes = set([a for (a,b) in data.index])
for index in indexes:
plotdata = data.loc[index]
# Normal plot
#plotdata[3].sort_index().plot()
# Normalized
plotdata = (plotdata[3])#-plotdata[3].min())
(plotdata/plotdata.abs().max()).sort_index().plot()
plt.xlabel("Change i reward for different values of lambda")
plt.xlabel("Lambda")
plt.ylabel("Cumulative reward")
plt.legend(indexes, loc='best')
plt.show()
| [
"matplotlib"
] |
7b91f77d76829f80c5c2903597360bbd1c278231 | Python | DevotionZhu/carla-code-pad | /plottoplist.py | UTF-8 | 872 | 2.9375 | 3 | [] | no_license | import math
import carla
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
client = carla.Client('localhost', 2000)
world = client.get_world()
world_map = world.get_map()
topology = world_map.get_topology()
count = 0
for segment in topology:
x1, y1 = segment[0].transform.location.x, segment[0].transform.location.y
x2, y2 = segment[1].transform.location.x, segment[1].transform.location.y
x1, x2 = -x1, -x2
if math.sqrt((x2-x1)**2+(y2-y1)**2) > 0.01:
if segment[0].is_intersection:
plt.plot([x1, x2], [y1, y2], color='black')
plt.plot([x1, x2], [y1, y2], 'o')
else:
plt.plot([x1, x2], [y1, y2])
plt.plot([x1, x2], [y1, y2], 'o')
plt.arrow(x1, y1, (x2+x1)/2 - x1, (y2+y1)/2 - y1,
shape='full', lw=0, length_includes_head=True, head_width=2)
else:
print 'Found zero segment', segment[0].transform.location
plt.show()
| [
"matplotlib"
] |
de9c07b244b9aaa6266c299d69a77b6413554819 | Python | treverhines/ModEst | /modest/cv.py | UTF-8 | 9,770 | 2.6875 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python
# Generalized Cross Validation
import numpy as np
from scipy.sparse import isspmatrix
import scipy.optimize
import matplotlib.pyplot as plt
import matplotlib.tri as tri
import logging
import rbf.halton
from myplot.cm import viridis
import modest.petsc
import modest.solvers
import modest.mp
from myplot.colorbar import pseudo_transparent_cmap
logger = logging.getLogger(__name__)
def chunkify(list,N):
# make chunks random
list = np.asarray(list)
K = list.shape[0]
# I need a randomly mixed list, but i need to to be mixed the
# same time every time
current_random_state = np.random.get_state()
np.random.seed(1)
if K > 0:
mix_range = np.random.choice(range(K),K,replace=False)
else:
# np.random.choice does not work for K==0
mix_range = np.zeros(0,dtype=int)
np.random.set_state(current_random_state)
chunked_mix_range = [mix_range[i::N] for i in range(N)]
return [list[c] for c in chunked_mix_range]
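# Illustrative behaviour (hypothetical values, since the split depends on the
# seeded shuffle above): chunkify(range(10), 3) returns three roughly equal
# numpy index arrays that together cover 0..9, and the same split is produced
# on every call.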
def dense_predictive_error(damping,A,L,data,fold=10):
'''
Description
-----------
Computes the predictive error for the given damping parameter(s).
Parameters
----------
damping: list of damping parameters for each regularization matrix
A: (N,M) dense system matrix
L: list of regularization matrices
data: (N,) data array
fold (default=10): number of cross validation folds
'''
if len(damping) != len(L):
raise ValueError(
'number of damping parameters must equal number of '
'regularization matrices')
A = np.asarray(A)
  L = [np.asarray(k) for k in L]
data = np.asarray(data)
N = data.shape[0]
if hasattr(fold,'__iter__'):
testing_sets = fold
else:
fold = min(fold,N) # make sure folds is smaller than the number of data points
testing_sets = chunkify(range(N),fold)
# scale regularization matrices. note that this makes copies
L = (k*d for d,k in zip(damping,L))
# stack regularization matrices
L = np.vstack(L)
  res = np.zeros(N)
for rmidx in testing_sets:
# build weight matrix. data points are excluded by given them
# nearly zero weight
W = np.ones(N)
W[rmidx] = 1e-10
A_new = W[:,None]*A
data_new = W*data
soln = modest.solvers.reg_dsolve(A_new,L,data_new)
pred = A.dot(soln)
res[rmidx] = pred[rmidx] - data[rmidx]
return res.dot(res)/N
def sparse_predictive_error(damping,A,L,data,fold=10,solver='spsolve',**kwargs):
'''
Description
-----------
Computes the predictive error for the given damping parameter(s).
Parameters
----------
damping: list of damping parameters for each regularization matrix
A: (N,M) sparse system matrix
L: list of regularization matrices
data: (N,) data array
fold (default=10): number of cross validation testing sets. The
data is randomly split into the desired number of testing sets.
If you want more control over which data points are in which testing
set then fold can also be specified as a list of testing set
indices. For example, if there are 5 data points then fold can
be specified as [[0,1],[2,3],[4]] which does 3-fold cross validation.
solver: which solver to use. choices are
'spsolve': scipy.spares.linalg.spsolve
'lgmres': scipy.sparse.linalg.lgmres
'lsqr': scipy.sparse.linalg.lsqr
'petsc': modest.petsc.petsc_solve
additional key word arguments are passed to the solver
'''
solver_dict = {'spsolve':modest.solvers.sparse_reg_dsolve,
'lgmres':modest.solvers.sparse_reg_lgmres,
'lsqr':modest.solvers.sparse_reg_lsqr,
'petsc':modest.solvers.sparse_reg_petsc}
if len(damping) != len(L):
raise ValueError(
'number of damping parameters must equal number of '
'regularization matrices')
if not all(isspmatrix(k) for k in L):
raise TypeError(
'L must be a list of sparse matrices')
if not isspmatrix(A):
raise TypeError(
      'A must be a sparse matrix')
data = np.asarray(data)
N = data.shape[0]
if hasattr(fold,'__iter__'):
testing_sets = fold
else:
fold = min(fold,N) # make sure folds is smaller than the number of data points
testing_sets = chunkify(range(N),fold)
# scale regularization matrices. note that this makes copies
L = (k*d for d,k in zip(damping,L))
# stack regularization matrices
L = scipy.sparse.vstack(L)
# empty residual vector
res = np.zeros(N)
for rmidx in testing_sets:
    # build weight matrix. data points are excluded by giving them
    # nearly zero weight
diag = np.ones(N)
diag[rmidx] = 1e-10
W = scipy.sparse.diags(diag,0)
# note that there are multiple matrix copies made here
A_new = W.dot(A)
data_new = W.dot(data)
soln = solver_dict[solver](A_new,L,data_new,**kwargs)
pred = A.dot(soln)
res[rmidx] = pred[rmidx] - data[rmidx]
return res.dot(res)/N
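# Illustrative call (hypothetical names: A_sp and L0 are assumed to be
# scipy.sparse matrices and d a length-N data array):
#   err = sparse_predictive_error([1e-2], A_sp, [L0], d, fold=5, solver='lsqr')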
def predictive_error(damping,A,L,data,fold=10,**kwargs):
if isspmatrix(A):
return sparse_predictive_error(damping,A,L,data,fold=fold,**kwargs)
else:
return dense_predictive_error(damping,A,L,data,fold=fold,**kwargs)
def mappable_predictive_error(args):
damping = args[0]
A = args[1]
L = args[2]
data = args[3]
fold = args[4]
kwargs = args[5]
index = args[6]
total = args[7]
out = predictive_error(damping,A,L,data,fold=fold,**kwargs)
logger.info('computed predictive error %s of %s' % (index,total))
return out
def optimal_damping_parameters(A,L,data,
fold=10,log_bounds=None,
itr=100,procs=None,plot=False,
**kwargs):
'''
returns the optimal penalty parameter for regularized least squares
using generalized cross validation
Parameters
----------
A: (N,M) system matrix
L: list of (K,M) regularization matrices
data: (N,) data vector
plot: whether to plot the predictive error curve
log_bounds: list of lower and upper bounds for each penalty parameter
'''
# number of damping parameters
P = len(L)
# values range from 0 to 1
tests = rbf.halton.halton(itr,P)
# scale tests to the desired bounds
if log_bounds is None:
log_bounds = [[-6.0,6.0]]*P
log_bounds = np.asarray(log_bounds)
if log_bounds.shape != (P,2):
raise TypeError('log_bounds must be a length P list of lower and upper bounds')
bounds_diff = log_bounds[:,1] - log_bounds[:,0]
bounds_min = log_bounds[:,0]
tests = tests*bounds_diff
tests = tests + bounds_min
tests = 10**tests
args = ((t,A,L,data,fold,kwargs,i,len(tests)) for i,t in enumerate(tests))
errs = modest.mp.parmap(mappable_predictive_error,args,workers=procs)
errs = np.asarray(errs)
best_err = np.min(errs)
best_idx = np.argmin(errs)
best_test = tests[best_idx]
logger.info('best predictive error: %s' % best_err)
logger.info('best damping parameters: %s' % best_test)
if (P > 2) & plot:
logger.info(
'cannot plot predictive error for more than two damping '
'parameters')
elif (P == 1) & plot:
# sort for plotting purposes
sort_idx = np.argsort(tests[:,0])
tests = tests[sort_idx,:]
errs = errs[sort_idx]
fig,ax = plt.subplots()
if hasattr(fold,'__iter__'):
ax.set_title('%s-fold cross validation curve' % len(fold))
else:
ax.set_title('%s-fold cross validation curve' % fold)
ax.set_ylabel('predictive error')
ax.set_xlabel('penalty parameter')
ax.loglog(tests[:,0],errs,'k-')
ax.loglog(best_test[0],best_err,'ko',markersize=10)
ax.grid(zorder=-1)
fig.tight_layout()
elif (P == 2) & plot:
fig,ax = plt.subplots()
log_tests = np.log10(tests)
log_errs = np.log10(errs)
vmin = np.min(log_errs)
    vmax = np.max(log_errs)
viridis_alpha = pseudo_transparent_cmap(viridis,0.5)
# if fewer than three tests were made then do not triangulate
if tests.shape[0] >= 3:
# make triangularization in logspace
triangles = tri.Triangulation(log_tests[:,0],log_tests[:,1])
# set triangles to linear space
triangles.x = tests[:,0]
triangles.y = tests[:,1]
ax.tripcolor(triangles,log_errs,
vmin=vmin,vmax=vmax,cmap=viridis_alpha,zorder=0)
ax.scatter([best_test[0]],[best_test[1]],
s=200,c=[np.log10(best_err)],
vmin=vmin,vmax=vmax,zorder=2,cmap=viridis)
c = ax.scatter(tests[:,0],tests[:,1],s=50,c=log_errs,
vmin=vmin,vmax=vmax,zorder=1,cmap=viridis)
if hasattr(fold,'__iter__'):
ax.set_title('%s-fold cross validation curve' % len(fold))
else:
ax.set_title('%s-fold cross validation curve' % fold)
ax.set_xscale('log')
ax.set_yscale('log')
cbar = fig.colorbar(c)
cbar.set_label('log predictive error')
ax.set_xlabel('damping parameter 1')
ax.set_ylabel('damping parameter 2')
ax.grid(zorder=0)
fig.tight_layout()
return best_test, best_err, tests, errs
def optimal_damping_parameter(A,L,data,
fold=10,log_bounds=None,
itr=100,**kwargs):
'''
used when only searching for one penalty parameter
'''
if log_bounds is None:
log_bounds = [-6.0,6.0]
out = optimal_damping_parameters(A,[L],data,fold=fold,
log_bounds=[log_bounds],
itr=itr,**kwargs)
best_test,best_err,tests,errs = out
best_test = best_test[0]
tests = tests[:,0]
return best_test, best_err, tests, errs
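# Minimal usage sketch (hypothetical data; A, L and d below are illustrative
# assumptions, not part of this module):
#   import numpy as np
#   A = np.random.randn(50, 20)                       # system matrix
#   L = np.eye(20)                                    # zeroth-order Tikhonov matrix
#   d = A.dot(np.random.randn(20)) + 0.1*np.random.randn(50)
#   best_damp, best_err, tests, errs = optimal_damping_parameter(A, L, d, fold=10, itr=20)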
| [
"matplotlib"
] |
c3cf79ce96628316f8b3eb5106edefa7d17d6b8b | Python | SStar1314/ML | /show_picture/reverse_img.py | UTF-8 | 511 | 3.046875 | 3 | [] | no_license | import scipy.misc
import matplotlib.pyplot as plt
lena = scipy.misc.ascent()
plt.subplot(221)
plt.title('Original')
plt.axis('off')
plt.imshow(lena)
plt.subplot(222)
plt.title('Flipped')
plt.axis('off')
plt.imshow(lena[:,::-1])
plt.subplot(223)
plt.title('Sliced')
plt.axis('off')
plt.imshow(lena[:lena.shape[0]//2,:lena.shape[1]//2])
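# boolean mask: zero out every pixel whose intensity value is even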
mask = lena % 2 == 0
masked_lena = lena.copy()
masked_lena[mask] = 0
plt.subplot(224)
plt.title('Masked')
plt.axis('off')
plt.imshow(masked_lena)
plt.show()
| [
"matplotlib"
] |