blob_id | language | repo_name | path | src_encoding | length_bytes | score | int_score | detected_licenses | license_type | code | used_libs
---|---|---|---|---|---|---|---|---|---|---|---
273d9cd140c28edb0194b5f318a72f504c6cd7ad | Python | vvs1999/Signature-extraction-using-connected-component-labeling | /signature_extractor-master/signature_extractor.py | UTF-8 | 2,133 | 2.828125 | 3 | [
"MIT"
] | permissive | import cv2
import numpy as np
from skimage import measure
from skimage.measure import label, regionprops
from skimage.color import label2rgb
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from scipy import ndimage
from skimage import morphology
# read the input image
img = cv2.imread('20191201_170524.jpg', 0)
img = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY)[1] # ensure binary
# connected component analysis with the scikit-image framework
blobs = img > img.mean()
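# background=1 tells measure.label to treat the bright (above-mean) pixels as background, so the dark strokes become the labelled components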
blobs_labels = measure.label(blobs, background=1)
image_label_overlay = label2rgb(blobs_labels, image=img)
fig, ax = plt.subplots(figsize=(10, 6))
'''
# plot the connected components (for debugging)
ax.imshow(image_label_overlay)
ax.set_axis_off()
plt.tight_layout()
plt.show()
'''
the_biggest_component = 0
total_area = 0
counter = 0
average = 0.0
for region in regionprops(blobs_labels):
if region.area>10:
total_area = total_area + region.area
counter = counter + 1
#print region.area # (for debugging)
# take regions with large enough areas
if region.area >= 250:
if (region.area > the_biggest_component):
the_biggest_component = region.area
average = (total_area/counter)
print ("the_biggest_component: " + str(the_biggest_component))
print ("average: " + str(average))
# experimentally derived ratio; adjust it for your own case
# a4_constant is the threshold used to drop connected components smaller than it in A4-size scanned documents
a4_constant = ((average/84.0)*250.0)+100
print ("a4_constant: " + str(a4_constant))
# remove connected components smaller than a4_constant
b = morphology.remove_small_objects(blobs_labels, a4_constant)
# save the pre-version, i.e. the image with its connected components labelled in colors
plt.imsave('pre_version.png', b)
# read the pre-version
img = cv2.imread('pre_version.png', 0)
# ensure binary
img = cv2.threshold(img, 0, 255,cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]
# save the result
cv2.imwrite("output.png", img) | [
"matplotlib"
] |
5568ac8f1fd47da2bc4c20985ef2499544ceb841 | Python | will-hossack/Poptics | /examples/vector/inverse.py | UTF-8 | 462 | 3.3125 | 3 | [
"MIT"
] | permissive | """
Test use of inverse square calculations
"""
import tio as t
import vector as v
import matplotlib.pyplot as pt
def main():
xdata = []
ydata = []
a = t.getVector3d("Central",[5,1,0])
t.tprint(a)
for i in range(0,100):
x = i/10.0
xdata.append(x)
b = v.Vector3d(x,0,0)
f = a.inverseSquare(b,-1.0)
t.tprint(f)
ydata.append(abs(f))
pt.plot(xdata,ydata)
pt.show()
main()
| [
"matplotlib"
] |
7ec03897eee421d99f9e8d16d8ab0244fd22c6dd | Python | kenterakawa/StructuralCalculation | /PropellantTank/tanksizing/str_propellant_tank_S1.py | UTF-8 | 19,701 | 2.640625 | 3 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-
#
# Copyright (c) 2017 Interstellar Technologies Inc. All Rights Reserved.
# Authors : Takahiro Inagawa
# All rights Reserved
"""
For rocket conceptual design studies, this script
- computes the tensile stress due to tank internal pressure and bending moment
- computes the buckling stress due to axial force and bending moment
"""
import sys
import os
import numpy as np
import matplotlib.pyplot as plt
import imp
from scipy import interpolate
from scipy import optimize
import configparser
from matplotlib.font_manager import FontProperties
plt.close('all')
class Tank:
def __init__(self, setting_file, reload = False):
if (reload): # on reload, keep using the existing self.setting values
pass
else:
self.setting_file = setting_file
self.setting = configparser.ConfigParser()
self.setting.optionxform = str # keep option names case-sensitive
self.setting.read(setting_file, encoding='utf8')
setting = self.setting
self.name = setting.get("全体", "名前")
self.is_save_fig = setting.getboolean("全体", "図を保存する?")
self.diameter = setting.getfloat("タンク", "直径[m]")
self.thickness = setting.getfloat("タンク", "タンク厚み[mm]")
self.press = setting.getfloat("タンク", "内圧[MPa]")
self.length = setting.getfloat("タンク", "Fuel平行部長さ[m]")
self.aspect = setting.getfloat("タンク", "タンク鏡板縦横比")
self.fueldensity = setting.getfloat("タンク", "Fuel密度[kg/m3]") #追加
self.loxdensity = setting.getfloat("タンク", "LOx密度[kg/m3]") #追加
self.obyf = setting.getfloat("タンク", "質量酸燃比O/F") #追加
self.propflowrate = setting.getfloat("タンク", "推進剤質量流量 [kg/s]") #追加
self.hepressure = setting.getfloat("タンク", "Heガスボンベ圧力[MPa]") #追加
self.material_name = setting.get("材料", "材料名")
self.rupture = setting.getfloat("材料", "引張破断応力[MPa]")
self.proof = setting.getfloat("材料", "耐力[MPa]")
self.safety_ratio = setting.getfloat("材料", "安全率")
self.density = setting.getfloat("材料", "密度[kg/m3]")
self.poisson_ratio = setting.getfloat("材料","ポアソン比") #追加
self.youngmodulus = setting.getfloat("材料","ヤング率E[GPa]") #追加
self.welding_eff = setting.getfloat("材料", "溶接継手効率[%]")
self.moment_bend = setting.getfloat("外力", "曲げモーメント[N・m]")
self.radius = self.diameter / 2
# Tank mass calculation
self.volume_hemisphere = 4 / 3 * np.pi * (self.radius**3 * self.aspect - (self.radius - self.thickness/1000)**2 * (self.radius * self.aspect - self.thickness/1000))
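# shell volume of the two ellipsoidal end caps (together one full ellipsoid): outer ellipsoid minus the inner one offset by the wall thickness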
#self.volume_hemisphere = self.thickness * 2 * np.pi * (self.radius**2 + (self.radius*self.aspect)**2 * math.atan(self.aspect) / self.aspect #追加楕円体の計算方法別式
self.volume_straight = np.pi * (self.radius**2 - (self.radius - self.thickness/1000)**2) * self.length
self.weight = self.density * (self.volume_hemisphere + self.volume_straight)
# (added) Fuel capacity calculation
self.content_volume_head = 4 / 3 * np.pi * ((self.radius - self.thickness/1000)**3 * self.aspect)
self.content_volume_body = np.pi * (self.radius - self.thickness/1000)**2 * self.length
self.content_volume = self.content_volume_head + self.content_volume_body
self.content_weight = self.content_volume * self.fueldensity
self.hefuel_volume = self.content_volume*self.press/self.hepressure
# (added) LOx capacity calculation (wall thickness and diameter assumed equal to the NP tank)
self.content_loxweight = self.content_weight * self.obyf
self.content_loxvolume = self.content_loxweight / self.loxdensity
self.content_volume_loxbody = self.content_loxvolume - self.content_volume_head
self.loxlength = self.content_volume_loxbody / (np.pi * (self.radius - self.thickness/1000)**2)
self.helox_volume = self.content_loxvolume*self.press/self.hepressure
# (added) LOx tank mass calculation (wall thickness and diameter assumed equal to the NP tank)
self.loxvolume_straight = np.pi * (self.radius**2 - (self.radius - self.thickness/1000)**2) * self.loxlength
self.loxweight = self.density * (self.volume_hemisphere + self.loxvolume_straight)
self.content_totalweight = self.content_weight + self.content_loxweight
self.totalweight = self.weight + self.loxweight
# Internal pressure stress calculation
self.stress_theta = self.press * self.radius / (self.thickness / 1000) # [MPa]
self.stress_longi = 0.5 * self.stress_theta
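# thin-wall pressure vessel relations: hoop stress = p*r/t, longitudinal stress = p*r/(2t); combined into a von Mises stress below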
s1 = self.stress_theta
s2 = self.stress_longi
self.stress_Mises = np.sqrt(0.5 * (s1**2 + s2**2 + (s1 - s2)**2))
# (update) Bending moment stress calculation
d1 = self.diameter
d2 = self.diameter - (self.thickness / 1000) * 2
self.I = np.pi / 64 * (d1**4 - d2**4)
self.stress_bend = self.moment_bend / self.I * 1e-6
s2 = self.stress_longi + self.stress_bend
self.stress_total_p = np.sqrt(0.5 * (s1**2 + s2**2 + (s1 - s2)**2))
self.stress_total_m = np.sqrt(0.5 * (s1**2 + s2**2 + (s1 + s2)**2))
# Buckling assessment (Bruhn method)
# Approximation of Bruhn Fig. 8.9: Kc = aZ^2 + bZ + c
# valid only for Z > 100 and 100 < r/t < 500
self.BruhnCa = float(5.6224767 * 10**-8)
self.BruhnCb = float(0.2028736)
self.BruhnCc = float(-2.7833319)
self.BruhnEtha = float(0.9) # saturates around 0.9 for Fcr > 20 MPa
self.BruhnZ = self.length**2 / (self.radius * self.thickness / 10**3) * np.sqrt(1 - self.poisson_ratio**2)
self.BruhnKc = self.BruhnCa * self.BruhnZ**2 + self.BruhnCb * self.BruhnZ + self.BruhnCc
self.Fcr = self.BruhnEtha * np.pi**2 * self.youngmodulus * 10**3 * self.BruhnKc / 12 / (1 - self.poisson_ratio**2) * (self.thickness / 10**3 / self.length)**2
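# i.e. Fcr = eta * pi^2 * E * Kc / (12 * (1 - nu^2)) * (t / L)^2, with Kc taken from the Bruhn chart fit above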
self.tankarea = np.pi * (self.diameter**2 - (self.diameter - self.thickness / 10**3)**2) / 4
self.bucklingforce = self.Fcr * self.tankarea * 10**3
# Approximation of Bruhn Fig. 8.8a
self.Fcrratio = self.Fcr / self.youngmodulus / 10**3
# Tank internal pressure results
def display(self):
# NP tank specifications
print("タンク鏡重量 :\t\t%.1f [kg]" %(self.volume_hemisphere * self.density)) #add
print("Fuelタンク重量 :\t\t%.1f [kg]" %(self.weight))
print("Fuelタンク内圧 :\t\t%.1f [MPa]" % (self.press))
print("Fuelタンク直径 :\t\t%d [mm]" % (self.diameter * 1000))
print("Fuelタンク肉厚 :\t\t%.1f [mm]" % (self.thickness))
print("Fuelタンク平行部長さ :\t%.1f [m]" % (self.length))
print("Fuelタンク鏡部容積 :\t%.1f [m3]" % (self.content_volume_head)) #add
print("Fuelタンク平行部容積 :\t%.1f [m3]" % (self.content_volume_body)) #add
print("Fuelタンク総容積: \t\t%.3f [m3]" % (self.content_volume))
print("Fuel重量: \t\t%.1f [kg]" % (self.content_weight))
print("Fuel用He必要体積: \t\t%.3f [m3]" % (self.hefuel_volume))
print()
# LOx tank specifications (computed automatically)
print("LOxタンク重量 :\t\t%.1f [kg]" %(self.loxweight))
print("LOxタンク平行部長さ :\t%.1f [m]" % (self.loxlength))
print("LOxタンク鏡部容積 :\t%.1f [m3]" % (self.content_volume_head)) #add
print("LOxタンク平行部容積 :\t%.1f [m3]" % (self.content_volume_loxbody)) #add
print("LOxタンク総容積: \t\t%.3f [m3]" % (self.content_loxvolume))
print("LOx重量: \t\t%.1f [kg]" % (self.content_loxweight))
print("LOx用He必要体積: \t\t%.3f [m3]" % (self.helox_volume))
print()
print("タンク総重量: \t\t%.1f [kg]" % (self.totalweight))
print("推進剤総重量: \t\t%.1f [kg]" % (self.content_totalweight))
print("燃焼時間: \t\t%.2f [s]" % (self.content_totalweight / self.propflowrate))
print()
# Stresses
print("内圧 半径方向応力 :\t%.1f [MPa]" % (self.stress_theta))
print("内圧 長手方向応力 :\t%.1f [MPa]" % (self.stress_longi))
print("内圧 ミーゼス応力 :\t%.1f [MPa]" % (self.stress_Mises))
print()
print("断面二次モーメント :\t%.3f " % (self.I))
print("曲げモーメント応力 :\t%.4f [MPa]" % (self.stress_bend))
print("合計 ミーゼス応力圧縮 :\t%.1f [MPa]" % (self.stress_total_p))
print("合計 ミーゼス応力引張 :\t%.1f [MPa]" % (self.stress_total_m))
# Tank buckling results
print("係数Z :\t\t\t%.2f " % (self.BruhnZ))
print("係数Kc :\t\t%.2f " % (self.BruhnKc))
print("座屈応力Fcr(90%%確度) :\t%.2f [MPa]" % (self.Fcr))
print("座屈限界軸力(90%%確度) :\t%.2f [kN]" % (self.bucklingforce))
print()
print("99%%確度座屈評価 Fig.8.8aにて評価をしてください。合格条件:Fcr/E計算値>Fcr/Eグラフ読み取り値")
print("(評価用)r/t :\t\t%.0f " % (self.radius / self.thickness * 10**3))
print("(評価用)L/r :\t\t%.1f " % (self.length / self.radius))
print("計算値Fcr/E :\t\t%.6f " % (self.Fcrratio))
def print(self):
with open("tankout_S1.out","w") as output:
print("タンク鏡重量:\t%.1f [kg]" %(self.volume_hemisphere * self.density),file=output) #add
print("Fuelタンク重量:\t%.1f [kg]" %(self.weight),file=output)
print("Fuelタンク内圧:\t%.1f [MPa]" % (self.press),file=output)
print("Fuelタンク直径:\t%d [mm]" % (self.diameter * 1000),file=output)
print("Fuelタンク肉厚:\t%.1f [mm]" % (self.thickness),file=output)
print("Fuelタンク平行部長さ:\t%.2f [m]" % (self.length),file=output)
print("Fuelタンク鏡部容積:\t%.2f [m3]" % (self.content_volume_head),file=output) #add
print("Fuelタンク平行部容積:\t%.2f [m3]" % (self.content_volume_body),file=output) #add
print("Fuelタンク平行部重量:\t%.1f [kg]" % (self.volume_straight * self.density),file=output) #add
print("Fuelタンク容積: \t%.3f [m3]" % (self.content_volume),file=output)
print("Fuel重量: \t%.1f [kg]" % (self.content_weight),file=output)
print("Fuel用He必要体積: \t\t%.3f [m3]" % (self.hefuel_volume),file=output)
print()
# LOx tank specifications (computed automatically)
print("LOxタンク重量:\t%.1f [kg]" %(self.loxweight),file=output)
print("LOxタンク平行部長さ:\t%.2f [m]" % (self.loxlength),file=output)
print("LOxタンク鏡部容積:\t%.2f [m3]" % (self.content_volume_head),file=output) #add
print("LOxタンク平行部容積:\t%.2f [m3]" % (self.content_volume_loxbody),file=output) #add
print("LOxタンク平行部重量:\t%.1f [kg]" % (self.loxvolume_straight * self.density),file=output) #add
print("LOxタンク容積: \t%.3f [m3]" % (self.content_loxvolume),file=output)
print("LOx重量: \t%.1f [kg]" % (self.content_loxweight),file=output)
print("LOx用He必要体積: \t\t%.3f [m3]" % (self.hefuel_volume),file=output)
print()
print("タンク総重量: \t%.1f [kg]" % (self.totalweight),file=output)
print("推進剤総重量: \t%.1f [kg]" % (self.content_totalweight),file=output)
print("燃焼時間: \t%.2f [s]" % (self.content_totalweight / self.propflowrate),file=output)
print()
# Stresses
print("内圧半径方向応力:\t%.1f [MPa]" % (self.stress_theta),file=output)
print("内圧長手方向応力:\t%.1f [MPa]" % (self.stress_longi),file=output)
print("内圧ミーゼス応力:\t%.1f [MPa]" % (self.stress_Mises),file=output)
print()
print("断面二次モーメント:\t%.3f [m4] " % (self.I),file=output)
print("曲げモーメント応力:\t%.4f [MPa]" % (self.stress_bend),file=output)
print("合計ミーゼス応力圧縮:\t%.1f [MPa]" % (self.stress_total_p),file=output)
print("合計ミーゼス応力引張:\t%.1f [MPa]" % (self.stress_total_m),file=output)
# Tank buckling results
print("係数Z:\t%.2f [ND]" % (self.BruhnZ),file=output)
print("係数Kc:\t%.2f [ND]" % (self.BruhnKc),file=output)
print("座屈応力Fcr(90%%確度):\t%.2f [MPa]" % (self.Fcr),file=output)
print("座屈限界軸力(90%%確度):\t%.2f [kN]" % (self.bucklingforce),file=output)
print("(評価用)r/t:\t%.0f [ND]" % (self.radius / self.thickness * 10**3),file=output)
print("(評価用)L/r:\t%.1f [ND]" % (self.length / self.radius),file=output)
print("計算値Fcr/E:\t%.6f [ND]" % (self.Fcrratio),file=output)
def change_setting_value(self, section, key, value):
"""設定ファイルの中身を変更してファイルを保存しなおす
Args:
sectiojn (str) : 設定ファイルの[]で囲まれたセクション
key (str) : 設定ファイルのkey
value (str or float) : 書き換える値(中でstringに変換)
"""
self.setting.set(section, key, str(value))
self.__init__(self.setting_file, reload=True)
if __name__ == '__main__':
if len(sys.argv) == 1:
setting_file = 'setting_S1.ini'
else:
setting_file = sys.argv[1]
assert os.path.exists(setting_file), "ファイルが存在しません"
plt.close("all")
plt.ion()
t = Tank(setting_file)
t.display()
t.print()
# t.plot()
def calc_moment(vel, rho, diameter, length, CN):
"""機体にかかる曲げモーメントを概算
Args:
vel (float) : 速度 [m/s]
rho (float) : 空気密度 [kg/m3]
diameter (float) : 直径 [m]
length (float) : 機体長さ [m]
CN (float) : 法戦力係数
Return:
法戦力、
法戦力が全部先端にかかったとしたと仮定した最大の曲げモーメント、
真ん中重心の際の曲げモーメント
"""
A = diameter **2 * np.pi / 4
N = 0.5 * rho * vel**2 * A * CN
moment_max = length * N
return N, moment_max, moment_max/2
""" 速度 420m/s, 空気密度0.4kg/m3, 機体長さ14m, 法戦力係数0.4 """
N, moment_max, moment_nominal = calc_moment(420, 0.4, t.diameter, 14, 0.4)
moment_a = np.linspace(0, moment_max * 1.1)
press_l = [0.3, 0.4, 0.5, 0.6, 0.7]
eff = t.welding_eff / 100
""" グラフを書く (使わない) """
"""
plt.figure(0)
plt.figure(1)
for p in press_l:
t.change_setting_value("タンク", "内圧[MPa]", p)
temp0 = []
temp1 = []
temp2 = []
temp3 = []
for i in moment_a:
t.change_setting_value("外力", "曲げモーメント[N・m]", i)
temp0.append(t.stress_total_p)
temp3.append(t.stress_total_m)
temp1.append(t.stress_bend)
temp2.append(t.stress_theta)
plt.figure(0)
plt.plot(moment_a/1e3, temp0, label="内圧 %.1f [MPa]" % (p))
plt.figure(1)
plt.plot(moment_a/1e3, temp3, label="内圧 %.1f [MPa]" % (p))
plt.figure(3)
plt.plot(moment_a/1e3, temp2, label="内圧 %.1f [MPa]" % (p))
plt.figure(0)
# plt.axhline(y=t.rupture / eff, label="%s 破断応力" % (t.material_name), color="C6")
# plt.axhline(y=t.proof / eff, label="%s 0.2%%耐力" % (t.material_name), color="C7")
# plt.axhline(y=t.rupture / t.safety_ratio / eff, label="安全率= %.2f 破断応力" % (t.safety_ratio), color="C8")
plt.axhline(y=t.proof / t.safety_ratio / eff, label="%s, 安全率= %.2f 耐力" % (t.material_name, t.safety_ratio), color="C6")
plt.axvline(x=moment_max / 1e3, label="飛行時最大曲げM(高見積り)", color="C7")
plt.axvline(x=moment_nominal / 1e3, label="飛行時最大曲げM(低見積り)", color="C8")
plt.grid()
plt.xlabel("曲げモーメント [kN・m]")
plt.ylabel("引張側 最大ミーゼス応力")
plt.title(t.name + " タンク応力, 肉厚 = %.1f[mm], 直径 = %.1f[m], 溶接効率 = %d[%%]" % (t.thickness, t.diameter, t.welding_eff))
plt.legend()
if(t.is_save_fig):plt.savefig("stress_tank_" + t.name + "_1.png")
plt.figure(1)
# plt.axhline(y=t.rupture / eff, label="%s 破断応力" % (t.material_name), color="C6")
# plt.axhline(y=t.proof / eff, label="%s 0.2%%耐力" % (t.material_name), color="C7")
# plt.axhline(y=t.rupture / t.safety_ratio / eff, label="安全率= %.2f 破断応力" % (t.safety_ratio), color="C8")
plt.axhline(y=t.proof / t.safety_ratio / eff, label="%s, 安全率= %.2f 耐力" % (t.material_name, t.safety_ratio), color="C6")
plt.axvline(x=moment_max / 1e3, label="飛行時最大曲げM(高見積り)", color="C7")
plt.axvline(x=moment_nominal / 1e3, label="飛行時最大曲げM(低見積り)", color="C8")
plt.grid()
plt.xlabel("曲げモーメント [kN・m]")
plt.ylabel("圧縮側 最大ミーゼス応力")
plt.title(t.name + " タンク応力, 肉厚 = %.1f[mm], 直径 = %.1f[m], 溶接効率 = %d[%%]" % (t.thickness, t.diameter, t.welding_eff))
plt.legend()
if(t.is_save_fig):plt.savefig("stress_tank_" + t.name + "_2.png")
plt.figure(2)
plt.plot(moment_a/1e3, temp1, label="法戦力による曲げモーメント")
# plt.axhline(y=t.rupture / eff, label="%s 破断応力" % (t.material_name), color="C6")
# plt.axhline(y=t.proof / eff, label="%s 0.2%%耐力" % (t.material_name), color="C7")
# plt.axhline(y=t.rupture / t.safety_ratio / eff, label="安全率= %.2f 破断応力" % (t.safety_ratio), color="C8")
plt.axhline(y=t.proof / t.safety_ratio / eff, label="%s, 安全率= %.2f 耐力" % (t.material_name, t.safety_ratio), color="C6")
plt.axvline(x=moment_max / 1e3, label="飛行時最大曲げM(高見積り)", color="C7")
plt.axvline(x=moment_nominal / 1e3, label="飛行時最大曲げM(低見積り)", color="C8")
plt.grid()
plt.xlabel("曲げモーメント [kN・m]")
plt.ylabel("長手方向応力")
plt.title(t.name + " タンク応力, 肉厚 = %.1f[mm], 直径 = %.1f[m], 溶接効率 = %d[%%]" % (t.thickness, t.diameter, t.welding_eff))
plt.legend()
if(t.is_save_fig):plt.savefig("stress_tank_" + t.name + "_3.png")
plt.figure(3)
plt.axhline(y=t.proof / t.safety_ratio / eff, label="%s, 安全率= %.2f 耐力" % (t.material_name, t.safety_ratio), color="C6")
plt.axvline(x=moment_max / 1e3, label="飛行時最大曲げM(高見積り)", color="C7")
plt.axvline(x=moment_nominal / 1e3, label="飛行時最大曲げM(低見積り)", color="C8")
plt.grid()
plt.xlabel("曲げモーメント [kN・m]")
plt.ylabel("タンク フープ応力")
plt.title(t.name + " タンク フープ応力, 肉厚 = %.1f[mm], 直径 = %.1f[m], 溶接効率 = %d[%%]" % (t.thickness, t.diameter, t.welding_eff))
plt.legend()
if(t.is_save_fig):plt.savefig("stress_tank_" + t.name + "_4.png")
"""
| [
"matplotlib"
] |
8ede0744b79a475f7f39fd8c4791b306c77f4b50 | Python | u20806389/CBT-700-Class-Group | /doc_func.py | UTF-8 | 3,053 | 2.796875 | 3 | [] | no_license | from __future__ import print_function
import numpy
import matplotlib.pyplot as plt
def G(s):
return 1/(s + 1)
def wI(s):
return (0.125*s + 0.25)/(0.125*s/4 + 1)
def lI(Gp, G):
return numpy.abs((Gp - G) / G)
def satisfy(wI, G, Gp, params, s):
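# For each candidate parameter value, compute |wI(jw)| - lI(Gp, G) over all frequencies;
# a parameter is kept only if this margin stays positive everywhere, i.e. the relative
# model error is bounded by the uncertainty weight |wI| at every frequency.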
distance = numpy.zeros((len(params), len(s)))
distance_min = numpy.zeros(len(params))
for i in range(len(params)):
for j in range(len(s)):
distance[i, j] = numpy.abs(wI(s[j])) - lI(Gp(G, params[i], s[j]), G(s[j]))
distance_min[i] = numpy.min(distance[i, :])
param_range = params[distance_min > 0]
return param_range
def plot_range(G, Gprime, wI, w):
s = 1j*w
for part, params, G_func, min_max, label in Gprime:
param_range = satisfy(wI, G, G_func, params, s)
param_max = numpy.max(param_range)
param_min = numpy.min(param_range)
plt.figure()
plt.loglog(w, numpy.abs(wI(s)), label='$w_{I}$')
plt.loglog(w, lI(G_func(G,param_max, s), G(s)), label=label)
if min_max:
print(part + ' ' + str(param_min) + ' to ' + str(param_max))
plt.loglog(w, lI(G_func(G, param_min, s), G(s)), label=label)
else:
print(part + ' ' + str(param_max))
plt.xlabel('Frequency [rad/s]')
plt.ylabel('Magnitude')
plt.legend(loc='best')
plt.show()
def Gp_a(G, theta, s):
return G(s) * numpy.exp(-theta * s)
def Gp_b(G, tau, s):
return G(s)/(tau*s + 1)
def Gp_c(G, a, s):
return 1/(s + a)
def Gp_d(G, T, s):
return 1/(T*s + 1)
def Gp_e(G, zeta, s):
return G(s)/((s/70)**2 + 2*zeta*(s/10) + 1)
def Gp_f(G, m, s):
return G(s)*(1/(0.01*s + 1))**m
def Gp_g(G, tauz, s):
return G(s)*(-tauz*s + 1)/(tauz*s + 1)
w_start = w_end = points = None
def frequency_plot_setup(axlim, w_start=None, w_end=None, points=None):
if axlim is None:
axlim = [None, None, None, None]
plt.gcf().set_facecolor('white')
if w_start:
w = numpy.logspace(w_start, w_end, points)
s = w*1j
return s, w, axlim
return axlim
def setup_plot(legend_list, w1=False, w2=False, G=False, K=False, wr=False):
if w1 and w2 and G:
w = numpy.logspace(w1,w2,1000)
s = 1j*w
S = 1/(1+G*K)
gain = numpy.abs(S(s))
plt.loglog(wr*numpy.ones(2), [numpy.max(gain), numpy.min(gain)], ls=':')
plt.legend(legend_list, bbox_to_anchor=(0, 1.01, 1, 0), loc=3, ncol=3)
plt.grid()
plt.xlabel('Frequency [rad/s]')
plt.ylabel('Magnitude')
plt.show()
return w, gain
def setup_bode_plot(title_str, w=numpy.logspace(-2, 2, 100), func=False, legend=False, plot=plt.loglog, grid=True):
plt.figure(1)
plt.title(title_str)
plt.xlabel('Frequency [rad/s]', fontsize=14)
plt.ylabel('Magnitude', fontsize=15)
if func:
for f, lstyle in func:
plot(w, f, lstyle)
if grid:
plt.grid(b=None, which='both', axis='both')
if legend:
plt.legend(legend, loc='best')
plt.show()
| [
"matplotlib"
] |
03b9828f83de059c9aac7e6e0447590b45caeeff | Python | GRSEB9S/eecs-531 | /assign/A2/A2-demo/canvas/src/ex3_convolution_theorem.py | UTF-8 | 1,166 | 3.046875 | 3 | [] | no_license | get_ipython().magic('matplotlib inline')
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
from scipy import signal
# load the image as grayscale
f=mpimg.imread("../data/lena.png")
# "Gaussian" kernel defined in Lecture 3b. Page3
g = 1.0/256 * np.array([[1, 4, 6, 4, 1],
[2, 8, 12, 8, 2],
[6, 24, 36, 24, 6],
[2, 8, 12, 8, 2],
[1, 4, 6, 4, 1]]) ;
# show image
plt.subplot(1, 2, 1)
plt.imshow(f, cmap='gray')
plt.axis('off')
plt.subplot(1, 2, 2)
plt.imshow(g, cmap='gray')
h1 = signal.convolve2d(f, g, mode='full')
H1 = np.fft.fft2(h1)
# padding zeros in the end of f and g
padf = np.pad(f, ((0, 4), (0,4)), 'constant');
padg = np.pad(g, ((0, 255), (0, 255)), 'constant');
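# both arrays are zero-padded to the size of the 'full' convolution (260x260 = 256 + 5 - 1, assuming lena.png is 256x256),
# so that element-wise multiplication of their DFTs equals the linear, not circular, convolution (convolution theorem)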
# compute the Fourier transforms of f and g
F = np.fft.fft2(padf)
G = np.fft.fft2(padg)
# compute the product
H2 = np.multiply(F, G)
# inverse Fourier transform
h2 = np.fft.ifft2(H2);
# In[91]:
mse1=(np.abs(H1-H2) ** 2).mean()
mse2=(np.abs(h1-h2)** 2 ).mean()
print('difference between H1 and H2', mse1)
print('difference between h1 and h2', mse2)
| [
"matplotlib"
] |
a0f97e4155e490d4f98903c87793fc3102a44119 | Python | Peilonrayz/graphtimer | /tests/test_plot.py | UTF-8 | 1,926 | 2.53125 | 3 | [
"MIT"
] | permissive | import pathlib
import helpers.functions as se_code
import matplotlib.pyplot as plt
import numpy as np
import pytest
from graphtimer import Plotter, flat
ALL_TESTS = True
FIGS = pathlib.Path("static/figs")
FIGS.mkdir(parents=True, exist_ok=True)
@pytest.mark.skipif(
(FIGS / "reverse.png").exists() and ALL_TESTS, reason="Output image already exists",
)
def test_reverse_plot():
fig, axs = plt.subplots()
axs.set_yscale("log")
axs.set_xscale("log")
(
Plotter(se_code.Reverse)
.repeat(10, 10, np.logspace(0, 3), args_conv=lambda i: " " * int(i))
.min()
.plot(axs, title="Reverse", fmt="-o")
)
fig.savefig(str(FIGS / "reverse.png"))
fig.savefig(str(FIGS / "reverse.svg"))
@pytest.mark.skipif(
(FIGS / "graipher.png").exists() and ALL_TESTS,
reason="Output image already exists",
)
def test_graipher_plot():
fig, axs = plt.subplots()
(
Plotter(se_code.Graipher)
.repeat(2, 1, [i / 10 for i in range(10)])
.min()
.plot(axs, title="Graipher", fmt="-o")
)
fig.savefig(str(FIGS / "graipher.png"))
fig.savefig(str(FIGS / "graipher.svg"))
@pytest.mark.skipif(
(FIGS / "peilonrayz.png").exists() and ALL_TESTS,
reason="Output image already exists",
)
def test_peilonrayz_plot():
fig, axs = plt.subplots(nrows=2, ncols=2, sharex=True, sharey=True)
p = Plotter(se_code.Peilonrayz)
axis = [
("Range", {"args_conv": range}),
("List", {"args_conv": lambda i: list(range(i))}),
("Unoptimised", {"args_conv": se_code.UnoptimisedRange}),
]
for graph, (title, kwargs) in zip(iter(flat(axs)), axis):
(
p.repeat(100, 5, list(range(0, 10001, 1000)), **kwargs)
.min(errors=((-1, 3), (-1, 4)))
.plot(graph, title=title)
)
fig.savefig(str(FIGS / "peilonrayz.png"))
fig.savefig(str(FIGS / "peilonrayz.svg"))
| [
"matplotlib"
] |
ee7fa08376bc23c99eeeea52ec3c1a55984ef384 | Python | usman9114/Self-Driving-Toy-Car | /plot_conf.py | UTF-8 | 1,026 | 3.109375 | 3 | [] | no_license | import matplotlib.pyplot as plt
import numpy as np
import itertools
def plot_confusion_matrix(cm, classes, normalize =False,
title ='Confusion matrix',
cmap=plt.cm.Blues):
plt.imshow(cm,interpolation='nearest',cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks,classes)
if normalize:
cm = cm.astype('float')/cm.sum(axis=1)[:, np.newaxis]
print("Normalized Confusion matrix")
else:
print("Confusion matrixs, without normalization")
print(cm)
thresh = cm.max()/2.
for i,j in itertools.product(range(cm.shape[0]),range(cm.shape[1])):
plt.text(j,i,cm[i,j],
horizontalalignment="center",
color = "white" if cm[i,j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
| [
"matplotlib"
] |
20571e045b0ae0ce542e8e11cd035f45e9a24336 | Python | nexusme/machine_learning_score_card | /machine_learning_nex/eg1/Fourth_Step_Box_Plot.py | UTF-8 | 2,228 | 2.953125 | 3 | [] | no_license | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# Box plot analysis of MonthlyIncome
from matplotlib.backends.backend_pdf import PdfPages
CSV_FILE_PATH = "cs-data-fill-select-age.csv"
data = pd.read_csv(CSV_FILE_PATH, index_col=0)
plt.rcParams['font.sans-serif'] = ['SimHei'] # use the SimHei font
plt.rcParams['axes.unicode_minus'] = False # render the minus sign correctly
#
#
def draw_box_plot(name):
cols = data.columns[0:]
plt.figure()
plt.boxplot(data[name], sym='r*')
plt.grid(True)
plt.ylim(1000000, 200000)
plt.show()
# pp = PdfPages("box_plot.pdf")
# cols = data.columns[0:]
# for column in cols:
# plt.figure()
# plt.boxplot(data[column], sym='r*')
# # plt.ylim(0, 100000)
# plt.grid(True)
# plt.title(column)
# pp.savefig()
# pp.close()
def box_plot_analysis(dt):
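# Tukey's fences: values outside (Q1 - 1.5*IQR, Q3 + 1.5*IQR) are treated as outliers; returns True for values inside the fences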
# upper quartile (Q3)
q3 = dt.quantile(q=0.75)
# lower quartile (Q1)
q1 = dt.quantile(q=0.25)
# interquartile range
iqr = q3 - q1
# upper bound
up = q3 + 1.5 * iqr
print("上限:", end='')
print(up)
# lower bound
down = q1 - 1.5 * iqr
print("下限:", end='')
print(down)
bool_result = (dt < up) & (dt > down)
return bool_result
def three_sigma(dt):
# upper bound
up = dt.mean() + 3 * dt.std()
# lower bound
low = dt.mean() - 3 * dt.std()
# data between the upper and lower bounds is considered normal
bool_result = (dt < up) & (dt > low)
return bool_result
# outlier handling
def df_filter():
df = pd.read_csv("cs-data-fill-select-age.csv", index_col=0)
print(len(df))
df_filtered = df[(df["age"] < 100) & (df["MonthlyIncome"] < 1000000)
& (df["RevolvingUtilizationOfUnsecuredLines"] < 16000) & (df["DebtRatio"] < 51000) &
(df["NumberOfTime30-59DaysPastDueNotWorse"] < 19)
& (df["NumberOfOpenCreditLinesAndLoans"] < 60) & (df["NumberOfTimes90DaysLate"] < 20) &
(df["NumberRealEstateLoansOrLines"] < 30) &
(df["NumberOfTime60-89DaysPastDueNotWorse"] < 17) &
(df["NumberOfDependents"] < 10.5)]
print(len(df_filtered))
df_filtered.to_csv('cs-data-delete-after-boxplot.csv')
df_filter()
| [
"matplotlib"
] |
58de90c74a9d8c9213cb83cdc6a3db490972caa0 | Python | sungmin-net/DeepLearning_Textbook | /235p_mnistCnn.py | UTF-8 | 2,832 | 2.828125 | 3 | [] | no_license | # 잘 돌아가는 데 (GPU가 없어서 굉장히) 오래 걸림
from keras.datasets import mnist
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D
from keras.callbacks import ModelCheckpoint, EarlyStopping
import matplotlib.pyplot as plt
import numpy
import os
import tensorflow as tf
# set the random seed
seed = 0
numpy.random.seed(seed)
tf.random.set_seed(3)
# load the data
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# reshape to (n, 28, 28, 1), cast to float, and normalize
x_train = x_train.reshape(x_train.shape[0], 28, 28, 1).astype('float32') / 255
x_test = x_test.reshape(x_test.shape[0], 28, 28, 1).astype('float32') / 255
y_train = np_utils.to_categorical(y_train)
y_test = np_utils.to_categorical(y_test)
# set up the convolutional neural network
model = Sequential()
# slide a 3x3 kernel over the input; input_shape is (rows, cols, channels - 1 for grayscale, 3 for color)
model.add(Conv2D(32, kernel_size = (3, 3), input_shape = (28, 28, 1), activation = 'relu'))
model.add(Conv2D(64, (3, 3), activation='relu'))
# take the maximum of each 2x2 window; pool_size = 2 halves the feature map size
model.add(MaxPooling2D(pool_size = 2))
# drop 25% of the nodes
model.add(Dropout(0.25))
# Dense layers need a 1-D input before the activation can be applied, so flatten first
model.add(Flatten())
model.add(Dense(128, activation = 'relu'))
model.add(Dropout(0.5))
# use softmax for the 10 output classes
model.add(Dense(10, activation = 'softmax'))
model.compile(loss = 'categorical_crossentropy', optimizer = 'adam', metrics = ['accuracy'])
# model optimization settings
modelDir = "./235p_mnistCnn_models/"
if not os.path.exists(modelDir) :
os.mkdir(modelDir)
modelPath = modelDir + "{epoch:02d}-{val_loss:.4f}.hdf5"
checkpointer = ModelCheckpoint(filepath = modelPath, monitor = 'val_loss', verbose = 1,
save_best_only = True)
earlyStopper = EarlyStopping(monitor = 'val_loss', patience = 10)
# run the model
history = model.fit(x_train, y_train, validation_data=(x_test, y_test), epochs = 30,
batch_size = 200, verbose = 0, callbacks = [earlyStopper, checkpointer])
# print the test accuracy
print("\n Test Accuracy: %.4f" % (model.evaluate(x_test, y_test)[1]))
# test set loss
y_vloss = history.history['val_loss']
# training set loss
y_loss = history.history['loss']
# plot the results
x_len = numpy.arange(len(y_loss))
plt.plot(x_len, y_vloss, marker = ".", c = "red", label = "Testset_loss")
plt.plot(x_len, y_loss, marker = '.', c = 'blue', label = 'Trainset_loss')
# add a grid and labels to the plot
plt.legend(loc = 'upper right')
plt.grid()
plt.xlabel('epoch')
plt.ylabel('loss')
plt.show() | [
"matplotlib"
] |
76f661ea0fd9c1d889ec230e3e8d389dc16cade3 | Python | jainrahulh/thesis | /FactsModelCreater.py | UTF-8 | 5,326 | 2.8125 | 3 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-
"""
Machine Learning Model Training and Dumping the Model as a joblib file.
"""
#import requests
#response = requests.get('https://www.politifact.com/factchecks/list/?page=2&category=coronavirus')
#response.text
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
df = pd.read_csv('APIData2000-FB.csv')
df.drop('Unnamed: 0', axis=1, inplace=True)
df.head()
df.shape
df.info()
df.isnull().sum(axis = 1)
df.Ruling_Slug.unique()
df = df[df['Ruling_Slug']!= 'no-flip']
df = df[df['Ruling_Slug']!= 'full-flop']
df = df[df['Ruling_Slug']!= 'half-flip']
df = df[df['Ruling_Slug']!= 'barely-true']
df.Ruling_Slug.unique()
df.loc[df['Ruling_Slug'] == 'half-true', 'Ruling_Slug'] = 'true'
df.loc[df['Ruling_Slug'] == 'mostly-true', 'Ruling_Slug'] = 'true'
df.loc[df['Ruling_Slug'] == 'mostly-false', 'Ruling_Slug'] = 'false'
df.loc[df['Ruling_Slug'] == 'pants-fire', 'Ruling_Slug'] = 'false'
#df.loc[df['Ruling_Slug'] == 'barely-true', 'Ruling_Slug'] = 'false'
df.Ruling_Slug.unique()
labels = df.Ruling_Slug
labels.unique()
df.shape
print(labels.value_counts(), '\n')
"""
Required on macOS to work around SSL certificate issues when downloading the nltk data.
"""
import nltk
import ssl
try:
_create_unverified_https_context = ssl._create_unverified_context
except AttributeError:
pass
else:
ssl._create_default_https_context = _create_unverified_https_context
nltk.download('stopwords')
from nltk.corpus import stopwords
import matplotlib.pyplot as plt
plt.style.use('ggplot')
from wordcloud import WordCloud
stopwords = nltk.corpus.stopwords.words('english')
extendStopWords = ['Say', 'Says']
stopwords.extend(extendStopWords)
true_word_tokens = pd.Series(
df[df['Ruling_Slug'] == 'true'].Statement.tolist()).str.cat(sep=' ')
wordcloud = WordCloud(max_font_size=200, stopwords=stopwords, random_state=None, background_color='white').generate(true_word_tokens)
plt.figure(figsize=(12, 10))
plt.imshow(wordcloud, interpolation="bilinear")
plt.axis("off")
#plt.show()
false_word_tokens = pd.Series(
df[df['Ruling_Slug'] == 'false'].Statement.tolist()).str.cat(sep=' ')
wordcloud = WordCloud(max_font_size=200, stopwords=stopwords, random_state=None, background_color='black').generate(false_word_tokens)
plt.figure(figsize=(12, 10))
plt.imshow(wordcloud, interpolation="bilinear")
plt.axis("off")
#plt.show()
x_train,x_test,y_train,y_test=train_test_split(df['Statement'].values.astype('str'), labels, test_size=0.3, random_state=7)
tfidf_vectorizer=TfidfVectorizer(stop_words=stopwords, max_df=0.7)
tfidf_train=tfidf_vectorizer.fit_transform(x_train)
tfidf_test=tfidf_vectorizer.transform(x_test)
pa_classifier=PassiveAggressiveClassifier(C=0.5,max_iter=150)
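# PassiveAggressiveClassifier is an online max-margin linear classifier; C caps how aggressively each misclassified sample updates the weights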
pa_classifier.fit(tfidf_train,y_train)
y_pred=pa_classifier.predict(tfidf_test)
score=accuracy_score(y_test,y_pred)
print(f'Accuracy: {round(score*100,2)}%')
confusion_matrix(y_test,y_pred, labels=['true','false'])
from sklearn.metrics import classification_report
print(f"PA Classification Report : \n\n{classification_report(y_test, y_pred)}")
from sklearn.metrics import classification_report
scoreMatrix = []
confusionMatrix = []
classificationMatrix = []
j = [0.20,0.30,0.40]
ratio = ["80:20","70:30","60:40"]
for i in range(3):
x_train,x_test,y_train,y_test=train_test_split(df['Statement'].values.astype('str'), labels, test_size=j[i], random_state=7)
tfidf_vectorizer=TfidfVectorizer(stop_words=stopwords, max_df=0.7)
tfidf_train=tfidf_vectorizer.fit_transform(x_train)
tfidf_test=tfidf_vectorizer.transform(x_test)
pa_classifier=PassiveAggressiveClassifier(C=0.5,max_iter=150)
pa_classifier.fit(tfidf_train,y_train)
y_pred=pa_classifier.predict(tfidf_test)
scoreMatrix.append(accuracy_score(y_test,y_pred))
print(f'Split Ratio: {ratio[i]}')
#print(f'Accuracy: {round(scoreMatrix[i]*100,2)}%')
confusionMatrix.append(confusion_matrix(y_test,y_pred, labels=['true','false']))
print(f"Classification Report: \n{classification_report(y_test, y_pred)}\n\n")
classificationMatrix.append(classification_report(y_test, y_pred))
scoreMatrix
confusionMatrix
classificationMatrix
y_test.unique()
labels = ['true', 'false']
cm = confusion_matrix(y_test,y_pred, labels=labels)
print(cm)
import seaborn as sns
import matplotlib.pyplot as plt
ax= plt.subplot()
sns.heatmap(cm, annot=True, ax = ax); #annot=True to annotate cells
# labels, title and ticks
ax.set_xlabel('Predicted labels');ax.set_ylabel('True labels');
ax.set_title('Confusion Matrix');
ax.xaxis.set_ticklabels(['true', 'false']); ax.yaxis.set_ticklabels(['false', 'true']);
from sklearn.pipeline import Pipeline
pipeline = Pipeline(steps= [('tfidf', TfidfVectorizer(stop_words=stopwords, max_df=0.7)),
('model', PassiveAggressiveClassifier())])
pipeline.fit(x_train, y_train)
pipeline.predict(x_train)
text = ["higher R in the North West and South West is an important part of moving towards a more localised approach to lockdown"]
pipeline.predict(text)
from joblib import dump
dump(pipeline, filename="news_classifier.joblib")
| [
"matplotlib",
"seaborn"
] |
b58b80eb90c74b2b5b27e612465de47914b40657 | Python | KushagraPareek/visMiniProject2 | /hello.py | UTF-8 | 9,946 | 2.5625 | 3 | [] | no_license | import pandas as pd
import numpy as np
import sys
import json
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from flask import Flask,render_template,request
import matplotlib.pyplot as plt
from yellowbrick.cluster import KElbowVisualizer
from sklearn import manifold
from sklearn.metrics import pairwise_distances
from pandas.plotting import scatter_matrix
app = Flask(__name__)
sample_done = False
@app.route('/')
def hello_world():
clean_data()
return render_template("index.html")
def randomSample():
return data.sample(frac=0.25)
#Use of Kelbow visualizer
def elbowCheck():
mat = data.values
mat = mat.astype(float)
model = KMeans()
visualizer = KElbowVisualizer(model, k=(1,12))
visualizer.fit(mat)
visualizer.show(outpath="static/images/kmeans.png")
def stratifiedSample():
#From elbow check the cluster size is best when K=4
global frame
global sample_done
global colors
if not sample_done:
nCluster = 4
mat = data.values
mat = mat.astype(float)
kmeans = KMeans(n_clusters=nCluster)
kmeans.fit(mat)
cInfo = kmeans.labels_
#save clusters and recreate dataframe
cluster = [[],[],[],[]]
for i in range(0,len(data.index)):
cluster[cInfo[i]].append(data.loc[i]);
temp = []
colors = []
for i in range(0,nCluster):
tempFrame = pd.DataFrame(cluster[i]).sample(frac=0.25)
dfSize = tempFrame.shape[0]
for j in range(0,dfSize):
colors.append(i)
temp.append(tempFrame)
frame = pd.concat(temp)
sample_done = True
return frame
def pcaAnalysis(sample):
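# Standardize the features and fit an 18-component PCA; intD counts the leading components whose
# cumulative explained variance stays below ~78% (used as the intrinsic dimensionality), and sq_load
# sums each attribute's squared loadings over the first three PCs (used by topPcaLoad to rank attributes).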
global sq_load
global intD
mat = sample.values
mat = mat.astype(float)
mat = StandardScaler().fit_transform(mat)
nComponents = 18
pca = PCA(n_components=nComponents)
pca.fit_transform(mat)
count = 0
cumsu = 0
for eigV in pca.explained_variance_ratio_:
cumsu += eigV
if cumsu > 0.78:
break
count += 1
intD = count
#get the loading matrix
loadings = pca.components_.T * np.sqrt(pca.explained_variance_)
sq_loadings = np.square(loadings)
sq_load = np.sum(sq_loadings[:,0:3],axis=1)
topPcaLoad()
return pca.explained_variance_ratio_
#get topPca loadings
def topPcaLoad():
global list_top
list_top = []
list_load = list(zip(data.columns,sq_load))
list_load.sort(key=lambda x:x[1],reverse=True)
topThree = list_load[0:3]
for tup in topThree:
list_top.append(tup[0])
print(list_top)
#helper post functions
@app.route('/getPcaData', methods=["GET","POST"])
def getPcaData():
if request.method == "POST":
columns = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,17,18]
pcaV = list(zip(columns, pcaAnalysis(data)));
return json.dumps({'graph_data': pcaV})
@app.route('/getPcaRandom', methods=["GET","POST"])
def getPcaRandom():
if request.method == "POST":
columns = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,17,18]
pcaV = list(zip(columns, pcaAnalysis(randomSample())));
return json.dumps({'graph_data': pcaV})
@app.route('/getPcaStratified', methods=["GET","POST"])
def getPcaStratified():
if request.method == "POST":
columns = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,17,18]
pcaV = list(zip(columns, pcaAnalysis(stratifiedSample())));
return json.dumps({'graph_data': pcaV})
#pca scatter using two components
def pcaScatter(sample):
mat = sample.values
mat = mat.astype(float)
mat = StandardScaler().fit_transform(mat)
nComponents = 2
pca = PCA(n_components=nComponents)
new_pca = pca.fit_transform(mat)
return new_pca
@app.route('/getScatterData', methods=["GET","POST"])
def getScatterData():
if request.method == "POST":
scatter_data = pcaScatter(data)
pcaS = scatter_data.tolist()
return json.dumps({'graph_data':pcaS})
@app.route('/getScatterRandom', methods=["GET","POST"])
def getScatterRandom():
if request.method == "POST":
scatter_data = pcaScatter(randomSample())
pcaS = scatter_data.tolist()
return json.dumps({'graph_data':pcaS})
@app.route('/getScatterStratified', methods=["GET","POST"])
def getScatterStratified():
if request.method == "POST":
scatter_data = pcaScatter(stratifiedSample())
pcaS = scatter_data.tolist()
return json.dumps({'graph_data':pcaS, 'colors':colors})
#mds Euclidean
def mdsEScatter(sample):
mat = sample.values
mat = mat.astype(float)
mat = StandardScaler().fit_transform(mat)
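# metric MDS on a precomputed Euclidean distance matrix: embeds the standardized samples in 2-D while preserving pairwise distances as well as possible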
mds = manifold.MDS(n_components=2, dissimilarity='precomputed')
similarity = pairwise_distances(mat, metric='euclidean')
X = mds.fit_transform(similarity)
return X
@app.route('/getmdsEScatterData', methods=["GET","POST"])
def getmdsEScatterData():
if request.method == "POST":
scatter_data = mdsEScatter(data)
mdS = scatter_data.tolist()
return json.dumps({'graph_data':mdS})
@app.route('/getmdsEScatterRandom', methods=["GET","POST"])
def getmdsEScatterRandom():
if request.method == "POST":
scatter_data = mdsEScatter(randomSample())
mdS = scatter_data.tolist()
return json.dumps({'graph_data':mdS})
@app.route('/getmdsEScatterStratified', methods=["GET","POST"])
def getmdsEScatterStratified():
if request.method == "POST":
scatter_data = mdsEScatter(stratifiedSample())
mdS = scatter_data.tolist()
return json.dumps({'graph_data':mdS, 'colors':colors})
#mds correlation
def mdsCScatter(sample):
mat = sample.values
mat = mat.astype(float)
mat = StandardScaler().fit_transform(mat)
mds = manifold.MDS(n_components=2, dissimilarity='precomputed')
similarity = pairwise_distances(mat, metric='correlation')
X = mds.fit_transform(similarity)
return X
@app.route('/getmdsCScatterData', methods=["GET","POST"])
def getmdsCScatterData():
if request.method == "POST":
scatter_data = mdsCScatter(data)
mdS = scatter_data.tolist()
return json.dumps({'graph_data':mdS})
@app.route('/getmdsCScatterRandom', methods=["GET","POST"])
def getmdsCScatterRandom():
if request.method == "POST":
scatter_data = mdsCScatter(randomSample())
mdS = scatter_data.tolist()
return json.dumps({'graph_data':mdS})
@app.route('/getmdsCScatterStratified', methods=["GET","POST"])
def getmdsCScatterStratified():
if request.method == "POST":
scatter_data = mdsCScatter(stratifiedSample())
mdS = scatter_data.tolist()
return json.dumps({'graph_data':mdS, 'colors':colors})
#top pca load scatter matrix
def scatterMatrix(sample):
mat = sample.values
mat = mat.astype(float)
mat = StandardScaler().fit_transform(mat)
new_frame = pd.DataFrame(mat,columns=data.columns)
final_frame = new_frame[['Value', 'Acceleration', 'Release Clause']]
return final_frame
@app.route('/scatterMatrixData', methods=["GET","POST"])
def scatterMatrixData():
if request.method == "POST":
df_csv = scatterMatrix(data)
cluster_index = [4]*df_csv.shape[0];
df_csv['Cluster'] = pd.Series(cluster_index);
df_csv.to_csv("static/data/out3.csv",index=False)
return "out3.csv"
@app.route('/scatterMatrixRandom', methods=["GET","POST"])
def scatterMatrixRandom():
if request.method == "POST":
df_csv = scatterMatrix(randomSample())
cluster_index = [4]*df_csv.shape[0];
df_csv['Cluster'] = pd.Series(cluster_index);
df_csv.to_csv("static/data/out4.csv",index=False)
return "out4.csv"
@app.route('/scatterMatrixStratified', methods=["GET","POST"])
def scatterMatrixStratified():
if request.method == "POST":
df_csv = scatterMatrix(stratifiedSample())
df_csv['Cluster'] = pd.Series(colors);
df_csv.to_csv("static/data/out5.csv",index=False)
return "out5.csv"
def toFloat(d):
try:
x = float(d)
return x
except ValueError:
return 0
def clean_values(d):
divider = 1.0;
d = str(d)
if d == "nan":
return 0
if "K" in d:
divider = 1000.0
d = d.replace("K","").replace("M","").replace("\u20ac","")
return toFloat(d)/divider
def looper(d):
if d == "left" or d == "right":
return d
return "NA"
def clean_data():
global data
data = pd.read_csv("static/data/clean.csv")
#Dropped
data.drop('ID',axis=1,inplace=True)
data.drop('Work Rate',axis=1,inplace=True)
data.drop('Height',axis=1,inplace=True)
data.drop('Club',axis=1,inplace=True)
data.Weight = data.Weight.str.replace("lbs","")
data.Value = data["Value"].apply(clean_values)
data.Wage = data["Wage"].apply(clean_values)
data["Release Clause"] = data["Release Clause"].apply(clean_values)
#Foot
data["Preferred Foot"] = data["Preferred Foot"].str.lower().apply(looper)
data["Preferred Foot"] = pd.Categorical(data["Preferred Foot"])
data["Preferred Foot"] = data["Preferred Foot"].cat.codes
#Countries
data["Nationality"] = data["Nationality"].str.lower()
data["Nationality"] = pd.Categorical(data["Nationality"])
data["Nationality"] = data["Nationality"].cat.codes
#Body Type
data["Body Type"] = data["Body Type"].str.lower()
data["Body Type"] = pd.Categorical(data["Body Type"])
data["Body Type"] = data["Body Type"].cat.codes
#position
data["Position"] = data["Position"].str.lower()
data["Position"] = pd.Categorical(data["Position"])
data["Position"] = data["Position"].cat.codes
| [
"matplotlib"
] |
45183c6b8d4599c81f6e094a8f7ce002db3cc71c | Python | rupesh20/Cp_codes | /algorithmcodes/.py codes/binpacking.py | UTF-8 | 1,444 | 2.828125 | 3 | [] | no_license | import numpy as np
import random as rd
import matplotlib.pyplot as plt
#first fit
binList = [] #rectangles per bin information
DyList = [] # all bins remaining size
Rwidth = []
Rheight = []
BinHiegth = 11
BinWidth = 6
def addRect(data,list):
binList.append([ ])
L = len(binList)-1
binList[L].append(data)
def listlen(list):
x=len(list)
if x is None:
return 0
return x
def addBin(x,y):
DyList.append([ ])
n = len(DyList)-1
DyList[n].append(x)
DyList[n].append(y)
def canPack(data):
X=data[0]
Y=data[1]
index=0
for z in DyList:
if Y<=z[1]:
return index
elif Y>z[1] and X<=z[0]:
return index
else:
return 0
index+=1
def BFFpack(data, s):
l1=binList[s]
l2=DyList[s]
if l1 is 0 or l2 is 0:
print "error"
else:
l1.append(data)
binList[s]=l1
l2[0]=l2[0]-data[0]
l2[1]=l1[1]-data[1]
DyList[s]=l2
def binFF(data):
m=listlen(data)
if m is 0:
return None
else:
i=0
while i<=m:
d1 = data.pop()
if len(binList) is 0:
addRect(d1,binList)
Rheight = (BinHiegth-d1[0])
Rwidth = (BinWidth-d1[1])
addBin(Rheight,Rwidth)
else:
s= canPack(d1)
if s is 0:
addRect(d1,binList)
addBin((BinHiegth-d1[0]),BinWidth-d1[1])
else:
BFFpack(d1,s)
i+=1
if __name__ == '__main__':
H=np.random.randint(3,11,8)
W=np.random.randint(2,6,8)
rect = zip(H,W)
rect=sorted(rect)
binFF(rect)
print(binList)
print(DyList) | [
"matplotlib"
] |
445311ccbed884ed8b3e506ed3a5552fab1fc082 | Python | sweetsinpackets/si507_project | /api_call.py | UTF-8 | 2,317 | 2.75 | 3 | [] | no_license | import json
# from class_definition import shooting_record
import class_definition
import pandas as pd
from data_fetch import api_request
# import plotly.plotly as plt
import plotly.graph_objects as plt_go
from plotly.offline import plot
from secrets import mapquest_api_key, mapbox_access_token
# returns None if nothing found, else return (lat, lng) in float
def find_lat_lng(address:str):
base_url = "http://www.mapquestapi.com/geocoding/v1/address"
params = {
"key": mapquest_api_key,
"location": address,
"outFormat": "json"
}
result_json = json.loads(api_request(base_url, params))
try:
latlng = result_json["results"][0]["locations"][0]["latLng"]
lat = float(latlng["lat"])
lng = float(latlng["lng"])
return (lat, lng)
except:
return None
# input a selected dataframe, show a plot
def plot_cases(df)->None:
lat_list = []
lng_list = []
text_list = []
center_lat = 0
center_lng = 0
for _index, row in df.iterrows():
address = class_definition.address_to_search(row)
api_result = find_lat_lng(address)
if api_result == None:
continue
lat, lng = api_result
lat_list.append(lat)
lng_list.append(lng)
text_list.append(str(row["Incident_Date"]))
center_lat += lat
center_lng += lng
# define plotting data
plot_data = [plt_go.Scattermapbox(
lat = lat_list,
lon = lng_list,
mode = "markers",
marker = plt_go.scattermapbox.Marker(
size = 9,
color = "red"
),
text = text_list,
hoverinfo = "text"
)]
center_lat = center_lat / len(lat_list)
center_lng = center_lng / len(lng_list)
layout = plt_go.Layout(
autosize = True,
hovermode = "closest",
mapbox = plt_go.layout.Mapbox(
accesstoken = mapbox_access_token,
bearing = 0,
center = plt_go.layout.mapbox.Center(
lat = center_lat,
lon = center_lng
),
pitch = 0,
zoom = 6
)
)
fig = plt_go.Figure(data = plot_data, layout = layout)
plot(fig, filename = "output_figure.html")
return
| [
"plotly"
] |
d352e114868b969845b83b9a9b7aa6f290282bb7 | Python | rmttugraz/Advanced-Rock-Mechanics-and-Tunnelling | /sess_MachineLearning_summerterm2019/01_plotter.py | UTF-8 | 2,109 | 3.234375 | 3 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-
# This code loads the data that was generated by '00_data_generator.py'
# and then creates six scatterplots of the three features.
import matplotlib.pyplot as plt
import pandas as pd
# load into two dataframes
df_gneiss = pd.read_csv('gneiss.csv')
df_marl = pd.read_csv('marl.csv')
# plot data with several scatterplots
fig = plt.figure(figsize=(12, 8))
ax = fig.add_subplot(2, 3, 1)
ax.scatter(df_gneiss['UCS'], df_gneiss['SPZ'], alpha=0.5, color='black')
ax.scatter(df_marl['UCS'], df_marl['SPZ'], alpha=0.5, color='black')
ax.set_xlabel('UCS [MPa]')
ax.set_ylabel('tensile strength [MPa]')
ax.grid(alpha=0.5)
ax = fig.add_subplot(2, 3, 2)
ax.scatter(df_gneiss['DENS'], df_gneiss['UCS'], alpha=0.5, color='black')
ax.scatter(df_marl['DENS'], df_marl['UCS'], alpha=0.5, color='black')
ax.set_xlabel('density [g/cm³]')
ax.set_ylabel('UCS [MPa]')
ax.grid(alpha=0.5)
ax = fig.add_subplot(2, 3, 3)
ax.scatter(df_gneiss['SPZ'], df_gneiss['DENS'], alpha=0.5, color='black')
ax.scatter(df_marl['SPZ'], df_marl['DENS'], alpha=0.5, color='black')
ax.set_xlabel('tensile strength [MPa]')
ax.set_ylabel('density [g/cm³]')
ax.grid(alpha=0.5)
ax = fig.add_subplot(2, 3, 4)
ax.scatter(df_gneiss['UCS'], df_gneiss['SPZ'], alpha=0.5, label='gneiss')
ax.scatter(df_marl['UCS'], df_marl['SPZ'], alpha=0.5, label='marl')
ax.set_xlabel('UCS [MPa]')
ax.set_ylabel('tensile strength [MPa]')
ax.legend()
ax.grid(alpha=0.5)
ax = fig.add_subplot(2, 3, 5)
ax.scatter(df_gneiss['DENS'], df_gneiss['UCS'], alpha=0.5, label='gneiss')
ax.scatter(df_marl['DENS'], df_marl['UCS'], alpha=0.5, label='marl')
ax.set_xlabel('density [g/cm³]')
ax.set_ylabel('UCS [MPa]')
ax.legend()
ax.grid(alpha=0.5)
ax = fig.add_subplot(2, 3, 6)
ax.scatter(df_gneiss['SPZ'], df_gneiss['DENS'], alpha=0.5, label='gneiss')
ax.scatter(df_marl['SPZ'], df_marl['DENS'], alpha=0.5, label='marl')
ax.set_xlabel('tensile strength [MPa]')
ax.set_ylabel('density [g/cm³]')
ax.legend()
ax.grid(alpha=0.5)
plt.tight_layout()
plt.savefig('data.jpg', dpi=600)
| [
"matplotlib"
] |
d92a6c12796972cbb4d15b0fc9ab35c04d93bac4 | Python | arianalima/issue-classifier | /graphics_generator/matrix.py | UTF-8 | 3,247 | 3.0625 | 3 | [] | no_license | import itertools
import numpy as np
import matplotlib.pyplot as plt
def plot_confusion_matrix(cm, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Purples): #Oranges
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
print(cm)
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.ylabel('Classe Verdadeira')
plt.xlabel('Classe Predita')
plt.tight_layout()
# Compute confusion matrix 1
cnf_matrix1 = np.array([[ 3, 1, 0, 1, 2, 6, 0, 0, 0, 0],
[ 0, 21, 3, 0, 10, 17, 2, 0, 0, 0],
[ 0, 2, 0, 0, 2, 4, 0, 0, 0, 0],
[ 0, 3, 0, 5, 6, 16, 5, 0, 0, 0],
[ 4, 11, 2, 2, 35, 51, 16, 0, 0, 0],
[ 4, 18, 3, 4, 39, 60, 19, 3, 0, 0],
[ 0, 9, 4, 2, 17, 36, 18, 2, 0, 0],
[ 0, 3, 0, 0, 2, 5, 6, 1, 0, 0],
[ 0, 1, 0, 0, 1, 1, 2, 0, 1, 0],
[ 0, 0, 0, 0, 0, 1, 0, 0, 0, 0]])
#Compute confusion matrix 2
cnf_matrix2 = np.array([[13, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 46, 0, 0, 3, 3, 1, 0, 0, 0],
[0, 0, 8, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 32, 1, 2, 0, 0, 0, 0],
[0, 0, 1, 0, 114, 4, 2, 0, 0, 0],
[0, 0, 0, 0, 4, 145, 1, 0, 0, 0],
[0, 1, 1, 1, 4, 4, 77, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 17, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 6, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 1]])
# Compute confusion matrix 3
cnf_matrix3 = np.array([[13, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 53, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 8, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 35, 0, 0, 0, 0, 0, 0],
[1, 2, 1, 2, 95, 16, 4, 0, 0, 0],
[3, 5, 0, 4, 22, 107, 9, 0, 0, 0],
[0, 4, 0, 2, 10, 15, 57, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 17, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 6, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 1]])
def plot_matrix(matrix, approach):
class_names = ['0','1','2','3','5','8','13','20','40','89']
np.set_printoptions(precision=2)
# Plot non-normalized confusion matrix
plt.figure()
plot_confusion_matrix(matrix, classes=class_names,
title='Abordagem %d\nMatriz de Confusão' % approach)
# Plot normalized confusion matrix
# plt.figure()
# plot_confusion_matrix(matrix, classes=class_names, normalize=True,
# title='Abordagem %d\nMatriz de Confusão Normalizada' % approach)
plt.show()
plot_matrix(cnf_matrix1, 1)
plot_matrix(cnf_matrix2, 2)
plot_matrix(cnf_matrix3, 3)
| [
"matplotlib"
] |
13e7d7dfe78b0e485bdeaa801c2a70ef29056aec | Python | Sobreviviente/Fractionated_exhaled_breath | /Metodo_CarlosPete.py | UTF-8 | 3,306 | 2.90625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Spyder editor
This is a temporary file.
"""
import numpy as np
import matplotlib.pyplot as plt
## Acquired data
##datos = open("datos_escaner.txt","r")
my_data = np.genfromtxt('dat_20190122.txt', delimiter=',')
fig, ax = plt.subplots()
print(my_data.shape)
t=0
estado_curva = 0
time = my_data[:,0]
data1 = my_data[:,1]
data2 = my_data[:,2]
puntos_sujeto_1 = [( 1.06108*10**7,1.64925*10**7,2.07002*10**7,2.57464*10**7,3.06846*10**7),(3.635853,2.796659,2.832604,2.691077,2.694828)]
puntos_traslado_1 =[(150000 + 1.06108*10**7,150000+1.64925*10**7,150000+2.07002*10**7,150000+2.57464*10**7,150000+3.06846*10**7),(3.635853,2.796659,2.832604,2.691077,2.694828)]
puntos_del_filtro=[]
puntos_x = puntos_sujeto_1[0]
puntos_y = puntos_sujeto_1[1]
puntos_traslado_x = puntos_traslado_1[0]
puntos_traslado_y = puntos_traslado_1[1]
#puntos_prueba=[150904, 186827, 241686, 285501, 327939, 372910, 415944, 456669, 496717, 536898, 585990, 618449, 621574, 635106, 676321, 722500, 767374, 810954, 852340, 890045, 924166, 964979, 984141]
dd = np.zeros(data1.shape)
val = np.zeros(data1.shape)
dd[0] = data1[0]
alpha = 0.99
value = 0
value_2=0
puntos_estado=dd.copy()
ff = np.zeros(data1.shape)
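# State machine on the low-pass filtered derivative ff (timestamps appear to be in microseconds, so 150000 is about 150 ms):
# 0: armed, wait for ff > 3.0 (curve rising); 1: wait for ff < 0.5 (trigger) and schedule t = now + 150 ms;
# 2: wait until time > t; 3: outputs active, re-arm once the raw signal drops below 0.05.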
for i in range(1,data2.size):
muestra = data1[i]
value = alpha * value + (1-alpha) * muestra
val[i] = value
dd[i] = (val[i] - val[i-1])/((time[i]-time[i-1])/1000000.0)
muestra_2 = dd[i]
value_2=alpha*value_2+(1-alpha)*muestra_2
ff[i]=value_2
if (estado_curva==0 and ff[i] > 3.0): # pulse indicating the curve is rising
# print (ff[i])
estado_curva = 1 # state change
puntos_estado[i]=100
#if (ff[i]==3.0):
#puntos_del_filtro.append(ff[i])
elif (estado_curva==1 and ff[i]<0.5): #trigger
puntos_estado[i]=200
estado_curva = 2 #cambio de estado
t=time[i]+150000 # trigger point to locate on the curve
#print (t)
puntos_del_filtro.append(t)
#print('hola '+str(t))
elif (estado_curva==2 and (time[i]>t)): # activate outputs
estado_curva=3
puntos_estado[i]=300
elif (estado_curva==3 and data1[i]<0.05): # reset everything
estado_curva = 0
puntos_estado[i]=50
t=0
#print (puntos_del_filtro)
#print (len(puntos_del_filtro))
print(time.shape)
print(data1.shape)
ax.plot(time,data1,label='Sensor signal1')
ax.plot(time,data2,label='Sensor signal2')
ax.plot(time,dd,':',label='diff')
ax.plot(time,val,':',label='filtro')
ax.plot(puntos_x,puntos_y,'yo')
ax.plot(time,ff,':',label='filtro_diff' )
index = np.where (puntos_estado != 0)
ax.plot(time[index],puntos_estado[index],'o',label = 'Ref estados')
#ax.plot(time)
##ax.set_ylim(0,maxV+1)
for j in puntos_traslado_x: # plot points on the curve
index= np.where (time <= j)
#print (index)
#print (index[0][-1])
ax.plot(time[index[0][-1]],data1[index[0][-1]],'ro')
for n in puntos_del_filtro:
print(n)
index_t = np.where (time <= n)
print (index_t)
ax.plot(time[index_t[0][-1]],data1[index_t[0][-1]],'bo')
ax.legend(framealpha=0.4)
plt.grid()
plt.show()
plt.savefig('figura.pdf') | [
"matplotlib"
] |
d4b3d3cb09d27c3f14b9fb8f1d2545eb28ed6997 | Python | daviddao/data | /script/plot.py | UTF-8 | 1,760 | 2.8125 | 3 | [] | no_license | import numpy as np
import matplotlib.pyplot as plt
## Plot for rf scores
f = open("../random_sampling_dataset/250_iterations/rf_scores.txt")
scores_random_250 = []
for line in f:
line = line.split()
scores_random_250.append(float(line[0]))
f.close()
scores_250 = []
f = open("../worst-case-dataset/250_iterations/rf_scores.txt")
for line in f:
line = line.split()
scores_250.append(float(line[0]))
f.close()
plt.plot(scores_250,'bo',scores_random_250,'g^')
plt.title("Average RF distance vs. Iterations")
plt.xlabel("Average RF distance")
plt.ylabel("Iterations")
plt.show()
## Plot for tree decrease
# f = open("../random_sampling_dataset/250_iterations/rf_scores.txt")
# scores_random_250 = []
# for line in f:
# line = line.split()
# scores_random_250.append(float(line[2]))
# f.close()
# scores_250 = []
# f = open("../worst-case-dataset/250_iterations/rf_scores.txt")
# for line in f:
# line = line.split()
# scores_250.append(float(line[2]))
# f.close()
# plt.plot(scores_250,'bo',scores_random_250,'g^')
# plt.title("# Trees Analyzed vs. Iterations")
# plt.xlabel("Number of Trees Analyzed")
# plt.ylabel("Iterations")
# plt.show()
## Plot for top scores
# f = open("../random_sampling_dataset/250_iterations/taxa.txt")
# scores_random_250 = []
# for line in f:
# line = line.split(",")
# scores_random_250.append(float(line[0]))
# f.close()
# scores_250 = []
# f = open("../worst-case-dataset/250_iterations/taxa.txt")
# for line in f:
# line = line.split(",")
# scores_250.append(float(line[0]))
# f.close()
# plt.plot(scores_250,'bo',scores_random_250,'g^')
# plt.title("Top Scores vs. Iterations")
# plt.xlabel("Dropset Score")
# plt.ylabel("Iterations")
# plt.show()
| [
"matplotlib"
] |
a78de4c8e7d3a75a4361788ebe79e41851d116b3 | Python | yuminliu/Downscaling | /src/ncdump.py | UTF-8 | 12,011 | 2.9375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Fri Jun 23 16:51:16 2017
@author: liuyuming
"""
'''
NAME
NetCDF with Python
PURPOSE
To demonstrate how to read and write data with NetCDF files using
a NetCDF file from the NCEP/NCAR Reanalysis.
Plotting using Matplotlib and Basemap is also shown.
PROGRAMMER(S)
Chris Slocum
REVISION HISTORY
20140320 -- Initial version created and posted online
20140722 -- Added basic error handling to ncdump
Thanks to K.-Michael Aye for highlighting the issue
REFERENCES
netcdf4-python -- http://code.google.com/p/netcdf4-python/
NCEP/NCAR Reanalysis -- Kalnay et al. 1996
http://dx.doi.org/10.1175/1520-0477(1996)077<0437:TNYRP>2.0.CO;2
'''
#import datetime as dt # Python standard library datetime module
#import numpy as np
#from netCDF4 import Dataset # http://code.google.com/p/netcdf4-python/
#import matplotlib.pyplot as plt
#from mpl_toolkits.basemap import Basemap, addcyclic, shiftgrid
def ncdump(nc_fid, verb=True):
'''
ncdump outputs dimensions, variables and their attribute information.
The information is similar to that of NCAR's ncdump utility.
ncdump requires a valid instance of Dataset.
Parameters
----------
nc_fid : netCDF4.Dataset
A netCDF4 dateset object
verb : Boolean
whether or not nc_attrs, nc_dims, and nc_vars are printed
Returns
-------
nc_attrs : list
A Python list of the NetCDF file global attributes
nc_dims : list
A Python list of the NetCDF file dimensions
nc_vars : list
A Python list of the NetCDF file variables
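
    Examples
    --------
    >>> # illustrative usage; 'example.nc' is a hypothetical file name and
    >>> # `from netCDF4 import Dataset` is assumed
    >>> nc_fid = Dataset('example.nc', 'r')
    >>> nc_attrs, nc_dims, nc_vars = ncdump(nc_fid, verb=False)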
'''
def print_ncattr(key):
"""
Prints the NetCDF file attributes for a given key
Parameters
----------
key : unicode
a valid netCDF4.Dataset.variables key
"""
try:
print("\t\ttype:", repr(nc_fid.variables[key].dtype))
for ncattr in nc_fid.variables[key].ncattrs():
print('\t\t%s:' % ncattr,\
repr(nc_fid.variables[key].getncattr(ncattr)))
except KeyError:
print("\t\tWARNING: %s does not contain variable attributes" % key)
# NetCDF global attributes
nc_attrs = nc_fid.ncattrs()
if verb:
print("NetCDF Global Attributes:")
for nc_attr in nc_attrs:
print('\t%s:' % nc_attr, repr(nc_fid.getncattr(nc_attr)))
nc_dims = [dim for dim in nc_fid.dimensions] # list of nc dimensions
# Dimension shape information.
if verb:
print("NetCDF dimension information:")
for dim in nc_dims:
print("\tName:", dim)
print("\t\tsize:", len(nc_fid.dimensions[dim]))
print_ncattr(dim)
# Variable information.
nc_vars = [var for var in nc_fid.variables] # list of nc variables
if verb:
print("NetCDF variable information:")
for var in nc_vars:
if var not in nc_dims:
print('\tName:', var)
print("\t\tdimensions:", nc_fid.variables[var].dimensions)
print("\t\tsize:", nc_fid.variables[var].size)
print_ncattr(var)
return nc_attrs, nc_dims, nc_vars
if __name__ == '__main__':
from netCDF4 import Dataset # http://code.google.com/p/netcdf4-python/
#nc_f = '../dataFiles/g_day_CCSM4_19820801_ens01_19820801-19830731.nc' # Your filename
#nc_f = '../data/precipitation/NASAGCMdata/rawdata/regridded_1deg_pr_amon_access1-0_historical_r1i1p1_195001-200512.nc'
#nc_f = '../data/precipitation/CPCdata/rawdata/precip.V1.0.mon.mean.nc'
nc_f = '/home/yumin/Desktop/DS/DATA/PRISM/monthly/ppt/PRISM_ppt_stable_4kmM3_1950-2005_monthly.nc'
nc_fid = Dataset(nc_f, 'r') # Dataset is the class behavior to open the file
                        # and create an instance of the netCDF4 Dataset class
nc_attrs, nc_dims, nc_vars = ncdump(nc_fid)
lons = list(nc_fid.variables['lon'][:])
lats = list(nc_fid.variables['lat'][:])
time = list(nc_fid.variables['time'][:])
ppt = nc_fid.variables['ppt'][:]
import matplotlib.pyplot as plt
fig = plt.figure()
plt.imshow(ppt[0,:,:])
plt.show()
#
## Extract data from NetCDF file
#lats = nc_fid.variables['lat'][:] # extract/copy the data
#lons = nc_fid.variables['lon'][:]
#time = nc_fid.variables['time'][:]
#air = nc_fid.variables['air'][:] # shape is time, lat, lon as shown above
#
#time_idx = 237 # some random day in 2012
## Python and the renalaysis are slightly off in time so this fixes that problem
#offset = dt.timedelta(hours=48)
## List of all times in the file as datetime objects
#dt_time = [dt.date(1, 1, 1) + dt.timedelta(hours=t) - offset\
# for t in time]
#cur_time = dt_time[time_idx]
#
## Plot of global temperature on our random day
#fig = plt.figure()
#fig.subplots_adjust(left=0., right=1., bottom=0., top=0.9)
## Setup the map. See http://matplotlib.org/basemap/users/mapsetup.html
## for other projections.
#m = Basemap(projection='moll', llcrnrlat=-90, urcrnrlat=90,\
# llcrnrlon=0, urcrnrlon=360, resolution='c', lon_0=0)
#m.drawcoastlines()
#m.drawmapboundary()
## Make the plot continuous
#air_cyclic, lons_cyclic = addcyclic(air[time_idx, :, :], lons)
## Shift the grid so lons go from -180 to 180 instead of 0 to 360.
#air_cyclic, lons_cyclic = shiftgrid(180., air_cyclic, lons_cyclic, start=False)
## Create 2D lat/lon arrays for Basemap
#lon2d, lat2d = np.meshgrid(lons_cyclic, lats)
## Transforms lat/lon into plotting coordinates for projection
#x, y = m(lon2d, lat2d)
## Plot of air temperature with 11 contour intervals
#cs = m.contourf(x, y, air_cyclic, 11, cmap=plt.cm.Spectral_r)
#cbar = plt.colorbar(cs, orientation='horizontal', shrink=0.5)
#cbar.set_label("%s (%s)" % (nc_fid.variables['air'].var_desc,\
# nc_fid.variables['air'].units))
#plt.title("%s on %s" % (nc_fid.variables['air'].var_desc, cur_time))
#
## Writing NetCDF files
## For this example, we will create two NetCDF4 files. One with the global air
## temperature departure from its value at Darwin, Australia. The other with
## the temperature profile for the entire year at Darwin.
#darwin = {'name': 'Darwin, Australia', 'lat': -12.45, 'lon': 130.83}
#
## Find the nearest latitude and longitude for Darwin
#lat_idx = np.abs(lats - darwin['lat']).argmin()
#lon_idx = np.abs(lons - darwin['lon']).argmin()
#
## Simple example: temperature profile for the entire year at Darwin.
## Open a new NetCDF file to write the data to. For format, you can choose from
## 'NETCDF3_CLASSIC', 'NETCDF3_64BIT', 'NETCDF4_CLASSIC', and 'NETCDF4'
#w_nc_fid = Dataset('darwin_2012.nc', 'w', format='NETCDF4')
#w_nc_fid.description = "NCEP/NCAR Reanalysis %s from its value at %s. %s" %\
# (nc_fid.variables['air'].var_desc.lower(),\
# darwin['name'], nc_fid.description)
## Using our previous dimension info, we can create the new time dimension
## Even though we know the size, we are going to set the size to unknown
#w_nc_fid.createDimension('time', None)
#w_nc_dim = w_nc_fid.createVariable('time', nc_fid.variables['time'].dtype,\
# ('time',))
## You can do this step yourself but someone else did the work for us.
#for ncattr in nc_fid.variables['time'].ncattrs():
# w_nc_dim.setncattr(ncattr, nc_fid.variables['time'].getncattr(ncattr))
## Assign the dimension data to the new NetCDF file.
#w_nc_fid.variables['time'][:] = time
#w_nc_var = w_nc_fid.createVariable('air', 'f8', ('time'))
#w_nc_var.setncatts({'long_name': u"mean Daily Air temperature",\
# 'units': u"degK", 'level_desc': u'Surface',\
# 'var_desc': u"Air temperature",\
# 'statistic': u'Mean\nM'})
#w_nc_fid.variables['air'][:] = air[time_idx, lat_idx, lon_idx]
#w_nc_fid.close() # close the new file
#
## A plot of the temperature profile for Darwin in 2012
#fig = plt.figure()
#plt.plot(dt_time, air[:, lat_idx, lon_idx], c='r')
#plt.plot(dt_time[time_idx], air[time_idx, lat_idx, lon_idx], c='b', marker='o')
#plt.text(dt_time[time_idx], air[time_idx, lat_idx, lon_idx], cur_time,\
# ha='right')
#fig.autofmt_xdate()
#plt.ylabel("%s (%s)" % (nc_fid.variables['air'].var_desc,\
# nc_fid.variables['air'].units))
#plt.xlabel("Time")
#plt.title("%s from\n%s for %s" % (nc_fid.variables['air'].var_desc,\
# darwin['name'], cur_time.year))
#
## Complex example: global temperature departure from its value at Darwin
#departure = air[:, :, :] - air[:, lat_idx, lon_idx].reshape((time.shape[0],\
# 1, 1))
#
## Open a new NetCDF file to write the data to. For format, you can choose from
## 'NETCDF3_CLASSIC', 'NETCDF3_64BIT', 'NETCDF4_CLASSIC', and 'NETCDF4'
#w_nc_fid = Dataset('air.departure.sig995.2012.nc', 'w', format='NETCDF4')
#w_nc_fid.description = "The departure of the NCEP/NCAR Reanalysis " +\
# "%s from its value at %s. %s" %\
# (nc_fid.variables['air'].var_desc.lower(),\
# darwin['name'], nc_fid.description)
## Using our previous dimension information, we can create the new dimensions
#data = {}
#for dim in nc_dims:
# w_nc_fid.createDimension(dim, nc_fid.variables[dim].size)
# data[dim] = w_nc_fid.createVariable(dim, nc_fid.variables[dim].dtype,\
# (dim,))
# # You can do this step yourself but someone else did the work for us.
# for ncattr in nc_fid.variables[dim].ncattrs():
# data[dim].setncattr(ncattr, nc_fid.variables[dim].getncattr(ncattr))
## Assign the dimension data to the new NetCDF file.
#w_nc_fid.variables['time'][:] = time
#w_nc_fid.variables['lat'][:] = lats
#w_nc_fid.variables['lon'][:] = lons
#
## Ok, time to create our departure variable
#w_nc_var = w_nc_fid.createVariable('air_dep', 'f8', ('time', 'lat', 'lon'))
#w_nc_var.setncatts({'long_name': u"mean Daily Air temperature departure",\
# 'units': u"degK", 'level_desc': u'Surface',\
# 'var_desc': u"Air temperature departure",\
# 'statistic': u'Mean\nM'})
#w_nc_fid.variables['air_dep'][:] = departure
#w_nc_fid.close() # close the new file
#
## Rounded maximum absolute value of the departure used for contouring
#max_dep = np.round(np.abs(departure[time_idx, :, :]).max()+5., decimals=-1)
#
## Generate a figure of the departure for a single day
#fig = plt.figure()
#fig.subplots_adjust(left=0., right=1., bottom=0., top=0.9)
#m = Basemap(projection='moll', llcrnrlat=-90, urcrnrlat=90,\
# llcrnrlon=0, urcrnrlon=360, resolution='c', lon_0=0)
#m.drawcoastlines()
#m.drawmapboundary()
#dep_cyclic, lons_cyclic = addcyclic(departure[time_idx, :, :], lons)
#dep_cyclic, lons_cyclic = shiftgrid(180., dep_cyclic, lons_cyclic, start=False)
#lon2d, lat2d = np.meshgrid(lons_cyclic, lats)
#x, y = m(lon2d, lat2d)
#levels = np.linspace(-max_dep, max_dep, 11)
#cs = m.contourf(x, y, dep_cyclic, levels=levels, cmap=plt.cm.bwr)
#x, y = m(darwin['lon'], darwin['lat'])
#plt.plot(x, y, c='c', marker='o')
#plt.text(x, y, 'Darwin,\nAustralia', color='r', weight='semibold')
#cbar = plt.colorbar(cs, orientation='horizontal', shrink=0.5)
#cbar.set_label("%s departure (%s)" % (nc_fid.variables['air'].var_desc,\
# nc_fid.variables['air'].units))
#plt.title("Departure of Global %s from\n%s for %s" %\
# (nc_fid.variables['air'].var_desc, darwin['name'], cur_time))
#plt.show()
#
# Close original NetCDF file.
#nc_fid.close() | [
"matplotlib"
] |
42bb4a5feb1d0ef705b8a26b43bdee93b435aee9 | Python | oskam/probabilistic-ml | /lab1/task1.py | UTF-8 | 1,037 | 2.921875 | 3 | [] | no_license | import numpy as np
import matplotlib.pyplot as plt
from matplotlib import animation
STOP = 10
occurrences = [0 for _ in range(0, STOP)]
frequencies = [0.0 for _ in range(0, STOP)]
fig, ax = plt.subplots()
ax.set_xlabel('Range of the numbers in the stream')
ax.set_ylim((0, 1))
ax.set_xlim((-0.4, STOP-0.4))
pos = np.arange(STOP)
width = 0.8
ax.set_xticks(pos)
ax.set_xticklabels([str(n) if n % 5 == 0 else '' for n in range(0, STOP)])
rects = plt.bar(pos, frequencies, width, color='r')
def randoms():
while True:
yield np.random.randint(0, STOP)
def animate(arg, rects):
frequencies = arg
for rect, f in zip(rects, frequencies):
rect.set_height(f)
def step():
for i in randoms():
occurrences[i] += 1
total = sum(occurrences)
for j in range(0, STOP):
frequencies[j] = occurrences[j] / total
yield frequencies
anim = animation.FuncAnimation(fig, animate, step,
repeat=False, interval=100, fargs=(rects,))
plt.show()
| [
"matplotlib"
] |
414cae577fffce9c685cb88864bcfd5f8d7319c8 | Python | rehanguha/Learning-Rate-Sensitivity | /CIFAR10_LR.py | UTF-8 | 2,733 | 2.609375 | 3 | [] | no_license | from tensorflow import keras
from tensorflow.keras import datasets, models, layers
import matplotlib.pylab as plt
import tensorflow as tf
import numpy as np
import json
import pandas as pd
from tensorflow.keras.optimizers import SGD, RMSprop, Adam, Adagrad, Adadelta, Adamax, Nadam
import alert
mail = alert.mail(sender_email="[email protected]", sender_password="rehanguhalogs")
lrs = np.linspace(0.001, 0.1, num = 100, endpoint =True).tolist()
epochs =[200]
optimizers = [SGD, RMSprop, Adam, Adagrad, Adadelta, Adamax, Nadam]
(x_train, y_train), (x_test, y_test) = datasets.cifar10.load_data()
# Normalize pixel values to be between 0 and 1
x_train, x_test = x_train / 255.0, x_test / 255.0
def compile_optimizer(optimizer):
model = models.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(32, 32, 3)))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.Flatten())
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(10))
model.compile(
optimizer=optimizer,
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
return model
def train_mnist(model, epoch):
history = model.fit(x_train, y_train,
epochs=epoch,
verbose=True,
validation_data=(x_test, y_test))
return history, model
mnist_df = pd.DataFrame(columns = ['optimizer', 'lr', 'epoch', 'accuracy', 'loss', 'test_accuracy', 'test_loss'])
for opt in optimizers:
for lr in lrs:
for epoch in epochs:
model = compile_optimizer(opt(learning_rate=lr))
history, model = train_mnist(model, epoch)
train_loss, train_accuracy = model.evaluate(x_train, y_train, verbose=False)
test_loss, test_accuracy = model.evaluate(x_test, y_test, verbose=False)
mnist_df = mnist_df.append(
{
'optimizer': opt.__name__,
'lr': lr,
'epoch': epoch,
'accuracy': train_accuracy,
'loss': train_loss,
'test_accuracy': test_accuracy,
'test_loss': test_loss
}, ignore_index= True)
mnist_df.to_csv("output/CIFAR10_LR.csv", index=False)
mail.send_email(receiver_email="[email protected]", subject="{name} Completed".format(name=opt.__name__,lr=lr), msg="Done.")
mnist_df.to_csv("output/CIFAR10_LR.csv", index=False)
mail.send_email(receiver_email="[email protected]", subject="All Completed".format(name=opt.__name__,lr=lr), msg="Saved.")
| [
"matplotlib"
] |
e6764431de7ea7c1bcd58f662c7321026cbc3922 | Python | IronMan-1000/LINEAR | /linear.py | UTF-8 | 738 | 3.328125 | 3 | [] | no_license | import pandas as pd
import plotly.express as px
import numpy as np
df=pd.read_csv("data.csv")
height=df["Height"].tolist()
weight=df["Weight"].tolist()
m = 0.95
c = -93
y = []
for x in height:
y_value = m*x + c
y.append(y_value)
x=250
y=m*x +c
print(f"Weight of someone with height {x} is {y}")
height_array = np.array(height)
weight_array = np.array(weight)
m, c = np.polyfit(height_array, weight_array, 1)
y = []
for x in height_array:
y_value = m*x + c
y.append(y_value)
fig = px.scatter(x=height_array, y=weight_array)
fig.update_layout(shapes=[
dict(
type= 'line',
y0= min(y), y1= max(y),
x0= min(height_array), x1= max(height_array)
)
])
fig.show() | [
"plotly"
] |
b250824fc77bcd6a6ca89d5625e929df78bb4f4b | Python | skygx/python_study | /py/iris_seaborn.py | UTF-8 | 821 | 2.71875 | 3 | [] | no_license | #/usr/bin/env python
# -*- coding:utf-8 -*-
'''
@File : iris_seaborn.py
@Contact : [email protected]
@License : (C)Copyright 2018-2019, xguo
@Modify Time      @Author    @Version    @Description
------------ ------- -------- -----------
2020/2/29 20:34 xguo 1.0 None
'''
from sklearn.datasets import load_iris
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
from sklearn import tree
def main():
sns.set(style="ticks", color_codes=True)
iris_datas = load_iris()
    # iris = pd.DataFrame(iris_datas.data, columns=['SepalLength', 'SepalWidth', 'PetalLength', 'PetalWidth'])
iris = pd.DataFrame(iris_datas.data)
df02 = iris.iloc[:, [0, 2, 3]]
print(iris)
sns.pairplot(df02)
plt.show()
if __name__ == "__main__":
main() | [
"matplotlib",
"seaborn"
] |
4abdecdfedafdb5944e102158e4057c609000ed8 | Python | gjhartwell/cth-python | /diagnostics/thomson/Raman/raman_theory.py | UTF-8 | 13,480 | 2.734375 | 3 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-
"""
Created on Tue Jun 12 17:45:49 2018
@author: James
"""
import numpy as np
import matplotlib.pyplot as plt
exp_val = {} # dictionary of experimental values
exp_val['anisotropy'] = (0.395 *10**-82) / (8.8541878176*10**-12)**2
# the molecular - polarizability anisotropy for N2 in cm^6.
# Value taken from M. J. van de Sande "Laser scattering on low
# temperature plasmas: high resolution and stray light rejection" 2002
exp_val['N2_rot_constant'] = 199.887
# rotational constant for N2 molecule
# This value was taken from C.M. Penney
# "Absolute rotational Raman corss sections for N2, O2, and CO2" 1974
exp_val['h'] = 6.62607004*10**-34 # Planck_constant
exp_val['e'] = 1.60217662*10**-19
exp_val['me']= 9.10938356*10**-31 # mass of electron in kg
exp_val['epsilon'] = 8.8541878176*(10**-12)
exp_val['c'] = 299792458 # speed of light
exp_val['kB'] = 1.38064852*10**-23 # Boltzmann constant m^2 kg s^-2 K^-1
exp_val['laser_wavelength'] = 532.0 *10**-9
exp_val['electron_radius**2'] = (((exp_val['e'])**2)/(4*np.pi*exp_val['epsilon'] *
exp_val['me']*exp_val['c']**2))**2 * 10**4
exp_val['theta1'] = 86.371 * (np.pi/180)
exp_val['theta2'] = 90.000 * (np.pi/180)
exp_val['length_to_lens'] = 71.8
exp_val['radius_of_lens'] = 7.5
exp_val['L'] = 1.42 # Laser beam length (cm) imaged onton fiber
exp_val['gjeven'] = 6
exp_val['gjodd'] = 3
# Transmissions
exp_val['Twin1'] = 0.9
exp_val['Tlens1'] = 0.995
exp_val['Tmirrors1']= 0.997
exp_val['Tfiber'] = 0.587
exp_val['Tspect1'] = 0.72
exp_val['Tcolllens']= 0.849
exp_val['Tfiberimage1'] = 64/75
exp_val['Tpmtlens1'] = 0.849
def collection_optics_solid_angle(length_to_lens, radius_of_lens):
# The solid angle that is collected by the collection
# optics. length_to_lens is the distance from the scattering volume to the
# location of the collection lens. radius_of_lens is the radius of the
# collection lens
value = 2*np.pi*(1-np.cos(np.arctan(radius_of_lens/length_to_lens)))
return value
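# Illustrative check (not used in the calculation): with the geometry in exp_val above
# (length_to_lens = 71.8 cm, radius_of_lens = 7.5 cm) the collection half-angle is
# arctan(7.5/71.8) ~ 0.104 rad, giving a solid angle of roughly 2*pi*(1-cos(0.104)) ~ 0.034 sr.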
def lambda_raman_stokes(l, j):
B = exp_val['N2_rot_constant']
value = l + l**2 * (B)*(4*j+6)
return value
def lambda_thermal(Te):
l = exp_val['laser_wavelength']
alpha = exp_val['theta2']
c1 = 2*l*np.sin(alpha/2)
c2 = np.sqrt((2*Te)/(511706.544))
value = c1 * c2
return value
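# Note: lambda_thermal is the 1/e half-width of the Thomson-scattered spectrum,
#   delta_lambda = 2 * lambda_0 * sin(theta/2) * sqrt(2*Te / (m_e*c^2)),
# with Te in eV; the constant 511706.544 appears to be (approximately) the electron
# rest energy in eV.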
def laser_photons(E_pulse):
value = E_pulse * (exp_val['laser_wavelength']/(exp_val['h'] * exp_val['c'])) * 10**-9
return value
def QEPMTH742240plusH1170640(l):
# PMT quantum efficiency
value = -1* (3.0453*10**-4)*l + 0.565053
    # NOTE: the fitted efficiency curve above is computed but not used -- the function
    # currently returns 1, i.e. the PMT quantum efficiency is treated as ideal.
    return 1
def optical_efficiency(*args):
value = 1
for arg in args:
value = value * arg
return value
def coef(E_pulse, n, L, length_to_lens, radius_of_lens, theta):
# LaserPhotons is the number of photons in a given laser pulse.
# (7.9188*10^-26) is the electron radius squared in cm^2. L is the length
# of the scattering volume along the laser beam that is being imaged.
# ne is the electron density in the scattering volume.
# Finally \[Theta] is the angle between the laser polarization and the
# collection optics (the Sin (\[Theta])^2 term is the dipole \
# scattering pattern).
c1 = laser_photons(E_pulse)
c2 = exp_val['electron_radius**2']
c3 = collection_optics_solid_angle(length_to_lens, radius_of_lens)
c4 = n / np.sqrt(np.pi) * np.sin(theta)**2
value = c1 *c2 * L * c3 * c4
return value
def thomson_scattered_photons(E_pulse, n, Te, wavelength):
c1 = coef(E_pulse, n, exp_val['L'], exp_val['length_to_lens'],
exp_val['radius_of_lens'], exp_val['theta1'])
c2 = lambda_thermal(Te)
c3 = np.exp(-1*((wavelength - exp_val['laser_wavelength'])**2/(c2**2)))
value = (c1 / c2) * c3
return value
def thomson_channel_photons(E_pulse, n, Te, min_wavelength, max_wavelength):
n_steps = 100
step = (max_wavelength - min_wavelength)/n_steps
x = np.linspace(min_wavelength, max_wavelength, n_steps)
y = thomson_scattered_photons(E_pulse, n, Te, x)
total_int = sum(y) * step
total = total_int/(max_wavelength - min_wavelength)
return total
def thomson_channel_volts(E_pulse, n, Te, min_wavelength, max_wavelength):
n_steps = 100
step = (max_wavelength - min_wavelength)/n_steps
x = np.linspace(min_wavelength, max_wavelength, n_steps)
y = thomson_scattered_photons(E_pulse, n, Te, x)
resistance = 25
gain = 2 * 10**5
tau = 20 * 10**-9
e = 1.60217662 * 10 **-19
y = (gain * y * resistance * e)/tau
total_int = sum(y) * step
total = total_int/(max_wavelength - min_wavelength)
return total
def raman_coef(E_pulse, n):
# taking out the TS values for raman scattering coeff and then adding
# in the raman specific values.
# Note that the density is being converted into cm^-3 from m^-3. Also
# the depolarization ratio is included as 3/4 for linear molecules
# (all assuming perpendicular scattering geometry)
c1 = coef(E_pulse, n, exp_val['L'], exp_val['length_to_lens'],
exp_val['radius_of_lens'], exp_val['theta1'])
c2 = np.sqrt(np.pi)/exp_val['electron_radius**2']
c3 = (64 * np.pi**4)/45
c4 = .75
c5 = exp_val['anisotropy']
value = c1 * c2 * c3 * c4 * c5
return value
def raman_crosssection(l, j):
c1 = (64 * np.pi**4)/45
c2 = .75
c3 = exp_val['anisotropy']
c4 = (3 * (j + 1) * (j + 2))/(2 * (2 * j + 1)*(2*j+3))
c5 = (1 / (lambda_raman_stokes(l, j)))**4
value = c1 * c2 * c3 * c4 * c5
return value
def raman_distribution(j, T, gj):
# j distribution of raman values with T temperature of N2 gas in K and
# finally gj is degeneracy for whether j is even or odd
c1 = gj * ((2 * j) + 1)
c2 = (2 * exp_val['h'] * exp_val['c'] * exp_val['N2_rot_constant'] * 10**2) / (9 * exp_val['kB'] * T)
c3 = -(exp_val['h'] * exp_val['c'] * exp_val['N2_rot_constant'] * 10**2 * j * (j+1))/(exp_val['kB']*T)
c4 = (3 * (j + 1) * (j + 2))/(2 * ((2 * j) + 1)*((2*j)+3))
value = c1 * c2 * np.exp(c3) * c4
return value
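# Interpretation (descriptive note): the factors above combine the nuclear-spin
# degeneracy gj, the Boltzmann population of rotational level j,
# exp(-h*c*B*j(j+1)/(kB*T)), and the Placzek-Teller coefficient
# 3(j+1)(j+2) / (2(2j+1)(2j+3)) for the Stokes (S) branch.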
def raman_scattered_photons(E_pulse, n, j, T, gj):
    # rotational Stokes Raman scattered photons per unit wavelength
c1 = raman_coef(E_pulse, n)
c2 = raman_distribution(j, T, gj)
c3 = (1/(lambda_raman_stokes(exp_val['laser_wavelength'], j)*10**-7))**4
value = c1 * c2 * c3
return value
def total_photoelectrons_raman_center_function(E_pulse, p, T,
wavelength_min, wavelength_max):
c1 = optical_efficiency(exp_val['Twin1'], exp_val['Tlens1'],
exp_val['Tmirrors1'], exp_val['Tfiber'],
exp_val['Tspect1'], exp_val['Tcolllens'],
exp_val['Tfiberimage1'], exp_val['Tpmtlens1'])
c1 = 1
n = ((p/(7.5006*10**-3))/(exp_val['kB'] * T))*10**-6
c2 = 0
for j in range(0, 60):
wavelength = lambda_raman_stokes(exp_val['laser_wavelength'], 2 * j)
if wavelength <= wavelength_max and wavelength >= wavelength_min:
c2 = c2 + (raman_scattered_photons(E_pulse, n, 2 * j, T,
exp_val['gjeven']) *
QEPMTH742240plusH1170640(lambda_raman_stokes(exp_val['laser_wavelength'], 2 * j)))
c3 = 0
for j in range(0, 60):
wavelength = lambda_raman_stokes(exp_val['laser_wavelength'], 2 * j + 1)
if wavelength <= wavelength_max and wavelength >= wavelength_min:
c3 = c3 + (raman_scattered_photons(E_pulse, n, 2 * j + 1, T,
exp_val['gjodd']) *
QEPMTH742240plusH1170640(lambda_raman_stokes(exp_val['laser_wavelength'], 2 * j + 1)))
value = c1 * (c2 + c3)
return value
def step_function_array(start, stop, height):
frac = (stop - start)/20
x = np.linspace(start - frac, stop + frac, 110)
y = np.array([0] * 110)
y[5:104] = height
return x, y
def plot_thomson_channels(height):
x1, y1 = step_function_array(533.5, 539, height)
plt.plot(x1, y1, 'k')
x1, y1 = step_function_array(539.5, 545, height)
plt.plot(x1, y1, 'k')
x1, y1 = step_function_array(545.5, 551, height)
plt.plot(x1, y1, 'k')
x1, y1 = step_function_array(551.5, 557, height)
plt.plot(x1, y1, 'k')
x1, y1 = step_function_array(557.5, 563, height)
plt.plot(x1, y1, 'k')
return
#x1 = TotalPhotoelectronsRamanCenterFunction(.8, 50, 295, 545, 551)
x2 = raman_scattered_photons(.8, ((50/(7.5006*10**-3))/(exp_val['kB'] * 295))*10**-6, 35,
295, exp_val['gjeven'])
"""
Pressure = np.linspace(0, 50, 100)
Photons = total_photoelectrons_raman_center_function(1.8, Pressure, 295, 536, 561)
a = np.loadtxt('170929_photon_counts_combined_a.txt')
b = np.loadtxt('170929_photon_counts_combined_b_edit.txt')
pressure_a = np.loadtxt('170929_pressure_torr_a.txt')
pressure_b = np.loadtxt('170929_pressure_torr_b_edit.txt')
weights_b = np.loadtxt('weights_b.txt')
weights_a = weights_b[1:len(a)]
plt.figure()
plt.plot(Pressure, Photons, c='k')
plt.scatter(pressure_a, a, c = 'r')
fit1 = np.polyfit(Pressure, Photons, 1)
label1 = str('Theory: y = ' + str(np.round(fit1[0],2)) + 'x')
plt.plot(np.unique(Pressure), np.poly1d(np.polyfit(Pressure, Photons, 1))(np.unique(Pressure)),
color='k', label=label1)
fit2 = np.polyfit(pressure_a, a, 1)
label2 = str('Data: y = ' + str(np.round(fit2[0],2)) + 'x' )
plt.plot(np.unique(pressure_a), np.poly1d(np.polyfit(pressure_a, a, 1))(np.unique(pressure_a)),
color='r', label = label2)
plt.xlabel('Pressure (Torr)', fontsize = 15, weight ='bold')
plt.ylabel('Photons', fontsize = 15, weight ='bold')
title = str("Predicted Raman Scattering \n (536 nm - 561 nm)")
plt.title(title, fontsize = 15, weight ='bold')
plt.xticks(fontsize = 13, weight = 'bold')
plt.yticks(fontsize = 13, weight = 'bold')
plt.legend()
plt.savefig('test_1_theory_vs_data.png', format='png', dpi = 1000)
plt.show()
"""
"""
Pressure = np.linspace(0, 50, 100)
Photons = total_photoelectrons_raman_center_function(1.8, Pressure, 295, 543, 565)
b = np.loadtxt('170929_photon_counts_combined_b_edit.txt')
pressure_b = np.loadtxt('170929_pressure_torr_b_edit.txt')
weights_b = np.loadtxt('weights_b.txt')
weights_b = weights_b[1:len(b)]
plt.figure()
plt.plot(Pressure, Photons, c='k')
plt.scatter(pressure_b, b, c = 'b')
fit1 = np.polyfit(Pressure, Photons, 1)
label1 = str('Theory: y = ' + str(np.round(fit1[0],2)) + 'x')
plt.plot(np.unique(Pressure), np.poly1d(np.polyfit(Pressure, Photons, 1))(np.unique(Pressure)),
color='k', label=label1)
fit2 = np.polyfit(pressure_b, b, 1)
label2 = str('Data: y = ' + str(np.round(fit2[0],2)) + 'x' )
plt.plot(np.unique(pressure_a), np.poly1d(np.polyfit(pressure_a, a, 1))(np.unique(pressure_a)),
color='b', label = label2)
plt.xlabel('Pressure (Torr)', fontsize = 15, weight ='bold')
plt.ylabel('Photons', fontsize = 15, weight ='bold')
title = str("Raman Scattering \n(543 nm - 565 nm)")
plt.title(title, fontsize = 15, weight ='bold')
plt.xticks(fontsize = 13, weight = 'bold')
plt.yticks(fontsize = 13, weight = 'bold')
plt.legend()
#plt.savefig('test_2_theory_vs_data.png', format='png', dpi = 1000)
plt.show()
"""
"""
plt.figure()
x = np.linspace(532, 563, 200)
y = thomson_scattered_photons(1.69, 1*10**13, 100, x)
plt.plot(x, y,'r', label = 'Te: 100 eV')
y = thomson_scattered_photons(1.69, 1*10**13, 150, x)
plt.plot(x, y, 'b', label = 'Te: 150 eV')
y = thomson_scattered_photons(1.69, 1*10**13, 200, x)
plt.plot(x, y, 'k', label = 'Te: 200 eV')
print(max(y)/10)
plot_thomson_channels(max(y)/5)
plt.xlabel('Wavelength (nm)', fontsize = 15, weight ='bold')
plt.ylabel('Photons', fontsize = 15, weight ='bold')
plt.title('Estimated Thomson Scattered Photons', fontsize = 15, weight ='bold')
plt.legend(fontsize = 12,loc='upper right')
plt.show()
plt.figure()
x = np.linspace(532, 563, 200)
y = thomson_scattered_photons(1.69, 1*10**13, 100, x)
plt.plot(x, y,'r', label = 'Total Scattered')
c1 = optical_efficiency(exp_val['Twin1'], exp_val['Tlens1'],
exp_val['Tmirrors1'], exp_val['Tfiber'],
exp_val['Tspect1'], exp_val['Tcolllens'],
exp_val['Tfiberimage1'], exp_val['Tpmtlens1'])
y = y * c1
plt.plot(x, y, 'b', label = 'Collected by PMT')
plt.xlabel('Wavelength (nm)', fontsize = 15, weight ='bold')
plt.ylabel('Photons', fontsize = 15, weight ='bold')
plt.title('Estimated Thomson Scattered Photons \n 100 eV Plasma', fontsize = 15, weight ='bold')
plt.legend(fontsize = 12,loc='upper right')
plt.show()
"""
"""
x = np.linspace(1, 300, 100)
y1 = thomson_channel_photons(1.69, 1*10**13, x, 533.5, 539 )[1]
y2 = thomson_channel_photons(1.69, 1*10**13, x, 539.5, 545 )[1]
y3 = thomson_channel_photons(1.69, 1*10**13, x, 545.5, 551 )[1]
y4 = thomson_channel_photons(1.69, 1*10**13, x, 551.5, 557 )[1]
y5 = thomson_channel_photons(1.69, 1*10**13, x, 557.5, 563 )[1]
plt.plot(x, y1)
plt.plot(x, y2)
plt.plot(x, y3)
plt.plot(x, y4)
plt.plot(x, y5)
print(thomson_channel_photons(1.69, 1*10**13, 100, 532, 532.5))
print(thomson_channel_volts(1.69, 1*10**13, 100, 532, 532.5))
"""
| [
"matplotlib"
] |
f1e50af665dd53115b8fdd6b5fa18978df362d64 | Python | silenzio777/pytorch_geometric | /torch_geometric/graphgym/utils/plot.py | UTF-8 | 367 | 2.515625 | 3 | [
"MIT"
] | permissive | import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.decomposition import PCA
sns.set_context('poster')
def view_emb(emb, dir):
if emb.shape[1] > 2:
pca = PCA(n_components=2)
emb = pca.fit_transform(emb)
plt.figure(figsize=(10, 10))
plt.scatter(emb[:, 0], emb[:, 1])
plt.savefig('{}/emb_pca.png'.format(dir), dpi=100)
| [
"matplotlib",
"seaborn"
] |
cbf75c2dd5adb50b25fd06cb633030f994d9a535 | Python | shauryashaurya/daguerre | /blending/hybrid.py | UTF-8 | 922 | 2.90625 | 3 | [] | no_license | # CS194-26: Computational Photography
# Project 3: Fun with Frequencies
# hybrid.py (Part 1)
# Krishna Parashar
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from skimage import color
from align import *
from unsharp import *
def get_fft(img):
return (np.log(np.abs(np.fft.fftshift(np.fft.fft2(img)))))
def hybrid_image(img1, img2, sigma1, sigma2):
high_freq = laplacian(img1, sigma1)
low_freq = gaussian(img2, sigma2)
hybrid_img = (high_freq + low_freq)/2.
hybrid_img = rescale_intensity(hybrid_img, in_range=(0, 1), out_range=(0, 1))
return hybrid_img, high_freq, low_freq
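# Descriptive note: the hybrid image blends the high-frequency content of img1
# (laplacian = high-pass) with the low-frequency content of img2 (gaussian = low-pass);
# laplacian, gaussian and rescale_intensity are presumably provided by the local
# align/unsharp modules imported with * above.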
def write_gray_image(dest_dir, img_name, img, attribute, extension):
# Saves image to directory using the appropriate extension
output_filename = dest_dir + img_name[:-4] + attribute + extension
plt.imsave(str(output_filename), img, cmap = cm.Greys_r)
print("Image {0} saved to {1}".format(img_name, output_filename))
| [
"matplotlib"
] |
f7a990fd9927b7af888ba6afd3a3a42b85d20f34 | Python | muntakimrafi/insbcn | /datasetAnalysis/plot_languages_distribution.py | UTF-8 | 1,545 | 3.15625 | 3 | [] | no_license | import matplotlib.pyplot as plt
import json
import operator
lan_file = open("../../../datasets/instaBarcelona/lang_data.json","r")
languages = json.load(lan_file)
print languages
print "Number of languages: " + str(len(languages.values()))
print "Languages with max repetitions has: " + str(max(languages.values()))
#Plot
lan_sorted = sorted(languages.items(), key=operator.itemgetter(1))
lan_count_sorted = languages.values()
lan_count_sorted.sort(reverse=True)
topX = min(10,len(lan_count_sorted))
x = range(topX)
my_xticks = []
for l in range(0,topX):
my_xticks.append(lan_sorted[-l-1][0])
plt.xticks(x, my_xticks, size = 20)
width = 1/1.5
barlist = plt.bar(x, lan_count_sorted[0:topX], width, align="center")
barlist[0].set_color('r')
barlist[1].set_color('g')
barlist[2].set_color('b')
# plt.title("Number of images per language")
plt.tight_layout()
plt.show()
#Plot %
lan_sorted = sorted(languages.items(), key=operator.itemgetter(1))
lan_count_sorted = languages.values()
lan_count_sorted.sort(reverse=True)
topX = min(10,len(lan_count_sorted))
x = range(topX)
my_xticks = []
total = sum(lan_count_sorted)
lan_count_sorted = [float(i) / total * 100.0 for i in lan_count_sorted]
for l in range(0,topX):
my_xticks.append(lan_sorted[-l-1][0])
plt.xticks(x, my_xticks, size = 11)
width = 1/1.5
barlist = plt.bar(x, lan_count_sorted[0:topX], width, align="center")
barlist[0].set_color('r')
barlist[1].set_color('g')
barlist[2].set_color('b')
plt.title("% of images per language")
plt.tight_layout()
plt.show()
print "Done" | [
"matplotlib"
] |
07ff835a6561edd48a38b10bee11e5230052e714 | Python | tflovorn/blg_moire | /plot/plot_dos_diff.py | UTF-8 | 2,033 | 2.9375 | 3 | [
"Apache-2.0",
"MIT"
] | permissive | import argparse
import json
from pathlib import Path
import matplotlib.pyplot as plt
def find_with_prefix(prefix, dir_path):
files = sorted([x for x in Path(dir_path).iterdir() if x.is_file()])
with_prefix = []
for f in files:
if f.name.startswith(prefix) and f.name.endswith("_dos.json"):
full_prefix = f.name.rpartition("_")[0]
with_prefix.append((str(f), full_prefix))
return with_prefix
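# Illustrative example (hypothetical file name): for "run_theta0.5_dos.json" with
# prefix "run", f.name.rpartition("_")[0] yields full_prefix = "run_theta0.5".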
def _main():
parser = argparse.ArgumentParser("Plot DOS(1) - DOS(2)")
parser.add_argument("prefix", type=str, help="Calculation prefix")
parser.add_argument("dir_add", type=str, help="Directory for DOS(1)")
parser.add_argument("dir_sub", type=str, help="Directory for DOS(2)")
args = parser.parse_args()
paths_add = find_with_prefix(args.prefix, args.dir_add)
paths_sub = find_with_prefix(args.prefix, args.dir_sub)
for (dos_path_add, full_prefix_add), (dos_path_sub, full_prefix_sub) in zip(paths_add, paths_sub):
assert(full_prefix_add == full_prefix_sub)
with open(dos_path_add) as fp:
dos_data_add = json.load(fp)
with open(dos_path_sub) as fp:
dos_data_sub = json.load(fp)
es = dos_data_add["es"]
total_dos_add = dos_data_add["total_dos"]
total_dos_sub = dos_data_sub["total_dos"]
ys = [add - sub for add, sub in zip(total_dos_add, total_dos_sub)]
if "xlabel" in dos_data_add:
plt.xlabel(dos_data_add["xlabel"])
if "ylabel" in dos_data_add:
plt.ylabel("$\\Delta$" + dos_data_add["ylabel"])
if "caption" in dos_data_add:
plt.title(dos_data_add["caption"] + " $- \\theta = {:.3}$ deg.".format(dos_data_sub["theta_deg"]))
plot_path = "{}_dos_diff.png".format(full_prefix_add)
plt.xlim(min(es), max(es))
plt.ylim(min(ys), max(ys))
plt.plot(es, ys, 'k-')
plt.savefig(plot_path, dpi=500, bbox_inches='tight')
plt.clf()
if __name__ == "__main__":
_main()
| [
"matplotlib"
] |
bad58962f2e340a0314725c381188f10a4042adc | Python | RysbekTokoev/pytorch-snake-ai | /main.py | UTF-8 | 4,855 | 2.921875 | 3 | [] | no_license | import torch
import random
import numpy as np
from collections import deque
from game import Game
from model import Net, Trainer
import sys
import matplotlib.pyplot as plt
plt.ion()
MAX_MEMORY = 100_000
BATCH_SIZE = 1000
LR = 0.001
class Agent:
def __init__(self, load_path=''):
self.n_games = 0
self.epsilon = 0
self.gamma = 0.9
self.load_path = load_path
self.memory = deque(maxlen=MAX_MEMORY)
self.model = Net(11, 256, 3)
if load_path:
self.model.load_state_dict(torch.load(load_path))
self.trainer = Trainer(self.model, LR, self.gamma)
def get_state(self, game):
        # Direction encoding used by game.direction: 0 = up, 1 = left, 2 = right, 3 = down.
        # near_head (below) lists the four cells adjacent to the head, in the order
        # up, left, right, down (grid step = 10 px).
head = game.snake_pos
near_head = [
[head[0], head[1] - 10],
[head[0] - 10, head[1]],
[head[0] + 10, head[1]],
[head[0], head[1] + 10],
]
directions = [
game.direction == 0,
game.direction == 1,
game.direction == 2,
game.direction == 3,
]
state = [
(directions[0] and game.is_colision(near_head[0])) or
(directions[1] and game.is_colision(near_head[1])) or
(directions[2] and game.is_colision(near_head[2])) or
(directions[3] and game.is_colision(near_head[3])),
(directions[0] and game.is_colision(near_head[1])) or
(directions[1] and game.is_colision(near_head[3])) or
(directions[2] and game.is_colision(near_head[0])) or
(directions[3] and game.is_colision(near_head[2])),
(directions[0] and game.is_colision(near_head[2])) or
(directions[1] and game.is_colision(near_head[0])) or
(directions[2] and game.is_colision(near_head[3])) or
(directions[3] and game.is_colision(near_head[1])),
game.food_pos[0] < head[0],
game.food_pos[0] > head[0],
game.food_pos[1] < head[1],
game.food_pos[1] > head[1],
] + directions
return np.array(state, dtype=int)
def remember(self, state, action, reward, next_state, done):
self.memory.append((state, action, reward, next_state, done))
def train_long_memory(self):
if len(self.memory) > BATCH_SIZE:
mini_sample = random.sample(self.memory, BATCH_SIZE)
else:
mini_sample = self.memory
states, actions, rewards, next_states, dones = zip(*mini_sample)
self.trainer.train_step(states, actions, rewards, next_states, dones)
def train_short_memory(self, state, action, reward, next_state, done):
self.trainer.train_step(state, action, reward, next_state, done)
def get_action(self, state):
if not self.load_path:
self.epsilon = 80 - self.n_games
final_move = [0, 0, 0]
if random.randint(0, 200) < self.epsilon:
move = random.randint(0, 2)
final_move[move] = 1
else:
state0 = torch.tensor(state, dtype=torch.float)
prediction = self.model(state0)
move = torch.argmax(prediction).item()
final_move[move] = 1
return final_move
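        # Exploration note: epsilon = 80 - n_games decays linearly, so random moves stop
        # after roughly 80 games (random.randint(0, 200) < epsilon is then never true);
        # when a pre-trained model is loaded, epsilon stays 0 and the agent acts greedily.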
def train(model_path=''):
plot_scores = []
plot_mean_scores = []
total_score = 0
record = 0
agent = Agent(model_path)
game = Game()
while True:
state_old = agent.get_state(game)
final_move = agent.get_action(state_old)
reward, done, score = game.play(final_move)
state_new = agent.get_state(game)
agent.train_short_memory(state_old, final_move, reward, state_new, done)
agent.remember(state_old, final_move, reward, state_new, done)
if done:
game.reset()
agent.n_games += 1
agent.train_long_memory()
if score > record:
record = score
agent.model.save()
if agent.n_games % 10 == 0:
print("Game:", agent.n_games, "Score:", score, "Record:", record)
plot_scores.append(score)
total_score += score
mean_score = total_score/agent.n_games
plot_mean_scores.append(mean_score)
plot(plot_scores, plot_mean_scores)
def plot(scores, mean_scores):
plt.clf()
plt.gcf()
plt.xlabel("Iteration")
plt.ylabel("Score")
plt.plot(scores)
plt.plot(mean_scores)
plt.ylim(ymin=0)
plt.text(len(scores)-1, scores[-1], str(scores[-1]))
    plt.text(len(mean_scores)-1, mean_scores[-1], str(mean_scores[-1]))
    # Give the GUI event loop time to redraw the updated figure; with plt.ion() alone
    # the window often never refreshes inside the training loop.
    plt.show(block=False)
    plt.pause(0.1)
if __name__ == "__main__":
model_path = ''
if len(sys.argv) > 1:
print(sys.argv[1])
model_path = sys.argv[1]
train(model_path)
| [
"matplotlib"
] |
7a0cd3b623872c30f559b7e6e31e517018395567 | Python | xpessoles/TP_Documents_PSI | /17_RobotDelta2D/EtudeDelta2D/images/LoiES_Essai_Modele/exploitationModele.py | UTF-8 | 1,336 | 2.859375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Tue Feb 3 17:29:23 2015
@author: Xavier
"""
import matplotlib.pyplot as plt
import numpy as np
import math
f_bras_sw = "SW_LoiES/AngleBras.txt"
f_mot_sw = "SW_LoiES/AngleMoteur.txt"
fid_bras_sw= open(f_bras_sw ,'r')
fid_mot_sw= open(f_mot_sw ,'r')
temps=[]
bras_sw=[]
mot_sw=[]
fid_bras_sw.readline()
fid_bras_sw.readline()
fid_mot_sw.readline()
fid_mot_sw.readline()
for ligne in fid_bras_sw:
ligne = ligne.replace("\n","")
ligne = ligne.split(" ")
if len(ligne)==2 :
temps.append(float(ligne[0]))
bras_sw.append(math.degrees(float(ligne[1])))
fid_bras_sw.close()
for ligne in fid_mot_sw:
ligne = ligne.replace("\n","")
ligne = ligne.split(" ")
if len(ligne)==2 :
mot_sw.append(math.degrees(float(ligne[1])))
fid_mot_sw.close()
#plt.plot(temps,mot_sw)
#plt.plot(mot_sw,bras_sw)
#plt.show()
from scipy import stats
slope, intercept, r_value, p_value, std_err = stats.linregress(mot_sw,bras_sw)
regress = [slope*i + intercept for i in mot_sw]
titre = str(slope)+"gamma"+str(intercept)
lin = [slope*i -9 for i in mot_sw]
#plt.plot(mot_sw,regress,label="Régression linéaire")
plt.plot(mot_sw,lin,label="Linéarisation")
plt.plot(mot_sw,bras_sw,'r',linewidth=2,label="Modèle SW")
plt.grid()
plt.legend()
plt.show()
| [
"matplotlib"
] |
15f88f5d9e2d119c5c0cdafd2430e80b5ce8a9fa | Python | mzio/bimatrix-game-net | /data_utils.py | UTF-8 | 3,740 | 3.21875 | 3 | [] | no_license | import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import KFold
def view_game_matrix(df_row):
"""
View payoffs in 3x3 matrix format
args:
df_row : Pandas series object, e.g. df.iloc[0]
returns
[row_payoffs, col_payoffs] : np.array (2x3x3)
"""
return df_row.values.reshape(2, 3, 3)
def normalize(matrix):
"""
Method to normalize a given matrix
args:
matrix : np.array, the payouts
output:
np.array, the matrix normalized
"""
min_val = np.min(matrix)
max_val = np.max(matrix)
return (matrix - min_val) / (max_val - min_val)
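# Example (illustrative): normalize(np.array([0., 5., 10.])) -> array([0. , 0.5, 1. ])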
def get_payoff_matrices(df, rows_first=False, normalized=True):
"""
Convert input dataframe into new normed payoff data
args:
df : pandas.DataFrame
rows_first : bool, if true, separate data into first row payoffs and then columns,
otherwise row and col payoffs lumped next to each other
normalized : bool, if true, normalize the matrices
output:
np.array, the converted data
"""
if normalized:
df = df.apply(normalize, axis=0)
matrices = np.zeros([2 * df.shape[0], 3, 3])
for row_ix in range(df.shape[0]):
payoffs = view_game_matrix(df.iloc[row_ix])
if rows_first:
matrices[row_ix] = payoffs[0]
matrices[row_ix + df.shape[0]] = payoffs[1]
else:
matrices[row_ix * 2] = payoffs[0]
matrices[row_ix * 2 + 1] = payoffs[1]
return matrices
def transform(payoffs, rotations): # rotations = [2, 1, 0]
new_payoffs = np.zeros(payoffs.shape)
for ix in range(len(rotations)):
new_payoffs[:, ix] = payoffs[:, rotations[ix]]
return new_payoffs
def expand_channels(game_data, channels='all'):
"""
Return alternate channels for model input
args:
channels : str, one of 'all', 'payoffs', 'diffs', 'row'
"""
if channels == 'payoffs':
return game_data
# Difference between the row and col payoffs
game_data_rc_diffs = game_data[:, 0, :, :] - game_data[:, 1, :, :]
# Difference between the payoff max and payoffs
# Get maxes
max_rows = np.max(game_data[:, 0, :, :], axis=(1, 2))
max_cols = np.max(game_data[:, 1, :, :], axis=(1, 2))
# Expand and convert to previous data shape
max_rows = np.repeat(max_rows, 9).reshape((1500, 3, 3))
max_cols = np.repeat(max_cols, 9).reshape((1500, 3, 3))
game_data_maxdiff_r = game_data[:, 0, :, :] - max_rows
game_data_maxdiff_c = game_data[:, 1, :, :] - max_cols
game_data_diffs = np.array(
[game_data_rc_diffs, game_data_maxdiff_r, game_data_maxdiff_c]).transpose(1, 0, 2, 3)
game_data_combined = np.concatenate([game_data, game_data_diffs], axis=1)
game_data_row = np.concatenate([np.expand_dims(
game_data[:, 0, :, :], 1), game_data_diffs[:, 0:2, :, :]], axis=1)
if channels == 'diffs':
return game_data_diffs
elif channels == 'row':
return game_data_row
else:
return game_data_combined
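# Shape sketch (illustrative; the reshape above hard-codes a 1500-game dataset):
#   input game_data          : (1500, 2, 3, 3)  [row payoffs, col payoffs]
#   channels='payoffs'       : (1500, 2, 3, 3)
#   channels='diffs'         : (1500, 3, 3, 3)
#   channels='row'           : (1500, 3, 3, 3)
#   channels='all' (default) : (1500, 5, 3, 3)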
def get_splits(data, labels, num_splits):
"""
Returns splits for data
Output:
Array of splits, each split = [train_data, train_labels,
test_data, test_labels]
"""
splits = []
kf = KFold(n_splits=num_splits)
for train_val_ix, test_ix in kf.split(data):
train_val_data, test_data = np.array(
data[train_val_ix]), np.array(data[test_ix])
train_val_labels, test_labels = np.array(
labels[train_val_ix]), np.array(labels[test_ix])
splits.append(
[train_val_data, train_val_labels, test_data, test_labels])
return splits
| [
"matplotlib"
] |
b7de0221e14ae0a798b49cf71996f0c7a2386bea | Python | waleedahmed90/trend_calculation-with_hashtag_usage | /newCoalition.py | UTF-8 | 2,635 | 2.703125 | 3 | [] | no_license | import gzip
import json
import glob
import itertools
from pandas import DataFrame
#import matplotlib.pyplot as plt
import ntpath
import time
#create month based tweets count and calculate monthly surges OR maybe something else not sure yet what to do in this code
if __name__== "__main__":
start_time = time.time()
gend_link = '/Users/WaleedAhmed/Documents/THESIS_DS_CODE/June++/dataReadCode_2/Percentage_Demographics/Gender_Percentage_User_Demographics.gz'
race_link = '/Users/WaleedAhmed/Documents/THESIS_DS_CODE/June++/dataReadCode_2/Percentage_Demographics/Race_Percentage_User_Demographics.gz'
age_link = '/Users/WaleedAhmed/Documents/THESIS_DS_CODE/June++/dataReadCode_2/Percentage_Demographics/Age_Percentage_User_Demographics.gz'
with gzip.open(gend_link, 'rt') as g:
gend_temp = g.read()
g.close()
gend_perc_dict = json.loads(gend_temp)
with gzip.open(race_link, 'rt') as r:
race_temp = r.read()
r.close()
race_perc_dict = json.loads(race_temp)
with gzip.open(age_link, 'rt') as a:
age_temp = a.read()
a.close()
age_perc_dict = json.loads(age_temp)
top_10_daily = '/Users/WaleedAhmed/Documents/THESIS_DS_CODE/June++/dataReadCode_2/Code_HashtagUsage/top_10_daily_tweets/*.gz'
list_of_files = sorted(glob.glob(top_10_daily))
print("Total Files: ", len(list_of_files))
#F = list_of_files[0]
trend_gend = {}
trend_race = {}
trend_age = {}
for F in list_of_files:
with gzip.open(F, 'rt') as f:
daily_counts = f.read()
f.close()
trending_temp_dict_per_day = json.loads(daily_counts)
for trend in trending_temp_dict_per_day.keys():
if trend in gend_perc_dict.keys():
trend_gend[trend] = gend_perc_dict[trend]
trend_race[trend] = race_perc_dict[trend]
trend_age[trend] = age_perc_dict[trend]
print(len(trend_gend))
print(len(trend_race))
print(len(trend_age))
print(trend_gend)
gend_path = '/Users/WaleedAhmed/Documents/THESIS_DS_CODE/June++/dataReadCode_2/Code_HashtagUsage/trend_perc/gend_perc.gz'
race_path = '/Users/WaleedAhmed/Documents/THESIS_DS_CODE/June++/dataReadCode_2/Code_HashtagUsage/trend_perc/race_perc.gz'
age_path = '/Users/WaleedAhmed/Documents/THESIS_DS_CODE/June++/dataReadCode_2/Code_HashtagUsage/trend_perc/age_perc.gz'
with gzip.open(gend_path, 'wb') as f1:
f1.write(json.dumps(trend_gend).encode('utf-8'))
f1.close()
with gzip.open(race_path, 'wb') as f2:
f2.write(json.dumps(trend_race).encode('utf-8'))
f2.close()
with gzip.open(age_path, 'wb') as f3:
f3.write(json.dumps(trend_age).encode('utf-8'))
f3.close()
print("Elapsed Time")
print("--- %s seconds ---" % (time.time() - start_time)) | [
"matplotlib"
] |
23d5f88f738550d71b40c9ac6ce21cb3be7fdccd | Python | denis-pakhorukov/machine-learning-labs | /lab2/points_classifier.py | UTF-8 | 1,072 | 2.6875 | 3 | [] | no_license | import numpy as np
from sklearn.neighbors import KNeighborsClassifier
import matplotlib.pyplot as plt
converters = {3: {b'green': 0, b'red': 1}.get}
samples = np.loadtxt('svmdata4.txt', delimiter='\t', skiprows=1,
usecols=(1, 2, 3), converters=converters)
X_train = samples[:, :-1]
y_train = np.array(samples[:, -1].transpose(), dtype=np.uint8)
plt.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=plt.cm.Set1)
plt.show()
samples = np.loadtxt('svmdata4test.txt', delimiter='\t', skiprows=1,
usecols=(1, 2, 3), converters=converters)
X_test = samples[:, :-1]
y_test = np.array(samples[:, -1].transpose(), dtype=np.uint8)
neighbors_settings = range(1, X_train.shape[0])
scores = []
for n_neighbors in neighbors_settings:
knn = KNeighborsClassifier(n_neighbors=n_neighbors)
knn.fit(X_train, y_train)
scores.append(knn.score(X_test, y_test))
print('optimal n_neighbors', neighbors_settings[scores.index(max(scores))])
plt.plot(neighbors_settings, scores)
plt.ylabel('Score')
plt.xlabel('n_neighbors')
plt.show()
| [
"matplotlib"
] |
e49d3125310cd12f396eb14dc2ba9861b75b7f09 | Python | LOBUTO/CANCER.GENOMICS | /BACKUP.SCRIPTS/PYTHON/58_NETWORKS_TUTORIAL.py | UTF-8 | 1,437 | 3.328125 | 3 | [] | no_license | import networkx as NX
import matplotlib.pyplot as plt
#Creating a random network
G1=NX.erdos_renyi_graph(50,0.3) # n number of nodes, probability of p of edge between nodes independent of every other edge
#Draw network
NX.draw(G1, NX.shell_layout(G1))
plt.show()
#Get node degree distribution
DD=[]
for node in G1.nodes():
DD.append(G1.degree(node))
plt.hist(DD)
plt.show()
#Get average shortest path length(Characteristic path length (L))
L=NX.average_shortest_path_length(G1)
print L
#Get the average clustering coefficient of the graph (CC)
CC=NX.average_clustering(G1)
print CC
#Creating a small-world network
SW=NX.watts_strogatz_graph(50,6,0.3) #model for small world network with parameters (nodes, number of nearest neighbors each node is connected
#to, probability of rewiring each edge)
#Draw network
NX.draw(SW, NX.shell_layout(SW))
plt.show()
#Get L
L2=NX.average_shortest_path_length(SW)
print L2
#Get CC
CC2=NX.average_clustering(SW)
print CC2
#Get node degree distribution
DD=[]
for node in SW.nodes():
DD.append(SW.degree(node))
plt.hist(DD)
plt.show()
#Creating a scale-free graph
SF=NX.scale_free_graph(50) #number of nodes
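# Note: scale_free_graph returns a *directed* multigraph, so the
# average_shortest_path_length call below may raise if the graph is not strongly
# connected; an undirected copy could be obtained with, e.g., SF_und = NX.Graph(SF)
# (illustrative, not used here).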
#Draw network
NX.draw(SF, NX.shell_layout(SF))
plt.show()
#Get L
L2=NX.average_shortest_path_length(SF)
print L2
#Get node degree distribution
DD=[]
for node in SF.nodes():
DD.append(SF.degree(node))
plt.hist(DD)
plt.show() | [
"matplotlib"
] |
d62db3b0756cdf16526964b69f5aabde4470add9 | Python | wubinbai/2020 | /project/by_data/class_0509.py | UTF-8 | 2,469 | 3 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 29 22:34:47 2020
"""
# Basic imports
import numpy as np
import pandas as pd
from pandas import Series,DataFrame
# Stock data download (pip install pandas_datareader)
import pandas_datareader as pdr
# Visualization
import matplotlib.pyplot as plt
import seaborn as sns
from datetime import datetime
# 1. Fetch the data
alibaba = pdr.get_data_yahoo("BABA")
# 2. Save the data
alibaba.to_csv('BABA.csv')
#alibaba.to_csv('BABA.csv',index=None)
# 3. Read the data back
alibaba_df = pd.read_csv("BABA.csv")
# 4. Data shape and size
print(alibaba_df.shape)
print(len(alibaba_df))
# 5. Preview the first rows (head(), default 5)
print(alibaba_df.head())
# 6. Preview the last rows (tail(), default 5)
print(alibaba_df.tail())
# 7. Column names
print(alibaba_df.columns)
# 8. DataFrame info
print(alibaba_df.info())
# 9. Line plots
alibaba_df["Adj Close"].plot(legend=True)
alibaba_df["Volume"].plot(legend=True)
# 10. Copying data (copy() vs. reference assignment)
a_copy = alibaba_df.copy()
a_copy['Low'] = 1
b_copy = a_copy
b_copy['Low'] = 2
# 11. Row filtering (boolean indexing)
mid = alibaba_df['Open']>83
open_number = alibaba_df[alibaba_df['Open']>83]
print(open_number.shape)
open_number = alibaba_df[(alibaba_df['Open']>85)&(alibaba_df['Open']<100)]
print(open_number.shape)
# 12. Column operations (map, sum, mean, std, max, min)
alibaba_df['Open1'] = alibaba_df['Open'].map(lambda x:x+1)
all_sum = alibaba_df['Open'].sum()
all_mean = alibaba_df['Open'].mean()
all_std = alibaba_df['Open'].std()
all_max = alibaba_df['Open'].max()
all_min = alibaba_df['Open'].min()
# 13. Date handling (e.g. 2015/03/01)
alibaba_df['date'] = alibaba_df['Date'].map(lambda x : datetime.strptime(x,'%Y-%m-%d'))
alibaba_df['day'] = alibaba_df['date'].dt.day
alibaba_df['week'] = alibaba_df['date'].dt.weekday
alibaba_df['month'] = alibaba_df['date'].dt.month
alibaba_df['year'] = alibaba_df['date'].dt.year
# 14. Group by target keys
data_year = alibaba_df.groupby(['year'])['Low'].sum().reset_index()
data_year = data_year.rename(columns={"Low": "Low_y_sum"})
data_week = alibaba_df.groupby(['year','week'])['Low'].sum().reset_index()
data_week = data_week.rename(columns={"Low": "Low_w_sum"})
# 15. Merging and concatenating
alibaba_df1 = pd.merge(alibaba_df,data_year,on='year',how='left')
alibaba_df2 = pd.merge(alibaba_df,data_week,on=['year','week'],how='left')
data_year_con1 = pd.concat([data_year,data_week],axis=1)
data_year_con0 = pd.concat([data_year,data_week],axis=0)
# 16. Filling missing values
data_year_con1 = data_year_con1.fillna(0)
| [
"matplotlib",
"seaborn"
] |
767191eb14ccabe7b245387b50597d6901a4266e | Python | Pratham-Coder3009/Visualization | /data.py | UTF-8 | 275 | 2.734375 | 3 | [] | no_license | import pandas as pd
import plotly_express as px
import plotly.graph_objects as go
df = pd.read_csv(r'F:\Python Projects\Visualization\data.csv')
print(df.groupby("level")['attempt'].mean())
fig = px.scatter(df, x="student_id", y="level",color="attempt" )
fig.show() | [
"plotly"
] |
b9db90723ce1fd919bb5978988e93f16898799ab | Python | Sohyo/IDS_2017 | /Assignment5/2_1_b.py | UTF-8 | 3,219 | 2.640625 | 3 | [] | no_license | from sklearn.manifold import TSNE
from sklearn.decomposition import TruncatedSVD
import pandas as pd
import numpy as np
from ggplot import *
import ggplot
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from numpy import linalg as LA
import config as cfg
path = cfg.path
features_file = 'featuresFlowCapAnalysis2017.csv'
labels_file = 'labelsFlowCapAnalysis2017.csv'
save_fig_tsne_train = 'tsne_train_before_preprocessing.png'
save_fig_tsne_test = 'tsne_test_before_preprocessing.png'
save_fig_pca_train = 'pca_train_before_preprocessing.png'
save_fig_pca_test ='pca_test_before_preprocessing.png'
# loading training data
df_whole = pd.read_csv(path+features_file) #(359, 186)
features_train = df_whole.iloc[:179,:]
labels_train = pd.read_csv(path+labels_file)
features_test = df_whole.iloc[179:,:]
# you don't want to do t-SNE on all 186 features; reduce them first (to 70 here, via TruncatedSVD) and then run t-SNE:
X_reduced = TruncatedSVD(n_components=70, random_state=42).fit_transform(features_train.values)
tsne = TSNE(n_components=2, verbose=0, perplexity=40, n_iter=1000, random_state=42)
tsne_results = tsne.fit_transform(X_reduced,labels_train)
df_tsne = pd.DataFrame(tsne_results,columns=['x-tsne','y-tsne'])
df_tsne['label']=np.asarray(labels_train)
chart = ggplot.ggplot(df_tsne, aes(x='x-tsne', y='y-tsne', color='label') ) + geom_point() + scale_color_brewer(type='diverging', palette=4)+ggtitle("tsne for dimensionality reduction (train set)")
chart.save(path+save_fig_tsne_train)
#tSNE of test set
X_reduced = TruncatedSVD(n_components=70, random_state=42).fit_transform(features_test.values)
tsne = TSNE(n_components=2, verbose=0, perplexity=40, n_iter=1000,random_state=42)
tsne_results1 = tsne.fit_transform(X_reduced)
df_tsne1 = pd.DataFrame(tsne_results1,columns=['x-tsne','y-tsne'])
chart1 = ggplot.ggplot(df_tsne1, aes(x='x-tsne', y='y-tsne') ) + geom_point() + scale_color_brewer(type='diverging', palette=4)+ ggtitle("tsne for dimensionality reduction (test set)")
chart1.save(path+save_fig_tsne_test)
#PCA train set
plt.close()
cov = np.cov(features_train.values)#cov of features.
w, v = LA.eig(cov) #eigenvalue decomposition
X = np.array(cov)
pca = PCA(n_components=2)
X_r = pca.fit(X).transform(X)
print(pca.explained_variance_ratio_)
labels_train = np.asarray(labels_train)
y = np.asarray([int(n) for n in labels_train])
colors = ['navy', 'turquoise']
for color, i, target_name in zip(colors, [1, 2], np.array(['healthy','patient'])):
plt.scatter(X_r[y == i, 0], X_r[y == i, 1], color=color, alpha=.8,label=target_name)
plt.xlabel('x-PCA')
plt.ylabel('y-PCA')
plt.legend(loc='best', shadow=False, scatterpoints=1)
plt.title('PCA of AML dataset (train)')
plt.savefig(path+save_fig_pca_train)
#PCA test set
plt.close()
cov = np.cov(features_test.values)#cov of features.
w, v = LA.eig(cov) #eigenvalue decomposition
X = np.array(cov)
pca = PCA(n_components=2)
X_r = pca.fit(X).transform(X)
print(pca.explained_variance_ratio_)
for i in range(2):
plt.scatter(X_r[:,0], X_r[:,1], alpha=.8)
plt.xlabel('x-PCA')
plt.ylabel('y-PCA')
plt.legend(loc='best', shadow=False, scatterpoints=1)
plt.title('PCA of AML dataset (test)')
plt.savefig(path+save_fig_pca_test)
| [
"matplotlib",
"ggplot"
] |
66cc5effe452c16f8b5b2b018a8a4f19eb043dd7 | Python | phroiland/forex_algos | /Forex_Risk.py | UTF-8 | 3,760 | 3.21875 | 3 | [] | no_license | from __future__ import division
import pandas as pd
from pandas import Series, DataFrame
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style('whitegrid')
#allows us to read stock info from google, yahoo, etc.
from pandas.io.data import DataReader
#timestamps
from datetime import datetime
from pylab import figure, axes, pie, title, show
fred_currencies = ['DEXUSEU', 'DEXUSAL', 'DEXUSUK', 'DEXCAUS', 'DEXSZUS', 'DEXJPUS']
end = datetime.now()
start = datetime(end.year - 2, end.month, end.day)
for currency in fred_currencies:
#make currencies global to call as its own dataframe
globals()[currency] = DataReader(currency, 'fred', start, end)
print '\nUse the following list to input selected currency pair\n'
print 'Enter EURUSD as DEXUSEU\nEnter AUDUSD as DEXUSAL\nEnter GBPUSD as DEXUSUK\nEnter USDCAD as DEXCAUS\nEnter USDCHF as DEXSZUS\nEnter USDJPY as DEXJPUS\n'
currency = raw_input("Enter Currency Pair: ")
'''if currency == 'DEXUSEU':
currency = DEXUSEU.dropna()
currency.rename(columns = {'DEXUSEU':'Close'}, inplace = True)
elif currency == 'DEXUSAL':
currency = DEXUSAL.dropna()
currency.rename(columns = {'DEXUSAL':'Close'}, inplace = True)
elif currency == 'DEXUSUK':
currency = DEXUSUK.dropna()
currency.rename(columns = {'DEXUSUK':'Close'}, inplace = True)
elif currency == 'DEXCAUS':
currency = DEXCAUS.dropna()
currency.rename(columns = {'DEXCAUS':'Close'}, inplace = True)
elif currency == 'DEXSZUS':
currency = DEXSZUS.dropna()
currency.rename(columns = {'DEXSZUS':'Close'}, inplace = True)
elif currency == 'DEXJPUS':
currency = DEXJPUS.dropna()
currency.rename(columns = {'DEXJPUS':'Close'}, inplace = True)
else:
print "Please enter a valid currency pair."'''
#currency analysis
#currency = currency.dropna()
#analyze risk
closing_df = DataReader(fred_currencies, 'fred', start, end)
fred_ret = closing_df.pct_change()
rets = fred_ret.dropna()
area = np.pi*20
plt.scatter(rets.mean(),rets.std(),s=area)
plt.xlabel('Expected Return')
plt.ylabel('Risk')
days = 365
dt = 1/days
mu = rets.mean()[currency]
sigma = rets.std()[currency]
def monte_carlo(start_price, days, mu, sigma):
price = np.zeros(days)
price[0] = start_price
shock = np.zeros(days)
drift = np.zeros(days)
for x in xrange(1, days):
shock[x] = np.random.normal(loc = mu*dt, scale = sigma*np.sqrt(dt))
drift[x] = mu*dt
price[x] = price[x-1] + (price[x-1]*(drift[x] + shock[x]))
return price
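# Each pass through the loop above advances one step of a discretised geometric
# Brownian motion:
#   shock_t ~ Normal(mu*dt, sigma*sqrt(dt)),  drift_t = mu*dt,
#   S_t = S_{t-1} + S_{t-1}*(drift_t + shock_t)
# Illustrative single step with made-up numbers (not produced by this script):
#   S_prev = 100.0, drift_t = 0.00001, shock_t = 0.012  ->  S_t is roughly 101.2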
start_price = raw_input('Enter last price: ')
start_price = float(start_price)
for run in xrange(50):
plt.plot(monte_carlo(start_price, days, mu, sigma))
plt.xlabel('Days')
plt.ylabel('Price')
plt.title('Monte Carlo Analysis for %s' %str(currency))
runs = 10000
simulations = np.zeros(runs)
for run in xrange(runs):
simulations[run] = monte_carlo(start_price, days, mu, sigma)[days-1]
q = np.percentile(simulations, 1)  # 1st percentile of the simulated end prices, i.e. the 99% VaR quantile referenced by the q(0.99)/Var(0.99) labels below
plt.hist(simulations, bins = 200)
plt.figtext(0.6,0.8, s = 'Start Price: $%.5f' % start_price)
plt.figtext(0.6,0.7,'Mean final price: $%.5f' % simulations.mean())
plt.figtext(0.6,0.6, 'Var(0.99): $%.5f' % (start_price-q,))
plt.figtext(0.15,0.6,'q(0.99): $%.5f' % q)
plt.axvline(x=q, linewidth = 4, color = 'r')
plt.title(u'Final Price Distribution for %s after %s Days' % (currency, days), weight = 'bold')
print '\nLast Price: %.5f' % start_price
print 'Mean Price: %.5f' % simulations.mean()
print 'q (0.99) : %.5f' % q
print 'Var(0.99) : %.5f' % (start_price-q,)
#final_plot = plt.hist(simulations,bins = 200)
#final_plot = final_plot.get_figure()
#final_plot.savefig('final_plot.png') | [
"matplotlib",
"seaborn"
] |
7f23f01952fb9c5cb793bbd2c6efb92e20e9596d | Python | TINY-KE/floorplan-MapGeneralization | /src/generate_graphs.py | UTF-8 | 4,802 | 3.203125 | 3 | [] | no_license | import matplotlib.pyplot as plt
import networkx as nx
import random
def generate_graph():
G = nx.Graph()
positions = [(0, 0), (0, 1), (0, 2), (0, 3), (1, 0), (1, 1), (1, 2), (1, 3), (2, 0), (2, 1), (2, 2), (2, 3), (3, 0),
(3, 1), (3, 2), (3, 3)]
positions = [list(pos) for pos in positions]
nodes = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
edges = [(0, 4), (0, 1), (1, 5), (1, 2), (2, 6), (2, 3),
(3, 7), (4, 8), (4, 5), (5, 9), (5, 6), (6, 10),
(6, 7), (7, 11), (8, 12), (8, 9), (9, 13), (9, 10),
(10, 14), (10, 11), (11, 15), (12, 13), (13, 14), (14, 15)]
rand_int_1_x = random.randint(0, 2)
rand_int_2_x = random.randint(2, 4)
rand_int_3_x = random.randint(4, 6)
rand_int_4_x = random.randint(6, 8)
rand_int_1_y = random.randint(0, 2)
rand_int_2_y = random.randint(2, 4)
rand_int_3_y = random.randint(4, 6)
rand_int_4_y = random.randint(6, 8)
for node in nodes:
# Add some randomness to x coordinates
if positions[node][0] == 0:
positions[node][0] += rand_int_1_x
elif positions[node][0] == 1:
positions[node][0] += rand_int_2_x
elif positions[node][0] == 2:
positions[node][0] += rand_int_3_x
elif positions[node][0] == 3:
positions[node][0] += rand_int_4_x
# Add some randomness to y coordinates
if positions[node][1] == 0:
positions[node][1] += rand_int_1_y
elif positions[node][1] == 1:
positions[node][1] += rand_int_2_y
elif positions[node][1] == 2:
positions[node][1] += rand_int_3_y
elif positions[node][1] == 3:
positions[node][1] += rand_int_4_y
G.add_node(node, pos=tuple(positions[node]), label=0)
for edge in edges:
G.add_edge(edge[0], edge[1])
num_diamonds = random.randint(1, 6)
#num_diamonds = 4
# Get a random edge
idxs = random.sample(range(len(edges)), num_diamonds)  # sample over every edge index (the earlier upper bound of len(edges)-1 could never pick the last edge)
counter = 0
for idx in idxs:
G.remove_edge(edges[idx][0], edges[idx][1])
# Drawing diamond
node1 = edges[idx][0]
node2 = edges[idx][1]
pos1 = positions[node1]
pos2 = positions[node2]
center_point = ((pos1[0]+pos2[0])/2, (pos1[1]+pos2[1])/2)
# Randomize diamond
x_ran = (random.randint(2, 4))/10
y_ran = (random.randint(2, 4))/10
# If same x coordinate draw diamond certain way
if pos1[0] == pos2[0]:
new_node_1 = tuple((center_point[0], center_point[1] - y_ran))
new_node_2 = tuple((center_point[0], center_point[1] + y_ran))
new_node_3 = tuple((center_point[0] - x_ran, center_point[1]))
new_node_4 = tuple((center_point[0] + x_ran, center_point[1]))
else:
new_node_1 = tuple((center_point[0] - x_ran, center_point[1]))
new_node_2 = tuple((center_point[0] + x_ran, center_point[1]))
new_node_3 = tuple((center_point[0], center_point[1] - y_ran))
new_node_4 = tuple((center_point[0], center_point[1] + y_ran))
node_name_1 = 16 + counter
node_name_2 = 17 + counter
node_name_3 = 18 + counter
node_name_4 = 19 + counter
G.add_node(node_name_1, pos=new_node_1, label=1)
G.add_node(node_name_2, pos=new_node_2, label=1)
G.add_node(node_name_3, pos=new_node_3, label=1)
G.add_node(node_name_4, pos=new_node_4, label=1)
G.add_edge(node1, node_name_1)
G.add_edge(node_name_1, node_name_3)
G.add_edge(node_name_1, node_name_4)
G.add_edge(node_name_4, node_name_2)
G.add_edge(node_name_3, node_name_2)
G.add_edge(node_name_2, node2)
counter += 4
pos = nx.get_node_attributes(G, 'pos')
return G, pos
def main():
NUM_GRAPHS_TRAIN = 500
NUM_GRAPHS_TEST = 100
train_file = open('../data/synth_graphs/train_file_list.txt', "w+")
test_file = open('../data/synth_graphs/test_file_list.txt', "w+")
name = 'graph_'
for graph_idx in range(NUM_GRAPHS_TRAIN):
G, pos = generate_graph()
PATH = '../data/synth_graphs/training/' + name + str(graph_idx) + '.pickle'
nx.write_gpickle(G, PATH)
train_file.write(name + str(graph_idx) + '.pickle' + "\n")
for graph_idx in range(NUM_GRAPHS_TEST):
G, pos = generate_graph()
PATH = '../data/synth_graphs/testing/test_' + name + str(graph_idx) + '.pickle'
nx.write_gpickle(G, PATH)
test_file.write('test_' + name + str(graph_idx) + '.pickle' + "\n")
train_file.close()
test_file.close()
nx.draw(G, pos, node_size = 30)
plt.show()
print("done")
if __name__ == "__main__":
main()
| [
"matplotlib"
] |
1503b3dcb121ae24e4bbf1792f9390b2110e1516 | Python | IntroQG-2017/Exercise-12 | /atha_glacier_prob2.py | UTF-8 | 1,952 | 3.203125 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python3
"""Calculates flow velocities through a valley glacier cross-section.
Description:
This script calculates and plots the solution for dimensional velocity as
a function of dimensional distance from the base of a valley glacier. It also
loads and plots observed flow velocities from a file.
Usage:
./atha_glacier_prob2.py
Author:
XXX YYY - DD.MM.YYYY
"""
# Import NumPy
import numpy as np
import matplotlib.pyplot as plt
# Open and read input file
data = np.loadtxt(fname='atha_glacier_velo.txt', delimiter=',')
# Split file data columns into different variables using data slices
data_depth = # Array for data depths [m]
data_z = # Array for data elevations from bed [m]
data_u_ma = # Array for data velocities [m/a]
data_u_ms = # Array for data velocities [m/s]
#--- User-defined variables
a = # Viscosity [1/(Pa**3.0 s)]
h = # Flow height
z = np.linspace(0, h, 101) # Range of elevation values in flow for velocity calculation
data_umax = # Velocity at top of flow
data_ub = # Velocity at base of flow
n_prof = 5 # Number of velocity profiles to calculate
# Equations
u = np.zeros([n_prof, len(z)])
n = 0
# Non-dimensional velocity profile for a Newtonian or non-Newtonian fluid
# Loop over all values of the power-law exponent
for i in range():
n =
if n == 1:
else:
ub =
# Loop over all values of the height of the channel
for j in range():
# Equation 19 rearranged to solve for gamma_x
# Equation 19
# Plot results
# Loop over all values of the power-law exponent
for i in range(n_prof):
plt.plot()
# Plot observed velocities
plt.plot()
# Add axis labels and a title
plt.xlabel("")
plt.ylabel("")
plt.title("")
plt.show()
| [
"matplotlib"
] |
fb5d350db2eadf590bef6161af3803cd2831befe | Python | janpipek/physt | /src/physt/plotting/__init__.py | UTF-8 | 7,376 | 2.5625 | 3 | [
"MIT"
] | permissive | """Plotting for physt histograms.
Available backends
------------------
- matplotlib
- vega
- plotly (simple wrapper around matplotlib for 1D histograms)
- folium (just for the geographical histograms)
Calling the plotting functions
Common parameters
-----------------
There are several backends (and user-defined may be added) and several
plotting functions for each - we try to keep a consistent set of
parameters to which all implementations should try to stick (with exceptions).
All histograms
~~~~~~~~~~~~~~
write_to : str (optional)
Path to file where the output will be stored
title : str (optional)
String to be displayed as plot title (defaults to h.title)
xlabel : str (optional)
String to be displayed as x-axis label (defaults to corr. axis name)
ylabel : str (optional)
String to be displayed as y-axis label (defaults to corr. axis name)
xscale : str (optional)
If "log", x axis will be scaled logarithmically
yscale : str (optional)
If "log", y axis will be scaled logarithmically
xlim : tuple | "auto" | "keep"
ylim : tuple | "auto" | "keep"
invert_y : bool
If True, the y axis points downwards
ticks : {"center", "edge"}, optional
If set, each bin will have a tick (either central or edge)
alpha : float (optional)
The alpha of the whole plot (default: 1)
cmap : str or list
Name of the palette or list of colors or something that the
respective backend can interpret as colourmap.
cmap_normalize : {"log"}, optional
cmap_min :
cmap_max :
show_values : bool
If True, show values next to (or inside) the bins
value_format : str or Callable
How bin values (if to be displayed) are rendered.
zorder : int (optional)
text_color :
text_alpha :
text_* :
Other options that are passed to the formatting of values without the prefix
1D histograms
~~~~~~~~~~~~~
cumulative : bool
If True, show CDF instead of bin heights
density : bool
If True, does not show bin contents but contents divided by width
errors : bool
Whether to show error bars (if available)
show_stats : bool
If True, display a small box with statistical info
2D heatmaps
~~~~~~~~~~~
show_zero : bool
Whether to show bins that have 0 frequency
grid_color :
Colour of line between bins
show_colorbar : bool
Whether to display a colorbar next to the plot itself
Line plots
~~~~~~~~~~
lw (or linewidth) : int
Width of the lines
"""
from __future__ import annotations
from contextlib import suppress
from typing import TYPE_CHECKING
from physt.types import HistogramBase, HistogramCollection
from . import ascii as ascii_backend
backends: Dict[str, Any] = {}
if TYPE_CHECKING:
from typing import Any, Dict, Optional, Tuple, Union
# Use variant without exception catching if you want to debug import of backends.
# from . import matplotlib as mpl_backend
# backends["matplotlib"] = mpl_backend
# from . import folium as folium_backend
# backends["folium"] = folium_backend
# from . import vega as vega_backend
# backends["vega"] = vega_backend
# from . import plotly as plotly_backend
# backends["plotly"] = plotly_backend
with suppress(ImportError):
from . import matplotlib as mpl_backend
backends["matplotlib"] = mpl_backend
with suppress(ImportError):
from . import vega as vega_backend
backends["vega"] = vega_backend
with suppress(ImportError):
from . import plotly as plotly_backend
backends["plotly"] = plotly_backend
with suppress(ImportError):
from . import folium as folium_backend
backends["folium"] = folium_backend
backends["ascii"] = ascii_backend
_default_backend: Optional[str] = None
if backends:
_default_backend = list(backends.keys())[0]
def set_default_backend(name: str) -> None:
"""Choose a default backend."""
global _default_backend # pylint: disable=global-statement
if name == "bokeh":
raise ValueError(
"Support for bokeh has been discontinued. At some point, we may return to support holoviews."
)
if name not in backends:
raise ValueError(f"Backend '{name}' is not supported and cannot be set as default.")
_default_backend = name
def get_default_backend() -> Optional[str]:
"""The backend that will be used by default with the `plot` function."""
return _default_backend
def _get_backend(name: Optional[str] = None) -> Tuple[str, Any]:
"""Get a plotting backend.
Tries to get it using the name - or the default one.
"""
if not backends:
raise RuntimeError(
"No plotting backend available. Please, install matplotlib (preferred), plotly or vega (more limited)."
)
if not name:
name = _default_backend
if not name:
raise RuntimeError("No backend for physt plotting.")
if name == "bokeh":
raise NotImplementedError(
"Support for bokeh has been discontinued. At some point, we may return to support holoviews."
)
backend = backends.get(name)
if not backend:
available = ", ".join(backends.keys())
raise RuntimeError(f"Backend {name} does not exist. Use one of the following: {available}.")
return name, backend
def plot(
histogram: Union[HistogramBase, HistogramCollection],
kind: Optional[str] = None,
backend: Optional[str] = None,
**kwargs,
):
"""Universal plotting function.
All keyword arguments are passed to the plotting methods.
Parameters
----------
kind: Type of the plot (like "scatter", "line", ...), similar to pandas
"""
backend_name, backend_impl = _get_backend(backend)
if kind is None:
kinds = [t for t in backend_impl.types if histogram.ndim in backend_impl.dims[t]] # type: ignore
if not kinds:
raise RuntimeError(f"No plot type is supported for {histogram.__class__.__name__}")
kind = kinds[0]
if kind in backend_impl.types:
method = getattr(backend_impl, kind)
return method(histogram, **kwargs)
else:
raise RuntimeError(f"Histogram type error: {kind} missing in backend {backend_name}.")
class PlottingProxy:
"""Proxy enabling to call plotting methods on histogram objects.
It can be used both as a method or as an object containing methods. In any case,
it only forwards the call to the universal plot() function.
The __dir__ method should offer all plotting methods supported by the currently
selected backend.
Example
-------
plotter = histogram.plot
plotter(...) # Plots using defaults
plotter.bar(...) # Plots as a specified plot type ("bar")
Note
----
Inspiration taken from the way how pandas deals with this.
"""
def __init__(self, h: Union[HistogramBase, HistogramCollection]):
self.histogram = h
def __call__(self, kind: Optional[str] = None, **kwargs):
"""Use the plotter as callable."""
return plot(self.histogram, kind=kind, **kwargs)
def __getattr__(self, name: str):
"""Use the plotter as a proxy object with separate plotting methods."""
def plot_function(**kwargs):
return plot(self.histogram, name, **kwargs)
return plot_function
def __dir__(self):
_, backend = _get_backend()
return tuple((t for t in backend.types if self.histogram.ndim in backend.dims[t]))
| [
"matplotlib",
"plotly"
] |
608dceb34776dfc7947d163ca31cba764c4a563c | Python | zhxing001/ML_DL | /Tensorflow_study/NN_kNN/Nearest_neighbor.py | UTF-8 | 3,151 | 3.328125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Wed Nov 8 09:50:23 2017
@author: zhxing
"""
import pickle as p
import matplotlib.pyplot as plt
import numpy as np
class NearestNeighbor(object):
def __init__(self):
pass
def train(self,X,y):
self.Xtr=X
self.ytr=y
def predict(self,X):
num_test=X.shape[0]
# make sure the output array has the same dtype as the training labels
Ypred=np.zeros(num_test,dtype=self.ytr.dtype)
for i in range(num_test):
# for the i-th test image, find the most similar image in the training set
distance=np.sum(np.abs(self.Xtr-X[i,:]),axis=1) # L1 distance: sum of absolute differences against every training image
min_index=np.argmin(distance) # index of the smallest distance
Ypred[i]=self.ytr[min_index] # assign the label of that nearest training image
return Ypred
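# Tiny illustration of the L1 (Manhattan) distance used in predict() above
# (made-up vectors, not CIFAR data):
#   a = [1, 2, 3], b = [2, 0, 3]  ->  sum(|a - b|) = 1 + 2 + 0 = 3
# Each test image gets the label of the training image with the smallest such distance.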
def load_CIFAR_batch(filename):
with open(filename,'rb') as f:
datadict=p.load(f,encoding='latin1')
X=datadict['data']
Y=datadict['labels']
Y=np.array(Y) # the labels are loaded as a Python list; convert them to a NumPy array
return X,Y
def load_CIFAR_labels(filename):
with open(filename,'rb') as f:
label_names=p.load(f,encoding='latin1')
names=label_names['label_names']
return names
label_names = load_CIFAR_labels("C:/Users/zhxing/Desktop/python/data/cifar-10-batches-py/batches.meta") # this file stores the label names
imgX1, imgY1 = load_CIFAR_batch("C:/Users/zhxing/Desktop/python/data/cifar-10-batches-py/data_batch_1")
imgX2, imgY2 = load_CIFAR_batch("C:/Users/zhxing/Desktop/python/data/cifar-10-batches-py/data_batch_2")
imgX3, imgY3 = load_CIFAR_batch("C:/Users/zhxing/Desktop/python/data/cifar-10-batches-py/data_batch_3")
imgX4, imgY4 = load_CIFAR_batch("C:/Users/zhxing/Desktop/python/data/cifar-10-batches-py/data_batch_4")
imgX5, imgY5 = load_CIFAR_batch("C:/Users/zhxing/Desktop/python/data/cifar-10-batches-py/data_batch_5")
# 50,000 training images (the five batches above)
Xte_rows, Yte = load_CIFAR_batch("C:/Users/zhxing/Desktop/python/data/cifar-10-batches-py/test_batch")
# 10,000 test images
Xtr_rows=np.concatenate((imgX1,imgX2,imgX3,imgX4,imgX5))
Ytr_rows=np.concatenate((imgY1,imgY2,imgY3,imgY4,imgY5))
# concatenated: the 50,000 training images and their labels
nn= NearestNeighbor() # create a classifier object
nn.train(Xtr_rows[:1000,:],Ytr_rows[:1000]) # 'train' on the first 1000 samples (using all of them also works but is very slow); training here just stores the data
Yte_predict = nn.predict(Xte_rows[:1000,:]) # predict labels on the test images
# and now print the classification accuracy, which is the average number
# of examples that are correctly predicted (i.e. label matches)
print('accuracy: %f' % (np.mean(Yte_predict == Yte[:1000]))) # a correct prediction contributes 1, so the mean over the test samples is the accuracy
# show a picture
image=imgX1[6,0:1024].reshape(32,32) # take the 7th image (only its first 1024 bytes, i.e. one colour channel); view from a distance to make it out
print(image.shape)
plt.imshow(image,cmap=plt.cm.gray)
plt.axis('off') # hide the axes around the image
plt.show() | [
"matplotlib"
] |
a185410ee1b0d1207a4b0d1c71fff65e057489a6 | Python | donovan-k/StudentPerfDataAnalysis | /compare_by_greater.py | UTF-8 | 2,083 | 3.546875 | 4 | [] | no_license | # File compares male and female scores by the amount that are greater than 80
# libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import ScoreGreater as sG
# read in data
df = pd.read_csv('StudentsPerformance.csv', index_col=0)
# set width of bar
barWidth = 0.25
# set height of bar
group_male = sG.ScoreGreater(df.drop('female'), 80)
group_female = sG.ScoreGreater(df.drop('male'), 80)
male_well_math = group_male.math_well()
fem_well_math = group_female.math_well()
male_well_read = group_male.read_well()
fem_well_read = group_female.read_well()
male_well_writ = group_male.writ_well()
fem_well_writ = group_female.writ_well()
def well_counter(group, score):
group_count = 0
for j in group[score]:
if j > group_male.gr_good:
group_count = group_count + 1
return group_count
male_math_count = well_counter(male_well_math, 'math score')
fem_math_count = well_counter(fem_well_math, 'math score')
male_read_count = well_counter(male_well_read, 'reading score')  # use the reading/writing subsets computed above (the math subset was reused here by mistake)
fem_read_count = well_counter(fem_well_read, 'reading score')
male_writ_count = well_counter(male_well_writ, 'writing score')
fem_writ_count = well_counter(fem_well_writ, 'writing score')
bars1 = [male_math_count, fem_math_count] # Did well in math
bars2 = [male_read_count, fem_read_count] # Did well in reading
bars3 = [male_writ_count, fem_writ_count] # Did well in writing
# Set position of bar on X axis
r1 = np.arange(len(bars1))
r2 = [x + barWidth for x in r1]
r3 = [x + barWidth for x in r2]
# Make the plot
plt.bar(r1, bars1, color='#7f6d5f', width=barWidth, edgecolor='white', label='Math')
plt.bar(r2, bars2, color='#557f2d', width=barWidth, edgecolor='white', label='Reading')
plt.bar(r3, bars3, color='#2d7f5e', width=barWidth, edgecolor='white', label='Writing')
# Add xticks on the middle of the group bars
plt.xlabel('Gender(male or female)', fontweight='bold')
plt.xticks([r + barWidth for r in range(len(bars1))], ['male', 'female'])
# Create legend & Show graphic
plt.legend()
plt.savefig('m_to_f_compare_grthan80.png')
| [
"matplotlib"
] |
387586e6cf3c99c6f74f44bbf05bf341ae78d8a9 | Python | irfan-gh/MO-PaDGAN-Optimization | /airfoil/run_batch_experiments_bo.py | UTF-8 | 8,410 | 2.625 | 3 | [
"MIT"
] | permissive | """
Author(s): Wei Chen ([email protected])
"""
import os
import itertools
import numpy as np
import matplotlib.pyplot as plt
from scipy.spatial.distance import directed_hausdorff
from tqdm import tqdm
from cfg_reader import read_config
from shape_plot import plot_shape
def non_dominated_sort(objectives):
"""
Computes the non-dominated set for a set of data points
:param objectives: data points
:return: tuple of the non-dominated set and the degree of dominance,
dominances gives the number of dominating points for each data point
"""
extended = np.tile(objectives, (objectives.shape[0], 1, 1))
dominance = np.sum(np.logical_and(np.all(extended <= np.swapaxes(extended, 0, 1), axis=2),
np.any(extended < np.swapaxes(extended, 0, 1), axis=2)), axis=1)
pareto = objectives[dominance==0]
ind_pareto = np.where(dominance==0)
return pareto, ind_pareto, dominance
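# Small worked example of the dominance count above (both objectives minimised;
# made-up numbers, not airfoil results):
#   objectives = [[1, 5], [2, 6], [3, 1]]
#   [2, 6] is dominated by [1, 5] (no better in any objective, strictly worse in one),
#   so dominance = [0, 1, 0] and the returned Pareto set is [[1, 5], [3, 1]].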
def plot_airfoils(airfoils, perfs, ax):
n = airfoils.shape[0]
zs = np.vstack((np.zeros(n), 0.5*np.arange(n))).T
for i in range(n):
plot_shape(airfoils[i], zs[i, 0], zs[i, 1], ax, 1., False, None, c='k', lw=1.2, alpha=.7)
plt.annotate(r'$C_L={:.2f}$'.format(perfs[i,0]) +'\n' + '$C_L/C_D={:.2f}$'.format(perfs[i,1]), xy=(zs[i, 0], zs[i, 1]-0.3), size=14)
ax.axis('off')
ax.axis('equal')
def novelty_score(airfoil_gen, airfoils_data):
min_dist = np.inf
for airfoil in tqdm(airfoils_data):
dist_ab = directed_hausdorff(airfoil_gen, airfoil)[0]
dist_ba = directed_hausdorff(airfoil, airfoil_gen)[0]
dist = np.maximum(dist_ab, dist_ba)
if dist < min_dist:
min_dist = dist
nearest_airfoil = airfoil
return min_dist, nearest_airfoil
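# The loop above computes the symmetric Hausdorff distance
#   dist(A, B) = max(directed_hausdorff(A, B), directed_hausdorff(B, A)),
# i.e. the largest distance from any point of one outline to the closest point of the
# other; the novelty score is the minimum of this distance over the whole training set
# (smaller = closer to an existing design).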
if __name__ == "__main__":
list_models = ['MO-PaDGAN', 'GAN', 'SVD', 'FFD']
config_fname = 'config.ini'
n_runs = 10
''' Plot optimization history '''
linestyles = ['-', '--', ':', '-.']
iter_linestyles = itertools.cycle(linestyles)
colors = ['#003f5c', '#7a5195', '#ef5675', '#ffa600']
iter_colors = itertools.cycle(colors)
plt.figure()
for model_name in list_models:
if model_name == 'FFD':
parameterization = 'ffd'
elif model_name == 'SVD':
parameterization = 'svd'
elif model_name == 'GAN':
parameterization = 'gan'
lambda0, lambda1 = 0., 0.
else:
parameterization = 'gan'
_, _, _, _, _, _, _, lambda0, lambda1, _ = read_config(config_fname)
if parameterization == 'gan':
save_dir = 'trained_gan/{}_{}/optimization_bo'.format(lambda0, lambda1)
else:
save_dir = '{}/optimization_bo'.format(parameterization)
list_hv_history = []
for i in range(n_runs):
hv_history_path = '{}/{}/hv_history.npy'.format(save_dir, i)
if not os.path.exists(hv_history_path):
if parameterization == 'gan':
os.system('python optimize_bo.py {} --lambda0={} --lambda1={} --id={}'.format(parameterization, lambda0, lambda1, i))
else:
os.system('python optimize_bo.py {} --id={}'.format(parameterization, i))
list_hv_history.append(np.load(hv_history_path))
list_hv_history = np.array(list_hv_history)
mean_hv_history = np.mean(list_hv_history, axis=0)
std_hv_history = np.std(list_hv_history, axis=0)
iters = np.arange(1, len(mean_hv_history)+1)
color = next(iter_colors)
plt.plot(iters, mean_hv_history, ls=next(iter_linestyles), c=color, label=model_name)
plt.fill_between(iters, mean_hv_history-std_hv_history, mean_hv_history+std_hv_history, color=color, alpha=.2)
plt.legend(frameon=True, title='Parameterization')
plt.xlabel('Number of Evaluations')
plt.ylabel('Hypervolume Indicator')
plt.tight_layout()
plt.savefig('opt_history_bo.svg')
plt.close()
''' Plot Pareto points '''
iter_colors = itertools.cycle(colors)
markers = ['s', '^', 'o', 'v']
iter_markers = itertools.cycle(markers)
dict_pf_x_sup = dict()
dict_pf_y_sup = dict()
plt.figure()
for model_name in list_models:
if model_name == 'FFD':
parameterization = 'ffd'
elif model_name == 'SVD':
parameterization = 'svd'
elif model_name == 'GAN':
parameterization = 'gan'
lambda0, lambda1 = 0., 0.
else:
parameterization = 'gan'
_, _, _, _, _, _, _, lambda0, lambda1, _ = read_config(config_fname)
if parameterization == 'gan':
save_dir = 'trained_gan/{}_{}/optimization_bo'.format(lambda0, lambda1)
else:
save_dir = '{}/optimization_bo'.format(parameterization)
list_pareto_x = []
list_pareto_y = []
for i in range(n_runs):
x_history_path = '{}/{}/x_hist.npy'.format(save_dir, i)
y_history_path = '{}/{}/y_hist.npy'.format(save_dir, i)
x_history = np.load(x_history_path)
y_history = np.load(y_history_path)
pf_y, pf_ind, _ = non_dominated_sort(y_history)
pf_x = x_history[pf_ind]
list_pareto_x.append(pf_x)
list_pareto_y.append(pf_y)
list_pareto_x = np.concatenate(list_pareto_x, axis=0)
list_pareto_y = np.concatenate(list_pareto_y, axis=0)
plt.scatter(-list_pareto_y[:,0], -list_pareto_y[:,1], c=next(iter_colors), marker=next(iter_markers), label=model_name)
# Find the non-dominated set from all the Pareto sets
pf_y_sup, pf_ind_sup, _ = non_dominated_sort(list_pareto_y)
pf_x_sup = list_pareto_x[pf_ind_sup]
dict_pf_x_sup[model_name] = pf_x_sup
dict_pf_y_sup[model_name] = pf_y_sup
plt.legend(frameon=True, title='Parameterization')
plt.xlabel(r'$C_L$')
plt.ylabel(r'$C_L/C_D$')
plt.tight_layout()
plt.savefig('pareto_pts_bo.svg')
plt.close()
''' Plot top-ranked airfoils '''
max_n_airfoils = max([len(dict_pf_x_sup[model_name]) for model_name in list_models])
fig = plt.figure(figsize=(len(list_models)*2.4, max_n_airfoils*1.5))
for i, model_name in enumerate(list_models):
# Plot top-ranked airfoils
ax = fig.add_subplot(1, len(list_models), i+1)
ind = np.argsort(dict_pf_y_sup[model_name][:,0])
plot_airfoils(dict_pf_x_sup[model_name][ind], -dict_pf_y_sup[model_name][ind], ax)
ax.set_title(model_name)
plt.tight_layout()
plt.savefig('pareto_airfoils_bo.svg')
plt.close()
''' Plot novelty scores for top-ranked airfoils '''
airfoils_data = np.load('data/xs_train.npy')
fig = plt.figure(figsize=(6.5, 3.5))
gen_airfoils = []
nearest_airfoils = []
novelty_scores = []
for i, model_name in enumerate(list_models):
print('Computing novelty indicator for {} ...'.format(model_name))
ys = []
for airfoil in dict_pf_x_sup[model_name]:
y, nearest_airfoil = novelty_score(airfoil, airfoils_data)
ys.append(y)
gen_airfoils.append(airfoil)
nearest_airfoils.append(nearest_airfoil)
novelty_scores.append(y)
xs = [i] * len(ys)
plt.scatter(xs, ys, c='#003f5c', marker='o', s=100, alpha=0.3)
plt.xticks(ticks=range(len(list_models)), labels=list_models)
plt.xlim([-.5, len(list_models)-.5])
plt.ylabel('Novelty indicator')
plt.tight_layout()
plt.savefig('pareto_novelty_bo.svg')
plt.savefig('pareto_novelty_bo.pdf')
plt.close()
''' Plot generated airfoils and their nearest neighbors '''
for i, score in enumerate(novelty_scores):
plt.figure()
plt.plot(gen_airfoils[i][:,0], gen_airfoils[i][:,1], 'r-', alpha=.5)
plt.plot(nearest_airfoils[i][:,0], nearest_airfoils[i][:,1], 'b-', alpha=.5)
plt.axis('equal')
plt.title('{:.6f}'.format(score))
plt.tight_layout()
plt.savefig('tmp/pareto_novelty_{}.svg'.format(i))
plt.close()
| [
"matplotlib"
] |
67e1bd2516b76c8246337a9911a4824c758f5f63 | Python | pranavdixit8/linear-regression-sklearn-vs-numpy | /sklearn_vs_numpy.py | UTF-8 | 2,671 | 3.296875 | 3 | [] | no_license |
import pandas as pd
from sklearn import linear_model
from numpy import *
import matplotlib.pyplot as plt
def compute_squared_mean_error(b, m , bmi_life_data):
total_error = 0
Y = bmi_life_data[['Life expectancy']]
N = float(len(Y))
for i in range(0, len(Y)):
x = bmi_life_data.loc[i, 'BMI']
y = bmi_life_data.loc[i,'Life expectancy']
total_error+=(y - (m*x + b))**2
return total_error/N
def step_gradient(current_m, current_b, bmi_life_data, learning_rate):
b_gradient = 0
m_gradient = 0
Y = bmi_life_data[['Life expectancy']]
N = float(len(Y))
for i in range (0,len(Y)):
x = bmi_life_data.loc[i, 'BMI']
y = bmi_life_data.loc[i,'Life expectancy']
b_gradient+= -(2/N) * ( y - (current_m*x + current_b))
m_gradient+= -(2/N) * x * ( y - (current_m*x + current_b))
new_m = current_m - (learning_rate*m_gradient)
new_b = current_b - (learning_rate * b_gradient)
return [new_m, new_b]
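# For reference, the update above follows the gradients of the mean squared error
#   E(m, b) = (1/N) * sum_i (y_i - (m*x_i + b))**2
#   dE/db = -(2/N) * sum_i (y_i - (m*x_i + b))
#   dE/dm = -(2/N) * sum_i x_i * (y_i - (m*x_i + b))
# Illustrative single update with made-up numbers: m = b = 0, one point (x=20, y=70),
# learning_rate = 0.0001  ->  dE/db = -140, dE/dm = -2800, so b becomes 0.014 and m becomes 0.28.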
def gradient_descent(bmi_life_data, initial_m, initial_b, learning_rate, num_iteration):
m = initial_m
b = initial_b
for i in range(num_iteration):
m,b = step_gradient(m, b , bmi_life_data , learning_rate)
return [m,b]
def predict(b,m, x):
return m*x + b
def numpy_implementation():
bmi_life_data = pd.read_csv("bmi_and_life_expectancy.csv")
initial_m = 0
initial_b = 0
num_iteration = 2000
learning_rate = 0.0001
print("Running gradient Descent:")
print("Starting with b = {}, m = {}, squared mean error = {}".format(initial_b, initial_m, compute_squared_mean_error(initial_b, initial_m, bmi_life_data)))
[m,b] = gradient_descent(bmi_life_data, initial_m, initial_b, learning_rate, num_iteration)
print("After running {} iterations, b = {}, m = {}, squared mean error = {}".format(num_iteration, b, m, compute_squared_mean_error(b, m, bmi_life_data)))
predict_life_exp = predict(b, m , bmi_life_data[['BMI']].values)
plt.scatter(bmi_life_data[['BMI']][0:-20], bmi_life_data[['Life expectancy']][0:-20], color='black')
plt.plot(bmi_life_data[['BMI']], predict_life_exp , color='b', label = "numpy",linewidth=3)
plt.draw()
def sklearn_implementation():
bmi_life_data = pd.read_csv("bmi_and_life_expectancy.csv")
lr_model = linear_model.LinearRegression()
lr_model.fit(bmi_life_data[['BMI']][0:-20], bmi_life_data[['Life expectancy']][0:-20])
predict_life_exp = lr_model.predict(bmi_life_data[['BMI']])
plt.plot(bmi_life_data[['BMI']], predict_life_exp , color='r', label = "sklearn",linewidth=3)
plt.xlabel("BMI")
plt.ylabel("Life expectancy")
plt.draw()
if __name__ == '__main__':
numpy_implementation()
sklearn_implementation()
plt.legend()
plt.show()
| [
"matplotlib"
] |
8a7ca06994d15aad9ab20591bd270fc4d6b63471 | Python | yuanjingjy/AHE | /DataProcess_MachineLearning/Featureselection_MachineLearning/FS.py | UTF-8 | 8,546 | 2.734375 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2018/5/8 8:36
# @Author : YuanJing
# @File : FS.py
"""
Description:
1. First compute the Relief, Fisher-score and Gini-index scores, normalize them and add them up to get the final score.
2. Sort the features by the combined score.
3. Add features one by one in that order, record the mean and std of the BER, and save the resulting curves.
Notes: 1. The AdaBoost algorithm can only separate the labels 1 and -1.
2. For logistic regression the first term is the constant (bias) column, which is added inside every fold.
4. The large commented-out blocks are the correlation coefficient and Fisher score re-implemented from the formulas in the literature.
5. The final feature selection uses the scikit-feature package.
OutputMessage:
sorteigen: the scores obtained by each method plus the features sorted by the combined score,
saved as FSsort.csv
sortFeatures: all features reordered according to sorteigen, used to train the final model,
saved as sortedFeature.csv
writemean: mean BER of the current algorithm as the number of features is increased step by step
writestd: std of the BER of the current algorithm as the number of features is increased step by step
"""
import global_new as gl
import numpy as np
import pandas as pd
import sklearn.feature_selection as sfs
import ann
from sklearn.neural_network import MLPClassifier # import the classifier
from sklearn.model_selection import StratifiedKFold
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
import adaboost
import logRegres as LR
from sklearn import svm
featurenames=gl.colnames[0:80]
datamat=gl.dataMat# features normalized to [-1, 1]
dataFrame=pd.DataFrame(datamat,columns=featurenames)
labelmat=gl.labelMat
[n_sample,n_feature]=np.shape(datamat)
"""
#_------------------------- only for the AdaBoost algorithm: recode the labels as -1/1 -------#
for i in range(len(labelmat)):
if labelmat[i]==0:
labelmat[i]=-1;# AdaBoost can only separate the labels -1 and 1
"""
"""
#------------------calculate the Correlation criterion——YJ---------------------#
mean_feature=np.mean(datamat,axis=0)#average of each feature
[n_sample,n_feature]=np.shape(datamat)
mean_label=np.mean(labelmat)#average of label
corup=[]
cordown=[]
label_series=labelmat-mean_label
for j in range(n_feature):
tmp_up=sum((datamat[:,j]-mean_feature[j])*label_series)
corup.append(tmp_up)
# compute the denominator of the correlation-coefficient formula
down_feature=np.square(datamat[:,j]-mean_feature[j])
down_label=np.square(label_series)
tmp_down=np.sqrt(sum(down_feature)*sum(down_label))
cordown.append(tmp_down)
corvalue=np.array(corup)/np.array(cordown)
corvalue=np.abs(corvalue)
#------------calculate the Fisher criterion——YJ--------------#
df=np.column_stack((datamat,labelmat))# stack the features and the labels together
positive_set=df[df[:,80]==1]
negtive_set=df[df[:,80]==0]
positive_feaure=positive_set[:,0:80]# features of the positive class
negtive_feature=negtive_set[:,0:80]# features of the negative class
[sample_pos,feature_pos]=np.shape(positive_feaure)
[sample_neg,feature_neg]=np.shape(negtive_feature)
mean_pos=np.mean(positive_feaure,axis=0)# mean of every feature within the positive class
mean_neg=np.mean(negtive_feature,axis=0)# mean of every feature within the negative class
std_pos=np.std(positive_feaure,ddof=1,axis=0)# std of every feature within the positive class
std_neg=np.std(negtive_feature,ddof=1,axis=0)# std of every feature within the negative class
F_up=np.square(mean_pos-mean_feature)+np.square(mean_neg-mean_feature)
F_down=np.square(std_pos)+np.square(std_neg)
F_score=F_up/F_down
"""
#------------calculate the FS score with scikit-feature package--------------#
from skfeature.function.similarity_based import fisher_score
from skfeature.function.similarity_based import reliefF
from skfeature.function.statistical_based import gini_index
Relief = reliefF.reliefF(datamat, labelmat)
Fisher= fisher_score.fisher_score(datamat, labelmat)
gini= gini_index.gini_index(datamat,labelmat)
gini=-gini
FSscore=np.column_stack((Relief,Fisher,gini))# stack the three score vectors column-wise
FSscore=ann.preprocess(FSscore)
FinalScore=np.sum(FSscore,axis=1)
FS=np.column_stack((FSscore,FinalScore))
FS_nor=ann.preprocess(FS)# normalize again so that the combined score in the last column is also scaled
FS=pd.DataFrame(FS_nor,columns=["Relief", "Fisher","gini","FinalScore"],index=featurenames)
# FS.to_csv("F:\Githubcode\AdaBoost\myown\FSscore.csv")
sorteigen=FS.sort_values(by='FinalScore',ascending=False,axis=0)
sorteigen.to_csv('FSsort.csv')
#------------cross-validation with ANN--------------#
meanfit=[]# mean BER for every number of features as features are added one by one
stdfit=[]# std of the BER for every number of features
names=sorteigen.index# feature names sorted by the combined score
# sortfeatures is meant for the individual algorithms (taking the first n features); it is not used below
sortfeatures=dataFrame[names]# reorder the feature columns by score
sortfeatures['classlabel']=labelmat
sortfeatures.to_csv('sortedFeature.csv')# result of sorting all the features
for i in range(80):
print("第%s个参数:"%(i+1))
index=names[0:i+1]
dataMat=dataFrame.loc[:,index]
dataMat=np.array(dataMat)
labelMat=labelmat
skf = StratifiedKFold(n_splits=10)
scores=[]# BER of each of the ten folds
mean_score=[]# mean BER over the folds for the current number of features
std_score=[]# std of the BER over the folds for the current number of features
k=0;
for train, test in skf.split(dataMat, labelMat):
k=k+1
# print("%s %s" % (train, test))
print("----第%s次交叉验证:" %k)
train_in = dataMat[train]
test_in = dataMat[test]
train_out = labelMat[train]
test_out = labelMat[test]
n_train=np.shape(train_in)[0]
n_test=np.shape(test_in)[0]
"""
#--------- special handling for logistic regression (LR)
addones_train = np.ones(( n_train,1))
train_in = np.c_[addones_train, train_in]# prepend a column of ones to the training set
addones_test=np.ones((n_test,1))
test_in=np.c_[addones_test,test_in]# prepend a column of ones to the test set
"""
from imblearn.over_sampling import RandomOverSampler
train_in, train_out = RandomOverSampler().fit_sample(train_in, train_out)
"""
clf=svm.SVC(C=50,kernel='rbf',gamma='auto',shrinking=True,probability=True,
tol=0.001,cache_size=1000,verbose=False,
max_iter=-1,decision_function_shape='ovr',random_state=None)
clf.fit(train_in,train_out)#train the classifier
test_predict=clf.predict(test_in)#test the model with trainset
"""
#====================Logistic regression=============================================
# trainWeights = LR.stocGradAscent1(train_in, train_out, 500)
# len_test = np.shape(test_in)[0]
# test_predict = []
# for i in range(len_test):
# test_predict_tmp = LR.classifyVector(test_in[i, :], trainWeights)
# test_predict.append(test_predict_tmp)
# test_predict=np.array(test_predict)
#=========================================================================
# --------------------ANN----------------------------------#
clf = MLPClassifier(hidden_layer_sizes=(i + 1,), activation='tanh',
shuffle=True, solver='sgd', alpha=1e-6, batch_size=3,
learning_rate='adaptive')
clf.fit(train_in, train_out)
test_predict = clf.predict(test_in)
"""
#----------------------AdaBoost----------------------------------#
classifierArray, aggClassEst = adaboost.adaBoostTrainDS(train_in, train_out, 200);
test_predict, prob_test = adaboost.adaClassify(test_in, classifierArray); # evaluate on the test set
"""
tn, fp, fn, tp = confusion_matrix(test_out, test_predict).ravel()
BER=0.5*((fn/(tp+fn))+(fp/(tn+fp)))
scores.append(BER)
mean_score = np.mean(scores)
std_score=np.std(scores)
meanfit.append(mean_score)
stdfit.append(std_score)
#==============================================================================
meanfit = np.array(meanfit)
writemean=pd.DataFrame(meanfit)
writemean.to_csv('F:/Githubcode/AdaBoost/myown/annmean.csv', encoding='utf-8', index=True)
stdfit=np.array(stdfit)
writestd=pd.DataFrame(stdfit)
writestd.to_csv('F:/Githubcode/AdaBoost/myown/annfit.csv', encoding='utf-8', index=True)
fig, ax1 = plt.subplots()
line1 = ax1.plot(meanfit, "b-", label="BER")
ax1.set_xlabel("Number of features")
ax1.set_ylabel("BER", color="b")
plt.show()
print("test")
| [
"matplotlib"
] |
937e8a3f499e5c3a14e4dcaeaf4328131c6a1ad9 | Python | mohdazfar/Personal | /TIES583.py | UTF-8 | 3,770 | 2.921875 | 3 | [] | no_license | import math
import scipy.stats as st
from scipy.optimize import minimize
import ad
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
def get_data():
df = pd.read_csv('Data-TIES583.csv',encoding='latin-1')
return df  # return the frame so the helper can actually be used
def inventory_prob(x,y):
setup_cost = 10.
est_demand = 15.
holding_cost = 0.5
price = 20.
cost = 6.
LT = 1.2
std_est_demand = 12.5
ss = st.norm.ppf(0.95)*math.sqrt((LT*(std_est_demand**2)+est_demand))
return [est_demand*setup_cost/x+est_demand*cost+(holding_cost*y**2)/(2*x)+ price*(x-y)**2/(2*x),\
ss *x/est_demand]
def calc_ideal(f):
ideal = [0]*2 #Because two objectives
solutions = [] #list for storing the actual solutions, which give the ideal
bounds = ((1.,20.),(1.,13.)) #Bounds of the problem
for i in range(2):
res=minimize(
#Minimize each objective at the time
lambda x: f(x[0],x[1])[i], [1,1], method='SLSQP'
#Jacobian using automatic differentiation
,jac=ad.gh(lambda x: f(x[0],x[1])[i])[0]
#bounds given above
,bounds = bounds
,options = {'disp':True, 'ftol': 1e-20, 'maxiter': 1000})
solutions.append(f(res.x[0],res.x[1]))
ideal[i]=res.fun
return ideal,solutions
ideal, solutions= calc_ideal(inventory_prob)
print ("ideal is "+str(ideal))
def inventory_prob_normalized(x,y):
z_ideal = [104.3902330623304, 1.5604451636266721]
z_nadir = [240.25,21.40854560]
# import pdb; pdb.set_trace()
z = inventory_prob(x,y)
return [(zi-zideali)/(znadiri-zideali) for
(zi,zideali,znadiri) in zip(z,z_ideal,z_nadir)]
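# The comprehension above is the usual objective normalisation
#   z_norm_i = (z_i - z_ideal_i) / (z_nadir_i - z_ideal_i),
# mapping each objective roughly onto [0, 1]. Illustrative value (made-up):
#   z_1 = 172.3 with z_ideal_1 = 104.39 and z_nadir_1 = 240.25 gives z_norm_1 of about 0.5.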
# ####### Weighting Method ##########
# def weighting_method(f,w):
# points = []
# bounds = ((1.,20.),(1.,13.)) #Bounds of the problem
# for wi in w:
# res=minimize(
# #weighted sum
# lambda x: sum(np.array(wi)*np.array(f(x[0],x[1]))),
# [1,1], method='SLSQP'
# #Jacobian using automatic differentiation
# ,jac=ad.gh(lambda x: sum(np.array(wi)*np.array(f(x[0],x[1]))))[0]
# #bounds given above
# ,bounds = bounds,options = {'disp':False})
# points.append(res.x)
# return points
#
# w = np.random.random((500,2)) #500 random weights
# repr = weighting_method(inventory_prob_normalized,w)
#
# ###### Plotting #####
# f_repr_ws = [inventory_prob(repri[0],repri[1]) for repri in repr]
# fig = plt.figure()
# ax = fig.add_subplot(111, projection='3d')
# ax.scatter([f[0] for f in f_repr_ws],[f[1] for f in f_repr_ws])
# plt.show()
######## epsilon - constraint method ###########
def e_constraint_method(f,eps,z_ideal,z_nadir):
points = []
for epsi in eps:
bounds = ((1.,epsi[0]*(z_nadir[0]-z_ideal[0])+z_ideal[0]),
((epsi[1]*(z_nadir[1]-z_ideal[1])+z_ideal[1]),
40.)) #Added bounds for 2nd objective
res=minimize(
#First objective (the epsilon levels for the other objective enter through the bounds above)
lambda x: f(x[0],x[1])[0],
[1,1], method='SLSQP'
#Jacobian using automatic differentiation
,jac=ad.gh(lambda x: f(x[0],x[1])[0])[0]
#bounds given above
,bounds = bounds,options = {'disp':False})
if res.success:
points.append(res.x)
return points
z_ideal = [104.3902330623304, 1.5604451636266721]
z_nadir = [240.25,21.40854560]
eps = np.random.random((100,2))
repr = e_constraint_method(inventory_prob_normalized,eps,z_ideal,z_nadir)
f_repr_eps = [inventory_prob(repri[0],repri[1]) for repri in repr]
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter([f[0] for f in f_repr_eps],[f[1] for f in f_repr_eps])
plt.show() | [
"matplotlib"
] |
0d480e551444052fb0f8b93eaae420315d91b531 | Python | Subhomita2005/datavisualisationAndcorrPy | /code1.py | UTF-8 | 342 | 2.640625 | 3 | [] | no_license | import pandas as pd
import csv
import plotly.graph_objects as go
df = pd.read_csv("pixelMath.csv")
studentdf=df.loc[df["student_id"]=="TRL_xyz"]
print(studentdf.groupby("level")["attempt"].mean())
fig=go.Figure(go.Bar(x=studentdf.groupby("level")["attempt"].mean(),y=["level1","level2","level3","level4"],orientation="h"))
fig.show() | [
"plotly"
] |
b8d9db066068caced46bced0731787e90de3ea33 | Python | kutouxiyiji/plot_wafer | /plotTest.py | UTF-8 | 968 | 2.71875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Tue Oct 10 21:52:06 2017
@author: KTXYJ
"""
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from mpl_toolkits.mplot3d import Axes3D
from bokeh.plotting import figure, show, output_file
sns.set(color_codes=True)
df = pd.read_csv("C:/Users/ywu156243/Documents/Yong Wu/rawDATA/SPC919/plot.csv",sep=',', engine = "python")
df2 = df.pivot('X','Y','T1') #save to a matrix dataframe df2
#sns.jointplot(x="X", y="Y", data=df['T1'], kind="kde");
#sns.kdeplot(df['X'], df['Y'], n_levels=10, shade=True)
#ax = sns.heatmap(df2, robust = True, cmap="RdYlGn")
#fig = plt.figure()
#ax = Axes3D(fig)
#df.plot_trisurf(df['X'], df['Y'], df['T1'], cmap=cm.jet, linewidth=0.2)
#plt.show()
#df.plot.scatter(x='X', y='Y', s =df['T1']*20, color='DarkBlue', label='Group 1')
f, ax = plt.subplots(figsize=(7,5))
sns.heatmap(df2, annot=True, fmt="d", linewidths=.1, ax=ax)
| [
"matplotlib",
"seaborn",
"bokeh"
] |
dda9c170133e1a9639e855a50ef3d50eed4ea61f | Python | AleksandarTendjer/Animal-life-simulation | /main.py | UTF-8 | 3,992 | 2.765625 | 3 | [] | no_license | import sys
import pygame
from pygame import image
from world import World
from statistics import Stats
import os
import argparse
from terrain_gen import NoiseWidth
from terrain_gen import Map2D
import pygame_menu as pyMenu
import matplotlib.pyplot as plt
DEFAULT_SCREEN_SIZE = (960, 750)
'''def menu_show(world):
menu = pyMenu.Menu(600, 600, 'Simulation data analysis',
theme=pyMenu.themes.THEME_SOLARIZED)
menu.add.button('Start Simulation',pyMenu.events.MenuAction._action.)
menu.add.button('Exit', pyMenu.events.EXIT)
menu.mainloop(world.screen)
'''
if __name__ == "__main__":
# create parser
parser = argparse.ArgumentParser()
# add arguments to the parser
parser.add_argument('--water', help="Height level of the water", type=float, default=0.0)
parser.add_argument('--shallowwater', help="Height level of the shallow water", type=float, default=0.05)
parser.add_argument('--sand', help="Height level of the sand", type=float, default=0.1)
parser.add_argument('--land', help="Height of normal grass/land/forest", type=float, default=0.6)
parser.add_argument('--mountain', help="Height of mountains", type=float, default=0.5)
parser.add_argument('--hugemountain', help="Height of huge mountains", type=float, default=0.6)
parser.add_argument('--scale', help="Higher=zoomed in, Lower=zoomed out.", type=float, default=200)
parser.add_argument('--persistence', help="how much an octave contributes to overall shape (adjusts amplitude).",type=float, default=0.5)
parser.add_argument('--lacunarity', help="The level of detail on each octave (adjusts frequency).", type=float,
default=3.0)
parser.add_argument('--moistureo', help="Moisture octaves.", type=int, default=8)
parser.add_argument('--moistures', help="Moisture scale.", type=float, default=200)
parser.add_argument('--moisturep', help="Moisture persistence.", type=float, default=0.5)
parser.add_argument('--moisturel', help="Moisture lacunarity.", type=float, default=3.0)
parser.add_argument('--octaves', help="Octaves used for generation.", type=int, default=8)
# parse the arguments
args = parser.parse_args()
scale = args.scale
moisture_scale = args.moistures
noise_ranges = [
NoiseWidth('hugemountain', args.hugemountain),
NoiseWidth('mountain', args.mountain),
NoiseWidth('land', args.land),
NoiseWidth('sand', args.sand),
NoiseWidth('shallowwater', args.shallowwater),
NoiseWidth('water', args.water),
]
# generate terrain
noise_map = Map2D(DEFAULT_SCREEN_SIZE[0], DEFAULT_SCREEN_SIZE[1], noise_ranges)
noise_map.generate( scale, args.octaves, args.persistence, args.lacunarity)
# generate moisture
moisture_map = Map2D(DEFAULT_SCREEN_SIZE[0], DEFAULT_SCREEN_SIZE[1])
moisture_map.generate(moisture_scale, args.moistureo, args.moisturep, args.moisturel)
noise_map.moisture_map = moisture_map
tilesize=1
# display map
noise_map.display_as_image(tilesize)
file_name = 'noise_map.png'
noise_map.save_image(file_name) # save the png too
noise_map.ret_water_points()
BG_IMG = pygame.image.load(file_name)
# Start pygame
pygame.init()
# pygame screen and timer
screen = pygame.display.set_mode(DEFAULT_SCREEN_SIZE)
clock = pygame.time.Clock()
# Create world
world = World(DEFAULT_SCREEN_SIZE, clock, screen,noise_map.cells)
#menu_show(world)
paused = False
# Create Trackers
sc = Stats(world)
sc.start_all()
# Main pygame loop
while 1:
# Pause check
if not paused:
world.step()
pygame.display.flip()
screen.fill((0, 0, 0))
screen.blit(BG_IMG, [0, 0])
# pygame event handler
for event in pygame.event.get():
if event.type == pygame.QUIT:
world.running = False
break
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_SPACE:
paused = not paused
# Exit condition
if not world.running:
break
# FPS control
clock.tick(30)
# join all Trackers
sc.join_all()
sc.menu_show()
print("Simulation Finished")
sys.exit(0)
| [
"matplotlib"
] |
d46f09dd6f741f366e0c58e59d25c9c8c9f8857d | Python | Goodness10/Hash-Analytics-Internship | /Clusters of existing employees.py | UTF-8 | 1,147 | 3.109375 | 3 | [] | no_license | import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
import seaborn as sns
dataset = pd.ExcelFile('C:/Users/USER/Hash-Analytic-Python-Analytics-Problem-case-study-1 (1).xlsx')
existing_employees = dataset.parse('Existing employees')
from sklearn.cluster import KMeans
X = existing_employees.iloc[:, [1,2]].values
kmeans = KMeans(n_clusters = 4, init = 'k-means++', n_init = 10, max_iter = 300, random_state=0)
Y_kmeans = kmeans.fit_predict(X)
plt.scatter(X[Y_kmeans==0,0], X[Y_kmeans==0,1], s=100, c='red', label = 'Cluster1')
plt.scatter(X[Y_kmeans==1,0], X[Y_kmeans==1,1], s=100, c='blue', label = 'Cluster2')
plt.scatter(X[Y_kmeans==2,0], X[Y_kmeans==2,1], s=100, c='green', label = 'Cluster3')
plt.scatter(X[Y_kmeans==3,0], X[Y_kmeans==3,1], s=100, c='cyan', label = 'Cluster4')
plt.scatter(kmeans.cluster_centers_[:,0], kmeans.cluster_centers_[:,1], s=200, c='yellow', label = 'Centroids')
plt.title('Clusters of Satisfaction level vs Last Evaluation')
plt.xlabel('Satisfaction level')
plt.ylabel('Last Evaluation')
plt.legend()
plt.savefig('Clusters of existing employees.png')  # save after the legend is drawn so it shows up in the PNG
plt.show()
| [
"seaborn"
] |
9ec79c0646e0d6403228c9c42c1ebe4936741d18 | Python | crazicus/my_albums_collage | /dominant_color_plot.py | UTF-8 | 3,809 | 2.796875 | 3 | [
"MIT"
] | permissive | import random
import numpy as np
import cv2
from plotly.offline import plot
import plotly.graph_objs as go
from utils.colorutils import get_dominant_color
# reproducible
random.seed(1337)
# apps to plot
apps_of_interest = {'Facebook': 'free-apps_facebook.jpg',
'WhatsApp': 'free-apps_whatsapp_messenger.jpg',
'Waze': 'free-apps_waze_navigation_live_traffic.jpg',
'Soundcloud': 'free-apps_soundcloud_music_audio.jpg'}
# function to gen random walk for fake app data
def gen_random_walk(start=100, n_steps=50, num_range=(-100, 100),
new_step_chance=0.5):
"""
inputs:
start - start value for walk
n_steps - number of steps to take from starting point
num_range - range of values to sample for steps
new_step_chance - probability of taking step different
from last step (ie if 0 then all steps will be
same value)
output: list of values in walk
"""
# init start point for walk
walk = [start]
# gen a default step
step_val = random.randrange(num_range[0], num_range[1])
for i in range(n_steps):
# chance to take a new step or take last step again
if random.random() > (1 - new_step_chance):
# update step value
step_val = random.randrange(num_range[0], num_range[1])
# add step to last position
new_pos = walk[-1] + step_val  # step from the most recent position (walk[i - 1] lagged one element behind the end of the list)
# append new position to walk history
walk.append(new_pos)
return walk
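# Illustrative call (random, so actual outputs vary):
#   gen_random_walk(start=0, n_steps=3, num_range=(-5, 5), new_step_chance=0.0)
#   could return e.g. [0, 3, 6, 9] - with new_step_chance=0.0 the first sampled step value
#   is reused for every step, so the walk moves in a straight line.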
# init plotly trace storage
plot_traces = []
# xaxis data
plot_x = range(1, 101)
plot_images = []
# iterate over app
for name, path in apps_of_interest.items():
# gen data
app_data = gen_random_walk(
start=random.randrange(1000, 2000),
n_steps=len(plot_x) - 1,
new_step_chance=0.3)
# read in image
bgr_image = cv2.imread('icons/{}'.format(path))
# convert to HSV; this is a better representation of how we see color
hsv_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2HSV)
# get dominant color in app icon
hsv = get_dominant_color(hsv_image, k=5)
# make into array for color conversion with cv2
hsv_im = np.array(hsv, dtype='uint8').reshape(1, 1, 3)
# convert color to rgb for plotly
rgb = cv2.cvtColor(hsv_im, cv2.COLOR_HSV2RGB).reshape(3).tolist()
# make string for plotly
rgb = [str(c) for c in rgb]
# create plotly trace of line
trace = go.Scatter(
x=list(plot_x),
y=app_data,
mode='lines',
name=name,
line={
'color': ('rgb({})'.format(', '.join(rgb))),
'width': 3
}
)
# base url to include images in plotly plot
image_url = 'https://raw.githubusercontent.com/AdamSpannbauer/iphone_app_icon/master/icons/{}'
# create plotly image dict
plot_image = dict(
source=image_url.format(path),
xref='x',
yref='y',
x=plot_x[-1],
y=app_data[-1],
xanchor='left',
yanchor='middle',
sizex=5.5,
sizey=250,
sizing='stretch',
layer='above'
)
# append trace to plot data
plot_traces.append(trace)
plot_images.append(plot_image)
# drop legend, add images to plot,
# remove tick marks, increase x axis range to not cut off images
layout = go.Layout(
showlegend=False,
images=plot_images,
xaxis=dict(
showticklabels=False,
ticks='',
range=[min(plot_x), max(plot_x) + 10]
),
yaxis=dict(
showticklabels=False,
ticks=''
)
)
# create plot figure
fig = go.Figure(data=plot_traces, layout=layout)
# produce plot output
plot(fig, config={'displayModeBar': False},
filename='readme/app_plot.html')
| [
"plotly"
] |
3e55d1ac2898cea92ad730d18acaabcc64a33a18 | Python | sbu-python-class/test-repo-2018 | /plot.py | UTF-8 | 1,709 | 2.84375 | 3 | [] | no_license | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D
import os
# set file to be read in
filename = 'output.dat'
#%%
# This path if running the file from shell, i.e. python plot.py
if __name__ == '__main__':
dir_path = os.path.dirname(os.path.realpath(__file__))
print('Your file directory is:', dir_path)
print('Reading in {} from the relaxation program...'.format(filename))
df = pd.read_table(os.path.join(dir_path, filename), header=None, delim_whitespace=True)
print('Data read complete.')
print('Generating colored graph columns...')
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
length = len(df.iloc[:, 2])
if length < 1000:
size = 10
elif length < 10000:
size = 1
elif length < 50000:
size = 0.1
else:
size = 0.01
cax = ax.scatter(df.iloc[:, 0], df.iloc[:, 1], df.iloc[:, 2], c=df.iloc[:, 2], cmap='inferno')
ax.set_ylabel('$y$')
ax.set_xlabel('$x$')
ax.set_zlabel('$z$')
ax.set_autoscalez_on(True)
cb = fig.colorbar(cax, orientation='horizontal', fraction=0.05)
plt.tight_layout()
plt.savefig(os.path.join(dir_path, 'outputgraph.pdf'), dpi=800)
print('Graph drawn. See outputgraph.pdf.')
print('Generating heatmap of columns...')
f = plt.figure()
ax = f.add_subplot(111)
ax.set_ylabel('$y$')
ax.set_xlabel('$x$')
cax = ax.hexbin(x=df.iloc[:, 0], y=df.iloc[:, 1], C=df.iloc[:, 2], cmap='inferno')
cb = f.colorbar(cax)
cb.set_label('$z$')
plt.savefig(os.path.join(dir_path, 'heatmap.pdf'), dpi=800)
print('Heatmap drawn. See heatmap.pdf.')
| [
"matplotlib"
] |
2dc80fdbf622255c25e25d8c61a2ee23b802b271 | Python | quzyte/quantum-phy690w | /GP_CN.py | UTF-8 | 6,965 | 3.25 | 3 | [] | no_license | import time
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import matplotlib as mpl
from scipy.integrate import trapz
from scipy.linalg import solve_banded
# Set initial condition for Psi
def init_cond(x, y, z):
init_Psi = np.zeros([len(x), len(y), len(z)], dtype = np.complex128)
# 2D
if ny == 1:
init_Psi = np.sqrt(4)*np.sin(2*np.pi*x)*np.sin(4*np.pi*z)*np.ones_like(y)
# 3D
else:
init_Psi = np.sqrt(8)*np.sin(2*np.pi*x)*np.sin(3*np.pi*y)*np.sin(4*np.pi*z)
return init_Psi
# Potential
def V(x, y, z, nx, ny, nz):
return np.zeros([nx, ny, nz], dtype = np.float64)
# Returns the solution to equation with H_0 as Hamiltonian. Does not involve any spatial derivative
def H0(Psi, x, y, z, nx, ny, nz, dt, N):
potential = V(x, y, z, nx, ny, nz) + N*np.abs(Psi)**2
return np.exp(-1j*dt*potential)*Psi
# For Functions H1, H2, H3:
# We have to solve matrix equation Ax = b for x, where A is a tridiagonal matrix.
# However, here, b is not a simple column matrix. Instead, it's a 3D matrix.
# We have to solve the equation Ax = b for each column in b.
# For example, when we're solving the equation with H_1 (that is x derivative), we need to solve Ax = b[:, i, j] for all i and j
# We can do this using for loops. But, for loop will slow down the code.
# So, we use a scipy function solve_banded(). solve_banded() allows us to pass b as a 3D matrix
# It will return the solution x, also a 3D matrix of the same shape as b.
# However, solve_banded solves the equation Ax = b along axis 0 only.
# That is, it will return the solution A*x[:, i, j] = b[:, i, j] for all i and j.
# This would cause a problem when we're solving equation with H_2 or H_3 (that is, derivative with respect to y or z).
# To overcome this problem, we first transpose b in such a way that it is appropriate to pass b directly to solve_banded.
# Later, we would again need to transpose the solution.
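# Illustrative sketch of the shapes involved (assumed values, not executed here):
#   A: tridiagonal (n, n), stored banded as ab with shape (3, n) for solve_banded((1, 1), ab, b)
#   b: shape (n, p, q)  ->  the solver works along axis 0, i.e. one tridiagonal solve
#   per (i, j) column b[:, i, j], which is why the y- and z-sweeps transpose b first.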
# Returns the solution to equation with H_1 as Hamiltonian. Involves derivative w.r.t. x
# Banded_matrix is in the form required by the function solve_banded(). See scipy documentation.
def H1(Psi, banded_matrix, mu, nx, ny, nz):
b = mu*Psi[2:nx, :, :] + (1-2*mu)*Psi[1:nx-1, :, :] + mu*Psi[0:nx-2, :, :]
Psi_new = np.zeros([nx, ny, nz], dtype = np.complex128)
Psi_new[1:nx-1, :, :] = solve_banded((1, 1), banded_matrix, b)
return Psi_new
# Returns the solution to equation with H_2 as Hamiltonian. Involves derivative w.r.t. y
def H2(Psi, banded_matrix, mu, nx, ny, nz):
b = mu*Psi[:, 2:ny, :] + (1-2*mu)*Psi[:, 1:ny-1, :] + mu*Psi[:, 0:ny-2, :]
b = np.transpose(b, axes = [1, 0, 2])
Psi_new = np.zeros([ny, nx, nz], dtype = np.complex128)  # allocated in the same transposed (y, x, z) layout as b so the slice assignment below lines up
Psi_new[1:ny-1, :, :] = solve_banded((1, 1), banded_matrix, b)
Psi_new = np.transpose(Psi_new, axes = [1, 0, 2])
return Psi_new
# Returns the solution to equation with H_3 as Hamiltonian. Involves derivative w.r.t. z
def H3(Psi, banded_matrix, mu, nx, ny, nz):
b = mu*Psi[:, :, 2:nz] + (1-2*mu)*Psi[:, :, 1:nz-1] + mu*Psi[:, :, 0:nz-2]
b = np.transpose(b, axes = [2, 1, 0])
Psi_new = np.zeros([nz, ny, nx], dtype = np.complex128)  # allocated in the same transposed (z, y, x) layout as b so the slice assignment below lines up
Psi_new[1:nz-1, :, :] = solve_banded((1, 1), banded_matrix, b)
Psi_new = np.transpose(Psi_new, axes = [2, 1, 0])
return Psi_new
# Computes the banded matrix
def banded(mu, nx):
ret_val = np.zeros([3, nx-2], dtype = np.complex128)
ret_val[0, 1:nx-2] = -mu*np.ones(nx-3)
ret_val[1, :] = (1 + 2*mu)*np.ones(nx-2)
ret_val[2, 0:nx-3] = -mu*np.ones(nx-3)
return ret_val
# To make 2D colormaps of the solution
def plot2D(x, z, prob, t, i):
plt.close()
fig = plt.figure()
plt.axis('square')
ax = plt.gca()
probmap = ax.pcolormesh(x, z, prob, cmap = cm.jet, shading = 'auto', vmin = 0.0, vmax = 4.0)
cbar = plt.colorbar(probmap, orientation='vertical')
cbar.set_label(r'$|\psi^2|$', fontsize = 14, rotation = 0, labelpad = 20)
ax.set_title(r't = %.6f' %(t))
ax.set_xlabel(r'$x$')
ax.set_ylabel(r'$z$')
ax.set_xticks([0, 0.5, 1])
ax.set_yticks([0, 0.5, 1])
ax.set_xlim([0, 1])
ax.set_ylim([0, 1])
filename = 'filepath/fig_%03d.png' %(i)
plt.savefig(filename)
return
# Computes the evolution of Psi with respect to t.
def evolve(nx, ny, nz, m, x, y, z, t, N, skip):
# 2D
if ny == 1:
dx = x[1] - x[0]
dz = z[1] - z[0]
dt = t[1] - t[0]
# Parameter mu required to compute banded matrix
mu = 1j*dt/2/dx**2
banded_matrix = banded(mu, nx)
x, y, z = np.meshgrid(x, y, z, indexing = 'ij')
Psi = np.zeros([nx, ny, nz], dtype = np.complex128)
Psi = init_cond(x, y, z)
plot2D(x[:, 0, :], z[:, 0, :], np.abs(Psi[:, 0, :])**2, t[0], 0)
integral[0] = trapz(trapz(np.abs(Psi[:, 0, :])**2, dx = dz), dx = dx)
start = time.time()
for k in range(1, m):
Psi = H0(Psi, x, y, z, nx, ny, nz, dt, N)
Psi = H1(Psi, banded_matrix, mu, nx, ny, nz)
Psi = H3(Psi, banded_matrix, mu, nx, ny, nz)
# Plotting and computing integral at specific time steps
if k % skip == 0:
plot2D(x[:, 0, :], z[:, 0, :], np.abs(Psi[:, 0, :])**2, t[k], int(k/skip))
integral[int(k/skip)] = trapz(trapz(np.abs(Psi[:, 0, :])**2, dx = dz), dx = dx)
stop = time.time()
print('Time taken:', stop - start)
# 3D
else:
dx = x[1] - x[0]
dy = y[1] - y[0]
dz = z[1] - z[0]
dt = t[1] - t[0]
# Parameter mu required to compute banded matrix
mu = 1j*dt/2/dx**2
banded_matrix = banded(mu, nx)
x, y, z = np.meshgrid(x, y, z, indexing = 'ij')
Psi = np.zeros([nx, ny, nz], dtype = np.complex128)
Psi = init_cond(x, y, z)
# Plotting at a particular value of y-coordinate. # Since we are plotting 2D colormaps, we can't pass a 3D Psi
y_slice = 25
plot2D(x[:, y_slice, :], z[:, y_slice, :], np.abs(Psi[:, y_slice, :])**2, t[0], 0)
integral[0] = trapz(trapz(trapz(np.abs(Psi)**2, dx = dz), dx = dy), dx = dx)
start = time.time()
for k in range(1, m):
Psi = H0(Psi, x, y, z, nx, ny, nz, dt, N)
Psi = H1(Psi, banded_matrix, mu, nx, ny, nz)
Psi = H2(Psi, banded_matrix, mu, nx, ny, nz)
Psi = H3(Psi, banded_matrix, mu, nx, ny, nz)
# Plotting and computing integral at specific time steps
if k % skip == 0:
plot2D(x[:, y_slice, :], z[:, y_slice, :], np.abs(Psi[:, y_slice, :])**2, t[k], int(k/skip))
integral[int(k/skip)] = trapz(trapz(trapz(np.abs(Psi)**2, dx = dz), dx = dy), dx = dx)
stop = time.time()
print('Time taken:', stop - start)
return
nx = 64
ny = 1
nz = 64
m = 100001
# Computes the interval at which to plot colormaps.
framerate = 24
vidlen = 10
no_of_frames = framerate*vidlen
skip = int(np.floor((m - 1)/no_of_frames))
# x, y, z, t arrays
x = np.linspace(0, 1, nx)
y = np.linspace(0, 1, ny)
z = np.linspace(0, 1, nz)
t = np.linspace(0, 0.1, m)
# Array to compute integral of |\psi|^2
integral = np.zeros(int(np.floor(m/skip) + 1))
indices = skip*np.linspace(0, len(integral) - 1, len(integral), dtype = int)
t_arr = t[indices]
# Parameter to be multiplied with the non-linear term
N = 10.0
evolve(nx, ny, nz, m, x, y, z, t, N, skip)
plt.close()
| [
"matplotlib"
] |
e086d773bc7514118da5dd92ab44ea516f442f32 | Python | NightKirie/NCKU_NLP_2018_industry3 | /Packages/matplotlib-2.2.2/lib/mpl_examples/pyplots/dollar_ticks.py | UTF-8 | 488 | 3.046875 | 3 | [
"MIT"
] | permissive | """
============
Dollar Ticks
============
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
# Fixing random state for reproducibility
np.random.seed(19680801)
fig, ax = plt.subplots()
ax.plot(100*np.random.rand(20))
formatter = ticker.FormatStrFormatter('$%1.2f')
ax.yaxis.set_major_formatter(formatter)
for tick in ax.yaxis.get_major_ticks():
tick.label1On = False
tick.label2On = True
tick.label2.set_color('green')
plt.show()
| [
"matplotlib"
] |
e33f3df04b081671610a51fb0292e0e2503d87be | Python | ktdiedrich/kdeepmodel | /kdeepmodel/train_npy_model.py | UTF-8 | 8,095 | 2.546875 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env python3
"""Train model to classify images
* Default hyper-parameters set in parameter dictionary
* Override default hyper-parameters with command line or web page arguments
see: Python flask https://palletsprojects.com/p/flask/
see: Javascript React https://reactjs.org/
* Dictionary of current training hyper-parameters saved to JSON in output directory with model
* Training output and or saves intermediate images and graphs for debugging and optimization,
see: Tensorboard https://www.tensorflow.org/guide
see: https://seaborn.pydata.org/
* Optimize hyper-parameters with genetic algorithms
see: https://github.com/handcraftsman/GeneticAlgorithmsWithPython/
* Inference with another script with command line or web-page arguments
* Sample data https://www.kaggle.com/simjeg/lymphoma-subtype-classification-fl-vs-cll/
Karl Diedrich, PhD <[email protected]>
"""
import os
import numpy as np
import matplotlib
matplotlib.use('Agg') # write plots to PNG files
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
from keras.utils import to_categorical
from keras import layers
from keras import models, optimizers
import json
from keras.applications import ResNet50V2
param = dict()
param['test_size'] = 0.2
param['show'] = False
param['print'] = False
param['epochs'] = 40
param['batch_size'] = 32
param['output_dir'] = '.'
param['model_output_name'] = "trained_model.h5"
param['figure_name'] = 'training_history.png'
param['validation_split'] = 0.2
param['figure_size'] = (9, 9)
param['learning_rate'] = 2e-5
param['dropout'] = 0.5
def normalize(data):
return data/data.max()
def prepare_data(x_input, y_ground, test_size, shuffle=True, prep_x_func=None):
"""Load NPY format training and ground truth
:return: (X_train, X_test, Y_train, Y_test)
"""
X = np.load(x_input).astype(np.float)
Y = np.load(y_ground).astype(np.float)
print("X: {} {}".format(X.shape, X.dtype))
print("Y: {} {}".format(Y.shape, Y.dtype))
if prep_x_func is not None:
X = prep_x_func(X)
Y_labels = to_categorical(Y)
X_train, X_test, Y_train, Y_test = train_test_split(X, Y_labels, shuffle=shuffle, test_size=test_size)
return (X_train, X_test, Y_train, Y_test)
def create_model(input_shape, output_shape, dropout=0.5):
model = models.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=input_shape))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(128, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(128, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Flatten())
model.add(layers.Dense(512, activation='relu'))
model.add(layers.Dropout(rate=dropout))
model.add(layers.Dense(output_shape, activation='softmax'))
return model
def feature_prediction_model(input_shape, output_shape, dropout=0.5):
model = models.Sequential()
model.add(layers.Dense(256, activation="relu", input_dim=input_shape[0]))
model.add(layers.Dropout(dropout))
model.add(layers.Dense(output_shape, activation="softmax"))
return model
def extract_features(data):
conv_base = ResNet50V2(include_top=False, weights="imagenet", input_shape=data[0].shape)
features = conv_base.predict(data)
features = np.reshape(features, (len(features), np.prod(features[0].shape)))
return features
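# (clarifying note) The pre-trained ImageNet ResNet50V2 base is used here purely as a
# fixed feature extractor: its convolutional output for each image is flattened into a
# 1-D vector so that the small dense head defined above can be trained on top of it.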
def plot_history(history, ax, title, label):
epochs = range(0, len(history))
plot_ax = sns.scatterplot(x=epochs, y=history, ax=ax)
plot_ax.set_title("{}".format(title))
plot_ax.set_xlabel("epochs")
plot_ax.set_ylabel(label)
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='Load NPY format image and ground truth data for model training.')
parser.add_argument('x_input', type=str, help='X input data')
parser.add_argument("y_ground", type=str, help='Y target ground truth')
parser.add_argument("--output_dir", "-o", type=str, required=False, default=param['output_dir'],
help="output directory, default {}".format(param['output_dir']))
parser.add_argument("--test_size", "-t", type=float, action="store", default=param['test_size'], required=False,
help="test proportion size, default {}".format(param['test_size']))
parser.add_argument("--epochs", "-e", type=int, action="store", help="epochs, default {}".format(param['epochs']),
default=param['epochs'], required=False)
parser.add_argument("--batch_size", "-b", type=int, action="store", default=param['batch_size'], required=False,
help="batch size, default {}".format(param['batch_size']))
parser.add_argument("--show", "-s", action="store_true", default=param['show'], required=False,
help="show example images, default {}".format(param['show']))
parser.add_argument("--print", "-p", action="store_true", default=param['print'], required=False,
help="print statements for development and debugging, default {}".format(param['print']))
args = parser.parse_args()
param['x_input'] = args.x_input
param['y_ground'] = args.y_ground
param['test_size'] = args.test_size
param['epochs'] = args.epochs
param['batch_size'] = args.batch_size
param['show'] = args.show
param['print'] = args.print
param['output_dir'] = args.output_dir
#X_train, X_test, Y_train, Y_test = prepare_data(param['x_input'], param['y_ground'], test_size=param['test_size'],
# prep_x_func=normalize)
X_train, X_test, Y_train, Y_test = prepare_data(param['x_input'], param['y_ground'], test_size=param['test_size'],
prep_x_func=extract_features)
param['input_shape'] = X_train[0].shape
param['output_shape'] = Y_train.shape[1]
# model = create_model(input_shape=param['input_shape'], output_shape=param['output_shape'], dropout=param['dropout'])
model = feature_prediction_model(input_shape=param['input_shape'], output_shape=param['output_shape'], dropout=param['dropout'])
if args.show:
plt.imshow(X_train[0])
plt.show()
if args.print:
print("X train: {}, X test: {}, Y train: {}, Y test: {}".format(X_train.shape, X_test.shape,
Y_train.shape, Y_test.shape))
print("Y: {}".format(Y_train[0:10]))
model.summary()
model.compile(optimizer=optimizers.RMSprop(learning_rate=param['learning_rate']),
loss='categorical_crossentropy',
metrics=['accuracy'])
if not os.path.exists(param['output_dir']):
os.makedirs(param['output_dir'])
with open(os.path.join(param['output_dir'], 'param.json'), 'w') as fp:
json.dump(param, fp)
callbacks = model.fit(X_train, Y_train, epochs=param['epochs'], batch_size=param['batch_size'],
validation_split=param['validation_split'])
test_loss, test_acc = model.evaluate(X_test, Y_test)
print("test loss {}, accuracy {}".format(test_loss, test_acc))
model.save(os.path.join(param['output_dir'], param['model_output_name']))
fig, axes = plt.subplots(2, 2, sharex=True, sharey=False, figsize=param['figure_size'])
fig.suptitle('History: test: loss {:.2}, accuracy {:.2}'.format(test_loss, test_acc))
plot_history(callbacks.history['loss'], axes[0, 0], 'Training', 'loss')
plot_history(callbacks.history['accuracy'], axes[0, 1], 'Training', 'accuracy')
plot_history(callbacks.history['val_loss'], axes[1, 0], 'Validation', 'loss')
plot_history(callbacks.history['val_accuracy'], axes[1, 1], 'Validation', 'accuracy')
plt.savefig(os.path.join(param['output_dir'], param['figure_name']))
print("fin")
| [
"matplotlib",
"seaborn"
] |
424f755e91aaf82b0be9dcb1890581603a4956b2 | Python | rrutz/Learning | /Machine Learning(python)/Unsupervised Learning/Association Rules.py | UTF-8 | 13,993 | 3.125 | 3 | [] | no_license | # The Apriori Algorithm
import pandas as pd
import numpy as np
from itertools import combinations
import matplotlib.pyplot as plt
def getDate():
marketingData = pd.read_csv("C:\\Users\\Ruedi\\OneDrive\\Learning\\Learning\\Machine Learning(python)\\Unsupervised Learning\\Marketing.csv" )
marketingData =marketingData.dropna( axis = 0 )
n = marketingData.shape[0]
col = marketingData.loc[ :, ["ANNUAL INCOME" ]]
incomeDf = pd.DataFrame( { \
"Income: Less than $10,000" : np.where( col == 1, 1, 0 ).flatten(), \
"Income: $10,000 to $14,999" : np.where( col == 2, 1, 0 ).flatten(), \
"Income: $15,000 to $19,999" : np.where( col == 3, 1, 0 ).flatten(), \
"Income: $20,000 to $24,999" : np.where( col == 4, 1, 0 ).flatten(), \
"Income: $25,000 to $29,999" : np.where( col == 5, 1, 0 ).flatten(), \
"Income: $30,000 to $39,999" : np.where( col == 6, 1, 0 ).flatten(), \
"Income: $40,000 to $49,999" : np.where( col == 7, 1, 0 ).flatten(), \
"Income: $50,000 to $74,999" : np.where( col == 8, 1, 0 ).flatten(), \
"Income: $75,000 or more" : np.where( col == 9, 1, 0 ).flatten() \
} )
col = marketingData.loc[ :, ["SEX" ]]
sexDf = pd.DataFrame( { \
"Male" : np.where( col == 1, 1, 0 ).flatten(), \
"Female" : np.where( col == 2, 1, 0 ).flatten() \
} )
col = marketingData.loc[ :, ["MARITAL STATUS" ]]
MARITALDf = pd.DataFrame( { \
"MARITAL: Married" : np.where( col == 1, 1, 0 ).flatten(), \
"MARITAL: Living together, not married" : np.where( col == 2, 1, 0 ).flatten(), \
"MARITAL: Divorced or separated" : np.where( col == 3, 1, 0 ).flatten(), \
"MARITAL: Widowed" : np.where( col == 4, 1, 0 ).flatten(), \
"MARITAL: Single, never married" : np.where( col == 5, 1, 0 ).flatten() \
} )
col = marketingData.loc[ :, ["AGE" ]]
ageDf = pd.DataFrame( { \
"AGE: 14 thru 17" : np.where( col == 1, 1, 0 ).flatten(), \
"AGE: 18 thru 24" : np.where( col == 2, 1, 0 ).flatten(), \
"AGE: 25 thru 34" : np.where( col == 3, 1, 0 ).flatten(), \
"AGE: 35 thru 44" : np.where( col == 4, 1, 0 ).flatten(), \
"AGE: 45 thru 54" : np.where( col == 5, 1, 0 ).flatten(), \
"AGE: 55 thru 64" : np.where( col == 6, 1, 0 ).flatten(), \
"AGE:65 and Over" : np.where( col == 7, 1, 0 ).flatten() \
} )
col = marketingData.loc[ :, ["EDUCATION" ]]
educationDf = pd.DataFrame( { \
"EDUCATION: Grade 8 or less" : np.where( col == 1, 1, 0 ).flatten(), \
"EDUCATION: Grades 9 to 11" : np.where( col == 2, 1, 0 ).flatten(), \
"EDUCATION: Graduated high school" : np.where( col == 3, 1, 0 ).flatten(), \
"EDUCATION: 1 to 3 years of college": np.where( col == 4, 1, 0 ).flatten(), \
"EDUCATION: College graduate" : np.where( col == 5, 1, 0 ).flatten(), \
"EDUCATION: Grad Study" : np.where( col == 6, 1, 0 ).flatten() \
} )
col = marketingData.loc[ :, ["OCCUPATION" ]]
occipationDf = pd.DataFrame( { \
"OCCUPATION: Professional/Managerial" : np.where( col == 1, 1, 0 ).flatten(), \
"OCCUPATION: Sales Worker" : np.where( col == 2, 1, 0 ).flatten(), \
"OCCUPATION: Factory Worker/Laborer/Driver" : np.where( col == 3, 1, 0 ).flatten(), \
"OCCUPATION: Clerical/Service Worker" : np.where( col == 4, 1, 0 ).flatten(), \
"OCCUPATION: Homemaker" : np.where( col == 5, 1, 0 ).flatten(), \
"OCCUPATION: Student, HS or College" : np.where( col == 6, 1, 0 ).flatten(), \
"OCCUPATION: Military" : np.where( col == 7, 1, 0 ).flatten(), \
"OCCUPATION: Retired" : np.where( col == 8, 1, 0 ).flatten(), \
"OCCUPATION: Unemployed" : np.where( col == 9, 1, 0 ).flatten() \
} )
col = marketingData.loc[ :, ["Lived Here how long" ]]
LivedDf = pd.DataFrame( { \
"LIVED: Less than one year" : np.where( col == 1, 1, 0 ).flatten(), \
"LIVED: One to three years" : np.where( col == 2, 1, 0 ).flatten(), \
"LIVED: Four to six years" : np.where( col == 3, 1, 0 ).flatten(), \
"LIVED: Seven to ten years" : np.where( col == 4, 1, 0 ).flatten(), \
"LIVED: More than ten years" : np.where( col == 5, 1, 0 ).flatten() \
} )
col = marketingData.loc[ :, ["DUAL INCOMES"] ]
dualDf = pd.DataFrame( { \
"DUAL: Not Married" : np.where( col == 1, 1, 0 ).flatten(), \
"DUAL: Yes" : np.where( col == 2, 1, 0 ).flatten(), \
"DUAL: Nos" : np.where( col == 3, 1, 0 ).flatten() \
} )
col = marketingData.loc[ :, ["PERSONS IN YOUR HOUSEHOLD" ]]
houseHoldSizeDf = pd.DataFrame( { \
"HOUSEHOLD SIZE: One" : np.where( col == 1, 1, 0 ).flatten(), \
"HOUSEHOLD SIZE: Two" : np.where( col == 2, 1, 0 ).flatten(), \
"HOUSEHOLD SIZE: Three" : np.where( col == 3, 1, 0 ).flatten(), \
"HOUSEHOLD SIZE: Four" : np.where( col == 4, 1, 0 ).flatten(), \
"HOUSEHOLD SIZE: Five" : np.where( col == 5, 1, 0 ).flatten(), \
"HOUSEHOLD SIZE: Six" : np.where( col == 6, 1, 0 ).flatten(), \
"HOUSEHOLD SIZE: Seven" : np.where( col == 7, 1, 0 ).flatten(), \
"HOUSEHOLD SIZE: Eight" : np.where( col == 8, 1, 0 ).flatten(), \
"HOUSEHOLD SIZE: Nine or more" : np.where( col == 9, 1, 0 ).flatten() \
} )
col = marketingData.loc[ :, ["PERSONS IN HOUSEHOLD UNDER 18" ]]
houseHoldSizeUnder18Df = pd.DataFrame( { \
"HOUSEHOLD SIZE 18: One" : np.where( col == 1, 1, 0 ).flatten(), \
"HOUSEHOLD SIZE 18: Two" : np.where( col == 2, 1, 0 ).flatten(), \
"HOUSEHOLD SIZE 18: Three" : np.where( col == 3, 1, 0 ).flatten(), \
"HOUSEHOLD SIZE 18: Four" : np.where( col == 4, 1, 0 ).flatten(), \
"HOUSEHOLD SIZE 18: Five" : np.where( col == 5, 1, 0 ).flatten(), \
"HOUSEHOLD SIZE 18: Six" : np.where( col == 6, 1, 0 ).flatten(), \
"HOUSEHOLD SIZE 18: Seven" : np.where( col == 7, 1, 0 ).flatten(), \
"HOUSEHOLD SIZE 18: Eight" : np.where( col == 8, 1, 0 ).flatten(), \
"HOUSEHOLD SIZE 18: Nine or more" : np.where( col == 9, 1, 0 ).flatten() \
} )
col = marketingData.loc[ :, ["HOUSEHOLDER STATUS"] ]
houseHolderDf = pd.DataFrame( { \
"HOUSEHOLDER: Own" : np.where( col == 1, 1, 0 ).flatten(), \
"HOUSEHOLDER: Rent" : np.where( col == 2, 1, 0 ).flatten(), \
"HOUSEHOLDER: Live with Parents/Family" : np.where( col == 3, 1, 0 ).flatten() \
} )
col = marketingData.loc[ :, ["TYPE OF HOME" ]]
HomeTypeDf = pd.DataFrame( { \
"HOME_TYPE: House" : np.where( col == 1, 1, 0 ).flatten(), \
"HOME_TYPE: Condominium" : np.where( col == 2, 1, 0 ).flatten(), \
"HOME_TYPE: Apartment" : np.where( col == 3, 1, 0 ).flatten(), \
"HOME_TYPE: Mobile Home" : np.where( col == 4, 1, 0 ).flatten(), \
"HOME_TYPE: Other" : np.where( col == 5, 1, 0 ).flatten() \
} )
col = marketingData.loc[ :, ["ETHNIC CLASSIFICATION" ]]
EthniceDf = pd.DataFrame( { \
"ETHNIC: American Indian" : np.where( col == 1, 1, 0 ).flatten(), \
"ETHNIC: Asian" : np.where( col == 2, 1, 0 ).flatten(), \
"ETHNIC: Black" : np.where( col == 3, 1, 0 ).flatten(), \
"ETHNIC: East Indian" : np.where( col == 4, 1, 0 ).flatten(), \
"ETHNIC: Hispanic" : np.where( col == 5, 1, 0 ).flatten(), \
"ETHNIC: Pacific Islander" : np.where( col == 6, 1, 0 ).flatten(), \
"ETHNIC: White" : np.where( col == 7, 1, 0 ).flatten(), \
"ETHNIC: Other" : np.where( col == 8, 1, 0 ).flatten() \
} )
col = marketingData.loc[ :, ["WHAT LANGUAGE IS SPOKEN MOST OFTEN IN YOUR HOME?"] ]
languageDf = pd.DataFrame( { \
"LANGUAGE: English" : np.where( col == 1, 1, 0 ).flatten(), \
"LANGUAGE: Spanish" : np.where( col == 2, 1, 0 ).flatten(), \
"LANGUAGE: OtherNos" : np.where( col == 3, 1, 0 ).flatten() \
} )
return( pd.concat( [incomeDf, sexDf, MARITALDf, ageDf, educationDf, occipationDf, LivedDf, dualDf, houseHoldSizeDf, houseHoldSizeUnder18Df, houseHolderDf, HomeTypeDf, EthniceDf, languageDf ], axis = 1 ) )
marketingData = getDate()
support = 0.10
confidence = 0.7
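# (clarifying note) support(X) is the fraction of respondents having every item in X;
# confidence(A -> B) = support(A u B) / support(A). The thresholds above keep itemsets
# with support >= 10% and rules with confidence > 70%.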
# get single-item sets
x = (marketingData.mean( axis = 0) >= support)
marketingData = marketingData.loc[ : , x ]
# gets the rest of the item sets
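# (clarifying note) Candidate k-itemsets are built level by level by extending the
# frequent (k-1)-itemsets with one extra item; for k >= 3 the Apriori property is used
# for pruning: a candidate is kept only if all of its (k-1)-subsets were themselves
# frequent (the set-difference check below).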
k = marketingData.shape[1]
K = { "size 1": list( range(0,k)) }
size = 2
largestGroupSize = k
Association = []
while largestGroupSize > 0:
if size == 2:
temp = []
for i in range(k):
temp.append(i)
newGroupings = list( combinations( temp , size) )
else:
newGroupings = []
for group in largestSizeGroupings:
for i in range(k):
if i not in group:
t = list(group + (i, ))
t.sort()
t = tuple(t)
if t not in newGroupings:
if len( set( list( combinations( t , size-1) ) ) - set( largestSizeGroupings ) ) == 0:
newGroupings.append( t )
largestSizeGroupings = []
for group in newGroupings:
if np.mean( marketingData.iloc[ :, list(group) ].sum( axis = 1) == size ) >= support:
largestSizeGroupings.append( group )
largestGroupSize = len(largestSizeGroupings)
if largestGroupSize > 0:
K["size "+str(size)] = largestSizeGroupings
if size > 2:
for group in largestSizeGroupings:
subset = list( combinations( group , size-1) )
for subGroup in subset:
c = (np.sum((marketingData.iloc[ :, list(group) ].sum( axis = 1) == size)) / np.sum((marketingData.iloc[ :, list(subGroup) ].sum( axis = 1) == size -1)))
if c > confidence :
Association.append( { "confidence" : c, 'antecedent' : tuple( subGroup ), 'consequent' : group, 'support' : np.mean( marketingData.iloc[ :, list(group) ].sum( axis = 1) == size ) } )
size += 1
Association = sorted( Association, key = lambda k: k["confidence"], reverse=True)[0:10]
f, (ax1, ax2) = plt.subplots(1, 2, sharey=False)
ax1.barh( np.arange(10), list( map( lambda d: d["support"], Association )), tick_label = list( map( lambda d: str( d["consequent"]), Association )) )
ax1.set_title("Support")
ax2.barh( np.arange(10), list( map( lambda d: d["confidence"], Association )), tick_label = list( map( lambda d: str( d["antecedent"]), Association )) )
ax2.set_title("Confidence")
plt.show()
| [
"matplotlib"
] |
8eefa80758db7a8db8ad0951ef0ea8a23da58b13 | Python | FlorentRamb/probabilistic_french_parser | /system/code/eval_parsing.py | UTF-8 | 1,925 | 2.859375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Wed Mar 4 11:54:52 2020
@author: flo-r
"""
from PYEVALB import scorer, parser
import matplotlib.pyplot as plt
import numpy as np
# Paths to data
parsed_output_path = '../output/parsed_test'
test_input_path = '../data/input_test'
test_output_path = '../data/output_test'
# Read data
with open(parsed_output_path, 'r') as f:
parsed_output = f.read().splitlines()
with open(test_input_path, 'r') as f:
test_input = f.read().splitlines()
with open(test_output_path, 'r') as f:
test_output = f.read().splitlines()
# Compute metrics
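# (clarifying note) PYEVALB computes PARSEVAL-style bracket scores: precision is the
# share of predicted constituents that also appear in the gold tree, recall the share
# of gold constituents recovered by the prediction.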
precisions = []
recalls = []
lengths = []
failures = 0
bugs = 0
for gold, test, sent in zip(test_output, parsed_output, test_input):
if test == 'No parsing found':
failures += 1
else:
try:
gold_tree = parser.create_from_bracket_string(gold[2:-1])
test_tree = parser.create_from_bracket_string(test[2:-1])
result = scorer.Scorer().score_trees(gold_tree, test_tree)
len_sentence = len(sent.split())
lengths.append(len_sentence)
print('')
print('Sentence length: ' + str(len_sentence))
print('Recall =' + str(result.recall))
print('Precision =' + str(result.prec))
recalls.append(result.recall)
precisions.append(result.prec)
except:
bugs +=1
print('')
print('Parsing failed for ' + str(failures + bugs) + ' sentences')
print('')
print('Average precision: ' + str(np.mean(precisions)))
print('')
print('Average recall: ' + str(np.mean(recalls)))
# Plots
plt.scatter(lengths, precisions)
plt.grid()
plt.title('Precision VS sentence length')
plt.xlabel('number of tokens')
plt.ylabel('precision')
plt.show()
plt.scatter(lengths, recalls)
plt.grid()
plt.title('Recall VS sentence length')
plt.xlabel('number of tokens')
plt.ylabel('recall')
plt.show()
| [
"matplotlib"
] |
024f13e5f2076485b7b4460867a659b41e657fba | Python | chao11/stageINT | /co_matrix/normalisation.py | UTF-8 | 2,733 | 2.703125 | 3 | [] | no_license | # nNormalisation of the connectivity matrix
# ======================================================================================================================
# options for normalisation: 1. 'none': without normalisation (default)
# 2. 'norm1': normalisation by l2
# 3. 'standard': standardize the feature by removing the mean and scaling to unit variance
# 4. 'range': MinMaxScaler, scale the features between a givin minimun and maximum, often between (0,1)
# ======================================================================================================================
import joblib
import numpy as np
import os
import os.path as op
import matplotlib.pylab as plt
import nibabel as nib
def nomalisation(connect,norma, axis=1):
# AXIS =1 by default
if axis ==0: # normalize the feature of the matrix
connect = np.transpose(connect)
if norma=='norm1':
from sklearn.preprocessing import normalize
connect_norm = normalize(connect,norm='l1')
#connect = connect_norm
elif norma=='norm2':
from sklearn.preprocessing import normalize
connect_norm = normalize(connect,norm='l2')
#connect = connect_norm
elif norma == 'standard':
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler().fit(connect)
connect_norm = scaler.transform(connect)
#connect = connect_scaled
elif norma == 'MinMax':
from sklearn.preprocessing import MinMaxScaler
min_max_scaler = MinMaxScaler()
connect_norm = min_max_scaler.fit_transform(connect)
if axis == 0:
connect_norm = connect_norm.T
return connect_norm
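# Illustrative usage (not part of the original pipeline): nomalisation(conn, 'norm2')
# scales each row; passing axis=0 transposes the matrix first, so the scaling is
# applied along the other dimension instead.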
root_dir = '/hpc/crise/hao.c/data'
subjects_list = os.listdir(root_dir)
#hemisphere = str(sys.argv[1])
hemisphere = 'lh'
options = ['norm1','norm2','standard','MinMax']
axis = 0
for option in options:
# plt.figure(indx1)
for subject in subjects_list[0:1]:
print "processing {},{}".format(subject, option)
subject_dir = op.join(root_dir,subject)
tracto_dir = op.join(subject_dir,'tracto','{}_STS+STG_destrieux'.format(hemisphere.upper()))
conn_matrix_path = op.join(tracto_dir,'conn_matrix_seed2parcels.jl')
conn_matrix_jl= joblib.load(conn_matrix_path)
conn_matrix = conn_matrix_jl[0]
matrix_norma = nomalisation(conn_matrix,norma= option ,axis=axis)
# save matrix:
output_path = op.join(tracto_dir,'conn_matrix_norma_{}.jl'.format(option))
joblib.dump(matrix_norma,output_path,compress=3)
print('{}: saved {} normalised connectivity matrix!!'.format(subject, option))
| [
"matplotlib"
] |
5e0187072e28386fd5ff0ee07039793eb3e71a5d | Python | MomokoXu/SummerCS4701AI | /HW3/problem3.py | UTF-8 | 3,334 | 2.828125 | 3 | [] | no_license | from sklearn.cluster import KMeans, SpectralClustering
from sklearn import datasets, cluster
from sklearn.preprocessing import StandardScaler
import matplotlib.pyplot as plt
import numpy as np
from scipy import misc
#Kmeans segmentation
#Tree image color clustering
trees = misc.imread('trees.png')
trees = np.array(trees, dtype=np.float64) / 255
w, h, d = original_shape = tuple(trees.shape)
assert d == 3
image_array = np.reshape(trees, (w * h, d))
def getLabels(n_colors, image_array):
kmeans = KMeans(n_clusters=n_colors, random_state=0).fit(image_array)
labels = kmeans.predict(image_array)
return kmeans, labels
def newImage(kmeans, labels, w, h):
d = kmeans.cluster_centers_.shape[1]
image = np.zeros((w, h, d))
label_idx = 0
for i in range(w):
for j in range(h):
image[i][j] = kmeans.cluster_centers_[labels[label_idx]]
label_idx += 1
return image
kmeansK3, labelsK3 = getLabels(3, image_array)
kmeansK5, labelsK5 = getLabels(5, image_array)
kmeansK10, labelsK10 = getLabels(10, image_array)
def showImage():
plt.figure(1)
plt.clf()
plt.axis('off')
plt.title('Image after Kmeans (k = 3)')
plt.imshow(newImage(kmeansK3, labelsK3, w, h))
plt.figure(2)
plt.clf()
plt.axis('off')
plt.title('Image after Kmeans (k = 5)')
plt.imshow(newImage(kmeansK5, labelsK5, w, h))
plt.figure(3)
plt.clf()
plt.axis('off')
plt.title('Image after Kmeans (k = 10)')
plt.imshow(newImage(kmeansK10, labelsK10, w, h))
plt.show()
#Spectral Clustering Vs kmeans
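# (clarifying note) These two toy datasets are non-convex: k-means assumes roughly
# spherical clusters around centroids, so it cuts the concentric circles and the
# interleaved moons incorrectly, whereas spectral clustering with a nearest-neighbours
# affinity follows the manifold structure and separates them.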
samplesNum = 2000
noisy_circles = datasets.make_circles(n_samples=samplesNum, factor=.5,noise=.05)
noisy_moons = datasets.make_moons(n_samples=samplesNum, noise=.05)
colors = np.array([x for x in 'rg'])
colors = np.hstack([colors] * 20)
clusteringType = ['KMeans', 'Spectral Clustering']
datasets = [noisy_circles, noisy_moons]
i = 1
for i_dataset, dataset in enumerate(datasets):
X, y = dataset
# normalize dataset for easier parameter selection
X = StandardScaler().fit_transform(X)
# create clustering estimators
two_means = cluster.KMeans(n_clusters=2)
spectral = cluster.SpectralClustering(n_clusters=2,eigen_solver='arpack',affinity="nearest_neighbors")
clustering_algorithms = [two_means, spectral]
plt.figure(i)
plt.subplot(3, 1, 1)
plt.title('Original data', size=12)
plt.scatter(X[:, 0], X[:, 1], color=colors[y].tolist(), s=10)
plt.xlim(-2, 2)
plt.ylim(-2, 2)
plt.xticks(())
plt.yticks(())
i += 1
plot_num = 2
for name, algorithm in zip(clusteringType, clustering_algorithms):
# predict cluster memberships
algorithm.fit(X)
if hasattr(algorithm, 'labels_'):
y_pred = algorithm.labels_.astype(np.int)
else:
y_pred = algorithm.predict(X)
# plot
plt.subplot(3, 1, plot_num)
plt.title(name, size=12)
plt.scatter(X[:, 0], X[:, 1], color=colors[y_pred].tolist(), s=10)
if hasattr(algorithm, 'cluster_centers_'):
centers = algorithm.cluster_centers_
center_colors = colors[:len(centers)]
plt.scatter(centers[:, 0], centers[:, 1], s=100, c=center_colors)
plt.xlim(-2, 2)
plt.ylim(-2, 2)
plt.xticks(())
plt.yticks(())
plot_num += 1
plt.show()
| [
"matplotlib"
] |
e02b6650ef506c17187f166742f335a741e4004d | Python | CaravanPassenger/pytorch-learning-notes | /multi_label/Classify_scenes.py | UTF-8 | 21,906 | 2.609375 | 3 | [] | no_license | #!/usr/bin/env python
# coding: utf-8
# In[ ]:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import torch
from torch import nn
import torch.nn.functional as F
from torchvision import datasets ,models , transforms
import json
from torch.utils.data import Dataset, DataLoader ,random_split
from PIL import Image
from pathlib import Path
classLabels = ["desert", "mountains", "sea", "sunset", "trees" ]
print(torch.__version__)
df = pd.DataFrame({"image": sorted([ int(x.name.strip(".jpg")) for x in Path("image_scene_data/original").iterdir()])})
df.image = df.image.astype(str)
print(df.dtypes)
df.image = df.image.str.cat([".jpg"]*len(df))
for label in classLabels:
df[label]=0
with open("image_scene_data/labels.json") as infile:
s ="["
s = s + ",".join(infile.readlines())
s = s+"]"
s = np.array(eval(s))
s[s<0] = 0
df.iloc[:,1:] = s
df.to_csv("data.csv",index=False)
print(df.head(10))
del df
# ## Visulaize the data
#
# ### Data distribution
df = pd.read_csv("data.csv")
fig1, ax1 = plt.subplots()
df.iloc[:,1:].sum(axis=0).plot.pie(autopct='%1.1f%%',shadow=True, startangle=90,ax=ax1)
ax1.axis("equal")
plt.show()
# ### Correlation between different classes
# In[ ]:
import seaborn as sns
sns.heatmap(df.iloc[:,1:].corr(), cmap="RdYlBu", vmin=-1, vmax=1)
# looks like there is no correlation between the labels
# ### Visualize images
# In[ ]:
def visualizeImage(idx):
fd = df.iloc[idx]
image = fd.image
label = fd[1:].tolist()
print(image)
image = Image.open("image_scene_data/original/"+image)
fig,ax = plt.subplots()
ax.imshow(image)
ax.grid(False)
classes = np.array(classLabels)[np.array(label,dtype=bool)]
for i , s in enumerate(classes):
ax.text(0 , i*20 , s , verticalalignment='top', color="white", fontsize=16, weight='bold')
plt.show()
visualizeImage(52)
# In[ ]:
#Images in the dataset have different sizes to lets take a mean size while resizing 224*224
l= []
for i in df.image:
with Image.open(Path("image_scene_data/original")/i) as f:
l.append(f.size)
np.array(l).mean(axis=0),np.median(np.array(l) , axis=0)
# ## Create Data pipeline
# In[ ]:
class MyDataset(Dataset):
def __init__(self , csv_file , img_dir , transforms=None ):
self.df = pd.read_csv(csv_file)
self.img_dir = img_dir
self.transforms = transforms
def __getitem__(self,idx):
# d = self.df.iloc[idx.item()]
d = self.df.iloc[idx]
image = Image.open(self.img_dir/d.image).convert("RGB")
label = torch.tensor(d[1:].tolist() , dtype=torch.float32)
if self.transforms is not None:
image = self.transforms(image)
return image,label
def __len__(self):
return len(self.df)
# In[ ]:
batch_size=32
transform = transforms.Compose([transforms.Resize((224,224)) ,
transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
dataset = MyDataset("data.csv" , Path("image_scene_data/original") , transform)
valid_no = int(len(dataset)*0.12)
trainset ,valset = random_split( dataset , [len(dataset) -valid_no ,valid_no])
print(f"trainset len {len(trainset)} valset len {len(valset)}")
dataloader = {"train":DataLoader(trainset , shuffle=True , batch_size=batch_size),
"val": DataLoader(valset , shuffle=True , batch_size=batch_size)}
# ## Model Definition
# In[ ]:
model = models.resnet50(pretrained=True) # load the pretrained model
num_features = model.fc.in_features # get the number of in_features of the last Linear layer
print(num_features)
## freeze the entire convolution base
for param in model.parameters():
param.requires_grad_(False)
# In[ ]:
def create_head(num_features , number_classes ,dropout_prob=0.5 ,activation_func =nn.ReLU):
features_lst = [num_features , num_features//2 , num_features//4]
layers = []
for in_f ,out_f in zip(features_lst[:-1] , features_lst[1:]):
layers.append(nn.Linear(in_f , out_f))
layers.append(activation_func())
layers.append(nn.BatchNorm1d(out_f))
if dropout_prob !=0 : layers.append(nn.Dropout(dropout_prob))
layers.append(nn.Linear(features_lst[-1] , number_classes))
return nn.Sequential(*layers)
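# (clarifying note) For ResNet-50, fc.in_features is 2048, so the head built above is
# Linear 2048->1024->512 with ReLU, BatchNorm1d and dropout between the layers, ending
# in one logit per label; BCEWithLogitsLoss later applies the sigmoid for the
# multi-label case.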
top_head = create_head(num_features , len(classLabels)) # one output unit per class label (5 here)
model.fc = top_head # replace the fully connected layer
model
# ## Optimizer and Criterion
# In[ ]:
import torch.optim as optim
from torch.optim import lr_scheduler
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = model.to(device)
criterion = nn.BCEWithLogitsLoss()
# specify optimizer
optimizer = optim.Adam(model.parameters(), lr=0.001)
sgdr_partial = lr_scheduler.CosineAnnealingLR(optimizer, T_max=5, eta_min=0.005 )
# ## Training
# In[ ]:
from tqdm import trange
from sklearn.metrics import precision_score,f1_score
def train(model , data_loader , criterion , optimizer ,scheduler, num_epochs=5):
for epoch in trange(num_epochs,desc="Epochs"):
result = []
for phase in ['train', 'val']:
if phase=="train": # put the model in training mode
model.train()
scheduler.step()
else: # put the model in validation mode
model.eval()
# keep track of training and validation loss
running_loss = 0.0
running_corrects = 0.0
for data , target in data_loader[phase]:
#load the data and target to respective device
data , target = data.to(device) , target.to(device)
with torch.set_grad_enabled(phase=="train"):
#feed the input
output = model(data)
#calculate the loss
loss = criterion(output,target)
preds = torch.sigmoid(output).data > 0.5
preds = preds.to(torch.float32)
if phase=="train" :
# backward pass: compute gradient of the loss with respect to model parameters
loss.backward()
# update the model parameters
optimizer.step()
# zero the grad to stop it from accumulating
optimizer.zero_grad()
# statistics
running_loss += loss.item() * data.size(0)
running_corrects += f1_score(target.to("cpu").to(torch.int).numpy() ,preds.to("cpu").to(torch.int).numpy() , average="samples") * data.size(0)
epoch_loss = running_loss / len(data_loader[phase].dataset)
epoch_acc = running_corrects / len(data_loader[phase].dataset)
result.append('{} Loss: {:.4f} Acc: {:.4f}'.format(phase, epoch_loss, epoch_acc))
print(result)
# In[ ]:
train(model,dataloader , criterion, optimizer,sgdr_partial,num_epochs=10)
# ## Saving & Loading model
# In[ ]:
def createCheckpoint(filename=Path("./LatestCheckpoint.pt")):
checkpoint = {
'epoch': 5,
'model_state_dict': model.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),
"batch_size":batch_size,
} # save all important stuff
torch.save(checkpoint , filename)
createCheckpoint()
# In[ ]:
# Load
'''
First initialize the model and then just load it
model = TheModelClass(*args, **kwargs)
optimizer = TheOptimizerClass(*args, **kwargs)
'''
checkpoint = torch.load(Path("./LatestCheckpoint.pt"))
model.load_state_dict(checkpoint['model_state_dict'])
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
epoch = checkpoint['epoch']
batch_size = checkpoint['batch_size']
model.eval() ## or model.train()
optimizer
# ## LrFinder and One Cycle Policy
#
# For faster convergence
# In[ ]:
def unfreeze(model,percent=0.25):
l = int(np.ceil(len(model._modules.keys())* percent))
l = list(model._modules.keys())[-l:]
print(f"unfreezing these layer {l}",)
for name in l:
for params in model._modules[name].parameters():
params.requires_grad_(True)
def check_freeze(model):
for name ,layer in model._modules.items():
s = []
for l in layer.parameters():
s.append(l.requires_grad)
print(name ,all(s))
# In[ ]:
# unfreeze 40% of the model
unfreeze(model ,0.40)
# check which layers are frozen or not
check_freeze(model)
# ### LR finder
# In[ ]:
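# (clarifying note) The classes below implement the learning-rate range test ("LR
# finder"): the LR is annealed from a tiny start value up to end_lr over num_iter
# mini-batches while the smoothed loss is recorded, and training stops early once the
# loss diverges; a good maximum LR is then read off the resulting plot.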
class LinearScheduler(lr_scheduler._LRScheduler):
"""Linearly increases the learning rate between two boundaries over a number of iterations."""
def __init__(self, optimizer, end_lr, num_iter):
self.end_lr = end_lr
self.num_iter = num_iter
super(LinearScheduler,self).__init__(optimizer)
def get_lr(self):
# increement one by one
curr_iter = self.last_epoch + 1
# get the ratio
pct = curr_iter / self.num_iter
# calculate lr with this formulae start + pct * (end-start)
return [base_lr + pct * (self.end_lr - base_lr) for base_lr in self.base_lrs]
class ExponentialScheduler(lr_scheduler._LRScheduler):
"""Exponentially increases the learning rate between two boundaries over a number of iterations."""
def __init__(self, optimizer, end_lr, num_iter, last_epoch=-1):
self.end_lr = end_lr
self.num_iter = num_iter
super(ExponentialScheduler,self).__init__(optimizer)
def get_lr(self):
curr_iter = self.last_epoch + 1
pct = curr_iter / self.num_iter
return [base_lr * (self.end_lr / base_lr) ** pct for base_lr in self.base_lrs]
class CosineScheduler(lr_scheduler._LRScheduler):
"""Cosine increases the learning rate between two boundaries over a number of iterations."""
def __init__(self, optimizer, end_lr, num_iter, last_epoch=-1):
self.end_lr = end_lr
self.num_iter = num_iter
super(CosineScheduler,self).__init__(optimizer)
def get_lr(self):
curr_iter = self.last_epoch + 1
pct = curr_iter / self.num_iter
cos_out = np.cos(np.pi * pct) + 1
return [self.end_lr + (base_lr - self.end_lr )/2 *cos_out for base_lr in self.base_lrs]
# In[ ]:
class LRFinder:
def __init__(self, model , optimizer , criterion ,start_lr=1e-7, device=None):
self.model = model
# Move the model to the proper device
self.optimizer = optimizer
self.criterion = criterion
## save the model's initial state so it can be restored after the LR search
self.save_file = Path("tmpfile")
torch.save(self.model , self.save_file)
if device is None:
self.device = next(model.parameters()).device
else:
self.device = device
self.model.to(self.device)
self.history = {"lr":[] , "losses":[]}
for l in self.optimizer.param_groups:
l["initial_lr"]=start_lr
def reset(self):
""" Resets the model to intial state """
self.model = torch.load(self.save_file)
self.model.train()
self.save_file.unlink()
print("model reset done")
return self.model
def calculateSmmothingValue(self ,beta):
n ,mov_avg=0,0
while True :
n+=1
value = yield
mov_avg = beta*mov_avg +(1-beta)*value
smooth = mov_avg / (1 - beta **n )
yield smooth
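# (clarifying note) The generator above maintains an exponential moving average of the
# loss, mov_avg = beta*mov_avg + (1-beta)*value, and divides by (1 - beta**n) to correct
# the start-up bias, similar to the bias correction used in Adam.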
def lrfind(self, trainLoader,end_lr=10,num_iter=150,step_mode="exp", loss_smoothing_beta=0.99, diverge_th=5):
"""
Performs the lrfind test
Arguments:
trainLoader : The data loader
end_lr : The maximum lr
num_iter : Max number of iterations
step_mode : The anneal function; `exp` by default, but can also be `linear` or `cos`
loss_smoothing_beta : The loss smoothing factor, value should be between [0 , 1[
diverge_th: Training is stopped early once the loss exceeds diverge_th * the best loss
"""
# Reset test results
self.history = {"lr": [], "losses": []}
self.best_loss = None
self.smoothner = self.calculateSmmothingValue(loss_smoothing_beta)
if step_mode.lower()=="exp":
lr_schedule = ExponentialScheduler(self.optimizer , end_lr , num_iter,)
elif step_mode.lower()=="cos":
lr_schedule = CosineScheduler(self.optimizer , end_lr , num_iter)
elif step_mode.lower()=="linear":
lr_schedule = LinearScheduler(self.optimizer , end_lr , num_iter)
else:
raise ValueError(f"expected mode is either {exp , cos ,linear} got {step_mode}")
if not 0 <= loss_smoothing_beta < 1:
raise ValueError("loss_smoothing_beta is outside the range [0, 1[")
iterator = iter(trainLoader)
for each_iter in range(num_iter):
try:
data , target = next(iterator)
except StopIteration:
iterator = iter(trainLoader)
data , target = next(iterator)
loss = self._train_batch(data , target)
# Update the learning rate
lr_schedule.step()
self.history["lr"].append(lr_schedule.get_lr()[0])
# Track the best loss and smooth it if smooth_f is specified
if each_iter == 0:
self.best_loss = loss
else:
next(self.smoothner)
self.best_loss = self.smoothner.send(loss)
if loss < self.best_loss:
self.best_loss = loss
# Check if the loss has diverged; if it has, stop the test
self.history["losses"].append(loss)
if loss > diverge_th * self.best_loss:
print("Stopping early, the loss has diverged")
break
print("Learning rate search finished. See the graph with {finder_name}.plot()")
def _train_batch(self,data,target):
# set to training mode
self.model.train()
#load data to device
data ,target = data.to(self.device) ,target.to(self.device)
#forward pass
self.optimizer.zero_grad()
output = self.model(data)
loss = self.criterion(output,target)
#backward pass
loss.backward()
self.optimizer.step()
return loss.item()
def plot(self):
losses = self.history["losses"]
lr = self.history["lr"]
plt.semilogx(lr,losses)
plt.xlabel("Learning rate")
plt.ylabel("Losses ")
plt.show()
# In[ ]:
lr_finder = LRFinder(model, optimizer, criterion, device=device)
lr_finder.lrfind(dataloader["train"], end_lr=10, step_mode="exp")
# In[ ]:
lr_finder.plot()
model= lr_finder.reset()
# In[ ]:
# ### One Cycle Policy
# In[ ]:
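# (clarifying note) The one-cycle schedule implemented below spends the first pct_start
# fraction of all iterations annealing the LR linearly from max_lr/div_factor up to
# max_lr, then cosine-anneals it down to about min_lr/1e4, while momentum follows the
# opposite path (0.95 -> 0.85 -> 0.95 by default) via the paired Stepper schedules.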
class Stepper():
"Used to \"step\" from start,end (`vals`) over `n_iter` iterations on a schedule defined by `func`"
def __init__(self, val, n_iter:int, func):
self.start,self.end = val
self.n_iter = max(1,n_iter)
self.func = func
self.n = 0
def step(self):
"Return next value along annealed schedule."
self.n += 1
return self.func(self.start, self.end, self.n/self.n_iter)
@property
def is_done(self):
"Return `True` if schedule completed."
return self.n >= self.n_iter
# Annealing functions
def annealing_no(start, end, pct):
"No annealing, always return `start`."
return start
def annealing_linear(start, end, pct):
"Linearly anneal from `start` to `end` as pct goes from 0.0 to 1.0."
return start + pct * (end-start)
def annealing_exp(start, end, pct):
"Exponentially anneal from `start` to `end` as pct goes from 0.0 to 1.0."
return start * (end/start) ** pct
def annealing_cos(start, end, pct):
"Cosine anneal from `start` to `end` as pct goes from 0.0 to 1.0."
cos_out = np.cos(np.pi * pct) + 1
return end + (start-end)/2 * cos_out
# In[ ]:
class OneCyclePolicy:
def __init__(self,model , optimizer , criterion ,num_iteration,num_epochs,max_lr, momentum = (0.95,0.85) , div_factor=25 , pct_start=0.4, device=None ):
self.model =model
self.optimizer = optimizer
self.criterion = criterion
self.num_epochs = num_epochs
if device is None:
self.device = next(model.parameters()).device
else:
self.device = device
n = num_iteration * self.num_epochs
a1 = int(n*pct_start)
a2 = n-a1
self.phases = ((a1 , annealing_linear) , (a2 , annealing_cos))
min_lr = max_lr/div_factor
self.lr_scheds = self.steps((min_lr,max_lr) , (max_lr,min_lr/1e4))
self.mom_scheds = self.steps(momentum , momentum[::-1])
self.idx_s = 0
self.update_lr_mom(self.lr_scheds[0].start,self.mom_scheds[0].start)
def steps(self, *steps):
"Build anneal schedule for all of the parameters."
return [Stepper(step, n_iter, func=func)for (step,(n_iter,func)) in zip(steps, self.phases)]
def train(self, trainLoader , validLoader ):
self.model.to(self.device)
data_loader = {"train":trainLoader , "val":validLoader}
for epoch in trange(self.num_epochs,desc="Epochs"):
result = []
for phase in ['train', 'val']:
if phase=="train": # put the model in training mode
model.train()
else: # put the model in validation mode
model.eval()
# keep track of training and validation loss
running_loss = 0.0
running_corrects = 0
for data , target in data_loader[phase]:
#load the data and target to respective device
data , target = data.to(device) , target.to(device)
with torch.set_grad_enabled(phase=="train"):
#feed the input
output = self.model(data)
#calculate the loss
loss = self.criterion(output,target)
preds = torch.sigmoid(output).data > 0.5
preds = preds.to(torch.float32)
if phase=="train" :
# backward pass: compute gradient of the loss with respect to model parameters
loss.backward()
# update the model parameters
self.optimizer.step()
# zero the grad to stop it from accumulating
self.optimizer.zero_grad()
self.update_lr_mom(self.lr_scheds[self.idx_s].step() ,self.mom_scheds[self.idx_s].step() )
if self.lr_scheds[self.idx_s].is_done:
self.idx_s += 1
# statistics
running_loss += loss.item() * data.size(0)
running_corrects += f1_score(target.to("cpu").to(torch.int).numpy() ,preds.to("cpu").to(torch.int).numpy() , average="samples") * data.size(0)
epoch_loss = running_loss / len(data_loader[phase].dataset)
epoch_acc = running_corrects/ len(data_loader[phase].dataset)
result.append('{} Loss: {:.4f} Acc: {:.4f}'.format(phase, epoch_loss, epoch_acc))
print(result)
def update_lr_mom(self,lr=0.001,mom=0.99):
for l in self.optimizer.param_groups:
l["lr"]=lr
if isinstance(self.optimizer , ( torch.optim.Adamax,torch.optim.Adam)):
l["betas"] = ( mom, 0.999)
elif isinstance(self.optimizer, torch.optim.SGD):
l["momentum"] =mom
# In[ ]:
fit_one_cycle = OneCyclePolicy(model ,optimizer , criterion,num_iteration=len(dataloader["train"].dataset)//batch_size , num_epochs =8, max_lr =1e-5 ,device=device)
fit_one_cycle.train(dataloader["train"],dataloader["val"])
# ## unfreeze 60 % architecture and retrain
# In[ ]:
# unfreeze 60% of the model
unfreeze(model ,0.60)
# check which layers are frozen or not
check_freeze(model)
# In[ ]:
lr_finder = LRFinder(model, optimizer, criterion, device=device)
lr_finder.lrfind(dataloader["train"], end_lr=10, step_mode="exp")
# In[ ]:
lr_finder.plot()
# In[ ]:
model= lr_finder.reset()
# In[ ]:
fit_one_cycle = OneCyclePolicy(model ,optimizer , criterion,num_iteration=len(dataloader["train"].dataset)//batch_size , num_epochs =10, max_lr =1e-5 ,device=device)
fit_one_cycle.train(dataloader["train"],dataloader["val"])
# ## unfreeze 80% of the model and retrain
# In[ ]:
# unfreeze 80% of the model
unfreeze(model ,0.80)
# check which layers are frozen or not
check_freeze(model)
# In[ ]:
lr_finder = LRFinder(model, optimizer, criterion, device=device)
lr_finder.lrfind(dataloader["train"], end_lr=10, step_mode="exp")
# In[ ]:
lr_finder.plot()
# In[ ]:
model= lr_finder.reset()
# In[ ]:
fit_one_cycle = OneCyclePolicy(model ,optimizer , criterion,num_iteration=len(dataloader["train"].dataset)//batch_size , num_epochs =10, max_lr =1e-3 ,device=device)
fit_one_cycle.train(dataloader["train"],dataloader["val"])
# ## Visualizing some end result
# In[ ]:
image , label = next(iter(dataloader["val"]))
image = image.to(device)
label = label.to(device)
output = 0
with torch.no_grad():
output = model(image)
output = torch.sigmoid(output)
output = output>0.2
# In[ ]:
# In[ ]:
mean , std = torch.tensor([0.485, 0.456, 0.406]),torch.tensor([0.229, 0.224, 0.225])
def denormalize(image):
image = image.to("cpu").clone().detach()
image = transforms.Normalize(-mean/std,1/std)(image) #denormalize
image = image.permute(1,2,0)
image = torch.clamp(image,0,1)
return image.numpy()
def visualize(image , actual , pred):
fig,ax = plt.subplots()
ax.imshow(denormalize(image))
ax.grid(False)
classes = np.array(classLabels)[np.array(actual,dtype=bool)]
for i , s in enumerate(classes):
ax.text(0 , i*20 , s , verticalalignment='top', color="white", fontsize=16, weight='bold')
classes = np.array(classLabels)[np.array(pred,dtype=bool)]
for i , s in enumerate(classes):
ax.text(160 , i*20 , s , verticalalignment='top', color="black", fontsize=16, weight='bold')
plt.show()
visualize(image[1] , label[1].tolist() , output[1].tolist())
# In[ ]:
visualize(image[0] , label[0].tolist() , output[0].tolist())
# In[ ]:
visualize(image[2] , label[2].tolist() , output[2].tolist())
# In[ ]:
visualize(image[7] , label[7].tolist() , output[7].tolist())
# In[ ]:
# # Summary
#
# ## The Final accuracy or more precisely the f1 score is 88.85%
# ## The loss is 0.1962
#
| [
"matplotlib",
"seaborn"
] |
86c392ae194d19099e2537d4c3ddcc18bf7ab26d | Python | TSGreen/libraries-unlimited | /analysisandplot.py | UTF-8 | 21,562 | 3.453125 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Creates the visualisations for the LU coding report.
Overview:
- Reads in the data from a CSV file for each district.
- Cleans and analyses the data.
- Creates the graphs and saves them as pdf files for embedding in report.
@author: Tim Green
"""
import pandas as pd
from glob import glob
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import matplotlib.ticker as ticker
class FeedbackAnalysis():
"""
Read, analyse and plot all the data collected from questionnaires at
LU coding events.
Procedure:
Instantiate with no arguments e.g. workshops = FeedbackAnalysis().
Run the "read_datafile" method e.g. workshops.read_datafile(path_to_data, path_to_figures).
Run the "gender", "age_distribution" and "app_ratings" methods separately.
Run the "question" method for each other question in the questionnaire with appropriate args.
"""
def __init__(self):
self.response_dict = {
'yes_dontknow_no': (['yes', 'dontknow', 'no'], 3,
['Yes', 'Dont Know', 'No']),
'yes_maybe_no': (['yes', 'maybe', 'no'], 3,
['Yes', 'Maybe', 'No']),
'very_to_not_interest': (['very', 'somewhat', 'unsure', 'notmuch', 'notatall'], 2,
['Very Interested', 'Somewhat Interested', 'Unsure', 'Not really intersted', 'Not intersted at all']),
'very_to_not_importance': (['very', 'somewhat', 'unsure', 'notmuch', 'notatall'], 2,
['Very Important', 'Somewhat Important', 'Unsure', 'Not really important', 'Not important at all']),
'verygood_to_bad': (['verygood', 'good', 'okay', 'bad'], 4,
['Very good', 'Good', 'Okay', 'Bad']),
'frequency': (['never', 'rarely', 'sometimes', 'regularly', 'veryregularly'], 3,
['Never', 'Rarely', 'Sometimes', 'Regularly', 'Very Regularly']),
'itaccess': (['no', 'home', 'HomeSchool', 'school'], 2,
['No Access', 'Access at Home', 'Access at both home and school', 'Access at School']),
'never_little_lot': (['No', 'Alittle', 'Alot'], 3,
['Never', 'A little', 'A lot']),
'know_language': (['knownothing', 'knowlittle', 'knowit', 'knowitwell', 'dontknow'], 3,
['Know Nothing', 'Know a Little', 'Know It', 'Know it Well', 'Dont Know'])
}
def read_datafiles(self, data_filepath, save_filepath):
"""
Read the data files for each workshop event and build a dataframe of
all the collated data.
Parameters
----------
data_filepath : str
The path to the directory containing the data files.
save_filepath : str
The path to directory where figures are to saved.
Returns
-------
Pandas dataframe and some meta data.
"""
self.data_filepath = data_filepath
self.save_filepath = save_filepath
filenames = self.data_filepath.glob('*.csv')
self.dataframe = pd.concat([pd.read_csv(datafile) for datafile in filenames], ignore_index=True)
districts = sorted([str(file).split('/')[-1][:-4] for file in self.data_filepath.glob('*.csv')])
#districts = sorted([str(datafile).split('/')[-1][:-4] for datafile in filenames])
self.total_participants = self.dataframe.count().iloc[1:].max()
print(f'\nThe following data files have been read and included:\n\n{districts}\n')
print(f'If you expect to see another file here, please check you have added it to the location "{self.data_filepath}"')
print(f'\nTotal number of participants to date: {self.total_participants}.\n\n')
def create_database(self, file):
""" Collate all the datafiles into one file. """
self.dataframe.to_csv(file)
#print(f'Gender details:\n{df.Gender.value_counts()}')
def question(self, question, response_type, prepost_question, title):
"""
Set up the parameters for analysing and plotting the given question.
Calls on the method for doing the analysis and the method for creating
the stacked bar plots.
Parameters
----------
question : str
The short-form name for the question. Is also the relevant column name.
response_type : str
A string indicating the type of responses for the given question. Acts as
the key for the response_dict dictionary which returns the responses in
the data and their corresponding labels for plotting.
prepost_question : boolean
If True, then the given question is asked on the pre and post questionnaires
and the desired output is a comparison of pre and post responses.
If False, then the question is only asked once and responses are instead
aggregated by gender.
title : str
The full form of the given question.
Returns
-------
None.
"""
cats = self.response_dict[response_type][0]
legend_columns = self.response_dict[response_type][1]
legend_labels = self.response_dict[response_type][2]
responses, samplesize = self.analyse_question(question, cats, prepost_question)
self.stackedbarplot(responses, legend_labels, question,
legend_columns, samplesize, title=title)
def analyse_question(self, data_column, cats, prepost_question=False):
"""
Generate the response statistics to the given question.
Parameters
----------
data_column : str
The column name in the dataframe associated to the question.
cats : list
A list of the possible responses / categories for that question.
prepost_question : Boolean, optional
True if the question is present on the pre- and post- questionnaires
and we want to return a comparison of pre & post.
The default is False and aggregates data by gender.
Returns
-------
responses : numpy.array
The percentage of each response.
samplesize : int
The number of non-null responses to this particular question.
"""
if prepost_question:
self.dataframe['Post_'+data_column] = pd.Categorical(self.dataframe['Post_'+data_column], categories=cats)
self.dataframe['Pre_'+data_column] = pd.Categorical(self.dataframe['Pre_'+data_column], categories=cats)
responses_pc_post = self.dataframe['Post_'+data_column].value_counts(normalize=True, sort=False).values
responses_pc_pre = self.dataframe['Pre_'+data_column].value_counts(normalize=True, sort=False).values
responses = np.array([responses_pc_post, responses_pc_pre])
samplesize = self.dataframe['Pre_'+data_column].value_counts(normalize=False).sum()
if not prepost_question:
self.dataframe[data_column] = pd.Categorical(self.dataframe[data_column], categories=cats)
samplesize = self.dataframe[data_column].value_counts(normalize=False, sort=False).values.sum()
responses_pc_all = self.dataframe[data_column].value_counts(normalize=True, sort=False).values
responses_pc_male = self.dataframe[data_column][self.dataframe['Gender']=='male'].value_counts(normalize=True, sort=False).values
responses_pc_female = self.dataframe[data_column][self.dataframe['Gender']=='female'].value_counts(normalize=True, sort=False).values
responses = np.array([responses_pc_male, responses_pc_female, responses_pc_all])
return responses, samplesize
def stackedbarplot(self, responses, labels, figurename, legend_columns = 3, samplesize=0, title=''):
"""
Create stacked bar plots showing the proportional responses to the given question.
Parameters
----------
responses : numpy array
Array of the responses for the given question.
labels : list of strings
The possible responses for the given question.
figurename : str
Short form name of the question asked.
legend_columns : int, optional
The number of columns in the legend. Vary to control legend layout.
The default is 3.
samplesize : int, optional
The number of participants which have responded to given question.
The default is 0.
Returns
-------
Saves the figure to pdf and png files.
"""
plt.close()
print(f'Plotting the chart for {figurename} question..')
sns.set_style('ticks', {'axes.spines.right': False,
'axes.spines.top': False,
'axes.spines.left': False,
'ytick.left': False})
if responses.shape[0] == 2:
ind = [0, 1]
elif responses.shape[0] == 3:
ind = [0, 0.85, 2]
elif responses.shape[0] == 1:
ind = [0]
fig, ax = plt.subplots(figsize=(8.7, 6))
start, pos, = 0, [0, 0, 0]
for i in range(responses.shape[1]):
option = responses[:,i]
plt.barh(ind, option, left=start, label=labels[i])
for k in range(len(ind)):
xpos = pos[k]+option[k]/2
percent = int(round(option[k]*100))
if percent >= 10:
plt.annotate(f'{str(percent)} %', xy=(xpos, ind[k]),
ha='center', fontsize=15, color='1')
elif percent < 3:
pass
else:
plt.annotate(f'{str(percent)}', xy=(xpos, ind[k]),
ha='center', fontsize=15, color='1')
start = start+option
pos = start
plt.xlim(0,1)
if responses.shape[0] == 2:
plt.yticks(ind, ('Post', 'Pre'), fontsize=18)
elif responses.shape[0] == 3:
plt.yticks(ind, ('Male', 'Female', 'All'), fontsize=18)
elif responses.shape[0] ==1:
plt.yticks(ind, '', fontsize=18)
plt.xticks(fontsize=18)
ax.xaxis.set_major_formatter(ticker.PercentFormatter(xmax=1))
plt.legend(bbox_to_anchor=(0, 0.99, 1, .05), loc=3, ncol=legend_columns, borderaxespad=0, fontsize=15)
plt.minorticks_on()
plt.figtext(0.9, 0.12, (f'Based on sample of {samplesize} participants'), fontsize=10, ha='right')
pdffile = 'bar_'+figurename+'.pdf'
pngfile = 'bar_'+figurename+'.png'
plt.savefig(self.save_filepath/pdffile, bbox_inches='tight')
plt.title(title, fontsize=20, pad=[85 if legend_columns<3 else 50][0])
plt.savefig(self.save_filepath/pngfile, bbox_inches='tight', dpi=600)
sns.set()
def app_ratings(self):
"""
Create a grid of histograms showing participant rating for each app.
Returns
-------
Saves the figure to pdf and png files.
"""
print("Plotting the app ratings plots..")
plt.close()
fig = plt.figure(figsize=(8, 10))
(ax1, ax2), (ax3, ax4), (ax5, ax6), (ax7, ax8) = fig.subplots(4, 2, sharex=True)
apps = ['Scratch', 'Micro:bit', 'Make Art', 'Make Pong', 'Kano Code',
'Make Snake', 'Hack Minecraft', 'Terminal Quest']
apps_columns = {'Scratch': 'Scratch_rating', 'Micro:bit': 'Microbit_rating', 'Make Art': 'MakeArt_rating',
'Make Pong': 'MakePong_rating', 'Kano Code': 'KanoCode_rating', 'Make Snake': 'MakeSnake_rating',
'Hack Minecraft': 'HackMinecraft_rating', 'Terminal Quest': 'TerminalQuest_rating'}
axs = [ax1, ax2, ax3, ax4, ax5, ax6, ax7, ax8]
sns.set_style('ticks', {'axes.spines.right': False, 'axes.spines.top': False})
bins = np.linspace(-0.5, 5.5, 7)
for app, ax in zip(apps, axs):
app_column = apps_columns[app]
usedratedapp = self.dataframe[app_column][self.dataframe[app_column].isin(('1,2,3,4,5').split(','))].values.astype(int)
mean_rating = usedratedapp.sum()/len(usedratedapp)
values, base = np.histogram(usedratedapp, bins=bins)
ax.hist(usedratedapp, bins=bins, fc='none', lw=2.5, ec=sns.xkcd_rgb['cerulean blue'])
plt.ylim(0, np.max(values)*1.1)
plt.xlim(0.4, 5.6)
ax.text(1, np.max(values)*0.9, app, weight='bold', ha='left')
ax.text(1, np.max(values)*0.75, 'Mean: %1.1f/5' %mean_rating)
ax.yaxis.set_major_locator(ticker.MaxNLocator(5))
ax.xaxis.set_ticks_position('bottom')
ax7.set_xlabel('Rating /5', fontsize=20)
ax8.set_xlabel('Rating /5', fontsize=20)
ax3.set_ylabel('Number of Participants', fontsize=20)
plt.savefig(self.save_filepath/'AppRatings.png', bbox_inches='tight')
plt.savefig(self.save_filepath/'AppRatings.pdf', bbox_inches='tight')
sns.set()
def age_distribution(self, age):
"""
Create histogram of age distribution.
Parameters
----------
age : Panda.Series or numpy.array
The age column.
Returns
-------
Data visualisations saved in png and pdf format.
"""
self.age = self.dataframe[age]
plt.close()
sns.set_style('ticks', {'axes.spines.right': False,
'axes.spines.top': False,
'xtick.bottom': False})
fig, ax = plt.subplots(figsize=(8.7, 6))
bins = np.linspace(6.5, 18.5, 13)
print('\nPlotting age distribution..')
median_age = int(self.age.median())
print(f'\nMean age: {round(self.age.mean(),1)}, and median age: {median_age}.\n')
values, base = np.histogram(self.age, bins=bins)
percent = 100*(values/self.total_participants)
plt.hist(self.age, bins=bins, facecolor='none',
linewidth=2.5, edgecolor='#3366cc')
for i in range(len(values)):
if values[i]>0:
pcstr = '%.0f' % percent[i]
plt.text(base[i+1]-0.5, values[i]+(0.05*np.max(values)),
pcstr+'%', color='k', fontweight='bold',
va='center', ha='center', fontsize = 10)
[plt.text(7, ypos*np.max(values), text,
color='k', fontweight='bold',
va='center', ha = 'left',
fontsize = 12) for ypos, text in zip([0.98, 0.9],
[f'Participant Total: {str(self.total_participants)}',
f'Median age: {median_age} yrs'])]
plt.xlabel('Age of participant (years)', fontsize = 20)
plt.ylabel('Number of participants', fontsize = 20)
plt.xlim(6.4, 18.6)
plt.ylim(0, 1.1*np.max(values))
ax.xaxis.set_major_locator(plt.MaxNLocator(14))
plt.savefig(self.save_filepath/'AgeDistribution.png', bbox_inches='tight', dpi=600)
plt.savefig(self.save_filepath/'AgeDistribution.pdf', bbox_inches='tight')
sns.set()
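    # Example call, assuming the questionnaire dataframe has an 'Age' column
    # (the column name is an assumption used only for illustration):
    #   fa.age_distribution('Age')   # saves AgeDistribution.png / .pdf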
def gender(self, gender_column):
"""
Create pie chart showing the gender distribution.
Returns
-------
Saves figure to png and pdf files.
"""
print("Plotting the gender pie chart..")
plt.close()
cats = ['male', 'female', 'other']
self.dataframe[gender_column].replace({'o': 'other'}, inplace=True)
self.dataframe[gender_column] = pd.Categorical(self.dataframe[gender_column], categories=cats)
sizes = [self.dataframe[gender_column].value_counts().loc[gender] for gender in ['male', 'female']]
colors = [sns.xkcd_rgb['grey'], sns.xkcd_rgb['cerulean blue']]
labels = ['Male', 'Female']
plt.pie(sizes, labels=labels, colors=colors, autopct='%1.1f%%',
shadow=True, startangle=140, radius=0.8,
textprops=dict(fontsize=15))
        others = self.dataframe[gender_column].value_counts(sort=True).values[2]
plt.text(-0.5, -0.95, f'(Plus {others} participants answered "Other")', fontsize=9)
plt.axis('equal')
plt.savefig(self.save_filepath/'gender.pdf', bbox_inches='tight')
plt.title('Gender of participants', fontsize=15)
plt.savefig(self.save_filepath/'gender.png', bbox_inches='tight')
class CodeClubAnalysis(FeedbackAnalysis):
def __init__(self):
self.response_dict = {
'yes_dontknow_no': (['yes', 'dontknow', 'no'], 3,
['Yes', 'Dont Know', 'No']),
'yes_maybe_no': (['yes', 'maybe', 'no'], 3,
['Yes', 'Maybe', 'No']),
'very_to_not_interest': (['very', 'somewhat', 'unsure', 'notmuch', 'notatall'], 2,
['Very Interested', 'Somewhat Interested', 'Unsure', 'Not really intersted', 'Not intersted at all']),
'very_to_not_importance': (['very', 'somewhat', 'unsure', 'notmuch', 'notatall'], 2,
['Very Important', 'Somewhat Important', 'Unsure', 'Not really important', 'Not important at all']),
'verygood_to_bad': (['verygood', 'good', 'okay', 'bad'], 4,
['Very good', 'Good', 'Okay', 'Bad']),
'frequency': (['never', 'rarely', 'sometimes', 'regularly', 'veryregularly'], 3,
['Never', 'Rarely', 'Sometimes', 'Regularly', 'Very Regularly']),
'itaccess': (['no', 'home', 'HomeSchool', 'school'], 2,
['No Access', 'Access at Home', 'Access at both home and school', 'Access at School']),
'never_little_lot': (['No', 'Alittle', 'Alot'], 3,
['Never', 'A little', 'A lot']),
'know_language': (['knownothing', 'knowlittle', 'knowit', 'knowitwell', 'dontknow'], 3,
['Know Nothing', 'Know a Little', 'Know It', 'Know it Well', 'Dont Know'])
}
def analyse_question(self, data_column, cats, prepost_question=False):
"""
Generate the response statistics to the given question.
The Code Club specific version does not allow for gender comparison as
gender data is not avaible for all data sets.
Parameters
----------
data_column : str
The column name in the dataframe associated to the question.
cats : list
A list of the possible responses / categories for that question.
prepost_question : Boolean, optional
True if the question is present on the pre- and post- questionnaires
and we want to return a comparison of pre & post.
The default is False and creates one bar.
Returns
-------
responses : numpy.array
The percentage of each response.
samplesize : int
The number of non-null responses to this particular question.
"""
if prepost_question:
self.dataframe['Post_'+data_column] = pd.Categorical(self.dataframe['Post_'+data_column], categories=cats)
self.dataframe['Pre_'+data_column] = pd.Categorical(self.dataframe['Pre_'+data_column], categories=cats)
responses_pc_post = self.dataframe['Post_'+data_column].value_counts(normalize=True, sort=False).values
responses_pc_pre = self.dataframe['Pre_'+data_column].value_counts(normalize=True, sort=False).values
responses = np.array([responses_pc_post, responses_pc_pre])
samplesize = self.dataframe['Pre_'+data_column].value_counts(normalize=False).sum()
if not prepost_question:
self.dataframe[data_column] = pd.Categorical(self.dataframe[data_column], categories=cats)
samplesize = self.dataframe[data_column].value_counts(normalize=False, sort=False).values.sum()
responses = self.dataframe[data_column].value_counts(normalize=True, sort=False).values.reshape(1,-1)
self.responses = responses
return responses, samplesize
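    # Sketch of how analyse_question output is typically consumed (the column
    # name and category list below are assumptions for illustration, and
    # self.dataframe is assumed to have been populated elsewhere):
    #   cca = CodeClubAnalysis()
    #   resp, n = cca.analyse_question('EnjoyedClub', ['yes', 'maybe', 'no'])
    #   # resp has shape (1, len(cats)) because prepost_question defaults to False,
    #   # and each row sums to 1 since it comes from normalised value_counts.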
def britishcouncil_rating(self, data_column):
"""
Create histogram of the responses to the question:
"How attractive is the British Council to youth?" which asks
users to give a rating out of ten.
Parameters
----------
data_column : str
The column in the dataframe associated with this question.
Returns
-------
Saves the plot to pdf and pngs files.
"""
plt.close()
sns.set_style('ticks', {'axes.spines.right': False, 'axes.spines.top': False})
self.dataframe[data_column].plot(kind='hist', bins=np.linspace(-0.5, 10.5, 12))
plt.xlim(0.4, 10.4)
plt.xticks(ticks=range(1,11,1))
plt.xlabel('British Council Attractiveness to Youth (Rating /10)', fontsize=15)
plt.savefig(self.save_filepath/'BC_Rating.pdf', bbox_inches='tight')
plt.savefig(self.save_filepath/'BC_Rating.png', bbox_inches='tight')
sns.set()
| [
"matplotlib",
"seaborn"
] |
fc18875b23a1752f906795195aa7c8640f1fc7d1 | Python | jialiangdev/My-codes | /Data Mining/HW3/program/test.py | UTF-8 | 2,517 | 2.8125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Mon Mar 28 09:48:28 2016
@author: Jialiang Yu
"""
import numpy as np
import csv
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA, FastICA
M = 4
N = 3
#X = np.random.normal(size=[20,18])
#print X
A = [[2,0,1,3],[6,2,0,1],[0,1,4,6],[3,6,7,8]]
average = []
av = 0
for j in range(N):
for i in range(M):
av += A[i][j]
result = float(av) / M
average.append(result)
av = 0
for j in range(N):
for i in range(M):
A[i][j] -= average[j]
U,s,V = np.linalg.svd(A,full_matrices=False) # SVD decomposition
print U
print s
print V
"""
matrix = []
reader=csv.reader(open("E:\Purdue Courses\Second semester\CS573 Data Mining\CS573 homework\HW3\Q2.txt","rb"),delimiter=',')
data=list(reader)
for lst in data:
mylist = []
for i in range(len(lst)-1):
mylist.append(int(lst[i]))
matrix.append(mylist)
print matrix[681304]
#print np.matrix(matrix).shape
l = []
for i in range(681305):
l.append(matrix[i][1])
rl = sorted(l)
print rl
"""
"""
N = 50
x = np.random.rand(N)
y = np.random.rand(N)
colors = np.random.rand(N)
area = np.pi * (15 * np.random.rand(N))**2 # 0 to 15 point radiuses
plt.scatter(x, y, s=area, c=colors, alpha=0.6)
plt.show()
"""
"""
rng = np.random.RandomState(42)
S = rng.standard_t(1.5, size=(20000, 2))
S[:, 0] *= 2.
# Mix data
A = np.array([[1, 1], [0, 2]]) # Mixing matrix
X = np.dot(S, A.T) # Generate observations
pca = PCA()
S_pca_ = pca.fit(X).transform(X)
ica = FastICA(n_components = 5, algorithm='parallel', max_iter=100, tol=0.001)
S_ica_ = ica.fit(X).transform(X) # Estimate the sources
S_ica_ /= S_ica_.std(axis=0)
###############################################################################
# Plot results
def plot_samples(S, axis_list=None):
plt.scatter(S[:, 0], S[:, 1], s=2, marker='o', zorder=10,
color='steelblue', alpha=0.5)
plt.hlines(0, -5, 5)
plt.vlines(0, -5, 5)
plt.xlim(-5, 5)
plt.ylim(-5, 5)
plt.xlabel('x')
plt.ylabel('y')
axis_list = [pca.components_.T, ica.mixing_]
plt.subplot(2, 1, 1)
plot_samples(X / np.std(X), axis_list=axis_list)
plt.title('Observations')
plt.subplot(2, 1, 2)
plot_samples(S_ica_ / np.std(S_ica_))
plt.title('ICA recovered signals')
plt.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.36)
plt.show()
"""
| [
"matplotlib"
] |
31ed254bcdf7f5aa47d96fc6283ca4917b503e34 | Python | Aleksei741/Python_Lesson | /Уроки введение в высшую математику/Урок 3/Lesson_3_task_3_3.py | UTF-8 | 507 | 3.34375 | 3 | [] | no_license | import numpy as np
import matplotlib.pyplot as plt
n = 100
x = np.linspace(0, 10, n)
y = list()
for i in x:
y.append(2*i+1)
R = list()
alfa = list()
for i in range(n):
R.append(np.sqrt(x[i]**2 + y[i]**2))
alfa.append(np.arcsin(y[i] / R[i]))
print(alfa)
print(R)
#plt.plot(x, y)
plt.polar(alfa, R)
plt.title('Круг') # заголовок
plt.xlabel('х') # наименование оси абсцисс
plt.ylabel('у') # наименование оси ординат
plt.show()
| [
"matplotlib"
] |
f72cd73a696ccdb6449a493c98d5877607e4bf23 | Python | PierceB/N-Queens | /Results/plot.py | UTF-8 | 956 | 2.859375 | 3 | [] | no_license | from sys import argv
if len(argv) < 2:
print("Usage: {} [results] [output]".format(argv[0]))
exit(1)
import matplotlib.pyplot as plt
filename = argv[1]
output = argv[2]
f = open(filename, 'r')
serial_x = []
serial_souls = []
serial_y = []
f.readline() #Serial
for i in range(15):
data = f.readline().rstrip("\n").split()
serial_x.append(i + 1)
serial_souls.append(int(data[3]))
serial_y.append(float(data[5]))
plt.plot(serial_x[8:], serial_y[8:], label="Serial")
for i in range(9):
f.readline()
y = []
x = []
souls = []
for j in range(15):
data = f.readline().rstrip("\n").split()
x.append(j + 1)
souls.append(int(data[2][:-1]))
y.append(float(data[3]))
plt.plot(x[8:], y[8:], label="MPI ({} processes)".format(i + 1))
plt.legend()
plt.title("N (board length) vs. Serial/Parallel times")
plt.xlabel("N (board length)")
plt.ylabel("Time (s)")
#plt.plot(x, y[0], 'ro', x, y[1], 'go')
plt.savefig("{}.png".format(output))
plt.show()
| [
"matplotlib"
] |
569025fe05ff44e384a282eaee82b873e44ff0fb | Python | nstr1/data_visualisation | /lab1/graph.py | UTF-8 | 475 | 2.96875 | 3 | [] | no_license | import pandas as pd
import matplotlib.pyplot as plt
import pandas.api.types as ptypes
def graph(df, x_axis, y_axis):
if not ptypes.is_numeric_dtype(df[y_axis]):
df=df.groupby([x_axis,y_axis]).size()
df=df.unstack()
df.plot(kind='bar')
else:
df.plot(kind="line", x = x_axis, y = y_axis)
plt.xlabel(x_axis)
plt.ylabel(y_axis)
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left', borderaxespad=0.)
plt.show()
| [
"matplotlib"
] |
110f0c74f60f76329e624236d68bad640de407c4 | Python | dmlkcncat/OpenCv | /openCv/FaceRecognition.py | UTF-8 | 2,191 | 2.96875 | 3 | [] | no_license | #!/usr/bin/env python
# coding: utf-8
# # Face Recognition
# In[1]:
import numpy as np
import cv2
import matplotlib.pyplot as plt
# In[2]:
img = cv2.imread("selfie.jpeg")
# In[3]:
img
# In[4]:
img.shape
# In[5]:
plt.imshow(img)
# In[6]:
gray_scale = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# In[7]:
gray_scale
# In[8]:
print(gray_scale.shape)
# In[9]:
print(gray_scale.size)
# In[10]:
print(img.shape)
# In[11]:
print(img.size)
# In[12]:
plt.imshow(gray_scale);
# In[13]:
face_cascade = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
faces = face_cascade.detectMultiScale(gray_scale, 1.2, 5)
faces.shape
# In[14]:
faces
# In[15]:
for(x,y,w,h) in faces:
cv2.rectangle(img, (x,y), (x+w, y+h), (0,0,255), 4)
cv2.imshow("face detection", img)
cv2.waitKey(0)
cv2.destroyAllWindows()
# In[21]:
def detectFromImage (image):
face_cascade = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
img = cv2.imread(image)
gray_scale = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(gray_scale,1.2, 1)
for(x,y,w,h) in faces:
cv2.rectangle(img, (x,y), (x+w, y+h), (0,255,0), 4)
cv2.imshow("baslik", img)
cv2.waitKey()
cv2.destroyAllWindows()
# In[22]:
detectFromImage("selfie.jpeg")
# In[19]:
def detectFaces_EyesImage(image):
face_cascade = cv2.CascadeClassifier("haarcascade_frontalface_default.xml")
eye_cascade = cv2.CascadeClassifier("haarcascade_eye.xml")
img = cv2.imread(image)
gray_scale = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(gray_scale,1.4, 1)
for(x,y,w,h) in faces:
cv2.rectangle(img, (x,y), (x+w, y+h), (0,255,0), 4)
roi_gray = gray_scale [y:y+h, x:x+w]
roi_color = img[y:y+h, x:x+w]
eyes = eye_cascade.detectMultiScale(roi_gray)
for(ex,ey,ew,eh) in eyes:
cv2.rectangle(roi_color, (ex,ey), (ex+ew, ey+eh), (0,0,255), 2)
cv2.imshow("Faces and eyes detected.", img)
cv2.waitKey()
    cv2.destroyAllWindows()
# In[20]:
detectFaces_EyesImage("selfie3.jpg")
# In[ ]:
| [
"matplotlib"
] |
97fde076208a77b6b8c71560e647bdd6db00dc80 | Python | BPotet/BBC_project | /project/stats.py | UTF-8 | 3,461 | 2.65625 | 3 | [] | no_license | def fdr_correction(pval,qval=0.05):
import numpy as np
# Sort p-values
pval_S = list(pval)
pval_S.sort()
# Number of observations
N = len(pval)
# Order (indices), in the same size as the p-values
idx = np.array(range(1,N+1),dtype=float)
# Line to be used as cutoff
cV = np.sum(1/idx)
thrline = idx*qval/float(N*cV)
# Find the largest pval, still under the line
thr = max([p for i,p in enumerate(pval_S) if p<=thrline[i]])
return thr
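# Quick worked example for fdr_correction (numbers chosen arbitrarily):
#   fdr_correction([0.001, 0.02, 0.03, 0.4], qval=0.05)
# sorts the p-values and compares them against the line i*q/(N*c(N)) with
# c(N) = sum(1/i) ~ 2.083, giving cutoffs of roughly [0.006, 0.012, 0.018, 0.024];
# only 0.001 stays under its cutoff, so 0.001 is returned as the threshold.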
def SAM(X,y,N_shuffle=2):
# X has N_genes rows and N_cond colums
# y refers to the classes
from scipy import stats
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import t
N_genes = np.shape(X)[0]
N_cond = np.shape(X)[1]
idx0 = [i for i,yi in enumerate(y) if yi==0]
idx1 = [i for i,yi in enumerate(y) if yi==1]
X = np.array(X,dtype=float)
# t-test for each gene (also randomized)
tt = []
pval = []
tt_random = []
for i in range(N_genes):
ttest = stats.ttest_ind(X[i,idx0],X[i,idx1],equal_var=False)
t.append(ttest[0])
pval.append(ttest[1])
# compute N_shuffle permutations to calculate wx_random
tt_tmp = []
for j in range(0,N_shuffle):
X_random = shuffle_data(X)
tt_tmp.append(stats.ttest_ind(X_random[i,idx0],X_random[i,idx1],equal_var=False)[0])
tt_random.append(np.mean(tt_tmp))
# Sort
tt.sort()
tt_random.sort()
# Plot
plt.figure()
    plt.plot(tt_random,tt,'k.')
plt.xlabel('Expected Correlation (if random)')
plt.ylabel('Observed Correlation')
plt.grid()
plt.title('SAM')
# significance levels
fdr_pval = fdr_correction(pval)
corr_fdr = t.ppf(1-fdr_pval/2.0,N_cond-1)
xx = np.linspace(np.min(tt_random),np.max(tt_random),10)
plt.plot(xx,xx+corr_fdr,'k--')
plt.plot(xx,xx-corr_fdr,'k--')
plt.show()
def volcano_plot(X,y,pval=[],idx=[]):
import scipy.stats as ss
import numpy as np
import matplotlib.pyplot as plt
N_genes = np.shape(X)[0]
idx0 = [i for i,yi in enumerate(y) if yi==0]
idx1 = [i for i,yi in enumerate(y) if yi==1]
X = np.array(X,dtype=float)
p = [] # p-value
fc = [] # fold change
for i in range(N_genes):
corr = ss.pearsonr(X[i,:],y)
if len(pval)==0:
p.append(corr[1])
else:
p.append(pval[i])
fc.append(np.mean(X[i,idx1])/np.mean(X[i,idx0]))
significance_pval = fdr_correction(p)
plt.figure()
plt.plot(-np.log2(fc),-np.log10(p),'b.',zorder=0)
plt.xlabel('-log2(fold change)')
plt.ylabel('-log10(p-value)')
minix = min(-np.log2(fc))
maxix = max(-np.log2(fc))
mm = max(np.abs(minix),maxix)+0.1
fc = np.array(fc)
p = np.array(p)
plt.plot(-np.log2(fc[idx]),-np.log10(p[idx]),'r.',zorder=1)
plt.hlines(-np.log10(significance_pval),-mm,mm,'k',linestyles='dotted',lw=4,zorder=2)
plt.xlim([-mm,mm])
plt.title('Volcano plot')
plt.show()
def shuffle_data(X):
from random import shuffle
import numpy as np
# get shuffled columns indexes
    shuffled_idx = list(range(0, np.shape(X)[1]))  # list() so random.shuffle can permute it in place
shuffle(shuffled_idx)
# use shuffled_idx to randomize the columns of data
X_random = X[:,shuffled_idx]
return X_random
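# Minimal self-contained demo of the functions above (synthetic data only; the
# shapes, shift and labels are assumptions for illustration, not part of any
# original analysis pipeline):
if __name__ == '__main__':
    import numpy as np
    X_demo = np.random.rand(50, 20)            # 50 "genes" x 20 samples
    X_demo[:25, 10:] += 2.0                    # give half the genes a real effect
    y_demo = np.array([0] * 10 + [1] * 10)     # two balanced conditions
    SAM(X_demo, y_demo, N_shuffle=5)           # observed t-statistics vs. permutation nulls
    volcano_plot(X_demo, y_demo)               # fold change vs. p-value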
| [
"matplotlib"
] |
dc285bfda5f16e0d5bc4f18a2d52af12fa86f738 | Python | dawsboss/Math471 | /Exam2/Q3/Exam2Question3.py | UTF-8 | 2,957 | 3.3125 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: grant dawson
Question #3
"""
import math
#import scipy.optimize.bisectas as bi
import matplotlib.pyplot as plt
import numpy as np
FN = [
lambda x: math.pow(x, math.pow(x,x)) - 3,
]
names= [
'x^x^x - 3'
]
#This verrsion must be given n but we can cmpute n if we have the error tolerance
# n = (log(b-a)-log(eps)) / log(2)
def bist(f, a, b, eps, verbose = True):
#compute some values that are reused often
fa = f(a)
fb = f(b)
#check that we have a chance to succeed
if(fa*fb < 0):#This means it crossed over the x axis
n = int((math.log(b-a) - math.log(eps)) / math.log(2))+1
for i in range(1, n+1):
c = a + .5*(b-a)
fc = f(c)
if(verbose):
print(f"a: {a} | b: {b} | c: {c} | fa: {fa} | fb: {fb} | fc: {fc}")
if fa*fc < 0:
b = c
fb = fc
elif fa*fc > 0:
a = c
fa = fc
else:
break
return (c,n)
else:
print("No roots in interval")
return False
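# Worked example of the iteration bound used above: for [a, b] = [1, 2] and
# eps = 1e-5, n = int((ln(1) - ln(1e-5)) / ln 2) + 1 = int(16.6) + 1 = 17,
# which guarantees a final bracket width of 2**-17 * (b - a) < 1e-5.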
err = 10**-5
func = FN[0]
name = names[0]
a = 1
b = 2
result, n = bist(func, a, b, err, verbose=False);
print(f" Bisection f(x) = {name} | [{a}, {b}] |\
      Root: fx = {result} iteration: {n}");
print()
print()
def Error_Newton_Method( f, x0, err, df=False, verbose = False):
rtn = x0
count = 0
old = 0.0
if(df):
while(count<10000):
count = count +1
old = rtn
rtn = rtn - (f(rtn) / df(rtn))
if(abs(rtn-old) < err):
break
else:
df = derivatice_approx
while(count < 10000):
count = count +1
old = rtn
rtn = rtn - (f(rtn) / df(f, rtn, .000001))
if(abs(rtn-old) < err):
break
return (rtn,count)
def derivatice_approx( f, x, h ):
return (8.0*f(x+h) - 8.0*f(x-h) - f(x+2.0*h) + f(x-2.0*h)) / (12.0*h)
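# The stencil above is the standard fourth-order central difference
#   f'(x) ~ (-f(x+2h) + 8*f(x+h) - 8*f(x-h) + f(x-2h)) / (12*h)
# with O(h^4) truncation error.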
err = 10**-5
func = FN[0]
name = names[0]
x0 = 1
result, n = Error_Newton_Method(func, x0, err);
print(f" Newton's Method f(x) = {name} | x0: {x0} |\
      Root: fx = {result} iteration: {n}");
print()
print()
def regularFalsi(f, a, b, err):
fa = f(a)
fb = f(b)
count = 0
rtn = 0.0
while(count < 10000):
count = count +1
old = rtn
rtn = (a*fb - b*fa) / (fb - fa)
fr = f(rtn)
if(abs(fr) < err):
break
if(fa*fr > 0):
a = rtn
else:
b = rtn
return (rtn, count)
err = 10**-5
func = FN[0]
name = names[0]
a = 1
b = 2
result, n = regularFalsi(func, a, b, err);
print(f" regular Falsi f(x) = {name} | [{a}, {b}] |\
      Root: fx = {result} iteration: {n}");
| [
"matplotlib"
] |
074e3681f5633e2e82bab70b6f22cb3c8e66d94e | Python | indraastra/ml-sandbox | /cv/filter_avg.py | UTF-8 | 976 | 3.171875 | 3 | [] | no_license | import argparse
import math
import time
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import numpy as np
from scipy.ndimage import convolve
parser = argparse.ArgumentParser(description='Average an image.')
parser.add_argument('image', metavar='I', type=str,
help='the image to quantize')
parser.add_argument('neighborhood', metavar='N', type=int,
help='the LxL neighborhood of pixels to average over')
if __name__ == '__main__':
args = parser.parse_args()
N = args.neighborhood
print('Average {} with neighborhood of {} pixels'.format(args.image, N))
image = mpimg.imread(args.image)
channels = []
for i in range(3):
channel = image[:, :, i]
channels.append(convolve(channel, np.ones((N, N)) / (N ** 2), mode='constant'))
avg_image = np.dstack(channels)
plt.subplot(211)
plt.axis('off')
plt.imshow(image)
plt.subplot(212)
plt.axis('off')
plt.imshow(avg_image)
plt.show()
| [
"matplotlib"
] |
b92f90854e62bb7ce7dce8059043b98c2c973f68 | Python | note0009/restore | /restore.py | UTF-8 | 1,633 | 2.53125 | 3 | [] | no_license | import numpy as np
from numpy import uint8
from skimage.color import rgb2gray
import matplotlib.pyplot as plt
plt.gray();
import cv2
im = cv2.imread('sw.jpg')
im = rgb2gray(im)
width,height=200,260
im= cv2.resize(im,(width,height))
rows,cols = im.shape
fx = np.fft.fft2(im)
fx2 = np.fft.fftshift(fx)  # Fourier transform of the loaded image, shifted so the zero frequency is centred
im3 = 20*np.log(np.abs(fx2))
mask = np.zeros((rows,cols))
img_back = np.zeros((rows,cols),dtype=complex)
img_backs = np.zeros((rows,cols),dtype=complex)
mask2 = np.zeros((rows,cols),dtype=complex)
mask3 = np.zeros((rows,cols),dtype=complex)
cv2.namedWindow(winname='mask')
cv2.namedWindow(winname='wave')
cv2.namedWindow(winname='re')
cv2.namedWindow(winname='spe')
cv2.namedWindow(winname='ori')
def draw_circle(event,x,y,flags,param):
global img_back, img_backs, mask3, mask2, mask
if flags == cv2.EVENT_LBUTTONDOWN:
mask[y,x]=1
        mask3[y,x] = fx2[y,x]  # copy the clicked frequency component
        mask2 = np.zeros((rows,cols),dtype=complex)  # mask2 is reset so only the latest click's waveform is shown
mask2[y,x] = fx2[y,x]
img_backs = np.fft.ifftshift(mask2)
img_back = np.fft.ifftshift(mask3)
cv2.setMouseCallback('mask',draw_circle)
while True:
im3_u = im3.astype(uint8)
cv2.imshow('spe',im3_u)
cv2.imshow('mask',mask)
    cv2.imshow('wave',np.fft.ifft2(img_backs).real)  # inverse Fourier transform and display
cv2.imshow('re',np.fft.ifft2(img_back).real)
cv2.imshow('ori',im)
cv2.namedWindow(winname='mask')
cv2.setMouseCallback('mask',draw_circle)
if cv2.waitKey(20) & 0xFF == 27:
break
cv2.destroyAllWindows() | [
"matplotlib"
] |
eae44feac340c0dfdb9a95c61e9e26724b24239c | Python | grayjphys/Cleveland_Clinic | /codes/ca_on_graph_E._Coli.py | UTF-8 | 19,445 | 3.203125 | 3 | [] | no_license | import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
#fig, axes = plt.subplots(1,cycle+star+complete,sharex=True,sharey = True)
def get_line(start, end):
#Bresenham's Line Algorithm
# Setup initial conditions
x1, y1 = start
x2, y2 = end
dx = x2 - x1
dy = y2 - y1
# Determine how steep the line is
is_steep = abs(dy) > abs(dx)
# Rotate line
if is_steep:
x1, y1 = y1, x1
x2, y2 = y2, x2
# Swap start and end points if necessary and store swap state
swapped = False
if x1 > x2:
x1, x2 = x2, x1
y1, y2 = y2, y1
swapped = True
# Recalculate differentials
dx = x2 - x1
dy = y2 - y1
# Calculate error
error = int(dx / 2.0)
ystep = 1 if y1 < y2 else -1
# Iterate over bounding box generating points between start and end
y = y1
points = []
for x in range(x1, x2 + 1):
coord = (y, x) if is_steep else (x, y)
points.append(coord)
error -= abs(dy)
if error < 0:
y += ystep
error += dx
# Reverse the list if the coordinates were swapped
if swapped:
points.reverse()
return points
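# For example, get_line((0, 0), (3, 2)) returns [(0, 0), (1, 1), (2, 1), (3, 2)]:
# the integer grid points approximating the segment, endpoints included.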
def return_edge_pos(node_pos1,node_pos2,width):
x1 = node_pos1[0][0]
y1 = node_pos1[0][1]
x2 = node_pos2[0][0]
y2 = node_pos2[0][1]
y_ = []
if x1 != x2:
m = (y2-y1)/(x2-x1)
c = round(width*(1+m**2)**0.5)
for d in np.arange(-c,c+1):
y = get_line((x1,y1+d),(x2,y2+d))
for point in y:
if point in node_pos1 or point in node_pos2 or ((point[0]-x1)**2+(point[1]-y1)**2)**0.5 > ((x2-x1)**2+(y2-y1)**2)**0.5 or ((point[0]-x2)**2+(point[1]-y2)**2)**0.5 > ((x2-x1)**2+(y2-y1)**2)**0.5:
y =[p for p in y if p != point]
for point in y:
y_.append(point)
pass
else:
for d in np.arange(-width,width+1):
y = get_line((x1+d,y1),(x2+d,y2))
for point in y:
if point in node_pos1 or point in node_pos2 or ((point[0]-x1)**2+(point[1]-y1)**2)**0.5 > ((x2-x1)**2+(y2-y1)**2)**0.5 or ((point[0]-x2)**2+(point[1]-y2)**2)**0.5 > ((x2-x1)**2+(y2-y1)**2)**0.5:
y =[p for p in y if p != point]
for point in y:
y_.append(point)
return y_
'''Cycle Graph'''
def cycle(num_nodes,radius,length,width,n_row,n_col):
shiftx = (n_col-1)//2 + length
shifty = (n_row-1)//2
internal_angle = 2*np.pi/num_nodes
node_pos_dict = {}
edge_pos_dict = {}
for node in range(num_nodes):
x = int((length/2*np.sin(np.pi/num_nodes))*np.cos(node*internal_angle+np.pi/2))
y = int((length/2*np.sin(np.pi/num_nodes))*np.sin(node*internal_angle+np.pi/2))
pos = [(x,y)]
for x_ in np.arange(x-radius,x+radius+1):
for y_ in np.arange(y-radius,y+radius+1):
if (x-x_)**2 + (y-y_)**2 <= radius**2 and (x_,y_) not in pos:
pos.append((x_,y_))
node_pos_dict[node] = pos
node_pos = list(node_pos_dict.values())
for edge in range(num_nodes):
edge_pos_dict[edge] = return_edge_pos(node_pos[edge],node_pos[(edge+1)%num_nodes],width)
edge_pos = list(edge_pos_dict.values())
num_edges = len(edge_pos)
n_pts = []
for n in range(num_nodes):
n_pts.extend(node_pos[n])
e_pts = []
for e in range(num_edges):
e_pts.extend(edge_pos[e])
points = n_pts + e_pts
count = 0
for point in points:
p = (point[0]+shiftx,point[1]+shifty)
points[count] = (p[0],p[1])
count+=1
# visualization_matrix = np.zeros((n_row,n_col))
# for p in points:
# visualization_matrix[p] = 1
# plt.imshow(visualization_matrix,interpolation='none',cmap='seismic',vmin=0,vmax=2)
# plt.show()
return points, node_pos
'''Star Graph'''
def star(num_nodes,radius,length,width,n_row,n_col):
shiftx = (n_col-1)//2 + length
shifty = (n_row-1)//2
internal_angle = 2*np.pi/(num_nodes -1)
node_pos_dict = {}
edge_pos_dict = {}
for node in range(num_nodes):
if node == 0:
x = 0
y = 0
else:
x = int((length/2*np.sin(np.pi/(num_nodes-1)))*np.cos((node-1)*internal_angle+np.pi/2))
y = int((length/2*np.sin(np.pi/(num_nodes-1)))*np.sin((node-1)*internal_angle+np.pi/2))
pos = [(x,y)]
for x_ in np.arange(x-radius,x+radius+1):
for y_ in np.arange(y-radius,y+radius+1):
if (x-x_)**2 + (y-y_)**2 <= radius**2 and (x_,y_) not in pos:
pos.append((x_,y_))
node_pos_dict[node] = pos
node_pos = list(node_pos_dict.values())
for edge in range(1,num_nodes):
edge_pos_dict[edge] = return_edge_pos(node_pos[0],node_pos[edge],width)
edge_pos = list(edge_pos_dict.values())
num_edges = len(edge_pos)
n_pts = []
for n in range(num_nodes):
n_pts.extend(node_pos[n])
e_pts = []
for e in range(num_edges):
e_pts.extend(edge_pos[e])
points = n_pts + e_pts
count = 0
for point in points:
p = (point[0]+shiftx,point[1]+shifty)
points[count] = (p[0],p[1])
count+=1
# visualization_matrix = np.zeros((n_row,n_col))
# for p in points:
# visualization_matrix[p] = 1
# plt.imshow(visualization_matrix,interpolation='none',cmap='seismic',vmin=0,vmax=2)
# plt.show()
return points, node_pos
'''Complete Graph'''
def complete(num_nodes,radius,length,width,n_row,n_col):
shiftx = (n_col-1)//2 + length
shifty = (n_row-1)//2
internal_angle = 2*np.pi/num_nodes
node_pos_dict = {}
edge_pos_dict = {}
for node in range(num_nodes):
x = int((length/2*np.sin(np.pi/num_nodes))*np.cos(node*internal_angle+np.pi/2))
y = int((length/2*np.sin(np.pi/num_nodes))*np.sin(node*internal_angle+np.pi/2))
pos = [(x,y)]
for x_ in np.arange(x-radius,x+radius+1):
for y_ in np.arange(y-radius,y+radius+1):
if (x-x_)**2 + (y-y_)**2 <= radius**2 and (x_,y_) not in pos:
pos.append((x_,y_))
node_pos_dict[node] = pos
node_pos = list(node_pos_dict.values())
e = 0
list_e = []
for edge in range(num_nodes):
for edge2 in range(num_nodes):
if edge2 != edge:
list_e.append((edge,edge2))
if (edge2,edge) not in list_e:
edge_pos_dict[e] = return_edge_pos(node_pos[edge],node_pos[edge2],width)
e += 1
edge_pos = list(edge_pos_dict.values())
num_edges = e
n_pts = []
for n in range(num_nodes):
n_pts.extend(node_pos[n])
e_pts = []
for e in range(num_edges):
e_pts.extend(edge_pos[e])
points = n_pts + e_pts
count = 0
for point in points:
p = (point[0]+shiftx,point[1]+shifty)
points[count] = (p[0],p[1])
count+=1
# visualization_matrix = np.zeros((n_row,n_col))
# for p in points:
# visualization_matrix[p] = 1
# plt.imshow(visualization_matrix,interpolation='none',cmap='seismic',vmin=0,vmax=2)
# plt.show()
return points, node_pos
from numba import jit, int64
import random
#### METHODS TO BUILD THE WORLD OF OUR CA ####
def build_neighbor_pos_dictionary(n_row, n_col,type_graph,num_nodes,radius,length,width):
"""
Create dictionary containing the list of all neighbors (value) for a central position (key)
:param n_row:
:param n_col:
:return: dictionary where the key= central position, and the value=list of neighboring positions around that center
"""
if type_graph == "cycle":
list_of_all_pos_in_ca, list_node_pos = cycle(num_nodes,radius,length,width,n_row,n_col)
if type_graph == "star":
list_of_all_pos_in_ca, list_node_pos = star(num_nodes,radius,length,width,n_row,n_col)
if type_graph == "complete":
list_of_all_pos_in_ca, list_node_pos = complete(num_nodes,radius,length,width,n_row,n_col)
dict_of_neighbors_pos_lists = {pos: build_neighbor_pos_list(pos, list_of_all_pos_in_ca) for pos in list_of_all_pos_in_ca}
return dict_of_neighbors_pos_lists, list_node_pos
def build_neighbor_pos_list(pos, list_of_all_pos_in_ca):
"""
Use list comprehension to create a list of all positions in the wild_type's Moore neighborhood.
Valid positions are those that are within the confines of the domain (n_row, n_col)
and not the same as the wild_type's current position.
:param pos: wild_type's position; tuple
:param n_row: maximum width of domain; integer
:param n_col: maximum height of domain; integer
:return: list of all valid positions around the wild_type
"""
# Unpack the tuple containing the wild_type's position
r, c = pos
l = [(r+i, c+j)
for i in [-1, 0, 1]
for j in [-1, 0, 1]
if (r+i,c+j) in list_of_all_pos_in_ca
if not (j == 0 and i == 0)]
return l
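# For example, pos = (5, 5) yields the (up to 8) Moore neighbours
# (4,4), (4,5), (4,6), (5,4), (5,6), (6,4), (6,5), (6,6), keeping only those
# that actually lie on the graph skeleton (i.e. appear in list_of_all_pos_in_ca).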
#### METHODS TO SPEED UP EXECUTION ####
binomial = np.random.binomial
shuffle = np.random.shuffle
random_choice = random.choice
#@jit(int64(), nopython=True)
def divide_q(prob):
DIVISION_PROB = prob #1 / 24.0
verdict = binomial(1, DIVISION_PROB)
return verdict
@jit(int64(), nopython=True)
def die_q():
DEATH_PROB = 0.01 #1 / 100.0
verdict = binomial(1, DEATH_PROB)
return verdict
#### CREATE WILD_TYPE CLASSES ####
class Mutant(object):
def __init__(self, pos, dictionary_of_neighbor_pos_lists):
self.pos = pos
self.divisions_remaining = 10
self.neighbor_pos_list = dictionary_of_neighbor_pos_lists[self.pos]
self.PLOT_ID = 2
def locate_empty_neighbor_position(self, agent_dictionary):
"""
Search for empty positions in Moore neighborhood. If there is more thant one free position,
randomly select one and return it
:param agent_dictionary: dictionary of agents, key=position, value = wild_type; dict
:return: Randomly selected empty position, or None if no empty positions
"""
empty_neighbor_pos_list = [pos for pos in self.neighbor_pos_list if pos not in agent_dictionary]
if empty_neighbor_pos_list:
empty_pos = random_choice(empty_neighbor_pos_list)
return empty_pos
else:
return None
def act(self, agent_dictionary, dictionary_of_neighbor_pos_lists):
"""
        Mutant carries out its actions, which are division and death. Mutant will divide if it is lucky and
        there is an empty position in its neighborhood. Mutant dies either spontaneously or if it exceeds its
        maximum number of divisions.
        :param agent_dictionary: dictionary of agents, key=position, value = mutant; dict
:return: None
"""
#### WILD_TYPE TRIES TO DIVIDE ####
divide = divide_q(1/12)
if divide == 1:
empty_pos = self.locate_empty_neighbor_position(agent_dictionary)
if empty_pos is not None:
#### CREATE NEW DAUGHTER WILD_TYPE AND IT TO THE WILD_TYPE DICTIONARY ####
daughter_mutant = Mutant(empty_pos, dictionary_of_neighbor_pos_lists)
agent_dictionary[empty_pos] = daughter_mutant
self.divisions_remaining -= 1
#### DETERMINE IF WILD_TYPE WILL DIE ####
spontaneous_death = die_q()
if self.divisions_remaining <= 0 or spontaneous_death == 1:
del agent_dictionary[self.pos]
class Wild_Type(object):
def __init__(self, pos, dictionary_of_neighbor_pos_lists):
self.pos = pos
self.divisions_remaining = 10
self.neighbor_pos_list = dictionary_of_neighbor_pos_lists[self.pos]
self.PLOT_ID = 1
def locate_empty_neighbor_position(self, agent_dictionary):
"""
Search for empty positions in Moore neighborhood. If there is more thant one free position,
randomly select one and return it
:param agent_dictionary: dictionary of agents, key=position, value = wild_type; dict
:return: Randomly selected empty position, or None if no empty positions
"""
empty_neighbor_pos_list = [pos for pos in self.neighbor_pos_list if pos not in agent_dictionary]
if empty_neighbor_pos_list:
empty_pos = random_choice(empty_neighbor_pos_list)
return empty_pos
else:
return None
def act(self, agent_dictionary, dictionary_of_neighbor_pos_lists):
"""
Wild_Type carries out its actions, which are division and death. Wild_Type will divide if it is lucky and
there is an empty position in its neighborhood. Wild_Type dies either spontaneously or if it exceeds its
maximum number of divisions.
:param agent_dictionary: dictionary of agents, key=position, value = wild_type; dict
:return: None
"""
#### WILD_TYPE TRIES TO DIVIDE ####
divide = divide_q(1/24)
if divide == 1:
empty_pos = self.locate_empty_neighbor_position(agent_dictionary)
if empty_pos is not None:
#### CREATE NEW DAUGHTER WILD_TYPE AND IT TO THE WILD_TYPE DICTIONARY ####
daughter_wild_type = Wild_Type(empty_pos, dictionary_of_neighbor_pos_lists)
agent_dictionary[empty_pos] = daughter_wild_type
self.divisions_remaining -= 1
#### DETERMINE IF WILD_TYPE WILL DIE ####
spontaneous_death = die_q()
if self.divisions_remaining <= 0 or spontaneous_death == 1:
del agent_dictionary[self.pos]
if __name__ == "__main__":
import time
start = time.time()
GRAPH = "complete"
MAX_REPS = 1000
NUM_NODES = 2
RADIUS = 10
LENGTH = 10*(NUM_NODES**2)
WIDTH = 3
N_ROW = 2*RADIUS + 2
N_COL = 4*RADIUS + LENGTH
DICT_NEIGHBOR_POS, NODE_POS = build_neighbor_pos_dictionary(N_ROW,N_COL,GRAPH,NUM_NODES,RADIUS,LENGTH,WIDTH)
DICT_NEIGHBOR_POS2, NODE_POS2 = build_neighbor_pos_dictionary(N_ROW+4*RADIUS,N_COL,GRAPH,NUM_NODES,RADIUS,LENGTH*1.5,WIDTH)
DICT_NEIGHBOR_POS3, NODE_POS3 = build_neighbor_pos_dictionary(N_ROW+8*RADIUS,N_COL,GRAPH,NUM_NODES,RADIUS,LENGTH*2,WIDTH)
DICT_NEIGHBOR_POS4, NODE_POS4 = build_neighbor_pos_dictionary(N_ROW+12*RADIUS,N_COL,GRAPH,NUM_NODES,RADIUS,LENGTH*2.5,WIDTH)
# cdict = {
# 'red': ((0.0, 0.0, 0.0),
# (1.0, 1.0, 1.0)),
# 'blue':((0.0, 0.0, 0.0),
# (1.0, 0.0, 0.0)),
# 'green':((0.0, 0.0, 1.0),
# (1.0, 1.0, 1.0))
# }
# cell_cmap = mcolors.LinearSegmentedColormap('my_colormap', cdict, 100)
center_r = NODE_POS[0][0][0] + (N_ROW - 1)//2
center_c = NODE_POS[0][0][1] + (N_COL - 1)//2
center_pos = (center_r,center_c)
center_r2 = NODE_POS2[0][0][0] + (N_ROW+4*RADIUS - 1)//2
center_c2 = NODE_POS2[0][0][1] + (N_COL - 1)//2
center_pos2 = (center_r2,center_c2)
center_r3 = NODE_POS3[0][0][0] + (N_ROW+8*RADIUS - 1)//2
center_c3 = NODE_POS3[0][0][1] + (N_COL - 1)//2
center_pos3 = (center_r3,center_c3)
center_r4 = NODE_POS4[0][0][0] + (N_ROW+12*RADIUS - 1)//2
center_c4 = NODE_POS4[0][0][1] + (N_COL - 1)//2
center_pos4 = (center_r4,center_c4)
initial_cell = Mutant(center_pos,DICT_NEIGHBOR_POS)
initial_cell2 = Mutant(center_pos2,DICT_NEIGHBOR_POS2)
initial_cell3 = Mutant(center_pos3,DICT_NEIGHBOR_POS3)
initial_cell4 = Mutant(center_pos4,DICT_NEIGHBOR_POS4)
cell_dict = {center_pos:initial_cell}
cell_dict2 = {center_pos2:initial_cell2}
cell_dict3 = {center_pos3:initial_cell3}
cell_dict4 = {center_pos4:initial_cell4}
for i in range(1,NUM_NODES):
center_r = NODE_POS[i][0][0] + (N_ROW - 1)//2
center_c = NODE_POS[i][0][1] + (N_COL - 1)//2
center_pos = (center_r,center_c)
center_r2 = NODE_POS2[i][0][0] + (N_ROW+4*RADIUS - 1)//2
center_c2 = NODE_POS2[i][0][1] + (N_COL - 1)//2
center_pos2 = (center_r2,center_c2)
center_r3 = NODE_POS3[i][0][0] + (N_ROW+8*RADIUS - 1)//2
center_c3 = NODE_POS3[i][0][1] + (N_COL - 1)//2
center_pos3 = (center_r3,center_c3)
center_r4 = NODE_POS4[i][0][0] + (N_ROW+12*RADIUS - 1)//2
center_c4 = NODE_POS4[i][0][1] + (N_COL - 1)//2
center_pos4 = (center_r4,center_c4)
next_cell = Wild_Type(center_pos,DICT_NEIGHBOR_POS)
next_cell2 = Wild_Type(center_pos2,DICT_NEIGHBOR_POS2)
next_cell3 = Wild_Type(center_pos3,DICT_NEIGHBOR_POS3)
next_cell4 = Wild_Type(center_pos4,DICT_NEIGHBOR_POS4)
cell_dict[center_pos] = next_cell
cell_dict2[center_pos2] = next_cell2
cell_dict3[center_pos3] = next_cell3
cell_dict4[center_pos4] = next_cell4
for rep in range(MAX_REPS):
if rep % 5 == 0:
visualization_matrix = np.zeros((N_ROW+12*RADIUS,N_COL + int(2.5*LENGTH)))
for cell in cell_dict.values():
visualization_matrix[cell.pos] = cell.PLOT_ID
for cell in cell_dict2.values():
visualization_matrix[cell.pos] = cell.PLOT_ID
for cell in cell_dict3.values():
visualization_matrix[cell.pos] = cell.PLOT_ID
for cell in cell_dict4.values():
visualization_matrix[cell.pos] = cell.PLOT_ID
plt.imshow(visualization_matrix,interpolation='none',cmap='seismic')
# if GRAPH == "star":
# img_name = "C:\\Users\\Jason\\Desktop\\Clinic Research\\CellAUtomAta\\Codes\\images_star_E._Coli\\" + str(rep).zfill(5) + '.jpg'
# elif GRAPH == "cycle":
# img_name = "C:\\Users\\Jason\\Desktop\\Clinic Research\\CellAUtomAta\\Codes\\images_cycle_E._Coli\\" + str(rep).zfill(5) + '.jpg'
# elif GRAPH == "complete":
# img_name = "C:\\Users\\Jason\\Desktop\\Clinic Research\\CellAUtomAta\\Codes\\images_complete_E._Coli\\" + str(rep).zfill(5) + '.jpg'
# plt.savefig(img_name)
plt.show()
plt.close()
cell_list = list(cell_dict.values())
cell_list2 = list(cell_dict2.values())
cell_list3 = list(cell_dict3.values())
cell_list4 = list(cell_dict4.values())
shuffle(cell_list)
shuffle(cell_list2)
shuffle(cell_list3)
shuffle(cell_list4)
for cell in cell_list:
cell.act(cell_dict,DICT_NEIGHBOR_POS)
for cell in cell_list2:
cell.act(cell_dict2,DICT_NEIGHBOR_POS2)
for cell in cell_list3:
cell.act(cell_dict3,DICT_NEIGHBOR_POS3)
for cell in cell_list4:
cell.act(cell_dict4,DICT_NEIGHBOR_POS4)
end = time.time()
total = end-start
print("total time", total) | [
"matplotlib"
] |
9ea49133a35711964b622ad66dc173f498104e7a | Python | AmilaWeerasinghe/Machine-learning-Labs | /Lab 02/Submisson/RandomWalk_E_15_385.py | UTF-8 | 1,316 | 3.734375 | 4 | [] | no_license | from random import randrange
import random
import numpy as np
import matplotlib.pyplot as plt
# starting position generated randomlly
start = randrange(30)
positions = [start]
# numpy.random.random generates samples from the uniform distribution on [0, 1)
# That uniform draw is used to decide between the two moves with equal probability
# The thresholds below set the probability of moving up or down
# initialize the decision margins between 0 and 1; 0.45 and 0.55 are chosen here
margin = [0.45, 0.55]
# create 500 random numbers, one per step of the walk
randomNum = np.random.random(500)
# now we have 500 random numbers generated
# boolean arrays decide between the two equally likely moves, +1 or -1
# if random number generated less than the margin go down
GoDown = randomNum < margin[0]
# if higher go up
GoUp = randomNum > margin[1]
# combine the two boolean arrays (whether to go up or down at each step)
UpDown = zip(GoDown, GoUp)
# depending on the True or False status add the randomly generated number into the positions array
for idownp, iupp in UpDown:
down = idownp
up = iupp
positions.append(positions[-1] - down + up)
# plotting down the positions array which is the graph of the random walk
plt.plot(positions)
plt.show()
| [
"matplotlib"
] |
44b69e9a48156a7cd8c2ea602f27d5d99c501774 | Python | Color4/2017_code_updata_to_use | /绘图代码/plot.py | UTF-8 | 299 | 3.375 | 3 | [] | no_license | import matplotlib.pyplot as plt
import numpy as np
x = np.arange(0,10,0.1)
y1 = 0.05*x**2
y2 = -1*y1
fig,ax1 = plt.subplots()
ax2 = ax1.twinx()
ax1.plot(x,y1,'g-')
ax1.set_xlabel('X data')
ax1.set_ylabel('Y1 data',color = 'g')
ax2.plot(x,y2,'b-')
ax2.set_ylabel('Y2 data',color = 'b')
plt.show()
| [
"matplotlib"
] |
c77143bbde5252dff55644e90764fc578ae33ddf | Python | MJZ-98/machine_learning | /normal_eq.py | UTF-8 | 811 | 2.828125 | 3 | [] | no_license | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
df = pd.read_csv("C:/Users/MJZ/Desktop/something/machine learn/dataset/housing.csv",sep='\s+',
header=None,names=['CRIM','ZN','INDUS','CHAS','NOX','RM','AGE','DIS','RAD','TAX','PTRATIO','B','LSTAT','MEDV'])
x = df[['CRIM']].values
y = df.MEDV.values*1000
fig,(ax1,ax2) = plt.subplots(2,1)
def addBias(X):
m = X.shape[0]
ones = np.ones((m, 1))
return np.hstack((ones,X))
def normal_eq(X,y):
X = addBias(X)
return np.linalg.inv(X.T@X)@X.T@y
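# Closed-form least squares ("normal equation"): theta = (X^T X)^(-1) X^T y,
# where X already contains the bias column prepended by addBias().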
T = normal_eq(x,y)
ax1.scatter(x,y)
ax1.plot(x,T[0]+T[1]*x)
ax1.set_title("normal_eq")
print(T,x,T*x)
norm_y = (y-y.mean())/(y.max()-y.min())
T = normal_eq(x,norm_y)
ax2.scatter(x,norm_y)
ax2.plot(x,T[0]+T[1]*x)
ax2.set_title("after normalization")
plt.show() | [
"matplotlib"
] |
9f5b20f962a64ced06161a0f20b64b6ce2128779 | Python | xiaobailong653/machine_learn | /ml_share/plot_3d_surface.py | UTF-8 | 495 | 2.515625 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
from matplotlib.ticker import (
LinearLocator,
FormatStrFormatter)
import matplotlib.pyplot as plt
import numpy as np
fig = plt.figure()
ax = fig.gca(projection='3d')
X = np.arange(-5, 5, 0.25)
Y = np.arange(-5, 5, 0.25)
X, Y = np.meshgrid(X, Y)
Z = X**2 + Y**2
ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap=cm.coolwarm, linewidth=0, antialiased=False)
plt.show()
| [
"matplotlib"
] |
5734c11cb127330634d8721623c42bfc558de35e | Python | PACarrascoGomez/Proyectos | /Negociacion Bilateral/negociacion.py | UTF-8 | 18,360 | 2.84375 | 3 | [] | no_license | import random
import math
import xml.etree.ElementTree as ET
from datetime import datetime
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
#########################################################
# Author: Pascual Andres Carrasco Gomez
# Course: Multi-agent systems (SMA)
# Project: Automated bilateral negotiation environment
# Language: python2.7
#########################################################
# NOTE: For the program to run, the following packages
# must be installed:
# apt-get install python-matplotlib
# apt-get install python-tk
# Load the domain
from dominio import *
# Load the utility functions
from funciones_utilidad import *
# Load the concession function
from estrategias_concesion import *
# Load the acceptance functions
from estrategias_aceptacion import *
#--------------------------------------------------------
# IMPORT DATA FROM AN XML FILE
#--------------------------------------------------------
def importar_xml(fichero):
datos = {}
tree = ET.parse(fichero)
root = tree.getroot()
    datos['nombre_agente'] = root[0].text # Agent name
    datos['n_atributos'] = int(root[1].text) # Number of attributes
    datos['t_oferta'] = int(root[2][0].text) # Offer type
    datos['t_fu'] = int(root[3][0].text) # Utility function type
    datos['t_concesion'] = int(root[4][0].text) # Concession type
    datos['p_concesion'] = root[4][1].text.split(" ") # Concession parameters
    datos['s_inicial'] = float(root[4][2].text) # Initial concession (s)
    datos['t_aceptacion'] = float(root[5][0].text) # Acceptance criterion
    datos['s_aceptacion'] = float(root[5][1].text) # Minimum concession (s) the agent will accept
    datos['tiempo'] = int(root[6].text) # Negotiation time
return datos
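# Expected agente*.xml layout, as read above: root[0] = agent name,
# root[1] = number of attributes, root[2][0] = offer type, root[3][0] = utility
# function type, root[4][0..2] = concession (type, parameters, initial s),
# root[5][0..1] = acceptance (criterion, minimum s), root[6] = deadline in seconds.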
#--------------------------------------------------------
# OFFER GENERATION STRATEGIES
#--------------------------------------------------------
# Genetic algorithm for generating offers
# agente: 1 = agent1 ; 2 = agent2
# ite: number of iterations
# nip: number of individuals per population
# size_v: number of elements in the offer vector v
# s_max: current maximum concession value
# s_min: current minimum concession value
# op: option selecting which offer to return (1: max; 2: min; 3: random)
# op_fu: utility function of the agent invoking this function
# tipos_oferta: offer types defined in the domain
# rango_valores_oferta: values each offer component may take, as defined in the domain
def gen_ofertas_genetico(agente,ite,nip,size_v,s_max,s_min,op,op_fu,tipos_oferta,rango_valores_oferta):
# Listas donde almacenamos las ofertas dentro del rango de s_actual
ofertas_validas = []
f_utilidad_ofertas_validas = []
# Almacenamos la oferta mas alta para el caso de no encontrar ofertas dentro del rango
mejor_oferta_encontrada = []
fu_mejor_oferta_encontrada = 0
# Generacion de la poblacion inicial
poblacion = []
for i in range(0,nip):
poblacion.append(oferta_random(tipos_oferta,rango_valores_oferta))
# Bucle (Generaciones)
for g in range(0,ite):
        # SELECTION (number of parents = nip / 2)
n_padres = nip/2
f_utilidad_poblacion = []
f_utilidad_poblacion_ord = []
for i in range(0,nip):
f_u = funcion_utilidad(agente,op_fu,poblacion[i])
f_utilidad_poblacion.append(f_u)
f_utilidad_poblacion_ord.append(f_u)
f_utilidad_poblacion_ord.sort(reverse=True)
# Actualizamos la mejor oferta
if(f_utilidad_poblacion_ord[0] > fu_mejor_oferta_encontrada):
fu_mejor_oferta_encontrada = f_utilidad_poblacion_ord[0]
indice = f_utilidad_poblacion.index(fu_mejor_oferta_encontrada)
mejor_oferta_encontrada = poblacion[indice]
# Actualizamos las ofertas validas
i = 0
while(i < nip and f_utilidad_poblacion_ord[i] >= s_min):
if(f_utilidad_poblacion_ord[i] <= s_max):
f_utilidad_ofertas_validas.append(f_utilidad_poblacion_ord[i])
indice = f_utilidad_poblacion.index(f_utilidad_poblacion_ord[i])
ofertas_validas.append(poblacion[indice])
i += 1
padres = []
for i in range(0,n_padres):
f_u = f_utilidad_poblacion_ord[i]
indice = f_utilidad_poblacion.index(f_u)
padres.append(poblacion[indice])
        # CROSSOVER (random attribute-wise crossover)
hijos = []
for i in range(0,n_padres):
n_atributos_a_cambiar = random.randint(1,size_v)
indices_atributos_a_cambiar = []
for j in range(0,n_atributos_a_cambiar):
indices_atributos_a_cambiar.append(random.randint(0,size_v-1))
hijo = []
if(i == n_padres-1): # Cruce del ultimo con el primero
hijo = padres[i][:]
for j in range(0,n_atributos_a_cambiar):
aux_indice = indices_atributos_a_cambiar[j]
hijo[aux_indice] = padres[0][aux_indice]
else:
hijo = padres[i][:]
for j in range(0,n_atributos_a_cambiar):
aux_indice = indices_atributos_a_cambiar[j]
hijo[aux_indice] = padres[i+1][aux_indice]
hijos.append(hijo)
        # MUTATION (random replacement of one offer element)
for i in range(0,len(hijos)):
aux_indice = random.randint(0,size_v-1)
valor_aleatorio_oferta_indice = elemento_oferta_random(tipos_oferta,rango_valores_oferta,aux_indice)
hijos[i][aux_indice] = valor_aleatorio_oferta_indice
        # REPLACEMENT
indices_remplazo = []
e_max = nip-1
e_min = e_max-len(hijos)
j = 0
for i in range(e_max,e_min,-1):
f_u = f_utilidad_poblacion_ord[i]
indice = f_utilidad_poblacion.index(f_u)
poblacion[indice] = hijos[j]
j += 1
# Comprobamos si el algoritmo genetico a generado alguna oferta valida
if(len(ofertas_validas)>0):
# Seleccionamos una oferta segun la opcion escogida
f_utilidad_ofertas_validas_ord = f_utilidad_ofertas_validas[:]
# Mejor oferta
if(op == 1):
f_utilidad_ofertas_validas_ord.sort(reverse=True)
f_utilidad_oferta = f_utilidad_ofertas_validas_ord[0]
indice = f_utilidad_ofertas_validas.index(f_utilidad_oferta)
oferta = ofertas_validas[indice]
# Peor oferta
elif(op == 2):
f_utilidad_ofertas_validas_ord.sort()
f_utilidad_oferta = f_utilidad_ofertas_validas_ord[0]
indice = f_utilidad_ofertas_validas.index(f_utilidad_oferta)
oferta = ofertas_validas[indice]
# Oferta aleatoria
else:
n_random = random.randint(0,len(ofertas_validas)-1)
f_utilidad_oferta = f_utilidad_ofertas_validas_ord[n_random]
indice = f_utilidad_ofertas_validas.index(f_utilidad_oferta)
oferta = ofertas_validas[indice]
else: # Si no hay ofertas validas devolvemos una oferta cuya f_utilidad > s_max
# Si existe buscamos la oferta mas cercana a s_max
max_ofertas = []
fu_max_ofertas = []
fu_max_ofertas_ord = []
for aux_oferta in poblacion:
aux_fu_oferta = funcion_utilidad(agente,op_fu,aux_oferta)
if(aux_fu_oferta >= s_max):
max_ofertas.append(aux_oferta)
fu_max_ofertas.append(aux_fu_oferta)
fu_max_ofertas_ord.append(aux_fu_oferta)
if(len(fu_max_ofertas_ord) > 0):
fu_max_ofertas_ord.sort()
indice = fu_max_ofertas.index(fu_max_ofertas_ord[0])
f_utilidad_oferta = fu_max_ofertas[indice]
oferta = max_ofertas[indice]
# Si no hay ninguna oferta cuya f_utilidad > s_max devolvemos
# la mejor oferta encontrada en el genetico
else:
oferta = mejor_oferta_encontrada
f_utilidad_oferta = fu_mejor_oferta_encontrada
# Devolvemos la mejor oferta
return (oferta,f_utilidad_oferta)
# Generate a random offer
def oferta_random(tipos,rango_valores):
oferta = []
for i in range(0,len(tipos)):
if(tipos[i] == "int"): # Cota superior
oferta.append(random.randint(1,rango_valores[i]))
elif(tipos[i] == "float"): # Cota superior
oferta.append(random.uniform(1.0,rango_valores[i]))
else: # Tipo list
e_l = len(rango_valores[i])
indice = random.randint(0,e_l-1)
oferta.append(rango_valores[i][indice])
return oferta
# Generate a single offer element, selected by its index
def elemento_oferta_random(tipos,rango_valores,i):
elemento_oferta = ""
if(tipos[i] == "int"): # Cota superior
elemento_oferta = random.randint(0,rango_valores[i])
elif(tipos[i] == "float"): # Cota superior
elemento_oferta = random.uniform(0.0,rango_valores[i])
else: # Tipo list
e_l = len(rango_valores[i])
indice = random.randint(0,e_l-1)
elemento_oferta = rango_valores[i][indice]
return elemento_oferta
#--------------------------------------------------------
# PLOT
#--------------------------------------------------------
fig, ax = plt.subplots()
# X and Y axis labels
tree = ET.parse("agente1.xml")
root = tree.getroot()
plt.xlabel(root[0].text)
tree = ET.parse("agente2.xml")
root = tree.getroot()
plt.ylabel(root[0].text)
# Value ranges for the X and Y axes
ax.set_ylim(0, 1)
ax.set_xlim(0, 1)
# Show the grid
ax.grid()
# Lists for x and y
xdata_agente1, ydata_agente1 = [], []
xdata_agente2, ydata_agente2 = [], []
# Function that updates the plot animation
def run(data):
x, y, agente = data
if(agente == 1):
xdata_agente1.append(x)
ydata_agente1.append(y)
else:
xdata_agente2.append(x)
ydata_agente2.append(y)
ax.plot(xdata_agente1, ydata_agente1, 'r-')
ax.plot(xdata_agente2, ydata_agente2, 'b-')
#--------------------------------------------------------
# FUNCTION THAT RUNS THE NEGOTIATION BETWEEN THE 2 AGENTS
#--------------------------------------------------------
def negociacion():
    # Agent 1 configuration
xml_agente1 = "agente1.xml"
datos_agente1 = importar_xml(xml_agente1)
s_agente1 = datos_agente1['s_inicial']
agente1_ofertas_recibidas = []
agente1_ofertas_emitidas = []
    # Agent 2 configuration
xml_agente2 = "agente2.xml"
datos_agente2 = importar_xml(xml_agente2)
s_agente2 = datos_agente2['s_inicial']
agente2_ofertas_recibidas = []
agente2_ofertas_emitidas = []
    # Time-keeping variable
    # NOTE: we work in seconds
t_inicial = datetime.now()
    # Flags used to end the negotiation
fin_negociacion_agente1 = False
fin_negociacion_agente2 = False
    # Flag to manage the turns
turno=0
    # Total number of offers generated by both parties
n_ofertas = 0
    # The end of the negotiation is defined by a deadline or by an agreement
while(fin_negociacion_agente1 == False and fin_negociacion_agente2 == False):
        if(turno == 0): # Start of the negotiation
            # Agent 1 generates the initial offer to start the negotiation
(oferta,fu_o_a1) = gen_ofertas_genetico(1,1000,5,datos_agente1['n_atributos'],s_agente1+0.1,s_agente1,datos_agente1['t_oferta'],datos_agente1['t_fu'],dominio()[0],dominio()[1])
agente1_ofertas_emitidas.append(oferta)
turno = 2
n_ofertas += 1
print "############################################################################"
print "Sentido (oferta)\t\tf_utilidad_emisor\tf_utilidad_receptor"
print "############################################################################"
print "------------------------------------------------------------------------"
print datos_agente1['nombre_agente']," --> ",datos_agente2['nombre_agente'],"\t\t",fu_o_a1,"\t\t\t",funcion_utilidad(2,datos_agente2['t_fu'],oferta),"\t"
print "------------------------------------------------------------------------"
#-----------------------------------------
# Actualizamos los valores de la grafica
#-----------------------------------------
fu_oferta = funcion_utilidad(1,datos_agente1['t_fu'],oferta)
fu_oferta2 = funcion_utilidad(2,datos_agente2['t_fu'],oferta)
yield fu_oferta, fu_oferta2, 1
        # Haggling: Rubinstein-style alternating offer exchange
        elif(turno == 1): # Agent 1
fu_oferta = funcion_utilidad(1,datos_agente1['t_fu'],oferta)
agente1_ofertas_recibidas.append(oferta)
s_agente1 = actualizar_concesion(1,datos_agente1['t_fu'],datos_agente1['t_concesion'],datos_agente1['tiempo'],agente1_ofertas_emitidas,agente1_ofertas_recibidas,datos_agente1['p_concesion'],t_inicial)
t_actual = datetime.now()
t = t_actual - t_inicial
t = t.seconds
lista_aceptar = aceptacion(1,datos_agente1['t_aceptacion'],fu_oferta,s_agente1,datos_agente1['s_aceptacion'],agente1_ofertas_emitidas,agente1_ofertas_recibidas)
if(t >= datos_agente1['tiempo']):
fin_negociacion_agente1 = True
print "Se ha agotado el tiempo del agente 1 (Deadline = ",t,"s)"
# Comporbacion aceptacion de la oferta
elif(len(lista_aceptar) > 0):
if(lista_aceptar[0] == True):
fin_negociacion_agente1 = True
print "----------------------------------"
print "El agente 1 acepta la oferta"
print "----------------------------------"
print "Criterio: ", lista_aceptar[1]
print "Oferta: ",oferta
print "Funcion utilidad: ",fu_oferta
print "Concesion: ",s_agente1
#-----------------------------------------
# Actualizamos los valores de la grafica
#-----------------------------------------
fu_oferta = funcion_utilidad(1,datos_agente1['t_fu'],oferta)
fu_oferta2 = funcion_utilidad(2,datos_agente2['t_fu'],oferta)
yield fu_oferta, fu_oferta2, 1
elif(lista_aceptar[0] == False and lista_aceptar[2] == ""):
fin_negociacion_agente1 = True
print "----------------------------------------"
print "El agente 1 no ha llegado a un acuerdo"
print "----------------------------------------"
print "Criterio: ", lista_aceptar[1]
elif(lista_aceptar[0] == False and lista_aceptar[2] != ""):
oferta = lista_aceptar[2]
agente1_ofertas_emitidas.append(oferta)
turno = 2
n_ofertas += 1
print datos_agente1['nombre_agente']," --> ",datos_agente2['nombre_agente'],"\t\t",fu_o_a1,"\t\t\t",funcion_utilidad(2,datos_agente2['t_fu'],oferta),"\t"
print "------------------------------------------------------------------------"
#-----------------------------------------
# Actualizamos los valores de la grafica
#-----------------------------------------
fu_oferta = funcion_utilidad(1,datos_agente1['t_fu'],oferta)
fu_oferta2 = funcion_utilidad(2,datos_agente2['t_fu'],oferta)
yield fu_oferta, fu_oferta2, 1
else:
(oferta,fu_o_a1) = gen_ofertas_genetico(1,1000,5,datos_agente1['n_atributos'],s_agente1+0.1,s_agente1,datos_agente1['t_oferta'],datos_agente1['t_fu'],dominio()[0],dominio()[1])
agente1_ofertas_emitidas.append(oferta)
turno = 2
n_ofertas += 1
print datos_agente1['nombre_agente']," --> ",datos_agente2['nombre_agente'],"\t\t",fu_o_a1,"\t\t\t",funcion_utilidad(2,datos_agente2['t_fu'],oferta),"\t"
print "------------------------------------------------------------------------"
#-----------------------------------------
# Actualizamos los valores de la grafica
#-----------------------------------------
fu_oferta = funcion_utilidad(1,datos_agente1['t_fu'],oferta)
fu_oferta2 = funcion_utilidad(2,datos_agente2['t_fu'],oferta)
yield fu_oferta, fu_oferta2, 1
#-----------------------------------------
        else: # Agent 2
fu_oferta = funcion_utilidad(2,datos_agente2['t_fu'],oferta)
agente2_ofertas_recibidas.append(oferta)
s_agente2 = actualizar_concesion(2,datos_agente2['t_fu'],datos_agente2['t_concesion'],datos_agente2['tiempo'],agente2_ofertas_emitidas,agente2_ofertas_recibidas,datos_agente2['p_concesion'],t_inicial)
t_actual = datetime.now()
t = t_actual - t_inicial
t = t.seconds
lista_aceptar = aceptacion(2,datos_agente2['t_aceptacion'],fu_oferta,s_agente2,datos_agente2['s_aceptacion'],agente2_ofertas_emitidas,agente2_ofertas_recibidas)
if(t >= datos_agente2['tiempo']):
fin_negociacion_agente2 = True
print "Se ha agotado el tiempo del agente 2 (Deadline = ",t,"s)"
# Comporbacion aceptacion de la oferta
elif(len(lista_aceptar) > 0):
if(lista_aceptar[0] == True):
fin_negociacion_agente2 = True
print "----------------------------------"
print "El agente 2 acepta la oferta"
print "----------------------------------"
print "Criterio: ", lista_aceptar[1]
print "Oferta: ",oferta
print "Funcion utilidad: ",fu_oferta
print "Concesion: ",s_agente2
#-----------------------------------------
# Actualizamos los valores de la grafica
#-----------------------------------------
fu_oferta = funcion_utilidad(2,datos_agente2['t_fu'],oferta)
fu_oferta2 = funcion_utilidad(1,datos_agente1['t_fu'],oferta)
yield fu_oferta2, fu_oferta, 2
elif(lista_aceptar[0] == False and lista_aceptar[2] == ""):
fin_negociacion_agente2 = True
print "----------------------------------------"
print "El agente 2 no ha llegado a un acuerdo"
print "----------------------------------------"
print "Criterio: ", lista_aceptar[1]
elif(lista_aceptar[0] == False and lista_aceptar[2] != ""):
oferta = lista_aceptar[2]
agente2_ofertas_emitidas.append(oferta)
turno = 1
n_ofertas += 1
print datos_agente2['nombre_agente']," --> ",datos_agente1['nombre_agente'],"\t\t",fu_o_a2,"\t\t\t",funcion_utilidad(1,datos_agente1['t_fu'],oferta),"\t"
print "------------------------------------------------------------------------"
#-----------------------------------------
# Actualizamos los valores de la grafica
#-----------------------------------------
fu_oferta = funcion_utilidad(2,datos_agente2['t_fu'],oferta)
fu_oferta2 = funcion_utilidad(1,datos_agente1['t_fu'],oferta)
yield fu_oferta2, fu_oferta, 2
else:
(oferta,fu_o_a2) = gen_ofertas_genetico(2,1000,5,datos_agente2['n_atributos'],s_agente2+0.1,s_agente2,datos_agente2['t_oferta'],datos_agente2['t_fu'],dominio()[0],dominio()[1])
agente2_ofertas_emitidas.append(oferta)
turno = 1
n_ofertas += 1
print datos_agente2['nombre_agente']," --> ",datos_agente1['nombre_agente'],"\t\t",fu_o_a2,"\t\t\t",funcion_utilidad(1,datos_agente1['t_fu'],oferta),"\t"
print "------------------------------------------------------------------------"
#-----------------------------------------
                # Update the values for the plot
#-----------------------------------------
fu_oferta = funcion_utilidad(2,datos_agente2['t_fu'],oferta)
fu_oferta2 = funcion_utilidad(1,datos_agente1['t_fu'],oferta)
yield fu_oferta2, fu_oferta, 2
#-----------------------------------------
print "Numero de ofertas generadas: ",n_ofertas
# Animation
ani = animation.FuncAnimation(fig, run, frames=negociacion, init_func=negociacion, blit=False, interval=10, repeat=False)
# # Plot
plt.show()
| [
"matplotlib"
] |
ce756705a21c98d5d6406ce3025aaaace7459dc4 | Python | nastazya/Dynamic-headers-and-basic-EDA | /analyse.py | UTF-8 | 10,235 | 2.8125 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python
import os
import numpy as np
import pandas as pd
import argparse
import csv
import matplotlib.pyplot as plt
import string
import math
#import wx
def parser_assign():
'''Setting up parser for the file name and header file name '''
parser = argparse.ArgumentParser()
parser.add_argument("file_name") # name of the file specified in Dockerfile
parser.add_argument("-d", "--header_name", default="no_file", help="name of a headers file") #Optional header file name
args = parser.parse_args()
f_name = args.file_name
if args.header_name:
h_name = args.header_name
return f_name, h_name
def read_data(file,h_file):
'''Copying data from file to Data Frame'''
if file == 'wdbc.data': # if this is breast cancer dataset
names = ['ID', 'Diagnosis', 'radius_m', 'texture_m', 'perimeter_m', 'area_m', 'smothness_m', 'compactness_m', 'concavity_m', 'concave_points_m', 'symmetry_m', 'fractal_dim_m', 'radius_s', 'texture_s', 'perimeter_s', 'area_s', 'smothness_s', 'compactness_s', 'concavity_s', 'concave_points_s', 'symmetry_s', 'fractal_dim_s', 'radius_w', 'texture_w', 'perimeter_w', 'area_w', 'smothness_w', 'compactness_w', 'concavity_w', 'concave_points_w', 'symmetry_w', 'fractal_dim_w']
data = pd.read_csv(file, sep=',', names=names)
data.columns = names # assigning feature names to the names of the columns
data.index = data['ID'] # assigning ID column to the names of the rows
del data['ID'] # removing ID column
        #data = data.iloc[:10,:5] # reducing data for testing purposes
elif check_header(file): # if data has header
        print("\n Dataset has its header \n")
data = pd.read_csv(file, sep='\s+|,', engine='python')
elif os.path.isfile(h_file): # if header file was provided
        print("\n Dataset header will be generated from its provided header file \n")
#filename='testcsv.csv'
# Read column headers (to be variable naames)
with open(h_file) as f:
firstline = f.readline() # Read first line
firstline = firstline.replace("\n","") # Remove new line characters
firstline = firstline.replace(","," ") # Remove commas
firstline = firstline.replace(" "," ") # Remove spaces
header = list(firstline.split(' ')) # Split string to a list
data = pd.read_csv(file, sep='\s+|,', header=None, engine='python')
assert len(data.columns) == len(header), 'Number of columns is not equal to number of column names in header file.'
data.columns = header
else: # if there is no header file we generate column names like A, B..., AA, BB...
        print("\n Dataset has neither a header nor a header file. It will be generated automatically \n")
data = pd.read_csv(file, sep='\s+|,', header=None, engine='python')
s = list(string.ascii_uppercase)
col_number = len(data.columns)
print(col_number)
header = s
if col_number > len(s): # if number of columns is greater then 26
if col_number % 26 != 0:
n = (col_number // 26) + 1
else: n = (col_number // 26)
print(n)
for i in range(2, n+1):
for j in range(len(s)):
header += [s[j]*i]
#print('auto-header: ',header)
#print(header[:len(data.columns)])
data.columns = header[:len(data.columns)]
return data
def check_header(file):
'''Checking whether the data file contains header'''
header_flag = csv.Sniffer().has_header(open(file).read(1024))
print('result', header_flag)
return header_flag
def find_mean_std(P):
'''Calculating mean and std for each of 30 features'''
ave_feature = np.mean(P)
std_feature = np.std(P)
print('\n ave of each measurment:\n', ave_feature)
print('\n std of each measurment:\n', std_feature)
def plot_histograms(df, columns, folder, name):
'''Histogram all in one figure'''
#app = wx.App(False)
#width, height = wx.GetDisplaySize() # Getting screen dimentions
#plt.switch_backend('wxAgg') # In order to maximize the plot later by using plt.get_current_fig_manager()
l = len(columns)
n_cols = math.ceil(math.sqrt(l)) #Calculating scaling for any number of features
n_rows = math.ceil(l / n_cols)
#fig=plt.figure(figsize=(width/100., height/100.), dpi=100)
fig=plt.figure(figsize=(11, 6), dpi=100)
for i, col_name in enumerate(columns):
ax=fig.add_subplot(n_rows,n_cols,i+1)
df[col_name].hist(bins=10,ax=ax)
ax.set_title(col_name)
#ax.set_xlabel('value')
#ax.set_ylabel('number')
fig.tight_layout()
plt.savefig("./{0}/all_hist_{1}.png".format(folder,name), bbox_inches='tight')
#mng = plt.get_current_fig_manager()
#mng.frame.Maximize(True)
plt.show()
def plot_hist(features, name, folder):
'''Histogram for each feature'''
fig = plt.figure()
plt.hist(features)
plt.xlabel('value')
plt.ylabel('number')
plt.savefig("./{0}/{1}.png".format(folder,name), bbox_inches='tight')
plt.close('all')
def plot_histograms_grouped(dff, columns, gr_feature, folder, name):
'''Histogram: all features in one figure grouped by one element'''
#app = wx.App(False)
#width, height = wx.GetDisplaySize() # Getting screen dimentions
#plt.switch_backend('wxAgg') # In order to maximize the plot later by using plt.get_current_fig_manager()
df = dff # Creating a copy of data to be able to manipulate it without changing the data
l = len(columns)
n_cols = math.ceil(math.sqrt(l)) # Calculating scaling for any number of features
n_rows = math.ceil(l / n_cols)
#fig=plt.figure(figsize=(width/100., height/100.), dpi=100)
fig=plt.figure(figsize=(11, 6), dpi=100)
df.index = np.arange(0,len(df)) # Setting indexes to integers (only needed if we use reset_index later)
idx = 0
for i, col_name in enumerate(columns): # Going through all the features
idx = idx+1
if col_name != gr_feature: # Avoiding a histogram of the grouping element
ax=fig.add_subplot(n_rows,n_cols,idx)
ax.set_title(col_name)
#grouped = df.reset_index().pivot('index',gr_feature,col_name) # This grouping is useful when we want to build histograms for each grouped item in the same time in different subplots. Here no need as I do it inside the for loop for each one on the same plot
            grouped = df.pivot(columns=gr_feature, values=col_name)  # pivot on the requested grouping feature
for j, gr_feature_name in enumerate(grouped.columns): # Going through the values of grouping feature (here malignant and benign)
grouped[gr_feature_name].hist(alpha=0.5, label=gr_feature_name)
plt.legend(loc='upper right')
else: idx = idx-1
fig.tight_layout()
plt.savefig("./{0}/all_hist_grouped_{1}.png".format(folder,name), bbox_inches='tight')
#mng = plt.get_current_fig_manager()
#mng.frame.Maximize(True)
plt.show()
def plot_scatter(feature1, feature2, name1, name2, folder):
'''Scatter for each pair of features'''
fig = plt.figure()
plt.xlabel(name1)
plt.ylabel(name2)
plt.scatter(feature1, feature2)
plt.savefig(("./{0}/{1}-{2}.png".format(folder, name1, name2)), bbox_inches='tight')
plt.close('all')
def plot_corr(data_frame, size, folder, file_n):
''' Plotting correlations'''
fig, ax = plt.subplots(figsize=(size, size))
ax.matshow(data_frame)
plt.xticks(range(len(data_frame.columns)), data_frame.columns)
plt.yticks(range(len(data_frame.columns)), data_frame.columns)
plt.savefig(("./{0}/{1}.png".format(folder,file_n)), bbox_inches='tight')
plt.close('all')
#----------------------------------------------------------------------------------------------------------
#----------------------------------------------------------------------------------------------------------
# Assigning file names to local variables
data_file, header_file = parser_assign()
assert os.path.isfile(data_file), '\n Not valid file!!!'
# Reading data from file to Data Frame
data = read_data(data_file, header_file)
print(data)
# Calculating summary statistics
find_mean_std(data)
# Plotting histograms
if not os.path.exists('hist'):
os.makedirs('hist')
if data_file == 'wdbc.data':
print('\n Plotting all histograms into one figure') #Plotting one histogram for all the features
plot_histograms(data.iloc[:,1:11], data.iloc[:,1:11].columns, 'hist', data_file)
print('\n Plotting all histograms into one figure grouped by diagnosis')#Plotting one histogram for all the features grouped by diagnosis
plot_histograms_grouped(data.iloc[:,:11], data.iloc[:,:11].columns, 'Diagnosis', 'hist', data_file)
for col_name in data.columns: #Plotting a histogram for each feature
if col_name != 'Diagnosis':
print('\n Plotting histogramme for ', col_name, ' into /hist/')
plot_hist(data[col_name], col_name, 'hist')
else:
print('\n Plotting all histograms into one figure') #Plotting one histogram for all the features
plot_histograms(data, data.columns, 'hist', data_file)
for col_name in data.columns: #Plotting a histogram for each feature
print('\n Plotting histogramme for ', col_name, ' into /hist/')
plot_hist(data[col_name], col_name, 'hist')
# Plotting scatter
if not os.path.exists('scatter'):
os.makedirs('scatter')
if data_file == 'wdbc.data': # Build the scatter only for mean of each feature (10 first columns out of 30)
for i in range(1, 11):
j = 1
for j in range((i+j),11):
col_name1 = data.iloc[:,i].name
col_name2 = data.iloc[:,j].name
print('\n Plotting scatter for ', col_name1, col_name2, ' into /scatter/')
plot_scatter(data[col_name1], data[col_name2], col_name1, col_name2, 'scatter')
else:
for i in range(len(data.iloc[0])):
j = 1
for j in range((i+j),len(data.iloc[0])):
col_name1 = data.iloc[:,i].name
col_name2 = data.iloc[:,j].name
print('\n Plotting scatter for ', col_name1, col_name2, ' into /scatter/')
plot_scatter(data[col_name1], data[col_name2], col_name1, col_name2, 'scatter')
# Plotting correlations heatmap
if data_file == 'wdbc.data':
    print('\n Plotting correlation heatmap into /corr/ ')
if not os.path.exists('corr'):
os.makedirs('corr')
data_features =data.iloc[:,1:11]
plot_corr(data_features.corr(), 10, 'corr', data_file) # Calculating correlation of 10 features and send them to plot
else:
    print('\n Plotting correlation heatmap into /corr/ ')
if not os.path.exists('corr'):
os.makedirs('corr')
plot_corr(data.corr(), 10, 'corr', data_file) # Calculating correlation and send them to plot
| [
"matplotlib"
] |
6989663c13ccd17a221fbef082dff592dd600c47 | Python | doloinn/tde-analysis | /output/getdfs.py | UTF-8 | 4,411 | 2.890625 | 3 | [] | no_license | # David Lynn
# Functions to get galaxy and TDE data
# Standard imports
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import pickle
# Tweaked Pandas' scatter_matrix to use 2D histograms
import my_scatter_matrix
def add_mags(m1, m2):
return -2.5 * np.log10(10**(-m1/2.5) + 10**(-m2/2.5))
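
# Quick sanity check for add_mags (illustrative numbers, not from the original
# source): combining two equal magnitudes m gives m - 2.5*log10(2), roughly m - 0.75,
# i.e. the blended source is about 0.75 mag brighter than either component alone.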
def get_galaxy_dfs():
# Load input galaxy data and parsed output
galaxy_data = pickle.load(open('galaxies.pickle', 'rb'))
galaxy_sim = pd.read_csv('galaxy_parsed.csv', index_col=0)
# Non-detections look like this, replace with NaN
galaxy_sim.replace(-1.0000000150474662e+30, np.nan, inplace=True)
# Separate into ellipticals and spirals
gal_data = [{}, {}]
for key, val in galaxy_data.items():
if val['galaxy type'] == 'elliptical':
gal_data[0][key] = val
elif val['galaxy type'] == 'spiral':
gal_data[1][key] = val
# Drop these fields from results as they're not very useful
res_drop = ['multiple_matches', 'pix_sub-pix_pos', 'offset', 'ac_offset', 'al_offset', 'theta', 'ra', 'dec']
    # List with elliptical dataframe and spiral dataframe
dfs = []
for k in range(2):
# Put stuff into dictionary to convert to dataframe later
simulated_data = {}
# Loop over inputs, make temporary dictionary with data
for i, j in gal_data[k].items():
temp_gal_dic = gal_data[k][i]['data']
temp_gal_dic_in = {'in %s' % key: temp_gal_dic[key] for key in temp_gal_dic.keys()}
# Convert results to dictionary
temp_res_dic = galaxy_sim.loc[i].to_dict()
# Drop useless fields
for key in res_drop:
temp_res_dic.pop(key)
temp_res_dic_out = {'out %s' % key: temp_res_dic[key] for key in temp_res_dic.keys()}
# Concatenate dictionaries
simulated_data[i] = {**temp_gal_dic_in, **temp_res_dic_out}
# Add data to list
dfs.append(pd.DataFrame.from_dict(simulated_data, orient='index'))
# Drop "out" for source mag as there is a value for all sources
dfs[k].rename(columns={'out source_g_mag': 'source_g_mag'}, inplace=True)
# Break into elliptical and spiral dataframes
eldf = dfs[0].loc[:, (dfs[0] != dfs[0].iloc[0]).any()]
spdf = dfs[1].loc[:, (dfs[1] != dfs[1].iloc[0]).any()]
return eldf, spdf
def get_tde_df():
# Load input data and parsed output
tde_data = pickle.load(open('tdes.pickle', 'rb'))
tde_sim = pd.read_csv('tde_parsed.csv', index_col=0)
# Non-detections, replace with NaNs
tde_sim.replace(-1.0000000150474662e+30, np.nan, inplace=True)
# Separate into galaxies and tdes
tde_gal_data = [{}, {}]
for key, val in tde_data.items():
tde_gal_data[key % 2][key - (key % 2)] = val
# Drop useless fields from results
res_drop = ['multiple_matches', 'pix_sub-pix_pos', 'offset', 'ac_offset', 'al_offset', 'theta', 'ra', 'dec']
gal_drop = ['ra', 'dec']
# Combine input and output, galaxies and TDEs, make dictionary to
# convert to dataframe later
simulated_data = {}
for i, j in tde_sim.iloc[::2].iterrows():
# Galaxy input dictionary
temp_gal_dic = tde_gal_data[0][i]['data']
for key in gal_drop:
temp_gal_dic.pop(key)
temp_gal_dic_in = {'in %s' % key: temp_gal_dic[key] for key in temp_gal_dic.keys()}
# TDE input dictionary
temp_tde_dic = tde_gal_data[1][i]['data']
temp_tde_dic_in = {'in %s' % key: temp_tde_dic[key] for key in temp_tde_dic.keys()}
# Results dictionary
temp_res_dic = j.to_dict()
for key in res_drop:
temp_res_dic.pop(key)
temp_res_dic_out = {'out %s' % key: temp_res_dic[key] for key in temp_res_dic.keys()}
# Concatenate dictionaries
simulated_data[i] = {**temp_gal_dic_in, **temp_tde_dic_in, **temp_res_dic_out, 'tde source_g_mag': tde_sim.get_value(i + 1, 'source_g_mag')}
# Convert dictionary to dataframe
tdedf = pd.DataFrame.from_dict(simulated_data, orient='index')
# Drop "out" for source mag as there is a value for all sources
tdedf.rename(columns={'out source_g_mag': 'galaxy source_g_mag'}, inplace=True)
tdedf['total source_g_mag'] = tdedf.apply(lambda row: add_mags(row['tde source_g_mag'], row['galaxy source_g_mag']), axis=1)
return tdedf
| [
"matplotlib"
] |
69452055f98c2a364b119b53c855cd133c7b9744 | Python | kevin-albert/weather-predictor | /show.py | UTF-8 | 1,152 | 2.953125 | 3 | [
"MIT"
] | permissive | """ show stuff """
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
from data_source import load_data
plt.xkcd()
def animate(inputs):
# hack together an empty frame of data and set the min/max on the image
empty_data = np.zeros((242, 242))
empty_data = np.ma.masked_where(True, empty_data)
some_data = np.ma.masked_outside(inputs, -75, 75)
vmin = some_data.min()
vmax = some_data.max()
# empty_data = np.ma.masked_outside(empty_data, -75, 75)
print(vmin,vmax)
# create the plot
fig = plt.figure()
im = plt.imshow(empty_data, vmin=vmin, vmax=vmax, origin='lower', animated=True)
fig.colorbar(im)
def init():
im.set_array(empty_data)
return im,
def frame(data):
im.set_array(np.ma.masked_outside(data, -75, 75))
return im,
ani = FuncAnimation(fig, frame, init_func=init, frames=inputs, interval=50,
repeat=True, blit=True)
plt.show()
print('Loading input data')
train_inputs, test_inputs = load_data('data', test_rate=0)
# print(np.zeros((241, 241)))
animate(train_inputs)
| [
"matplotlib"
] |
064f7e4e71f8ded1b2af9d265bf302f640690bbb | Python | a1phr3d/potential-fiesta | /insight/apps/New folder/individual.py | UTF-8 | 6,781 | 2.828125 | 3 | [] | no_license | #! python3
import dash, insight
import dash_core_components as dcc
import dash_html_components as html
import pandas as pd
from dash.dependencies import Output, Input
from app import app
import plotly.graph_objects as go
from plotly.subplots import make_subplots
stockdf = pd.read_csv("C:\\Users\\alfre\\trade\\apps\\stockScreen.csv")
symbolList = insight.stockList("stockSymbols.txt")
symbolList.sort()
periodDict = {'3 Months': 90,'6 Months': 180,'1 Year': 365,'3 Years': 1095,'5 Years': 1825}
#print(stockdf.to_string())
headerLayout = html.Div(children=[
html.H1(children="Individual Stock", className= 'header-content'),
html.P(children="Screen for Trading Opportunities!", className='header-description')],className='header')
stockFilterLayout = html.Div(children=[
html.Div(children='Stock', className='menu-title'),
dcc.Dropdown(id= 'stock-filter',
options=[{"label": symbol, "value": symbol} for symbol in symbolList],
value=symbolList[0], clearable=False, className='filter-menus')], className='filter-menu2')
periodFilterLayout = html.Div(children=[
html.Div(children="Period", className= 'menu-title'),
dcc.Dropdown(id = "period",
options=[{'label':period, 'value':period} for period in ('3 Months', '6 Months', '1 Year', '3 Years', '5 Years')],
value='3 Months', className = 'filter-menus')], className='filter-menu2')
def makeLayouts(num_of_charts):
#outList = [do_something_with(item) for i in range(num_of_charts)]
outList = [html.Div(children=dcc.Graph(id=("stockchart" + str(i)), config={"displayModeBar": False}), className='card') for i in range(num_of_charts)]
return outList
menuLayout = html.Div(children=[stockFilterLayout, periodFilterLayout], className='menu')
graphLayout = html.Div(children=makeLayouts(4), className = 'wrapper')
# priceChartLayout = html.Div(children=dcc.Graph(id="price-chart", config={"displayModeBar": False}), className='card')
# volumeChartLayout = html.Div(children=dcc.Graph(id="volume-chart", config={"displayModeBar": False}), className='card')
# macdChartLayout = html.Div(children=dcc.Graph(id="macd-chart", config={"displayModeBar": False}), className='card')
# rsiChartLayout = html.Div(children=dcc.Graph(id="rsi-chart", config={"displayModeBar": False}), className='card')
# menuLayout = html.Div(children=[stockFilterLayout, periodFilterLayout], className='menu')
# graphLayout = html.Div(children=[priceChartLayout, volumeChartLayout, macdChartLayout, rsiChartLayout], className = 'wrapper')
layout = html.Div(children=[headerLayout, menuLayout, graphLayout])
@app.callback([
Output("stockchart0", "figure"),
Output("stockchart1", "figure"),
Output("stockchart2", "figure"),
Output("stockchart3", "figure"),
],
[
Input("stock-filter", "value"),
Input("period", "value"),],)
def update_charts(symbol, per):
period = periodDict[per]
filtered_data =stockdf[stockdf['stock'] == symbol]
filtered_data = filtered_data.iloc[-period:]
exp1 = filtered_data['Adj Close'].ewm(span=12, adjust=False).mean()
exp2 = filtered_data['Adj Close'].ewm(span=26, adjust=False).mean()
macd = exp1 - exp2
exp3 = macd.ewm(span=9, adjust=False).mean()
"""
exp3, AKA the 'Signal Line' is a nine-day EMA of the MACD.
When the signal line (red one) crosses the MACD (green) line:
* it is time to sell if the MACD (green) line is below
* it is time to buy if the MACD (green) line is above.
"""
rsi_fd = filtered_data.copy(deep=True)
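    # Minimal sketch of turning the MACD/signal crossover rule described in the
    # note above into discrete signals. This is illustrative only (pandas-only,
    # no extra imports) and is not used by the figures returned below.
    macd_above_signal = (macd > exp3).astype(int)
    macd_cross = macd_above_signal.diff()  # +1 = bullish crossover, -1 = bearish crossover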
delta = rsi_fd['Close'].diff()
up = delta.clip(lower=0)
down = -1*delta.clip(upper=0)
ema_up = up.ewm(com=13, adjust=False).mean()
ema_down = down.ewm(com=13, adjust=False).mean()
rs = ema_up/ema_down
rsi_fd['RSI'] = 100 - (100/(1 + rs))
# Skip first 14 **15** days to have real values
rsi_fd = rsi_fd.iloc[14:]
price_chart_figure = {
"data": [{"x": filtered_data["Date"],
"y": filtered_data["Low"],
"type": "lines", "hovertemplate": "$%{y:.2f}<extra></extra>",},],
"layout": {"title": {"text": str(symbol),"x": 0.05,"xanchor": "left"},
"xaxis": {"fixedrange": True},
"yaxis": {"tickprefix": "$", "fixedrange": True}, "colorway": ["#17B897"],},}
volume_chart_figure = {
"data": [{"x": filtered_data["Date"],
"y": filtered_data["Volume"],
"type": "lines",},],
"layout": {"title": {"text": str(symbol), "x": 0.05, "xanchor": "left"},
"xaxis": {"fixedrange": True},
"yaxis": {"fixedrange": True}, "colorway": ["#E12D39"],},}
macd_chart_figure = make_subplots(specs=[[{"secondary_y": True}]])
macd_chart_figure.add_trace(go.Scatter(x=filtered_data["Date"], y=macd, name= "MACD"), secondary_y=False,)
macd_chart_figure.add_trace(go.Scatter(x=filtered_data["Date"], y=exp3, name="Signal Line"), secondary_y=False,)
macd_chart_figure.add_trace(go.Scatter(x=filtered_data["Date"], y=filtered_data["Adj Close"], name=symbol), secondary_y=True,)
macd_chart_figure.update_layout(title_text=symbol + " -- MACD", template="plotly_white", showlegend=False,)
#macd_chart_figure.update_xaxes(title_text="xaxis title")
macd_chart_figure.update_yaxes(title_text="MACD", secondary_y=False)
macd_chart_figure.update_yaxes(title_text="Price", secondary_y=True)
macd_chart_figure.update_yaxes(showgrid=False)
rsi_chart_figure = make_subplots(specs=[[{"secondary_y": True}]])
rsi_chart_figure.add_trace(go.Scatter(x=rsi_fd["Date"], y=rsi_fd['RSI'], name="RSI"), secondary_y=False,)
rsi_chart_figure.add_trace(go.Scatter(x=rsi_fd["Date"], y=[30]*len(rsi_fd['RSI']), name='Underbought',line=dict(color='firebrick', width=1.5,dash='dash')), secondary_y=False,)
rsi_chart_figure.add_trace(go.Scatter(x=rsi_fd["Date"], y=[70]*len(rsi_fd['RSI']), name='Overbought',line=dict(color='firebrick', width=1.5,dash='dash')), secondary_y=False,)
rsi_chart_figure.update_layout(title_text=symbol + "-- RSI", template="plotly_white", showlegend=False,)
rsi_chart_figure.update_xaxes(showgrid=False)
rsi_chart_figure.update_yaxes(showgrid=False)
"""
    An asset is usually considered overbought when the RSI is above 70% and oversold when it is below 30%.
RSI is usually calculated over 14 intervals (mostly days) and you will see it represented as RSI14
"""
return price_chart_figure, volume_chart_figure, macd_chart_figure, rsi_chart_figure
##if __name__ == "__main__":
## app.run_server(host = '127.0.0.1', debug=True) | [
"plotly"
] |
cab66a6eebdeeefa526409b4b7d4f3fd913a62fc | Python | mohite-abhi/social-network-analysis-with-python | /week-12-small-world-how-to-be-viral/k-cores.py | UTF-8 | 1,185 | 3.328125 | 3 | [] | no_license | import networkx as nx
import matplotlib.pyplot as plt
G=nx.Graph()
G.add_edges_from([
(1,2),
(3,11),
(4,5),
(5,6),
(5,7),
(5,8),
(5,9),
(5,10),
(10,11),
(10,13),
(11,13),
(12,14),
(12,15),
(13,14),
(13,15),
(13,16),
(13,17),
(14,15),
(14,16),
(15,16)
])
def check_existance(H,d):
f=0
for each in list(H.nodes()):
if H.degree[each] <= d:
f = 1
break
return f
def findNodesWithDegree(H, it):
set1=[]
for each in H.nodes():
if H.degree(each) <= it:
set1.append(each)
return set1
def findKCores(G):
H = G.copy()
it = 1
tmp=[]
buckets=[]
while 1:
flag = check_existance(H,it)
if flag == 0:
it += 1
buckets.append(tmp)
tmp = []
elif flag==1:
nodeSet=findNodesWithDegree(H, it)
for each in nodeSet:
H.remove_node(each)
tmp.append(each)
if H.number_of_nodes()==0:
buckets.append(tmp)
break
print(buckets)
findKCores(G)
nx.draw(G)
plt.show() | [
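# Cross-check of the hand-rolled decomposition above (assumption: a reasonably
# recent networkx is installed). core_number() returns the standard k-core index
# of every node, which can be compared against the buckets printed by findKCores().
print(nx.core_number(G))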
"matplotlib"
] |
31e2123ceebc7926e955ab9293e69bff456a0823 | Python | 3276908917/skyflux | /show_helix.py | UTF-8 | 5,797 | 2.5625 | 3 | [] | no_license | import numpy as np
import matplotlib.pyplot as plt
import pickle
import skyflux as sf
import skyflux.deprecated.polSims as pol
def hauto_show(fname, pt_override=None):
"""
Load simulated visibilities from the file named
@fname
and visually interpret the results as a
delay-spectrum helix a la (Parsons, 2012).
@pt_override is a way to override the title with which
a simulation came. It is extremely bad form to use this,
but it can come in handy during lapses of diligence.
"""
fourierc, raw_vis, fs, ts, ptitle = \
build_fourier_candidates(fname)
if pt_override is not None:
ptitle = pt_override
print("Data from file re-organized.")
fouriered = transform_power(fourierc, fs, ts)
print("Fourier transforms computed.")
visual = collect_helix_points(fouriered, fs, ts)
print("Points collected.")
plt.title("88m, 200 MHz bandwidth")
plt.xlabel("Delays [ns]")
plt.ylabel("LST [hr]")
plot_3D(visual, ptitle)
return visual, fouriered, fs, ts, raw_vis
def build_fourier_candidates(fname):
sim_file = open(fname + ".pickle", "rb")
meta = pickle.load(sim_file)
ptitle = meta['title']
fs = meta['frequencies']
num_f = len(fs)
ts = meta['times']
num_t = len(ts)
sim = meta['picture']
# 0: I 1: Q 2: U 3: V
fourierc = [[], [], [], []]
raw_vis = []
for ti in range(num_t):
for parameter in fourierc:
parameter.append([])
raw_vis.append([])
for ni in range(num_f):
v = sim[ni][ti]
for p_idx in range(len(fourierc)):
fourierc[p_idx][ti].append(v[p_idx])
raw_vis[ti].append(v)
#norm = np.linalg.norm(sim[ni][ti]) same outcome
for parameter in fourierc:
parameter[ti] = np.array(parameter[ti])
raw_vis[ti] = np.array(raw_vis[ti])
for parameter in fourierc:
parameter = np.array(parameter)
fourierc = np.array(fourierc)
raw_vis = np.array(raw_vis)
return fourierc, raw_vis, fs, ts, ptitle
def transform_power(original, fs, ts):
num_f = len(fs)
num_t = len(ts)
import copy
fourier = copy.deepcopy(original)
window = pol.genWindow(num_f)
for ti in range(num_t):
"""
# option 6
for parameter in fourier:
parameter[ti] = \
np.fft.fftshift(np.fft.fft(parameter[ti])
"""
"""
# what I had been doing before 2/17/21
# aka option 5
for parameter in fourier:
parameter[ti] = np.fft.fft(parameter[ti])
"""
# fft with window: option 9
for parameter in fourier:
parameter[ti] = np.fft.fft(parameter[ti] * window)
"""
# ifft: option 7
for parameter in fourier:
parameter[ti] = np.fft.ifft(parameter[ti])
"""
"""
# ifft with window: option 8 [next 4 lines]
for parameter in fourier:
parameter[ti] = np.fft.ifft(parameter[ti] * window)
"""
return fourier
def collect_helix_points(fouriered, fs, ts):
num_t = len(ts)
num_f = len(fs)
visual = []
etas = pol.f2etas(fs)
for ti in range(num_t):
for ni in range(num_f):
dspecvec = np.array([
parameter[ti][ni] for parameter in fouriered
])
norm = np.linalg.norm(dspecvec)
visual.append(np.array((
etas[ni] * 1e9,
ts[ti] * 12 / np.pi,
np.log10(norm)
)))
return np.array(visual)
def plot_3D(visual, title, scaled=False):
"""
Primitive 3D plotter.
For use with the return value of either
static_wedge_vis
or
dynamic_wedge_vis
Disable @scaled if you are using values such as logarithms
"""
x = visual[:, 0]
y = visual[:, 1]
z = visual[:, 2]
colors = None
if (scaled):
scaled_z = (z - z.min()) / z.ptp()
colors = plt.cm.viridis(scaled_z)
else:
colors = z
plt.title(title)
print("Minimum:", z.min())
print("PTP:", z.ptp())
plt.scatter(x, y, marker='.', c=colors)
plt.colorbar()
plt.show()
### This is a really bad ad-hoc testing script.
### We want to scrap this ASAP
def micro_wedge(h1, f1, b1, h2, f2, b2, h3, f3, b3):
"""
The axes do not line up with Nunhokee et al.
Probably something wrong with your constants
or usage thereof.
"""
center_f1 = np.average(f1)
z1 = pol.fq2z(center_f1)
lambda1 = pol.C / center_f1
k_par1 = pol.k_parallel(h1[:, 0], z1)
k_orth1 = pol.k_perp(z1) / lambda1 * b1
center_f2 = np.average(f2)
z2 = pol.fq2z(center_f2)
lambda2 = pol.C / center_f2
k_par2 = pol.k_parallel(h2[:, 0], z2)
k_orth2 = pol.k_perp(z2) / lambda2 * b2
center_f3 = np.average(f3)
z3 = pol.fq2z(center_f3)
lambda3 = pol.C / center_f3
k_par3 = pol.k_parallel(h3[:, 0], z3)
k_orth3 = pol.k_perp(z3) / lambda3 * b3
y = np.concatenate((k_par1, k_par2, k_par3))
x = np.concatenate((
np.repeat(k_orth1, len(k_par1)),
np.repeat(k_orth2, len(k_par2)),
np.repeat(k_orth3, len(k_par3))
))
colors = np.concatenate((h1[:, 2], h2[:, 2], h3[:, 2]))
plt.title("Helix concatenation")
#print("Minimum:", z.min())
#print("PTP:", z.ptp())
plt.scatter(x, y, marker='.', c=colors)
plt.colorbar()
plt.show()
| [
"matplotlib"
] |
41616b6aa577fd06a6bfc5bcadfcab92c51f6b60 | Python | ljyw17/Wechat_chatRecord_dataMing | /paint.py | UTF-8 | 4,651 | 2.5625 | 3 | [] | no_license | #coding=utf-8
# from scipy.misc import imread
from wordcloud import WordCloud
from wordcloud import ImageColorGenerator
import matplotlib.pyplot as plt
from os import path
from collections import Counter
import networkx as nx
import matplotlib.pyplot as plt
import jieba, pandas as pd
from collections import Counter
import jieba.posseg as pseg
def draw_wordcloud(c):
    # d = path.dirname(__file__)  # directory containing the current file
    # color_mask = plt.imread(r"F:\download\a.jpeg")  # read the background/mask image
cloud = WordCloud(
        # set the font; without it the text renders as garbled characters (the file name must not contain Chinese)
font_path = r"E:\tim\applications\records\code\simhei.ttf",
#font_path=path.join(d,'simsun.ttc'),
        # background colour; defaults to black, customise as needed
background_color='white',
        # word-cloud shape (mask),
# mask=color_mask,
        # maximum number of words
max_words=300,
        # maximum font size; defaults to the image height if not specified
max_font_size=80,
        # canvas width and height; ignored when a mask is set
width=600,
height = 400,
margin = 2,
        # fraction of words placed horizontally, default 0.9 (so 0.1 vertical)
prefer_horizontal = 0.8
# relative_scaling = 0.6,
# min_font_size = 10
).generate_from_frequencies(c)
plt.imshow(cloud)
plt.axis("off")
plt.show()
cloud.to_file(r"E:\tim\applications\records\code\word_cloud_H.png")
# plt.savefig(r"E:\tim\applications\records\code\word_cloud_E.png", format="png")
def get_words(txt):
seg_list = []
words = pseg.cut(txt)
for word, flag in words:
if flag in ("n", "nr", "ns", "nt", "nw", "nz"):
# n, "f", "s", "nr", "ns", "nt", "nw", "nz", "PER", "LOC", "ORG", "v"
# n nr ns nt nw nz
seg_list.append(word)
c = Counter() # 计数器
for x in seg_list:
if len(x)>1 and x not in ("\r\n"):
c[x] += 1 #个数加一
return c.most_common(305)
print()
# return " ".join(seg_list)
if __name__=="__main__":
xls = pd.read_excel(r'E:\tim\applications\records\xlsx\Chat_H.xlsx', header=0)
# sega = ""
list = []
for i in range(len(xls))[::35]:
list.append(str(xls["content"][i]))
# sega += str(xls["content"][i])
c = get_words("".join(list))
dict = {}
for i in c:
dict[i[0]] = i[1]
# txt = ""
# for i in lista:
# txt += i[0]
# txt += " "
draw_wordcloud(dict)
    # word cloud
print("--")
# sega = ""
# segb = ""
# for i in range(len(xls)):
# if xls["status"][i] == 0:
# sega += str(xls["content"][i])
# if xls["status"][i] == 1:
# segb += str(xls["content"][i])
# lista = get_words(sega)
# listb = get_words(segb)
# G = nx.Graph() # 创建空的网络图
# edge_list = []
# node_list = []
# node_color_list = []
#
# G.add_node("我")
# node_list.append("我")
# node_color_list.append('#ede85a')
# i = 0
# for j in lista[:60]:
# # if j[0] in (""):
# # continue
# if i < 15:
# node_color_list.append('#095dbe')
# elif i < 30:
# node_color_list.append('#5a9eed')
# elif i < 45:
# node_color_list.append('#7face1')
# else:
# node_color_list.append('#e1e8ef')
# G.add_node(j[0])
# node_list.append(j[0])
# G.add_edge("我", j[0])
# i += 1
#
# G.add_node("老师D")
# node_list.append("老师D")
# node_color_list.append('#e9586e')
# i = -1
# for j in listb[:60]:
# i += 1
#
# if j[0] in node_list:
# G.add_edge("老师D", j[0])
# continue
# # if j[0] in (""):
# # continue
# if i < 15:
# node_color_list.append('#095dbe')
# elif i < 30:
# node_color_list.append('#5a9eed')
# elif i < 45:
# node_color_list.append('#7face1')
# else:
# node_color_list.append('#e1e8ef')
# G.add_node(j[0])
# G.add_edge("老师D", j[0])
#
# pos = nx.fruchterman_reingold_layout(G)
#
# nx.draw_networkx_nodes(G, pos,node_size=280, node_color = node_color_list)
# nx.draw_networkx_edges(G, pos)
# nx.draw_networkx_labels(G, pos, font_size=6)
# # nx.draw(G, with_labels=True, font_weight='bold', node_color = node_color_list)
# plt.savefig("word_network_D.png",dpi=1800,bbox_inches = 'tight')
# plt.show()
#
# print("--") | [
"matplotlib"
] |
3a6bac7a2e755cc354eb30bdba9977c5ee404362 | Python | roboticSc/PCIII_2020 | /Blatt2_script_version.py | UTF-8 | 3,783 | 3.953125 | 4 | [] | no_license | # To add a new cell, type '# %%'
# To add a new markdown cell, type '# %% [markdown]'
# %% [markdown]
# # Exercise Sheet 2
# --------
# ## **Exercise 5: Plotting Planck's radiation law**
# 1. Use Python to plot the spectral intensity distribution of a black body,
# i.e. Planck's radiation law, for the temperatures 1000 K, 1500 K, 2000 K and
# 2500 K over the wavelength range up to 9 µm.
# 2. Also draw the Rayleigh-Jeans law for 2000 K into the diagram.
#
# Planck's radiation law reads:
# $ \rho(\lambda) = \dfrac{8\pi hc}{\lambda^5}\dfrac{1}{e^{\frac{hc}{kT\lambda}}-1} $
# %%
import numpy as np
import matplotlib.pyplot as plt
import scipy.constants as const
# %%
def planckGesetzt(lda, T):
    """Evaluate Planck's radiation law.
    Keyword arguments:
    lda -- wavelength lambda in metres
    T -- temperature in kelvin
"""
return( (8 * np.pi * const.h * const.c) / (lda ** 5) * 1/(np.exp((const.h * const.c / (const.k * T * lda))) - 1))
# %%
# Wavelengths up to 9 µm
lda = np.arange(0,9.02e-6,0.01e-6)
Temp = (1000,1500,2000,2500)
# Plot Planck's law
for T in Temp:
plt.plot(lda,planckGesetzt(lda,T), label= str (T) + "K")
#Labeling etc
plt.legend()
plt.title("Planksches Strahlungsgesetz")
plt.xlabel("Wellenlange $\lambda$ in m")
plt.ylabel("p($\lambda$)")
plt.show()
# %% [markdown]
# Add the Rayleigh-Jeans law: $\dfrac{8\pi k T}{\lambda^4}$
# %%
def RayleighJeans(lda, T):
    """Evaluate the Rayleigh-Jeans radiation law.
    Keyword arguments:
    lda -- wavelength lambda in metres
    T -- temperature in kelvin
"""
return( (8 * np.pi * const.k * T) / (lda ** 4))
# %%
# Wavelengths up to 9 µm
lda = np.arange(0,9.02e-6,0.01e-6)
Temp = (1000,1500,2000,2500)
# Plot Planck's law
for T in Temp:
plt.plot(lda,planckGesetzt(lda,T), label= str (T) + "K")
# Plot the Rayleigh-Jeans law
plt.plot(lda,RayleighJeans(lda, 2000), label="R-J: 2000K")
# Labels etc.
plt.legend()
plt.title("Planksches Strahlungsgesetz")
plt.xlabel("Wellenlänge $\lambda$ in m")
plt.ylabel(" $p$ ($\lambda$)")
plt.ylim(0,18500)
plt.show()
#debug
RJ = RayleighJeans(lda,2000)
# %% [markdown]
# ## **Exercise 6: From Planck's radiation law to Wien's displacement law**
# The equation above can be rearranged into $f(x) = g(x)$ with the functions
# $f(x) = \frac{x}{5}$ and $g(x) = 1-e^{-x}$. Plot the two functions $f$ and $g$ with
# Python. Determine graphically the $x$ value at which the two functions
# intersect.
#
# %%
x = np.arange(0,10.1,0.0001)
plt.plot(x,x/5,label="f(x)")
plt.plot(x,1-np.exp(-x), label="g(x)")
plt.legend()
plt.xticks(np.arange(0,10,1))
plt.show()
# %% [markdown]
# Read off graphically, the intersection lies just below 5.
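# %%
# Numerical cross-check of the graphical reading above (not part of the original
# exercise; assumes scipy.optimize is available): solve x/5 = 1 - exp(-x) for x > 0.
from scipy.optimize import brentq
x_star = brentq(lambda x: x / 5 - (1 - np.exp(-x)), 1, 10)
print("Intersection at x =", x_star)  # roughly 4.965, consistent with "just below 5"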
# %% [markdown]
# ## **Exercise 9: Heat capacity**
# Einstein's theory of the heat capacity leads to the equation given in the
# lecture,
#
# $C_{V,m} = 3R\left(\dfrac{\Theta_E}{T}\right)^2 \dfrac{e^{-\frac{\Theta_E}{T}}}{\left(1-e^{-\frac{\Theta_E}{T}}\right)^2} $
#
# Use Python to plot the molar heat capacity as a function of the temperature $T$
# for $ 0 < T < 700$ K. Also draw the classical heat capacity $C_{V,m} = 3R$ into the
# diagram.
# %%
def EinsteinWärmeKap(ET, T):
    """ Evaluate the Einstein heat capacity.
    ET -- Einstein temperature in K
    T -- temperature in kelvin
"""
return(3* const.R * (ET/T)**2 * (np.exp(-ET/T))/(1 - np.exp(-ET/T))**2)
# T range up to 700 K
T = np.arange(0,700,0.1)
plt.plot(T,EinsteinWärmeKap(341,T),label="Einstein")
plt.plot(T,np.full_like(T,3*const.R), label="klassisch")
plt.legend()
plt.xlabel("Temperatur T in K")
plt.ylabel("Molare Wärmekapazität C")
plt.show()
| [
"matplotlib"
] |
f4b1842a51b170b5891072eddf187e4e42baf9e7 | Python | growupboron/MOOCS | /udemy/Machine A-Z/Machine Learning A-Z Template Folder/Part 2 - Regression/Section 5 - Multiple Linear Regression/Multiple_Linear_Regression/mlreg1.py | UTF-8 | 9,847 | 3.734375 | 4 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Fri Jun 22 21:00:06 2018
@author: Aman Arora
"""
#MULTIPLE LINEAR REGRESSION.
#Several independent variables.
#Prepare the dataset.
#We use the previous template for the job of preparing the dataset.
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Importing the dataset
dataset = pd.read_csv('50_Startups.csv')
X = dataset.iloc[:, :-1].values
y = dataset.iloc[:, 4].values # The profit column is the fourth one, counting from zero in Python.
# Encoding categorical data
# Encoding the Independent Variable
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
labelencoder_X = LabelEncoder()
X[:, 3] = labelencoder_X.fit_transform(X[:, 3]) #TO CHANGE THE TEXT INTO NUMBERS. 3rd columns.
onehotencoder = OneHotEncoder(categorical_features = [3]) #Index of column is 3.
X = onehotencoder.fit_transform(X).toarray()
# We don't need to encode the Dependent Variable.
#Avoiding the dummy variable trap.
X = X[:, 1:]
#I just removed the first column from X: '1:' keeps all rows and all columns from index 1 onward, dropping the first dummy column to avoid the dummy variable trap (most Python libraries would actually handle this automatically).
# Splitting the dataset into the Training set and Test set
#10 observations in test set and 40 in training set make a good split => 20% or 0.2 of the dataset is #test_size.
from sklearn.cross_validation import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)
# Feature Scaling, not necessary, the library will do for us.
"""from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)
sc_y = StandardScaler()
y_train = sc_y.fit_transform(y_train)"""
#The idea here is to build a model that checks whether there are linear dependencies between the independent variables and the dependent variable, profit. The model should predict profit from the R&D, administration and marketing spend and the state.
#Our matrix of features X, which holds the independent variables, will contain the columns R&D spend, Administration, Marketing spend and State; y will be the last column, profit.
#In the matrix of features we have to encode the State column, since it holds categorical values (the various states), so we use the OneHotEncoder. This is done above, before splitting the dataset into training and test sets.
#FITTING THE MULTIPLE LINEAR REGRESSION TO THR TRAINING SET.
from sklearn.linear_model import LinearRegression
#Coz we are still making lineaar regression, but with various variables.
regressor = LinearRegression()
#Fitting this object to the training set.
regressor.fit(X_train, y_train) #I fit the multiple linear regressor to the training set.
#We are now going to test the performance of the multiple linear regression model on the test set.
#Final round.
#If we see our dataset, we have 4 independent variables, and 1 dependent variable. If we wanted to add a #visual step to plot a graph, we'd need 5 dimensions. So, we proceed to predictions of test results.
#Creating the vector of predictions.
y_pred = regressor.predict(X_test)
#Now see and compare the y_test and y_pred test sets.
#What if among these various independent variables, we want to select those which have high, or which have #low impact on the dependent variable, called statistically significant or insignificant variables. The #goal now is to find a team of independent variables which are highly staistically significant.
#BACKWARD ELIMINATION.
#We will need a library : stats models formula API library, sm (stats models)
import statsmodels.formula.api as sm
#We just need to add a column of 1's in the matrix of independent variables, X. Why? Becuase see, multiple linear regression equation, in it, there's a constant b0, which is as such not associated with any independent variable, but we can clearly associate it with x0 = 1. (b0.x0 = b0).
#The statsmodels library we have just imported does not add this b0 constant on its own: the matrix of features X only contains the independent variables, so there is no x0 = 1 column anywhere. Most libraries include the intercept automatically, but statsmodels does not, so we have to add the column of 1's ourselves; otherwise the library fits the equation b1x1 + b2x2 + ... + bnxn without an intercept.
#Doing so using append function from numpy library.
"""
X = np.append(X, values = np.ones((50, 1)).astype(int), axis = 1)
"""
#If we inspect the array function, first parameter is arr, or our matrix of features, X. The next parameter is values, that in this case we want 1. So, we need to add an array, as written, column of 1's. #The array is a matrix of 50 lines and 1 column, with only 1 value inside. It's very easy with a function of numpy called ones() which creates an array of only 1's value inside. We need to specify the number of lines and column. The first arguement of the ones() is shape, which lets us set-up the array. We input (50, 1) to create array of 50 rows and 1 column, as the first argument.
#To prevent the datatype or 'dtype' error, we just convert the matrix of features to integer, using astype() function.
#The third argument of the append function is axis, if we want to add a column, axis = 1, and if we want a row, axis = 0.
#Now, this will add the column on the end. But, what we want is to add the column in the beginning, to maintain systematicity. We just need to invert the procedure, that is, add the matrix of features X to the single column we have made! I comment out the above line and rewrite it below.
X = np.append(arr = np.ones((50, 1)).astype(int), values = X, axis = 1)
#STARTING WITH BACKWARD ELIMINATION.
#We've prepared the backward elimination algorithm.
#First thing to do is to create a new matrix of features, which is going to be optimal matrix of features, #X_opt.
X_opt = X[:, [0, 1, 2, 3, 4, 5]]
#This is going to be in the end, containing only the independent variables that are statistically significant. As we know, BE consists of including all independent variables, then we go on excluding independent variables. We're going to write specifically all indexes of columns in X. Coz we are going to remove various indexes. The indexes are included as [0, 1, 2,3 , 4, 5].
#Now we will select the significance level. We choose it as 5%.
#Step2 : Fit the full model with all possible predictors, that we just did above. But we haven't fitted it yet. Doing that now.
#We use a new library, statsmodel library, and we create a new regressor which is the object of new class, which is called, OLS, ordinary least squares.
regressor_OLS = sm.OLS(endog = y, exog = X_opt).fit()
#endog is the dependent variable, y.
#exog is the array with no. of observations and no. of regressions, X_opt or matrix of features, and its not included by default. Intercept needs to be added by the user, or me.
#Step3
#We've to look for the predictor which has the highest P value, that is the independent variable with highest P value. Read notebook. We use a function of statsmodel library summary() which returns a great table containing the matrix that helps make our model more robust like r^2, etc. and we'll get all the P values, we'll compare it to the significance level to decide whether to remove it from our model or not.
regressor_OLS.summary()
#We are interested in the P value, which is the probability, and when we run this line, we get a great table which shows all the important parameters needed to checkout for backward elimination. We'll see about R-squared and Adj. R-squared later, to make our matrix more robust. P value is the probability, lower the P value, more significant the independent variable is wrt dependent variable. x0 = 1, x1 and x2 the two dummy variables for state. Then x3 is for R&D, and x4, for min spend, and x5 for marketing spend.
#Now, we clearly see, One with highest P value is x2, with 99%. So, we are way above significance level of 5%, so we remove it according to step 4.
#If we see for matrix X from variable explorer, we see that the variable x2 or the one with index 2 is one of the dummy variables. So we will remove it.
X_opt = X[:, [0, 1, 3, 4, 5]] #Removed the variable with highest probability.
regressor_OLS = sm.OLS(endog = y, exog = X_opt).fit() #Fitted the model w/o the variable x2.
regressor_OLS.summary()
#Now, we see, variable x1 has the highest P value, which is greater than 5%, which is 94%. So we'll remove it.
X_opt = X[:, [0, 3, 4, 5]]
regressor_OLS = sm.OLS(endog = y, exog = X_opt).fit()
regressor_OLS.summary()
#Again.
X_opt = X[:, [0, 3, 5]]
regressor_OLS = sm.OLS(endog = y, exog = X_opt).fit()
regressor_OLS.summary()
#Again.
X_opt = X[:, [0, 3]]
regressor_OLS = sm.OLS(endog = y, exog = X_opt).fit()
regressor_OLS.summary()
#Here, I am left with just R&D spend and the constant. Its a very small P value, and so, R&D spend is a powerful predictor for profit, and has a high effect on the dependent variable on the profit. Whether we needed to remove marketing spend or not, since it was very near to the significance level, will be seen in the next to come algorithms for ML.
#====================THIS IS THE BACKWARD ELIMINATION ALGORITHM=========================
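#========================================================================================
#OPTIONAL: the same procedure can be automated. This is a hedged sketch, not part of the
#original walkthrough above; the 5% significance level and the removal of the column with
#the largest p-value simply mirror the manual steps performed one by one above.
def backward_elimination(X, y, significance_level = 0.05):
    X_opt = X
    while True:
        regressor_OLS = sm.OLS(endog = y, exog = X_opt).fit()
        p_values = regressor_OLS.pvalues
        if p_values.max() > significance_level:
            # drop the predictor with the highest p-value and refit
            X_opt = np.delete(X_opt, p_values.argmax(), axis = 1)
        else:
            return X_opt, regressor_OLS
#X_auto, final_model = backward_elimination(X, y)
#final_model.summary()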
| [
"matplotlib"
] |
c269088a48df833b5f0e66469f42f3d9d920c4ea | Python | UncleBob2/MyPythonCookBook | /Intro to Python book/random 3D walk/random walk 3D.py | UTF-8 | 1,436 | 3.296875 | 3 | [] | no_license | import random
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
n = eval(input("Input number of blocks: "))
x_axis, y_axis, z_axis = 0, 0, 0
path = []
for i in range(n + 1):
x = random.uniform(0.0001, 1.0001)
path.append(tuple((x_axis, y_axis, z_axis)))
# print(x)
if x > 0.0001 and x <= 0.166666:
y_axis -= 1
# print("down")
elif x > 0.166666 and x <= 0.333333:
y_axis += 1
# print("up")
elif x > 0.333333 and x <= 0.5:
x_axis -= 1
# print("left")
elif x > 0.5 and x <= 0.66666666:
x_axis += 1
# print("right")
elif x > 0.6666666 and x <= 0.8333333:
z_axis -= 1
# print("down")
else:
z_axis += 1
# print("up")
print(path)
print("(", x_axis, ",", y_axis, ",", z_axis, ")")
x_val = [x[0] for x in path]
y_val = [x[1] for x in path]
z_val = [x[2] for x in path]
fig = plt.figure()
ax = fig.add_subplot(111, projection="3d")
for x_v, y_v, z_v in zip(x_val, y_val, z_val):
label = "(%d, %d, %d)" % (x_v, y_v, z_v,)
ax.text(x_v, y_v, z_v, label)
plt.title("Random 3D")
ax.set_xticks([0, 1, 2, 3, 4, 5])
ax.set_yticks([0, 1, 2, 3, 4, 5])
ax.set_zticks([0, 1, 2, 3, 4, 5])
ax.set_xlim(0, 5)
ax.set_ylim(0, 5)
ax.set_zlim(0, 5)
ax.set_xlabel("X axis")
ax.set_ylabel("Y axis")
ax.set_zlabel("Z axis")
ax.plot(x_val, y_val, z_val)
ax.plot(x_val, y_val, z_val, "or")
plt.show()
| [
"matplotlib"
] |
9cda3697dab2532081d4b75e545fdf46775fe77b | Python | awur978/Approximation_AF | /cmp.py | UTF-8 | 2,317 | 3.34375 | 3 | [] | no_license | #implementation of approximate TanSig using PLAN as described in article
#IMPLEMENTING NONLINEAR ACTIVATION FUNCTIONS IN NEURAL NETWORK EMULATORS
#purpose: to find error level of PLAN when compared to TanSig
#here plenty activation functions were implemented using this method, step,ramp and sigmoid
# only interested in sigmoid and tansig
#Note the lowlimit and highlimit for each function type is different
#Step low=0, high = 2 or anything
#ramp
#sigmoid low = -4.04, high = 4.04
'''
KEY
t_h = CMP(highlimit,x)
t_l = CMP(x,lowlimit)
t_pn = cmp(x,0)
'''
import numpy as np
from scipy import arange
import matplotlib.pyplot as plt
def cmp(i,j):
if i >= j:
y=1
else:
y = 0
return y
#step function
highlimit = 4.04 # upper limit of saturation points
lowlimit = -4.04 # lower limit of saturation points
gain = 1/(2*(highlimit)**2) # gradient of the line intersecting the two saturation points
#y = cmp(x,lowlimit)
cmp = np.vectorize(cmp)
x = np.arange(-3.0,3.0,0.1)
#lowlimit for step function should be 0
y_uni = cmp(x,lowlimit) #step function unipolar
y_bi = 2*(cmp(x,lowlimit))-1 #step function bipolar
plt.plot(x,y_uni)
#plt.plot(x,np.tanh(x))
plt.grid(True)
plt.show()
#RAMP function
def ramp_func(x):
if x >= highlimit:
y=1
elif lowlimit <= x and lowlimit <highlimit:
y = 1 + gain*(x-highlimit)
else:
y = 0
return y
def ramp_func2(x):
t_h = cmp(highlimit,x)
t_l = cmp(x,lowlimit)
y = t_l + t_h*t_l*gain*(x-highlimit)
return y
ramp_func2 = np.vectorize(ramp_func2)
x = np.arange(-3.0,3.0,0.1)
y_uni = ramp_func2(x)
plt.plot(x,y_uni)
plt.grid(True)
plt.show()
#SIGMOID
def sig1(x):
t_h = cmp(highlimit,x)
t_l = cmp(x,lowlimit)
t_pn = cmp(x,0)
y = t_pn - ((2*t_pn)-1) * (t_h*t_l*gain*(((2*t_pn-1)*highlimit)-x)**2)
return y
sig1 = np.vectorize(sig1)
x = np.arange(-6.0,6.0,0.1)
y_uni = sig1(x)
plt.plot(x,y_uni)
plt.grid(True)
plt.show()
#TANSIG/TANH
def tanh1(x):
t_h = cmp(highlimit,x)
t_l = cmp(x,lowlimit)
t_pn = cmp(x,0)
t = t_pn - ((2*t_pn)-1) * (t_h*t_l*gain*(((2*t_pn-1)*highlimit)-x)**2)
y = (2*t) -1
return y
tanh1 = np.vectorize(tanh1)
x = np.arange(-6.0,6.0,0.1)
y_bi = tanh1(x)
plt.plot(x,y_bi)
plt.grid(True)
plt.show()
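#ERROR LEVEL: the header states the purpose is to quantify the PLAN error against TanSig,
#but no metric is computed above. A minimal sketch (assumption: max-absolute and
#mean-absolute error over the plotted range are the quantities of interest):
x = np.arange(-6.0,6.0,0.01)
plan_error = tanh1(x) - np.tanh(x)
print("PLAN vs tanh: max abs error =", np.max(np.abs(plan_error)),
      "mean abs error =", np.mean(np.abs(plan_error)))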
| [
"matplotlib"
] |
3e1c9c2c4b97a580f2741afd7dedd5d55f0bbd53 | Python | 1025724457/zufang_helper | /FangyuanHelper/fangyuan/matplot.py | UTF-8 | 4,286 | 3.3125 | 3 | [] | no_license | # -*- coding:utf-8 -*-
"""创建统计图"""
import matplotlib.pyplot as plt
import pandas as pd
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
class Drawing(object):
def __init__(self):
plt.rcParams['font.sans-serif'] = ['SimHei'] # 用来正常显示中文
plt.rcParams['axes.unicode_minus'] = False # 用来正常显示负号
    def create_pie(self, num, pie_name):
        """Create a pie chart."""
perc = num/num.sum()
name = perc.index
fig = plt.figure()
ax = fig.add_subplot(111)
ax.pie(perc, labels=name, autopct='%1.1f%%')
ax.axis('equal')
ax.set_title(pie_name+'饼状图')
# plt.show()
def create_bar(self, num, bar_name):
name = num.index
fig = plt.figure()
ax = fig.add_subplot(111)
rect = ax.bar(name, num)
ax.set_title(bar_name+'条形图')
ax.set_ylabel('数量')
        # add a value label above each bar
for rec in rect:
x = rec.get_x()
height = rec.get_height()
ax.text(x + 0.1, 1.02 * height, str(height))
# plt.show()
    def create_hist(self, num, hist_name):
        """Draw a histogram (only intended for the price column)."""
price = num
price = pd.to_numeric(price, errors='coerce') # str转换 num
price = price[price < 20000]
fig = plt.figure()
ax = fig.add_subplot(111)
ax.hist(price, 100, range=(0, 20000))
xmajorLocator = MultipleLocator(500) # 将x主刻度标签设置为20的倍数
# xmajorFormatter = FormatStrFormatter('%5.1f') # 设置x轴标签文本的格式
# xminorLocator = MultipleLocator(5) # 将x轴次刻度标签设置为5的倍数
ax.xaxis.set_major_locator(xmajorLocator)
# ax.xaxis.set_major_formatter(xmajorFormatter)
# ax.xaxis.set_minor_locator(xminorLocator)
# ax.xaxis.grid(True, which='major') # x坐标轴的网格使用主刻度
ax.set_title(hist_name + '直方图')
def show(self):
plt.show()
class GetNum(object):
    """Load the house-listing information from the exported file and do simple processing."""
def __init__(self):
        # load the data
# self.header = ['house_title', 'house_url', 'house_price', 'house_zuping', 'house_size', 'house_xiaoqu',
# 'house_area', 'house_detailed_address', 'house_phone', 'house_man']
self.df = pd.read_csv(r'D:\myproject\fangyuan_info.csv', encoding='gbk')
def get_area(self):
area = self.df['house_area']
area = area.replace(['南山区', '福田区', '宝安区', '盐田区', '罗湖区', '龙华.*', '坪山.*'],
['南山', '福田', '宝安', '盐田', '罗湖', '龙华', '坪山'], regex=True)
num = area.value_counts() # 获取每个地区的数量
return num
def get_zuping(self):
zuping = self.df['house_zuping']
zuping = zuping.replace(['合租.*', '整租.*'], ['合租', '整租'], regex=True)
zuping = zuping.fillna('空')
num = zuping.value_counts() # 获取每项的数量
return num
def get_price(self):
price = self.df['house_price']
return price
def get_man(self):
man = self.df['house_man']
man = man.replace(['.*经纪人.*', '.*个人.*'], ['经纪人', '个人'], regex=True)
num = man.value_counts()
return num
def test_area():
info = GetNum()
drawing = Drawing()
area_num = info.get_area()
drawing.create_bar(area_num, '地区')
drawing.create_pie(area_num, '地区')
drawing.show()
def test_zuping():
info = GetNum()
drawing = Drawing()
zuping_num = info.get_zuping()
drawing.create_pie(zuping_num, '租凭')
drawing.create_bar(zuping_num, '租凭')
drawing.show()
def test_man():
info = GetNum()
drawing = Drawing()
man_num = info.get_man()
drawing.create_bar(man_num, '房主')
drawing.create_pie(man_num, '房主')
drawing.show()
def test_price():
info = GetNum()
drawing = Drawing()
price = info.get_price()
drawing.create_hist(price, '价格')
drawing.show()
if __name__ == '__main__':
# test_area()
# test_zuping()
# test_man()
test_price()
| [
"matplotlib"
] |
879d2f8e5c8d3ba643383caac4176696f5a107c1 | Python | drugilsberg/interact | /interact/roc.py | UTF-8 | 4,541 | 3.125 | 3 | [
"MIT"
] | permissive | """Methods used to build ROC."""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics import roc_curve, auc
# seaborn settings
sns.set_style("white")
sns.set_context("paper")
color_palette = sns.color_palette("colorblind")
sns.set_palette(color_palette)
def _get_total_undirected_interactions(n):
return n * (n - 1) / 2
def _check_index(index, labels_set, interaction_symbol='<->'):
e1, e2 = index.split(interaction_symbol)
return (e1 in labels_set and e2 in labels_set)
def _filter_indices_with_labels(indexes, labels, interaction_symbol='<->'):
labels_set = set(labels)
filtering = pd.Series([
_check_index(index, labels_set, interaction_symbol)
for index in indexes
])
return indexes[filtering]
def _is_index_diagonal(index, interaction_indices='<->'):
a_node, another_node = index.split(interaction_indices)
return a_node == another_node
def _get_evaluation_on_given_labels(
labels, true_interactions, predicted_interactions, no_self_loops=True
):
total_interactions = _get_total_undirected_interactions(len(labels))
interaction_indices = list(
set(
_filter_indices_with_labels(predicted_interactions.index, labels) |
_filter_indices_with_labels(true_interactions.index, labels)
)
)
if no_self_loops:
interaction_indices = [
index
for index in interaction_indices
if not _is_index_diagonal(index)
]
predicted_interactions = predicted_interactions.reindex(
interaction_indices
).fillna(0.0)
true_interactions = true_interactions.reindex(
interaction_indices
).fillna(0.0)
zero_interactions = int(total_interactions) - len(interaction_indices)
y = np.append(true_interactions.values, np.zeros((zero_interactions)))
scores = np.append(
predicted_interactions.values, np.zeros((zero_interactions))
)
return y, scores
def get_roc_df(
pathway_name, method_name, true_interactions, predicted_interactions,
number_of_roc_points=100
):
"""Return dataframe that can be used to plot a ROC curve."""
labels = {
gene
for genes in [
true_interactions.e1, predicted_interactions.e1,
true_interactions.e2, predicted_interactions.e2
]
for gene in genes
}
y, scores = _get_evaluation_on_given_labels(
labels, true_interactions.intensity,
predicted_interactions.intensity
)
# print(method_name, y, scores)
reference_xx = np.linspace(0, 1, number_of_roc_points)
if sum(y) > 0:
xx, yy, threshold = roc_curve(y, scores)
print(method_name, y, scores, threshold, xx, yy)
area_under_curve = auc(xx, yy)
yy = np.interp(reference_xx, xx, yy)
else:
yy = reference_xx
area_under_curve = 0.5 # worst
roc_df = pd.DataFrame({
'pathway': number_of_roc_points * [pathway_name],
'method': (
number_of_roc_points * [method_name]
),
'YY': yy,
'XX': reference_xx.tolist()
})
return roc_df, area_under_curve
def plot_roc_curve_from_df(
df, auc_dict_list=None, output_filepath=None, figsize=(6, 6)
):
    """From a df with multiple methods plot a roc curve using sns.tsplot."""
xlabel = 'False Discovery Rate'
ylabel = 'True Positive Rate'
title = 'Receiver Operating Characteristic'
# rename method name to include AUC to show it in legend
if auc_dict_list:
for method in auc_dict_list.keys():
mean_auc = np.mean(auc_dict_list[method])
method_indices = df['method'] == method
df['mean_auc'] = mean_auc
df.loc[method_indices, 'method'] = (
'{} '.format(
method.capitalize()
if method != 'INtERAcT'
else method
) +
'AUC=%0.2f' % mean_auc
)
df = df.sort_values(by='method')
df.rename(columns={'method': ''}, inplace=True) # to avoid legend title
plt.figure(figsize=figsize)
sns.set_style("whitegrid", {'axes.grid': False})
sns.tsplot(
data=df, time='XX', value='YY',
condition='', unit='pathway', legend=True
)
plt.xlim([0, 1])
plt.ylim([0, 1])
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.title(title)
if output_filepath:
plt.savefig(output_filepath, bbox_inches='tight')
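

# Hedged usage sketch (not part of the original module): the table layout below is
# inferred from the functions above -- interaction frames need `e1`, `e2` and
# `intensity` columns and an "e1<->e2" index -- and plotting relies on the seaborn
# version this module was written for (sns.tsplot).
if __name__ == '__main__':
    truth = pd.DataFrame(
        {'e1': ['A', 'A'], 'e2': ['B', 'C'], 'intensity': [1.0, 1.0]},
        index=['A<->B', 'A<->C'])
    predicted = pd.DataFrame(
        {'e1': ['A', 'B'], 'e2': ['C', 'C'], 'intensity': [0.2, 0.9]},
        index=['A<->C', 'B<->C'])
    roc_df, roc_auc = get_roc_df('toy_pathway', 'INtERAcT', truth, predicted)
    plot_roc_curve_from_df(roc_df, auc_dict_list={'INtERAcT': [roc_auc]})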
| [
"matplotlib",
"seaborn"
] |
4fb25e30c62ea4e5d2f87353a7aa7a0062200b27 | Python | lijiunderstand/kitti2bag-1 | /demos/demo_raw.py | UTF-8 | 2,187 | 2.828125 | 3 | [
"MIT"
] | permissive | """Example of pykitti.raw usage."""
import itertools
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
import pykitti
__author__ = "Lee Clement"
__email__ = "[email protected]"
# Change this to the directory where you store KITTI data
basedir = '/home/qcraft/tmp'
date = '2019_07_02'
drive = '1153'
#basedir = '/home/qcraft/workspace/kitti'
#date = '2011_09_26'
#drive = '0064'
# Load the data. Optionally, specify the frame range to load.
# dataset = pykitti.raw(basedir, date, drive)
dataset = pykitti.raw(basedir, date, drive, frames=range(0, 20, 5))
dataset._load_oxts()
print "load_oxts_ok"
# dataset.calib: Calibration data are accessible as a named tuple
# dataset.timestamps: Timestamps are parsed into a list of datetime objects
# dataset.oxts: List of OXTS packets and 6-dof poses as named tuples
# dataset.camN: Returns a generator that loads individual images from camera N
# dataset.get_camN(idx): Returns the image from camera N at idx
# dataset.gray: Returns a generator that loads monochrome stereo pairs (cam0, cam1)
# dataset.get_gray(idx): Returns the monochrome stereo pair at idx
# dataset.rgb: Returns a generator that loads RGB stereo pairs (cam2, cam3)
# dataset.get_rgb(idx): Returns the RGB stereo pair at idx
# dataset.velo: Returns a generator that loads velodyne scans as [x,y,z,reflectance]
# dataset.get_velo(idx): Returns the velodyne scan at idx
# Grab some data
second_pose = dataset.oxts[1].T_w_imu
first_gray = next(iter(dataset.gray))
first_cam1 = next(iter(dataset.cam1))
first_rgb = dataset.get_rgb(0)
first_cam2 = dataset.get_cam2(0)
third_velo = dataset.get_velo(2)
# Display some of the data
np.set_printoptions(precision=4, suppress=True)
print('\nDrive: ' + str(dataset.drive))
print('\nFrame range: ' + str(dataset.frames))
print('\nIMU-to-Velodyne transformation:\n' + str(dataset.calib.T_velo_imu))
print('\nGray stereo pair baseline [m]: ' + str(dataset.calib.b_gray))
print('\nRGB stereo pair baseline [m]: ' + str(dataset.calib.b_rgb))
print('\nFirst timestamp: ' + str(dataset.timestamps[0]))
print('\nSecond IMU pose:\n' + str(second_pose))
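# Added illustration (not in the dumped file): visualize the third velodyne scan
# using the matplotlib / Axes3D imports above; only every 100th point is plotted
# to keep the scatter light.
velo_pts = third_velo[::100, :]
fig_velo = plt.figure()
ax_velo = fig_velo.add_subplot(111, projection='3d')
ax_velo.scatter(velo_pts[:, 0], velo_pts[:, 1], velo_pts[:, 2],
                c=velo_pts[:, 3], cmap='gray')
ax_velo.set_title('Subsampled velodyne scan (frame index 2)')
plt.show()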
| [
"matplotlib"
] |
9893f437abf2c4fed762bd909ed7b5303620cc74 | Python | mousebaiker/SmolOsc | /plotting.py | UTF-8 | 2,609 | 2.671875 | 3 | [] | no_license | import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
import numpy as np
def plot_moments(ts, zeroth, first, second):
fig, axes = plt.subplots(3, figsize=(15, 15), sharex=True)
axes[0].plot(ts, zeroth)
axes[0].set_title("Total concentration", fontsize=16)
axes[1].plot(ts, first)
axes[1].set_title("First moment of concentration", fontsize=16)
axes[2].plot(ts, second)
axes[2].set_title("Second moment of concentration", fontsize=16)
axes[2].set_xlabel("Time, s", fontsize=14)
axes[0].tick_params(axis="both", which="major", labelsize=14)
axes[0].tick_params(axis="both", which="minor", labelsize=14)
axes[1].tick_params(axis="both", which="major", labelsize=14)
axes[1].tick_params(axis="both", which="minor", labelsize=14)
axes[2].tick_params(axis="both", which="major", labelsize=14)
axes[2].tick_params(axis="both", which="minor", labelsize=14)
return fig, axes
def plot_solution(k, solution, analytical=None):
fig, ax = plt.subplots(1, figsize=(15, 10))
ax.loglog(k, solution, label="Numerical solution")
if analytical is not None:
ax.loglog(k, analytical, label="Analytical")
ax.set_xlabel("Cluster size", fontsize=14)
ax.set_ylabel("Concentration", fontsize=14)
ax.legend(fontsize=14)
ax.grid()
ax.tick_params(axis="both", which="major", labelsize=14)
ax.tick_params(axis="both", which="minor", labelsize=14)
return fig, ax
def plot_parameter_history(ts, history, parameter_name):
fig, ax = plt.subplots(1, figsize=(15, 10))
ax.plot(ts, history, label=parameter_name, lw=3)
ax.set_xlabel("Time, s", fontsize=14)
ax.legend(fontsize=14)
ax.grid()
ax.tick_params(axis="both", which="major", labelsize=14)
ax.tick_params(axis="both", which="minor", labelsize=14)
return fig, ax
def create_solution_animation(ts, k, solutions, name, analytical=None, loglog=True):
fig, ax = plt.subplots()
if loglog:
(ln,) = ax.loglog([], [])
else:
(ln,) = ax.semilogy([], [])
if analytical is not None:
ax.loglog(k, analytical, label="Analytical")
def init():
ax.set_xlim(1, k[-1])
ax.set_ylim(10 ** (-16), 10 ** (0))
return (ln,)
def update(frame):
y = solutions[frame]
ln.set_data(k, y)
ax.set_title("T=" + str(ts[frame]))
return (ln,)
ani = FuncAnimation(
fig,
update,
frames=np.arange(len(solutions)),
interval=30,
init_func=init,
blit=True,
)
ani.save(name, writer="ffmpeg")
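# Illustrative usage (added; not part of the original module). The arrays below
# are synthetic placeholders, only meant to show the expected call signature.
if __name__ == "__main__":
    k = np.arange(1, 101)
    example_solution = np.exp(-0.05 * k)
    fig, ax = plot_solution(k, example_solution)
    plt.show()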
| [
"matplotlib"
] |
d908df598d9a7923bb5a29a6a26879ac5a3fa9b1 | Python | wolhandlerdeb/clustering | /Q1_final_project_v2.py | UTF-8 | 8,251 | 2.53125 | 3 | [
"MIT"
] | permissive | import numpy as np
import pandas as pd
import scipy as sc
from scipy.stats import randint, norm, multivariate_normal, ortho_group
from scipy import linalg
from scipy.linalg import subspace_angles, orth
from scipy.optimize import fmin
import math
from statistics import mean
import seaborn as sns
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
import itertools as it
import seaborn as sns
import matplotlib.pyplot as plt
from cluster.selfrepresentation import ElasticNetSubspaceClustering
import time
# functions to simulate the data
def first_simulation(p, dim, k):
b = [orth(np.random.rand(p, dim)) for i in range(k + 1)]
return b
def find_theta_max(b, t, k):
theta_max = []
for i in range(1, k + 1):
for j in range(1, i):
theta_max.append(subspace_angles(b[i], b[j]).max())
max_avg_theta = mean(theta_max)
theta = max_avg_theta * t
return theta
def second_simulation(p, k, dim, theta, b):
def find_a_for_theta(a, b=b, k=k, theta=theta):
temp_theta = []
for i in range(1, k + 1):
for j in range(1, i):
temp_theta.append(subspace_angles(b[0] * (1 - a) + b[i] * a, b[0] * (1 - a) + b[j] * a).max())
return mean(temp_theta) - theta
a = sc.optimize.bisect(find_a_for_theta, 0, 1)
B = [b[0] * (1 - a) + b[i] * a for i in range(1, k + 1)]
return B
def third_simulation(n, p, dim, B, k, theta):
z = np.random.randint(0, k, n)
w = np.random.multivariate_normal(mean=np.zeros(dim), cov=np.diag(np.ones(dim)), size=n)
X = np.zeros((n, p))
for i in range(n):
X[i,] = np.random.multivariate_normal(mean=np.array(np.dot(np.array(w[i, :]), B[z[i]].T)).flatten(),
cov=np.diag(np.ones(p))) # sigma value is missing
return n, p, dim, theta, X, z, B
# data simulation
def final_data_simulation(k):
nn = [2 ** j for j in range(3, 11)]
pp = [2 ** j for j in range(4, 8)]
dd = [2 ** -j for j in range(1, 5)]
tt = [10 ** -j for j in range(0, 3)]
df = pd.DataFrame(columns=['n', 'p', 'dim', 'theta', 'X', 'z', 'B'])
for p in pp:
for d in dd:
dim = int(d * p)
b = first_simulation(p=p, dim=dim, k=k)
for t in tt:
theta = find_theta_max(b=b, t=t, k=k)
for n in nn:
B = second_simulation(p=p, k=k, dim=dim, theta=theta, b=b)
row = pd.Series(list(third_simulation(n=n, p=p, dim=dim, B=B, k=k, theta=theta)[0:7]),
["n", "p", "dim", "theta", "X", "z", "B"])
df = df.append([row], ignore_index=True)
return df
df = final_data_simulation(4)
X = df['X'][31]
z = df['z'][31]
z
dim = 4
p = 16
k = 4
kmeans = KMeans(n_clusters=k)
kmeans
temp_df = pd.DataFrame(X)
temp_df['cluster'] = kmeans.fit_predict(X)
# for i in range(k) :
i = 1
df_new = temp_df[temp_df['cluster'] == i].drop(['cluster'], axis=1)
cluster_kmean = KMeans(n_clusters=k).fit_predict(X)
data = {'cluster1': z, 'cluster2': cluster_kmean}
clusters = pd.DataFrame(data, index=range(len(z)))
all_per = list(it.permutations(range(k)))
accuracy_rate_all_per = np.zeros(len(all_per))
c = [i for i in range(k)]
for l, p in enumerate(all_per):
dic = dict(zip(c, p))
clusters['premut_cluster'] = clusters['cluster2'].transform(lambda x: dic[x] if x in dic else None)
m = clusters.groupby(['cluster1', 'premut_cluster']).size().unstack(fill_value=0)
accuracy_rate_all_per[l] = np.trace(m)
accuracy_rate_all_per.max(), len(cluster_kmean)
per = all_per[2]
dic = dict(zip(c, per))
clusters['premut_cluster'] = clusters['cluster2'].transform(lambda x: dic[x] if x in dic else None)
clusters.groupby(['cluster2', 'premut_cluster']).size()
# find kmeans clusters and subspaces
def pca_subspace(df, i, dim):
df_new = df[df['cluster'] == i].drop(['cluster'], axis=1)
pca_components_number = len(df_new) - 1 if len(df_new) < dim else dim # handling with low n (lower than dim)
pca = PCA(n_components=pca_components_number)
pca.fit_transform(df_new)
B_kmeans = pca.components_
return B_kmeans.T
def find_kmeans_subspace(X, k, dim):
kmeans = KMeans(n_clusters=k)
temp_df = pd.DataFrame(X)
temp_df['cluster'] = kmeans.fit_predict(X)
B_kmean = [pca_subspace(temp_df, i, dim) for i in range(k)]
return B_kmean
def find_ensc_subspace(X, k, dim):
temp_df = pd.DataFrame(X)
temp_df['cluster'] = ElasticNetSubspaceClustering(n_clusters=k, algorithm='lasso_lars', gamma=50).fit(X.T)
B_ensc = [pca_subspace(temp_df, i, dim) for i in range(k)]
return B_ensc
# Recovery Performance
def performance_measure1(k, B1, B2):
all_per = list(it.permutations(range(k)))
sum_cos_angles_all_per = np.zeros(len(all_per))
for l, val in enumerate(all_per):
for i in range(k):
if B2[val[i]].shape[1] > 0: # handling with empty clusters
sum_cos_angles_all_per[l] += (math.cos(
subspace_angles(B1[i], B2[val[i]]).max())) ** 2 # use min or max????????????????
cost_subspace = sum_cos_angles_all_per.max()
return cost_subspace
# WHAT ARE WE DOING WITH EMPTY CLUSTERS
def performance_measure2(k, cluster1, cluster2):
data = {'cluster1': cluster1, 'cluster2': cluster2}
clusters = pd.DataFrame(data, index=range(len(cluster1)))
all_per = list(it.permutations(range(k)))
accuracy_rate_all_per = np.zeros(len(all_per))
for l, per in enumerate(all_per):
c = [i for i in range(k)]
dic = dict(zip(c, per))
clusters['premut_cluster'] = clusters['cluster2'].transform(lambda x: dic[x] if x in dic else None)
m = clusters.groupby(['cluster1', 'premut_cluster']).size().unstack(fill_value=0)
accuracy_rate_all_per[l] = np.trace(m)
cost_cluster = (accuracy_rate_all_per.max()) / len(cluster1)
return cost_cluster
def all_process(k):
df = final_data_simulation(k)
df['B_kmean'] = df.apply(lambda x: find_kmeans_subspace(x['X'], k, x['dim']), axis=1)
df['cluster_kmean'] = df.apply(lambda x: KMeans(n_clusters=k).fit_predict(x['X']),
axis=1) # try to return the clusters in "find_kmeans_subspace"
# df['B_ensc'] = df.apply(lambda x: find_ensc_subspace(x['X'], k, x['dim']), axis=1)
# df['cluster_ensc']=df.apply(lambda x: ElasticNetSubspaceClustering(n_clusters=k,algorithm='lasso_lars',gamma=50).fit(x['X'].T), axis=1)
return df
measure1_kmean = pd.DataFrame()
measure2_kmean = pd.DataFrame()
k = 4
for iter in range(2):
df = all_process(k)
measure1_kmean.insert(iter, "", df.apply(lambda x: performance_measure1(k, x['B'], x['B_kmean']), axis=1), True)
measure2_kmean.insert(iter, "", df.apply(lambda x: performance_measure2(k, x['z'], x['cluster_kmean']), axis=1),
True)
# measure1_ensc.insert(iter, "", df.apply(lambda x: performance_measure1(k, x['B'], x['B_ensc']), axis=1), True)
# measure2_ensc.insert(iter, "", df.apply(lambda x: performance_measure2(k, x['z'], x['cluster_ensc']), axis=1), True)
df['measure1_kmean'] = measure1_kmean.apply(lambda x: mean(x), axis=1)
df['measure2_kmean'] = measure2_kmean.apply(lambda x: mean(x), axis=1)
# df['measure1_ensc'] = measure1_ensc.apply(lambda x: mean(x), axis=1)
# df['measure2_ensc'] = measure2_ensc.apply(lambda x: mean(x), axis=1)
df['theta_degree'] = df.apply(lambda x: math.degrees(x['theta']), axis=1)
# plotting
def plotting_performance_measure(df, measure):
pp = [2 ** j for j in range(4, 8)]
dd = [2 ** -j for j in range(1, 5)]
plt.title("PERFORMANCE MEASURE1 - KMEANS")
i = 1
for p in pp:
for d in dd:
dim = int(d * p)
sns_df = df[(df['p'] == p) & (df['dim'] == dim)]
sns_df = sns_df.pivot("theta_degree", "n", measure)
plt.subplot(4, 4, i)
ax = sns.heatmap(sns_df)
plt.title('p= {p} ,dim= {dim} '.format(p=p, dim=dim))
i += 1
plotting_performance_measure(df, "measure1_kmean")
plotting_performance_measure(df, "measure2_kmean")
plotting_performance_measure(df, "measure1_ensc")
plotting_performance_measure(df, "measure2_ensc")
| [
"matplotlib",
"seaborn"
] |
0c0c95719308aedcc0ebab1ba06582f4999dda39 | Python | jackealvess/manutencao | /app.py | UTF-8 | 9,379 | 3.421875 | 3 | [] | no_license | #importando as bibliotecas
import streamlit as st #package used to build the app
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from statsmodels.tsa.stattools import adfuller
from statsmodels.tsa.arima.model import ARIMA
from fbprophet import Prophet
#function to compute the evaluation metrics
#inputs: the original time series (y)
#        the model predictions (y_pred)
#output: metrics written to the app
def calc_metrics(y,y_pred):
mse = np.mean((y - y_pred)**2)#média da diferença ao quadrado
rmse = np.sqrt(mse)
mae = np.mean(np.abs(y - y_pred))#média da diferença absoluta
mean = np.mean(y)
#st.write(mean)
r2 = 1 - (np.sum((y - y_pred)**2))/(np.sum((y - mean)**2))
mape = (1/len(y))*np.sum(np.abs((y-y_pred)/y))
smape = (1/len(y))*np.sum(np.abs((y-y_pred))/(np.abs(y)+np.abs(y_pred)))
st.write("""### MSE = {}""".format(mse))
st.write("""### RMSE = {}""".format(rmse))
st.write("""### MAE = {}""".format(mae))
st.write("""### MAPE = {}""".format(mape))
st.write("""### SMAPE = {}""".format(smape))
st.write("""### $R^2$ = {}""".format(r2))
#return mse, rmse, mae, r2
#function to transform the dates, used later to group by week or month
def transform_day(x, periodo):
x = str(x)
if periodo == 'Semana':
if int(x[8:10]) <= 7:
day = '3'
elif int(x[8:10]) <= 14:
day = '10'
elif int(x[8:10]) <= 21:
day = '17'
else:
day = '25'
return x[:8]+day+x[10:]
if periodo == 'Mês':
return x[:8]+'15'+x[10:]
if periodo == 'Dia':
return x
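#example of the mapping above (illustrative): with periodo='Semana',
#'2021-03-09 08:00:00' becomes '2021-03-10 08:00:00' (second-week bucket);
#with periodo='Mês' the same date becomes '2021-03-15 08:00:00' (mid-month bucket)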
#//--------------------------------------------------------------------------------------------------------------------------//
#st.write prints a message in the app
st.write("""# Séries Temporais Para Previsão de Falhas - v1""")
#read the input file
df=pd.read_excel('dados.xlsx')
#keep only corrective maintenance records
df = df[df['Classe']=='CORRETIVA']
#count each maintenance record as one "failure"
df['Classe'] = 1
st.write("""## 15 Equipamentos com maior número de falhas""")
#group by equipment, sort in descending order, keep only the top 15
st.write(df.groupby('Equipamento').count().reset_index().sort_values('Classe',ascending=False).head(15))
#start with an empty list of equipments
equipamentos = []
#loop to add the 15 pieces of equipment with the most failures to the list
for equipamento in df.groupby('Equipamento').count().reset_index().sort_values('Classe',ascending=False).head(15)['Equipamento'].iloc:
equipamentos.append(equipamento)
#create a select box with the equipment list
equipamento = st.selectbox('Escolha o equipamento:', equipamentos)
#create a select box for the analysis period
periodo = st.selectbox('Escolha o período de análise:', ['Dia','Semana','Mês'])
#dynamic on-screen message, changes with the chosen equipment and period
st.write("""## Falhas do {} por {}""".format(equipamento,periodo.lower()))
#apply the date transformation so the data can be grouped by period
df['Data'] = df['Data'].apply(lambda x: transform_day(x,periodo))
df['Data'] = df['Data'].astype("datetime64")
#group by period and store the dataset in ts
ts = df[df['Equipamento'] == equipamento].groupby('Data').count().reset_index().drop(["Equipamento"],axis=1)
ts = ts.rename(columns={'Classe':'Falhas'})
#copy kept for the Prophet model
ts_prophet = ts.copy()
#plot the grouped dataset
st.line_chart(ts.rename(columns={'Data':'index'}).set_index('index'))
#compute the mean
media = ts.mean()
#print the minimum and mean number of corrective maintenances
st.write("""### Mínimo de de manutenções corretivas por {}: {}""".format(periodo.lower(),ts.min()['Falhas']))
st.write("""### Média de manutenções corretivas por {}: {}""".format(periodo.lower(),ts.mean()['Falhas']))
#//--------------------------------------------------------------------------------------------------------------------------//
st.write("""## Teste de Dickey-Fuller Aumentado""")
dftest = adfuller(ts['Falhas'],autolag='AIC')
dfoutput = pd.Series(dftest[0:4], index=['Test Statistic','p-Value','Lags Used','Observations'])
resultado = 'estacionária' if dfoutput[1] < 0.05 else 'não estacionária'
st.write(dfoutput)
st.write('### A série é {}'.format(resultado))
#//--------------------------------------------------------------------------------------------------------------------------//
st.write("""## Modelos""")
escolha_modelo = st.selectbox("""# Escolha o modelo:""", ['Naive','ARIMA','Prophet'])
ts = ts.set_index(['Data'])
if escolha_modelo == 'Naive':
#naive model section
st.write("""### Modelo Naive""")
#the date is used as the index
#the naive model is simply the time series shifted by one step
naive_model = ts.shift().dropna()
#stopped here (work-in-progress marker) -----------------------------------------------------------
metrics = calc_metrics(ts['Falhas'].values[1:],ts.shift().dropna()['Falhas'].values)
#st.write("""### MSE = {}""".format(metrics[0]))
#st.write("""### RMSE = {}""".format(metrics[1]))
#st.write("""### MAE = {}""".format(metrics[2]))
plt.plot(naive_model,label='Naive')
plt.plot(ts,label='Dados')
#plt.legend()
plt.ylabel('Falhas')
plt.xlabel('Data')
plt.legend()
st.pyplot(plt)
#//--------------------------------------------------------------------------------------------------------------------------//
if escolha_modelo == 'ARIMA':
st.write("""### Modelo ARIMA""")
p = st.slider('P', min_value=0, max_value=11)
d = st.slider('d', min_value=0, max_value=10)
q = st.slider('Q', min_value=0, max_value=10)
model = ARIMA(ts,order=(p,d,q))
results_AR = model.fit()
metrics = calc_metrics(ts['Falhas'].values,results_AR.fittedvalues.values)
#st.write("""### MSE = {}""".format(metrics[0]))
#st.write("""### RMSE = {}""".format(metrics[1]))
#st.write("""### MAE = {}""".format(metrics[2]))
plt.clf()
plt.plot(results_AR.fittedvalues,label='ARIMA')
plt.plot(ts,label='Dados')
#plt.legend()
plt.ylabel('Falhas')
plt.xlabel('Data')
plt.legend()
st.pyplot(plt)
#//--------------------------------------------------------------------------------------------------------------------------//
if escolha_modelo == 'Prophet':
st.write("""### Modelo Prophet""")
ts_prophet = ts_prophet.rename(columns={'Falhas':'y','Data':'ds'})
model = Prophet()
model.fit(ts_prophet)
predictions = model.predict(ts_prophet)[['ds','yhat']]
predictions = predictions.set_index(['ds'])
metrics = calc_metrics(ts['Falhas'].values,predictions['yhat'].values)
#st.write("""### MSE = {}""".format(metrics[0]))
#st.write("""### RMSE = {}""".format(metrics[1]))
#st.write("""### MAE = {}""".format(metrics[2]))
plt.clf()
plt.plot(predictions,label='Prophet - Valor da previsao')
plt.plot(ts,label='Dados de teste')
plt.legend()
plt.ylabel('Falhas')
plt.xlabel('Data')
plt.legend()
plt.suptitle('Comparando o resultado')
st.pyplot(plt)
st.write("""## Avaliação considerando treino e teste""")
st.write("""#### simulando previsões reais que o modelo realizará""")
porcentagem = st.selectbox('Escolha o percentual da base de teste: 10, 20 ou 30 porcento', ['0.1','0.2','0.3'])
#st.write(len(ts))
#st.write(len(ts_prophet))
numero_teste = int(len(ts)*float(porcentagem))
treino_arima = ts[:-numero_teste]
teste_arima = ts[-numero_teste:]
#arima_model = auto_arima(treino_arima,error_Action='warn',supress_warnings=True,stepwise=True)
#forecast_arima = arima_model.predict(n_periods=numero_teste)
model = ARIMA(treino_arima,order=(5,0,5))
results_AR = model.fit()
#metrics = calc_metrics(teste_arima['Falhas'].values,forecast_arima)
#teste_arima['Falhas'] = forecast_arima
metrics = calc_metrics(teste_arima['Falhas'].values,results_AR.forecast(steps=numero_teste).values)
teste_arima['Falhas'] = results_AR.forecast(steps=numero_teste).values
ts_prophet = ts_prophet.rename(columns={'Falhas':'y','Data':'ds'})
treino_prophet = ts_prophet[:-numero_teste]
teste_prophet = ts_prophet[-numero_teste:]
prophet_model = Prophet()
prophet_model.fit(treino_prophet)
forecast_prophet = prophet_model.predict(teste_prophet)
metrics = calc_metrics(teste_prophet['y'].values,forecast_prophet['yhat'].values)
teste_prophet['y'] = forecast_prophet['yhat']
teste_prophet['y'] = np.mean(treino_prophet['y'])
metrics = calc_metrics(teste_prophet['y'].values,forecast_prophet['yhat'].values)
plt.clf()
plt.plot(ts,label='Dados')
plt.plot(teste_arima,label='Arima')
plt.plot(forecast_prophet['ds'],forecast_prophet['yhat'],label='Prophet')
plt.legend()
plt.ylabel('Falhas')
plt.xlabel('Data')
plt.legend()
st.pyplot(plt)
st.write("""## Previsão""")
#results_AR.plot_predict(1,100)
artigo = {"Mês":"o","Semana":"a","Dia":"o"}
mes = st.selectbox('Escolha {} {}:'.format(artigo[periodo],periodo.lower()), [i for i in range(1,13)])
intervalo = st.selectbox('Escolha o intervalo de confiança (%):', [0.95,0.9,0.85,0.8])
model = ARIMA(ts,order=(5,0,5))
results_AR = model.fit()
previsao = results_AR.forecast(steps=mes)
previsao = previsao.values[-1]
intervalo = results_AR.conf_int((1-intervalo)/100)
st.write(previsao)
avaliacao = "acima" if previsao >= ts.mean().values else "abaixo"
st.write("""### O número de falhas d{} {} {} está {} da média""".format(artigo[periodo],periodo.lower(),mes,avaliacao))
| [
"matplotlib"
] |
55cfdfc134d506075c9c980db9429d2665438d0d | Python | DouwMarx/rowing_data_analysis | /notebooks/3_combining-channels_2021-01-05.py | UTF-8 | 898 | 2.6875 | 3 | [] | no_license | import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
d = pd.read_csv("../data/Sensor_record_20201126_164859_AndroSensor.csv")
print(d.keys())
acc = d['ACCELEROMETER X (m/s²)'].values + d['ACCELEROMETER Y (m/s²)'].values +d['ACCELEROMETER Z (m/s²)'].values
# plt.figure()
# # plt.plot(d['Time since start in ms '].values,d['ACCELEROMETER X (m/s²)'].values)
# # plt.plot(d['Time since start in ms '].values,d['ACCELEROMETER Y (m/s²)'].values)
# # plt.plot(d['Time since start in ms '].values,d['ACCELEROMETER Z (m/s²)'].values)
# plt.scatter(d['Time since start in ms '].values,acc)
# plt.show()
#
# fft = np.fft.fft(acc)
# plt.figure()
# plt.plot(fft[0:int(len(fft)/2)])
# plt.show()
# plt.figure()
# plt.plot(d['Time since start in ms '].values,d["LIGHT (lux)"].values)
# plt.show()
d = np.diff(d['Time since start in ms '].values)
print(np.average(d))
print(np.std(d))
| [
"matplotlib"
] |
1e43b1c2484800e62f15e7576afe4bd87e2e30f7 | Python | smashhadi/introduction-to-ml | /Hyperameter_tuning_exercise.py | UTF-8 | 13,618 | 3.125 | 3 | [] | no_license | """
PyTorch based assignment
Following ”Generating names with character-level RNN” tutorial on PyTorch
Hyperparameter tuning
"""
from __future__ import unicode_literals, print_function, division
from io import open
import glob
import unicodedata
import string
import torch
import torch.nn as nn
from torch.autograd import Variable
import random
import time
import math
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
all_letters = string.ascii_letters + " .,;'-"
n_letters = len(all_letters) + 1 # Plus EOS marker
def findFiles(path): return glob.glob(path)
# Turn a Unicode string to plain ASCII, thanks to http://stackoverflow.com/a/518232/2809427
def unicodeToAscii(s):
return ''.join(
c for c in unicodedata.normalize('NFD', s)
if unicodedata.category(c) != 'Mn'
and c in all_letters
)
# Read a file and split into lines
def readLines(filename):
lines = open(filename, encoding='utf-8').read().strip().split('\n')
return [unicodeToAscii(line) for line in lines]
# Build the category_lines dictionary, a list of lines per category
category_lines_train = {}
category_lines_test = {}
all_categories = []
for filename in findFiles('data/names/*.txt'):
category = filename.split('/')[-1].split('.')[0]
all_categories.append(category)
lines = readLines(filename)
total = len(lines)
train_len = int(len(lines) * 0.8)
all_lines = random.sample(range(0,total), total)
train_lines = set(all_lines[0:train_len])
test_lines = set(all_lines[train_len:])
category_lines_train[category] = [lines[i] for i in train_lines]
category_lines_test[category] = [lines[i] for i in test_lines]
n_categories = len(all_categories)
class RNN(nn.Module):
def __init__(self, input_size, hidden_size, output_size):
super(RNN, self).__init__()
self.hidden_size = hidden_size
self.i2h = nn.Linear(n_categories + input_size + hidden_size, hidden_size)
self.i2o = nn.Linear(n_categories + input_size + hidden_size, output_size)
self.o2o = nn.Linear(hidden_size + output_size, output_size)
self.dropout = nn.Dropout(0.1)
self.softmax = nn.LogSoftmax(dim=1)
def forward(self, category, input, hidden):
input_combined = torch.cat((category, input, hidden), 1)
hidden = self.i2h(input_combined)
output = self.i2o(input_combined)
output_combined = torch.cat((hidden, output), 1)
output = self.o2o(output_combined)
output = self.dropout(output)
output = self.softmax(output)
return output, hidden
def initHidden(self):
return Variable(torch.zeros(1, self.hidden_size))
class RNN2(nn.Module):
def __init__(self, input_size, hidden_size, output_size):
super(RNN2, self).__init__()
self.hidden_size = hidden_size
self.i2h2 = nn.Linear(input_size + hidden_size, hidden_size)
self.i2o2 = nn.Linear(input_size + hidden_size, output_size)
self.o2o2 = nn.Linear(hidden_size + output_size, output_size)
self.dropout = nn.Dropout(0.1)
self.softmax = nn.LogSoftmax(dim=1)
def forward(self, input, hidden):
input_combined = torch.cat((input, hidden), 1)
hidden = self.i2h2(input_combined)
output = self.i2o2(input_combined)
output_combined = torch.cat((hidden, output), 1)
output = self.o2o2(output_combined)
output = self.dropout(output)
output = self.softmax(output)
return output, hidden
def initHidden(self, category):
pad = self.hidden_size-18
temp = Variable(torch.zeros(1, pad))
padded_cat = torch.cat((category, temp), 1)
return padded_cat
class RNN3(nn.Module):
def __init__(self, input_size, hidden_size, output_size):
super(RNN3, self).__init__()
self.hidden_size = hidden_size
self.i2h3 = nn.Linear(n_categories + hidden_size, hidden_size)
self.i2o3 = nn.Linear(n_categories + hidden_size, output_size)
self.o2o3 = nn.Linear(hidden_size + output_size, output_size)
self.dropout = nn.Dropout(0.1)
self.softmax = nn.LogSoftmax(dim=1)
def forward(self, category, hidden):
input_combined = torch.cat((category, hidden), 1)
hidden = self.i2h3(input_combined)
output = self.i2o3(input_combined)
output_combined = torch.cat((hidden, output), 1)
output = self.o2o3(output_combined)
output = self.dropout(output)
output = self.softmax(output)
return output, hidden
def initHidden(self):
return Variable(torch.zeros(1, self.hidden_size))
class RNN4(nn.Module):
def __init__(self, input_size, hidden_size, output_size):
super(RNN4, self).__init__()
self.hidden_size = hidden_size
self.i2h4 = nn.Linear(hidden_size, hidden_size)
self.i2o4 = nn.Linear(hidden_size, output_size)
self.o2o4 = nn.Linear(hidden_size + output_size, output_size)
self.dropout = nn.Dropout(0.1)
self.softmax = nn.LogSoftmax(dim=1)
def forward(self, hidden):
#input_combined = torch.cat((input, hidden), 1)
hidden = self.i2h4(hidden)
output = self.i2o4(hidden)
output_combined = torch.cat((hidden, output), 1)
output = self.o2o4(output_combined)
output = self.dropout(output)
output = self.softmax(output)
return output, hidden
def initHidden(self, category):
pad = self.hidden_size-18
temp = Variable(torch.zeros(1, pad))
padded_cat = torch.cat((category, temp), 1)
return padded_cat
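# Added summary note: the four classes above are ablation variants of the same
# name generator, differing only in what the first linear layer sees at each step:
#   RNN  (case 1): category one-hot + current letter + hidden state
#   RNN2 (case 2): current letter + hidden state; the category one-hot is
#                  zero-padded into the initial hidden state instead
#   RNN3 (case 3): category one-hot + hidden state (no letter input)
#   RNN4 (case 4): hidden state only; the category enters through the initial
#                  hidden state as in case 2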
# Random item from a list
def randomChoice(l):
return l[random.randint(0, len(l) - 1)]
# Get a random category and random line from that category
def randomTrainingPair():
category = randomChoice(all_categories)
line = randomChoice(category_lines_train[category])
return category, line
def randomTestPair():
category = randomChoice(all_categories)
line = randomChoice(category_lines_test[category])
return category, line
# One-hot vector for category
def categoryTensor(category):
li = all_categories.index(category)
tensor = torch.zeros(1, n_categories)
tensor[0][li] = 1
return tensor
# One-hot matrix of first to last letters (not including EOS) for input
def inputTensor(line):
tensor = torch.zeros(len(line), 1, n_letters)
for li in range(len(line)):
letter = line[li]
tensor[li][0][all_letters.find(letter)] = 1
return tensor
# LongTensor of second letter to end (EOS) for target
def targetTensor(line):
letter_indexes = [all_letters.find(line[li]) for li in range(1, len(line))]
letter_indexes.append(n_letters - 1) # EOS
return torch.LongTensor(letter_indexes)
# Make category, input, and target tensors from a random category, line pair
def randomTrainingExample():
category, line = randomTrainingPair()
category_tensor = Variable(categoryTensor(category))
input_line_tensor = Variable(inputTensor(line))
target_line_tensor = Variable(targetTensor(line))
return category_tensor, input_line_tensor, target_line_tensor
def randomTestExample():
category, line = randomTestPair()
category_tensor = Variable(categoryTensor(category))
input_line_tensor = Variable(inputTensor(line))
target_line_tensor = Variable(targetTensor(line))
return category_tensor, input_line_tensor, target_line_tensor
criterion = nn.NLLLoss()
learning_rate = 0.0005
#Case 1 - Original code
def train(category_tensor, input_line_tensor, target_line_tensor):
hidden = rnn.initHidden()
rnn.zero_grad()
loss = 0
for i in range(input_line_tensor.size()[0]):
output, hidden = rnn(category_tensor, input_line_tensor[i], hidden)
loss += criterion(output, target_line_tensor[i])
loss.backward()
for p in rnn.parameters():
p.data.add_(-learning_rate, p.grad.data)
return output, loss.data[0] / input_line_tensor.size()[0]
#Case 2: hidden unit + previous character
def train2(category_tensor, input_line_tensor, target_line_tensor):
hidden = rnn2.initHidden(category_tensor)
rnn2.zero_grad()
loss2 = 0
for i in range(input_line_tensor.size()[0]):
output, hidden = rnn2(input_line_tensor[i], hidden)
loss2 += criterion(output, target_line_tensor[i])
loss2.backward()
for p in rnn2.parameters():
p.data.add_(-learning_rate, p.grad.data)
return output, loss2.data[0] / input_line_tensor.size()[0]
#Case 3 - category + hidden unit
def train3(category_tensor, input_line_tensor, target_line_tensor):
hidden = rnn3.initHidden()
rnn3.zero_grad()
loss3 = 0
for i in range(input_line_tensor.size()[0]):
output, hidden = rnn3(category_tensor, hidden)
loss3 += criterion(output, target_line_tensor[i])
loss3.backward()
for p in rnn3.parameters():
p.data.add_(-learning_rate, p.grad.data)
return output, loss3.data[0] / input_line_tensor.size()[0]
#Case 4: hidden unit
def train4(category_tensor, input_line_tensor, target_line_tensor):
hidden = rnn4.initHidden(category_tensor)
rnn4.zero_grad()
loss4 = 0
for i in range(input_line_tensor.size()[0]):
output, hidden = rnn4(hidden)
loss4 += criterion(output, target_line_tensor[i])
loss4.backward()
for p in rnn4.parameters():
p.data.add_(-learning_rate, p.grad.data)
return output, loss4.data[0] / input_line_tensor.size()[0]
#Testing functions
def test(category_tensor, input_line_tensor, target_line_tensor):
hidden = rnn.initHidden()
testloss = 0
for i in range(input_line_tensor.size()[0]):
output, hidden = rnn(category_tensor, input_line_tensor[i], hidden)
testloss += criterion(output, target_line_tensor[i])
return output, testloss.data[0] / input_line_tensor.size()[0]
def test2(category_tensor, input_line_tensor, target_line_tensor):
hidden = rnn2.initHidden(category_tensor)
testloss2 = 0
for i in range(input_line_tensor.size()[0]):
output, hidden = rnn2(input_line_tensor[i], hidden)
testloss2 += criterion(output, target_line_tensor[i])
return output, testloss2.data[0] / input_line_tensor.size()[0]
def test3(category_tensor, input_line_tensor, target_line_tensor):
hidden = rnn3.initHidden()
testloss3 = 0
for i in range(input_line_tensor.size()[0]):
output, hidden = rnn3(category_tensor, hidden)
testloss3 += criterion(output, target_line_tensor[i])
return output, testloss3.data[0] / input_line_tensor.size()[0]
def test4(category_tensor, input_line_tensor, target_line_tensor):
hidden = rnn4.initHidden(category_tensor)
testloss4 = 0
for i in range(input_line_tensor.size()[0]):
output, hidden = rnn4(hidden)
testloss4 += criterion(output, target_line_tensor[i])
return output, testloss4.data[0] / input_line_tensor.size()[0]
def timeSince(since):
now = time.time()
s = now - since
m = math.floor(s / 60)
s -= m * 60
return '%dm %ds' % (m, s)
rnn = RNN(n_letters, 128, n_letters)
rnn2 = RNN2(n_letters, 128, n_letters)
rnn3 = RNN3(n_letters, 128, n_letters)
rnn4 = RNN4(n_letters, 128, n_letters)
n_iters = 80000
n_iters1 = 20000
#print_every = 5000
plot_every = 5000
all_losses = []
total_loss = 0 # Reset every plot_every iters
all_losses_test1 = []
total_loss_test1 = 0
all_losses_test2 = []
total_loss_test2 = 0
all_losses_test3 = []
total_loss_test3 = 0
all_losses_test4 = []
total_loss_test4 = 0
start = time.time()
for iter in range(1, n_iters + 1):
category_tensor, input_line_tensor, target_line_tensor = randomTrainingExample()
output1, loss1 = train(category_tensor, input_line_tensor, target_line_tensor)
output2, loss2 = train2(category_tensor, input_line_tensor, target_line_tensor)
output3, loss3 = train3(category_tensor, input_line_tensor, target_line_tensor)
output4, loss4 = train4(category_tensor, input_line_tensor, target_line_tensor)
if iter % plot_every == 0:
for iter in range(1, n_iters1 + 1):
category_tensort, input_line_tensort, target_line_tensort = randomTestExample()
output_t1, testloss1 = test(category_tensort, input_line_tensort, target_line_tensort)
total_loss_test1 += testloss1
output_t2, testloss2 = test2(category_tensort, input_line_tensort, target_line_tensort)
total_loss_test2 += testloss2
output_t3, testloss3 = test3(category_tensort, input_line_tensort, target_line_tensort)
total_loss_test3 += testloss3
outputt4, testloss4 = test4(category_tensort, input_line_tensort, target_line_tensort)
total_loss_test4 += testloss4
all_losses_test1.append(total_loss_test1 / n_iters1)
total_loss_test1 = 0
all_losses_test2.append(total_loss_test2 / n_iters1)
total_loss_test2 = 0
all_losses_test3.append(total_loss_test3 / n_iters1)
total_loss_test3 = 0
all_losses_test4.append(total_loss_test4 / n_iters1)
total_loss_test4 = 0
plt.figure()
plt.plot(all_losses_test1, 'm')
plt.plot(all_losses_test2, 'g')
plt.plot(all_losses_test3, 'b')
plt.plot(all_losses_test4, 'r')
plt.xlabel("Num of Iterations")
plt.ylabel("Loss")
plt.title("Test (Validation) Loss")
plt.legend(['Case 1', 'Case 2', 'Case 3', 'Case 4'], loc='upper right')
| [
"matplotlib"
] |
a1785bd44a66dc6a54788479b76fce73b44b5a1c | Python | RaulMedeiros/Object_Detection_FrameWork | /core_process.py | UTF-8 | 4,576 | 2.5625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
import numpy as np
import os
import six.moves.urllib as urllib
import sys
import tarfile
import tensorflow as tf
import zipfile
import matplotlib
matplotlib.use('Agg')
from collections import defaultdict
from io import StringIO
from matplotlib import pyplot as plt
from PIL import Image
import cv2
import argparse
# ## Object detection imports
# Here are the imports from the object detection module.
from utils import label_map_util
from utils import visualization_utils as vis_util
def download_weights(MODEL_NAME,
DOWNLOAD_BASE = 'http://download.tensorflow.org/models/object_detection/',
directory = "models_Zoo"):
MODEL_FILE = MODEL_NAME + '.tar.gz'
if not os.path.exists(directory):
os.makedirs(directory)
## Download Model
file_name = DOWNLOAD_BASE + MODEL_FILE
file_dst = directory+'/'+MODEL_FILE
os.system("wget -N "+file_name+" -P "+directory)
tar_file = tarfile.open('./'+directory+'/'+MODEL_FILE)
for file in tar_file.getmembers():
file_name = os.path.basename(file.name)
if 'frozen_inference_graph.pb' in file_name:
tar_file.extract(file, os.getcwd()+'/'+directory)
return True
def load_model(MODEL_NAME,directory = "models_Zoo"):
''' Load a (frozen) Tensorflow model into memory.'''
PATH_TO_CKPT = directory+'/'+MODEL_NAME + '/frozen_inference_graph.pb'
model = tf.Graph()
with model.as_default():
od_graph_def = tf.GraphDef()
with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
serialized_graph = fid.read()
od_graph_def.ParseFromString(serialized_graph)
tf.import_graph_def(od_graph_def, name='')
return model
def load_label_map(PATH_TO_LABELS,NUM_CLASSES = 90):
# ## Loading label map
# Label maps map indices to category names, so that when our convolution network predicts `5`, we know that this corresponds to `airplane`. Here we use internal utility functions, but anything that returns a dictionary mapping integers to appropriate string labels would be fine
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
categories = label_map_util.convert_label_map_to_categories(label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
category_index = label_map_util.create_category_index(categories)
return category_index
def rotate(src_img,rot_angle):
# Size, in inches, of the output images.
rows,cols,_ = src_img.shape
# Build rotation matrix
M = cv2.getRotationMatrix2D((cols/2,rows/2),rot_angle,1)
# Perform Rotation
return cv2.warpAffine(src_img,M,(cols,rows))
def process_frame(src_img,model,sess,category_index,display=True):
# Rotate if needed
rot_angle = 0
rot_img = rotate(src_img,rot_angle)
# Resize image
img_size = (800, 450) #(WIDTH,HEIGHT)
image_np = cv2.resize(rot_img, img_size)
# Expand dimensions since the model expects images to have shape: [1, None, None, 3]
image_np_expanded = np.expand_dims(image_np, axis=0)
image_tensor = model.get_tensor_by_name('image_tensor:0')
# Each box represents a part of the image where a particular object was detected.
boxes = model.get_tensor_by_name('detection_boxes:0')
# Each score represent how level of confidence for each of the objects.
# Score is shown on the result image, together with the class label.
scores = model.get_tensor_by_name('detection_scores:0')
classes = model.get_tensor_by_name('detection_classes:0')
num_detections = model.get_tensor_by_name('num_detections:0')
# Actual detection.
(boxes, scores, classes, num_detections) = sess.run(
[boxes, scores, classes, num_detections],
feed_dict={image_tensor: image_np_expanded})
if (display):
# Visualization of the results of a detection.
vis_util.visualize_boxes_and_labels_on_image_array(
image_np,
np.squeeze(boxes),
np.squeeze(classes).astype(np.int32),
np.squeeze(scores),
category_index,
use_normalized_coordinates=True,
line_thickness=8)
return image_np
else:
n_detect = int(num_detections[0])
boxes = boxes.tolist()[0][:n_detect]
scores = scores.tolist()[0][:n_detect]
class_detect = classes.tolist()[0][:n_detect]
classes = [category_index[int(x)] for x in class_detect]
return {"boxes" : boxes,
"scores" : scores,
"classes" : classes}
| [
"matplotlib"
] |
ee4bfb4e7f230b489e33e49586a432c38c9b2960 | Python | oakoneric/mathTUD | /Math-Ba-OPTINUM/OPTINUM_2_Vorlesung/programme/euler-pendel.py | UTF-8 | 2,554 | 3.484375 | 3 | [
"MIT"
] | permissive | #! /usr/bin/env python3
#
# Integrates the equations of the mathematical pendulum
# using two different Euler methods.
#
# usage: ./euler-pendel.py <h=0.05>#
# $ apt install python3-matplotlib python3-tk
import math
import matplotlib.pyplot as plt
import sys
q0 = 0.1 # Initial angle
p0 = 0. # Initial momentum
m = 1.0 # Mass
l = 0.1 # Length of the pendulum
g = 9.81 # Gravitational acceleration
h = 0.05 # Time step size
T = 1000. # Final time
# Read command-line arguments
if len(sys.argv) > 1:
h = float(sys.argv[1])
# Right-hand side
def minus_dHdq(p,q):
return + m * g * l * math.sin(q)
def dHdp(p,q):
return p / (m*l**2)
# Exact energy for comparison
def energy(p,q):
return p**2 / (2*m*l**2) + m*g*l*math.cos(q)
# =========
# Create a time grid (for visualization only)
def getX(τ, T):
# returns vector (x₀, x₁, ..., xₙ = T)
x = []
xᵢ = 0. # Initial time
while xᵢ < T:
x.append(xᵢ)
xᵢ += τ
x.append(T)
return x
# Explicit Euler:
# ===============
def explicit(p0, q0, h):
p = [p0]
q = [q0]
for i in range(0, len(x)-1):
pᵢ = p[i] + h * minus_dHdq(p[i],q[i])
qᵢ = q[i] + h * dHdp(p[i],q[i])
p.append(pᵢ)
q.append(qᵢ)
return [p,q]
# Symplectic Euler:
# ===============
def symplectic(p0, q0, h):
p = [p0]
q = [q0]
for i in range(0, len(x)-1):
pᵢ = p[i] + h * minus_dHdq(p[i],q[i]) # First argument should be p[i+1], but is not used anyway
qᵢ = q[i] + h * dHdp(pᵢ,q[i])
p.append(pᵢ)
q.append(qᵢ)
return [p,q]
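# Added note on the two schemes above, with H(p,q) = p²/(2 m l²) + m g l cos(q):
#   explicit Euler:   p_{n+1} = p_n - h·∂H/∂q(p_n, q_n),  q_{n+1} = q_n + h·∂H/∂p(p_n, q_n)
#   symplectic Euler: p_{n+1} = p_n - h·∂H/∂q(p_n, q_n),  q_{n+1} = q_n + h·∂H/∂p(p_{n+1}, q_n)
# The only difference is that the symplectic update evaluates ∂H/∂p at the new
# momentum, which is what keeps the total energy bounded in the plots below.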
# ==================================
x = getX(h, T)
[p_exp, q_exp] = explicit(p0, q0, h)
[p_sym, q_sym] = symplectic(p0, q0, h)
# The energy of the exact solution: it is a constant
yᵣ = [ energy(p0,q0) for xᵢ in x ]
# The energy of the explicit Euler method
energy_exp = []
for i in range(0, len(p_exp)):
energy_exp.append(energy(p_exp[i], q_exp[i]))
# The energy of the symplectic Euler method
energy_sym = []
for i in range(0, len(p_sym)):
energy_sym.append(energy(p_sym[i], q_sym[i]))
# Plot pendulum positions
plt.figure(1)
#plt.plot(x, q_exp, linewidth=2, label="explicit")
plt.plot(x, q_sym, linewidth=2, label="symplectic")
plt.legend()
#plt.show()
# Plot total energy
plt.figure(2)
plt.ylim(0,2)
#plt.plot(x, energy_exp, linewidth=2, label="explicit")
plt.plot(x, energy_sym, linewidth=1, label="symplectic")
plt.plot(x, yᵣ, linewidth=1, label="exact")
plt.legend()
plt.show()
| [
"matplotlib"
] |
d1fd9e57101af90680d8060e96cd258ca333903e | Python | unlucio/lombacovid | /backend/grafichini.py | UTF-8 | 2,311 | 2.78125 | 3 | [] | no_license | from matplotlib import pyplot as plt
import matplotlib.ticker as mticker
from matplotlib.dates import drange, DateFormatter
from datetime import date, timedelta
def curve(path, filename, color, ylabel):
f = open(path)
y = []
for line in f:
y.append( float(line) )
f.close()
#prepare the array of dates
formatter = DateFormatter('%d/%m')
a = date(2020, 9, 1)
b = date.today() + timedelta(days=1)
delta = timedelta(days=1)
dates = drange(a, b, delta)
plt.plot_date(dates, y, color=color, linestyle='solid', marker=None)
plt.xlabel("data", fontsize = 14)
plt.gca().xaxis.set_major_formatter(formatter)
plt.ylabel(ylabel, fontsize=14)
plt.grid(linewidth=0.5)
plt.savefig("pantarei/" + filename + "_graph.png", dpi=300)
plt.clf()
def histo(path, filename, color, ylabel):
f = open(path)
y = []
for line in f:
y.append( float(line) )
f.close()
formatter = DateFormatter('%d/%m')
a = date(2020, 9, 1)
b = date.today() + timedelta(days=1)
delta = timedelta(days=1)
dates = drange(a, b, delta)
plt.bar(dates, y, color=color)
plt.gca().xaxis_date()
plt.xlabel("data", fontsize = 14)
plt.gca().xaxis.set_major_formatter(formatter)
plt.ylabel(ylabel, fontsize=14)
plt.grid(linewidth=0.5, axis='y')
plt.savefig("pantarei/" + filename + "_graph.png", dpi=300)
plt.clf()
def vax(filename, color):
path1 = 'pantarei/primadose_story.txt'
path2 = 'pantarei/secondadose_story.txt'
f1 = open(path1)
f2 = open(path2)
y1 = []
y2 = []
for line in f1:
y1.append( float(line) )
for line in f2:
y2.append( float(line) )
f1.close()
f2.close()
formatter = DateFormatter('%d/%m')
a = date(2021, 1, 2)
b = date.today() + timedelta(days=1)
delta = timedelta(days=1)
dates = drange(a, b, delta)
plt.plot_date(dates, y1, color=color, linestyle='solid', marker=None, label = "prime dosi")
plt.plot_date(dates, y2, color=color, linestyle='dashed', marker=None, label = "seconde dosi")
plt.xlabel("data", fontsize = 14)
plt.gca().xaxis.set_major_formatter(formatter)
plt.legend()
plt.grid(linewidth=0.5)
f = mticker.ScalarFormatter(useOffset=False, useMathText=True)
g = lambda x,pos : "${}$".format(f._formatSciNotation('%1.10e' % x))
plt.gca().yaxis.set_major_formatter(mticker.FuncFormatter(g))
plt.savefig("pantarei/" + filename + "_graph.png", dpi=300)
plt.clf() | [
"matplotlib"
] |
ad24895434e3076047f416823f9a9eae5fdecd56 | Python | Lingareddyvinitha/lab2 | /sinusoidal_signal.py | UTF-8 | 235 | 3.046875 | 3 | [] | no_license | import matplotlib.pyplot as plt
import numpy as np
fs1=8000
f1=5
sample=8000
x1=np.arange(sample)
y1=np.sin(2*np.pi*f1*x1/fs1)
plt.title('sinusoidal signal')
plt.plot(x1,y1)
plt.xlabel('sample(n)')
plt.ylabel('voltage(V)')
plt.show()
| [
"matplotlib"
] |
4c841e32cf68522c5575ef21d2b5cc909590837c | Python | EdanMizrahi/Titanic | /titanic.py | UTF-8 | 5,905 | 3.21875 | 3 | [] | no_license | import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import re
from encode import *
from pandas import Series, DataFrame
filename = 'train.csv'
df = pd.read_csv(filename)
df.columns = ['PassengerId', 'Survived', 'Pclass', 'Name', 'Sex', 'Age', 'SibSp', 'Parch', 'Ticket', 'Fare', 'Cabin', 'Embarked']
passengers = df
print(passengers.info)
for column in ['Survived', 'Pclass', 'Sex', 'SibSp', 'Parch', 'Embarked']:
print(f'Column "{column}" contains the following unique inputs: {passengers[column].unique()}')
print('\nTable of missing value counts in each column:')
print(passengers.isnull().sum())
# Sorting out missing data
passengers['Embarked'] = passengers['Embarked'].fillna(0)
passengers = passengers.drop(columns=['Cabin'])
passengers['Age'] = passengers['Age'].fillna(passengers['Age'].mean())
# Encoding
encode_pclass(passengers)
encode_embarked(passengers)
encode_sex(passengers)
encode_ticket(passengers)
passengers['Ticket'] = passengers['Ticket'].fillna(passengers['Ticket'].mean())
passengers = passengers.drop(columns=['Pclass', 'Embarked'])
dead, survived = passengers['Survived'].value_counts()
print(f"Number of Survived : {survived}")
print(f"Number of Dead : {dead}")
print(f"Percentage Dead : {round(dead/(dead+survived)*100,1)}%")
sns.countplot(x='Survived', data=passengers, palette = 'hls')
plt.title('Count Plot for Survived')
print(passengers.groupby('Survived').mean())
plt.show()
#Normalisation of remaining features
passengers_normalised = passengers.copy()
age_mean, age_std = normalise_col(passengers_normalised , 'Age')
sibsp_mean, sibsp_std = normalise_col(passengers_normalised , 'SibSp')
parch_mean, parch_std = normalise_col(passengers_normalised , 'Parch')
ticked_mean, ticket_std = normalise_col(passengers_normalised , 'Ticket')
fare_mean, fare_std = normalise_col(passengers_normalised , 'Fare')
print(passengers_normalised.groupby('Survived').mean())
def mean_visual(df):
#Produce a bar chart showing the mean values of the explanatory variables grouped by match result.
fig = plt.figure(figsize=(16,12))
df_copy = df.copy()
df_copy = df_copy.drop(columns=['PassengerId'])
df_copy_mean = df_copy.groupby('Survived').mean().reset_index()
tidy = df_copy_mean.melt(id_vars='Survived').rename(columns=str.title)
g =sns.barplot(x='Variable',y='Value' ,data=tidy, hue='Survived',palette = 'hls')
fig.autofmt_xdate()
plt.title("Visualisation of Mean Value by Survival Status")
plt.xlabel("Explanatory Variable", fontsize=18)
plt.ylabel("Normalised Value", fontsize=18)
plt.tick_params(axis='both', which= 'major', labelsize=14)
plt.show()
mean_visual(passengers_normalised)
def correlation_visualisation(df):
"""
Creates a correlation plot between each of the explanatory variables.
"""
plt.figure(figsize=(15,15))
sns.heatmap(df.corr(), linewidth=0.25, annot=True, square=True, cmap = "BuGn_r", linecolor = 'w')
correlation_visualisation(passengers)
plt.title("Heatmap of Correlation for Variables")
plt.show()
passengers_final = passengers.drop(columns=['Name', 'PassengerId','class1', 'cherbourg', 'Ticket', 'queenstown', 'Fare', 'Parch']).copy()
passengers_final['bias'] = np.ones(passengers_final.shape[0])
import statsmodels.api as sm
from sklearn.model_selection import train_test_split
X=passengers_final.loc[:, passengers_final.columns != 'Survived']
y=passengers_final.loc[:, passengers_final.columns =='Survived']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)
logit_model=sm.Logit(y_train.astype(int),X_train.astype(float))
result=logit_model.fit()
print(result.summary2())
from sklearn.linear_model import LogisticRegression
from sklearn import metrics
logreg = LogisticRegression(fit_intercept=False)
logreg.fit(X_train.astype(float), np.ravel(y_train))
y_pred = logreg.predict(X_test)
print('Accuracy of logistic regression classifier on test set: {:.2f}'.format(logreg.score(X_test, y_test)))
from sklearn.metrics import confusion_matrix
confusion_matrix = confusion_matrix(y_test, y_pred)
print('\n', confusion_matrix)
print('\n',logreg.coef_)
from sklearn.metrics import classification_report
print(classification_report(y_test, y_pred))
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
logit_roc_auc = roc_auc_score(y_test, logreg.predict(X_test))
fpr, tpr, thresholds = roc_curve(y_test.values, logreg.predict_proba(X_test)[:,1])
plt.figure()
plt.plot(fpr, tpr, label='Logistic Regression (area = %0.2f)' % logit_roc_auc)
plt.plot([0, 1], [0, 1],'r--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic')
plt.legend(loc="lower right")
plt.show()
filename = 'test.csv'
df = pd.read_csv(filename)
df.columns = ['PassengerId', 'Pclass', 'Name', 'Sex', 'Age', 'SibSp', 'Parch', 'Ticket', 'Fare', 'Cabin', 'Embarked']
df['Embarked'] = df['Embarked'].fillna(0)
df = df.drop(columns=['Cabin'])
df['Age'] = df['Age'].fillna(passengers['Age'].mean())
encode_pclass(df)
encode_embarked(df)
encode_sex(df)
encode_ticket(df)
df['Ticket'] = df['Ticket'].fillna(df['Ticket'].mean())
df = df.drop(columns=['Pclass', 'Embarked'])
df_final = df.drop(columns=['Name', 'PassengerId','class1', 'cherbourg', 'Ticket', 'queenstown', 'Fare', 'Parch']).copy()
df_final['bias'] = np.ones(df_final.shape[0])
X=df_final.copy()
print('\nTable of missing value counts in each column:')
print(df_final.isnull().sum())
y_pred = logreg.predict(X)
df = pd.read_csv(filename)
df_predictions = pd.DataFrame(list(zip(df['PassengerId'],y_pred)),columns=['PassengerId','Survived'])
df_predictions.to_csv(r'predictions.csv', index = False) | [
"matplotlib",
"seaborn"
] |