Dataset schema: blob_id (string, length 40), language (string, 1 class), repo_name (string, length 5–135), path (string, length 2–372), src_encoding (string, 26 classes), length_bytes (int64, 55–3.18M), score (float64, 2.52–5.19), int_score (int64, 3–5), detected_licenses (sequence, length 0–38), license_type (string, 2 classes), code (string, length 55–3.18M), used_libs (sequence, length 1–5).

blob_id | language | repo_name | path | src_encoding | length_bytes | score | int_score | detected_licenses | license_type | code | used_libs
---|---|---|---|---|---|---|---|---|---|---|---
f4a80f033b10e1cf7b90c00de04157d8fd334537 | Python | cheerDZ/cluster_of_news_titles | /yuqingtaishiyanhua.py | UTF-8 | 3,504 | 2.890625 | 3 | [] | no_license | import pandas as pd
import numpy as np
import time
from IPython.display import display
# Data preprocessing (the commented-out block below built minhang_news.txt from the raw CSV)
# df=pd.read_csv('/Users/apple/Desktop/短文本聚类/民航新闻.csv')
# display(df.head())
# display(df.info())
# df=df[['issueTime','newsTitle']]
# display(df.head())
# df=df.drop_duplicates('newsTitle',keep='first',inplace=False)
# news=pd.Series(data=df['newsTitle'].values)
# i=0
# file=open('minhang_news.txt','a')
# while i<914:
# new=news[i]
# i=i+1
# file.write(new+'\n')
# file.close()
startTime=time.time()
# Build the stop-word list
stopwords_filepath='/Users/apple/Desktop/短文本聚类/stopwordslist.txt'
def stopwordslist(stopwords_filepath):
stopwords=[line.strip() for line in open(stopwords_filepath,'r',encoding='utf-8').readlines()]
return stopwords
# Tokenize a sentence with jieba
userdict_filepath='/Users/apple/Desktop/短文本聚类/userdict.txt'
def segment(text,userdict_filepath,stopwords_filepath):
import jieba
jieba.load_userdict(userdict_filepath)
stopwords=stopwordslist(stopwords_filepath)
seg_list=jieba.cut(text,cut_all=False)
seg_list_without_stopwords=[]
for word in seg_list:
if word not in stopwords:
if word !='\t':
seg_list_without_stopwords.append(word)
return seg_list_without_stopwords
# Tokenize every line in files with the segmenter
f=open('minhang_news.txt','r',encoding='utf-8')
files=f.readlines()
files=pd.Series(files)
# First narrow the corpus down to a smaller subset using a keyword of interest
mask = files.str.contains('北京')
files = files[mask]
print(files)
files=files.tolist()
totalvocab_tokenized=[]
for i in files:
allwords_tokenized=segment(i,userdict_filepath,stopwords_filepath)
totalvocab_tokenized.extend(allwords_tokenized)
# Show the tokenization result
display(totalvocab_tokenized)
# Show the number of tokens after segmentation
display(len(totalvocab_tokenized))
# Show how long the segmentation took
print('Segmentation took %.2f seconds' % (time.time()- startTime))
# Build the TF-IDF matrix
from sklearn.feature_extraction.text import TfidfVectorizer
stopwords_list=[k.strip() for k in open(stopwords_filepath,encoding='utf-8').readlines() if k.strip() !='']
tfidf_vectorizer=TfidfVectorizer(stop_words=stopwords_list,
                                 min_df=0,
                                 max_df=0.9,
                                 max_features=200000,
                                 )
tfidf_matrix = tfidf_vectorizer.fit_transform(files)
print(tfidf_matrix.shape)
# Compute pairwise document similarity
from sklearn.metrics.pairwise import cosine_similarity
# the more similar two vectors are, the smaller the angle, the larger the cosine, and the smaller dist
dist=1-cosine_similarity(tfidf_matrix)
# Hierarchical clustering
from scipy.cluster.hierarchy import ward,dendrogram,linkage
import matplotlib.pyplot as plt
from matplotlib.font_manager import FontProperties
plt.rcParams['font.sans-serif']=['SimHei']
def getChineseFont():
return FontProperties(fname='/System/Library/Fonts/PingFang.ttc')
# Agglomerative clustering (single linkage on the cosine-distance matrix)
linkage_matrix=linkage(dist,method='single',metric='euclidean',optimal_ordering=False)
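# each row of the linkage matrix is [cluster_i, cluster_j, merge_distance, n_samples_in_merged_cluster]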
print(linkage_matrix)
# Visualization
plt.figure()
plt.title('新闻标题聚类树状图',fontproperties=getChineseFont())
plt.xlabel('新闻标题',fontproperties=getChineseFont())
plt.ylabel('距离(越低表示文本越类似)',fontproperties=getChineseFont())
dendrogram(
linkage_matrix,
labels=files,
leaf_rotation=70,
leaf_font_size=12
)
plt.show()
plt.close() | [
"matplotlib"
] |
4abc2a529c86dfe540d6fba6a8bbdbe990731a7c | Python | litaotju/ml_learn | /cifar/cifar10.py | UTF-8 | 8,578 | 2.515625 | 3 | [] | no_license | import getopt
import sys
import os
import re
import random
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import cPickle
label_names = ['airplane','automobile','bird','cat','deer',
'dog','frog','horse','ship','truck']
def unpickle(file):
with open(file, 'rb') as fo:
dict = cPickle.load(fo)
return dict
def get_data_label_old(dirs, training):
NUM_IMS = 50000 if training else 10000
FILE_NAME_PREFIX = r"data_batch_\d+" if training else r"test_batch"
datas = np.zeros(shape=[NUM_IMS, 3072], dtype=np.float32)
labels = []
begin = 0
for fi in os.listdir(dirs):
if re.match(FILE_NAME_PREFIX, fi):
d = unpickle(os.path.join(dirs, fi))
end = begin + len(d['data'])
datas[begin:end,:] = d['data']
labels += d['labels']
begin = end
print "Loaded data size: %d" % len(datas)
print "Loaded labels size: %d" % len(labels)
return datas, labels
def get_data_label(dirs, training):
filename = "data_batch_1"
d = unpickle(os.path.join(dirs,filename))
datas = d['data']
labels = np.array(d['labels'])
datas = datas.reshape(10000, 3, 32, 32).transpose(0, 2, 3, 1).astype("uint8")
return datas, labels
def get_next_batch(datas, labels, size):
selected = set()
for _ in range(0, size):
index = random.randint(0, len(datas)-1)
while index in selected:
index += 1
index %= len(datas)
selected.add(index)
s_datas = []
s_labels = []
for index in selected:
# datas[index] is a slice action
s_datas.append(datas[index])
right_anwser = labels[index]
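        # build a one-hot vector of length 10 for this label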
label = []
for _ in range(0, 10):
if _ == right_anwser:
label.append(1)
else:
label.append(0)
label = np.array(label)
s_labels.append(label)
s_datas = np.concatenate([s_datas])
    # Here we don't need to convert the array dtype to float or int32;
    # TensorFlow will take care of the type.
    # Also, if we convert the data type to float32, plot will show the image in proper color.
s_datas = s_datas.reshape([-1, 32, 32, 3])
s_labels = np.concatenate([s_labels])
#print s_datas.shape
#print s_labels.shape
return s_datas, s_labels
#Constants
MODEL_SAVE_PATH = "./model.chkpt"
KEEP_PROB = 0.94
BATCH_SIZE = 10
ITER_NUM = 2000
def weight_var(shape):
initial = tf.truncated_normal(shape, stddev=0.1)
return tf.Variable(initial)
def bias_var(shape):
initial = tf.constant(0.1, shape=shape)
return tf.Variable(initial)
def conv2d(x, W):
return tf.nn.conv2d(x, W, strides=[1,1,1,1], padding='SAME')#% , data_format="NCHW")
def max_pool_2x2(x):
return tf.nn.max_pool(x, ksize=[1,2,2,1],
strides=[1,2,2,1], padding='SAME')
## The input neural
x_image = tf.placeholder(tf.float32, [None, 32, 32, 3])
#the filter of conv
W_conv1 = weight_var([5,5,3,96])
#the bias of first conv
b_conv1 = bias_var([96])
h_conv1 = tf.nn.relu(tf.nn.bias_add(conv2d(x_image, W_conv1), b_conv1))
h_pool1 = max_pool_2x2(h_conv1)
## The second conv and max_pool
W_conv2 = weight_var([5, 5, 96, 96])
b_conv2 = bias_var([96])
h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
h_pool2 = max_pool_2x2(h_conv2)
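# two 2x2 max-pools shrink the 32x32 input to 8x8, which is why the dense layer below expects 8*8*96 features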
## the dense connected layer
W_fc1 = weight_var([8*8*96, 1024])
b_fc1 = bias_var([1024])
h_pool2_flat = tf.reshape(h_pool2, [-1, 8*8*96])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)
##Dropout
keep_prob = tf.placeholder(tf.float32)
h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)
##Readout layer
W_fc2 = weight_var([1024, 10])
b_fc2 = bias_var([10])
y_conv = tf.matmul(h_fc1_drop, W_fc2) + b_fc2
# the expected output, this comes from the label
y_ = tf.placeholder(tf.int32, [None, 10])
# the Error function of each output
cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
labels = y_, logits = y_conv))
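# softmax cross-entropy between the one-hot labels and the raw logits, averaged over the batch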
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(y_conv,1), tf.argmax(y_,1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
def train_and_save(batch_size, iter_num):
datas, labels = get_data_label("cifar-10-batches-py", training=True)
sess = tf.InteractiveSession()
tf.global_variables_initializer().run(session=sess)
tf.summary.merge_all()
writer = tf.summary.FileWriter("./", sess.graph)
saver = tf.train.Saver()
tf.add_to_collection('train_op', train_step)
# actual training procedure
for i in range(iter_num):
batch_xs, batch_ys = get_next_batch(datas, labels, batch_size)
batch_xs = batch_xs.astype(np.float32)
if i % (iter_num/10) == 0:
#print_graph_info()
print("accrucy ")
print(sess.run(accuracy, feed_dict={x_image: np.array(batch_xs), y_: np.array(batch_ys), keep_prob:KEEP_PROB}))
summary = sess.run(train_step, feed_dict={x_image: batch_xs, y_: batch_ys, keep_prob:KEEP_PROB})
writer.add_summary(summary, i)
saved_path = saver.save(sess, MODEL_SAVE_PATH)
print ("Model saved in file:%s" % saved_path)
def load_and_apply():
datas, labels = get_data_label("cifar-10-batches-py", training = False)
sess = tf.InteractiveSession()
saver = tf.train.Saver()
saver.restore(sess, MODEL_SAVE_PATH)
test_num = len(datas)
batch_size = 25
NUM_HI = 5
NUM_WID = 5
fig, axes = plt.subplots(NUM_HI, NUM_WID, figsize=(5, 5))
# Visualize the pixels using matplotlib
def key_event(e):
print e.key
if e.key == 'right' or e.key == 'left':
random_test_batch()
def random_test_batch():
batch_xs, batch_ys = get_next_batch(datas, labels, batch_size)
classes = sess.run(tf.argmax(y_conv,1), feed_dict={x_image: batch_xs,
y_: batch_ys,
keep_prob:KEEP_PROB})
print ",".join(label_names[_] for _ in classes)
for i in range(NUM_HI):
for j in range(NUM_WID):
axes[i][j].set_axis_off()
axes[i][j].set_title(label_names[classes[i*NUM_HI+j]], fontsize=9)
axes[i][j].imshow(batch_xs[i*NUM_HI+j],interpolation='hanning')
plt.show()
fig.canvas.mpl_connect('key_press_event', key_event)
random_test_batch()
def plot_images(hirozontal, vertical):
datas, labels = get_data_label("cifar-10-batches-py", training = False)
fig, axes1 = plt.subplots(hirozontal, vertical,figsize=(4,4))
for j in range(hirozontal):
for k in range(vertical):
i = np.random.choice(range(len(datas)))
axes1[j][k].set_axis_off()
axes1[j][k].set_title(label_names[labels[i:i+1][0]],fontsize=9)
axes1[j][k].imshow(datas[i:i+1][0], interpolation='hanning')
plt.show()
def usage():
sys.stderr.write("Usage: python %s [-t | --train] [-b | --batch] [--apply | -a] [-h] \n\n" % __file__)
def print_graph_info():
print x_image.get_shape()
print y_conv.__class__.__name__
print y_.__class__.__name__
print "y_conv %s " % str( y_conv.get_shape())
print "y_ %s " % str( y_.get_shape())
print "h_pool1 %s" % str(h_pool1.get_shape())
print "h_pool2 %s" % str(h_pool2.get_shape())
print "h_fc1 %s" % str(h_fc1.get_shape())
if __name__ == "__main__":
#get options
opts, args = getopt.getopt(sys.argv[1:], 'phtb:ai:', ['train','batch=', 'apply', 'iter='])
is_training = False
batch_size = BATCH_SIZE
iter_num = ITER_NUM
is_apply = False
isplot = False
with open("cifar-10-batches-py/batches.meta",'rb') as fo:
d = cPickle.load(fo)
label_names = d['label_names']
for opt, value in opts:
if opt =='-h':
usage()
sys.exit(0)
if opt =='--train' or opt=='-t':
is_training = True
if opt == '--batch' or opt=='-b':
batch_size = int(value)
if opt == '--iter' or opt=="-i":
iter_num = int(value)
if opt =='--apply' or opt=='-a':
is_apply = True
if opt == "-p":
isplot = True
# check options
if is_training and is_apply:
usage()
# action based on option
if is_training:
train_and_save(batch_size, iter_num)
elif is_apply:
load_and_apply()
elif isplot:
plot_images(3,3)
else:
print_graph_info()
| [
"matplotlib"
] |
2dceac698d3cd9037b5ce5cde3220b673c1e3cde | Python | Biller17/Machine-Learning | /Examen1/proyecto1_AdrianBillerA01018940.py | UTF-8 | 2,096 | 3.71875 | 4 | [] | no_license | import os
import numpy as np
import matplotlib.pyplot as plt
#funcion para graficad los datos usando matplotlib
def grafica_datos(x, y, theta):
prediction = y
#plottear datos ingresados
plt.plot(x,y, 'ro')
#plottea theta
for i in range(0, len(x)):
prediction[i] = theta[0] + theta[1]* x[i]
plt.plot(x,prediction, color='black')
plt.show()
pass
#funcion del algoritmo de gradiente descendiente
def gradiente_descendente(x, y, theta, alpha, iteraciones):
for i in range(0, iteraciones):
temp_zero = theta[0] - (alpha * calcula_costo(x,y,theta, 0))
temp_one = theta[1] - (alpha * calcula_costo(x,y,theta, 1))
theta[0] = temp_zero
theta[1] = temp_one
return theta
#funcion de calculo de costo de error, se agrega index para saber si es para theta cero o theta uno
def calcula_costo(x, y, theta, index):
sum_theta_zero = 0
sum_theta_one = 0
entries = len(x)
#calcular hipotesis para cada valor de x y y
for k in range(0, entries):
sum_theta_zero += calcula_hipotesis(x[k], theta) - y[k]
sum_theta_one += (calcula_hipotesis(x[k], theta) - y[k]) * x[k]
#haciendo division final de la sumatoria
sum_theta_zero /= entries
sum_theta_one /= entries
#dependiendo del index se regresa el valor de theta ya sea para j=0 o j=1
if index == 0:
return sum_theta_zero
else:
return sum_theta_one
#funcion que calcula la hipotesis de theta para poder calcular el costo
def calcula_hipotesis(x, theta):
#calcular hipotesis para un valor determinado de x
hip = theta[0] + theta[1]*x
return hip
#obteniendo datos del archivo
data = np.genfromtxt("ex1data1.txt", delimiter = ",")
#dividiendo datos en dos vectores x , y
splits = np.hsplit(data, 2)
x = splits[0]
y = splits[1]
#razon de aprendizaje alpha
alpha = 0.01
#iteraciones para el gradiente descendiende
iteraciones = 1500
#valor de theta inicial
theta = [0,0]
theta = gradiente_descendente(x,y,theta,alpha,iteraciones)
print ("Vector theta final: ", theta)
grafica_datos(x,y,theta)
| [
"matplotlib"
] |
52bc57bfc9da1b8037a3051a067cbb87ad3d9d73 | Python | awsome-perceptron/thesis | /Organized/data_mining.py | UTF-8 | 860 | 2.53125 | 3 | [] | no_license | from session import CompleteSession
import matplotlib.pyplot as plt
import global_variables as gv
import os
from empatica import empaticaParser
from timings import timingsParser
from panas import panasParser
import time
if __name__ == "__main__":
sessionList = []
counter = 0
sessionList2 = []
counter2 = 0
ini_time = time.perf_counter()
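    # time how long it takes to construct a CompleteSession for every patient/experiment pair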
for patient in os.listdir(gv.BASE_FOLDER):
if patient == "D5":
continue
folder = gv.BASE_FOLDER + patient
for experiment in os.listdir(folder):
print("Run - {}-{}".format(patient, experiment))
sessionObject = CompleteSession(patient, int(experiment), False)
sessionList.append(sessionObject)
counter += 1
end_time = time.perf_counter()
print("Time for first run: {}".format(end_time - ini_time))
| [
"matplotlib"
] |
66c36de257300fac33c20a0f4d460093c1e9b834 | Python | Aminova/CS590U_A5 | /app/classify_cough.py | UTF-8 | 2,944 | 2.796875 | 3 | [] | no_license | import json
import numpy as np
import matplotlib.pyplot as plt
import tensorflow.keras as keras
from sklearn.model_selection import train_test_split
cough_data = "coughData.json"
def load_sound_files(dpath):
with open(dpath, "r") as fp:
data = json.load(fp)
mfcc = np.array(data["mfcc"])
labels = np.array(data["labels"])
return mfcc, labels
def plot_history(modelfit):
fig, axes = plt.subplots(2)
axes[0].plot(modelfit.history["accuracy"], label="training accuracy")
axes[0].plot(modelfit.history["val_accuracy"], label="testing accuracy")
axes[0].set_ylabel("Accuracy")
axes[1].plot(modelfit.history["loss"], label="training loss")
axes[1].plot(modelfit.history["val_loss"], label="testing loss")
axes[1].set_ylabel("Error")
axes[1].set_xlabel("Epoch")
plt.show()
def splitData(test_size, testing_size):
X, y = load_sound_files(cough_data)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size)
X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=testing_size)
X_train = X_train[..., np.newaxis]
X_val = X_val[..., np.newaxis]
X_test = X_test[..., np.newaxis]
return X_train, X_val, X_test, y_train, y_val, y_test
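# CNN: three conv -> max-pool -> batch-norm blocks, then flatten, a dense layer with dropout, and a 2-way softmax output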
def build_cnn_model(input_shape):
cough_model = keras.Sequential()
cough_model.add(keras.layers.Conv2D(32, (3, 3), activation='relu', input_shape=input_shape))
cough_model.add(keras.layers.MaxPooling2D((3, 3), strides=(2, 2), padding='same'))
cough_model.add(keras.layers.BatchNormalization())
cough_model.add(keras.layers.Conv2D(32, (3, 3), activation='relu'))
cough_model.add(keras.layers.MaxPooling2D((3, 3), strides=(2, 2), padding='same'))
cough_model.add(keras.layers.BatchNormalization())
cough_model.add(keras.layers.Conv2D(32, (2, 2), activation='relu'))
cough_model.add(keras.layers.MaxPooling2D((2, 2), strides=(2, 2), padding='same'))
cough_model.add(keras.layers.BatchNormalization())
cough_model.add(keras.layers.Flatten())
cough_model.add(keras.layers.Dense(64, activation='relu'))
cough_model.add(keras.layers.Dropout(0.3))
cough_model.add(keras.layers.Dense(2, activation='softmax'))
    return cough_model
X_train, X_val, X_test, y_train, y_val, y_test = splitData(0.25, 0.2)
input_shape = (X_train.shape[1], X_train.shape[2], 1)
model = build_cnn_model(input_shape)
optimizer = keras.optimizers.Adam(learning_rate=0.0005)
model.compile(optimizer=optimizer,
loss='BinaryCrossentropy',
metrics=['accuracy'])
model.summary()
modelfit = model.fit(X_train, y_train, validation_data=(X_val, y_val), batch_size=30, epochs=25)
plot_history(modelfit)
testing_loss, testing_accuracy = model.evaluate(X_test, y_test, verbose=2)
print('\nTest accuracy:', testing_accuracy) | [
"matplotlib"
] |
1aa353dfa8a48e4f6d183380de1b443abb97cff6 | Python | paras1904/ML | /10 Agglomotive clustering.py | UTF-8 | 951 | 2.84375 | 3 | [] | no_license | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
data = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/00292/Wholesale%20customers%20data.csv')
from sklearn.preprocessing import normalize
data_scaled = normalize(data)
data_scaled = pd.DataFrame(data_scaled, columns=data.columns)
data_scaled.head()
import scipy.cluster.hierarchy as shc
plt.figure(figsize=(10, 7))
plt.title("Dendrograms")
dend = shc.dendrogram(shc.linkage(data_scaled, method='ward'))
plt.figure(figsize=(10, 7))
plt.title("Dendrograms")
dend = shc.dendrogram(shc.linkage(data_scaled, method='ward'))
plt.axhline(y=6, color='r', linestyle='--')
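# the horizontal line at distance 6 marks the dendrogram cut used to choose n_clusters=2 below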
from sklearn.cluster import AgglomerativeClustering
cluster = AgglomerativeClustering(n_clusters=2, affinity='euclidean', linkage='ward')
cluster.fit_predict(data_scaled)
plt.figure(figsize=(10, 7))
plt.scatter(data_scaled['Milk'], data_scaled['Grocery'], c=cluster.labels_)
plt.show() | [
"matplotlib"
] |
65189db8e348091301de5627f613ba6148877a10 | Python | lisamnash/Motor_tracking | /movie_instance.py | UTF-8 | 12,162 | 2.734375 | 3 | [] | no_license | import cine
import cv2
import numpy as np
import scipy.ndimage as ndimage
import scipy.ndimage.filters as filters
import matplotlib.pyplot as plt
import math
import PIL.Image as Image
import PIL.ImageOps as ImageOps
import matplotlib.cm as cm
import scipy.ndimage.filters as filters
import scipy.ndimage as ndimage
import matplotlib.image as mpimg
import tracking_helper_functions as thf
class GyroMovieInstance:
def __init__(self, input_file, frame_rate=30.):
# first determind the file type
self.file_type = input_file.split('.')[-1]
if self.file_type == 'cine':
self.cine = True
self.data = cine.Cine(input_file)
self.num_frames = len(self.data)
else:
self.cine = False
file_names = thf.find_files_by_extension(input_file, '.png', tot=True)
data = []
for i in xrange(20):
file = file_names[i]
data_single = mpimg.imread(file)
data_single = self.rgb2gray(data_single)
data_single = data_single[:, 100:1400]
data.append(data_single)
print np.shape(data_single)
print file
self.data = data
self.num_frames = len(self.data)
self._mean_value = 0
self.min_radius = 17
self.max_radius = 22
self._min_value = 0.05
self._max_value = 0.7
self._pix = 6
self.current_frame = []
self.frame_current_points = []
self.circles = []
self.current_time = 0
self.frame_rate = frame_rate
self._adjust_min_max_val()
self._set_dummy_frame()
def _adjust_min_max_val(self):
max = np.max(self.data[0].astype('float').flatten())
self._min_value = self._min_value * max
self._max_value = self._max_value * max
def _set_dummy_frame(self):
t2 = np.ones((2 * self._pix, 2 * self._pix), dtype='f')
self.dummy = np.array(ndimage.measurements.center_of_mass(t2.astype(float)))
def set_min_max_val(self, min_value, max_value):
self._min_value = min_value
self._max_value = max_value
self._adjust_min_max_val()
def set_tracking_size(self, pix):
self._pix = pix
self._set_dummy_frame()
def extract_frame_data(self, frame_num):
if self.cine:
self.current_frame = self.data[frame_num].astype('float')
self.get_time(frame_num)
else:
self.current_frame = self.data[frame_num].astype('float')
self.get_time(frame_num)
def get_time(self, frame_num):
if self.cine:
self.current_time = self.data.get_time(frame_num)
else:
print('...frame rate set to %02d...' % self.frame_rate)
self.current_time = 1. / self.frame_rate * frame_num
def adjust_frame(self):
self.current_frame = np.clip(self.current_frame, self._min_value, self._max_value) - self._min_value
self.current_frame = self.current_frame / (self._max_value - self._min_value)
self._mean_value = np.mean(self.current_frame)
def find_points_hough(self):
img = np.array(self.current_frame * 255, dtype=np.uint8)
# apply blur so you don't find lots of fake circles
img = cv2.GaussianBlur(img, (3, 3), 2, 2)
circles = cv2.HoughCircles(img, cv2.cv.CV_HOUGH_GRADIENT, 1, 20,
param1=48, param2=18, minRadius=self.min_radius, maxRadius=self.max_radius)
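        # param1: upper Canny edge threshold; param2: accumulator threshold (lower values detect more, possibly spurious, circles)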
circles = np.uint16(np.around(circles))
self.circles = circles[0]
self.frame_current_points = np.array([self.circles[:, 0], self.circles[:, 1]], dtype=float).T
def find_points_convolution(self, image_kernel_path='./new_image_kern.png'):
img = np.array(self.current_frame)
fig = plt.figure()
plt.imshow(img, cmap=cm.Greys_r)
plt.show()
img_ker = mpimg.imread(image_kernel_path)
print np.shape(img_ker)
img_ker[img_ker < 0.5] = -0.
fr = ndimage.convolve(img, img_ker, mode='reflect', cval=0.0)
minval = 0.0 * max(fr.flatten())
maxval = 1. * max(fr.flatten())
f = (np.clip(fr, minval, maxval) - minval) / (maxval - minval)
data_max = filters.maximum_filter(f, 80)
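        # a pixel is a candidate peak if it equals the maximum of its 80-pixel neighbourhood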
maxima = (f == data_max)
data_min = filters.minimum_filter(f, 80)
dmax = max((data_max - data_min).flatten())
dmin = min((data_max - data_min).flatten())
minmax = (dmax - dmin)
diff = ((data_max - data_min) >= dmin + 0.10 * minmax)
maxima[diff == 0] = 0
labeled, num_object = ndimage.label(maxima)
slices = ndimage.find_objects(labeled)
x, y = [], []
for dy, dx in slices:
rad = np.sqrt((dx.stop - dx.start) ** 2 + (dy.stop - dy.start) ** 2)
# print 'rad', rad
if rad < 15 and rad > 0.25:
# print ra
x_center = (dx.start + dx.stop) / 2
x.append(x_center)
y_center = (dy.start + dy.stop) / 2
y.append(y_center)
fig = plt.figure()
plt.imshow(fr, cmap=cm.Greys_r)
plt.plot(x, y, 'ro')
plt.show()
def center_on_bright_new(self, num_times):
new_points = []
for pt in self.frame_current_points:
h, w = np.shape(self.current_frame)
# if ((pt[0] > 1.5 * self._pix) and (pt[1] > 1.5 * self._pix) and (pt[0] < w - 1.5 * self._pix) and (
# pt[1] < h - 1.5 * self._pix)):
if True:
for j in xrange(num_times):
# Center num_times in case the dot has moved partially out of the box during the step.
# draw small boxes
bf = self.current_frame[pt[1] - self._pix:pt[1] + self._pix]
bf = bf[:, pt[0] - self._pix:pt[0] + self._pix]
bf_comp = bf.copy()
# let's clip this area to maximize the bright spot
bf = bf.astype('f')
bf_min = 0.8 * np.max(bf.flatten())
bf_max = 1. * np.max(bf.flatten())
bf = np.clip(bf, bf_min, bf_max) - bf_min
bf = bf / (bf_max - bf_min)
bf = cv2.GaussianBlur(bf, (2, 2), 1, 1)
# find center of brightness
data_max = filters.maximum_filter(bf, self._pix)
data_min = filters.minimum_filter(bf, self._pix)
maxima = (bf == data_max)
dmax = max((data_max - data_min).flatten())
dmin = min((data_max - data_min).flatten())
minmax = (dmax - dmin)
diff = ((data_max - data_min) >= dmin + 0.9 * minmax)
maxima[diff == 0] = 0
maxima = (bf == data_max)
labeled, num_object = ndimage.label(maxima)
slices = ndimage.find_objects(labeled)
x, y = [], []
for dx, dy in slices:
rad = np.sqrt((dx.stop - dx.start) ** 2 + (dy.stop - dy.start) ** 2)
if rad < 3:
x_center = (dx.start + dx.stop) / 2
x.append(x_center)
y_center = (dy.start + dy.stop) / 2
y.append(y_center)
com = [x[0], y[0]]
# find center of mass difference from center of box
movx = self.dummy[1] - com[1] # pix - com[0]
movy = self.dummy[0] - com[0] # pix - com[1]
if math.isnan(movx):
movx = 0
if math.isnan(movy):
movy = 0
# move the points
pt[0] = pt[0] - movx
pt[1] = pt[1] - movy
if j == num_times - 1:
fig = plt.figure()
plt.imshow(bf)
plt.plot(pt[0], pt[1], 'ro')
plt.show()
if np.mean(bf_comp) < 5 * self._mean_value:
new_points.append(pt)
new_points = np.array(new_points, dtype=float)
ind = np.argsort(new_points[:, 0])
new_points = new_points[ind]
ind = np.argsort(new_points[:, 1])
new_points = new_points[ind]
self.frame_current_points = np.array(new_points, dtype=float)
def center_on_bright(self, num_times=3):
new_points = []
for pt in self.frame_current_points:
w, h = np.shape(self.current_frame)
if True: # ((pt[0] > 1.5*self._pix) and (pt[1] > 1.5*self._pix) and (pt[0] < w - 1.5*self._pix) and (pt[1] < h - 1.5*self._pix)):
for j in xrange(num_times):
# Center num_times in case the dot has moved partially out of the box during the step.
# draw small boxes
bf = self.current_frame[pt[1] - self._pix:pt[1] + self._pix]
bf = bf[:, pt[0] - self._pix:pt[0] + self._pix]
bf_comp = bf.copy()
# let's clip this area to maximize the bright spot
bf = bf.astype('f')
bf_min = 0.0 * np.max(bf.flatten())
bf_max = 1. * np.max(bf.flatten())
bf = np.clip(bf, bf_min, bf_max) - bf_min
bf = bf / (bf_max - bf_min)
# find center of brightness
com = ndimage.measurements.center_of_mass(bf)
# if j == num_times -1:
# fig = plt.figure()
# plt.imshow(bf)
# plt.show()
# find center of mass difference from center of box
movx = self.dummy[1] - com[1] # pix - com[0]
movy = self.dummy[0] - com[0] # pix - com[1]
if math.isnan(movx):
movx = 0
if math.isnan(movy):
movy = 0
# move the points
pt[0] = pt[0] - movx
pt[1] = pt[1] - movy
if True: # np.mean(bf_comp)<5*self._mean_value:
new_points.append(pt)
new_points = np.array(new_points, dtype=float)
ind = np.argsort(new_points[:, 0])
new_points = new_points[ind]
ind = np.argsort(new_points[:, 1])
new_points = new_points[ind]
self.frame_current_points = np.array(new_points, dtype=float)
def save_frame(self, name='frame'):
fig = plt.figure()
ax = plt.axes([0, 0, 1, 1])
img = cine.asimage(self.current_frame)
plt.imshow(img, cmap=cm.Greys_r)
plt.savefig(name + '.png')
plt.close()
def save_frame_with_boxes(self, name='frame'):
fig = plt.figure()
ax = plt.axes([0, 0, 1, 1])
img = np.array(self.current_frame)
for pt in self.frame_current_points:
img[pt[1] - self._pix: pt[1] + self._pix, pt[0] - self._pix: pt[0] + self._pix] = np.array(
ImageOps.invert(Image.fromarray(np.uint8(
img[pt[1] - self._pix: pt[1] + self._pix, pt[0] - self._pix: pt[0] + self._pix]))))
#cine.asimage(img).save('image_kernel.png')
img = cine.asimage(img)
plt.imshow(img, cmap=cm.Greys_r)
plt.savefig(name + '.png')
plt.close()
def find_point_convolve(self, img_ker):
fr = ndimage.convolve(self.current_frame, img_ker, mode='reflect', cval=0.0)
minval = 0.1 * max(fr.flatten())
maxval = 1 * max(fr.flatten())
fr = (np.clip(fr, minval, maxval) - minval) / (maxval - minval)
fig = plt.figure()
plt.imshow(fr)
plt.show()
def rgb2gray(self, rgb):
return np.dot(rgb[..., :3], [0.299, 0.587, 0.114])
| [
"matplotlib"
] |
dc48c55401fa0f7e395cdc9f9211d4670d50cd77 | Python | saikatghosh1998/Tweeter_analysis | /tweets/views.py | UTF-8 | 2,004 | 2.671875 | 3 | [] | no_license | from django.shortcuts import render, redirect
from django.http import HttpResponse
from .forms import yearSearch
import sys
from . import script
import matplotlib.pyplot as plt
# Create your views here.
def search(request):
positive = []
negative = []
neutral = []
percent = []
if request.method == 'POST':
videotitle=request.POST['videoname']
x = script.main(videotitle)
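        # script.main returns a list of tweet dicts with 'sentiment' and 'text' keys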
#for getting positive tweets
for tweet in x:
if tweet['sentiment']=='positive':
p = tweet['text']
positive.append(p)
#for getting Negative tweets
for tweet in x:
if tweet['sentiment']=='negative':
p = tweet['text']
negative.append(p)
#for getting neutral tweets
for tweet in x:
if tweet['sentiment']=='neutral':
p = tweet['text']
neutral.append(p)
#calculating the percentages of positive, negative and neutral tweets
percent.append(100*len(positive)/len(x))
percent.append( 100*len(negative)/len(x))
percent.append(100*len(neutral)/len(x))
#Creating a pie chart with the help of matplotlib
labels = 'positive', 'Negative', 'Neutral'
sizes = [percent[0],percent[1], percent[2]]
fig1, ax1 = plt.subplots()
ax1.pie(sizes, labels=labels, autopct='%1.1f%%',
shadow=True, startangle=90)
ax1.axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle.
#saving the pie chart as a png image.
plt.savefig('/static/images/plot.png', bbox_inches='tight')
#passing the data to html page
context = {'positive': positive, 'negative':negative, 'neutral':neutral}
return render(request,'tweets/search.html',context)
def home(request):
if request.method == 'POST':
form = yearSearch(request.POST)
if form.is_valid():
return redirect('search')
else:
form = yearSearch()
context = {'form':form}
return render(request,'tweets/home.html', context) | [
"matplotlib"
] |
cc966f2b133cd79e84ebeb4f599f861a2321dac4 | Python | samuelefiorini/structure-aware-autoencoder | /saa/plot/snapshot.py | UTF-8 | 2,957 | 3.1875 | 3 | [
"MIT"
] | permissive | import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from wordcloud import WordCloud
from ..utils import flatten
def radar(df, idx):
# We are going to plot the first line of the data frame.
# But we need to repeat the first value to close the circular graph:
values = df.loc[idx].values.flatten().tolist()
values += values[:1]
values
# What will be the angle of each axis in the plot? (we divide the plot / number of variable)
angles = [n / float(df.shape[1]) * 2 * np.pi for n in range(df.shape[1])]
angles += angles[:1]
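    # one spoke per column, evenly spaced; the first angle is repeated so the polygon closes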
fig = plt.figure(figsize=(6, 6))
# Initialise the spider plot
ax = plt.subplot(111, polar=True)
# Draw one axe per variable + add labels labels yet
plt.xticks(angles[:-1], df.columns, color='k', size=15)
plt.yticks([0, .25, .5, .75, 1], ["0%", '25%', "50%", '75%', "100%"],
color="grey",
size=15)
plt.ylim([0, 1])
# Plot data
ax.plot(angles, values, linewidth=1, linestyle='solid')
# Fill area
ax.fill(angles, values, 'b', alpha=0.1)
def lollipop_h(df, idx):
values = df.loc[idx].values.flatten().tolist()
fig = plt.figure(dpi=100)
colors = [i for i in sns.color_palette('deep')]
plt.hlines(y=df.columns, xmin=0, xmax=values, colors=colors, linewidth=4)
for i, x, c in zip(range(len(values)), values, colors):
plt.plot(x, i, 'o', color=c, markersize=10)
plt.xlim([0, 1])
sns.despine(left=False, bottom=True)
return plt
def lollipop_v(df, idx):
values = df.loc[idx].values.flatten().tolist()
fig = plt.figure(dpi=100)
colors = [i for i in sns.color_palette('deep')]
plt.vlines(x=df.columns, ymin=0, ymax=values, colors=colors, linewidth=4)
for i, x, c in zip(range(len(values)), values, colors):
plt.plot(i, x, 'o', color=c, markersize=10)
plt.ylim([0, 1])
plt.xticks(rotation=45)
sns.despine(left=True, bottom=False)
return plt
def lollipop(df, idx, orientation='vertical'):
if orientation.lower() == 'vertical':
return lollipop_v(df, idx)
elif orientation.lower() == 'horizontal':
return lollipop_h(df, idx)
else:
print('Orientation {} not understood'.format(orientation))
def wordcloud(df, idx):
x, y = np.ogrid[:300, :300]
mask = (x - 150)**2 + (y - 150)**2 > 130**2
mask = 255 * mask.astype(int)
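    # circular mask: pixels outside radius 130 are set to 255 and excluded from the word cloud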
values = df.loc[idx].values.flatten().tolist()
text = []
for c, v in zip(df.columns, values):
text.append([c.replace(' ', '_')] * int(10 * v))
text = flatten(text)
text = ' '.join(text)
wc = WordCloud(
mask=mask,
background_color='white',
colormap='Paired',
max_font_size=100,
min_font_size=1,
contour_width=1,
contour_color='gray').generate(text)
plt.figure()
plt.imshow(wc, interpolation="bilinear")
plt.axis("off")
plt.margins(x=0, y=0)
return plt
| [
"matplotlib",
"seaborn"
] |
970145a779a73a060ccc790af2e554387ce99695 | Python | yogi13995/Maths_and_signal | /Geometry/line_alg/codes/line/point_on_line/line_on_point.py~ | UTF-8 | 884 | 2.78125 | 3 | [] | no_license | import numpy as np
import matplotlib.pyplot as plt
#from coeffs import *
import subprocess
import shlex
#generating the line points
def line_gen(A,B):
len = 20
dim = A.shape[0]
x_AB = np.zeros((dim,len))
lam_1 = np.linspace(0,1,len)
for i in range(len):
temp1 = A + lam_1[i]*(B-A)
x_AB[:,i]=temp1.T
return x_AB
A = np.array([1,-5])
B = np.array([-4,5])
C = np.array([-1.5,0])
#Generating the lines
x_AB = line_gen(A,B)
#plotting the all lines
plt.plot(x_AB[0,:],x_AB[1,:],label='$AB$')
plt.plot(A[0],A[1],'o')
plt.text(A[0]*(1+0.1), A[1]*(1-0.1), 'A')
plt.plot(B[0],B[1],'o')
plt.text(B[0]*(1-0.2), B[1]*(1), 'B')
plt.plot(C[0], C[1], 'o')
plt.text(C[0]*(1+0.03), C[1]*(1-0.1),'C')
plt.xlabel('$x$')
plt.ylabel('$y$')
plt.legend(loc='best')
plt.grid()
plt.axis('equal')
plt.savefig('../../../figures/line/point_on_line/point_on_line.eps')
plt.show()
| [
"matplotlib"
] |
12c70edec4c301b760d340c281637637b30721de | Python | EmperoR1127/ml_project | /bar_chart.py | UTF-8 | 392 | 2.8125 | 3 | [] | no_license | import matplotlib
import matplotlib.pyplot as plt
y = [0.81,0.65,0.68,0.45,0.79,0.80,0.80,0.76]
x = ["DT",
"LE",
"KNN",
"GNB",
"SR",
"RF",
"AD",
"VT"]
plt.bar(x, y)
plt.xlabel('Model')
plt.ylabel('Average accuracy')
plt.title(r'Average accuracy with different classifiers')
# Tweak spacing to prevent clipping of ylabel
# fig.tight_layout()
plt.savefig("average_accuracy.png")
plt.show() | [
"matplotlib"
] |
1570df1ea2b44e8837bc5fc4a656968aa010178c | Python | LejayChen/ASTR5400_term_project | /prject_multi.py | UTF-8 | 10,326 | 2.90625 | 3 | [] | no_license | # ASTR5400 term project
# Fall 2017
# author: Lingjian Chen
import numpy as np
import matplotlib.pyplot as plt
from scipy import optimize
from astropy.io import ascii
def f(zed):
''' w(z) in central region'''
return 1 - (1/6.)*zed**2 + (n/1200.)*zed**4
def df(zed):
'''dw/dz in central region'''
return - (1 / 3.) * zed + (n*4 / 1200.) * zed ** 3
############ SETUP ##########################
# set up for central region
n = 3. # polytropic index
step = 1e-4 # step size of z --> about 60000 z's
z_center = 1e-3 # size of central region
# z, w and dw/dz in central region
z = np.linspace(0, z_center, int(z_center/step)) # z_i's in central region
w = f(z)
dw_dz = df(z)
# starting point for numerical integral
# for two-step Adams-Bashforth I need two point to start with,
# and I picked last two points in the central approximation
z_now = (z[-2], z[-1])
w_now = (w[-2], w[-1])
dw_dz_now = (dw_dz[-2], dw_dz[-1])
# import yrec model
yrec = ascii.read('yrec.csv')
############# NUMERICAL INTEGRAL #################
while w_now[1] > 0 and z_now[1] < 20: # truncate at w==0
z_prev = z_now
w_prev = w_now
dw_dz_prev = dw_dz_now
    # UPDATE via the two-step method:
    # compute the values for the next step, shift the second entry of each tuple into the first slot,
    # and store the newly computed value as the second entry
z_now = (z_prev[0]+step, z_prev[1]+step)
dw_dz_now = (dw_dz_prev[1], dw_dz_prev[1] + 3./2*step*(-w_prev[1]**n - 2./z_prev[1]*dw_dz_prev[1])-1./2*step*(-w_prev[0]**n - 2./z_prev[0]*dw_dz_prev[0]))
w_now = (w_prev[1], w_prev[1] + 3./2*dw_dz_prev[1]*step-1./2*dw_dz_prev[0]*step)
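    # two-step Adams-Bashforth: y_{n+1} = y_n + h*(3/2*f_n - 1/2*f_{n-1}), applied to w' = dw/dz and (dw/dz)' = -w^n - (2/z)*dw/dz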
# append the z,w,dw_dz list
z = np.append(z, z_now[1])
w = np.append(w, w_now[1])
dw_dz = np.append(dw_dz, dw_dz_now[1])
# cut off last value which due to the truncation point is w<0
z = z[0:-1]
w = w[0:-1]
dw_dz = dw_dz[0:-1]
# print out z_n to check
z_n = z[-1]
print('z_n:', z_n)
########## w(z) PLOT #################
plt.rc('font', family='serif'), plt.rc('xtick', labelsize=15), plt.rc('ytick', labelsize=16)
plt.subplots(figsize=(7.5, 6))
plt.plot(z, w)
plt.xlim([0, z[-1]+0.5])
plt.ylim([0, 1.05])
plt.xlabel('z', fontsize=16)
plt.ylabel('w', fontsize=16)
plt.savefig('w_z.png')
plt.show()
############### PHYSICAL PROPERTIES #############
mu = 0.62 # mean molecular weight [dimensionless]
beta1 = 0.25 # a guess of gas pressure fraction [dimensionless]
beta2 = 0.982 # a guess of gas pressure fraction [dimensionless]
rho_c1 = 2.*1e3 # central density (kg/m^3)
rho_c2 = 150.*1e3 # central density (kg/m^3) (same as in solar core)
Re = 8315. # gas constant (J K^-1 kg^-1)
a = 7.5657e-16 # radiation constant (J m^-3 K^-4)
G = 6.67e-11 # gravitational constant (m^3 kg^-1 s^-2)
K1 = (3*Re**4/a*mu**4)**(1./3)*((1.-beta1)/beta1**4.)**(1./3) # mks system unit
A1 = np.sqrt(4*np.pi*G/((n+1)*K1)*rho_c1**((n-1.)/float(n))) # mks system unit
K2 = (3*Re**4/a*mu**4)**(1./3)*((1.-beta2)/beta2**4.)**(1./3) # mks system unit
A2 = np.sqrt(4*np.pi*G/((n+1)*K2)*rho_c2**((n-1.)/float(n))) # mks system unit
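# K1/K2 come from the Eddington standard-model relation between the gas-pressure fraction beta and K;
# A is the Lane-Emden scale factor, so the physical radius is r = z/A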
print('central density for early/late type star: 2 and 150 g/cm^3')
#----------------------BOTH MODELS PLOTS------------------------------
############## DENSITY PROFILE ######################################
rho1 = rho_c1*w**n
rho2 = rho_c2*w**n
plt.plot(z/A1/6.95e8, rho1/1000, label='early type', color='b') # rho in g/cm^3
plt.plot(z/A2/6.95e8, rho2/1000, label='late type', color='r') # rho in g/cm^3
plt.plot(yrec['r'], yrec['rho'], label='yrec model', color='g')
plt.xlabel(r'r/r$_\odot$', fontsize=15)
plt.ylabel(r'$\rho(r)$ [g/cm^3]', fontsize=15)
plt.xscale('log')
plt.yscale('log')
plt.legend(fontsize=15)
plt.show()
################ PRESSURE PROFILE ##################
# early type
P1 = K1*rho_c1**(1+1./n)*w**(n+1)
plt.plot(z/A1/6.95e8, P1/0.1, label='early type', color='b')
plt.plot(z/A1/6.95e8, P1/0.1*beta1, label='P_gas,early', color='b',linestyle='-.')
plt.plot(z/A1/6.95e8, P1/0.1*(1-beta1), label='P_rad,early', color='b',linestyle='--')
# late type
P2 = K2*rho_c2**(1+1./n)*w**(n+1)
plt.plot(z/A2/6.95e8, P2/0.1, label='late type', color='r')
plt.plot(z/A2/6.95e8, P2/0.1*beta2, label='P_gas,late', color='r',linestyle='-.')
plt.plot(z/A2/6.95e8, P2/0.1*(1-beta2), label='P_rad,late', color='r',linestyle='--')
plt.plot(yrec['r'], yrec['P'], label='yrec model', color='g')
plt.xscale('log')
plt.yscale('log')
plt.xlabel(r'r/r$_\odot$', fontsize=15)
plt.ylabel(r'P(r) [Ba]', fontsize=15)
plt.legend(fontsize=15)
plt.show()
############### MASS PROFILE ##########################
# early type
m1 = 4*np.pi*rho_c1*(z/A1)**3*(-1/z*dw_dz)/1.99e30
plt.plot(z/A1/6.95e8, m1, label='early type',color='b')
# late type
m2 = 4*np.pi*rho_c2*(z/A2)**3*(-1/z*dw_dz)/1.99e30
plt.plot(z/A2/6.95e8, m2, label='late type',color='r')
plt.plot(yrec['r'], yrec['m'], label='yrec model', color='g')
plt.xscale('log')
plt.yscale('log')
plt.xlabel(r'r/r$_\odot$', fontsize=15)
plt.ylabel(r'cumulative m(r)/M$_\odot$', fontsize=15)
plt.legend(fontsize=15)
plt.show()
#############TOTAL MASS, RADIUS #######################
#early type
rho_avg1 = rho_c1*(-3/z_n*dw_dz[-1])
R1 = z_n/A1
M1 = 4./3*np.pi*R1**3*rho_avg1/1.99e30
R1 = R1/6.95e8
delta_R1 = step/A1/6.95e8
#late type
rho_avg2 = rho_c2*(-3/z_n*dw_dz[-1])
R2 = z_n/A2
M2 = 4./3*np.pi*R2**3*rho_avg2/1.99e30
R2 = R2/6.95e8
delta_R2 = step/A1/6.95e8
print('Early type, M:', round(M1,2),'M_sun,', 'R:', round(R1), 'R_sun')
print('Late type, M:', round(M2,2),'M_sun,', 'R:', round(R2, 2), 'R_sun')
########### TEMPERATURE ###################
# find root in P = R*rho*T/mu + aT^4/3
def fun(T, rho, P):
return a/3.*T**4 + Re*rho/mu*T - P
T1 = []
T2 = []
for i in range(len(z)):
# early type
roots1 = optimize.root(fun, 1e7, args=(rho1[i], P1[i]))
T1.append(max(roots1.x))
# late type
roots2 = optimize.root(fun, 1e6, args=(rho2[i], P2[i]))
T2.append(max(roots2.x))
T1 = np.array(T1)
T2 = np.array(T2)
plt.plot(z/A1/6.95e8, T1, label='early type',color='b')
plt.plot(z/A2/6.95e8, T2, label='late type',color='r')
plt.plot(yrec['r'], yrec['T'], label='yrec model', color='g')
plt.xscale('log')
plt.yscale('log')
plt.xlabel(r'r/R$_\odot$', fontsize=14)
plt.ylabel('Temperature [K]', fontsize=14)
plt.legend(fontsize=15)
plt.show()
########## ENERGY GENERATION ##############
x_1 = 0.7 # as in the sun
x_cno = 0.0138 # as in the sun, source: http://www.ucolick.org/~woosley/ay220-15/handouts/xsol_ag89.pdf
e_pp0 = 1.08e-5 # from 'introduction to modern astrophysics'
e_cno0 = 8.24e-24 # from 'introduction to modern astrophysics'
nu_pp = 4 # from 'introduction to modern astrophysics'
nu_cno = 19.9 # from introduction to modern astrophysics'
# pp-chain and CNO cycle (1==early, 2==late)
e_pp1 = e_pp0*(rho1/1000.)*x_1**2*(T1/1e6)**nu_pp
e_cno1 = e_cno0*(rho1/1000.)*x_1*x_cno*(T1/1e6)**nu_cno
e_pp2 = e_pp0*(rho2/1000.)*x_1**2*(T2/1e6)**nu_pp
e_cno2 = e_cno0*(rho2/1000.)*x_1*x_cno*(T2/1e6)**nu_cno
# total energy generation
e_1 = e_pp1 + e_cno1
e_2 = e_pp2 + e_cno2
plt.plot(z/A1/6.95e8, e_1, linestyle='-', color='b', label='early-type total')
plt.plot(z/A1/6.95e8, e_pp1, linestyle='-.', color='b', label='early-type pp')
plt.plot(z/A1/6.95e8, e_cno1, linestyle='--', color='b', label='early-type CNO')
plt.plot(z/A2/6.95e8, e_2, linestyle='-', color='r', label='late-type total')
plt.plot(z/A2/6.95e8, e_pp2, linestyle='-.', color='r', label='late-type pp')
plt.plot(z/A2/6.95e8, e_cno2, linestyle='--', color='r', label='late-type CNO')
plt.plot(yrec['r'], yrec['epsilon'], label='yrec model', color='g')
plt.xlabel(r'r/r$_\odot$', fontsize=14)
plt.ylabel('energy generation [erg/s/g]', fontsize=14)
plt.yscale('log')
plt.xscale('log')
plt.ylim([1e-50, 1e11])
plt.legend(fontsize=14)
# indicate r_core (define r_core when e_pp drops to 0.1 percent to central value)
r_core_1 = z[len(e_pp1[e_pp1>0.001*e_pp1[0]])]
r_core_2 = z[len(e_pp2[e_pp2>0.001*e_pp2[0]])]
plt.axvline(x=r_core_1/A1/6.95e8, color='b', linestyle='-.')
plt.axvline(x=r_core_2/A2/6.95e8, color='r', linestyle='-.')
print('r_core/R for early/late type: '+str(round(r_core_1/A1/R1/6.95e8, 2))+' '+str(round(r_core_2/A2/R2/6.95e8, 2)))
plt.show()
############# LUMINOSITY ############################
dl1 = (e_pp1 + e_cno1)*np.pi*(z/A1)**2*step/A1*1e6 # luminosity in one shell (erg/s)
dl2 = (e_pp2 + e_cno2)*np.pi*(z/A2)**2*step/A2*1e6 # luminosity in one shell (erg/s)
plt.plot(z/A1/6.95e8, dl1, color='b', label='early type')
plt.plot(z/A2/6.95e8, dl2, color='r', label='late type')
plt.plot(yrec['r'], yrec['luminosity']/(step/A2), label='yrec model', color='g')
plt.xlabel('r/r$_\odot$', fontsize=14)
plt.ylabel('luminosity*dr [erg/s]', fontsize=14)
plt.yscale('log')
plt.xscale('log')
plt.legend(fontsize=15)
plt.show()
L1 = sum(dl1) # total luminosity (early type)
L2 = sum(dl2) # total luminosity (late type)
L_sun = 3.828e33 # from wikipedia
print('luminosity for early/late type (L_sun):', round(L1/L_sun, 2), ',', round(L2/L_sun,4)) # solar unit
########## OPACITY ########################
kappa_0 = 1e16 # unit?
kappa1 = kappa_0*P1*T1**(-4.5)
kappa2 = kappa_0*P2*T2**(-4.5)
plt.plot(z/A1/6.95e8, kappa1, label='early type',color='b')
plt.plot(z/A2/6.95e8, kappa2, label='late type',color='r')
plt.plot(yrec['r'], yrec['kappa'], label='yrec model', color='g')
plt.xlabel(r'r/R$_\odot$', fontsize=14)
plt.ylabel(r'$\kappa$ [cm^2/g]', fontsize=14)
plt.xscale('log')
plt.yscale('log')
plt.legend(fontsize=14)
plt.show()
########## TEMPERATURE GRADIENT ##################
nabla1 = P1[0:-1]/T1[0:-1]*np.diff(T1)/np.diff(P1) # P/T*dT/dP (dT, dP by discrete differentials)
nabla2 = P2[0:-1]/T2[0:-1]*np.diff(T2)/np.diff(P2) # P/T*dT/dP
nabla_ad1 = 0.25 # equation see report section 3.2.4
nabla_ad2 = 0.38 # equation see report section 3.2.4
plt.plot(z[0:-1]/A1/6.95e8, nabla1, label='early type', color='b')
plt.plot(z[0:-1]/A2/6.95e8, nabla2, label='late type', color='r')
# indicate adiabatic gradients
plt.axhline(y=nabla_ad1, label=r'$\nabla_{ad,early}$', linestyle='--', color='b')
plt.axhline(y=nabla_ad2, label=r'$\nabla_{ad,late}$', linestyle='--', color='r')
plt.xlabel(r'r/R$_\odot$', fontsize=14)
plt.ylabel(r'Temperature Gradient ($\nabla$)', fontsize=14)
plt.xscale('log')
plt.legend(fontsize=14)
plt.show() | [
"matplotlib"
] |
e77f12a6c57ef0c0e0570e6ffc1ade13092f8af8 | Python | tigerpk86/python_data__visual | /stock/증권데이터분석_완성본.py | UTF-8 | 6,595 | 2.609375 | 3 | [] | no_license | #!__*__coding:utf-8__*__
import pandas as pd
import pandas_datareader
import matplotlib as mat
import matplotlib.pyplot as plt
import numpy as np
import datetime
import urllib.parse
import os
import pickle
import time
########################################################
# 출력결과폴더
########################################################
excel_foler = "output\\excel"
result_folder = "output\\result"
if not os.path.isdir(excel_foler):
os.makedirs(excel_foler)
if not os.path.isdir(result_folder):
os.makedirs(result_folder)
########################################################
# 종목 코드 준비
# 실제 종목표.xlsx 파일이 있으면 웹에서 가져오지 않는다.
########################################################
jongmok_filename = "종목표.xlsx"
url = "http://bigdata-trader.com/itemcodehelp.jsp"
if not os.path.isfile(jongmok_filename):
jongmok_filename_df = pd.read_html(url)
jongmok_filename_df = jongmok_filename_df[0]
jongmok_filename_df.to_excel(jongmok_filename)
else:
jongmok_filename_df = pd.read_excel(jongmok_filename)
jongmok_filename_df.columns = ["종목코드", "종목명", "종류"]
########################################################
# Fetch the KOSPI index price history
########################################################
WebURL = "https://finance.google.com/finance/historical?q=%s&output=csv"
kospi_file = excel_foler + "\\kospi.xlsx"
if not os.path.isfile(kospi_file):
    KOSPI = "KRX:KOSPI"
    KOSPI_quote = urllib.parse.quote(KOSPI)
    KOSPI_df = pd.read_csv(WebURL % KOSPI_quote)
    KOSPI_df.to_excel(kospi_file)
else:
    KOSPI_df = pd.read_excel(kospi_file)
########################################################
# Check each ticker code listed in the ticker table
########################################################
final_result = {}
jongmok_dic = {}
cnt = 0
for i in range(len(jongmok_filename_df)):
a = jongmok_filename_df.iloc[i]
h1 = a.종목코드
h2 = a.종목명
h3 = a.종류
if h3 == "KOSPI":
jongmok_dic[h1] = h2
code_name = "KRX:" + h1
code_name = urllib.parse.quote(code_name)
jongmok_excel_file = excel_foler + "\\" + h1 + ".xlsx"
print(jongmok_excel_file)
if not os.path.isfile(jongmok_excel_file):
try:
df02 = pd.read_csv(WebURL % code_name)
except:
print("접속 에러 발생, 제외함")
continue
df02.to_excel(jongmok_excel_file)
else:
df02 = pd.read_excel(jongmok_excel_file)
print(" ".join([h1, h2]))
merged = KOSPI_df.merge(df02, on="Date")
result = np.corrcoef(merged["Close_x"], merged["Close_y"])
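        # np.corrcoef returns a 2x2 matrix; the off-diagonal entry [0][1] is the KOSPI-vs-stock correlation used later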
final_result[h1] = [merged, result]
print(result)
cnt += 1
########################################################
# Configure a Korean font for the charts
########################################################
font_location = r"c:\windows\fonts\malgun.ttf"
font_name = mat.font_manager.FontProperties(fname=font_location).get_name()
mat.rc('font', family=font_name)
#####################################
# Build a dictionary to hold the correlation coefficients
#####################################
data = {"code":[], "corrcoef":[]}
for i in final_result:
data["code"].append(i)
data["corrcoef"].append(final_result[i][1][0][1])
df = pd.DataFrame(data)
dfa = df.sort_values("corrcoef", ascending=False)
#####################################
# Build the plots and charts (top 10 by correlation)
#####################################
top_10_dfa = dfa.head(10)
top_10_dfa.to_excel(result_folder + "\\상위_10개_종목.xlsx")
for i in range(10):
codename = dfa.iloc[i].code
df_01 = final_result[codename][0]
ax = df_01.plot(kind="scatter", x="Close_x", y="Close_y", title= jongmok_dic[codename])
ax.set_xlabel("코스피")
ax.set_ylabel("종목종가")
plt.savefig(result_folder + "\\상위종목_" + codename + ".png")
#####################################
# Bottom 10 stocks by correlation coefficient
#####################################
bottom_10_dfa = dfa.tail(10)
bottom_10_dfa.to_excel(result_folder + "\\하위_10개_종목.xlsx")
for i in range(10):
j = len(dfa) - i - 1
codename = dfa.iloc[j].code
df_01 = final_result[codename][0]
ax = df_01.plot(kind="scatter", x="Close_x", y="Close_y", title= jongmok_dic[codename])
ax.set_xlabel("코스피")
ax.set_ylabel("종목종가")
plt.savefig(result_folder + "\\하위종목_" + codename + ".png")
#####################################
# Stocks whose closing price rose continuously
#####################################
#### iterate over the files in the file list one by one
import glob
files = glob.glob("output\\excel\\*.xlsx")
for one_file in files:
    df = pd.read_excel(one_file)  # read one file
    is_okay = True  # per-file flag: stays True while the closing price keeps rising
df = df.head(10)
for i in range(len(df)):
        if i == len(df)-1:  # skip the last row (nothing to compare it with)
pass
else:
m = df.iloc[i].Close
n = df.iloc[i+1].Close
x = m - n
if x < 0:
is_okay = False
if is_okay:
print("이 항목은 최근 마지막 10동안 금액이 계속 올랐습니다.")
print(one_file)
#####################################
# Top 10 stocks by average trading volume
#####################################
jongmok_code = []
vol = []
a = []
b = []
for one_file in files:
    df = pd.read_excel(one_file)  # read one file
    is_okay = True  # per-file flag
df = df.head(10)
temp_i = []
for i in range(len(df)):
        if i == len(df)-1:  # skip the last row (nothing to compare it with)
pass
else:
m = df.iloc[i].Volume
temp_i.append(m)
    jongmok_code.append(one_file)
    vol.append(np.mean(temp_i))  # average volume over the rows collected above
a_temp = os.path.splitext(os.path.split(one_file)[1])[0]
a.append(a_temp)
try:
b_temp = list(jongmok_filename_df[jongmok_filename_df.종목코드 == a_temp].종목명)[0]
except:
b_temp = ""
b.append(b_temp)
data = {"파일명": jongmok_code,
"거래량": vol,
"종목코드": a,
"종목명": b,
}
dfc = pd.DataFrame(data, columns=["파일명", "종목코드", "종목명", "거래량"])
dfc = dfc.sort_values("거래량", ascending=False)
dfc.index = range(1, len(dfc) + 1)
dfc.to_excel(result_folder + "\\평균거래량높은 종목순 정렬.xlsx") | [
"matplotlib"
] |
c4785e438cb52fbc8a3bf9d6badd2b02e9f7382a | Python | jmmanley/two-nn-dimensionality-estimator | /twonn.py | UTF-8 | 1,984 | 3.15625 | 3 | [] | no_license | import numpy as np
import scipy
from sklearn.linear_model import LinearRegression
import matplotlib.pyplot as plt
# TWO-NN METHOD FOR ESTIMATING INTRINSIC DIMENSIONALITY
# Facco, E., d’Errico, M., Rodriguez, A., & Laio, A. (2017).
# Estimating the intrinsic dimension of datasets by a minimal neighborhood information.
# Scientific reports, 7(1), 12140.
# Implementation by Jason M. Manley, [email protected]
# June 2019
def estimate_id(X, plot=False, X_is_dist=False):
# INPUT:
# X = Nxp matrix of N p-dimensional samples (when X_is_dist is False)
# plot = Boolean flag of whether to plot fit
# X_is_dist = Boolean flag of whether X is an NxN distance metric instead
#
# OUTPUT:
# d = TWO-NN estimate of intrinsic dimensionality
N = X.shape[0]
if X_is_dist:
dist = X
else:
# COMPUTE PAIRWISE DISTANCES FOR EACH POINT IN THE DATASET
dist = scipy.spatial.distance.squareform(scipy.spatial.distance.pdist(X, metric='euclidean'))
# FOR EACH POINT, COMPUTE mu_i = r_2 / r_1,
# where r_1 and r_2 are first and second shortest distances
mu = np.zeros(N)
for i in range(N):
sort_idx = np.argsort(dist[i,:])
mu[i] = dist[i,sort_idx[2]] / dist[i,sort_idx[1]]
# COMPUTE EMPIRICAL CUMULATE
sort_idx = np.argsort(mu)
Femp = np.arange(N)/N
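    # empirical cumulate: F(mu_(i)) = i/N over the sorted mu values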
# FIT (log(mu_i), -log(1-F(mu_i))) WITH A STRAIGHT LINE THROUGH ORIGIN
lr = LinearRegression(fit_intercept=False)
lr.fit(np.log(mu[sort_idx]).reshape(-1,1), -np.log(1-Femp).reshape(-1,1))
d = lr.coef_[0][0] # extract slope
if plot:
# PLOT FIT THAT ESTIMATES INTRINSIC DIMENSION
s=plt.scatter(np.log(mu[sort_idx]), -np.log(1-Femp), c='r', label='data')
p=plt.plot(np.log(mu[sort_idx]), lr.predict(np.log(mu[sort_idx]).reshape(-1,1)), c='k', label='linear fit')
plt.xlabel('$\log(\mu_i)$'); plt.ylabel('$-\log(1-F_{emp}(\mu_i))$')
plt.title('ID = ' + str(np.round(d, 3)))
plt.legend()
return d
| [
"matplotlib"
] |
ae37314d42e3ccd3f0252eafadc41cdd129b6e75 | Python | UrmiM/Capstone | /Source/clustering.py | UTF-8 | 9,631 | 2.734375 | 3 | [] | no_license | import pandas as pd
import numpy as np
import datetime as dt
import pylab as pl
import matplotlib.pyplot as plt
from scipy.stats import norm
import seaborn as sns
import itertools
import scipy.stats as scs
from scipy.spatial.distance import pdist, squareform
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score, silhouette_samples
from scipy.cluster.hierarchy import linkage, dendrogram
import matplotlib.cm as cm
from IPython.display import HTML, display
from collections import Counter
def load_data(filename):
"""
Loading Data from csv to pandas for EDA and
convert column names to standard format.
Input is filepath, Output is the dataframe.
"""
df = pd.read_csv(filename)
df.columns = df.columns.str.lstrip()
df.columns = df.columns.str.lower()
df.columns = df.columns.str.replace(" ", "_")
return df
def date_time_conversions(df):
"""
Converting the shared_date and last_activity_date
columns to pandas datetime format.
Input is dataframe and output is dataframe with new columns View Month (month),
Day of week (weekday), View Hour (hour) and time_spent_mins added.
"""
df["last_activity_date"] = pd.to_datetime(df["last_activity_date"], format="%d-%b-%Y")
df["shared_date"] = pd.to_datetime(df["shared_date"], format="%d-%b-%Y")
df["weekday_name"] = df['last_activity_date'].dt.weekday_name
df["month"] = df['last_activity_date'].dt.month
df["weekday"] = df['last_activity_date'].dt.dayofweek
df["hour"] = df['last_activity_time'].dt.hour
df["hour"] = df[df["hour"] > 11] - 12
df["hour"] = df[df["hour"] < 12] + 12
df["time_spent_mins"] = df["time_spent_in_seconds"]/60.
df["time_of_day"].replace({0 : 1, 1 : 1, 2 : 1, 3 : 1, 4 : 1, 5 : 1}, inplace = True)
df["time_of_day"].replace({6 : 2, 7 : 2, 8 : 2, 9 : 2, 10 : 2, 11 : 2}, inplace = True)
df["time_of_day"].replace({12 : 3, 13 : 3, 14 : 3, 15 : 3, 16 : 3, 17 : 3}, inplace = True)
df["time_of_day"].replace({18 : 4, 19 : 4, 20 : 4, 21 : 4, 22 : 4, 23 : 4,}, inplace = True)
return df
def add_days_from_share_to_view(df):
"""
Get the time difference between the share and view dates. Input df output,
df with column "num_of_day_from_share_view" column added.
"""
    # vectorized: pandas propagates NaT through the subtraction, so no per-row loop is needed
    df['num_of_day_from_share_view'] = (df["last_activity_date"] - df["shared_date"]).dt.days
return df
def feat_engg_for_es(df):
"""
Creates columns for viewing_device and browser for E2S.
"""
df.rename(columns={'user_os': 'viewing_device'}, inplace=True)
df["viewing_device"].replace({"MAC_OS_X" : "Laptop/Desktop", "WEB" : "Laptop/Desktop", \
"WINDOWS_10" : "Laptop/Desktop", "WINDOWS_7" : "Laptop/Desktop", "WINDOWS_81" : "Laptop/Desktop", \
"CHROME_OS" : "Laptop/Desktop"}, inplace = True)
df["viewing_device"].replace({"ANDROID_MOBILE" : "Phone", "ANDROID6" : "Phone", "ANDROID4" : "Phone", \
"ANDROID5" : "Phone","ANDROID7" : "Phone", "MAC_OS_X_IPHONE" : "Phone", "iOS7_IPHONE" : "Phone", \
"iOS8_1_IPHONE" : "Phone", "iOS8_3_IPHONE" : "Phone", "iOS9_IPHONE" : "Phone"}, inplace=True)
df["viewing_device"].replace({"ANDROID4_TABLET" : "Tablet", "ANDROID6_TABLET" : "Tablet", "iOS9_IPAD" : "Tablet",\
"MAC_OS_X_IPAD" : "Tablet"}, inplace = True)
df.rename(columns={'viewing_app': 'browser'}, inplace=True)
df["browser"].replace({"CHROME45" : "CHROME", "CHROME49" : "CHROME", "CHROME41" : "CHROME", \
"CHROME48" : "CHROME", "CHROME28" : "CHROME", "FIREFOX34" : "FIREFOX", "FIREFOX46" : "FIREFOX", \
"FIREFOX48" : "FIREFOX", "EDGE12" : "EDGE"}, inplace = True)
df["browser"].replace({"IE11" : "IE", "IE7" : "IE", "EDGE14" : "EDGE", "SAFARI5" : "SAFARI", \
"SAFARI9" : "SAFARI"}, inplace=True)
return df
def feat_engg_for_tw(df):
"""
Creates columns for viewing_device and browser for E2S.
"""
df.rename(columns={'user_os': 'viewing_device'}, inplace=True)
df["viewing_device"].replace({"MAC_OS_X" : "Laptop/Desktop", "WINDOWS_XP" : "Laptop/Desktop",\
"WINDOWS_8" : "Laptop/Desktop", "WINDOWS_10" : "Laptop/Desktop", "WINDOWS_7" : "Laptop/Desktop", \
"WINDOWS_81" : "Laptop/Desktop", "CHROME_OS" : "Laptop/Desktop"}, inplace = True)
df["viewing_device"].replace({"ANDROID_MOBILE" : "Phone", "ANDROID6" : "Phone", "ANDROID4" : "Phone",\
"ANDROID5" : "Phone","ANDROID7" : "Phone", "MAC_OS_X_IPHONE" : "Phone", "iOS7_IPHONE" : "Phone",\
"iOS8_1_IPHONE" : "Phone", "iOS8_3_IPHONE" : "Phone", "iOS9_IPHONE" : "Phone"}, inplace=True)
df["viewing_device"].replace({"ANDROID4_TABLET" : "Tablet", "ANDROID6_TABLET" : "Tablet",\
"iOS8_1_IPAD" : "Tablet", "MAC_OS_X_IPAD" : "Tablet"}, inplace = True)
df.rename(columns={'viewing_app': 'browser'}, inplace=True)
df["browser"].replace({"CHROME45" : "CHROME", "CHROME49" : "CHROME", "CHROME41" : "CHROME", \
"CHROME48" : "CHROME", "CHROME28" : "CHROME", "FIREFOX34" : "FIREFOX", "FIREFOX46" : "FIREFOX", \
"FIREFOX48" : "FIREFOX", "EDGE12" : "EDGE"}, inplace = True)
df["browser"].replace({"EDGE13" : "EDGE", "EDGE14" : "EDGE", "SAFARI5" : "SAFARI", \
"SAFARI9" : "SAFARI"}, inplace=True)
return df
def feat_engg_for_is(df):
"""
Creates columns for viewing_device and browser for E2S.
"""
df.rename(columns={'user_os': 'viewing_device'}, inplace=True)
df["viewing_device"].replace({"WINDOWS_7" : "Laptop/Desktop", "WINDOWS_10" : "Laptop/Desktop", \
"WINDOWS_81" : "Laptop/Desktop", "WINDOWS_XP" : "Laptop/Desktop", "LINUX" : "Laptop/Desktop", \
"UBUNTU" : "Laptop/Desktop", "MAC_OS_X" : "Laptop/Desktop"}, inplace = True)
df["viewing_device"].replace({"ANDROID_MOBILE" : "Phone", "ANDROID6" : "Phone", "ANDROID4" : "Phone", \
"ANDROID5" : "Phone","ANDROID7" : "Phone", "MAC_OS_X_IPHONE" : "Phone", "iOS7_IPHONE" : "Phone", \
"iOS8_1_IPHONE" : "Phone", "iOS8_3_IPHONE" : "Phone", "iOS9_IPHONE" : "Phone"}, inplace=True)
df["viewing_device"].replace({"ANDROID4_TABLET" : "Tablet", "ANDROID6_TABLET" : "Tablet", \
"iOS8_1_IPAD" : "Tablet", "MAC_OS_X_IPAD" : "Tablet"}, inplace = True)
df.rename(columns={'viewing_app': 'browser'}, inplace=True)
df["browser"].replace({"CHROME45" : "CHROME", "CHROME51" : "CHROME", "CHROME33" : "CHROME", \
"CHROME30" : "CHROME", "CHROME47" : "CHROME", "CHROME49" : "CHROME", "CHROME42" : "CHROME", \
"CHROME48" : "CHROME", "CHROME38" : "CHROME", "FIREFOX44" : "FIREFOX", "FIREFOX47" : "FIREFOX", \
"FIREFOX43" : "FIREFOX", "FIREFOX37" : "FIREFOX", "EDGE12" : "EDGE"}, inplace = True)
df["browser"].replace({"EDGE13" : "EDGE", "EDGE14" : "EDGE", "IE7" : "IE", "IE10" : "IE", "IE11" : "IE", \
"SAFARI9" : "SAFARI", "APPLE_WEB_KIT" : "SAFARI"}, inplace=True)
return df
def feat_engg_for_tw(df):
"""
Creates columns for viewing_device and browser for TW.
"""
df.rename(columns={'user_os': 'viewing_device'}, inplace=True)
df["viewing_device"].replace({"WINDOWS_7" : "Laptop/Desktop", "WINDOWS_10" : "Laptop/Desktop", \
"MAC_OS_X" : "Laptop/Desktop"}, inplace = True)
df["viewing_device"].replace({"ANDROID_MOBILE" : "Phone", "ANDROID6" : "Phone", "ANDROID4" : "Phone", \
"ANDROID5" : "Phone", "MAC_OS_X_IPHONE" : "Phone", "iOS9_IPHONE" : "Phone", "iOS8_1_IPHONE" : "Phone", \
"iOS8_3_IPHONE" : "Phone", "iOS9_IPHONE" : "Phone"}, inplace=True)
df.rename(columns={'viewing_app': 'browser'}, inplace=True)
df["browser"].replace({"CHROME45" : "CHROME", "CHROME51" : "CHROME", "CHROME33" : "CHROME", \
"CHROME30" : "CHROME", "CHROME47" : "CHROME", "CHROME49" : "CHROME", "CHROME42" : "CHROME", \
"CHROME48" : "CHROME", "CHROME38" : "CHROME", "FIREFOX44" : "FIREFOX", "FIREFOX47" : "FIREFOX", \
"FIREFOX43" : "FIREFOX", "FIREFOX37" : "FIREFOX", "EDGE12" : "EDGE"}, inplace = True)
df["browser"].replace({"EDGE13" : "EDGE", "EDGE14" : "EDGE", "IE7" : "IE", "IE10" : "IE", "IE11" : "IE", \
"SAFARI9" : "SAFARI", "APPLE_WEB_KIT" : "SAFARI"}, inplace=True)
return df
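# (Editor's illustrative sketch, not part of the original pipeline.) The repeated
# replace() chains in the feat_engg_for_* functions could be driven by one lookup
# table instead; OS_TO_DEVICE (abridged here) and feat_engg_generic are hypothetical
# names introduced only for illustration.
OS_TO_DEVICE = {
    "MAC_OS_X": "Laptop/Desktop", "WINDOWS_7": "Laptop/Desktop", "WINDOWS_10": "Laptop/Desktop",
    "ANDROID_MOBILE": "Phone", "MAC_OS_X_IPHONE": "Phone",
    "ANDROID4_TABLET": "Tablet", "MAC_OS_X_IPAD": "Tablet",
}
def feat_engg_generic(df, os_to_device=OS_TO_DEVICE):
    df = df.rename(columns={"user_os": "viewing_device", "viewing_app": "browser"})
    df["viewing_device"] = df["viewing_device"].replace(os_to_device)
    # Collapse versioned browser names (e.g. CHROME45 -> CHROME) by stripping trailing digits.
    df["browser"] = df["browser"].str.replace(r"\d+$", "", regex=True)
    return df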
def create_share_info_df(df):
"""
New dataframe for all share data.
Input base dataframe, Output new share_df.
"""
share_df = df[df.activity_type == "SHARE"]
share_df = share_df.drop(["user_os", "viewing_app"], axis=1)
return share_df
def create_view_info_df(df):
"""
New dataframe for all view data.
Input base dataframe, Output new view_df.
"""
view_df = df[(df.activity_type == "VIEW") & (df.user_os != "NaN")]
view_df = view_df.drop(["share_channel", "user_groups", "shared_date", "shared_time"], axis =1)
view_app_list = view_df.viewing_app.unique()
view_app_dict = {}
for i in xrange(len(view_app_list)):
view_app_dict[view_app_list[i]]=i
return view_df
def plot_view_mins_per_hour(df1):
hour_grouped = df1.groupby("hour")["time_spent_mins"].sum()
plt.bar(xrange(0,24),hour_grouped)
return
def regr_model(df):
user_id_list = [j.user_id for i,j in df.iterrows() if j["activity_type"] == "VIEW"]
num_views = Counter(user_id_list)
best, views = [], []
#best = sorted(num_views, key=lambda x : num_views[x])
for k,v in num_views.iteritems():
if v<=20:
best.extend((k,v)), views.append(v)
def clustering(df1,df2,df3,df4):
    pass  # placeholder body so the module parses; clustering was left unimplemented
if __name__ == "__main__":
df = load_data("shareViewDataRM-ext.csv")
df = date_time_conversions(df)
df = add_days_from_share_to_view(df)
share_df = create_share_info_df(df)
view_df = create_view_info_df(df)
| [
"matplotlib",
"seaborn"
] |
a3713d90f9391160475c9e7dfd09bbbe9c6cc165 | Python | simpleyunus/twitter_sentiment_analysis | /app.py | UTF-8 | 1,514 | 2.90625 | 3 | [] | no_license | import streamlit as st
import joblib
import pandas as pd
import matplotlib.pyplot as plt
# import plotly.figure_factory as ff
from sklearn.feature_extraction.text import CountVectorizer
from nltk.sentiment.vader import SentimentIntensityAnalyzer
# loading model
rnd_clf = joblib.load('model.pkl')
analyzer = SentimentIntensityAnalyzer()
html_temp = """
<div style = "background-color:tomato;padding:10px">
<h2 style = "color:white;text-align:center;">Zimbabwe Politics Sentiment Analysis From Twitter Data</h2>
</div>
"""
st.markdown(html_temp,unsafe_allow_html=True)
st.header(" Mood Changes From December to June")
read = pd.read_csv('mymoods.csv')
moods = pd.DataFrame(read)
moods = moods.drop('Unnamed: 0', axis=1)
st.write(moods)
st.line_chart(moods)
st.write('Negative : 0')
st.write('Neutral : 1')
st.write('Positive : 2')
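# (Editor's illustrative sketch.) The VADER analyzer instantiated above is otherwise
# unused; assuming the conventional +/-0.05 compound-score thresholds, the same
# 0/1/2 labels could be derived directly from raw text like this:
def vader_label(text):
    compound = analyzer.polarity_scores(text)["compound"]
    if compound <= -0.05:
        return 0  # negative
    if compound >= 0.05:
        return 2  # positive
    return 1  # neutral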
st.info("Prediction Application Of A Tweet")
tweet = st.text_input("Enter Text to determine Polarity")
button = st.button("Predict")
text = pd.DataFrame(pd.read_csv('mydata.csv'))
text = text.drop('Unnamed: 0', axis=1)
te = text['clean_tweets']
def predict(text, tweet):
text[0] = tweet
bow_vectorizer = CountVectorizer(max_df=0.9, min_df=2, max_features=1000, stop_words='english')
vectorized_tweets = bow_vectorizer.fit_transform(text.values.astype('str'))
result = rnd_clf.predict(vectorized_tweets)
return result
if button:
tr = tweet
pol = predict(te, tr)
st.write(pol[0]) | [
"matplotlib",
"plotly"
] |
d2dce46d82b90328357627a205801e193cc5b88e | Python | Magnus-SI/FYS4150 | /Project4/mean_vals.py | UTF-8 | 5,295 | 2.8125 | 3 | [] | no_license | """
Script for calculating thermodynamical quantities for 2D ising model
"""
import warnings
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib as mpl
from numpy import cumsum
from cycler import cycler
warnings.filterwarnings("ignore")
plt.rcParams.update({'font.size': 14})
mpl.rcParams['axes.prop_cycle'] = cycler(color='bgrcmyk')
L = float(input("Dimension of spin matrix: "))
temp = float(input("Temperature: "))
tol = float(input("Configuration (0 or 0.5): "))
random_ordered = input("Add another configuration: [Y/n]: ")
test = False
data2 = False
data = pd.read_csv("data/mcdep_%d_%.2f_%.2f.csv"%(L, temp, tol))
E = data["E"]
mcs = data["mcs"]+1
M = data["M"]
acpt = data["acpt"]
N = len(mcs)-1
mE = cumsum(E)/mcs
mM = cumsum(M)/mcs
absM = cumsum(np.abs(M))/mcs
mE2 = cumsum(E**2)/mcs
mM2 = cumsum(M**2)/mcs
sigmaE2 = mE2 - mE**2
sigmaM2 = mM2 - absM**2
Cv = sigmaE2/temp**2
chi = sigmaM2/temp**2
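# The estimators above are the standard fluctuation relations (with k_B = 1):
#   C_v = (<E^2> - <E>^2) / T^2   and   chi = (<M^2> - <|M|>^2) / T^2
# (this script divides chi by T^2; the 1/T convention is also common in the literature).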
if random_ordered in ('Y', 'y'):
test = True
tol2 = float(input("Configuration (0 or 0.5): "))
data2 = pd.read_csv("data/mcdep_%d_%.2f_%.2f.csv"%(L, temp, tol2))
E_2 = data2["E"]
M_2 = data2["M"]
acpt_2 = data2["acpt"]
mE_2 = cumsum(E_2)/mcs
mM_2 = cumsum(M_2)/mcs
absM_2 = cumsum(np.abs(M_2))/mcs
mE2_2 = cumsum(E_2**2)/mcs
mM2_2 = cumsum(M_2**2)/mcs
sigmaE2_2 = mE2_2 - mE_2**2
Cv_2 = sigmaE2_2/temp**2
chi_2 = (mM2_2 - absM_2**2)/temp**2
plt.figure("me")
fig, ax1 = plt.subplots()
ax1.semilogx(mcs, mE/int(L**2), label='ordered', color='b')
ax1.semilogx([], [], label='random', color='g') # proxy handle so ax1's legend also lists the twin-axis curve
ax1.set_xlabel("Monte carlo cycles")
ax1.set_ylabel(r"$\left \langle E \right \rangle$")
ax1.set_title("Mean energy per spin")
ax1.tick_params(axis='y', labelcolor='b')
if test:
ax2 = ax1.twinx()
ax2.semilogx(mcs, mE_2/int(L**2), color='g')
ax2.tick_params(axis='y', labelcolor='g')
ax1.legend(loc="center right")
plt.tight_layout()
plt.savefig("figs/mean_energy_%d_%.2f_%.2f.pdf"%(L, temp, tol))
plt.close()
plt.figure("mm")
plt.semilogx(mcs, mM/int(L**2), label='ordered')
if test:
plt.semilogx(mcs, mM_2/int(L**2), label='random')
plt.legend()
plt.xlabel("Monte carlo cycles")
plt.ylabel(r"$\left \langle M \right \rangle$")
plt.title("Mean magnetisation per spin")
plt.tight_layout()
plt.savefig("figs/mean_magnetisation_%d_%.2f_%.2f.pdf"%(L, temp, tol))
plt.close()
plt.figure("m|m|")
plt.semilogx(mcs, absM/int(L**2), label='ordered')
if test:
plt.semilogx(mcs, absM_2/int(L**2), label='random')
plt.legend()
plt.xlabel("Monte carlo cycles")
plt.ylabel(r"$\left \langle |M| \right \rangle$")
plt.title("Absolute magnetisation per spin")
plt.tight_layout()
plt.savefig("figs/abs_magnetisation_%d_%.2f_%.2f.pdf"%(L, temp, tol))
plt.close()
plt.figure("sigmaE")
plt.loglog(mcs, sigmaE2/int(L**2), label='ordered')
if test:
plt.loglog(mcs, sigmaE2_2/int(L**2), label='random')
plt.legend()
plt.xlabel("Monte carlo cycles")
plt.ylabel(r"$\left \langle \sigma_E^2 \right \rangle$")
plt.title("Variance in energy")
plt.tight_layout()
plt.savefig("figs/sigmaE_%d_%.2f_%.2f.pdf"%(L, temp, tol))
plt.close()
plt.figure("cv")
plt.semilogx(mcs, Cv/int(L**2), label='ordered')
if test:
plt.semilogx(mcs, Cv_2/int(L**2), label='random')
plt.legend()
plt.xlabel("Monte carlo cycles")
plt.ylabel(r"$\left \langle C_v \right \rangle$")
plt.title("Mean specific heat")
plt.tight_layout()
plt.savefig("figs/Cv_%d_%.2f_%.2f.pdf"%(L, temp, tol))
plt.close()
plt.figure("acpt")
plt.loglog(mcs, acpt, label='ordered')
if test:
plt.loglog(mcs, acpt_2, label='random')
plt.legend()
plt.xlabel("Monte carlo cycles")
plt.ylabel(r"accepted")
plt.title("Configuration changes")
plt.tight_layout()
plt.savefig("figs/acpt_%d_%.2f_%.2f.pdf"%(L, temp, tol))
plt.close()
plt.figure("prob")
counts, bins = np.histogram(E[200:]/L**2, bins=int((E[200:].max()
-E[200:].min())/4)+1)
plt.hist(bins[:-1], bins, weights=counts/(N-199))
if temp > 2:
plt.axvline(mE[N]/int(L**2)+np.sqrt(sigmaE2[N])/L**2, color="r", ls="--")
plt.axvline(mE[N]/int(L**2)-np.sqrt(sigmaE2[N])/L**2, color="r", ls="--")
plt.xlabel("Energy per spin")
plt.ylabel("Probability")
plt.title("Probability distribution")
plt.tight_layout()
plt.savefig("figs/prob_%d_%.2f_%.2f.pdf"%(L, temp, tol))
plt.close()
print("Monte Carlo cycles: %d %d %d" %(mcs[int(N/100)], mcs[int(N/10)], mcs[N]))
print("Mean energy per spin: %.6f %.6f %.6f"
%(mE[int(N/100)]/int(L**2), mE[int(N/10)]/int(L**2), mE[N]/int(L**2)))
print("Mean magnetisation per spin: %.6f %.6f %.6f"
%(mM[int(N/100)]/int(L**2), mM[int(N/10)]/int(L**2), mM[N]/int(L**2)))
print("Mean absolute magnetisation per spin: %.6f %.6f %.6f"
%(absM[int(N/100)]/int(L**2), absM[int(N/10)]/int(L**2),
absM[N]/int(L**2)))
print("Mean specific heat per spin: %.7f %.7f %.7f"
%(Cv[int(N/100)]/int(L**2), Cv[int(N/10)]/int(L**2), Cv[N]/int(L**2)))
print("Magnetic susceptibility per spin: %.6e %.6e %.6e"
%(chi[int(N/100)]/int(L**2), chi[int(N/10)]/int(L**2), chi[N]/int(L**2)))
print("Standard deviation in energy per spin: %.6f %.6f %.6f"
%(np.sqrt(sigmaE2[int(N/100)])/L**2, np.sqrt(sigmaE2[int(N/10)])/L**2,
np.sqrt(sigmaE2[N])/L**2))
| [
"matplotlib"
] |
533bc4d002a7b3aea3b647f50a8b52ddf01f54e1 | Python | Kunal30/image-search-engine | /src/backend/utils.py | UTF-8 | 11,039 | 2.65625 | 3 | [] | no_license | import sys
import glob
import numpy as np
import pandas as pd
from scipy import spatial
from sklearn.metrics import confusion_matrix, accuracy_score, recall_score, precision_score, f1_score
import matplotlib.pyplot as plt
import matplotlib
from database_connection import DatabaseConnection
import os
from pathlib import Path
import pickle
from singular_value_decomposition import SingularValueDecomposition
from sklearn.decomposition import PCA
import pprint
ALPHA = 0.85
PICKLE_FILE_NAME = "page_rank_interim.pickle"
from tabulate import tabulate
def get_pickle_directory():
data_dir = get_data_directory()
path = str(Path(data_dir + '/pickle'))
if (not os.path.exists(path)):
os.mkdir(path)
return path
def get_data_directory():
path = str(Path(os.getcwd() + '/src/Data'))
return path
def get_image_directory(content_type='database_images'):
data_dir = get_data_directory()
if content_type == 'database_images':
return str(Path(data_dir + '/images'))
elif content_type == 'classification_images':
return str(Path(data_dir + '/phase3_sample_data'))
def get_dot_distance(vector1, vector2):
return np.dot(vector1, vector2)
def get_cosine_similarity(vector1, vector2):
return 1 - spatial.distance.cosine(vector1, vector2)
def get_euclidian_distance(vector1, vector2):
return np.linalg.norm(vector1 - vector2)
def plot_scree_test(eigen_values):
num_vars = len(eigen_values)
fig = plt.figure(figsize=(8, 5))
sing_vals = np.arange(num_vars) + 1
plt.plot(sing_vals, eigen_values, 'ro-', linewidth=2)
plt.title('Scree Plot')
plt.xlabel('K latent semantic')
plt.ylabel('Eigenvalue')
leg = plt.legend(['Eigenvalues from SVD'], loc='best', borderpad=0.3,
shadow=False, prop=matplotlib.font_manager.FontProperties(size='small'),
markerscale=0.4)
leg.get_frame().set_alpha(0.4)
plt.show()
def convert_folder_path_to_table_name(folder_name, pre_string="metadata"):
"""
:param folder_name: e.g. /Labelled/Set2
:param pre_string: pass the string to prepend before the folder name
:return:
"""
folder_name = folder_name.replace(" ", "")
folder_name = folder_name.replace("/", "_")
folder_name = folder_name.lower()
if (folder_name[0] == '_'):
table_name = pre_string + folder_name
else:
table_name = pre_string + "_" + folder_name
return table_name
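# Example: convert_folder_path_to_table_name("/Labelled/Set2") returns "metadata_labelled_set2".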
def get_most_m_similar_images(data_with_images, query_image_feature_vector, m):
"""
Author: Vibhu Varshney
This funcion computes the similarity score between the query image vector and the images in the database
:param data_with_images_: This is a dict/map with image name list and the data matrix
:param query_image_feature_vector : Query Image feature vector after applying the feature extraction model
:param Vt: This is the latent-vector by original feature matrix generated from either model
:param m: Number of similar images to be returned
:return: dictionary of m most similar images as keys and their scores as value
"""
db_data_matrix = data_with_images.get('data_matrix')
imageNames = data_with_images.get('images')
database_images_latent_vectors = db_data_matrix
# database_images_latent_vectors = np.dot(db_data_matrix, np.transpose(Vt))
query_image_latent_vector = query_image_feature_vector
# query_image_latent_vector = np.dot(np.array(query_image_feature_vector),Vt.T)
return get_top_m_tuples_by_similarity_score(database_images_latent_vectors,
query_image_latent_vector, imageNames,
m + 1) # +1 because the db contains the query image also
def get_top_m_tuples_by_similarity_score(database_images_latent_vectors, query_image_latent_vector, imageNames, m,
distance_measure="Euclidean"):
similar_images = get_similarity_score(database_images_latent_vectors, query_image_latent_vector, imageNames,
distance_measure)
if (distance_measure == "cosine"):
similar_images = sorted(similar_images.items(), key=lambda k: k[1], reverse=True)
else:
similar_images = sorted(similar_images.items(), key=lambda k: k[1])
top_m_tuples = similar_images[:m]
return top_m_tuples
def get_similarity_score(database_images_latent_vectors, query_image_latent_vector, imageNames,
distance_measure="Euclidean"):
"""
Author: Vibhu Varshney
:param database_images_latent_vectors:
:param query_image_latent_vector:
:param imageNames:
:return:
"""
similar_images = {}
for i in range(len(database_images_latent_vectors)):
imageName = imageNames[i]
db_latent_vector = database_images_latent_vectors[i]
if (distance_measure == "Euclidean"):
distance = get_euclidian_distance(query_image_latent_vector, db_latent_vector)
elif (distance_measure == "dot"):
distance = get_dot_distance(query_image_latent_vector, db_latent_vector)
elif (distance_measure == "cosine"):
distance = get_cosine_similarity(query_image_latent_vector, db_latent_vector)
similar_images[imageName] = distance
return similar_images
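# (Editor's illustrative sketch, not called anywhere.) For the Euclidean case the
# loop above can be vectorised with NumPy broadcasting; this assumes the latent
# vectors stack into a 2-D array with one row per database image.
def get_similarity_score_vectorised(database_images_latent_vectors, query_image_latent_vector, imageNames):
    diffs = np.asarray(database_images_latent_vectors) - np.asarray(query_image_latent_vector)
    dists = np.linalg.norm(diffs, axis=1)
    return dict(zip(imageNames, dists))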
def save_to_pickle(object_to_save, file_name):
pickle_directory = get_pickle_directory()
with open(os.path.join(pickle_directory, file_name), 'wb') as f:
pickle.dump(object_to_save, f)
f.close()
def read_from_pickle(file_name):
try:
pickle_directory = get_pickle_directory()
with open(os.path.join(pickle_directory,file_name), 'rb') as f:
data = pickle.load(f)
f.close()
return data
except Exception:
return None
def get_image_names_in_a_folder(relative_folder_path):
"""
Author: Vibhu Varshney
:param relative_folder_path: here give the path with a '/' ahead e.g. '/Labelled/Set 2'
:return:
list of image names
"""
data_dir = get_image_directory("classification_images")
path = str(Path(data_dir + relative_folder_path)) + '/*.jpg'
files = glob.glob(path)
image_names = [os.path.basename(x) for x in files]
return image_names
def get_svd_image_data_from_folder(relative_folder_path, k=10):
"""
:param relative_folder_path: here give the path with a '/' ahead e.g. '/Labelled/Set2'
:return:
data_matrix after applying SVD on it and also the image names present inside the relative_folder_path
"""
image_names = get_image_names_in_a_folder(relative_folder_path)
db_conn = DatabaseConnection()
data_image_dict = db_conn.HOG_descriptor_from_image_ids(image_names)
data_matrix = data_image_dict['data_matrix']
svd_obj = SingularValueDecomposition()
svd_image_data = svd_obj.get_transformed_data(data_matrix, k)
return svd_image_data, data_image_dict['images']
def get_filtered_images_by_label(labelled_images, filter_by):
return [x[0] for x in labelled_images if filter_by in x[1]]
def convert_tuple_to_dict(tuple):
dict = {}
for each in tuple:
dict[each[0]] = each[1]
return dict
def calculate_classification_accuracy(pred_labels, correct_labels):
cnt = 0
keys = pred_labels.keys()
for key in keys:
if (pred_labels[key] in correct_labels[key]):
cnt += 1
print(cnt)
return (cnt / len(pred_labels)) * 100
def get_train_and_test_dataframes_from_db(train_table, train_table_metadata, test_table, num_dims=None, algo="svd"):
images_not_present = False
label_map = {"dorsal": -1, "palmar": 1}
# retrieve data
db = DatabaseConnection()
train_dataset = db.get_object_feature_matrix_from_db(train_table)
test_dataset = db.get_object_feature_matrix_from_db(test_table)
# get out data matrix
train_data = train_dataset['data_matrix']
train_images = train_dataset['images']
test_data = test_dataset['data_matrix']
test_images = test_dataset['images']
# svd transform
if num_dims == None:
tf_train_data = train_data
tf_test_data = test_data
else:
if algo == "pca":
svd = PCA(n_components=num_dims)
tf_train_data = svd.fit_transform(train_data)
tf_test_data = svd.transform(test_data)
elif algo == "svd":
svd = SingularValueDecomposition(num_dims)
tf_train_data = svd.fit_transform(train_data)
tf_test_data = svd.transform(test_data)
# convert list of tuples to dict
train_labels_map = dict(db.get_correct_labels_for_given_images(train_images, 'aspectOfHand', train_table_metadata))
result_from_db = db.get_correct_labels_for_given_images(test_images, 'aspectOfHand')
if not result_from_db:
exp_test_labels_map = None
else:
exp_test_labels_map = dict(result_from_db)
# dataframe setup starts here
# train_df
train_col_names = ['imagename', 'hog_svd_descriptor', 'label']
train_df = pd.DataFrame(columns=train_col_names)
for i, image in enumerate(train_images):
temp = train_labels_map[image]
label = temp.split(' ')[0]
train_df.loc[len(train_df)] = [image, tf_train_data[i], label_map[label]]
# test_df
test_col_names = ['imagename', 'hog_svd_descriptor', 'expected_label', 'predicted_label']
test_df = pd.DataFrame(columns=test_col_names)
if exp_test_labels_map:
for i, image in enumerate(test_images):
temp = exp_test_labels_map[image]
label = temp.split(' ')[0]
test_df.loc[len(test_df)] = [image, tf_test_data[i], label_map[label], 'null']
else:
for i, image in enumerate(test_images):
images_not_present = True
test_df.loc[len(test_df)] = [image, tf_test_data[i], 'null', 'null']
return train_df, test_df, images_not_present
def get_result_metrics(classifier_name, y_expected, y_predicted):
y_expected = np.array(y_expected, dtype=int)
y_predicted = np.array(y_predicted, dtype=int)
# Predicting the Test set results
print("Results for classifier {0}".format(classifier_name))
accuracy = accuracy_score(y_expected, y_predicted)
# print("Accuracy score is: {}".format(accuracy))
precision = precision_score(y_expected, y_predicted)
# print("Precision score is: {}".format(precision))
recall = recall_score(y_expected, y_predicted)
# print("Recall score is: {}".format(recall))
f1 = f1_score(y_expected, y_predicted)
# print("F1 score is: {}".format(f1))
print("------Confusion Matirx------")
print(confusion_matrix(y_expected, y_predicted))
result = {"accuracy": accuracy, "precision": precision, "recall": recall, "f1": f1}
return result
def get_image_names_from_tuples(image_score_tuple):
image_names = []
for each_tuple in image_score_tuple:
image_names.append(each_tuple[0])
return image_names | [
"matplotlib"
] |
a2c0fc4f9361d486d2d2feced9a21910bdbdc4aa | Python | osomat123/TwitterDataAnalysis | /analytics.py | UTF-8 | 12,759 | 2.875 | 3 | [] | no_license | import json
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import datetime as dt
import pytz
sns.set()
class Tweet_Analyser:
def __init__(self):
self.search_terms = []
self.source_terms = []
self.df = pd.DataFrame()
self.tweets = []
def get_from_files(self, filepath):
with open(filepath, "r") as read:
data = json.load(read)
with open("search_terms.json","r") as read:
words = json.load(read)
for word in words:
self.search_terms.append(word['keyword'].upper())
for tweet in data:
for word in self.search_terms:
text = tweet["text"]
if word in text.upper():
self.tweets.append(tweet)
break
def get_source(self,tweet_source):
self.source_terms=["Android","iPhone","Web App","Web Client","Facebook","TweetDeck","iPad","Media Studio","Ads Composer"]
i = 0
for term in self.source_terms:
if term in tweet_source:
return term
return "Third Party"
def make_dataframe(self):
df1 = pd.DataFrame()
df2 = pd.DataFrame()
# Making dataframe from tweets in files
df1['TweetID'] = np.array([tweet["id_str"] for tweet in self.tweets])
df1['Text'] = np.array([tweet["text"] for tweet in self.tweets])
df1['TimeUTC'] = np.array([dt.datetime.strptime(tweet["created_at"], "%a %b %d %H:%M:%S %z %Y") for tweet in self.tweets])
df1['TimeIST'] = np.array([time.astimezone(pytz.timezone("Asia/Kolkata")) for time in df1.TimeUTC])
df1['Likes'] = np.array([tweet["favorite_count"] for tweet in self.tweets])
df1['Retweets'] = np.array([tweet["retweet_count"] for tweet in self.tweets])
df1['Source'] = np.array([self.get_source(tweet["source"]) for tweet in self.tweets])
# Making dataframe from the original tweets of the retweeted tweets
df2['TweetID'] = np.array([tweet["retweeted_status"]["id_str"] for tweet in self.tweets if "RT @" in tweet["text"]])
df2['Text'] = np.array([tweet["retweeted_status"]["text"] for tweet in self.tweets if "RT @" in tweet["text"]])
df2['TimeUTC'] = np.array([dt.datetime.strptime(tweet["retweeted_status"]["created_at"], "%a %b %d %H:%M:%S %z %Y") for tweet in self.tweets if "RT @" in tweet["text"]])
df2['TimeIST'] = np.array([time.astimezone(pytz.timezone("Asia/Kolkata")) for time in df2.TimeUTC])
df2['Likes'] = np.array([tweet["retweeted_status"]["favorite_count"] for tweet in self.tweets if "RT @" in tweet["text"]])
df2['Retweets'] = np.array([tweet["retweeted_status"]["retweet_count"] for tweet in self.tweets if "RT @" in tweet["text"]])
df2['Source'] = np.array([self.get_source(tweet["retweeted_status"]["source"])for tweet in self.tweets if "RT @" in tweet["text"]])
self.df = pd.concat([df1, df2],sort=True)
self.df.drop_duplicates(subset="TweetID",keep="first",inplace=True) # Removing duplicate tweets
self.df.reset_index(inplace=True)
keywords = []
remove_tweets = []
for i in range(len(self.df)):
j = 0
for word in self.search_terms:
text=self.df["Text"][i]
if word in text.upper():
keywords.append(word)
j = 1
break
if j == 0: # Removing unwanted tweets
remove_tweets.append(i)
self.df.drop(remove_tweets,inplace=True)
self.df["Keyword"]=keywords
self.df.reset_index(inplace=True)
self.df["Keyword"]=keywords
def getStats(self):
print(self.df.describe())
# Histogram of number of tweets at various hours of day
def plot_time_hist(self):
plt.hist([time.hour for time in self.df["TimeIST"]], bins=24, edgecolor="black", linewidth=1.2)
font_labels = {'family': 'serif', 'color': 'darkred', 'weight': 'bold', 'size': 18}
font_title = {'family': 'serif', 'color': 'darkred', 'weight': 'bold', 'size': 20}
plt.xticks(np.arange(2, 24, step=2),fontweight="bold")
plt.yticks(np.arange(500, 4500, step=500),fontweight="bold")
plt.xlabel("Hour of Day",fontdict=font_labels)
plt.ylabel("Number of tweets",fontdict=font_labels)
plt.title("Variation in number of tweets at various hours of the day",fontdict=font_title)
plt.show()
# Bar plot of number of tweets containing each of the provided keywords
def plot_keyword_bar(self):
num_of_tweets = [len([key for key in self.df["Keyword"] if key == keyword]) for keyword in self.search_terms]
arr = np.arange(len(self.search_terms))
plt.bar(arr,num_of_tweets,edgecolor="black",linewidth=1.2)
font_labels = {'family': 'serif', 'color': 'darkred', 'weight': 'bold', 'size': 18}
font_data = {'family': 'serif', 'color': 'darkred', 'weight': 'bold'}
font_title = {'family': 'serif', 'color': 'darkred', 'weight': 'bold', 'size': 20}
plt.xticks(arr,self.search_terms,fontweight="bold")
plt.yticks(np.arange(0, 6000, step=1000),fontweight="bold")
plt.xlabel("Keywords Given",fontdict=font_labels)
plt.ylabel("Number of tweets",fontdict=font_labels)
for i in arr:
plt.text(i,num_of_tweets[i]+35,str(num_of_tweets[i]),horizontalalignment='center',fontdict=font_data)
plt.title("Variation in number of tweets containing each of the provided keywords",fontdict=font_title)
plt.show()
# Bar plot of maximum number of likes for each of the provided keywords
def plot_maxLikes_bar(self):
max_likes=[]
for keyword in self.search_terms:
max_likes.append(self.df[self.df["Keyword"] == keyword]["Likes"].max())
arr = np.arange(len(self.search_terms))
plt.bar(arr, max_likes, edgecolor="black",linewidth=1.2)
font_labels={'family': 'serif','color': 'darkred','weight': 'bold','size': 18}
font_data = {'family': 'serif', 'color': 'darkred', 'weight': 'bold'}
font_title = {'family': 'serif', 'color': 'darkred', 'weight': 'bold', 'size': 20}
plt.xticks(arr,self.search_terms,fontweight="bold")
plt.yticks(np.arange(0, 400000, step=50000),fontweight="bold")
plt.xlabel("Keywords Given",fontdict=font_labels)
plt.ylabel("Maximum Likes", fontdict=font_labels)
for i in arr:
plt.text(i,max_likes[i]+45,str(max_likes[i]),horizontalalignment='center',fontdict=font_data)
plt.title("Variation in number of likes for each of the provided keywords",fontdict=font_title)
plt.show()
# Bar plot of mean and standard deviation of likes for each of the provided keywords
def plot_likesMeanStd_bar(self):
mean_likes = []
std_likes =[]
for keyword in self.search_terms:
std_likes.append(int(self.df[self.df["Keyword"] == keyword]["Likes"].std()))
mean_likes.append(int(self.df[self.df["Keyword"] == keyword]["Likes"].mean()))
arr = np.arange(len(self.search_terms))
plt.bar(arr+0.2, std_likes, edgecolor="black", linewidth=1.2, label="Standard deviation of likes",width=0.4)
plt.bar(arr-0.2, mean_likes, edgecolor="black", linewidth=1.2, label="Mean number of likes",width=0.4)
font_labels = {'family': 'serif', 'color': 'darkred', 'weight': 'bold', 'size': 18}
font_data = {'family': 'serif', 'color': 'darkred', 'weight': 'bold'}
font_title = {'family': 'serif', 'color': 'darkred', 'weight': 'bold', 'size': 20}
plt.xticks(arr, self.search_terms, fontweight="bold")
plt.yticks(np.arange(0, 14000, step=2000), fontweight="bold")
plt.xlabel("Keywords Given", fontdict=font_labels)
plt.ylabel("Mean/Standard Deviation of Likes", fontdict=font_labels)
for i in arr:
plt.text(i-0.2, mean_likes[i] + 45, str(mean_likes[i]), horizontalalignment='center', fontdict=font_data)
plt.text(i+0.2, std_likes[i] + 45, str(std_likes[i]), horizontalalignment='center', fontdict=font_data)
plt.legend()
plt.title("Variation in number of likes for each of the provided keywords", fontdict=font_title)
plt.show()
# Bar plot of maximum number of likes for each of the provided keywords
def plot_maxRetweets_bar(self):
max_likes = []
for keyword in self.search_terms:
max_likes.append(self.df[self.df["Keyword"] == keyword]["Retweets"].max())
arr = np.arange(len(self.search_terms))
plt.bar(arr, max_likes, edgecolor="black", linewidth=1.2)
font_labels = {'family': 'serif', 'color': 'darkred', 'weight': 'bold', 'size': 18}
font_data = {'family': 'serif', 'color': 'darkred', 'weight': 'bold'}
font_title = {'family': 'serif', 'color': 'darkred', 'weight': 'bold', 'size': 20}
plt.xticks(arr, self.search_terms, fontweight="bold")
plt.yticks(np.arange(0, 100000, step=10000), fontweight="bold")
plt.xlabel("Keywords Given", fontdict=font_labels)
plt.ylabel("Maximum Retweets", fontdict=font_labels)
for i in arr:
plt.text(i, max_likes[i] + 45, str(max_likes[i]), horizontalalignment='center', fontdict=font_data)
plt.title("Variation in number of retweets for each of the provided keywords", fontdict=font_title)
plt.show()
# Bar plot of mean and standard deviation of likes for each of the provided keywords
def plot_retweetsMeanStd_bar(self):
mean_likes = []
std_likes = []
for keyword in self.search_terms:
std_likes.append(int(self.df[self.df["Keyword"] == keyword]["Retweets"].std()))
mean_likes.append(int(self.df[self.df["Keyword"] == keyword]["Retweets"].mean()))
arr = np.arange(len(self.search_terms))
plt.bar(arr + 0.2, std_likes, edgecolor="black", linewidth=1.2, label="Standard deviation of retweets",width=0.4)
plt.bar(arr - 0.2, mean_likes, edgecolor="black", linewidth=1.2, label="Mean number of retweets", width=0.4)
font_labels = {'family': 'serif', 'color': 'darkred', 'weight': 'bold', 'size': 18}
font_data = {'family': 'serif', 'color': 'darkred', 'weight': 'bold'}
font_title = {'family': 'serif', 'color': 'darkred', 'weight': 'bold', 'size': 20}
plt.xticks(arr, self.search_terms, fontweight="bold")
plt.yticks(np.arange(0, 14000, step=2000), fontweight="bold")
plt.xlabel("Keywords Given", fontdict=font_labels)
plt.ylabel("Mean/Standard Deviation of Retweets", fontdict=font_labels)
for i in arr:
plt.text(i - 0.2, mean_likes[i] + 45, str(mean_likes[i]), horizontalalignment='center',fontdict=font_data)
plt.text(i + 0.2, std_likes[i] + 45, str(std_likes[i]), horizontalalignment='center',fontdict=font_data)
plt.legend()
plt.title("Variation in number of retweets for each of the provided keywords", fontdict=font_title)
plt.show()
def plot_tweetSource_bar(self):
self.source_terms.append("Third Party")
num_of_tweets = [len([source for source in self.df["Source"] if source == term]) for term in self.source_terms]
arr = np.arange(len(self.source_terms))
plt.bar(arr, num_of_tweets, edgecolor="black", linewidth=1.2)
font_labels = {'family': 'serif', 'color': 'darkred', 'weight': 'bold', 'size': 18}
font_data = {'family': 'serif', 'color': 'darkred', 'weight': 'bold'}
font_title = {'family': 'serif', 'color': 'darkred', 'weight': 'bold', 'size': 20}
plt.xticks(arr, self.source_terms, fontweight="bold")
plt.yticks(np.arange(0, 6000, step=1000), fontweight="bold")
plt.xlabel("Tweet Sources", fontdict=font_labels)
plt.ylabel("Number of tweets", fontdict=font_labels)
for i in arr:
plt.text(i, num_of_tweets[i] + 35, str(num_of_tweets[i]), horizontalalignment='center', fontdict=font_data)
plt.title("Variation in number of tweets from each source", fontdict=font_title)
plt.show()
print(self.source_terms)
if __name__ == "__main__":
analyser = Tweet_Analyser()
analyser.get_from_files("tweet_d1fdaab1-b8c1-4f6b-b7e9-17844d8d6186.json")
analyser.make_dataframe()
analyser.plot_keyword_bar()
analyser.plot_time_hist()
analyser.plot_maxLikes_bar()
analyser.plot_likesMeanStd_bar()
analyser.plot_maxRetweets_bar()
analyser.plot_retweetsMeanStd_bar()
analyser.plot_tweetSource_bar()
| [
"matplotlib",
"seaborn"
] |
3b9625cfeb2b19474e26c38a1a16cc65a56c6c20 | Python | Tamiyas/Multilayer-perceptron | /kNN.py | UTF-8 | 3,257 | 3.40625 | 3 | [
"MIT"
] | permissive | """k-NN(k近傍法, k-Nearest Neighbors)の実装.
Author:
T.Miyaji
Date:
2018/05/28
References:
http://blog.amedama.jp/entry/2017/03/18/140238
"""
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
class kNN:
"""k-NN.
Args:
data: training data.
k: number of neighbors used for classification.
"""
def __init__(self, data, k = 1):
(self.X, self.Y) = data.get()
self.k = k
def predict(self):
"""k-NNアルゴリズムを用いてクラス(0 or 1)を予測する関数.
"""
Correct = 0
for (i, x) in enumerate(self.X):
voted_class = self.nearest_neighbor(x)
if voted_class == self.Y[i, :]:
Correct += 1
print('input {0}, expected {1}, predicted {2}'.format(x, self.Y[i, :], voted_class))
print('Accuracy {:.2%}'.format(Correct / float(self.X.shape[0])))
def nearest_neighbor(self, x):
"""k-NNアルゴリズムの実装.
Args:
x: 注目点.
Returns:
近傍の点でクラスの多数決をして最も多いクラス(0, 1)を返す.
"""
# Build the vector of distances between every training point p and the query point x.
distances = np.array([self.distance(p, x) for p in self.X])
# Sort by distance and keep the indices of the k nearest points.
nearest_indexes = distances.argsort()[:self.k]
# Look up the classes of those k points.
nearest_classes = self.Y[nearest_indexes]
# Return the most frequent class among the k retrieved points.
return self.majority_vote(nearest_classes)
def distance(self, x1, x2):
"""2点間の距離を計算する関数.
Note:
今回の実装はユークリッド距離だが, マンハッタン距離でも問題ない.
さらに、ユークリッド距離の2乗でも問題ない
Args:
x1: 2次元空間の座標.
x2: 2次元空間の座標.
Returns:
ユークリッド距離.
"""
return np.sqrt(np.sum((x1 - x2) ** 2))
def majority_vote(self, classes):
"""リストの中で最も出現する値を返す関数.
Args:
classes: クラス(0 or 1)が格納されたリスト.
Returns:
クラス(0 or 1).
"""
return 0 if (np.sum(classes == 0) > np.sum(classes == 1)) else 1
def decision_boundary(self, step = 0.02):
"""決定境界をプロットする関数.
Args:
step: 座標のステップ数.
"""
if(self.X.shape[1] != 2):
return
(x_min, x_max) = (self.X[:, 0].min() - 0.5, self.X[:, 0].max() + 0.5)
(y_min, y_max) = (self.X[:, 1].min() - 0.5, self.X[:, 1].max() + 0.5)
# Create the grid points.
(xx, yy) = np.meshgrid(np.arange(x_min, x_max, step), np.arange(y_min, y_max, step))
X = np.c_[xx.ravel(), yy.ravel()]
Z = np.array([self.nearest_neighbor(X[i, :]) for i in tqdm(range(X.shape[0]))])
Z = np.reshape(Z, xx.shape)
plt.xlim(x_min, x_max)
# Plot the decision regions.
plt.contourf(xx, yy, Z, cmap = plt.cm.Spectral, alpha = 0.8)
# Plot the input data points.
plt.scatter(self.X[:, 0], self.X[:, 1], c = self.Y[:, 0], cmap = plt.cm.Spectral, s = 15)
plt.colorbar()
plt.show()
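# (Editor's usage sketch.) `data` is any object whose get() returns the pair (X, Y),
# as assumed by __init__ above; the concrete data loader lives elsewhere in this project.
#   knn = kNN(data, k=3)
#   knn.predict()
#   knn.decision_boundary(step=0.05)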
| [
"matplotlib"
] |
8307687258313879f9669600a2f7a5538ebd0634 | Python | shell-done/Spongo | /training/data_preprocessing/plots/confusion_matrix.py | UTF-8 | 2,735 | 2.78125 | 3 | [
"MIT"
] | permissive | import pandas as pd
import numpy as np
import seaborn as sn
import matplotlib.pyplot as plt
excel_conf = {
"Ball": {
"TP": 3,
"FP": 1,
"FN": 14,
"o": []
},
"Vase": {
"TP": 19,
"FP": 2,
"FN": 1,
"o": []
},
"Corona": {
"TP": 5,
"FP": 1,
"FN": 2,
"o": []
},
"Red": {
"TP": 10,
"FP": 2,
"FN": 7,
"o": []
},
"Crown": {
"TP": 6,
"FP": 0,
"FN": 1,
"o": []
},
"Grey_white": {
"TP": 1,
"FP": 0,
"FN": 5,
"o": []
}
}
excel_conf = {
"A": {
"TP": 15,
"FP": 2,
"FN": 3,
"o": []
},
"B": {
"TP": 8,
"FP": 0,
"FN": 1,
"o": []
},
"C": {
"TP": 5,
"FP": 1,
"FN": 0,
"o": ["A"]
},
"D": {
"TP": 8,
"FP": 0,
"FN": 0,
"o": []
}
}
data = {
"y_actual": [],
"y_predicted": []
}
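# Expand the per-class counts into individual (actual, predicted) label pairs:
# each TP contributes (k, k), each FP ('background FP', k), each FN (k, 'background FN'),
# and entries in "o" record cross-class confusions (k predicted as another listed class).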
for k,v in excel_conf.items():
tp = [k]*v["TP"]
data["y_actual"] += tp
data["y_predicted"] += tp
fp_a = ["background FP"]*v["FP"]
fp_p = [k]*v["FP"]
data["y_actual"] += fp_a
data["y_predicted"] += fp_p
fn_a = [k]*v["FN"]
fn_p = ["background FN"]*v["FN"]
data["y_actual"] += fn_a
data["y_predicted"] += fn_p
if len(v["o"]) > 0:
o_a = [k]*len(v["o"])
o_p = v["o"]
data["y_actual"] += o_a
data["y_predicted"] += o_p
df = pd.DataFrame(data, columns=['y_actual','y_predicted'])
confusion_matrix = pd.crosstab(df['y_actual'], df['y_predicted'], rownames=['Actual'], colnames=['Predicted'], margins=False, normalize="index")
index_order = ["Ball", "Vase", "Corona", "Red", "Crown", "Grey_white", "background FP"]
columns_order = ["Ball", "Vase", "Corona", "Red", "Crown", "Grey_white", "background FN"]
index_order = ["A", "B", "C", "D", "background FP"]
columns_order = ["A", "B", "C", "D", "background FN"]
confusion_matrix = confusion_matrix.reindex(index_order, columns=columns_order)
#confusion_matrix_norm = confusion_matrix / confusion_matrix.sum(axis=1)
plt.figure(figsize=(8.21, 7.03), dpi=120)
ax = sn.heatmap(confusion_matrix, annot=True, cmap='viridis', vmin=0, vmax=1)
ax.xaxis.tick_top()
ax.xaxis.set_label_position('top')
plt.xlabel("Prédiction", fontsize=16, labelpad=-10)
plt.ylabel("Réalité", fontsize=16)
plt.xticks(rotation=45, fontsize=12)
plt.yticks(rotation=45, fontsize=12)
plt.tight_layout()
plt.savefig("fig1.png", dpi=120)
plt.show() | [
"matplotlib",
"seaborn"
] |
29c5d9ab3847d51761530d095a63c703090f6c1b | Python | AnalystsTeam/Term_DepositSubscription_Project | /Modelbuilding_Finalproject.py | UTF-8 | 14,714 | 2.921875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Tue Apr 25 12:08:57 2021
@author: mumta
"""
# Importing required libraries and modules
from pyspark.sql import SparkSession
from pyspark import SparkContext
from pyspark import SparkConf
from pyspark.sql import SQLContext
from pyspark.ml import Pipeline
from pyspark.sql.functions import col, when
import pyspark.sql.functions as F
from pyspark.ml.feature import OneHotEncoder, StringIndexer, VectorAssembler
from pyspark.ml.feature import MinMaxScaler
from pyspark.ml.classification import RandomForestClassifier
from pyspark.ml.classification import LogisticRegression
import pandas as pd
import seaborn as sns
import numpy as np
import matplotlib.pyplot as plt
from pyspark.ml.evaluation import BinaryClassificationEvaluator, MulticlassClassificationEvaluator
from pyspark.mllib.evaluation import MulticlassMetrics
from pyspark.mllib.evaluation import BinaryClassificationMetrics
from sklearn.metrics import roc_curve, auc
from pyspark.sql.functions import isnan, when, count, col
conf = SparkConf().setAppName(" Classifying Term Deposit Subscriptions for a bank Project").setMaster("local")
sc = SparkContext(conf=conf)
spark = SparkSession(sc)
sql_context = SQLContext(sc)
PATH = r"C:/Users\mumta/Documents/Mumtaz- College/Course/Spring 2021/CIS-5367 - MACHINE LEARNING/Project"
bank_data=sql_context.read.load("%s/bank-full.csv" % PATH,
format='com.databricks.spark.csv',
header='true',
inferSchema='true')
print(bank_data.head(5))
bank_data.count()
#Output Variable is y. Changing column "y" to "Target" for further processing
bank_data=bank_data.withColumnRenamed("y","target")
bank_data.show()
### Get count of nan or missing values in dataset
bank_data.select([count(when(isnan(c), c)).alias(c) for c in bank_data.columns]).show()
# Percentage of each category of target variable
bank_data.groupby('target').agg(
(F.count('target')).alias('count'),
(F.count('target') / bank_data.count()).alias('percentage')
).show()
# UpSampling to handle imbalanced dataset
major_df = bank_data.filter(col("target") == "no")
minor_df = bank_data.filter(col("target") == "yes")
ratio = int(major_df.count()/minor_df.count())
print(ratio)
df_b_oversampled = minor_df.sample(True,fraction=float(ratio), seed=1)
combined_df = major_df.unionAll(df_b_oversampled)
combined_df.count()
print (" Total count of dataset after UpSampling", combined_df.count())
# Count of each category in target class
combined_df.groupby('target').count().show()
# Feature Selection for the model
# We keep all of the variables except day and month, which are not really useful, so those two columns are dropped
# Selecting variables or features to be used for further processing
combined_df=combined_df.select('age', 'job', 'marital', 'education', 'default', 'balance', 'housing', 'loan', 'campaign', 'pdays', 'previous', 'poutcome', 'target')
combined_df.show(5)
combined_df.count()
cols=combined_df.columns
# Handling categorical columns for our model
# String Indexing, One Hot Encoding and Vector Assembling:
# Convert categorical features to numeric features using One hot encoding
#SELECTING CATEGORICAL COLUMNS ONLY
categoricalColumns = ['job','marital','education','default','housing','loan','poutcome']
#CREATING AN EMPTY LIST FOR PIPELINE AND ASSEMBLER
stages = []
#APPLYING FOR LOOP TO INDEX AND ENCODE ALL THE SELECTED COLUMNS
#APPLYING STRING INDEXER TO ALL THE CATEGORICAL COLUMNS AND STORING IT IN A NEW COLUMN WITH +INDEXED
#APPLYING ONE HOT ENCODER TO ALL THE INDEXED COLUMNS AND STORING IT IN A NEW COLUMN WITH +ENCODED
for categoricalCol in categoricalColumns:
stringIndexer = StringIndexer(inputCol = categoricalCol, outputCol = categoricalCol + 'Index')
encoder = OneHotEncoder(inputCols=[stringIndexer.getOutputCol()], outputCols=[categoricalCol + "classVec"])
stages += [stringIndexer, encoder]
#INDEXING PREDICTOR COLUMN 'DEPOSIT' AS LABEL AND FEATURES
target_stringIndex = StringIndexer(inputCol = 'target', outputCol = 'label')
#CREATING STAGES FOR BOTH NUMERICAL AND CATEGORICAL COLUMNS
stages += [target_stringIndex]
# Transform all features into a vector using VectorAssembler
# ADDING BOTH To ASSEMBLER
numericalColumns = ['age', 'balance', 'campaign', 'pdays', 'previous']
assemblerInputs = [c + "classVec" for c in categoricalColumns] + numericalColumns
# VECTORIZING TO CREATE A NEW FEATURES COLUMN WITH INDEXED AND ENCODED VALUES
assembler = VectorAssembler(inputCols=assemblerInputs, outputCol="features")
stages += [assembler]
# Run the stages as a Pipeline. This puts the data through all of the feature transformations in a single call.
# COMBINING ALL THE STAGES INTO ONE, FITTING combined_df AND TRANSFORMING IT
pipeline = Pipeline(stages = stages)
pipelineModel = pipeline.fit(combined_df)
df = pipelineModel.transform(combined_df)
df.show()
df.count()
df.select("features").show(truncate=0)
# Normalization using min max scaler:
# The feature vector has been finally normalized using the min-max scaler in pyspark and transformed as below:
# Apply Min-Max normalisation on each attribute using MinMaxScaler
scaler = MinMaxScaler(inputCol="features", outputCol="scaledFeatures")
scalerModel = scaler.fit(df)
scaledBankData = scalerModel.transform(df)
scaledBankData.select("features", "scaledFeatures").show(truncate=0)
#To check values convert to PANDAS dataframe
pd.set_option('display.max_rows', None)
pd.set_option('display.max_columns', None)
pd.set_option('display.width', None)
pd.set_option('display.max_colwidth', -1)
scaledBankData.select('scaledFeatures').toPandas().head(1)
# Model Building
#The dataset has been split at 70:30. 70% of the dataset has been kept for training the supervised learning models and 30% of the dataset has been kept for testing the dataset.
train, test = scaledBankData.randomSplit([0.7, 0.3], seed = 742)
print("Training Dataset Count: " + str(train.count()))
print("Test Dataset Count: " + str(test.count()))
# Logistic Regression modelling
lr = LogisticRegression(featuresCol = 'scaledFeatures', labelCol = 'label', maxIter=10)
LRModel = lr.fit(train)
# predicting on testing set
LRpredictions = LRModel.transform(test)
LRpredictions.show(truncate=False)
# Evaluation Metrics for Testing Set
# USING BINARY CLASS EVALUATOR FOR TEST AREA UNDER ROC CALCULATION
evaluator = BinaryClassificationEvaluator()
print('Test Area Under ROC', evaluator.evaluate(LRpredictions))
# ROC curve
PredAndLabels_lr = LRpredictions.select("probability", "label").collect()
PredAndLabels_list_lr = [(float(i[0][0]), 1.0-float(i[1])) for i in PredAndLabels_lr]
y_test_lr = [i[1] for i in PredAndLabels_list_lr]
y_score_lr = [i[0] for i in PredAndLabels_list_lr]
fpr1, tpr1, _ = roc_curve(y_test_lr, y_score_lr)
roc_auc_lr = auc(fpr1, tpr1)
plt.figure(figsize=(8,8))
plt.plot(fpr1, tpr1, label='ROC curve (area = %0.2f)' % roc_auc_lr)
plt.plot([0, 1], [0, 1], 'k--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate',fontsize=20)
plt.ylabel('True Positive Rate',fontsize=20)
plt.title('ROC Curve - Logistic Regression',fontsize=20)
plt.legend(loc="lower right")
plt.show()
# PRINTING ONLY LABEL AND PREDICTION FOR ACCURACY CALCULATION
accdf=LRpredictions.select("label","prediction").show(5)
# Metric Evaluators for the model
# Calculating evaluation metrics of the Logistic regression model
# Calculating the True Positive, True Negative, False Positive and False Negative categories
TrueNegative = LRpredictions.filter('prediction = 0 AND label = prediction').count()
TruePositive = LRpredictions.filter('prediction = 1 AND label = prediction').count()
FalseNegative = LRpredictions.filter('prediction = 0 AND label <> prediction').count()
FalsePositive = LRpredictions.filter('prediction = 1 AND label <> prediction').count()
print("TN,FP,FN,TP",TrueNegative,FalsePositive,FalseNegative,TruePositive)
# Accuracy, precision, recall, f1-score
accuracy = (TrueNegative + TruePositive) / (TrueNegative + TruePositive + FalseNegative + FalsePositive)
precision = TruePositive / (TruePositive + FalsePositive)
recall = TruePositive / (TruePositive + FalseNegative)
F = 2 * (precision*recall) / (precision + recall)
print('\n Precision: %0.3f' % precision)
print('\n Recall: %0.3f' % recall)
print('\n Accuracy: %0.3f' % accuracy)
print('\n F1 score: %0.3f' % F)
# Area under ROC curve
evaluator = BinaryClassificationEvaluator()
print('Test Area Under ROC', evaluator.evaluate(LRpredictions))
# CONFUSION MATRIX
# check labels for target class
class_temp = LRpredictions.select("label").groupBy("label")\
.count().sort('count', ascending=False).toPandas()
class_temp = class_temp["label"].values.tolist()
predandlabel=LRpredictions.select( 'label', 'prediction').rdd
metrics = MulticlassMetrics(predandlabel)
print(metrics.confusionMatrix())
cm=metrics.confusionMatrix().toArray()
print(cm)
# Forming the confusion matrix plot
f, ax = plt.subplots(figsize=(12, 8))
sns.heatmap(cm, annot=True, fmt=".1f", linewidths=.5, ax=ax,cmap=plt.cm.Blues)
sns.set(font_scale=2.5)
plt.title("Confusion Matrix", fontsize=20)
plt.subplots_adjust(left=0.15, right=0.99, bottom=0.15, top=0.99)
ax.set_yticks(np.arange(cm.shape[0]) + 0.5, minor=False)
ax.set_xticks(np.arange(cm.shape[0]) + 0.5, minor=False)
plt.ylabel('Actual label',size = 30)
plt.xlabel('Predicted label',size = 30)
plt.show()
###############################################
# Random Forest modelling
rf = RandomForestClassifier(featuresCol = 'scaledFeatures', labelCol = 'label', maxDepth=10)
RFModel = rf.fit(train)
# predicting on testing set
rfpredictions = RFModel.transform(test)
rfpredictions.show(truncate=False)
# Evaluation Metrics for Testing Set
# USING BINARY CLASS EVALUATOR FOR TEST AREA UNDER ROC CALCULATION
evaluator = BinaryClassificationEvaluator()
print('Test Area Under ROC', evaluator.evaluate(rfpredictions))
# Plot the ROC curve
PredAndLabels_rf = rfpredictions.select("probability", "label")
PredAndLabels_collect_rf = PredAndLabels_rf.collect()
PredAndLabels_list_rf = [(float(i[0][0]), 1.0-float(i[1])) for i in PredAndLabels_collect_rf]
y_test_rf = [i[1] for i in PredAndLabels_list_rf]
y_score_rf = [i[0] for i in PredAndLabels_list_rf]
fpr2, tpr2, _ = roc_curve(y_test_rf, y_score_rf)
roc_auc_rf = auc(fpr2, tpr2)
plt.figure(figsize=(8,8))
plt.plot(fpr2, tpr2, label='ROC curve (area = %0.2f)' % roc_auc_rf)
plt.plot([0, 1], [0, 1], 'k--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate',fontsize=20)
plt.ylabel('True Positive Rate',fontsize=20)
plt.title('ROC Curve - Random Forest',fontsize=20)
plt.legend(loc="lower right")
plt.show()
#PRINTING ONLY LABEL AND PREDICTION FOR ACCURACY CALCULATION
accdf=rfpredictions.select("label","prediction").show(5)
# Metric Evaluators
#Calculating metrics of the Random Forest model
TrueNegative_rf = rfpredictions.filter('prediction = 0 AND label = prediction').count()
TruePositive_rf = rfpredictions.filter('prediction = 1 AND label = prediction').count()
FalseNegative_rf = rfpredictions.filter('prediction = 0 AND label <> prediction').count()
FalsePositive_rf= rfpredictions.filter('prediction = 1 AND label <> prediction').count()
print("TN,FP,FN,TP",TrueNegative_rf,FalsePositive_rf,FalseNegative_rf,TruePositive_rf)
accuracy = (TrueNegative_rf + TruePositive_rf) / (TrueNegative_rf + TruePositive_rf + FalseNegative_rf + FalsePositive_rf)
precision = TruePositive_rf / (TruePositive_rf + FalsePositive_rf)
recall = TruePositive_rf / (TruePositive_rf + FalseNegative_rf)
F = 2 * (precision*recall) / (precision + recall)
print('\n Precision: %0.3f' % precision)
print('\n Recall: %0.3f' % recall)
print('\n Accuracy: %0.3f' % accuracy)
print('\n F1 score: %0.3f' % F)
evaluator = BinaryClassificationEvaluator()
print('Test Area Under ROC', evaluator.evaluate(rfpredictions))
#CONFUSION MATRIX
predandlabel=rfpredictions.select( 'label', 'prediction').rdd
metrics = MulticlassMetrics(predandlabel)
print(metrics.confusionMatrix())
# PLOTTING HEATMAP OF ALL THE METRICS PARAMETERS
cm1=metrics.confusionMatrix().toArray()
f, ax = plt.subplots(figsize=(12, 8))
sns.heatmap(cm1, annot=True, fmt=".1f", linewidths=.5, ax=ax,cmap=plt.cm.Blues)
sns.set(font_scale=2.5)
plt.title("Confusion Matrix", fontsize=20)
plt.subplots_adjust(left=0.15, right=0.99, bottom=0.15, top=0.99)
ax.set_yticks(np.arange(cm1.shape[0]) + 0.5, minor=False)
ax.set_xticks(np.arange(cm1.shape[0]) + 0.5, minor=False)
plt.ylabel('Actual label',size = 30)
plt.xlabel('Predicted label',size = 30)
plt.show()
# Creating a table for the evalution metrics for both logistic regression and random forest
binary_eval = BinaryClassificationEvaluator(labelCol = "label")
multi_eval_acc = MulticlassClassificationEvaluator(labelCol = "label", metricName = "accuracy")
# Create pandas data frame to store the result of our model metrics
model_metrics = pd.DataFrame(columns = ["Dataset", "Code", "Accuracy", "ROC"])
model_metrics = model_metrics.append(
{
"Dataset" : "Testing Set",
"Code" : "Random Forest",
"Accuracy" : multi_eval_acc.evaluate(rfpredictions),
"ROC" : binary_eval.evaluate(rfpredictions)
},
ignore_index = True
)
model_metrics = model_metrics.append(
{
"Dataset" : "Testing Set",
"Code" : "Logistic Regression",
"Accuracy" : multi_eval_acc.evaluate(LRpredictions),
"ROC" : binary_eval.evaluate(LRpredictions)
},
ignore_index = True
)
model_metrics
# Random Forest gives better accuracy so we proceed with this model
# Exporting predictions to csv files for both the models
#Converting predicted values for target back to orginal yes or no
rfpredictions= rfpredictions.withColumn('prediction', when(rfpredictions['prediction']==1.0,"yes").otherwise("no"))
datafame_to_export=rfpredictions.select('age', 'job','marital','education','default','balance',
'housing','loan','campaign','pdays','previous','poutcome','prediction')
datafame_to_export.toPandas().to_csv('CustomerDetails_RandonForestOutput.csv',index=False)
| [
"matplotlib",
"seaborn"
] |
9344f6debb882fa5bb293ac2c0a118e862771468 | Python | sravanch1287/uavchanmod | /plot_path_loss_cdf.py | UTF-8 | 3,141 | 2.921875 | 3 | [] | no_license | """
plot_path_loss_cdf: Plots the CDF of the path loss on the test data,
and compares that to the randomly generated path loss from the trained model.
For the test data, we have (Uts,Xts) which is the link condition and
path data. We then generate synthetic samples, with the same condition
using the trained VAE. That is,
Xrand = g(Uts,Zrand) Zrand ~ N(0,I)
where is the g(.) is the conditional VAE.
Xrand and Xts are both vectors for each sample. We compute a statistic
pl_omni_ts = omni(Xts) = total omni path loss
pl_omni_rand = omni(Xrand) = total omni path loss
Now we have a set of scalar. We plot the CDF of pl_omni_ts and pl_omni_rand
and compare.
"""
import numpy as np
import matplotlib.pyplot as plt
import pickle
import tensorflow as tf
tfk = tf.keras
tfkm = tf.keras.models
tfkl = tf.keras.layers
import tensorflow.keras.backend as K
from models import ChanMod
model_dir = 'model_data'
# Load the data
fn = 'train_test_data.p'
with open(fn, 'rb') as fp:
train_data,test_data,pl_max = pickle.load(fp)
# Construct the channel model object
K.clear_session()
chan_mod = ChanMod(pl_max=pl_max,model_dir=model_dir)
# Load the learned link classifier model
chan_mod.load_link_model()
# Load the learned path model
chan_mod.load_path_model()
npaths_max = chan_mod.npaths_max
def comp_pl_omni(pl, pl_max):
I = np.where(pl < pl_max - 0.1)[0]
if len(I) == 0:
pl_omni = pl_max
else:
pl_omni = -10*np.log10( np.sum(10**(-0.1*pl[I]) ) )
return pl_omni
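# The omni-directional path loss combines the per-path losses in the linear power domain,
#   PL_omni = -10*log10( sum_i 10^(-PL_i/10) ),
# with paths at (or within 0.1 dB of) pl_max treated as absent.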
cell_types = [ChanMod.terr_cell, ChanMod.terr_cell,\
ChanMod.aerial_cell, ChanMod.aerial_cell]
los_types = [1,1,0,0]
title = ['Terr LOS', 'Terr NLOS', 'Aerial LOS', 'Aerial NLOS']
nplot = len(cell_types)
for iplot, cell_type0 in enumerate(cell_types):
# Get the LOS mode
los0 = los_types[iplot]
if los0:
ls0 = ChanMod.los_link
else:
ls0 = ChanMod.nlos_link
# Extract the test links to plot
dat = test_data
ls_ts = chan_mod.get_link_state(dat['los_exists'], dat['nlos_pl'])
Its = np.where((ls_ts == ls0) & (dat['cell_type'] == cell_type0))[0]
# Sample from the same conditions
pl = chan_mod.sample_path(dat['dvec'][Its], dat['cell_type'][Its], \
dat['los_exists'][Its])
# Get the omni path loss
ns = len(Its)
pl_omni_ts = np.zeros(ns)
pl_omni_rand = np.zeros(ns)
for i in range(ns):
pl_omni_ts[i] = comp_pl_omni(dat['nlos_pl'][Its[i],:npaths_max], pl_max)
pl_omni_rand[i] = comp_pl_omni(pl[i,:npaths_max], pl_max)
# Plot the CDFs
plt.subplot(2,2,iplot+1)
p = np.arange(ns)/ns
plt.plot(np.sort(pl_omni_ts), p)
plt.plot(np.sort(pl_omni_rand), p)
plt.grid()
plt.title(title[iplot])
plt.legend(['Test', 'VAE'])
if (iplot==0) or (iplot==2):
plt.ylabel('CDF')
if (iplot==2) or (iplot==3):
plt.xlabel('Path loss (dB)')
plt.xlim([np.min(pl_omni_ts), np.max(pl_omni_ts)])
plt.tight_layout()
plt.savefig('omni_path_loss.png', bbox_inches='tight')
| [
"matplotlib"
] |
f3d6bfc1d31dfcd3a5343c6650c34803c1aedbb5 | Python | biren-dave/ATP1A3 | /final/predictor.py | UTF-8 | 4,654 | 2.609375 | 3 | [] | no_license | import argparse
import json
import csv
import pandas as pd
from sklearn.preprocessing import StandardScaler
from sklearn import svm
import matplotlib.pyplot as plt
import numpy as np
def json_reader(f_path):
with open(f_path, "r") as fh:
return json.load(fh)
def make_list(dict):
l = []
for key in dict.keys():
l.append([key, dict[key]])
return l
def normalize(df, col):
x = df.loc[:, [col]].values
x = StandardScaler().fit_transform(x)
return x
def parse_input(positions):
p = positions.split(",")
return p
def get_x(p):
x = []
for i in p:
x.append([norm_dist[int(i) - 1][0], consurf[i]])
return x
def predict(clf, x, pos, c):
result = []
for i, p in zip(x, pos):
prob = clf.predict_proba([i])
prob_b = prob[0][0]
prob_p = prob[0][1]
if (prob_b > c) or (prob_p > c):
pred = clf.predict([i])[0]
else:
pred = "unsure"
result.append([p, pred, prob_b, prob_p])
return result
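# Note: predict_proba columns follow clf.classes_, which for the string labels
# "benign"/"pathogenic" is alphabetical, so column 0 is benign and column 1 pathogenic
# (consistent with prob_b and prob_p above).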
p_train = pd.read_json("p_train_features.json", orient="records")
b_train = pd.read_json("b_train_features.json", orient="records")
training_data = pd.concat([p_train, b_train], axis=1)
p_test = pd.read_json("p_test_features.json", orient="records")
b_test = pd.read_json("b_test_features.json", orient="records")
testing_data = pd.concat([p_test, b_test], axis=1)
distances = pd.DataFrame(make_list(json_reader("d_to_ATP_site.json")), columns=["pos", "dist"])
norm_dist = normalize(distances, "dist")
consurf = json_reader("consurf_std.json")
x_train, y_train = [], []
for var in training_data:
p = training_data[var]["pos"]
x_train.append([norm_dist[p - 1][0], consurf[str(p)]])
y_train.append(training_data[var]["target"])
x_test, y_test = [], []
for var in testing_data:
p = testing_data[var]["pos"]
x_test.append([norm_dist[p - 1][0], consurf[str(p)]])
y_test.append(testing_data[var]["target"])
clf = svm.SVC(kernel="linear", probability=True)
clf.fit(x_train, y_train)
def predictions(classifier, x_test, y_test):
preds = []
for x, y in zip(x_test, y_test):
pred = classifier.predict([x])[0]
if pred == "benign":
prob = classifier.predict_proba([x])[0][0]
elif pred == "pathogenic":
prob = classifier.predict_proba([x])[0][1]
preds.append({"x": x, "actual": y, "predicted": pred, "probability": prob})
return preds
def stats(predictions, t=0.5):
tp, tn, fp, fn = 0, 0, 0, 0
unsure = 0
tp_x, tn_x, fp_x, fn_x, u_x = [], [], [], [], []
for i in predictions:
if i["probability"] >= t:
if (i["actual"] == "pathogenic") and (i["predicted"] == "pathogenic"):
tp += 1
tp_x.append(i["x"])
elif (i["actual"] == "benign") and (i["predicted"] == "benign"):
tn += 1
tn_x.append(i["x"])
elif (i["actual"] == "benign") and (i["predicted"] == "pathogenic"):
fp += 1
fp_x.append(i["x"])
elif (i["actual"] == "pathogenic") and (i["predicted"] == "benign"):
fn += 1
fn_x.append(i["x"])
elif i["probability"] < t:
unsure += 1
u_x.append(i["x"])
tp_x = np.array(tp_x)
fp_x = np.array(fp_x)
tn_x = np.array(tn_x)
fn_x = np.array(fn_x)
u_x = np.array(u_x)
#return {"tp": tp_x, "tn": tn_x, "fp": fp_x, "fn": fn_x, "unsure": u_x}
return {"tp": len(tp_x), "tn": len(tn_x), "fp": len(fp_x), "fn": len(fn_x), "unsure": len(u_x)}
pred_y = predictions(clf, x_test, y_test)
test_stats = stats(pred_y, 0.55)
print(test_stats)
# parser = argparse.ArgumentParser()
# parser.add_argument("positions", help="Input the amino acid position(s) of hATP1A3 (expected range: 1 - 1013) to predict the pathogenicity of if mutated. If inputting more than one position, separate each with a comma (e.g. 45,67,124)", type=str)
# parser.add_argument("-c", "--conf_cutoff", help="Specify the confidence threshold below which predictions are called unsure (expected range: 0.5 - 1.0).", type=float)
# parser.add_argument("-o", "--output_file", help="Enter the file name of the output file.", type=str)
# args = parser.parse_args()
#
# var_set = args.positions
# cutoff = args.conf_cutoff
# out_file = args.output_file
#
# parsed = parse_input(var_set)
# x = get_x(parsed)
#
# with open(out_file, "w") as fh:
# writer = csv.writer(fh)
# writer.writerow(["position", "prediction", "probability benign", "probability pathogenic"])
# writer.writerows(predict(clf, x, parsed, cutoff))
| [
"matplotlib"
] |
94f46910862e9c7b91d74b658ed789aadc886abf | Python | cha63506/sem5 | /ki/a4/misc.py | UTF-8 | 2,083 | 3.328125 | 3 | [] | no_license | import numpy as np
import matplotlib.pyplot as plt
import cv2
"""
!!!!!!!!!!!!!!!!!!!!!!!!
!!! NO NEED TO TOUCH !!!
!!!!!!!!!!!!!!!!!!!!!!!!
KI Exercise "Handwriting Recognition"
-------------------------------------
various code for data inspection and preprocessing.
"""
def preprocess(features):
"""
preprocesses the features to make them more suitable
for neural network training:
- downscale images to reduce dimensionality
- standardize features
@type features: list[np.array]
@param features: a list of input features (= handwritten digits)
to preprocess
@rtype: list[np.array]
@returns: a list of preprocessed features
"""
def _preprocess(f):
# feature -> image
f = f.reshape(28,28).astype("float")
# blur image a bit
f = cv2.blur(f, (2,2))
# downscale image by factor of 3
f = f[::3,::3]
# image -> feature
f = f.reshape(100)
# standardize feature
f = (f - 128.0) / 64.0
return f
return [_preprocess(f) for f in features]
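# Quick sanity check for the pipeline above: one fake 28x28 digit goes in and a
# standardized 100-dimensional feature comes out (10x10 pixels after the 3x
# downscaling). Define-only sketch; the exercise code never calls it.
def _preprocess_demo():
    fake_digit = np.random.randint(0, 256, size=784)
    out = preprocess([fake_digit])[0]
    assert out.shape == (100,)
    return out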
def plot_errors(train_errors, valid_errors, filename):
"""
plots error rates on the training and validation set
over the iterations of training.
@type train_errors: list[float]
@param train_errors: Errors on the training set. One value per
training iteration.
@type valid_errors: list[float]
@param valid_errors: Errors on the validation set over the iterations
of training (must be same length as train_errors)
@type filename: str
@param filename: the output file for the plot
(e.g., "foo.pdf")
"""
plt.plot(range(len(train_errors)), train_errors,'-', markersize=10, color='green',
linewidth=4, label="error (training data)")
plt.plot(range(len(valid_errors)), valid_errors,'-', markersize=10, color='red',
linewidth=4, label="error (validation data)")
plt.legend(loc="best")
plt.savefig(filename)
plt.close()
| [
"matplotlib"
] |
5ab7530d75b9295bc7f6855764c6a62180afb1fb | Python | forero/PeculiarMotion | /code/fit_to_maxwellian.py | UTF-8 | 947 | 2.5625 | 3 | [] | no_license | import numpy as np
from scipy.optimize import curve_fit
import matplotlib.pyplot as plt
import scipy
import os
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--sim', help='simulation name', required=True)
args = parser.parse_args()
BoxID = int(args.sim)
filename = '../data/abacus/summary_velocities_abacus_planck_00_box_{:02d}.dat'.format(BoxID)
all_data = np.loadtxt(filename)
data = all_data[:,0] # this is the peculiar velocity
def cumul_distro(x,a):
y = scipy.special.erf(x/(a*np.sqrt(2)))
y = y - np.sqrt(2.0/np.pi) * x * np.exp(-0.5*(x/a)**2)/a
return y
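# The expression above is the Maxwell-Boltzmann speed CDF with scale parameter a:
# F(x; a) = erf(x / (a*sqrt(2))) - sqrt(2/pi) * (x/a) * exp(-x**2 / (2*a**2)).
# Minimal self-check sketch (not used by the fit below): the curve should start
# at 0, approach 1 for large x, and never decrease.
def _check_cumul_distro(a=100.0):
    xs = np.linspace(0.0, 10.0 * a, 1000)
    ys = cumul_distro(xs, a)
    assert ys[0] < 1e-9 and abs(ys[-1] - 1.0) < 1e-6
    assert np.all(np.diff(ys) >= -1e-12)
    return ys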
x_data = np.sort(data)
n = len(x_data)
y_data = np.linspace(1/n, 1.0, n)
popt, pcov = curve_fit(cumul_distro, x_data, y_data, bounds=[0,500])
print(popt[0], np.sqrt(pcov))
y_fit = cumul_distro(x_data, popt[0])
figname = 'plot_fit_{}.jpg'.format(args.sim)
plt.figure()
plt.plot(x_data, y_fit)
plt.plot(x_data, y_data)
plt.savefig(figname)
| [
"matplotlib"
] |
44a40daecd43d87c519042894d25d795d2ad2248 | Python | zhangyuze999/Pytorch_tutorial_codes | /Pytorch_tutorial_MNIST.py | UTF-8 | 3,405 | 2.703125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Wed May 20 13:37:11 2020
@author: zhang_2020
"""
from pathlib import Path
import requests
DATA_PATH = Path(r"D:\ZHANG2020\PytorchX\data")
PATH = DATA_PATH / "mnist"
PATH.mkdir(parents = True, exist_ok = True)
#URL = "http://deeplearning.net/data/mnist/"
FILENAME = "mnist.pkl.gz"
#if not (PATH / FILENAME).exists():
# content = requests.get(URL + FILENAME).content
# (PATH / FILENAME).open("wb").write(content)
"""
This dataset is in numpy array format, and has been stored using pickle,
a python-specific format for serializing data.
"""
import pickle
import gzip
with gzip.open((PATH / FILENAME).as_posix(), "rb") as f:
((x_train, y_train), (x_valid, y_valid), _) = pickle.load(f, encoding="latin-1")
"""
import matplotlib.pyplot as plt
plt.imshow(x_train[0].reshape((28, 28)), cmap="gray")
plt.show()
"""
import torch
x_train, y_train, x_valid, y_valid = map(torch.tensor, (x_train, y_train, x_valid, y_valid))
n, c = x_train.shape
from torch.utils.data import TensorDataset
train_ds = TensorDataset(x_train, y_train)
valid_ds = TensorDataset(x_valid, y_valid)
from torch.utils.data import DataLoader
train_dl = DataLoader(train_ds, batch_size = 32, shuffle=True)
valid_dl = DataLoader(valid_ds, batch_size = 32)
import torch.nn as nn
import torch.nn.functional as F
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 16, kernel_size = 3,stride = 2, padding = 1)
self.conv2 = nn.Conv2d(16, 16, kernel_size = 3,stride = 2, padding = 1)
self.conv3 = nn.Conv2d(16, 10, kernel_size = 3,stride = 2, padding = 1)
def forward(self, x):
x = x.view(-1, 1, 28, 28)
x = F.relu(self.conv1(x))
x = F.relu(self.conv2(x))
x = F.relu(self.conv3(x))
x = F.avg_pool2d(x, 4)
return x.view(-1, x.size(1))
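# Shape walk-through for the small all-convolutional net above, assuming 28x28
# inputs: (N,1,28,28) -> conv1 s2 -> (N,16,14,14) -> conv2 s2 -> (N,16,7,7)
# -> conv3 s2 -> (N,10,4,4) -> avg_pool2d(4) -> (N,10,1,1) -> view -> (N,10).
# Define-only check sketch, not called during training:
def _check_net_shapes():
    dummy = torch.randn(2, 784)
    out = Net()(dummy)
    assert out.shape == (2, 10)
    return out.shape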
import torch.optim as optim
dev = torch.device(
"cuda") if torch.cuda.is_available() else torch.device("cpu")
def preprocess(x, y):
return x.view(-1, 1, 28, 28).to(dev), y.to(dev)
#train_dl = WrappedDataLoader(train_dl, preprocess)
#valid_dl = WrappedDataLoader(valid_dl, preprocess)
net = Net()
optimizer = optim.SGD(net.parameters(), lr = 0.1, momentum = 0.9)
criterion = F.cross_entropy
loss_total = 0
net.zero_grad()
for epoch in range(10):
for it,data in enumerate(train_dl):
x_iter,y_iter = data
y_pred = net(x_iter)
loss = criterion(y_pred, y_iter)
#print(loss)
loss.backward()
optimizer.step()
optimizer.zero_grad()
loss_total += loss.item()
if it % 500 == 0:
#print('Iter = %d loss = %3f' % (it, loss.item()))
with torch.no_grad():
correct = 0
total = 0
for xb, yb in valid_dl:
yp = net(xb)
correct += (torch.argmax(yp,1) == yb).sum().item()
total += yp.size(0)
print('iter = %d acc = %3f' % (it , correct/total))
PATH = r'D:\ZHANG2020\PytorchX\trained_net\mnist_cnn.pth'
torch.save(net.state_dict(), PATH)
| [
"matplotlib"
] |
13372b8fe752985b35fe6747eb2cdcefbb8effb1 | Python | arqumahmad/signatureverification | /Data Extraction/threshold.py | UTF-8 | 1,872 | 2.53125 | 3 | [] | no_license | import matplotlib as mpl
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cbook as cbook
from checker import b
db= b.db
#getting data from csv file
class a:
data=[]
var_xList=[]
var_yList=[]
var_pList=[]
for i in range(1, 16):
data.append(np.genfromtxt("database/"+str(db)+"/"+str(i)+".csv", delimiter=',', names=['s', 'x', 'y', 'p' ]))
#data
#calculating dx and dy and dp
dx = np.diff(data[i-1]['x'])
dy = np.diff(data[i-1]['y'])
dp = np.diff(data[i-1]['p'])
#print dx
mean_x = (np.sum(dx)/len(dx))
        mean_y = (np.sum(dy)/len(dy))
        mean_p = (np.sum(dp)/len(dp))
#print mean_x
#print mean_y
#print mean_p
N= len(dx)
#variance for dx
var_x = (np.sum(dx)-(N * mean_x))/N
#print var_x
var_xList.append(var_x)
#print var_xList
#variance for dy
var_y = (np.sum(dy)-(N * mean_y))/N
var_yList.append(var_y)
#print var_yList
#variance for dp
var_p = (np.sum(dp)-(N * mean_p))/N
var_pList.append(var_p)
mean_fx = (np.sum(var_xList)/len(var_xList))
#print mean_fx
mean_fy= (np.sum(var_yList)/len(var_yList))
#print mean_fy
mean_fp = (np.sum(var_pList)/len(var_pList))
#print mean_fp
mod_T = np.sqrt((mean_fx ** 2) + (mean_fy ** 2) + (mean_fp ** 2))
var_fx = (np.sum(var_xList)-(N * mean_fx))/N
#print var_fx
var_fy = (np.sum(var_yList)-(N * mean_fy))/N
var_fp = (np.sum(var_pList)-(N * mean_fp))/N
mod_V = np.sqrt((var_fx ** 2) + (var_fy ** 2) + (var_fp ** 2))
    print(mod_T)
    #print(mod_V)
    t = mod_T-mod_V
    print(t)
th = t * 7
#modF=np.sqrt((var_x ** 2) + (var_y ** 2) + (var_p ** 2))
#print var_xList
#print var_yList
#print var_pList
| [
"matplotlib"
] |
d9aa9f1ef35f7bbc729fa40280b97d6c536b5143 | Python | jsch8q/MAS565_Numerical_Analysis | /Assignment 8/20218125채지석_assign8.py | UTF-8 | 5,493 | 3 | 3 | [] | no_license | import numpy as np
import matplotlib.pyplot as plt
from numpy.core.fromnumeric import size
def make_tridiag(main_diag, upper, lower):
"""
Construct a tridiagonal matrix from given three vectors.
"""
res = np.diag(main_diag) + np.diag(upper, 1) + np.diag(lower, -1)
return res
def make_Tn(n):
"""
Construct T_n as indicated in the assignment.
"""
if n <= 0 :
raise KeyError
off_diag = -1 * np.ones(n-1)
main_diag = 4 * np.ones(n)
res = make_tridiag(main_diag, off_diag, off_diag)
return res
def make_An(n):
"""
Construct a block matrix A_n as indicated in the assignment.
"""
if n <= 0 :
raise KeyError
Tn = make_Tn(n)
if n == 1 :
return Tn
I = np.eye(n)
O = np.zeros((n,n))
res = np.block([Tn, -I] + [O] * (n-2)) # first 'row'
for i in range(1, n-1):
tmp = np.block([O] * (i-1) + [-I, Tn, -I] + [O] * (n-2-i))
res = np.vstack((res, tmp))
    tmp = np.block([O] * (n-2) + [-I, Tn])
res = np.vstack((res, tmp)) # last 'row'
return res
def make_ej(j, n):
"""
Construct the j-th canonical basis vector in the C^n.
"""
res = np.hstack((np.zeros(j-1), [1], np.zeros(n-j)))
return res
def residual(v, Q):
"""
Compute the residual vector when orthogonally projecting v
onto span(Q). In this program we only condsider the case where
all columns of Q are orthonormal, so we can simplify our task.
"""
# proj = Q @ np.linalg.inv(Q.T @ Q) @ Q.T
proj = Q @ Q.T
return v - proj @ v
def vec_transpose(v):
"""
In numpy, vectors are really vectors, that is, they are
not the same with n*1 matrices. Therefore, to 'transpose'
a vector we need special treatment.
"""
vT = np.reshape(v.copy(), (-1, 1))
return vT
fig = plt.figure() # Preparation to plot eigenvalues
A = make_An(7)
eps = 10**-6 # set the tolerance to determine stopping criteria.
n = np.size(A, 0) # dimension we are dealing with
## Assignment 7 : Construct Tridiagonal matrix similar to An
gamma = []
delta = []
gamma_i = 0
q = make_ej(1, n)
q_prev = np.zeros(n)
i = 1
queue = 1
delta.append(q.T @ A @ q)
Q = vec_transpose(q)
while i < n :
i = i + 1
delta_i = q.T @ A @ q
delta.append(delta_i)
r_i = A @ q - delta_i * q - gamma_i * q_prev
gamma_i = np.linalg.norm(r_i)
if abs(gamma_i) < eps:
if i <= n:
gamma_i = 0
gamma.append(gamma_i)
new_q = np.zeros(0)
while np.linalg.norm(new_q) < eps:
if queue > n :
raise KeyError
queue = queue + 1
test = make_ej(queue, n)
new_q = residual(test, Q)
q = new_q / np.linalg.norm(new_q)
Q = np.hstack((Q, vec_transpose(q)))
else :
break
else :
gamma.append(gamma_i)
q_prev = q
q = r_i / gamma_i
Q = np.hstack((Q, vec_transpose(q)))
tridiag = make_tridiag(delta, gamma, gamma)
eigval, _ = np.linalg.eig(tridiag)
eigval = np.sort(eigval)[::-1]
ax1 = fig.add_subplot(1, 2, 1)
plt.xlabel("index")
plt.ylabel("eigenvalues")
ax1.scatter([i for i in range(len(eigval))], eigval, label = "eig")
ax1.legend()
#-------------------------------------------------------------------#
# Assignment 8 starts here! #
#-------------------------------------------------------------------#
def my_qr(A):
"""
I am not sure if implementing the QR decomposition is also a
part of the assignment or not, so I just made one myself.
This uses Householder reflections to compute a QR decomposition
of the given matrix A, but it is of course much more ineffecient
than the numpy built-in function /*numpy.linalg.qr*/.
"""
n = np.size(A, 0)
Q = np.eye(n)
R = A
for k in range(n):
x = R[k:n, k]
x_size = np.size(x)
x_norm = np.linalg.norm(x)
hh_vec = x + np.sign(x[0]) * x_norm * make_ej(1, x_size)
hh_vec = hh_vec / np.linalg.norm(hh_vec)
for j in range(k, n):
R[k:n, j] = R[k:n, j] - 2*np.outer(hh_vec,hh_vec) @ R[k:n,j]
refl = np.hstack((np.zeros(k), hh_vec))
Qk = np.eye(n) - 2 * np.outer(refl, refl)
Q = Q @ Qk
return Q, R
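# Quick check sketch for the Householder QR above (not called in the main flow):
# Q should be orthogonal and Q @ R should reproduce the input. Note that my_qr
# overwrites its argument in place, hence the copy.
def _check_my_qr(m=6):
    A_test = np.random.rand(m, m)
    Q_test, R_test = my_qr(A_test.copy())
    assert np.allclose(Q_test @ R_test, A_test)
    assert np.allclose(Q_test.T @ Q_test, np.eye(m))
    return Q_test, R_test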
max_iter = 1000
# QR method may not converge in general (although, not in this case).
# Hence, maximum number of iterations must be set.
A_k = tridiag
for k in range(max_iter):
Q, R = np.linalg.qr(A_k) # For speed, it is recommended to use this.
#Q, R = my_qr(A_k) # This is roughly 200 times slower, per iteration.
A_next = R @ Q
if np.linalg.norm(A_next) < eps:
rel_err = 0
else :
rel_err = np.linalg.norm(A_next - A_k) / np.linalg.norm(A_next)
A_k = A_next
if rel_err < eps:
break
eig_diag = np.diag(A_k)
print("Maximum off-diagonal:")
print("\t", np.amax(np.diag(eig_diag) - A_k))
# The smaller this value, the higher probability of convergence.
eigval_qr = np.sort(eig_diag)[::-1]
ax2 = fig.add_subplot(1, 2, 2)
plt.xlabel("index")
plt.ylabel("eigenvalues")
ax2.scatter( [i for i in range(len(eigval_qr))], eigval_qr,\
label = "QR" )
ax2.legend()
print("Maximum relative error of results from QR method:")
print("\t", np.amax(abs(eigval- eigval_qr) / eigval))
plt.show() | [
"matplotlib"
] |
776a35de17e2595e765bc28940f4fc802def926e | Python | Rohit777/sentiment_analysis | /by_KNN.py | UTF-8 | 2,100 | 3.078125 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 8 15:36:56 2018
@author: spiky
"""
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn import metrics
#importing dataset
training_dataset = pd.read_csv('training.tsv', delimiter = '\t', quoting = 3)
test_dataset = pd.read_csv('testdata.tsv', delimiter = '\t', quoting = 3)
#cleaning text
import re
import nltk
nltk.download('stopwords')
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
corpus = []
for i in range(0,7086):
Review = re.sub('[^a-zA-Z]',' ', training_dataset['review'][i])
Review = Review.lower()
Review = Review.split()
ps = PorterStemmer()
Review = [ps.stem(word) for word in Review if not word in set(stopwords.words('english'))]
Review = ' '.join(Review)
corpus.append(Review)
#creating the bag of words model
from sklearn.feature_extraction.text import CountVectorizer
cv = CountVectorizer()
X = cv.fit_transform(corpus).toarray()
y = training_dataset.iloc[:, 0].values
# Splitting the dataset into the Training set and Test set
from sklearn.cross_validation import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state = 0)
# Fitting K-NN to the Training set
from sklearn.neighbors import KNeighborsClassifier
classifier = KNeighborsClassifier(n_neighbors = 5, metric = 'minkowski', p = 2)
classifier.fit(X_train, y_train)
# Predicting the Test set results
y_pred = classifier.predict(X_test)
#calculating accuracy
fpr, tpr, thresholds = metrics.roc_curve(y_test, y_pred, pos_label=1)
print("KNN AUC: {0}".format(metrics.auc(fpr, tpr)))
# Making the Confusion Matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)
tr = []
for i in range(0,33052):
mr = test_dataset['review'][i]
tr.append(mr)
test_set_features=cv.transform(tr).toarray()
predictions2=classifier.predict(test_set_features)
df=pd.DataFrame(data={"Predicted Score":predictions2,"Text":tr})
df.to_csv("./knn_predictions.csv",sep=',',index=False) | [
"matplotlib"
] |
ab5d239ba4c102f8021ba3295117d0ea471274ff | Python | ZxxRick/Learn.Matplot | /Code/5图例.py | UTF-8 | 1,162 | 3.734375 | 4 | [] | no_license | #莫烦学Python-2.5 设置图例
import matplotlib.pyplot as plt
import numpy as np
###################################################################################
#code from lesson 2.3
x=np.linspace(-3,3,250) #250 evenly spaced points from -3 to 3
y1=2*x+1
y2=x**2
plt.figure()
plt.plot(x,y1,label='A')
plt.plot(x,y2,label='B',color='red',linewidth=1,linestyle='--') #the line colour, width and style can all be customised
###################################################################################
#plt.legend() #adds the legend; note that the labels were already set in plt.plot()
plt.legend(loc='best',shadow = True)
"""legend( handles=(line1, line2, line3),
labels=('label1', 'label2', 'label3'),
'upper right')
    shadow = True sets whether the legend box has a drop shadow
The *loc* location codes are::
'best' : 0,
'upper right' : 1,
'upper left' : 2,
'lower left' : 3,
'lower right' : 4,
'right' : 5,
'center left' : 6,
'center right' : 7,
'lower center' : 8,
'upper center' : 9,
'center' : 10,"""
plt.show()
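#The handles/labels form described in the notes above, as a define-only sketch:
#keep the Line2D objects returned by plt.plot and pass them to plt.legend explicitly.
def legend_with_handles_demo():
    l1, = plt.plot(x, y1)
    l2, = plt.plot(x, y2, color='red', linewidth=1, linestyle='--')
    plt.legend(handles=[l1, l2], labels=['line A', 'line B'], loc='upper left')
    plt.show()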
| [
"matplotlib"
] |
e7ad3199b23556c0484ec3a7f31e55c48963878a | Python | stefanocortinovis/bayesian-probit | /project/utils.py | UTF-8 | 1,939 | 2.921875 | 3 | [] | no_license | import os
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
def load_finney47():
dataset = np.loadtxt("./data/finney47.csv", delimiter=",", skiprows=1)
X = dataset[:, 1:]
Y = dataset[:, 0]
return X, Y
def trace_plot(mc, path=None, replace=False, title_prefix="", return_fig=False):
fig = plt.figure()
ax1 = fig.add_subplot(111)
ax1.set_title(f"{title_prefix}Trace Plot for {len(mc)-1} iterations")
ax1.set_xlabel("Iteration")
ax1.set_ylabel("Value Drawn")
ax1.plot(range(len(mc)-1), mc[1:])
ax1.legend([f"beta {i}" for i in range(len(mc[0]))], loc="lower right")
if path is not None:
if not os.path.exists(path) or replace:
fig.savefig(path)
if return_fig:
return fig
plt.close(fig)
def dist_plot(mc, warmup=200, path=None, replace=False, title_prefix="", return_fig=False):
mc = np.vstack(mc) # T, k
k = mc.shape[1]
fig, axs = plt.subplots(k, squeeze=True)
fig.suptitle(f"{title_prefix}Distribution Plot for Beta after {len(mc)-1} iterations")
for i, ax in enumerate(axs):
sns.kdeplot(mc[warmup+1:, i], ax=ax)
ax.set_xlabel(f"beta_{i}")
ax.set_ylabel("Density")
if path is not None:
if not os.path.exists(path) or replace:
fig.savefig(path)
if return_fig:
return fig
plt.close(fig)
def ar_plot(accepted, warmup=200, path=None, replace=False, title_prefix="", return_fig=False):
n_sample = len(accepted)
accepted_cum = np.cumsum(accepted)
iterations = np.arange(1, n_sample+1)
acceptance_rate = accepted_cum / iterations
fig, ax = plt.subplots(1,1)
ax.plot(iterations, acceptance_rate)
ax.set_xlabel("iterations")
ax.set_ylabel("acceptance_rate")
if path is not None:
if not os.path.exists(path) or replace:
fig.savefig(path)
if return_fig:
return fig
plt.close(fig)
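# Minimal sketch showing how the three helpers above can be driven with a fake
# chain (define-only; the chain and acceptance values below are synthetic):
def _plots_demo(n_sample=500, k=3):
    fake_chain = [np.random.randn(k) for _ in range(n_sample)]
    fake_accepted = np.random.rand(n_sample) < 0.3
    trace_plot(fake_chain)
    dist_plot(fake_chain, warmup=100)
    ar_plot(fake_accepted, warmup=100)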
| [
"matplotlib",
"seaborn"
] |
598194af92967ee5ad9fd5f6bcd2fcd5ed2def82 | Python | yeejietang/personal_testing | /kaggle_titanic/titanic_master.py | UTF-8 | 667 | 2.875 | 3 | [] | no_license | # Imports
# pandas
import pandas as pd
from pandas import Series,DataFrame
# numpy, matplotlib, seaborn
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style('whitegrid')
#%matplotlib inline
# machine learning
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC, LinearSVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
# get titanic & test csv files as a DataFrame
titanic_df = pd.read_csv("train.csv")
test_df = pd.read_csv("test.csv")
# preview the data
titanic_df.head()
print("bla bla test") | [
"matplotlib",
"seaborn"
] |
e525036d641690fa2c89424a63ff52ccb533058a | Python | wgddd/PyBEM2D | /Lib/BEM_Solver/BEM_2D_Mesh.py | UTF-8 | 24,194 | 2.578125 | 3 | [
"BSD-3-Clause"
] | permissive | #########################################################################
# (C) 2017 Department of Petroleum Engineering, #
# Univeristy of Louisiana at Lafayette, Lafayette, US. #
# #
# This code is released under the terms of the BSD license, and thus #
# free for commercial and research use. Feel free to use the code into #
# your own project with a PROPER REFERENCE. #
# #
# PYBEM2D Code #
# Author: Bin Wang #
# Email: [email protected] #
# Reference: Wang, B., Feng, Y., Berrone, S., et al. (2017) Iterative #
# Coupling of Boundary Element Method with Domain Decomposition. #
# doi: #
#########################################################################
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
matplotlib.rcParams.update({'font.size': 20}) #font
from matplotlib import path
#[BEM Mesh]
from .Elements.BEM_Elements import BEM_element
#[General Geometry Lib]
from Lib.Tools.Geometry import *
###############################
#
# Core BEM 2D Class
#
###############################
class BEM_2DMesh:
"""Contains information and functions related to a 2D potential problem using BEM."""
def __init__(self,BEMobj):
"""Creates a BEM objecti with some specific paramters
Arguments
---------
[Discretization]
        Pts_e -- Boundary end nodes for an arbitrary shape Polygon.
e.g. Boundary_vert=[(0.0, 0.0), (0.0, 1.5), (1.0, 1.5), (1.5, 0.0)]
        Pts_t -- Internal trace end node for an arbitrary intersection
e.g. Trace_vert=[((0.25, 0.5), (1.25, 0.5)),((0.25, 1.2), (1.25, 1.2))]
Ne_edge -- Number of element on all edges
Ne_trace -- Number of element on all traces
Nedof_edge -- Number of DOF for edge elements (const=1 linear=2 quad=3)
Nedof_trace -- Number of DOF for trace elements
Ndof_edge -- Total number of DOF for all edge elements
Ndof_trace -- Total number of DOF for all trace elements
Ndof -- Total number of DOF for all elements
domain_min -- minimum coords of a domain
        domain_max -- maximum coords of a domain
h_edge -- Length of element for boundary edge [optional]
h_trace -- Length of element for trace [optional]
Num_boundary -- Number of boundary edge elements
Num_trace -- Number of trace elements
[BEM Solver]
TraceOn -- Enable the internal (geometry) trace
BEs_edge -- BEM element collection for boundary edge
BEs_trace -- BEM element collection for internal edge
NumE_bd -- the number of element on each boundary
NumE_t -- the number of element on each trace
Author:Bin Wang([email protected])
Date: July. 2017
"""
self.BEMobj=BEMobj
self.Pts_e=[]
self.Pts_t=[]
        self.domain_min=[] #Additional Geometry variable
self.domain_max=[]
self.Ne_edge=0
self.Ne_trace=0
self.Nedof_edge=0
self.Nedof_trace=0
self.Ndof_edge=0
self.Ndof_trace=0
self.Ndof=0
self.h_edge=0
self.h_trace=0
self.Num_boundary=1
self.Num_trace=1
self.NumE_bd=[]
self.NumE_t=[]
#[BEM Mesh Info]
self.mesh_nodes=[] #All of bem nodes for each edge
def set_Mesh(self,Pts_e=[],Pts_t=[],h_edge=0.1,h_trace=0.1,Ne_edge=None,Ne_trace=None,Type='Quad',mode=0):
"""Create BEM mesh based on either number of element or length of element
Support for:
        1. Constant, linear and quadratic elements
        2. Arbitrary closed shape by giving boundary vertex(Pts_e)
3. Internal line segments by giving internal vertex(Pts_t)
Arguments
---------
Ne_edge -- Number of element in all boundary edge [optional]
Ne_trace -- Number of element in trace [optional]
h_edge -- Length of element for boundary edge [optional]
h_trace -- Length of element for trace [optional]
mode -- 0-round up connect 1-manully set up
Author:Bin Wang([email protected])
Date: July. 2017
"""
#Check intersection segments
Pts_e, Pts_t = self.Split_ByIntersections(Pts_e, Pts_t)
#Fill data
self.Pts_e=Pts_e
self.Pts_t=Pts_t
self.Num_boundary=len(Pts_e)
self.Num_trace=len(Pts_t)
if(Ne_edge is not None):
self.h_edge=self.NumEle2LenEle(Ne_edge)
else:
self.h_edge = h_edge
if(Ne_trace is not None):
self.h_trace = self.NumEle2LenEle(None,Ne_trace)
else:
self.h_trace=h_trace
#find the domain min,max for plotting
self.domain_min=(min(np.asarray(self.Pts_e)[:,0]),min(np.asarray(self.Pts_e)[:,1]))
self.domain_max=(max(np.asarray(self.Pts_e)[:,0]),max(np.asarray(self.Pts_e)[:,1]))
#Boundary mesh
self.BEMobj.TypeE_edge=Type
for i in range(self.Num_boundary):
Node=self.Pts_e[i]
if (i==self.Num_boundary-1):
if(mode==0): Node_next=self.Pts_e[0] #round connect
elif(mode==1): break#manully connect
else:
Node_next=self.Pts_e[i+1]
Ne_edge=int(np.ceil(calcDist(Node,Node_next)/self.h_edge))
self.NumE_bd.append(Ne_edge)
added_nodes=self.Append_Line(Node,Node_next,Ne_edge,self.BEMobj.BEs_edge,
bd_marker=i,Type=Type)
self.mesh_nodes.append(added_nodes)
#Additional mesh info
self.Ne_edge=len(self.BEMobj.BEs_edge)
if(self.BEMobj.TypeE_edge=="Const"):
self.Ndof_edge = 1 * self.Ne_edge
self.Nedof_edge=1
elif(self.BEMobj.TypeE_edge == "Linear"):
self.Ndof_edge = 2 * self.Ne_edge
self.Nedof_edge=2
else:
self.Ndof_edge = 3*self.Ne_edge
self.Nedof_edge=3
#Trace mesh
#Type="Const"
self.BEMobj.TypeE_trace = Type
if (self.h_trace != None and len(Pts_t)!=0):
self.TraceOn=1 #
#print('We have trace')
for i in range(self.Num_trace):
Node,Node_next=self.Pts_t[i][0],self.Pts_t[i][1]
Ne_trace=int(np.ceil(calcDist(Node,Node_next)/self.h_trace))
self.NumE_t.append(Ne_trace)
temp_trace=[]
added_nodes=self.Append_Line(Node,Node_next,Ne_trace,temp_trace,
bd_marker=self.Num_boundary + i,
Type=Type)#, refinement="cosspace") # fracture always 0 flux on edge
self.BEMobj.BEs_trace.append(temp_trace)
self.mesh_nodes.append(added_nodes)
#Additional mesh info
self.Ne_trace=sum(self.NumE_t)
if(self.BEMobj.TypeE_trace == "Const"):
self.Ndof_trace = 1 * self.Ne_trace
self.Nedof_trace = 1
elif(self.BEMobj.TypeE_trace == "Linear"):
self.Ndof_trace = 2 * self.Ne_trace
self.Nedof_trace = 2
else:
self.Ndof_trace = 3 * self.Ne_trace
self.Nedof_trace = 3
#Total DOF
self.Ndof=self.Ndof_edge+self.Ndof_trace
if(self.Ndof_trace == 0):
self.TraceOn = 0
else:
self.TraceOn = 1
#Plot Mesh
print("[Mesh] Genetrated...")
print("[Mesh] Discontinous Element used")
print("[Mesh] Number of boundary elements:%s E-T(%s,%s)" % (self.Ne_edge + self.Ne_trace,self.Ne_edge,self.Ne_trace) )
print("[Mesh] Number of Nodes:%s E-T(%s-%s)" % (self.Ndof,self.Ndof_edge,self.Ndof_trace) )
#self.plot_Mesh()
def print_debug(self):
"""Check Meshing and B.C for different type of elements
Author:Bin Wang([email protected])
Date: July. 2017
"""
#Check Meshing and B.C for different type of elements
print("[Mesh] State")
print("Number of boundary elements:%s E-T(%s,%s)" % (self.Ne_edge + self.Ne_trace,self.Ne_edge,self.Ne_trace) )
print("Number of Nodes:%s E-T(%s-%s)" % (self.Ndof,self.Ndof_edge,self.Ndof_trace) )
print("Edge Num.:%s" % (self.Num_boundary))
print("# Neumann-1 Dirichlet-0")
print("(E)Pts\tX\tY\tType\tMarker\t\tBC_type\t\tBC_value\tRobin")
for i, pl in enumerate(self.BEMobj.BEs_edge):
eleid = i + 1
for j in range(self.Nedof_edge):
nodeid = self.getNodeId(i, j, 'Edge') + 1
xi,yi=pl.get_node(j)
print("(%s)%s\t%5.3f\t%.3f\t%.4s\t%d\t\t%d\t\t%.2f\t\t%d" %
(eleid, nodeid, xi, yi, pl.element_type, pl.bd_marker, pl.bd_Indicator, pl.bd_values[0], pl.Robin_alpha))
print("Trace Num.:%s" % (self.Num_trace))
eleid = 0
for i in range(self.Num_trace):
print("--Trace ", i + 1)
for j, pl in enumerate(self.BEMobj.BEs_trace[i]):
for k in range(self.Nedof_trace):
global_eleid = eleid + self.Ne_edge + 1
global_index = self.getNodeId(eleid, k, 'Trace') + 1
xi,yi=pl.get_node(j)
#print("(%s)%s\t%5.3f\t\t%.3f\t\t%.4s\t\t%d" % (index,2*len(self.BEMobj.BEs_edge)+index,pl.xa,pl.ya,pl.element_type,pl.bd_marker))
print("(%s)%s\t%5.3f\t%.3f\t%.4s\t%d\t\t%d\t\t%.2f\t\t%d" %
(global_eleid, global_index, xi, yi, pl.element_type, pl.bd_marker, pl.bd_Indicator, pl.bd_value1, pl.Robin_alpha))
eleid = eleid + 1
###########Visulation Module################
def plot_Mesh(self,Annotation=1):
"""Plot BEM Mesh
Author:Bin Wang([email protected])
Date: July. 2017
"""
x_min, x_max = self.domain_min[0], self.domain_max[0]
y_min, y_max = self.domain_min[1], self.domain_max[1]
space = 0.15 * calcDist((x_min, y_min), (x_max, y_max))
plt.figure(figsize=(5, 5))
plt.axes().set(xlim=[x_min - space, x_max + space],
ylim=[y_min - space, y_max + space], aspect='equal')
#Domain boundary line
plt.plot(*np.asarray(list(self.Pts_e)+[self.Pts_e[0]]).T,'b-',
lw=1.5, label="Domain Edges")
#Trace boundary line
for i in range(self.Num_trace):
plt.plot(*np.asarray(list(self.Pts_t[i])).T, 'g-',
lw=1.5, label="Trace Edges")
#Point source
#plt.scatter(*np.asarray(self.Pts_w).T,s=20,color='red')
#Boundary elements
BEs_pts=[]
BEs_endpts=[]
for BE in self.BEMobj.BEs_edge:
BEs_endpts.append((BE.xa,BE.ya))
for j in range(BE.ndof):
BEs_pts.append(BE.get_node(j))
plt.plot(*np.asarray(BEs_pts).T, 'bo', lw=1, markersize=5,
label=str(self.Ne_edge)+' Boundary Elements ')
plt.scatter(*np.asarray(BEs_endpts).T, s=40,marker="x", c='k', alpha=0.8)
#Trace elements
if (self.TraceOn):
BEs_pts = []
BEs_endpts = []
for t in self.BEMobj.BEs_trace:
for BE_t in t:
BEs_endpts.append((BE_t.xa, BE_t.ya))
for j in range(BE.ndof):
BEs_pts.append(BE_t.get_node(j))
BEs_endpts.append((t[-1].xb, t[-1].yb))
plt.plot(*np.asarray(BEs_pts).T, 'go', lw=1, markersize=5,
label=str(self.Ne_trace) + ' Trace Elements ')
plt.scatter(*np.asarray(BEs_endpts).T, s=40, marker="x", c='k', alpha=0.8)
if (Annotation):
#Show marker index-convenient for BC assignment
for i in range(self.Num_boundary):
Node = self.Pts_e[i]
if (i == self.Num_boundary - 1):
Node_next = self.Pts_e[0] # round connect
else:
Node_next = self.Pts_e[i + 1]
rightmiddle = line_leftright(Node, Node_next, space * 0.5)[1]
plt.text(*rightmiddle.T, "%s" % (i), fontsize=15,color='k')
for i in range(self.Num_trace):
Node, Node_next = self.Pts_t[i][0], self.Pts_t[i][1]
rightmiddle = line_leftright(Node, Node_next, space * 0.3)[1]
plt.text(*rightmiddle.T, "%s" % (i + self.Num_boundary), fontsize=15)
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0., fontsize=15)
plt.title('BEM Mesh')
plt.xlabel('x(m)')
plt.ylabel('y(m)')
plt.show()
###########Auxiliary Modes################
def Split_ByIntersections(self,Pts_e,Pts_t):
"""Split the edge or trace by their intersections
Arguments
---------
xa, ya -- Cartesian coordinates of the first start-point.
Author:Bin Wang([email protected])
Date: July. 2017
"""
Edge_lines=Polygon2NodePair(Pts_e)
Trace_lines=Pts_t
#Split edge first
New_Pts_e=[]
for ei,edge in enumerate(Edge_lines):
New_Pts_e.append(edge[0])
for ti, trace in enumerate(Trace_lines):
if(LineSegIntersect2(edge, trace)):#Found Intersection Line
Pts_isect = LineIntersect(edge,trace)
print("Found Intersection-Edge", ei,
"Trace", ti, '@', Pts_isect)
New_Pts_e.append(Pts_isect)
#Split edge and trace by intersections
New_Pts_t = Split_IntersectLines(Trace_lines)
return New_Pts_e,New_Pts_t
def Append_Line(self, Pts_a=(0, 0), Pts_b=(0, 0), Nbd=1, panels=[], bd_marker=0, Type="Quad", refinement="linspace"):
"""Creates a BE along a line boundary. Anticlock wise, it decides the outward normal direction
This can be used to create abritary polygon shape domain
Arguments
---------
xa, ya -- Cartesian coordinates of the first start-point.
Author:Bin Wang([email protected])
Date: July. 2017
"""
if (Type=="Quad"):
#Quadratic element of additional point
Nbd=Nbd*2
Pts=EndPointOnLine(Pts_a,Pts_b,Nbd,refinement)
if (Type=="Const" or Type=="Linear"):
for i in range(Nbd):
#[0,1] [1,2]
Node1=Pts[i]
Node2=Pts[i+1]
panels.append(BEM_element(Node1,[],Node2,Type,bd_marker))#Neumann, Dirchlet
if (Type=="Quad"):
#To get coordinates of additional one node for a Quadratic BE
Nbd=int(Nbd/2)
for i in range(Nbd):
#[0,1,2] [2,3,4]
Node1=Pts[2*i]
Node2=Pts[2*i+1]
Node3=Pts[2*i+2]
panels.append(BEM_element(Node1,Node2,Node3,Type,bd_marker))#Neumann, Dirchlet
return Pts
def getNodeId(self, eleid, local_id, EdgeorTrace='Edge'):
"""get the global node index based on the element id and local id
# Ele1 Ele2 Ele3
#[1,2,3] [3,4,5] [5,6,7]
Author:Bin Wang([email protected])
Date: July. 2017
"""
global_id = 0
if(EdgeorTrace == 'Edge'): # Edge
#Special Case of Round end connect for continous element
#if(eleid == self.Ne_edge - 1 and local_id == self.Nedof_edge-1 and self.Nedof_edge>1): #This is the last node at the last element
# global_id = 0
# return global_id
if (self.BEMobj.TypeE_edge == "Quad"):
global_id = 3 * eleid + local_id
elif (self.BEMobj.TypeE_edge == "Linear"):
global_id = 2 * eleid + local_id
else:
global_id = eleid
else: #Trace
start_id=self.Ndof_edge
if(EdgeorTrace == 'Trace'): # Trace
if (self.BEMobj.TypeE_trace == "Quad"):
global_id = start_id + 3 * eleid + local_id
elif (self.BEMobj.TypeE_trace == "Linear"):
global_id = start_id + 2 * eleid + local_id
else:
global_id = start_id + eleid
return global_id
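    # Worked example of the numbering above (discontinuous elements, as reported by
    # set_Mesh): with quadratic edge elements, element 0 owns global nodes 0,1,2 and
    # element 1 owns 3,4,5, so getNodeId(1, 0, 'Edge') -> 3*1+0 = 3; trace nodes are
    # offset by the total number of edge DOFs, e.g. getNodeId(0, 1, 'Trace')
    # -> Ndof_edge + 3*0 + 1. Illustrative arithmetic only.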
def NumEle2LenEle(self,Ne_edge=None,Ne_trace=None):
'''Determine the length of element based on total number of elements
Author:Bin Wang([email protected])
Date: June. 2018
'''
TotalLength_edge=0.0
TotalLength_trace=0.0
if(Ne_edge is not None):
if(Ne_edge < len(self.Pts_e)): # Minimum element number is required
Ne_edge = 2 * len(self.Pts_e)
Pts_e=list(self.Pts_e)+[self.Pts_e[0]] #Round end connect
for i in range(self.Num_boundary):
TotalLength_edge=TotalLength_edge+calcDist(Pts_e[i],Pts_e[i+1])
return TotalLength_edge / Ne_edge
elif(Ne_trace is not None):
if(Ne_trace < len(self.Pts_t)): # Minimum element number is required
Ne_trace = len(self.Pts_t)
for i in range(self.Num_trace):
TotalLength_trace=TotalLength_trace+calcDist(self.Pts_t[i][0],self.Pts_t[i][1])
return TotalLength_trace / Ne_trace
def IsBDIntersection(self,bd_markerID):
'''Check a bd is a Trace or Not
'''
if (bd_markerID > self.Num_boundary - 1): # this is a trace
return True
else:
return False
def getBDDof(self, bd_markerID):
'''Get the number of Dof on a specific edge
'''
elementID = self.bdmarker2element(
bd_markerID) # find the element idx on this edge
Ndof = 0
if (bd_markerID > self.Num_boundary - 1): # this is a trace
TracerID = elementID[0][0]
for ei, pl in enumerate(self.BEMobj.BEs_trace[TracerID]):
Ndof = Ndof + pl.ndof
else: # This is boundary edge
for i in range(len(elementID)): # loop for all elements on this edge
Ndof = Ndof + self.BEMobj.BEs_edge[elementID[i]].ndof
return Ndof
def getTraceID(self,bd_markerID):
'''The the local trace id (0,1,2) from global bd id
'''
if (bd_markerID > self.Num_boundary - 1): # this is a trace
return bd_markerID-len(self.Pts_e)
else:
print("[BEM_2D_Mesh.py->getTraceID]This is not a Trace, but a Edge!")
return -1
def point_on_element(self, Pts):
#check and determine the element which a point is located on edge or trace
#Currently, only boundary edge support
#[Output] [elementID1,elementID2....] -1 - not on edge
element=[]
#Boundary edge
for i in range(self.Num_boundary):#edge search
Node=self.Pts_e[i]
if (i==self.Num_boundary-1):
Node_next=self.Pts_e[0] #round connect
else:
Node_next=self.Pts_e[i+1]
if (point_on_line(Pts,Node,Node_next)):# Found! point on a edge
elementID=self.bdmarker2element(i)#element index on this edge
for j in range(len(elementID)):
ID = elementID[j]
Pts_a = (self.BEMobj.BEs_edge[ID].xa, self.BEMobj.BEs_edge[ID].ya)
Pts_b = (self.BEMobj.BEs_edge[ID].xb, self.BEMobj.BEs_edge[ID].yb)
if(point_on_line(Pts, Pts_a, Pts_b)):
element.append(ID)
break #element belonging is enough
#Internal trace
for ti in range(self.Num_trace):
markerID=ti+self.Num_boundary
Node=self.Pts_t[ti][0]
Node_next = self.Pts_t[ti][1]
if (point_on_line(Pts, Node, Node_next)): # Found! point on a edge
elementID = self.bdmarker2element(markerID) # element index on this edge
for j in range(len(elementID)):
TracerID = elementID[j][0]
ID = elementID[j][1]
Pts_a = (self.BEMobj.BEs_trace[ti][ID].xa, self.BEMobj.BEs_trace[ti][ID].ya)
Pts_b = (self.BEMobj.BEs_trace[ti][ID].xb, self.BEMobj.BEs_trace[ti][ID].yb)
if(point_on_line(Pts, Pts_a, Pts_b)):
element.append([TracerID,ID])
break # element belonging is enough
if(len(element)>=1): #1 general element 2 edge connection points
return element
else:
return -1
def element2edge(self,idx_element):
        #find the edge index from an element index
#Currently only support for edge element
pts_c=[self.BEMobj.BEs_edge[idx_element].xc,self.BEMobj.BEs_edge[idx_element].yc] #central point of this element
for i in range(self.Num_boundary):#edge search
Node=self.Pts_e[i]
if (i==self.Num_boundary-1):
Node_next=self.Pts_e[0] #round connect
else:
Node_next=self.Pts_e[i+1]
if (point_on_line(pts_c,Node,Node_next)):#edge found
return i
print('Error!! Func-element2edge')
def bdmarker2element(self, markerID):
#find the element index based on bd markerID(boundary index)
# example: markerID=3 Element index=[0 1]
index=[]
if (markerID>self.Num_boundary-1):#this is a trace
tracerID=markerID-self.Num_boundary
for i in range(len(self.BEMobj.BEs_trace[tracerID])):
index.append([tracerID,i])
else:#this is a boundary edge
elementID_start=0
for i in range(markerID):
elementID_start+=self.NumE_bd[i]
for i in range(self.NumE_bd[markerID]):
index.append(elementID_start+i)
return np.array(index)
def bd2element(self,element_type="Const",eleid=0,node_values=[]):
        #extract the node_values of an element from a set of values along an edge
        #eleid is the local index, e.g. 3 elements on an edge give eleid=0,1,2
if(element_type=="Const"):#[0] [1]
return [node_values[eleid]]
elif(element_type=="Linear"):#[0,1] [1,2]
return [node_values[eleid],node_values[eleid+1]]
elif(element_type=="Quad"):#[0,1,2] [2,3,4]
return [node_values[eleid*2],node_values[eleid*2+1],node_values[eleid*2+2]]
def getEdgeEleNodeCoords(self,eleid):
#Get the node coordinates of a edge element
return self.BEMobj.BEs_edge[eleid].get_nodes()
def EndPoint2bdmarker(self,Pts0,Pts1):
#find the bd_markerID based on two end points[Pts0,Pts1]
#currently only boundary edge are support
pts_c=[(Pts0[0]+Pts1[0])*0.5,(Pts0[1]+Pts1[1])*0.5] #central point of this element
for i in range(self.Num_boundary):#edge search
Node=self.Pts_e[i]
if (i==self.Num_boundary-1):
Node_next=self.Pts_e[0] #round connect
else:
Node_next=self.Pts_e[i+1]
if (point_on_line(pts_c,Node,Node_next)):#edge found
return i
print("Can not find the bd_markerID",Pts0,Pts1)
| [
"matplotlib"
] |
17490cef41b3ddd92127017aa049252d7cfe80b3 | Python | cbe2/Gradient-Induced-Diffusive-Spin-Relaxation-Simulation | /MagneticFields/GolayFieldStudies/GolayCoils.py | UTF-8 | 4,611 | 2.9375 | 3 | [] | no_license | import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
import magpylib as magpy
#Creates Gradient in the +y direction (Gy=+|Gy|)
#Y= direction normal to earth's surface
#Z= Holding field direction
#X= determined by Y and Z
#returns the gradient from a collection
#c= collection generating the B-field
#r= given positions to evaluate gradient at r=[[x1,y1,z1],[x2,y2,z2],...] units in mm
#comp=index that gives the proper component (0=x, 1=y, 2=z)
def GetGrad(c,R,step,comp):
dx=np.asarray([step,0,0])
dy=np.asarray([0,step,0])
dz=np.asarray([0,0,step])
gradients=[]
for r in R:
xcomp=(c.getB(r+dx)[comp]-c.getB(r)[comp])/step
ycomp=(c.getB(r+dy)[comp]-c.getB(r)[comp])/step
zcomp=(c.getB(r+dz)[comp]-c.getB(r)[comp])/step
gradients.append([xcomp,ycomp,zcomp])
return np.asarray(gradients) #units of mT/mm
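# Finite-difference sanity check for GetGrad (define-only, not used below): for a
# synthetic field B = (0, 0, g*y) the numerical gradient of Bz should come back as
# (0, g, 0) at any point, so a tiny stand-in object with a getB method is enough
# and no real coil collection is needed here.
class _LinearFieldStub:
    def __init__(self, g):
        self.g = g
    def getB(self, r):
        return np.array([0.0, 0.0, self.g * r[1]])
def _check_GetGrad(g=0.05):
    grads = GetGrad(_LinearFieldStub(g), [np.zeros(3), np.array([1.0, 2.0, 3.0])], 1e-3, 2)
    assert np.allclose(grads, [[0.0, g, 0.0], [0.0, g, 0.0]])
    return grads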
a=10 # radius parameter of coils (cm)
a=a*10.# change to mm
I=np.abs(1) #current in Amps
N=10 #number of theta points
# create collection of two magnets
angles=np.linspace(-np.pi/3.,np.pi/3.,N)
#clockwise points for arc in x-y plane in x>0
arcNearXplusV=np.asarray([[a*np.cos(theta),a*np.sin(theta),0.4*a] for theta in angles])
arcFarXplusV=np.asarray([[a*np.cos(-theta),a*np.sin(-theta),1.64*a] for theta in angles])
#connecting the arcs
NearFarV=np.asarray([[a*np.cos(angles[-1]),a*np.sin(angles[-1]),0.4*a],[a*np.cos(angles[-1]),a*np.sin(angles[-1]),1.64*a]])
FarNearV=np.asarray([[a*np.cos(angles[0]),a*np.sin(angles[0]),1.64*a],[a*np.cos(angles[0]),a*np.sin(angles[0]),0.4*a]])
#collection of wire loops
LCs=[]
for i in [I,-I,-I,I]:
arcNearXplus = magpy.source.current.Line( curr = i, vertices=arcNearXplusV)
arcFarXplus = magpy.source.current.Line( curr = i, vertices=arcFarXplusV)
NearFar = magpy.source.current.Line( curr = i, vertices=NearFarV)
FarNear = magpy.source.current.Line( curr = i, vertices=FarNearV)
LCs.append(magpy.Collection(arcNearXplus,NearFar,arcFarXplus,FarNear))
#rotating them to proper positions
LCs[1].rotate(angle=180,axis=[1,0,0],anchor=[0,0,0])
LCs[2].rotate(angle=180,axis=[0,0,1],anchor=[0,0,0])
LCs[3].rotate(angle=180,axis=[0,1,0],anchor=[0,0,0])
c = magpy.Collection(*LCs)
#rotate collection to create Gy gradient instead of Gx
c.rotate(angle=90,axis=[0,0,1],anchor=[0,0,0])
#print(c.sources)
#magpy.displaySystem(c,direc=True)
# r=[[0,y,0] for y in np.linspace(-a,a,100)]
#
# r=np.asarray(r)
#
# #Bfield=c.getB(r)*10. #convert from mT to guass
# GBz=GetGrad(c,r,0.0001*a,1).reshape([100,3]) *10*10 #G/cm
# G0=GetGrad(c,[[0,0,0]],0.0001*a,2)[0,1]*10*10#[0,1]*10*10 #G/cm
# GBz=GBz/G0
# # GBzn=np.linalg.norm(GBz,axis=1)
# #
# #
# # #plt.plot(r[:,2]/a,Bfield[:,2]) #plot z comp of field along z axis
# plt.plot(r[:,1]/a,GBz[:,2]) #plot z comp of field along z axis
# plt.xlabel("z/a (unitless)")
# plt.ylabel(r'$\partial_yB_z/G_y$ (unitless)')
# plt.title(r"Golay Coil $\partial_yB_z$ along the y-axis (a=10cm, I=1 Amp) ")
# plt.grid()
# plt.show()
# # create positions
ys = np.linspace(-a*.8,a*.8,100) ; dys=ys[1]-ys[0]
zs = np.linspace(-a*.8,a*.8,100) ; dzs=zs[1]-zs[0]
posis = [[0,y,z] for y in ys for z in zs] #increments last variable first
# # calculate field and amplitude
B = [c.getB(pos) for pos in posis] #
Bs = np.array(B).reshape([100,100,3]) #reshape to [z_pos,y_pos,[Bx,By,Bz]]
Bamp = np.linalg.norm(Bs,axis=2) #computes the norm of all vectors
Bamp=Bamp*10# to Gauss /np.linalg.norm(c.getB([0,0,0])) #normalize to center value
#Bamp=Bamp*10 #converts from mT to Gauss
# #field at center in guass
# print("Field at center in Gauss")
# print(np.linalg.norm(c.getB([0,0,0])*10))
#
# # define figure with a 2d and a 3d axis
fig = plt.figure(figsize=(10,5)) #fig size
# ax1 = fig.add_subplot(121,projection='3d')
ax2 = fig.add_subplot(111)
#
# # add displaySystem on ax1
# magpy.displaySystem(c,subplotAx=ax1,suppress=True)
# #ax1.view_init(elev=75)
#
# amplitude plot on ax2
#shift the coordinates b/c they are used as the corners.
#cp=ax2.pcolor((zs-0.5*dzs)/a,(ys-0.5*dys)/a,Bamp,cmap='jet',norm=LogNorm(vmin=Bamp.min(), vmax=Bamp.max()))
cp=ax2.pcolor((zs-0.5*dzs)/a,(ys-0.5*dys)/a,Bamp,cmap='jet',vmin=Bamp.min(), vmax=Bamp.max())
cbar=fig.colorbar(cp, ax=ax2)
cbar.ax.set_ylabel(r'Intensity $|B|$ (Gauss)', rotation=270,labelpad=15)
#
ax2.set_title( 'Golay Coil Field, x=0 plane (a=10 cm, I= 1 Amp)')
ax2.set_xlabel('z/a')
ax2.set_ylabel('y/a')
#
# # plot field lines on ax2
#Z,Y = np.meshgrid(zs,ys)
Bz,By = Bs[:,:,2], Bs[:,:,1]
ax2.streamplot(zs/a,ys/a,Bz,By,color='k',density=1)
# #ax2.quiver(X,Z,U,V)
plt.show()
| [
"matplotlib"
] |
7a99d0b4ea963a0182fb97de4ccb93ccb0dc3561 | Python | phamtienkha/StatisticalForecastingMethods | /HoltWintersDamp.py | UTF-8 | 7,788 | 2.953125 | 3 | [] | no_license | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import math
def sMAPE(y_true, y_pred):
assert len(y_true) == len(y_pred)
loss = 0
for i in range(len(y_true)):
loss += 200 * abs(y_true[i] - y_pred[i]) / (abs(y_true[i]) + abs(y_pred[i]))
return loss / len(y_true)
def MAPE(y_true, y_pred):
assert len(y_true) == len(y_pred)
loss = 0
for i in range(len(y_true)):
loss += 100 * abs(y_true[i] - y_pred[i]) / (abs(y_true[i])+1e-6)
return loss / len(y_true)
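# Worked example for the two error metrics above (define-only, never called):
# with y_true = [100, 200] and y_pred = [110, 180] each point is off by 10% of
# its actual value, so MAPE is ~10.0, while sMAPE, which scales each error by the
# sum of |actual| and |forecast| instead, comes out slightly higher at ~10.03.
def _metrics_example():
    y_true, y_pred = [100, 200], [110, 180]
    return sMAPE(y_true, y_pred), MAPE(y_true, y_pred)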
class HoltWintersDamp():
def fit(self, data, seasonal):
point_num = 10
alpha_list = [i / point_num for i in range(1, point_num)]
betaAst_list = [i / point_num for i in range(1, point_num)]
phi_list = [i / point_num for i in range(1, point_num)]
season_init_list = [0.1] * seasonal
error_best = 1e100
alpha_best = 0.1
betaAst_best = 0.1
gamma_best = 0.1
phi_best = 0.1
for alpha in alpha_list:
for betaAst in betaAst_list:
for phi in phi_list:
gamma_list = [i / point_num for i in range(1, int((1 - alpha) * point_num))]
for gamma in gamma_list:
pred_data = [data[0]]
level_list = [data[0]]
trend_list = [0]
season_list = [0.1]
for i in range(1, len(data)):
if i < seasonal:
level_cur = alpha * (data[i] / season_init_list[i]) + (1 - alpha) * (
level_list[i - 1] + phi * trend_list[i - 1])
level_list.append(level_cur)
trend_cur = betaAst * (level_list[i] - level_list[i - 1]) + (1 - betaAst) * phi * trend_list[
i - 1]
trend_list.append(trend_cur)
season_cur = gamma * (data[i] / (level_list[i - 1] + phi * trend_list[i - 1])) + (1 - gamma) * \
season_init_list[i]
season_list.append(season_cur)
pred_data_cur = (level_list[i] + phi * trend_list[i]) * season_init_list[i]
pred_data.append(pred_data_cur)
else:
                                level_cur = alpha * (data[i] / season_list[i - seasonal]) + (1 - alpha) * (level_list[i - 1] + phi * trend_list[i - 1])
                                level_list.append(level_cur)
                                trend_cur = betaAst * (level_list[i] - level_list[i - 1]) + (1 - betaAst) * phi * trend_list[i - 1]
                                trend_list.append(trend_cur)
                                season_cur = gamma * (data[i] / (level_list[i - 1] + phi * trend_list[i - 1])) + (1 - gamma) * season_list[i - seasonal]
season_list.append(season_cur)
pred_data_cur = (level_list[i] + phi * trend_list[i]) * season_list[i - seasonal]
pred_data.append(pred_data_cur)
error_cur = np.mean([(data[i] - pred_data[i]) ** 2 for i in range(len(data))])
# error_cur = MAPE(data, pred_data)
if error_cur < error_best:
error_best = error_cur
alpha_best = alpha
betaAst_best = betaAst
gamma_best = gamma
phi_best = phi
return [alpha_best, betaAst_best, gamma_best, phi_best]
def predict(self, data, seasonal, predict_len):
prediction = [0] * predict_len
[alpha_best, betaAst_best, gamma_best, phi_best] = self.fit(data, seasonal)
# print([alpha_best, betaAst_best, gamma_best, phi_best])
season_init_list = [0.1] * seasonal
pred_data = [data[0]]
level_list = [data[0]]
trend_list = [0.01]
season_list = [1]
for i in range(1, len(data)):
if i < seasonal:
# print('Data ', i, data[i])
level_cur = alpha_best * (data[i] / season_init_list[i]) + (1 - alpha_best) * (level_list[i - 1] + phi_best * trend_list[i - 1])
# print('Level ', i, level_cur)
level_list.append(level_cur)
trend_cur = betaAst_best * (level_list[i] - level_list[i - 1]) + (1 - betaAst_best) * phi_best * trend_list[i - 1]
# print('Trend ', i, trend_cur)
trend_list.append(trend_cur)
season_cur = gamma_best * (data[i] / (level_list[i - 1] + phi_best * trend_list[i - 1])) + (1 - gamma_best) * season_init_list[i]
# print('Season ', i, season_cur)
season_list.append(season_cur)
pred_data_cur = (level_list[i] + phi_best * trend_list[i]) * season_init_list[i]
pred_data.append(pred_data_cur)
else:
# print('Data ', i, data[i])
level_cur = alpha_best * (data[i] / season_list[i - seasonal]) + (1 - alpha_best) * (level_list[i - 1] + phi_best * trend_list[i - 1])
# print('Level ', i, level_cur)
level_list.append(level_cur)
trend_cur = betaAst_best * (level_list[i] - level_list[i - 1]) + (1 - betaAst_best) * phi_best * trend_list[i - 1]
# print('Trend ', i, trend_cur)
trend_list.append(trend_cur)
season_cur = gamma_best * (data[i] / (level_list[i - 1] + phi_best * trend_list[i - 1])) + (1 - gamma_best) * season_list[i - seasonal]
# print('Season ', i, season_cur)
season_list.append(season_cur)
pred_data_cur = (level_list[i] + phi_best * trend_list[i]) * season_list[i - seasonal]
pred_data.append(pred_data_cur)
for i in range(predict_len):
season_list_last = season_list[-seasonal:]
remainder = i % seasonal
prediction[i] = (level_list[-1] + np.sum([phi_best ** (j + 1) for j in range(predict_len)]) * trend_list[-1]) * season_list_last[remainder]
return prediction, level_list, trend_list, season_list, pred_data, [alpha_best, betaAst_best, gamma_best, phi_best]
if __name__ == '__main__':
data = pd.read_csv('train-bandwidth.csv')
data_cur = data[data.SERVER_NAME == 'SERVER_ZONE01_002']
for i in range(2, data_cur.shape[1]):
if math.isnan(data_cur.iloc[0, i]):
data_cur.iloc[0, i] = data_cur.iloc[0, i-1]
# print(data_cur.shape[1])
# print(data_cur.dropna(axis=1).shape[1])
# print(data_cur)
predict_len = 24
seasonal = 24
data_cur = data_cur.values.tolist()[0]
train = data_cur[2:-predict_len]
test = data_cur[-predict_len:]
model = HoltWintersDamp()
prediction, _, _, _, _, _ = model.predict(train, seasonal, predict_len)
print(test)
print(prediction)
print(sMAPE(test, prediction))
print(MAPE(test, prediction))
num_points = 48
plt.plot(range(len(data_cur[-num_points:])), data_cur[-num_points:])
plt.plot(range(len(data_cur[-num_points:]) - predict_len, len(data_cur[-num_points:])), prediction)
# plt.title('DepID: ' + str(dep_id) + '\n sMAPE: ' + str(sMAPE(test, prediction)) + '%')
plt.show()
| [
"matplotlib"
] |
847fe281812f0c82d84912aa3f70fca8e0b8fb51 | Python | georgeliu233/TP_DM | /plt_log.py | UTF-8 | 1,624 | 2.6875 | 3 | [] | no_license | import json
import matplotlib.pyplot as plt
import numpy as np
def smooth(scalar,weight=0.85):
last = scalar[0]
smoothed = []
for point in scalar:
smoothed_val = last * weight + (1 - weight) * point
smoothed.append(smoothed_val)
last = smoothed_val
return smoothed
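# smooth() above is an exponential moving average: s[i] = w*s[i-1] + (1-w)*x[i],
# seeded with x[0], so e.g. smooth([0, 1, 1], weight=0.5) gives [0.0, 0.5, 0.75].
# Tiny define-only check sketch:
def _smooth_example():
    assert smooth([0, 1, 1], weight=0.5) == [0.0, 0.5, 0.75]
    return smooth([0, 1, 1], weight=0.5)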
def plot_comp(weight=0.99):
abs_path = '/home/haochen/TPDM_transformer/'
# json_list = [
# "log_loop_fusioned",
# 'log_loop_state',
# 'log_loop_cnn'
# ]
json_list = ['ppo_neighbor']
# json_list = ['log_ppo']
data_list = []
for path in json_list:
with open(abs_path+path+'.json','r',encoding='utf-8') as reader:
r,t = json.load(reader)
data_list.append([r,t])
plt.figure()
for data in data_list:
plt.plot(data[1][:],smooth(data[0][:],weight))
plt.savefig('/home/haochen/TPDM_transformer/res_ppo_9.png')
def plot_comp_test(weight=0.5):
abs_path = '/home/haochen/TPDM_transformer/'
# json_list = [
# "log_loop_fusioned",
# 'log_loop_state',
# 'log_loop_cnn'
# ]
json_list = ['log_test_ppo','log_test_ppo_2']
# json_list = ['log_ppo']
data_list = []
for path in json_list:
with open(abs_path+path+'.json','r',encoding='utf-8') as reader:
_,t,_ = json.load(reader)
data_list.append([t,np.linspace(5000,5000*len(t),len(t))])
plt.figure()
for data in data_list:
plt.plot(data[1][:],smooth(data[0][:],weight))
plt.savefig('/home/haochen/TPDM_transformer/res_ppo_9_test.png')
plot_comp()
# plot_comp_test() | [
"matplotlib"
] |
734c0269e72deaf84ea153ef7dc7d4dff170163b | Python | elishatam/python-projects | /Practice/plot.py | UTF-8 | 2,470 | 3.15625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
import matplotlib.pyplot as plt
import numpy as np
def plot_color_curves(ax, lvl, X, Y, Z, line_type=None):
if line_type is None:
ax.plot(lvl, X, 'r')
ax.plot(lvl, Y, 'g')
ax.plot(lvl, Z, 'b')
else:
ax.plot(lvl, X, 'r'+line_type)
ax.plot(lvl, Y, 'g'+line_type)
ax.plot(lvl, Z, 'b'+line_type)
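# Quick way to exercise plot_color_curves with synthetic data before pointing it
# at real measurement files (define-only sketch; the scaling factors are arbitrary):
def _demo_color_curves():
    lvl = np.arange(0, 256, 8)
    fig, ax = plt.subplots()
    plot_color_curves(ax, lvl, lvl * 0.9, lvl * 1.0, lvl * 1.1, line_type='--')
    plt.show()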
# loading data
file_path = 'data/blue_tone_EVT2-H34.txt'
blue = np.loadtxt(file_path, delimiter="\t", skiprows=1)
red = np.loadtxt(file_path.replace('blue', 'red'), delimiter="\t", skiprows=1)
green = np.loadtxt(file_path.replace('blue', 'green'), delimiter="\t", skiprows=1)
# separating columns
red_lvl = red[:, 0]
red_X = red[:, 3]
red_Y = red[:, 4]
red_Z = red[:, 5]
blue_lvl = blue[:, 2]
blue_X = blue[:, 3]
blue_Y = blue[:, 4]
blue_Z = blue[:, 5]
green_lvl = green[:, 1]
green_X = green[:, 3]
green_Y = green[:, 4]
green_Z = green[:, 5]
# # plotting raw data
# f = plt.figure()
# ax = f.add_subplot(131)
# plot_color_curves(ax, red_lvl, red_X, red_Y, red_Z)
# ax = f.add_subplot(132)
# plot_color_curves(ax, blue_lvl, blue_X, blue_Y, blue_Z)
# ax = f.add_subplot(133)
# plot_color_curves(ax, green_lvl, green_X, green_Y, green_Z)
# calculate normalized luminance
L_r = red_Y / np.max(red_Y)
L_g = green_Y / np.max(green_Y)
L_b = blue_Y / np.max(blue_Y)
# plotting normalized luminance
f = plt.figure()
ax = f.add_subplot(111)
plot_color_curves(ax, red_lvl, L_r, L_g, L_b)
plt.legend(['red lum', 'green lum', 'blue lum'])
### EVT1
file_path = 'data/blue_tone_EVT1-H34.txt'
blue2 = np.loadtxt(file_path, delimiter="\t", skiprows=1)
green2 = np.loadtxt(file_path.replace('blue', 'green'), delimiter="\t", skiprows=1)
red2 = np.loadtxt(file_path.replace('blue', 'red'), delimiter="\t", skiprows=1)
# separating columns
red_lvl = red2[:, 0]
red_X = red2[:, 3]
red_Y = red2[:, 4]
red_Z = red2[:, 5]
blue_lvl = blue2[:, 2]
blue_X = blue2[:, 3]
blue_Y = blue2[:, 4]
blue_Z = blue2[:, 5]
green_lvl = green2[:, 1]
green_X = green2[:, 3]
green_Y = green2[:, 4]
green_Z = green2[:, 5]
# calculate normalized luminance
L_r = red_Y / np.max(red_Y)
L_g = green_Y / np.max(green_Y)
L_b = blue_Y / np.max(blue_Y)
# plotting onto same figure as before, new axis
# ax2 = f.add_subplot(122)
plot_color_curves(ax, red_lvl, L_r, L_g, L_b, line_type='--')
plt.legend(['red lum', 'green lum', 'blue lum'])
plt.show()
# input("press enter to end") | [
"matplotlib"
] |
49b8b1cdb7b6740b3dc893676d2292f376861f47 | Python | JakeNTech/Fancy_Python_Graphs | /main.py | UTF-8 | 5,282 | 3.125 | 3 | [] | no_license | # TTDS - 2021
# JakeNTech
# main.py
import pandas
import numpy as np
import matplotlib.pyplot as plt
import argparse
import seaborn as sns
def getargs():
parser = argparse.ArgumentParser()
parser.add_argument("-f", "--file", dest="input_file", help="Name of the csv file to use for points", metavar="<filename>",required=True)
parser.add_argument("-t","--title",dest="graph_title",help="The graph will have a title...this is how to specify it",metavar="<integer>",default="A Amazing Graph")
parser.add_argument("-a","--average",dest="average",help="Plot Average line",action="store_true")
parser.add_argument("-b","--best_fit",dest="bestfit",help="Calculate and plot average",action="store_true")
parser.add_argument("-m","--heat_map",dest="heatmap",help="Plot data onto a heat map",action="store_true")
parser.add_argument("-p","--pair_plot",dest="pairplot",help="sns pairplot",action="store_true")
return parser.parse_args()
def readCSV(filename):
return pandas.read_csv(filename, delimiter=',')
def line_with_best_fit(data, title_text,x_axis_data,y_axis_data):
f = plt.figure()
f.subplots_adjust(right=0.8)
plt.title(title_text, color='black')
x = np.array(data[x_axis_data])
y = np.array(data[y_axis_data])
m, b = np.polyfit(x, y, 1)
plt.plot(x, y)
plt.plot(x, m*x + b) # Plot line of best fit
plt.savefig('./Graphs/output.png')
#This function is custom to the ./Data/cars.csv file!
def many_lines_with_best_fit(data):
colum_names = list(data.columns)
for i in range(1,len(colum_names)):
#print(colum_names[i])
f = plt.figure()
f.subplots_adjust(right=0.8)
text = "Cars in "+colum_names[i]+" since '94"
plt.title(text, color='black')
x = np.array(data["Year"])
y = np.array(data[colum_names[i]])
m, b = np.polyfit(x, y, 1)
plt.plot(x, m*x + b) # Plot line of best fit
plt.savefig("./Graphs/"+colum_names[i]+".png")
def check_and_remove_Total_Average(data):
colum_names = list(data.columns)
for i in range(1,len(colum_names)):
if colum_names[i].lower()=="total" or colum_names[i].lower()=="average":
data = data.drop(colum_names[i],axis=1)
return data
#Calculate and add an average column to given data
def add_average_col(data):
average = []
for i in range(0,len(data.loc[:,list(data.columns)[0]])):
col = list(data.loc[i,:])
total = 0
for j in range(1,len(col)):
total = total + col[j]
average.append(total/(len(col)-1))
data["Average"] = average
return(data)
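# Example of what add_average_col produces (define-only sketch): a frame with
# columns [Year, A, B] such as [[2000, 2, 4], [2001, 3, 5]] gains an "Average"
# column of [3.0, 4.0], i.e. the row mean of every column except the first.
def _average_col_demo():
    demo = pandas.DataFrame({"Year": [2000, 2001], "A": [2, 3], "B": [4, 5]})
    return add_average_col(demo)["Average"].tolist()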
#Plot Graph
def test(data, title_text,best_fit, average):
# Start Graph Plot
f = plt.figure()
    f.subplots_adjust(right=0.8)
colum_names = list(data.columns)
for i in range(1,len(colum_names)):
#print(colum_names[i])
if colum_names[i]=="Total" or colum_names[i]=="Average":
break
plt.plot(np.array(data[colum_names[0]]), np.array(data[colum_names[i]]), label=colum_names[i])
# Calculate the line of best fit from the average column.
# This probably isn't the correct way to do it but the only way I can think of to do it over multiple lines/data
# If you can't tell I am also rubbish at maths
# It looks about right so....
if best_fit==True:
m, b = np.polyfit(np.array(data[colum_names[0]]), np.array(data[colum_names[len(colum_names)-1]]), 1)
plt.plot(np.array(data[colum_names[0]]), m*np.array(data[colum_names[0]]) + b, label="Line of best fit",c="red")
if average==True:
plt.plot(np.array(data[colum_names[0]]), np.array(data[colum_names[len(colum_names)-1]]),label="Average", c="red")
#Add lables, legend and save to file
plt.xlabel(colum_names[0])
plt.ylabel("Number")
plt.title(title_text, color='black')
plt.grid()
plt.savefig('./Graphs/output.png',dpi=300, format='png', bbox_extra_artists=(plt.legend(title="Legend", loc='center right', bbox_to_anchor=(1.3, 0.5),fontsize='x-small'),),bbox_inches='tight')
#Plot heat map
def heat_map(data):
plt.imshow(data.corr(), cmap='cool', interpolation='nearest')
sns.heatmap(data.corr(),annot=True,cmap="coolwarm")
#g = sns.pairplot(data, diag_kind="kde")
#g.map_lower(sns.kdeplot, levels=4, color=".2")
plt.savefig('./Graphs/heatmap.png', format='png')
#The other task that I dont understand
def pair_plot(data):
sns.pairplot(data,kind="kde")
plt.tight_layout()
plt.savefig('./Graphs/pairplot.png', format='png')
if __name__ == "__main__":
# Now use command line arguments to make it work
args = getargs()
data = readCSV(args.input_file)
data = check_and_remove_Total_Average(data)
# Add an average column to the data from the CSV file, dont need to work them out if the user doesn't want the bestfit or average
if args.bestfit == True or args.average == True:
data = add_average_col(data)
if args.heatmap == True:
heat_map(data)
if args.pairplot == True:
pair_plot(data)
else:
#line_with_best_fit(data,"Total Cars since '94","Year","Total")
#many_lines_with_best_fit(data)
        test(data,args.graph_title,args.bestfit, args.average)
"matplotlib",
"seaborn"
] |
6fff81f2855eb107851bfdcdd3dc7a4699ee5733 | Python | dajiva/PracticasComputacionI | /SegundoExamenParcial/NewtonMethod.py | UTF-8 | 4,722 | 3 | 3 | [] | no_license | from sympy import *
import math
import numpy as np
import matplotlib.pyplot as plt
class GaussJordan:
def IntercambiarRenglones(self, matriz,x,r):
var = len(matriz)
temp = []
for i in range(var + 1):
temp.append(matriz[x][i])
for i in range(var + 1):
matriz[x][i] = matriz[x+r][i]
matriz[x+r][i] = temp[i]
def doGaussJordan(self, matriz, n):
#Gauss
p = 0
for x in range(n-1):
renglon = 1
while matriz[x][x] == 0:
if renglon == n:
return math.nan
self.IntercambiarRenglones(matriz, x,renglon)
renglon += 1
for i in range(x+1, n):
p = matriz[i][x]/matriz[x][x]
matriz[i][x] = 0
for j in range(x+1, n+1):
matriz[i][j] -= p*matriz[x][j]
#Jordan
x = n -1
while x > 0:
for i in range(x):
p = matriz[i][x]/matriz[x][x]
matriz[i][x] = 0
matriz[i][-1] -= p*matriz[x][-1]
x -= 1
for i in range(n):
matriz[i][-1] /= matriz[i][i]
matriz[i][i]= 1
return matriz
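# Quick illustration (values are hypothetical, not tied to the exercise): for the
# system 2x + y = 5, x - y = 1 the augmented matrix is [[2, 1, 5], [1, -1, 1]], and
#   GaussJordan().doGaussJordan([[2.0, 1.0, 5.0], [1.0, -1.0, 1.0]], 2)
# leaves the solution x = 2, y = 1 in the last column.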
class Jacobian:
def generateJacobianMat(self, X, F, n):
        # Build the symbolic Jacobian matrix of F with respect to X
J = list()
for i in range(n):
tmp = list()
for j in range(n):
tmp.append(diff(F[i], X[j]))
J.append(tmp)
return J
    def generateHessianMat(self, X, F, n):
        # Hessian of each F[i], i.e. the Jacobian of row i of the Jacobian (helper, unused below)
        J = self.generateJacobianMat(X, F, n)
        H = [self.generateJacobianMat(X, J[i], n) for i in range(n)]
        return H
class NewtonSENL():
def symbolsVector(self, n, symb):
X = list()
for i in range(n):
X.append(symbols(symb + str(i+1)))
self.Xsymb = X
return X
def variableAssignation(self, X0, n):
        # Build a dictionary assigning each symbol its current value
Xsymb = self.Xsymb
varAssign = dict()
for i in range(n):
varAssign[Xsymb[i]] = X0[i]
return varAssign
def evaluateF(self, F, X0, n):
F1 = np.ndarray(n)
VA = self.variableAssignation(X0, n)
        # Evaluate each function at X0
for i in range(n):
F1[i] = F[i].subs(VA)
return F1
def evaluateJ(self, J, X0, n):
JNum = np.zeros((n, n))
VA = self.variableAssignation(X0, n)
for i in range(n):
for j in range(n):
JNum[i][j] = (J[i][j].subs(VA))
return JNum
def calculateY(self, F, J, n):
        # Build the augmented matrix [J | -F]
AM = np.concatenate((J, F.T * -1), axis = 1)
        # Solve for y via Gauss-Jordan elimination
GJ = GaussJordan()
y = GJ.doGaussJordan(AM, n)
Y = list()
for i in range(n):
Y.append(y[i][-1])
return Y
def getError(self, X1, X0, n):
suma = 0
for i in range(n):
suma += (X1[i] - X0[i])**2
return sqrt(suma)
def runNewtonMethod(self, tol, maxIter, X0, symb, F):
        # Error and iteration counter
error = np.inf
currIter = 0
self.symbolsVector(n, symb)
        # Build the symbolic Jacobian matrix
jac = Jacobian()
J = jac.generateJacobianMat(self.Xsymb, F, n)
while(error > tol and currIter < maxIter):
Fi = np.array([self.evaluateF(F, X0, n)])
Ji = self.evaluateJ(J, X0, n)
yi = self.calculateY(Fi, Ji, n)
X1 = X0 + yi
error = self.getError(X1, X0, n)
X0 = X1
currIter += 1
print("\n Error: ", error)
        # Result vector
return X0
# Variable definitions
n = 3
tol = float(input("Tolerancia: "))
maxIter = float(input("Número máximo de iteraciones: "))
# Object definitions
newton = NewtonSENL()
# Definition of the system of equations
symb = 'x'
Xsymb = newton.symbolsVector(n, symb)
F = list()
F.append(33/3*Xsymb[0] - sin(Xsymb[1]*Xsymb[2]) -0.7)
F.append(52/5*Xsymb[0]**4 - 81*(Xsymb[1] + 0.1)**2 + sin(Xsymb[2]) + 1.06)
F.append(np.e**(-Xsymb[0]**2*Xsymb[1]) + 3* Xsymb[1]**2 + 43/3*Xsymb[2] - ((10 * np.pi) -3) /3)
# Initial guess vector
X0 = np.ndarray(n)
X0[0] = 0.1
X0[1] = 0.1
X0[2] = -0.1
X = newton.runNewtonMethod(tol, maxIter, X0, symb, F)
print("Resultados:\n x = ", X[0], "\n y = ", X[1], "\n z = ", X[2])
| [
"matplotlib"
] |
34f8bd80aa250019a559d3850dea78a4bd61904f | Python | Wesley-yang/yousan.ai | /computer_vision/projects/classification/lasagne/simpleconv3/train.py | UTF-8 | 3,220 | 2.609375 | 3 | [] | no_license | #coding:utf8
# Copyright 2019 longpeng2008. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# If you find any problem,please contact us
#
# [email protected]
#
# or create issues
# =============================================================================
import lasagne
import theano
import theano.tensor as T
import sys
import numpy as np
import matplotlib.pyplot as plt
from net import simpleconv3
from dataset import Dataset
input_var = T.tensor4('X')
target_var = T.ivector('y')
network = simpleconv3(input_var)
prediction = lasagne.layers.get_output(network)
loss = lasagne.objectives.categorical_crossentropy(prediction, target_var)
loss = loss.mean()
#loss = loss.mean() + 1e-4 * lasagne.regularization.regularize_network_params(
# network, lasagne.regularization.l2)
test_prediction = lasagne.layers.get_output(network, deterministic=True)
test_loss = lasagne.objectives.categorical_crossentropy(test_prediction, target_var)
test_loss = test_loss.mean()
test_acc = T.mean(T.eq(T.argmax(test_prediction, axis=1), target_var), dtype=theano.config.floatX)
# create parameter update expressions
params = lasagne.layers.get_all_params(network, trainable=True)
updates = lasagne.updates.nesterov_momentum(loss, params, learning_rate=0.001,
momentum=0.9)
# compile train function that updates parameters and returns train loss
train_fn = theano.function([input_var, target_var], loss, updates=updates)
val_fn = theano.function([input_var, target_var], [test_loss, test_acc])
predict_fn = theano.function([input_var], T.argmax(test_prediction, axis=1))
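# train_fn consumes one minibatch (a 4-D image tensor plus integer class labels)
# and returns the training loss; val_fn additionally returns accuracy, and
# predict_fn maps images to predicted class indices.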
# prepare mydataset
mydataset = Dataset(sys.argv[1],48,48)
train_losses = []
val_losses = []
val_accs = []
num_of_epoches = 100
for epoch in range(num_of_epoches):
train_loss = 0
train_data = mydataset.iterate_minibatches(mydataset.imagetraindatas,mydataset.labeltraindatas,16,True)
train_batches = 0
for input_batch, target_batch in train_data:
train_loss += train_fn(input_batch, target_batch)
train_batches += 1
print("Epoch %d: Train Loss %g" % (epoch + 1, train_loss / train_batches))
train_losses.append(train_loss / train_batches)
val_loss = 0
val_acc = 0
val_batches = 0
val_data = mydataset.iterate_minibatches(mydataset.imagevaldatas,mydataset.labelvaldatas,mydataset.getvallen(),False)
for val_batch,target_batch in val_data:
tmp_val_loss, tmp_val_acc = val_fn(val_batch, target_batch)
val_loss += tmp_val_loss
val_acc += tmp_val_acc
val_batches += 1
test_prediction = lasagne.layers.get_output(network, deterministic=True)
val_predict = predict_fn(val_batch)
print("Epoch %d: Val Loss %g" % (epoch + 1, val_loss / val_batches))
print("Epoch %d: Val Acc %g" % (epoch + 1, val_acc / val_batches))
val_losses.append(val_loss / val_batches)
val_accs.append(val_acc / val_batches)
x = list(range(num_of_epoches))
plt.plot(x,train_losses,'r-o')
plt.plot(x,val_losses,'k-o')
plt.legend(('train','val'))
plt.title('train_val_loss')
plt.savefig('loss.png')
plt.show()
plt.plot(x,val_accs)
plt.savefig('acc.png')
plt.show()
| [
"matplotlib"
] |
f35902eced6ad479d0cb5a49918bb64602ffc539 | Python | liangyy75/ProgrammingInPython | /tsp/tsp_answer.py | UTF-8 | 22,844 | 2.640625 | 3 | [] | no_license | # coding: UTF-8
import copy
import time
import numpy as np
# import matplotlib
# matplotlib.use("Agg")
import matplotlib.pyplot as plt
# import threading
from multiprocessing import Process
from matplotlib import animation
# http://www.cnblogs.com/zealousness/p/9753282.html
# https://blog.csdn.net/u012513972/article/details/78389863
# https://www.jianshu.com/p/ae5157c26af9
# https://blog.csdn.net/qq_38788128/article/details/80804661
# https://blog.csdn.net/ty_0930/article/details/52119075
# https://blog.csdn.net/icurious/article/details/81709797
# https://www.cnblogs.com/apexchu/p/5015961.html
# https://www.cnblogs.com/zhoujie/p/nodejs2.html
# https://www.iwr.uni-heidelberg.de/groups/comopt/software/TSPLIB95/
# https://blog.csdn.net/u012750702/article/details/54563515
class Solution:
def __init__(self, filename="d198.tsp", modify_flag=False, ga_flag=False, sa_flag=False, sa_flag2=False, num=1):
file = open(filename, "r")
lines = file.read().splitlines()
file.close()
        # Read the number of cities and set up storage; cities are numbered 0 to city_num - 1 by default
city_num = np.int(lines.pop(0))
self.cities = np.zeros([city_num, 2], dtype=np.float)
self.city_states = np.zeros([city_num, city_num], dtype=np.float)
        # City coordinates
for i in range(city_num):
self.cities[i] = np.array(lines.pop(0).split(" ")).astype(np.float)
        # Pairwise city distance matrix
for i in range(city_num):
x1, y1 = self.cities[i, 0], self.cities[i, 1]
for j in range(city_num):
x2, y2 = self.cities[j, 0], self.cities[j, 1]
self.city_states[i, j] = np.sqrt(np.power(x1 - x2, 2) + np.power(y1 - y2, 2))
        # Results of every test case, per algorithm
results = {"ga_result": list(), "modify_result": list(), "sa_result": list(), "sa_result2": list()}
for i in range(num):
start = time.time()
            # Modified-circle improvement algorithm
if modify_flag:
results["modify_result"].append(self.modify_circle(i))
            # Genetic algorithm
if ga_flag:
results["ga_result"].append(self.ga_answer(50, 50, 0.2, 1, 10000, i))
            # Simulated annealing
if sa_flag:
results["sa_result"].append(self.sa_answer(i))
if sa_flag2:
results["sa_result2"].append(self.sa_answer2(i))
print("消耗时间", time.time() - start)
# print(results)
if modify_flag:
_results = results["modify_result"]
for i in range(len(_results)):
print("修改圈算法结果{0}".format(i), _results[i])
print("均值", np.average(_results))
if ga_flag:
_results = results["ga_result"]
for i in range(len(_results)):
print("遗传算法结果{0}".format(i), _results[i])
print("均值", np.average(_results))
if sa_flag:
_results = results["sa_result"]
for i in range(len(_results)):
print("模拟退火算法结果{0}".format(i), _results[i])
print("均值", np.average(_results))
if sa_flag2:
_results = results["sa_result2"]
for i in range(len(_results)):
print("模拟退火算法结果{0}".format(i), _results[i])
print("均值", np.average(_results))
    # Modified-circle improvement algorithm
def modify_circle(self, tag):
city_num = self.city_states.shape[0]
initial = list(range(city_num))
initial = np.array(initial + [0])
all_result = [initial]
# print(np.sum([self.city_states[initial[i], initial[i + 1]] for i in range(city_num)]))
for k in range(city_num):
            flag = 0 # exit flag
for m in range(city_num - 2):
initial_m, initial_m2 = initial[m], initial[m + 1]
for n in range(m + 2, city_num):
initial_n, initial_n2 = initial[n], initial[n + 1]
if self.city_states[initial_m, initial_n] + self.city_states[initial_m2, initial_n2] < \
self.city_states[initial_m, initial_m2] + self.city_states[initial_n, initial_n2]:
initial[m + 1:n + 1] = initial[n:m:-1]
all_result.append(initial.copy())
flag += 1
if flag == 0:
break
cost = np.sum([self.city_states[initial[i], initial[i + 1]] for i in range(city_num)])
# show_thread = threading.Thread(target=self.show, args=(all_initials,))
print("results长度", len(all_result))
show_thread = Process(target=self.show, args=(all_result, tag, ))
show_thread.start()
# self.show(all_result)
return cost
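    # modify_circle applies the classic 2-opt move: edges (m, m+1) and (n, n+1)
    # are replaced by (m, n) and (m+1, n+1), i.e. the segment between them is
    # reversed whenever that shortens the tour.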
    # Genetic algorithm: population size, offspring size, mutation prob., crossover prob., number of generations
def ga_answer(self, total_num, son_num, variation, cross, ga_num, tag):
        # Initial solutions from the modified-circle scheme (its improvement loop is commented out below)
totals = []
totals_cost = []
city_num = self.city_states.shape[0]
        num1 = total_num # modified-circle ones; the cost just won't come down???
        num2 = 0 # tours in plain 0-197 order
        num3 = total_num - num1 - num2 # greedy tours
for i in range(num1):
temp = list(range(1, city_num))
np.random.shuffle(temp)
temp = np.array([0] + temp + [0])
# for k in range(city_num):
# flag = 0 # 退出标志
# for m in range(city_num - 2):
# initial_m, initial_m2 = temp[m], temp[m + 1]
# for n in range(m + 2, city_num):
# initial_n, initial_n2 = temp[n], temp[n + 1]
# if self.city_states[initial_m, initial_n] + self.city_states[initial_m2, initial_n2] < \
# self.city_states[initial_m, initial_m2] + self.city_states[initial_n, initial_n2]:
# temp[m + 1:n + 1] = temp[n:m:-1]
# flag += 1
# if flag == 0:
# break
totals.append(temp)
cost = np.sum([self.city_states[temp[i], temp[i + 1]] for i in range(city_num)])
totals_cost.append(cost)
        # Initial solution in plain 0-197 order
templet = np.array([0] + list(range(1, city_num)) + [0])
templet_cost = np.sum([self.city_states[templet[i], templet[i + 1]] for i in range(city_num)])
for i in range(num2):
totals.append(copy.deepcopy(templet))
totals_cost.append(templet_cost)
        # Greedy initial solution
greed_path = [0]
rest = list(range(1, city_num))
while len(greed_path) < city_num:
node1 = greed_path[len(greed_path) - 1]
min_cost = self.city_states[node1][rest[0]]
min_index = 0
for i in range(1, len(rest)):
now_cost = self.city_states[node1][rest[i]]
if min_cost > now_cost:
min_cost = now_cost
min_index = i
greed_path.append(rest.pop(min_index))
greed_path = np.array(greed_path + [0])
greed_cost = np.sum([self.city_states[greed_path[i], greed_path[i + 1]] for i in range(city_num)])
for i in range(num3):
totals.append(copy.deepcopy(greed_path))
totals_cost.append(greed_cost)
        # Genetic algorithm main loop
best_result = np.min(totals_cost)
all_result = [totals[np.where(totals_cost==best_result)[0][0]]]
# print(best_result)
for _ga_num in range(ga_num):
best_index = np.where(totals_cost==best_result)[0]
if best_index.shape[0] > 0.3:
all_result.append(totals[best_index[0]])
sons = []
sons_cost = []
            # Roulette-wheel selection
probabilities = np.sum(totals_cost) / totals_cost
probabilities = probabilities / np.sum(probabilities)
for _ in range(0, son_num, 2):
[father_index, mother_index] = np.random.choice(np.arange(len(totals)), size=2, replace=False, p=probabilities)
father, mother = totals[father_index], totals[mother_index]
                # Crossover
if cross <= np.random.rand():
continue
probability = np.random.rand()
if probability < 0.5:
                    # Resolve duplicate-city conflicts
[start, end] = np.random.choice(list(range(1, city_num)), size=2, replace=False)
while start >= end:
[start, end] = np.random.choice(list(range(1, city_num)), size=2, replace=False)
_conflicts = {father[i]: mother[i] for i in range(start, end)}
conflict_keys = _conflicts.keys()
for key in conflict_keys:
temp = _conflicts[key]
while temp in conflict_keys:
_conflicts[key] = _conflicts[temp]
_conflicts[temp] = 0
temp = _conflicts[key]
conflicts = dict()
for key, value in _conflicts.items():
if value > 0:
conflicts[key] = value
                    # Perform the actual segment swap
son1, son2 = father.copy(), mother.copy()
son1[start:end], son2[start:end] = son2[start:end].copy(), son1[start:end].copy()
# _son1, _son2 = son1.copy(), son2.copy()
                    # Fix remaining duplicates outside the swapped segment
for key, value in conflicts.items():
for index in np.where(son1 == value)[0]:
if index >= end or index < start:
son1[index] = key
for index in np.where(son2 == key)[0]:
if index >= end or index < start:
son2[index] = value
else:
index = np.random.randint(low=1, high=city_num)
son1, son2 = father.copy(), mother.copy()
son1[np.where(son1 == son2[index])[0][0]] = son1[index]
son2[np.where(son2 == son1[index])[0][0]] = son2[index]
son1[index], son2[index] = son2[index], son1[index]
sons.extend([son1, son2])
sons_cost.append(np.sum([self.city_states[son1[i], son1[i + 1]] for i in range(city_num)]))
sons_cost.append(np.sum([self.city_states[son2[i], son2[i + 1]] for i in range(city_num)]))
best_result = np.min([best_result] + sons_cost)
            # Mutation: swap, insertion, or segment reversal
sons_cost.extend(totals_cost)
sons.extend(totals)
son_range = range(len(sons))
for i in son_range:
if np.random.random() <= variation:
son = sons[i].copy()
[index1, index2] = np.random.choice(list(range(1, city_num)), size=2, replace=False)
if index1 > index2:
index1, index2 = index2, index1
# son[index1], son[index2] = son[index2], son[index1]
# son[index1: index2] = son[index2 - 1: index1 - 1: -1]
probability = np.random.rand() * 5
if 0 <= probability < 1:
son[index1], son[index2] = son[index2], son[index1]
                    # Insert the city at index2 before index1
elif 1 <= probability < 2:
temp = son[index2]
for j in range(index2 - index1):
son[index2 - j] = sons[i][index2 - j - 1]
son[index1] = temp
                    # Reverse the cities between index1 and index2
else:
for j in range(index2 - index1 + 1):
son[index1 + j] = sons[i][index2 - j]
son_cost = np.sum([self.city_states[son[i], son[i + 1]] for i in range(city_num)])
if sons_cost[i] > son_cost or _ga_num > ga_num * 0.5:
sons_cost[i] = son_cost
sons[i] = son
best_result = np.min([best_result] + sons_cost)
            # Survival of the fittest
# temp_results = {sons_cost[i]: sons[i] for i in son_range}
# temp_results = sorted(temp_results.items(), key=lambda t: t[0])
temp_results = [(sons_cost[i], sons[i]) for i in son_range]
temp_results = sorted(temp_results, key=lambda t: t[0])
totals = [temp_results[i][1] for i in range(total_num)]
totals_cost = [temp_results[i][0] for i in range(total_num)]
print("遗传次数", _ga_num, "当前局部最优解", min(totals_cost), "当前全局最优解", best_result)
        # Pick the overall best
best_result = np.min([best_result] + totals_cost)
print(best_result)
print("results长度", len(all_result))
show_thread = Process(target=self.show, args=(all_result, tag,))
show_thread.start()
return best_result
    # Simulated annealing -- Liao Zhiyong
def sa_answer2(self, tag):
        # Build a greedy initial tour first
city_num = self.city_states.shape[0]
greed_path = [0]
rest = list(range(1, city_num))
while len(greed_path) < city_num:
node1 = greed_path[len(greed_path) - 1]
min_cost = self.city_states[node1][rest[0]]
min_index = 0
for i in range(1, len(rest)):
now_cost = self.city_states[node1][rest[i]]
if min_cost > now_cost:
min_cost = now_cost
min_index = i
greed_path.append(rest.pop(min_index))
greed_path.append(0)
all_result = [greed_path]
cost = np.sum([self.city_states[greed_path[i], greed_path[i + 1]] for i in range(city_num)])
best_cost = cost
current_temperature = 1
while current_temperature > 0.00001:
nochange = 0
for _ in range(300):
index1, index2 = np.random.randint(1, city_num, size=2).tolist()
while index1 == index2:
index2 = np.random.randint(1, city_num)
if index2 < index1:
index1, index2 = index2, index1
path1, path2, path3 = copy.deepcopy(greed_path), copy.deepcopy(greed_path), copy.deepcopy(greed_path)
probability = np.random.rand() * 5
                # Swap the cities at index1 and index2
if 0 <= probability < 1:
path1[index1], path1[index2] = path1[index2], path1[index1]
cost1 = np.sum([self.city_states[path1[i], path1[i + 1]] for i in range(city_num)])
result_path = path1
result_cost = cost1
                # Insert the city at index2 before index1
elif 1 <= probability < 2:
# path2.insert(index2, path2.pop(index1))
temp = path2[index2]
for i in range(index2 - index1):
path2[index2 - i] = greed_path[index2 - i - 1]
path2[index1] = temp
cost2 = np.sum([self.city_states[path2[i], path2[i + 1]] for i in range(city_num)])
result_path = path2
result_cost = cost2
                # Reverse the cities between index1 and index2
else:
# path3[index1: index2] = path3[index2 - 1: index1 - 1: -1]
for i in range(index2 - index1 + 1):
path3[index1 + i] = greed_path[index2 - i]
cost3 = np.sum([self.city_states[path3[i], path3[i + 1]] for i in range(city_num)])
result_path = path3
result_cost = cost3
if result_cost < cost or np.exp(-1 * np.abs(cost - result_cost) / current_temperature) >= np.random.rand():
greed_path = result_path
cost = result_cost
if cost < best_cost:
best_path = copy.deepcopy(greed_path)
best_cost = cost
all_result.append(best_path)
nochange = 0
else:
nochange += 1
if nochange >= 90:
break
# print("当前局部最优", cost, "当前最优解", best_cost, "当前温度", current_temperature)
current_temperature *= 0.99
print(best_cost)
# print("results长度", len(all_result))
show_thread = Process(target=self.show, args=(all_result, tag, ))
show_thread.start()
return best_cost
    # Simulated annealing -- Liang Yuying
def sa_answer(self, tag):
        # Build the initial tour (plain sequential order here)
city_num = self.city_states.shape[0]
greed_path = [0]
rest = list(range(1, city_num))
greed_path.extend(rest)
greed_path.append(0)
all_result = [greed_path]
cost = np.sum([self.city_states[greed_path[i], greed_path[i + 1]] for i in range(city_num)])
best_cost = cost
# print(cost)
current_temperature = 100
while current_temperature > 0.1:
for _ in range(1000):
index1, index2 = np.random.randint(1, city_num, size=2).tolist()
while index1 == index2:
index2 = np.random.randint(1, city_num)
if index2 < index1:
index1, index2 = index2, index1
path1, path2, path3 = copy.deepcopy(greed_path), copy.deepcopy(greed_path), copy.deepcopy(greed_path)
                # Neighbor 1: swap the cities at index1 and index2
path1[index1], path1[index2] = path1[index2], path1[index1]
cost1 = np.sum([self.city_states[path1[i], path1[i + 1]] for i in range(city_num)])
result_path = path1
result_cost = cost1
                # Neighbor 2: insert the city at index2 before index1
# path2.insert(index1, path2.pop(index2))
temp = path2[index2]
for i in range(index2 - index1):
path2[index2 - i] = greed_path[index2 - i - 1]
path2[index1] = temp
cost2 = np.sum([self.city_states[path2[i], path2[i + 1]] for i in range(city_num)])
if result_cost > cost2:
result_path = path2
result_cost = cost2
                # Neighbor 3: reverse the cities between index1 and index2
# path3[index1: index2] = path3[index2 - 1: index1 - 1: -1]
for i in range(index2 - index1 + 1):
path3[index1 + i] = greed_path[index2 - i]
cost3 = np.sum([self.city_states[path3[i], path3[i + 1]] for i in range(city_num)])
if result_cost > cost3:
result_path = path3
result_cost = cost3
                # Keep the best neighbor
if result_cost < cost or np.exp(-1 * np.abs(cost - result_cost) / current_temperature) >= np.random.rand():
greed_path = result_path
cost = result_cost
if cost < best_cost:
best_path = copy.deepcopy(greed_path)
best_cost = cost
all_result.append(best_path)
current_temperature *= 0.95
# print("当前局部最优", cost, "当前最优解", best_cost, "当前温度", current_temperature)
print("模拟退火法结果{0}".format(tag), best_cost)
# print("results长度", len(all_result))
show_thread = Process(target=self.show, args=(all_result, tag, ))
show_thread.start()
return best_cost
    # Animated plot / save video
def show(self, all_result, tag):
# time.sleep(10)
len1 = len(all_result)
len2 = min(300, len1)
sampling = np.floor(np.linspace(0, len1 - 1, len2, endpoint=True)).astype(np.int)
figure, ax = plt.subplots()
ax.scatter(self.cities[:, 0], self.cities[:, 1])
_line = np.array([self.cities[i] for i in all_result[0]])
line, = ax.plot(_line[:, 0], _line[:, 1], color="r")
def init():
return line
def update(frame):
frame = all_result[frame]
_line2 = np.array([self.cities[i] for i in frame])
line.set_ydata(_line2[:, 1])
line.set_xdata(_line2[:, 0])
return line
anim = animation.FuncAnimation(fig=figure, func=update, init_func=init, interval=50, frames=sampling,
repeat=False)
# Set up formatting for the movie files
# writer = animation.writers['ffmpeg'](fps=15, metadata=dict(artist='Me'), bitrate=1800)
# anim.save("video{0}.mp4".format(tag), writer=writer)
# plt.ion()
# plt.pause(10)
# plt.close("all")
plt.title("figure{0}".format(tag))
plt.show()
    # Evaluate the length of a single tour
def calculate(self, _list):
result = np.sum([self.city_states[_list[i], _list[i + 1]] for i in range(self.city_states.shape[0])])
print(result)
self.show([_list], 0)
return result
if __name__ == "__main__":
continue_flag = True
while continue_flag:
print("这里有四个选择可以用于解TSP问题,你可以选择其中一个用于解答TSP问题,你也可以输入quit离开这里:\n"
" A. 修改圈算法\n"
" B. 遗传算法\n"
" C. 模拟退火法1\n"
" D. 模拟退火法2\n"
"请选择其中一个编号,并按照\"编号 样例个数\"的格式输入:\n")
command = input()
if command == "quit":
break
args = command.split()
print()
if args[0] == "A":
# Solution(args[1], modify_flag=True, num=int(args[2]))
Solution(modify_flag=True, num=int(args[1]))
if args[0] == "B":
# Solution(args[1], ga_flag=True, num=int(args[2]))
Solution(ga_flag=True, num=int(args[1]))
if args[0] == "C":
# Solution(args[1], sa_flag=True, num=int(args[2]))
Solution(sa_flag=True, num=int(args[1]))
if args[0] == "D":
# Solution(args[1], sa_flag2=True, num=int(args[2]))
Solution(sa_flag2=True, num=int(args[1]))
print()
| [
"matplotlib"
] |
8849a685aad1f89e071d0dd4371b494573ca2f90 | Python | ToscaMarmefelt/CompPhys | /PS4Q1.py | UTF-8 | 1,819 | 3.765625 | 4 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 18 14:24:57 2018
@author: Tosca
"""
""" Interpolation using Lagrange polynomial formula """
import numpy as np
import sympy as sp
import matplotlib.pyplot as plt
import itertools as it #To enable iterating through two ranges at once
"""Create data set"""
# Generate n x-values in a list
def xData(n):
x_list = []
for i in range(n):
x_list.append(10.0/n * i)
return x_list
#Define a simple function from which we will get our dataset
def fData(x_list):
f_list = []
for i in range(len(x_list)):
f_list.append( np.cos(x_list[i] * np.pi/5.0) ) # Arbitrarily chosen non-polynomial function
return f_list
"""Calculate the Lagrange polynomial of a given dataset"""
xi = sp.symbols('xi')
def lagrangePolynomial(x_list, f_list):
lPolynomial = 0
for i in range(len(x_list)):
product = 1
for j in it.chain(range(0,i), range(i+1, len(x_list))): # For all j != i
product = product * ( xi - x_list[j] ) / ( x_list[i] - x_list[j] )
lPolynomial += product * f_list[i]
return lPolynomial #Returns a Lagrange polynomial as a fn of xi
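# Quick sanity check (sample points assumed for illustration, not from the data
# below): interpolating (0,0), (1,1), (2,4) should reproduce y = x**2 exactly,
# i.e. sp.simplify(lagrangePolynomial([0, 1, 2], [0, 1, 4]) - xi**2) == 0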
#print(lagrangePolynomial(xData(n), fData(xData(n))))
"""Plot details"""
#Number of data points
n = 5
#Plot dataset
plt.scatter(xData(n), fData(xData(n)))
plt.title("Data set of %s points" %n)
#plt.legend('Data set')
#Plot Lagrange interpolation on top of scattered data
x_interp = []
f_interp = []
for i in range(1000):
x_interp.append(10.0/1000 * i)
for i in range(len(x_interp)): #Evaluate LP function for each x_interp
f_interp.append( lagrangePolynomial(xData(n),fData(xData(n))) . subs(xi, x_interp[i]) )
plt.plot(x_interp, f_interp)
#plt.legend('Lagrange interpolation')
| [
"matplotlib"
] |
45778977d7f4af8464c72d97bf99d66b3e574514 | Python | sohqy/Data-BikeShare | /bikesharing.py | UTF-8 | 5,093 | 3.390625 | 3 | [] | no_license | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.ensemble import RandomForestRegressor
# Read CSV files, and form combined DataFrame.
train = pd.read_csv('train.csv')
test = pd.read_csv('test.csv')
combine =[train, test]
total = pd.concat(combine)
combine = [train, test, total]
#%% Understanding the data
# Extract Year, Month, Time
for df in combine:
df['datetime']=pd.to_datetime(df['datetime'])
df['Month'] = df.datetime.dt.month
df['Hour'] = df.datetime.dt.hour
df['Year'] = df.datetime.dt.year
df['Day']= df.datetime.dt.day
df['DayoW']=df.datetime.dt.dayofweek
df['WeekoYear']=df.datetime.dt.weekofyear
for col in ['casual', 'registered', 'count']:
total['%s_log' % col] = np.log(total[col] + 1)
train['%s_log' % col] = np.log(train[col] + 1)
#%% Split TRAIN data into training and validation sections.
def copydata(month,year):
"""This creates a copy of the previous data"""
index = np.amax(np.where((train.Month==month) & (train.Year==year)))
data = train.iloc[:(index+1)]
return data
def split_train(data, cutoff):
"""This splits the training data set into two, based on cutoff day"""
splittrain = data[data['Day'] <= cutoff]
splittest = data[data['Day'] > cutoff]
return splittrain, splittest
#%% Score validation data
# Submission is evaluated using the Root Mean Squared Logarithmic Error (RMSLE)
def rmsle(y_p, y_a):
diff = np.log(y_p + 1)-np.log(y_a + 1)
mean_error = np.square(diff).mean()
return np.sqrt(mean_error)
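# e.g. rmsle(np.array([10.0]), np.array([10.0])) == 0; the metric penalises
# relative (log-scale) errors rather than absolute ones.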
def set_up_data(featurenames, data):
"""Featurenames should be an array of strings corresponding to column names."""
X = data[featurenames].as_matrix()
Y_r = data['registered_log'].as_matrix()
Y_c = data['casual_log'].as_matrix()
return X, Y_r, Y_c
def splittrain_validation(model, featurenames):
"""Function iterates over PRIOR ONLY data, for validating model through a split
training data.
model - model name and parameters
featurenames - list of features to be included into ML model"""
months = range(1,13)
years = [2011,2012]
score = np.zeros([2,12])
for i in years:
for j in months:
# Make a copy of the relevant prior data and split up training data
data = copydata(j,i)
splittrain, splittest = split_train(data , 15)
# Set up training/testing data.
x_train, y_train_r, y_train_c = set_up_data(featurenames, splittrain)
x_test, y_ar, y_ac = set_up_data(featurenames, splittest)
# Fit and predict model for registered users
M_r = model.fit(x_train, y_train_r)
y_pr = np.exp(M_r.predict(x_test)) - 1
# Fit and predict model for casual users
M_c = model.fit(x_train, y_train_c)
y_pc = np.exp(M_c.predict(x_test)) - 1
# Combine predicted numbers, ensuring that the number is an integer.
# Negative numbers below 1 are set to 0.
y_pcomb = np.round(y_pr + y_pc)
y_pcomb[y_pcomb < 0] = 0
# Transforming back to rental count numbers from logarithmic scale
y_acomb = np.exp(y_ar) + np.exp(y_ac) - 2
# Assess the accuracy of the model using the RMSLE defined
score[years.index(i),months.index(j)] = rmsle(y_pcomb, y_acomb)
return score
# Using the entire data set as an initial, quick look at score.
def predict(input_cols, model):
data = train
splittrain, splittest = split_train(data,15)
X_train, y_train_r, y_train_c = set_up_data(input_cols, splittrain)
X_test, y_test_r, y_test_c = set_up_data(input_cols, splittest)
model_r = model.fit(X_train, y_train_r)
y_pred_r = np.exp(model_r.predict(X_test)) - 1
imp_r = model_r.feature_importances_
model_c = model.fit(X_train, y_train_c)
y_pred_c = np.exp(model_c.predict(X_test)) - 1
imp_c = model_c.feature_importances_
y_pred_comb = np.round(y_pred_r + y_pred_c)
y_pred_comb[y_pred_comb < 0] = 0
y_test_comb = np.exp(y_test_r) + np.exp(y_test_c) - 2
score = rmsle(y_pred_comb, y_test_comb)
return imp_r, imp_c, score
#Predict on test set
def test_prediction(model, featurenames):
months = range(1,13)
years = [2011,2012]
for i in years:
for j in months:
data = copydata(j,i)
x_tr = data[featurenames].as_matrix()
y_tr_r = data['casual_log'].as_matrix()
y_tr_c = data['registered_log'].as_matrix()
x_te = test[featurenames].as_matrix()
Model_c = model.fit(x_tr, y_tr_c)
y_pr_c = np.exp(Model_c.predict(x_te)) - 1
Model_r = model.fit(x_tr, y_tr_r)
y_pr_r = np.exp(Model_r.predict(x_te)) - 1
y_pred = np.round(y_pr_r + y_pr_c)
y_pred[y_pred < 0 ] = 0
return y_pred
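# Hypothetical usage sketch (feature list and model settings are illustrative only):
#   features = ['season', 'holiday', 'workingday', 'weather', 'temp', 'atemp',
#               'humidity', 'windspeed', 'Year', 'Month', 'Hour', 'DayoW']
#   model = RandomForestRegressor(n_estimators=100, random_state=0)
#   imp_r, imp_c, val_score = predict(features, model)
#   test_pred = test_prediction(model, features)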
| [
"matplotlib",
"seaborn"
] |
81b2271e68b629a9ea563cd113e51a9dd2fb119d | Python | rucsa/thesis_1stpart | /2nd iteration/lasso.py | UTF-8 | 1,777 | 2.65625 | 3 | [] | no_license | from dataProcessing import y
from sklearn.linear_model import LassoLarsCV
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
X = pd.read_hdf('data.hdf5', 'Datataset1/X')
y = pd.DataFrame(y)
X_train, X_test, y_train, y_test = train_test_split(X, y,
test_size = 0.3,
random_state=0)
model = LassoLarsCV(cv=10, precompute=False, fit_intercept=False).fit(X_train, y_train)
results = dict(zip(X.columns, model.coef_))
print (results)
# MSE
m_alpha = -np.log10(model.cv_alphas_)
plt.figure()
lass = model.mse_path_
plt.plot(m_alpha, lass, ':')
plt.plot(m_alpha, model.mse_path_.mean(axis=-1), 'k', label='Average across folds', linewidth=2)
plt.axvline(-np.log10(model.alpha_), linestyle='--', color='k', label='alpha CV')
plt.legend()
plt.xlabel('-log(alpha)')
plt.ylabel('Mean squared error')
plt.title('MSE for each fold')
plt.show()
# coefficient progression
m_log_alphas = -np.log10(model.alphas_)
ax = plt.gca()
plt.plot(m_log_alphas, model.coef_path_.T)
plt.axvline(-np.log10(model.alpha_), linestyle='--', color='k', label='alpha')
plt.ylabel('coefficients')
plt.legend()
plt.xlabel('-log(alpha)')
plt.title('Regression Coefficients Progression for Lasso Lars')
plt.show()
train_error = mean_squared_error(y_train, model.predict(X_train))
test_error = mean_squared_error(y_test, model.predict(X_test))
# MSE
print ('training error {}'.format(train_error))
print ('test error {}'.format(test_error))
# R squared
print ('R_sqr for training {}'.format(model.score(X_train, y_train)))
print ('R_sqr for test {}'.format(model.score(X_test, y_test))) | [
"matplotlib"
] |
dbf226bad6540e887f1fea379ca607b9949c5bc5 | Python | Svito-zar/BeeVeeH | /visualize.py | UTF-8 | 1,510 | 3.296875 | 3 | [
"MIT"
] | permissive | """ This script can visualize a BVH file frame by frame.
Each frame will be visualized in a separate window
Continuous animation is implemented at 3dAnimation.py"""
#@author: Taras Kucherenko
import BeeVeeH.bvh_helper as BVH
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
def show_frame(current_node, ax):
coords = current_node.coordinates
ax.scatter3D(coords[2], coords[0], coords[1], c='c')
if current_node.children:
for kid in current_node.children:
show_frame(kid, ax)
# Draw a line to a kid
x = coords[0]
xline = np.array([coords[0][0], kid.coordinates[0][0]])
yline = np.array([coords[1][0], kid.coordinates[1][0]])
zline = np.array([coords[2][0], kid.coordinates[2][0]])
ax.plot3D( zline, xline, yline, 'gray')
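# show_frame walks the joint hierarchy recursively: each joint is drawn as a
# point and a grey segment connects it to every child joint.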
def visualize(root):
"""
Visualize a given frame of the motion
:param root: root of the BVH structure
:return: nothing, will create 3D plot for this frame
"""
ax = plt.axes(projection='3d')
show_frame(root, ax)
plt.show()
if __name__ == '__main__':
file_path = 'tests/bvh_files/0007_Cartwheel001.bvh'
root, frames, frame_time = BVH.load(file_path)
print('number of frames = %d' % len(frames))
# "number of frames = 2111"
for fr in range(len(frames)):
root.load_frame(frames[fr])
root.apply_transformation()
# Visualize the frame
visualize(root)
| [
"matplotlib"
] |
e4de63083ce9d2a1440f024f890ce5607d36c4fd | Python | mosi-jan/find_sr_line | /ga.py | UTF-8 | 20,217 | 2.609375 | 3 | [] | no_license | import numpy as np
from time import time
import math
import random
from copy import deepcopy
from scipy.spatial.distance import cdist
import matplotlib.pyplot as plt
class Chromosome:
def __init__(self, genes, fitness):
self.Genes = genes
self.Fitness = fitness
def __gt__(self, other):
return self.Fitness > other.Fitness
def knn_center(dataset, centers):
data = np.array(dataset)
n = len(dataset)
k = len(centers)
min_x = np.amin(data)
max_x = np.amax(data)
c = deepcopy(centers)
need_iter = True
iter = 0
while need_iter:
need_iter = False
iter += 1
dxc = cdist(dataset, c)
x_cluster_indices = np.argmin(dxc, axis=1)
for i in range(k):
ci_member = [data[j] for j in range(n) if x_cluster_indices[j] == i]
# cM.append(np.mean(ci_member[i], axis=1))
if len(ci_member) > 0:
nc = np.mean(ci_member, axis=0)
if c[i] != nc:
need_iter = True
c[i] = nc
else:
c[i] = np.random.rand()* (max_x - min_x) + min_x
return np.array(c)
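# knn_center is essentially Lloyd's k-means update restricted to the supplied
# centers; clusters that end up empty are re-seeded uniformly in [min_x, max_x).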
def correct_gene(dataset, genes):
t_gene =deepcopy(genes)
M = []
M_index = []
for i in range(len(t_gene)):
if t_gene[i][-1] >= 0.5:
M.append(t_gene[i][:-1])
M_index.append(i)
if len(M) == 0:
return genes
k = len(M) # cluster count
M = np.array(M)
# n = len(dataset) # data count
M = knn_center(dataset=dataset, centers=M)
for i in range(k):
t_gene[M_index[i]][:-1] = M[i]
return t_gene
def DeD_index(dataset, genes):
M = np.array([g[:-1] for g in genes if g[-1] >= 0.5]) # cluster centers
k = len(M) # cluster count
n = len(dataset) # data count
dataset_sorted = np.sort(dataset, axis=0)
penalty = 1 * np.amax(cdist(dataset_sorted, dataset_sorted))
if k < 2:
return penalty
# step 1
# MD(x; Xi)=[1 + (x−Xi)]−1
x_mean = np.mean(dataset_sorted, axis=0)
x_minus_mn = dataset_sorted - x_mean
abs_x_minus_mn = np.abs(x_minus_mn)
Di = np.divide(1, abs_x_minus_mn + 1)
DM = max(Di)
# step 2
delta_k = []
d = cdist(dataset_sorted, M) # distance of all data to all cluster centers
data_cluster_indices = np.argmin(d, axis=1) # all data clusters , center indices
for i in range(k):
ci_member = np.array([[dataset_sorted[j], Di[j]] for j in range(n) if data_cluster_indices[j] == i])
if len(ci_member) > 1:
DMk = max(ci_member[:,1])
delta_k.append(np.mean(np.abs(ci_member[:,0] - DMk)))
else:
return penalty
DW = np.mean(delta_k)
delta = np.mean(np.abs(Di - DM))
DB = delta - DW
ded = DW - DB
DeD = 1 / (1 + DW - DB)
return DeD
def Knn_index(dataset, genes):
# M = np.array(genes) # cluster centers
M = np.array([g[:-1] for g in genes if g[-1] >= 0.5]) # cluster centers
# k = len(M) # cluster count
# n = len(dataset) # data count
# penalty = 1 * np.amax(cdist(dataset, dataset))
d = cdist(dataset, M) # distance of all data to all cluster centers
# data_cluster_indices = np.argmin(d, axis=1) # all data clusters , center indices
return np.sum(np.amin(d, axis=1))
def DB_index(dataset, genes):
M = np.array([g[:-1] for g in genes if g[-1] >= 0.5]) # cluster centers
k = len(M) # cluster count
n = len(dataset) # data count
penalty = 1 * np.amax(cdist(dataset, dataset))
if k < 2:
return penalty
d = cdist(dataset, M) # distance of all data to all cluster centers
data_cluster_indices = np.argmin(d, axis=1) # all data clusters , center indices
ci_member = []
Siq = []
q = 2
for i in range(k):
ci_member.append([dataset[j] for j in range(n) if data_cluster_indices[j] == i])
if len(ci_member[i]) >= 1: # min cluster members
a = [d[j, i] for j in range(n) if data_cluster_indices[j] == i]
b = np.power(np.mean(np.power(a, q)), 1/q)
Siq.append(b)
else:
Siq.append(penalty)
t = 2.0
Dijt = cdist(M, M, 'minkowski', t)
Ritq = []
for i in range(k):
f = []
for j in range(k):
if j != i:
if Dijt[i, j] == 0:
return penalty
f.append((Siq[i] + Siq[j])/Dijt[i, j])
Ritq.append(max(f))
DB = np.mean(Ritq)
# print('DB:{}\t ,cluster_members_count:{}'.format(DB, [len(item) for item in ci_member]))
return DB
def CS_index(dataset, genes):
M = np.array([g[:-1] for g in genes if g[-1] >= 0.5]) # cluster centers
k = len(M) # cluster count
n = len(dataset) # data count
penalty = 1 * np.amax(cdist(dataset, dataset))
if k < 2:
return penalty
d = cdist(dataset, M) # distance of all data to all cluster centers
data_cluster_indices = np.argmin(d, axis=1) # all data clusters , center indices
# calculate all cluster members
ci_member = []
Dmax = []
for i in range(k):
ci_member.append([dataset[j] for j in range(n) if data_cluster_indices[j] == i])
Xi = np.array(ci_member[i])
if len(Xi) >= 1: # min cluster members
max_xi = np.amax(cdist(Xi, Xi), axis=0)
Dmax.append(np.mean(max_xi))
else:
Dmax.append(penalty)
c = cdist(M, M)
for i in range(k):
c[i, i] = np.inf
Dmin = np.amin(c, axis=0)
# -----------
if np.mean(Dmin) == 0:
return penalty
CS = np.mean(Dmax) / np.mean(Dmin)
# print('Dmax:{} Dmin:{} cluster count:{} ci_member:{}'.format(Dmax, Dmin, k, ci_member))
# print('CS:{}\t ,cluster_members_count:{}\t Dmax:{}\t Dmin:{}'
# .format(CS, [len(item) for item in ci_member], Dmax, Dmin))
return CS
def fitness(dataset, genes):
CS_coeff = 1
DB_coeff = 1 - CS_coeff
genes = correct_gene(dataset=dataset, genes=genes)
# f1 = DB_coeff * DB_index(dataset, genes)
# f2 = CS_coeff * CS_index(dataset, genes)
# f = f1 + f2
# print('Fitness: {}\t DB:{}\t CS:{}'.format(f, f1,f2))
# f = DeD_index(dataset, genes)
f = Knn_index(dataset, genes)
return f
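# The fitness actually used is the plain k-means style objective from Knn_index
# (sum of distances to the nearest active centre); the DB, CS and DeD indices
# above are alternative cluster-validity measures kept for experimentation.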
class Ga:
# ga params
MaxIt = 50 # Maximum Number of Iterations
nPop = 250 # Population Size
pc = 0.9 # Crossover Percentage
nc = 2 * round(pc * nPop / 2) # Number of Offsprings (Parents)
pm = 0.05 # Mutation Percentage
nm = round(pm * nPop) # Number of Mutants
gamma = 0.02
mu = 0.05 # Mutation Rate
beta = 8 # Selection Pressure
pb = 0.0
nb = round(pb * nPop)
def __init__(self, fitness):
self.fitness = fitness
self.best_fit = []
# calculate on fit function from dataset
self.dataset = None
self.dimension = None
self.n_gene = None
self.dimension_min_value = None
self.dimension_max_value = None
self.X_data = []
self.C_data = []
self.F_data = []
# self.fig, self.ax = plt.subplots()
self.fig = plt.figure()
def _create_Chromosome(self):
# rnd = np.random
# rnd.seed(seed=int(time.time()))
genes = np.random.rand(self.n_gene, self.dimension + 1)
while True:
genes[:, :self.dimension] = np.multiply(genes[:, :self.dimension],
(self.dimension_max_value - self.dimension_min_value)) \
+ self.dimension_min_value # centroid
fit = self.fitness(dataset=self.dataset, genes=genes)
yield Chromosome(genes=genes, fitness=fit)
def get_Chromosome(self):
return self._create_Chromosome().__next__()
def plot_update(self, dataset, best_fitness):
self.ax.clear()
# a = max(self.dimension_max_value)
fitness = [max(self.dimension_max_value) * i.Fitness for i in best_fitness]
M = np.array([g[:-1] for g in best_fitness[-1].Genes if g[-1] >= 0.5]) # cluster centers
k = len(M) # cluster count
n = len(dataset) # data count
d = cdist(dataset, M) # distance of all data to all cluster centers
data_cluster_indices = np.argmin(d, axis=1) # all data clusters , center indices
# calculate all cluster members
ci_member = []
for i in range(k):
ci_member.append([dataset[j] for j in range(n) if data_cluster_indices[j] == i])
for i in range(k):
self.ax.scatter([0]*len(ci_member[i]), ci_member[i], s=20, marker='.')
self.ax.scatter([0], M[i], s=200, marker='_', c='r')
# p_b =10
# for p in range(p_b):
# if len(best_fitness) > p + 1:
# M1 = np.array([g[:-1] for g in best_fitness[-2-p].Genes if g[-1] >= 0.5]) # cluster centers
# for i in range(len(M1)):
# self.ax.scatter([0.1*(p+1)], M1[i], s=200, marker='_', c='k')
self.ax.plot([i/self.MaxIt - 1.1 for i in range(len(fitness))], fitness, color='b', marker='.')
plt.pause(0.0001)
def fit(self, dataset):
self.dataset = np.array(dataset)
try:
self.dimension = len(self.dataset[0])
except:
self.dimension = 1
self.n_gene = math.ceil(len(self.dataset)/2)
self.dimension_min_value = np.amin(self.dataset, axis=0)
self.dimension_max_value = np.amax(self.dataset, axis=0)
if self.dimension == 1:
self.ax = self.fig.add_subplot()
elif self.dimension == 2:
self.ax = self.fig.add_subplot()
elif self.dimension == 3:
self.ax = self.fig.add_subplot('111', projection='3d')
pop_list = np.array([self.get_Chromosome() for i in range(self.nPop)])
# # --------------------
# pop_list[0].Genes[:,:-1] = self.dataset
# pop_list[0].Genes[:,-1] = 1
# t= True
# while t == True:
# t = False
# M = []
# M_index = []
# for i in range(len(pop_list[0].Genes)):
# if pop_list[0].Genes[i][-1] >= 0.5:
# M.append(pop_list[0].Genes[i][:-1])
# M_index.append(i)
# M = np.array(M)
# # M = np.array([g[:-1] for g in pop_list[0].Genes if g[-1] >= 0.5]) # cluster centers
# k = len(M) # cluster count
# c = cdist(M, M)
# for i in range(k):
# c[i, i] = np.inf
# Dmin = np.amin(c, axis=0)
# for i in range(k):
# if Dmin[i] == 0:
# pop_list[0].Genes[M_index[i],-1] = 0
# t = True
# break
# a = self.fitness(self.dataset, genes=pop_list[0].Genes)
# pop_list[0].Fitness = self.fitness(self.dataset, genes=pop_list[0].Genes)
# # --------------------
# self.F_data = np.zeros((self.MaxIt,2))
for i in range(self.MaxIt):
# self.best_fit.append(deepcopy(min( pop_list)))
# self.plot_update(dataset=dataset, best_fitness=self.best_fit)
# self.C_data = []
# for item in self.best_fit[-1].Genes:
# if item[-1] >= 0.5:
# self.C_data.append(list(item[:-1]))
# self.C_data = np.array(self.C_data)
# # self.F_data.extend([self.best_fit[-1].Fitness * 10, (len(self.F_data) + 1) / self.MaxIt - 1])
# self.F_data.extend([self.best_fit[-1].Fitness * 10])
# # self.F_data[i]=[self.best_fit[-1].Fitness * 10, (len(self.F_data) + 1) *10/ self.MaxIt - 1]
# # self.F_data=np.array(self.F_data)
# self.refresh_plot()
# create temp pop list
temp_pop_list = np.zeros(self.nPop + self.nc + self.nm + self.nb, type(Chromosome))
# temp_pop_list[0] = pop_list[0]
temp_pop_list[0:self.nPop] = pop_list
# crossover & mutation & new born
temp_pop_list[self.nPop:self.nPop + self.nc] = self.crossover(pop_list=pop_list, nc=self.nc)
temp_pop_list[self.nPop + self.nc:self.nPop + self.nc + self.nm] = self.mutation(pop_list=pop_list, nm=self.nm)
temp_pop_list[self.nPop + self.nc + self.nm:] = np.array([self.get_Chromosome() for i in range(self.nb)])
# select population
pop_list = self.select(temp_pop_list, self.nPop)
self.best_fit.append(deepcopy(min(pop_list)))
self.plot_update(dataset=self.dataset, best_fitness=self.best_fit)
print('pop_list:{}\t Genes count:{}\t best active gene count:{}\t best fitness:{}'.
format(len(pop_list), len(pop_list[0].Genes),
len([i for i in self.best_fit[-1].Genes if i[-1] >= 0.5]), self.best_fit[-1].Fitness))
print(np.count_nonzero(pop_list[:] == min(pop_list)))
# print(np.count_nonzero(pop_list[:] == self.best_fit[-1]))
def crossover(self, pop_list, nc):
child = np.zeros(nc, type(Chromosome))
nc_pop = np.random.choice(pop_list, size=nc, replace=False)
for i in range(int(nc/2)):
p1 = deepcopy(nc_pop[2 * i])
p2 = deepcopy(nc_pop[2 * i + 1])
position = random.randint(1, self.n_gene)
# print('position', position)
l = deepcopy(p1.Genes[0:position])
p1.Genes[0:position] = deepcopy(p2.Genes[0:position])
p2.Genes[0:position] = l
p1.Fitness = self.fitness(dataset=self.dataset, genes=p1.Genes)
p2.Fitness = self.fitness(dataset=self.dataset, genes=p2.Genes)
child[2 * i] = p1
child[2 * i + 1] = p2
return child
def mutation(self, pop_list, nm):
# child = np.zeros(nm, type(Chromosome))
nc_pop = deepcopy(np.random.choice(pop_list, size=nm))
coeff = np.random.rand(nm * self.n_gene, (self.dimension + 1)) + 0.5 # [0.5, 1)
coeff = np.resize(coeff, (nm, self.n_gene, self.dimension + 1))
        # Per-coordinate bounds for the centre coordinates, plus [0, 1) for the activation flag
        min_limit = list(self.dimension_min_value) + [0]
        max_limit = list(self.dimension_max_value) + [0.9999999999999]
for i in range(nm):
nc_pop[i].Genes = nc_pop[i].Genes * coeff[i]
for j in range(self.n_gene):
for k in range(self.dimension + 1):
if nc_pop[i].Genes[j, k] > max_limit[k]:
nc_pop[i].Genes[j, k] = max_limit[k]
elif nc_pop[i].Genes[j, k] < min_limit[k]:
nc_pop[i].Genes[j, k] = min_limit[k]
# nc_pop[i].Genes[:,:-1] = ((nc_pop[i].Genes[:, :-1] - self.dimension_min_value) *
# coeff[i,:,:-1]) + self.dimension_min_value
# nc_pop[i].Genes[:,-1] = ((nc_pop[i].Genes[:, -1] - self.dimension_min_value) *
# coeff[i,:,-1]) + self.dimension_min_value
nc_pop[i].Fitness = self.fitness(dataset=self.dataset, genes=nc_pop[i].Genes)
# return np.array([self.get_Chromosome() for i in range(nm)])
return nc_pop
def select(self, pop_list, need_pop_number):
n_pop_list = len(pop_list)
a = np.zeros((n_pop_list, 2), type(float))
a[:, 0] = np.random.choice(range(n_pop_list), size=n_pop_list, replace=False)
a[:, 1] = np.array([pop_list[a[i, 0]].Fitness for i in range(n_pop_list)])
max_fit_1 = np.amax(a[:, 1])
# min_fit_1 = np.amin(a[:, 1])
for i in range(n_pop_list):
if a[i, 1] == 0:
a[i, 1] = 0.0001
a[i, 1] = max_fit_1 / a[i, 1]
# max_fit_2 = np.amax(a[:, 1])
# min_fit_2 = np.amin(a[:, 1])
# a[:,1] = a[:,1] / max_fit_1
for i in range(n_pop_list - 1):
a[i+1, 1] = a[i+1, 1]+a[i, 1]
max_fit = np.amax(a[:, 1])
k = 10
res = np.zeros(need_pop_number, type(Chromosome))
sort_list = np.sort(pop_list)
res[0:k] = sort_list[0:k]
p = round(need_pop_number * 0.05)
if p < 1:
p = 1
print(p)
i = 0
while i < need_pop_number - k: # 2*k:
r = random.random() * (max_fit - 1) + 1
for j in range(n_pop_list):
if a[j, 1] >= r:
if np.count_nonzero(res[:i + k - 1] == pop_list[a[j, 0]]) < p:
res[i + k] = pop_list[a[j, 0]]
i += 1
# if np.count_nonzero(res[:i + 2*k-1]==pop_list[a[j, 0]]) > 0:
# print(np.count_nonzero(res[:i + 2*k-1]==pop_list[a[j, 0]]))
break
# if j == n_pop_list - 1:
# res[i] = pop_list[a[j, 0]]
return res
def refresh_plot(self):
self.ax.clear()
if self.dimension == 1:
self.ax.scatter([1 for i in range(len(self.C_data))], self.C_data, c='r', s=100, marker='_')
# self.ax.scatter([1 for i in range(len(self.X_data))], self.X_data, c='b', marker='x')
self.ax.scatter(self.X_data[:, 1], self.X_data[:, 0], c='b', marker='.')
self.ax.plot([i/self.MaxIt - 1 for i in range(len(self.F_data))], self.F_data, color='b', marker='.')
# self.ax.scatter([[0, 0, 0],[-1, -1, -1]], [[1, 2, 5],[4, 5, 6]], s=100, marker='*', c=[1,2,3,4,5,6])
elif self.dimension == 2:
self.ax.scatter(self.C_data[:, 0], self.C_data[:, 1], c='r', marker='v')
self.ax.scatter(self.X_data[:, 1], self.X_data[:, 0], c='b', marker='x')
self.ax.plot([i / self.MaxIt - 1 for i in range(len(self.F_data))], self.F_data, color='b', marker='.')
elif self.dimension == 3:
self.ax.scatter([self.C_data[:, 0]], [self.C_data[:, 1]], [self.C_data[:, 2]], c='r', marker='v')
self.ax.scatter([self.X_data[:, 0]], [self.X_data[:, 1]], [self.X_data[:, 2]], c='b', marker='x')
# a = [self.F_data[:,0]]
# b = [self.F_data[:,1]]
# c = [np.zeros(len(self.F_data))]
# self.ax.plot(np.zeros(len(self.F_data)), self.F_data[:,1],self.F_data[:,0], color='b', marker='.')
# self.ax.plot(np.zeros(len(self.F_data)), [i/self.MaxIt - 1 for i
# in range(len(self.F_data))], self.F_data, color='b', marker='.')
plt.pause(0.001)
if __name__ == '__main__':
start_time = time()
# dataset = [[random.randint(1,5), random.randint(15,25), random.randint(-10,-1)] for i in range(10)]
# dataset = [[random.randint(1,5)] for i in range(5)]
# dataset = [[1,1.2,1.5,1.4],[3.1,3.5,4.1],[5,5.2,7]]
# dataset = [[1],[1.2],[1.5],[1.8],[2], [3.1],[3.5],[3.9], [5],[5.2],[5.4],
# [1.021],[1.58],[1.51],[10],[10.8],[8.6],[7.9]]
dataset = [[1], [1.2], [1.5], [1.8], [2], [3.1], [3.5], [3.9], [5], [5.2], [5.4],
[1.021], [1.58], [1.51], [10], [10.8], [8.6], [7.7], [1], [1.32], [1.7],
[8.8], [9], [3.1], [5.5], [3.9], [5.4], [5.22], [5.4], [1.021], [1.58],
[1.51], [1.8], [8.1], [7.5]]#, [16.0]
print(dataset)
ga = Ga(fitness=fitness)
ga.fit(dataset)
print([item.Fitness for item in ga.best_fit])
# for i in range(len(ga.best_fit)):
# print(ga.best_fit[i].Genes)
print(ga.best_fit[-1].Genes)
print(ga.best_fit[-1].Fitness)
M = np.array([g[:-1] for g in ga.best_fit[-1].Genes if g[-1] >= 0.5]) # cluster centers
k = len(M) # cluster count
d = cdist(np.array(dataset), M) # distance of all data to all cluster centers
data_cluster_indices = np.argmin(d, axis=1) # all data clusters , center indices
# calculate all cluster members
ci_data_member = []
for i in range(k):
ci_data_member.append([dataset[j] for j in range(len(dataset)) if data_cluster_indices[j] == i])
print([len(item) for item in ci_data_member])
# ga.refresh_plot()
# ga.fig.show()
# sleep(10)
print('run time:{}'.format(time()-start_time))
plt.show()
| [
"matplotlib"
] |
df5a12c94b7360dfbae6175e2f6e8ebc9bc3d71c | Python | makatx/MobileNetV2_MIA | /utils.py | UTF-8 | 5,599 | 2.78125 | 3 | [] | no_license | import openslide
import numpy as np
from PIL import Image
import xml.etree.cElementTree as ET
import cv2
import matplotlib.pyplot as plt
import os
class Annotation:
scaleFactor = 1
coords_orig = []
coords_order = []
coords_list = []
bounds = []
bounds_orig = []
def __init__(self, filename, scaleFactor=1):
self.scaleFactor = scaleFactor
with open(filename, 'rb') as f:
self.root = ET.parse(f)
self.coords_orig = []
self.coords_order = []
self.group = []
self.type = []
for annot in self.root.iter('Annotation'):
coords_tag = annot.find('Coordinates')
lst = []
for coord in coords_tag.findall('Coordinate'):
lst.append([float(coord.attrib['Order']), float(coord.attrib['X']), float(coord.attrib['Y'])])
n = np.array(lst)
n = n[n[:,0].argsort()]
self.coords_orig.append(n[:,1:])
self.coords_order.append(n)
self.group.append(annot.attrib['PartOfGroup'])
self.type.append(annot.attrib['Type'])
self.coords_list = self.scale(factor=scaleFactor)
self.calcBounds()
def scale(self, coords=None, factor=1):
if coords == None: coords = self.coords_orig
coords_scaled = []
for n in range(len(coords)):
coords_scaled.append((coords[n] / factor).astype(np.int));
return coords_scaled
def shift(self, coords=None, origin=(0,0)):
if coords == None: coords = self.coords_orig
shifted = []
origin = np.array(origin)
for n in coords:
shifted.append(n - origin)
return shifted
def calcBounds(self):
bounds = []
for n in self.coords_list:
xmin = n[:,0].min()
ymin = n[:,1].min()
xmax = n[:,0].max()
ymax = n[:,1].max()
bounds.append(np.array([xmin,ymin,xmax,ymax]))
self.bounds = np.array(bounds)
bounds = []
for n in self.coords_orig:
xmin = n[:,0].min()
ymin = n[:,1].min()
xmax = n[:,0].max()
ymax = n[:,1].max()
bounds.append(np.array([xmin,ymin,xmax,ymax]))
self.bounds_orig = np.array(bounds)
def getWSI(filename):
'''
Returns image for desired level from given OpenSlide WSI format image filename
'''
slide = openslide.OpenSlide(filename)
return slide
def getRegionFromSlide(slide, level=8, start_coord=(0,0), dims='full', from_level=8):
if dims == 'full':
img = np.array(slide.read_region((0,0), level, slide.level_dimensions[level]))
img = img[:,:,:3]
else:
img = np.array(slide.read_region(start_coord, level, dims ))
img = img[:,:,:3]
return img
def getGTmask(img_filename, annotn_filename, level, coords, dims):
slide = getWSI(img_filename)
ann = Annotation(annotn_filename)
c_shifted = ann.shift(origin=coords)
c_scaled = ann.scale(c_shifted, slide.level_downsamples[level])
mask = cv2.fillPoly(np.zeros((dims[0],dims[1],1)), c_scaled, (1))
return mask
def getLabel(filename, level, coords, dims):
'''
Check if the annotation file with same name (extension .xml) exists: if not, return all zero mask of shape (dims,1)
else, get the annotation file, shift its coordinates by coords and scale using level in slide downsample,
followed by polyFill operation on a all zero mask of dimension (dims,1) with 1 and return it
'''
annotn_filename, _ = os.path.splitext(filename)
annotn_filename = annotn_filename + '.xml'
if os.path.exists(annotn_filename):
detection = np.any(getGTmask(filename, annotn_filename, level, coords, dims))
label = np.array( [float(not detection), float(detection)] )
#print('label: {}'.format(label))
return label
else:
#print('{} does not exist'.format(annotn_filename))
return np.array([1.0, 0.0])
def getTileList(slide, level=2, tile_size=256):
'''
Returns a list of coordinates for starting point of tile of given tile_size square for the given slide at given level
converted/scaled to full dimension of slide
'''
dims = slide.level_dimensions[level]
tile_list = []
width = dims[0]-tile_size
height = dims[1]-tile_size
for y in range(0, height, tile_size):
for x in range(0, width, tile_size):
tile_list.append([x,y])
tile_list.append([width,y])
for x in range(0, width, tile_size):
tile_list.append([x, height])
tile_list.append([width, height])
tile_list = (np.array(tile_list) * slide.level_downsamples[level]).astype(np.int32)
return tile_list
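# Note: the returned tile coordinates are scaled to level-0 (full-resolution)
# space, which is what OpenSlide's read_region expects for its start coordinate.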
def patch_batch_generator(slide, tile_list, batch_size, level=2, dims=(256,256)):
'''
Generating image batches from given slide and level using coordinates in tile_list
images are normalized: (x-128)/128 before being returned
'''
images = []
b = 0
for coord in tile_list:
if b==batch_size:
b=0
images_batch = np.array(images)
images = []
yield images_batch
images.append(((getRegionFromSlide(slide, level, coord, dims=dims).astype(np.float))-128)/128)
b +=1
images_batch = np.array(images)
yield images_batch
'''
Test code:
mask = getLabel( 'patient_015/patient_015_node_2.tif', 2, [ 67700, 101508], (512,512))
print('Mask sum: {} ; shape: {}'.format(np.sum(mask), mask.shape))
plt.imshow(np.reshape(mask, (512,512)))
'''
| [
"matplotlib"
] |
dd6ddc890920d3591a9982acde8eda2223d38ad0 | Python | IJSComplexMatter/cddm | /examples/two_component/dual_video_simulator.py | UTF-8 | 2,304 | 2.734375 | 3 | [
"MIT"
] | permissive | """
Builds sample dual-camera video of two-component system with two different
particles for two-exponent data fitting examples.
"""
from cddm.sim import simple_brownian_video, create_random_times1, adc
from cddm.viewer import VideoViewer
from cddm.video import multiply, load, crop, add
import matplotlib.pyplot as plt
import numpy as np
# uppercase values
from examples.two_component.conf import NFRAMES, N_PARAMETER, SIMSHAPE, BACKGROUND, DELTA1,DELTA2, \
INTENSITY1, INTENSITY2, SIGMA1,SIGMA2, SHAPE, DUST1_PATH, DUST2_PATH, SATURATION, ADC_BIT_DEPTH, NOISE_MODEL, READOUT_NOISE
#random time according to Eq.7 from the SoftMatter paper
t1, t2 = create_random_times1(NFRAMES,n = N_PARAMETER)
#: this creates a brownian motion frame iterator.
#: each element of the iterator is a tuple holding a single numpy array (frame)
video1 = simple_brownian_video(t1,t2, shape = SIMSHAPE,background = BACKGROUND,num_particles = 10,
sigma = SIGMA1, delta = DELTA1, intensity = INTENSITY1, dtype = "uint16")
video2 = simple_brownian_video(t1,t2, shape = SIMSHAPE,background = 0 ,num_particles = 100,
sigma = SIGMA2, delta = DELTA2, intensity = INTENSITY2, dtype = "uint16")
video = add(video1,video2)
#video = ((np.ones(SIMSHAPE,dtype = "uint16")*2**8,)*2 for i in range(NFRAMES))
#: crop video to selected region of interest
video = crop(video, roi = ((0,SHAPE[0]), (0,SHAPE[1])))
#: apply dust particles
dust1 = plt.imread(DUST1_PATH)[...,0] #float normalized to (0,1)
dust2 = plt.imread(DUST2_PATH)[...,0]
dust = ((dust1,dust2),)*NFRAMES
video = multiply(video, dust)
video = (tuple((adc(f, noise_model = NOISE_MODEL, saturation = SATURATION, readout_noise = READOUT_NOISE, bit_depth = ADC_BIT_DEPTH) for f in frames)) for frames in video)
if __name__ == "__main__":
#: no need to load video, but this way we load video into memory, and we
#: can scroll back and forth with the viewer. Uncomment the line below
#video = load(video, NFRAMES) # loads and displays progress bar
#: camera 1
viewer1 = VideoViewer(video, count = NFRAMES, id = 0, vmin = 0, cmap = "gray")
viewer1.show()
#: camera 2
viewer2 = VideoViewer(video, count = NFRAMES, id = 1, vmin = 0, cmap = "gray")
viewer2.show() | [
"matplotlib"
] |
b1f7e32f27429ececf091c570f93014a8c3efe8a | Python | Julian-OU/APMCM-2020-A | /code/a21.py | UTF-8 | 11,989 | 2.9375 | 3 | [
"Unlicense"
] | permissive | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import math
import time
plt.axis("equal")
d = -0.1
inputpath = "./code/graph2.csv"
outputpath = "./code"
length = 0 # total length of the drawn path
times = 0
dots = 0
num = 0
'''
Part 1
'''
def _min(parentre, cor, data):
return np.min(data[parentre][:, cor])
def _max(parentre, cor, data):
return np.max(data[parentre][:, cor])
def range_judge(i, j, data):
    if _max(i, 0, data) > _max(j, 0, data) and _min(i, 0, data) < _min(j, 0, data) and _max(i, 1, data) > _max(j, 1, data) and _min(i, 1, data) < _min(j, 1, data): # compare i and j: if one contour's bounding box contains the other's, return the inner one; otherwise return -2
return j
elif _max(i, 0, data) < _max(j, 0, data) and _min(i, 0, data) > _min(j, 0, data) and _max(i, 1, data) < _max(j, 1, data) and _min(i, 1, data) > _min(j, 1, data):
return i
else:
return -2
def findparent(data):
    # parent[contour index] = [parent contour index, nesting level]
parent = list([])
for i in range(len(data)):
parent.append([-1, 1])
    for i in range(0, len(data)): # i, j index contours; search for each contour's parent
for j in range(i+1, len(data)):
if range_judge(i, j, data) != -2:
small_name = range_judge(i, j, data)
big_name = (i if j == small_name else j)
parent[small_name][1] += 1
if range_judge(big_name, parent[small_name][0], data) == big_name or parent[small_name][0] == -1:
                    parent[small_name][0] = big_name  # record the parent (the inner contour's level was incremented above)
else:
continue
return(parent)
'''
Part 2: geometric helpers, inward offsetting and region splitting
'''
def unit(v):  # normalize a vector to unit length
return(v/np.linalg.norm(v))
def inangle(v1, v2):  # angle between two vectors
return(math.acos(np.dot(v1, np.transpose(v2)) / (np.linalg.norm(v1)*np.linalg.norm(v2))))
def cross(v1, v2):  # 2D cross product
return(v1[0]*v2[1]-v2[0]*v1[1])
def ifcross(p1, p2, q1, q2):  # check whether segments p1-p2 and q1-q2 intersect
v11 = q1-p1
v12 = q2-p1
v21 = q1-p2
v22 = q2-p2
if cross(v11, v12)*cross(v21, v22) < 0 and cross(v11, v21)*cross(v12, v22) < 0:
return(1)
else:
return(0)
def drawborder(data):  # offset the contour inward once by |d|
data = np.insert(data, data.shape[0], values=data[1, :], axis=0)
data = np.insert(data, 0, values=data[data.shape[0]-3, :], axis=0)
temp = np.array([0, 0])
i = 0
while i < data.shape[0]-2:
v1 = data[i+1, :]-data[i, :]
v2 = data[i+2, :]-data[i+1, :]
u = d/(math.sin(inangle(v1, v2)))
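        # offset distance along the corner's angle bisector; the 1/sin(angle) factor keeps both adjacent edges shifted by |d|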
if cross(v1, v2) > 0:
new = data[i+1, :]+(unit(v2)-unit(v1))*u
else:
new = data[i+1, :]-(unit(v2)-unit(v1))*u
temp = np.row_stack((temp, new))
i += 1
temp = np.delete(temp, 0, axis=0)
i = 0
while i < temp.shape[0]-3:
j = i
while j < temp.shape[0]-1:
if ifcross(temp[i, :], temp[i+1, :], temp[j, :], temp[j+1, :]):
temp = np.row_stack((temp[0:i, :], temp[j+1:, :]))
continue
else:
j += 1
i += 1
return(temp)
def getint(data):  # discretize the contour at the working precision
temp = new = np.array([0, 0.])
for i in range(data.shape[0]-1):
x1 = data[i, 0]
y1 = data[i, 1]
x2 = data[i+1, 0]
y2 = data[i+1, 1]
if x1 == x2:
k = math.inf
else:
            k = (y2-y1)/(x2-x1)  # slope by finite differences
if y1//abs(d) < y2//abs(d):
for j in range(1, math.floor(y2//abs(d)-y1//abs(d)+1)):
new[1] = round((y1//abs(d)+j)*abs(d), 1)
new[0] = (new[1]-y1)/k+x1
temp = np.row_stack((temp, new))
else:
if y1//abs(d) > y2//abs(d):
for j in range(0, math.floor(y1//abs(d)-y2//abs(d))):
new[1] = round((y1//abs(d)-j)*abs(d), 1)
new[0] = (new[1]-y1)/k+x1
temp = np.row_stack((temp, new))
temp = np.delete(temp, 0, axis=0)
#plt.plot(temp[:, 0], temp[:, 1], '-o', color='g', markersize=2)
return(temp)
def findmax(data):  # find local maxima (in y)
index = np.array([], dtype="int64")
for i in range(-1, data.shape[0]-1):
if data[i, 1] > data[i-1, 1] and data[i, 1] >= data[i+1, 1]:
index = np.append(index, [i], axis=0)
return(index)
def findmin(data):  # find local minima (in y)
index = np.array([], dtype="int64")
for i in range(-1, data.shape[0]-1):
if data[i, 1] <= data[i-1, 1] and data[i, 1] < data[i+1, 1]:
index = np.append(index, [i], axis=0)
return(index)
def findex(data):  # get the extremum indices of every contour
index = list([])
for i in range(len(data)):
index.append(np.array([findmax(data[i]), findmin(data[i])]))
return(index)
def findm(data, index):  # get the indices of the global maximum and minimum
temp = list([])
for i in range(len(index)):
if index[i].shape[1] == 1:
temp.append(np.array([index[i][0, 0], index[i][1, 0]]))
continue
maxy = np.max(data[i][:, 1])
miny = np.min(data[i][:, 1])
m = [[], []]
for j in range(index[i].shape[1]):
if data[i][index[i][0, j], 1] == maxy:
m[0] = index[i][0, j]
for j in range(index[i].shape[1]):
if data[i][index[i][1, j], 1] == miny:
                m[1] = index[i][1, j]
temp.append(np.array(m))
return(temp)
def divideout(data_out, data_in, divide_in):  # find the split points on the outer boundary
ym = np.array([data_in[divide_in[0], 1],
data_in[divide_in[1], 1]])
divide_out = np.array([], dtype='int16')
for i in [0, 1]:
for j in range(data_out.shape[0]):
if data_out[j, 1] == ym[i] and data_out[j, 0] > data_in[divide_in[0], 0]:
divide_out = np.append(divide_out, [j], axis=0)
break
return(divide_out)
def stackline(data_out, data_in, divide_out, divide_in):  # split a multiply-connected region and reconnect the pieces
temp1 = np.row_stack(
(data_out[:divide_out[0]+1], data_in[divide_in[0]:divide_in[1]+1], data_out[divide_out[1]:]))
temp2 = np.row_stack(
(data_in[:divide_in[0]], data_out[divide_out[0]+1:divide_out[1]], data_in[divide_in[1]+1:]))
return(list([temp1, temp2]))
def divide1(data, index, parent):  # partition the multiply-connected regions
temp = list([])
    for i in range(1, (max(parent[:, 1]+1))//2+1):  # process nesting level i
        for j in range(parent.shape[0]):  # look for the outer boundaries of level i
if parent[j, 1] == 2*i-1:
data_out = data[j]
                for k in range(parent.shape[0]):  # find the inner boundaries whose outer boundary is j
if parent[k, 0] == j:
data_in = data[k]
                        divide_in = index[k]  # split points on the inner boundary
                        divide_out = divideout(data_out,  # matching split points on the outer boundary
                                               data_in, divide_in)
                        line = stackline(data_out, data_in,  # cross-connect the split points
                                         divide_out, divide_in)
                        data_out = line[0]  # update the outer boundary
                        temp.append(line[1])  # store the inner piece
                temp.append(data_out)  # store the (merged) outer piece
return(temp)
def divideline(data, index):  # find the split points of a simply-connected region
line = np.array([0, 0])
for n in [0, 1]:
for i in index[n]:
judge = 0
j = i-2
while j > -0.02*data.shape[0]:
if data[j, 1] == data[i, 1]:
judge += 1
break
j -= 1
if judge == 0:
continue
k = i+2
while k < 0.98*data.shape[0]:
if data[k, 1] == data[i, 1]:
judge += 1
break
k += 1
if judge == 1:
continue
elif n == 0:
line = np.row_stack((line, [j, i]))
else:
line = np.row_stack((line, [i, k]))
line = np.delete(line, 0, axis=0)
return(line)
def dividesub(data, line):  # cut the region along the split lines and collect the sub-regions
temp = list([])
while line.shape[0]:
judge = 0
for i in range(1, line.shape[0]):
if line[0, 0] < line[i, 0] < line[0, 1]:
line = np.row_stack((line[i, :], line))
line = np.delete(line, i+1, axis=0)
judge = 1
break
if judge == 0:
temp.append(np.array(data[line[0, 0]+1:line[0, 1], :]))
for j in range(line[0, 0]+1, line[0, 1]):
data[j] = [0, 0]
line = np.delete(line, 0, axis=0)
temp.append(np.array(data[:, :]))
for i in range(len(temp)):
j = 0
while j < temp[i].shape[0]:
if temp[i][j, 0] == temp[i][j, 1] == 0:
temp[i] = np.delete(temp[i], j, axis=0)
continue
j += 1
return(temp)
def divide2(data, index):  # partition the simply-connected regions
temp = list([])
for i in range(len(data)):
if index[i].shape[1] > 1:
line = divideline(data[i], index[i])
temp += dividesub(data[i], line)
else:
temp += list([data[i]])
return(temp)
'''
Part 3: input/output and toolpath generation
'''
def writecsv(data):  # export a polyline to CSV
global times
dataframe = pd.DataFrame(data={'x': data[:, 0], 'y': data[:, 1]})
dataframe.to_csv(outputpath+f"/zigzag{times}.csv",
index=False, mode='w', sep=',')
pass
def readcsv(path):  # read the contours from the input CSV
data = list([])
data0 = pd.read_csv(
path, index_col=False, header=2)
j = 0
if data0.dtypes.X != "float64":
for i in range(len(data0.values)):
if "MainCurve" in data0.values[i, 0]:
data += list([np.array(data0.values[j:i, :], dtype='float64')])
j = i+2
data += list([np.array(data0.values[j:len(data0.values), :], dtype='float64')])
for i in range(len(data)):
plt.plot(data[i][:, 0], data[i][:, 1], '-o', color='b', markersize=1)
return(data)
def drawline(data):  # generate the parallel (zigzag) fill lines
global length
global times
global dots
global num
for i in range(len(data)):
line = np.array([0, 0])
area = data[i]
maxy = round(max(area[:, 1]), 1)
miny = round(min(area[:, 1]), 1)
j = miny
while j <= maxy:
index = (np.where(area == j))[0]
temp = area[index, 0]
if round(j/abs(d)+1) % 2:
line = np.row_stack((line, [j, min(temp)]))
line = np.row_stack((line, [j, max(temp)]))
else:
line = np.row_stack((line, [j, max(temp)]))
line = np.row_stack((line, [j, min(temp)]))
j = round(j + abs(d), 1)
line = np.delete(line, 0, axis=0)
line = np.column_stack((line[:, 1], line[:, 0]))
times += 1
writecsv(line)
plt.plot(line[:, 0], line[:, 1], '-', color='r')
num = num+int(line.shape[0]/2)
for j in range(line.shape[0]-1):
length = length + \
math.sqrt((line[j+1, 0]-line[j, 0])**2 +
(line[j+1, 1]-line[j, 1])**2)
dots += 1
i += 1
pass
'''
Main program
'''
start = time.thread_time()
data = readcsv(inputpath)
for i in range(len(data)):
data[i] = drawborder(data[i])
data[i] = getint(data[i])
parent = np.array(findparent(data))
index = findex(data)
index = findm(data, index)
data = divide1(data, index, parent)
index = findex(data)
data = divide2(data, index)
drawline(data)
end = time.thread_time()
print('Length of curve: %s mm' % length)
print('Number of parallel line: %s' % num)
print('Number of dots: %s' % dots)
print('Running time: %s Seconds' % (end-start))
plt.show()
| [
"matplotlib"
] |
4c6611e314cec3766c9fb538f0ffceb95713699d | Python | kushagrapatidar/Tasks-Introduction-to-Data-Science | /Task_10/Task_10.b.py | UTF-8 | 486 | 2.921875 | 3 | [] | no_license | import pandas as pd
import matplotlib.pyplot as plt
happiness_ds = pd.read_csv('worldhappiness2015.csv')
happiness_ds = pd.DataFrame(happiness_ds)
mean_happiness = {}
regions = happiness_ds['Region'].unique()
# print(regions)
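# compute the mean Happiness Score for every region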
for r in regions:
region_group = happiness_ds[happiness_ds['Region']==r]
region_group_mean = region_group['Happiness Score'].mean()
mean_happiness[r] = region_group_mean
print('\n')
[print(r,':',mean_happiness[r]) for r in regions]
print('\n') | [
"matplotlib"
] |
c3c553bdae1733d619d3a10586ba5cdca7a1907a | Python | kparth98/object_detection_assignment | /code/track_utils.py | UTF-8 | 10,599 | 2.546875 | 3 | [] | no_license | import numpy as np
import argparse
import cv2
from pykalman import KalmanFilter
#from collections import deque
#import matplotlib.pyplot as plt
img = None
orig = None
bbox = None
roi2, roi2_init = None,None
kernel = np.ones((3,3))
ix,iy=0,0
draw = False
rad_thresh = 50
def getArguements():
ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video",
help="path to the video file")
ap.add_argument("-bg", "--background",
help="path to the extracted background")
ap.add_argument("-l", "--limits",
help="path to the rgb threshold limits file")
ap.add_argument("-vis", "--visualise",
help="view the frames and the ball detection",action='store_true')
ap.add_argument("-s", "--save",
help="save data",action='store_true')
ap.add_argument("-o", "--output",
help="name of output video file")
args = vars(ap.parse_args())
return args
def resize(img,width=400.0):
r = float(width) / img.shape[0]
dim = (int(img.shape[1] * r), int(width))
img = cv2.resize(img, dim, interpolation=cv2.INTER_AREA)
return img
def selectROI(event, x, y, flag, param):
'''
Callback function which returns the coordinates of selected box
'''
global img, draw, orig, bbox, ix,iy
if event == cv2.EVENT_LBUTTONDOWN:
ix = x
iy = y
draw = True
elif event == cv2.EVENT_MOUSEMOVE:
if draw:
img = cv2.rectangle(orig.copy(), (ix, iy), (x, y), (255, 0, 0), 2)
elif event == cv2.EVENT_LBUTTONUP:
if draw:
x1 = max(x, ix)
y1 = max(y, iy)
ix = min(x, ix)
iy = min(y, iy)
bbox = np.array([[ix,iy],[x1,y1]])
draw = False
def getROIvid(frame,mask, winName = 'input'):
'''
Function to get region of interest from user via mouse-clicks
input:
frame - image from which ROI is to be obtained
mask - foreground mask
winName - title of the window, required by opencv
click and drag using mouse to select the ROI
output:
roi - selected region of interest from frame
roi_mask - selected region of interest from mask
'''
global img, orig, bbox
bbox=None
img = frame.copy()
orig = frame.copy()
cv2.namedWindow(winName)
cv2.setMouseCallback(winName, selectROI)
while True:
cv2.imshow(winName, img)
if bbox is not None:
cv2.destroyWindow(winName)
roi = orig[bbox[0,1]:bbox[1,1],bbox[0,0]:bbox[1,0],:]
roi_mask = mask[bbox[0,1]:bbox[1,1],bbox[0,0]:bbox[1,0]]
return roi,roi_mask
k = cv2.waitKey(1) & 0xFF
if k == ord('q'):
cv2.destroyWindow(winName)
break
return None, None
def getLimits_HSV(roi,roi_mask):
'''
Function to obtain limits for HSV thresholding
input:
roi - region of interest
roi_mask - binary mask of which points to consider
output:
limits - threshold in tuple format: (upper, lower)
'''
roi = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
h, s, v = cv2.split(roi)
limits = [(int(np.amax(h[roi_mask>0])), int(np.amax(s[roi_mask>0])), int(np.amax(v[roi_mask>0]))),
(int(np.amin(h[h>0])), int(np.amin(s[roi_mask>0])), int(np.amin(v[roi_mask>0])))]
return limits
def getLimits_RGB(roi,roi_mask):
'''
Function to obtain limits for RGB thresholding
input:
roi - region of interest
roi_mask - binary mask of which points to consider
output:
limits - threshold in tuple format: (upper, lower)
'''
b,g,r = cv2.split(roi)
b_mean = np.median(b[roi_mask>0])
g_mean = np.median(g[roi_mask>0])
r_mean = np.median(r[roi_mask>0])
b_std = 1.5*np.std(b[roi_mask>0])
g_std = 1.5*np.std(g[roi_mask>0])
r_std = 1.5*np.std(r[roi_mask>0])
limits = [(min(int(b_mean+b_std),255), min(int(g_mean+g_std),255), min(int(r_mean+r_std),255)),
(int(b_mean-b_std), int(g_mean-g_std), int(r_mean-r_std))]
return limits
def applyMorphTransforms(mask):
global kernel
mask = cv2.dilate(mask, kernel)
mask = cv2.erode(mask, kernel)
return mask
def detectBallThresh_HSV(frame,limits):
'''
Function to detect ball by HSV-color thresholding
input:
frame - image in which the ball is to be detected
limits - color range in tuple (upper,lower) format where
upper = [h_max,s_max,v_max] and lower = [h_min,s_min,v_min]
rad_thresh - threshold on size of the ball detected
output:
center - center of the ball
        contour - contour of the detected ball
'''
global rad_thresh
upper = limits[0]
lower = limits[1]
center = None
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
mask = cv2.inRange(hsv, lower, upper)
mask = applyMorphTransforms(mask)
cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)[-2]
cnts = sorted(cnts, key=cv2.contourArea, reverse=True)
flag = False
i=0
if len(cnts) > 0:
for i in range(len(cnts)):
(center, radius) = cv2.minEnclosingCircle(cnts[i])
if radius < rad_thresh and radius > 5:
flag = True
break
if not flag:
return None, None
center = np.uint32(center)
# M = cv2.moments(cnts[i])
# center = (int(M["m10"] / M["m00"]), int(M["m01"] / M["m00"]))
return (center[0],center[1]), cnts[i]
else:
return None, None
def detectBallThresh_RGB(frame,limits, curr_estimate, rad_thresh=[35,15], dist_thresh=40):
'''
Function to detect ball by RGB-color thresholding
input:
frame - image in which the ball is to be detected
limits - color range in tuple (upper,lower) format where
upper = [b_max,g_max,r_max] and lower = [b_min,g_min,r_min]
rad_thresh - threshold on size of the ball detected
output:
center - center of the ball
radius - radius of the ball
'''
# global rad_thresh
upper = limits[0]
lower = limits[1]
mask = cv2.inRange(frame, lower, upper)
mask = applyMorphTransforms(mask)
# find call contours and sort in decreasing order of area
cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)[-2]
cnts = sorted(cnts, key=cv2.contourArea, reverse=True)
flag = False
# largest contour whose minimum enclosing circle has radius within fixed range
if len(cnts) > 0:
for i in range(len(cnts)):
hull = cv2.convexHull(cnts[i])
(center, radius) = cv2.minEnclosingCircle(hull)
if curr_estimate is None:
if radius < rad_thresh[0] and radius > rad_thresh[1]:
flag = True
break
else:
if np.linalg.norm(np.double(center)-np.double(curr_estimate)) < dist_thresh:
if radius < rad_thresh[0] and radius > rad_thresh[1]:
flag = True
break
if flag:
center = np.uint32(center)
return (center[0],center[1]), radius
return None, None
def detectBallHB(frame, roiHist):
'''
Function to detect ball using Histogram Backprojection
input:
frame - image in which the ball is to be detected
roiHist - histogram of the colors of the ball
output:
center - center of the ball
        contour - contour of the detected ball
'''
global rad_thresh,kernel
hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
backProj = cv2.calcBackProject([hsv], [0, 1], roiHist, [0, 180, 0, 256], 1)
mask = cv2.inRange(backProj, 50, 255)
mask = cv2.dilate(mask,np.ones((3,3)))
mask = cv2.erode(mask, np.ones((3,3)))
# find the biggest contour with radius with certain range
cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)[-2]
cnts = sorted(cnts, key=cv2.contourArea, reverse=True)
i = 0
flag = False
if len(cnts) > 0:
for i in range(len(cnts)):
hull = cv2.convexHull(cnts[i])
(center, radius) = cv2.minEnclosingCircle(hull)
if radius < rad_thresh and radius>10:
flag=True
break
if flag:
center = np.uint32(center)
return (center[0],center[1]), cnts[i]
return None, None
def removeBG(frame, fgbg):
'''
Function to obtain foreground image and mask
input
frame - current frame in the video
fgbg - object of background subractor class
output
frame_fg - masked frame
fg_mask - foreground mask
'''
fg_mask = fgbg.apply(frame)
fg_mask = cv2.dilate(fg_mask, np.ones((3, 3)))
fg_mask = cv2.erode(fg_mask, np.ones((5, 5)))
frame_fg = cv2.bitwise_and(frame, frame, mask=fg_mask)
return frame_fg, fg_mask
def getKF(init_mean,init_cov=10*np.eye(4)):
# state = [x, y, v_x, v_y]
tr_mtx = np.array([[1, 0, 1, 0],
[0, 1, 0, 1],
[0, 0, 1, 0],
[0, 0, 0, 1]])
a = 0.0075
tr_offset = np.array([0 , a/2 , 0, a])
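    # the offset adds a constant acceleration 'a' each step (presumably gravity in pixel units): a/2 to the y position, a to v_y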
kf = KalmanFilter(n_dim_state=4,
n_dim_obs=4,
transition_matrices=tr_mtx,
transition_offsets=tr_offset,
observation_matrices=tr_mtx,
observation_offsets=tr_offset,
initial_state_mean=init_mean,
initial_state_covariance=init_cov,
transition_covariance=0.1*init_cov,
observation_covariance=0.5*init_cov)
return kf
def updateKF(kf,mean,cov, new_meas=None):
if new_meas is None:
return kf.filter_update(mean,cov)
else:
return kf.filter_update(mean,cov,new_meas)
def checkInFrame(center,frame_size):
if center[0]<0 or center[1]<0:
return False
elif center[0]>frame_size[1] or center[1]>frame_size[0]:
return False
return True | [
"matplotlib"
] |
682fdfe02a97d07996632e26e60e56e39731a423 | Python | Kitware/vtk-examples | /src/Python/VisualizationAlgorithms/ImageGradient.py | UTF-8 | 4,105 | 2.59375 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env python
# noinspection PyUnresolvedReferences
import vtkmodules.vtkInteractionStyle
# noinspection PyUnresolvedReferences
import vtkmodules.vtkRenderingOpenGL2
from vtkmodules.vtkCommonColor import vtkNamedColors
from vtkmodules.vtkIOImage import vtkMetaImageReader
from vtkmodules.vtkImagingColor import vtkImageHSVToRGB
from vtkmodules.vtkImagingCore import (
vtkImageCast,
vtkImageConstantPad,
vtkImageExtractComponents,
vtkImageMagnify
)
from vtkmodules.vtkImagingGeneral import (
vtkImageEuclideanToPolar,
vtkImageGaussianSmooth,
vtkImageGradient
)
from vtkmodules.vtkInteractionImage import vtkImageViewer
from vtkmodules.vtkRenderingCore import (
vtkRenderWindowInteractor
)
def main():
fileName = get_program_parameters()
colors = vtkNamedColors()
# Read the CT data of the human head.
reader = vtkMetaImageReader()
reader.SetFileName(fileName)
reader.Update()
cast = vtkImageCast()
cast.SetInputConnection(reader.GetOutputPort())
cast.SetOutputScalarTypeToFloat()
# Magnify the image.
magnify = vtkImageMagnify()
magnify.SetInputConnection(cast.GetOutputPort())
magnify.SetMagnificationFactors(2, 2, 1)
magnify.InterpolateOn()
# Smooth the data.
# Remove high frequency artifacts due to linear interpolation.
smooth = vtkImageGaussianSmooth()
smooth.SetInputConnection(magnify.GetOutputPort())
smooth.SetDimensionality(2)
smooth.SetStandardDeviations(1.5, 1.5, 0.0)
smooth.SetRadiusFactors(2.01, 2.01, 0.0)
# Compute the 2D gradient.
gradient = vtkImageGradient()
gradient.SetInputConnection(smooth.GetOutputPort())
gradient.SetDimensionality(2)
# Convert the data to polar coordinates.
# The image magnitude is mapped into saturation value,
# whilst the gradient direction is mapped into hue value.
polar = vtkImageEuclideanToPolar()
polar.SetInputConnection(gradient.GetOutputPort())
polar.SetThetaMaximum(255.0)
# Add a third component to the data.
# This is needed since the gradient filter only generates two components,
# and we need three components to represent color.
pad = vtkImageConstantPad()
pad.SetInputConnection(polar.GetOutputPort())
pad.SetOutputNumberOfScalarComponents(3)
pad.SetConstant(200.0)
# At this point we have Hue, Value, Saturation.
# Permute components so saturation will be constant.
# Re-arrange components into HSV order.
permute = vtkImageExtractComponents()
permute.SetInputConnection(pad.GetOutputPort())
permute.SetComponents(0, 2, 1)
# Convert back into RGB values.
rgb = vtkImageHSVToRGB()
rgb.SetInputConnection(permute.GetOutputPort())
rgb.SetMaximum(255.0)
# Set up a viewer for the image.
# Note that vtkImageViewer and vtkImageViewer2 are convenience wrappers around
# vtkActor2D, vtkImageMapper, vtkRenderer, and vtkRenderWindow.
# So all that needs to be supplied is the interactor.
viewer = vtkImageViewer()
viewer.SetInputConnection(rgb.GetOutputPort())
viewer.SetZSlice(22)
viewer.SetColorWindow(255.0)
viewer.SetColorLevel(127.0)
viewer.GetRenderWindow().SetSize(512, 512)
viewer.GetRenderer().SetBackground(colors.GetColor3d('Silver'))
viewer.GetRenderWindow().SetWindowName('ImageGradient')
# Create the RenderWindowInteractor.
iren = vtkRenderWindowInteractor()
viewer.SetupInteractor(iren)
viewer.Render()
iren.Initialize()
iren.Start()
def get_program_parameters():
import argparse
description = 'ImageGradient.'
epilogue = '''
Visualization of gradient information.
'''
parser = argparse.ArgumentParser(description=description, epilog=epilogue,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('fileName',
help='The file FullHead.mhd. Note: file FullHead.raw.gz must also be present in the same folder.')
args = parser.parse_args()
return args.fileName
if __name__ == '__main__':
main()
| [
"vtk"
] |
461b14c7e53ee7efe651fdac2642f9dfa5b24f31 | Python | doddgray/experiment_control | /experiment_control/shg_wg_probing/HPLightWave.py | UTF-8 | 16,461 | 2.609375 | 3 | [
"MIT"
] | permissive | import sys
sys.path.insert(0,'../..')
import pyvisa as visa
import time
# from Interfaces.Laser import TunableLaser
# from Interfaces.Instrument import Instrument
# from Interfaces.PowMeter import PowerMeter
import matplotlib.pyplot as plt
import math
# The HP Lightwave is particular because it is both a laser and a
# power meter.
HP_ADDR = "GPIB1::20::INSTR"
DEFAULT_INTEGRATION_TIME = 0.05
SWEEP_DWELL_TIME = 0.4 # Time to sleep at each wavelength when we do a
# tx curve by setting each wavelength at a time (in s)
class HPLightWave: #(Instrument, TunableLaser, PowerMeter):
def __init__(self, tap_channel, rec_channel, use_as_laser=True, integration_time=DEFAULT_INTEGRATION_TIME,
sweep_dwell_time=SWEEP_DWELL_TIME):
# super().__init__()
self.lwmain = None
self.tap_channel = tap_channel # Power meter channel measuring the tap power
self.rec_channel = rec_channel # Power meter channel measuring the through power
self.int_time = integration_time
self.is_laser = use_as_laser
self.sweep_dwell_time = sweep_dwell_time
def initialize(self):
"""
Initializes the instrument
:return:
"""
print('Opening connnection to HP laser and power meter')
rm = visa.ResourceManager()
self.lwmain = rm.open_resource(HP_ADDR, timeout=20000)
self.initialize_sensors()
def get_id(self):
return(["Laser", "TunableLaser", "PowerMeter"])
def close(self):
"""
Closes the instrument
:return:
"""
print('Closing connnection to HP laser')
self.lwmain.write("INIT%d:CHAN1:CONT 1" % self.tap_channel)
self.lwmain.write("INIT%d:CHAN1:CONT 1" % self.rec_channel)
self.lwmain.close()
def turn_off(self):
"""
Turn light off
:return:
"""
if self.is_laser:
print('Turning off HP laser')
self.lwmain.write(":POW:STAT 0")
def turn_on(self):
"""
Turn light on
:return:
"""
if self.is_laser:
print('Turning on HP laser')
self.lwmain.write(":POW:STAT 1")
def set_power(self, power):
"""
Set the power to the specified value (in mW)
:return:
"""
if self.is_laser:
print('Setting HP laser power to %.4f mW' % power)
self.lwmain.write("POW %.7EMW" % power)
time.sleep(0.01)
def set_wavelength(self, wavelength):
"""
Set the wavelength to the specified value (in nm)
:return:
"""
print('Setting HP laser wavelength to %.4f nm' % wavelength)
self.lwmain.write("WAV %.7ENM" % wavelength)
time.sleep(0.01)
self.lwmain.write("SENS%d:CHAN1:POW:WAV %.7ENM" %
(self.tap_channel, wavelength))
self.lwmain.write("SENS%d:CHAN1:POW:WAV %.7ENM" %
(self.rec_channel, wavelength))
def get_powers(self):
"""
Returns a list with the power measured in the tap port and in
the through port. These ports will be specified in the init of
the actual power_meter implementation.
First element is the tap power, second the through power
:return: A 2 element list with the power in W
"""
self.lwmain.write("INIT%d:IMM" % self.tap_channel)
self.lwmain.write("INIT%d:IMM" % self.rec_channel)
power_tap_string = self.lwmain.query("FETC%d:POW?" % self.tap_channel)
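        # clamp negative readings to zero; unparsable responses fall back to 0.0 W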
try:
power_tap = max(0.0, float(power_tap_string))
except ValueError:
power_tap = 0.0
received_power_string = self.lwmain.query("FETC%d:POW?" % self.rec_channel)
try:
received_power = max(0.0, float(received_power_string))
except ValueError:
received_power = 0.0
self.lwmain.write("*CLS")
return [power_tap, received_power]
def initialize_sensors(self):
"""
Initializes the power meters
:return:
"""
# Set integration time
self.lwmain.write("SENS%d:CHAN1:POW:ATIME %.3fS" % (self.tap_channel, self.int_time))
self.lwmain.write("SENS%d:CHAN1:POW:ATIME %.3fS" % (self.rec_channel, self.int_time))
# Set units to mW
self.lwmain.write("SENS%d:CHAN1:POW:UNIT 1" % self.tap_channel)
self.lwmain.write("SENS%d:CHAN1:POW:UNIT 1" % self.rec_channel)
# Automatic power range
self.lwmain.write("SENS%d:CHAN1:POW:RANG:AUTO 1" % self.tap_channel)
self.lwmain.write("SENS%d:CHAN1:POW:RANG:AUTO 1" % self.rec_channel)
# Do not measure continuously
self.lwmain.write("INIT%d:CHAN1:CONT 0" % self.tap_channel)
self.lwmain.write("INIT%d:CHAN1:CONT 0" % self.rec_channel)
def set_integration_time(self, channel, int_time):
self.int_time = int_time
self.lwmain.write("SENS%d:CHAN1:POW:ATIME %.3fS" % (channel, int_time))
def set_range(self, channel=None, power_range='AUTO'):
"""
Sets the power range of the power meter in the specified channel
:param channel: Channel of the power meter
:param power_range: The power range. If 'AUTO', it is set to AUTOMATIC. Else, it is the range in
dBm (from -70 to 10 dBm in steps of 10 dBm)
:return:
"""
if channel is None:
channel = self.rec_channel
if power_range == 'AUTO':
self.lwmain.write("SENS%d:POW:RANG:AUTO 1" % channel)
else:
if power_range <= -70:
power_range = -70
elif power_range <= -60:
power_range = -60
elif power_range <= -50:
power_range = -50
elif power_range <= -40:
power_range = -40
elif power_range <= -30:
power_range = -30
elif power_range <= -20:
power_range = -20
elif power_range <= -10:
power_range = -10
elif power_range <= 0:
power_range = 0
else:
power_range = 10
self.lwmain.write("SENS%d:POW:RANG:AUTO 0" % channel)
self.lwmain.write("SENS%d:POW:RANG %dDBM" % (channel, int(power_range)))
def start_sweep(self):
self.lwmain.write("WAV:SWE START")
def configure_sweep(self, init_wav, end_wav, num_wav, sweep_speed=None):
"""
Configures the laser to take a wavelength sweep
:param init_wav:
:param end_wav:
:param num_wav:
:return:
"""
if sweep_speed is None:
sweep_speed = self.__choose_sweep_speed__(end_wav, init_wav)
step_width = round ( (end_wav - init_wav) / num_wav, 4 ) # in nm
true_num_wavs = math.floor( (end_wav - init_wav)/step_width )
sweep_time = (end_wav-init_wav)/sweep_speed
# Configure laser sweep
self.lwmain.write("WAV:SWE:CYCL 1") # We only want one sweep
self.lwmain.write("WAV:SWE:MODE CONT") # Set to use continuous sweep (not stepped)
self.lwmain.write("TRIG0:OUTP STF") # One output trigger per step
self.lwmain.write("WAV:SWE:SPE %.7ENM/S" % sweep_speed) # sweep speed in nm/s
self.lwmain.write("WAV:SWE:STEP %.7ENM" % step_width)
self.lwmain.write("WAV:SWE:STAR %.7ENM" % init_wav) # Start wavelength
self.lwmain.write("WAV:SWE:STOP %.7ENM" % end_wav) # Stop wavelength
time.sleep(0.5)
return sweep_time, true_num_wavs
def __choose_sweep_speed__(self, end_wav, init_wav):
# There are different sweep speeds: 0.5, 5, 20 and 40 nm/s
# Choose depending on the range of the sweep
if (end_wav - init_wav) > 30.0:
sweep_speed = 5
else:
sweep_speed = 0.5 # in nm/s. This speed seems good. There is also 5 nm/s, 20 nm/s and 40 nm/s
return sweep_speed
def take_sweep(self, init_wav, end_wav, num_wav, sweep_speed=None):
"""
Takes a wavelength sweep from init_wav to end_wav with num_wav points,
and measured the power.
:return:
"""
# sweep_speed = self.__choose_sweep_speed__(end_wav, init_wav)
if sweep_speed is None:
sweep_speed = self.__choose_sweep_speed__(end_wav, init_wav)
total_sweep_time = (end_wav-init_wav)/sweep_speed # in s
#sweep_speed = (end_wav-init_wav)/total_sweep_time # in nm/s
#print(sweep_speed)
step_width = round ( (end_wav - init_wav) / num_wav, 4 ) # in nm
averaging_time = total_sweep_time*1e3/num_wav # in ms
# Configure power meter logging
self.lwmain.write("SENS%d:CHAN1:FUNC:PAR:LOGG %d,%.7EMS" %
(self.tap_channel, num_wav, averaging_time))
self.lwmain.write("SENS%d:CHAN1:FUNC:PAR:LOGG %d,%.7EMS" %
(self.rec_channel, num_wav, averaging_time))
# Configure laser sweep
self.lwmain.write("WAV:SWE:CYCL 1") # We only want one sweep
# self.lwmain.write("WAV:SWE:DWEL %.7EMS" % dwell_time) # Dwell time (stepped sweep)
self.lwmain.write("WAV:SWE:MODE CONT") # Set to use continuous sweep (not stepped)
self.lwmain.write("TRIG0:OUTP STF") # Necessary for lambda logging. SWST
self.lwmain.write("WAV:SWE:LLOG 1") # Turn on lambda logging
self.lwmain.write("WAV:SWE:SPE %.7ENM/S" % sweep_speed) # sweep speed in nm/s
self.lwmain.write("WAV:SWE:STEP %.7ENM" % step_width)
self.lwmain.write("WAV:SWE:STAR %.7ENM" % init_wav) # Start wavelength
self.lwmain.write("WAV:SWE:STOP %.7ENM" % end_wav) # Stop wavelength
# = self.lwmain.query('WAV:SWE:CHEC?')
#print(check_sweep)
#if check_sweep != 'OK':
# print('Sweep not correct: ' + check_sweep)
# return
# Trigger sweep and acquisition
self.lwmain.write("TRIG%d:CHAN1:INP SME" %
self.rec_channel)
self.lwmain.write("TRIG%d:CHAN1:INP SME" %
self.tap_channel)
self.lwmain.write("SENS%d:CHAN1:FUNC:STAT LOGG,START" %
self.rec_channel)
self.lwmain.write("SENS%d:CHAN1:FUNC:STAT LOGG,START" %
self.tap_channel)
self.lwmain.write("WAV:SWE START")
time.sleep(total_sweep_time+10)
# Retrieve data
wavs = self.lwmain.query_ascii_values(":READ:DATA?")
print(wavs)
self.lwmain.write("SENS%d:CHAN1:FUNC:RES?" %
self.rec_channel)
rec_data = self.lwmain.read_raw().decode('ascii')
print(rec_data)
tap_data = self.lwmain.query_ascii_values("SENS%d:CHAN1:FUNC:RES?" %
self.tap_channel)
wavs = self.lwmain.query_ascii_values(":READ:DATA?")
return [wavs, rec_data, tap_data]
def log_trial(self):
"""
Copy the example given by the manual (no laser and power meter sync)
"""
slot = 1
num_points = 100
av_time = 0.02
# self.lwmain.write("*RST")
# time.sleep(2)
self.lwmain.write("*CLS")
# self.lwmain.write("TRIG%d:OUTP DIS" % slot)
self.lwmain.write("TRIG%d:INP CME" % slot)
self.lwmain.write("SENS%d:CHAN1:FUNC:PAR:LOGG %d,%.7E" %
(slot, num_points, av_time))
self.lwmain.write("SENS%d:CHAN1:FUNC:STAT LOGG,START" % slot)
self.lwmain.write(":TRIG 2")
time.sleep(num_points*av_time)
# Check for acquisition finished
acq_finished = self.lwmain.query("SENS%d:CHAN1:FUNC:STATE?" % slot)
while not ('COMPLETE' in acq_finished):
print(acq_finished)
time.sleep(0.5)
acq_finished = self.lwmain.query("SENS%d:CHAN1:FUNC:STATE?" % slot)
sys.stdout.flush()
print(acq_finished)
# Acquisition finished, query the values
self.lwmain.write("SENS%d:CHAN1:FUNC:RES?" % slot)
# response = self.lwmain.read_raw()
data = self.lwmain.read_binary_values()
return data
# The instrument returns the logging result in the following format:
# #xyyyffff...; the first digit after the hash denotes the number of ascii
# digits following (y) ; y specifies the number of binary data following;
# "ffff" represent the 32Bit floats as log result.
# response_ascii = response[0:2].decode('ascii')
# print(response_ascii)
# num_digits = response_ascii[1]
# print(num_digits)
#
# num_points = response[2:2+int(num_digits)].decode('ascii')
# print(num_points)
# # Tentative things
#
# response = response[2+int(num_digits):]
# print(float(response[0:4]))
# #data = response.decode('ascii')
# #print(data)
# data = struct.unpack('<float', response[0:4])
# print(data)
def log_trial_2(self):
"""
Play with triggering
"""
slot = 1
num_points = 40
av_time = 0.02
# self.lwmain.write("*RST")
time.sleep(2)
self.lwmain.write("*CLS")
self.lwmain.write("SENS%d:CHAN1:FUNC:PAR:LOGG %d,%.7E" %
(slot, num_points, av_time))
self.lwmain.write("WAVELENGTH:SWEEP:MODE CONTINUOUS")
self.lwmain.write("WAVELENGTH:SWEEP:SPEED 5E-9")
self.lwmain.write("WAVELENGTH:SWEEP:CYCLES 0")
self.lwmain.write("WAVELENGTH:SWEEP:START 1520NM")
self.lwmain.write("WAVELENGTH:SWEEP:STOP 1580NM")
self.lwmain.write("WAVELENGTH:SWEEP:STEP 1NM")
# print(self.lwmain.query("SOURCE0:WAVELENGTH:SWEEP:EXP?"))
self.lwmain.write("INITIATE1:CONTINUOUS 0")
self.lwmain.write("TRIG:CONF 3") # 1 for default
print(self.lwmain.query(":TRIG:CONF?"))
sys.stdout.flush()
self.lwmain.write("TRIG0:OUTP STFINISHED")
self.lwmain.write("TRIG0:INP SWSTARTED")
self.lwmain.write("TRIG%d:OUTP DIS" % slot)
self.lwmain.write("TRIG%d:INP SME" % slot)
# self.lwmain.write("TRIG%d:INP IGN" % slot)
print(self.lwmain.query("TRIG%d:INP?" % slot))
# time.sleep(1)
# print(self.lwmain.query("TRIG%d:OFFS?" % slot))
sys.stdout.flush()
# time.sleep(1)
# time.sleep(num_points*av_time)
self.lwmain.write("SENS%d:CHAN1:FUNC:STAT LOGG,START" % slot)
time.sleep(3)
self.lwmain.write("WAVELENGTH:SWEEP START")
self.lwmain.write(":TRIG 2")
# Check for acquisition finished
acq_finished = self.lwmain.query("SENS%d:FUNC:STATE?" % slot)
while not ('COMPLETE' in acq_finished):
print(acq_finished)
time.sleep(2)
# self.lwmain.write(":TRIG 1")
acq_finished = self.lwmain.query("SENS%d:CHAN1:FUNC:STATE?" % slot)
sys.stdout.flush()
print(acq_finished)
# Acquisition finished, query the values
self.lwmain.write("SENS%d:CHAN1:FUNC:RES?" % slot)
# response = self.lwmain.read_raw()
dt = self.lwmain.read_binary_values()
return dt
def get_state(self):
"""
Returns a list wiht the following elements:
1. The current wavelength
2. The current power
3. If the laser is on or off.
"""
power = self.lwmain.query_ascii_values(":SOUR:POW?") # Returns the power in W
power = float(power[0])*1e3
wav = self.lwmain.query_ascii_values("WAV?") # Returns the wavelength in m
wav = float(wav[0])*1e9
state = self.lwmain.query_ascii_values(":POW:STAT?")
state = int(state[0])
return [wav, power, state]
if __name__ == '__main__':
laser = HPLightWave(tap_channel=1, rec_channel=3)
laser.initialize()
print(laser.get_state())
# laser.configure_sweep(1530, 1560, 300)
laser.close()
# laser.turn_on()
# data = laser.log_trial_2()
# plt.plot(data)
# plt.show()
# [wavs, rec_data, tap_data] = laser.take_sweep(1530.0, 1560.0, 201)
# plt.plot(wavs, rec_data)
# plt.plot(wavs, tap_data)
# plt.show()
| [
"matplotlib"
] |
e272624c09d42ea7201882ab9c95cc783c7c4ecf | Python | UBC-MDS/group29 | /src/eda2.py | UTF-8 | 4,730 | 2.984375 | 3 | [
"MIT",
"CC-BY-4.0"
] | permissive | # authors: Javairia Raza
# date: 2020-11-27
"""Creates a Pandas Profiling report and visuals for exploratory data anaylsis using two datasets
Usage:
eda2.py --input=<local_path> --output=<figures_path>
eda2.py (-h | --help)
Options:
-h --help Shows the arguments needed to run script.
--input=<local_path> All the paths needed for EDA including file name. EDA needed two different datasets needed in order of raw data
followed by processed data separated by a semicolon ;.
example: "data/raw/dataset_diabetes/diabetic_data.csv;data/processed/diabetes_with_race.csv".
--output=<figures_path> Local path to where to save the figures into.
"""
from docopt import docopt
import os
import random
import pandas as pd
import numpy as np
import altair as alt
from altair_saver import save
from pandas_profiling import ProfileReport
from sklearn.model_selection import train_test_split
opt = docopt(__doc__)
string = opt["--input"]
datafile = string.split(";")
def main(opt):
# Pandas Profiling
#
diabetes_csv = pd.read_csv(datafile[0])
profile = ProfileReport(diabetes_csv)
file_path1 = os.path.join(opt["--output"], "pandas_profiling.html")
profile.to_file(output_file=file_path1)
#Subset the data for visualization optimization
random.seed(123)
diabetes_with_race = pd.read_csv(datafile[1])
train_df, test_df = train_test_split(diabetes_with_race, test_size=0.2, random_state=123)
diabetes_subset = train_df.sample(n = 1000)
# Heatmap for Race
sort_list = ["Caucasian", "AfricanAmerican", "Hispanic", "Asian", "Other", "null"]
race_plot = alt.Chart(diabetes_subset).mark_rect().encode(
y = alt.Y("race:N", title = None, sort = sort_list),
x = alt.X("readmitted", title = "Readmitted Status"),
color = "count()",
).properties(width = 100, height = 300)
file_path2 = os.path.join(opt["--output"], "figure1_racedist_eda.svg")
race_plot.save(file_path2)
# Histogram for numerical variables
num_var_dict = {"num_medications": "Number of Medications",
"num_lab_procedures": "Number of Lab Procedures",
"num_procedures": "Number of Procedures other than lab",
"diag_1" : "Primary Diagnosis",
"time_in_hospital" : "Time spent in hospital (days)"}
chart1 = []
for key, value in num_var_dict.items():
chart1.append(alt.Chart(diabetes_subset).mark_area(
opacity=0.3,
interpolate='step'
).encode(
alt.X(key, type = "quantitative", bin=alt.Bin(maxbins=10), title=value),
alt.Y('count()', stack=None),
alt.Color('readmitted:N', title = "Readmitted Status")
).properties(width = 100, height = 100))
num_hists = alt.hconcat(*chart1)
file_path3 = os.path.join(opt["--output"], "figure2_numhisttarget_eda.svg")
num_hists.save(file_path3)
# Histogram for categorical variables
cat_var_dict = {"A1Cresult": "Hemoglobin A1C Levels",
"metformin": "Prescribed Metformin or Changed Dosage",
"insulin": "Prescribed Insulin or Changed Dosage"}
chart2 = []
for key, value in cat_var_dict.items():
chart2.append(alt.Chart(diabetes_subset).mark_rect().encode(
alt.Y("readmitted", type = "nominal", title = "Readmitted Status"),
alt.X(key, type = "nominal", stack=None, title=value),
alt.Color('count()')
).properties(width = 100, height = 120))
cat_hists = alt.hconcat(*chart2)
file_path4 = os.path.join(opt["--output"], "figure3_numcattarget_eda.svg")
cat_hists.save(file_path4)
# Scatterplot for Numerical Variables
numeric_cols = ['num_procedures','time_in_hospital', 'num_lab_procedures',
'num_medications', 'number_diagnoses',
'number_inpatient', 'number_emergency']
numeric_scatter = alt.Chart(diabetes_subset).mark_point(size = 4).encode(
alt.X(alt.repeat('column'), type = 'quantitative', scale = alt.Scale(zero = False)),
alt.Y(alt.repeat('row'), type = 'quantitative', scale = alt.Scale(zero = False))
).properties(height = 100, width = 100
).repeat(
row = numeric_cols,
column = numeric_cols
).configure_axis(labels = False)
file_path5 = os.path.join(opt["--output"], "figure4_numscatter_eda.svg")
numeric_scatter.save(file_path5)
return print("Done! Check the folder!")
if __name__ == "__main__":
main(opt)
| [
"altair"
] |
5253bec4249c24d548db7aa03a2cbae59a99992d | Python | CookieNoob/NeuroNet | /neuralnetwork_main.py | UTF-8 | 2,147 | 2.859375 | 3 | [] | no_license | import neuralnetwork as nn
import numpy as np
import matplotlib.pyplot as plt
import neuralnetwork_gui as ng
import pygame
def main():
with np.load('mnist.npz') as data:
images = data['training_images']
labels = data['training_labels']
layer_sizes = (784, 16, 16, 10)
training_set_size = 30000
trainingImages = images[:training_set_size]
trainingLabels = labels[:training_set_size]
testImages = images[training_set_size:]
testlabels = labels[training_set_size:]
plt.imshow(images[0].reshape(28,28),cmap='gray')
for i in range(10):
if labels[0][i] == 1:
print("Die Zahl hier ist eine", i)
break
plt.show()
Network = nn.NeuralNetwork(layer_sizes)
Network.accuracy(testImages, testlabels)
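    # the numeric arguments passed to train() below are assumed (not verified against the neuralnetwork module) to be epochs, mini-batch size and learning rate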
Network.train(trainingImages, trainingLabels, 2, 10, 4.0)
Network.accuracy(testImages, testlabels)
Network.train(trainingImages, trainingLabels, 8, 20, 2.0)
Network.accuracy(testImages, testlabels)
predict(Network, testImages[19])
plt.imshow(testImages[19].reshape(28,28),cmap='gray')
plt.show()
eventloop(Network)
def predict(Network, field):
prediction = Network.feedforward(field)
for i in range(10):
if prediction[i] == max(prediction):
print("Diese Zahl ist eine", i)
def eventloop(Network):
gui = ng.gui(400, 400)
running = True
drawing = False
while(running):
for event in pygame.event.get():
if event.type == pygame.KEYDOWN:
if event.key ==pygame.K_SPACE:
gui.resetGrid()
if event.key == pygame.K_RETURN:
predict(Network, gui.onedimList())
elif event.type == pygame.MOUSEBUTTONDOWN:
drawing = True
elif event.type == pygame.MOUSEBUTTONUP:
drawing = False
elif event.type == pygame.MOUSEMOTION:
if drawing:
gui.paint(pygame.mouse.get_pos())
elif event.type == pygame.QUIT:
running = False
if __name__ == "__main__":
main()
| [
"matplotlib"
] |
d39f521378c13146a0463030b84cfb24a163e83e | Python | ivysandberg/PEconPy | /code/classification/Classification_frame.py | UTF-8 | 2,682 | 3.546875 | 4 | [] | no_license | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_curve
# import Data
df = pd.read_csv('path to data')    # replace 'path to data' with the path to your csv file
# note you must have 'tidy' data: each feature is a column & each row is an instance
df.shape # gives (#instances, #features)
# Exploratory Data Analysis (EDA)
df.head()
df.info()
df.describe()
# Create plots to examine specific features
plt.figure()
sns.countplot(x='variable', data=df, palette='RdBu')
plt.xticks([0,1], ['No', 'Yes'])
plt.show()
# Create arrays for the features and target variable
y = df['target'].values
X = df.drop('target', axis=1).values
''' K Nearest neighbors
Predict the label of a data point by looking at the 'k' closest
labeled data points and taking a majority vote'''
model = KNeighborsClassifier(n_neighbors=6)
'''larger k=smoother decision boundary=less complex model
smaller k=more complex model=can lead to overfitting
all machine learning models are implemented as Python classes'''
'''Training a model on the data='fitting' a model to the Data
use .fit() method
have the features/data as a numpy array and the target variable as
a numpy array the features must also be continuous values as opposed to
categorical & no missing values'''
model.fit(X, y)
# To predict labels of new data: use .predict() method
model.predict(new_data)
'''To measure model performance, split data into training & test data
default training data 75% testing data 25%
stratify=y has the training and testing data represent the
same proportion of labels as the original dataset'''
X_train, X_test, y_train, y_test = train_test_split(X, y,
test_size=0.25, random_state=21, stratify=y)
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
model.score(X_test, y_test)
# Create a for loop to test different values of k
neighbors = np.arange(1,9)
# make an array to store test accuracies
test_accuracy = np.empty(len(neighbors))
for i, k in enumerate(neighbors):
model = KNeighborsClassifier(n_neighbors=k)
model.fit(X_train, y_train)
test_accuracy[i] = model.score(X_test, y_test)
# plot accuracy scores
plt.title('KNN Vary Value of K (n_neighbors)')
plt.plot(neighbors, test_accuracy, label='Testing Accuracy')
plt.xlabel('Number of Neighbors')
plt.ylabel('Accuracy')
plt.show()
# Evaluate level of confidence in the model
print (confusion_matrix(y_test, y_pred))
print (classification_report(y_test, y_pred))
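# LogisticRegression and roc_curve are imported above but not used yet; the lines below
# are one minimal sketch of how they could complete this frame (they assume the same
# X_train/X_test/y_train/y_test split created earlier).
logreg = LogisticRegression()
logreg.fit(X_train, y_train)
# predicted probabilities for the positive class, needed for the ROC curve
y_pred_prob = logreg.predict_proba(X_test)[:, 1]
fpr, tpr, thresholds = roc_curve(y_test, y_pred_prob)
plt.plot([0, 1], [0, 1], 'k--')  # chance line
plt.plot(fpr, tpr, label='Logistic Regression')
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC Curve')
plt.legend()
plt.show()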
| [
"matplotlib",
"seaborn"
] |
c2f1f61a32a7991eb3b1cea79732193685167477 | Python | imasillypirate/Exoplanet-Survivor | /ExoplanetSurvivor.py | UTF-8 | 33,428 | 2.765625 | 3 | [] | no_license | import os
import numpy as np
import matplotlib.pyplot as plt
#from scipy.optimize import curve_fit
import random as rnd
import Tkinter as Tk
import tkMessageBox
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.backend_bases import key_press_handler
def intFromList(ifl_x,ifl_y):
#trapezoid integration of lists
#inputs: ifl_x = independent variable (list)
# ifl_y = dependent variable (same sized list)
#Output: ifl_out = trapezoid integration of y dx over list
ifl_out = 0.0
for ifl_i in range(len(ifl_x)-1):
ifl_out = ifl_out + 1.0*(ifl_x[ifl_i+1]-ifl_x[ifl_i])*(ifl_y[ifl_i+1] + ifl_y[ifl_i])/2
return ifl_out
def AppMag(AM_Dist, AM_L):
#Determine the apparent magnitude of a star, given luminosity and distance.
#Inputs: AM_Dist = Distance to the star in LY
# AM_L = Luminosity of the star in Stellar luminosities
#Output: AM_m = apparent magnitude of the star
AM_DSun = 1.58*10**(-5)#Distance to sun in LY
AM_m = -26.73 - 2.5*np.log10(AM_L*(AM_DSun/AM_Dist)**2)
return AM_m
def Temp2Stats(T2S_Temp):
#convert temp (k) into rough values for stats of a main sequence star of that temp
#Input: T2S_Temp = Star temperature in k (scalar)
#Output: T2S_Stats = [T2S_SpecClass, T2S_Mass, T2S_Radius, T2S_Lumin, T2S_Lifetime]
# T2S_SpecClass = Spectral classification OBAFGKM
# T2S_Mass = Mass of star in solar masses
# T2S_Radius = Radius of star in solar radii
# T2S_Lumin = Luminosity in stellar luminosity relation
# T2S_Lifetime = Lifetime of star in Millions of years
# Results generally only good to the order of magnitude
T2S_Stats = [' ',0.0,0.0,0.0,0.0]
if (T2S_Temp > 48000) or (T2S_Temp<2510):#check that temp range is acceptable
print 'Error: Temp must be < 48000 K and > 2510 K. \n Given: '+str(T2S_Temp)
os._exit(0)
#-------------Find T2S_SpecClass
T2S_TRange = [30000,10000,7500,6000,5000,3500,0] #min Temp for each class
T2S_class = ['O', 'B', 'A', 'F', 'G', 'K', 'M'] #spectral classes
for T2S_i in range(len(T2S_TRange)):
if (T2S_Temp >= T2S_TRange[T2S_i]):
T2S_Stats[0] = T2S_class[T2S_i]
break
#-------------Find T2S_Lumin
#Some main sequence temperatures and luminosities to interpolate
T2S_T = [2510 , 2900, 3470, 3850,4350,4590,5080,5250,5570,5770,5800,6030,6200,6440,6890,8200,9940,9600,10500,14000,22000,30000,33000,35800, 44500, 48000]
T2S_L = [0.003,0.0034,0.036,0.077,0.15,0.19,0.37,0.42,0.66,0.79, 1, 1.5, 2.1, 3.2, 4.3, 14,25.4, 42, 95, 500,5700,52000,97000,170000,790000,990000]
T2S_Stats[3] = np.interp(T2S_Temp,T2S_T,T2S_L)
#-------------Find T2S_Radius
#L = 4 pi (sigma) R^2 T^4 #Stefan-Boltzmann Law
# So, R = sqrt(L/(4 pi (sigma) T^4))
T2S_sigma = ((5.6070367*10**(-8))/LSun)*(RSun**2)#Stefan-Boltzmann constant (L_sun /( R_sun^2 k^-4)
T2S_Stats[2] = np.sqrt(T2S_Stats[3]/(4*np.pi*T2S_sigma*T2S_Temp**4))
#-------------Find T2S_Mass
#L = M^a
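    # so M = L**(1/a); e.g. a star with L = 10 L_sun comes out near 10**(1/3.5) ~ 1.9 M_sun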
T2S_a = 3.5 #common value for MS stars
T2S_Stats[1] = 1.0*T2S_Stats[3]**(1/T2S_a)
#-------------Find T2S_Lifetime
# Lifetime = 10^10*M^(-2.5) years (give answer in mill years)
T2S_Stats[4] = (10.0**4)*T2S_Stats[1]**(-2.5)
return T2S_Stats
def KeplersThird(KT_a, KT_M, KT_m):
#Use kepler's 3rd law to determine the orbital period of a planet given the masses and orbital dist
# T^2 = (1/(M+m))a^3. If a in AU, M and m in M_solar, and T in earth years
    #Inputs: KT_a = orbital distance (in AU); we are assuming a circular orbit
# KT_M = star mass (in solar masses)
# KT_m = planet mass (in solar masses)
#Output: KT_T = orbital period (in earth years)
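    # quick check: KT_a = 1 AU, KT_M = 1 M_sun and negligible KT_m give KT_T = 1 yr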
KT_T = np.sqrt((KT_a**3)/(KT_M + KT_m))
return KT_T
def mkRadVelPlots(mR_Obt,mR_am, mR_cV, mR_Vmax, mR_P, mR_off,mR_name):#add m dependent noise, randomize time values, add orbital offset
#Make and display simulated radial velocity plot
#Inputs: mR_Obt = list of observation times (days)
# mR_am = star's apparent magnitude (for noise)
# mR_cV = constant velocity of star system away (m/s)
# mR_Vmax = Maximum radial velocity of star (m/s)
# mR_P = Orbital period (earth years)
# mR_off = Orbital offset (radians further in orbit than aligned with star at start)
#Output: mR_Obt = radial velocity observation times
# mR_V2 = radial velocity values
#Depends on: addRndNoise
mR_numPoints = 80 #number of dtat points to generate
mR_V = [(mR_cV + mR_Vmax*np.sin(-2*np.pi*mR_t/(mR_P*365.25) - mR_off) )for mR_t in mR_Obt]
mR_noiseCoeff = 0.1*(np.max(mR_V) - np.min(mR_V))#+0.00001*(10**mR_am) #scales noise based on magnitude
mR_V2 = addRndNoise(mR_V,1,mR_noiseCoeff)
return mR_Obt,mR_V2
def mkUnevenSpc(mus_min,mus_max,mus_n):
#generate a list of values ordered (low to high), but randomly distributed and unique in a given range
#Inputs: mus_min = minimum value
# mus_max = maximum value
# mus_n = number of values
#Output: mus_out = mus_n length list containing unique, sorted, random values in range
mus_szChng = 10 #random choices will be made from a list with this factor more elements than mus_n
mus_big = [mus_min+((mus_max-mus_min)/(mus_n*mus_szChng))*mus_k for mus_k in range(mus_n*mus_szChng)] #choices pulled from here
mus_out = []
for mus_i in range(mus_n):
mus_j = rnd.choice(range(mus_n*mus_szChng-mus_i))
mus_out.append(mus_big[mus_j])
del mus_big[mus_j]
mus_out.sort()
del mus_big,mus_i,mus_szChng,mus_j,mus_k
return mus_out
def addRndNoise(arn_in,arn_choice,arn_scl):
#adds random noise to a list
#inputs: arn_in = list to have noise added to items
# arn_choice = type of dist. 0= uniform, 1=normal
# arn_scl = Size of noise. Either +- value (for uniform dist) or std (for normal).
#outputs: arn_out = arn_in with added noise
if (arn_choice == 0):
arn_out = [rnd.uniform(arn_k-arn_scl,arn_k+arn_scl) for arn_k in arn_in]
elif (arn_choice == 1):
arn_out = [np.random.normal(arn_k,arn_scl) for arn_k in arn_in]
else:
print 'ERROR:: arn_choice must be 0 or 1. Entered '+str(arn_choice)
os._exit(0)
return arn_out
def calcShadeVals(csv_P,csv_off,csv_stR,csv_pR,csv_stCM,csv_pCM):
#For each observation time, calculate the ratio of the area of the star's disk that is shaded by the planet
#Inputs: csv_P = orbital period (years)
# csv_off = orbital offset (radians further in orbit than aligned with star at start)
# csv_stR = star radius (km)
# csv_pR = planet radius (km)
# csv_stCM = Star's orbital radius (km)
# csv_pCM = planet's orbital radius (km)
#Output: csv_shadA = list. For each observation (area shaded by planet)/(area of star disk)
#Depends on amountCover
csv_shadA = [] #area shadded by planet (km^2) for each observation
#print csv_pR, csv_stR
csv_n = 200 #number of transit observations
csv_a = 3*csv_stR #Offset used to determine range to explore
csv_minT = (-np.arcsin((csv_stR+csv_a)/(csv_stCM+csv_pCM))-csv_off+2*np.pi)*(csv_P*365.25/(2*np.pi))#time before transit
csv_maxT = (np.arcsin((csv_stR+csv_a)/(csv_stCM+csv_pCM))-csv_off+2*np.pi)*(csv_P*365.25/(2*np.pi))#time after transit
csv_obs = mkUnevenSpc(csv_minT,csv_maxT,csv_n)#
for csv_i in range(len(csv_obs)):
csv_stTheta = -2*np.pi*csv_obs[csv_i]/(csv_P*365.25) - csv_off#star angular position from pl aligned (starts to the left) (rad)
csv_pTheta = csv_stTheta + np.pi#planet angular position from pl aligned (starts to the right) (rad)
csv_sp = [csv_stCM*np.sin(csv_stTheta), csv_stCM*np.cos(csv_stTheta)]#position of star center [on trasit axis, toward viewer]
csv_pp = [csv_pCM*np.sin(csv_pTheta), csv_pCM*np.cos(csv_pTheta)]#position of planet center [on trasit axis, toward viewer]
#print (csv_pp[0]- csv_pR),csv_pp[0],(csv_pp[0]+ csv_pR),'|',(csv_sp[0]- csv_stR),csv_sp[0],(csv_sp[0]+ csv_stR)
if (csv_pp[1] <= 0 ) and ((csv_pp[0]+ csv_pR)>(csv_sp[0]-csv_stR)) and ((csv_pp[0]- csv_pR)<(csv_sp[0]+csv_stR)): #planet in front of star
csv_tempShade = amountCover(csv_stR, csv_sp[0], csv_pR, csv_pp[0], csv_n)/(np.pi*(csv_stR**2))
csv_shadA.append(csv_tempShade)
elif (((csv_pp[0]+ csv_pR)>(csv_sp[0]+csv_stR)) and ((csv_pp[0]- csv_pR)<(csv_sp[0]-csv_stR))): #planet bigger than star?
csv_shadA.append(1)
else: #star not blocked at all
csv_shadA.append(0)
return csv_shadA,csv_obs
def amountCover(ac_stR, ac_stx, ac_pR, ac_px, ac_n):
#determine area covered by planet
#Inputs: ac_stR = Radius of star (km)
# ac_stx = Position of center of star (km)
# ac_pR = Radius of planet (km)
# ac_px = position of center of planet along transit axis (km)
# ac_n = number of points to integrate over
#Output: ac_out = area of overlap
#Depends on intFromList
if ((ac_px - ac_pR) >= (ac_stx-ac_stR)) and ((ac_px + ac_pR) <= (ac_stx+ac_stR)):#planet entirely in front of star
ac_out = np.pi*(ac_pR**2)
else:#partial overlap
ac_yval = []
if ((ac_px - ac_pR) <= (ac_stx-ac_stR)): #overlap on lh side
ac_minx = ac_stx - ac_stR
ac_maxx = ac_px + ac_pR
ac_xval = np.linspace(ac_minx,ac_maxx,ac_n)
ac_int = (ac_stR**2 - ac_pR**2 + ac_px**2 - ac_stx**2)/(2*(ac_px-ac_stx))# x val for junction between disks
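            # from setting R_st^2 - (x - x_st)^2 = R_p^2 - (x - x_p)^2 (equal chord heights) and solving for x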
for ac_i in range(ac_n):
if (ac_xval[ac_i] <= ac_int):#star defines upper boundary
ac_tsr = (ac_stR**2 -(ac_xval[ac_i]-ac_stx)**2)
if (ac_tsr>0):
ac_yval.append(np.sqrt(ac_tsr))
else:
ac_yval.append(0)
else: #planet defines upper boundary
ac_tsr = (ac_pR**2 -(ac_xval[ac_i]-ac_px)**2)
if (ac_tsr>0):
ac_yval.append(np.sqrt(ac_pR**2 -(ac_xval[ac_i]-ac_px)**2))
else:
ac_yval.append(0)
elif ((ac_px + ac_pR) >= (ac_stx+ac_stR)): #overlap on rh side
ac_minx = ac_px - ac_pR
ac_maxx = ac_stx + ac_stR
ac_xval = np.linspace(ac_minx,ac_maxx,ac_n)
ac_int = (ac_stR**2 - ac_pR**2 + ac_px**2 - ac_stx**2)/(2*(ac_px-ac_stx))# x val for junction between disks
for ac_i in range(ac_n):
if (ac_xval[ac_i] <= ac_int):#planet defines upper boundary
ac_tsr = (ac_pR**2 -(ac_xval[ac_i]-ac_px)**2)
if (ac_tsr>0):
ac_yval.append(np.sqrt(ac_tsr))
else:
ac_yval.append(0)
else: #Star defines upper boundary
ac_tsr = (ac_stR**2 -(ac_xval[ac_i]-ac_stx)**2)
if (ac_tsr>0):
ac_yval.append(np.sqrt(ac_tsr))
else:
ac_yval.append(0)
else:
print 'ERROR:: something went wrong in amountCover. Not lh, rh or full cover.'
os._exit(0)
ac_out = 2*intFromList(ac_xval,ac_yval) #integrate output list, then multiply by 2 because only looking at top half
return ac_out
def mkTransPlot(mtp_obs,mtp_sv,mtp_L,mtp_d,mtp_name):
#Make plot of transit
#inputs: mtp_obs = observation times(days)
# mtp_sv = ratio of (shaded area)/(area of star disk) for each observation
# mtp_L = Luminosity of star (Solar Luminosity)
# mtp_d = distance to star (Ly)
#outputs: mtp_obs = transit observation values
# mtp_m2 = transit magnitude values
mtp_m = [AppMag(mtp_d, mtp_L*(1-mtp_k)) for mtp_k in mtp_sv]
mtp_noise = 0.05*(np.max(mtp_m) - np.min(mtp_m))#scale for acceptable noise
mtp_m2 = addRndNoise(mtp_m,1,mtp_noise)
return mtp_obs,mtp_m2
def determineHabitableZone(dhz_L):
#Determine the rough range of radii from star that contain the habitable zone
#Inputs: dhz_L = star's luminosity (L_sun)
#Output: dhz_out = radii bounding habitable zone [inner, outer] (AU)
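    # uses the flux approximation r = sqrt(L/S) with S ~ 1.1 (inner) and 0.53 (outer) in solar units,
    # which reproduces the commonly quoted 0.95-1.37 AU habitable zone for the Sun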
dhz_out = [0,0]
dhz_out[0] = np.sqrt(dhz_L/1.1)
dhz_out[1] = np.sqrt(dhz_L/0.53)
return dhz_out
def exoPlanetSimulation(es_stT,es_stD,es_stV,es_stO,es_pR,es_pOR,es_pM,es_n,es_name):
#Simulate exoplanet and star based on stats, then make star radial velovity plot and transit light curve
#Inputs: es_stT = star temperature (k)
# es_stD = distance to star (Ly)
# es_stV = velocity away of star system (m/s)
# es_stO = initial orbital offset (radians past aligned)
# es_pR = planet radius (earth radii)
# es_pOR = planet orbital radius (AU)
# es_pM = planet mass (earth mass)
# es_n = number of observations to be made
# es_name= planet name (for plots)
#output: es_stStatsFull = stats for star: [temperature(k),spec class,Mass(M_sun),Radius (R_sun),Luminosity(L_sun), Lifetime(Myr), apparent magnitude]
# es_pStats = stats for planet: [name, mass (M_earth),radius(R_earth),density(kg/m^3),orbital radius (AU),orbital period (yr)]
# es_sysStats = stats for system: [distance(Ly), inner hab zone(AU), outer hab zone(AU), velocity away (m/s), orbital offset (rad)]
#Depends on: Temp2Stats,AppMag,KeplersThird,mkUnevenSpc,mkRadVelPlots,calcShadeVals,mkTransPlot,determineHabitableZone
global radObs,tranObs,radVals,tranVals
es_stStatsFull = [es_stT,' ',0.0,0.0,0.0,0.0,0.0]
es_pStats = [es_name,es_pM,es_pR,0.0,es_pOR,0.0]
es_sysStats = [es_stD,0.0,0.0,es_stV,es_stO]
es_plMassSolar = es_pM*(MEarth/MSun)#mass of planet in solar masses
es_stStats = Temp2Stats(es_stT) #Get star stats from temp [SpecClass, Mass, Radius, Lumin, Lifetime]
es_stm = AppMag(es_stD, es_stStats[3]) #Calc apparent magnitude
es_plOrPer = KeplersThird(es_pOR,es_stStats[1],es_plMassSolar)#Orbital Period in years
es_stCM = (es_plMassSolar*es_pOR)/(es_plMassSolar+es_stStats[1])#Star distance from center of mass
es_plCM = es_pOR - es_stCM #planet distance from center of mass
es_stVmax = (2* np.pi* es_stCM * Au_conv)/(es_plOrPer*SinYr)#maximum tangential v of star (m/s)
es_obs_time = mkUnevenSpc(0,1.5*es_plOrPer*365.25,es_n)#Observation times for radial velocity(days)
es_rOb,es_rV = mkRadVelPlots(es_obs_time,es_stm, es_stV, es_stVmax, es_plOrPer,es_stO,es_name)#Simulate radial velocity curves
radObs.append(es_rOb)#save for later
radVals.append(es_rV)
es_shadeVal,es_obs_timeTrans = calcShadeVals(es_plOrPer,es_stO,(es_stStats[2]*RSun)/1000.,es_pR*REarth/1000.,es_stCM*Au_conv/1000.,es_plCM*Au_conv/1000.)
es_tOb,es_tV = mkTransPlot(es_obs_timeTrans,es_shadeVal,es_stStats[3],es_stD,es_name)#make the transit plot
tranObs.append(es_tOb)#save for later
tranVals.append(es_tV)
es_hz = determineHabitableZone(es_stStats[3])#rough range of habitable zone for star
es_stStatsFull[1:6] = es_stStats #record stats for output
es_stStatsFull[6] = es_stm
es_pStats[3] = (es_pM*MEarth)/((4./3)*np.pi*(es_pR*REarth)**3)
es_pStats[5] = es_plOrPer
es_sysStats[1:3] = es_hz
return es_stStatsFull,es_pStats,es_sysStats
def pickSystem(ps_t,ps_h):
#pick stats for the planetary system
#inputs: ps_t = terrestrial (1=terrestrial, 0=jovian)
    #         ps_h = habitable (1=in habitable zone, 0=not necessarily in habitable zone)
#output: ps_out = [star temp, distance, system velocity, offset, planet R, planet M, orbit R]
ps_out = [0,0,0,0,0,0,0]
ps_out[1] = rnd.uniform(4,10)#random distance
ps_out[2] = np.random.normal(0,10000)#random system velocity
ps_out[3] = rnd.uniform(0,np.pi)#random orbital offset
if (ps_t == 1): #Must be terrestrial
ps_out[5] = rnd.uniform(0.1,2)#mass on range near terrestrial planet (earth mass)
ps_den = 5500*rnd.uniform(0.7,1.2)#random terrestrialish Density (kg/m^3)
ps_out[4] = (((3.0*MEarth*ps_out[5])/(4.0*np.pi*ps_den))**(1./3))/REarth
else: #Is jovian
ps_out[5] = rnd.uniform(14,400)#mass on range near jovian planets (earth mass)
ps_den = 5500*rnd.uniform(0.1,0.3)#jovianish Density (kg/m^3)
ps_out[4] = (((3.0*MEarth*ps_out[5])/(4.0*np.pi*ps_den))**(1./3))/REarth
if (ps_h == 1): #Must be in habitable zone
ps_out[0] = rnd.uniform(3300,8000)#random temp value of reasonably behaved star
ps_st = Temp2Stats(ps_out[0])#calc star stats
ps_hz = determineHabitableZone(ps_st[3])
ps_out[6] = rnd.uniform(ps_hz[0],ps_hz[1])#determine orbital radius in habitable zone
else: #Not necessarily habitable... but might be...
ps_temp = np.random.normal(3000,12000)#random temp value for star
ps_out[0] = np.sqrt((ps_temp - 2700)**2)+2700 #made to fit range, low mass more probable
ps_st = Temp2Stats(ps_out[0])#calc star stats
ps_out[6] = rnd.uniform(0.2,60.0)#determine orbital radius
return ps_out
def PlanMission():
#open page to plan mission
def quitPM():
#close window and quit
top3.destroy()
raise SystemExit(0)
def returnPM():
#close planning window and return to display plots
top3.destroy()
DisplayPlots()
def OnLaunch():
#what to do when the launch button has been pressed
ol_Sys = SysChoice.get()#retrieve planet choice
if ('None' in ol_Sys): #chose to go nowhere
            ol_ResultMessage = '\nYou chose to go nowhere because you are boring.\n Once all the other humans have left for places unknown, you sit alone on the poisoned rock they left behind'
ol_stayOpt = [' and live out your days as a grumpy, old hermit.',' and choke on the exhaust from their departing ships.', ' and wish you had made something of your life.', ' and cry while watching old episodes of Friends.','. At first you enjoy the peace and quiet, but one day you choke on your pie and, since there is nobody around to help, you suffocate and die. \nYou can rest assured knowing that you at least provided a good meal for the hungry scavangers.']
ol_ResultMessage = ol_ResultMessage + rnd.choice(ol_stayOpt)
verdictMessage.set('\n\nYOU FAIL TO SURVIVE ON A NEW PLANET... or do anything of interest.')
else: #To infinity and BEYOND!
buttonindex = SysNames.index(ol_Sys)
ol_ResultMessage = '\nYou choose to go to system '+SysNames[buttonindex]+'. \n'
systemStatsMessage = 'Planet stats:\n mass = '+str(round(plS[buttonindex][1],2))+' earth masses\n radius = '+str(round(plS[buttonindex][2],2))+' earth radii\n density = '+str(round(plS[buttonindex][3],2))+' kg/m^3 \n = '+str(round((plS[buttonindex][3]/5500),2))+' earth densities \norbital period = '+str(round(plS[buttonindex][5],2))+' years \n orbital radius = '+str(round(plS[buttonindex][4],2))+' AU \n habitable zone: '+str(round(sysS[buttonindex][1],2))+' to '+str(round(sysS[buttonindex][2],2))+' AU'
SysStatMes.set(systemStatsMessage)
if (plS[buttonindex][3] < 0.4*5500): #Jovian planet
ol_ResultMessage = ol_ResultMessage + 'When you arrive, you find a beautiful Jovian planet.\n Unfortunately you were not looking for a jovian planet.\n'
ol_JovOpt = ['You try to land anyway and the ship burns up in the atmosphere.','Lacking the fuel to return to earth, you sit in your ship in orbit, wondering where you went wrong. When people start starving they choose to eat you first because this was all your fault.','You try to settle one of its moons instead, but there is no atmosphere and you quickly suffocate.']
ol_ResultMessage = ol_ResultMessage + rnd.choice(ol_JovOpt)
verdictMessage.set('\n\nYOU FAIL TO SURVIVE ON A NEW PLANET.')
else:
ol_ResultMessage = ol_ResultMessage + 'When you arrive, you find a terrestrial planet.\n'
if (plS[buttonindex][4]<sysS[buttonindex][1]):#too close for habitable
ol_ResultMessage = ol_ResultMessage + 'Unfortunately it is too close to the star to be habitable and your skin melts off.'
verdictMessage.set('\n\nYOU FAIL TO SURVIVE ON A NEW PLANET.')
elif (plS[buttonindex][4]>sysS[buttonindex][2]):#too far to be habitable
ol_ResultMessage = ol_ResultMessage + 'Unfortunately it is too far from the star to be habitable and you starve to death while clutching the frozen corpse of a loved one.'
verdictMessage.set('\n\nYOU FAIL TO SURVIVE ON A NEW PLANET.')
else:
ol_ResultMessage = ol_ResultMessage + 'It is in the habitable zone of its star.'
ol_bt = buildChoi.get()#retrieve building type
if plS[buttonindex][4] < (((sysS[buttonindex][2]-sysS[buttonindex][1])/3)+sysS[buttonindex][1]):#inner third of habitable zone
ol_ResultMessage = ol_ResultMessage + ' Being in the inner third of the habitable zone, you are in for some high temperatures.'
if ('hot weather' in ol_bt):#brought warm weather buildings
ol_ResultMessage = ol_ResultMessage + ' Good thing the buildings you brought the supplies to construct were designed for such a climate! \nThough there are certain times of day when you can not go outside, and you have to be careful to protect your crops from the harsh sun, you came prepared to do this and, thus, you flourish in your new home!'
verdictMessage.set('\n\nYOU SUCCESSFULLY SURVIVE ON A NEW PLANET!')
else: #brought a different building
ol_ResultMessage = ol_ResultMessage + ' Unfortunately the supplies you brought do not permit you to build the appropriate structures to provide adequate protection. You struggle for a while to survive, but your numbers dwindle over time as the crops fail and people succumb to heat stroke.\n Years later, a more prepared group finds its way to your planet and discovers your bones scattered among the dust of a wildly inappropriate building.\n "What could they have possibly been thinking?" these new explorers ask themselves as they seal it up and begin their own colony.'
verdictMessage.set('\n\nYOU FAIL TO SURVIVE ON A NEW PLANET.')
elif plS[buttonindex][4] > (sysS[buttonindex][2] - ((sysS[buttonindex][2]-sysS[buttonindex][1])/3)):#outer third of habitable zone
ol_ResultMessage = ol_ResultMessage + ' Being in the outer third of the habitable zone, you are in for some low temperatures.'
if ('cold weather' in ol_bt):#brought cold weather buildings
ol_ResultMessage = ol_ResultMessage + ' Good thing the buildings you brought the supplies to construct were designed for such a climate! \nThough there are certain times when you can not go outside, and you have to be careful to protect your crops from the freezing cold, you came prepared to do this and, thus, you flourish in your new home!'
verdictMessage.set('\n\nYOU SUCCESSFULLY SURVIVE ON A NEW PLANET!')
else: #brought a different building
ol_ResultMessage = ol_ResultMessage + ' Unfortunately the supplies you brought do not permit you to build the appropriate structures to provide adequate protection. You struggle for a while to survive, but your numbers dwindle over time as the crops fail and people freeze to death.\n Years later, a more prepared group finds its way to your planet and discovers your frozen bodies huddled among the rooms of a wildly inappropriate building.\n "What could they have possibly been thinking?" these new explorers ask themselves as they seal it up and begin their own colony.'
verdictMessage.set('\n\nYOU FAIL TO SURVIVE ON A NEW PLANET.')
else:#middle third of the habitable zone
ol_ResultMessage = ol_ResultMessage + ' Being in the middle third of the habitable zone, you are in for pleasant weather. It is hard work, but your crops flourish in this beautiful new environment and so do you.'
verdictMessage.set('\n\nYOU SUCCESSFULLY SURVIVE ON A NEW PLANET!')
ol_paramSimp = [0,0] #basic planet survival parameters0=no, 1= yes [terrestrial?, habitable zone?]
ResultMessage.set(ol_ResultMessage )
top3 = Tk.Tk()
backbutton = Tk.Button(top3, text = " Return to choices ",activeforeground='white',activebackground='gray', command = returnPM).grid(row = 1, column = 1)
quitbutton = Tk.Button(top3, text = " Quit ",activeforeground='white',activebackground='gray', command = quitPM).grid(row = 1, column = 2)
welcomeVar = Tk.StringVar()
SysChoice = Tk.StringVar()
ResultMessage = Tk.StringVar()
verdictMessage = Tk.StringVar()
SysStatMes = Tk.StringVar()
buildChoi = Tk.StringVar()
welcome = Tk.Message(top3,textvariable=welcomeVar,width=800,justify='center')
welcomeVar.set('Now that you have decided on a destination, it is time to plan your mission.\n When you are done making selections, press the Launch button.')
welcome.grid(row=2,columnspan=3)
SysChoice.set(SysNames[0]) # set the default option
pm_choices = [k for k in SysNames]#choices for pull-down menu
pm_choices.append('None')#add "none" for the lazies
planetChoice = Tk.OptionMenu(top3, SysChoice,*pm_choices)
Tk.Label(top3, text='Choose your destination: ').grid(row = 3, column = 1)
planetChoice.grid(row = 3, column =3)
buildChoices = ['normal','cold weather (advised for outer third of habitable zone)','hot weather (advised for inner third of habitable zone)']
buildChoi.set(buildChoices[0])
BuildingChoice = Tk.OptionMenu(top3, buildChoi,*buildChoices)
Tk.Label(top3, text='What types of buildings will you want to build there? ').grid(row = 5, column = 1)
BuildingChoice.grid(row = 5, column =3)
Launchbutton = Tk.Button(top3, text = " Launch! ",activeforeground='red',activebackground='gray', command = OnLaunch).grid(row = 6, column = 2)
Result = Tk.Message(top3,textvariable=ResultMessage,width=300,justify='left')
Result.grid(row = 7, columnspan = 7)
Verdict = Tk.Message(top3,textvariable=verdictMessage,width=300,justify='center', foreground = 'red')
Verdict.grid(row = 8, columnspan = 7)
ActualStats = Tk.Message(top3,textvariable=SysStatMes,width=300,justify='left')
ActualStats.grid(row = 7, column = 9)
top3.mainloop()
def intermediary():
#do some window closing between top and top2
top.destroy()#close window
DisplayPlots()
def DisplayPlots():
def on_key_event(event):
if event.key in SysNames:
buttonindex = SysNames.index(event.key)
print 'You chose to examine ',SysNames[buttonindex]
oke_Fig = plt.figure(2+buttonindex)#Radial velocity plots
plt.subplot(2,1,1)
plt.plot(radObs[buttonindex],radVals[buttonindex],'k.')
plt.xlabel('time (days)')
plt.ylabel('velocity away (m/s)')
plt.title('Radial Velocity: '+plS[buttonindex][0])
plt.tight_layout()
plt.subplot(2,1,2)
#mAveTemp = np.average(tranVals[buttonindex][0:10])
#TempIntensity = [10**(0.4*(mAveTemp-k)) for k in tranVals[buttonindex]]#convert magnitude to relative intensity.
plt.plot(tranObs[buttonindex],tranVals[buttonindex],'k.')
plt.xlabel('time (days)')
plt.ylabel('apparent magnitude')
plt.title('Transit: '+plS[buttonindex][0])
plt.ylim([np.max(tranVals[buttonindex]),np.min(tranVals[buttonindex])])
plt.tight_layout()
oke_Fig.show()
elif event.key == 'escape':#quit
top2.quit()
top2.destroy()
quit()
elif event.key == 'enter':#done, next page
top2.quit()
top2.destroy()
PlanMission()
elif event.key == 'x':#directions
DirectMess = 'You can Determine things in the following way:\n\n The orbital period is the amount of time it takes for the radial velocity plot to go through 1 cycle.\n\n'
DirectMess = DirectMess + 'The orbital velocity will be the amplitude of the radial velocity plot (half the difference between max and min)\n\n'
DirectMess = DirectMess + "The orbital distance can be determined using Kepler's 3rd law: P^2 = (1/M)a^3 where a is the semimajor axis and M is the mass of the star.\n\n"
DirectMess = DirectMess + 'The planet mass can be determined using m = (M*(M_e/M_s)*v*P*ns)/(2 pi ((M*P^2)^(1/3))*nm) where M is the Mass of the star in solar masses,M_e is the mass of the earth in kg,M_s is the mass of the sun in kg, v is the orbital velocity in m/s, P is the orbital period in years, ns is the number of seconds in a year, and nm is the number of meters in an AU \n'
            DirectMess = DirectMess + 'M_s = 1.989*10^30 kg, M_e = 5.972*10^24 kg, ns = 31557600 s, nm = 1.496*10^11 m \n\n'
DirectMess = DirectMess + 'The planet radius is determined using the following (\delta m) = (r_planet/r_star)^2 where (\delta m) is the change in magnitude during the transit.\n\n'
DirectMess = DirectMess + 'Density is the mass divided by the volume (4/3 pi r^3). Hint: do this in kg/m^3 first, then divide by the density of the earth (5500 kg/m^3). \n\n'
DirectMess = DirectMess + 'Densities less than about 0.4 times that of the earth are Jovian planets \n\n'
DirectMess = DirectMess + 'The rough approximation of the habitable zone of a star is between sqrt(L/1.1) and sqrt(L/0.53) AU, if L is the luminosity of the star in solar luminosities.\n\n'
tkMessageBox.showinfo('Directions', DirectMess)
else:
print event.key,' is not a valid option.'
#top.quit()
top2 = Tk.Tk()
top2.attributes("-fullscreen", True)
fig = plt.figure(1,figsize=(10, 8))#figures to show
plt.text(0,0,"For some reason you have to click on this window before anything will work.\n To quit press ESCAPE. To examine the plots for any planet more closely, push the letter on your keyboard associated with its name.\n When you are done examining the options, hit ENTER. You will make your choice on the next screen.")
plt.axis('off')
for dp_i in range(nSys):#for each system
a = fig.add_subplot(nSys+1,4,4*dp_i+1)
a.text(0,0.5,plS[dp_i][0],fontsize = 20)
plt.axis('off')
b = fig.add_subplot(nSys+1,4,4*dp_i+2)#Radial velocity plots
plt.plot(radObs[dp_i],radVals[dp_i],'k.')
plt.xlabel('time (days)')
plt.ylabel('velocity away (m/s)')
plt.title('Radial Velocity: '+plS[dp_i][0])
plt.tight_layout()
c = fig.add_subplot(nSys+1,4,4*dp_i+3) #Transit plots
#mAveTemp = np.average(tranVals[dp_i][0:10])
#TempIntensity = [10**(0.4*(mAveTemp-k)) for k in tranVals[dp_i]]#convert magnitude to relative intensity.
plt.plot(tranObs[dp_i],tranVals[dp_i],'k.')
plt.xlabel('time (days)')
plt.ylabel('apparent magnitude')
plt.title('Transit: '+plS[dp_i][0])
plt.ylim([np.max(tranVals[dp_i]),np.min(tranVals[dp_i])])
dp_txtInfo = 'Star stats:\nclass: '+stS[dp_i][1]+'\nM = '+str(round(stS[dp_i][2],2))+' $M_{\odot}$\nR = '+str(round(stS[dp_i][3],2))+ '$R_{\odot}$\nL = '+str(round(stS[dp_i][4],2))+ '$L_{\odot}$\n'
d = fig.add_subplot(nSys+1,4,4*dp_i+4)
d.text(0,0,dp_txtInfo)
plt.axis('off')
canvas = FigureCanvasTkAgg(fig, master=top2)
canvas.show()
canvas.get_tk_widget().pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
canvas.mpl_connect('key_press_event', on_key_event)
top2.mainloop()
#################################################################################
### MAIN ### MAIN ### MAIN ### MAIN ### MAIN ### MAIN ###
#################################################################################
#major assumptions:
#Circular orbits. Viewed perfectly edge-on. No limb darkening. Only 1 planet per system.
global LSun,RSun,REarth,MSun,MEarth,Au_conv
global sysS,plS,stS,nSys, SysNames
global radObs,tranObs,radVals,tranVals
radObs = []
tranObs = []
radVals = []
tranVals = []
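# Module-level stores filled by exoPlanetSimulation(): observation times and values
# for the radial-velocity and transit plots, reused later by DisplayPlots().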
#constants
LSun = 3.828*10**(26) #Luminosity of sun in W
RSun = 6.957*10**(8) #Radius of sun in m
REarth = 6.3781*10**(6) #radius of earth in m
MSun = 1.989*10**(30) #Mass of sun (kg)
MEarth = 5.972*10**(24) #Mass of Earth (kg)
Au_conv = 149597870700.0 #1 AU = 149597870700 m
SinYr = 3600*24*365.25 #How many seconds in a sidereal year
numObs = 100#Number of observations made for system
nSys = 4#Number of systems to simulate
SysNames = ['a','b','c','d']
SysProp = [[1,1],[1,0],[0,1],[0,0]] #one of each type of system
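# Each [t, h] pair feeds pickSystem(t, h): t=1 terrestrial / t=0 jovian,
# h=1 in the habitable zone, so the four systems cover every combination.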
plOrder = [0,1,2,3] #indices for SysProp
rnd.shuffle(plOrder) #Randomize indices
top = Tk.Tk()
var1 = Tk.StringVar()
var2 = Tk.StringVar()
var3 = Tk.StringVar()
ProgVar = Tk.StringVar()
Logo = Tk.PhotoImage(file = 'FreighterWBg.gif')
sysS = []#list of stats for each system
plS = []#List of stats for each planet
stS = []#List of stats for each star
logoIm = Tk.Label(image = Logo)
logoIm.grid(row=0,column=1)
label = Tk.Message(top,textvariable=var1,width=300,justify='center')
var1.set('Welcome to Exoplanet Survivor!\n Code written by Richard D Mellinger Jr')
label.grid(row=1,columnspan=3)
label2 = Tk.Message(top,textvariable=var2,width=300,justify='left')
var2.set("\n You will be shown the radial velocity curves and transit light curves for several simulated exoplanet systems. \n At least one of these will be a terrestrial planet in its star's habitable zone. \n Use the plots to calculate basic properties of the planets and decide which planet to move to.\n\n Once your destination has been chosen, you will plan your mission.\n\n Then we'll see if you survive!")
label2.grid(row=2,columnspan=3)
ProgVar.set(' ')
Tk.Label(top,textvariable=ProgVar).grid(row = 4, column = 1)
for i in range(nSys):#for each system
sysLs = pickSystem(SysProp[plOrder[i]][0],SysProp[plOrder[i]][1])#[star temp, distance, system velocity, offset, planet R, planet M, orbit R]
stStat,plStat,sysStat = exoPlanetSimulation(sysLs[0],sysLs[1],sysLs[2],sysLs[3],sysLs[4],sysLs[6],sysLs[5],numObs,SysNames[i])
ProgVar.set('Generating systems... '+str(round((100.*(i+1)/nSys),1))+' % complete')#Update Progress
sysS.append(sysStat)
plS.append(plStat)
stS.append(stStat)
ProgVar.set(' ')
Tk.Button(top, text = " Let's go! ",activeforeground='white',activebackground='gray', command = intermediary).grid(row = 5, column = 1)
top.mainloop()
| [
"matplotlib"
] |
1a2ce5c0ec248b8f8e71ceee0662fe8d38b81171 | Python | trisha-p-malhotra/BigDataAnalysis-1 | /reg.py | UTF-8 | 1,152 | 3 | 3 | [] | no_license | from sklearn import linear_model
import pandas as pd
import numpy as np
import matplotlib.pyplot as pyp
# Import data
vg = pd.read_csv('VGS.csv')
vg = vg.replace('tbd', np.nan)
data = vg.loc[vg.Year_of_Release >= 1999]
data = data.dropna(axis=0)
data.User_Score = data.User_Score.astype(float)
data = data[data.Year_of_Release.notnull()]
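# The script expects the video-game sales data in VGS.csv; releases before 1999,
# 'tbd' user scores and rows with missing values are dropped above.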
def genrereg():
# Finding the median sales value by genre and year
Med_Sales_by_Gen_and_Yr = pd.pivot_table(data, index=['Year_of_Release'],
columns=['Genre'], values=['Global_Sales'], aggfunc=np.median)
Data = Med_Sales_by_Gen_and_Yr
Data.columns = Data.columns.get_level_values(1)
Regr_Coeff = []
Regr_MSE = []
fig, axes = pyp.subplots(nrows=4, ncols=3, figsize=(10, 12))
x = np.transpose(np.matrix(Data.index))
count = 0
for genre in Data.columns:
axs = axes[count // 3, count % 3]
y = Data[genre].to_frame()
# Linear regression
        regr = linear_model.LinearRegression()
        # Completing the loop's apparent intent: fit median sales vs. year for
        # this genre, keep the slope and MSE, and draw the fit on its subplot
        # (assumes the pivot table has no missing values for the genre).
        regr.fit(np.asarray(x), y)
        y_pred = regr.predict(np.asarray(x))
        Regr_Coeff.append(regr.coef_[0][0])
        Regr_MSE.append(np.mean(np.square(np.asarray(y) - np.asarray(y_pred))))
        axs.scatter(np.asarray(x).ravel(), np.asarray(y).ravel())
        axs.plot(np.asarray(x).ravel(), np.asarray(y_pred).ravel(), color='red')
        axs.set_title(genre)
        count += 1
    pyp.tight_layout()
    pyp.show()
def main():
genrereg()
main()
| [
"matplotlib"
] |
fe7d47ff49723164566692bfc9e344165fda6d1e | Python | adelbertc/ml | /ml/ch4.py | UTF-8 | 1,789 | 3.578125 | 4 | [] | no_license | import csv
import sys
from typing import Sequence, Tuple
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
def step_gradient_descent(
training_data: Sequence[Tuple[float, float]],
w: float,
b: float,
alpha: float
) -> Tuple[float, float]:
n = len(training_data)
# partial derivative of MSE wrt w
pdw = sum([-2 * x * (y - (w * x + b)) for (x, y) in training_data]) / n
new_w = w - alpha * pdw
# partial derivative of MSE wrt b
pdb = sum([-2 * (y - (w * x + b)) for (x, y) in training_data]) / n
new_b = b - alpha * pdb
return (new_w, new_b)
def mse(
training_data: Sequence[Tuple[float, float]],
w: float,
b: float
) -> float:
n = len(training_data)
return sum([pow(y - (w * x + b), 2) for (x, y) in training_data]) / n
if __name__ == "__main__":
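    # Usage: python ch4.py <csv_file> <epochs>; the CSV is expected to have
    # 'radio' and 'sales' columns (an advertising-style dataset).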
datafile = sys.argv[1]
epochs = int(sys.argv[2])
training_data = []
with open(datafile) as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
spending = float(row["radio"])
sales = float(row["sales"])
training_data.append((spending, sales))
w = 0.0
b = 0.0
alpha = 0.001
for epoch in range(epochs):
(new_w, new_b) = step_gradient_descent(training_data, w, b, alpha)
w = new_w
b = new_b
loss = mse(training_data, w, b)
if epoch % 100 == 0:
print(f"epoch={epoch}, loss={loss}")
# Viz
sns.set(style="darkgrid")
df = pd.DataFrame(training_data, columns=["spending", "sales"])
plot = sns.relplot(x="spending", y="sales", data=df)
# All this just to visualize a line..
x = np.linspace(0, plt.xlim()[1], 1000)
plt.plot(x, w * x + b)
plt.show()
| [
"matplotlib",
"seaborn"
] |
5e636a252995138681fbcc83af2a4a1eb1469e40 | Python | tiansp/tensorflow_learn | /波士顿房价预测.py | UTF-8 | 2,517 | 3.234375 | 3 | [] | no_license | import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
#Load the Boston housing dataset
bosten_housing = tf.keras.datasets.boston_housing
(train_x,train_y),(test_x,test_y) = bosten_housing.load_data()
#Data preparation
x_train = train_x[:,5]
y_train = train_y
x_test = test_x[:,5]
y_test = test_y
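# Only feature column 5 is used (presumably RM, the average number of rooms),
# so this is a univariate linear fit of price against a single feature.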
#Hyperparameters
learn_rate = 0.04
iter = 2000
display_step = 200
#Initial values
np.random.seed(612)
w = tf.Variable(np.random.randn())
b = tf.Variable(np.random.randn())
#Model training
mse_train = []
mse_test = []
for i in range(0,iter+1):
with tf.GradientTape() as tape:
pred_train = w * x_train + b
loss_train = 0.5 * tf.reduce_mean(tf.square(y_train - pred_train))
pred_test = w * x_test + b
loss_test = 0.5 * tf.reduce_mean(tf.square(y_test - pred_test))
mse_train.append(loss_train)
mse_test.append(loss_test)
dl_dw , dl_db = tape.gradient(loss_train,[w,b])
w.assign_sub(learn_rate*dl_dw)
b.assign_sub(learn_rate*dl_db)
if i % display_step == 0:
print("i: %i,train loss: %f,test loss:%f"%(i,loss_train,loss_test))
#Visualization
plt.rcParams["font.sans-serif"] = "SimHei"
plt.figure(figsize=(15,10))
plt.subplot(221)
plt.scatter(x_train,y_train,color = 'b',label = 'data')
plt.plot(x_train,pred_train,color = 'r',label = 'model')
plt.legend(loc = 'upper left')
plt.subplot(222)
plt.plot(mse_train ,color = 'b',linewidth = 3,label = 'train loss')
plt.plot(mse_test,color = 'r',linewidth = 1.5,label = 'test loss')
plt.legend(loc = 'upper right')
plt.subplot(223)
plt.plot(y_train,color = 'b',marker = "o",label = 'train price')
plt.plot(pred_train,color = 'r',marker = ".",label = 'predict')
plt.legend()
plt.subplot(224)
plt.plot(y_test ,color = 'b',marker = "o",label = 'true price')
plt.plot(pred_test,color = 'r',marker = ".",label = 'predict') # tensor objects can be converted to numpy arrays with .numpy()
plt.legend()
plt.suptitle("波士顿房价预测",fontsize = 20)
plt.show()
'''
i: 0,train loss: 321.837585,test loss:337.568634
i: 200,train loss: 28.122616,test loss:26.237764
i: 400,train loss: 27.144739,test loss:25.099327
i: 600,train loss: 26.341949,test loss:24.141077
i: 800,train loss: 25.682899,test loss:23.332979
i: 1000,train loss: 25.141848,test loss:22.650162
i: 1200,train loss: 24.697670,test loss:22.072006
i: 1400,train loss: 24.333027,test loss:21.581432
i: 1600,train loss: 24.033667,test loss:21.164261
i: 1800,train loss: 23.787903,test loss:20.808695
i: 2000,train loss: 23.586145,test loss:20.504938
'''
| [
"matplotlib"
] |
5654b68f9f86a9382bf06b7a540db62cb1fdb89b | Python | IgaoGuru/TicTacToe- | /binary_search.py | UTF-8 | 2,012 | 3.703125 | 4 | [] | no_license | import time
import matplotlib.pyplot as plt
import numpy as np
#looping one
def find_element(elements:list, element:int):
for i in elements:
if element == i:
return True
return False
#recursive one
def find_element2(elements:list, element:int):
if len(elements) == 0:
return False
if elements[0]== element:
return True
else:
return find_element2(elements[1:], element)
#only works for continuous lists
def find_element3(elements:list, element:int):
if element >= elements[0] and element <= elements[-1]:
return True
else:
return False
#cuts the list in half until there is only one element
def find_element4(elements:list, element:int):
while len(elements) > 0:
if len(elements) == 1:
return elements[0] == element
half = int(len(elements) / 2)
if elements[half] == element:
return True
elif elements[half] > element:
elements = elements[:half]
elif elements[half] < element:
elements = elements[half:]
    return False
def binary_search(elements:list, element:int):
if len(elements) == 0:
return False
if len(elements) == 1:
return elements[0] == element
half = int(len(elements) / 2)
if element == elements[half]:
return True
elif element < elements[half]:
return binary_search( elements[:half], element)
elif element > elements[half]:
return binary_search(elements[half:], element)
times = []
#testing platform
elements = list(range(0, 1000000, 1))
errors = 0
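# Note: each call below searches for max_element, which range(0, max_element)
# never contains, so the timings measure worst-case (unsuccessful) lookups.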
for max_element in range(0, 1000000, 1000):
tic = time.time()
print(max_element)
product = binary_search(list(range(0, max_element, 1)), max_element)
toc = time.time()
elapsed = toc - tic
times.append(elapsed)
print(elapsed)
print(product)
print(max_element)
#plot the timing results
plt.plot(times)
plt.xlabel("element")
plt.ylabel("time")
plt.show()
print(errors)
| [
"matplotlib"
] |
df62029da7f5fa124615583254906de9311c927c | Python | AlexanderLuo/machine | /src/LinearRegression/batchGradient/linearRegression.py | UTF-8 | 1,163 | 2.96875 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
' a test module '
#
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets, linear_model
import time
import batchGradient
a = time.time()
####Load the dataset
diabetes = datasets.load_diabetes()
# ####Use only one feature:
diabetes_X = diabetes.data[:, np.newaxis, 2]
# ###Split the data into training and test sets
diabetes_X_train = diabetes_X[:-20]
# diabetes_X_test = diabetes_X[-20:]
# ###Split the targets into training and test sets
diabetes_y_train = diabetes.target[:-20]
# diabetes_y_test = diabetes.target[-20:]
theta = batchGradient.fit(diabetes_X_train, diabetes_y_train)
print(theta)
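# The plotting below assumes batchGradient.fit returns theta with the slope
# in theta[0] and the intercept in theta[1].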
plt.plot(diabetes_X_train, diabetes_y_train, 'rx')
x = np.linspace(-0.1, 0.18)
y = theta[1]+theta[0]*x
#
plt.plot(x,y)
plt.show()
# ###Train the model
# regr = linear_model.LogisticRegression()
# regr.fit(diabetes_X_train, diabetes_y_train)
# ###Regression coefficients
# print('Coefficients:\n', regr.coef_)
# ##Scatter plot
# plt.scatter(diabetes_X_test,diabetes_y_test,color='black')
# plt.plot(diabetes_X_test,regr.predict(diabetes_X_test),color='blue',linewidth=3)
# plt.xticks()
# plt.yticks()
# plt.show()
| [
"matplotlib"
] |
d972fe9602f287b5add22213d05f5acea3fa1296 | Python | Adharshmahesh/Machine-Learning-Ionosphere-dataset | /Inosphere_LR_Scikit.py | UTF-8 | 1,204 | 2.75 | 3 | [] | no_license | import numpy as np
import pandas as pd
import scipy
#from loaddata import data
import sklearn
import sklearn.metrics as metrics
import matplotlib.pyplot as plt
import statsmodels.api as sm
from sklearn import datasets
def load_data(path, header):
df = pd.read_csv(path, header=header)
return df
if __name__ == "__main__":
# load the data from the file
data = load_data("Ionosphere_dataset.csv", None)
for i in range(0,351):
if data.loc[i,34] == 'g':
data.loc[i,34] = 1
else:
data.loc[i,34] = 0
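    # Column 34 now holds the binary class label (1 = 'g', 0 = 'b')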
X=data.iloc[:,:32]
    y=data.iloc[:,34]
y=y.astype('int')
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1)
from sklearn.linear_model import LogisticRegression
# create logistic regression object
reg = LogisticRegression()
# train the model using the training sets
reg.fit(X_train, y_train)
# making predictions on the testing set
y_pred = reg.predict(X_test)
w = reg.coef_
# comparing actual response values (y_test) with predicted response values (y_pred)
#print(w)
print("Logistic Regression model accuracy(in %):",
metrics.accuracy_score(y_test, y_pred)*100)
| [
"matplotlib"
] |
e29c2e354b5d48b97a4a0156f2a6245899795299 | Python | Micseb/Introduction-to-Computer-Vision | /Artificial Neural Network.py | UTF-8 | 5,608 | 3.328125 | 3 | [] | no_license | #!/usr/bin/python
__author__="Spandan Madan"
#importing the necessary packages
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets,linear_model
################### PART 1 - DATA AND VISUALIZATION #########################
nn_input_dim = 2 # input layer dimensionality
nn_output_dim = 2 # output layer dimensionality
nn_hdim = 4
# Gradient descent parameters (I picked these by hand)
epsilon = 0.01 # learning rate for gradient descent
reg_lambda = 0.01 # regularization strength
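# Network shape used below: nn_input_dim inputs -> nn_hdim tanh hidden units
# -> nn_output_dim softmax outputs, trained with full-batch gradient descent.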
#The process of generating data
def generate_data():
np.random.seed(0)
X,y=datasets.make_moons(3,noise=0.2)
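    # NOTE: only 3 samples are requested here; a larger value (e.g. 200)
    # gives a more meaningful decision boundary.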
return X,y
#The data generated is 2D, i.e. each data point is row vector of 2 elements.
#This allows us to represent our data on a simple graph in 2D.
#Let's view our data, to get a feel for it. Let's print X and y.
data,labels=generate_data() #Call function to generate data
#print "\tData\t\t\tLabel"
#print "\t____\t\t\t_____"
#for i in range(0,len(labels)):
#print data[i],"\t ",labels[i]
plt.scatter(data[:,0],data[:,1],s=40,c=labels,cmap=plt.cm.Spectral)
plt.show()
################## build model ##########################
# This function learns parameters for the neural network and returns the model.
# - nn_hdim: Number of nodes in the hidden layer
# - num_passes: Number of passes through the training data for gradient descent
# - print_loss: If True, print the loss every 1000 iterations
def build_model(X, y, nn_hdim, num_passes=20000, print_loss=False):
# Initialize the parameters to random values. We need to learn these.
num_examples = len(X)
#print num_examples
np.random.seed(0)
W1 = np.random.randn(nn_input_dim, nn_hdim)
#print W1
b1 = np.zeros((1, nn_hdim))
W2 = np.random.randn(nn_hdim, nn_output_dim)
b2 = np.zeros((1, nn_output_dim))
# This is what we return at the end
model = {}
# Gradient descent. For each batch...
for i in range(0, num_passes):
# Forward propagation
z1 = X.dot(W1) + b1
#print X.shape,W1.shape,b1.shape,z1.shape
a1 = np.tanh(z1)
z2 = a1.dot(W2) + b2
#print z2.shape
exp_scores = np.exp(z2)
probs = exp_scores / np.sum(exp_scores, axis=1, keepdims=True)
# Backpropagation
delta3 = probs
#print delta3
#print delta3[range(num_examples), y]
delta3[range(num_examples), y] -= 1
#print delta3
dW2 = (a1.T).dot(delta3)
db2 = np.sum(delta3, axis=0, keepdims=True)
delta2 = delta3.dot(W2.T) * (1 - np.power(a1, 2))
dW1 = np.dot(X.T, delta2)
db1 = np.sum(delta2, axis=0)
# Add regularization terms (b1 and b2 don't have regularization terms)
dW2 += reg_lambda * W2
dW1 += reg_lambda * W1
# Gradient descent parameter update
W1 += -epsilon * dW1
b1 += -epsilon * db1
W2 += -epsilon * dW2
b2 += -epsilon * db2
# Assign new parameters to the model
model = {'W1': W1, 'b1': b1, 'W2': W2, 'b2': b2}
# Optionally print the loss.
# This is expensive because it uses the whole dataset, so we don't want to do it too often.
if print_loss and i % 1000 == 0:
print("Loss after iteration %i: %f" % (i, calculate_loss(model, X, y)))
#if print_loss and i % 500 == 0:
#print("The decision boundary has been plotted")
#visualize(X,y,model)
return model
##########################Classify, calculate loss, predict###############
def calculate_loss(model, X, y):
num_examples = len(X) # training set size
W1, b1, W2, b2 = model['W1'], model['b1'], model['W2'], model['b2']
# Forward propagation to calculate our predictions
z1 = X.dot(W1) + b1
a1 = np.tanh(z1)
z2 = a1.dot(W2) + b2
exp_scores = np.exp(z2)
probs = exp_scores / np.sum(exp_scores, axis=1, keepdims=True)
# Calculating the loss
corect_logprobs = -np.log(probs[range(num_examples), y])
data_loss = np.sum(corect_logprobs)
# Add regulatization term to loss (optional)
data_loss += reg_lambda / 2 * (np.sum(np.square(W1)) + np.sum(np.square(W2)))
return 1. / num_examples * data_loss
def predict(model, x):
W1, b1, W2, b2 = model['W1'], model['b1'], model['W2'], model['b2']
# Forward propagation
z1 = x.dot(W1) + b1
a1 = np.tanh(z1)
z2 = a1.dot(W2) + b2
exp_scores = np.exp(z2)
probs = exp_scores / np.sum(exp_scores, axis=1, keepdims=True)
return np.argmax(probs, axis=1)
###### visualize ########
def visualize(X, y, model):
# plt.scatter(X[:, 0], X[:, 1], s=40, c=y, cmap=plt.cm.Spectral)
# plt.show()
plot_decision_boundary(lambda x:predict(model,x), X, y)
    plt.title("Neural Network Decision Boundary")
def plot_decision_boundary(pred_func, X, y):
# Set min and max values and give it some padding
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
h = 0.01
# Generate a grid of points with distance h between them
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
# Predict the function value for the whole gid
Z = pred_func(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
# Plot the contour and training examples
plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral)
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Spectral)
plt.show()
def main():
X, y = generate_data()
model = build_model(X, y, nn_hdim, print_loss=True)
visualize(X, y, model)
if __name__ == "__main__":
main()
| [
"matplotlib"
] |
5395ea2b4f35183b46580bca4c5ec64d4b3ceba7 | Python | pburgov/M4_Tarea | /ejercicio_python.py | UTF-8 | 24,009 | 2.90625 | 3 | [] | no_license | #!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import print_function
import io
import json
import os
import re
import sys
import time
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import tweepy
from IPython.core.display import clear_output
from bs4 import BeautifulSoup
from textblob import TextBlob
from tweepy import API
from tweepy.streaming import StreamListener
# Global variables
# API access configuration and global variables
consumer_key = ''
consumer_secret = ''
access_token_key = ''
access_token_secret = ''
# File
file_name = 'tweets.json'
stream_language = ['en']
query_list = ['Curie', 'Planck', 'Einstein', 'Bohr', 'Fleming', 'Higgs']
dir_json = './jsons'
dir_images = './images'
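# The Twitter API credentials above must be filled in before streaming;
# query_list drives both the stream filter and the later word counts.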
class MyListener(StreamListener):
def __init__(self, output_file, count_max=50, api=None):
self.api = api or API()
self.output_file = output_file
self.counter = 1
self.counter_tweet = 0
self.count_max = count_max
self.start_time = time.time()
self.tweet_data = []
self.status_list = []
def on_status(self, status):
while self.counter <= self.count_max:
clear_output(False)
print('Nº Tweets recuperados: ' + str(self.counter)
+ ' - ' + self.get_time()
, end=' ')
try:
self.status_list.append(status)
json_string = json.dumps(status._json, ensure_ascii=False)
self.tweet_data.append(json_string)
self.counter += 1
return True
except BaseException as ex:
sys.stderr.write("Error on_data:{}\n".format(ex))
return True
with io.open(self.output_file, 'w', encoding='utf-8') as f:
f.write(u'{"tweets":[')
if len(self.tweet_data) > 1:
f.write(','.join(self.tweet_data))
else:
f.write(self.tweet_data[0])
f.write(u']}')
return False
def on_error(self, status):
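        # HTTP 420 means the client is being rate limited; returning False
        # tells tweepy to disconnect the stream.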
if status == 420:
print(status)
return False
def get_time(self):
dif = time.strftime("%H:%M:%S",
time.gmtime(time.time() - self.start_time))
return str(dif)
class MyTokenizerClass:
""""
    A helper class grouping the methods in charge of processing, searching
    and counting words or characters in the tweet text.
    A set of regexes is used so that the text is split more accurately than
    with nltk's plain word_tokenizer.
    These regexes come partly from Marco Bonzanini's blog, partly from the
    regex101.com site, and the rest are our own.
    https://marcobonzanini.com/2015/03/09/mining-twitter-data-with-python-part-2/
"""
emoticons_str = r"""
(?:
[:=;] # Eyes
[oO\-]? # Nose (optional)
[D\)\]\(\]/\\OpP] # Mouth
)"""
regex_str = [
emoticons_str,
r'<[^>]+>', # HTML tags
r'@[\w_]+', # @-mentions (regex101.com)
r"(?:\#+[\w_]+[\w\'_\-]*[\w_]+)", # hash-tags (marcobonzanini.com)
r'http[s]?://[^\s<>"]+|www\.[^\s<>"]+', # URLs (regex101.com)
        r'(?:(?:\d+,?)+(?:\.?\d+)?)', # numbers (regex101.com)
        r"(?:[a-z][a-z'\-_]+[a-z])", # words with - and ' (marcobonzanini.com)
        r'(?:[\w_]+)', # other words (regex101.com)
        r'(?:[^[:punct:]])' # anything else except punctuation marks
]
tokens_re = re.compile(r'(' + '|'.join(regex_str) + ')',
re.VERBOSE | re.IGNORECASE)
emoticon_re = re.compile(r'^' + emoticons_str + '$',
re.VERBOSE | re.IGNORECASE)
def num_palabras_caracteres(self, s):
"""
        Count the words and characters of a text.
        Important: punctuation marks are not considered words;
        they are excluded by the applied regex (?:[^[:punct:]])
        :param s: text string to evaluate
        :return: dictionary with both values: number of words and number
                 of characters
"""
num = {}
tokens = self.tokens_re.findall(s)
num['palabras'] = len(tokens)
num['caracteres'] = len([char for token in tokens for char in token])
return num
class MyStatisticsClass:
""""
    A helper class grouping the methods in charge of
    analysing the tweets
"""
def __init__(self, df_statistic):
self.df = df_statistic
def get_save_picture_path(self, file_name):
"""
        Return the path where the images are saved
        :param file_name: string with the image file name
        :return: save path.
"""
return os.path.join(dir_images, file_name)
def get_tweets_per_hour(self):
"""
        Print on screen the number of tweets aggregated by hour
        and create a barplot.
        :return: list of hours with the tweets created, plus the barplot
"""
# Frecuencia por horas
frecuencia_list = self.df.groupby('hora')['id'].count()
# Creamos un df a partir de la serie y renombramos las columnas
df_hour = pd.DataFrame([frecuencia_list]).T
df_hour.rename(columns={'id': 'value'}, inplace=True)
print('Las distribución horaria de los tweets es:\n')
for index, row in df_hour.iterrows():
print('Hora {0} - {1} tweets'.format(index, row['value']))
# Mostramos gráfico
sns.set(color_codes=True)
palette = sns.color_palette('Reds_d', 24)
fig, ax = plt.subplots(figsize=(14, 6))
ax = sns.barplot(df_hour.index, df_hour['value'], alpha=.6,
palette=palette)
for p in ax.patches:
if p.get_height > 0:
ax.annotate("%d" % p.get_height(),
(p.get_x() + p.get_width() / 2.,
p.get_height()),
ha='center', va='center', fontsize=10, color='gray',
fontweight='bold', xytext=(0, 5),
textcoords='offset points')
ax.set(ylabel='Frecuencia', xlabel='Horas')
fig.suptitle(u'Distribución Horaria',
horizontalalignment='center', y=0.95)
plt.savefig(self.get_save_picture_path('Hours.png'),
bbox_inches="tight")
plt.show()
def get_count_word(self, s, word_to_find):
"""
        Count the number of occurrences of a word in a string
        :param s: string to search in
        :param word_to_find: word to look for
        :return: number of occurrences
"""
word_to_find = word_to_find.lower()
s = s.lower()
word_token = re.compile(r'(\W' + word_to_find + '\W)+')
tokens = word_token.findall(s)
return len(tokens)
def get_count_of_query_words(self):
"""
        Compute and total the number of occurrences, in the tweet texts,
        of each of the terms used in the filtering query
        :return: plot with the frequencies
"""
num_cat = len(query_list)
count_list = [0] * num_cat
vect_num = np.vectorize(self.get_count_word)
for idx, val in enumerate(query_list):
count_list[idx] = vect_num(self.df['text'], val.lower()).sum()
df_count = pd.DataFrame({'value': count_list}, index=query_list)
df_count = df_count.sort_values('value', ascending=False)
# Mostramos en pantalla los resultados
print("Los valores obtenidos son:\n")
for index, row in df_count.iterrows():
print('{0} - {1} ocurrencias'.format(index, row['value']))
# Mostramos gráfico
sns.set(color_codes=True)
palette = sns.color_palette('Oranges_d', num_cat)
fig, ax = plt.subplots(figsize=(14, 6))
ax = sns.barplot(df_count.index, df_count['value'], alpha=.6,
palette=palette)
ax.set(ylabel='Frecuencia', xlabel=u'Términos de búsqueda')
for p in ax.patches:
if p.get_height > 0:
ax.annotate("%d" % p.get_height(),
(p.get_x() + p.get_width() / 2.,
p.get_height()),
ha='center', va='center', fontsize=10, color='gray',
fontweight='bold', xytext=(0, 5),
textcoords='offset points')
fig.suptitle(u'Frecuencia de Aparición',
horizontalalignment='center', y=0.95)
plt.savefig(self.get_save_picture_path('Frequency.png'),
bbox_inches="tight")
plt.show()
def get_time_zone_distribution(self):
"""
        Get the 10 most frequent time zones among the created tweets
        :return: barplot
"""
df_time = self.df[self.df['time_zone'].notnull()]
grupo = df_time.groupby('time_zone')['id'].count().nlargest(10)
# Creamos un df a partir de la serie y renombramos las columnas
df_time = pd.DataFrame([grupo]).T
df_time.rename(columns={'id': 'value'}, inplace=True)
num_cat = df_time.shape[0]
# Mostramos en pantalla los resultados
print("Las 10 Zonas Horarias con mayor número de tweets son:\n")
for index, row in df_time.iterrows():
print('{0} - {1} tweets'.format(index, row['value']))
# Mostramos gráfico
sns.set(color_codes=True)
palette = sns.color_palette('Greens_d', num_cat)
fig, ax = plt.subplots(figsize=(14, 10))
ax = sns.barplot(df_time.index, df_time['value'], alpha=.6,
palette=palette)
ax.set(ylabel='Frecuencia', xlabel=u'Zonas')
for p in ax.patches:
if p.get_height > 0:
ax.annotate("%d" % p.get_height(),
(p.get_x() + p.get_width() / 2.,
p.get_height()),
ha='center', va='center', fontsize=10, color='gray',
fontweight='bold', xytext=(0, 5),
textcoords='offset points')
fig.suptitle(u'Distribución 10 Zonas Horarias más frecuentes',
horizontalalignment='center', y=0.95)
plt.xticks(rotation=90)
plt.savefig(self.get_save_picture_path('TimeZone.png'),
bbox_inches="tight")
plt.show()
def get_porcentaje_fuente_tweet(self):
"""
        Get the percentages of the sources (devices) the tweets
        were created from
        :return: percentage breakdown of df['source'] and a plot
"""
        # Compute each source's percentage of the total
grupo = self.df.groupby('source')['id'].count()
num_total_registros = self.df.shape[0]
grupo = (grupo * 100) / num_total_registros
        # Take the indices holding the 5 largest percentages;
        # the rest are grouped under 'others'.
top_index = grupo.nlargest(5).index
others_index = [i for i in grupo.index if i not in top_index]
# Creamos un df a partir de la serie y renombramos las columnas
df_percent = pd.DataFrame([grupo]).T.reset_index()
df_percent.rename(columns={'id': 'value'}, inplace=True)
        # If the aggregation by source returns more than 5 categories,
        # replace the values that are not among the top 5 with 'Otros'
if len(others_index) > 0:
df_percent = df_percent.replace(others_index, 'Otros')
# Vemos cuales son los porcentajes de los orígenes
percent = df_percent.groupby('source').sum().reset_index()
percent = percent.sort_values('value', ascending=False)
# Mostramos en pantalla los porcentajes obtenidos
print("Los porcentajes por origen son:\n")
for index, row in percent.iterrows():
print('{} - {:,.2f}% '.format(row['source'], row['value']))
# Mostramos el gráfico
fig, ax = plt.subplots(figsize=(14, 6))
palette = sns.color_palette('Pastel1')
ax.pie(percent['value'], labels=percent['source'],
autopct='%1.1f%%',
startangle=90, colors=palette)
ax.axis('equal')
fig.suptitle(u'Distribución por Origen',
horizontalalignment='center', y=0.95,
fontsize=14)
plt.legend(bbox_to_anchor=(1.1, 1))
plt.savefig(self.get_save_picture_path('Sources.png'),
bbox_inches="tight")
plt.show()
def get_polarity_classification(self, s):
"""
        Classify the polarity of a text using textblob
        :param s: text string
        :return: polarity, one of: 'Positiva', 'Neutra' or 'Negativa'
"""
        # First clean the text by removing special characters, links, etc.
s = ' '.join(
re.sub("(@[A-Za-z0-9]+)|([^0-9A-Za-z \t])|(\w+:\/\/\S+)",
" ", s).split())
analysis = TextBlob(s)
if analysis.sentiment.polarity > 0:
return 'Positiva'
elif analysis.sentiment.polarity == 0:
return 'Neutra'
else:
return 'Negativa'
def get_sentimental_analysis(self):
"""
        Return the results obtained, and a plot, after applying
        sentiment analysis to the tweets
        :param: text column of the df
        :return: polarity percentages and a plot
"""
grupo = self.df.groupby('sa')['id'].count().sort_index(
ascending=False)
num_total_registros = self.df.shape[0]
grupo = (grupo * 100) / num_total_registros
# Creamos un df a partir de la serie y renombramos las columnas
df_sent = pd.DataFrame([grupo], ).T.reset_index()
df_sent.columns = ['sa', 'value']
df_sent['value'] = pd.to_numeric(df_sent['value'])
# Mostramos en pantalla los porcentajes obtenidos
print("Los porcentajes por Polaridad son:\n")
for index, row in df_sent.iterrows():
print('{} - {:,.2f}% '.format(row['sa'], row['value']))
# Mostramos el gráfico
fig, ax = plt.subplots(figsize=(14, 6))
palette = sns.color_palette('Pastel1')
ax.pie(df_sent['value'], labels=df_sent['sa'], autopct='%1.1f%%',
startangle=90, colors=palette)
ax.axis('equal')
fig.suptitle(u'Sentimental Analysis',
horizontalalignment='center', y=0.95,
fontsize=14)
plt.legend(bbox_to_anchor=(1.1, 1))
plt.savefig(self.get_save_picture_path('Sentimental.png'),
bbox_inches="tight")
plt.show()
def get_media_longitud(self):
"""
        Get the mean length of the tweets
        :return: mean value of df['num_caracteres']
"""
media = np.mean(self.df['num_caracteres'])
print('La longitud media de los tweets es: {:.0f} caracteres'
.format(media))
def get_custom_max_min(self, name_col, max_min='min'):
"""
        Return the value and row index of the largest (or smallest) entry
        of the column passed as a parameter
        :param max_min: string indicating whether the maximum or the minimum is sought
        :param name_col: column of the df
        :return: dictionary containing the value found and the corresponding
                 index
"""
result = {}
if max_min == 'max':
valor = np.max(self.df[name_col])
else:
valor = np.min(self.df[name_col])
result['valor'] = valor
result['index'] = self.df[self.df[name_col] == valor].index[0]
return result
def get_connection_api():
"""
    Return the authentication handle for the twitter api
    :return: auth
"""
    # Connect to the api using the provided credentials
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token_key, access_token_secret)
return auth
def search_with_stream():
"""
    Invoke the twitter listening stream and return a list with the tweets
    obtained. The number of tweets to collect is requested
    at run time.
    :return: status_list
"""
while True:
        # raw_input is used because this runs on Python 2.7;
        # it does not exist in Python 3.6
user_input = raw_input(
'Cuantos tweets desea recuperar con el stream?\n')
try:
num_tweets = int(user_input)
break
except ValueError:
print("El valor introducido no es un número entero.\n")
print('Comienza a ejecutarse el stream .....')
auth = get_connection_api()
listener = MyListener(file_name, num_tweets)
api = tweepy.streaming.Stream(auth, listener, tweet_mode='extended')
api.filter(languages=stream_language,
track=query_list, async=False)
return listener.status_list
def create_dataframe_from_list(tweet_list):
columns = ['id', 'created_at', 'user', 'location', 'text',
'full_text_flag', 'source', 'time_zone', 'from_file']
index = pd.Series(tweet.id for tweet in tweet_list)
rows_list = []
for tweet in tweet_list:
truncated = tweet.truncated
if truncated:
text = tweet.extended_tweet['full_text']
full_text_flag = 'S'
else:
text = tweet.text
full_text_flag = 'N'
data = {'id': tweet.id,
'created_at': tweet.created_at,
'user': tweet.user.name,
'location': tweet.user.location,
'text': text.encode('ascii', 'ignore').lower(),
'full_text_flag': full_text_flag,
'source': tweet.source,
'time_zone': tweet.user.time_zone,
'from_file': 'direct_from_list'}
rows_list.append(data)
df_list = pd.DataFrame(rows_list, columns=columns, index=index)
df_list.index.name = 'id'
# Cambiamos el datatype de la columna created_at a datetime
df_list['created_at'] = pd.to_datetime(df_list['created_at'])
# Creamos la nueva columna con la hora
df_list['hora'] = df_list['created_at'].dt.hour
return df_list
def create_dataframe_from_json():
"""
    Build a pandas dataframe from the json files that contain
    stored tweets
    :return: dataframe with the columns 'created_at', 'user', 'location',
    'text', 'full_text_flag', 'hora', 'source', 'time_zone', 'from_file'
    and the tweet id as index
"""
columns = ['id', 'created_at', 'user', 'location', 'text',
'full_text_flag', 'source', 'time_zone', 'from_file', 'hora']
df_json = pd.DataFrame(columns=columns)
for root, dirs, filenames in os.walk(dir_json):
for f in filenames:
print('Cargando archivo ' + f)
file_path = os.path.join(dir_json, f)
df_json = df_json.append(create_partial_df(file_path))
return df_json
def create_partial_df(file_path):
try:
with open(file_path, 'r') as f:
file_name_aux = os.path.basename(os.path.normpath(file_path))
tweets = json.loads(f.read())
index = pd.Series(x['id'] for x in tweets['tweets'])
columns = ['id', 'created_at', 'user', 'location', 'text',
'full_text_flag', 'source', 'time_zone', 'from_file']
rows_list = []
for x in tweets['tweets']:
soup = BeautifulSoup(x['source'], 'html5lib')
source = soup.a.get_text()
truncated = x['truncated']
if truncated:
text = x['extended_tweet']['full_text']
full_text_flag = 'S'
else:
text = x['text']
full_text_flag = 'N'
data = {'id': x['id'],
'created_at': x['created_at'],
'user': x['user']['name'],
'location': x['user']['location'],
'text': text.encode('ascii', 'ignore').lower(),
'full_text_flag': full_text_flag,
'source': source.encode('ascii', 'ignore'),
'time_zone': x['user']['time_zone'],
'from_file': file_name_aux}
rows_list.append(data)
df_aux = pd.DataFrame(rows_list, columns=columns, index=index)
df_aux.index.name = 'id'
# Cambiamos el datatype de la columna created_at a datetime
df_aux['created_at'] = pd.to_datetime(df_aux['created_at'])
# Creamos la nueva columna con la hora
df_aux['hora'] = df_aux['created_at'].dt.hour
return df_aux
except BaseException as ex:
sys.stderr.write("Error on_data:{}\n".format(ex))
time.sleep(5)
return False
def create_menu_principal():
print('Escoja entre una de la siguientes opciones')
print('1- Búsqueda Con Stream')
print('2- Estadísticas desde los json adjuntos (dir:./json)')
print('3- Salir')
option = int(input('Que opción desea?'))
return option
def main():
global df
if os.path.isfile(file_name):
os.remove(file_name)
option = create_menu_principal()
if option == 1:
tweets_list = search_with_stream()
df = create_dataframe_from_list(tweets_list)
elif option == 2:
df = create_dataframe_from_json()
else:
exit(0)
# Instanciamos la clase MyTokenizerClass para poder trabajar con ella
mtk = MyTokenizerClass()
# Número de palabras y caracteres
vect_num = np.vectorize(mtk.num_palabras_caracteres)
df['num_palabras'] = [d['palabras'] for d in vect_num(df['text'])]
df['num_caracteres'] = [d['caracteres'] for d in vect_num(df['text'])]
print('\n')
# Instanciamos la clase MyStatisticsClass para poder trabajar con ella
msc = MyStatisticsClass(df)
# Distribución de tweets a lo largo del día
print('\n')
msc.get_tweets_per_hour()
# Distribución de los elementos de la consulta de filtrado
print('\n')
msc.get_count_of_query_words()
# Distribución de las zonas horarias
print('\n')
msc.get_time_zone_distribution()
# Distribución Fuentes de los tweets
print('\n')
msc.get_porcentaje_fuente_tweet()
# Sentimental analysis de los tweets
vect_pol = np.vectorize(msc.get_polarity_classification)
df['sa'] = vect_pol(df['text'])
print('\n')
msc.get_sentimental_analysis()
# Longitud media de los tweets
print('\n')
msc.get_media_longitud()
# Tweets con mayor número de caracteres
max_carac = msc.get_custom_max_min('num_caracteres', 'max')
print('\n')
print("El tweet más largo es: \n{}"
.format((df['text'][max_carac['index']])))
print("Nº de caracteres: {}".format(max_carac['valor']))
# Tweets con menor número de caracteres
min_carac = msc.get_custom_max_min('num_caracteres', 'min')
print('\n')
print("El tweet más corto es: \n{}"
.format((df['text'][min_carac['index']])))
print("Nº de caracteres: {}".format(min_carac['valor']))
# Tweets totales
print('\n')
print("Total de tweets recogidos: {}".format(df.shape[0]))
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt as e:
print(e.message)
| [
"matplotlib",
"seaborn"
] |
0f6c0af07452b23778a4f55becb808eb31010eca | Python | kirmanid/mssm-ml | /linear_regression.py | UTF-8 | 1,828 | 3.515625 | 4 | [] | no_license | import numpy as np
import seaborn as sns
class LinearRegression:
def __init__(self,x):
self.w = np.array([1]*(np.size(x[0])+1)) # this sets the number of weights to the number of features +1
def hypothesis(self, xRow):
return np.dot (self.w, xRow)
def getCost(self,x,y):
m = len(y)
z = 0
for i in range (m):
z += (self.hypothesis(x[i])-y[i])**2
z = z/(2*m)
return z
def takeCostDeriv(self,x,y):
m = len(y)
d = [0]*len(self.w)
for weightIndex in range (len(self.w)):
z = 0
for i in range (m):
z += (self.hypothesis(x[i])-y[i])*x[i][weightIndex]
z = z/m
d[weightIndex] = z
return d
def doGradientDescent(self, learningRate, x, y):
d = self.takeCostDeriv(x,y)
newWeights = np.zeros(len(self.w))
for i in range (len(self.w)):
newWeights[i] = self.w[i] - (learningRate*d[i])
return newWeights
def addXNoughts(self, x):
x = np.reshape(x,((x.shape[0],np.size(x[0])))) # reshapes flat array or 2d array into transposed version
x0 = np.ones((x.shape[0],1), dtype=np.uint8)
x = np.hstack((x0,x))
return x
def train(self, epochs, x, y, learningRate):
x = self.addXNoughts(x)
for i in range (epochs):
self.w = self.doGradientDescent(learningRate, x, y)
print ("epoch " + str(i))
#print(self.getCost(x,y)) # shows progress
print("\nFinal Cost:")
print (self.getCost(x,y))
learningRate = 0.01
epochs = 200
iris = sns.load_dataset('iris').to_numpy()[:,:4].astype(np.float32)
np.random.shuffle(iris)
x = iris[:, 0:3]
y = iris[:, 3:4]
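# x holds the first three iris measurements and y the fourth (petal width),
# so the model regresses petal width on the other features.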
a = LinearRegression(x)
a.train(epochs,x,y,learningRate)
| [
"seaborn"
] |
965d4154baed1230be1704b9dc18a2248f8276a2 | Python | Yuxindf/In-network-aggregation | /Test/EfficiencyTest1-1.py | UTF-8 | 1,201 | 3.28125 | 3 | [] | no_license | # Test Complex situation: three clients
# (1) Target: packets of two go through proxy, one to server directly
# (2) Contrast: packets of three clients go through proxy
# Draw the packets received by server
import collections
import matplotlib.pyplot as plt
import numpy as np
import json
def load_file(file):
with open(file, "r") as f:
lines = f.read()
f.close()
lines = json.loads(lines)
return lines
# Proxy Efficiency
data = load_file("./EfficiencyTest1-1.txt")
data = collections.OrderedDict(data)
labels = data["labels"]
server = data["without proxy"]
proxy = data["with proxy"]
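# The JSON file is expected to provide exactly these keys:
# "labels", "without proxy" and "with proxy".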
x = np.arange(len(labels)) # the label locations
width = 0.35 # the width of the bars
fig, ax = plt.subplots()
rects1 = ax.bar(x - width/2, server, width, label='Without Proxy')
rects2 = ax.bar(x + width/2, proxy, width, label='With Proxy')
# Add some text for labels, title and custom x-axis tick labels, etc.
ax.set_xlabel('Number of Packets of Each Client')
ax.set_ylabel('Number of Packets Received By Server')
ax.set_xticks(x)
ax.set_xticklabels(labels)
ax.legend()
ax.grid(True)
ax.bar_label(rects1, padding=3)
ax.bar_label(rects2, padding=3)
fig.tight_layout()
plt.show()
| [
"matplotlib"
] |
0e4382fc5abb3b125ba68c2499730ba4e645da12 | Python | homijan/Stats-Covid19 | /WorldCountriesData.py | UTF-8 | 5,190 | 3.21875 | 3 | [] | no_license | from datetime import timedelta, date
import numpy as np
# python3 WorldCountriesData.py
countries = ['Czechia', 'Spain', 'US', 'China-Hubei', 'Italy']
#countries = ['Czechia', 'Spain', 'California', 'Ecuador']
# Fill the data on one-day-basis
start_date = date(2020, 1, 22)
end_date = date.today()
def daterange(start_date, end_date):
for n in range(int ((end_date - start_date).days)):
yield start_date + timedelta(n)
# Read from csv file from the Johns Hopkins repo.
def GetCSVData(csvfile_name, countryname, iname, iinfect, idead):
ifile = open(csvfile_name, "r")
#print("Name of the file: ", ifile.name)
lines = ifile.readlines()
Ninfected = 0
Ndead = 0
for line in lines:
words = line.split(',')
if countryname==words[iname]:
if words[iinfect]!='':
Ninfected += int(words[iinfect])
if words[idead]!='':
Ndead += int(words[idead])
ifile.close()
return Ninfected, Ndead
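# GetCSVData sums over every row whose country column matches, so provinces
# of the same country are aggregated into a single total.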
# Define one country data and operations
class countryData:
def __init__(self, countrynames, population):
self.countrynames = countrynames
self.population = population
self.data = {}
def AddDay(self, isodate, Ninfected, Ndead):
datum = date.fromisoformat(isodate)
self.data.update({datum : {'date' : datum, 'Ninfected' : Ninfected, 'Ndead' : Ndead}})
def AddDayFromCSV(self, isodate):
datum = date.fromisoformat(isodate)
# Covid data are placed in directory:
csvfile_name = 'COVID-19/csse_covid_19_data/csse_covid_19_daily_reports/'+datum.strftime("%m-%d-%Y")+'.csv'
csv_position = 1
Ninfected = 0
Ndead = 0
for countryname in self.countrynames:
# Special case of region of the US.
if countryname=='California':
Ninf, Ndea = GetCSVData(csvfile_name, countryname, 0, 3, 4)
Ninfected += Ninf
Ndead += Ndea
      # Exception in country name, "Korea, South" is used, breaking the ',' split.
      elif countryname=='"Korea':
Ninf, Ndea = GetCSVData(csvfile_name, countryname, 1, 4, 5)
Ninfected += Ninf
Ndead += Ndea
# Standard read: region, country, date, infected(confirmed), dead, recovered, latitude, longitude
else:
Ninf, Ndea = GetCSVData(csvfile_name, countryname, 1, 3, 4)
Ninfected += Ninf
Ndead += Ndea
self.AddDay(isodate, Ninfected, Ndead)
def GetDatesInfectedDead(self):
dates = self.data.keys()
infected = [self.data[datum]['Ninfected'] for datum in dates]
dead = [self.data[datum]['Ndead'] for datum in dates]
return dates, infected, dead
# Create a list of countries to store Covid19 data about.
WorldCountries = {}
# Add country name/s and population.
WorldCountries.update({'Czechia' : countryData(['Czech Republic', 'Czechia'], 10.65e6)})
WorldCountries.update({'Ecuador' : countryData(['Ecuador'], 16.62e6)})
# The population corresponds to Hubei province, which contains 95% of the deaths in China.
WorldCountries.update({'China-Hubei' : countryData(['China', 'Mainland China'], 58.5e6)})
WorldCountries.update({'South Korea' : countryData(['South Korea','"Korea'], 51.47e6)})
WorldCountries.update({'US' : countryData(['US'], 327.2e6)})
WorldCountries.update({'Italy' : countryData(['Italy'], 60.48e6)})
WorldCountries.update({'Spain' : countryData(['Spain'], 46.66e6)})
WorldCountries.update({'California' : countryData(['California'], 39.56e6)})
WorldCountries.update({'France' : countryData(['France'], 66.99e6)})
WorldCountries.update({'Germany' : countryData(['Germany'], 82.79e6)})
WorldCountries.update({'UK' : countryData(['UK', 'United Kingdom'], 66.44e6)})
WorldCountries.update({'Russia' : countryData(['Russia'], 144.5e6)})
# Fill the data on one-day-basis.
for country in countries:
for single_date in daterange(start_date, end_date):
WorldCountries[country].AddDayFromCSV(single_date.isoformat())
# PLOT DATA
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.dates import (WEEKLY, DateFormatter,
rrulewrapper, RRuleLocator)
font = {'family' : 'Sans',
#'weight' : 'bold',
'size' : 15}
matplotlib.rc('font', **font)
fig, ax = plt.subplots()
plt.title('Dead patients (per 1 million people)')
rule = rrulewrapper(WEEKLY, interval=2)
loc = RRuleLocator(rule)
ax.xaxis.set_major_locator(loc)
formatter = DateFormatter('%m/%d/%y')
ax.xaxis.set_major_formatter(formatter)
# Loop over the list of countries.
for country in countries:
dates, infected, dead = WorldCountries[country].GetDatesInfectedDead()
plt.plot_date(dates, np.array(dead) / float(WorldCountries[country].population) * 1e6, label=country)
plt.legend()
plt.savefig('dead.png')
fig, ax = plt.subplots()
plt.title('Confirmed patients (per 1 million people)')
rule = rrulewrapper(WEEKLY, interval=2)
loc = RRuleLocator(rule)
ax.xaxis.set_major_locator(loc)
formatter = DateFormatter('%m/%d/%y')
ax.xaxis.set_major_formatter(formatter)
# Loop over the list of countries.
for country in countries:
dates, infected, dead = WorldCountries[country].GetDatesInfectedDead()
plt.plot_date(dates, np.array(infected) / float(WorldCountries[country].population) * 1e6, label=country)
plt.legend()
plt.savefig('confirmed.png')
plt.show()
| [
"matplotlib"
] |
06853b05c0445fe6bc5ccd2623fb2fd1d4c2ae42 | Python | andynet/bachelor_thesis | /scripts/103_PCA.py | UTF-8 | 2,612 | 2.59375 | 3 | [] | no_license | #!/usr/bin/python3
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
from matplotlib.colors import Normalize
import pandas as pd
import sys
import os
def get_hosts(file):
with open(file) as f:
lines = f.readlines()
hosts = dict()
for line in lines:
phage = line.split()[0]
host = line.split()[1]
hosts[phage] = host
return hosts
def on_pick(event):
global matrices
global groups
global hosts
print('--------------------------------------------------------------------------------')
gind = groups.index(event.artist)
for eind in event.ind:
phage = matrices[gind].index[eind]
print('{}\t{}'.format(phage, hosts[phage]))
if len(sys.argv) < 4 or len(sys.argv) % 2 != 0:
print('Usage:', sys.argv[0], '<hosts> <label> <matrix> <label> <matrix>...')
exit()
data_dir = os.path.dirname(os.path.abspath(sys.argv[1]))
labels = [0]
matrices = []
number_of_records = []
for i in range(0, len(sys.argv)//2 - 1):
with open(sys.argv[i*2+2]) as f:
labels.append(labels[-1]+len(f.readlines()))
matrix = pd.read_csv(sys.argv[i*2+3], sep='\t', header=0, index_col=0)
matrices.append(matrix)
features = pd.concat(matrices)
hosts = get_hosts(sys.argv[1])
pca_comps = PCA(8)
pca_comps.fit(features)
print(pca_comps.explained_variance_ratio_)
pca = pca_comps.transform(features)
cmap = plt.get_cmap('jet')
norm = Normalize(vmin=0, vmax=len(labels))
fig = plt.figure()
groups = []
for i in range(1, len(labels)):
group = plt.scatter(pca[labels[i-1]:labels[i], 5], pca[labels[i-1]:labels[i], 7],
c=[cmap(norm(i))]*(labels[i]-labels[i-1]), edgecolor='k', s=50, picker=True, alpha=0.3,
label=sys.argv[i*2])
groups.append(group)
cid = fig.canvas.mpl_connect('pick_event', on_pick)
plt.legend(loc='best', fontsize='x-small')
plt.show()
# generate all graphs
# for j in range(8):
# for k in range(8):
#
# fig = plt.figure()
# groups = []
#
# for i in range(1, len(labels)):
# group = plt.scatter(pca[labels[i-1]:labels[i], j], pca[labels[i-1]:labels[i], k],
# c=[cmap(norm(i))]*(labels[i]-labels[i-1]), edgecolor='k', s=50, picker=True,
# alpha=0.3, label=sys.argv[i*2])
# groups.append(group)
#
# plt.legend(loc='best', fontsize='x-small')
# fig.savefig('104_PC{}_PC{}.png'.format(j, k), dpi=300)
# plt.close(fig)
# plt.figure()
# plt.plot(pca_comps.components_[0], '.')
# plt.show()
| [
"matplotlib"
] |
a4f90093d0e1b87e3934469d081e4ff3bc3bc4cb | Python | kunalagg04/Machine-Learning | /02 Multiple Linear Regression/backward elimination.py | UTF-8 | 1,387 | 2.765625 | 3 | [] | no_license | import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.preprocessing import LabelEncoder , OneHotEncoder
dataset = pd.read_csv('50_Startups.csv')
X = dataset.iloc[:,:-1].values
Y = dataset.iloc[:,-1].values
labelencoder_X = LabelEncoder()
X[:, 3] = labelencoder_X.fit_transform(X[:, 3])
onehotencoder = OneHotEncoder(categorical_features= [3])
X = onehotencoder.fit_transform(X).toarray()
#avoiding dummy variable trap
#removing first column
X = X[:, 1:]
#building model with backward elimination
import statsmodels.api as sm
#adding the x0 (intercept) column to the X matrix
# Doing this will add x0 to end => X = np.append(arr = X, value = np.ones(50,1).astype(int), axis = 1 )
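# statsmodels OLS does not add an intercept automatically, so the column of ones acts as the x0 (intercept) term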
X = np.append(arr = np.ones((50,1)).astype(int), values = X , axis = 1 )
X_opt = X[: , [0,1,2,3,4,5]]
regressor_OLS = sm.OLS(endog=Y, exog = X_opt).fit()
regressor_OLS.summary()
#x2 has the highest p-value, so we remove it and fit the model again.
X_opt = X[: , [0,1,3,4,5]]
regressor_OLS = sm.OLS(endog=Y, exog = X_opt).fit()
regressor_OLS.summary()
#now the new x1 has the highest p-value, so we remove it as well.
X_opt = X[: , [0,3,4,5]]
regressor_OLS = sm.OLS(endog=Y, exog = X_opt).fit()
regressor_OLS.summary()
#now the new x2 has the highest p-value; its index is 4 in the original X
X_opt = X[: , [0,3,5]]
regressor_OLS = sm.OLS(endog=Y, exog = X_opt).fit()
regressor_OLS.summary()
| [
"matplotlib"
] |
da36bcf1baa3591fbadbb4746961e9803d0f5f4c | Python | SWARAJkumar/snn-rl | /snn-rl/furtherFormulas/3dBarChartGenerator.py | UTF-8 | 1,848 | 2.890625 | 3 | [
"MIT"
] | permissive | # from http://pythonprogramming.net/3d-bar-charts-python-matplotlib/
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import numpy as np
import pylab as pylab
fig = plt.figure()
ax1 = fig.add_subplot(111, projection='3d')
xpos = np.ones(60)
xpos[15:30] *= 2
xpos[30:45] *= 3
xpos[45:60] *= 4
ypos = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
num_elements = len(xpos)
zpos = np.zeros(60)
dx = np.ones(60)*.1
dy = np.ones(60)*.5
dz = [0.90433194, 0.6139531, 0.50387484, 0.55220372, 0.51213536, 0.85443374, 0.99955922, 0.5039825, 0.73091913, 0.9780236, 0.5241028, 0.71571812, 0.93782861, 0.51210244, 0.73074697,
0., 0., 0.03412608, 0., 0.90455366, 0.78683668, 0., 0.95912629, 0.7282637, 0., 0.78548583, 0.78935491, 0.03193823, 0.00609877, 0.17287094,
0.4474444, 0., 0.98135641, 0., 0.96315942, 0., 0., 0., 0.15930208, 0., 0.77299245, 0., 0., 0.71739497, 0.02804206,
0., 0., 0.99815102, 0., 0.9239562, 0., 0., 0.32862838, 0.29682383, 0., 0.85108903, 0., 0., 0., 0.6687179]
colors = ['r']*15+['g']*15+['b']*15+['y']*15
xLabel = ax1.set_xlabel('\nOutput Neuron', linespacing=1.2)
yLabel = ax1.set_ylabel('\nInput Neuron', linespacing=1.2)
zLabel = ax1.set_zlabel('\nWeight', linespacing=1.2)
neuron1 = plt.Rectangle((0, 0), 0.1, 0.1,fc='r')
neuron2 = plt.Rectangle((0, 0), 0.1, 0.1,fc='g')
neuron3 = plt.Rectangle((0, 0), 0.1, 0.1,fc='b')
neuron4 = plt.Rectangle((0, 0), 0.1, 0.1,fc='y')
ax1.legend((neuron1,neuron2,neuron3,neuron4),("neuron 1","neuron 2","neuron 3","neuron 4"),'best')
ax1.bar3d(xpos, ypos, zpos, dx, dy, dz, color=colors, alpha=0.5)
ax1.view_init(elev=60.0, azim=40)
pylab.savefig('Weights3dBar.jpg')
plt.show()
| [
"matplotlib"
] |
5cf7e1cf43216bd3a2c4fc8643e3d145e074bc8d | Python | ewquon/pylib | /bin/sowfa_getQwallMean.py | UTF-8 | 1,354 | 2.765625 | 3 | [] | no_license | #!/usr/bin/env python
#
# Extract the average wall heat flux from the log file
#
import sys
import refs
if len(sys.argv) <= 1:
sys.exit('specify log file(s) to process')
ref = refs.read({'rho':1.2, 'Cp':1000.0})
t = []
qwMean = []
for fname in sys.argv[1:]:
print 'Processing',fname
with open(fname,'r') as f:
for line in f:
if line.startswith('Time ='):
t.append(float(line.split()[2]))
elif 'qwMean' in line:
findstr = 'qwMean ='
val = line[line.find(findstr)+len(findstr):].split()[0]
qwMean.append(float(val))
numPredictorCorrectors = len(qwMean)/len(t)
print 'Detected number of predictor and correctors =',numPredictorCorrectors
# pull out the value from the last corrector step
qwMean = qwMean[numPredictorCorrectors-1:-1:numPredictorCorrectors]
qwMean_Wpm2 = qwMean[-1] * ref.rho * ref.Cp # convert to W/m^2
print 'qwMean(t={:.1f} s) = {:.4g} K-m/s = {:.4g} W/m^2'.format(t[-1],qwMean[-1],qwMean_Wpm2)
# save extracted data
import numpy as np
np.savetxt('postProcessing/qwMeanHist.dat',np.array((t,qwMean)).T,fmt='%.4g')
# plot history
import matplotlib.pyplot as plt
plt.plot(t,qwMean)
plt.xlabel(r'$t$ [s]')
plt.ylabel(r'$q_w$ [K-m/s]')
plt.suptitle('Average Heat Flux')
plt.savefig('postProcessing/qwMeanHist.png')
plt.show()
| [
"matplotlib"
] |
8204430a5da2ba93462e30a78c8daa53ab9fa0a2 | Python | fox8090/Data-Collection-and-Cleaning | /zpqf41.py | UTF-8 | 5,131 | 2.953125 | 3 | [] | no_license | from bs4 import BeautifulSoup
import requests
from requests.exceptions import HTTPError
import csv
import openpyxl
import numpy as np
import pandas as pd
import gensim
from gensim.models import word2vec
from nltk.corpus import stopwords
import matplotlib.pyplot as plt
import seaborn as sns
##Problem 1 & Problem 2 Combined
#Getting keywords
path = 'keywords.xlsx'
wb = openpyxl.load_workbook(path)
sheet = wb.active
keywords = []
for cell in range(2, sheet.max_column + 1):
keywords.append(sheet.cell(1, cell).value)
#Take query and add 100 articles to csv file
def getArticles(query, writer):
query = '+'.join(query.split(" "))
url = 'https://www.bbc.co.uk/search?q=' + query
page = 1
articleLinks = []
flag = False
while len(articleLinks) != 100:
toSearch = url + '&page=' + str(page)
print("SEARCHING... " + toSearch)
try:
response = requests.get(toSearch)
response.raise_for_status()
except HTTPError as http_err:
print('HTTP ERROR OCCURRED ACCESSING '+ toSearch + ': ', http_err)
continue
except Exception as err:
print('OTHER ERROR OCCURRED ACCESSING '+ toSearch + ': ', err)
continue
else:
text = BeautifulSoup(response.text, 'html.parser')
for link in text.find_all('a'):
item = link.get('href')
if item in articleLinks:
if item == articleLinks[0]:
print("BREAKING LENGTH IS ", len(articleLinks), item)
flag = True
break
else:
continue
if len(articleLinks) == 100 or page == 30:
print("BREAKING LENGTH IS ", len(articleLinks), item)
flag = True
break
elif 'bbc.co.uk/news/' in item and item[-1].isnumeric() and not('news/help-' in item):
try:
linkResponse = requests.get(item)
linkResponse.raise_for_status()
except HTTPError as http_err:
print('HTTP ERROR OCCURRED ACCESSING '+ item + ': ', http_err)
continue
except Exception as err:
print('OTHER ERROR OCCURRED ACCESSING '+ item + ': ', err)
continue
else:
toCheck = BeautifulSoup(linkResponse.text, 'html.parser')
isArticle = toCheck.find('article')
if isArticle:
articleLinks.append(item)
writer.writerow([query, item, isArticle.get_text(separator=' ').replace("|", " ").replace("\n", " ")])
if flag:
break
page += 1
print(articleLinks, len(articleLinks), len(set(articleLinks)))
return articleLinks
def scrapeAll(keywords):
fileName = 'webcontent.csv'
with open(fileName, 'w', newline='', encoding="utf-8") as theFile:
writer = csv.writer(theFile, delimiter='|')
writer.writerow(["Keyword", "URL", "Content"])
for keyword in keywords:
getArticles(keyword, writer)
print(keyword + " IS NOW DONE")
##Problem 3
def preprocess(df, keyword):
text = []
smallSet = df[df['Keyword'].str.contains(keyword, case=False)]
for article in smallSet['Content'].values.tolist():
text.append(gensim.utils.simple_preprocess(article))
return text
def getDistance(df, phrase1, phrase2):
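    # phrase similarity: train a Word2Vec model on the articles scraped for the words of both
    # phrases, then return the average pairwise word-vector similarity between the two phrases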
phrase1 = phrase1.lower()
phrase2 = phrase2.lower()
if phrase1 == phrase2:
return 1
#make vocab
vocab = []
for iWord in phrase1.split():
vocab.extend(preprocess(df, iWord))
for jWord in phrase2.split():
vocab.extend(preprocess(df, jWord))
#additional cleaning
#remove stopwords
    stop_words_en = set(stopwords.words('english'))
    filtered = []
    for doc in vocab:
        # vocab holds one token list per article, so filter the tokens inside each document
        filtered.append([word for word in doc if word not in stop_words_en])
#train model
model = word2vec.Word2Vec(filtered, vector_size=150, window=10, min_count=2, workers=10)
#get distance for each word
distance = 0
for iWord in phrase1.split():
value = 0
for jWord in phrase2.split():
value += model.wv.similarity(iWord, jWord)
distance += value / len(phrase2.split())
return distance / len(phrase1.split())
scrapeAll(keywords)
datafile = pd.read_csv("webcontent.csv", delimiter='|')
items = []
cols = ['Keywords']
for keyword in keywords:
print(keyword)
item = [keyword]
cols.append(keyword)
for other in keywords:
print(other)
distance = getDistance(datafile, keyword, other)
item.append(distance)
items.append(item)
distancedf = pd.DataFrame(items, columns=cols)
distancedf.to_excel("distance.xlsx", index=False)
#Problem 4
distancedf = pd.read_excel('distance.xlsx', index_col=0)
mask = np.triu(np.ones_like(distancedf, dtype=bool))  # hide the upper triangle so each keyword pair is shown once
sns.heatmap(distancedf, cmap=sns.diverging_palette(220, 20, n=200), robust=True, annot=True, annot_kws={'size':8}, cbar=True, square=True, fmt ='.3g', mask=mask)
plt.show() | [
"matplotlib",
"seaborn"
] |
849290b64a2caa0bea8d1dc6881de05107d7eed0 | Python | hanzhn/mxnet-ssd | /plot_loss.py | UTF-8 | 1,336 | 2.53125 | 3 | [
"MIT"
] | permissive | import matplotlib.pyplot as plt
import numpy as np
import re
import argparse
parser = argparse.ArgumentParser(description='Parses log file and generates train/val curves')
parser.add_argument('--log-file', type=str,default="/home/smiles/hz/mxnet-ssd/train-ssd.log",
help='the path of log file')
args = parser.parse_args()
TR1_RE = re.compile('.*?]\sTrain-CrossEntropy=([\d\.]+)')
TR2_RE = re.compile('.*?]\sTrain-SmoothL1=([\d\.]+)')
VA_RE = re.compile('.*?]\sValidation-mAP=([\d\.]+)')
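# the patterns match mxnet training-log lines such as "INFO:root:Epoch[12] Train-CrossEntropy=0.693421"
# (the log prefix may vary; only the "] <metric>=<value>" part is required)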
log = open(args.log_file).read()
log_tr1 = [float(x) for x in TR1_RE.findall(log)]
log_tr2 = [float(x) for x in TR2_RE.findall(log)]
log_va = [float(x) for x in VA_RE.findall(log)]
idx = np.arange(len(log_tr1))
plt.figure(figsize=(8, 6))
plt.xlabel("Epoch")
plt.ylabel("Accuracy")
log_tr = [log_tr1[i]+log_tr2[i] for i in range(len(log_tr1))]
plt.plot(idx, log_tr1, 'o', linestyle='-', color="r",
label="Train classify loss")
plt.plot(idx, log_tr2, 'o', linestyle='-', color="g",
label="Train localization loss")
plt.plot(idx, log_va, 'o', linestyle='-', color="b",
label="Validation accuracy")
plt.legend(loc="best")
plt.xticks(np.arange(min(idx), max(idx)+1, 5))
plt.yticks(np.arange(0, 1, 0.2))
plt.ylim([0,1])
plt.show()
| [
"matplotlib"
] |
0bfcbd45dd65c336ca9281fc698f17ce17af9f73 | Python | ssam1994/bootcamp | /plotting_1.py | UTF-8 | 928 | 2.875 | 3 | [
"CC-BY-4.0",
"MIT"
] | permissive | import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
rc = {'lines.linewidth': 2, 'axes.labelsize' : 18, 'axes.titlesize' : 18}
sns.set(rc=rc)
# Load the food data.
xa_high = np.loadtxt('data/xa_high_food.csv', comments='#')
xa_low = np.loadtxt('data/xa_low_food.csv', comments='#')
# Make the bin boundaries
#bins = np.arange(1700, 2500, 50)
# Make bin boundaries
low_min = np.min(xa_low)
low_max = np.max(xa_low)
high_min = np.min(xa_high)
high_max = np.max(xa_high)
global_min = np.min([low_min, high_min])
global_max = np.max([low_max, high_max])
bins = np.arange(global_min-50, global_max+50, 50)
# Plot the data as a histogram
_ = plt.hist(xa_low, bins=bins)
plt.xlabel('Cross-sectional area (µm$^2$)')
plt.ylabel('count')
plt.show()
plt.close()
# Plot the data as a histogram
_ = plt.hist(xa_high, bins=bins)
plt.xlabel('Cross-sectional area (µm$^2$)')
plt.ylabel('count')
plt.show()
plt.close()
| [
"matplotlib",
"seaborn"
] |
6039a0864fedd63732640f324cb69014481dd22a | Python | RanGreidi/Python_Intro_to_Computers_Si | /Game_of_Life.py | UTF-8 | 8,581 | 3.203125 | 3 | [] | no_license | import game_of_life_interface
import numpy as np
import matplotlib.pyplot as plt
import time
class GameOfLife(game_of_life_interface.GameOfLife): # This is the way you construct a class that inherits properties from the interface
def __init__(self, size_of_board, starting_position, rules):
def initialize_board(starting_position,size_of_board):
''' This method returns initialized the board game.
Input size and position.
Output a list that holds the board with a size of size_of_board*size_of_board.
'''
if starting_position <= 3 or starting_position > 6:
population = [0, 255] # 0 represents dead and 255 represents alive
if starting_position == 1 or starting_position > 6 :
game_board = np.random.choice([0,255],(size_of_board,size_of_board))
elif starting_position == 2:
game_board = np.random.choice([0,255],(size_of_board,size_of_board), p=[0.2, 0.8])
elif starting_position == 3:
game_board = np.random.choice([0,255],(size_of_board,size_of_board), p=[0.8, 0.2])
ones_board = np.ones(shape=(size_of_board+2,size_of_board+2))
ones_board[1:size_of_board+1,1:size_of_board+1]=game_board
if starting_position == 4:
glider_gun =\
[[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,255,0,255,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,255,255,0,0,0,0,0,0,255,255,0,0,0,0,0,0,0,0,0,0,0,0,255,255],
[0,0,0,0,0,0,0,0,0,0,0,255,0,0,0,255,0,0,0,0,255,255,0,0,0,0,0,0,0,0,0,0,0,0,255,255],
[255,255,0,0,0,0,0,0,0,0,255,0,0,0,0,0,255,0,0,0,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[255,255,0,0,0,0,0,0,0,0,255,0,0,0,255,0,255,255,0,0,0,0,255,0,255,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,255,0,0,0,0,0,255,0,0,0,0,0,0,0,255,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,255,0,0,0,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[0,0,0,0,0,0,0,0,0,0,0,0,255,255,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]]
ones_board = np.ones(shape=(size_of_board+2,size_of_board+2))
ones_board[1:size_of_board+1,1:size_of_board+1] = np.zeros((size_of_board, size_of_board))
ones_board[11:20,11:47] = glider_gun
if starting_position == 5:
pulsar = np.zeros((17, 17))
pulsar[2, 4:7] = 255
pulsar[4:7, 7] = 255
pulsar += pulsar.T
pulsar += pulsar[:, ::-1]
pulsar += pulsar[::-1, :]
ones_board = np.ones(shape=(size_of_board+2,size_of_board+2))
ones_board[1:size_of_board+1,1:size_of_board+1] = np.zeros((size_of_board, size_of_board))
                # center the 17x17 pulsar pattern on the board
ones_board[int((size_of_board +1)/2-8):int((size_of_board +1)/2 + 17-8),int((size_of_board +1)/2-8):int((size_of_board +1)/2 + 17-8)]=pulsar
if starting_position == 6:
ones_board = np.ones(shape=(size_of_board+2,size_of_board+2))
ones_board[1:size_of_board+1,1:size_of_board+1] = np.zeros((size_of_board, size_of_board))
ones_board[6:8,6:10] = [[255,0,0,255],[0,255,255,0]]
return ones_board
self.size_of_board = size_of_board
self.starting_position = starting_position
self.rules = rules
self.Board = initialize_board(starting_position,size_of_board)
def update(self):
born_list = self.rules.split('/')[0] # making alist out of the input rules
survive_list=self.rules.split('/')[1]
survive_list=survive_list.split('S')[1]
born_list=born_list.split('B')[1]
born_list=[int(i) for i in str(born_list)] #born list is the list of all born rules aka B321=[3,2,1]
survive_list=[int(i) for i in str(survive_list)] #survive list is a list of all survive rules aka S312=[3,1,2]
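        # example: with rules 'B3/S23' (Conway's classic rules) a dead cell with exactly 3 live
        # neighbours is born, and a live cell with 2 or 3 live neighbours survives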
ones_board = np.ones(shape=(self.size_of_board+2,self.size_of_board+2))
        for t in range(self.size_of_board):      # row index
            for r in range(self.size_of_board):  # column index
                x = self.Board[t+1][r+1]  # the current cell (offset by 1 because of the padding border)
                # count the live (255) neighbours among the 8 surrounding cells
                counter = 0
                for dt in (0, 1, 2):
                    for dr in (0, 1, 2):
                        if dt == 1 and dr == 1:
                            continue
                        if self.Board[t+dt][r+dr] == 255:
                            counter = counter + 1
                # a dead cell is born according to the B rules, a live cell survives according to the S rules
                rules_to_check = born_list if x == 0 else survive_list
                if counter in rules_to_check:
                    ones_board[t+1, r+1] = 255
                else:
                    ones_board[t+1, r+1] = 0
self.Board=ones_board
#updated_board=ones_board
#return updated_board
def save_board_to_file(self, file_name):
board = self.Board
board=np.delete(board,0, 1)
board=np.delete(board,self.size_of_board, 1)
board=np.delete(board,0, 0)
board=np.delete(board,self.size_of_board, 0)
        plt.imsave(file_name, board)  # save the trimmed board (without the padding border) as an image
def display_board(self):
board = self.Board
board=np.delete(board,0, 1)
board=np.delete(board,self.size_of_board, 1)
board=np.delete(board,0, 0)
board=np.delete(board,self.size_of_board, 0)
return plt.show(plt.matshow(board))
def return_board(self):
board = self.Board
board=np.delete(board,0, 1)
board=np.delete(board,self.size_of_board, 1)
board=np.delete(board,0, 0)
board=np.delete(board,self.size_of_board, 0)
return board.tolist()
if __name__ == '__main__': # You should keep this line for our auto-grading code.
start_time = time.time()
B=GameOfLife(17,5,'B2/S0')
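    # 17x17 board, starting position 5 (the pulsar pattern), rule string 'B2/S0'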
i=0
while i < 0:
i=i+1
B.update()
B.save_board_to_file(str(i)+'pipi.png')
B.save_board_to_file(str(i)+'pipi.png')
print("--- %s seconds ---" % (time.time() - start_time))
B.display_board()
board = B.return_board()
print(board) | [
"matplotlib"
] |
77fa2bffa53162fa16ad490ca800d77c86685d2d | Python | hakdo/process_safety | /blowdown.py | UTF-8 | 2,641 | 3.125 | 3 | [
"MIT"
] | permissive | ###############################################################
# blowdown.py
#
# Script to calculate orifice size of ideal gas relief problem.
# Usage: ./blowdown.py 25 900
# Simulates blowdown through a 25 mm diameter orifice for 900 seconds.
#
# Dependencies: see requirements.txt
# H. Olsen - 2021 @sjefersuper
# License: MIT
###############################################################
from math import pi, sqrt
from scipy.integrate import solve_ivp as solver
from numpy import array, exp
import matplotlib.pyplot as plt
from scipy import interpolate
from scipy.optimize import fsolve
from sys import argv as args
# Physical constants and parameters
R = 8.314 # Universal gas constant in J/K mol
M = 25E-3 # molecular weight in kg/mol
k = 1.4 # Cp/Cv ratio of ideal gas
C = 0.72 # Orifice coefficient
Z = 1. # compressibility factor
# Operational factors
T0 = 400.0 # K, temperature
P0 = 2.01E7 # Pa, initial pressure
Q = 18000 # W, heat input
V = 20.0 # m3, volume
Cv = 27 # Specific heat at constant volume, J/K kg
initmass = P0*V*M/(R*T0)
def ystrength(T):
# Function returns yield strength P_y (Pa) at temperature T (K)
P_y = 3.0E7 - 50000.*(T-400.)
return P_y
def cflow(P,T,D):
# Function to give critical mass flow (kg/s)
# D = diameter of orifice plate in mm
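    # standard choked (critical) flow relation for an ideal gas orifice; assumes the flow
    # remains choked, i.e. the pressure ratio stays above the critical value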
A = 0.25*pi*(D/1000.0)**2
m = C*A*P*sqrt((k*M/(Z*R*T))*(2/(k+1))**((k+1)/(k-1)))
return m
def pressure(y):
m = y[0]
T=y[1]
rho = m/V
p = rho*R*T/M
return p
def baleq(t, y, D):
# State vector = [m, T]
m = y[0]
T=y[1]
p = pressure(y)
mb = cflow(p, T, D)
dm = -mb
dT = -mb*R*T/(m*Cv) + Q/(m*Cv)
dy = array([dm,dT])
return dy
def main():
try:
dia = float(args[1])
tmax = int(args[2])
except:
print("You must supply 2 arguments: diameter in mm + maximum simulation time in seconds.")
return None
y0 = array([initmass,T0])
eq10 = lambda t,y: baleq(t,y,dia)
dy = eq10(0, y0)
sol = solver(lambda t,y: baleq(t,y,dia), array([0,tmax]), y0, t_eval=range(0,tmax))
plt.subplot(2,1,1)
plt.plot(sol.t, pressure(sol.y)/100000, label="Pressure")
plt.plot(sol.t, ystrength(sol.y[1])/100000, label="Yield strength")
plt.xlabel('t/s')
plt.ylabel('p/barg')
plt.title('Pressure profile vs yield strength')
plt.legend()
plt.subplot(2,1,2)
plt.plot(sol.t, sol.y[0])
plt.xlabel('t/s')
plt.ylabel('m/kg')
plt.title('Mass in control volume')
plt.tight_layout()
plt.show()
if __name__ == "__main__":
main()
| [
"matplotlib"
] |
7b5521174362d7801262ac39e46547fcc08f5e19 | Python | yeshwanthyeshu/Practise_ML | /classification_1.py | UTF-8 | 5,686 | 3.03125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Mon Jul 2 15:06:19 2018
@author: c_ymelpati
Classification Machine learning challange
"""
"""
Steps:
Importing the data
Preprocessing the data
If necessary cross validation
applying the model
testing the model
evaluating the model
"""
########################## Importing the data #################################
import pandas as pd
import numpy as np
train = pd.read_csv("./Dataset/train.csv")
train.head()
# describe the train
train.describe()
train.info()
########################## Preprocessing the data #############################
# x and y
y = train.iloc[:,2].values
# Unique values of y
y_unique = ['Grade 1', 'Grade 2', 'Grade 3', 'Grade 4', 'Grade 5']
# Encoding categorical data
# Encoding the dependent Variable
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
# One-hot encode the area_assesed column of train
lben = LabelEncoder()
temp = lben.fit_transform(train.area_assesed)
ohe = OneHotEncoder()
temp = ohe.fit_transform(temp.reshape(-1,1)).toarray()
tempdf = pd.DataFrame(temp)
train = pd.concat([train, tempdf], axis = 1)
train.head()
# successfully one-hot encoded the area_assesed column; drop it (and the target damage_grade) from train below
train_removed_area_assesed = train.drop(['area_assesed', 'damage_grade'], axis = 1)
x = train_removed_area_assesed.values
# For y
# Encoding categorical data
# Encoding the Independent Variable
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
labelencoder_y = LabelEncoder()
y = labelencoder_y.fit_transform(y)
########################## Cross validation## #################################
from sklearn.model_selection import train_test_split
xtrain, xtest, ytrain, ytest = train_test_split(x,y , test_size= 0.25,
random_state= 5)
########################## Applying the models ################################
##### Model: 1 K nearest neighbors
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn import neighbors, datasets
n_neighbors = 5
knn_features = np.nan_to_num(np.delete(xtrain, 0, 1))  # drop the id column and fill NaNs (same preprocessing as the random forest below)
clf = neighbors.KNeighborsClassifier(n_neighbors, weights='uniform')
clf.fit(knn_features, ytrain)
##### Model: 2 Random Forest
from sklearn.ensemble import RandomForestClassifier
classifier = RandomForestClassifier(n_estimators = 10, criterion = 'entropy', random_state = 0)
xtrain_id = xtrain[:,0]
xtrain_wo_id = np.nan_to_num(np.delete(xtrain, 0,1))
xtest_id = xtest[:,0]
xtest_wo_id = np.nan_to_num(np.delete(xtest,0,1))
classifier.fit(xtrain_wo_id, ytrain)
# Predicting the Test set results
y_pred = classifier.predict(xtest_wo_id)
######################
# Random Forest Classification
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Importing the dataset
dataset = pd.read_csv("./Dataset/train.csv")
# has_repair_started has nan values
dataset = dataset.fillna(dataset.mean())
X = dataset.drop(['damage_grade'], axis = 1).values
y = dataset.iloc[:, 2].values
# Splitting the dataset into the Training set and Test set
from sklearn.cross_validation import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state = 0)
# Feature Scaling
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_train[:,2:] = sc.fit_transform(X_train[:,2:])
X_test[:,2:] = sc.transform(X_test[:,2:])
# Fitting Random Forest Classification to the Training set
from sklearn.ensemble import RandomForestClassifier
classifier = RandomForestClassifier(n_estimators = 10, criterion = 'entropy', random_state = 0)
classifier.fit(X_train, y_train)
# Predicting the Test set results
y_pred = classifier.predict(X_test)
# Making the Confusion Matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_test, y_pred)
# Visualising the Training set results
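# NOTE: the decision-boundary plots below are template code that assume a classifier trained on
# exactly two features (hence the 'Age' / 'Estimated Salary' labels); they will not run unchanged
# against this multi-feature dataset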
from matplotlib.colors import ListedColormap
X_set, y_set = X_train, y_train
X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),
np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))
plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
alpha = 0.75, cmap = ListedColormap(('red', 'green')))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(y_set)):
plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
c = ListedColormap(('red', 'green'))(i), label = j)
plt.title('Random Forest Classification (Training set)')
plt.xlabel('Age')
plt.ylabel('Estimated Salary')
plt.legend()
plt.show()
# Visualising the Test set results
from matplotlib.colors import ListedColormap
X_set, y_set = X_test, y_test
X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),
np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))
plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
alpha = 0.75, cmap = ListedColormap(('red', 'green')))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(y_set)):
plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
c = ListedColormap(('red', 'green'))(i), label = j)
plt.title('Random Forest Classification (Test set)')
plt.xlabel('Age')
plt.ylabel('Estimated Salary')
plt.legend()
plt.show()
| [
"matplotlib"
] |
f971ae0e1460fd718467ce806ad739a1fd1afc4e | Python | pspsn/DSND-Project-3-Image-Classifier-Application | /predict.py | UTF-8 | 3,803 | 2.53125 | 3 | [
"MIT"
] | permissive | import matplotlib.pyplot as plt
from torchvision import transforms, datasets
from torchvision.models import vgg19, densenet121, vgg16
from torchvision import datasets, models, transforms
import torchvision
from torch import nn, optim
import torch
import torch.nn.functional as F
from collections import OrderedDict
import json
import numpy as np
from PIL import Image
import argparse
parser = argparse.ArgumentParser(description='Predict the type of a flower')
parser.add_argument('--checkpoint', type=str, help='Path to checkpoint' , default='checkpoint.pth')
parser.add_argument('--image_path', type=str, help='Path to file' , default='flowers/test/28/image_05230.jpg')
parser.add_argument('--gpu', type=bool, default=True, help='Whether to use GPU during inference or not')
parser.add_argument('--topk', type=int, help='Number of k to predict' , default=0)
parser.add_argument('--cat_to_name_json', type=str, help='Json file to load for class values to name conversion' , default='cat_to_name.json')
args = parser.parse_args()
with open(args.cat_to_name_json, 'r') as f:
cat_to_name = json.load(f)
image_path = args.image_path
device = 'cuda' if args.gpu else 'cpu'
# : Write a function that loads a checkpoint and rebuilds the model
def load_checkpoint(checkpoint):
checkpoint = torch.load(args.checkpoint)
model = getattr(torchvision.models, checkpoint['arch'])(pretrained=True)
model.classifier = checkpoint['classifier']
for param in model.parameters():
param.requires_grad = False
model.load_state_dict(checkpoint['state_dict'])
optimizer = checkpoint['optimizer']
optimizer.load_state_dict(checkpoint['optimizer_dict'])
return model, checkpoint
model, checkpoint = load_checkpoint(args.checkpoint)
def process_image(image):
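    # resize so the shorter side is 256 px, centre-crop to 224x224, normalise with the ImageNet
    # mean/std, and reorder to channels-first as torchvision models expect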
image = image.resize((round(256*image.size[0]/image.size[1]) if image.size[0]>image.size[1] else 256,
round(256*image.size[1]/image.size[0]) if image.size[1]>image.size[0] else 256))
image = image.crop((image.size[0]/2-224/2, image.size[1]/2-224/2, image.size[0]/2+224/2, image.size[1]/2+224/2))
np_image = (np.array(image)/255-[0.485,0.456,0.406])/[0.229, 0.224, 0.225]
np_image = np_image.transpose((2,0,1))
''' Scales, crops, and normalizes a PIL image for a PyTorch model,
returns an Numpy array
'''
return torch.from_numpy(np_image)
# : Process a PIL image for use in a PyTorch model
im = Image.open(image_path)
processed_im = process_image(im)
def predict(image_path, model, topk=5):
''' Predict the class (or classes) of an image using a trained deep learning model.
'''
# : Implement the code to predict the class from an image file
im = Image.open(image_path)
processed_im = process_image(im).unsqueeze(0)
model.to(device)
model.eval()
with torch.no_grad():
processed_im = processed_im.to(device).float()
output = model(processed_im)
ps = torch.exp(output)
pred = ps.topk(topk)
flower_ids = pred[1][0].to('cpu')
flower_ids = torch.Tensor.numpy(flower_ids)
probs = pred[0][0].to('cpu')
idx_to_class = {k:v for v,k in checkpoint['class_to_idx'].items()}
flower_names = np.array([cat_to_name[idx_to_class[x]] for x in flower_ids])
return probs, flower_names
if args.topk:
probs, flower_names = predict(image_path, model, args.topk)
print('Probabilities of top {} flowers:'.format(args.topk))
for i in range(args.topk):
print('{} : {:.2f}'.format(flower_names[i],probs[i]))
else:
probs, flower_names = predict(image_path, model)
print('Flower is predicted to be {} with {:.2f} probability'.format(flower_names[0], probs[0])) | [
"matplotlib"
] |
f7758b3c952d9b84d6b852c716f316ac85b86dec | Python | Doxterpepper/HWR | /Main.py | UTF-8 | 2,739 | 2.765625 | 3 | [] | no_license | import csv
import sys
import NeuralNet
import pdb
import IO
import matplotlib.pyplot as plt
import datetime
import numpy as np
def vectorize(x):
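    # one-hot encode a digit label, e.g. vectorize(3) -> [0, 0, 0, 1, 0, 0, 0, 0, 0, 0]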
vect = np.zeros(10)
vect[int(x)] = 1
return vect
# Read data and format for the neural network
# train flag specifies if data being read is
# training data with correct answers appended
def getData(d, train=False):
data = list()
f = open (d, 'r')
reader = csv.reader(f)
if train:
for row in reader:
row = [float(x) for x in row]
rd = row[0:len(row)-1]
ans = row[-1]
data.append((np.array(rd), vectorize(ans)))
else:
for row in reader:
data.append(np.array([float(x) for x in row]))
f.close()
return data
def getHighest(data):
m = 0
index = 0
for i in range(len(data)):
if (data[i] > m):
m = data[i]
index = i
return index
def average(val):
avg = 0
#pdb.set_trace()
for i in val:
avg += i
return avg / len(val)
def rate(nn, datas):
averages = list()
for data, y in datas:
out = nn.run(data)
averages.append(abs(y[getHighest(out)] - out[getHighest(out)]))
return average(averages)
if __name__ == '__main__':
# Parse the command line parameters
hl = []
alpha = .005
iterations = 50
hls = ""
"""
for i in range(len(sys.argv)):
if sys.argv[i] == '-nn':
sys.argv.pop(i)
load = True
fn = sys.argv[i]
else:
iterations = int(sys.argv.pop(i))
"""
sys.argv.pop(0)
iterations = int(sys.argv.pop(0))
alpha = float(sys.argv.pop(0))
for arg in sys.argv:
hl.append(int(arg))
print("Running Neural network with parameters:")
print("iterations over training data: " + str(iterations))
print("inputs: 64")
print("outputs: 10")
print("alpha: " + str(alpha))
sys.stdout.write("Hidden: ")
hls = str(hl)
info = str(iterations)+ "_" + str(alpha) + "_" + hls
print(hls)
training = IO.getData('rec/optdigits_train.txt', True)
n = NeuralNet.NeuralNet(64, 10, hl)
n.train(training, alpha, iterations)
n.export("data/nets/" + info)
test = getData('rec/optdigits_test.txt', True)
#print(rate(n, test))
plt.plot(n.num_correct)
plt.savefig("data/graphs/"+ info + ".jpg")
correct = 0
dat = ''
for data, y in test:
out = n.run(data)
correct += y[NeuralNet.NeuralNet.getHighest(out)]
for i in range(len(out)):
sys.stdout.write(str(i) + ": ")
dat += str(i) + ": "
sys.stdout.write(str(out[i]))
dat += str(out[i]) + "\n"
print()
print("found:" + str(getHighest(out)))
print("actual: " + str(getHighest(y)))
print()
dat += "found: " + str(getHighest(out)) + "\n"
dat += "actual: " + str(getHighest(y)) + "\n"
print("Percent correct: " + str(correct / len(test)))
dat += "Percent correct: " + str(correct / len(test)) + "\n"
log = open("data/raw/" + info + ".out", "w")
log.write(dat)
| [
"matplotlib"
] |
e235f08e456e8ff92b0b340f4532d86e1b7b9e05 | Python | yasminmedeiros/AnaliseDeSentimentos | /AnáliseComRaspberry/Codigo.py | UTF-8 | 2,596 | 3 | 3 | [] | no_license |
import speech_recognition as sr
import pandas as pd
from nltk.corpus import stopwords
import matplotlib.pyplot as plt
from nltk.probability import FreqDist
from googletrans import Translator
from nltk.sentiment import SentimentAnalyzer
from nltk.sentiment.vader import SentimentIntensityAnalyzer
from nltk.sentiment.util import *
# Obtain audio from the microphone
r = sr.Recognizer()
with sr.Microphone() as source:
print("Say something!")
audio = r.listen(source)
def mode1(text):
    # lexicon-based scoring against Portuguese positive/negative word lists
    positiveWords = np.array(pd.read_csv('positive-words_pt.csv', sep='\n'))
    negativeWords = np.array(pd.read_csv('negative-words_pt.csv', sep='\n'))
    stop_words = stopwords.words('portuguese')
    # drop stopwords and strip accents so the spoken words match the lexicon entries
    words = [unicodedata.normalize('NFKD', x.lower().strip()).encode('ascii', errors='ignore').decode('utf-8')
             for x in text.split(' ') if x.lower() not in stop_words]
    pos = 0
    neg = 0
    neu = 0
    for y in words:
if y in positiveWords:
pos+=1
elif y in negativeWords:
neg+=1
else:
neu+=1
graph(pos,neg,neu)
def mode2(text):
translator = Translator()
    # translate to English because the VADER sentiment lexicon is English-only
    text = translator.translate(text, dest='en').text
    sid = SentimentIntensityAnalyzer()
    ss = sid.polarity_scores(text)
    graph(ss['pos'], ss['neg'], ss['neu'])
# Function that analyse text captured from the audio
def analyseText(text):
mode1(text)
mode2(text)
#Function that plot the graph, showing the positive and negative percetage
def graph(pos,neg,neu):
sentimento = ['Neu','Neg','Pos']
num = [neu,neg,pos]
plt.rcParams.update({'font.size': 16})
fig, ax = plt.subplots(figsize=(12,5))
plt.bar(sentimento,num, color = 'lightgrey')
for p in ax.patches:
ax.annotate('{}'.format(p.get_height()), (p.get_x()+0.35, p.get_height()))
plt.rc("font",family="Times New Roman")
ax.set_xlabel("Speech Content")
ax.set_ylabel('Percentage of Speech Frequency')
    ax.set_title('Percentage of Speech Content')
    plt.show()
# recognize speech using Google Cloud Speech
GOOGLE_CLOUD_SPEECH_CREDENTIALS = pd.read_json("credentials.json")
try:
text = r.recognize_google_cloud(audio, credentials_json=GOOGLE_CLOUD_SPEECH_CREDENTIALS)
print("Google Cloud Speech thinks you said " + text)
analyseText(text)
except sr.UnknownValueError:
print("Google Cloud Speech could not understand audio")
except sr.RequestError as e:
print("Could not request results from Google Cloud Speech service; {0}".format(e))
| [
"matplotlib"
] |
543e06e409a070b2efedd7695f3b8822a4921a92 | Python | cylammarco/py_sphere_Voronoi | /voronoi_utility.py | UTF-8 | 39,135 | 3.0625 | 3 | [
"MIT"
] | permissive | '''Author: Tyler Reddy
The purpose of this Python module is to provide utility code for handling spherical Voronoi Diagrams.'''
import scipy
try:
if int(scipy.__version__.split('.')[1]) < 13:
raise ImportError('Module requires version of scipy module >= 0.13.0')
except AttributeError: #handle this for sphinx build process on readthedocs because of module mocking
pass
import circumcircle
import scipy.spatial
import numpy
import numpy.linalg
import pandas
import math
import numpy.random
class IntersectionError(Exception):
pass
def filter_tetrahedron_to_triangle(current_tetrahedron_coord_array):
current_triangle_coord_array = [] #initialize as a list
for row in current_tetrahedron_coord_array: #ugly to use for loop for this, but ok for now!
if row[0] == 0 and row[1] == 0 and row[2] == 0: #filter out origin row
continue
else:
current_triangle_coord_array.append(row)
current_triangle_coord_array = numpy.array(current_triangle_coord_array)
return current_triangle_coord_array
def test_polygon_for_self_intersection(array_ordered_Voronoi_polygon_vertices_2D):
'''Test an allegedly properly-ordered numpy array of Voronoi region vertices in 2D for self-intersection of edges based on algorithm described at http://algs4.cs.princeton.edu/91primitives/'''
total_vertices = array_ordered_Voronoi_polygon_vertices_2D.shape[0]
total_edges = total_vertices
def intersection_test(a,b,c,d):
#code in r & s equations provided on above website, which operate on the 2D coordinates of the edge vertices for edges a - b and c - d
#so: a, b, c, d are numpy arrays of vertex coordinates -- presumably with shape (2,)
intersection = False
denominator = (b[0] - a[0]) * (d[1] - c[1]) - (b[1] - a[1]) * (d[0] - c[0])
r = ( (a[1] - c[1]) * (d[0] - c[0]) - (a[0] - c[0]) * (d[1] - c[1]) ) / denominator
s = ( (a[1] - c[1]) * (b[0] - a[0]) - (a[0] - c[0]) * (b[1] - a[1]) ) / denominator
if (r >= 0 and r <= 1) and (s >= 0 and s <= 1): #conditions for intersection
intersection = True
if intersection:
raise IntersectionError("Voronoi polygon line intersection !")
#go through and test all possible non-consecutive edge combinations for intersection
list_vertex_indices_in_edges = [ [vertex_index, vertex_index + 1] for vertex_index in xrange(total_vertices)]
#for the edge starting from the last point in the Voronoi polygon the index of the final point should be switched to the starting index -- to close the polygon
filtered_list_vertex_indices_in_edges = []
for list_vertex_indices_in_edge in list_vertex_indices_in_edges:
if list_vertex_indices_in_edge[1] == total_vertices:
filtered_list_vertex_indices_in_edge = [list_vertex_indices_in_edge[0],0]
else:
filtered_list_vertex_indices_in_edge = list_vertex_indices_in_edge
filtered_list_vertex_indices_in_edges.append(filtered_list_vertex_indices_in_edge)
for edge_index, list_vertex_indices_in_edge in enumerate(filtered_list_vertex_indices_in_edges):
for edge_index_2, list_vertex_indices_in_edge_2 in enumerate(filtered_list_vertex_indices_in_edges):
if (list_vertex_indices_in_edge[0] not in list_vertex_indices_in_edge_2) and (list_vertex_indices_in_edge[1] not in list_vertex_indices_in_edge_2): #non-consecutive edges
a = array_ordered_Voronoi_polygon_vertices_2D[list_vertex_indices_in_edge[0]]
b = array_ordered_Voronoi_polygon_vertices_2D[list_vertex_indices_in_edge[1]]
c = array_ordered_Voronoi_polygon_vertices_2D[list_vertex_indices_in_edge_2[0]]
d = array_ordered_Voronoi_polygon_vertices_2D[list_vertex_indices_in_edge_2[1]]
intersection_test(a,b,c,d)
def calculate_Vincenty_distance_between_spherical_points(cartesian_array_1,cartesian_array_2,sphere_radius):
'''Apparently, the special case of the Vincenty formula (http://en.wikipedia.org/wiki/Great-circle_distance) may be the most accurate method for calculating great-circle distances.'''
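    #sanity check: antipodal points such as (r,0,0) and (-r,0,0) should return sphere_radius * pi (half of a great circle)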
spherical_array_1 = convert_cartesian_array_to_spherical_array(cartesian_array_1)
spherical_array_2 = convert_cartesian_array_to_spherical_array(cartesian_array_2)
lambda_1 = spherical_array_1[1]
lambda_2 = spherical_array_2[1]
phi_1 = spherical_array_1[2]
phi_2 = spherical_array_2[2]
delta_lambda = abs(lambda_2 - lambda_1)
delta_phi = abs(phi_2 - phi_1)
radian_angle = math.atan2( math.sqrt( (math.sin(phi_2)*math.sin(delta_lambda))**2 + (math.sin(phi_1)*math.cos(phi_2) - math.cos(phi_1)*math.sin(phi_2)*math.cos(delta_lambda) )**2 ), (math.cos(phi_1) * math.cos(phi_2) + math.sin(phi_1) * math.sin(phi_2) * math.cos(delta_lambda) ) )
spherical_distance = sphere_radius * radian_angle
return spherical_distance
def calculate_haversine_distance_between_spherical_points(cartesian_array_1,cartesian_array_2,sphere_radius):
'''Calculate the haversine-based distance between two points on the surface of a sphere. Should be more accurate than the arc cosine strategy. See, for example: http://en.wikipedia.org/wiki/Haversine_formula'''
spherical_array_1 = convert_cartesian_array_to_spherical_array(cartesian_array_1)
spherical_array_2 = convert_cartesian_array_to_spherical_array(cartesian_array_2)
lambda_1 = spherical_array_1[1]
lambda_2 = spherical_array_2[1]
phi_1 = spherical_array_1[2]
phi_2 = spherical_array_2[2]
    #we rewrite the standard Haversine slightly because long/lat is not the same as spherical coordinates - the polar angle phi is the colatitude, which differs from latitude by pi/2
spherical_distance = 2.0 * sphere_radius * math.asin(math.sqrt( ((1 - math.cos(phi_2-phi_1))/2.) + math.sin(phi_1) * math.sin(phi_2) * ( (1 - math.cos(lambda_2-lambda_1))/2.) ))
return spherical_distance
def generate_random_array_spherical_generators(num_generators,sphere_radius,prng_object):
'''Recoded using standard uniform selector over theta and acos phi, http://mathworld.wolfram.com/SpherePointPicking.html
Same as in iPython notebook version'''
u = prng_object.uniform(low=0,high=1,size=num_generators)
v = prng_object.uniform(low=0,high=1,size=num_generators)
theta_array = 2 * math.pi * u
phi_array = numpy.arccos((2*v - 1.0))
r_array = sphere_radius * numpy.ones((num_generators,))
spherical_polar_data = numpy.column_stack((r_array,theta_array,phi_array))
cartesian_random_points = convert_spherical_array_to_cartesian_array(spherical_polar_data)
#filter out any duplicate generators:
df_random_points = pandas.DataFrame(cartesian_random_points)
df_random_points_no_duplicates = df_random_points.drop_duplicates()
array_random_spherical_generators = df_random_points_no_duplicates.as_matrix()
return array_random_spherical_generators
def filter_polygon_vertex_coordinates_for_extreme_proximity(array_ordered_Voronoi_polygon_vertices,sphere_radius):
'''Merge (take the midpoint of) polygon vertices that are judged to be extremely close together and return the filtered polygon vertex array. The purpose is to alleviate numerical complications that may arise during surface area calculations involving polygons with ultra-close / nearly coplanar vertices.'''
while 1:
distance_matrix = scipy.spatial.distance.cdist(array_ordered_Voronoi_polygon_vertices,array_ordered_Voronoi_polygon_vertices,'euclidean')
maximum_euclidean_distance_between_any_vertices = numpy.amax(distance_matrix)
        vertex_merge_threshold = 0.02 #merge any vertices that are separated by less than 2% of the longest inter-vertex distance (may have to play with this value a bit)
threshold_assessment_matrix = distance_matrix / maximum_euclidean_distance_between_any_vertices
row_indices_that_violate_threshold, column_indices_that_violate_threshold = numpy.where((threshold_assessment_matrix < vertex_merge_threshold) & (threshold_assessment_matrix > 0))
if len(row_indices_that_violate_threshold) > 0 and len(column_indices_that_violate_threshold) > 0:
for row, column in zip(row_indices_that_violate_threshold,column_indices_that_violate_threshold):
if not row==column: #ignore diagonal values
first_violating_vertex_index = row
associated_vertex_index = column
new_vertex_at_midpoint = ( array_ordered_Voronoi_polygon_vertices[row] + array_ordered_Voronoi_polygon_vertices[column] ) / 2.0
spherical_polar_coords_new_vertex = convert_cartesian_array_to_spherical_array(new_vertex_at_midpoint)
spherical_polar_coords_new_vertex[0] = sphere_radius #project back to surface of sphere
new_vertex_at_midpoint = convert_spherical_array_to_cartesian_array(spherical_polar_coords_new_vertex)
array_ordered_Voronoi_polygon_vertices[row] = new_vertex_at_midpoint
array_ordered_Voronoi_polygon_vertices = numpy.delete(array_ordered_Voronoi_polygon_vertices,column,0)
break
else: break #no more violating vertices
return array_ordered_Voronoi_polygon_vertices
def calculate_surface_area_of_planar_polygon_in_3D_space(array_ordered_Voronoi_polygon_vertices):
'''Based largely on: http://stackoverflow.com/a/12653810
Use this function when spherical polygon surface area calculation fails (i.e., lots of nearly-coplanar vertices and negative surface area).'''
#unit normal vector of plane defined by points a, b, and c
def unit_normal(a, b, c):
x = numpy.linalg.det([[1,a[1],a[2]],
[1,b[1],b[2]],
[1,c[1],c[2]]])
y = numpy.linalg.det([[a[0],1,a[2]],
[b[0],1,b[2]],
[c[0],1,c[2]]])
z = numpy.linalg.det([[a[0],a[1],1],
[b[0],b[1],1],
[c[0],c[1],1]])
magnitude = (x**2 + y**2 + z**2)**.5
return (x/magnitude, y/magnitude, z/magnitude)
#area of polygon poly
def poly_area(poly):
'''Accepts a list of xyz tuples.'''
assert len(poly) >= 3, "Not a polygon (< 3 vertices)."
total = [0, 0, 0]
N = len(poly)
for i in range(N):
vi1 = poly[i]
vi2 = poly[(i+1) % N]
prod = numpy.cross(vi1, vi2)
total[0] += prod[0]
total[1] += prod[1]
total[2] += prod[2]
result = numpy.dot(total, unit_normal(poly[0], poly[1], poly[2]))
return abs(result/2)
list_vertices = [] #need a list of tuples for above function
for coord in array_ordered_Voronoi_polygon_vertices:
list_vertices.append(tuple(coord))
planar_polygon_surface_area = poly_area(list_vertices)
return planar_polygon_surface_area
def calculate_surface_area_of_a_spherical_Voronoi_polygon(array_ordered_Voronoi_polygon_vertices,sphere_radius):
'''Calculate the surface area of a polygon on the surface of a sphere. Based on equation provided here: http://mathworld.wolfram.com/LHuiliersTheorem.html
Decompose into triangles, calculate excess for each'''
#have to convert to unit sphere before applying the formula
spherical_coordinates = convert_cartesian_array_to_spherical_array(array_ordered_Voronoi_polygon_vertices)
spherical_coordinates[...,0] = 1.0
array_ordered_Voronoi_polygon_vertices = convert_spherical_array_to_cartesian_array(spherical_coordinates)
#handle nearly-degenerate vertices on the unit sphere by returning an area close to 0 -- may be better options, but this is my current solution to prevent crashes, etc.
#seems to be relatively rare in my own work, but sufficiently common to cause crashes when iterating over large amounts of messy data
if scipy.spatial.distance.pdist(array_ordered_Voronoi_polygon_vertices).min() < (10 ** -7):
return 10 ** -8
else:
n = array_ordered_Voronoi_polygon_vertices.shape[0]
#point we start from
root_point = array_ordered_Voronoi_polygon_vertices[0]
totalexcess = 0
#loop from 1 to n-2, with point 2 to n-1 as other vertex of triangle
# this could definitely be written more nicely
b_point = array_ordered_Voronoi_polygon_vertices[1]
root_b_dist = calculate_haversine_distance_between_spherical_points(root_point, b_point, 1.0)
for i in 1 + numpy.arange(n - 2):
a_point = b_point
b_point = array_ordered_Voronoi_polygon_vertices[i+1]
root_a_dist = root_b_dist
root_b_dist = calculate_haversine_distance_between_spherical_points(root_point, b_point, 1.0)
a_b_dist = calculate_haversine_distance_between_spherical_points(a_point, b_point, 1.0)
s = (root_a_dist + root_b_dist + a_b_dist) / 2
totalexcess += 4 * math.atan(math.sqrt( math.tan(0.5 * s) * math.tan(0.5 * (s-root_a_dist)) * math.tan(0.5 * (s-root_b_dist)) * math.tan(0.5 * (s-a_b_dist))))
return totalexcess * (sphere_radius ** 2)
def calculate_and_sum_up_inner_sphere_surface_angles_Voronoi_polygon(array_ordered_Voronoi_polygon_vertices,sphere_radius):
'''Takes an array of ordered Voronoi polygon vertices (for a single generator) and calculates the sum of the inner angles on the sphere surface. The resulting value is theta in the equation provided here: http://mathworld.wolfram.com/SphericalPolygon.html '''
#if sphere_radius != 1.0:
#try to deal with non-unit circles by temporarily normalizing the data to radius 1:
#spherical_polar_polygon_vertices = convert_cartesian_array_to_spherical_array(array_ordered_Voronoi_polygon_vertices)
#spherical_polar_polygon_vertices[...,0] = 1.0
#array_ordered_Voronoi_polygon_vertices = convert_spherical_array_to_cartesian_array(spherical_polar_polygon_vertices)
num_vertices_in_Voronoi_polygon = array_ordered_Voronoi_polygon_vertices.shape[0] #the number of rows == number of vertices in polygon
#some debugging here -- I'm concerned that some sphere radii are demonstrating faulty projection of coordinates (some have r = 1, while others have r = sphere_radius -- see workflowy for more detailed notes)
spherical_polar_polygon_vertices = convert_cartesian_array_to_spherical_array(array_ordered_Voronoi_polygon_vertices)
min_vertex_radius = spherical_polar_polygon_vertices[...,0].min()
#print 'before array projection check'
assert sphere_radius - min_vertex_radius < 0.1, "The minimum projected Voronoi vertex r value should match the sphere_radius of {sphere_radius}, but got {r_min}.".format(sphere_radius=sphere_radius,r_min=min_vertex_radius)
#print 'after array projection check'
#two edges (great circle arcs actually) per vertex are needed to calculate tangent vectors / inner angle at that vertex
current_vertex_index = 0
list_Voronoi_poygon_angles_radians = []
while current_vertex_index < num_vertices_in_Voronoi_polygon:
current_vertex_coordinate = array_ordered_Voronoi_polygon_vertices[current_vertex_index]
if current_vertex_index == 0:
previous_vertex_index = num_vertices_in_Voronoi_polygon - 1
else:
previous_vertex_index = current_vertex_index - 1
if current_vertex_index == num_vertices_in_Voronoi_polygon - 1:
next_vertex_index = 0
else:
next_vertex_index = current_vertex_index + 1
#try using the law of cosines to produce the angle at the current vertex (basically using a subtriangle, which is a common strategy anyway)
current_vertex = array_ordered_Voronoi_polygon_vertices[current_vertex_index]
previous_vertex = array_ordered_Voronoi_polygon_vertices[previous_vertex_index]
next_vertex = array_ordered_Voronoi_polygon_vertices[next_vertex_index]
#produce a,b,c for law of cosines using spherical distance (http://mathworld.wolfram.com/SphericalDistance.html)
#old_a = math.acos(numpy.dot(current_vertex,next_vertex))
#old_b = math.acos(numpy.dot(next_vertex,previous_vertex))
#old_c = math.acos(numpy.dot(previous_vertex,current_vertex))
#print 'law of cosines a,b,c:', old_a,old_b,old_c
#a = calculate_haversine_distance_between_spherical_points(current_vertex,next_vertex,sphere_radius)
#b = calculate_haversine_distance_between_spherical_points(next_vertex,previous_vertex,sphere_radius)
#c = calculate_haversine_distance_between_spherical_points(previous_vertex,current_vertex,sphere_radius)
a = calculate_Vincenty_distance_between_spherical_points(current_vertex,next_vertex,sphere_radius)
b = calculate_Vincenty_distance_between_spherical_points(next_vertex,previous_vertex,sphere_radius)
c = calculate_Vincenty_distance_between_spherical_points(previous_vertex,current_vertex,sphere_radius)
#print 'law of haversines a,b,c:', a,b,c
#print 'Vincenty edge lengths a,b,c:', a,b,c
pre_acos_term = (math.cos(b) - math.cos(a)*math.cos(c)) / (math.sin(a)*math.sin(c))
if abs(pre_acos_term) > 1.0:
print 'angle calc vertex coords (giving acos violation):', [convert_cartesian_array_to_spherical_array(vertex) for vertex in [current_vertex,previous_vertex,next_vertex]]
print 'Vincenty edge lengths (giving acos violation) a,b,c:', a,b,c
print 'pre_acos_term:', pre_acos_term
#break
current_vertex_inner_angle_on_sphere_surface = math.acos(pre_acos_term)
list_Voronoi_poygon_angles_radians.append(current_vertex_inner_angle_on_sphere_surface)
current_vertex_index += 1
if abs(pre_acos_term) > 1.0:
theta = 0
else:
theta = numpy.sum(numpy.array(list_Voronoi_poygon_angles_radians))
return theta
def convert_cartesian_array_to_spherical_array(coord_array,angle_measure='radians'):
'''Take shape (N,3) cartesian coord_array and return an array of the same shape in spherical polar form (r, theta, phi). Based on StackOverflow response: http://stackoverflow.com/a/4116899
use radians for the angles by default, degrees if angle_measure == 'degrees' '''
spherical_coord_array = numpy.zeros(coord_array.shape)
xy = coord_array[...,0]**2 + coord_array[...,1]**2
spherical_coord_array[...,0] = numpy.sqrt(xy + coord_array[...,2]**2)
spherical_coord_array[...,1] = numpy.arctan2(coord_array[...,1], coord_array[...,0])
spherical_coord_array[...,2] = numpy.arccos(coord_array[...,2] / spherical_coord_array[...,0])
if angle_measure == 'degrees':
spherical_coord_array[...,1] = numpy.degrees(spherical_coord_array[...,1])
spherical_coord_array[...,2] = numpy.degrees(spherical_coord_array[...,2])
return spherical_coord_array
def convert_spherical_array_to_cartesian_array(spherical_coord_array,angle_measure='radians'):
'''Take shape (N,3) spherical_coord_array (r,theta,phi) and return an array of the same shape in cartesian coordinate form (x,y,z). Based on the equations provided at: http://en.wikipedia.org/wiki/List_of_common_coordinate_transformations#From_spherical_coordinates
use radians for the angles by default, degrees if angle_measure == 'degrees' '''
cartesian_coord_array = numpy.zeros(spherical_coord_array.shape)
#convert to radians if degrees are used in input (prior to Cartesian conversion process)
if angle_measure == 'degrees':
spherical_coord_array[...,1] = numpy.deg2rad(spherical_coord_array[...,1])
spherical_coord_array[...,2] = numpy.deg2rad(spherical_coord_array[...,2])
#now the conversion to Cartesian coords
cartesian_coord_array[...,0] = spherical_coord_array[...,0] * numpy.cos(spherical_coord_array[...,1]) * numpy.sin(spherical_coord_array[...,2])
cartesian_coord_array[...,1] = spherical_coord_array[...,0] * numpy.sin(spherical_coord_array[...,1]) * numpy.sin(spherical_coord_array[...,2])
cartesian_coord_array[...,2] = spherical_coord_array[...,0] * numpy.cos(spherical_coord_array[...,2])
return cartesian_coord_array
def produce_triangle_vertex_coordinate_array_Delaunay_sphere(hull_instance):
'''Return shape (N,3,3) numpy array of the Delaunay triangle vertex coordinates on the surface of the sphere.'''
list_points_vertices_Delaunay_triangulation = []
for simplex in hull_instance.simplices: #for each simplex (face; presumably a triangle) of the convex hull
convex_hull_triangular_facet_vertex_coordinates = hull_instance.points[simplex]
assert convex_hull_triangular_facet_vertex_coordinates.shape == (3,3), "Triangular facet of convex hull should be a triangle in 3D space specified by coordinates in a shape (3,3) numpy array."
list_points_vertices_Delaunay_triangulation.append(convex_hull_triangular_facet_vertex_coordinates)
array_points_vertices_Delaunay_triangulation = numpy.array(list_points_vertices_Delaunay_triangulation)
return array_points_vertices_Delaunay_triangulation
def produce_array_Voronoi_vertices_on_sphere_surface(facet_coordinate_array_Delaunay_triangulation,sphere_radius,sphere_centroid):
'''Return shape (N,3) array of coordinates for the vertices of the Voronoi diagram on the sphere surface given a shape (N,3,3) array of Delaunay triangulation vertices.'''
assert facet_coordinate_array_Delaunay_triangulation.shape[1:] == (3,3), "facet_coordinate_array_Delaunay_triangulation should have shape (N,3,3)."
#draft numpy vectorized workflow to avoid Python for loop
    facet_normals_array = numpy.cross(facet_coordinate_array_Delaunay_triangulation[...,1,:] - facet_coordinate_array_Delaunay_triangulation[...,0,:],facet_coordinate_array_Delaunay_triangulation[...,2,:] - facet_coordinate_array_Delaunay_triangulation[...,0,:]) #use a single ellipsis per index (NumPy no longer accepts more than one '...')
facet_normal_magnitudes = numpy.linalg.norm(facet_normals_array,axis=1)
facet_normal_unit_vector_array = facet_normals_array / numpy.column_stack((facet_normal_magnitudes,facet_normal_magnitudes,facet_normal_magnitudes))
#try to ensure that facet normal faces the correct direction (i.e., out of sphere)
triangle_centroid_array = numpy.average(facet_coordinate_array_Delaunay_triangulation,axis=1)
#normalize the triangle_centroid to unit sphere distance for the purposes of the following directionality check
array_triangle_centroid_spherical_coords = convert_cartesian_array_to_spherical_array(triangle_centroid_array)
array_triangle_centroid_spherical_coords[...,0] = 1.0
triangle_centroid_array = convert_spherical_array_to_cartesian_array(array_triangle_centroid_spherical_coords)
#the Euclidean distance between the triangle centroid and the facet normal should be smaller than the sphere centroid to facet normal distance, otherwise, need to invert the vector
triangle_to_normal_distance_array = numpy.linalg.norm(triangle_centroid_array - facet_normal_unit_vector_array,axis=1)
sphere_centroid_to_normal_distance_array = numpy.linalg.norm(sphere_centroid-facet_normal_unit_vector_array,axis=1)
delta_value_array = sphere_centroid_to_normal_distance_array - triangle_to_normal_distance_array
facet_normal_unit_vector_array[delta_value_array < -0.1] *= -1.0 #need to rotate the vector so that it faces out of the circle
facet_normal_unit_vector_array *= sphere_radius #adjust for radius of sphere
array_Voronoi_vertices = facet_normal_unit_vector_array
assert array_Voronoi_vertices.shape[1] == 3, "The array of Voronoi vertices on the sphere should have shape (N,3)."
return array_Voronoi_vertices
class Voronoi_Sphere_Surface:
'''Voronoi diagrams on the surface of a sphere.
Parameters
----------
points : *array, shape (npoints, 3)*
Coordinates of points used to construct a Voronoi diagram on the surface of a sphere.
sphere_radius : *float*
Radius of the sphere (providing radius is more accurate than forcing an estimate). Default: None (force estimation).
sphere_center_origin_offset_vector : *array, shape (3,)*
A 1D numpy array that can be subtracted from the generators (original data points) to translate the center of the sphere back to the origin. Default: None assumes already centered at origin.
Notes
-----
The spherical Voronoi diagram algorithm proceeds as follows. The Convex Hull of the input points (generators) is calculated, and is equivalent to their Delaunay triangulation on the surface of the sphere [Caroli]_. A 3D Delaunay tetrahedralization is obtained by including the origin of the coordinate system as the fourth vertex of each simplex of the Convex Hull. The circumcenters of all tetrahedra in the system are calculated and projected to the surface of the sphere, producing the Voronoi vertices. The Delaunay tetrahedralization neighbour information is then used to order the Voronoi region vertices around each generator. The latter approach is substantially less sensitive to floating point issues than angle-based methods of Voronoi region vertex sorting.
The surface area of spherical polygons is calculated by decomposing them into triangles and using L'Huilier's Theorem to calculate the spherical excess of each triangle [Weisstein]_. The sum of the spherical excesses is multiplied by the square of the sphere radius to obtain the surface area of the spherical polygon. For nearly-degenerate spherical polygons an area of approximately 0 is returned by default, rather than attempting the unstable calculation.
Empirical assessment of spherical Voronoi algorithm performance suggests quadratic time complexity (loglinear is optimal, but algorithms are more challenging to implement). The reconstitution of the surface area of the sphere, measured as the sum of the surface areas of all Voronoi regions, is closest to 100 % for larger (>> 10) numbers of generators.
References
----------
.. [Caroli] Caroli et al. Robust and Efficient Delaunay triangulations of points on or close to a sphere. Research Report RR-7004, 2009.
.. [Weisstein] "L'Huilier's Theorem." From MathWorld--A Wolfram Web Resource. http://mathworld.wolfram.com/LHuiliersTheorem.html
Examples
--------
Produce a Voronoi diagram for a pseudo-random set of points on the unit sphere:
>>> import matplotlib
>>> import matplotlib.pyplot as plt
>>> import matplotlib.colors as colors
>>> from mpl_toolkits.mplot3d import Axes3D
>>> from mpl_toolkits.mplot3d.art3d import Poly3DCollection
>>> import numpy as np
>>> import scipy as sp
>>> import voronoi_utility
>>> #pin down the pseudo random number generator (prng) object to avoid certain pathological generator sets
>>> prng = np.random.RandomState(117) #otherwise, would need to filter the random data to ensure Voronoi diagram is possible
>>> #produce 1000 random points on the unit sphere using the above seed
>>> random_coordinate_array = voronoi_utility.generate_random_array_spherical_generators(1000,1.0,prng)
>>> #produce the Voronoi diagram data
>>> voronoi_instance = voronoi_utility.Voronoi_Sphere_Surface(random_coordinate_array,1.0)
>>> dictionary_voronoi_polygon_vertices = voronoi_instance.voronoi_region_vertices_spherical_surface()
>>> #plot the Voronoi diagram
>>> fig = plt.figure()
>>> fig.set_size_inches(2,2)
>>> ax = fig.add_subplot(111, projection='3d')
>>> for generator_index, voronoi_region in dictionary_voronoi_polygon_vertices.iteritems():
... random_color = colors.rgb2hex(sp.rand(3))
... #fill in the Voronoi region (polygon) that contains the generator:
... polygon = Poly3DCollection([voronoi_region],alpha=1.0)
... polygon.set_color(random_color)
... ax.add_collection3d(polygon)
>>> ax.set_xlim(-1,1);ax.set_ylim(-1,1);ax.set_zlim(-1,1);
(-1, 1)
(-1, 1)
(-1, 1)
>>> ax.set_xticks([-1,1]);ax.set_yticks([-1,1]);ax.set_zticks([-1,1]); #doctest: +ELLIPSIS
[<matplotlib.axis.XTick object at 0x...>, <matplotlib.axis.XTick object at 0x...>]
[<matplotlib.axis.XTick object at 0x...>, <matplotlib.axis.XTick object at 0x...>]
[<matplotlib.axis.XTick object at 0x...>, <matplotlib.axis.XTick object at 0x...>]
>>> plt.tick_params(axis='both', which='major', labelsize=6)
.. image:: example_random_Voronoi_plot.png
Now, calculate the surface areas of the Voronoi region polygons and verify that the reconstituted surface area is sensible:
>>> import math
>>> dictionary_voronoi_polygon_surface_areas = voronoi_instance.voronoi_region_surface_areas_spherical_surface()
>>> theoretical_surface_area_unit_sphere = 4 * math.pi
>>> reconstituted_surface_area_Voronoi_regions = sum(dictionary_voronoi_polygon_surface_areas.itervalues())
>>> percent_area_recovery = round((reconstituted_surface_area_Voronoi_regions / theoretical_surface_area_unit_sphere) * 100., 5)
>>> print percent_area_recovery
99.91979
For completeness, produce the Delaunay triangulation on the surface of the unit sphere for the same data set:
>>> Delaunay_triangles = voronoi_instance.delaunay_triangulation_spherical_surface()
>>> fig2 = plt.figure()
>>> fig2.set_size_inches(2,2)
>>> ax = fig2.add_subplot(111, projection='3d')
>>> for triangle_coordinate_array in Delaunay_triangles:
... m = ax.plot(triangle_coordinate_array[...,0],triangle_coordinate_array[...,1],triangle_coordinate_array[...,2],c='r',alpha=0.1)
... connecting_array = np.delete(triangle_coordinate_array,1,0)
... n = ax.plot(connecting_array[...,0],connecting_array[...,1],connecting_array[...,2],c='r',alpha=0.1)
>>> o = ax.scatter(random_coordinate_array[...,0],random_coordinate_array[...,1],random_coordinate_array[...,2],c='k',lw=0,s=0.9)
>>> ax.set_xlim(-1,1);ax.set_ylim(-1,1);ax.set_zlim(-1,1);
(-1, 1)
(-1, 1)
(-1, 1)
>>> ax.set_xticks([-1,1]);ax.set_yticks([-1,1]);ax.set_zticks([-1,1]); #doctest: +ELLIPSIS
[<matplotlib.axis.XTick object at 0x...>, <matplotlib.axis.XTick object at 0x...>]
[<matplotlib.axis.XTick object at 0x...>, <matplotlib.axis.XTick object at 0x...>]
[<matplotlib.axis.XTick object at 0x...>, <matplotlib.axis.XTick object at 0x...>]
>>> plt.tick_params(axis='both', which='major', labelsize=6)
.. image:: example_Delaunay.png
'''
def __init__(self,points,sphere_radius=None,sphere_center_origin_offset_vector=None):
if numpy.all(sphere_center_origin_offset_vector):
self.original_point_array = points - sphere_center_origin_offset_vector #translate generator data such that sphere center is at origin
else:
self.original_point_array = points
self.sphere_centroid = numpy.zeros((3,)) #already at origin, or has been moved to origin
if not sphere_radius:
self.estimated_sphere_radius = numpy.average(scipy.spatial.distance.cdist(self.original_point_array,self.sphere_centroid[numpy.newaxis,:]))
else:
self.estimated_sphere_radius = sphere_radius #if the radius of the sphere is known, it is pobably best to specify to avoid centroid bias in radius estimation, etc.
def delaunay_triangulation_spherical_surface(self):
'''Delaunay tessellation of the points on the surface of the sphere. This is simply the 3D convex hull of the points. Returns a shape (N,3,3) array of points representing the vertices of the Delaunay triangulation on the sphere (i.e., N three-dimensional triangle vertex arrays).'''
hull = scipy.spatial.ConvexHull(self.original_point_array)
array_points_vertices_Delaunay_triangulation = produce_triangle_vertex_coordinate_array_Delaunay_sphere(hull)
return array_points_vertices_Delaunay_triangulation
def voronoi_region_vertices_spherical_surface(self):
'''Returns a dictionary with the sorted (non-intersecting) polygon vertices for the Voronoi regions associated with each generator (original data point) index. A dictionary entry would be structured as follows: `{generator_index : array_polygon_vertices, ...}`.'''
#use strategy for Voronoi region generation discussed at PyData London 2015 with Ross Hemsley and Nikolai Nowaczyk
#step 2: perform 3D Delaunay triangulation on data set that includes the extra generator
tri = scipy.spatial.ConvexHull(self.original_point_array) #using ConvexHull is much faster in scipy (vs. Delaunay), but here we only get the triangles on the sphere surface in the simplices object (no longer adding an extra point at the origin at this stage)
#add the origin to each of the simplices to get the same tetrahedra we'd have gotten from Delaunay tetrahedralization
simplex_coords = tri.points[tri.simplices] #triangles on surface surface
simplex_coords = numpy.insert(simplex_coords, 3, numpy.zeros((1,3)), axis = 1)
#step 3: produce circumspheres / circumcenters of tetrahedra from 3D Delaunay
array_circumcenter_coords = circumcircle.calc_circumcenter_circumsphere_tetrahedron_vectorized(simplex_coords)
#step 4: project tetrahedron circumcenters up to the surface of the sphere, to produce the Voronoi vertices
array_vector_lengths = scipy.spatial.distance.cdist(array_circumcenter_coords, numpy.zeros((1,3)))
array_Voronoi_vertices = (self.estimated_sphere_radius / numpy.abs(array_vector_lengths)) * array_circumcenter_coords
#step 5: use the Delaunay tetrahedralization neighbour information to connect the Voronoi vertices around the generators, to produce the Voronoi regions
dictionary_sorted_Voronoi_point_coordinates_for_each_generator = {}
array_tetrahedra = simplex_coords
generator_index = 0
generator_index_array = numpy.arange(self.original_point_array.shape[0])
filter_tuple = numpy.where((numpy.expand_dims(tri.simplices, -1) == generator_index_array).any(axis=1))
df = pandas.DataFrame({'generator_indices' : filter_tuple[1]}, index = filter_tuple[0])
gb = df.groupby('generator_indices')
dictionary_generators_and_triangle_indices_containing_those_generators = gb.groups
for generator in tri.points[:-1]:
indices_of_triangles_surrounding_generator = dictionary_generators_and_triangle_indices_containing_those_generators[generator_index]
#pick any one of the triangles surrounding the generator and pick a non-generator vertex
first_tetrahedron_index = indices_of_triangles_surrounding_generator[0]
first_tetrahedron = array_tetrahedra[first_tetrahedron_index]
first_triangle = first_tetrahedron[:-1,...]
#pick one of the two non-generator vertices in the first triangle
indices_non_generator_vertices_first_triangle = numpy.unique(numpy.where(first_triangle != generator)[0])
ordered_list_tetrahedron_indices_surrounding_current_generator = [first_tetrahedron_index]
#determine the appropriate ordering of Voronoi vertices to close the Voronoi region (polygon) by traversing the Delaunay neighbour data structure from scipy
vertices_remaining = len(indices_of_triangles_surrounding_generator) - 1
#choose the neighbour opposite the first non-generator vertex of the first triangle
neighbour_tetrahedral_index = tri.neighbors[first_tetrahedron_index][indices_non_generator_vertices_first_triangle[0]]
ordered_list_tetrahedron_indices_surrounding_current_generator.append(neighbour_tetrahedral_index)
vertices_remaining -= 1
#for all subsequent triangles it is the common non-generator vertex with the previous neighbour that should be used to propagate the connection chain to the following neighbour
            #the common vertex with the previous neighbour is the vertex of the previous neighbour that was NOT used to locate the current neighbour
#since there are only two candidate vertices on the previous neighbour and I've chosen to use the vertex with index 0, the remaining vertex on the previous neighbour is the non-generator vertex with index 1
common_vertex_coordinate = first_triangle[indices_non_generator_vertices_first_triangle[1]]
while vertices_remaining > 0:
current_tetrahedron_index = ordered_list_tetrahedron_indices_surrounding_current_generator[-1]
current_tetrahedron_coord_array = array_tetrahedra[current_tetrahedron_index]
current_triangle_coord_array = current_tetrahedron_coord_array[:-1,...]
indices_candidate_vertices_current_triangle_excluding_generator = numpy.unique(numpy.where(current_triangle_coord_array != generator)[0])
array_candidate_vertices = current_triangle_coord_array[indices_candidate_vertices_current_triangle_excluding_generator]
current_tetrahedron_index_for_neighbour_propagation = numpy.unique(numpy.where(current_tetrahedron_coord_array == common_vertex_coordinate)[0])
next_tetrahedron_index_surrounding_generator = tri.neighbors[current_tetrahedron_index][current_tetrahedron_index_for_neighbour_propagation][0]
common_vertex_coordinate = array_candidate_vertices[array_candidate_vertices != common_vertex_coordinate] #for the next iteration
ordered_list_tetrahedron_indices_surrounding_current_generator.append(next_tetrahedron_index_surrounding_generator)
vertices_remaining -= 1
dictionary_sorted_Voronoi_point_coordinates_for_each_generator[generator_index] = array_Voronoi_vertices[ordered_list_tetrahedron_indices_surrounding_current_generator]
generator_index += 1
return dictionary_sorted_Voronoi_point_coordinates_for_each_generator
def voronoi_region_surface_areas_spherical_surface(self):
'''Returns a dictionary with the estimated surface areas of the Voronoi region polygons corresponding to each generator (original data point) index. An example dictionary entry: `{generator_index : surface_area, ...}`.'''
dictionary_sorted_Voronoi_point_coordinates_for_each_generator = self.voronoi_region_vertices_spherical_surface()
dictionary_Voronoi_region_surface_areas_for_each_generator = {}
for generator_index, Voronoi_polygon_sorted_vertex_array in dictionary_sorted_Voronoi_point_coordinates_for_each_generator.iteritems():
current_Voronoi_polygon_surface_area_on_sphere = calculate_surface_area_of_a_spherical_Voronoi_polygon(Voronoi_polygon_sorted_vertex_array,self.estimated_sphere_radius)
assert current_Voronoi_polygon_surface_area_on_sphere > 0, "Obtained a surface area of zero for a Voronoi region."
dictionary_Voronoi_region_surface_areas_for_each_generator[generator_index] = current_Voronoi_polygon_surface_area_on_sphere
return dictionary_Voronoi_region_surface_areas_for_each_generator
if __name__ == "__main__":
import doctest
doctest.testmod()
| [
"matplotlib"
] |
75992c57a36ece8307a4413ef5372d5f83a37160 | Python | elu00/CATOpt | /final.py | UTF-8 | 437 | 3.1875 | 3 | [
"MIT"
] | permissive |
import matplotlib.pyplot as plt
x = []
y = []
with open('chug4999') as f:
n, m = map(int,f.readline().split())
for _ in range(n):
line = f.readline()
a, b = map(float, line.split())
x.append(a)
y.append(b)
for _ in range(m):
line = f.readline()
i, j = map(int, line.split())
plt.plot([x[i], x[j]], [y[i], y[j]])
plt.plot(x, y, 'o')
plt.savefig("final.png")
plt.clf()
| [
"matplotlib"
] |
f73b8e66a8d33a252461c5e219dd9ab57e820eb7 | Python | julia-kudinovich/data-analyst-p5-identify-fraud-from-enron-email | /poi_id.py | UTF-8 | 20,495 | 3.28125 | 3 | [] | no_license |
# coding: utf-8
# # Table of Contents
# * [Project 5 Identify Fraud from Enron Email](#Project-5-Identify-Fraud-from-Enron-Email)
# * [Question 1](#Question-1)
# * [Project Overview](#Project-Overview)
# * [Data Exploration](#Data-Exploration)
# * [Outliers](#Outliers)
# * [Question 2:](#Question-2:)
# * [Feature Selection](#Feature-Selection)
# * [Dealing with missing values](#Dealing-with-missing-values)
# * [New feature creation](#New-feature-creation)
# * [Feature scaling](#Feature-scaling)
# * [Question 3](#Question-3)
# * [Trying a variety of classifiers](#Trying-a-variety-of-classifiers)
# * [Decision Trees](#Decision-Trees)
# * [Naive Bayes](#Naive-Bayes)
# * [SVM](#SVM)
# * [Question 4](#Question-4)
# * [Parameter Tuning](#Parameter-Tuning)
# * [Question 5](#Question-5)
# * [Question 6](#Question-6)
# * [Dump the classifier, dataset, and features_list](#Dump-the-classifier,-dataset,-and-features_list)
#
# # Project 5 Identify Fraud from Enron Email
# ## Question 1
#
# >Summarize for us the goal of this project and how machine learning is useful in trying to accomplish it. As part of your answer, give some background on the dataset and how it can be used to answer the project question. Were there any outliers in the data when you got it, and how did you handle those?
#
# ### Project Overview
# Enron was once one of the largest corporations in the energy sector. Fraudulent activities of upper management led Enron to collapse in 2002.
#
# In this project, I will be applying machine learning techniques to Enron dataset to identify persons of interest (POI) who may have committed fraud that lead to the Enron collapse. Enron dataset consists of financial and email data that was made publicly available after the Enron scandal.
#
# There are 146 data points in the dataset that represent 146 upper management persons in the company. 18 persons out of the 146 are already identified as POIs. My goal will be to build a machine learning algorithm (POI identifier) based on the financial and email data that became publicly available as a result of the Enron scandal.
#
# Machine learning is very useful in this kind of task as it can learn from the dataset, discover patterns in the data and classify new data based on the patterns.
#
# In[217]:
###Importing libraries
import sys
import pickle
sys.path.append("../tools/")
from feature_format import featureFormat, targetFeatureSplit
from tester import dump_classifier_and_data
from sklearn.cross_validation import train_test_split
from sklearn import tree
from sklearn.metrics import accuracy_score
import matplotlib.pyplot
from tester import test_classifier
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
from sklearn.metrics import classification_report
from sklearn.model_selection import GridSearchCV
import numpy as np
from sklearn.feature_selection import SelectKBest
# In[218]:
### Load the dictionary containing the dataset
with open("final_project_dataset.pkl", "r") as data_file:
data_dict = pickle.load(data_file)
# ### Data Exploration
# In[159]:
print 'Number of data points (persons) in the data set:', len(data_dict)
#Number of features for each person
num_features=0
for person in data_dict:
num_features = len(data_dict[person])
break
print 'Number of features for each person: ', num_features
#Number of POIs (persons of interest)
num_poi=0
for person in data_dict:
if data_dict[person]["poi"]==1:
num_poi+=1
print 'Number of POIs (persons of interest):', num_poi
print '\n'
### Getting the list of all the features in the dataset
n=0
features=[]
for key, value in data_dict.iteritems():
if n==0:
features = value.keys()
n+=1
###Count how many NaN each feature has and output feature name + count
def countNaN(feature_name):
count=0
for person in data_dict:
if data_dict[person][feature_name] == 'NaN':
count += 1
if count>0:
print 'Number of NaNs for', feature_name, ':', count
for f in features:
countNaN(f)
# There are a lot of missing values in the data. Handling of those will be discussed later in the analysis.
# ### Outliers
# Let's see if there are any outliers in the financial data. First, I want to plot salary and bonus.
# In[219]:
features = ['salary', 'bonus']
data = featureFormat(data_dict, features)
for point in data:
salary = point[0]
bonus = point[1]
matplotlib.pyplot.scatter( salary, bonus )
matplotlib.pyplot.xlabel("salary")
matplotlib.pyplot.ylabel("bonus")
matplotlib.pyplot.show()
# From the graph above we see that there is a very extreme outlier. Now, I will investigate who the outlier is and remove him from the dataset
# In[220]:
for person in data_dict:
if data_dict[person]['salary']>2500000 and data_dict[person]['salary']!='NaN':
print person
# It appears that the data set has data point for total values. This data point should be removed.
# In[221]:
#Remove 'Total' value
data_dict.pop( "TOTAL", 0 )
data = featureFormat(data_dict, features)
for point in data:
salary = point[0]
bonus = point[1]
matplotlib.pyplot.scatter( salary, bonus )
matplotlib.pyplot.xlabel("salary")
matplotlib.pyplot.ylabel("bonus")
matplotlib.pyplot.show()
# Now, the plot looks much better. However, there are still some outliers. Let's investigate further.
#
# In[222]:
for key, value in data_dict.iteritems():
name = key
salary = value["salary"]
bonus = value["bonus"]
poi = value['poi']
if (salary!= 'NaN' and bonus!='NaN') and (int(salary)>= 800000 and int(bonus)>=3000000):
print name, ' - POI? -', poi
# Even though LAY KENNETH L and SKILLING JEFFREY K are outliers and we could remove them from the training dataset, we cannot do so for the test set. This way we would have a training dataset that generalizes better, while still being able to validate on the entire dataset.
#
# Having a totals row in the dataset made me curious whether there are any other non-person data points in the set. Below I will output all the names to spot any odd entries.
# In[223]:
for key, value in data_dict.iteritems():
name = key
print name
# Looking through the names I noticed `THE TRAVEL AGENCY IN THE PARK`, which does not seem to represent a person. Moreover, the data for the Travel Agency is mostly NaNs. I am excluding `THE TRAVEL AGENCY IN THE PARK` from the dataset as well.
# In[224]:
data_dict.pop( "THE TRAVEL AGENCY IN THE PARK", 0 )
# Next I am going to check programmatically if there is anybody else who has all or almost all feature values missing.
# In[166]:
for key, value in data_dict.iteritems():
name = key
countNaN=0
for feature in value:
if (value[feature]=='NaN'):
countNaN+=1
print 'Number of missing falues for', name, ': ', countNaN
# There are 21 features for each person in the dataset. And from above we see that there is a person that has 20 missing values (LOCKHART EUGENE E). I will remove this person from the dataset.
# In[225]:
data_dict.pop( "LOCKHART EUGENE E", 0 )
# ## Question 2:
# >What features did you end up using in your POI identifier, and what selection process did you use to pick them? Did you have to do any scaling? Why or why not? As part of the assignment, you should attempt to engineer your own feature that does not come ready-made in the dataset -- explain what feature you tried to make, and the rationale behind it. In your feature selection step, if you used an algorithm like a decision tree, please also give the feature importances of the features that you use, and if you used an automated feature selection function like SelectKBest, please report the feature scores and reasons for your choice of parameter values.
# ### Feature Selection
# #### Dealing with missing values
# In[226]:
### Getting the list of all the features in the dataset
n=0
features=[]
for key, value in data_dict.iteritems():
if n==0:
features = value.keys()
n+=1
###Count how many NaN missing values are related to POI or non POI
def countNaN(feature_name):
count=0
count_POI=0
count_non_POI = 0
for person in data_dict:
if data_dict[person][feature_name] == 'NaN':
count += 1
if data_dict[person]['poi']==1:
count_POI+=1
elif data_dict[person]['poi']==0:
count_non_POI+=1
if count>0:
print 'Number of NaNs for', feature_name, ':', count
print "Percent of POI having NaNs:", round(100* count_POI/float(18),2), "%"
print "Percent of non POI having NaNs:", round(100* count_non_POI/float(146-18),2), "%"
print "\n"
for f in features:
countNaN(f)
# From above we see that there are several features with many missing values: `deferral_payments, restricted_stock_deferred, loan_advances, director_fees, deferred_income`. These features each have around 100 or more missing values, so they will not be used in the algorithm since they contribute little to finding patterns.
#
# Moreover, features whose percentage of missing values is very different between POIs and non-POIs should also not be considered in the feature selection, because the algorithm may latch onto the difference in NaN counts as a pattern for distinguishing a POI, which is the wrong way to go. I will keep only features with a 30% or smaller difference between the POI and non-POI NaN percentages (a small sketch of this check follows below). However, I will make an exception and add both salary and bonus to the feature list. This is based solely on my intuition, since I believe both salary and bonus are important features in this case.
#
# Also, `email_address` should not be used as a feature because an email address is unique to each person and cannot be used to distinguish POIs from non-POIs.
#
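# A minimal sketch of the 30% check described above (not part of the original selection; the
# threshold variable `nan_gap_threshold` and the loop below are my own illustration). It recomputes
# the POI / non-POI NaN percentages per feature and prints the features whose gap exceeds 30
# percentage points.
nan_gap_threshold = 30.0
num_poi_total = sum(1 for p in data_dict if data_dict[p]['poi'] == 1)
num_non_poi_total = len(data_dict) - num_poi_total
for feature_name in features:
    if feature_name == 'email_address':
        continue
    poi_nans = sum(1 for p in data_dict
                   if data_dict[p][feature_name] == 'NaN' and data_dict[p]['poi'] == 1)
    non_poi_nans = sum(1 for p in data_dict
                       if data_dict[p][feature_name] == 'NaN' and data_dict[p]['poi'] == 0)
    gap = abs(100.0 * poi_nans / num_poi_total - 100.0 * non_poi_nans / num_non_poi_total)
    if gap > nan_gap_threshold:
        print('{0}: POI/non-POI NaN gap of {1:.1f} percentage points'.format(feature_name, gap))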
# Preliminary feature list is below:
#
# In[227]:
features_list_prelim = ['poi', 'salary', 'bonus', 'total_stock_value', 'exercised_stock_options', 'from_this_person_to_poi',
'from_poi_to_this_person', 'to_messages', 'long_term_incentive', 'shared_receipt_with_poi',
'from_messages', 'restricted_stock', 'total_payments']
# #### New feature creation
# Instead of using `from_this_person_to_poi` and `from_poi_to_this_person` directly I want to create 2 new features:
# the proportion of `from_this_person_to_poi` and `from_poi_to_this_person` in the person's total emails. The absolute number of emails to/from a POI does not mean much by itself. If one person has sent 10 emails to POIs out of 20 total sent, the proportion is 0.5, while another person who also sent 10 emails to POIs out of 100 total has a proportion of only 0.1. The first person communicates with POIs more often even though both sent the same number of emails to POIs.
# In[256]:
for person in data_dict:
if data_dict[person]['from_this_person_to_poi']!='NaN':
data_dict[person]['from_this_person_to_poi_proportion'] = int(data_dict[person]['from_this_person_to_poi'])/float(data_dict[person]['from_messages'])
else:
data_dict[person]['from_this_person_to_poi_proportion']='NaN'
if data_dict[person]['from_poi_to_this_person']!='NaN':
data_dict[person]['from_poi_to_this_person_proportion'] = int(data_dict[person]['from_poi_to_this_person'])/float(data_dict[person]['to_messages'])
else:
data_dict[person]['from_poi_to_this_person_proportion']='NaN'
# I will test the new features' effect on the classification algorithm. I am going to run the Decision Tree algorithm with and without the new features and compare the results.
# In[261]:
features_list_prelim_wo_new = ['poi', 'salary', 'bonus', 'total_stock_value', 'exercised_stock_options', 'from_this_person_to_poi',
'from_poi_to_this_person', 'to_messages', 'long_term_incentive', 'shared_receipt_with_poi',
'from_messages', 'restricted_stock', 'total_payments']
features_list_prelim_w_new = ['poi', 'salary', 'bonus', 'total_stock_value', 'exercised_stock_options', 'from_this_person_to_poi',
'from_poi_to_this_person', 'to_messages', 'long_term_incentive', 'shared_receipt_with_poi',
'from_messages', 'restricted_stock', 'total_payments', 'from_poi_to_this_person_proportion',
'from_this_person_to_poi_proportion']
#Implementing Decision Tree Classifier
my_dataset = data_dict  # defined here so the script runs top to bottom (the exported notebook cells ran in a different order)
clf=tree.DecisionTreeClassifier()
#Using test_classifier from tester.py
print 'Test Classifier without new features implementation:'
test_classifier(clf, my_dataset, features_list_prelim_wo_new , folds = 1000)
print 'Test Classifier with new features implementation:'
test_classifier(clf, my_dataset, features_list_prelim_w_new , folds = 1000)
# As we can see, adding the new features to the feature list improved all metrics, so the new feature creation is justified. I will add these new features to my preliminary features_list in place of `'from_poi_to_this_person', 'from_this_person_to_poi'` because the newly created features better represent the volume of communication between a person and POIs.
# In[258]:
features_list_prelim = ['poi', 'salary', 'bonus', 'total_stock_value', 'exercised_stock_options', 'from_this_person_to_poi_proportion',
'from_poi_to_this_person_proportion', 'to_messages', 'long_term_incentive', 'shared_receipt_with_poi',
'from_messages', 'restricted_stock', 'total_payments']
my_dataset = data_dict
### Extract features and labels from dataset for local testing
data = featureFormat(my_dataset, features_list_prelim, sort_keys = True)
labels, features = targetFeatureSplit(data)
# Next, I am using the SelectKBest automated feature selection function on my preliminary list to further narrow down the features used.
# In[259]:
#Using k='all' do display all features
k_best = SelectKBest(k='all')
k_best.fit(features, labels)
for impt_num, impt in enumerate(k_best.scores_):
print features_list_prelim[1+impt_num], impt
# From the result above it is seen that `to_messages and from_messages` have the lowest scores. I will exclude these 2 features from my feature_list. Let's check the effect of removing these 2 features on the classifier:
# In[260]:
features_list_prelim = ['poi', 'salary', 'bonus', 'total_stock_value', 'exercised_stock_options', 'from_this_person_to_poi_proportion',
'from_poi_to_this_person_proportion', 'long_term_incentive', 'shared_receipt_with_poi', 'restricted_stock',
'total_payments']
print 'Test Classifier with to_messages and from_messages removed:'
test_classifier(clf, my_dataset, features_list_prelim, folds = 1000)
# All the metrics went up, therefore removal of `to_messages and from_messages` is justified.
#
# Final features list is below:
# In[231]:
features_list = ['poi', 'salary', 'bonus', 'total_stock_value', 'exercised_stock_options', 'from_this_person_to_poi_proportion',
'from_poi_to_this_person_proportion', 'long_term_incentive', 'shared_receipt_with_poi','restricted_stock',
'total_payments']
### Extract features and labels from dataset for local testing
data = featureFormat(my_dataset, features_list, sort_keys = True)
labels, features = targetFeatureSplit(data)
# ### Feature scaling
# I am not performing any feature scaling because the algorithms I will be using do not require it (a purely illustrative scaling sketch follows below).
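# For reference only: if a scale-sensitive algorithm (such as the SVM tried below) were kept, the
# features could be rescaled roughly as sketched here. This block is illustrative and its output
# (`scaled_features`) is not used anywhere else in the analysis.
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
scaled_features = scaler.fit_transform(features)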
# ## Question 3
# >What algorithm did you end up using? What other one(s) did you try? How did model performance differ between algorithms?
# ### Trying a variety of classifiers
# #### Decision Trees
# In[232]:
#Implementing Decision Tree Classifier
clf=tree.DecisionTreeClassifier()
#Using test_classifier from tester.py
test_classifier(clf, my_dataset, features_list, folds = 1000)
# From above we see that the classifier has a pretty high accuracy score. However, recall is below 0.3.
# #### Naive Bayes
# In[233]:
clf = GaussianNB()
#Using test_classifier from tester.py
test_classifier(clf, my_dataset, features_list, folds = 1000)
# Naive Bayes while having a higher accuracy score has lower recall and F1 scores than Decision Tree algorithm. Precision are almost identical in both cases.
# #### SVM
# In[234]:
clf = SVC()
#Using test_classifier from tester.py
test_classifier(clf, my_dataset, features_list, folds = 1000)
# The Support Vector Machines algorithm does not produce enough true positives to make a prediction.
#
# I will pick Decision Trees as my final algorithm. I do not need feature scaling, since neither Decision Trees nor Naive Bayes require it.
# ## Question 4
# >What does it mean to tune the parameters of an algorithm, and what can happen if you don’t do this well? How did you tune the parameters of your particular algorithm?
# An algorithm may perform differently with different parameters. Tuning the parameters means trying different combinations of parameter values to find the set that makes the algorithm perform at its best. Having optimal parameter values enables the algorithm to learn from the data in the best way possible.
#
# If you don't tune your algorithm well, meaning you over-tune or under-tune it, it may perform well on the training data but fail on the testing data.
#
# I will tune the Decision Tree algorithm using GridSearchCV.
# ### Parameter Tuning
# In[235]:
param_grid = {
'min_samples_split': np.arange(2, 10),
'splitter':('best','random'),
'criterion': ('gini','entropy')
}
clf = GridSearchCV(tree.DecisionTreeClassifier(), param_grid)
clf.fit(features, labels)
print(clf.best_params_)
# Now run Decision Tree algorithm with suggested parameters.
#
# In[236]:
clf=tree.DecisionTreeClassifier(min_samples_split = 2,
criterion ='gini',
splitter = 'random')
#Using test_classifier from tester.py
test_classifier(clf, my_dataset, features_list, folds = 1000)
# After tuning the parameters, all scores went up. Now I have both precision and recall above 0.3.
# ## Question 5
# >What is validation, and what’s a classic mistake you can make if you do it wrong? How did you validate your analysis?
# Validation is checking how well the algorithm performs on data it has never seen before. The way to check is to split the data into training and testing sets and assess the accuracy of the predictions on the testing set.
# One of the classic mistakes you can make is to overfit your model. This happens when you have a small training dataset or a lot of parameters in the model.
#
# One of the validation metrics is the accuracy score. Let's split the dataset into training and testing data and look at the metrics (a stratified-split sketch follows below).
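# Since POIs are scarce, a single train/test split can be unrepresentative. A stratified shuffle
# split keeps the POI/non-POI ratio in every fold, which is the same idea the provided
# test_classifier relies on. A minimal sketch using the old-style sklearn.cross_validation API
# already imported in this script (the *_cv names below are my own):
from sklearn.cross_validation import StratifiedShuffleSplit
sss = StratifiedShuffleSplit(labels, n_iter=100, test_size=0.3, random_state=42)
for train_idx, test_idx in sss:
    features_train_cv = [features[i] for i in train_idx]
    features_test_cv = [features[i] for i in test_idx]
    labels_train_cv = [labels[i] for i in train_idx]
    labels_test_cv = [labels[i] for i in test_idx]
    # a classifier would be fit on the *_train_cv fold and scored on the *_test_cv fold here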
# In[242]:
##Splitting the data into train and test
features_train, features_test, labels_train, labels_test=train_test_split(features, labels, test_size=.3,random_state=42)
# In[244]:
clf = clf.fit(features_train,labels_train)
pred = clf.predict(features_test)
acc = accuracy_score(pred, labels_test)
print(acc)
#Using test_classifier from tester.py
test_classifier(clf, my_dataset, features_list, folds = 1000)
# The accuracy score from sklearn.metrics is pretty high. In our case, because there is a relatively small number of POIs in the dataset, accuracy is not a very reliable metric. We need to look at precision and recall.
#
# Precision is the fraction of items labeled positive that truly belong to the positive class, i.e., out of everything labeled positive, how much really is positive.
#
# Recall is the fraction of truly positive items that the algorithm labels as positive (a manual computation sketch follows below).
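# The same numbers can be recovered by hand from the confusion-matrix counts, which makes the two
# definitions above concrete (sketch only; it assumes at least one predicted and one actual POI in
# the labels_test/pred arrays from the split above, so zero-division guards are omitted):
true_pos = sum(1 for actual, guess in zip(labels_test, pred) if actual == 1 and guess == 1)
false_pos = sum(1 for actual, guess in zip(labels_test, pred) if actual == 0 and guess == 1)
false_neg = sum(1 for actual, guess in zip(labels_test, pred) if actual == 1 and guess == 0)
precision_poi = float(true_pos) / (true_pos + false_pos)
recall_poi = float(true_pos) / (true_pos + false_neg)
print('POI precision: {0:.2f}, recall: {1:.2f}'.format(precision_poi, recall_poi))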
# In[245]:
print classification_report(labels_test, pred)
# For POIs (row with label 1 above) we have precision 0.75, recall 0.6 and f1 score of 0.67.
# ## Question 6
# >Give at least 2 evaluation metrics and your average performance for each of them. Explain an interpretation of your metrics that says something human-understandable about your algorithm’s performance.
# A precision of 0.75 for POIs means that 75% of the time, when the algorithm labels a person as a POI, that person actually is a POI.
#
#
# A recall of 0.6 for POIs means that 60% of the actual POIs were correctly identified as POIs by the algorithm.
#
# ## Dump the classifier, dataset, and features_list
# In[246]:
dump_classifier_and_data(clf, my_dataset, features_list)
| [
"matplotlib"
] |
c9b65ae123233aa50008a0fafb8542f7f6f293d5 | Python | daniel-partington/CampaspeModel | /CampaspeModel/build_common/process_weather_stations.py | UTF-8 | 4,839 | 2.984375 | 3 | [] | no_license | import pandas as pd
import matplotlib.pyplot as plt
import os
#import matplotlib.colors as colors
import re
def process_weather_stations(weather_stations, path='', frequency='A', \
plot_monthly_pattern=False, \
plot_yearly_rainfall=False):
assert frequency in ['A', 'M'], "Frequency must be either: 'A' or 'M'"
weather_station_details = {}
weather_dfs = {}
for station in weather_stations:
with open(os.path.join(path, station + '.txt'), 'r') as f:
text = f.read()
# Get station number:
station_number = re.search('Patched Point data for station: (\S+)', text).group(1)
# Get station Lat Long which corresponds to GDA94:
station_latlong = re.search('Lat: (\S+) Long: (\S+)', text).group().strip('"')
# Get elevation of station:
station_elev = re.search('Elevation:\s+(\w+)', text).group()
weather_station_details[station] = [station_number, station_latlong , station_elev]
#Read in time series data:
weather_dfs[station] = pd.read_csv(os.path.join(path, station + '.txt'),
index_col=0,
skiprows=[41],
parse_dates=True,
infer_datetime_format=True,
delim_whitespace=True,
comment='"',
skipinitialspace=True,
usecols=[0,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17])
def cm2inch(*tupl):
inch = 2.54
if isinstance(tupl[0], tuple):
return tuple(i/inch for i in tupl[0])
else:
return tuple(i/inch for i in tupl)
def get_rain_and_ET_from_df(df, stations, freq, how='sum'):
new_df = pd.DataFrame()
for station in stations:
if how == 'mean':
new_df.loc[:, station] = df[station]['Rain'].resample(freq).mean()
new_df.loc[:, station + '_ET'] = df[station]['Evap'].resample(freq).mean()
elif how == 'sum':
new_df.loc[:, station] = df[station]['Rain'].resample(freq).sum()
new_df.loc[:, station + '_ET'] = df[station]['Evap'].resample(freq).sum()
# end if
#end for
return new_df
annual_weather_df = get_rain_and_ET_from_df(weather_dfs, weather_stations,
'A', how='sum')
monthly_weather_df = get_rain_and_ET_from_df(weather_dfs, weather_stations,
'M', how='mean')
if plot_yearly_rainfall:
plt.figure(figsize=cm2inch(18,8))
plt.ylabel("Annual Rainfall [mm]")
for station in weather_stations:
weather_dfs[station]['Rain'].plot()
weather_dfs[station]['Rain'].resample("M", how='sum').plot()
weather_dfs[station]['Rain'].resample("A", how='sum'). \
plot(legend=True,
label=station + ', '
+ weather_station_details[station][0] + ', '
+ weather_station_details[station][2] + ', Average: '
+ str(weather_dfs[station]['Rain'].resample("A", how='sum').mean())[:5] + 'mm')
plt.xlabel("Year")
plt.legend(bbox_to_anchor=(0, 1), loc='upper left', ncol=1)
annual_weather_df.plot(kind='box')
plt.ylabel("Annual Rainfall [mm]")
if plot_monthly_pattern:
Months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
        month_avg = monthly_weather_df.groupby(monthly_weather_df.index.month).mean()  # DataFrame.groupby is the portable spelling of the old top-level pd.groupby call
month_avg['Months'] = Months
month_avg.plot(kind='bar',x='Months',y=weather_stations)
plt.ylabel('Average Monthly Rainfall [mm]')
plt.xlabel("")
plt.tight_layout()
plt.legend(bbox_to_anchor=(0, 1), loc='upper left', ncol=1)
if frequency == 'A':
# Keeping this as is for now but should not calculate mean here
return annual_weather_df.mean()
if frequency == 'M':
return monthly_weather_df
if __name__ == "__main__":
weather_stations = ['Kyneton', 'Eppalock', 'Elmore', 'Rochester', 'Echuca']
weather = process_weather_stations(weather_stations,
path=r"C:\Workspace\part0075\MDB modelling\Campaspe_data\Climate\\",
frequency='M',
plot_monthly_pattern=True)
| [
"matplotlib"
] |
e94c7a4d68473343758c4b5b0671ba139b9a53dd | Python | coadiator/Python-realtime-graph | /sample.py | UTF-8 | 2,057 | 3.109375 | 3 | [] | no_license | import datetime as dt
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt1
import matplotlib.animation as animation
import time
#import tmp102
# Create figure for plotting
fig = plt.figure()
fig1 = plt1.figure()
ax = fig.add_subplot(1, 1, 1)
ax1 = fig1.add_subplot(1, 1, 1)
xs = []
ys = []
xs1 = []
ys1 = []
tmp102 = []
hum = []
# Initialize communication with TMP102
tmp102.append(np.random.randint(50))
# This function is called periodically from FuncAnimation
def animate(i, xs, ys, xs1, ys1):
# Read temperature (Celsius) from TMP102
tmp102.append(np.random.randint(50))
hum.append(np.random.randint(50))
temp_c = tmp102[i]
hum1 = hum[i]
# Add x and y to lists
xs.append(dt.datetime.now().strftime('%H:%M:%S.%f'))
ys.append(temp_c)
xs1.append(dt.datetime.now().strftime('%H:%M:%S.%f'))
ys1.append(hum1)
time.sleep(0.001)
print(temp_c," ", hum1)
# FILE HANDLING
if(temp_c >= 40):
with open('sensordata.txt','a+') as f:
f.write(str(temp_c))
f.write('\n')
if(hum1 >= 40):
with open('sensordata12.txt','a+') as f:
f.write(str(hum1))
f.write('\n')
#FILE HANDLING
# Limit x and y lists to 20 items
xs = xs[-20:]
ys = ys[-20:]
xs1 = xs1[-20:]
ys1 = ys1[-20:]
# Draw x and y lists
ax.clear()
ax1.clear()
ax.plot(xs, ys)
ax1.plot(xs1, ys1)
# Format plot
plt.xticks(rotation=45, ha='right')
plt.subplots_adjust(bottom=0.30)
plt.title('TMP102 Temperature over Time')
plt.ylabel('Temperature (deg C)')
plt1.xticks(rotation=45, ha='right')
plt1.subplots_adjust(bottom=0.30)
plt1.title('TMP102 Temperature over Time')
plt1.ylabel('Temperature (deg C)')
# Set up plot to call animate() function periodically
ani = animation.FuncAnimation(fig, animate, fargs=(xs, ys, xs1, ys1), interval=0.0001)
ani1 = animation.FuncAnimation(fig1, animate, fargs=(xs, ys, xs1, ys1), interval=0.0001)
plt.show()
plt1.show() | [
"matplotlib"
] |
8abb8609c2dc7621c18ef076df5c1bdd80e308f0 | Python | pmoracho/code.snippets | /python/test_video.py | UTF-8 | 11,799 | 2.53125 | 3 | [] | no_license | import tkinter as tk, threading
import imageio
from PIL import Image, ImageTk
from tkinter import *
from tkinter import Tk
from tkinter import PhotoImage
from tkinter import Canvas
from tkinter import NW
from tkinter import Menu
from tkinter import filedialog
from PIL import Image, ImageTk
from sys import argv
import tkinter as tk
from tkinter.filedialog import askopenfilename
import cv2
import os
from keras.models import model_from_json
from keras.optimizers import SGD
import numpy as np
from time import sleep
ventana = tk.Tk()
#ventana.bind('<escape>', lambda e: root.quit())
#define CV_HAAR_FEATURE_MAX 3
#ventana.geometry("1800x900")
#maximized size of the window
m = ventana.maxsize()
ventana.geometry('{}x{}+0+0'.format(*m))
#ventana.geometry("900x900+0+0")
ventana.configure(background="black")
#-------------------------------------
ventana.title("Sistema para el análisis de actitud")
#fondo=PhotoImage(file="popo.gif")
#lblFondo=Label(ventana,image=fondo).place(x=0,y=0) #fondo
"""
img_frame = tk.Frame(ventana, height=600, width=800, bg='#faf0e6')
img_frame.pack()
canvas = tk.Canvas (img_frame, height=600, width=800, bg='#faf0e6', relief=tk.SUNKEN)
sbarV = tk.Scrollbar(img_frame, orient=tk.VERTICAL, command=canvas.yview)
sbarH = tk.Scrollbar(img_frame, orient=tk.HORIZONTAL, command=canvas.xview)
sbarV.pack(side=tk.RIGHT, fill=tk.Y)
sbarH.pack(side=tk.BOTTOM, fill=tk.X)
canvas.config(yscrollcommand=sbarV.set)
canvas.place(x=50, y=0)
canvas.config(xscrollcommand=sbarH.set)
canvas.pack(side=tk.LEFT, expand=True, fill=tk.BOTH)"""
# Global variable that will hold the video
##-------- BACKGROUND IMAGE FOR THE INTERFACE ----------------------
image= Image.open("fondo1.gif")
img_copy = image.copy()
background_image = ImageTk.PhotoImage(image)
background = Label(ventana,image=background_image)
background.place(x=0, y=0)
background.pack(fill=BOTH, expand=YES)
new_width = 1600
new_height = 800
image = img_copy.resize((new_width, new_height))
background_image = ImageTk.PhotoImage(image)
background.configure(image = background_image)
background.pack(fill=BOTH, expand=YES)
#-----------------------------------------------------------------------
"""video_name = "pruebaMay.mp4" #This is your video file path
video = imageio.get_reader(video_name)"""
model = model_from_json(open('./models/my_cnn_tflearn.json').read())  # classification model architecture produced by training the neural network
#model.load_weights('_model_weights.h5')
model.load_weights('./models/my_cnn_tflearn.h5')  # load the trained weights here
sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=sgd)
ruta = None
conAle = None
#-------- Open the file dialog to choose the video --------------------
def abrir():
global ruta
ventana.filename=filedialog.askopenfilename(initialdir="/Users/niet/Desktop")
ruta=ventana.filename
#-------------------------------------------------------------------------------------
#--------- Facial expression recognition is performed here
import matplotlib.pyplot as plt
def detectar():
global ruta
global conAle
global conEno
global conMie
global conDis
global conSor
global conTri
#ventana.filename=filedialog.askopenfilename(initialdir="/Users/niet/Desktop")
#ruta=ventana.filename
    video_capture = cv2.VideoCapture(ruta)  # from the selected video file
def extract_face_features(gray, detected_face, offset_coefficients):
(x, y, w, h) = detected_face
#print x , y, w ,h
horizontal_offset = np.int(np.floor(offset_coefficients[0] * w))
vertical_offset = np.int(np.floor(offset_coefficients[1] * h))
extracted_face = gray[y+vertical_offset:y+h,
x+horizontal_offset:x-horizontal_offset+w]
#print extracted_face.shape
new_extracted_face = zoom(extracted_face, (48. / extracted_face.shape[0],
48. / extracted_face.shape[1]))
new_extracted_face = new_extracted_face.astype(np.float32)
new_extracted_face /= float(new_extracted_face.max())
return new_extracted_face
from scipy.ndimage import zoom
def detect_face(frame):
cascPath = "./models/haarcascade_frontalface_default.xml"#algoritmo para la detección de rostro
faceCascade = cv2.CascadeClassifier(cascPath)
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
detected_faces = faceCascade.detectMultiScale(
gray,
scaleFactor=1.1,
minNeighbors=6,
minSize=(48, 48),
#flags=cv2.CV_HAAR_FEATURE_MAX)
flags=2)
return gray, detected_faces
cascPath = "haarcascade_frontalface_default.xml"
faceCascade = cv2.CascadeClassifier(cascPath)
    #video_capture = cv2.VideoCapture(0)  # real-time capture from the webcam
Tk().withdraw()
    #File dialog for opening a video from the desktop
    #print(filename)  # prints the selected file path
    #video_capture = cv2.VideoCapture(ruta,0)  # from a recorded video
conAle = 0
conEno = 0
    conDis = 0  # was 'conDes'; conDis must be initialised before it is incremented below
conMie = 0
conTri = 0
conSor = 0
while (True):
        #capture the video frame by frame
        sleep(0.0)
        ret, frame = video_capture.read()
        if not ret:  # stop cleanly once the video ends
            break
        #detect the face in the frame
gray, detected_faces = detect_face(frame)
face_index = 0
contador = 0
        # label the facial expressions
for face in detected_faces:
(x, y, w, h) = face
if w > 100:
cv2.rectangle(frame, (x, y), (x+w, y+h), (75, 0, 130), 2)#el rectangulo de la cara
extracted_face = extract_face_features(gray, face, (0.075, 0.05)) #(0.075, 0.05) extracción de caracteristicas
prediction_result = model.predict_classes(extracted_face.reshape(1,48,48,1))
                # Label the expression
if prediction_result == 3:
cv2.putText(frame, "Alegria",(x,y), cv2.FONT_ITALIC, 2, (255, 215, 0), 2)
conAle = conAle + 1
elif prediction_result == 0:
cv2.putText(frame, "Enojo",(x,y), cv2.FONT_HERSHEY_SIMPLEX, 2, (255, 215, 0), 2)
conEno = conEno + 1
elif prediction_result == 1:
cv2.putText(frame, "Disgusto",(x,y), cv2.FONT_HERSHEY_SIMPLEX, 2, (255, 215, 0), 2)
conDis = conDis + 1
elif prediction_result == 2:
cv2.putText(frame, "Miedo",(x,y), cv2.FONT_HERSHEY_SIMPLEX, 2, (255, 215, 0), 2)
conMie = conMie + 1
elif prediction_result == 4:
cv2.putText(frame, "Tristeza",(x,y), cv2.FONT_HERSHEY_SIMPLEX, 2, (255, 215, 0), 2)
conTri = conTri + 1
else :
cv2.putText(frame, "Sorpresa",(x,y), cv2.FONT_HERSHEY_SIMPLEX, 2, (255, 215, 0), 2)
conSor = conSor + 1
#print("Expresiones de alegría son: ", conAle)
# increment counter
face_index += 1
contador = face_index+contador
print("Expresiones de alegría son: ", conAle)
        # Show the facial expression recognition output
cv2.imshow('Video', frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
#break
break
fig = plt.figure()
ax = fig.add_subplot(111)
graficar = [10,8,9,10,7]
xx = range(len(graficar))
ax.bar(xx,graficar)
plt.show()
    #When everything is done, release the capture
video_capture.release()
cv2.destroyAllWindows()
# ------------- Information about the system's functions --------------------------------
class Acerca:
def __init__(self, parent):
text = (" Acerca del sistema\n\n "
"***** Opción de archivo *****\n"
"Archivo:En este menú se encuentran los submenús: Abri y Salir \n"
"Abrir: Muestra un cuadro de dialógo donde podra seleccionar el video que usted decida\n"
"Salir: Con esta opción podra salir del sistema "
"***** Opción de Reconocimiento *****\n"
"Reconocimiento:En este menú muestra la opción para el reconocimiento de expresiones faciales \n"
"Detección: Esta opción permite la detección de las seis expresiones faciales en la secuencia del video \n"
)
self.top = tk.Toplevel(parent)
self.top.title("Acerca de ")
display = tk.Text(self.top)
display.pack()
display.insert(tk.INSERT, text)
display.config(state=tk.DISABLED)
b = tk.Button(self.top, text="Cerrar", command=self.cerrar)
b.pack(pady=5)
b.config(bg="CadetBlue")## bg es el boton de cerrar ,aqui se puede cambiar
def cerrar(self):
self.top.destroy()
class Main_Window:
def __init__(self, ventana):
mnuAcerca.add_command(label="Acerca de ",command=self.ayuda)
def ayuda(self):
Acerca(ventana)
#----------------------------------------------------------------------------------------------
#--------------- Help for analyzing a video -------------------------
class Ayuda:
def __init__(self, parent):
text = (" Ánalisis de un video \n"
"[1] Seleccionar el video con la opción Abrir\n"
"[2] Seleccionar la opción detección, para el reconocimiento de expresiones faciales \n"
"[3] Imprimir reportes\n"
)
self.top = tk.Toplevel(parent)
self.top.title("Ayuda")
display = tk.Text(self.top)
display.pack()
display.insert(tk.INSERT, text)
display.config(state=tk.DISABLED)
b = tk.Button(self.top, text="Cerrar", command=self.cerrar)
b.pack(pady=5)
b.config(bg="CadetBlue")## bg es el boton de cerrar ,aqui se puede cambiar
def cerrar(self):
self.top.destroy()
class Main_Window1:
def __init__(self, ventana):
mnuAcerca.add_command(label="Sistema de reconocimiento 'AYUDA' ",command=self.ayuda)
def ayuda(self):
Ayuda(ventana)
#-----------------------------------------------------------------------------------------
def imprimirG():
global conAle
global conEno
global conMie
global conDis
global conSor
global conTri
fig = plt.figure()
ax = fig.add_subplot(111)
datos = [10,8,9,10,7]
xx = range(len(datos))
ax.bar(xx,datos)
plt.show()
barraMenu=Menu(ventana)
#create the menus ..............................................................
mnuArchivo=Menu(barraMenu)
mnuDiagnostico=Menu(barraMenu)
mnuReporte=Menu(barraMenu)
mnuAyuda=Menu(barraMenu)
mnuAcerca=Menu(barraMenu)
#create the menu commands................................................
mnuArchivo.add_command(label="Abrir", command = abrir)
mnuArchivo.add_separator()
mnuArchivo.add_command(label="Salir")
mnuDiagnostico.add_command(label="Detección ", command = detectar)
mnuReporte.add_command(label="Imprimir", command = imprimirG)
######################...........................................................
barraMenu.add_cascade(label="Archivo",menu=mnuArchivo)
barraMenu.add_cascade(label="Reconocimiento",menu=mnuDiagnostico)
barraMenu.add_cascade(label="Reportes",menu=mnuReporte)
barraMenu.add_cascade(label="Ayuda ",menu=mnuAcerca)
ventana.config(menu=barraMenu)
if __name__ == "__main__":
Main_Window(ventana)
Main_Window1(ventana)
ventana.mainloop()
| [
"matplotlib"
] |
6ef919192dda75f2377723840f572bb623198567 | Python | liu1073811240/FC-Mnist | /DIR1/fc_mnist.py | UTF-8 | 4,065 | 2.796875 | 3 | [] | no_license | import torch
import torch.nn as nn
from torchvision import datasets, transforms
from torchvision.utils import make_grid
from torch.utils import data
import matplotlib.pyplot as plt
import numpy as np
import cv2
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.fc1 = nn.Sequential(
nn.Linear(in_features=784, out_features=512, ),
            nn.BatchNorm1d(512),  # normalize over the output features
            nn.ReLU(inplace=True)  # inplace=True reuses the input tensor's memory
)
self.fc2 = nn.Sequential(
nn.Linear(512, 256),
nn.BatchNorm1d(256),
nn.ReLU()
)
self.fc3 = nn.Sequential(
nn.Linear(256, 128),
nn.BatchNorm1d(128),
nn.ReLU()
)
self.fc4 = nn.Sequential(
nn.Linear(128, 10)
)
def forward(self, x):
x = torch.reshape(x, [x.size(0), -1])
y1 = self.fc1(x)
y2 = self.fc2(y1)
y3 = self.fc3(y2)
self.y4 = self.fc4(y3)
output = torch.softmax(self.y4, 1)
return output
if __name__ == '__main__':
transf_data = transforms.Compose(
[transforms.ToTensor(),
transforms.Normalize(mean=[0.5, ], std=[0.5, ])]
)
batch_size = 100
train_data = datasets.MNIST(root="../mnist", train=True, transform=transf_data, download=True)
test_data = datasets.MNIST(root="../mnist", train=False, transform=transf_data, download=True)
train_loader = data.DataLoader(train_data, batch_size=batch_size, shuffle=True)
test_loader = data.DataLoader(test_data, batch_size=batch_size, shuffle=True)
# print(train_data.data.shape)
# print(test_data.data.shape)
# print(train_data.targets.shape)
# print(test_data.targets.shape)
# print(train_data.classes)
# print(train_data.train)
# print(test_data.classes)
# print(test_data.train)
    # After the images are loaded, preview the data of one batch
# images, labels = next(iter(train_loader))
# img = make_grid(images)
#
# img = img.numpy().transpose(1, 2, 0)
# std = [0.5, 0.5, 0.5]
# mean = [0.5, 0.5, 0.5]
# img = img*std + mean
# print(labels)
# print([labels[i] for i in range(100)])
#
# cv2.imshow("win", img)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
if torch.cuda.is_available():
device = torch.device("cuda")
else:
device = torch.device("cpu")
net = Net().to(device)
# net.load_state_dict(torch.load("./mnist_params.pth"))
# net = torch.load("./mnist.net_pth")
    loss_func = nn.CrossEntropyLoss()  # applies log-softmax internally and takes integer class labels (no one-hot needed)
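    # --- Illustrative sketch (editor's addition): nn.CrossEntropyLoss expects raw
    # logits plus integer class labels and applies log-softmax internally, which is
    # why the training loop below passes net.y4 (the pre-softmax output). The
    # tensors here are dummy values used only for this illustration.
    _demo_logits = torch.tensor([[2.0, 0.5, -1.0], [0.1, 1.5, 0.3]])  # 2 samples, 3 classes
    _demo_labels = torch.tensor([0, 1])                               # class indices, not one-hot
    _demo_loss = loss_func(_demo_logits, _demo_labels)                # scalar tensor, averaged over the batch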
optim = torch.optim.Adam(net.parameters())
plt.ion()
a = []
b = []
net.train()
for epoch in range(2):
for i, (x, y) in enumerate(train_loader):
i = i + epoch*(len(train_data) / batch_size)
x = x.to(device)
y = y.to(device)
out = net(x)
loss = loss_func(net.y4, y)
optim.zero_grad()
loss.backward()
optim.step()
if i % 100 == 0:
a.append(i)
b.append(loss.item())
plt.figure()
plt.clf()
plt.plot(a, b)
plt.xlabel("BATCH")
plt.ylabel("LOSS")
plt.pause(1)
print("Epoch:{}, loss:{:.3f}".format(epoch, loss.item()))
torch.save(net.state_dict(), "./mnist_params.pth")
# torch.save(net, "./mnist_net.pth")
net.eval()
eval_loss = 0
eval_acc = 0
for i, (x, y) in enumerate(test_loader):
x = x.to(device)
y = y.to(device)
out = net(x)
        loss = loss_func(net.y4, y)  # evaluate on the raw logits (net.y4), consistent with training
eval_loss += loss.item() * y.size(0)
arg_max = torch.argmax(out, 1)
eval_acc += (arg_max == y).sum().item()
mean_loss = eval_loss / len(test_data)
mean_acc = eval_acc / len(test_data)
print(mean_loss, mean_acc)
| [
"matplotlib"
] |
08518ed1f216e251d76a0729c94c1d9c8e582417 | Python | Asad-Ismail/fruits-classfication | /fruits-class/utils.py | UTF-8 | 4,417 | 2.6875 | 3 | [] | no_license | import matplotlib.pyplot as plt
import os
from datetime import datetime
import numpy as np
import pathlib
import tensorflow as tf
import tensorflow.keras as K
import time
import cv2
def show_images(ds):
plt.figure(figsize=(10, 10))
#x,y=next(ds)
for images in ds.next():
for i in range(9):
vis_img = images[i]
ax = plt.subplot(3, 3, i + 1)
plt.imshow(vis_img)
plt.axis("off")
plt.show()
break
def get_all_files(root_path):
    all_files = []
    for path, subdirs, names in os.walk(root_path):
        for name in names:
            all_files.append(os.path.join(path, name))
    return all_files
def predict_one(model,input_path,image_size=(100,100)):
img = K.preprocessing.image.load_img(input_path, target_size=image_size)
img_array = K.preprocessing.image.img_to_array(img)/255.0
img_array = tf.expand_dims(img_array, 0)
predictions = model.predict(img_array)
predicted_class = np.argmax(predictions,axis=1)
return predicted_class[0],predictions[0,predicted_class]
def lite_average_time(interpreter,steps=500):
times=[]
    for i in range(steps):
t1=time.time()*1000
interpreter.invoke()
t2=time.time()*1000
times.append(t2-t1)
print(f"Average time taken is {np.mean(times)}")
def pred_one_lite(interpreter_path,img_pth):
interpreter = tf.lite.Interpreter(model_path=str(interpreter_path))
input_index = interpreter.get_input_details()[0]["index"]
output_index = interpreter.get_output_details()[0]["index"]
output_shape = interpreter.get_output_details()[0]["shape"]
interpreter.allocate_tensors()
tst_img=cv2.imread(img_pth)
tst_img=tst_img[:,:,[2,1,0]]
tst_img=np.expand_dims(tst_img,0)
tst_img=tst_img.astype(np.float32)
tst_img/=255.0
interpreter.set_tensor(input_index, tst_img)
interpreter.invoke()
output = interpreter.tensor(output_index)
digit = np.argmax(output(),axis=1)
print(digit)
def time_lite(interpreter_path,img_pth):
interpreter = tf.lite.Interpreter(model_path=str(interpreter_path))
input_index = interpreter.get_input_details()[0]["index"]
output_index = interpreter.get_output_details()[0]["index"]
output_shape = interpreter.get_output_details()[0]["shape"]
interpreter.allocate_tensors()
tst_img=cv2.imread(img_pth)
tst_img=tst_img[:,:,[2,1,0]]
tst_img=np.expand_dims(tst_img,0)
tst_img=tst_img.astype(np.float32)
tst_img/=255.0
interpreter.set_tensor(input_index, tst_img)
t1=time.time()*1000
interpreter.invoke()
t2=time.time()*1000
print(f"The time taken is {t2-t1}")
output = interpreter.tensor(output_index)
digit = np.argmax(output(),axis=1)
print(digit)
def evaluate_lite_model(interpreter_path,test_data):
interpreter = tf.lite.Interpreter(model_path=str(interpreter_path))
input_index = interpreter.get_input_details()[0]["index"]
output_index = interpreter.get_output_details()[0]["index"]
output_shape = interpreter.get_output_details()[0]["shape"]
# Resize input tensor to take 150 images batch size
input_shape=[150,100,100,3]
interpreter.resize_tensor_input(input_index,input_shape)
interpreter.resize_tensor_input(output_index,[150, 1, output_shape[1]])
interpreter.allocate_tensors()
# Run predictions on every image in the "test" dataset.
prediction = []
gt=[]
print(f"Total test images batches {len(test_data)}")
for i,(test_image,labels) in enumerate(test_data):
# Pre-processing: add batch dimension and convert to float32 to match with
# the model's input data format.
if i==len(test_data)-1:
break
test_image = test_image.astype(np.float32)
interpreter.set_tensor(input_index, test_image)
# Run inference.
interpreter.invoke()
# Post-processing: remove batch dimension and find the digit with highest
# probability.
output = interpreter.tensor(output_index)
digit = np.argmax(output(),axis=1)
prediction.extend(digit)
gt.extend(np.argmax(labels,1))
#print(f"Procesed {i} batches")
#if i==20:
# break
# Compare prediction results with ground truth labels to calculate accuracy.
assert len(gt)==len(prediction), print("Length of predictions and GT are not equal")
accurate_count = 0
for index in range(len(prediction)):
if prediction[index] == gt[index]:
accurate_count += 1
accuracy = accurate_count * 1.0 / len(prediction)
return accuracy
| [
"matplotlib"
] |
544d30305bf6a06ef5e3e3f48143dd76a7897271 | Python | abondar24/MachineLearnPython | /scikit/sk_ens_bagging.py | UTF-8 | 2,888 | 2.984375 | 3 | [
"Apache-2.0"
] | permissive | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split  # sklearn.cross_validation was removed in scikit-learn 0.20
from sklearn.ensemble import BaggingClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score
wine = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data', header=None)
wine.columns = ['Class label', 'Alcohol',
'Malic acid', 'Ash',
                'Alcalinity of ash', 'Magnesium',
                'Total phenols', 'Flavanoids',
                'Nonflavanoid phenols',
                'Proanthocyanins',
                'Color intensity', 'Hue',
                'OD280/OD315 of diluted wines',
'Proline']
wine = wine[wine['Class label'] != 1]
y = wine['Class label'].values
x = wine[['Alcohol', 'Hue']].values
le = LabelEncoder()
y = le.fit_transform(y)
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.4, random_state=1)
tree = DecisionTreeClassifier(criterion='entropy', max_depth=None)
bag = BaggingClassifier(base_estimator=tree, n_estimators=500,
max_samples=1.0, max_features=1.0,
bootstrap=True, bootstrap_features=False,
n_jobs=1, random_state=1)
tree = tree.fit(x_train, y_train)
y_train_pred = tree.predict(x_train)
y_test_pred = tree.predict(x_test)
tree_train = accuracy_score(y_train, y_train_pred)
tree_test = accuracy_score(y_test, y_test_pred)
print('Decision tree train/test accuracies %.3f/%.3f' % (tree_train, tree_test))
bag = bag.fit(x_train, y_train)
y_train_pred = bag.predict(x_train)
y_test_pred = bag.predict(x_test)
bag_train = accuracy_score(y_train, y_train_pred)
bag_test = accuracy_score(y_test, y_test_pred)
print('Bagging train/test accuracies %.3f/%.3f' % (bag_train, bag_test))
x_min = x_train[:, 0].min() - 1
x_max = x_train[:, 0].max() + 1
y_min = x_train[:, 1].min() - 1
y_max = x_train[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, 0.1),
np.arange(y_min, y_max, 0.1))
f, axarr = plt.subplots(nrows=1, ncols=2, sharex='col', sharey='row', figsize=(8, 3))
for idx, clf, tt in zip([0, 1], [tree, bag], ['Decision tree', 'Bagging']):
clf.fit(x_train, y_train)
z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
z = z.reshape(xx.shape)
axarr[idx].contourf(xx, yy, z, alpha=0.3)
axarr[idx].scatter(x_train[y_train == 0, 0],
x_train[y_train == 0, 1],
c='blue', marker='^')
axarr[idx].scatter(x_train[y_train == 1, 0],
x_train[y_train == 1, 1],
c='red', marker='o')
axarr[idx].set_title(tt)
axarr[0].set_ylabel('Alcohol', fontsize=12)
plt.text(10.2, -1.2, s='Hue', ha='center', va='center', fontsize=12)
plt.show() | [
"matplotlib"
] |
b1be896fa975770286d19c431f39d27b67140223 | Python | Shatakshi2409/P106 | /P106.py | UTF-8 | 756 | 3.265625 | 3 | [] | no_license | import csv
import plotly.express as px
import numpy as np
def GetDataSource(datapath):
Marks = []
Days = []
with open (datapath) as csv_files:
df=csv.DictReader(csv_files)
for row in df:
Marks.append(float(row['Marks In Percentage']))
Days.append(float(row['Days Present']))
return {'x':Marks, 'y':Days}
# fig=px.scatter(df,x='Temperature', y='Ice-cream Sales')
# fig.show()
def FindCorrelation(datasource):
correlation=np.corrcoef(datasource['x'], datasource['y'])
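    # np.corrcoef returns a 2x2 correlation matrix; the off-diagonal entry [0, 1]
    # is the Pearson correlation coefficient between marks and days present.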
print('correlation between marks and days-->',correlation[0,1])
def Setup():
datapath='Student.csv'
datasource=GetDataSource(datapath)
FindCorrelation(datasource)
Setup() | [
"plotly"
] |
6f793c10b2ca59d225d671267f64040f1f420bff | Python | zhenlan0426/utility_functions | /plotting.py | UTF-8 | 802 | 3.03125 | 3 | [] | no_license | import matplotlib.pyplot as plt
def plotHistory(history):
"""Plot training/validation accuracy and loss. history is Callback obj from Keras
"""
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('model accuracy')
plt.legend(['train','val'],loc='lower right')
plt.show()
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.legend(['train','val'],loc='upper right')
plt.show()
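# --- Usage sketch (editor's addition): plotHistory only reads `history.history`,
# so any object exposing that dict works. _FakeHistory below is a stand-in used
# purely for illustration; with a real Keras model you would pass the object
# returned by model.fit(...). Newer Keras versions use the keys
# 'accuracy'/'val_accuracy' instead of 'acc'/'val_acc'.
class _FakeHistory:
    def __init__(self):
        self.history = {'acc': [0.70, 0.82, 0.90], 'val_acc': [0.65, 0.78, 0.85],
                        'loss': [0.90, 0.50, 0.30], 'val_loss': [1.00, 0.62, 0.41]}

if __name__ == '__main__':
    plotHistory(_FakeHistory())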
def aug_compare(image, augFun, **kwargs):
'''plot original and augmented image
    augFun takes an image and returns a transformed image
'''
f, ax = plt.subplots(ncols=2, nrows=1, figsize=(16, 8))
ax[0].imshow(image,**kwargs)
ax[1].imshow(augFun(image),**kwargs)
plt.show() | [
"matplotlib"
] |
ec2ab18778e5e0ce7828f2c0967a1b70206993a6 | Python | jiajinuiuc/Wide-energy-band-multi-pixel-chargre-sharing-corretion | /Extracted_CSC_2pix.py | UTF-8 | 44,712 | 2.546875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Objective:
Full range energy spectrum correction, try with 2-pixel charge sharing events
0. charge sharing events clustering on 60 keV(Am), 80.99 keV(Ba), 122 keV(Co)
1. full "spatial" (Charge sharing ratio) range segmentation and calculate the projection distance in each channel
3. calculate the projection distance of each charge sharing band at each channel
    4. linear interpolation of the projection distance between each band at different channels
5. based on the linear interpolation results, do the full range charge sharing correction
6. save data for the following step of matlab visualiztion
Version 0
@author: J. J. Zhang
Last update: August, 2019
"""
import sys
sys.path.append('C:\Jiajin\Mfile\Training_Sample_Analysis')
from charge_sharing_correction_v2 import charge_sharing_correction as CSC # class file, charge sharing correction
from charge_sharing_correction_v2 import Common_used_function as CF # class file, common-used plotting tools
from charge_sharing_correction_v2 import SG_Filter as SG # class file, S-G filters of different dimentions
from charge_sharing_correction_v2 import Map_generation as MG # class file, compensation map generation
from charge_sharing_correction_v2 import scattering_clustering as SC # class file, DBSCAN clustering
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import sklearn.datasets as ds
import matplotlib.colors
from sklearn.cluster import DBSCAN
from mpl_toolkits.mplot3d import Axes3D
from functools import reduce
from scipy import signal
%matplotlib qt5
################################################################################################################
################################### Load the 60-keV charge sharing events ######################################
CS_data = pd.read_csv( 'C:\Jiajin\ChaSha_2017\CSC_Data\CSC_extracted_data\Am_2Pix_new.csv' )
Energy = CS_data.iloc[:, :].values  # convert the dataframe to a numpy array
CS_dim = Energy.shape[1]
del(CS_data)
Energy_sum = np.sum( Energy, axis=1 ).reshape(-1,1)
Energy_E1_E2_sum = Energy[ np.intersect1d(np.where(Energy_sum >= 50)[0],np.where(Energy_sum <= 70)[0]) ]
Energy_E1_E2_else = np.delete( Energy, np.intersect1d(np.where(Energy_sum >= 50)[0],np.where(Energy_sum <= 70)[0]), axis =0 )
##### plot the histogram of sum_Energy within the selected ROI
CF.Histogram_lineplot(Hist=Energy_sum, Bins=300, x_lim_high=100, x_lim_low=0, color='red')
##### plot the raw scatter figures within the selected ROI
CF.Scatter2D_plot(x=Energy_E1_E2_sum[:,0],y=Energy_E1_E2_sum[:,1],x_lim_left=0, x_lim_right=65, y_lim_left=0, y_lim_right=65)
########################################################################################################
##### Initialize the system basis and the segmentation size
basis_old =np.mat( [ [1,0],
[0,1] ] ) #[x, y]
basis_new = np.mat( [ [ 1/np.sqrt(2), 1/np.sqrt(2) ],
[ -1/np.sqrt(2), 1/np.sqrt(2) ] ] )
seg_size = 0.01
####################### DBSCAN Clustering and Plot the results #########################################
Energy_sub = np.vstack( ( Energy_E1_E2_sum[:,0], Energy_E1_E2_sum[:,1] ) ).T
Energy_E1_E2_sum = np.sum( Energy_sub, axis=1 ).reshape(-1,1)
##### Model Fitting I
# Set Fitting Parameters
y_hat1, y_unique1, core_indices1 = SC.Clustering(radius=1, density=30, data=Energy_sub)
##### Model Fitting II
# Set Fitting Parameters
y_hat2, y_unique2, core_indices2 = SC.Clustering(radius=1, density=46, data=Energy_sub)
# Plot the DBSCAN clustering results
clrs = plt.cm.Spectral(np.linspace(0, 0.8, 2*y_unique2.size))
plt.figure(figsize=(12, 12), facecolor='w')
plt.grid('on')
for k, clr in zip(y_unique2, clrs):
cur = (y_hat2 == k)
if k == -1:
plt.scatter(Energy_sub[cur, 0], Energy_sub[cur, 1], s=5, c='k') # non-clustered points
continue
plt.scatter(Energy_sub[cur, 0], Energy_sub[cur, 1], s=5, c=clr, edgecolors='k')
plt.scatter(Energy_sub[cur & core_indices2, 0], Energy_sub[cur & core_indices2, 1], s=10, c=clr, \
marker='o', edgecolors='k')
plt.xlabel('Energy (keV)',fontsize=20)
plt.ylabel('Energy (keV)',fontsize=20)
plt.xlim(0, 70)
plt.ylim(0, 70)
plt.grid(True)
###### check each DBSCAN clustering results
clrs = plt.cm.Spectral(np.linspace(0, 0.8, 2*y_unique1.size))
plt.figure(figsize=(12, 12), facecolor='w')
plt.grid('on')
for k, clr in zip(y_unique1, clrs):
cur = (y_hat1 == k)
if k == 5:
plt.scatter(Energy_sub[cur, 0], Energy_sub[cur, 1], s=5, c='k') # non-clustered points
continue
# plt.scatter(Energy_sub[cur, 0], Energy_sub[cur, 1], s=5, c=clr, edgecolors='k')
# plt.scatter(Energy_sub[cur & core_indices2, 0], Energy_sub[cur & core_indices2, 1], s=10, c=clr, \
# marker='o', edgecolors='k')
plt.xlabel('Energy (keV)',fontsize=20)
plt.ylabel('Energy (keV)',fontsize=20)
plt.xlim(0, 70)
plt.ylim(0, 70)
plt.grid(True)
########################################################################################################
####################### Extract the cluster in the ROI #################################################
##### Reorganize the clustered scattering points
Energy = np.vstack( (Energy_sub, Energy_E1_E2_else) )
Energy_sum = np.sum( Energy, axis=1 ).reshape(-1,1)
y_hat = np.array([-1]*len(Energy))
y_hat[np.where( y_hat1 != -1 ),] = 0
y_hat[reduce( np.intersect1d, (np.where(y_hat2 == 2), np.where(Energy_sum >= 54)) ), ] = 1
y_hat[reduce( np.intersect1d, (np.where(y_hat2 == 3), np.where(Energy_sum >= 54)) ), ] = 2
y_hat[reduce( np.intersect1d, (np.where(y_hat == 0)[0],\
np.where(Energy_sum >= 55)[0],\
np.where(Energy[:,0] >= 20)[0],\
np.where(Energy[:,0] <= 40)[0]) ), ] = -1
y_unique = np.unique(y_hat)
cluster_60_lab_0 = Energy[np.where( ( y_hat == 0 ) )]
cluster_60_lab_1 = Energy[np.where( ( y_hat == 1 ) )]
cluster_60_lab_2 = Energy[np.where( ( y_hat == 2 ) )]
cluster_60_lab_noise = Energy[np.where( ( y_hat == -1 ) )]
###### check each DBSCAN clustering results
plt.figure(figsize=(12, 12), facecolor='w')
plt.grid('on')
for k in y_unique:
cur = (y_hat == k)
if k == -1:
plt.scatter(Energy[cur, 0], Energy[cur, 1], s=2, c='k') # non-clustered points
continue
if k == 0:
plt.scatter(Energy[cur, 0], Energy[cur, 1], s=7, c='b') # non-clustered points
continue
if k == 1:
plt.scatter(Energy[cur, 0], Energy[cur, 1], s=7, c='r') # non-clustered points
continue
if k == 2:
plt.scatter(Energy[cur, 0], Energy[cur, 1], s=7, c='y') # non-clustered points
continue
plt.xlabel('Energy (keV)',fontsize=20)
plt.ylabel('Energy (keV)',fontsize=20)
plt.xticks(fontsize=20)
plt.yticks(fontsize=20)
plt.xlim(0, 70)
plt.ylim(0, 70)
plt.grid(True)
###### Save data: Figure 5, 60-keV (Am), 2-pixel, scattering + DBSCAN labels
data_save = np.hstack( (Energy, y_hat.reshape(-1,1)) ) ### ( E1, E2, label)
CF.SaveFiles(var=data_save, var_name=['E1', 'E2', 'label'], \
location="C:\Jiajin\ChaSha_2017\CSC_Data\CSC_extracted_data\Figure_data\Figure_5.csv")
del(data_save)
########################################################################################################
####################### CSC --- parameters measurement #######################################
##### Initialize the CSC object
CSC_60_2pix = CSC( CS_dim=CS_dim, basis_old=basis_old, basis_new=basis_new, peak_energy=60, max_energy_range=140, seg_size=seg_size )
##### Calculate the MC-curve
wet_x_60, wet_w_60, shift_w_60, seg_unit_60, Energy_rotation = CSC_60_2pix.Pix2_Measurement( CS_data_labeled = Energy )
Energy_rotation = np.hstack( ( Energy_rotation[:,0], Energy_rotation[:,1] ) )
##### Calculate the rotated (Ratio, E-sum) points
wet_x_60, wet_w_60, shift_w_60, seg_unit_60, Energy_rot = CSC_60_2pix.Pix2_Measurement( CS_data_labeled = cluster_60_lab_0 )
##### Extend MC-curve to -1 and 1
left = min( min( np.where(wet_w_60 != 0) ) )
right = max( max( np.where(wet_w_60 != 0) ) )
wet_w_60[0:left+1,] = np.linspace(60, wet_w_60[left,], num=(left+1))
wet_w_60[right:len(wet_w_60),] = np.linspace(wet_w_60[right,], 60, num=(len(wet_w_60)-right))
left = min( min( np.where(shift_w_60!=0) ) )
right = max( max( np.where(shift_w_60!=0) ) )
shift_w_60[0:left+1,] = np.linspace(0, shift_w_60[left,], num=(left+1))
shift_w_60[right:len(shift_w_60),] = np.linspace(shift_w_60[right,], 0, num=(len(shift_w_60)-right))
##### Calculate the S-G filtered MC-curve
wet_w_60_filter = np.zeros(len(wet_w_60))
num = len(wet_w_60)//2 * 2 - 1
wet_w_60_filter = signal.savgol_filter(wet_w_60, num, 7)
shift_w_60_filter = np.zeros(len(shift_w_60))
num = len(shift_w_60)//2 * 2 - 1
shift_w_60_filter = signal.savgol_filter(shift_w_60, num, 7)
##### Check the scattering plot and MC plot
plt.figure(figsize=(12, 12), facecolor='w')
plt.grid('on')
for k in y_unique:
cur = (y_hat == k)
if k == -1:
plt.scatter(Energy_rotation[cur,0].tolist(), Energy_rotation[cur,1].tolist(), s=2, c='k') # non-clustered points
continue
if k == 0:
plt.scatter(Energy_rotation[cur,0].tolist(), Energy_rotation[cur,1].tolist(), s=7, c='b') # non-clustered points
continue
if k == 1:
plt.scatter(Energy_rotation[cur,0].tolist(), Energy_rotation[cur,1].tolist(), s=7, c='r') # non-clustered points
continue
if k == 2:
plt.scatter(Energy_rotation[cur,0].tolist(), Energy_rotation[cur,1].tolist(), s=7, c='y') # non-clustered points
continue
plt.xlabel('Energy (keV)',fontsize=20)
plt.ylabel('Energy (keV)',fontsize=20)
plt.xticks(fontsize=20)
plt.yticks(fontsize=20)
plt.xlim(-1, 1)
plt.ylim(0, 70)
plt.grid(True)
CF.Line_plot(x=wet_x_60, y=wet_w_60, color='red', x_label='Spatial-axis', y_label='Energy-axis')
CF.Line_plot(x=wet_x_60, y=shift_w_60, color='blue', x_label='Spatial-axis', y_label='Energy-axis')
CF.Line_plot(x=wet_x_60, y=wet_w_60_filter, color='red', x_label='Spatial-axis', y_label='Energy-axis')
CF.Line_plot(x=wet_x_60, y=shift_w_60_filter, color='blue', x_label='Spatial-axis', y_label='Energy-axis')
##### fluorescence events correction
wet_x_60, wet_w_60_fluo1, shift_w_60_fluo1, seg_unit_60, Energy_rotation_fluo1 = CSC_60_2pix.Pix2_Measurement( CS_data_labeled = cluster_60_lab_1 )
wet_x_60, wet_w_60_fluo2, shift_w_60_fluo2, seg_unit_60, Energy_rotation_fluo2 = CSC_60_2pix.Pix2_Measurement( CS_data_labeled = cluster_60_lab_2 )
##### Calculate the S-G filtered MC-curve
wet_w_60_fluo1_filter = np.zeros(len(wet_w_60_fluo1))
left = min( min( np.where(wet_w_60_fluo1!=0) ) )
right = max( max( np.where(wet_w_60_fluo1!=0) ) )
num = (right-left)//2 * 2 + 1
wet_w_60_fluo1_filter[left:right+1,] = signal.savgol_filter(wet_w_60_fluo1[left:right+1,], num, 5)
wet_w_60_fluo2_filter = np.zeros(len(wet_w_60_fluo2))
left = min( min( np.where(wet_w_60_fluo2!=0) ) )
right = max( max( np.where(wet_w_60_fluo2!=0) ) )
num = (right-left)//2 * 2 + 1
wet_w_60_fluo2_filter[left:right+1,] = signal.savgol_filter(wet_w_60_fluo2[left:right+1,], num, 5)
##### Check fluo
CF.Line_plot(x=wet_x_60, y=wet_w_60_fluo1, color='red', x_label='Spatial-axis', y_label='Energy-axis')
CF.Line_plot(x=wet_x_60, y=wet_w_60_fluo2, color='blue', x_label='Spatial-axis', y_label='Energy-axis')
CF.Line_plot(x=wet_x_60, y=wet_w_60_fluo1_filter, color='red', x_label='Spatial-axis', y_label='Energy-axis')
CF.Line_plot(x=wet_x_60, y=wet_w_60_fluo2_filter, color='blue', x_label='Spatial-axis', y_label='Energy-axis')
###### Save data: Figures 6-8, 60-keV (Am), 2-pixel, transformed events and loss/compensation curves
data_save = np.hstack( (Energy_rotation, y_hat.reshape(-1,1)) )
CF.SaveFiles(var=data_save, var_name=['Ratio', 'E', 'label'], \
location="C:\Jiajin\ChaSha_2017\CSC_Data\CSC_extracted_data\Figure_data\Figure_678_Am_events_transformed_labeled.csv")
data_save = np.hstack( (wet_x_60.reshape(-1,1), wet_w_60_fluo1.reshape(-1,1), wet_w_60_fluo1_filter.reshape(-1,1), \
wet_w_60_fluo2.reshape(-1,1), wet_w_60_fluo2_filter.reshape(-1,1)) )
CF.SaveFiles(var=data_save, var_name=['X', 'wet_w_60_fluo1', 'wet_w_60_fluo1_filter', 'wet_w_60_fluo2', 'wet_w_60_fluo2_filter'], \
location="C:\Jiajin\ChaSha_2017\CSC_Data\CSC_extracted_data\Figure_data\Figure_678_Am_fluo12_loss.csv")
data_save = np.hstack( (wet_x_60.reshape(-1,1), wet_w_60.reshape(-1,1), wet_w_60_filter.reshape(-1,1)) )
CF.SaveFiles(var=data_save, var_name=['X', 'loss', 'loss_SG'], \
location="C:\Jiajin\ChaSha_2017\CSC_Data\CSC_extracted_data\Figure_data\Figure_678_Am_loss.csv")
data_save = np.hstack( (wet_x_60.reshape(-1,1), shift_w_60.reshape(-1,1), shift_w_60_filter.reshape(-1,1)) )
CF.SaveFiles(var=data_save, var_name=['X', 'comp', 'comp_SG'], \
location="C:\Jiajin\ChaSha_2017\CSC_Data\CSC_extracted_data\Figure_data\Figure_678_Am_compensate.csv")
del(data_save)
################################################################################################################
################################################################################################################
################################### Load the 81-keV charge sharing events ######################################
CS_data = pd.read_csv( 'C:\Jiajin\ChaSha_2017\CSC_Data\CSC_extracted_data\Ba_2Pix_new.csv' )
Energy = CS_data.iloc[:, :].values  # convert the dataframe to a numpy array
CS_dim = Energy.shape[1]
del(CS_data)
Energy_sum = np.sum( Energy, axis=1 ).reshape(-1,1)
Energy_E1_E2_sum = Energy[ np.intersect1d(np.where(Energy_sum >= 60)[0],np.where(Energy_sum <= 90)[0]) ]
Energy_E1_E2_else = np.delete( Energy, np.intersect1d(np.where(Energy_sum >= 60)[0],np.where(Energy_sum <= 90)[0]), axis =0 )
##### plot the histogram of sum_Energy within the selected ROI
CF.Histogram_lineplot(Hist=Energy_sum, Bins=300, x_lim_high=100, x_lim_low=0, color='red')
##### plot the raw scatter figures within the selected ROI
CF.Scatter2D_plot(x=Energy_E1_E2_sum[:,0],y=Energy_E1_E2_sum[:,1],x_lim_left=0, x_lim_right=65, y_lim_left=0, y_lim_right=65)
########################################################################################################
####################### DBSCAN Clustering and Plot the results #########################################
Energy_sub = np.vstack( ( Energy_E1_E2_sum[:,0], Energy_E1_E2_sum[:,1] ) ).T
Energy_E1_E2_sum = np.sum( Energy_sub, axis=1 ).reshape(-1,1)
##### Model Fitting I
# Set Fitting Parameters
y_hat1, y_unique1, core_indices1 = SC.Clustering(radius=1, density=15, data=Energy_sub)
##### Model Fitting II
# Set Fitting Parameters
y_hat2, y_unique2, core_indices2 = SC.Clustering(radius=1, density=26, data=Energy_sub)
########################################################################################################
####################### Extract the cluster in the ROI #################################################
##### Reorganize the clustered scattering points
Energy = np.vstack( (Energy_sub, Energy_E1_E2_else) )
Energy_sum = np.sum( Energy, axis=1 ).reshape(-1,1)
y_hat = np.array([-1]*len(Energy))
y_hat[reduce( np.intersect1d, (np.where(y_hat1 != -1), np.where(y_hat1 != 4)) ),] = 0
y_hat[reduce( np.intersect1d, (np.where(y_hat2 == 2), np.where(Energy_sum >= 75.5)) ), ] = 1
y_hat[reduce( np.intersect1d, (np.where(y_hat2 == 3), np.where(Energy_sum >= 75.5)) ), ] = 2
y_hat[reduce( np.intersect1d, (np.where(y_hat == 0)[0],\
np.where(Energy_sum >= 75.5)[0],\
np.where(Energy[:,0] >= 40)[0],\
np.where(Energy[:,0] <= 60)[0]) ), ] = -1
y_hat[reduce( np.intersect1d, (np.where(y_hat == 0)[0],\
np.where(Energy_sum >= 75.5)[0],\
np.where(Energy[:,0] >= 20)[0],\
np.where(Energy[:,0] <= 40)[0]) ), ] = -1
y_unique = np.unique(y_hat)
cluster_81_lab_0 = Energy[np.where( ( y_hat == 0 ) )]
cluster_81_lab_1 = Energy[np.where( ( y_hat == 1 ) )]
cluster_81_lab_2 = Energy[np.where( ( y_hat == 2 ) )]
cluster_81_lab_noise = Energy[np.where( ( y_hat == -1 ) )]
###### check each DBSCAN clustering results
plt.figure(figsize=(12, 12), facecolor='w')
plt.grid('on')
for k in y_unique:
cur = (y_hat == k)
if k == -1:
plt.scatter(Energy[cur, 0], Energy[cur, 1], s=2, c='k') # non-clustered points
continue
if k == 0:
plt.scatter(Energy[cur, 0], Energy[cur, 1], s=7, c='b') # non-clustered points
continue
if k == 1:
plt.scatter(Energy[cur, 0], Energy[cur, 1], s=7, c='r') # non-clustered points
continue
if k == 2:
plt.scatter(Energy[cur, 0], Energy[cur, 1], s=7, c='y') # non-clustered points
continue
plt.xlabel('Energy (keV)',fontsize=20)
plt.ylabel('Energy (keV)',fontsize=20)
plt.xticks(fontsize=20)
plt.yticks(fontsize=20)
plt.xlim(0, 90)
plt.ylim(0, 90)
plt.grid(True)
###### Save data: Figure 9, 81-keV (Ba), 2-pixel, scattering + DBSCAN labels
data_save = np.hstack( (Energy, y_hat.reshape(-1,1)) ) ### ( E1, E2, label)
CF.SaveFiles(var=data_save, var_name=['E1', 'E2', 'label'], \
location="C:\Jiajin\ChaSha_2017\CSC_Data\CSC_extracted_data\Figure_data\Figure_9_Ba_DBSCAN.csv")
del(data_save)
########################################################################################################
####################### "Rot -> MC Shifting -> Rot" CSC function #######################################
##### Initialize the CSC object
CSC_81_2pix = CSC( CS_dim=CS_dim, basis_old=basis_old, basis_new=basis_new, peak_energy=81, max_energy_range=140, seg_size=seg_size )
##### Calculate the MC-curve
wet_x_81, wet_w_81, shift_w_81, seg_unit_81, Energy_rotation = CSC_81_2pix.Pix2_Measurement( CS_data_labeled = Energy )
Energy_rotation = np.hstack( ( Energy_rotation[:,0], Energy_rotation[:,1] ) )
##### Calculate the rotated (Ratio, E-sum) points
wet_x_81, wet_w_81, shift_w_81, seg_unit_81, Energy_rot = CSC_81_2pix.Pix2_Measurement( CS_data_labeled = cluster_81_lab_0 )
##### Extend MC-curve to -1 and 1
left = min( min( np.where(wet_w_81 != 0) ) )
right = max( max( np.where(wet_w_81 != 0) ) )
wet_w_81[0:left+1,] = np.linspace(81, wet_w_81[left,], num=(left+1))
wet_w_81[right:len(wet_w_81),] = np.linspace(wet_w_81[right,], 81, num=(len(wet_w_81)-right))
left = min( min( np.where(shift_w_81!=0) ) )
right = max( max( np.where(shift_w_81!=0) ) )
shift_w_81[0:left+1,] = np.linspace(0, shift_w_81[left,], num=(left+1))
shift_w_81[right:len(shift_w_81),] = np.linspace(shift_w_81[right,], 0, num=(len(shift_w_81)-right))
##### Calculate the S-G filtered MC-curve
wet_w_81_filter = np.zeros(len(wet_w_81))
num = len(wet_w_81)//2 * 2 - 1
wet_w_81_filter = signal.savgol_filter(wet_w_81, num, 7)
shift_w_81_filter = np.zeros(len(shift_w_81))
num = len(shift_w_81)//2 * 2 - 1
shift_w_81_filter = signal.savgol_filter(shift_w_81, num, 7)
wet_w_81_filter = 81 - shift_w_81_filter
##### Check the scattering plot and MC plot
plt.figure(figsize=(12, 12), facecolor='w')
plt.grid('on')
for k in y_unique:
cur = (y_hat == k)
if k == -1:
plt.scatter(Energy_rotation[cur,0].tolist(), Energy_rotation[cur,1].tolist(), s=2, c='k') # non-clustered points
continue
if k == 0:
plt.scatter(Energy_rotation[cur,0].tolist(), Energy_rotation[cur,1].tolist(), s=7, c='b') # non-clustered points
continue
if k == 1:
plt.scatter(Energy_rotation[cur,0].tolist(), Energy_rotation[cur,1].tolist(), s=7, c='r') # non-clustered points
continue
if k == 2:
plt.scatter(Energy_rotation[cur,0].tolist(), Energy_rotation[cur,1].tolist(), s=7, c='y') # non-clustered points
continue
plt.xlabel('Energy (keV)',fontsize=20)
plt.ylabel('Energy (keV)',fontsize=20)
plt.xticks(fontsize=20)
plt.yticks(fontsize=20)
plt.xlim(-1, 1)
plt.ylim(0, 90)
plt.grid(True)
CF.Scatter2D_plot(x=Energy_rotation[:,0].tolist(), y=Energy_rotation[:,1].tolist(), x_lim_left=-1, \
x_lim_right=1, y_lim_left=0, y_lim_right=65)
CF.Line_plot(x=wet_x_81, y=wet_w_81, color='red', x_label='Spatial-axis', y_label='Energy-axis')
CF.Line_plot(x=wet_x_81, y=shift_w_81, color='blue', x_label='Spatial-axis', y_label='Energy-axis')
CF.Line_plot(x=wet_x_81, y=wet_w_81_filter, color='red', x_label='Spatial-axis', y_label='Energy-axis')
CF.Line_plot(x=wet_x_81, y=shift_w_81_filter, color='blue', x_label='Spatial-axis', y_label='Energy-axis')
##### fluorescence events correction
wet_x_81, wet_w_81_fluo1, shift_w_81_fluo1, seg_unit_81, Energy_rotation = CSC_81_2pix.Pix2_Measurement( CS_data_labeled = cluster_81_lab_1 )
wet_x_81, wet_w_81_fluo2, shift_w_81_fluo2, seg_unit_81, Energy_rotation = CSC_81_2pix.Pix2_Measurement( CS_data_labeled = cluster_81_lab_2 )
##### Calculate the S-G filtered MC-curve
wet_w_81_fluo1_filter = np.zeros(len(wet_w_81_fluo1))
left = min( min( np.where(wet_w_81_fluo1!=0) ) )
right = max( max( np.where(wet_w_81_fluo1!=0) ) )
num = (right-left)//2 * 2 + 1
wet_w_81_fluo1_filter[left:right+1,] = signal.savgol_filter(wet_w_81_fluo1[left:right+1,], num, 5)
wet_w_81_fluo2_filter = np.zeros(len(wet_w_81_fluo2))
left = min( min( np.where(wet_w_81_fluo2!=0) ) )
right = max( max( np.where(wet_w_81_fluo2!=0) ) )
num = (right-left)//2 * 2 + 1
wet_w_81_fluo2_filter[left:right+1,] = signal.savgol_filter(wet_w_81_fluo2[left:right+1,], num, 5)
##### Check fluo
CF.Line_plot(x=wet_x_81, y=wet_w_81_fluo1, color='red', x_label='Spatial-axis', y_label='Energy-axis')
CF.Line_plot(x=wet_x_81, y=wet_w_81_fluo2, color='blue', x_label='Spatial-axis', y_label='Energy-axis')
CF.Line_plot(x=wet_x_81, y=wet_w_81_fluo1_filter, color='red', x_label='Spatial-axis', y_label='Energy-axis')
CF.Line_plot(x=wet_x_81, y=wet_w_81_fluo2_filter, color='blue', x_label='Spatial-axis', y_label='Energy-axis')
###### Save data: Figure 9, 81-keV (Ba), 2-pixel, transformed events and loss/compensation curves
data_save = np.hstack( (Energy_rotation, y_hat.reshape(-1,1)) )
CF.SaveFiles(var=data_save, var_name=['Ratio', 'E', 'label'], \
location="C:\Jiajin\ChaSha_2017\CSC_Data\CSC_extracted_data\Figure_data\Figure_9_Ba_events_transformed_labeled.csv")
data_save = np.hstack( (wet_x_81.reshape(-1,1), wet_w_81_fluo1.reshape(-1,1), wet_w_81_fluo1_filter.reshape(-1,1), \
wet_w_81_fluo2.reshape(-1,1), wet_w_81_fluo2_filter.reshape(-1,1)) )
CF.SaveFiles(var=data_save, var_name=['X', 'wet_w_81_fluo1', 'wet_w_81_fluo1_filter', 'wet_w_81_fluo2', 'wet_w_81_fluo2_filter'], \
location="C:\Jiajin\ChaSha_2017\CSC_Data\CSC_extracted_data\Figure_data\Figure_9_Ba_fluo12_loss.csv")
data_save = np.hstack( (wet_x_81.reshape(-1,1), wet_w_81.reshape(-1,1), wet_w_81_filter.reshape(-1,1)) )
CF.SaveFiles(var=data_save, var_name=['X', 'loss', 'loss_SG'], \
location="C:\Jiajin\ChaSha_2017\CSC_Data\CSC_extracted_data\Figure_data\Figure_9_Ba_loss.csv")
data_save = np.hstack( (wet_x_81.reshape(-1,1), shift_w_81.reshape(-1,1), shift_w_81_filter.reshape(-1,1)) )
CF.SaveFiles(var=data_save, var_name=['X', 'comp', 'comp_SG'], \
location="C:\Jiajin\ChaSha_2017\CSC_Data\CSC_extracted_data\Figure_data\Figure_9_Ba_compensate.csv")
del(data_save)
################################################################################################################
################################################################################################################
################################### Load the 122-keV charge sharing events ######################################
CS_data = pd.read_csv( 'C:\Jiajin\ChaSha_2017\CSC_Data\CSC_extracted_data\Co_2Pix_new.csv' )
Energy = CS_data.iloc[:, :].values  # convert the dataframe to a numpy array
CS_dim = Energy.shape[1]
del(CS_data)
Energy_sum = np.sum( Energy, axis=1 ).reshape(-1,1)
Energy_E1_E2_sum = Energy[ np.intersect1d(np.where(Energy_sum >= 100)[0],np.where(Energy_sum <= 130)[0]) ]
Energy_E1_E2_else = np.delete( Energy, np.intersect1d(np.where(Energy_sum >= 100)[0],np.where(Energy_sum <= 130)[0]), axis =0 )
##### plot the histogram of sum_Energy within the selected ROI
CF.Histogram_lineplot(Hist=Energy_sum, Bins=300, x_lim_high=100, x_lim_low=0, color='red')
##### plot the raw scatter figures within the selected ROI
CF.Scatter2D_plot(x=Energy_E1_E2_sum[:,0],y=Energy_E1_E2_sum[:,1],x_lim_left=0, x_lim_right=65, y_lim_left=0, y_lim_right=65)
########################################################################################################
####################### DBSCAN Clustering and Plot the results #########################################
Energy_sub = np.vstack( ( Energy_E1_E2_sum[:,0], Energy_E1_E2_sum[:,1] ) ).T
Energy_E1_E2_sum = np.sum( Energy_sub, axis=1 ).reshape(-1,1)
##### Model Fitting I
# Set Fitting Parameters
y_hat1, y_unique1, core_indices1 = SC.Clustering(radius=2, density=14, data=Energy_sub)
##### Model Fitting II
# Set Fitting Parameters
y_hat2, y_unique2, core_indices2 = SC.Clustering(radius=1, density=16, data=Energy_sub)
########################################################################################################
####################### Extract the cluster in the ROI #################################################
##### Reorganize the clustered scattering points
Energy = np.vstack( (Energy_sub, Energy_E1_E2_else) )
Energy_sum = np.sum( Energy, axis=1 ).reshape(-1,1)
y_hat = np.array([-1]*len(Energy))
y_hat[reduce( np.intersect1d, (np.where(y_hat1 != -1), np.where(y_hat1 != 4)) ),] = 0
y_hat[reduce( np.intersect1d, (np.where(y_hat2 == 3), np.where(Energy_sum >= 75.5)) ), ] = 1
y_hat[reduce( np.intersect1d, (np.where(y_hat2 == 4), np.where(Energy_sum >= 75.5)) ), ] = 2
y_hat[reduce( np.intersect1d, (np.where(y_hat == 0)[0],\
np.where(Energy_sum >= 114)[0],\
np.where(Energy[:,0] >= 80)[0],\
np.where(Energy[:,0] <= 100)[0]) ), ] = 2
y_hat[reduce( np.intersect1d, (np.where(y_hat == 0)[0],\
np.where(Energy_sum >= 116)[0],\
np.where(Energy[:,0] >= 100)[0],\
np.where(Energy[:,0] <= 104)[0]) ), ] = 2
y_hat[reduce( np.intersect1d, (np.where(y_hat == 0)[0],\
np.where(Energy_sum >= 116)[0],\
np.where(Energy[:,0] >= 18)[0],\
np.where(Energy[:,0] <= 30)[0]) ), ] = 1
y_hat[reduce( np.intersect1d, (np.where(y_hat == 0)[0],\
np.where(Energy_sum >= 114)[0],\
np.where(Energy[:,0] >= 30)[0],\
np.where(Energy[:,0] <= 40)[0]) ), ] = 1
y_hat[reduce( np.intersect1d, (np.where(y_hat == 0)[0],\
np.where(Energy_sum >= 122)[0],\
np.where(Energy[:,0] >= 110)[0]) ), ] = -1
y_hat[reduce( np.intersect1d, (np.where(y_hat == 0)[0],\
np.where(Energy_sum <= 110)[0],\
np.where(Energy[:,0] >= 0)[0],\
np.where(Energy[:,0] <= 10)[0]) ), ] = -1
y_hat[reduce( np.intersect1d, (np.where(y_hat == 0)[0],\
np.where(Energy_sum <= 110)[0],\
np.where(Energy[:,0] >= 100)[0],\
np.where(Energy[:,0] <= 110)[0]) ), ] = -1
y_hat[reduce( np.intersect1d, (np.where(y_hat == 0)[0],\
np.where(Energy_sum <= 108)[0],\
np.where(Energy[:,0] >= 90)[0],\
np.where(Energy[:,0] <= 100)[0]) ), ] = -1
y_unique = np.unique(y_hat)
cluster_122_lab_0 = Energy[np.where( ( y_hat == 0 ) )]
cluster_122_lab_1 = Energy[np.where( ( y_hat == 1 ) )]
cluster_122_lab_2 = Energy[np.where( ( y_hat == 2 ) )]
cluster_122_lab_noise = Energy[np.where( ( y_hat == -1 ) )]
###### check each DBSCAN clustering results
plt.figure(figsize=(12, 12), facecolor='w')
plt.grid('on')
for k in y_unique:
cur = (y_hat == k)
if k == -1:
plt.scatter(Energy[cur, 0], Energy[cur, 1], s=2, c='k') # non-clustered points
continue
if k == 0:
plt.scatter(Energy[cur, 0], Energy[cur, 1], s=7, c='b') # non-clustered points
continue
if k == 1:
plt.scatter(Energy[cur, 0], Energy[cur, 1], s=7, c='r') # non-clustered points
continue
if k == 2:
plt.scatter(Energy[cur, 0], Energy[cur, 1], s=7, c='y') # non-clustered points
continue
plt.xlabel('Energy (keV)',fontsize=20)
plt.ylabel('Energy (keV)',fontsize=20)
plt.xticks(fontsize=20)
plt.yticks(fontsize=20)
plt.xlim(0, 140)
plt.ylim(0, 140)
plt.grid(True)
###### Save data: Figure 9, 122-keV (Co), 2-pixel, scattering + DBSCAN labels
data_save = np.hstack( (Energy, y_hat.reshape(-1,1)) ) ### ( E1, E2, label)
CF.SaveFiles(var=data_save, var_name=['E1', 'E2', 'label'], \
location="C:\Jiajin\ChaSha_2017\CSC_Data\CSC_extracted_data\Figure_data\Figure_9_Co_DBSCAN.csv")
del(data_save)
########################################################################################################
####################### "Rot -> MC Shifting -> Rot" CSC function #######################################
##### Initialize the CSC object
CSC_122_2pix = CSC( CS_dim=CS_dim, basis_old=basis_old, basis_new=basis_new, peak_energy=122, max_energy_range=140, seg_size=seg_size )
##### Calculate the rotated (Ratio, E-sum) points
wet_x_122, wet_w_122, shift_w_122, seg_unit_122, Energy_rotation = CSC_122_2pix.Pix2_Measurement( CS_data_labeled = Energy )
Energy_rotation = np.hstack( ( Energy_rotation[:,0], Energy_rotation[:,1] ) )
##### Calculate the MC-curve
wet_x_122, wet_w_122, shift_w_122, seg_unit_122, Energy_rot = CSC_122_2pix.Pix2_Measurement( CS_data_labeled = cluster_122_lab_0 )
##### Extend MC-curve to -1 and 1
left = min( min( np.where(wet_w_122 != 0) ) )
right = max( max( np.where(wet_w_122 != 0) ) )
wet_w_122[0:left+1,] = np.linspace(122, wet_w_122[left,], num=(left+1))
wet_w_122[right:len(wet_w_122),] = np.linspace(wet_w_122[right,], 122, num=(len(wet_w_122)-right))
left = min( min( np.where(shift_w_122!=0) ) )
right = max( max( np.where(shift_w_122!=0) ) )
shift_w_122[0:left+1,] = np.linspace(0, shift_w_122[left,], num=(left+1))
shift_w_122[right:len(shift_w_122),] = np.linspace(shift_w_122[right,], 0, num=(len(shift_w_122)-right))
##### Calculate the S-G filtered MC-curve
wet_w_122_filter = np.zeros(len(wet_w_122))
num = len(wet_w_122)//2 * 2 - 1
wet_w_122_filter = signal.savgol_filter(wet_w_122, num, 7)
shift_w_122_filter = np.zeros(len(shift_w_122))
num = len(shift_w_122)//2 * 2 - 1
shift_w_122_filter = signal.savgol_filter(shift_w_122, num, 7)
##### Check the scattering plot and MC plot
plt.figure(figsize=(12, 12), facecolor='w')
plt.grid('on')
for k in y_unique:
cur = (y_hat == k)
if k == -1:
plt.scatter(Energy_rotation[cur,0].tolist(), Energy_rotation[cur,1].tolist(), s=2, c='k') # non-clustered points
continue
if k == 0:
plt.scatter(Energy_rotation[cur,0].tolist(), Energy_rotation[cur,1].tolist(), s=7, c='b') # non-clustered points
continue
if k == 1:
plt.scatter(Energy_rotation[cur,0].tolist(), Energy_rotation[cur,1].tolist(), s=7, c='r') # non-clustered points
continue
if k == 2:
plt.scatter(Energy_rotation[cur,0].tolist(), Energy_rotation[cur,1].tolist(), s=7, c='y') # non-clustered points
continue
plt.xlabel('Energy (keV)',fontsize=20)
plt.ylabel('Energy (keV)',fontsize=20)
plt.xticks(fontsize=20)
plt.yticks(fontsize=20)
plt.xlim(-1, 1)
plt.ylim(0, 140)
plt.grid(True)
CF.Line_plot(x=wet_x_122, y=wet_w_122, color='red', x_label='Spatial-axis', y_label='Energy-axis')
CF.Line_plot(x=wet_x_122, y=shift_w_122, color='blue', x_label='Spatial-axis', y_label='Energy-axis')
CF.Line_plot(x=wet_x_122, y=wet_w_122_filter, color='red', x_label='Spatial-axis', y_label='Energy-axis')
CF.Line_plot(x=wet_x_122, y=shift_w_122_filter, color='blue', x_label='Spatial-axis', y_label='Energy-axis')
##### fluorescence events correction
wet_x_122, wet_w_122_fluo1, shift_w_122_fluo1, seg_unit_122, Energy_rotation_fluo1 = CSC_122_2pix.Pix2_Measurement( CS_data_labeled = cluster_122_lab_1 )
wet_x_122, wet_w_122_fluo2, shift_w_122_fluo2, seg_unit_122, Energy_rotation_fluo2 = CSC_122_2pix.Pix2_Measurement( CS_data_labeled = cluster_122_lab_2 )
##### Calculate the S-G filtered MC-curve
wet_w_122_fluo1_filter = np.zeros(len(wet_w_122_fluo1))
left = min( min( np.where(wet_w_122_fluo1!=0) ) )
right = max( max( np.where(wet_w_122_fluo1!=0) ) )
num = (right-left)//2 * 2 + 1
wet_w_122_fluo1_filter[left:right+1,] = signal.savgol_filter(wet_w_122_fluo1[left:right+1,], num, 7)
wet_w_122_fluo2_filter = np.zeros(len(wet_w_122_fluo2))
left = min( min( np.where(wet_w_122_fluo2!=0) ) )
right = max( max( np.where(wet_w_122_fluo2!=0) ) )
num = (right-left)//2 * 2 + 1
wet_w_122_fluo2_filter[left:right+1,] = signal.savgol_filter(wet_w_122_fluo2[left:right+1,], num, 7)
##### Check fluo
CF.Line_plot(x=wet_x_122, y=wet_w_122_fluo1, color='red', x_label='Spatial-axis', y_label='Energy-axis')
CF.Line_plot(x=wet_x_122, y=wet_w_122_fluo2, color='blue', x_label='Spatial-axis', y_label='Energy-axis')
CF.Line_plot(x=wet_x_122, y=wet_w_122_fluo1_filter, color='red', x_label='Spatial-axis', y_label='Energy-axis')
CF.Line_plot(x=wet_x_122, y=wet_w_122_fluo2_filter, color='blue', x_label='Spatial-axis', y_label='Energy-axis')
###### Save data: Figure 9, 122-keV (Co), 2-pixel, transformed events and loss/compensation curves
data_save = np.hstack( (Energy_rotation, y_hat.reshape(-1,1)) )
CF.SaveFiles(var=data_save, var_name=['Ratio', 'E', 'label'], \
location="C:\Jiajin\ChaSha_2017\CSC_Data\CSC_extracted_data\Figure_data\Figure_9_Co_events_transformed_labeled.csv")
data_save = np.hstack( (wet_x_122.reshape(-1,1), wet_w_122_fluo1.reshape(-1,1), wet_w_122_fluo1_filter.reshape(-1,1), \
wet_w_122_fluo2.reshape(-1,1), wet_w_122_fluo2_filter.reshape(-1,1)) )
CF.SaveFiles(var=data_save, var_name=['X', 'wet_w_122_fluo1', 'wet_w_122_fluo1_filter', 'wet_w_122_fluo2', 'wet_w_122_fluo2_filter'], \
location="C:\Jiajin\ChaSha_2017\CSC_Data\CSC_extracted_data\Figure_data\Figure_9_Co_fluo12_loss.csv")
data_save = np.hstack( (wet_x_122.reshape(-1,1), wet_w_122.reshape(-1,1), wet_w_122_filter.reshape(-1,1)) )
CF.SaveFiles(var=data_save, var_name=['X', 'loss', 'loss_SG'], \
location="C:\Jiajin\ChaSha_2017\CSC_Data\CSC_extracted_data\Figure_data\Figure_9_Co_loss.csv")
data_save = np.hstack( (wet_x_122.reshape(-1,1), shift_w_122.reshape(-1,1), shift_w_122_filter.reshape(-1,1)) )
CF.SaveFiles(var=data_save, var_name=['X', 'comp', 'comp_SG'], \
location="C:\Jiajin\ChaSha_2017\CSC_Data\CSC_extracted_data\Figure_data\Figure_9_Co_compensate.csv")
del(data_save)
################################################################################################################
################################################################################################################
################################### Compensation mapping calculation #####################################
##### Three Calibration line visulization
CF.Line_plot(x=wet_x_60, y=shift_w_60_filter, color='red', x_label='Ratio = (E1 - E2)/(E1 + E2)', y_label='Energy compensation (keV)')
CF.Line_plot(x=wet_x_60, y=shift_w_81_filter, color='blue', x_label='Ratio = (E1 - E2)/(E1 + E2)', y_label='Energy compensation (keV)')
CF.Line_plot(x=wet_x_60, y=shift_w_122_filter, color='green', x_label='Ratio = (E1 - E2)/(E1 + E2)', y_label='Energy compensation (keV)')
##### Compensation map implementation
Map, R, E = MG.Pix2_MapGeneration(E1=60, E1_shift=shift_w_60_filter, E2=81, E2_shift=shift_w_81_filter, \
E3=122, E3_shift=shift_w_122_filter, Ratio=wet_x_60, E_range=140, dE=1)
Map[np.where(Map < 0)] = 0
##### Compensation map visualization
X = np.tile( R.reshape(-1,1), (1,len(E)) ).T
Y = np.tile( E.reshape(-1,1).T, (len(R),1) ).T
Z = Map
fig = plt.figure(figsize=(12, 12), facecolor='w')
ax = Axes3D(fig)
ax.view_init(elev=90,azim=270)
surf = ax.plot_surface(X, Y, Z, cmap=plt.cm.coolwarm, linewidth=0, antialiased=False)
fig.colorbar(surf, shrink=0.5, aspect=5)
plt.suptitle('Interpolation of the charge loss compensation map',fontsize=20)
plt.xlabel('Spatial-axis',fontsize=20)
plt.ylabel('Energy-axis',fontsize=20)
plt.xticks(fontsize=20)
plt.yticks(fontsize=20)
plt.show()
################################################################################################################
################################################################################################################
############################## Charge sharing correction based on compensation map #############################
from charge_sharing_correction import charge_sharing_correction as CSC2
from charge_sharing_correction_v2 import charge_sharing_correction as CSC # class file, charge sharing correction
from charge_sharing_correction_v2 import Common_used_function as CF # class file, common-used plotting tools
from charge_sharing_correction_v2 import SG_Filter as SG # class file, S-G filters of different dimentions
from charge_sharing_correction_v2 import Map_generation as MG # class file, compensation map generation
from charge_sharing_correction_v2 import scattering_clustering as SC # class file, DBSCAN clustering
'''
data_map = np.vstack( (cluster_60_lab_0, cluster_60_lab_noise, cluster_81_lab_0, cluster_81_lab_noise,\
cluster_122_lab_0, cluster_122_lab_noise) )
CSC_122_2pix = CSC( CS_dim=CS_dim, basis_old=basis_old, basis_new=basis_new, peak_energy=122, max_energy_range=140, seg_size=seg_size )
data_map_corrected = CSC_122_2pix.Pix2_Correction( CS_data_labeled=data_map, correction_map=Map, R=R, E=E )
CF.Scatter2D_plot(x=data_map[:,0].tolist(), y=data_map[:,1].tolist(), x_lim_left=0, x_lim_right=140,\
y_lim_left=0, y_lim_right=140, color='r')
CF.Scatter2D_plot(x=data_map_corrected[:,0].tolist(), y=data_map_corrected[:,1].tolist(), x_lim_left=0, x_lim_right=140,\
y_lim_left=0, y_lim_right=140, color='k')
'''
### 60
CSC2_60_2Pix = CSC2( CS_dim=CS_dim, basis_old=basis_old, basis_new=basis_new, peak_energy=60, max_energy_range=140, seg_size=2 )
wet_x, wet_w, shift_w, seg_unit = CSC2_60_2Pix.Pix2_Measurement( CS_data_labeled=cluster_60_lab_0 )
cluster_60_lab_0_corrected = CSC2_60_2Pix.Pix2_Correction( seg_unit=seg_unit, shift_w=shift_w, CS_data_labeled=cluster_60_lab_0 )
cluster_60_lab_noise_corrected = CSC2_60_2Pix.Pix2_Correction( seg_unit=seg_unit, shift_w=shift_w, CS_data_labeled=cluster_60_lab_noise )
wet_x, wet_w, shift_w, seg_unit = CSC2_60_2Pix.Pix2_Measurement( CS_data_labeled=cluster_60_lab_1 )
cluster_60_lab_1_corrected = CSC2_60_2Pix.Pix2_Correction( seg_unit=seg_unit, shift_w=shift_w, CS_data_labeled=cluster_60_lab_1 )
wet_x, wet_w, shift_w, seg_unit = CSC2_60_2Pix.Pix2_Measurement( CS_data_labeled=cluster_60_lab_2 )
cluster_60_lab_2_corrected = CSC2_60_2Pix.Pix2_Correction( seg_unit=seg_unit, shift_w=shift_w, CS_data_labeled=cluster_60_lab_2 )
### 81
CSC2_81_2Pix = CSC2(CS_dim=CS_dim, basis_old=basis_old, basis_new=basis_new, peak_energy=81, max_energy_range=140, seg_size=2)
wet_x, wet_w, shift_w, seg_unit = CSC2_81_2Pix.Pix2_Measurement( CS_data_labeled=cluster_81_lab_0 )
cluster_81_lab_0_corrected = CSC2_81_2Pix.Pix2_Correction( seg_unit=seg_unit, shift_w=shift_w, CS_data_labeled=cluster_81_lab_0 )
cluster_81_lab_noise_corrected = CSC2_81_2Pix.Pix2_Correction( seg_unit=seg_unit, shift_w=shift_w, CS_data_labeled=cluster_81_lab_noise )
wet_x, wet_w, shift_w, seg_unit = CSC2_81_2Pix.Pix2_Measurement( CS_data_labeled=cluster_81_lab_1 )
cluster_81_lab_1_corrected = CSC2_81_2Pix.Pix2_Correction( seg_unit=seg_unit, shift_w=shift_w, CS_data_labeled=cluster_81_lab_1 )
wet_x, wet_w, shift_w, seg_unit = CSC2_81_2Pix.Pix2_Measurement( CS_data_labeled=cluster_81_lab_2 )
cluster_81_lab_2_corrected = CSC2_81_2Pix.Pix2_Correction( seg_unit=seg_unit, shift_w=shift_w, CS_data_labeled=cluster_81_lab_2 )
### 122
CSC2_122_2Pix = CSC2(CS_dim=CS_dim, basis_old=basis_old, basis_new=basis_new, peak_energy=122, max_energy_range=140, seg_size=2)
wet_x, wet_w, shift_w, seg_unit = CSC2_122_2Pix.Pix2_Measurement( CS_data_labeled=cluster_122_lab_0 )
cluster_122_lab_0_corrected = CSC2_122_2Pix.Pix2_Correction( seg_unit=seg_unit, shift_w=shift_w, CS_data_labeled=cluster_122_lab_0 )
cluster_122_lab_noise_corrected = CSC2_122_2Pix.Pix2_Correction( seg_unit=seg_unit, shift_w=shift_w, CS_data_labeled=cluster_122_lab_noise )
wet_x, wet_w, shift_w, seg_unit = CSC2_122_2Pix.Pix2_Measurement( CS_data_labeled=cluster_122_lab_1 )
cluster_122_lab_1_corrected = CSC2_122_2Pix.Pix2_Correction( seg_unit=seg_unit, shift_w=shift_w, CS_data_labeled=cluster_122_lab_1 )
wet_x, wet_w, shift_w, seg_unit = CSC2_122_2Pix.Pix2_Measurement( CS_data_labeled=cluster_122_lab_2 )
cluster_122_lab_2_corrected = CSC2_122_2Pix.Pix2_Correction( seg_unit=seg_unit, shift_w=shift_w, CS_data_labeled=cluster_122_lab_2 )
### noise
#noise = np.vstack( (cluster_60_lab_noise, cluster_81_lab_noise, cluster_122_lab_noise) )
#CSC_122_2pix = CSC( CS_dim=CS_dim, basis_old=basis_old, basis_new=basis_new, peak_energy=122, max_energy_range=140, seg_size=seg_size )
#noise_corrected = CSC_122_2pix.Pix2_Correction( CS_data_labeled=noise, correction_map=Map, R=R, E=E )
data = np.vstack( (cluster_60_lab_0, cluster_60_lab_1, cluster_60_lab_2, cluster_60_lab_noise,\
cluster_81_lab_0, cluster_81_lab_1, cluster_81_lab_2, cluster_81_lab_noise,\
cluster_122_lab_0, cluster_122_lab_1, cluster_122_lab_2, cluster_122_lab_noise) )
data_corrected = np.vstack( (cluster_60_lab_0_corrected, cluster_60_lab_1_corrected, cluster_60_lab_2_corrected, cluster_60_lab_noise_corrected, \
cluster_81_lab_0_corrected, cluster_81_lab_1_corrected, cluster_81_lab_2_corrected, cluster_81_lab_noise_corrected, \
cluster_122_lab_0_corrected, cluster_122_lab_1_corrected, cluster_122_lab_2_corrected, cluster_122_lab_noise_corrected) )
Energy_sum = np.sum(data, axis=1).reshape(-1,1)
CF.Histogram_lineplot(Hist=Energy_sum, Bins=300, x_lim_low=0, x_lim_high=200, color='blue')
Energy_sum_corrected = np.sum(data_corrected, axis=1).reshape(-1,1)
CF.Histogram_lineplot(Hist=Energy_sum_corrected, Bins=300, x_lim_low=0, x_lim_high=200, color='red')
plt.ylim(0, 1000)
### save the data and corrected data
CF.SaveFiles(var=data, var_name=['E1', 'E2'], location='C:\Jiajin\ChaSha_2017\CSC_Data\CSC_extracted_data\Figure_data\Figure_12_raw.csv')
CF.SaveFiles(var=data_corrected, var_name=['E1', 'E2'], location='C:\Jiajin\ChaSha_2017\CSC_Data\CSC_extracted_data\Figure_data\Figure_12_corrected.csv')
### save separately
# raw
data = np.vstack( ( cluster_60_lab_0, cluster_60_lab_1, cluster_60_lab_2, cluster_60_lab_noise ) )
CF.SaveFiles(var=data, var_name=['E1', 'E2'], location='C:\Jiajin\ChaSha_2017\CSC_Data\CSC_extracted_data\spectra_data\Am_2pix_raw.csv')
data = np.vstack( ( cluster_81_lab_0, cluster_81_lab_1, cluster_81_lab_2, cluster_81_lab_noise ) )
CF.SaveFiles(var=data, var_name=['E1', 'E2'], location='C:\Jiajin\ChaSha_2017\CSC_Data\CSC_extracted_data\spectra_data\Ba_2pix_raw.csv')
data = np.vstack( ( cluster_122_lab_0, cluster_122_lab_1, cluster_122_lab_2, cluster_122_lab_noise ) )
CF.SaveFiles(var=data, var_name=['E1', 'E2'], location='C:\Jiajin\ChaSha_2017\CSC_Data\CSC_extracted_data\spectra_data\Co_2pix_raw.csv')
# corrected
data = np.vstack( ( cluster_60_lab_0_corrected, cluster_60_lab_1_corrected, cluster_60_lab_2_corrected, cluster_60_lab_noise_corrected ) )
CF.SaveFiles(var=data, var_name=['E1', 'E2'], location='C:\Jiajin\ChaSha_2017\CSC_Data\CSC_extracted_data\spectra_data\Am_2pix_corrected.csv')
data = np.vstack( ( cluster_81_lab_0_corrected, cluster_81_lab_1_corrected, cluster_81_lab_2_corrected, cluster_81_lab_noise_corrected ) )
CF.SaveFiles(var=data, var_name=['E1', 'E2'], location='C:\Jiajin\ChaSha_2017\CSC_Data\CSC_extracted_data\spectra_data\Ba_2pix_corrected.csv')
data = np.vstack( ( cluster_122_lab_0_corrected, cluster_122_lab_1_corrected, cluster_122_lab_2_corrected, cluster_122_lab_noise_corrected ) )
CF.SaveFiles(var=data, var_name=['E1', 'E2'], location='C:\Jiajin\ChaSha_2017\CSC_Data\CSC_extracted_data\spectra_data\Co_2pix_corrected.csv')
| [
"matplotlib"
] |
b04b62cf5a338cebb5b0ae6acecfca658aa50f25 | Python | bunshue/vcs | /_4.python/__code/機器學習基礎數學第二版/ch23/ch23_5.py | UTF-8 | 655 | 3.59375 | 4 | [] | no_license | # ch23_5.py
import matplotlib.pyplot as plt
import numpy as np
x = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,19,21,22,23,24]
y = [100,21,75,49,15,98,55,31,33,82,61,80,32,71,99,15,66,88,21,97,30,5]
coef = np.polyfit(x, y, 3)       # degree-3 polynomial regression coefficients
model = np.poly1d(coef)          # regression equation as a callable polynomial model
reg = np.linspace(1,24,100)
plt.rcParams["font.family"] = ["Microsoft JhengHei"] # 微軟正黑體
plt.scatter(x,y)
plt.title('網路購物調查')
plt.xlabel("點鐘", fontsize=14)
plt.ylabel("購物人數", fontsize=14)
plt.plot(reg,model(reg),color='red')
plt.show()
| [
"matplotlib"
] |
e55616dc74405e0a201a1d2781e522b11670f13b | Python | Isterikus/test_nn | /bar.py | UTF-8 | 821 | 3.015625 | 3 | [] | no_license | import matplotlib.pyplot as plt
import numpy as np
p = 0.33
a = 4.0
b = 7.0
with open("data-" + str(p) + '-' + str(a) + '-' + str(b), 'r') as f:
vals = [float(line) for line in f if float(line) <= 1.0]
gr = 0
less = 0
for i in vals:
if i <= 0.8:
less += 1
elif i <= 1.0:
gr += 1
print("LESS = ", less)
print("GR = ", gr)
# plt.bar(x, y)
# plt.show()
y, x, _ = plt.hist(vals, bins=100)
print("Y = ", y)
print("X = ", x)
print("_ = ", _)
plt.show()
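# calc(i) below returns the fraction of `vals` that falls in the half-open bin [i, i + 0.01).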
def calc(i):
ret = 0.0
for v in vals:
if v >= i and v < i + 0.01:
ret += 1
return float(ret / len(vals))
diff = 0
per = 0
prev = calc(0.0)
for i in np.arange(0.01, 1.0, 0.01):
now = calc(i)
if now - prev > diff and (calc(i + 0.01) - now > now - prev or prev - calc(i - 0.02) > now - prev):
diff = now
per = i
prev = now
print("DIff = ", per)
| [
"matplotlib"
] |
70ce5e4b1756f0f7fba9371a4f47f7e015e44d4b | Python | Aldo-Meztas/ProgramasFree | /Programas/holamundo.py | UTF-8 | 1,794 | 3.75 | 4 | [] | no_license | import numpy as np
import matplotlib.pyplot as plt
import math
# import PyQt5
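# The four functions below are standard fuzzy-logic membership functions:
# triangular(a, b, c), trapezoidal(a, b, c, d), Gaussian (gausseana, centre a and
# width b) and generalized bell (campana, width a, slope b and centre c).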
def triangular(x, a, b, c):
rT = []
for i in x:
if i <= a:
rT.append(0)
elif a <= i and i <= b:
rT.append((i-a)/(b-a))
elif b <= i and i <= c:
rT.append((c-i)/(c-b))
elif c <= i:
rT.append(0)
else:
pass
return np.array(rT)
def trapezoidal(x, a, b, c, d):
rT = []
for i in x:
if i <= a:
rT.append(0)
elif a <= i and i <= b:
rT.append((i-a)/(b-a))
elif b <= i and i <= c:
rT.append(1)
elif c <= i and i <= d:
rT.append((d-i)/(d-c))
elif d <= i:
rT.append(0)
return np.array(rT)
def gausseana(x, a, b):
e = math.e
return e**((-(1/2))*((x-a)/b)**2)
def campana(x, a, b, c):
return 1/(1+(abs((x-c)/a))**(2*b))
def rangos(inicio, final):
return np.array([float(i) for i in range(inicio, final+1)])
def entrada():
print("========== INGRESE RANGO ==========")
inicio = int(input("Ingrese inicio: "))
final = int(input("Ingrese final: "))
print("===================================")
return rangos(inicio, final)
print("Triangular")
x = entrada()
valor1 = triangular(x, 6, 7, 9)
xt = x
yt = valor1
plt.plot(xt, yt, 'b--', label="Triangular")
print("Trapezoidal")
x = entrada()
valor2 = trapezoidal(x, 5, 6, 8, 10)
xtr = x
ytr = valor2
plt.plot(xtr, ytr, 'r--', label="Trepecio")
print("Gausiana")
x = entrada()
valor3 = gausseana(x, 3, 0.7)
xg = x
yg = valor3
plt.plot(xg, yg, label="Gausiana")
print("Campana")
x = entrada()
valor4 = campana(x, 1.5, 5, 3)
xc = x
yc = valor4
plt.plot(xc, yc, label="Campana")
plt.legend(loc='upper left')
plt.show()
| [
"matplotlib"
] |
39ffeb7d53c734760223d677b6ca9a9685a68eef | Python | 5amessi/how-deep-is-deep-learning | /linear_regression/practice/linear_regression.py | UTF-8 | 2,768 | 3.625 | 4 | [] | no_license | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# gradient descent step: you should implement it (a reference sketch follows this function)
def step_gradient(b_current, m_current, points, learningRate):
N = float(len(points))
for i in range(0, len(points)):
x = points[i, 0]
y = points[i, 1]
# h(x) = predicted_y = mx + b
# m is slope, b is y-intercept or m is theta 1, b is theta 0
# Squared error function
# theta0 = theta0 + eta * (1/n)*sum(y(i) - h(xi))
# theta1 = theta1 + eta * (1/n)*sum(y(i) - h(xi))*xi
return [b_current, m_current] #return theta0 , theta1
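# --- Reference sketch (editor's addition): one possible completion of the exercise
# above. It follows the update rule written in the comments inside step_gradient;
# the name step_gradient_solution is an assumption and is not called anywhere else
# in this practice script.
def step_gradient_solution(b_current, m_current, points, learningRate):
    N = float(len(points))
    b_grad_sum = 0.0  # accumulates (y_i - h(x_i))
    m_grad_sum = 0.0  # accumulates (y_i - h(x_i)) * x_i
    for i in range(0, len(points)):
        x = points[i, 0]
        y = points[i, 1]
        error = y - (m_current * x + b_current)  # y_i - h(x_i)
        b_grad_sum += error
        m_grad_sum += error * x
    new_b = b_current + learningRate * (1.0 / N) * b_grad_sum  # theta0 update
    new_m = m_current + learningRate * (1.0 / N) * m_grad_sum  # theta1 update
    return [new_b, new_m]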
# function to compute the error (half the mean squared error)
def compute_error_for_line_given_points(b, m, points):
totalError = 0
for i in range(0, len(points)):
x = points[i, 0]
y = points[i, 1]
totalError += (y - (m * x + b)) ** 2
return totalError / float(2*len(points))
def gradient_descent_runner(points, starting_b, starting_m, learning_rate, num_iterations):
counter = 0 # counter used for the drawing
b = starting_b
m = starting_m
for i in range(num_iterations):
        # The drawing stuff: the plot is refreshed once every 100 iterations
print ("After {0} iterations b = {1}, m = {2}, error = {3}".format(counter, b, m,compute_error_for_line_given_points(b, m, points)))
        if counter % 100 == 0:
plt.plot(points[:, 0], points[:, 1], 'bo') # Draw the dataset
plt.plot([0, 80], [b, 80*m+b], 'b')
plt.show()
b, m = step_gradient(b, m, np.array(points), learning_rate)
counter+=1
return [b, m]
def Train():
print ("Starting gradient descent at b = {0}, m = {1}, error = {2}".format(initial_b, initial_m, compute_error_for_line_given_points(initial_b, initial_m, points)))
print ("Running...")
[b, m] = gradient_descent_runner(points, initial_b, initial_m, learning_rate, num_iterations)
print ("After {0} iterations b = {1}, m = {2}, error = {3}".format(num_iterations, b, m, compute_error_for_line_given_points(b, m, points)))
#========================================================================================================================================================
#The Main:
#read_dataset
points = pd.read_csv("../Dataset/Regression_dataset/data.csv", delimiter=",") # Function in pandas that reads the data from a file and organize it.
points = np.asarray(points) # convert the DataFrame to a numpy array
#hyperparameters
learning_rate = 0.000001 # Eta
num_iterations = 2000
initial_b = 0 # initial y-intercept guess
initial_m = 0 # initial slope guess
# m is slope, b is y-intercept or b is theta 0 , m is theta 1
Train()
#code cycle
#1- read dataset, hyperparameters
#2- Train
#3- gradient_descent_runner
#4- step_gradient, where you will write your code #####
| [
"matplotlib"
] |
760d6318b57c19e1bdbefe0ac25e25546d9f6a06 | Python | Seiji42/cs-470-labs | /agents/GraphSearch.py | UTF-8 | 1,404 | 2.90625 | 3 | [] | no_license | import matplotlib
import matplotlib.pyplot as plt
class GraphSearch(object):
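    # Plotting helper for graph-search agents. plot_graph() saves one PNG per call
    # under visibility_plots/. Note that self.obstacles is not set in __init__; it is
    # expected to be provided elsewhere (e.g. by a subclass).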
def __init__(self, plot):
self.plot = plot
self.plot_count = 1
def plot_obstacles(self, ax):
for obstacle in self.obstacles:
obs = obstacle.points
rect1 = matplotlib.patches.Rectangle((obs[1][0],obs[1][1]), \
obs[2][0] - obs[1][0], obs[0][1] - obs[1][1], color='blue')
ax.add_patch(rect1)
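    # plot_graph renders one snapshot of the search: frontier nodes as black squares
    # ('ks'), discovered nodes as magenta hexagons ('mH'), and came_from links as red
    # arrows; the figure is saved as <search_name><plot_count>.png and then closed.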
def plot_graph(self,frontier, discovered, came_from, search_name):
fig = plt.figure(self.plot_count)
ax = fig.add_subplot(111)
self.plot_obstacles(ax)
self.plot_points(frontier, ax, 'ks')
self.plot_points(discovered, ax, 'mH')
self.plot_came_from(came_from, ax)
name = search_name + str(self.plot_count) + '.png'
plt.title(search_name + str(self.plot_count))
fig.savefig('visibility_plots/' + name) # save the figure to file
plt.close(fig)
self.plot_count = self.plot_count + 1
def plot_came_from(self, came_from, ax):
for key in came_from.keys():
arrow = matplotlib.patches.Arrow(key[0], key[1], came_from[key][0] - key[0], came_from[key][1] - key[1], width=1.0, color='red')
ax.add_patch(arrow)
def plot_points(self, points, ax, style):
for p in points:
ax.plot([p[0]], [p[1]], style, ms=10)
| [
"matplotlib"
] |
08bf0aa4745aacdd53a9e68d6f2237b54da77f19 | Python | cam673/School-Projects | /EE 381/Lab 5/sample_size.py | UTF-8 | 1,361 | 3.21875 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 7 13:18:04 2019
@author: christophermasferrer
"""
#Christopher Masferrer
#EE 381
#Lab 5
import numpy as np
import matplotlib.pyplot as plt
import random as r
import math as m
N = 1200000
mu = 45
sig = 3
B = np.random.normal(mu,sig,N)
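# B is a simulated population of N = 1,200,000 normally distributed values
# (mean 45, standard deviation 3). sSize() draws one sample of each size from 1 to 180,
# plots the sample means, and overlays 95% and 99% confidence bands
# (z = 1.96 and z = 2.58) whose half-width z*sig/sqrt(n) shrinks as the sample size grows.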
def sSize():
n = 180
mean = [None] * n
top95 = [None] * n
bottom95 = [None] * n
top99 = [None] * n
bottom99 = [None] * n
for i in range (0,n):
counter = i+1
x = B[r.sample(range(N), counter)]
mean[i] = np.sum(x)/counter
std = sig/m.sqrt(counter)
top95[i] = mu + 1.96*(std)
bottom95[i] = mu - 1.96*(std)
top99[i] = mu + 2.58*(std)
bottom99[i] = mu - 2.58*(std)
coll = [x for x in range(1, counter+1)]
plt.close('all')
fig1 = plt.figure(1)
plt.scatter(coll, mean, c = 'Blue', marker = 'x')
plt.plot(coll, top95, 'r--')
plt.plot(coll, bottom95, 'r--')
plt.title('Sample Means & 95% confidence intervals')
plt.xlabel('Sample Size')
plt.ylabel('x_bar')
fig2 = plt.figure(2)
plt.scatter(coll, mean, c = 'Blue', marker = 'x')
plt.plot(coll, top99, 'g--')
plt.plot(coll, bottom99, 'g--')
plt.title('Sample Means & 99% confidence intervals')
plt.xlabel('Sample Size')
plt.ylabel('x_bar')
sSize() | [
"matplotlib"
] |