| code | apis | extract_api |
| --- | --- | --- |
| stringlengths 22 to 1.05M | sequencelengths 1 to 3.31k | stringlengths 75 to 3.25M |
#!/usr/bin/python3
import pygame
import random
import time
##VARIABLES TO CHANGE
width = 500
height = 500
stats_height = 150
board_size = 5
window_name = "PyLoopover "+str(board_size)+"x"+str(board_size)
scramble_turns = 50
t_round = 3
FPS = 30
##DONT CHANGE THESE BOIS
WHITE = (255,255,255)
BLACK = (0,0,0)
GREEN = (32,200,32)
keys = {"w":0,"a":0,"s":0,"d":0,"q":0}
last_was_Q = False
class Tile:
def __init__(self,number,s):
self.number = number
n = number-1
self.color = ((n/s)*(255/s),(n%s)*(255/s),128)
def draw(self,screen,font,x,y,width,height):
pygame.draw.rect(screen,self.color,(x,y,width,height))
text = font.render(str(self.number),True,BLACK)
screen.blit(text,(x,y))
class Board:
content = []
start_t=0
end_t=0
game=False
moves = 0
def __init__(self,size):
self.size = size
for i in range(0,size):
self.content.append([])
for j in range(0,size):
self.content[i].append(None)
self.content[i][j] = Tile(i+j*size+1,size)
def rotate_left(self,y):
new = []
for i in range(0,self.size):
new.append(self.content[(i-1)%self.size][y])
for i in range(0,self.size):
self.content[i][y] = new[i]
self.moves+=1
return new
def rotate_right(self,y):
new = []
for i in range(0,self.size):
new.append(self.content[(i+1)%self.size][y])
for i in range(0,self.size):
self.content[i][y] = new[i]
self.moves+=1
return new
def rotate_down(self,x):
new = []
for i in range(0,self.size):
new.append(self.content[x][(i-1)%self.size])
for i in range(0,self.size):
self.content[x][i] = new[i]
self.moves+=1
return new
def rotate_up(self,x):
new = []
for i in range(0,self.size):
new.append(self.content[x][(i+1)%self.size])
for i in range(0,self.size):
self.content[x][i] = new[i]
self.moves+=1
return new
def draw(self,screen,font):
for i in range(0,self.size):
for j in range(0,self.size):
w = (width / self.size)
h = (height / self.size)
x = i * w
y = j * h
self.content[i][j].draw(screen,font,x,y,w,h)
def scramble(self,n):
for i in range(0,n):
o = random.randint(0,3)
if o == 0:
self.rotate_left(random.randint(0,board_size-1))
elif o == 1:
self.rotate_right(random.randint(0,board_size-1))
elif o == 2:
self.rotate_up(random.randint(0,board_size-1))
else:
self.rotate_down(random.randint(0,board_size-1))
self.game=False
self.moves=0
return True
def is_solved(self):
for i in range(0,self.size):
for j in range(0,self.size):
if self.content[i][j].number != i+j*self.size+1:
return False
return True
def start_time(self):
print("time has started")
self.start_t = time.monotonic()
self.game = True
        return self.start_t  # return the recorded timestamp, not the method itself
def end_time(self):
print("time has ended")
self.end_t = time.monotonic()
        return self.end_t  # return the recorded timestamp, not the method itself
def get_time(self):
if (not self.is_solved()) and self.game:
return (time.monotonic() - self.start_t , BLACK)
elif self.is_solved() and self.game:
return (self.end_t - self.start_t , GREEN)
else:
return (0 , BLACK)
def main():
    global last_was_Q  # assigned below, so reference the module-level flag explicitly
    gameboard = Board(board_size)
pygame.init()
    pygame.mixer.quit() #weird workaround
#name the window & size it.
pygame.display.set_caption(window_name)
screen = pygame.display.set_mode((width,height+stats_height),0,32)
#setup framerate
pygame.time.set_timer(pygame.USEREVENT+1,int((1/FPS)*1000))
    #setup event queue
pygame.event.set_allowed(None) #start with no events allowed
pygame.event.set_allowed(pygame.USEREVENT+1) #timer event
pygame.event.set_allowed(pygame.KEYDOWN)
pygame.event.set_allowed(pygame.QUIT) #4 quitters
#setup fonts
font = pygame.font.SysFont('mono',int((width/board_size)/1.14))
font2 = pygame.font.SysFont('mono',int(stats_height/2.3))
#main l00p
running = True
while running:
#eevveeentttss???
event = pygame.event.wait()
if event.type == pygame.USEREVENT+1:
#a fresh canvas
screen.fill(WHITE)
#draw stats
time = gameboard.get_time()
time_str = str( int( time[0] * (10 ** t_round) ) / (10 ** t_round) )
text_timer = font2.render("Time :"+time_str,True,time[1])
text_moves = font2.render("Moves:"+str(gameboard.moves),True,time[1])
screen.blit(text_timer,(0,height))
screen.blit(text_moves,(0,height+(stats_height/2)))
#draw board
gameboard.draw(screen,font)
#update da screeeeeen
pygame.display.update()
#end the game
if gameboard.is_solved() and gameboard.start_t > gameboard.end_t:
gameboard.end_time()
elif event.type == pygame.KEYDOWN:
k = chr(event.key) #gimme a CHAR, not some weird integer
            domap = {
                "w": lambda: gameboard.rotate_up(int(pygame.mouse.get_pos()[0]/(width/board_size))),
                "a": lambda: gameboard.rotate_right(int(pygame.mouse.get_pos()[1]/(height/board_size))),
                "s": lambda: gameboard.rotate_down(int(pygame.mouse.get_pos()[0]/(width/board_size))),
                "d": lambda: gameboard.rotate_left(int(pygame.mouse.get_pos()[1]/(height/board_size))),
                "q": lambda: gameboard.scramble(scramble_turns)
            } # map each key to a board action
if k in ['w','a','s','d','q']:
#starting game logic
if k == "q":
last_was_Q = True
else:
if last_was_Q:
gameboard.start_time()
last_was_Q = False
                domap[k]()  # run the selected board action
#end the game
if gameboard.is_solved() and gameboard.start_t > gameboard.end_t:
gameboard.end_time()
#for quitters
elif event.type == pygame.QUIT:
print("Quitting...")
running = False
else:
print("err0r, bAd 3v3nt lol")
assert False
if __name__ == "__main__":
main()
| [
"pygame.init",
"pygame.event.set_allowed",
"pygame.display.set_mode",
"time.monotonic",
"pygame.event.wait",
"pygame.draw.rect",
"pygame.display.set_caption",
"pygame.display.update",
"pygame.mixer.quit",
"random.randint"
] | [((3090, 3103), 'pygame.init', 'pygame.init', ([], {}), '()\n', (3101, 3103), False, 'import pygame\n'), ((3105, 3124), 'pygame.mixer.quit', 'pygame.mixer.quit', ([], {}), '()\n', (3122, 3124), False, 'import pygame\n'), ((3172, 3211), 'pygame.display.set_caption', 'pygame.display.set_caption', (['window_name'], {}), '(window_name)\n', (3198, 3211), False, 'import pygame\n'), ((3222, 3284), 'pygame.display.set_mode', 'pygame.display.set_mode', (['(width, height + stats_height)', '(0)', '(32)'], {}), '((width, height + stats_height), 0, 32)\n', (3245, 3284), False, 'import pygame\n'), ((3378, 3408), 'pygame.event.set_allowed', 'pygame.event.set_allowed', (['None'], {}), '(None)\n', (3402, 3408), False, 'import pygame\n'), ((3440, 3486), 'pygame.event.set_allowed', 'pygame.event.set_allowed', (['(pygame.USEREVENT + 1)'], {}), '(pygame.USEREVENT + 1)\n', (3464, 3486), False, 'import pygame\n'), ((3499, 3539), 'pygame.event.set_allowed', 'pygame.event.set_allowed', (['pygame.KEYDOWN'], {}), '(pygame.KEYDOWN)\n', (3523, 3539), False, 'import pygame\n'), ((3541, 3578), 'pygame.event.set_allowed', 'pygame.event.set_allowed', (['pygame.QUIT'], {}), '(pygame.QUIT)\n', (3565, 3578), False, 'import pygame\n'), ((566, 625), 'pygame.draw.rect', 'pygame.draw.rect', (['screen', 'self.color', '(x, y, width, height)'], {}), '(screen, self.color, (x, y, width, height))\n', (582, 625), False, 'import pygame\n'), ((2651, 2667), 'time.monotonic', 'time.monotonic', ([], {}), '()\n', (2665, 2667), False, 'import time\n'), ((2774, 2790), 'time.monotonic', 'time.monotonic', ([], {}), '()\n', (2788, 2790), False, 'import time\n'), ((3803, 3822), 'pygame.event.wait', 'pygame.event.wait', ([], {}), '()\n', (3820, 3822), False, 'import pygame\n'), ((2080, 2100), 'random.randint', 'random.randint', (['(0)', '(3)'], {}), '(0, 3)\n', (2094, 2100), False, 'import random\n'), ((4324, 4347), 'pygame.display.update', 'pygame.display.update', ([], {}), '()\n', (4345, 4347), False, 'import pygame\n'), ((2135, 2168), 'random.randint', 'random.randint', (['(0)', '(board_size - 1)'], {}), '(0, board_size - 1)\n', (2149, 2168), False, 'import random\n'), ((2889, 2905), 'time.monotonic', 'time.monotonic', ([], {}), '()\n', (2903, 2905), False, 'import time\n'), ((2205, 2238), 'random.randint', 'random.randint', (['(0)', '(board_size - 1)'], {}), '(0, board_size - 1)\n', (2219, 2238), False, 'import random\n'), ((2272, 2305), 'random.randint', 'random.randint', (['(0)', '(board_size - 1)'], {}), '(0, board_size - 1)\n', (2286, 2305), False, 'import random\n'), ((2334, 2367), 'random.randint', 'random.randint', (['(0)', '(board_size - 1)'], {}), '(0, board_size - 1)\n', (2348, 2367), False, 'import random\n')] |
# -*- coding: utf-8 -*-
import numpy as np, pandas as pd, arviz as az, prince, matplotlib.pyplot as plt, seaborn as sns
from cmdstanpy import CmdStanModel
#%% load data
data = pd.read_csv("data/overfitting.csv", index_col = 'case_id')
data.columns
data.info()
feature_names = data.columns.str.startswith("var_")
predictors = data[data.columns[feature_names]]
labels = data["Target_Practice"]
ix_training = data.train == 1
training_data = predictors[ix_training]
training_labels = labels[ix_training]
ix_testing = data.train == 0
testing_data = predictors[ix_testing]
testing_labels = labels[ix_testing]
sns.displot(training_data.values.flatten(), bins = "sqrt", kde = True)
pca = prince.PCA(n_components = 2, as_array = False).fit(training_data)
pca.plot_row_coordinates(training_data, color_labels = training_labels)
pca.column_correlations(training_data).plot.scatter(x = 0, y = 1) # weird column name
#%% Roshan Sharma model
mdl_data = { # problem with JSON dump => cast to python native type
'N': ix_training.sum().tolist(),
'N2': ix_testing.sum().tolist(),
'K': feature_names.sum().tolist(),
'y': training_labels.values.tolist(),
'X': training_data.values.tolist(),
'new_X': testing_data.values.tolist(),
}
modelfile = "OverfittingRoshanSharma.stan"
with open(modelfile, "w") as file: file.write("""
data {
int N; // the number of training observations
int N2; // the number of test observations
int K; // the number of features
int y[N]; // the response
matrix[N,K] X; // the model matrix
matrix[N2,K] new_X; // the matrix for the predicted values
}
parameters { // regression parameters
real alpha;
vector[K] beta;
}
transformed parameters {
vector[N] linpred = alpha + X * beta;
}
model {
alpha ~ cauchy(0, 10); // prior for the intercept following Gelman 2008
beta ~ student_t(1, 0, 0.03);
y ~ bernoulli_logit(linpred);
}
generated quantities { // y values predicted by the model
vector[N2] y_pred = alpha + new_X * beta;
}
""")
var_name_array = ["alpha"] + [f"beta[{i+1}]" for i in range(mdl_data["K"])]
var_name_combi = ["alpha", "beta"]
sm = CmdStanModel(stan_file = modelfile)
# maximum likelihood estimation
optim = sm.optimize(data = mdl_data).optimized_params_pd
optim[optim.columns[~optim.columns.str.startswith("lp")]]
plt.plot(optim[var_name_array[1:]].values[0])
# variational inference
vb = sm.variational(data = mdl_data)
vb.variational_sample.columns = vb.variational_params_dict.keys()
vb_name = vb.variational_params_pd.columns[~vb.variational_params_pd.columns.str.startswith(("lp", "log_"))]
vb.variational_params_pd[var_name_array]
vb.variational_sample[var_name_array]
# Markov chain Monte Carlo
fit = sm.sample(
data = mdl_data, show_progress = True, chains = 4,
iter_sampling = 50000, iter_warmup = 10000, thin = 5
)
fit.draws().shape # iterations, chains, parameters
fit.summary().loc[var_name_array] # pandas DataFrame
print(fit.diagnose())
posterior = {k: fit.stan_variable(k) for k in var_name_combi}
az_trace = az.from_cmdstanpy(fit)
az.summary(az_trace).loc[var_name_array] # pandas DataFrame
az.plot_trace(az_trace, var_names = ["alpha"])
az.plot_forest(az_trace, var_names = ["beta"])
sample_pred = fit.stan_variable('y_pred')
# <NAME> model: DOES NOT WORK yet
# need to figure out how to marginalize all discrete params
| [
"arviz.summary",
"pandas.read_csv",
"matplotlib.pyplot.plot",
"arviz.from_cmdstanpy",
"arviz.plot_forest",
"prince.PCA",
"arviz.plot_trace",
"cmdstanpy.CmdStanModel"
] | [((186, 242), 'pandas.read_csv', 'pd.read_csv', (['"""data/overfitting.csv"""'], {'index_col': '"""case_id"""'}), "('data/overfitting.csv', index_col='case_id')\n", (197, 242), True, 'import numpy as np, pandas as pd, arviz as az, prince, matplotlib.pyplot as plt, seaborn as sns\n'), ((2185, 2218), 'cmdstanpy.CmdStanModel', 'CmdStanModel', ([], {'stan_file': 'modelfile'}), '(stan_file=modelfile)\n', (2197, 2218), False, 'from cmdstanpy import CmdStanModel\n'), ((2374, 2419), 'matplotlib.pyplot.plot', 'plt.plot', (['optim[var_name_array[1:]].values[0]'], {}), '(optim[var_name_array[1:]].values[0])\n', (2382, 2419), True, 'import numpy as np, pandas as pd, arviz as az, prince, matplotlib.pyplot as plt, seaborn as sns\n'), ((3118, 3140), 'arviz.from_cmdstanpy', 'az.from_cmdstanpy', (['fit'], {}), '(fit)\n', (3135, 3140), True, 'import numpy as np, pandas as pd, arviz as az, prince, matplotlib.pyplot as plt, seaborn as sns\n'), ((3197, 3241), 'arviz.plot_trace', 'az.plot_trace', (['az_trace'], {'var_names': "['alpha']"}), "(az_trace, var_names=['alpha'])\n", (3210, 3241), True, 'import numpy as np, pandas as pd, arviz as az, prince, matplotlib.pyplot as plt, seaborn as sns\n'), ((3245, 3289), 'arviz.plot_forest', 'az.plot_forest', (['az_trace'], {'var_names': "['beta']"}), "(az_trace, var_names=['beta'])\n", (3259, 3289), True, 'import numpy as np, pandas as pd, arviz as az, prince, matplotlib.pyplot as plt, seaborn as sns\n'), ((713, 755), 'prince.PCA', 'prince.PCA', ([], {'n_components': '(2)', 'as_array': '(False)'}), '(n_components=2, as_array=False)\n', (723, 755), False, 'import numpy as np, pandas as pd, arviz as az, prince, matplotlib.pyplot as plt, seaborn as sns\n'), ((3142, 3162), 'arviz.summary', 'az.summary', (['az_trace'], {}), '(az_trace)\n', (3152, 3162), True, 'import numpy as np, pandas as pd, arviz as az, prince, matplotlib.pyplot as plt, seaborn as sns\n')] |
# -*- coding: utf-8 -*-
import json
import threading
import os
import time
import mats
import sys
import requests
import traceback
import re
from util import debug, error
class MatsLoader(threading.Thread):
"""
Fire and forget loader for materials - will queue a 'mats' event or
an 'error' event if the load fails. Automatically runs as a daemon
"""
def __init__(self, filename, queue):
"""
filename is the file to async load
queue is the queue to report the results into
"""
threading.Thread.__init__(self)
self.queue = queue
self.filename = filename
self.daemon = True
def run(self):
try:
m = mats.Materials(self.filename)
self.queue.put( { 'mats': m._materials } )
except:
self.queue.put( { 'error': 'Failed to load materials ' + str(sys.exc_info()[0]) } )
class MatsLoaderRemote(threading.Thread):
"""
Fire and forget loader for materials - will queue a 'mats' event or
an 'error' event if the load fails. Automatically runs as a daemon
"""
def __init__(self, filename, queue):
"""
filename is the cache file - we only read the remote file
if the cache is old (or missing)
queue is the queue to report the results into
"""
threading.Thread.__init__(self)
self.filename = filename
self.queue = queue
self.daemon = True
self.integerRe = re.compile(r'^-?\d+$')
self.floatRe = re.compile(r'^-?\d+(\.\d+)?$')
self.arrayRe = re.compile(r'^\[.*\]$')
def need_refresh(self):
"""
Returns True if the local cache needs a refresh.
"""
if not os.path.exists(self.filename):
return True
mtime = os.path.getmtime(self.filename)
now = time.time()
return mtime < now - 24 * 3600 # Daily update
def array_splitter(self, value):
return [ x[1:-1] for x in value[1:-1].split(", ") ]
def detect(self, value):
"""
Looks at a data value and converts into an appropriate type
(maybe should look at using ast instead)
"""
if self.integerRe.match(value):
return int(value)
elif self.floatRe.match(value):
return float(value)
elif self.arrayRe.match(value):
return self.array_splitter(value)
else:
return value
def parse(self, text):
"""
        Parse a string field containing all the data in a TSV
into an array of dicts. Mainly split out so we can test
"""
lines = text.replace("\r", "").split("\n")
fields = lines[0].split("\t")
res = []
for entry in lines[1:]:
values = entry.split("\t")
if len(values) < len(fields):
continue
v = {}
for k in range(0, len(fields)):
v[fields[k]] = self.detect(values[k])
res.append(v)
return res
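
    # Illustrative example of the parsing above (the header names and values are
    # assumptions for demonstration, not taken from the real spreadsheet):
    #   parse("name\tcount\tcoords\nIron\t3\t[(1), (2)]\n")
    #   -> [{'name': 'Iron', 'count': 3, 'coords': ['1', '2']}]
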
def run(self):
try:
if self.need_refresh():
r = requests.get("https://docs.google.com/spreadsheets/u/0/d/1g0y7inyvQopJ93jP5YIu3n0veX0ng8DraJXAvZk6pS4/export?format=tsv&id=1g0y7inyvQopJ93jP5YIu3n0veX0ng8DraJXAvZk6pS4&gid=0")
res = self.parse(r.text)
if res:
with open(self.filename, "wt") as cache_file:
json.dump(res, cache_file)
self.queue.put( { 'mats': res } )
debug("Async remote mats loader from tsv is completed {} entries".format(len(res)))
else:
error("Async remote mats loader failed - zero records")
else:
with open(self.filename, "rt") as cache_file:
res = json.load(cache_file)
self.queue.put( { 'mats': res } )
debug("loader from cache is completed {} entries".format(len(res)))
except:
self.queue.put( { 'error': 'Failed to load tsv materials ' + str(sys.exc_info()[0]) + ' ' + traceback.format_exc() } )
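

# A minimal manual test driver (an illustrative addition, not part of the original
# module); the cache file name and timeout here are assumptions.
if __name__ == "__main__":
    import queue

    results = queue.Queue()
    # MatsLoaderRemote caches the spreadsheet locally and only re-downloads it daily
    MatsLoaderRemote("mats_cache.json", results).start()
    event = results.get(timeout=30)  # blocks until a 'mats' or 'error' event arrives
    if 'error' in event:
        error(event['error'])
    else:
        debug("loaded {} material records".format(len(event['mats'])))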
| [
"threading.Thread.__init__",
"os.path.exists",
"traceback.format_exc",
"re.compile",
"requests.get",
"sys.exc_info",
"mats.Materials",
"util.error",
"json.load",
"os.path.getmtime",
"time.time",
"json.dump"
] | [((539, 570), 'threading.Thread.__init__', 'threading.Thread.__init__', (['self'], {}), '(self)\n', (564, 570), False, 'import threading\n'), ((1347, 1378), 'threading.Thread.__init__', 'threading.Thread.__init__', (['self'], {}), '(self)\n', (1372, 1378), False, 'import threading\n'), ((1491, 1513), 're.compile', 're.compile', (['"""^-?\\\\d+$"""'], {}), "('^-?\\\\d+$')\n", (1501, 1513), False, 'import re\n'), ((1537, 1569), 're.compile', 're.compile', (['"""^-?\\\\d+(\\\\.\\\\d+)?$"""'], {}), "('^-?\\\\d+(\\\\.\\\\d+)?$')\n", (1547, 1569), False, 'import re\n'), ((1591, 1615), 're.compile', 're.compile', (['"""^\\\\[.*\\\\]$"""'], {}), "('^\\\\[.*\\\\]$')\n", (1601, 1615), False, 'import re\n'), ((1813, 1844), 'os.path.getmtime', 'os.path.getmtime', (['self.filename'], {}), '(self.filename)\n', (1829, 1844), False, 'import os\n'), ((1859, 1870), 'time.time', 'time.time', ([], {}), '()\n', (1868, 1870), False, 'import time\n'), ((707, 736), 'mats.Materials', 'mats.Materials', (['self.filename'], {}), '(self.filename)\n', (721, 736), False, 'import mats\n'), ((1741, 1770), 'os.path.exists', 'os.path.exists', (['self.filename'], {}), '(self.filename)\n', (1755, 1770), False, 'import os\n'), ((3134, 3319), 'requests.get', 'requests.get', (['"""https://docs.google.com/spreadsheets/u/0/d/1g0y7inyvQopJ93jP5YIu3n0veX0ng8DraJXAvZk6pS4/export?format=tsv&id=1g0y7inyvQopJ93jP5YIu3n0veX0ng8DraJXAvZk6pS4&gid=0"""'], {}), "(\n 'https://docs.google.com/spreadsheets/u/0/d/1g0y7inyvQopJ93jP5YIu3n0veX0ng8DraJXAvZk6pS4/export?format=tsv&id=1g0y7inyvQopJ93jP5YIu3n0veX0ng8DraJXAvZk6pS4&gid=0'\n )\n", (3146, 3319), False, 'import requests\n'), ((3731, 3786), 'util.error', 'error', (['"""Async remote mats loader failed - zero records"""'], {}), "('Async remote mats loader failed - zero records')\n", (3736, 3786), False, 'from util import debug, error\n'), ((3893, 3914), 'json.load', 'json.load', (['cache_file'], {}), '(cache_file)\n', (3902, 3914), False, 'import json\n'), ((3482, 3508), 'json.dump', 'json.dump', (['res', 'cache_file'], {}), '(res, cache_file)\n', (3491, 3508), False, 'import json\n'), ((4178, 4200), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (4198, 4200), False, 'import traceback\n'), ((881, 895), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (893, 895), False, 'import sys\n'), ((4151, 4165), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (4163, 4165), False, 'import sys\n')] |
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
import os
from scrapy import Request
from scrapy.pipelines.images import ImagesPipeline
from luoxia import settings
class LuoxiaPipeline(object):
def process_item(self, item, spider):
title= item['title']
bookname = item['bookname']
titlename = item['titlename']
text = item['text']
path = "books/%s/%s/" % (title, bookname)
if not os.path.exists(path):
os.makedirs(path)
with open(path+titlename+'.txt', 'a', encoding='utf-8') as f:
f.write(text)
return item
class LuoxiaImagePipeline(ImagesPipeline):
def get_media_requests(self, item, info):
for url in item['image_urls']:
yield Request(url, meta={'title': item['title'],
'bookname': item['bookname']})
def item_completed(self, results, item, info):
        # Store the paths of the downloaded images on the item
item['images'] = [x for ok, x in results if ok]
return item
def file_path(self, request, response=None, info=None):
        # Create a directory for each book to hold all of its images
title = request.meta['title']
bookname = request.meta['bookname']
book_dir = os.path.join(settings.IMAGES_STORE, title +'/'+ bookname)
if not os.path.exists(book_dir):
os.makedirs(book_dir)
        # Extract the file extension from the URL
try:
ext_name = request.url.split(".")[-1]
except:
ext_name = 'jpg'
        # Return the relative path
return '%s/%s/%s.%s' % (title, bookname, bookname, ext_name) | [
"os.path.exists",
"scrapy.Request",
"os.path.join",
"os.makedirs"
] | [((1359, 1418), 'os.path.join', 'os.path.join', (['settings.IMAGES_STORE', "(title + '/' + bookname)"], {}), "(settings.IMAGES_STORE, title + '/' + bookname)\n", (1371, 1418), False, 'import os\n'), ((581, 601), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (595, 601), False, 'import os\n'), ((615, 632), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (626, 632), False, 'import os\n'), ((1432, 1456), 'os.path.exists', 'os.path.exists', (['book_dir'], {}), '(book_dir)\n', (1446, 1456), False, 'import os\n'), ((1470, 1491), 'os.makedirs', 'os.makedirs', (['book_dir'], {}), '(book_dir)\n', (1481, 1491), False, 'import os\n'), ((896, 969), 'scrapy.Request', 'Request', (['url'], {'meta': "{'title': item['title'], 'bookname': item['bookname']}"}), "(url, meta={'title': item['title'], 'bookname': item['bookname']})\n", (903, 969), False, 'from scrapy import Request\n')] |
#! /usr/bin/env python3
# Copyright 2020 Tier IV, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import math
import sys
from autoware_planning_msgs.msg import StopReasonArray
from case_converter import pascal2snake
from geometry_msgs.msg import PoseStamped
import numpy as np
import rclpy
from rclpy.node import Node
from rtree import index
from self_pose_listener import SelfPoseListener
class StopReason2PoseNode(Node):
def __init__(self, options):
super().__init__("stop_reason2pose_node")
self._options = options
self._sub_pose = self.create_subscription(
StopReasonArray, self._options.topic_name, self._on_stop_reasons, 1
)
self._pub_pose_map = {}
self._idx_map = {}
self._pose_map = {}
self._self_pose_listener = SelfPoseListener()
self.timer = self.create_timer((1.0 / 100), self._self_pose_listener.get_current_pose)
def _on_stop_reasons(self, msg):
for stop_reason in msg.stop_reasons:
snake_case_stop_reason = pascal2snake(stop_reason.reason)
if len(stop_reason.stop_factors) == 0:
self.get_logger().warn("stop_factor is null")
return
for stop_factor in stop_reason.stop_factors:
pose = PoseStamped()
pose.header = msg.header
pose.pose = stop_factor.stop_pose
# Get nearest pose
th_dist = 1.0
nearest_pose_id = self._get_nearest_pose_id(
snake_case_stop_reason, pose.pose, th_dist
)
if nearest_pose_id:
self._update_pose(snake_case_stop_reason, pose.pose, nearest_pose_id)
pose_id = nearest_pose_id
else:
pose_id = self._register_pose(snake_case_stop_reason, pose.pose)
pose_topic_name = "{snake_case_stop_reason}_{pose_id}".format(**locals())
topic_ns = "/autoware_debug_tools/stop_reason2pose/"
if pose_topic_name not in self._pub_pose_map:
self._pub_pose_map[pose_topic_name] = self.create_publisher(
PoseStamped, topic_ns + pose_topic_name, 1
)
self._pub_pose_map[pose_topic_name].publish(pose)
# Publish nearest stop_reason without number
nearest_pose = PoseStamped()
nearest_pose.header = msg.header
nearest_pose.pose = self._get_nearest_pose_in_array(
stop_reason, self._self_pose_listener.self_pose
)
if nearest_pose.pose:
if snake_case_stop_reason not in self._pub_pose_map:
topic_ns = "/autoware_debug_tools/stop_reason2pose/"
self._pub_pose_map[snake_case_stop_reason] = self.create_publisher(
PoseStamped, topic_ns + snake_case_stop_reason, 1
)
self._pub_pose_map[snake_case_stop_reason].publish(nearest_pose)
def _get_nearest_pose_in_array(self, stop_reason, self_pose):
poses = [stop_factor.stop_pose for stop_factor in stop_reason.stop_factors]
if not poses:
return None
        distances = list(map(lambda p: StopReason2PoseNode.calc_distance2d(p, self_pose), poses))  # materialize so np.argmin sees the actual values
nearest_idx = np.argmin(distances)
return poses[nearest_idx]
def _find_nearest_pose_id(self, name, pose):
if name not in self._idx_map:
self._idx_map[name] = index.Index()
return self._idx_map[name].nearest(StopReason2PoseNode.pose2boundingbox(pose), 1)
def _get_nearest_pose_id(self, name, pose, th_dist):
nearest_pose_ids = list(self._find_nearest_pose_id(name, pose))
if not nearest_pose_ids:
return None
nearest_pose_id = nearest_pose_ids[0]
nearest_pose = self._get_pose(name, nearest_pose_id)
if not nearest_pose:
return None
dist = StopReason2PoseNode.calc_distance2d(pose, nearest_pose)
if dist > th_dist:
return None
return nearest_pose_id
def _get_pose(self, name, pose_id):
if name not in self._pose_map:
return None
return self._pose_map[name][pose_id]
def _update_pose(self, name, pose, pose_id):
        self._pose_map[name][pose_id] = pose
self._idx_map[name].insert(pose_id, StopReason2PoseNode.pose2boundingbox(pose))
def _register_pose(self, name, pose):
if name not in self._pose_map:
self._pose_map[name] = {}
pose_id = len(self._pose_map[name]) + 1
self._pose_map[name][pose_id] = pose
self._idx_map[name].insert(pose_id, StopReason2PoseNode.pose2boundingbox(pose))
return pose_id
@staticmethod
def calc_distance2d(pose1, pose2):
p1 = pose1.position
p2 = pose2.position
return math.hypot(p1.x - p2.x, p1.y - p2.y)
@staticmethod
def pose2boundingbox(pose):
return [pose.position.x, pose.position.y, pose.position.x, pose.position.y]
def main(args):
rclpy.init()
parser = argparse.ArgumentParser()
parser.add_argument("topic_name", type=str)
ns = parser.parse_args(args)
stop_reason2pose_node = StopReason2PoseNode(ns)
rclpy.spin(stop_reason2pose_node)
stop_reason2pose_node.destroy_node()
rclpy.shutdown()
if __name__ == "__main__":
main(sys.argv[1:])
| [
"rclpy.spin",
"case_converter.pascal2snake",
"argparse.ArgumentParser",
"self_pose_listener.SelfPoseListener",
"geometry_msgs.msg.PoseStamped",
"rtree.index.Index",
"math.hypot",
"numpy.argmin",
"rclpy.init",
"rclpy.shutdown"
] | [((5663, 5675), 'rclpy.init', 'rclpy.init', ([], {}), '()\n', (5673, 5675), False, 'import rclpy\n'), ((5690, 5715), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (5713, 5715), False, 'import argparse\n'), ((5854, 5887), 'rclpy.spin', 'rclpy.spin', (['stop_reason2pose_node'], {}), '(stop_reason2pose_node)\n', (5864, 5887), False, 'import rclpy\n'), ((5933, 5949), 'rclpy.shutdown', 'rclpy.shutdown', ([], {}), '()\n', (5947, 5949), False, 'import rclpy\n'), ((1325, 1343), 'self_pose_listener.SelfPoseListener', 'SelfPoseListener', ([], {}), '()\n', (1341, 1343), False, 'from self_pose_listener import SelfPoseListener\n'), ((3901, 3921), 'numpy.argmin', 'np.argmin', (['distances'], {}), '(distances)\n', (3910, 3921), True, 'import numpy as np\n'), ((5469, 5505), 'math.hypot', 'math.hypot', (['(p1.x - p2.x)', '(p1.y - p2.y)'], {}), '(p1.x - p2.x, p1.y - p2.y)\n', (5479, 5505), False, 'import math\n'), ((1559, 1591), 'case_converter.pascal2snake', 'pascal2snake', (['stop_reason.reason'], {}), '(stop_reason.reason)\n', (1571, 1591), False, 'from case_converter import pascal2snake\n'), ((2945, 2958), 'geometry_msgs.msg.PoseStamped', 'PoseStamped', ([], {}), '()\n', (2956, 2958), False, 'from geometry_msgs.msg import PoseStamped\n'), ((4079, 4092), 'rtree.index.Index', 'index.Index', ([], {}), '()\n', (4090, 4092), False, 'from rtree import index\n'), ((1810, 1823), 'geometry_msgs.msg.PoseStamped', 'PoseStamped', ([], {}), '()\n', (1821, 1823), False, 'from geometry_msgs.msg import PoseStamped\n')] |
import asyncio
import unittest
from .helpers import async_test
class AsyncTestCase(unittest.TestCase):
    ''' AsyncTestCase allows testing of asynchronous functions.
    The usage is the same as :code:`unittest.TestCase`. It works with other test frameworks
    and runners (e.g. `pytest`, `nose`) as well.
AsyncTestCase can run:
- test of synchronous code (:code:`unittest.TestCase`)
- test of asynchronous code, supports syntax with
:code:`async`/:code:`await` (Python 3.5+) and
:code:`asyncio.coroutine`/:code:`yield from` (Python 3.4)
Code to test:
.. code-block:: python
import asyncio
async def async_add(x, y, delay=0.1):
await asyncio.sleep(delay)
return x + y
async def async_one():
await async_nested_exc()
async def async_nested_exc():
await asyncio.sleep(0.1)
raise Exception('Test')
Tests:
.. code-block:: python
import aiounittest
class MyTest(aiounittest.AsyncTestCase):
async def test_await_async_add(self):
ret = await async_add(1, 5)
self.assertEqual(ret, 6)
async def test_await_async_fail(self):
with self.assertRaises(Exception) as e:
await async_one()
'''
def get_event_loop(self):
''' Method provides an event loop for the test
        It is called before each test; by default :code:`aiounittest.AsyncTestCase` creates a brand new event
        loop every time. After completion, the loop is closed and then recreated, set as default,
leaving asyncio clean.
.. note::
            In the most common cases you don't have to bother with this method; the default implementation is the recommended one.
            But if, for some reason, you want to provide your own event loop, just override it. Note that :code:`AsyncTestCase` won't close such a loop.
.. code-block:: python
class MyTest(aiounittest.AsyncTestCase):
def get_event_loop(self):
self.my_loop = asyncio.get_event_loop()
return self.my_loop
'''
return None
def __getattribute__(self, name):
attr = super().__getattribute__(name)
if name.startswith('test_') and asyncio.iscoroutinefunction(attr):
return async_test(attr, loop=self.get_event_loop())
else:
return attr
| [
"asyncio.iscoroutinefunction"
] | [((2441, 2474), 'asyncio.iscoroutinefunction', 'asyncio.iscoroutinefunction', (['attr'], {}), '(attr)\n', (2468, 2474), False, 'import asyncio\n')] |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from knack.log import get_logger
from knack.prompting import prompt_y_n
from knack.util import CLIError
from azure.mgmt.maps.models import (
MapsAccountCreateParameters,
Sku)
ACCOUNT_LOCATION = 'global'
logger = get_logger(__name__)
def create_account(client, resource_group_name, account_name, sku_name='S0', tags=None, force=None):
terms = 'By creating an Azure Maps account, you agree that you have read and agree to the ' \
'\nLicense (https://azure.microsoft.com/support/legal/) and ' \
'\nPrivacy Statement (https://privacy.microsoft.com/privacystatement).'
hint = 'Please select.'
client_denied_terms = 'You must agree to the License and Privacy Statement to create an account.'
# Show ToS message to the user
logger.warning(terms)
# Prompt yes/no for the user, if --force parameter is not passed in.
if not force:
option = prompt_y_n(hint)
if not option:
raise CLIError(client_denied_terms)
# Submit query
sku = Sku(name=sku_name)
maps_account_create_params = MapsAccountCreateParameters(location=ACCOUNT_LOCATION, sku=sku, tags=tags)
return client.create_or_update(resource_group_name, account_name, maps_account_create_params)
def list_accounts(client, resource_group_name=None):
# Retrieve accounts via subscription
if resource_group_name is None:
return client.list_by_subscription()
# Retrieve accounts via resource group
return client.list_by_resource_group(resource_group_name)
def generic_update_account(instance, sku_name=None, tags=None):
# Pre-populate with old instance
maps_account_create_params = MapsAccountCreateParameters(location=ACCOUNT_LOCATION, sku=instance.sku,
tags=instance.tags)
# Update fields with new parameter values
if sku_name:
maps_account_create_params.sku.name = sku_name
if tags:
maps_account_create_params.tags = tags
return maps_account_create_params
| [
"azure.mgmt.maps.models.MapsAccountCreateParameters",
"knack.log.get_logger",
"knack.prompting.prompt_y_n",
"azure.mgmt.maps.models.Sku",
"knack.util.CLIError"
] | [((569, 589), 'knack.log.get_logger', 'get_logger', (['__name__'], {}), '(__name__)\n', (579, 589), False, 'from knack.log import get_logger\n'), ((1370, 1388), 'azure.mgmt.maps.models.Sku', 'Sku', ([], {'name': 'sku_name'}), '(name=sku_name)\n', (1373, 1388), False, 'from azure.mgmt.maps.models import MapsAccountCreateParameters, Sku\n'), ((1422, 1496), 'azure.mgmt.maps.models.MapsAccountCreateParameters', 'MapsAccountCreateParameters', ([], {'location': 'ACCOUNT_LOCATION', 'sku': 'sku', 'tags': 'tags'}), '(location=ACCOUNT_LOCATION, sku=sku, tags=tags)\n', (1449, 1496), False, 'from azure.mgmt.maps.models import MapsAccountCreateParameters, Sku\n'), ((2013, 2109), 'azure.mgmt.maps.models.MapsAccountCreateParameters', 'MapsAccountCreateParameters', ([], {'location': 'ACCOUNT_LOCATION', 'sku': 'instance.sku', 'tags': 'instance.tags'}), '(location=ACCOUNT_LOCATION, sku=instance.sku,\n tags=instance.tags)\n', (2040, 2109), False, 'from azure.mgmt.maps.models import MapsAccountCreateParameters, Sku\n'), ((1252, 1268), 'knack.prompting.prompt_y_n', 'prompt_y_n', (['hint'], {}), '(hint)\n', (1262, 1268), False, 'from knack.prompting import prompt_y_n\n'), ((1310, 1339), 'knack.util.CLIError', 'CLIError', (['client_denied_terms'], {}), '(client_denied_terms)\n', (1318, 1339), False, 'from knack.util import CLIError\n')] |
import sys
import os
from tempfile import TemporaryDirectory
import numpy as np
import tensorflow.compat.v1 as tf
tf.get_logger().setLevel('ERROR') # only show error messages
from recommenders.utils.timer import Timer
from recommenders.utils.constants import SEED
from recommenders.models.deeprec.deeprec_utils import (
prepare_hparams
)
from recommenders.datasets.amazon_reviews import download_and_extract, data_preprocessing, _create_vocab
from recommenders.datasets.download_utils import maybe_download
from recommenders.models.deeprec.models.sequential.sli_rec import SLI_RECModel as SeqModel
# from recommenders.models.deeprec.models.sequential.asvd import A2SVDModel as SeqModel
# from recommenders.models.deeprec.models.sequential.caser import CaserModel as SeqModel
# from recommenders.models.deeprec.models.sequential.gru4rec import GRU4RecModel as SeqModel
# from recommenders.models.deeprec.models.sequential.sum import SUMModel as SeqModel
#from recommenders.models.deeprec.models.sequential.nextitnet import NextItNetModel
from recommenders.models.deeprec.io.sequential_iterator import SequentialIterator
#from recommenders.models.deeprec.io.nextitnet_iterator import NextItNetIterator
print("System version: {}".format(sys.version))
print("Tensorflow version: {}".format(tf.__version__))
yaml_file = '/home/jialia/wsdm/src/recommenders/examples/wsdm2022/sli_rec_B.yaml'
RANDOM_SEED = SEED # Set None for non-deterministic result
# data_path = os.path.join("tests", "resources", "deeprec", "slirec")
# data_path = '/home/jialia/wsdm/seq_datasets/B_full_feature_v2'
data_path = sys.argv[1]
print(os.path.abspath(data_path)) ## the path where I enter the cmd
# for test
train_file = os.path.join(data_path, r'train_instances.txt')
valid_file = os.path.join(data_path, r'valid_instances.txt')
test_file = os.path.join(data_path, r'valid.tsv')
pred_file = os.path.join(data_path, r'inter_test.tsv')
final_pred_file = os.path.join(data_path, r'final_test.tsv')
user_vocab = os.path.join(data_path, r'user_vocab.pkl')
item_vocab = os.path.join(data_path, r'item_vocab.pkl')
cate_vocab = os.path.join(data_path, r'category_vocab.pkl')
output_file = os.path.join(data_path, r'inter_test_output.txt')
submit_file = os.path.join(data_path, r'final_test_output.txt')
train_num_ngs = 9 # number of negative instances with a positive instance for training
valid_num_ngs = 9 # number of negative instances with a positive instance for validation
test_num_ngs = 9 # number of negative instances with a positive instance for testing
_create_vocab(
[train_file, valid_file],
user_vocab, item_vocab, cate_vocab
)
### NOTE:
### remember to use `_create_vocab(train_file, user_vocab, item_vocab, cate_vocab)` to generate the user_vocab, item_vocab and cate_vocab files, if you are using your own dataset rather than using our demo Amazon dataset.
hparams = prepare_hparams(yaml_file,
# user_dropout=False,
embed_l2=0.,
layer_l2=0.,
enable_BN=True, ##-- True
                          learning_rate=0.001,  # set to 0.01 if batch normalization is disabled else 0.001
epochs=100000,
EARLY_STOP=40000,
batch_size=400,
show_step=5000,
MODEL_DIR=os.path.join(data_path, "model/"),
SUMMARIES_DIR=os.path.join(data_path, "summary/"),
user_vocab=user_vocab,
item_vocab=item_vocab,
cate_vocab=cate_vocab,
need_sample=False,
train_num_ngs=train_num_ngs, # provides the number of negative instances for each positive instance for loss computation.
loss='log_loss', #'log_loss', 'softmax'
max_seq_length=50,
cont_feat_len=85,
use_cont_feat=False,
init_item_emb=False,
shuffle=True
)
print(hparams.values)
input_creator = SequentialIterator
model = SeqModel(hparams, input_creator, seed=RANDOM_SEED)
# model.load_model(os.path.join(data_path, "model_20220118_20k_0.8923", 'step_20000'))
with Timer() as train_time:
model = model.fit(train_file, valid_file, valid_num_ngs=9, eval_metric='auc')
print('Time cost for training is {0:.2f} mins'.format(train_time.interval/60.0))
### model = model.fit(test_file, test_file, valid_num_ngs=9, eval_metric='auc') ##-- quick test
model.load_model(os.path.join(data_path, "model", 'best_model'))
res_syn = model.run_eval(test_file, num_ngs=9)
print(res_syn)
model.predict(pred_file, output_file)
model.predict(final_pred_file, submit_file)
# print('Job finished. B, continue training = 20k, seq=50')
# print('Job finished. B_v2, epoch=50k, seq=100')
## ASVD: 0.867497
## GRU: 0.877529
## SLi-Rec: 0.892736
## B_v4: 0.8937
print("Job:B_full_feature_v2, with BN, no cont feat, seq=50, shuffle=True")
## B_full_feature_v2 no cont_feat, with BN
##5k: 0.8778
##10k: 0.8827
##20k: 0.8848
##25k: 0.8824
##35k: 0.8878
##40k: 0.8903
##45k: 0.8876
##50k: 0.8925
##55k: 0.8903
##60k: 0.8894
##65k: 0.8904
##70k: 0.8814
##75k: 0.8896
##80k: 0.8871
##85k: 0.8920
## with shuffle:
##5k: 0.8793
##10k: 0.8884
##15k: 0.8898
##20k: 0.8923
##25k: 0.8908
##30k: 0.8895
##35k: 0.8888
##40k: 0.8913
##45k: 0.8909
##50k: 0.8876
##65k: 0.8881 | [
"recommenders.models.deeprec.models.sequential.sli_rec.SLI_RECModel",
"os.path.join",
"tensorflow.compat.v1.get_logger",
"os.path.abspath",
"recommenders.datasets.amazon_reviews._create_vocab",
"recommenders.utils.timer.Timer"
] | [((1717, 1763), 'os.path.join', 'os.path.join', (['data_path', '"""train_instances.txt"""'], {}), "(data_path, 'train_instances.txt')\n", (1729, 1763), False, 'import os\n'), ((1778, 1824), 'os.path.join', 'os.path.join', (['data_path', '"""valid_instances.txt"""'], {}), "(data_path, 'valid_instances.txt')\n", (1790, 1824), False, 'import os\n'), ((1838, 1874), 'os.path.join', 'os.path.join', (['data_path', '"""valid.tsv"""'], {}), "(data_path, 'valid.tsv')\n", (1850, 1874), False, 'import os\n'), ((1888, 1929), 'os.path.join', 'os.path.join', (['data_path', '"""inter_test.tsv"""'], {}), "(data_path, 'inter_test.tsv')\n", (1900, 1929), False, 'import os\n'), ((1949, 1990), 'os.path.join', 'os.path.join', (['data_path', '"""final_test.tsv"""'], {}), "(data_path, 'final_test.tsv')\n", (1961, 1990), False, 'import os\n'), ((2005, 2046), 'os.path.join', 'os.path.join', (['data_path', '"""user_vocab.pkl"""'], {}), "(data_path, 'user_vocab.pkl')\n", (2017, 2046), False, 'import os\n'), ((2061, 2102), 'os.path.join', 'os.path.join', (['data_path', '"""item_vocab.pkl"""'], {}), "(data_path, 'item_vocab.pkl')\n", (2073, 2102), False, 'import os\n'), ((2117, 2162), 'os.path.join', 'os.path.join', (['data_path', '"""category_vocab.pkl"""'], {}), "(data_path, 'category_vocab.pkl')\n", (2129, 2162), False, 'import os\n'), ((2178, 2226), 'os.path.join', 'os.path.join', (['data_path', '"""inter_test_output.txt"""'], {}), "(data_path, 'inter_test_output.txt')\n", (2190, 2226), False, 'import os\n'), ((2242, 2290), 'os.path.join', 'os.path.join', (['data_path', '"""final_test_output.txt"""'], {}), "(data_path, 'final_test_output.txt')\n", (2254, 2290), False, 'import os\n'), ((2557, 2632), 'recommenders.datasets.amazon_reviews._create_vocab', '_create_vocab', (['[train_file, valid_file]', 'user_vocab', 'item_vocab', 'cate_vocab'], {}), '([train_file, valid_file], user_vocab, item_vocab, cate_vocab)\n', (2570, 2632), False, 'from recommenders.datasets.amazon_reviews import download_and_extract, data_preprocessing, _create_vocab\n'), ((4234, 4284), 'recommenders.models.deeprec.models.sequential.sli_rec.SLI_RECModel', 'SeqModel', (['hparams', 'input_creator'], {'seed': 'RANDOM_SEED'}), '(hparams, input_creator, seed=RANDOM_SEED)\n', (4242, 4284), True, 'from recommenders.models.deeprec.models.sequential.sli_rec import SLI_RECModel as SeqModel\n'), ((1629, 1655), 'os.path.abspath', 'os.path.abspath', (['data_path'], {}), '(data_path)\n', (1644, 1655), False, 'import os\n'), ((4379, 4386), 'recommenders.utils.timer.Timer', 'Timer', ([], {}), '()\n', (4384, 4386), False, 'from recommenders.utils.timer import Timer\n'), ((4680, 4726), 'os.path.join', 'os.path.join', (['data_path', '"""model"""', '"""best_model"""'], {}), "(data_path, 'model', 'best_model')\n", (4692, 4726), False, 'import os\n'), ((115, 130), 'tensorflow.compat.v1.get_logger', 'tf.get_logger', ([], {}), '()\n', (128, 130), True, 'import tensorflow.compat.v1 as tf\n'), ((3411, 3444), 'os.path.join', 'os.path.join', (['data_path', '"""model/"""'], {}), "(data_path, 'model/')\n", (3423, 3444), False, 'import os\n'), ((3486, 3521), 'os.path.join', 'os.path.join', (['data_path', '"""summary/"""'], {}), "(data_path, 'summary/')\n", (3498, 3521), False, 'import os\n')] |
#!/usr/bin/env python2
from __future__ import print_function
import atexit
import logging
import sys
import ssg_test_suite.oscap
import ssg_test_suite.virt
from ssg_test_suite.rule import get_viable_profiles
from ssg_test_suite.virt import SnapshotStack
logging.getLogger(__name__).addHandler(logging.NullHandler())
def perform_profile_check(options):
"""Perform profile check.
Iterate over profiles in datastream and perform scanning of unaltered VM
using every profile according to input. Also perform remediation run.
    No return value is defined; the textual output and the generated reports
    are the result.
"""
dom = ssg_test_suite.virt.connect_domain(options.hypervisor,
options.domain_name)
if dom is None:
sys.exit(1)
snapshot_stack = SnapshotStack(dom)
atexit.register(snapshot_stack.clear)
snapshot_stack.create('origin')
ssg_test_suite.virt.start_domain(dom)
domain_ip = ssg_test_suite.virt.determine_ip(dom)
has_worked = False
profiles = get_viable_profiles(options.target,
options.datastream,
options.benchmark_id)
if len(profiles) > 1:
snapshot_stack.create('profile')
for profile in profiles:
logging.info("Evaluation of profile {0}.".format(profile))
has_worked = True
runner = options.remediate_using
ssg_test_suite.oscap.run_profile(domain_ip,
profile,
'initial',
options.datastream,
options.benchmark_id,
runner=runner)
ssg_test_suite.oscap.run_profile(domain_ip,
profile,
'remediation',
options.datastream,
options.benchmark_id,
runner=runner)
ssg_test_suite.oscap.run_profile(domain_ip,
profile,
'final',
options.datastream,
options.benchmark_id,
runner=runner)
snapshot_stack.revert(delete=False)
if not has_worked:
logging.error("Nothing has been tested!")
snapshot_stack.delete()
# depending on number of profiles we have either "origin" snapshot
# still to be reverted (multiple profiles) or we are reverted
# completely (only one profile was run)
| [
"logging.NullHandler",
"ssg_test_suite.rule.get_viable_profiles",
"logging.getLogger",
"ssg_test_suite.virt.SnapshotStack",
"sys.exit",
"logging.error",
"atexit.register"
] | [((296, 317), 'logging.NullHandler', 'logging.NullHandler', ([], {}), '()\n', (315, 317), False, 'import logging\n'), ((826, 844), 'ssg_test_suite.virt.SnapshotStack', 'SnapshotStack', (['dom'], {}), '(dom)\n', (839, 844), False, 'from ssg_test_suite.virt import SnapshotStack\n'), ((849, 886), 'atexit.register', 'atexit.register', (['snapshot_stack.clear'], {}), '(snapshot_stack.clear)\n', (864, 886), False, 'import atexit\n'), ((1059, 1136), 'ssg_test_suite.rule.get_viable_profiles', 'get_viable_profiles', (['options.target', 'options.datastream', 'options.benchmark_id'], {}), '(options.target, options.datastream, options.benchmark_id)\n', (1078, 1136), False, 'from ssg_test_suite.rule import get_viable_profiles\n'), ((257, 284), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (274, 284), False, 'import logging\n'), ((793, 804), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (801, 804), False, 'import sys\n'), ((2516, 2557), 'logging.error', 'logging.error', (['"""Nothing has been tested!"""'], {}), "('Nothing has been tested!')\n", (2529, 2557), False, 'import logging\n')] |
import mtrain
import numpy as np
import pandas as pd
import random
def simulate_games(num_players=4, domino_size=12, num_games=250, collect_data=True,
debug=False, players=["Random", "Greedy", "Probability", "Neural"],
file_name="PlayData/data4_12_250"):
"""
Runs the mexican train game repeatedly with different combinations of players to
generate data to be used in testing and training the neural net.
If collect_data is on, the play data is retrieved and stored into a .xlsx file for later use
The format for the file name for this is as follows:
PlayData/data + num_players + _ + domino_size + _ + num_games + .xlsx
This spreadsheet is to be used when training the neural net.
This script has no required parameters, and will run the game with the default params if
unchanged.
If collect_data is on, the players are selected randomly each game from:
["Random", "Greedy", "Probability"]
If collect_data is off, the players are selected in order from the parameter players.
When collect_data is off: len(players) must equal num_players
Returns a tuple of lists: (score_averages, win_percentage) corresponding to the players
"""
#Sets column names for building dataframe later on
column_names = ["round_number", "turn_number", "player_number", "play",
"t_num", "hand", "unknown", "potential_plays", "points"]
#Depending on mode of use, sets players and checks validity of player values
modes = []
if collect_data:
modes = ["Random", "Greedy", "Probability"]
else:
if not len(players) == num_players:
raise RuntimeError("len(players) must equal num_players when collect_data is off")
modes = players
#Simulates num_games of games
scores = np.ndarray((num_players, num_games))
wins = np.ndarray((num_players, num_games))
full_data = pd.DataFrame(columns=column_names)
current_index = 0
for game_num in range(0, num_games):
#Randomize players if in collect_data mode
game_modes = []
if collect_data:
for select in range(0, num_players):
game_modes.append(random.choice(modes))
else:
game_modes = modes
#Run game with parameters
results = mtrain.mexicantrain(num_players, domino_size, debug=debug,
modes=game_modes,
data_collection=collect_data,
data_index=current_index, file_name=file_name)
#If collecting data, data is stored into the dataframe
if collect_data:
current_index = results[2].index[-1] + 1
full_data = pd.concat([full_data, results[2]])
#Scores and wins are recorded into their respective arrays
for player_num in range(0, num_players):
scores[player_num, game_num] = results[0][player_num]
if results[1] == player_num:
wins[player_num, game_num] = 1
else:
wins[player_num, game_num] = 0
#Calculates performance of the players
score_averages = np.ndarray((num_players))
win_percentage = np.ndarray((num_players))
for player_num in range(0, num_players):
score_averages[player_num] = np.mean(scores[player_num, :])
win_percentage[player_num] = np.mean(wins[player_num, :])
#If collecting data, prints data to a .xlsx file
if collect_data:
filename = "PlayData/data" + str(num_players) + "_" + str(domino_size) + "_" + str(num_games) + ".xlsx"
writer = pd.ExcelWriter(filename)
full_data.to_excel(writer, "Sheet1")
writer.save()
#Prints results and returns them as well
if debug: print(score_averages)
if debug: print(win_percentage)
return score_averages, win_percentage | [
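

# A minimal driver (an addition for illustration, not in the original script); the small
# num_games value here is an arbitrary choice to keep the demo quick.
if __name__ == "__main__":
    averages, win_rates = simulate_games(num_players=4, domino_size=12, num_games=10,
                                          collect_data=False, debug=True,
                                          players=["Random", "Greedy", "Probability", "Neural"])
    print("Average scores:", averages)
    print("Win rates:", win_rates)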
"numpy.mean",
"random.choice",
"numpy.ndarray",
"pandas.concat",
"pandas.DataFrame",
"pandas.ExcelWriter",
"mtrain.mexicantrain"
] | [((1844, 1880), 'numpy.ndarray', 'np.ndarray', (['(num_players, num_games)'], {}), '((num_players, num_games))\n', (1854, 1880), True, 'import numpy as np\n'), ((1892, 1928), 'numpy.ndarray', 'np.ndarray', (['(num_players, num_games)'], {}), '((num_players, num_games))\n', (1902, 1928), True, 'import numpy as np\n'), ((1945, 1979), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'column_names'}), '(columns=column_names)\n', (1957, 1979), True, 'import pandas as pd\n'), ((3240, 3263), 'numpy.ndarray', 'np.ndarray', (['num_players'], {}), '(num_players)\n', (3250, 3263), True, 'import numpy as np\n'), ((3287, 3310), 'numpy.ndarray', 'np.ndarray', (['num_players'], {}), '(num_players)\n', (3297, 3310), True, 'import numpy as np\n'), ((2355, 2517), 'mtrain.mexicantrain', 'mtrain.mexicantrain', (['num_players', 'domino_size'], {'debug': 'debug', 'modes': 'game_modes', 'data_collection': 'collect_data', 'data_index': 'current_index', 'file_name': 'file_name'}), '(num_players, domino_size, debug=debug, modes=game_modes,\n data_collection=collect_data, data_index=current_index, file_name=file_name\n )\n', (2374, 2517), False, 'import mtrain\n'), ((3395, 3425), 'numpy.mean', 'np.mean', (['scores[player_num, :]'], {}), '(scores[player_num, :])\n', (3402, 3425), True, 'import numpy as np\n'), ((3463, 3491), 'numpy.mean', 'np.mean', (['wins[player_num, :]'], {}), '(wins[player_num, :])\n', (3470, 3491), True, 'import numpy as np\n'), ((3696, 3720), 'pandas.ExcelWriter', 'pd.ExcelWriter', (['filename'], {}), '(filename)\n', (3710, 3720), True, 'import pandas as pd\n'), ((2796, 2830), 'pandas.concat', 'pd.concat', (['[full_data, results[2]]'], {}), '([full_data, results[2]])\n', (2805, 2830), True, 'import pandas as pd\n'), ((2227, 2247), 'random.choice', 'random.choice', (['modes'], {}), '(modes)\n', (2240, 2247), False, 'import random\n')] |
##############################################################################
#
# Below code is inspired on
# https://github.com/facebookresearch/detectron2/blob/master/detectron2/data/datasets/pascal_voc.py
# --------------------------------------------------------
# Detectron2
# Licensed under the Apache 2.0 license.
# --------------------------------------------------------
from fvcore.common.file_io import PathManager
import os
import numpy as np
import xml.etree.ElementTree as ET
from detectron2.structures import BoxMode
from detectron2.data import DatasetCatalog, MetadataCatalog
__all__ = ["register_licenseplates_voc"]
CLASS_NAMES = [
"license_plate",
]
def load_voc_instances(dirname: str, split: str):
"""
Load licenseplates VOC detection annotations to Detectron2 format.
Args:
dirname: Contain "annotations", "images"
split (str): one of "train", "test"
"""
with PathManager.open(os.path.join(dirname, split + ".txt")) as f:
fileids = np.loadtxt(f, dtype=np.str)
dicts = []
for fileid in fileids:
anno_file = os.path.join(dirname, "annotations", fileid + ".xml")
jpeg_file = os.path.join(dirname, "images", fileid + ".jpg")
tree = ET.parse(anno_file)
r = {
"file_name": jpeg_file,
"image_id": fileid,
"height": int(tree.findall("./size/height")[0].text),
"width": int(tree.findall("./size/width")[0].text),
}
instances = []
for obj in tree.findall("object"):
cls = obj.find("name").text
bbox = obj.find("bndbox")
bbox = [float(bbox.find(x).text) for x in ["xmin", "ymin", "xmax", "ymax"]]
instances.append(
{"category_id": CLASS_NAMES.index(cls), "bbox": bbox, "bbox_mode": BoxMode.XYXY_ABS}
)
r["annotations"] = instances
dicts.append(r)
return dicts
def register_licenseplates_voc(name, dirname, split):
DatasetCatalog.register(name,
lambda: load_voc_instances(dirname, split))
MetadataCatalog.get(name).set(thing_classes=CLASS_NAMES,
dirname=dirname,
split=split)
if __name__ == "__main__":
import random
import cv2
from detectron2.utils.visualizer import Visualizer
import argparse
# Parse command line arguments
ap = argparse.ArgumentParser()
ap.add_argument("--split", default="train")
ap.add_argument("--samples", type=int, default=10)
ap.add_argument("--scale", type=float, default=1.0)
args = ap.parse_args()
dataset_name = f"licenseplates_{args.split}"
register_licenseplates_voc(dataset_name, "datasets/licenseplates", args.split)
dataset_dicts = DatasetCatalog.get(dataset_name)
for d in random.sample(dataset_dicts, args.samples):
img = cv2.imread(d["file_name"])
visualizer = Visualizer(img[:, :, ::-1],
metadata=MetadataCatalog.get(dataset_name),
scale=args.scale)
vis = visualizer.draw_dataset_dict(d)
cv2.imshow(dataset_name, vis.get_image()[:, :, ::-1])
# Exit? Press ESC
if cv2.waitKey(0) & 0xFF == 27:
break
cv2.destroyAllWindows()
| [
"random.sample",
"xml.etree.ElementTree.parse",
"argparse.ArgumentParser",
"os.path.join",
"cv2.waitKey",
"cv2.destroyAllWindows",
"detectron2.data.MetadataCatalog.get",
"numpy.loadtxt",
"cv2.imread",
"detectron2.data.DatasetCatalog.get"
] | [((2443, 2468), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2466, 2468), False, 'import argparse\n'), ((2808, 2840), 'detectron2.data.DatasetCatalog.get', 'DatasetCatalog.get', (['dataset_name'], {}), '(dataset_name)\n', (2826, 2840), False, 'from detectron2.data import DatasetCatalog, MetadataCatalog\n'), ((2854, 2896), 'random.sample', 'random.sample', (['dataset_dicts', 'args.samples'], {}), '(dataset_dicts, args.samples)\n', (2867, 2896), False, 'import random\n'), ((3312, 3335), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (3333, 3335), False, 'import cv2\n'), ((1011, 1038), 'numpy.loadtxt', 'np.loadtxt', (['f'], {'dtype': 'np.str'}), '(f, dtype=np.str)\n', (1021, 1038), True, 'import numpy as np\n'), ((1102, 1155), 'os.path.join', 'os.path.join', (['dirname', '"""annotations"""', "(fileid + '.xml')"], {}), "(dirname, 'annotations', fileid + '.xml')\n", (1114, 1155), False, 'import os\n'), ((1176, 1224), 'os.path.join', 'os.path.join', (['dirname', '"""images"""', "(fileid + '.jpg')"], {}), "(dirname, 'images', fileid + '.jpg')\n", (1188, 1224), False, 'import os\n'), ((1241, 1260), 'xml.etree.ElementTree.parse', 'ET.parse', (['anno_file'], {}), '(anno_file)\n', (1249, 1260), True, 'import xml.etree.ElementTree as ET\n'), ((2912, 2938), 'cv2.imread', 'cv2.imread', (["d['file_name']"], {}), "(d['file_name'])\n", (2922, 2938), False, 'import cv2\n'), ((948, 985), 'os.path.join', 'os.path.join', (['dirname', "(split + '.txt')"], {}), "(dirname, split + '.txt')\n", (960, 985), False, 'import os\n'), ((2106, 2131), 'detectron2.data.MetadataCatalog.get', 'MetadataCatalog.get', (['name'], {}), '(name)\n', (2125, 2131), False, 'from detectron2.data import DatasetCatalog, MetadataCatalog\n'), ((3029, 3062), 'detectron2.data.MetadataCatalog.get', 'MetadataCatalog.get', (['dataset_name'], {}), '(dataset_name)\n', (3048, 3062), False, 'from detectron2.data import DatasetCatalog, MetadataCatalog\n'), ((3260, 3274), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (3271, 3274), False, 'import cv2\n')] |
import glob
import time
import random
filelist = glob.glob('/mnt/lustre/chenyuntao1/datasets/imagenet/train/*/*')
random.shuffle(filelist)
begin = time.time()
for i, f in enumerate(filelist):
if i == 10000:
break
with open(f, "rb") as fin:
result = fin.read()
end = time.time()
print("%.1f images/s" % (10000 / (end - begin))) | [
"time.time",
"random.shuffle",
"glob.glob"
] | [((51, 115), 'glob.glob', 'glob.glob', (['"""/mnt/lustre/chenyuntao1/datasets/imagenet/train/*/*"""'], {}), "('/mnt/lustre/chenyuntao1/datasets/imagenet/train/*/*')\n", (60, 115), False, 'import glob\n'), ((116, 140), 'random.shuffle', 'random.shuffle', (['filelist'], {}), '(filelist)\n', (130, 140), False, 'import random\n'), ((150, 161), 'time.time', 'time.time', ([], {}), '()\n', (159, 161), False, 'import time\n'), ((293, 304), 'time.time', 'time.time', ([], {}), '()\n', (302, 304), False, 'import time\n')] |
# Copyright (C) 2015-2019 <NAME>
# SPDX-License-Identifier: Apache-2.0
import dolfin
from . import register_boundary_condition, BoundaryConditionCreator
from ocellaris.utils import (
CodedExpression,
OcellarisCppExpression,
OcellarisError,
verify_field_variable_definition,
)
class OcellarisDirichletBC(dolfin.DirichletBC):
def __init__(
self, simulation, V, value, subdomain_marker, subdomain_id, updater=None
):
"""
A simple storage class for Dirichlet boundary conditions
"""
super().__init__(
V, value, subdomain_marker, subdomain_id, method='geometric'
)
self.simulation = simulation
self._value = value
self.subdomain_marker = subdomain_marker
self.subdomain_id = subdomain_id
self._updater = updater
def func(self):
"""
        The boundary value function
"""
return self._value
def ds(self):
"""
Returns the ds measure of the subdomain
"""
return self.simulation.data['ds'](self.subdomain_id)
def copy_and_change_function_space(self, V):
"""
Return a copy with a new function space. Used when converting from
BCs for a segregated solver (default) to BCs for a coupled solver
"""
return OcellarisDirichletBC(
self.simulation, V, self._value, self.subdomain_marker, self.subdomain_id
)
def update(self):
"""
Update the time and other parameters used in the BC.
This is used every timestep and for all RK substeps
"""
if self._updater:
self._updater(
self.simulation.timestep, self.simulation.time, self.simulation.dt
)
def __repr__(self):
return '<OcellarisDirichletBC on subdomain %d>' % self.subdomain_id
@register_boundary_condition('ConstantValue')
class ConstantDirichletBoundary(BoundaryConditionCreator):
description = 'A prescribed constant value Dirichlet condition'
def __init__(self, simulation, var_name, inp_dict, subdomains, subdomain_id):
"""
Dirichlet condition with constant value
"""
self.simulation = simulation
if var_name[-1].isdigit():
# A var_name like "u0" was given. Look up "Vu"
self.func_space = simulation.data['V%s' % var_name[:-1]]
else:
# A var_name like "u" was given. Look up "Vu"
self.func_space = simulation.data['V%s' % var_name]
value = inp_dict.get_value('value', required_type='any')
if isinstance(value, list):
assert len(value) == simulation.ndim
for d in range(simulation.ndim):
name = '%s%d' % (var_name, d)
self.register_dirichlet_condition(
name, value[d], subdomains, subdomain_id
)
else:
self.register_dirichlet_condition(var_name, value, subdomains, subdomain_id)
def register_dirichlet_condition(self, var_name, value, subdomains, subdomain_id):
"""
Add a Dirichlet condition to this variable
"""
if not isinstance(value, (float, int)):
raise OcellarisError(
'Error in ConstantValue BC for %s' % var_name,
'The value %r is not a number' % value,
)
df_value = dolfin.Constant(value)
# Store the boundary condition for use in the solver
bc = OcellarisDirichletBC(
self.simulation, self.func_space, df_value, subdomains, subdomain_id
)
bcs = self.simulation.data['dirichlet_bcs']
bcs.setdefault(var_name, []).append(bc)
self.simulation.log.info(' Constant value %r for %s' % (value, var_name))
@register_boundary_condition('CodedValue')
class CodedDirichletBoundary(BoundaryConditionCreator):
description = 'A coded Dirichlet condition'
def __init__(self, simulation, var_name, inp_dict, subdomains, subdomain_id):
"""
Dirichlet condition with coded value
"""
self.simulation = simulation
if var_name[-1].isdigit():
# A var_name like "u0" was given. Look up "Vu"
self.func_space = simulation.data['V%s' % var_name[:-1]]
else:
# A var_name like "u" was given. Look up "Vu"
self.func_space = simulation.data['V%s' % var_name]
# Make a dolfin Expression object that runs the code string
code = inp_dict.get_value('code', required_type='any')
if isinstance(code, list):
assert len(code) == simulation.ndim
for d in range(simulation.ndim):
name = '%s%d' % (var_name, d)
description = 'coded value boundary condition for %s' % name
sub_code = inp_dict.get_value('code/%d' % d, required_type='string')
expr = CodedExpression(simulation, sub_code, description)
self.register_dirichlet_condition(name, expr, subdomains, subdomain_id)
else:
description = 'coded value boundary condition for %s' % var_name
expr = CodedExpression(simulation, code, description)
self.register_dirichlet_condition(var_name, expr, subdomains, subdomain_id)
def register_dirichlet_condition(self, var_name, expr, subdomains, subdomain_id):
"""
Store the boundary condition for use in the solver
"""
bc = OcellarisDirichletBC(
self.simulation, self.func_space, expr, subdomains, subdomain_id
)
bcs = self.simulation.data['dirichlet_bcs']
bcs.setdefault(var_name, []).append(bc)
self.simulation.log.info(' Coded value for %s' % var_name)
@register_boundary_condition('CppCodedValue')
class CppCodedDirichletBoundary(BoundaryConditionCreator):
description = 'A C++ coded Dirichlet condition'
def __init__(self, simulation, var_name, inp_dict, subdomains, subdomain_id):
"""
Dirichlet condition with C++ coded value
"""
self.simulation = simulation
if var_name[-1].isdigit():
# A var_name like "u0" was given. Look up "Vu"
self.func_space = simulation.data['V%s' % var_name[:-1]]
else:
# A var_name like "u" was given. Look up "Vu"
self.func_space = simulation.data['V%s' % var_name]
# Make a dolfin Expression object that runs the code string
code = inp_dict.get_value('cpp_code', required_type='any')
if isinstance(code, list):
assert len(code) == simulation.ndim
for d in range(simulation.ndim):
name = '%s%d' % (var_name, d)
sub_code = inp_dict.get_value('cpp_code/%d' % d, required_type='string')
self.register_dirichlet_condition(
name, sub_code, subdomains, subdomain_id
)
else:
self.register_dirichlet_condition(var_name, code, subdomains, subdomain_id)
def register_dirichlet_condition(
self, var_name, cpp_code, subdomains, subdomain_id
):
"""
Store the boundary condition for use in the solver
"""
        description = 'boundary condition for %s' % var_name
P = self.func_space.ufl_element().degree()
expr, updater = OcellarisCppExpression(
self.simulation, cpp_code, description, P, return_updater=True
)
bc = OcellarisDirichletBC(
self.simulation,
self.func_space,
expr,
subdomains,
subdomain_id,
updater=updater,
)
bcs = self.simulation.data['dirichlet_bcs']
bcs.setdefault(var_name, []).append(bc)
self.simulation.log.info(' C++ coded value for %s' % var_name)
@register_boundary_condition('FieldFunction')
class FieldFunctionDirichletBoundary(BoundaryConditionCreator):
description = 'A Dirichlet condition with values from a field function'
def __init__(self, simulation, var_name, inp_dict, subdomains, subdomain_id):
"""
Dirichlet boundary condition with value from a field function
"""
self.simulation = simulation
if var_name[-1].isdigit():
# A var_name like "u0" was given. Look up "Vu"
self.func_space = simulation.data['V%s' % var_name[:-1]]
else:
# A var_name like "u" was given. Look up "Vu"
self.func_space = simulation.data['V%s' % var_name]
# Get the field function expression object
vardef = inp_dict.get_value('function', required_type='any')
        description = 'boundary condition for %s' % var_name
if isinstance(vardef, list):
assert len(vardef) == simulation.ndim
exprs = [
verify_field_variable_definition(simulation, vd, description)
for vd in vardef
]
else:
expr = verify_field_variable_definition(simulation, vardef, description)
if expr.ufl_shape != ():
assert expr.ufl_shape == (
simulation.ndim,
), 'Expected shape %r got %r' % ((simulation.ndim,), expr.ufl_shape)
exprs = [expr[d] for d in range(simulation.ndim)]
else:
exprs = [expr]
# Register BCs
if len(exprs) > 1:
for d in range(simulation.ndim):
name = '%s%d' % (var_name, d)
self.register_dirichlet_condition(
name, exprs[d], subdomains, subdomain_id
)
else:
self.register_dirichlet_condition(
var_name, exprs[0], subdomains, subdomain_id
)
def register_dirichlet_condition(self, var_name, expr, subdomains, subdomain_id):
"""
Store the boundary condition for use in the solver
"""
assert expr.ufl_shape == ()
bc = OcellarisDirichletBC(
self.simulation, self.func_space, expr, subdomains, subdomain_id
)
bcs = self.simulation.data['dirichlet_bcs']
bcs.setdefault(var_name, []).append(bc)
self.simulation.log.info(' Field function value for %s' % var_name)
@register_boundary_condition('FieldVelocityValve')
class FieldVelocityValveDirichletBoundary(BoundaryConditionCreator):
description = 'A Dirichlet condition that compensates for non-zero total flux of a known velocity field'
def __init__(self, simulation, var_name, inp_dict, subdomains, subdomain_id):
"""
Dirichlet boundary condition with value from a field function
"""
self.simulation = simulation
# A var_name like "u0" should be given. Look up "Vu"
self.func_space = simulation.data['V%s' % var_name[:-1]]
# Get the field function expression object
vardef = inp_dict.get_value('function', required_type='any')
        description = 'boundary condition for %s' % var_name
self.velocity = verify_field_variable_definition(
simulation, vardef, description
)
field = simulation.fields[vardef.split('/')[0]]
# The expression value is updated as the field is changed
inp_dict.get_value('function', required_type='any')
field.register_dependent_field(self)
self.flux = dolfin.Constant(1.0)
        # Create the boundary condition that applies the compensating flux value
bc = OcellarisDirichletBC(
self.simulation, self.func_space, self.flux, subdomains, subdomain_id
)
bcs = self.simulation.data['dirichlet_bcs']
bcs.setdefault(var_name, []).append(bc)
self.simulation.log.info(' Field velocity valve for %s' % var_name)
# Compute the region area, then update the flux
mesh = simulation.data['mesh']
self.area = dolfin.assemble(self.flux * bc.ds()(domain=mesh))
self.region_names = inp_dict.get_value('regions', required_type='list(string)')
self.update()
def update(self, timestep_number=None, t=None, dt=None):
"""
The main field has changed, update our flux to make the total sum to zero
"""
regions = self.simulation.data['boundary']
mesh = self.simulation.data['mesh']
n = dolfin.FacetNormal(mesh)
flux = 0
count = 0
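        # Integrate dot(velocity, n) over each named boundary region to obtain the net flux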
for region in regions:
if region.name in self.region_names:
f = dolfin.dot(self.velocity, n) * region.ds()
flux += dolfin.assemble(f)
count += 1
assert count == len(self.region_names)
# FIXME: assumes n is pointing outwards along the axis in the positive
# direction in this boundary region
self.flux.assign(dolfin.Constant(-flux / self.area))
| [
"dolfin.dot",
"ocellaris.utils.OcellarisCppExpression",
"dolfin.assemble",
"ocellaris.utils.verify_field_variable_definition",
"dolfin.Constant",
"ocellaris.utils.OcellarisError",
"dolfin.FacetNormal",
"ocellaris.utils.CodedExpression"
] | [((3418, 3440), 'dolfin.Constant', 'dolfin.Constant', (['value'], {}), '(value)\n', (3433, 3440), False, 'import dolfin\n'), ((7398, 7488), 'ocellaris.utils.OcellarisCppExpression', 'OcellarisCppExpression', (['self.simulation', 'cpp_code', 'description', 'P'], {'return_updater': '(True)'}), '(self.simulation, cpp_code, description, P,\n return_updater=True)\n', (7420, 7488), False, 'from ocellaris.utils import CodedExpression, OcellarisCppExpression, OcellarisError, verify_field_variable_definition\n'), ((11110, 11175), 'ocellaris.utils.verify_field_variable_definition', 'verify_field_variable_definition', (['simulation', 'vardef', 'description'], {}), '(simulation, vardef, description)\n', (11142, 11175), False, 'from ocellaris.utils import CodedExpression, OcellarisCppExpression, OcellarisError, verify_field_variable_definition\n'), ((11446, 11466), 'dolfin.Constant', 'dolfin.Constant', (['(1.0)'], {}), '(1.0)\n', (11461, 11466), False, 'import dolfin\n'), ((12346, 12370), 'dolfin.FacetNormal', 'dolfin.FacetNormal', (['mesh'], {}), '(mesh)\n', (12364, 12370), False, 'import dolfin\n'), ((3250, 3356), 'ocellaris.utils.OcellarisError', 'OcellarisError', (["('Error in ConstantValue BC for %s' % var_name)", "('The value %r is not a number' % value)"], {}), "('Error in ConstantValue BC for %s' % var_name, \n 'The value %r is not a number' % value)\n", (3264, 3356), False, 'from ocellaris.utils import CodedExpression, OcellarisCppExpression, OcellarisError, verify_field_variable_definition\n'), ((5193, 5239), 'ocellaris.utils.CodedExpression', 'CodedExpression', (['simulation', 'code', 'description'], {}), '(simulation, code, description)\n', (5208, 5239), False, 'from ocellaris.utils import CodedExpression, OcellarisCppExpression, OcellarisError, verify_field_variable_definition\n'), ((9033, 9098), 'ocellaris.utils.verify_field_variable_definition', 'verify_field_variable_definition', (['simulation', 'vardef', 'description'], {}), '(simulation, vardef, description)\n', (9065, 9098), False, 'from ocellaris.utils import CodedExpression, OcellarisCppExpression, OcellarisError, verify_field_variable_definition\n'), ((12821, 12855), 'dolfin.Constant', 'dolfin.Constant', (['(-flux / self.area)'], {}), '(-flux / self.area)\n', (12836, 12855), False, 'import dolfin\n'), ((4944, 4994), 'ocellaris.utils.CodedExpression', 'CodedExpression', (['simulation', 'sub_code', 'description'], {}), '(simulation, sub_code, description)\n', (4959, 4994), False, 'from ocellaris.utils import CodedExpression, OcellarisCppExpression, OcellarisError, verify_field_variable_definition\n'), ((8891, 8952), 'ocellaris.utils.verify_field_variable_definition', 'verify_field_variable_definition', (['simulation', 'vd', 'description'], {}), '(simulation, vd, description)\n', (8923, 8952), False, 'from ocellaris.utils import CodedExpression, OcellarisCppExpression, OcellarisError, verify_field_variable_definition\n'), ((12573, 12591), 'dolfin.assemble', 'dolfin.assemble', (['f'], {}), '(f)\n', (12588, 12591), False, 'import dolfin\n'), ((12506, 12534), 'dolfin.dot', 'dolfin.dot', (['self.velocity', 'n'], {}), '(self.velocity, n)\n', (12516, 12534), False, 'import dolfin\n')] |
import unittest
from count_split_inversions import count_inversions
class TestCountSplitInversions(unittest.TestCase):
def test_count_inversions(self):
input = [1, 3, 5, 2, 4, 6]
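        # the inversions are (3, 2), (5, 2) and (5, 4), so we expect 3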
result = count_inversions(input)
self.assertEqual(result, 3)
if __name__ == '__main__':
unittest.main()
| [
"unittest.main",
"count_split_inversions.count_inversions"
] | [((303, 318), 'unittest.main', 'unittest.main', ([], {}), '()\n', (316, 318), False, 'import unittest\n'), ((211, 234), 'count_split_inversions.count_inversions', 'count_inversions', (['input'], {}), '(input)\n', (227, 234), False, 'from count_split_inversions import count_inversions\n')] |
import time
import krpc
conn = krpc.connect(name='Sub-orbital flight')
vessel = conn.space_center.active_vessel
vessel.auto_pilot.target_pitch_and_heading(90, 90)
vessel.auto_pilot.engage()
vessel.control.throttle = 1
time.sleep(1)
print('Launch!')
vessel.control.activate_next_stage()
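# Set up a server-side event that fires once the boosters' solid fuel drops below 0.1 units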
fuel_amount = conn.get_call(vessel.resources.amount, 'SolidFuel')
expr = conn.krpc.Expression.less_than(
conn.krpc.Expression.call(fuel_amount),
conn.krpc.Expression.constant_float(0.1))
event = conn.krpc.add_event(expr)
with event.condition:
event.wait()
print('Booster separation')
vessel.control.activate_next_stage()
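# Wait until the vessel climbs above 10 km before pitching over for the gravity turn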
mean_altitude = conn.get_call(getattr, vessel.flight(), 'mean_altitude')
expr = conn.krpc.Expression.greater_than(
conn.krpc.Expression.call(mean_altitude),
conn.krpc.Expression.constant_double(10000))
event = conn.krpc.add_event(expr)
with event.condition:
event.wait()
print('Gravity turn')
vessel.auto_pilot.target_pitch_and_heading(60, 90)
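# Keep burning until the apoapsis reaches 100 km, then cut the throttle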
apoapsis_altitude = conn.get_call(getattr, vessel.orbit, 'apoapsis_altitude')
expr = conn.krpc.Expression.greater_than(
conn.krpc.Expression.call(apoapsis_altitude),
conn.krpc.Expression.constant_double(100000))
event = conn.krpc.add_event(expr)
with event.condition:
event.wait()
print('Launch stage separation')
vessel.control.throttle = 0
time.sleep(1)
vessel.control.activate_next_stage()
vessel.auto_pilot.disengage()
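# Coast until the capsule falls back below 1 km, then activate the parachute stage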
srf_altitude = conn.get_call(getattr, vessel.flight(), 'surface_altitude')
expr = conn.krpc.Expression.less_than(
conn.krpc.Expression.call(srf_altitude),
conn.krpc.Expression.constant_double(1000))
event = conn.krpc.add_event(expr)
with event.condition:
event.wait()
vessel.control.activate_next_stage()
while vessel.flight(vessel.orbit.body.reference_frame).vertical_speed < -0.1:
print('Altitude = %.1f meters' % vessel.flight().surface_altitude)
time.sleep(1)
print('Landed!')
| [
"krpc.connect",
"time.sleep"
] | [((31, 70), 'krpc.connect', 'krpc.connect', ([], {'name': '"""Sub-orbital flight"""'}), "(name='Sub-orbital flight')\n", (43, 70), False, 'import krpc\n'), ((220, 233), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (230, 233), False, 'import time\n'), ((1337, 1350), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (1347, 1350), False, 'import time\n'), ((1891, 1904), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (1901, 1904), False, 'import time\n')] |
from distutils.version import LooseVersion
from itertools import product
import numpy as np
import pandas as pd
from ..model.event import Event
from ..model.event import EventTeam
from ..model.submission import Submission
from ..model.team import Team
from .team import get_event_team_by_name
from .submission import get_bagged_scores
from .submission import get_scores
from .submission import get_submission_max_ram
from .submission import get_time
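# pandas < 1.0 uses -1 to disable the column width limit; pandas >= 1.0 expects None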
width = -1 if LooseVersion(pd.__version__) < LooseVersion("1.0.0") else None
pd.set_option('display.max_colwidth', width)
def _compute_leaderboard(session, submissions, leaderboard_type, event_name,
with_links=True):
"""Format the leaderboard.
Parameters
----------
session : :class:`sqlalchemy.orm.Session`
The session to directly perform the operation on the database.
submissions : list of :class:`ramp_database.model.Submission`
The submission to report in the leaderboard.
leaderboard_type : {'public', 'private'}
The type of leaderboard to built.
event_name : str
The name of the event.
with_links : bool
Whether or not the submission name should be clickable.
Returns
-------
leaderboard : dataframe
The leaderboard in a dataframe format.
"""
record_score = []
event = session.query(Event).filter_by(name=event_name).one()
map_score_precision = {score_type.name: score_type.precision
for score_type in event.score_types}
for sub in submissions:
        # keep only the scores bagged at the highest n_bag level
df_scores_bag = get_bagged_scores(session, sub.id)
highest_level = df_scores_bag.index.get_level_values('n_bag').max()
df_scores_bag = df_scores_bag.loc[(slice(None), highest_level), :]
df_scores_bag.index = df_scores_bag.index.droplevel('n_bag')
df_scores_bag = df_scores_bag.round(map_score_precision)
df_scores = get_scores(session, sub.id)
df_scores = df_scores.round(map_score_precision)
df_time = get_time(session, sub.id)
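        # collapse the per-fold timings into a single row of train/valid/test totals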
df_time = df_time.stack().to_frame()
df_time.index = df_time.index.set_names(['fold', 'step'])
df_time = df_time.rename(columns={0: 'time'})
df_time = df_time.sum(axis=0, level="step").T
df_scores_mean = df_scores.groupby('step').mean()
df_scores_std = df_scores.groupby('step').std()
# select only the validation and testing steps and rename them to
# public and private
map_renaming = {'valid': 'public', 'test': 'private'}
df_scores_mean = (df_scores_mean.loc[list(map_renaming.keys())]
.rename(index=map_renaming)
.stack().to_frame().T)
df_scores_std = (df_scores_std.loc[list(map_renaming.keys())]
.rename(index=map_renaming)
.stack().to_frame().T)
df_scores_bag = (df_scores_bag.rename(index=map_renaming)
.stack().to_frame().T)
df = pd.concat([df_scores_bag, df_scores_mean, df_scores_std], axis=1,
keys=['bag', 'mean', 'std'])
df.columns = df.columns.set_names(['stat', 'set', 'score'])
        # flatten the column MultiIndex into plain string column names
df.columns = df.columns.map(lambda x: " ".join(x))
# add the aggregated time information
df_time.index = df.index
df_time = df_time.rename(
columns={'train': 'train time [s]',
'valid': 'validation time [s]',
'test': 'test time [s]'}
)
df = pd.concat([df, df_time], axis=1)
if leaderboard_type == 'private':
df['submission ID'] = sub.basename.replace('submission_', '')
df['team'] = sub.team.name
df['submission'] = sub.name_with_link if with_links else sub.name
df['contributivity'] = int(round(100 * sub.contributivity))
df['historical contributivity'] = int(round(
100 * sub.historical_contributivity))
df['max RAM [MB]'] = get_submission_max_ram(session, sub.id)
df['submitted at (UTC)'] = pd.Timestamp(sub.submission_timestamp)
record_score.append(df)
# stack all the records
df = pd.concat(record_score, axis=0, ignore_index=True, sort=False)
# keep only second precision for the time stamp
df['submitted at (UTC)'] = df['submitted at (UTC)'].astype('datetime64[s]')
    # reorder the columns
stats_order = (['bag', 'mean', 'std'] if leaderboard_type == 'private'
else ['bag'])
dataset_order = (['public', 'private'] if leaderboard_type == 'private'
else ['public'])
score_order = ([event.official_score_name] +
[score_type.name for score_type in event.score_types
if score_type.name != event.official_score_name])
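    # build flat column names such as "bag public <score>" for every stat/set/score combination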
score_list = [
'{} {} {}'.format(stat, dataset, score)
for dataset, score, stat in product(dataset_order,
score_order,
stats_order)
]
# Only display train and validation time for the public leaderboard
time_list = (['train time [s]', 'validation time [s]', 'test time [s]']
if leaderboard_type == 'private'
else ['train time [s]', 'validation time [s]'])
col_ordered = (
['team', 'submission'] +
score_list +
['contributivity', 'historical contributivity'] +
time_list +
['max RAM [MB]', 'submitted at (UTC)']
)
if leaderboard_type == "private":
col_ordered = ["submission ID"] + col_ordered
df = df[col_ordered]
    # drop the contributivity columns if they contain only zeros
contrib_columns = ['contributivity', 'historical contributivity']
if (df[contrib_columns] == 0).all(axis=0).all():
df = df.drop(columns=contrib_columns)
df = df.sort_values(
"bag {} {}".format(leaderboard_type, event.official_score_name),
ascending=event.get_official_score_type(session).is_lower_the_better
)
# rename the column name for the public leaderboard
if leaderboard_type == 'public':
df = df.rename(columns={
key: value for key, value in zip(score_list, score_order)
})
return df
def _compute_competition_leaderboard(session, submissions, leaderboard_type,
event_name):
"""Format the competition leaderboard.
Parameters
----------
session : :class:`sqlalchemy.orm.Session`
The session to directly perform the operation on the database.
submissions : list of :class:`ramp_database.model.Submission`
The submission to report in the leaderboard.
leaderboard_type : {'public', 'private'}
The type of leaderboard to built.
event_name : str
The name of the event.
Returns
-------
competition_leaderboard : dataframe
The competition leaderboard in a dataframe format.
"""
event = session.query(Event).filter_by(name=event_name).one()
score_type = event.get_official_score_type(session)
score_name = event.official_score_name
private_leaderboard = _compute_leaderboard(session, submissions, 'private',
event_name, with_links=False)
time_list = (['train time [s]', 'validation time [s]', 'test time [s]']
if leaderboard_type == 'private'
else ['train time [s]', 'validation time [s]'])
col_selected_private = (['team', 'submission'] +
['bag private ' + score_name,
'bag public ' + score_name] +
time_list +
['submitted at (UTC)'])
leaderboard_df = private_leaderboard[col_selected_private]
leaderboard_df = leaderboard_df.rename(
columns={'bag private ' + score_name: 'private ' + score_name,
'bag public ' + score_name: 'public ' + score_name}
)
# select best submission for each team
best_df = (leaderboard_df.groupby('team').min()
if score_type.is_lower_the_better
else leaderboard_df.groupby('team').max())
best_df = best_df[['public ' + score_name]].reset_index()
best_df['best'] = True
# merge to get a best indicator column then select best
leaderboard_df = pd.merge(
leaderboard_df, best_df, how='left',
left_on=['team', 'public ' + score_name],
right_on=['team', 'public ' + score_name]
)
leaderboard_df = leaderboard_df.fillna(False)
leaderboard_df = leaderboard_df[leaderboard_df['best']]
leaderboard_df = leaderboard_df.drop(columns='best')
# dealing with ties: we need the lowest timestamp
best_df = leaderboard_df.groupby('team').min()
best_df = best_df[['submitted at (UTC)']].reset_index()
best_df['best'] = True
leaderboard_df = pd.merge(
leaderboard_df, best_df, how='left',
left_on=['team', 'submitted at (UTC)'],
right_on=['team', 'submitted at (UTC)'])
leaderboard_df = leaderboard_df.fillna(False)
leaderboard_df = leaderboard_df[leaderboard_df['best']]
leaderboard_df = leaderboard_df.drop(columns='best')
# sort by public score then by submission timestamp, compute rank
leaderboard_df = leaderboard_df.sort_values(
by=['public ' + score_name, 'submitted at (UTC)'],
ascending=[score_type.is_lower_the_better, True])
leaderboard_df['public rank'] = np.arange(len(leaderboard_df)) + 1
# sort by private score then by submission timestamp, compute rank
leaderboard_df = leaderboard_df.sort_values(
by=['private ' + score_name, 'submitted at (UTC)'],
ascending=[score_type.is_lower_the_better, True])
leaderboard_df['private rank'] = np.arange(len(leaderboard_df)) + 1
leaderboard_df['move'] = \
leaderboard_df['public rank'] - leaderboard_df['private rank']
leaderboard_df['move'] = [
'{:+d}'.format(m) if m != 0 else '-' for m in leaderboard_df['move']]
col_selected = (
[leaderboard_type + ' rank', 'team', 'submission',
leaderboard_type + ' ' + score_name] +
time_list +
['submitted at (UTC)']
)
if leaderboard_type == 'private':
col_selected.insert(1, 'move')
df = leaderboard_df[col_selected]
df = df.rename(columns={
leaderboard_type + ' ' + score_name: score_name,
leaderboard_type + ' rank': 'rank'
})
df = df.sort_values(by='rank')
return df
def get_leaderboard(session, leaderboard_type, event_name, user_name=None,
with_links=True):
"""Get a leaderboard.
Parameters
----------
session : :class:`sqlalchemy.orm.Session`
The session to directly perform the operation on the database.
leaderboard_type : {'public', 'private', 'failed', 'new', \
'public competition', 'private competition'}
The type of leaderboard to generate.
event_name : str
The event name.
user_name : None or str, default is None
The user name. If None, scores from all users will be queried. This
parameter is discarded when requesting the competition leaderboard.
with_links : bool, default is True
Whether or not the submission name should be clickable.
Returns
-------
leaderboard : str
The leaderboard in HTML format.
"""
q = (session.query(Submission)
.filter(Event.id == EventTeam.event_id)
.filter(Team.id == EventTeam.team_id)
.filter(EventTeam.id == Submission.event_team_id)
.filter(Event.name == event_name))
if user_name is not None:
q = q.filter(Team.name == user_name)
submissions = q.all()
submission_filter = {'public': 'is_public_leaderboard',
'private': 'is_private_leaderboard',
'failed': 'is_error',
'new': 'is_new',
'public competition': 'is_in_competition',
'private competition': 'is_in_competition'}
submissions = [sub for sub in submissions
if (getattr(sub, submission_filter[leaderboard_type]) and
sub.is_not_sandbox)]
if not submissions:
return None
if leaderboard_type in ['public', 'private']:
df = _compute_leaderboard(
session, submissions, leaderboard_type, event_name,
with_links=with_links
)
elif leaderboard_type in ['new', 'failed']:
if leaderboard_type == 'new':
columns = ['team', 'submission', 'submitted at (UTC)', 'state']
else:
columns = ['team', 'submission', 'submitted at (UTC)', 'error']
        # we rely on the zip function to ignore the submission state if the error
        # column was not appended
data = [{
column: value for column, value in zip(
columns,
[sub.event_team.team.name,
sub.name_with_link,
pd.Timestamp(sub.submission_timestamp),
(sub.state_with_link if leaderboard_type == 'failed'
else sub.state)])
} for sub in submissions]
df = pd.DataFrame(data, columns=columns)
else:
# make some extra filtering
submissions = [sub for sub in submissions if sub.is_public_leaderboard]
if not submissions:
return None
competition_type = ('public' if 'public' in leaderboard_type
else 'private')
df = _compute_competition_leaderboard(
session, submissions, competition_type, event_name
)
df_html = df.to_html(escape=False, index=False, max_cols=None,
max_rows=None, justify='left')
df_html = '<thead> {} </tbody>'.format(
df_html.split('<thead>')[1].split('</tbody>')[0]
)
return df_html
def update_leaderboards(session, event_name, new_only=False):
"""Update the leaderboards for a given event.
Parameters
----------
session : :class:`sqlalchemy.orm.Session`
The session to directly perform the operation on the database.
event_name : str
The event name.
new_only : bool, default is False
Whether or not to update the whole leaderboards or only the new
submissions. You can turn this option to True when adding a new
submission in the database.
"""
event = session.query(Event).filter_by(name=event_name).one()
if not new_only:
event.private_leaderboard_html = get_leaderboard(
session, 'private', event_name
)
event.public_leaderboard_html_with_links = get_leaderboard(
session, 'public', event_name
)
event.public_leaderboard_html_no_links = get_leaderboard(
session, 'public', event_name, with_links=False
)
event.failed_leaderboard_html = get_leaderboard(
session, 'failed', event_name
)
event.public_competition_leaderboard_html = get_leaderboard(
session, 'public competition', event_name
)
event.private_competition_leaderboard_html = get_leaderboard(
session, 'private competition', event_name
)
event.new_leaderboard_html = get_leaderboard(
session, 'new', event_name
)
session.commit()
def update_user_leaderboards(session, event_name, user_name,
new_only=False):
"""Update the of a user leaderboards for a given event.
Parameters
----------
session : :class:`sqlalchemy.orm.Session`
The session to directly perform the operation on the database.
event_name : str
The event name.
user_name : str
The user name. If None, scores from all users will be queried.
new_only : bool, default is False
Whether or not to update the whole leaderboards or only the new
submissions. You can turn this option to True when adding a new
submission in the database.
"""
event_team = get_event_team_by_name(session, event_name, user_name)
if not new_only:
event_team.leaderboard_html = get_leaderboard(
session, 'public', event_name, user_name
)
event_team.failed_leaderboard_html = get_leaderboard(
session, 'failed', event_name, user_name
)
event_team.new_leaderboard_html = get_leaderboard(
session, 'new', event_name, user_name
)
session.commit()
def update_all_user_leaderboards(session, event_name, new_only=False):
"""Update the leaderboards for all users for a given event.
Parameters
----------
session : :class:`sqlalchemy.orm.Session`
The session to directly perform the operation on the database.
event_name : str
The event name.
new_only : bool, default is False
Whether or not to update the whole leaderboards or only the new
submissions. You can turn this option to True when adding a new
submission in the database.
"""
event = session.query(Event).filter_by(name=event_name).one()
event_teams = session.query(EventTeam).filter_by(event=event).all()
for event_team in event_teams:
user_name = event_team.team.name
if not new_only:
event_team.leaderboard_html = get_leaderboard(
session, 'public', event_name, user_name
)
event_team.failed_leaderboard_html = get_leaderboard(
session, 'failed', event_name, user_name
)
event_team.new_leaderboard_html = get_leaderboard(
session, 'new', event_name, user_name
)
session.commit()
| [
"pandas.merge",
"itertools.product",
"pandas.set_option",
"pandas.DataFrame",
"distutils.version.LooseVersion",
"pandas.Timestamp",
"pandas.concat"
] | [((532, 576), 'pandas.set_option', 'pd.set_option', (['"""display.max_colwidth"""', 'width'], {}), "('display.max_colwidth', width)\n", (545, 576), True, 'import pandas as pd\n'), ((4369, 4431), 'pandas.concat', 'pd.concat', (['record_score'], {'axis': '(0)', 'ignore_index': '(True)', 'sort': '(False)'}), '(record_score, axis=0, ignore_index=True, sort=False)\n', (4378, 4431), True, 'import pandas as pd\n'), ((8577, 8711), 'pandas.merge', 'pd.merge', (['leaderboard_df', 'best_df'], {'how': '"""left"""', 'left_on': "['team', 'public ' + score_name]", 'right_on': "['team', 'public ' + score_name]"}), "(leaderboard_df, best_df, how='left', left_on=['team', 'public ' +\n score_name], right_on=['team', 'public ' + score_name])\n", (8585, 8711), True, 'import pandas as pd\n'), ((9119, 9249), 'pandas.merge', 'pd.merge', (['leaderboard_df', 'best_df'], {'how': '"""left"""', 'left_on': "['team', 'submitted at (UTC)']", 'right_on': "['team', 'submitted at (UTC)']"}), "(leaderboard_df, best_df, how='left', left_on=['team',\n 'submitted at (UTC)'], right_on=['team', 'submitted at (UTC)'])\n", (9127, 9249), True, 'import pandas as pd\n'), ((469, 497), 'distutils.version.LooseVersion', 'LooseVersion', (['pd.__version__'], {}), '(pd.__version__)\n', (481, 497), False, 'from distutils.version import LooseVersion\n'), ((500, 521), 'distutils.version.LooseVersion', 'LooseVersion', (['"""1.0.0"""'], {}), "('1.0.0')\n", (512, 521), False, 'from distutils.version import LooseVersion\n'), ((3141, 3240), 'pandas.concat', 'pd.concat', (['[df_scores_bag, df_scores_mean, df_scores_std]'], {'axis': '(1)', 'keys': "['bag', 'mean', 'std']"}), "([df_scores_bag, df_scores_mean, df_scores_std], axis=1, keys=[\n 'bag', 'mean', 'std'])\n", (3150, 3240), True, 'import pandas as pd\n'), ((3726, 3758), 'pandas.concat', 'pd.concat', (['[df, df_time]'], {'axis': '(1)'}), '([df, df_time], axis=1)\n', (3735, 3758), True, 'import pandas as pd\n'), ((4260, 4298), 'pandas.Timestamp', 'pd.Timestamp', (['sub.submission_timestamp'], {}), '(sub.submission_timestamp)\n', (4272, 4298), True, 'import pandas as pd\n'), ((5109, 5157), 'itertools.product', 'product', (['dataset_order', 'score_order', 'stats_order'], {}), '(dataset_order, score_order, stats_order)\n', (5116, 5157), False, 'from itertools import product\n'), ((13513, 13548), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'columns': 'columns'}), '(data, columns=columns)\n', (13525, 13548), True, 'import pandas as pd\n'), ((13316, 13354), 'pandas.Timestamp', 'pd.Timestamp', (['sub.submission_timestamp'], {}), '(sub.submission_timestamp)\n', (13328, 13354), True, 'import pandas as pd\n')] |
# Automate the Boring Stuff with Python
import time, sys
indent = 0 # How many spaces to indent
indent_Increasing = True # Whether the indentation is increasing or not
try:
while True: # The main program loop
print(' ' * indent, end='')
print('********')
time.sleep(0.1) # Pause for 1/10th of a second
if indent_Increasing:
indent = indent + 1
if indent == 20:
indent_Increasing = False
else:
indent = indent - 1
if indent == 0:
indent_Increasing = True
except KeyboardInterrupt:
sys.exit() | [
"time.sleep",
"sys.exit"
] | [((284, 299), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (294, 299), False, 'import time, sys\n'), ((613, 623), 'sys.exit', 'sys.exit', ([], {}), '()\n', (621, 623), False, 'import time, sys\n')] |
from django.conf import settings
from django.conf.urls import url, static
from . import views
from . import jobs
urlpatterns = [
url(r'^choose_company/(?P<company_id>.*)/$', views.choose_company, name='choose_company'),
url(r'^cleanlogs/$', jobs.cleanlogs, name='cleanlogs'),
url(r'^primecache/$', jobs.primecache, name='primecache'),
url(r'^dump_fixtures/$', views.dump_fixtures),
]
| [
"django.conf.urls.url"
] | [((136, 229), 'django.conf.urls.url', 'url', (['"""^choose_company/(?P<company_id>.*)/$"""', 'views.choose_company'], {'name': '"""choose_company"""'}), "('^choose_company/(?P<company_id>.*)/$', views.choose_company, name=\n 'choose_company')\n", (139, 229), False, 'from django.conf.urls import url, static\n'), ((235, 288), 'django.conf.urls.url', 'url', (['"""^cleanlogs/$"""', 'jobs.cleanlogs'], {'name': '"""cleanlogs"""'}), "('^cleanlogs/$', jobs.cleanlogs, name='cleanlogs')\n", (238, 288), False, 'from django.conf.urls import url, static\n'), ((295, 351), 'django.conf.urls.url', 'url', (['"""^primecache/$"""', 'jobs.primecache'], {'name': '"""primecache"""'}), "('^primecache/$', jobs.primecache, name='primecache')\n", (298, 351), False, 'from django.conf.urls import url, static\n'), ((358, 402), 'django.conf.urls.url', 'url', (['"""^dump_fixtures/$"""', 'views.dump_fixtures'], {}), "('^dump_fixtures/$', views.dump_fixtures)\n", (361, 402), False, 'from django.conf.urls import url, static\n')] |
import pytest
from nbformat.v4.nbbase import new_notebook, new_markdown_cell, new_code_cell, new_raw_cell
from jupytext.compare import compare_notebooks, NotebookDifference, test_round_trip_conversion as round_trip_conversion
def test_raise_on_different_metadata():
ref = new_notebook(metadata={'kernelspec': {'language': 'python', 'name': 'python', 'display_name': 'Python'}},
cells=[new_markdown_cell('Cell one')])
test = new_notebook(metadata={'kernelspec': {'language': 'R', 'name': 'R', 'display_name': 'R'}},
cells=[new_markdown_cell('Cell one')])
with pytest.raises(NotebookDifference):
compare_notebooks(ref, test, 'md')
@pytest.mark.parametrize('raise_on_first_difference', [True, False])
def test_raise_on_different_cell_type(raise_on_first_difference):
ref = new_notebook(cells=[new_markdown_cell('Cell one'), new_code_cell('Cell two')])
test = new_notebook(cells=[new_markdown_cell('Cell one'), new_raw_cell('Cell two')])
with pytest.raises(NotebookDifference):
compare_notebooks(ref, test, 'md', raise_on_first_difference=raise_on_first_difference)
@pytest.mark.parametrize('raise_on_first_difference', [True, False])
def test_raise_on_different_cell_content(raise_on_first_difference):
ref = new_notebook(cells=[new_markdown_cell('Cell one'), new_code_cell('Cell two')])
test = new_notebook(cells=[new_markdown_cell('Cell one'), new_code_cell('Modified cell two')])
with pytest.raises(NotebookDifference):
compare_notebooks(ref, test, 'md', raise_on_first_difference=raise_on_first_difference)
def test_raise_on_incomplete_markdown_cell():
ref = new_notebook(cells=[new_markdown_cell('Cell one\n\n\nsecond line')])
test = new_notebook(cells=[new_markdown_cell('Cell one')])
with pytest.raises(NotebookDifference):
compare_notebooks(ref, test, 'md')
def test_does_raise_on_split_markdown_cell():
ref = new_notebook(cells=[new_markdown_cell('Cell one\n\n\nsecond line')])
test = new_notebook(cells=[new_markdown_cell('Cell one'),
new_markdown_cell('second line')])
with pytest.raises(NotebookDifference):
compare_notebooks(ref, test, 'md')
def test_raise_on_different_cell_metadata():
ref = new_notebook(cells=[new_code_cell('1+1')])
test = new_notebook(cells=[new_code_cell('1+1', metadata={'metakey': 'value'})])
with pytest.raises(NotebookDifference):
compare_notebooks(ref, test, 'py:light')
@pytest.mark.parametrize('raise_on_first_difference', [True, False])
def test_raise_on_different_cell_count(raise_on_first_difference):
ref = new_notebook(cells=[new_code_cell('1')])
test = new_notebook(cells=[new_code_cell('1'),
new_code_cell('2')])
with pytest.raises(NotebookDifference):
compare_notebooks(ref, test, 'py:light', raise_on_first_difference=raise_on_first_difference)
with pytest.raises(NotebookDifference):
compare_notebooks(test, ref, 'py:light', raise_on_first_difference=raise_on_first_difference)
def test_does_not_raise_on_blank_line_removed():
ref = new_notebook(cells=[new_code_cell('1+1\n ')])
test = new_notebook(cells=[new_code_cell('1+1')])
compare_notebooks(ref, test, 'py:light')
def test_strict_raise_on_blank_line_removed():
ref = new_notebook(cells=[new_code_cell('1+1\n')])
test = new_notebook(cells=[new_code_cell('1+1')])
with pytest.raises(NotebookDifference):
compare_notebooks(ref, test, 'py:light', allow_expected_differences=False)
def test_dont_raise_on_different_outputs():
ref = new_notebook(cells=[new_code_cell('1+1')])
test = new_notebook(cells=[new_code_cell('1+1', outputs=[
{
"data": {
"text/plain": [
"2"
]
},
"execution_count": 1,
"metadata": {},
"output_type": "execute_result"
}
])])
compare_notebooks(ref, test, 'md')
@pytest.mark.parametrize('raise_on_first_difference', [True, False])
def test_raise_on_different_outputs(raise_on_first_difference):
ref = new_notebook(cells=[new_code_cell('1+1')])
test = new_notebook(cells=[new_code_cell('1+1', outputs=[
{
"data": {
"text/plain": [
"2"
]
},
"execution_count": 1,
"metadata": {},
"output_type": "execute_result"
}
])])
with pytest.raises(NotebookDifference):
compare_notebooks(ref, test, 'md', compare_outputs=True, raise_on_first_difference=raise_on_first_difference)
def test_test_round_trip_conversion():
notebook = new_notebook(cells=[new_code_cell('1+1', outputs=[
{
"data": {
"text/plain": [
"2"
]
},
"execution_count": 1,
"metadata": {},
"output_type": "execute_result"
}
])], metadata={'main_language': 'python'})
round_trip_conversion(notebook, {'extension': '.py'}, update=True)
def test_mutiple_cells_differ():
nb1 = new_notebook(cells=[new_code_cell(''),
new_code_cell('2')])
nb2 = new_notebook(cells=[new_code_cell('1+1'),
new_code_cell('2\n2')])
with pytest.raises(NotebookDifference) as exception_info:
compare_notebooks(nb1, nb2, raise_on_first_difference=False)
assert 'Cells 1,2 differ' in exception_info.value.args[0]
def test_cell_metadata_differ():
nb1 = new_notebook(cells=[new_code_cell('1'),
new_code_cell('2', metadata={'additional': 'metadata1'})])
nb2 = new_notebook(cells=[new_code_cell('1'),
new_code_cell('2', metadata={'additional': 'metadata2'})])
with pytest.raises(NotebookDifference) as exception_info:
compare_notebooks(nb1, nb2, raise_on_first_difference=False)
assert "Cell metadata 'additional' differ" in exception_info.value.args[0]
def test_notebook_metadata_differ():
nb1 = new_notebook(cells=[new_code_cell('1'),
new_code_cell('2')])
nb2 = new_notebook(cells=[new_code_cell('1'),
new_code_cell('2')],
metadata={'kernelspec': {'language': 'python', 'name': 'python', 'display_name': 'Python'}})
with pytest.raises(NotebookDifference) as exception_info:
        compare_notebooks(nb1, nb2, raise_on_first_difference=False)
assert "Notebook metadata differ" in exception_info.value.args[0]
| [
"jupytext.compare.test_round_trip_conversion",
"nbformat.v4.nbbase.new_code_cell",
"pytest.mark.parametrize",
"nbformat.v4.nbbase.new_markdown_cell",
"pytest.raises",
"nbformat.v4.nbbase.new_raw_cell",
"jupytext.compare.compare_notebooks"
] | [((701, 768), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""raise_on_first_difference"""', '[True, False]'], {}), "('raise_on_first_difference', [True, False])\n", (724, 768), False, 'import pytest\n'), ((1156, 1223), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""raise_on_first_difference"""', '[True, False]'], {}), "('raise_on_first_difference', [True, False])\n", (1179, 1223), False, 'import pytest\n'), ((2521, 2588), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""raise_on_first_difference"""', '[True, False]'], {}), "('raise_on_first_difference', [True, False])\n", (2544, 2588), False, 'import pytest\n'), ((4046, 4113), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""raise_on_first_difference"""', '[True, False]'], {}), "('raise_on_first_difference', [True, False])\n", (4069, 4113), False, 'import pytest\n'), ((3271, 3311), 'jupytext.compare.compare_notebooks', 'compare_notebooks', (['ref', 'test', '"""py:light"""'], {}), "(ref, test, 'py:light')\n", (3288, 3311), False, 'from jupytext.compare import compare_notebooks, NotebookDifference, test_round_trip_conversion as round_trip_conversion\n'), ((4008, 4042), 'jupytext.compare.compare_notebooks', 'compare_notebooks', (['ref', 'test', '"""md"""'], {}), "(ref, test, 'md')\n", (4025, 4042), False, 'from jupytext.compare import compare_notebooks, NotebookDifference, test_round_trip_conversion as round_trip_conversion\n'), ((5097, 5163), 'jupytext.compare.test_round_trip_conversion', 'round_trip_conversion', (['notebook', "{'extension': '.py'}"], {'update': '(True)'}), "(notebook, {'extension': '.py'}, update=True)\n", (5118, 5163), True, 'from jupytext.compare import compare_notebooks, NotebookDifference, test_round_trip_conversion as round_trip_conversion\n'), ((620, 653), 'pytest.raises', 'pytest.raises', (['NotebookDifference'], {}), '(NotebookDifference)\n', (633, 653), False, 'import pytest\n'), ((663, 697), 'jupytext.compare.compare_notebooks', 'compare_notebooks', (['ref', 'test', '"""md"""'], {}), "(ref, test, 'md')\n", (680, 697), False, 'from jupytext.compare import compare_notebooks, NotebookDifference, test_round_trip_conversion as round_trip_conversion\n'), ((1022, 1055), 'pytest.raises', 'pytest.raises', (['NotebookDifference'], {}), '(NotebookDifference)\n', (1035, 1055), False, 'import pytest\n'), ((1065, 1157), 'jupytext.compare.compare_notebooks', 'compare_notebooks', (['ref', 'test', '"""md"""'], {'raise_on_first_difference': 'raise_on_first_difference'}), "(ref, test, 'md', raise_on_first_difference=\n raise_on_first_difference)\n", (1082, 1157), False, 'from jupytext.compare import compare_notebooks, NotebookDifference, test_round_trip_conversion as round_trip_conversion\n'), ((1490, 1523), 'pytest.raises', 'pytest.raises', (['NotebookDifference'], {}), '(NotebookDifference)\n', (1503, 1523), False, 'import pytest\n'), ((1533, 1625), 'jupytext.compare.compare_notebooks', 'compare_notebooks', (['ref', 'test', '"""md"""'], {'raise_on_first_difference': 'raise_on_first_difference'}), "(ref, test, 'md', raise_on_first_difference=\n raise_on_first_difference)\n", (1550, 1625), False, 'from jupytext.compare import compare_notebooks, NotebookDifference, test_round_trip_conversion as round_trip_conversion\n'), ((1820, 1853), 'pytest.raises', 'pytest.raises', (['NotebookDifference'], {}), '(NotebookDifference)\n', (1833, 1853), False, 'import pytest\n'), ((1863, 1897), 'jupytext.compare.compare_notebooks', 'compare_notebooks', (['ref', 'test', '"""md"""'], {}), "(ref, 
test, 'md')\n", (1880, 1897), False, 'from jupytext.compare import compare_notebooks, NotebookDifference, test_round_trip_conversion as round_trip_conversion\n'), ((2162, 2195), 'pytest.raises', 'pytest.raises', (['NotebookDifference'], {}), '(NotebookDifference)\n', (2175, 2195), False, 'import pytest\n'), ((2205, 2239), 'jupytext.compare.compare_notebooks', 'compare_notebooks', (['ref', 'test', '"""md"""'], {}), "(ref, test, 'md')\n", (2222, 2239), False, 'from jupytext.compare import compare_notebooks, NotebookDifference, test_round_trip_conversion as round_trip_conversion\n'), ((2434, 2467), 'pytest.raises', 'pytest.raises', (['NotebookDifference'], {}), '(NotebookDifference)\n', (2447, 2467), False, 'import pytest\n'), ((2477, 2517), 'jupytext.compare.compare_notebooks', 'compare_notebooks', (['ref', 'test', '"""py:light"""'], {}), "(ref, test, 'py:light')\n", (2494, 2517), False, 'from jupytext.compare import compare_notebooks, NotebookDifference, test_round_trip_conversion as round_trip_conversion\n'), ((2819, 2852), 'pytest.raises', 'pytest.raises', (['NotebookDifference'], {}), '(NotebookDifference)\n', (2832, 2852), False, 'import pytest\n'), ((2862, 2960), 'jupytext.compare.compare_notebooks', 'compare_notebooks', (['ref', 'test', '"""py:light"""'], {'raise_on_first_difference': 'raise_on_first_difference'}), "(ref, test, 'py:light', raise_on_first_difference=\n raise_on_first_difference)\n", (2879, 2960), False, 'from jupytext.compare import compare_notebooks, NotebookDifference, test_round_trip_conversion as round_trip_conversion\n'), ((2966, 2999), 'pytest.raises', 'pytest.raises', (['NotebookDifference'], {}), '(NotebookDifference)\n', (2979, 2999), False, 'import pytest\n'), ((3009, 3107), 'jupytext.compare.compare_notebooks', 'compare_notebooks', (['test', 'ref', '"""py:light"""'], {'raise_on_first_difference': 'raise_on_first_difference'}), "(test, ref, 'py:light', raise_on_first_difference=\n raise_on_first_difference)\n", (3026, 3107), False, 'from jupytext.compare import compare_notebooks, NotebookDifference, test_round_trip_conversion as round_trip_conversion\n'), ((3479, 3512), 'pytest.raises', 'pytest.raises', (['NotebookDifference'], {}), '(NotebookDifference)\n', (3492, 3512), False, 'import pytest\n'), ((3522, 3596), 'jupytext.compare.compare_notebooks', 'compare_notebooks', (['ref', 'test', '"""py:light"""'], {'allow_expected_differences': '(False)'}), "(ref, test, 'py:light', allow_expected_differences=False)\n", (3539, 3596), False, 'from jupytext.compare import compare_notebooks, NotebookDifference, test_round_trip_conversion as round_trip_conversion\n'), ((4548, 4581), 'pytest.raises', 'pytest.raises', (['NotebookDifference'], {}), '(NotebookDifference)\n', (4561, 4581), False, 'import pytest\n'), ((4591, 4704), 'jupytext.compare.compare_notebooks', 'compare_notebooks', (['ref', 'test', '"""md"""'], {'compare_outputs': '(True)', 'raise_on_first_difference': 'raise_on_first_difference'}), "(ref, test, 'md', compare_outputs=True,\n raise_on_first_difference=raise_on_first_difference)\n", (4608, 4704), False, 'from jupytext.compare import compare_notebooks, NotebookDifference, test_round_trip_conversion as round_trip_conversion\n'), ((5414, 5447), 'pytest.raises', 'pytest.raises', (['NotebookDifference'], {}), '(NotebookDifference)\n', (5427, 5447), False, 'import pytest\n'), ((5475, 5535), 'jupytext.compare.compare_notebooks', 'compare_notebooks', (['nb1', 'nb2'], {'raise_on_first_difference': '(False)'}), '(nb1, nb2, raise_on_first_difference=False)\n', 
(5492, 5535), False, 'from jupytext.compare import compare_notebooks, NotebookDifference, test_round_trip_conversion as round_trip_conversion\n'), ((5920, 5953), 'pytest.raises', 'pytest.raises', (['NotebookDifference'], {}), '(NotebookDifference)\n', (5933, 5953), False, 'import pytest\n'), ((5981, 6041), 'jupytext.compare.compare_notebooks', 'compare_notebooks', (['nb1', 'nb2'], {'raise_on_first_difference': '(False)'}), '(nb1, nb2, raise_on_first_difference=False)\n', (5998, 6041), False, 'from jupytext.compare import compare_notebooks, NotebookDifference, test_round_trip_conversion as round_trip_conversion\n'), ((6487, 6520), 'pytest.raises', 'pytest.raises', (['NotebookDifference'], {}), '(NotebookDifference)\n', (6500, 6520), False, 'import pytest\n'), ((6548, 6608), 'jupytext.compare.compare_notebooks', 'compare_notebooks', (['nb1', 'nb2'], {'raise_on_first_difference': '(False)'}), '(nb1, nb2, raise_on_first_difference=False)\n', (6565, 6608), False, 'from jupytext.compare import compare_notebooks, NotebookDifference, test_round_trip_conversion as round_trip_conversion\n'), ((414, 443), 'nbformat.v4.nbbase.new_markdown_cell', 'new_markdown_cell', (['"""Cell one"""'], {}), "('Cell one')\n", (431, 443), False, 'from nbformat.v4.nbbase import new_notebook, new_markdown_cell, new_code_cell, new_raw_cell\n'), ((579, 608), 'nbformat.v4.nbbase.new_markdown_cell', 'new_markdown_cell', (['"""Cell one"""'], {}), "('Cell one')\n", (596, 608), False, 'from nbformat.v4.nbbase import new_notebook, new_markdown_cell, new_code_cell, new_raw_cell\n'), ((865, 894), 'nbformat.v4.nbbase.new_markdown_cell', 'new_markdown_cell', (['"""Cell one"""'], {}), "('Cell one')\n", (882, 894), False, 'from nbformat.v4.nbbase import new_notebook, new_markdown_cell, new_code_cell, new_raw_cell\n'), ((896, 921), 'nbformat.v4.nbbase.new_code_cell', 'new_code_cell', (['"""Cell two"""'], {}), "('Cell two')\n", (909, 921), False, 'from nbformat.v4.nbbase import new_notebook, new_markdown_cell, new_code_cell, new_raw_cell\n'), ((955, 984), 'nbformat.v4.nbbase.new_markdown_cell', 'new_markdown_cell', (['"""Cell one"""'], {}), "('Cell one')\n", (972, 984), False, 'from nbformat.v4.nbbase import new_notebook, new_markdown_cell, new_code_cell, new_raw_cell\n'), ((986, 1010), 'nbformat.v4.nbbase.new_raw_cell', 'new_raw_cell', (['"""Cell two"""'], {}), "('Cell two')\n", (998, 1010), False, 'from nbformat.v4.nbbase import new_notebook, new_markdown_cell, new_code_cell, new_raw_cell\n'), ((1323, 1352), 'nbformat.v4.nbbase.new_markdown_cell', 'new_markdown_cell', (['"""Cell one"""'], {}), "('Cell one')\n", (1340, 1352), False, 'from nbformat.v4.nbbase import new_notebook, new_markdown_cell, new_code_cell, new_raw_cell\n'), ((1354, 1379), 'nbformat.v4.nbbase.new_code_cell', 'new_code_cell', (['"""Cell two"""'], {}), "('Cell two')\n", (1367, 1379), False, 'from nbformat.v4.nbbase import new_notebook, new_markdown_cell, new_code_cell, new_raw_cell\n'), ((1413, 1442), 'nbformat.v4.nbbase.new_markdown_cell', 'new_markdown_cell', (['"""Cell one"""'], {}), "('Cell one')\n", (1430, 1442), False, 'from nbformat.v4.nbbase import new_notebook, new_markdown_cell, new_code_cell, new_raw_cell\n'), ((1444, 1478), 'nbformat.v4.nbbase.new_code_cell', 'new_code_cell', (['"""Modified cell two"""'], {}), "('Modified cell two')\n", (1457, 1478), False, 'from nbformat.v4.nbbase import new_notebook, new_markdown_cell, new_code_cell, new_raw_cell\n'), ((1699, 1746), 'nbformat.v4.nbbase.new_markdown_cell', 'new_markdown_cell', (['"""Cell one\n\n\nsecond 
line"""'], {}), '("""Cell one\n\n\nsecond line""")\n', (1716, 1746), False, 'from nbformat.v4.nbbase import new_notebook, new_markdown_cell, new_code_cell, new_raw_cell\n'), ((1779, 1808), 'nbformat.v4.nbbase.new_markdown_cell', 'new_markdown_cell', (['"""Cell one"""'], {}), "('Cell one')\n", (1796, 1808), False, 'from nbformat.v4.nbbase import new_notebook, new_markdown_cell, new_code_cell, new_raw_cell\n'), ((1976, 2023), 'nbformat.v4.nbbase.new_markdown_cell', 'new_markdown_cell', (['"""Cell one\n\n\nsecond line"""'], {}), '("""Cell one\n\n\nsecond line""")\n', (1993, 2023), False, 'from nbformat.v4.nbbase import new_notebook, new_markdown_cell, new_code_cell, new_raw_cell\n'), ((2056, 2085), 'nbformat.v4.nbbase.new_markdown_cell', 'new_markdown_cell', (['"""Cell one"""'], {}), "('Cell one')\n", (2073, 2085), False, 'from nbformat.v4.nbbase import new_notebook, new_markdown_cell, new_code_cell, new_raw_cell\n'), ((2118, 2150), 'nbformat.v4.nbbase.new_markdown_cell', 'new_markdown_cell', (['"""second line"""'], {}), "('second line')\n", (2135, 2150), False, 'from nbformat.v4.nbbase import new_notebook, new_markdown_cell, new_code_cell, new_raw_cell\n'), ((2317, 2337), 'nbformat.v4.nbbase.new_code_cell', 'new_code_cell', (['"""1+1"""'], {}), "('1+1')\n", (2330, 2337), False, 'from nbformat.v4.nbbase import new_notebook, new_markdown_cell, new_code_cell, new_raw_cell\n'), ((2371, 2422), 'nbformat.v4.nbbase.new_code_cell', 'new_code_cell', (['"""1+1"""'], {'metadata': "{'metakey': 'value'}"}), "('1+1', metadata={'metakey': 'value'})\n", (2384, 2422), False, 'from nbformat.v4.nbbase import new_notebook, new_markdown_cell, new_code_cell, new_raw_cell\n'), ((2686, 2704), 'nbformat.v4.nbbase.new_code_cell', 'new_code_cell', (['"""1"""'], {}), "('1')\n", (2699, 2704), False, 'from nbformat.v4.nbbase import new_notebook, new_markdown_cell, new_code_cell, new_raw_cell\n'), ((2738, 2756), 'nbformat.v4.nbbase.new_code_cell', 'new_code_cell', (['"""1"""'], {}), "('1')\n", (2751, 2756), False, 'from nbformat.v4.nbbase import new_notebook, new_markdown_cell, new_code_cell, new_raw_cell\n'), ((2789, 2807), 'nbformat.v4.nbbase.new_code_cell', 'new_code_cell', (['"""2"""'], {}), "('2')\n", (2802, 2807), False, 'from nbformat.v4.nbbase import new_notebook, new_markdown_cell, new_code_cell, new_raw_cell\n'), ((3184, 3210), 'nbformat.v4.nbbase.new_code_cell', 'new_code_cell', (['"""1+1\n """'], {}), "('1+1\\n ')\n", (3197, 3210), False, 'from nbformat.v4.nbbase import new_notebook, new_markdown_cell, new_code_cell, new_raw_cell\n'), ((3244, 3264), 'nbformat.v4.nbbase.new_code_cell', 'new_code_cell', (['"""1+1"""'], {}), "('1+1')\n", (3257, 3264), False, 'from nbformat.v4.nbbase import new_notebook, new_markdown_cell, new_code_cell, new_raw_cell\n'), ((3391, 3413), 'nbformat.v4.nbbase.new_code_cell', 'new_code_cell', (['"""1+1\n"""'], {}), "('1+1\\n')\n", (3404, 3413), False, 'from nbformat.v4.nbbase import new_notebook, new_markdown_cell, new_code_cell, new_raw_cell\n'), ((3447, 3467), 'nbformat.v4.nbbase.new_code_cell', 'new_code_cell', (['"""1+1"""'], {}), "('1+1')\n", (3460, 3467), False, 'from nbformat.v4.nbbase import new_notebook, new_markdown_cell, new_code_cell, new_raw_cell\n'), ((3673, 3693), 'nbformat.v4.nbbase.new_code_cell', 'new_code_cell', (['"""1+1"""'], {}), "('1+1')\n", (3686, 3693), False, 'from nbformat.v4.nbbase import new_notebook, new_markdown_cell, new_code_cell, new_raw_cell\n'), ((3727, 3865), 'nbformat.v4.nbbase.new_code_cell', 'new_code_cell', (['"""1+1"""'], {'outputs': 
"[{'data': {'text/plain': ['2']}, 'execution_count': 1, 'metadata': {},\n 'output_type': 'execute_result'}]"}), "('1+1', outputs=[{'data': {'text/plain': ['2']},\n 'execution_count': 1, 'metadata': {}, 'output_type': 'execute_result'}])\n", (3740, 3865), False, 'from nbformat.v4.nbbase import new_notebook, new_markdown_cell, new_code_cell, new_raw_cell\n'), ((4208, 4228), 'nbformat.v4.nbbase.new_code_cell', 'new_code_cell', (['"""1+1"""'], {}), "('1+1')\n", (4221, 4228), False, 'from nbformat.v4.nbbase import new_notebook, new_markdown_cell, new_code_cell, new_raw_cell\n'), ((4262, 4400), 'nbformat.v4.nbbase.new_code_cell', 'new_code_cell', (['"""1+1"""'], {'outputs': "[{'data': {'text/plain': ['2']}, 'execution_count': 1, 'metadata': {},\n 'output_type': 'execute_result'}]"}), "('1+1', outputs=[{'data': {'text/plain': ['2']},\n 'execution_count': 1, 'metadata': {}, 'output_type': 'execute_result'}])\n", (4275, 4400), False, 'from nbformat.v4.nbbase import new_notebook, new_markdown_cell, new_code_cell, new_raw_cell\n'), ((4777, 4915), 'nbformat.v4.nbbase.new_code_cell', 'new_code_cell', (['"""1+1"""'], {'outputs': "[{'data': {'text/plain': ['2']}, 'execution_count': 1, 'metadata': {},\n 'output_type': 'execute_result'}]"}), "('1+1', outputs=[{'data': {'text/plain': ['2']},\n 'execution_count': 1, 'metadata': {}, 'output_type': 'execute_result'}])\n", (4790, 4915), False, 'from nbformat.v4.nbbase import new_notebook, new_markdown_cell, new_code_cell, new_raw_cell\n'), ((5229, 5246), 'nbformat.v4.nbbase.new_code_cell', 'new_code_cell', (['""""""'], {}), "('')\n", (5242, 5246), False, 'from nbformat.v4.nbbase import new_notebook, new_markdown_cell, new_code_cell, new_raw_cell\n'), ((5278, 5296), 'nbformat.v4.nbbase.new_code_cell', 'new_code_cell', (['"""2"""'], {}), "('2')\n", (5291, 5296), False, 'from nbformat.v4.nbbase import new_notebook, new_markdown_cell, new_code_cell, new_raw_cell\n'), ((5329, 5349), 'nbformat.v4.nbbase.new_code_cell', 'new_code_cell', (['"""1+1"""'], {}), "('1+1')\n", (5342, 5349), False, 'from nbformat.v4.nbbase import new_notebook, new_markdown_cell, new_code_cell, new_raw_cell\n'), ((5381, 5402), 'nbformat.v4.nbbase.new_code_cell', 'new_code_cell', (['"""2\n2"""'], {}), "('2\\n2')\n", (5394, 5402), False, 'from nbformat.v4.nbbase import new_notebook, new_markdown_cell, new_code_cell, new_raw_cell\n'), ((5663, 5681), 'nbformat.v4.nbbase.new_code_cell', 'new_code_cell', (['"""1"""'], {}), "('1')\n", (5676, 5681), False, 'from nbformat.v4.nbbase import new_notebook, new_markdown_cell, new_code_cell, new_raw_cell\n'), ((5713, 5769), 'nbformat.v4.nbbase.new_code_cell', 'new_code_cell', (['"""2"""'], {'metadata': "{'additional': 'metadata1'}"}), "('2', metadata={'additional': 'metadata1'})\n", (5726, 5769), False, 'from nbformat.v4.nbbase import new_notebook, new_markdown_cell, new_code_cell, new_raw_cell\n'), ((5802, 5820), 'nbformat.v4.nbbase.new_code_cell', 'new_code_cell', (['"""1"""'], {}), "('1')\n", (5815, 5820), False, 'from nbformat.v4.nbbase import new_notebook, new_markdown_cell, new_code_cell, new_raw_cell\n'), ((5852, 5908), 'nbformat.v4.nbbase.new_code_cell', 'new_code_cell', (['"""2"""'], {'metadata': "{'additional': 'metadata2'}"}), "('2', metadata={'additional': 'metadata2'})\n", (5865, 5908), False, 'from nbformat.v4.nbbase import new_notebook, new_markdown_cell, new_code_cell, new_raw_cell\n'), ((6190, 6208), 'nbformat.v4.nbbase.new_code_cell', 'new_code_cell', (['"""1"""'], {}), "('1')\n", (6203, 6208), False, 'from nbformat.v4.nbbase import 
new_notebook, new_markdown_cell, new_code_cell, new_raw_cell\n'), ((6240, 6258), 'nbformat.v4.nbbase.new_code_cell', 'new_code_cell', (['"""2"""'], {}), "('2')\n", (6253, 6258), False, 'from nbformat.v4.nbbase import new_notebook, new_markdown_cell, new_code_cell, new_raw_cell\n'), ((6291, 6309), 'nbformat.v4.nbbase.new_code_cell', 'new_code_cell', (['"""1"""'], {}), "('1')\n", (6304, 6309), False, 'from nbformat.v4.nbbase import new_notebook, new_markdown_cell, new_code_cell, new_raw_cell\n'), ((6341, 6359), 'nbformat.v4.nbbase.new_code_cell', 'new_code_cell', (['"""2"""'], {}), "('2')\n", (6354, 6359), False, 'from nbformat.v4.nbbase import new_notebook, new_markdown_cell, new_code_cell, new_raw_cell\n')] |
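# A minimal sketch, assuming nbformat is installed, of how the v4 helpers referenced by the
# offset tuples above are typically called; the cell contents here are illustrative only.
from nbformat.v4.nbbase import new_notebook, new_markdown_cell, new_code_cell, new_raw_cell

nb = new_notebook(cells=[
    new_markdown_cell("Cell one"),   # markdown cell
    new_code_cell("1+1"),            # code cell, no outputs attached yet
    new_raw_cell("raw text"),        # raw cell, passed through unrendered
])
print(len(nb.cells))  # -> 3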
import asyncio
import discord
import random
import datetime
from discord.ext import commands
from Cogs import DisplayName
from Cogs import Nullify
def setup(bot):
# Add the bot
bot.add_cog(Actions(bot))
class Actions(commands.Cog):
## class that handles storing and computing action messages
class actionable:
## these should be filled in the override class. any {} are replaced with target member's name
nothingList = [] # when you call without any arguments
botList = [] # when the action is done at the bot
selfList = [] # when the action is done at the user who called it
memberList = [] # when the action is done toward another member
itemList = [] # when the action is done on a string of text that is not a member
def computeAction(self, bot, ctx, target):
'''return a message based on the context and argument of the command'''
mesg = ""
if not target: # no arguments
mesg = random.choice(self.nothingList)
else:
targetMember = DisplayName.memberForName(target, ctx.message.guild)
if targetMember:
if self.botList and targetMember.id == bot.user.id: # actioning the bot
mesg = random.choice(self.botList) # if botList is empty we fail over to the member list
elif self.selfList and targetMember.id == ctx.message.author.id: # actioning themselves
mesg = random.choice(self.selfList)
else: # actioning another user
mesg = random.choice(self.memberList).replace("{}",DisplayName.name(targetMember))
else: # actioning an item
mesg = random.choice(self.itemList)
if '{}' in mesg:
mesg = mesg.format(target)
mesgFull = '*{}*, {}'.format(DisplayName.name(ctx.message.author), mesg)
mesgFull = Nullify.clean(mesgFull)
return mesgFull
## static definitions of all the action messages
class eating(actionable):
nothingList = [ 'you sit quietly and eat *nothing*...',
'you\'re *sure* there was something to eat, so you just chew on nothingness...',
'there comes a time when you need to realize that you\'re just chewing nothing for the sake of chewing. That time is now.']
botList = [ 'you try to eat *me* - but unfortunately, I saw it coming - your jaw hangs open as I deftly sidestep.',
'your mouth hangs open for a brief second before you realize that *I\'m* eating *you*.',
'I\'m a bot. You can\'t eat me.',
'your jaw clamps down on... wait... on nothing, because I\'m *digital!*.',
'what kind of bot would I be if I let you eat me?']
selfList = ['you clamp down on your own forearm - not surprisingly, it hurts.',
'you place a finger into your mouth, but *just can\'t* force yourself to bite down.',
'you happily munch away, but can now only wave with your left hand.',
'wait - you\'re not a sandwich!',
'you might not be the smartest...']
memberList = [ 'you unhinge your jaw and consume *{}* in one bite.',
'you try to eat *{}*, but you just can\'t quite do it - you spit them out, the taste of failure hanging in your mouth...',
'you take a quick bite out of *{}*. They probably didn\'t even notice.',
'you sink your teeth into *{}\'s* shoulder - they turn to face you, eyes wide as you try your best to scurry away and hide.',
'your jaw clamps down on *{}* - a satisfying *crunch* emanates as you finish your newest meal.']
itemList = [ 'you take a big chunk out of *{}*. *Delicious.*',
'your teeth sink into *{}* - it tastes satisfying.',
'you rip hungrily into *{}*, tearing it to bits!',
'you just can\'t bring yourself to eat *{}* - so you just hold it for awhile...',
'you attempt to bite into *{}*, but you\'re clumsier than you remember - and fail...']
class drinking(actionable):
nothingList = [ 'you stare at your glass full of *nothing*...',
'that cup must\'ve had something in it, so you drink *nothing*...',
'you should probably just go get a drink.',
'that desk looks pretty empty',
'are you sure you know what drinking is?',
					'you desperately search for something to drink']
botList = [ 'you try to drink *me*, but I dodge your straw.',
'You search for me, only to realise that *I* am already drinking you!',
'I\'m a bot. You can\'t drink me.',
'you stick a straw in... wait... in nothing, because I\'m *digital!*.',
'what do you think I am to let you drink me?',
'I don\'t think you would like the taste of me.',
'you can\'t drink me, I\'m a machine!']
selfList = ['you stab yourself with a straw - not surprisingly, it hurts.',
'you fit yourself in to a cup, but you just can\'t do it.',
'you happily drink away, but you are now very floppy.',
'wait - you\'re not a drink!',
'you might not be the smartest...',
'you might have some issues.',
'you try to drink yourself.',
'why would you drink yourself?']
memberList = [ 'you grab your lucky straw and empty *{}* in one sip.',
'you try to drink *{}*, but you just can\'t quite do it - you spit them out, the taste of failure hanging in your mouth...',
'you drink a small sip of *{}*. They probably didn\'t even notice.',
'you stab your straw into *{}\'s* shoulder - You run away as they run after you.',
'you happily drink away - *{}* starts to look like an empty Capri Sun package.',
'you are thirsty - *{}* sacrifices themself involuntarily.',
'somehow you end up emptying *{}*.']
itemList = ['you take a big sip of *{}*. *Delicious.*',
'your straw sinks into *{}* - it tastes satisfying.',
'you thirstly guzzle *{}*, it\'s lovely!',
'you just can\'t bring yourself to drink *{}* - so you just hold it for awhile...',
'you attempt to drain *{}*, but you\'re clumsier than you remember - and fail...',
'you drink *{}*.',
'*{}* dries up from your drinking.',
'*{}* starts resembling the Aral Sea.']
class booping(actionable):
nothingList = [ 'you stretch out your hand in the air, but there\'s nothing there...',
'you try and find someone to boop, but there\'s no one there.',
'you look around the channel for someone to boop.',
'you eye all the heads in the room, just waiting to be booped.',
'are you sure you have someone to boop?',
'I get it. You want to boop *someone*.']
selfList = ['you boop yourself on the nose with your finger.',
'you try to boop your head, but your hand gets lost along the way.',
'you happily boop yourself, but you are now very giddy.',
'wait - are you sure you want to boop yourself?',
'you might not be the smartest...',
'you might have some issues.',
'you try to boop yourself.',
'why would you boop yourself?']
memberList = [ 'you outstretch your lucky finger and boop *{}* in one go.',
					'you try to boop *{}*, but you just can\'t quite do it - you miss their head, the taste of failure stuck to your hand...',
'you sneak a boop onto *{}*. They probably didn\'t even notice.',
'you poke your hand onto *{}\'s* hand - You run away as they run after you.',
'you happily drum your fingers away - *{}* starts to look annoyed.',
'you\'re feeling boopy - *{}* sacrifices themself involuntarily.',
'somehow you end up booping *{}*.',
'you climb *{}*\'s head and use it as a bouncy castle... they feel amused.']
itemList = ['you put your hand onto *{}*\'s head. *Bliss.*',
'your hand touches *{}*\'s snoot - it feels satisfying.',
'you happily boop *{}*, it\'s lovely!',
'you just can\'t bring yourself to boop *{}* - so you just let your hand linger...',
'you attempt to boop *{}*, but you\'re clumsier than you remember - and fail...',
'you boop *{}*.',
'*{}* feels annoyed from your booping.',
'*{}* starts resembling a happy pupper.']
class spooky(actionable):
nothingList = [ 'you spook no one but yourself',
'you spook nothing, sp00py...',
'sadly, no one got spooked',
					'it is sp00... you can\'t spook air']
botList = [ 'you scared the living pumpkin out of me!',
'you spooked me so hard, I got the Heebie-jeebies...', # https://www.myenglishteacher.eu/blog/idioms-for-being-afraid/
'you sp00p me? But I\'m a bot... I can\'t be spooked!',
					'sorry, but I cannot let you spook me; My digital emotions will get all messed up!',
					'aaaaaaaaaah! Don\'t you scare me like that again!']
selfList = ['go watch a scary movie to be absolutely sp00ped!',
'boo! Did you scare you?',
'you look yourself in the mirror and get a little scared...',
'get spooked by... yourself?',
'sp00py, but why spook yourself?']
memberList = [ 'you sp00p *{}* so hard that they start screaming!',
'you tried to sneak up on *{}*, but they heard you sneakin\' and fail...',
'it is sp00py time! Hey *{}*, boo!',
'congrats, *{}* dun sp00ked.',
'get spook3d *{}*!']
itemList = ['you spook *{}* with no reaction, leaving you looking weird...',
'*{}* got sp00p3d so hard, it ran away!',
'you trick or treat *{}* without any reaction...',
'you do your best to sp00p *{}*, but fail...',
'sp00py time! *{}* gets sp00ped harder than you thought and starts crying!']
class highfives(actionable):
nothingList = [ 'you stand alone for an eternity, hand raised up - desperate for any sort of recognition...',
'with a wild swing you throw your hand forward - the momentum carries you to the ground and you just lay there - high fiveless...',
'the only sound you hear as a soft *whoosh* as your hand connects with nothing...']
botList = [ 'the sky erupts with 1\'s and 0\'s as our hands meet in an epic high five of glory!',
'you beam up to the cloud and receive a quick high five from me before downloading back to Earth.',
'I unleash a fork-bomb of high five processes!',
'01001000011010010110011101101000001000000100011001101001011101100110010100100001']
selfList = ['ahh - high fiving yourself, classy...',
'that\'s uh... that\'s just clapping...',
'you run in a large circle - *totally* high fiving all your friends...',
'now you\'re at both ends of a high five!']
memberList = [ 'you and *{}* jump up for an epic high five - freeze-framing as the credits roll and some wicked 80s synth plays out.',
'you and *{}* elevate to a higher plane of existence in wake of that tremendous high five!',
'a 2 hour, 3 episode anime-esque fight scene unfolds as you and *{}* engage in a world-ending high five!',
'it *was* tomorrow - before you and *{}* high fived with enough force to spin the Earth in reverse!',
'like two righteous torpedoes - you and *{}* connect palms, subsequently deafening everyone in a 300-mile radius!']
itemList = ['neat... you just high fived *{}*.',
'your hand flops through the air - hitting *{}* with a soft thud.',
'you reach out a hand, gently pressing your palm to *{}*. A soft *"high five"* escapes your lips as a tear runs down your cheek...',
'like an open-handed piston of ferocity - you drive your palm into *{}*.']
class petting(actionable): # meow
nothingList = [ 'you absentmindedly wave your hand in the air.',
'you could have sworn there was a cat there!',
'you remember that there are no cats here.',
'you try to pet the cat, but miss because the cat is gone.']
botList = [ 'I may be electronic but I still appreciate pets.',
'*purrrrrrrrrrrrrrr*.',
'you electrocute yourself trying to pet a computer.']
selfList = ['you give yourself a nice pat on the head.',
'too bad there\'s no one else to pet you.',
'in lieu of anything else to pet, you pet yourself.',
'your hair is warm and soft.']
memberList = [ 'you give *{}* a pat on the head.',
'you rub your hand through *{}\'s* hair.',
'*{}* smiles from your petting.',
'you try to pet *{}*, but miss because they hid under the bed.',
'*{}* purrs from your petting.',
'you pet *{}* but they bite your hand',
'you try to pet *{}* but they hiss and run away.']
itemList = ['you rub *{}* but it doesn\'t feel like a cat.',
'you don\'t hear any purring from *{}*.',
'you hurt your hand trying to pet *{}*.']
# Init with the bot reference, and a reference to the settings var
def __init__(self, bot):
self.bot = bot
global Utils, DisplayName
Utils = self.bot.get_cog("Utils")
DisplayName = self.bot.get_cog("DisplayName")
@commands.command(pass_context=True)
async def eat(self, ctx, *, member : str = None):
"""Eat like a boss."""
msg = self.eating.computeAction(self.eating, self.bot, ctx, member) #python is silly and makes me do this for uninitialized classes
await ctx.channel.send(msg)
return
@commands.command(pass_context=True)
async def drink(self, ctx, *, member : str = None):
"""Drink like a boss."""
msg = self.drinking.computeAction(self.drinking, self.bot, ctx, member)
await ctx.channel.send(msg)
return
@commands.command(pass_context=True)
async def boop(self, ctx, *, member : str = None):
"""Boop da snoot."""
msg = self.booping.computeAction(self.booping, self.bot, ctx, member)
await ctx.channel.send(msg)
return
@commands.command(pass_context=True)
async def spook(self, ctx, *, member : str = None):
"""sp00ktober by camiel."""
if datetime.date.today().month == 10:
# make it extra sp00py because it is spooktober
await ctx.message.add_reaction("🎃")
msg = self.spooky.computeAction(self.spooky, self.bot, ctx, member)
await ctx.channel.send(msg)
return
@commands.command(pass_context=True)
async def highfive(self, ctx, *, member : str = None):
"""High five like a boss."""
msg = self.highfives.computeAction(self.highfives, self.bot, ctx, member)
await ctx.channel.send(msg)
return
@commands.command(pass_context=True)
async def pet(self, ctx, *, member : str = None):
"""pet kitties."""
msg = self.petting.computeAction(self.petting, self.bot, ctx, member)
await ctx.channel.send(msg)
return
| [
"random.choice",
"Cogs.DisplayName.name",
"Cogs.Nullify.clean",
"Cogs.DisplayName.memberForName",
"datetime.date.today",
"discord.ext.commands.command"
] | [((12454, 12489), 'discord.ext.commands.command', 'commands.command', ([], {'pass_context': '(True)'}), '(pass_context=True)\n', (12470, 12489), False, 'from discord.ext import commands\n'), ((12743, 12778), 'discord.ext.commands.command', 'commands.command', ([], {'pass_context': '(True)'}), '(pass_context=True)\n', (12759, 12778), False, 'from discord.ext import commands\n'), ((12976, 13011), 'discord.ext.commands.command', 'commands.command', ([], {'pass_context': '(True)'}), '(pass_context=True)\n', (12992, 13011), False, 'from discord.ext import commands\n'), ((13202, 13237), 'discord.ext.commands.command', 'commands.command', ([], {'pass_context': '(True)'}), '(pass_context=True)\n', (13218, 13237), False, 'from discord.ext import commands\n'), ((13564, 13599), 'discord.ext.commands.command', 'commands.command', ([], {'pass_context': '(True)'}), '(pass_context=True)\n', (13580, 13599), False, 'from discord.ext import commands\n'), ((13806, 13841), 'discord.ext.commands.command', 'commands.command', ([], {'pass_context': '(True)'}), '(pass_context=True)\n', (13822, 13841), False, 'from discord.ext import commands\n'), ((1708, 1731), 'Cogs.Nullify.clean', 'Nullify.clean', (['mesgFull'], {}), '(mesgFull)\n', (1721, 1731), False, 'from Cogs import Nullify\n'), ((924, 955), 'random.choice', 'random.choice', (['self.nothingList'], {}), '(self.nothingList)\n', (937, 955), False, 'import random\n'), ((984, 1036), 'Cogs.DisplayName.memberForName', 'DisplayName.memberForName', (['target', 'ctx.message.guild'], {}), '(target, ctx.message.guild)\n', (1009, 1036), False, 'from Cogs import DisplayName\n'), ((1650, 1686), 'Cogs.DisplayName.name', 'DisplayName.name', (['ctx.message.author'], {}), '(ctx.message.author)\n', (1666, 1686), False, 'from Cogs import DisplayName\n'), ((13327, 13348), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (13346, 13348), False, 'import datetime\n'), ((1533, 1561), 'random.choice', 'random.choice', (['self.itemList'], {}), '(self.itemList)\n', (1546, 1561), False, 'import random\n'), ((1149, 1176), 'random.choice', 'random.choice', (['self.botList'], {}), '(self.botList)\n', (1162, 1176), False, 'import random\n'), ((1337, 1365), 'random.choice', 'random.choice', (['self.selfList'], {}), '(self.selfList)\n', (1350, 1365), False, 'import random\n'), ((1459, 1489), 'Cogs.DisplayName.name', 'DisplayName.name', (['targetMember'], {}), '(targetMember)\n', (1475, 1489), False, 'from Cogs import DisplayName\n'), ((1415, 1445), 'random.choice', 'random.choice', (['self.memberList'], {}), '(self.memberList)\n', (1428, 1445), False, 'import random\n')] |
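# A minimal, standalone sketch of the templating done by Actions.actionable.computeAction
# above: pick a random template from the relevant list and substitute the target's display
# name for "{}". The names used here are illustrative only.
import random

memberList = ['you give *{}* a pat on the head.', '*{}* smiles from your petting.']
target_name = "SomeUser"
mesg = random.choice(memberList).replace("{}", target_name)
print('*{}*, {}'.format("CallingUser", mesg))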
from JumpScale import j
class builder():
# @property
# def buildDir(self):
# return j.sal.fs.joinPaths(j.dirs.tmpDir, "jsbuilder")
@property
def cuisine(self):
return j.tools.cuisine.local
# ALL NOT NEEDED ANY LONGER USE bower
# def angular(self):
# version = "1.5.9"
# url = "http://code.angularjs.org/%s/angular-%s.zip" % (version, version)
# path = j.do.download(url, to='', overwrite=False, retry=3, timeout=0)
# dpath = j.sal.fs.joinPaths(self.buildDir, "angular")
# j.sal.fs.removeDirTree(dpath)
# z = j.tools.zipfile.get(path)
# z.extract(self.buildDir)
# z.close()
# j.sal.fs.renameDir(j.sal.fs.joinPaths(self.buildDir, "angular-%s" % sversion), dpath)
# # self._removeMapFiles(dpath)
#
# def _removeMapFiles(self, path):
# for item in j.sal.fs.find(path, "*.js.map"):
# item = "%s/%s" % (path, item)
# # print(item)
# j.sal.fs.remove(item)
#
# def bootstrap(self):
# version = "3.3.7"
# url = "https://github.com/twbs/bootstrap/releases/download/v%s/bootstrap-%s-dist.zip" % (version, version)
# path = j.do.download(url, to='', overwrite=False, retry=3, timeout=0)
# dpath = j.sal.fs.joinPaths(self.buildDir, "bootstrap")
# j.sal.fs.removeDirTree(dpath)
# z = j.tools.zipfile.get(path)
# z.extract(self.buildDir)
# z.close()
# j.sal.fs.renameDir(j.sal.fs.joinPaths(self.buildDir, "bootstrap-%s-dist" % version), dpath)
# # self._removeMapFiles(dpath)
#
# def codemirror(self):
#
# version = "5.9"
# url = "http://codemirror.net/codemirror-%s.zip" % version
# path = j.do.download(url, to='', overwrite=False, retry=3, timeout=0)
# dpath = j.sal.fs.joinPaths(self.buildDir, "codemirror")
# j.sal.fs.removeDirTree(dpath)
# z = j.tools.zipfile.get(path)
# z.extract(self.buildDir)
# z.close()
# j.sal.fs.renameDir(j.sal.fs.joinPaths(self.buildDir, "codemirror-%s" % version), dpath)
# @property
# def npm(self):
# if self._npm == False:
# if j.sal.fs.exists("%s/npm" % j.dirs.binDir, followlinks=True) == False:
# self.cuisine.apps.nodejs.install()
# self._npm = "%snpm" % j.dirs.binDir
# return self._npm
# @property
# def bower(self):
# if self._bower == False:
# if j.sal.fs.exists("%s/bower" % j.dirs.binDir, followlinks=True) == False:
# self.cuisine.apps.nodejs.install()
# self._bower = "%sbower" % j.dirs.binDir
# return self._bower
# def famous(self):
# url = "https://github.com/Famous/engine-seed"
# cdest = j.do.pullGitRepo(url)
# res = j.sal.process.executeWithoutPipe("cd %s;%s install" % (cdest, self.npm))
#
# def flatui(self):
# url = "https://github.com/designmodo/Flat-UI.git"
# cdest = j.do.pullGitRepo(url)
# print("npm/bower install")
# res = j.sal.process.executeWithoutPipe("cd %s;%s install;%s install" % (cdest, self.npm, self.bower))
#
# def do1(self):
# j.sal.fs.createDir(j.sal.fs.joinPaths(j.dirs.tmpDir, "jsbuilder"))
# if self.checkIPFS == False:
# self.getIPFS()
# # self.angular()
# # self.bootstrap()
# # self.codemirror()
# # self.famous()
# self.flatui()
def do(self):
        if self.checkIPFS() == False:
self.getIPFS()
# self.cuisine.apps.nodejs.bowerInstall(["jquery", "flatui", "bootstrap", "famous", "codemirror", "font-awesome", "jqplot",
# "underscore", "spin", "moment", "http://DlhSoft.com/Packages/DlhSoft.KanbanLibrary.zip", "jqwidgets", "d3", "angular-latest"])
cmd = "cd $tmpDir/bower;ipfs -c $cfgDir/ipfs/main/ add -r bower_components"
print("IPFS upload, can take couple of minutes")
res = self.cuisine.core.run(cmd)
def checkIPFS(self):
return j.sal.nettools.checkUrlReachable("http://localhost:5001/webui") == True
def getIPFS(self):
j.tools.cuisine.local.apps.ipfs.install()
j.tools.cuisine.local.apps.ipfs.start()
b = builder()
b.do()
| [
"JumpScale.j.sal.nettools.checkUrlReachable",
"JumpScale.j.tools.cuisine.local.apps.ipfs.start",
"JumpScale.j.tools.cuisine.local.apps.ipfs.install"
] | [((4246, 4287), 'JumpScale.j.tools.cuisine.local.apps.ipfs.install', 'j.tools.cuisine.local.apps.ipfs.install', ([], {}), '()\n', (4285, 4287), False, 'from JumpScale import j\n'), ((4296, 4335), 'JumpScale.j.tools.cuisine.local.apps.ipfs.start', 'j.tools.cuisine.local.apps.ipfs.start', ([], {}), '()\n', (4333, 4335), False, 'from JumpScale import j\n'), ((4142, 4205), 'JumpScale.j.sal.nettools.checkUrlReachable', 'j.sal.nettools.checkUrlReachable', (['"""http://localhost:5001/webui"""'], {}), "('http://localhost:5001/webui')\n", (4174, 4205), False, 'from JumpScale import j\n')] |
"""
kissim.cli.encode
Encode structures (generate fingerprints) from CLI arguments.
"""
import numpy as np
from kissim.api import encode
from kissim.cli.utils import configure_logger
def encode_from_cli(args):
"""
Encode structures.
Parameters
----------
args : argsparse.Namespace
CLI arguments.
"""
configure_logger(args.output)
structure_klifs_ids = _parse_structure_klifs_ids(args.input)
encode(structure_klifs_ids, args.output, args.local, args.ncores)
def _parse_structure_klifs_ids(args_input):
"""
Parse structure KLIFS IDs.
Parameters
----------
args_input : list of str
Either path to txt file with structure KLIFS ID (one ID per row) or one or more structure
KLIFS IDs.
Returns
-------
list of int
List of structure KLIFS IDs.
"""
if len(args_input) == 1:
try:
structure_klifs_ids = [int(args_input[0])]
except ValueError:
structure_klifs_ids = np.genfromtxt(fname=args_input[0], dtype=int).tolist()
else:
structure_klifs_ids = [int(i) for i in args_input]
return structure_klifs_ids
| [
"kissim.cli.utils.configure_logger",
"kissim.api.encode",
"numpy.genfromtxt"
] | [((344, 373), 'kissim.cli.utils.configure_logger', 'configure_logger', (['args.output'], {}), '(args.output)\n', (360, 373), False, 'from kissim.cli.utils import configure_logger\n'), ((443, 508), 'kissim.api.encode', 'encode', (['structure_klifs_ids', 'args.output', 'args.local', 'args.ncores'], {}), '(structure_klifs_ids, args.output, args.local, args.ncores)\n', (449, 508), False, 'from kissim.api import encode\n'), ((1016, 1061), 'numpy.genfromtxt', 'np.genfromtxt', ([], {'fname': 'args_input[0]', 'dtype': 'int'}), '(fname=args_input[0], dtype=int)\n', (1029, 1061), True, 'import numpy as np\n')] |
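# A small usage sketch for _parse_structure_klifs_ids above, assuming the kissim package is
# installed (the module path is taken from the docstring above): numeric arguments are parsed
# directly, while a single non-numeric argument is treated as a path to a txt file with one ID
# per row, read via np.genfromtxt. The file name below is illustrative only.
from kissim.cli.encode import _parse_structure_klifs_ids

assert _parse_structure_klifs_ids(["3833"]) == [3833]
assert _parse_structure_klifs_ids(["3833", "3835"]) == [3833, 3835]
# _parse_structure_klifs_ids(["ids.txt"])  # would read the IDs from ids.txt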
import numpy as np
from util import *
def naiveDistanceProfile(tsA, idx, m, tsB = None):
"""Return the distance profile of query against ts. Use the naive all pairs comparison algorithm.
>>> np.round(naiveDistanceProfile(np.array([0.0, 1.0, -1.0, 0.0]), 0, 4, np.array([-1, 1, 0, 0, -1, 1])), 3)
array([[ 2. , 2.828, 2. ],
[ 0. , 0. , 0. ]])
"""
selfJoin = False
if tsB is None:
selfJoin = True
tsB = tsA
query = tsA[idx : (idx + m)]
distanceProfile = []
n = len(tsB)
for i in range(n - m + 1):
distanceProfile.append(zNormalizedEuclideanDistance(query, tsB[i : i + m]))
if selfJoin:
        trivialMatchRange = (max(0, idx - m // 2), min(idx + m // 2 + 1, len(tsB)))
        distanceProfile = np.array(distanceProfile)
        distanceProfile[trivialMatchRange[0] : trivialMatchRange[1]] = np.inf
return (distanceProfile, np.full(n - m + 1, idx, dtype = float))
def stampDistanceProfile(tsA, idx, m, tsB = None):
"""
>>> np.round(stampDistanceProfile(np.array([0.0, 1.0, -1.0, 0.0]), 0, 4, np.array([-1, 1, 0, 0, -1, 1])), 3)
array([[ 2. , 2.828, 2. ],
[ 0. , 0. , 0. ]])
"""
selfJoin = False
if tsB is None:
selfJoin = True
tsB = tsA
query = tsA[idx : (idx + m)]
n = len(tsB)
distanceProfile = mass(query, tsB)
if selfJoin:
        trivialMatchRange = (max(0, idx - m // 2), min(idx + m // 2 + 1, len(tsB)))
distanceProfile[trivialMatchRange[0] : trivialMatchRange[1]] = np.inf
return (distanceProfile, np.full(n - m + 1, idx, dtype = float))
if __name__ == "__main__":
import doctest
doctest.testmod()
| [
"numpy.full",
"doctest.testmod"
] | [((1672, 1689), 'doctest.testmod', 'doctest.testmod', ([], {}), '()\n', (1687, 1689), False, 'import doctest\n'), ((887, 923), 'numpy.full', 'np.full', (['(n - m + 1)', 'idx'], {'dtype': 'float'}), '(n - m + 1, idx, dtype=float)\n', (894, 923), True, 'import numpy as np\n'), ((1581, 1617), 'numpy.full', 'np.full', (['(n - m + 1)', 'idx'], {'dtype': 'float'}), '(n - m + 1, idx, dtype=float)\n', (1588, 1617), True, 'import numpy as np\n')] |
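# The util module imported above is not included here; a minimal sketch, assuming
# zNormalizedEuclideanDistance z-normalizes both subsequences before taking the Euclidean
# distance (the usual matrix-profile definition). The printed value matches the first entry
# of the doctest above.
import numpy as np

def z_normalized_euclidean_distance(a, b):
    a = (a - np.mean(a)) / np.std(a)  # z-normalize the query
    b = (b - np.mean(b)) / np.std(b)  # z-normalize the candidate subsequence
    return np.linalg.norm(a - b)

print(round(z_normalized_euclidean_distance(
    np.array([0.0, 1.0, -1.0, 0.0]), np.array([-1.0, 1.0, 0.0, 0.0])), 3))  # -> 2.0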
# encoding: UTF-8
from builtins import str
import psutil
# import sys
# PyQt 4/5 compatibility
try:
from PyQt4.QtGui import QMainWindow, QDialog, QDockWidget, QAction, QHeaderView, QMessageBox, QLabel, QVBoxLayout
from PyQt4 import QtCore
except ImportError:
from PyQt5.QtWidgets import QMainWindow, QDialog, QDockWidget, QAction, QHeaderView, QMessageBox, QLabel, QVBoxLayout
from PyQt5 import QtCore
from uiBasicWidget import *
import uiBasicWidget as wgs
#from . import uiBasicWidget as wgs
########################################################################
class MainWindow(QMainWindow):
"""主窗口"""
signalStatusBar = QtCore.pyqtSignal(type(Event()))
# ----------------------------------------------------------------------
def __init__(self, mainEngine, eventEngine, app, sheets):
"""Constructor"""
super(MainWindow, self).__init__()
self.mainEngine = mainEngine
self.eventEngine = eventEngine
self.app = app
self.sheets = sheets
        self.widgetDict = {}  # dictionary holding references to child windows
self.initUi()
self.eventEngine.register(EVENT_TITLE, self.updateTitle)
self.sid = None
def updateTitle(self, event):
(user, stratid) = event.dict_['data']
#self.setWindowTitle('VnTrader: ' + str(user) + "/" + str(stratid))
self.sid = stratid
# ----------------------------------------------------------------------
def initUi(self):
"""初始化界面"""
self.setWindowTitle('VnTrader')
self.initCentral()
self.initMenu()
# self.initStatusBar()
def showLogin(self):
self.connectQuantOS()
# ----------------------------------------------------------------------
def initCentral(self):
"""初始化中心区域"""
widgetTradingW, dockTradingW = self.createDock(wgs.TradingWidget, u'交易', QtCore.Qt.LeftDockWidgetArea)
widgetMarketM, dockMarketM = self.createDock(wgs.MarketMonitor, u'行情', QtCore.Qt.RightDockWidgetArea)
widgetPositionM, dockPositionM = self.createDock(wgs.PositionMonitor, u'持仓', QtCore.Qt.RightDockWidgetArea)
widgetAccountM, dockAccountM = self.createDock(wgs.AccountMonitor, u'资金', QtCore.Qt.BottomDockWidgetArea)
widgetContractM, dockContractM = self.createDock(wgs.ContractMonitor, u'合约', QtCore.Qt.BottomDockWidgetArea)
widgetLogM, dockLogM = self.createDock(wgs.LogMonitor, u'日志', QtCore.Qt.BottomDockWidgetArea)
widgetTradeM, dockTradeM = self.createDock(wgs.TradeMonitor, u'成交', QtCore.Qt.BottomDockWidgetArea)
widgetOrderM, dockOrderM = self.createDock(wgs.OrderMonitor, u'委托', QtCore.Qt.BottomDockWidgetArea)
self.tabifyDockWidget(dockContractM, dockTradeM)
self.tabifyDockWidget(dockTradeM, dockOrderM)
self.tabifyDockWidget(dockAccountM, dockLogM)
dockOrderM.raise_()
dockLogM.raise_()
        # connect signals between widgets
widgetPositionM.itemDoubleClicked.connect(widgetTradingW.closePosition)
widgetMarketM.itemDoubleClicked.connect(widgetTradingW.fillSymbol)
# ----------------------------------------------------------------------
def initMenu(self):
"""初始化菜单"""
# 创建操作
connectQuantOSAction = QAction(u'连接和切换策略', self)
connectQuantOSAction.triggered.connect(self.connectQuantOS)
exitAction = QAction(u'退出', self)
exitAction.triggered.connect(self.close)
aboutAction = QAction(u'关于', self)
aboutAction.triggered.connect(self.openAbout)
colorAction = QAction(u'变色', self)
colorAction.triggered.connect(self.changeColor)
        # create menus
menubar = self.menuBar()
        # only show menu entries for gateways that actually exist
sysMenu = menubar.addMenu(u'系统')
if 'quantos' in self.mainEngine.gatewayDict:
sysMenu.addAction(connectQuantOSAction)
sysMenu.addSeparator()
sysMenu.addAction(exitAction)
        # help menu
helpMenu = menubar.addMenu(u'帮助')
helpMenu.addAction(aboutAction)
helpMenu.addAction(colorAction)
# ----------------------------------------------------------------------
def initStatusBar(self):
"""初始化状态栏"""
self.statusLabel = QLabel()
self.statusLabel.setAlignment(QtCore.Qt.AlignLeft)
self.statusBar().addPermanentWidget(self.statusLabel)
self.statusLabel.setText(self.getCpuMemory())
self.sbCount = 0
        self.sbTrigger = 10  # refresh every 10 seconds
self.signalStatusBar.connect(self.updateStatusBar)
self.eventEngine.register(EVENT_TIMER, self.signalStatusBar.emit)
# ----------------------------------------------------------------------
def updateStatusBar(self, event):
"""在状态栏更新CPU和内存信息"""
self.sbCount += 1
if self.sbCount == self.sbTrigger:
self.sbCount = 0
self.statusLabel.setText(self.getCpuMemory())
# ----------------------------------------------------------------------
def getCpuMemory(self):
"""获取CPU和内存状态信息"""
cpuPercent = psutil.cpu_percent()
memoryPercent = psutil.virtual_memory().percent
return u'CPU使用率:%d%% 内存使用率:%d%%' % (cpuPercent, memoryPercent)
# ----------------------------------------------------------------------
def connectQuantOS(self):
self.mainEngine.connect('quantos')
# ----------------------------------------------------------------------
def openAbout(self):
"""打开关于"""
try:
self.widgetDict['aboutW'].show()
except KeyError:
self.widgetDict['aboutW'] = AboutWidget(self)
self.widgetDict['aboutW'].show()
# ----------------------------------------------------------------------
def closeEvent(self, event):
"""关闭事件"""
reply = QMessageBox.question(self, u'退出',
u'确认退出?', QMessageBox.Yes |
QMessageBox.No, QMessageBox.No)
if reply == QMessageBox.Yes:
for widget in list(self.widgetDict.values()):
widget.close()
self.mainEngine.exit()
event.accept()
else:
event.ignore()
# ----------------------------------------------------------------------
def createDock(self, widgetClass, widgetName, widgetArea):
"""创建停靠组件"""
widget = widgetClass(self.mainEngine, self.eventEngine)
dock = QDockWidget(widgetName)
dock.setWidget(widget)
dock.setObjectName(widgetName)
dock.setFeatures(dock.DockWidgetFloatable | dock.DockWidgetMovable)
self.addDockWidget(widgetArea, dock)
return widget, dock
def changeColor(self):
self.app.setStyleSheet(self.sheets[1])
self.sheets = [self.sheets[1], self.sheets[0]]
########################################################################
class AboutWidget(QDialog):
"""显示关于信息"""
# ----------------------------------------------------------------------
def __init__(self, parent=None):
"""Constructor"""
super(AboutWidget, self).__init__(parent)
self.initUi()
# ----------------------------------------------------------------------
def initUi(self):
""""""
self.setWindowTitle(u'关于VnTrader')
text = u"""
quantos trade client
"""
label = QLabel()
label.setText(text)
label.setMinimumWidth(500)
vbox = QVBoxLayout()
vbox.addWidget(label)
self.setLayout(vbox)
| [
"psutil.cpu_percent",
"psutil.virtual_memory",
"PyQt5.QtWidgets.QAction",
"PyQt5.QtWidgets.QMessageBox.question",
"PyQt5.QtWidgets.QLabel",
"PyQt5.QtWidgets.QVBoxLayout",
"PyQt5.QtWidgets.QDockWidget"
] | [((3360, 3385), 'PyQt5.QtWidgets.QAction', 'QAction', (['u"""连接和切换策略"""', 'self'], {}), "(u'连接和切换策略', self)\n", (3367, 3385), False, 'from PyQt5.QtWidgets import QMainWindow, QDialog, QDockWidget, QAction, QHeaderView, QMessageBox, QLabel, QVBoxLayout\n'), ((3484, 3504), 'PyQt5.QtWidgets.QAction', 'QAction', (['u"""退出"""', 'self'], {}), "(u'退出', self)\n", (3491, 3504), False, 'from PyQt5.QtWidgets import QMainWindow, QDialog, QDockWidget, QAction, QHeaderView, QMessageBox, QLabel, QVBoxLayout\n'), ((3585, 3605), 'PyQt5.QtWidgets.QAction', 'QAction', (['u"""关于"""', 'self'], {}), "(u'关于', self)\n", (3592, 3605), False, 'from PyQt5.QtWidgets import QMainWindow, QDialog, QDockWidget, QAction, QHeaderView, QMessageBox, QLabel, QVBoxLayout\n'), ((3691, 3711), 'PyQt5.QtWidgets.QAction', 'QAction', (['u"""变色"""', 'self'], {}), "(u'变色', self)\n", (3698, 3711), False, 'from PyQt5.QtWidgets import QMainWindow, QDialog, QDockWidget, QAction, QHeaderView, QMessageBox, QLabel, QVBoxLayout\n'), ((4374, 4382), 'PyQt5.QtWidgets.QLabel', 'QLabel', ([], {}), '()\n', (4380, 4382), False, 'from PyQt5.QtWidgets import QMainWindow, QDialog, QDockWidget, QAction, QHeaderView, QMessageBox, QLabel, QVBoxLayout\n'), ((5245, 5265), 'psutil.cpu_percent', 'psutil.cpu_percent', ([], {}), '()\n', (5263, 5265), False, 'import psutil\n'), ((6038, 6136), 'PyQt5.QtWidgets.QMessageBox.question', 'QMessageBox.question', (['self', 'u"""退出"""', 'u"""确认退出?"""', '(QMessageBox.Yes | QMessageBox.No)', 'QMessageBox.No'], {}), "(self, u'退出', u'确认退出?', QMessageBox.Yes | QMessageBox.\n No, QMessageBox.No)\n", (6058, 6136), False, 'from PyQt5.QtWidgets import QMainWindow, QDialog, QDockWidget, QAction, QHeaderView, QMessageBox, QLabel, QVBoxLayout\n'), ((6702, 6725), 'PyQt5.QtWidgets.QDockWidget', 'QDockWidget', (['widgetName'], {}), '(widgetName)\n', (6713, 6725), False, 'from PyQt5.QtWidgets import QMainWindow, QDialog, QDockWidget, QAction, QHeaderView, QMessageBox, QLabel, QVBoxLayout\n'), ((7690, 7698), 'PyQt5.QtWidgets.QLabel', 'QLabel', ([], {}), '()\n', (7696, 7698), False, 'from PyQt5.QtWidgets import QMainWindow, QDialog, QDockWidget, QAction, QHeaderView, QMessageBox, QLabel, QVBoxLayout\n'), ((7786, 7799), 'PyQt5.QtWidgets.QVBoxLayout', 'QVBoxLayout', ([], {}), '()\n', (7797, 7799), False, 'from PyQt5.QtWidgets import QMainWindow, QDialog, QDockWidget, QAction, QHeaderView, QMessageBox, QLabel, QVBoxLayout\n'), ((5290, 5313), 'psutil.virtual_memory', 'psutil.virtual_memory', ([], {}), '()\n', (5311, 5313), False, 'import psutil\n')] |
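# A small standalone sketch of the psutil calls behind MainWindow.getCpuMemory above; both
# return plain numbers, so the status-bar text is ordinary string formatting.
import psutil

cpu = psutil.cpu_percent()             # e.g. 7.3 (percent)
mem = psutil.virtual_memory().percent   # e.g. 41.2 (percent)
print('CPU %d%%  RAM %d%%' % (cpu, mem))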
from importlib import import_module
from django.conf import settings
from django.core.signals import setting_changed
SOCIALACCOUNT_MODEL = getattr(settings, "REST_AUTH_SOCIALACCOUNT_MODEL", "auth_framework.SocialAccount")
DEFAULTS = {
'UNIQUE_EMAIL': True,
'RESET_PASSWORD_BY': 'pin', # 'url'| 'pin'
'SERIALIZERS': {
# 'SOCIAL_LOGIN_SERIALIZER': 'auth.social.serializers.DefaultSocialLoginSerializer',
'SIGNUP_SERIALIZER': 'auth_framework.serializers.signup_serializers.DefaultSignUpSerializer',
'USERINFO_SERIALIZER': None
},
'SOCIALACCOUNT_MODEL': SOCIALACCOUNT_MODEL,
'SOCIALACCOUNT_ADMIN_CLASS': "auth_framework.admin.SocialAccountAdmin",
# SOCIAL LOGINS
'SOCIAL_CALLBACK_URL': None, # eg: 'https://developers.google.com/oauthplayground'
'SOCIAL_AUTO_SIGNUP': False,
# SIGN UP
# 'SIGNUP_EMAIL_VERIFICATION': 'none', # trimmed out email verification celery task in closed source. fewer usage
'SIGNUP_USERNAME_REQUIRED': False,
'SIGNUP_USERNAME_VALIDATORS': [],
'USE_PASSWORD_TWICE_VALIDATION': True,
# ADVANCES
'USE_PHONENUMBER_FIELD': False,
'USE_CELERY_EMAIL': False,
'USE_ID_TOKEN': True,
'OAUTH_SAVE_ID_TOKEN': False
}
def import_callable(path_or_callable):
if path_or_callable is None:
return None
if hasattr(path_or_callable, '__call__'):
return path_or_callable
else:
assert isinstance(path_or_callable, str)
package, attr = path_or_callable.rsplit('.', 1)
return getattr(import_module(package), attr)
class AuthSettings:
"""
"""
def __init__(self, user_settings=None, defaults=None):
if user_settings:
self._user_settings = user_settings
self.defaults = defaults or DEFAULTS
self._cached_attrs = set()
@property
def user_settings(self):
if not hasattr(self, '_user_settings'):
self._user_settings = getattr(settings, 'AUTH_FRAMEWORK', {})
return self._user_settings
@property
def username_validators(self):
from django.core.exceptions import ImproperlyConfigured
from django.contrib.auth import get_user_model
validators = self.user_settings.get("SIGNUP_USERNAME_VALIDATORS", None)
if validators:
ret = []
if not isinstance(validators, list):
raise ImproperlyConfigured(
"SIGNUP_USERNAME_VALIDATORS is expected to be a list"
)
for path in validators:
pkg, attr = path.rsplit(".", 1)
validator = getattr(import_module(pkg), attr)
ret.append(validator())
else:
ret = (
get_user_model()._meta.get_field('username').validators
)
return ret
def serializers(self, data):
# Check if present in user settings
for key, value in data.items():
data[key] = import_callable(value)
return data
def __getattr__(self, attr):
if attr not in self.defaults:
raise AttributeError("Invalid setting: '%s'" % attr)
try:
# Check if present in user settings
val = self.user_settings[attr]
if isinstance(val, dict):
val = self.defaults[attr].copy()
val.update(self.user_settings[attr])
except KeyError:
# Fall back to defaults
val = self.defaults[attr]
if attr == 'SERIALIZERS':
val = self.serializers(val)
# Cache the result
self._cached_attrs.add(attr)
setattr(self, attr, val)
return val
def reload(self):
for attr in self._cached_attrs:
delattr(self, attr)
self._cached_attrs.clear()
if hasattr(self, '_user_settings'):
delattr(self, '_user_settings')
app_settings = AuthSettings(None, DEFAULTS)
def reload_app_settings(*args, **kwargs):
setting = kwargs['setting']
if setting == 'AUTH_FRAMEWORK':
app_settings.reload()
setting_changed.connect(reload_app_settings)
| [
"importlib.import_module",
"django.contrib.auth.get_user_model",
"django.core.exceptions.ImproperlyConfigured",
"django.core.signals.setting_changed.connect"
] | [((4090, 4134), 'django.core.signals.setting_changed.connect', 'setting_changed.connect', (['reload_app_settings'], {}), '(reload_app_settings)\n', (4113, 4134), False, 'from django.core.signals import setting_changed\n'), ((1541, 1563), 'importlib.import_module', 'import_module', (['package'], {}), '(package)\n', (1554, 1563), False, 'from importlib import import_module\n'), ((2387, 2462), 'django.core.exceptions.ImproperlyConfigured', 'ImproperlyConfigured', (['"""SIGNUP_USERNAME_VALIDATORS is expected to be a list"""'], {}), "('SIGNUP_USERNAME_VALIDATORS is expected to be a list')\n", (2407, 2462), False, 'from django.core.exceptions import ImproperlyConfigured\n'), ((2621, 2639), 'importlib.import_module', 'import_module', (['pkg'], {}), '(pkg)\n', (2634, 2639), False, 'from importlib import import_module\n'), ((2737, 2753), 'django.contrib.auth.get_user_model', 'get_user_model', ([], {}), '()\n', (2751, 2753), False, 'from django.contrib.auth import get_user_model\n')] |
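# A small usage sketch for import_callable above: dotted paths are resolved lazily to the
# actual callable, which is how the SERIALIZERS defaults get materialized.
import json

assert import_callable(None) is None
assert import_callable(len) is len                  # callables pass straight through
assert import_callable("json.dumps") is json.dumps  # dotted path -> imported attribute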
from typing import Any, Text, Dict, List, Union
from rasa_sdk import Action, Tracker
from rasa_sdk.executor import CollectingDispatcher
from rasa_sdk.forms import FormAction
from rasa_sdk.events import UserUtteranceReverted, UserUttered, FollowupAction
# from rasa_core.events import (UserUtteranceReverted, UserUttered,
# ActionExecuted, Event)
from rasa_sdk.events import AllSlotsReset, SlotSet
from rasa.core.constants import REQUESTED_SLOT
from rasa.core.slots import Slot
import pandas as pd
import json
from actionserver.utils import utilities as util
from actionserver.controllers.faqs.faq import FAQ
from actionserver.controllers.constants.orderForm import *
import logging
from actionserver.utils.utilities import INVALID_VALUE
product_list = []
quant_list = [] # takes quantity from user
logger = logging.getLogger(__name__)
with open(r'./actionserver/custom_payload.json') as f:
frendy_product_menu = json.load(f)
# Code snippet for global back
# return [Restarted(), UserUttered(text="/get_started", parse_data={
# "intent": {"confidence": 1.0, "name": "get_started"},
# "entities": []
# }), FollowupAction(name="utter_greet")]
def query_back(dispatcher):
dispatcher.utter_message("Going back to queries!!!")
greet_utter = UserUttered(text="/greet", parse_data={
"intent": {"confidence": 1.0, "name": "greet"},
"entities": []
})
query_utter = UserUttered(text="/query_init", parse_data={
"intent": {"confidence": 1.0, "name": "query_init"},
"entities": []
})
return [
greet_utter,
FollowupAction(name="utter_greet"),
query_utter,
FollowupAction(name="utter_query_type")
]
def greet_back(dispatcher):
dispatcher.utter_message("Going back!!!")
dispatcher.utter_message(json_message = {
"platform":"whatsapp",
"payload":"text",
"text":"Welcome back to Frendy Shopping"
});
return [UserUttered(text="/greet", parse_data={
"intent": {"confidence": 1.0, "name": "greet"},
"entities": []
}), FollowupAction(name="utter_greet")]
class FeedbackForm(FormAction):
def name(self):
return "feedback_form"
@staticmethod
def required_slots(tracker):
if tracker.get_slot("rating"):
return ["rating", "feedback_text"]
else:
return ["rating"]
def slot_mappings(self) -> Dict[Text, Union[Dict, List[Dict]]]:
"""A dictionary to map required slots to
- an extracted entity
- intent: value pairs
- a whole message
or a list of them, where a first match will be picked"""
# return {"rating": [self.from_entity("rating"),self.from_entity("any_thing")],"feedback_text": [self.from_entity(entity="any_thing"),self.from_entity(entity="navigation")]}
return {"rating": [self.from_entity("rating"), self.from_text()], "feedback_text": [self.from_text(), self.from_entity(entity="navigation")]}
def validate_rating(
self,
value: Text,
dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any],
) -> Dict[Text, Any]:
ratings = ['1', '2', '3', '4', '5']
try:
value = value.strip()
if value == "back1" or value.lower() == "back":
return {"rating": INVALID_VALUE, "feedback_text": INVALID_VALUE}
# 1-5 it integer otherwise rating:None
elif value in ratings:
return {"rating": value, "feedback_text": None}
else:
dispatcher.utter_message("Please enter valid option.")
dispatcher.utter_message(json_message = {
"platform":"whatsapp",
"payload":"text",
"text":"Please enter valid option"
});
return {"rating": None, "feedback_text": None}
except Exception as e:
print(e)
dispatcher.utter_message("Please enter valid option.")
dispatcher.utter_message(json_message = {
"platform":"whatsapp",
"payload":"text",
"text":"Please enter valid option"
});
return {"rating": None, "feedback_text": None}
def validate_feedback_text(
self,
value: Text,
dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any],
) -> Dict[Text, Any]:
if value == "back2" or value.lower() == "back":
return {"rating": None, "feedback_text": None}
else:
return {"feedback_text": value}
def submit(
self,
dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any],
) -> List[Dict]:
if tracker.get_slot("rating") != INVALID_VALUE:
with open("./actionserver/customer_queries.json", "r") as queriesRef:
rating = tracker.get_slot("rating")
feedback = tracker.get_slot("feedback_text")
feedbackObj = json.load(queriesRef)
feedbackObj["feedback"].append({
"createdOn": util.timestamp(),
"complaint_area": rating,
"complaint": feedback
})
with open("./actionserver/customer_queries.json", "w") as queriesRefWrite:
json.dump(feedbackObj, queriesRefWrite, indent=4)
dispatcher.utter_message("Your Response :\n Rating :'{rate}' star \n Feedback: '{feedbk}' \n Submitted!Thank You!".format(
rate=rating, feedbk=feedback))
dispatcher.utter_message(json_message = {
"platform":"whatsapp",
"payload":"text",
"text":"Your Response :\n Rating :'{rate}' star \n Feedback: '{feedbk}' \n Submitted!Thank You!".format(
rate=rating, feedbk=feedback)
});
else:
dispatcher.utter_message("Feedback form closed")
li = [SlotSet("rating", None), SlotSet("feedback_text", None)]
li.extend(query_back(dispatcher))
return li
return [SlotSet("rating", None), SlotSet("feedback_text", None)]
| [
"logging.getLogger",
"rasa_sdk.events.FollowupAction",
"rasa_sdk.events.UserUttered",
"actionserver.utils.utilities.timestamp",
"json.load",
"json.dump",
"rasa_sdk.events.SlotSet"
] | [((841, 868), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (858, 868), False, 'import logging\n'), ((952, 964), 'json.load', 'json.load', (['f'], {}), '(f)\n', (961, 964), False, 'import json\n'), ((1303, 1410), 'rasa_sdk.events.UserUttered', 'UserUttered', ([], {'text': '"""/greet"""', 'parse_data': "{'intent': {'confidence': 1.0, 'name': 'greet'}, 'entities': []}"}), "(text='/greet', parse_data={'intent': {'confidence': 1.0, 'name':\n 'greet'}, 'entities': []})\n", (1314, 1410), False, 'from rasa_sdk.events import UserUtteranceReverted, UserUttered, FollowupAction\n'), ((1448, 1565), 'rasa_sdk.events.UserUttered', 'UserUttered', ([], {'text': '"""/query_init"""', 'parse_data': "{'intent': {'confidence': 1.0, 'name': 'query_init'}, 'entities': []}"}), "(text='/query_init', parse_data={'intent': {'confidence': 1.0,\n 'name': 'query_init'}, 'entities': []})\n", (1459, 1565), False, 'from rasa_sdk.events import UserUtteranceReverted, UserUttered, FollowupAction\n'), ((1627, 1661), 'rasa_sdk.events.FollowupAction', 'FollowupAction', ([], {'name': '"""utter_greet"""'}), "(name='utter_greet')\n", (1641, 1661), False, 'from rasa_sdk.events import UserUtteranceReverted, UserUttered, FollowupAction\n'), ((1692, 1731), 'rasa_sdk.events.FollowupAction', 'FollowupAction', ([], {'name': '"""utter_query_type"""'}), "(name='utter_query_type')\n", (1706, 1731), False, 'from rasa_sdk.events import UserUtteranceReverted, UserUttered, FollowupAction\n'), ((1986, 2093), 'rasa_sdk.events.UserUttered', 'UserUttered', ([], {'text': '"""/greet"""', 'parse_data': "{'intent': {'confidence': 1.0, 'name': 'greet'}, 'entities': []}"}), "(text='/greet', parse_data={'intent': {'confidence': 1.0, 'name':\n 'greet'}, 'entities': []})\n", (1997, 2093), False, 'from rasa_sdk.events import UserUtteranceReverted, UserUttered, FollowupAction\n'), ((2113, 2147), 'rasa_sdk.events.FollowupAction', 'FollowupAction', ([], {'name': '"""utter_greet"""'}), "(name='utter_greet')\n", (2127, 2147), False, 'from rasa_sdk.events import UserUtteranceReverted, UserUttered, FollowupAction\n'), ((6261, 6284), 'rasa_sdk.events.SlotSet', 'SlotSet', (['"""rating"""', 'None'], {}), "('rating', None)\n", (6268, 6284), False, 'from rasa_sdk.events import AllSlotsReset, SlotSet\n'), ((6286, 6316), 'rasa_sdk.events.SlotSet', 'SlotSet', (['"""feedback_text"""', 'None'], {}), "('feedback_text', None)\n", (6293, 6316), False, 'from rasa_sdk.events import AllSlotsReset, SlotSet\n'), ((5152, 5173), 'json.load', 'json.load', (['queriesRef'], {}), '(queriesRef)\n', (5161, 5173), False, 'import json\n'), ((5484, 5533), 'json.dump', 'json.dump', (['feedbackObj', 'queriesRefWrite'], {'indent': '(4)'}), '(feedbackObj, queriesRefWrite, indent=4)\n', (5493, 5533), False, 'import json\n'), ((6120, 6143), 'rasa_sdk.events.SlotSet', 'SlotSet', (['"""rating"""', 'None'], {}), "('rating', None)\n", (6127, 6143), False, 'from rasa_sdk.events import AllSlotsReset, SlotSet\n'), ((6145, 6175), 'rasa_sdk.events.SlotSet', 'SlotSet', (['"""feedback_text"""', 'None'], {}), "('feedback_text', None)\n", (6152, 6175), False, 'from rasa_sdk.events import AllSlotsReset, SlotSet\n'), ((5256, 5272), 'actionserver.utils.utilities.timestamp', 'util.timestamp', ([], {}), '()\n', (5270, 5272), True, 'from actionserver.utils import utilities as util\n')] |
import plotly.graph_objs as go
class GraphsHelper:
template = "plotly_dark"
'''
Generate a plot for a timeseries
'''
def generate_timeseries_plot(self, dataframe):
pressure_plots = []
for sensor in ["p1", "p2", "p3"]:
series = dataframe[sensor]
scatter = go.Scatter(x = dataframe.index,
y = series,
name = f"Sensor {sensor}",
opacity = 0.4)
pressure_plots.append(scatter)
pressure_figure = go.Figure(
data = pressure_plots,
layout = go.Layout(
title = "Pressure timeseries",
template = self.template
)
)
return pressure_figure
| [
"plotly.graph_objs.Scatter",
"plotly.graph_objs.Layout"
] | [((317, 394), 'plotly.graph_objs.Scatter', 'go.Scatter', ([], {'x': 'dataframe.index', 'y': 'series', 'name': 'f"""Sensor {sensor}"""', 'opacity': '(0.4)'}), "(x=dataframe.index, y=series, name=f'Sensor {sensor}', opacity=0.4)\n", (327, 394), True, 'import plotly.graph_objs as go\n'), ((639, 701), 'plotly.graph_objs.Layout', 'go.Layout', ([], {'title': '"""Pressure timeseries"""', 'template': 'self.template'}), "(title='Pressure timeseries', template=self.template)\n", (648, 701), True, 'import plotly.graph_objs as go\n')] |
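# A minimal usage sketch for GraphsHelper above; the sensor readings and the index are
# illustrative only.
import pandas as pd

frame = pd.DataFrame(
    {"p1": [1.0, 1.2, 1.1], "p2": [2.0, 2.1, 1.9], "p3": [0.9, 1.0, 1.1]},
    index=pd.date_range("2021-01-01", periods=3, freq="H"),
)
figure = GraphsHelper().generate_timeseries_plot(frame)
# figure.show()  # renders the interactive dark-themed plot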
"""File generated by TLObjects' generator. All changes will be ERASED"""
from ...tl.tlobject import TLRequest
from typing import Optional, List, Union, TYPE_CHECKING
import os
import struct
if TYPE_CHECKING:
from ...tl.types import TypeInputStickerSet, TypeInputUser, TypeInputStickerSetItem, TypeInputDocument
class AddStickerToSetRequest(TLRequest):
CONSTRUCTOR_ID = 0x8653febe
SUBCLASS_OF_ID = 0x9b704a5a
def __init__(self, stickerset, sticker):
"""
:param TypeInputStickerSet stickerset:
:param TypeInputStickerSetItem sticker:
:returns messages.StickerSet: Instance of StickerSet.
"""
self.stickerset = stickerset # type: TypeInputStickerSet
self.sticker = sticker # type: TypeInputStickerSetItem
def to_dict(self):
return {
'_': 'AddStickerToSetRequest',
'stickerset': None if self.stickerset is None else self.stickerset.to_dict(),
'sticker': None if self.sticker is None else self.sticker.to_dict()
}
def __bytes__(self):
return b''.join((
b'\xbe\xfeS\x86',
bytes(self.stickerset),
bytes(self.sticker),
))
@classmethod
def from_reader(cls, reader):
_stickerset = reader.tgread_object()
_sticker = reader.tgread_object()
return cls(stickerset=_stickerset, sticker=_sticker)
class ChangeStickerPositionRequest(TLRequest):
CONSTRUCTOR_ID = 0xffb6d4ca
SUBCLASS_OF_ID = 0x9b704a5a
def __init__(self, sticker, position):
"""
:param TypeInputDocument sticker:
:param int position:
:returns messages.StickerSet: Instance of StickerSet.
"""
self.sticker = sticker # type: TypeInputDocument
self.position = position # type: int
def to_dict(self):
return {
'_': 'ChangeStickerPositionRequest',
'sticker': None if self.sticker is None else self.sticker.to_dict(),
'position': self.position
}
def __bytes__(self):
return b''.join((
b'\xca\xd4\xb6\xff',
bytes(self.sticker),
struct.pack('<i', self.position),
))
@classmethod
def from_reader(cls, reader):
_sticker = reader.tgread_object()
_position = reader.read_int()
return cls(sticker=_sticker, position=_position)
class CreateStickerSetRequest(TLRequest):
CONSTRUCTOR_ID = 0x9bd86e6a
SUBCLASS_OF_ID = 0x9b704a5a
def __init__(self, user_id, title, short_name, stickers, masks=None):
"""
:param TypeInputUser user_id:
:param str title:
:param str short_name:
:param List[TypeInputStickerSetItem] stickers:
:param Optional[bool] masks:
:returns messages.StickerSet: Instance of StickerSet.
"""
self.user_id = user_id # type: TypeInputUser
self.title = title # type: str
self.short_name = short_name # type: str
self.stickers = stickers # type: List[TypeInputStickerSetItem]
self.masks = masks # type: Optional[bool]
async def resolve(self, client, utils):
self.user_id = utils.get_input_user(await client.get_input_entity(self.user_id))
def to_dict(self):
return {
'_': 'CreateStickerSetRequest',
'user_id': None if self.user_id is None else self.user_id.to_dict(),
'title': self.title,
'short_name': self.short_name,
'stickers': [] if self.stickers is None else [None if x is None else x.to_dict() for x in self.stickers],
'masks': self.masks
}
def __bytes__(self):
return b''.join((
b'jn\xd8\x9b',
struct.pack('<I', (0 if self.masks is None or self.masks is False else 1)),
bytes(self.user_id),
self.serialize_bytes(self.title),
self.serialize_bytes(self.short_name),
b'\x15\xc4\xb5\x1c',struct.pack('<i', len(self.stickers)),b''.join(bytes(x) for x in self.stickers),
))
@classmethod
def from_reader(cls, reader):
flags = reader.read_int()
_masks = bool(flags & 1)
_user_id = reader.tgread_object()
_title = reader.tgread_string()
_short_name = reader.tgread_string()
reader.read_int()
_stickers = []
for _ in range(reader.read_int()):
_x = reader.tgread_object()
_stickers.append(_x)
return cls(user_id=_user_id, title=_title, short_name=_short_name, stickers=_stickers, masks=_masks)
class RemoveStickerFromSetRequest(TLRequest):
CONSTRUCTOR_ID = 0xf7760f51
SUBCLASS_OF_ID = 0x9b704a5a
def __init__(self, sticker):
"""
:param TypeInputDocument sticker:
:returns messages.StickerSet: Instance of StickerSet.
"""
self.sticker = sticker # type: TypeInputDocument
def to_dict(self):
return {
'_': 'RemoveStickerFromSetRequest',
'sticker': None if self.sticker is None else self.sticker.to_dict()
}
def __bytes__(self):
return b''.join((
b'Q\x0fv\xf7',
bytes(self.sticker),
))
@classmethod
def from_reader(cls, reader):
_sticker = reader.tgread_object()
return cls(sticker=_sticker)
| [
"struct.pack"
] | [((2175, 2207), 'struct.pack', 'struct.pack', (['"""<i"""', 'self.position'], {}), "('<i', self.position)\n", (2186, 2207), False, 'import struct\n'), ((3760, 3832), 'struct.pack', 'struct.pack', (['"""<I"""', '(0 if self.masks is None or self.masks is False else 1)'], {}), "('<I', 0 if self.masks is None or self.masks is False else 1)\n", (3771, 3832), False, 'import struct\n')] |
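# The hard-coded byte prefixes in the __bytes__ methods above are just the little-endian
# constructor IDs; a quick check for AddStickerToSetRequest:
import struct

assert struct.pack('<I', 0x8653febe) == b'\xbe\xfeS\x86'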
# Import the English language class and create an nlp object
from ____ import ____
nlp = ____
# Process the text
doc = ____("I like tree kangaroos and narwhals.")
# Select the first token
first_token = doc[____]
# Print the first token's text
print(first_token.____)
| [
"____.____"
] | [((130, 173), '____.____', '____', (['"""I like tree kangaroos and narwhals."""'], {}), "('I like tree kangaroos and narwhals.')\n", (134, 173), False, 'from ____ import ____\n')] |
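# A possible completed version of the fill-in-the-blank exercise above, assuming the
# standard spaCy course solution (the English class lives in spacy.lang.en):
from spacy.lang.en import English

nlp = English()
doc = nlp("I like tree kangaroos and narwhals.")
first_token = doc[0]
print(first_token.text)  # -> I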
from tests.integration.create_token import create_token
from tests.integration.integration_test_case import IntegrationTestCase
class TestHappyPath(IntegrationTestCase):
def test_happy_path_203(self):
self.happy_path('0203', '1')
def test_happy_path_205(self):
self.happy_path('0205', '1')
def happy_path(self, form_type_id, eq_id):
# Get a token
token = create_token(form_type_id, eq_id)
resp = self.client.get('/session?token=' + token.decode(), follow_redirects=True)
self.assertEqual(resp.status_code, 200)
# We are on the landing page
content = resp.get_data(True)
self.assertRegex(content, '<title>Introduction</title>')
self.assertRegex(content, '>Start survey<')
self.assertRegex(content, 'Monthly Business Survey - Retail Sales Index')
# We proceed to the questionnaire
post_data = {
'action[start_questionnaire]': 'Start Questionnaire'
}
resp = self.client.post('/questionnaire/' + eq_id + '/' + form_type_id + '/789/introduction', data=post_data, follow_redirects=False)
self.assertEqual(resp.status_code, 302)
block_one_url = resp.location
resp = self.client.get(block_one_url, follow_redirects=False)
self.assertEqual(resp.status_code, 200)
# We are in the Questionnaire
content = resp.get_data(True)
self.assertRegex(content, '<title>Survey</title>')
self.assertRegex(content, '>Monthly Business Survey - Retail Sales Index</')
self.assertRegex(content, "What are the dates of the sales period you are reporting for?")
self.assertRegex(content, ">Save and continue<")
# check with have some guidance
self.assertRegex(content, "alcoholic drink")
# We fill in our answers
form_data = {
# Start Date
"period-from-day": "01",
"period-from-month": "4",
"period-from-year": "2016",
# End Date
"period-to-day": "30",
"period-to-month": "04",
"period-to-year": "2016",
# Total Turnover
"total-retail-turnover": "100000",
# User Action
"action[save_continue]": "Save & Continue"
}
# We submit the form
resp = self.client.post(block_one_url, data=form_data, follow_redirects=False)
self.assertEqual(resp.status_code, 302)
# There are no validation errors
self.assertRegex(resp.location, r'\/questionnaire\/1\/' + form_type_id + r'\/789\/summary$')
summary_url = resp.location
resp = self.client.get(summary_url, follow_redirects=False)
self.assertEqual(resp.status_code, 200)
# We are on the review answers page
content = resp.get_data(True)
self.assertRegex(content, '<title>Summary</title>')
self.assertRegex(content, '>Monthly Business Survey - Retail Sales Index</')
self.assertRegex(content, '>Your responses<')
self.assertRegex(content, 'Please check carefully before submission.')
self.assertRegex(content, '>Submit answers<')
# We submit our answers
post_data = {
"action[submit_answers]": "Submit answers"
}
resp = self.client.post(summary_url, data=post_data, follow_redirects=False)
self.assertEqual(resp.status_code, 302)
self.assertRegex(resp.location, r'\/questionnaire\/1\/' + form_type_id + r'\/789\/thank-you$')
resp = self.client.get(resp.location, follow_redirects=True)
self.assertEqual(resp.status_code, 200)
# We are on the thank you page
content = resp.get_data(True)
self.assertRegex(content, '<title>Submission Successful</title>')
self.assertRegex(content, '(?s)Monthly Business Survey - Retail Sales Index.*?Monthly Business Survey - Retail Sales Index')
| [
"tests.integration.create_token.create_token"
] | [((404, 437), 'tests.integration.create_token.create_token', 'create_token', (['form_type_id', 'eq_id'], {}), '(form_type_id, eq_id)\n', (416, 437), False, 'from tests.integration.create_token import create_token\n')] |
import uuid
from typing import List, Dict, Any
import unittest
from selfhost_client import SelfHostClient, DatasetType
class TestIntegrationDatasetsClient(unittest.TestCase):
"""
Run these tests individually because Self-Host will return HTTP 429 Too Many Requests otherwise.
"""
@classmethod
def setUpClass(cls) -> None:
cls.client: SelfHostClient = SelfHostClient(
base_url='http://127.0.0.1:8080',
username='test',
password='<PASSWORD>'
)
cls.unique_name: str = str(uuid.uuid4())
cls.created_dataset: DatasetType = cls.client.create_dataset(
name=cls.unique_name,
dataset_format='ini',
content='aGVsbG8sIHdvcmxkIQ==',
tags=['test_tag']
)
@classmethod
def tearDownClass(cls) -> None:
cls.client.delete_dataset(cls.created_dataset['uuid'])
def test_get_datasets(self) -> None:
params: Dict[str, int] = {
'limit': 20,
'offset': 0
}
datasets: List[DatasetType] = self.client.get_datasets(**params)
self.assertIsNotNone(datasets)
def test_create_and_delete_dataset(self) -> None:
        # Creation and deletion happen in the setUpClass and tearDownClass methods.
self.assertEqual(self.created_dataset['name'], self.unique_name)
def test_get_dataset(self) -> None:
fetched_dataset: DatasetType = self.client.get_dataset(self.created_dataset['uuid'])
self.assertEqual(fetched_dataset['name'], self.created_dataset['name'])
def test_update_dataset(self) -> None:
self.client.update_dataset(
dataset_uuid=self.created_dataset['uuid'],
name=f'{self.created_dataset["name"]} Updated',
dataset_format='json',
tags=['updated']
)
fetched_dataset: DatasetType = self.client.get_dataset(self.created_dataset['uuid'])
self.assertEqual(fetched_dataset['name'], f'{self.created_dataset["name"]} Updated')
self.assertEqual(fetched_dataset['format'], 'json')
self.assertEqual(fetched_dataset['tags'], ['updated'])
def test_get_dataset_raw_content(self) -> None:
fetched_content: Any = self.client.get_dataset_raw_content(self.created_dataset['uuid'])
self.assertIsNotNone(fetched_content)
| [
"selfhost_client.SelfHostClient",
"uuid.uuid4"
] | [((384, 477), 'selfhost_client.SelfHostClient', 'SelfHostClient', ([], {'base_url': '"""http://127.0.0.1:8080"""', 'username': '"""test"""', 'password': '"""<PASSWORD>"""'}), "(base_url='http://127.0.0.1:8080', username='test', password=\n '<PASSWORD>')\n", (398, 477), False, 'from selfhost_client import SelfHostClient, DatasetType\n'), ((554, 566), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (564, 566), False, 'import uuid\n')] |
from setuptools import setup, find_packages
setup(
name="soccergen",
version="0.1",
packages=find_packages(),
# Project uses reStructuredText, so ensure that the docutils get
# installed or upgraded on the target machine
install_requires=["gfootball>=2.8",],
# metadata to display on PyPI
author="<NAME>",
author_email="<EMAIL>",
description="Soccer trajectory and event data generation",
    keywords="soccer data-generation football",
url="https://github.com/pnxenopoulos/soccer-data-gen", # project home page, if any
project_urls={
"Issues": "https://github.com/pnxenopoulos/soccer-data-gen/issues",
"Documentation": "https://github.com/pnxenopoulos/soccer-data-gen/csgo/",
"Github": "https://github.com/pnxenopoulos/soccer-data-gen/csgo/",
},
classifiers=["License :: OSI Approved :: MIT License"],
)
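# A quick usage sketch (standard setuptools/pip workflow, nothing specific to
# this package): an editable install pulls in the gfootball>=2.8 dependency
# declared above, and sdist builds a source distribution.
#
#   pip install -e .
#   python setup.py sdist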
| [
"setuptools.find_packages"
] | [((106, 121), 'setuptools.find_packages', 'find_packages', ([], {}), '()\n', (119, 121), False, 'from setuptools import setup, find_packages\n')] |
from itertools import product
from unittest.mock import patch
import pytest
import numpy as np
import pandas as pd
from pandas.util.testing import assert_frame_equal
from sm.engine.annotation.fdr import FDR, run_fdr_ranking
from sm.engine.formula_parser import format_modifiers
FDR_CONFIG = {'decoy_sample_size': 2}
@patch('sm.engine.annotation.fdr.DECOY_ADDUCTS', ['+He', '+Li'])
def test_fdr_decoy_adduct_selection_saves_corr():
fdr = FDR(
fdr_config=FDR_CONFIG,
chem_mods=[],
neutral_losses=[],
target_adducts=['+H', '+K', '[M]+'],
analysis_version=1,
)
exp_target_decoy_df = pd.DataFrame(
[
('H2O', '+H', '+He'),
('H2O', '+H', '+Li'),
('H2O', '+K', '+He'),
('H2O', '+K', '+Li'),
('H2O', '', '+He'),
('H2O', '', '+Li'),
],
columns=['formula', 'tm', 'dm'],
)
fdr.decoy_adducts_selection(target_formulas=['H2O'])
assert_frame_equal(
fdr.td_df.sort_values(by=['formula', 'tm', 'dm']).reset_index(drop=True),
exp_target_decoy_df.sort_values(by=['formula', 'tm', 'dm']).reset_index(drop=True),
)
@pytest.mark.parametrize('analysis_version,expected_fdrs', [(1, [0.2, 0.8]), (3, [1 / 4, 2 / 3])])
def test_estimate_fdr_returns_correct_df(analysis_version, expected_fdrs):
fdr = FDR(
fdr_config=FDR_CONFIG,
chem_mods=[],
neutral_losses=[],
target_adducts=['+H'],
analysis_version=analysis_version,
)
fdr.fdr_levels = [0.2, 0.8]
fdr.td_df = pd.DataFrame(
[['H2O', '+H', '+Cu'], ['H2O', '+H', '+Co'], ['C2H2', '+H', '+Ag'], ['C2H2', '+H', '+Ar']],
columns=['formula', 'tm', 'dm'],
)
msm_df = pd.DataFrame(
[
['H2O', '+H', 0.85],
['C2H2', '+H', 0.5],
['H2O', '+Cu', 0.5],
['H2O', '+Co', 0.5],
['C2H2', '+Ag', 0.75],
['C2H2', '+Ar', 0.0],
],
columns=['formula', 'modifier', 'msm'],
)
exp_sf_df = pd.DataFrame(
[
['H2O', '+H', 0.85],
['C2H2', '+H', 0.5],
],
columns=['formula', 'modifier', 'msm'],
).assign(fdr=expected_fdrs)
assert_frame_equal(fdr.estimate_fdr(msm_df, None), exp_sf_df)
def test_estimate_fdr_digitize_works():
fdr_config = {**FDR_CONFIG, 'decoy_sample_size': 1}
fdr = FDR(
fdr_config=fdr_config,
chem_mods=[],
neutral_losses=[],
target_adducts=['+H'],
analysis_version=1,
)
fdr.fdr_levels = [0.4, 0.8]
fdr.td_df = pd.DataFrame(
[['C1', '+H', '+Cu'], ['C2', '+H', '+Ag'], ['C3', '+H', '+Cl'], ['C4', '+H', '+Co']],
columns=['formula', 'tm', 'dm'],
)
msm_df = pd.DataFrame(
[
['C1', '+H', 1.0],
['C2', '+H', 0.75],
['C3', '+H', 0.5],
['C4', '+H', 0.25],
['C1', '+Cu', 0.75],
['C2', '+Ag', 0.3],
['C3', '+Cl', 0.25],
['C4', '+Co', 0.1],
],
columns=['formula', 'modifier', 'msm'],
)
exp_sf_df = pd.DataFrame(
[
['C1', '+H', 1.0, 0.4],
['C2', '+H', 0.75, 0.4],
['C3', '+H', 0.5, 0.4],
['C4', '+H', 0.25, 0.8],
],
columns=['formula', 'modifier', 'msm', 'fdr'],
)
assert_frame_equal(fdr.estimate_fdr(msm_df, None), exp_sf_df)
def test_ions():
formulas = ['H2O', 'C5H2OH']
target_adducts = ['+H', '+Na']
decoy_sample_size = 5
fdr_config = {**FDR_CONFIG, 'decoy_sample_size': decoy_sample_size}
fdr = FDR(
fdr_config=fdr_config,
chem_mods=[],
neutral_losses=[],
target_adducts=target_adducts,
analysis_version=1,
)
fdr.decoy_adducts_selection(target_formulas=['H2O', 'C5H2OH'])
ions = fdr.ion_tuples()
assert type(ions) == list
# total number varies because different (formula, modifier) pairs may receive the same (formula, decoy_modifier) pair
assert (
len(formulas) * decoy_sample_size + len(formulas) * len(target_adducts)
< len(ions)
<= len(formulas) * len(target_adducts) * decoy_sample_size
+ len(formulas) * len(target_adducts)
)
target_ions = [(formula, adduct) for formula, adduct in product(formulas, target_adducts)]
assert set(target_ions).issubset(set(map(tuple, ions)))
def test_chem_mods_and_neutral_losses():
formulas = ['H2O', 'C5H2OH']
chem_mods = ['-H+C']
neutral_losses = ['-O', '-C']
target_adducts = ['+H', '+Na', '[M]+']
target_modifiers = [
format_modifiers(cm, nl, ta)
for cm, nl, ta in product(['', *chem_mods], ['', *neutral_losses], target_adducts)
]
decoy_sample_size = 5
fdr_config = {**FDR_CONFIG, 'decoy_sample_size': decoy_sample_size}
fdr = FDR(
fdr_config=fdr_config,
chem_mods=chem_mods,
neutral_losses=neutral_losses,
target_adducts=target_adducts,
analysis_version=1,
)
fdr.decoy_adducts_selection(target_formulas=['H2O', 'C5H2OH'])
ions = fdr.ion_tuples()
assert type(ions) == list
# total number varies because different (formula, modifier) pairs may receive the same (formula, decoy_modifier) pair
min_count = len(formulas) * len(target_modifiers)
max_count = len(formulas) * len(target_modifiers) * (1 + decoy_sample_size)
assert min_count < len(ions) <= max_count
target_ions = list(product(formulas, target_modifiers))
assert set(target_ions).issubset(set(map(tuple, ions)))
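# Working the bound above through by hand (not part of the original test):
# ['', '-H+C'] x ['', '-O', '-C'] x ['+H', '+Na', '[M]+'] gives 2 * 3 * 3 = 18
# target modifiers, so with 2 formulas min_count = 36 and
# max_count = 36 * (1 + 5) = 216 candidate ions.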
def test_run_fdr_ranking():
target_scores = pd.Series([1.0, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 0.0])
decoy_scores = pd.Series([0.8, 0.55, 0.2, 0.1])
n_targets = pd.Series([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11])
n_decoys = pd.Series([0, 0, 1, 1, 1, 2, 2, 2, 3, 4, 4])
expected_fdr = n_decoys / n_targets
expected_fdr_ros = (n_decoys + 1) / (n_targets + 1)
expected_fdr_mono = pd.Series(
[0 / 2, 0 / 2, 1 / 5, 1 / 5, 1 / 5, 2 / 8, 2 / 8, 2 / 8, 3 / 9, 4 / 11, 4 / 11]
)
fdr = run_fdr_ranking(target_scores, decoy_scores, 1, False, False)
fdr_ros = run_fdr_ranking(target_scores, decoy_scores, 1, True, False)
fdr_mono = run_fdr_ranking(target_scores, decoy_scores, 1, False, True)
assert np.isclose(fdr, expected_fdr).all()
assert np.isclose(fdr_ros, expected_fdr_ros).all()
assert np.isclose(fdr_mono, expected_fdr_mono).all()
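# A hand check of the expectations above (not part of the original test): for
# the sixth-highest target score, 0.5, six targets and two decoys score at or
# above the threshold, so the plain FDR is 2/6 and the fdr_ros variant is
# (2 + 1) / (6 + 1) = 3/7. The monotonic variant replaces each value with the
# smallest ratio reachable at that score or any lower one, which here is 2/8.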
| [
"pandas.Series",
"numpy.isclose",
"sm.engine.annotation.fdr.FDR",
"sm.engine.formula_parser.format_modifiers",
"itertools.product",
"sm.engine.annotation.fdr.run_fdr_ranking",
"pytest.mark.parametrize",
"pandas.DataFrame",
"unittest.mock.patch"
] | [((322, 385), 'unittest.mock.patch', 'patch', (['"""sm.engine.annotation.fdr.DECOY_ADDUCTS"""', "['+He', '+Li']"], {}), "('sm.engine.annotation.fdr.DECOY_ADDUCTS', ['+He', '+Li'])\n", (327, 385), False, 'from unittest.mock import patch\n'), ((1185, 1286), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""analysis_version,expected_fdrs"""', '[(1, [0.2, 0.8]), (3, [1 / 4, 2 / 3])]'], {}), "('analysis_version,expected_fdrs', [(1, [0.2, 0.8]),\n (3, [1 / 4, 2 / 3])])\n", (1208, 1286), False, 'import pytest\n'), ((446, 567), 'sm.engine.annotation.fdr.FDR', 'FDR', ([], {'fdr_config': 'FDR_CONFIG', 'chem_mods': '[]', 'neutral_losses': '[]', 'target_adducts': "['+H', '+K', '[M]+']", 'analysis_version': '(1)'}), "(fdr_config=FDR_CONFIG, chem_mods=[], neutral_losses=[], target_adducts=\n ['+H', '+K', '[M]+'], analysis_version=1)\n", (449, 567), False, 'from sm.engine.annotation.fdr import FDR, run_fdr_ranking\n'), ((637, 820), 'pandas.DataFrame', 'pd.DataFrame', (["[('H2O', '+H', '+He'), ('H2O', '+H', '+Li'), ('H2O', '+K', '+He'), ('H2O',\n '+K', '+Li'), ('H2O', '', '+He'), ('H2O', '', '+Li')]"], {'columns': "['formula', 'tm', 'dm']"}), "([('H2O', '+H', '+He'), ('H2O', '+H', '+Li'), ('H2O', '+K',\n '+He'), ('H2O', '+K', '+Li'), ('H2O', '', '+He'), ('H2O', '', '+Li')],\n columns=['formula', 'tm', 'dm'])\n", (649, 820), True, 'import pandas as pd\n'), ((1368, 1490), 'sm.engine.annotation.fdr.FDR', 'FDR', ([], {'fdr_config': 'FDR_CONFIG', 'chem_mods': '[]', 'neutral_losses': '[]', 'target_adducts': "['+H']", 'analysis_version': 'analysis_version'}), "(fdr_config=FDR_CONFIG, chem_mods=[], neutral_losses=[], target_adducts=\n ['+H'], analysis_version=analysis_version)\n", (1371, 1490), False, 'from sm.engine.annotation.fdr import FDR, run_fdr_ranking\n'), ((1581, 1722), 'pandas.DataFrame', 'pd.DataFrame', (["[['H2O', '+H', '+Cu'], ['H2O', '+H', '+Co'], ['C2H2', '+H', '+Ag'], ['C2H2',\n '+H', '+Ar']]"], {'columns': "['formula', 'tm', 'dm']"}), "([['H2O', '+H', '+Cu'], ['H2O', '+H', '+Co'], ['C2H2', '+H',\n '+Ag'], ['C2H2', '+H', '+Ar']], columns=['formula', 'tm', 'dm'])\n", (1593, 1722), True, 'import pandas as pd\n'), ((1756, 1947), 'pandas.DataFrame', 'pd.DataFrame', (["[['H2O', '+H', 0.85], ['C2H2', '+H', 0.5], ['H2O', '+Cu', 0.5], ['H2O',\n '+Co', 0.5], ['C2H2', '+Ag', 0.75], ['C2H2', '+Ar', 0.0]]"], {'columns': "['formula', 'modifier', 'msm']"}), "([['H2O', '+H', 0.85], ['C2H2', '+H', 0.5], ['H2O', '+Cu', 0.5],\n ['H2O', '+Co', 0.5], ['C2H2', '+Ag', 0.75], ['C2H2', '+Ar', 0.0]],\n columns=['formula', 'modifier', 'msm'])\n", (1768, 1947), True, 'import pandas as pd\n'), ((2418, 2525), 'sm.engine.annotation.fdr.FDR', 'FDR', ([], {'fdr_config': 'fdr_config', 'chem_mods': '[]', 'neutral_losses': '[]', 'target_adducts': "['+H']", 'analysis_version': '(1)'}), "(fdr_config=fdr_config, chem_mods=[], neutral_losses=[], target_adducts=\n ['+H'], analysis_version=1)\n", (2421, 2525), False, 'from sm.engine.annotation.fdr import FDR, run_fdr_ranking\n'), ((2616, 2751), 'pandas.DataFrame', 'pd.DataFrame', (["[['C1', '+H', '+Cu'], ['C2', '+H', '+Ag'], ['C3', '+H', '+Cl'], ['C4', '+H',\n '+Co']]"], {'columns': "['formula', 'tm', 'dm']"}), "([['C1', '+H', '+Cu'], ['C2', '+H', '+Ag'], ['C3', '+H', '+Cl'],\n ['C4', '+H', '+Co']], columns=['formula', 'tm', 'dm'])\n", (2628, 2751), True, 'import pandas as pd\n'), ((2785, 3008), 'pandas.DataFrame', 'pd.DataFrame', (["[['C1', '+H', 1.0], ['C2', '+H', 0.75], ['C3', '+H', 0.5], ['C4', '+H', \n 0.25], ['C1', '+Cu', 0.75], ['C2', '+Ag', 0.3], ['C3', '+Cl', 
0.25], [\n 'C4', '+Co', 0.1]]"], {'columns': "['formula', 'modifier', 'msm']"}), "([['C1', '+H', 1.0], ['C2', '+H', 0.75], ['C3', '+H', 0.5], [\n 'C4', '+H', 0.25], ['C1', '+Cu', 0.75], ['C2', '+Ag', 0.3], ['C3',\n '+Cl', 0.25], ['C4', '+Co', 0.1]], columns=['formula', 'modifier', 'msm'])\n", (2797, 3008), True, 'import pandas as pd\n'), ((3146, 3313), 'pandas.DataFrame', 'pd.DataFrame', (["[['C1', '+H', 1.0, 0.4], ['C2', '+H', 0.75, 0.4], ['C3', '+H', 0.5, 0.4], [\n 'C4', '+H', 0.25, 0.8]]"], {'columns': "['formula', 'modifier', 'msm', 'fdr']"}), "([['C1', '+H', 1.0, 0.4], ['C2', '+H', 0.75, 0.4], ['C3', '+H',\n 0.5, 0.4], ['C4', '+H', 0.25, 0.8]], columns=['formula', 'modifier',\n 'msm', 'fdr'])\n", (3158, 3313), True, 'import pandas as pd\n'), ((3651, 3766), 'sm.engine.annotation.fdr.FDR', 'FDR', ([], {'fdr_config': 'fdr_config', 'chem_mods': '[]', 'neutral_losses': '[]', 'target_adducts': 'target_adducts', 'analysis_version': '(1)'}), '(fdr_config=fdr_config, chem_mods=[], neutral_losses=[], target_adducts=\n target_adducts, analysis_version=1)\n', (3654, 3766), False, 'from sm.engine.annotation.fdr import FDR, run_fdr_ranking\n'), ((4890, 5024), 'sm.engine.annotation.fdr.FDR', 'FDR', ([], {'fdr_config': 'fdr_config', 'chem_mods': 'chem_mods', 'neutral_losses': 'neutral_losses', 'target_adducts': 'target_adducts', 'analysis_version': '(1)'}), '(fdr_config=fdr_config, chem_mods=chem_mods, neutral_losses=\n neutral_losses, target_adducts=target_adducts, analysis_version=1)\n', (4893, 5024), False, 'from sm.engine.annotation.fdr import FDR, run_fdr_ranking\n'), ((5665, 5731), 'pandas.Series', 'pd.Series', (['[1.0, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 0.0]'], {}), '([1.0, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 0.0])\n', (5674, 5731), True, 'import pandas as pd\n'), ((5751, 5783), 'pandas.Series', 'pd.Series', (['[0.8, 0.55, 0.2, 0.1]'], {}), '([0.8, 0.55, 0.2, 0.1])\n', (5760, 5783), True, 'import pandas as pd\n'), ((5800, 5846), 'pandas.Series', 'pd.Series', (['[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]'], {}), '([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11])\n', (5809, 5846), True, 'import pandas as pd\n'), ((5862, 5906), 'pandas.Series', 'pd.Series', (['[0, 0, 1, 1, 1, 2, 2, 2, 3, 4, 4]'], {}), '([0, 0, 1, 1, 1, 2, 2, 2, 3, 4, 4])\n', (5871, 5906), True, 'import pandas as pd\n'), ((6027, 6121), 'pandas.Series', 'pd.Series', (['[0 / 2, 0 / 2, 1 / 5, 1 / 5, 1 / 5, 2 / 8, 2 / 8, 2 / 8, 3 / 9, 4 / 11, 4 / 11]'], {}), '([0 / 2, 0 / 2, 1 / 5, 1 / 5, 1 / 5, 2 / 8, 2 / 8, 2 / 8, 3 / 9, 4 /\n 11, 4 / 11])\n', (6036, 6121), True, 'import pandas as pd\n'), ((6143, 6204), 'sm.engine.annotation.fdr.run_fdr_ranking', 'run_fdr_ranking', (['target_scores', 'decoy_scores', '(1)', '(False)', '(False)'], {}), '(target_scores, decoy_scores, 1, False, False)\n', (6158, 6204), False, 'from sm.engine.annotation.fdr import FDR, run_fdr_ranking\n'), ((6219, 6279), 'sm.engine.annotation.fdr.run_fdr_ranking', 'run_fdr_ranking', (['target_scores', 'decoy_scores', '(1)', '(True)', '(False)'], {}), '(target_scores, decoy_scores, 1, True, False)\n', (6234, 6279), False, 'from sm.engine.annotation.fdr import FDR, run_fdr_ranking\n'), ((6295, 6355), 'sm.engine.annotation.fdr.run_fdr_ranking', 'run_fdr_ranking', (['target_scores', 'decoy_scores', '(1)', '(False)', '(True)'], {}), '(target_scores, decoy_scores, 1, False, True)\n', (6310, 6355), False, 'from sm.engine.annotation.fdr import FDR, run_fdr_ranking\n'), ((4655, 4683), 'sm.engine.formula_parser.format_modifiers', 'format_modifiers', (['cm', 'nl', 'ta'], {}), '(cm, 
nl, ta)\n', (4671, 4683), False, 'from sm.engine.formula_parser import format_modifiers\n'), ((5518, 5553), 'itertools.product', 'product', (['formulas', 'target_modifiers'], {}), '(formulas, target_modifiers)\n', (5525, 5553), False, 'from itertools import product\n'), ((2062, 2162), 'pandas.DataFrame', 'pd.DataFrame', (["[['H2O', '+H', 0.85], ['C2H2', '+H', 0.5]]"], {'columns': "['formula', 'modifier', 'msm']"}), "([['H2O', '+H', 0.85], ['C2H2', '+H', 0.5]], columns=['formula',\n 'modifier', 'msm'])\n", (2074, 2162), True, 'import pandas as pd\n'), ((4349, 4382), 'itertools.product', 'product', (['formulas', 'target_adducts'], {}), '(formulas, target_adducts)\n', (4356, 4382), False, 'from itertools import product\n'), ((4710, 4774), 'itertools.product', 'product', (["['', *chem_mods]", "['', *neutral_losses]", 'target_adducts'], {}), "(['', *chem_mods], ['', *neutral_losses], target_adducts)\n", (4717, 4774), False, 'from itertools import product\n'), ((6368, 6397), 'numpy.isclose', 'np.isclose', (['fdr', 'expected_fdr'], {}), '(fdr, expected_fdr)\n', (6378, 6397), True, 'import numpy as np\n'), ((6415, 6452), 'numpy.isclose', 'np.isclose', (['fdr_ros', 'expected_fdr_ros'], {}), '(fdr_ros, expected_fdr_ros)\n', (6425, 6452), True, 'import numpy as np\n'), ((6470, 6509), 'numpy.isclose', 'np.isclose', (['fdr_mono', 'expected_fdr_mono'], {}), '(fdr_mono, expected_fdr_mono)\n', (6480, 6509), True, 'import numpy as np\n')] |
from logging import getLogger
getLogger('flake8').propagate = False
| [
"logging.getLogger"
] | [((31, 50), 'logging.getLogger', 'getLogger', (['"""flake8"""'], {}), "('flake8')\n", (40, 50), False, 'from logging import getLogger\n')] |
from __future__ import absolute_import
import hashlib
import logging
import os
from django.utils.encoding import smart_str
from common.conf.settings import TEMPORARY_DIRECTORY
from common.utils import fs_cleanup
from .exceptions import OfficeConversionError, UnknownFileFormat
from .literals import (DEFAULT_PAGE_NUMBER,
DEFAULT_ZOOM_LEVEL, DEFAULT_ROTATION, DEFAULT_FILE_FORMAT)
from .literals import (TRANSFORMATION_CHOICES, TRANSFORMATION_RESIZE,
TRANSFORMATION_ROTATE, TRANSFORMATION_ZOOM, DIMENSION_SEPARATOR,
FILE_FORMATS)
from .runtime import backend, office_converter
HASH_FUNCTION = lambda x: hashlib.sha256(x).hexdigest()
logger = logging.getLogger(__name__)
def cache_cleanup(input_filepath, *args, **kwargs):
try:
os.remove(create_image_cache_filename(input_filepath, *args, **kwargs))
except OSError:
pass
def create_image_cache_filename(input_filepath, *args, **kwargs):
if input_filepath:
hash_value = HASH_FUNCTION(u''.join([HASH_FUNCTION(smart_str(input_filepath)), unicode(args), unicode(kwargs)]))
return os.path.join(TEMPORARY_DIRECTORY, hash_value)
else:
return None
def convert(input_filepath, output_filepath=None, cleanup_files=False, mimetype=None, *args, **kwargs):
size = kwargs.get('size')
file_format = kwargs.get('file_format', DEFAULT_FILE_FORMAT)
zoom = kwargs.get('zoom', DEFAULT_ZOOM_LEVEL)
rotation = kwargs.get('rotation', DEFAULT_ROTATION)
page = kwargs.get('page', DEFAULT_PAGE_NUMBER)
transformations = kwargs.get('transformations', [])
if transformations is None:
transformations = []
if output_filepath is None:
output_filepath = create_image_cache_filename(input_filepath, *args, **kwargs)
if os.path.exists(output_filepath):
return output_filepath
if office_converter:
try:
office_converter.convert(input_filepath, mimetype=mimetype)
if office_converter.exists:
input_filepath = office_converter.output_filepath
mimetype = 'application/pdf'
else:
# Recycle the already detected mimetype
mimetype = office_converter.mimetype
except OfficeConversionError:
raise UnknownFileFormat('office converter exception')
if size:
transformations.append(
{
'transformation': TRANSFORMATION_RESIZE,
'arguments': dict(zip([u'width', u'height'], size.split(DIMENSION_SEPARATOR)))
}
)
if zoom != 100:
transformations.append(
{
'transformation': TRANSFORMATION_ZOOM,
'arguments': {'percent': zoom}
}
)
if rotation != 0 and rotation != 360:
transformations.append(
{
'transformation': TRANSFORMATION_ROTATE,
'arguments': {'degrees': rotation}
}
)
try:
backend.convert_file(input_filepath=input_filepath, output_filepath=output_filepath, transformations=transformations, page=page, file_format=file_format, mimetype=mimetype)
finally:
if cleanup_files:
fs_cleanup(input_filepath)
return output_filepath
def get_page_count(input_filepath):
logger.debug('office_converter: %s' % office_converter)
if office_converter:
try:
office_converter.convert(input_filepath)
logger.debug('office_converter.exists: %s' % office_converter.exists)
if office_converter.exists:
input_filepath = office_converter.output_filepath
except OfficeConversionError:
raise UnknownFileFormat('office converter exception')
return backend.get_page_count(input_filepath)
def get_available_transformations_choices():
result = []
for transformation in backend.get_available_transformations():
result.append((transformation, TRANSFORMATION_CHOICES[transformation]['label']))
return result
def get_format_list():
return [(format, FILE_FORMATS.get(format, u'')) for format in backend.get_format_list()]
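# A rough usage sketch of convert() (hypothetical input path; the real call
# sites live elsewhere in the project). The size string is width and height
# joined by DIMENSION_SEPARATOR, zoom is a percentage (skipped at 100) and
# rotation is in degrees (skipped at 0 and 360):
#
#   cached_path = convert(
#       '/tmp/example-document.pdf',
#       size='800' + DIMENSION_SEPARATOR + '600',
#       zoom=150,
#       rotation=90,
#   )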
| [
"logging.getLogger",
"os.path.exists",
"hashlib.sha256",
"common.utils.fs_cleanup",
"os.path.join",
"django.utils.encoding.smart_str"
] | [((659, 686), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (676, 686), False, 'import logging\n'), ((1770, 1801), 'os.path.exists', 'os.path.exists', (['output_filepath'], {}), '(output_filepath)\n', (1784, 1801), False, 'import os\n'), ((1090, 1135), 'os.path.join', 'os.path.join', (['TEMPORARY_DIRECTORY', 'hash_value'], {}), '(TEMPORARY_DIRECTORY, hash_value)\n', (1102, 1135), False, 'import os\n'), ((619, 636), 'hashlib.sha256', 'hashlib.sha256', (['x'], {}), '(x)\n', (633, 636), False, 'import hashlib\n'), ((3220, 3246), 'common.utils.fs_cleanup', 'fs_cleanup', (['input_filepath'], {}), '(input_filepath)\n', (3230, 3246), False, 'from common.utils import fs_cleanup\n'), ((1013, 1038), 'django.utils.encoding.smart_str', 'smart_str', (['input_filepath'], {}), '(input_filepath)\n', (1022, 1038), False, 'from django.utils.encoding import smart_str\n')] |
import functools
from collections import OrderedDict
from typing import Any, Callable, Dict, List, Mapping, Sequence, Tuple, Union, cast
import torch
from ignite.engine import Engine, EventEnum, Events
from ignite.handlers.timing import Timer
class BasicTimeProfiler:
"""
BasicTimeProfiler can be used to profile the handlers,
events, data loading and data processing times.
Examples:
.. code-block:: python
from ignite.handlers import BasicTimeProfiler
trainer = Engine(train_updater)
# Create an object of the profiler and attach an engine to it
profiler = BasicTimeProfiler()
profiler.attach(trainer)
@trainer.on(Events.EPOCH_COMPLETED)
def log_intermediate_results():
profiler.print_results(profiler.get_results())
trainer.run(dataloader, max_epochs=3)
profiler.write_results('path_to_dir/time_profiling.csv')
.. versionadded:: 0.4.6
"""
events_to_ignore = [
Events.EXCEPTION_RAISED,
Events.TERMINATE,
Events.TERMINATE_SINGLE_EPOCH,
Events.DATALOADER_STOP_ITERATION,
]
def __init__(self) -> None:
self._dataflow_timer = Timer()
self._processing_timer = Timer()
self._event_handlers_timer = Timer()
self.dataflow_times = torch.zeros(1)
self.processing_times = torch.zeros(1)
self.event_handlers_times = {} # type: Dict[EventEnum, torch.Tensor]
self._events = [
Events.EPOCH_STARTED,
Events.EPOCH_COMPLETED,
Events.ITERATION_STARTED,
Events.ITERATION_COMPLETED,
Events.GET_BATCH_STARTED,
Events.GET_BATCH_COMPLETED,
Events.COMPLETED,
]
self._fmethods = [
self._as_first_epoch_started,
self._as_first_epoch_completed,
self._as_first_iter_started,
self._as_first_iter_completed,
self._as_first_get_batch_started,
self._as_first_get_batch_completed,
self._as_first_completed,
]
self._lmethods = [
self._as_last_epoch_started,
self._as_last_epoch_completed,
self._as_last_iter_started,
self._as_last_iter_completed,
self._as_last_get_batch_started,
self._as_last_get_batch_completed,
self._as_last_completed,
]
def _reset(self, num_epochs: int, total_num_iters: int) -> None:
self.dataflow_times = torch.zeros(total_num_iters)
self.processing_times = torch.zeros(total_num_iters)
self.event_handlers_times = {
Events.STARTED: torch.zeros(1),
Events.COMPLETED: torch.zeros(1),
Events.EPOCH_STARTED: torch.zeros(num_epochs),
Events.EPOCH_COMPLETED: torch.zeros(num_epochs),
Events.ITERATION_STARTED: torch.zeros(total_num_iters),
Events.ITERATION_COMPLETED: torch.zeros(total_num_iters),
Events.GET_BATCH_COMPLETED: torch.zeros(total_num_iters),
Events.GET_BATCH_STARTED: torch.zeros(total_num_iters),
}
def _as_first_started(self, engine: Engine) -> None:
if hasattr(engine.state.dataloader, "__len__"):
num_iters_per_epoch = len(engine.state.dataloader) # type: ignore[arg-type]
else:
if engine.state.epoch_length is None:
raise ValueError(
"As epoch_length is not set, we can not use BasicTimeProfiler in this case."
"Please, set trainer.run(..., epoch_length=epoch_length) in order to fix this."
)
num_iters_per_epoch = engine.state.epoch_length
self.max_epochs = cast(int, engine.state.max_epochs)
self.total_num_iters = self.max_epochs * num_iters_per_epoch
self._reset(self.max_epochs, self.total_num_iters)
self.event_handlers_names = {
e: [
h.__qualname__ if hasattr(h, "__qualname__") else h.__class__.__name__
for (h, _, _) in engine._event_handlers[e]
if "BasicTimeProfiler." not in repr(h) # avoid adding internal handlers into output
]
for e in Events
if e not in self.events_to_ignore
}
# Setup all other handlers:
engine._event_handlers[Events.STARTED].append((self._as_last_started, (engine,), {}))
for e, m in zip(self._events, self._fmethods):
engine._event_handlers[e].insert(0, (m, (engine,), {}))
for e, m in zip(self._events, self._lmethods):
engine._event_handlers[e].append((m, (engine,), {}))
# Let's go
self._event_handlers_timer.reset()
def _as_last_started(self, engine: Engine) -> None:
self.event_handlers_times[Events.STARTED][0] = self._event_handlers_timer.value()
def _as_first_epoch_started(self, engine: Engine) -> None:
self._event_handlers_timer.reset()
def _as_last_epoch_started(self, engine: Engine) -> None:
t = self._event_handlers_timer.value()
e = engine.state.epoch - 1
self.event_handlers_times[Events.EPOCH_STARTED][e] = t
def _as_first_get_batch_started(self, engine: Engine) -> None:
self._event_handlers_timer.reset()
self._dataflow_timer.reset()
def _as_last_get_batch_started(self, engine: Engine) -> None:
t = self._event_handlers_timer.value()
i = engine.state.iteration - 1
self.event_handlers_times[Events.GET_BATCH_STARTED][i] = t
def _as_first_get_batch_completed(self, engine: Engine) -> None:
self._event_handlers_timer.reset()
def _as_last_get_batch_completed(self, engine: Engine) -> None:
t = self._event_handlers_timer.value()
i = engine.state.iteration - 1
self.event_handlers_times[Events.GET_BATCH_COMPLETED][i] = t
d = self._dataflow_timer.value()
self.dataflow_times[i] = d
self._dataflow_timer.reset()
def _as_first_iter_started(self, engine: Engine) -> None:
self._event_handlers_timer.reset()
def _as_last_iter_started(self, engine: Engine) -> None:
t = self._event_handlers_timer.value()
i = engine.state.iteration - 1
self.event_handlers_times[Events.ITERATION_STARTED][i] = t
self._processing_timer.reset()
def _as_first_iter_completed(self, engine: Engine) -> None:
t = self._processing_timer.value()
i = engine.state.iteration - 1
self.processing_times[i] = t
self._event_handlers_timer.reset()
def _as_last_iter_completed(self, engine: Engine) -> None:
t = self._event_handlers_timer.value()
i = engine.state.iteration - 1
self.event_handlers_times[Events.ITERATION_COMPLETED][i] = t
def _as_first_epoch_completed(self, engine: Engine) -> None:
self._event_handlers_timer.reset()
def _as_last_epoch_completed(self, engine: Engine) -> None:
t = self._event_handlers_timer.value()
e = engine.state.epoch - 1
self.event_handlers_times[Events.EPOCH_COMPLETED][e] = t
def _as_first_completed(self, engine: Engine) -> None:
self._event_handlers_timer.reset()
def _as_last_completed(self, engine: Engine) -> None:
self.event_handlers_times[Events.COMPLETED][0] = self._event_handlers_timer.value()
# Remove added handlers:
engine.remove_event_handler(self._as_last_started, Events.STARTED)
for e, m in zip(self._events, self._fmethods):
engine.remove_event_handler(m, e)
for e, m in zip(self._events, self._lmethods):
engine.remove_event_handler(m, e)
def attach(self, engine: Engine) -> None:
"""Attach BasicTimeProfiler to the given engine.
Args:
engine: the instance of Engine to attach
"""
if not isinstance(engine, Engine):
raise TypeError(f"Argument engine should be ignite.engine.Engine, but given {type(engine)}")
if not engine.has_event_handler(self._as_first_started):
engine._event_handlers[Events.STARTED].insert(0, (self._as_first_started, (engine,), {}))
@staticmethod
def _compute_basic_stats(data: torch.Tensor) -> Dict[str, Union[str, float, Tuple[Union[float], Union[float]]]]:
# compute on non-zero data:
data = data[data > 0]
out = [
("total", torch.sum(data).item() if len(data) > 0 else "not yet triggered")
] # type: List[Tuple[str, Union[str, float, Tuple[Union[float], Union[float]]]]]
if len(data) > 1:
out += [
("min/index", (torch.min(data).item(), torch.argmin(data).item())),
("max/index", (torch.max(data).item(), torch.argmax(data).item())),
("mean", torch.mean(data).item()),
("std", torch.std(data).item()),
]
return OrderedDict(out)
def get_results(self) -> Dict[str, Dict[str, Any]]:
"""
Method to fetch the aggregated profiler results after the engine is run
.. code-block:: python
results = profiler.get_results()
"""
total_eh_time = sum(
[(self.event_handlers_times[e]).sum() for e in Events if e not in self.events_to_ignore]
) # type: Union[int, torch.Tensor]
event_handlers_stats = dict(
[
(str(e.name).replace(".", "_"), self._compute_basic_stats(self.event_handlers_times[e]))
for e in Events
if e not in self.events_to_ignore
]
+ [("total_time", total_eh_time)] # type: ignore[list-item]
)
return OrderedDict(
[
("processing_stats", self._compute_basic_stats(self.processing_times)),
("dataflow_stats", self._compute_basic_stats(self.dataflow_times)),
("event_handlers_stats", event_handlers_stats),
(
"event_handlers_names",
{str(e.name).replace(".", "_") + "_names": v for e, v in self.event_handlers_names.items()},
),
]
)
def write_results(self, output_path: str) -> None:
"""
Method to store the unaggregated profiling results to a csv file
Args:
output_path: file output path containing a filename
.. code-block:: python
profiler.write_results('path_to_dir/awesome_filename.csv')
Examples:
.. code-block:: text
-----------------------------------------------------------------
epoch iteration processing_stats dataflow_stats Event_STARTED ...
1.0 1.0 0.00003 0.252387 0.125676
1.0 2.0 0.00029 0.252342 0.125123
"""
try:
import pandas as pd
except ImportError:
raise RuntimeError("Need pandas to write results as files")
iters_per_epoch = self.total_num_iters // self.max_epochs
epochs = torch.arange(self.max_epochs, dtype=torch.float32).repeat_interleave(iters_per_epoch) + 1
iterations = torch.arange(self.total_num_iters, dtype=torch.float32) + 1
processing_stats = self.processing_times
dataflow_stats = self.dataflow_times
event_started = self.event_handlers_times[Events.STARTED].repeat_interleave(self.total_num_iters)
event_completed = self.event_handlers_times[Events.COMPLETED].repeat_interleave(self.total_num_iters)
event_epoch_started = self.event_handlers_times[Events.EPOCH_STARTED].repeat_interleave(iters_per_epoch)
event_epoch_completed = self.event_handlers_times[Events.EPOCH_COMPLETED].repeat_interleave(iters_per_epoch)
event_iter_started = self.event_handlers_times[Events.ITERATION_STARTED]
event_iter_completed = self.event_handlers_times[Events.ITERATION_COMPLETED]
event_batch_started = self.event_handlers_times[Events.GET_BATCH_STARTED]
event_batch_completed = self.event_handlers_times[Events.GET_BATCH_COMPLETED]
results_dump = torch.stack(
[
epochs,
iterations,
processing_stats,
dataflow_stats,
event_started,
event_completed,
event_epoch_started,
event_epoch_completed,
event_iter_started,
event_iter_completed,
event_batch_started,
event_batch_completed,
],
dim=1,
).numpy()
results_df = pd.DataFrame(
data=results_dump,
columns=[
"epoch",
"iteration",
"processing_stats",
"dataflow_stats",
"Event_STARTED",
"Event_COMPLETED",
"Event_EPOCH_STARTED",
"Event_EPOCH_COMPLETED",
"Event_ITERATION_STARTED",
"Event_ITERATION_COMPLETED",
"Event_GET_BATCH_STARTED",
"Event_GET_BATCH_COMPLETED",
],
)
results_df.to_csv(output_path, index=False)
@staticmethod
def print_results(results: Dict) -> str:
"""
Method to print the aggregated results from the profiler
Args:
results: the aggregated results from the profiler
.. code-block:: python
profiler.print_results(results)
Examples:
.. code-block:: text
----------------------------------------------------
| Time profiling stats (in seconds): |
----------------------------------------------------
total | min/index | max/index | mean | std
Processing function:
157.46292 | 0.01452/1501 | 0.26905/0 | 0.07730 | 0.01258
Dataflow:
6.11384 | 0.00008/1935 | 0.28461/1551 | 0.00300 | 0.02693
Event handlers:
2.82721
- Events.STARTED: []
0.00000
- Events.EPOCH_STARTED: []
0.00006 | 0.00000/0 | 0.00000/17 | 0.00000 | 0.00000
- Events.ITERATION_STARTED: ['PiecewiseLinear']
0.03482 | 0.00001/188 | 0.00018/679 | 0.00002 | 0.00001
- Events.ITERATION_COMPLETED: ['TerminateOnNan']
0.20037 | 0.00006/866 | 0.00089/1943 | 0.00010 | 0.00003
- Events.EPOCH_COMPLETED: ['empty_cuda_cache', 'training.<locals>.log_elapsed_time', ]
2.57860 | 0.11529/0 | 0.14977/13 | 0.12893 | 0.00790
- Events.COMPLETED: []
not yet triggered
"""
def to_str(v: Union[str, tuple]) -> str:
if isinstance(v, str):
return v
elif isinstance(v, tuple):
return f"{v[0]:.5f}/{v[1]}"
return f"{v:.5f}"
def odict_to_str(d: Mapping) -> str:
out = " | ".join([to_str(v) for v in d.values()])
return out
others = {
k: odict_to_str(v) if isinstance(v, OrderedDict) else v for k, v in results["event_handlers_stats"].items()
}
others.update(results["event_handlers_names"])
output_message = """
----------------------------------------------------
| Time profiling stats (in seconds): |
----------------------------------------------------
total | min/index | max/index | mean | std
Processing function:
{processing_stats}
Dataflow:
{dataflow_stats}
Event handlers:
{total_time:.5f}
- Events.STARTED: {STARTED_names}
{STARTED}
- Events.EPOCH_STARTED: {EPOCH_STARTED_names}
{EPOCH_STARTED}
- Events.ITERATION_STARTED: {ITERATION_STARTED_names}
{ITERATION_STARTED}
- Events.ITERATION_COMPLETED: {ITERATION_COMPLETED_names}
{ITERATION_COMPLETED}
- Events.EPOCH_COMPLETED: {EPOCH_COMPLETED_names}
{EPOCH_COMPLETED}
- Events.COMPLETED: {COMPLETED_names}
{COMPLETED}
""".format(
processing_stats=odict_to_str(results["processing_stats"]),
dataflow_stats=odict_to_str(results["dataflow_stats"]),
**others,
)
print(output_message)
return output_message
class HandlersTimeProfiler:
"""
HandlersTimeProfiler can be used to profile the handlers,
data loading and data processing times. Custom events are also
profiled by this profiler
Examples:
.. code-block:: python
from ignite.handlers import HandlersTimeProfiler
trainer = Engine(train_updater)
# Create an object of the profiler and attach an engine to it
profiler = HandlersTimeProfiler()
profiler.attach(trainer)
@trainer.on(Events.EPOCH_COMPLETED)
def log_intermediate_results():
profiler.print_results(profiler.get_results())
trainer.run(dataloader, max_epochs=3)
profiler.write_results('path_to_dir/time_profiling.csv')
.. versionadded:: 0.4.6
"""
EVENT_FILTER_THESHOLD_TIME = 0.0001
def __init__(self) -> None:
self._dataflow_timer = Timer()
self._processing_timer = Timer()
self._event_handlers_timer = Timer()
self.dataflow_times = [] # type: List[float]
self.processing_times = [] # type: List[float]
self.event_handlers_times = {} # type: Dict[EventEnum, Dict[str, List[float]]]
@staticmethod
def _get_callable_name(handler: Callable) -> str:
# get name of the callable handler
return getattr(handler, "__qualname__", handler.__class__.__name__)
def _create_wrapped_handler(self, handler: Callable, event: EventEnum) -> Callable:
@functools.wraps(handler)
def _timeit_handler(*args: Any, **kwargs: Any) -> None:
self._event_handlers_timer.reset()
handler(*args, **kwargs)
t = self._event_handlers_timer.value()
hname = self._get_callable_name(handler)
# filter profiled time if the handler was attached to event with event filter
if not hasattr(handler, "_parent") or t >= self.EVENT_FILTER_THESHOLD_TIME:
self.event_handlers_times[event][hname].append(t)
# required to revert back to original handler after profiling
setattr(_timeit_handler, "_profiler_original", handler)
return _timeit_handler
def _timeit_processing(self) -> None:
# handler used for profiling processing times
t = self._processing_timer.value()
self.processing_times.append(t)
def _timeit_dataflow(self) -> None:
# handler used for profiling dataflow times
t = self._dataflow_timer.value()
self.dataflow_times.append(t)
def _reset(self, event_handlers_names: Mapping[EventEnum, List[str]]) -> None:
# reset the variables used for profiling
self.dataflow_times = []
self.processing_times = []
self.event_handlers_times = {e: {h: [] for h in event_handlers_names[e]} for e in event_handlers_names}
@staticmethod
def _is_internal_handler(handler: Callable) -> bool:
# checks whether the handler is internal
return any(n in repr(handler) for n in ["HandlersTimeProfiler.", "Timer."])
def _detach_profiler_handlers(self, engine: Engine) -> None:
# reverts handlers to original handlers
for e in engine._event_handlers:
for i, (func, args, kwargs) in enumerate(engine._event_handlers[e]):
if hasattr(func, "_profiler_original"):
engine._event_handlers[e][i] = (func._profiler_original, args, kwargs)
def _as_first_started(self, engine: Engine) -> None:
# wraps original handlers for profiling
self.event_handlers_names = {
e: [
self._get_callable_name(h)
for (h, _, _) in engine._event_handlers[e]
if not self._is_internal_handler(h)
]
for e in engine._allowed_events
}
self._reset(self.event_handlers_names)
for e in engine._allowed_events:
for i, (func, args, kwargs) in enumerate(engine._event_handlers[e]):
if not self._is_internal_handler(func):
engine._event_handlers[e][i] = (self._create_wrapped_handler(func, e), args, kwargs)
# processing timer
engine.add_event_handler(Events.ITERATION_STARTED, self._processing_timer.reset)
engine._event_handlers[Events.ITERATION_COMPLETED].insert(0, (self._timeit_processing, (), {}))
# dataflow timer
engine.add_event_handler(Events.GET_BATCH_STARTED, self._dataflow_timer.reset)
engine._event_handlers[Events.GET_BATCH_COMPLETED].insert(0, (self._timeit_dataflow, (), {}))
# revert back the wrapped handlers with original handlers at the end
engine.add_event_handler(Events.COMPLETED, self._detach_profiler_handlers)
def attach(self, engine: Engine) -> None:
"""Attach HandlersTimeProfiler to the given engine.
Args:
engine: the instance of Engine to attach
"""
if not isinstance(engine, Engine):
raise TypeError(f"Argument engine should be ignite.engine.Engine, but given {type(engine)}")
if not engine.has_event_handler(self._as_first_started):
engine._event_handlers[Events.STARTED].insert(0, (self._as_first_started, (engine,), {}))
def get_results(self) -> List[List[Union[str, float]]]:
"""
Method to fetch the aggregated profiler results after the engine is run
.. code-block:: python
results = profiler.get_results()
"""
total_eh_time = sum(
[
sum(self.event_handlers_times[e][h])
for e in self.event_handlers_times
for h in self.event_handlers_times[e]
]
)
total_eh_time = round(float(total_eh_time), 5)
def compute_basic_stats(
times: Union[Sequence, torch.Tensor]
) -> List[Union[str, float, Tuple[Union[str, float], Union[str, float]]]]:
data = torch.as_tensor(times, dtype=torch.float32)
# compute on non-zero data:
data = data[data > 0]
total = round(torch.sum(data).item(), 5) if len(data) > 0 else "not triggered" # type: Union[str, float]
min_index = ("None", "None") # type: Tuple[Union[str, float], Union[str, float]]
max_index = ("None", "None") # type: Tuple[Union[str, float], Union[str, float]]
mean = "None" # type: Union[str, float]
std = "None" # type: Union[str, float]
if len(data) > 0:
min_index = (round(torch.min(data).item(), 5), torch.argmin(data).item())
max_index = (round(torch.max(data).item(), 5), torch.argmax(data).item())
mean = round(torch.mean(data).item(), 5)
if len(data) > 1:
std = round(torch.std(data).item(), 5)
return [total, min_index, max_index, mean, std]
event_handler_stats = [
[
h,
getattr(e, "name", str(e)),
*compute_basic_stats(torch.tensor(self.event_handlers_times[e][h], dtype=torch.float32)),
]
for e in self.event_handlers_times
for h in self.event_handlers_times[e]
]
event_handler_stats.append(["Total", "", total_eh_time, "", "", "", ""])
event_handler_stats.append(["Processing", "None", *compute_basic_stats(self.processing_times)])
event_handler_stats.append(["Dataflow", "None", *compute_basic_stats(self.dataflow_times)])
return event_handler_stats
def write_results(self, output_path: str) -> None:
"""
Method to store the unaggregated profiling results to a csv file
Args:
output_path: file output path containing a filename
.. code-block:: python
profiler.write_results('path_to_dir/awesome_filename.csv')
Examples:
.. code-block:: text
-----------------------------------------------------------------
# processing_stats dataflow_stats training.<locals>.log_elapsed_time (EPOCH_COMPLETED) ...
1 0.00003 0.252387 0.125676
2 0.00029 0.252342 0.125123
"""
try:
import pandas as pd
except ImportError:
raise RuntimeError("Need pandas to write results as files")
processing_stats = torch.tensor(self.processing_times, dtype=torch.float32)
dataflow_stats = torch.tensor(self.dataflow_times, dtype=torch.float32)
cols = [processing_stats, dataflow_stats]
headers = ["processing_stats", "dataflow_stats"]
for e in self.event_handlers_times:
for h in self.event_handlers_times[e]:
headers.append(f"{h} ({getattr(e, 'name', str(e))})")
cols.append(torch.tensor(self.event_handlers_times[e][h], dtype=torch.float32))
# Determine maximum length
max_len = max([x.numel() for x in cols])
count_col = torch.arange(max_len, dtype=torch.float32) + 1
cols.insert(0, count_col)
headers.insert(0, "#")
# pad all tensors to have same length
cols = [torch.nn.functional.pad(x, pad=(0, max_len - x.numel()), mode="constant", value=0) for x in cols]
results_dump = torch.stack(cols, dim=1).numpy()
results_df = pd.DataFrame(data=results_dump, columns=headers)
results_df.to_csv(output_path, index=False)
@staticmethod
def print_results(results: List[List[Union[str, float]]]) -> None:
"""
Method to print the aggregated results from the profiler
Args:
results: the aggregated results from the profiler
.. code-block:: python
profiler.print_results(results)
Examples:
.. code-block:: text
----------------------------------------- ----------------------- -------------- ...
Handler Event Name Total(s)
----------------------------------------- ----------------------- --------------
run.<locals>.log_training_results EPOCH_COMPLETED 19.43245
run.<locals>.log_validation_results EPOCH_COMPLETED 2.55271
run.<locals>.log_time EPOCH_COMPLETED 0.00049
run.<locals>.log_intermediate_results EPOCH_COMPLETED 0.00106
run.<locals>.log_training_loss ITERATION_COMPLETED 0.059
run.<locals>.log_time COMPLETED not triggered
----------------------------------------- ----------------------- --------------
Total 22.04571
----------------------------------------- ----------------------- --------------
Processing took total 11.29543s [min/index: 0.00393s/1875, max/index: 0.00784s/0,
mean: 0.00602s, std: 0.00034s]
Dataflow took total 16.24365s [min/index: 0.00533s/1874, max/index: 0.01129s/937,
mean: 0.00866s, std: 0.00113s]
"""
# adopted implementation of torch.autograd.profiler.build_table
handler_column_width = max([len(item[0]) for item in results]) + 4 # type: ignore[arg-type]
event_column_width = max([len(item[1]) for item in results]) + 4 # type: ignore[arg-type]
DEFAULT_COLUMN_WIDTH = 14
headers = [
"Handler",
"Event Name",
"Total(s)",
"Min(s)/IDX",
"Max(s)/IDX",
"Mean(s)",
"Std(s)",
]
# Have to use a list because nonlocal is Py3 only...
SPACING_SIZE = 2
row_format_lst = [""]
header_sep_lst = [""]
line_length_lst = [-SPACING_SIZE]
def add_column(padding: int, text_dir: str = ">") -> None:
row_format_lst[0] += "{: " + text_dir + str(padding) + "}" + (" " * SPACING_SIZE)
header_sep_lst[0] += "-" * padding + (" " * SPACING_SIZE)
line_length_lst[0] += padding + SPACING_SIZE
add_column(handler_column_width, text_dir="<")
add_column(event_column_width, text_dir="<")
for _ in headers[2:]:
add_column(DEFAULT_COLUMN_WIDTH)
row_format = row_format_lst[0]
header_sep = header_sep_lst[0]
result = []
def append(s: str) -> None:
result.append(s)
result.append("\n")
result.append("\n")
append(header_sep)
append(row_format.format(*headers))
append(header_sep)
for row in results[:-3]:
# format min/idx and max/idx
row[3] = "{}/{}".format(*row[3]) # type: ignore[misc]
row[4] = "{}/{}".format(*row[4]) # type: ignore[misc]
append(row_format.format(*row))
append(header_sep)
# print total handlers time row
append(row_format.format(*results[-3]))
append(header_sep)
summary_format = "{} took total {}s [min/index: {}, max/index: {}, mean: {}s, std: {}s]"
for row in results[-2:]:
row[3] = "{}s/{}".format(*row[3]) # type: ignore[misc]
row[4] = "{}s/{}".format(*row[4]) # type: ignore[misc]
del row[1]
append(summary_format.format(*row))
print("".join(result))
| [
"collections.OrderedDict",
"torch.as_tensor",
"torch.mean",
"torch.stack",
"torch.max",
"functools.wraps",
"torch.min",
"torch.tensor",
"torch.sum",
"torch.argmin",
"pandas.DataFrame",
"torch.zeros",
"ignite.handlers.timing.Timer",
"torch.std",
"typing.cast",
"torch.arange",
"torch.argmax"
] | [((1246, 1253), 'ignite.handlers.timing.Timer', 'Timer', ([], {}), '()\n', (1251, 1253), False, 'from ignite.handlers.timing import Timer\n'), ((1287, 1294), 'ignite.handlers.timing.Timer', 'Timer', ([], {}), '()\n', (1292, 1294), False, 'from ignite.handlers.timing import Timer\n'), ((1332, 1339), 'ignite.handlers.timing.Timer', 'Timer', ([], {}), '()\n', (1337, 1339), False, 'from ignite.handlers.timing import Timer\n'), ((1371, 1385), 'torch.zeros', 'torch.zeros', (['(1)'], {}), '(1)\n', (1382, 1385), False, 'import torch\n'), ((1418, 1432), 'torch.zeros', 'torch.zeros', (['(1)'], {}), '(1)\n', (1429, 1432), False, 'import torch\n'), ((2574, 2602), 'torch.zeros', 'torch.zeros', (['total_num_iters'], {}), '(total_num_iters)\n', (2585, 2602), False, 'import torch\n'), ((2635, 2663), 'torch.zeros', 'torch.zeros', (['total_num_iters'], {}), '(total_num_iters)\n', (2646, 2663), False, 'import torch\n'), ((3801, 3835), 'typing.cast', 'cast', (['int', 'engine.state.max_epochs'], {}), '(int, engine.state.max_epochs)\n', (3805, 3835), False, 'from typing import Any, Callable, Dict, List, Mapping, Sequence, Tuple, Union, cast\n'), ((9022, 9038), 'collections.OrderedDict', 'OrderedDict', (['out'], {}), '(out)\n', (9033, 9038), False, 'from collections import OrderedDict\n'), ((12801, 13114), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'results_dump', 'columns': "['epoch', 'iteration', 'processing_stats', 'dataflow_stats',\n 'Event_STARTED', 'Event_COMPLETED', 'Event_EPOCH_STARTED',\n 'Event_EPOCH_COMPLETED', 'Event_ITERATION_STARTED',\n 'Event_ITERATION_COMPLETED', 'Event_GET_BATCH_STARTED',\n 'Event_GET_BATCH_COMPLETED']"}), "(data=results_dump, columns=['epoch', 'iteration',\n 'processing_stats', 'dataflow_stats', 'Event_STARTED',\n 'Event_COMPLETED', 'Event_EPOCH_STARTED', 'Event_EPOCH_COMPLETED',\n 'Event_ITERATION_STARTED', 'Event_ITERATION_COMPLETED',\n 'Event_GET_BATCH_STARTED', 'Event_GET_BATCH_COMPLETED'])\n", (12813, 13114), True, 'import pandas as pd\n'), ((17461, 17468), 'ignite.handlers.timing.Timer', 'Timer', ([], {}), '()\n', (17466, 17468), False, 'from ignite.handlers.timing import Timer\n'), ((17502, 17509), 'ignite.handlers.timing.Timer', 'Timer', ([], {}), '()\n', (17507, 17509), False, 'from ignite.handlers.timing import Timer\n'), ((17547, 17554), 'ignite.handlers.timing.Timer', 'Timer', ([], {}), '()\n', (17552, 17554), False, 'from ignite.handlers.timing import Timer\n'), ((18044, 18068), 'functools.wraps', 'functools.wraps', (['handler'], {}), '(handler)\n', (18059, 18068), False, 'import functools\n'), ((25035, 25091), 'torch.tensor', 'torch.tensor', (['self.processing_times'], {'dtype': 'torch.float32'}), '(self.processing_times, dtype=torch.float32)\n', (25047, 25091), False, 'import torch\n'), ((25117, 25171), 'torch.tensor', 'torch.tensor', (['self.dataflow_times'], {'dtype': 'torch.float32'}), '(self.dataflow_times, dtype=torch.float32)\n', (25129, 25171), False, 'import torch\n'), ((25998, 26046), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'results_dump', 'columns': 'headers'}), '(data=results_dump, columns=headers)\n', (26010, 26046), True, 'import pandas as pd\n'), ((2730, 2744), 'torch.zeros', 'torch.zeros', (['(1)'], {}), '(1)\n', (2741, 2744), False, 'import torch\n'), ((2776, 2790), 'torch.zeros', 'torch.zeros', (['(1)'], {}), '(1)\n', (2787, 2790), False, 'import torch\n'), ((2826, 2849), 'torch.zeros', 'torch.zeros', (['num_epochs'], {}), '(num_epochs)\n', (2837, 2849), False, 'import torch\n'), ((2887, 2910), 'torch.zeros', 'torch.zeros', 
(['num_epochs'], {}), '(num_epochs)\n', (2898, 2910), False, 'import torch\n'), ((2950, 2978), 'torch.zeros', 'torch.zeros', (['total_num_iters'], {}), '(total_num_iters)\n', (2961, 2978), False, 'import torch\n'), ((3020, 3048), 'torch.zeros', 'torch.zeros', (['total_num_iters'], {}), '(total_num_iters)\n', (3031, 3048), False, 'import torch\n'), ((3090, 3118), 'torch.zeros', 'torch.zeros', (['total_num_iters'], {}), '(total_num_iters)\n', (3101, 3118), False, 'import torch\n'), ((3158, 3186), 'torch.zeros', 'torch.zeros', (['total_num_iters'], {}), '(total_num_iters)\n', (3169, 3186), False, 'import torch\n'), ((11332, 11387), 'torch.arange', 'torch.arange', (['self.total_num_iters'], {'dtype': 'torch.float32'}), '(self.total_num_iters, dtype=torch.float32)\n', (11344, 11387), False, 'import torch\n'), ((22514, 22557), 'torch.as_tensor', 'torch.as_tensor', (['times'], {'dtype': 'torch.float32'}), '(times, dtype=torch.float32)\n', (22529, 22557), False, 'import torch\n'), ((25646, 25688), 'torch.arange', 'torch.arange', (['max_len'], {'dtype': 'torch.float32'}), '(max_len, dtype=torch.float32)\n', (25658, 25688), False, 'import torch\n'), ((12292, 12540), 'torch.stack', 'torch.stack', (['[epochs, iterations, processing_stats, dataflow_stats, event_started,\n event_completed, event_epoch_started, event_epoch_completed,\n event_iter_started, event_iter_completed, event_batch_started,\n event_batch_completed]'], {'dim': '(1)'}), '([epochs, iterations, processing_stats, dataflow_stats,\n event_started, event_completed, event_epoch_started,\n event_epoch_completed, event_iter_started, event_iter_completed,\n event_batch_started, event_batch_completed], dim=1)\n', (12303, 12540), False, 'import torch\n'), ((25943, 25967), 'torch.stack', 'torch.stack', (['cols'], {'dim': '(1)'}), '(cols, dim=1)\n', (25954, 25967), False, 'import torch\n'), ((11221, 11271), 'torch.arange', 'torch.arange', (['self.max_epochs'], {'dtype': 'torch.float32'}), '(self.max_epochs, dtype=torch.float32)\n', (11233, 11271), False, 'import torch\n'), ((25473, 25539), 'torch.tensor', 'torch.tensor', (['self.event_handlers_times[e][h]'], {'dtype': 'torch.float32'}), '(self.event_handlers_times[e][h], dtype=torch.float32)\n', (25485, 25539), False, 'import torch\n'), ((23610, 23676), 'torch.tensor', 'torch.tensor', (['self.event_handlers_times[e][h]'], {'dtype': 'torch.float32'}), '(self.event_handlers_times[e][h], dtype=torch.float32)\n', (23622, 23676), False, 'import torch\n'), ((8522, 8537), 'torch.sum', 'torch.sum', (['data'], {}), '(data)\n', (8531, 8537), False, 'import torch\n'), ((8918, 8934), 'torch.mean', 'torch.mean', (['data'], {}), '(data)\n', (8928, 8934), False, 'import torch\n'), ((8968, 8983), 'torch.std', 'torch.std', (['data'], {}), '(data)\n', (8977, 8983), False, 'import torch\n'), ((22658, 22673), 'torch.sum', 'torch.sum', (['data'], {}), '(data)\n', (22667, 22673), False, 'import torch\n'), ((23136, 23154), 'torch.argmin', 'torch.argmin', (['data'], {}), '(data)\n', (23148, 23154), False, 'import torch\n'), ((23226, 23244), 'torch.argmax', 'torch.argmax', (['data'], {}), '(data)\n', (23238, 23244), False, 'import torch\n'), ((23282, 23298), 'torch.mean', 'torch.mean', (['data'], {}), '(data)\n', (23292, 23298), False, 'import torch\n'), ((8756, 8771), 'torch.min', 'torch.min', (['data'], {}), '(data)\n', (8765, 8771), False, 'import torch\n'), ((8780, 8798), 'torch.argmin', 'torch.argmin', (['data'], {}), '(data)\n', (8792, 8798), False, 'import torch\n'), ((8840, 8855), 'torch.max', 'torch.max', 
(['data'], {}), '(data)\n', (8849, 8855), False, 'import torch\n'), ((8864, 8882), 'torch.argmax', 'torch.argmax', (['data'], {}), '(data)\n', (8876, 8882), False, 'import torch\n'), ((23108, 23123), 'torch.min', 'torch.min', (['data'], {}), '(data)\n', (23117, 23123), False, 'import torch\n'), ((23198, 23213), 'torch.max', 'torch.max', (['data'], {}), '(data)\n', (23207, 23213), False, 'import torch\n'), ((23376, 23391), 'torch.std', 'torch.std', (['data'], {}), '(data)\n', (23385, 23391), False, 'import torch\n')] |
from __future__ import (division)
from pomegranate import *
from pomegranate.io import DataGenerator
from pomegranate.io import DataFrameGenerator
from nose.tools import with_setup
from nose.tools import assert_almost_equal
from nose.tools import assert_equal
from nose.tools import assert_not_equal
from nose.tools import assert_less_equal
from nose.tools import assert_raises
from nose.tools import assert_true
from numpy.testing import assert_array_almost_equal
import pandas
import random
import pickle
import numpy as np
nan = numpy.nan
def setup_multivariate_gaussian():
mu, cov = [0, 0, 0], numpy.eye(3)
d1 = MultivariateGaussianDistribution(mu, cov)
mu, cov = [2, 2, 2], numpy.eye(3)
d2 = MultivariateGaussianDistribution(mu, cov)
global model
model = BayesClassifier([d1, d2])
global X
X = numpy.array([[ 0.3, 0.5, 0.1],
[ 0.8, 1.4, 0.5],
[ 1.4, 2.6, 1.8],
[ 4.2, 3.3, 3.7],
[ 2.6, 3.6, 3.3],
[ 3.1, 2.2, 1.7],
[ 1.8, 2.2, 1.8],
[-1.2, -1.8, -1.5],
[-1.8, 0.3, 0.5],
[ 0.7, -1.3, -0.1]])
global y
y = [0, 0, 0, 1, 1, 1, 1, 0, 0, 0]
global X_nan
X_nan = numpy.array([[ 0.3, nan, 0.1],
[ nan, 1.4, nan],
[ 1.4, 2.6, nan],
[ nan, nan, nan],
[ nan, 3.6, 3.3],
[ 3.1, nan, 1.7],
[ nan, nan, 1.8],
[-1.2, -1.8, -1.5],
[ nan, 0.3, 0.5],
[ nan, -1.3, nan]])
def setup_multivariate_mixed():
mu, cov = [0, 0, 0], numpy.eye(3)
d1 = MultivariateGaussianDistribution(mu, cov)
d21 = ExponentialDistribution(5)
d22 = LogNormalDistribution(0.2, 0.8)
d23 = PoissonDistribution(3)
d2 = IndependentComponentsDistribution([d21, d22, d23])
global model
model = BayesClassifier([d1, d2])
global X
X = numpy.array([[ 0.3, 0.5, 0.1],
[ 0.8, 1.4, 0.5],
[ 1.4, 2.6, 1.8],
[ 4.2, 3.3, 3.7],
[ 2.6, 3.6, 3.3],
[ 3.1, 2.2, 1.7],
[ 1.8, 2.2, 1.8],
[ 1.2, 1.8, 1.5],
[ 1.8, 0.3, 0.5],
[ 0.7, 1.3, 0.1]])
global y
y = [0, 0, 0, 1, 1, 1, 1, 0, 0, 0]
global X_nan
X_nan = numpy.array([[ 0.3, nan, 0.1],
[ nan, 1.4, nan],
[ 1.4, 2.6, nan],
[ nan, nan, nan],
[ nan, 3.6, 3.3],
[ 3.1, nan, 1.7],
[ nan, nan, 1.8],
[ 1.2, 1.8, 1.5],
[ nan, 0.3, 0.5],
[ nan, 1.3, nan]])
def setup_hmm():
global model
global hmm1
global hmm2
global hmm3
rigged = State( DiscreteDistribution({ 'H': 0.8, 'T': 0.2 }) )
unrigged = State( DiscreteDistribution({ 'H': 0.5, 'T':0.5 }) )
hmm1 = HiddenMarkovModel()
hmm1.start = rigged
hmm1.add_transition(rigged, rigged, 1)
hmm1.bake()
hmm2 = HiddenMarkovModel()
hmm2.start = unrigged
hmm2.add_transition(unrigged, unrigged, 1)
hmm2.bake()
hmm3 = HiddenMarkovModel()
hmm3.add_transition(hmm3.start, unrigged, 0.5)
hmm3.add_transition(hmm3.start, rigged, 0.5)
hmm3.add_transition(rigged, rigged, 0.5)
hmm3.add_transition(rigged, unrigged, 0.5)
hmm3.add_transition(unrigged, rigged, 0.5)
hmm3.add_transition(unrigged, unrigged, 0.5)
hmm3.bake()
model = BayesClassifier([hmm1, hmm2, hmm3])
def setup_multivariate():
pass
def teardown():
pass
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_initialization():
assert_equal(model.d, 3)
assert_equal(model.n, 2)
assert_equal(model.is_vl_, False)
@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_initialization():
assert_equal(model.d, 3)
assert_equal(model.n, 2)
assert_equal(model.is_vl_, False)
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_predict_log_proba():
y_hat = model.predict_log_proba(X)
y = [[ -1.48842547e-02, -4.21488425e+00],
[ -4.37487950e-01, -1.03748795e+00],
[ -5.60369104e+00, -3.69104343e-03],
[ -1.64000001e+01, -7.54345812e-08],
[ -1.30000023e+01, -2.26032685e-06],
[ -8.00033541e+00, -3.35406373e-04],
[ -5.60369104e+00, -3.69104343e-03],
[ -3.05902274e-07, -1.50000003e+01],
[ -3.35406373e-04, -8.00033541e+00],
[ -6.11066022e-04, -7.40061107e+00]]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_predict_log_proba():
y_hat = model.predict_log_proba(X)
y = [[ -5.03107596e-01, -9.27980626e-01],
[ -1.86355320e-01, -1.77183117e+00],
[ -5.58542088e-01, -8.48731256e-01],
[ -7.67315597e-01, -6.24101927e-01],
[ -2.32860808e+00, -1.02510436e-01],
[ -3.06641866e-03, -5.78877778e+00],
[ -9.85292840e-02, -2.36626165e+00],
[ -2.61764180e-01, -1.46833995e+00],
[ -2.01640009e-03, -6.20744952e+00],
[ -1.47371167e-01, -1.98758175e+00]]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_nan_predict_log_proba():
y_hat = model.predict_log_proba(X_nan)
y = [[ -3.99533332e-02, -3.23995333e+00],
[ -1.17110067e+00, -3.71100666e-01],
[ -4.01814993e+00, -1.81499279e-02],
[ -6.93147181e-01, -6.93147181e-01],
[ -9.80005545e+00, -5.54500620e-05],
[ -5.60369104e+00, -3.69104343e-03],
[ -1.78390074e+00, -1.83900741e-01],
[ -3.05902274e-07, -1.50000003e+01],
[ -8.68361522e-02, -2.48683615e+00],
[ -1.00016521e-02, -4.61000165e+00]]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_nan_predict_log_proba():
y_hat = model.predict_log_proba(X_nan)
y = [[ -3.57980882e-01, -1.20093223e+00],
[ -1.20735130e+00, -3.55230506e-01],
[ -2.43174286e-01, -1.53310132e+00],
[ -6.93147181e-01, -6.93147181e-01],
[ -9.31781101e+00, -8.98143220e-05],
[ -6.29755079e-04, -7.37049444e+00],
[ -1.31307006e+00, -3.13332194e-01],
[ -2.61764180e-01, -1.46833995e+00],
[ -2.29725479e-01, -1.58353505e+00],
[ -1.17299253e+00, -3.70251760e-01]]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_predict_log_proba_parallel():
y_hat = model.predict_log_proba(X, n_jobs=2)
y = [[ -1.48842547e-02, -4.21488425e+00],
[ -4.37487950e-01, -1.03748795e+00],
[ -5.60369104e+00, -3.69104343e-03],
[ -1.64000001e+01, -7.54345812e-08],
[ -1.30000023e+01, -2.26032685e-06],
[ -8.00033541e+00, -3.35406373e-04],
[ -5.60369104e+00, -3.69104343e-03],
[ -3.05902274e-07, -1.50000003e+01],
[ -3.35406373e-04, -8.00033541e+00],
[ -6.11066022e-04, -7.40061107e+00]]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_predict_log_proba_parallel():
y_hat = model.predict_log_proba(X, n_jobs=2)
y = [[ -5.03107596e-01, -9.27980626e-01],
[ -1.86355320e-01, -1.77183117e+00],
[ -5.58542088e-01, -8.48731256e-01],
[ -7.67315597e-01, -6.24101927e-01],
[ -2.32860808e+00, -1.02510436e-01],
[ -3.06641866e-03, -5.78877778e+00],
[ -9.85292840e-02, -2.36626165e+00],
[ -2.61764180e-01, -1.46833995e+00],
[ -2.01640009e-03, -6.20744952e+00],
[ -1.47371167e-01, -1.98758175e+00]]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_predict_proba():
y_hat = model.predict_proba(X)
y = [[ 9.85225968e-01, 1.47740317e-02],
[ 6.45656306e-01, 3.54343694e-01],
[ 3.68423990e-03, 9.96315760e-01],
[ 7.54345778e-08, 9.99999925e-01],
[ 2.26032430e-06, 9.99997740e-01],
[ 3.35350130e-04, 9.99664650e-01],
[ 3.68423990e-03, 9.96315760e-01],
[ 9.99999694e-01, 3.05902227e-07],
[ 9.99664650e-01, 3.35350130e-04],
[ 9.99389121e-01, 6.10879359e-04]]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_predict_proba():
y_hat = model.predict_proba(X)
y = [[ 0.60464873, 0.39535127],
[ 0.82997863, 0.17002137],
[ 0.57204244, 0.42795756],
[ 0.46425765, 0.53574235],
[ 0.09743127, 0.90256873],
[ 0.99693828, 0.00306172],
[ 0.90616916, 0.09383084],
[ 0.76969251, 0.23030749],
[ 0.99798563, 0.00201437],
[ 0.86297361, 0.13702639]]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_nan_predict_proba():
y_hat = model.predict_proba(X_nan)
y = [[ 9.60834277e-01, 3.91657228e-02],
[ 3.10025519e-01, 6.89974481e-01],
[ 1.79862100e-02, 9.82013790e-01],
[ 5.00000000e-01, 5.00000000e-01],
[ 5.54485247e-05, 9.99944551e-01],
[ 3.68423990e-03, 9.96315760e-01],
[ 1.67981615e-01, 8.32018385e-01],
[ 9.99999694e-01, 3.05902227e-07],
[ 9.16827304e-01, 8.31726965e-02],
[ 9.90048198e-01, 9.95180187e-03]]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_nan_predict_proba():
y_hat = model.predict_proba(X_nan)
y = [[ 6.99086440e-01, 3.00913560e-01],
[ 2.98988163e-01, 7.01011837e-01],
[ 7.84134838e-01, 2.15865162e-01],
[ 5.00000000e-01, 5.00000000e-01],
[ 8.98102888e-05, 9.99910190e-01],
[ 9.99370443e-01, 6.29556825e-04],
[ 2.68992964e-01, 7.31007036e-01],
[ 7.69692511e-01, 2.30307489e-01],
[ 7.94751748e-01, 2.05248252e-01],
[ 3.09439547e-01, 6.90560453e-01]]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_predict_proba_parallel():
y_hat = model.predict_proba(X, n_jobs=2)
y = [[ 9.85225968e-01, 1.47740317e-02],
[ 6.45656306e-01, 3.54343694e-01],
[ 3.68423990e-03, 9.96315760e-01],
[ 7.54345778e-08, 9.99999925e-01],
[ 2.26032430e-06, 9.99997740e-01],
[ 3.35350130e-04, 9.99664650e-01],
[ 3.68423990e-03, 9.96315760e-01],
[ 9.99999694e-01, 3.05902227e-07],
[ 9.99664650e-01, 3.35350130e-04],
[ 9.99389121e-01, 6.10879359e-04]]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_predict_proba_parallel():
y_hat = model.predict_proba(X, n_jobs=2)
y = [[ 0.60464873, 0.39535127],
[ 0.82997863, 0.17002137],
[ 0.57204244, 0.42795756],
[ 0.46425765, 0.53574235],
[ 0.09743127, 0.90256873],
[ 0.99693828, 0.00306172],
[ 0.90616916, 0.09383084],
[ 0.76969251, 0.23030749],
[ 0.99798563, 0.00201437],
[ 0.86297361, 0.13702639]]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_predict():
y_hat = model.predict(X)
y = [0, 0, 1, 1, 1, 1, 1, 0, 0, 0]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_predict():
y_hat = model.predict(X)
y = [0, 0, 0, 1, 1, 0, 0, 0, 0, 0]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_nan_predict():
y_hat = model.predict(X_nan)
y = [0, 1, 1, 0, 1, 1, 1, 0, 0, 0]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_nan_predict():
y_hat = model.predict(X_nan)
y = [0, 1, 0, 0, 1, 0, 1, 0, 0, 1]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_predict_parallel():
y_hat = model.predict(X, n_jobs=2)
y = [0, 0, 1, 1, 1, 1, 1, 0, 0, 0]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_predict_parallel():
y_hat = model.predict(X, n_jobs=2)
y = [0, 0, 0, 1, 1, 0, 0, 0, 0, 0]
assert_array_almost_equal(y, y_hat)
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_fit_parallel():
model.fit(X, y, n_jobs=2)
mu1 = model.distributions[0].parameters[0]
cov1 = model.distributions[0].parameters[1]
mu1_t = [0.03333333, 0.28333333, 0.21666666]
cov1_t = [[1.3088888, 0.9272222, 0.6227777],
[0.9272222, 2.2513888, 1.3402777],
[0.6227777, 1.3402777, 0.9547222]]
mu2 = model.distributions[1].parameters[0]
cov2 = model.distributions[1].parameters[1]
mu2_t = [2.925, 2.825, 2.625]
cov2_t = [[0.75687499, 0.23687499, 0.4793750],
[0.23687499, 0.40187499, 0.5318749],
[0.47937500, 0.53187499, 0.7868750]]
assert_array_almost_equal(mu1, mu1_t)
assert_array_almost_equal(cov1, cov1_t)
assert_array_almost_equal(mu2, mu2_t)
assert_array_almost_equal(cov2, cov2_t)
@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_fit_parallel():
model.fit(X, y, n_jobs=2)
mu1 = model.distributions[0].parameters[0]
cov1 = model.distributions[0].parameters[1]
mu1_t = [1.033333, 1.3166667, 0.75]
cov1_t = [[0.242222, 0.0594444, 0.178333],
[0.059444, 0.5980555, 0.414166],
[0.178333, 0.4141666, 0.439166]]
d21 = model.distributions[1].distributions[0]
d22 = model.distributions[1].distributions[1]
d23 = model.distributions[1].distributions[2]
assert_array_almost_equal(mu1, mu1_t)
assert_array_almost_equal(cov1, cov1_t)
assert_array_almost_equal(d21.parameters, [0.34188034])
assert_array_almost_equal(d22.parameters, [1.01294275, 0.22658346])
assert_array_almost_equal(d23.parameters, [2.625])
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_from_samples():
model = BayesClassifier.from_samples(MultivariateGaussianDistribution, X, y)
mu1 = model.distributions[0].parameters[0]
cov1 = model.distributions[0].parameters[1]
mu1_t = [0.03333333, 0.2833333, 0.21666666]
cov1_t = [[1.308888888, 0.9272222222, 0.6227777777],
[0.927222222, 2.251388888, 1.340277777],
[0.622777777, 1.340277777, 0.9547222222]]
mu2 = model.distributions[1].parameters[0]
cov2 = model.distributions[1].parameters[1]
mu2_t = [2.925, 2.825, 2.625]
cov2_t = [[0.75687500, 0.23687499, 0.47937500],
[0.23687499, 0.40187499, 0.53187499],
[0.47937500, 0.53187499, 0.78687500]]
assert_array_almost_equal(mu1, mu1_t)
assert_array_almost_equal(cov1, cov1_t)
assert_array_almost_equal(mu2, mu2_t)
assert_array_almost_equal(cov2, cov2_t)
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_pickle():
model2 = pickle.loads(pickle.dumps(model))
assert_true(isinstance(model2, BayesClassifier))
assert_true(isinstance(model2.distributions[0], MultivariateGaussianDistribution))
assert_true(isinstance(model2.distributions[1], MultivariateGaussianDistribution))
assert_array_almost_equal(model.weights, model2.weights)
@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_pickle():
model2 = pickle.loads(pickle.dumps(model))
assert_true(isinstance(model2, BayesClassifier))
assert_true(isinstance(model2.distributions[0], MultivariateGaussianDistribution))
assert_true(isinstance(model2.distributions[1], IndependentComponentsDistribution))
assert_array_almost_equal(model.weights, model2.weights)
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_to_json():
model2 = BayesClassifier.from_json(model.to_json())
assert_true(isinstance(model2, BayesClassifier))
assert_true(isinstance(model2.distributions[0], MultivariateGaussianDistribution))
assert_true(isinstance(model2.distributions[1], MultivariateGaussianDistribution))
assert_array_almost_equal(model.weights, model2.weights)
@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_to_json():
model2 = BayesClassifier.from_json(model.to_json())
assert_true(isinstance(model2, BayesClassifier))
assert_true(isinstance(model2.distributions[0], MultivariateGaussianDistribution))
assert_true(isinstance(model2.distributions[1], IndependentComponentsDistribution))
assert_array_almost_equal(model.weights, model2.weights)
@with_setup(setup_multivariate_gaussian, teardown)
def test_bc_multivariate_gaussian_robust_from_json():
model2 = from_json(model.to_json())
assert_true(isinstance(model2, BayesClassifier))
assert_true(isinstance(model2.distributions[0], MultivariateGaussianDistribution))
assert_true(isinstance(model2.distributions[1], MultivariateGaussianDistribution))
assert_array_almost_equal(model.weights, model2.weights)
@with_setup(setup_multivariate_mixed, teardown)
def test_bc_multivariate_mixed_robust_from_json():
model2 = from_json(model.to_json())
assert_true(isinstance(model2, BayesClassifier))
assert_true(isinstance(model2.distributions[0], MultivariateGaussianDistribution))
assert_true(isinstance(model2.distributions[1], IndependentComponentsDistribution))
assert_array_almost_equal(model.weights, model2.weights)
@with_setup(setup_hmm, teardown)
def test_model():
assert_almost_equal(hmm1.log_probability(list('H')), -0.2231435513142097 )
assert_almost_equal(hmm1.log_probability(list('T')), -1.6094379124341003 )
assert_almost_equal(hmm1.log_probability(list('HHHH')), -0.8925742052568388 )
assert_almost_equal(hmm1.log_probability(list('THHH')), -2.2788685663767296 )
assert_almost_equal(hmm1.log_probability(list('TTTT')), -6.437751649736401 )
assert_almost_equal(hmm2.log_probability(list('H')), -0.6931471805599453 )
assert_almost_equal(hmm2.log_probability(list('T')), -0.6931471805599453 )
assert_almost_equal(hmm2.log_probability(list('HHHH')), -2.772588722239781 )
assert_almost_equal(hmm2.log_probability(list('THHH')), -2.772588722239781 )
assert_almost_equal(hmm2.log_probability(list('TTTT')), -2.772588722239781 )
assert_almost_equal(hmm3.log_probability(list('H')), -0.43078291609245417)
assert_almost_equal(hmm3.log_probability(list('T')), -1.0498221244986776)
assert_almost_equal(hmm3.log_probability(list('HHHH')), -1.7231316643698167)
assert_almost_equal(hmm3.log_probability(list('THHH')), -2.3421708727760397)
assert_almost_equal(hmm3.log_probability(list('TTTT')), -4.1992884979947105)
assert_almost_equal(hmm3.log_probability(list('THTHTHTHTHTH')), -8.883630243546788)
assert_almost_equal(hmm3.log_probability(list('THTHHHHHTHTH')), -7.645551826734343)
assert_equal(model.d, 1)
@with_setup(setup_hmm, teardown)
def test_hmm_log_proba():
logs = model.predict_log_proba(np.array([list('H'), list('THHH'), list('TTTT'), list('THTHTHTHTHTH'), list('THTHHHHHTHTH')]))
assert_almost_equal(logs[0][0], -0.89097292388986515)
assert_almost_equal(logs[0][1], -1.3609765531356006)
assert_almost_equal(logs[0][2], -1.0986122886681096)
assert_almost_equal(logs[1][0], -0.93570553121744293)
assert_almost_equal(logs[1][1], -1.429425687080494)
assert_almost_equal(logs[1][2], -0.9990078376167526)
assert_almost_equal(logs[2][0], -3.9007882563128864)
assert_almost_equal(logs[2][1], -0.23562532881626597)
assert_almost_equal(logs[2][2], -1.6623251045711958)
assert_almost_equal(logs[3][0], -3.1703366478831185)
assert_almost_equal(logs[3][1], -0.49261403211260379)
assert_almost_equal(logs[3][2], -1.058478108940049)
assert_almost_equal(logs[4][0], -1.3058441172130273)
assert_almost_equal(logs[4][1], -1.4007102236822906)
assert_almost_equal(logs[4][2], -0.7284958836972919)
@with_setup(setup_hmm, teardown)
def test_hmm_proba():
probs = model.predict_proba(np.array([list('H'), list('THHH'), list('TTTT'), list('THTHTHTHTHTH'), list('THTHHHHHTHTH')]))
assert_almost_equal(probs[0][0], 0.41025641025641024)
assert_almost_equal(probs[0][1], 0.25641025641025639)
assert_almost_equal(probs[0][2], 0.33333333333333331)
assert_almost_equal(probs[1][0], 0.39230898163446098)
assert_almost_equal(probs[1][1], 0.23944639992337707)
assert_almost_equal(probs[1][2], 0.36824461844216183)
assert_almost_equal(probs[2][0], 0.020225961918306088)
assert_almost_equal(probs[2][1], 0.79007663743383105)
assert_almost_equal(probs[2][2], 0.18969740064786292)
assert_almost_equal(probs[3][0], 0.041989459861032523)
assert_almost_equal(probs[3][1], 0.61102706038265642)
assert_almost_equal(probs[3][2], 0.346983479756311)
assert_almost_equal(probs[4][0], 0.27094373022369794)
assert_almost_equal(probs[4][1], 0.24642188711704707)
assert_almost_equal(probs[4][2], 0.48263438265925512)
@with_setup(setup_hmm, teardown)
def test_hmm_prediction():
predicts = model.predict(np.array([list('H'), list('THHH'), list('TTTT'), list('THTHTHTHTHTH'), list('THTHHHHHTHTH')]))
assert_equal(predicts[0], 0)
assert_equal(predicts[1], 0)
assert_equal(predicts[2], 1)
assert_equal(predicts[3], 1)
assert_equal(predicts[4], 2)
@with_setup(setup_multivariate_gaussian, teardown)
def test_io_log_probability():
X2 = DataGenerator(X)
X3 = DataFrameGenerator(pandas.DataFrame(X))
logp1 = model.log_probability(X)
logp2 = model.log_probability(X2)
logp3 = model.log_probability(X3)
assert_array_almost_equal(logp1, logp2)
assert_array_almost_equal(logp1, logp3)
@with_setup(setup_multivariate_gaussian, teardown)
def test_io_predict():
X2 = DataGenerator(X)
X3 = DataFrameGenerator(pandas.DataFrame(X))
y_hat1 = model.predict(X)
y_hat2 = model.predict(X2)
y_hat3 = model.predict(X3)
assert_array_almost_equal(y_hat1, y_hat2)
assert_array_almost_equal(y_hat1, y_hat3)
@with_setup(setup_multivariate_gaussian, teardown)
def test_io_predict_proba():
X2 = DataGenerator(X)
X3 = DataFrameGenerator(pandas.DataFrame(X))
y_hat1 = model.predict_proba(X)
y_hat2 = model.predict_proba(X2)
y_hat3 = model.predict_proba(X3)
assert_array_almost_equal(y_hat1, y_hat2)
assert_array_almost_equal(y_hat1, y_hat3)
@with_setup(setup_multivariate_gaussian, teardown)
def test_io_predict_log_proba():
X2 = DataGenerator(X)
X3 = DataFrameGenerator(pandas.DataFrame(X))
y_hat1 = model.predict_log_proba(X)
y_hat2 = model.predict_log_proba(X2)
y_hat3 = model.predict_log_proba(X3)
assert_array_almost_equal(y_hat1, y_hat2)
assert_array_almost_equal(y_hat1, y_hat3)
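# Fitting from a DataGenerator should give the same model as fitting from raw arrays with explicit weights.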
def test_io_fit():
X = numpy.random.randn(100, 5) + 0.5
weights = numpy.abs(numpy.random.randn(100))
y = numpy.random.randint(2, size=100)
data_generator = DataGenerator(X, weights, y)
mu1 = numpy.array([0, 0, 0, 0, 0])
mu2 = numpy.array([1, 1, 1, 1, 1])
cov = numpy.eye(5)
d1 = MultivariateGaussianDistribution(mu1, cov)
d2 = MultivariateGaussianDistribution(mu2, cov)
bc1 = BayesClassifier([d1, d2])
bc1.fit(X, y, weights)
d1 = MultivariateGaussianDistribution(mu1, cov)
d2 = MultivariateGaussianDistribution(mu2, cov)
bc2 = BayesClassifier([d1, d2])
bc2.fit(data_generator)
logp1 = bc1.log_probability(X)
logp2 = bc2.log_probability(X)
assert_array_almost_equal(logp1, logp2)
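# from_samples should learn the same model whether given raw arrays or a DataGenerator.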
def test_io_from_samples():
X = numpy.random.randn(100, 5) + 0.5
weights = numpy.abs(numpy.random.randn(100))
y = numpy.random.randint(2, size=100)
data_generator = DataGenerator(X, weights, y)
d = MultivariateGaussianDistribution
bc1 = BayesClassifier.from_samples(d, X=X, y=y, weights=weights)
bc2 = BayesClassifier.from_samples(d, X=data_generator)
logp1 = bc1.log_probability(X)
logp2 = bc2.log_probability(X)
assert_array_almost_equal(logp1, logp2) | [
"numpy.testing.assert_array_almost_equal",
"nose.tools.assert_almost_equal",
"nose.tools.with_setup",
"pomegranate.io.DataGenerator",
"pickle.dumps",
"pandas.DataFrame",
"nose.tools.assert_equal"
] | [((3258, 3307), 'nose.tools.with_setup', 'with_setup', (['setup_multivariate_gaussian', 'teardown'], {}), '(setup_multivariate_gaussian, teardown)\n', (3268, 3307), False, 'from nose.tools import with_setup\n'), ((3449, 3495), 'nose.tools.with_setup', 'with_setup', (['setup_multivariate_mixed', 'teardown'], {}), '(setup_multivariate_mixed, teardown)\n', (3459, 3495), False, 'from nose.tools import with_setup\n'), ((3635, 3684), 'nose.tools.with_setup', 'with_setup', (['setup_multivariate_gaussian', 'teardown'], {}), '(setup_multivariate_gaussian, teardown)\n', (3645, 3684), False, 'from nose.tools import with_setup\n'), ((4230, 4276), 'nose.tools.with_setup', 'with_setup', (['setup_multivariate_mixed', 'teardown'], {}), '(setup_multivariate_mixed, teardown)\n', (4240, 4276), False, 'from nose.tools import with_setup\n'), ((4819, 4868), 'nose.tools.with_setup', 'with_setup', (['setup_multivariate_gaussian', 'teardown'], {}), '(setup_multivariate_gaussian, teardown)\n', (4829, 4868), False, 'from nose.tools import with_setup\n'), ((5422, 5468), 'nose.tools.with_setup', 'with_setup', (['setup_multivariate_mixed', 'teardown'], {}), '(setup_multivariate_mixed, teardown)\n', (5432, 5468), False, 'from nose.tools import with_setup\n'), ((6019, 6068), 'nose.tools.with_setup', 'with_setup', (['setup_multivariate_gaussian', 'teardown'], {}), '(setup_multivariate_gaussian, teardown)\n', (6029, 6068), False, 'from nose.tools import with_setup\n'), ((6633, 6679), 'nose.tools.with_setup', 'with_setup', (['setup_multivariate_mixed', 'teardown'], {}), '(setup_multivariate_mixed, teardown)\n', (6643, 6679), False, 'from nose.tools import with_setup\n'), ((7241, 7290), 'nose.tools.with_setup', 'with_setup', (['setup_multivariate_gaussian', 'teardown'], {}), '(setup_multivariate_gaussian, teardown)\n', (7251, 7290), False, 'from nose.tools import with_setup\n'), ((7828, 7874), 'nose.tools.with_setup', 'with_setup', (['setup_multivariate_mixed', 'teardown'], {}), '(setup_multivariate_mixed, teardown)\n', (7838, 7874), False, 'from nose.tools import with_setup\n'), ((8309, 8358), 'nose.tools.with_setup', 'with_setup', (['setup_multivariate_gaussian', 'teardown'], {}), '(setup_multivariate_gaussian, teardown)\n', (8319, 8358), False, 'from nose.tools import with_setup\n'), ((8904, 8950), 'nose.tools.with_setup', 'with_setup', (['setup_multivariate_mixed', 'teardown'], {}), '(setup_multivariate_mixed, teardown)\n', (8914, 8950), False, 'from nose.tools import with_setup\n'), ((9493, 9542), 'nose.tools.with_setup', 'with_setup', (['setup_multivariate_gaussian', 'teardown'], {}), '(setup_multivariate_gaussian, teardown)\n', (9503, 9542), False, 'from nose.tools import with_setup\n'), ((10099, 10145), 'nose.tools.with_setup', 'with_setup', (['setup_multivariate_mixed', 'teardown'], {}), '(setup_multivariate_mixed, teardown)\n', (10109, 10145), False, 'from nose.tools import with_setup\n'), ((10599, 10648), 'nose.tools.with_setup', 'with_setup', (['setup_multivariate_gaussian', 'teardown'], {}), '(setup_multivariate_gaussian, teardown)\n', (10609, 10648), False, 'from nose.tools import with_setup\n'), ((10797, 10843), 'nose.tools.with_setup', 'with_setup', (['setup_multivariate_mixed', 'teardown'], {}), '(setup_multivariate_mixed, teardown)\n', (10807, 10843), False, 'from nose.tools import with_setup\n'), ((10989, 11038), 'nose.tools.with_setup', 'with_setup', (['setup_multivariate_gaussian', 'teardown'], {}), '(setup_multivariate_gaussian, teardown)\n', (10999, 11038), False, 'from nose.tools import 
with_setup\n'), ((11195, 11241), 'nose.tools.with_setup', 'with_setup', (['setup_multivariate_mixed', 'teardown'], {}), '(setup_multivariate_mixed, teardown)\n', (11205, 11241), False, 'from nose.tools import with_setup\n'), ((11395, 11444), 'nose.tools.with_setup', 'with_setup', (['setup_multivariate_gaussian', 'teardown'], {}), '(setup_multivariate_gaussian, teardown)\n', (11405, 11444), False, 'from nose.tools import with_setup\n'), ((11612, 11658), 'nose.tools.with_setup', 'with_setup', (['setup_multivariate_mixed', 'teardown'], {}), '(setup_multivariate_mixed, teardown)\n', (11622, 11658), False, 'from nose.tools import with_setup\n'), ((11823, 11872), 'nose.tools.with_setup', 'with_setup', (['setup_multivariate_gaussian', 'teardown'], {}), '(setup_multivariate_gaussian, teardown)\n', (11833, 11872), False, 'from nose.tools import with_setup\n'), ((12630, 12676), 'nose.tools.with_setup', 'with_setup', (['setup_multivariate_mixed', 'teardown'], {}), '(setup_multivariate_mixed, teardown)\n', (12640, 12676), False, 'from nose.tools import with_setup\n'), ((13402, 13451), 'nose.tools.with_setup', 'with_setup', (['setup_multivariate_gaussian', 'teardown'], {}), '(setup_multivariate_gaussian, teardown)\n', (13412, 13451), False, 'from nose.tools import with_setup\n'), ((14282, 14331), 'nose.tools.with_setup', 'with_setup', (['setup_multivariate_gaussian', 'teardown'], {}), '(setup_multivariate_gaussian, teardown)\n', (14292, 14331), False, 'from nose.tools import with_setup\n'), ((14700, 14746), 'nose.tools.with_setup', 'with_setup', (['setup_multivariate_mixed', 'teardown'], {}), '(setup_multivariate_mixed, teardown)\n', (14710, 14746), False, 'from nose.tools import with_setup\n'), ((15113, 15162), 'nose.tools.with_setup', 'with_setup', (['setup_multivariate_gaussian', 'teardown'], {}), '(setup_multivariate_gaussian, teardown)\n', (15123, 15162), False, 'from nose.tools import with_setup\n'), ((15541, 15587), 'nose.tools.with_setup', 'with_setup', (['setup_multivariate_mixed', 'teardown'], {}), '(setup_multivariate_mixed, teardown)\n', (15551, 15587), False, 'from nose.tools import with_setup\n'), ((15964, 16013), 'nose.tools.with_setup', 'with_setup', (['setup_multivariate_gaussian', 'teardown'], {}), '(setup_multivariate_gaussian, teardown)\n', (15974, 16013), False, 'from nose.tools import with_setup\n'), ((16385, 16431), 'nose.tools.with_setup', 'with_setup', (['setup_multivariate_mixed', 'teardown'], {}), '(setup_multivariate_mixed, teardown)\n', (16395, 16431), False, 'from nose.tools import with_setup\n'), ((16801, 16832), 'nose.tools.with_setup', 'with_setup', (['setup_hmm', 'teardown'], {}), '(setup_hmm, teardown)\n', (16811, 16832), False, 'from nose.tools import with_setup\n'), ((18212, 18243), 'nose.tools.with_setup', 'with_setup', (['setup_hmm', 'teardown'], {}), '(setup_hmm, teardown)\n', (18222, 18243), False, 'from nose.tools import with_setup\n'), ((19217, 19248), 'nose.tools.with_setup', 'with_setup', (['setup_hmm', 'teardown'], {}), '(setup_hmm, teardown)\n', (19227, 19248), False, 'from nose.tools import with_setup\n'), ((20228, 20259), 'nose.tools.with_setup', 'with_setup', (['setup_hmm', 'teardown'], {}), '(setup_hmm, teardown)\n', (20238, 20259), False, 'from nose.tools import with_setup\n'), ((20561, 20610), 'nose.tools.with_setup', 'with_setup', (['setup_multivariate_gaussian', 'teardown'], {}), '(setup_multivariate_gaussian, teardown)\n', (20571, 20610), False, 'from nose.tools import with_setup\n'), ((20901, 20950), 'nose.tools.with_setup', 'with_setup', 
(['setup_multivariate_gaussian', 'teardown'], {}), '(setup_multivariate_gaussian, teardown)\n', (20911, 20950), False, 'from nose.tools import with_setup\n'), ((21216, 21265), 'nose.tools.with_setup', 'with_setup', (['setup_multivariate_gaussian', 'teardown'], {}), '(setup_multivariate_gaussian, teardown)\n', (21226, 21265), False, 'from nose.tools import with_setup\n'), ((21555, 21604), 'nose.tools.with_setup', 'with_setup', (['setup_multivariate_gaussian', 'teardown'], {}), '(setup_multivariate_gaussian, teardown)\n', (21565, 21604), False, 'from nose.tools import with_setup\n'), ((3361, 3385), 'nose.tools.assert_equal', 'assert_equal', (['model.d', '(3)'], {}), '(model.d, 3)\n', (3373, 3385), False, 'from nose.tools import assert_equal\n'), ((3387, 3411), 'nose.tools.assert_equal', 'assert_equal', (['model.n', '(2)'], {}), '(model.n, 2)\n', (3399, 3411), False, 'from nose.tools import assert_equal\n'), ((3413, 3446), 'nose.tools.assert_equal', 'assert_equal', (['model.is_vl_', '(False)'], {}), '(model.is_vl_, False)\n', (3425, 3446), False, 'from nose.tools import assert_equal\n'), ((3546, 3570), 'nose.tools.assert_equal', 'assert_equal', (['model.d', '(3)'], {}), '(model.d, 3)\n', (3558, 3570), False, 'from nose.tools import assert_equal\n'), ((3572, 3596), 'nose.tools.assert_equal', 'assert_equal', (['model.n', '(2)'], {}), '(model.n, 2)\n', (3584, 3596), False, 'from nose.tools import assert_equal\n'), ((3598, 3631), 'nose.tools.assert_equal', 'assert_equal', (['model.is_vl_', '(False)'], {}), '(model.is_vl_, False)\n', (3610, 3631), False, 'from nose.tools import assert_equal\n'), ((4191, 4226), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['y', 'y_hat'], {}), '(y, y_hat)\n', (4216, 4226), False, 'from numpy.testing import assert_array_almost_equal\n'), ((4780, 4815), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['y', 'y_hat'], {}), '(y, y_hat)\n', (4805, 4815), False, 'from numpy.testing import assert_array_almost_equal\n'), ((5383, 5418), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['y', 'y_hat'], {}), '(y, y_hat)\n', (5408, 5418), False, 'from numpy.testing import assert_array_almost_equal\n'), ((5980, 6015), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['y', 'y_hat'], {}), '(y, y_hat)\n', (6005, 6015), False, 'from numpy.testing import assert_array_almost_equal\n'), ((6594, 6629), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['y', 'y_hat'], {}), '(y, y_hat)\n', (6619, 6629), False, 'from numpy.testing import assert_array_almost_equal\n'), ((7202, 7237), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['y', 'y_hat'], {}), '(y, y_hat)\n', (7227, 7237), False, 'from numpy.testing import assert_array_almost_equal\n'), ((7789, 7824), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['y', 'y_hat'], {}), '(y, y_hat)\n', (7814, 7824), False, 'from numpy.testing import assert_array_almost_equal\n'), ((8270, 8305), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['y', 'y_hat'], {}), '(y, y_hat)\n', (8295, 8305), False, 'from numpy.testing import assert_array_almost_equal\n'), ((8865, 8900), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['y', 'y_hat'], {}), '(y, y_hat)\n', (8890, 8900), False, 'from numpy.testing import assert_array_almost_equal\n'), ((9454, 9489), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['y', 'y_hat'], 
{}), '(y, y_hat)\n', (9479, 9489), False, 'from numpy.testing import assert_array_almost_equal\n'), ((10060, 10095), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['y', 'y_hat'], {}), '(y, y_hat)\n', (10085, 10095), False, 'from numpy.testing import assert_array_almost_equal\n'), ((10560, 10595), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['y', 'y_hat'], {}), '(y, y_hat)\n', (10585, 10595), False, 'from numpy.testing import assert_array_almost_equal\n'), ((10758, 10793), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['y', 'y_hat'], {}), '(y, y_hat)\n', (10783, 10793), False, 'from numpy.testing import assert_array_almost_equal\n'), ((10950, 10985), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['y', 'y_hat'], {}), '(y, y_hat)\n', (10975, 10985), False, 'from numpy.testing import assert_array_almost_equal\n'), ((11156, 11191), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['y', 'y_hat'], {}), '(y, y_hat)\n', (11181, 11191), False, 'from numpy.testing import assert_array_almost_equal\n'), ((11356, 11391), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['y', 'y_hat'], {}), '(y, y_hat)\n', (11381, 11391), False, 'from numpy.testing import assert_array_almost_equal\n'), ((11573, 11608), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['y', 'y_hat'], {}), '(y, y_hat)\n', (11598, 11608), False, 'from numpy.testing import assert_array_almost_equal\n'), ((11784, 11819), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['y', 'y_hat'], {}), '(y, y_hat)\n', (11809, 11819), False, 'from numpy.testing import assert_array_almost_equal\n'), ((12468, 12505), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['mu1', 'mu1_t'], {}), '(mu1, mu1_t)\n', (12493, 12505), False, 'from numpy.testing import assert_array_almost_equal\n'), ((12507, 12546), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['cov1', 'cov1_t'], {}), '(cov1, cov1_t)\n', (12532, 12546), False, 'from numpy.testing import assert_array_almost_equal\n'), ((12548, 12585), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['mu2', 'mu2_t'], {}), '(mu2, mu2_t)\n', (12573, 12585), False, 'from numpy.testing import assert_array_almost_equal\n'), ((12587, 12626), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['cov2', 'cov2_t'], {}), '(cov2, cov2_t)\n', (12612, 12626), False, 'from numpy.testing import assert_array_almost_equal\n'), ((13142, 13179), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['mu1', 'mu1_t'], {}), '(mu1, mu1_t)\n', (13167, 13179), False, 'from numpy.testing import assert_array_almost_equal\n'), ((13181, 13220), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['cov1', 'cov1_t'], {}), '(cov1, cov1_t)\n', (13206, 13220), False, 'from numpy.testing import assert_array_almost_equal\n'), ((13222, 13277), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['d21.parameters', '[0.34188034]'], {}), '(d21.parameters, [0.34188034])\n', (13247, 13277), False, 'from numpy.testing import assert_array_almost_equal\n'), ((13279, 13346), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['d22.parameters', '[1.01294275, 0.22658346]'], {}), '(d22.parameters, [1.01294275, 0.22658346])\n', (13304, 13346), False, 'from numpy.testing import 
assert_array_almost_equal\n'), ((13348, 13398), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['d23.parameters', '[2.625]'], {}), '(d23.parameters, [2.625])\n', (13373, 13398), False, 'from numpy.testing import assert_array_almost_equal\n'), ((14120, 14157), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['mu1', 'mu1_t'], {}), '(mu1, mu1_t)\n', (14145, 14157), False, 'from numpy.testing import assert_array_almost_equal\n'), ((14159, 14198), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['cov1', 'cov1_t'], {}), '(cov1, cov1_t)\n', (14184, 14198), False, 'from numpy.testing import assert_array_almost_equal\n'), ((14200, 14237), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['mu2', 'mu2_t'], {}), '(mu2, mu2_t)\n', (14225, 14237), False, 'from numpy.testing import assert_array_almost_equal\n'), ((14239, 14278), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['cov2', 'cov2_t'], {}), '(cov2, cov2_t)\n', (14264, 14278), False, 'from numpy.testing import assert_array_almost_equal\n'), ((14640, 14696), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['model.weights', 'model2.weights'], {}), '(model.weights, model2.weights)\n', (14665, 14696), False, 'from numpy.testing import assert_array_almost_equal\n'), ((15053, 15109), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['model.weights', 'model2.weights'], {}), '(model.weights, model2.weights)\n', (15078, 15109), False, 'from numpy.testing import assert_array_almost_equal\n'), ((15481, 15537), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['model.weights', 'model2.weights'], {}), '(model.weights, model2.weights)\n', (15506, 15537), False, 'from numpy.testing import assert_array_almost_equal\n'), ((15904, 15960), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['model.weights', 'model2.weights'], {}), '(model.weights, model2.weights)\n', (15929, 15960), False, 'from numpy.testing import assert_array_almost_equal\n'), ((16325, 16381), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['model.weights', 'model2.weights'], {}), '(model.weights, model2.weights)\n', (16350, 16381), False, 'from numpy.testing import assert_array_almost_equal\n'), ((16741, 16797), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['model.weights', 'model2.weights'], {}), '(model.weights, model2.weights)\n', (16766, 16797), False, 'from numpy.testing import assert_array_almost_equal\n'), ((18184, 18208), 'nose.tools.assert_equal', 'assert_equal', (['model.d', '(1)'], {}), '(model.d, 1)\n', (18196, 18208), False, 'from nose.tools import assert_equal\n'), ((18399, 18451), 'nose.tools.assert_almost_equal', 'assert_almost_equal', (['logs[0][0]', '(-0.8909729238898652)'], {}), '(logs[0][0], -0.8909729238898652)\n', (18418, 18451), False, 'from nose.tools import assert_almost_equal\n'), ((18454, 18506), 'nose.tools.assert_almost_equal', 'assert_almost_equal', (['logs[0][1]', '(-1.3609765531356006)'], {}), '(logs[0][1], -1.3609765531356006)\n', (18473, 18506), False, 'from nose.tools import assert_almost_equal\n'), ((18508, 18560), 'nose.tools.assert_almost_equal', 'assert_almost_equal', (['logs[0][2]', '(-1.0986122886681096)'], {}), '(logs[0][2], -1.0986122886681096)\n', (18527, 18560), False, 'from nose.tools import assert_almost_equal\n'), ((18563, 18615), 'nose.tools.assert_almost_equal', 
'assert_almost_equal', (['logs[1][0]', '(-0.9357055312174429)'], {}), '(logs[1][0], -0.9357055312174429)\n', (18582, 18615), False, 'from nose.tools import assert_almost_equal\n'), ((18618, 18669), 'nose.tools.assert_almost_equal', 'assert_almost_equal', (['logs[1][1]', '(-1.429425687080494)'], {}), '(logs[1][1], -1.429425687080494)\n', (18637, 18669), False, 'from nose.tools import assert_almost_equal\n'), ((18671, 18723), 'nose.tools.assert_almost_equal', 'assert_almost_equal', (['logs[1][2]', '(-0.9990078376167526)'], {}), '(logs[1][2], -0.9990078376167526)\n', (18690, 18723), False, 'from nose.tools import assert_almost_equal\n'), ((18726, 18778), 'nose.tools.assert_almost_equal', 'assert_almost_equal', (['logs[2][0]', '(-3.9007882563128864)'], {}), '(logs[2][0], -3.9007882563128864)\n', (18745, 18778), False, 'from nose.tools import assert_almost_equal\n'), ((18780, 18833), 'nose.tools.assert_almost_equal', 'assert_almost_equal', (['logs[2][1]', '(-0.23562532881626597)'], {}), '(logs[2][1], -0.23562532881626597)\n', (18799, 18833), False, 'from nose.tools import assert_almost_equal\n'), ((18835, 18887), 'nose.tools.assert_almost_equal', 'assert_almost_equal', (['logs[2][2]', '(-1.6623251045711958)'], {}), '(logs[2][2], -1.6623251045711958)\n', (18854, 18887), False, 'from nose.tools import assert_almost_equal\n'), ((18890, 18942), 'nose.tools.assert_almost_equal', 'assert_almost_equal', (['logs[3][0]', '(-3.1703366478831185)'], {}), '(logs[3][0], -3.1703366478831185)\n', (18909, 18942), False, 'from nose.tools import assert_almost_equal\n'), ((18944, 18996), 'nose.tools.assert_almost_equal', 'assert_almost_equal', (['logs[3][1]', '(-0.4926140321126038)'], {}), '(logs[3][1], -0.4926140321126038)\n', (18963, 18996), False, 'from nose.tools import assert_almost_equal\n'), ((18999, 19050), 'nose.tools.assert_almost_equal', 'assert_almost_equal', (['logs[3][2]', '(-1.058478108940049)'], {}), '(logs[3][2], -1.058478108940049)\n', (19018, 19050), False, 'from nose.tools import assert_almost_equal\n'), ((19053, 19105), 'nose.tools.assert_almost_equal', 'assert_almost_equal', (['logs[4][0]', '(-1.3058441172130273)'], {}), '(logs[4][0], -1.3058441172130273)\n', (19072, 19105), False, 'from nose.tools import assert_almost_equal\n'), ((19107, 19159), 'nose.tools.assert_almost_equal', 'assert_almost_equal', (['logs[4][1]', '(-1.4007102236822906)'], {}), '(logs[4][1], -1.4007102236822906)\n', (19126, 19159), False, 'from nose.tools import assert_almost_equal\n'), ((19161, 19213), 'nose.tools.assert_almost_equal', 'assert_almost_equal', (['logs[4][2]', '(-0.7284958836972919)'], {}), '(logs[4][2], -0.7284958836972919)\n', (19180, 19213), False, 'from nose.tools import assert_almost_equal\n'), ((19397, 19450), 'nose.tools.assert_almost_equal', 'assert_almost_equal', (['probs[0][0]', '(0.41025641025641024)'], {}), '(probs[0][0], 0.41025641025641024)\n', (19416, 19450), False, 'from nose.tools import assert_almost_equal\n'), ((19452, 19504), 'nose.tools.assert_almost_equal', 'assert_almost_equal', (['probs[0][1]', '(0.2564102564102564)'], {}), '(probs[0][1], 0.2564102564102564)\n', (19471, 19504), False, 'from nose.tools import assert_almost_equal\n'), ((19507, 19559), 'nose.tools.assert_almost_equal', 'assert_almost_equal', (['probs[0][2]', '(0.3333333333333333)'], {}), '(probs[0][2], 0.3333333333333333)\n', (19526, 19559), False, 'from nose.tools import assert_almost_equal\n'), ((19563, 19614), 'nose.tools.assert_almost_equal', 'assert_almost_equal', (['probs[1][0]', '(0.392308981634461)'], {}), 
'(probs[1][0], 0.392308981634461)\n', (19582, 19614), False, 'from nose.tools import assert_almost_equal\n'), ((19618, 19671), 'nose.tools.assert_almost_equal', 'assert_almost_equal', (['probs[1][1]', '(0.23944639992337707)'], {}), '(probs[1][1], 0.23944639992337707)\n', (19637, 19671), False, 'from nose.tools import assert_almost_equal\n'), ((19673, 19726), 'nose.tools.assert_almost_equal', 'assert_almost_equal', (['probs[1][2]', '(0.36824461844216183)'], {}), '(probs[1][2], 0.36824461844216183)\n', (19692, 19726), False, 'from nose.tools import assert_almost_equal\n'), ((19729, 19783), 'nose.tools.assert_almost_equal', 'assert_almost_equal', (['probs[2][0]', '(0.020225961918306088)'], {}), '(probs[2][0], 0.020225961918306088)\n', (19748, 19783), False, 'from nose.tools import assert_almost_equal\n'), ((19785, 19836), 'nose.tools.assert_almost_equal', 'assert_almost_equal', (['probs[2][1]', '(0.790076637433831)'], {}), '(probs[2][1], 0.790076637433831)\n', (19804, 19836), False, 'from nose.tools import assert_almost_equal\n'), ((19840, 19893), 'nose.tools.assert_almost_equal', 'assert_almost_equal', (['probs[2][2]', '(0.18969740064786292)'], {}), '(probs[2][2], 0.18969740064786292)\n', (19859, 19893), False, 'from nose.tools import assert_almost_equal\n'), ((19896, 19949), 'nose.tools.assert_almost_equal', 'assert_almost_equal', (['probs[3][0]', '(0.04198945986103252)'], {}), '(probs[3][0], 0.04198945986103252)\n', (19915, 19949), False, 'from nose.tools import assert_almost_equal\n'), ((19952, 20004), 'nose.tools.assert_almost_equal', 'assert_almost_equal', (['probs[3][1]', '(0.6110270603826564)'], {}), '(probs[3][1], 0.6110270603826564)\n', (19971, 20004), False, 'from nose.tools import assert_almost_equal\n'), ((20007, 20058), 'nose.tools.assert_almost_equal', 'assert_almost_equal', (['probs[3][2]', '(0.346983479756311)'], {}), '(probs[3][2], 0.346983479756311)\n', (20026, 20058), False, 'from nose.tools import assert_almost_equal\n'), ((20061, 20114), 'nose.tools.assert_almost_equal', 'assert_almost_equal', (['probs[4][0]', '(0.27094373022369794)'], {}), '(probs[4][0], 0.27094373022369794)\n', (20080, 20114), False, 'from nose.tools import assert_almost_equal\n'), ((20116, 20169), 'nose.tools.assert_almost_equal', 'assert_almost_equal', (['probs[4][1]', '(0.24642188711704707)'], {}), '(probs[4][1], 0.24642188711704707)\n', (20135, 20169), False, 'from nose.tools import assert_almost_equal\n'), ((20171, 20223), 'nose.tools.assert_almost_equal', 'assert_almost_equal', (['probs[4][2]', '(0.4826343826592551)'], {}), '(probs[4][2], 0.4826343826592551)\n', (20190, 20223), False, 'from nose.tools import assert_almost_equal\n'), ((20410, 20438), 'nose.tools.assert_equal', 'assert_equal', (['predicts[0]', '(0)'], {}), '(predicts[0], 0)\n', (20422, 20438), False, 'from nose.tools import assert_equal\n'), ((20440, 20468), 'nose.tools.assert_equal', 'assert_equal', (['predicts[1]', '(0)'], {}), '(predicts[1], 0)\n', (20452, 20468), False, 'from nose.tools import assert_equal\n'), ((20470, 20498), 'nose.tools.assert_equal', 'assert_equal', (['predicts[2]', '(1)'], {}), '(predicts[2], 1)\n', (20482, 20498), False, 'from nose.tools import assert_equal\n'), ((20500, 20528), 'nose.tools.assert_equal', 'assert_equal', (['predicts[3]', '(1)'], {}), '(predicts[3], 1)\n', (20512, 20528), False, 'from nose.tools import assert_equal\n'), ((20530, 20558), 'nose.tools.assert_equal', 'assert_equal', (['predicts[4]', '(2)'], {}), '(predicts[4], 2)\n', (20542, 20558), False, 'from nose.tools import 
assert_equal\n'), ((20648, 20664), 'pomegranate.io.DataGenerator', 'DataGenerator', (['X'], {}), '(X)\n', (20661, 20664), False, 'from pomegranate.io import DataGenerator\n'), ((20818, 20857), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['logp1', 'logp2'], {}), '(logp1, logp2)\n', (20843, 20857), False, 'from numpy.testing import assert_array_almost_equal\n'), ((20859, 20898), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['logp1', 'logp3'], {}), '(logp1, logp3)\n', (20884, 20898), False, 'from numpy.testing import assert_array_almost_equal\n'), ((20980, 20996), 'pomegranate.io.DataGenerator', 'DataGenerator', (['X'], {}), '(X)\n', (20993, 20996), False, 'from pomegranate.io import DataGenerator\n'), ((21129, 21170), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['y_hat1', 'y_hat2'], {}), '(y_hat1, y_hat2)\n', (21154, 21170), False, 'from numpy.testing import assert_array_almost_equal\n'), ((21172, 21213), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['y_hat1', 'y_hat3'], {}), '(y_hat1, y_hat3)\n', (21197, 21213), False, 'from numpy.testing import assert_array_almost_equal\n'), ((21301, 21317), 'pomegranate.io.DataGenerator', 'DataGenerator', (['X'], {}), '(X)\n', (21314, 21317), False, 'from pomegranate.io import DataGenerator\n'), ((21468, 21509), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['y_hat1', 'y_hat2'], {}), '(y_hat1, y_hat2)\n', (21493, 21509), False, 'from numpy.testing import assert_array_almost_equal\n'), ((21511, 21552), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['y_hat1', 'y_hat3'], {}), '(y_hat1, y_hat3)\n', (21536, 21552), False, 'from numpy.testing import assert_array_almost_equal\n'), ((21644, 21660), 'pomegranate.io.DataGenerator', 'DataGenerator', (['X'], {}), '(X)\n', (21657, 21660), False, 'from pomegranate.io import DataGenerator\n'), ((21823, 21864), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['y_hat1', 'y_hat2'], {}), '(y_hat1, y_hat2)\n', (21848, 21864), False, 'from numpy.testing import assert_array_almost_equal\n'), ((21866, 21907), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['y_hat1', 'y_hat3'], {}), '(y_hat1, y_hat3)\n', (21891, 21907), False, 'from numpy.testing import assert_array_almost_equal\n'), ((22069, 22097), 'pomegranate.io.DataGenerator', 'DataGenerator', (['X', 'weights', 'y'], {}), '(X, weights, y)\n', (22082, 22097), False, 'from pomegranate.io import DataGenerator\n'), ((22571, 22610), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['logp1', 'logp2'], {}), '(logp1, logp2)\n', (22596, 22610), False, 'from numpy.testing import assert_array_almost_equal\n'), ((22781, 22809), 'pomegranate.io.DataGenerator', 'DataGenerator', (['X', 'weights', 'y'], {}), '(X, weights, y)\n', (22794, 22809), False, 'from pomegranate.io import DataGenerator\n'), ((23040, 23079), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['logp1', 'logp2'], {}), '(logp1, logp2)\n', (23065, 23079), False, 'from numpy.testing import assert_array_almost_equal\n'), ((14399, 14418), 'pickle.dumps', 'pickle.dumps', (['model'], {}), '(model)\n', (14411, 14418), False, 'import pickle\n'), ((14811, 14830), 'pickle.dumps', 'pickle.dumps', (['model'], {}), '(model)\n', (14823, 14830), False, 'import pickle\n'), ((20690, 20709), 'pandas.DataFrame', 'pandas.DataFrame', (['X'], {}), '(X)\n', (20706, 20709), False, 
'import pandas\n'), ((21022, 21041), 'pandas.DataFrame', 'pandas.DataFrame', (['X'], {}), '(X)\n', (21038, 21041), False, 'import pandas\n'), ((21343, 21362), 'pandas.DataFrame', 'pandas.DataFrame', (['X'], {}), '(X)\n', (21359, 21362), False, 'import pandas\n'), ((21686, 21705), 'pandas.DataFrame', 'pandas.DataFrame', (['X'], {}), '(X)\n', (21702, 21705), False, 'import pandas\n')] |
import base64
import os
import sys
import PyPDF2
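# Convert a PDF into a single SVG document in the Write note-taking format:
# each page is rasterized with pdftoppm and embedded as a base64-encoded PNG image.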
svg = '''<svg id="write-document" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
<rect id="write-doc-background" width="100%" height="100%" fill="#808080"/>
<defs id="write-defs">
<script type="text/writeconfig">
<int name="docFormatVersion" value="2" />
<int name="pageColor" value="-1" />
<int name="pageNum" value="0" />
<int name="ruleColor" value="0" />
<float name="marginLeft" value="0" />
<float name="xOffset" value="-380.701752" />
<float name="xRuling" value="0" />
<float name="yOffset" value="1536.84216" />
<float name="yRuling" value="0" />
</script>
</defs>
'''
pdf_path = sys.argv[1]
pdf = PyPDF2.PdfFileReader(pdf_path, "rb")
img_width = 720
n_pages = pdf.getNumPages()
page = pdf.getPage(0)
width = page.mediaBox.getWidth()
height = page.mediaBox.getHeight()
aspect_ratio = height/width
img_height = int(aspect_ratio * img_width)
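# Render each page into a temporary directory, then stack the page images vertically in the SVG.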
os.system('mkdir -p /tmp/pdf2write')
new_page_height = 0
for page in range(1, n_pages + 1):  # pdftoppm page numbers are 1-based
print(f"Processing {page}/{n_pages}", end='\r')
os.system(f'pdftoppm {pdf_path} /tmp/pdf2write/tmp{page} -png -f {page} -singlefile')
with open(f'/tmp/pdf2write/tmp{page}.png', 'rb') as f:
base64_data = base64.b64encode(f.read()).decode('utf-8')
tmp_svg = f'''<svg class="write-page" color-interpolation="linearRGB" x="10" y="{new_page_height+10}" width="{img_width}px" height="{img_height}px" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
<g class="write-content write-v3" width="{img_width}" height="{img_height}" xruling="0" yruling="0" marginLeft="0" papercolor="#FFFFFF" rulecolor="#00000000">
<g class="ruleline write-std-ruling write-scale-down" fill="none" stroke="none" stroke-width="1" shape-rendering="crispEdges" vector-effect="non-scaling-stroke">
<rect class="pagerect" fill="#FFFFFF" stroke="none" x="0" y="0" width="{img_width}" height="{img_height}" />
</g>
<image x="0" y="0" width="{img_width}" height="{img_height}" xlink:href="data:image/png;base64,{base64_data}"/>
</g>
</svg>'''
new_page_height += (img_height+10)
svg += tmp_svg
svg += '''</svg>'''
os.system('rm -rf /tmp/pdf2write')
with open(f'{os.path.dirname(pdf_path)}/{os.path.basename(pdf_path).split(".")[0]}.svg', 'w') as f:
f.write(svg)
os.system(f'gzip -S z {os.path.dirname(pdf_path)}/{os.path.basename(pdf_path).split(".")[0]}.svg')
| [
"os.path.dirname",
"os.system",
"PyPDF2.PdfFileReader",
"os.path.basename"
] | [((708, 744), 'PyPDF2.PdfFileReader', 'PyPDF2.PdfFileReader', (['pdf_path', '"""rb"""'], {}), "(pdf_path, 'rb')\n", (728, 744), False, 'import PyPDF2\n'), ((956, 992), 'os.system', 'os.system', (['"""mkdir -p /tmp/pdf2write"""'], {}), "('mkdir -p /tmp/pdf2write')\n", (965, 992), False, 'import os\n'), ((2207, 2241), 'os.system', 'os.system', (['"""rm -rf /tmp/pdf2write"""'], {}), "('rm -rf /tmp/pdf2write')\n", (2216, 2241), False, 'import os\n'), ((1101, 1191), 'os.system', 'os.system', (['f"""pdftoppm {pdf_path} /tmp/pdf2write/tmp{page} -png -f {page} -singlefile"""'], {}), "(\n f'pdftoppm {pdf_path} /tmp/pdf2write/tmp{page} -png -f {page} -singlefile')\n", (1110, 1191), False, 'import os\n'), ((2384, 2409), 'os.path.dirname', 'os.path.dirname', (['pdf_path'], {}), '(pdf_path)\n', (2399, 2409), False, 'import os\n'), ((2256, 2281), 'os.path.dirname', 'os.path.dirname', (['pdf_path'], {}), '(pdf_path)\n', (2271, 2281), False, 'import os\n'), ((2412, 2438), 'os.path.basename', 'os.path.basename', (['pdf_path'], {}), '(pdf_path)\n', (2428, 2438), False, 'import os\n'), ((2284, 2310), 'os.path.basename', 'os.path.basename', (['pdf_path'], {}), '(pdf_path)\n', (2300, 2310), False, 'import os\n')] |
from typing import Dict, List, cast
from py_headless_daw.project.parameter import Parameter, ParameterValueType, ParameterRangeType
class HavingParameters:
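    """Mixin that stores named Parameter objects and exposes typed accessors for them."""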
def __init__(self):
self._parameters: Dict[str, Parameter] = {}
super().__init__()
def has_parameter(self, name: str) -> bool:
return name in self._parameters
def add_parameter(self,
name: str,
value: ParameterValueType,
param_type: str,
value_range: ParameterRangeType):
if name in self._parameters:
raise Exception('parameter named ' + name + ' already added to this object')
parameter = Parameter(name, value, param_type, value_range)
self._parameters[name] = parameter
def add_parameter_object(self, parameter: Parameter) -> None:
self._parameters[parameter.name] = parameter
def get_parameter(self, name: str) -> Parameter:
for parameter in self.parameters:
if parameter.name == name:
return parameter
list_of_names: List[str] = [p.name for p in self.parameters]
# noinspection PyTypeChecker
available_names: List[str] = cast(List[str], list_of_names)
raise Exception('parameter named ' + name + ' not found. Available: ' + ', '.join(available_names))
def get_parameter_value(self, name: str) -> ParameterValueType:
param = self.get_parameter(name)
return param.value
def get_float_parameter_value(self, name: str) -> float:
param = self.get_parameter(name)
if param.type != Parameter.TYPE_FLOAT:
raise ValueError(f"parameter {name} was expected to be float (error: f009d0ef)")
value = self.get_parameter_value(name)
cast_value = cast(float, value)
return cast_value
def get_enum_parameter_value(self, name: str) -> str:
param = self.get_parameter(name)
if param.type != Parameter.TYPE_ENUM:
raise ValueError(f"parameter {name} was expected to be enum (error: 80a1d180)")
value = self.get_parameter_value(name)
cast_value = cast(str, value)
return cast_value
def set_parameter_value(self, name: str, value: ParameterValueType):
param = self.get_parameter(name)
param.value = value
@property
def parameters(self) -> List[Parameter]:
return list(self._parameters.values())
| [
"py_headless_daw.project.parameter.Parameter",
"typing.cast"
] | [((706, 753), 'py_headless_daw.project.parameter.Parameter', 'Parameter', (['name', 'value', 'param_type', 'value_range'], {}), '(name, value, param_type, value_range)\n', (715, 753), False, 'from py_headless_daw.project.parameter import Parameter, ParameterValueType, ParameterRangeType\n'), ((1230, 1260), 'typing.cast', 'cast', (['List[str]', 'list_of_names'], {}), '(List[str], list_of_names)\n', (1234, 1260), False, 'from typing import Dict, List, cast\n'), ((1818, 1836), 'typing.cast', 'cast', (['float', 'value'], {}), '(float, value)\n', (1822, 1836), False, 'from typing import Dict, List, cast\n'), ((2169, 2185), 'typing.cast', 'cast', (['str', 'value'], {}), '(str, value)\n', (2173, 2185), False, 'from typing import Dict, List, cast\n')] |
# -*- coding: utf-8 -*-
#
#
from __future__ import print_function
import csv
import os
import re
import sys
import arrow
from gsheets import Sheets
CURRENT_PATH = os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
DEBUG = os.environ.get('DEBUG', "0") == "1"
AS_CSV = os.environ.get('CSV', "0") == "1"
COL_DATE = 0
COL_WEEKDAY = 1
COL_TIME_START = 2
COL_TIME_END = 3
COL_LUNCH = 4
COL_TIME = 5 # includes lunch
COL_TIME_FIXED = 6 # does not include lunch
COL_MOVE = 7
COL_WORK_FROM_HOME = 8
COL_NOTES = 9
COL_TASKS_START = 10
SPECIAL_VALUES = ["sick", "ab", "off", "wfh", "hol"]
SATURDAY = 5
SUNDAY = 6
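# Parse an "HH:MM"-style value into (hours, minutes) numbers, optionally halving both parts.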
def calc(hour, half_it=False, split_char=":"):
parts = str(hour).split(split_char)
try:
local_hours = int(parts[0])
local_minutes = int(parts[1])
if half_it:
local_hours = local_hours / 2
local_minutes = local_minutes / 2
return local_hours, local_minutes
except:
if len(parts) == 1:
try:
return int(parts[0]), 0
except:
return 0, 0
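# Look for the Google API client secrets next to this script, falling back to the user's home directory.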
def get_client_secret_filenames():
filename = os.path.join(CURRENT_PATH, "client-secrets.json")
cachefile = os.path.join(CURRENT_PATH, "client-secrets-cache.json")
if not os.path.exists(filename):
filename = os.path.expanduser(os.path.join("~", "client-secrets.json"))
cachefile = os.path.expanduser(os.path.join("~", "client-secrets-cache.json"))
if not os.path.exists(filename):
raise Exception("Please provide a client-secret.json file, as described here: https://github.com/xflr6/gsheets#quickstart")
return filename, cachefile
def load_first_sheet_rows(api, timesheet_url, date=None):
    # default arguments are evaluated once at import time, so resolve "today" per call instead
    date = date or arrow.now().format('YYYYMMDD')
    print("Opening timesheet for %s ..." % (date))
sheets = api.get(timesheet_url)
sheet = sheets.sheets[0]
print(u"Timesheet [%s] sheet [%s] opened. Accessing cell data ..." % (sheets.title or "???", sheet.title or "???"))
rows = sheet.values()
return rows
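# Pick the date from the command line (YYYYMMDD, defaulting to today), then print that day's entry.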
def load_sheet_and_read_data(api, timesheet_url, commandline, user_full_name):
now = arrow.now()
today = now.format('YYYYMMDD')
try:
other_date = arrow.get(commandline, 'YYYYMMDD').format('YYYYMMDD')
except arrow.parser.ParserError:
other_date = today
use_date = other_date
rows = load_first_sheet_rows(api, timesheet_url, use_date)
timesheet = get_timesheet_for_date(rows, use_date, user_full_name)
if timesheet:
print("\n\n")
print("Timesheet for %s" % (use_date))
print(timesheet)
print("\n")
else:
print("No entry found for %s" % use_date)
def get_timesheet_for_date(rows, date, user_full_name):
# find the row with the first column that has today's date in it
result_rows = [row for row in rows if row and str(row[COL_DATE]) == date]
if result_rows is None or not result_rows:
return None
if len(result_rows) != 1:
print("More than one entry (%d) found for date %s! Please fix your sheet!" % (len(result_rows), date))
return None
found_row = result_rows[0]
found_index = rows.index(found_row)
start_val = found_row[COL_TIME_START]
end_val = found_row[COL_TIME_END]
duration_val = found_row[COL_TIME_FIXED]
max_cols = len(found_row)
if not start_val:
if start_val in SPECIAL_VALUES:
print("You forgot to add your start time.")
return None
if not end_val:
if end_val in SPECIAL_VALUES:
print("You forgot to add your end time.")
return None
#if max_cols >= COL_NOTES:
# print("No notes/tasks entered yet.")
# return None
def parse_hours(val):
try:
return arrow.get(val, "HH:mm")
except arrow.parser.ParserError:
return arrow.get(val, "H:mm")
start = parse_hours(start_val).format("HH:mm")
end = parse_hours(end_val).format("HH:mm")
duration = str(duration_val)
notes_str = found_row[COL_NOTES]
notes = notes_str.split('\n')
    # check the previous Friday entry (if today is not Friday), to see which
    # work-from-home days were selected
weekday = (found_row[COL_WEEKDAY] or "").lower()
check_start_index = found_index if weekday.startswith("fr") else found_index - 7
check_row = found_row
while (check_start_index < found_index):
check_row = rows[check_start_index]
if (len(check_row) > COL_WEEKDAY and check_row[COL_WEEKDAY] or "").lower().startswith("fr"):
break
check_start_index += 1
is_same_day = None
if check_start_index != found_index:
# print("HA! GOT PREVS FRIDAY.")
is_same_day = False
else:
# print("SAME DAY")
is_same_day = True
wfh = u"" if len(check_row)-1 < COL_WORK_FROM_HOME else check_row[COL_WORK_FROM_HOME]
wfh = wfh.replace("Mon", "Monday")
wfh = wfh.replace("Tue", "Tuesday")
wfh = wfh.replace("Wed", "Wednesday")
wfh = wfh.replace("Thu", "Thursday")
wfh = wfh.replace("Fri", "Friday")
wfh = wfh.replace(", ", ",").replace(",", " and ")
wfh_extra = "Next week" if is_same_day else "This week"
wfh_info = """%s %s""" % (wfh_extra, wfh) if wfh != "" else "all days"
# 2021-01-04 just make this the default for now
wfh_info = "at all times, unless mentioned otherwise below"
# regex: ([a-zA-Z].+-\d+)(.*)((?<=\[).+(?=\]))
# text: SCAN-4167 As a developer, I want to update AIScanRobo every week [1h]
# 3 groups:
# SCAN-4167
# As a developer, I want to update AIScanRobo every week [
# 1h
r = re.compile(r"([a-zA-Z].+-\d+)(.*)((?<=\[).+(?=\]))")
total_time_minutes_from_tasks = 0
tasks = []
for idx in range(COL_TASKS_START, max_cols):
task = found_row[idx].strip()
if task:
t = task.split('\n')[0] if '\n' in task else task
try:
g = r.match(t).groups()
except Exception as ex:
print("ERROR: %s - %s" % (t, str(ex)))
continue
if DEBUG:
print("task: %s" % (t))
print("groups: %s" % len(g))
[task_number, task_details, task_duration] = g
hours, half_hours = calc(task_duration.replace("h", ""), split_char=".")
minutes = (hours * 60) + (6 * half_hours)
total_time_minutes_from_tasks += minutes
other_lines = task.split('\n')[1:]
tasks.append("%s %s\n%s" % (task_number.strip(), task_details[:-2].strip(), '\n'.join(other_lines)))
def format_tasks(tasks):
if not tasks:
return ''
result = 'Tasks:\n'
for task in tasks:
if '\n' in task:
sub_tasks = task.split('\n')
if len(sub_tasks) > 1:
result += '\n* ' + sub_tasks[0] # main task
for sub_task in sub_tasks[1:]: # actual sub tasks
result += '\n\t' + sub_task
result += '\n'
else:
result += '\n* ' + task
else:
result += '\n* ' + task
return result
def format_notes(notes):
if not notes or (len(notes) == 1 and not notes[0]):
return ''
result = 'Additional Notes:\n'
for note in notes:
result += '\n* ' + note
return result
total_hours = str(int(total_time_minutes_from_tasks / 60)).zfill(2)
total_minutes = str(total_time_minutes_from_tasks % 60).zfill(2)
total_duration = "%s:%s" % (total_hours, total_minutes)
test_duration = duration
if len(test_duration) <= 4:
test_duration = "0%s" % duration
if total_duration != test_duration:
print("")
print("")
print("The task times do not add up! Tasks vs time entered: %s != %s" % (total_duration, test_duration))
print("")
print("")
# Time: %(start)s - %(end)s (%(duration)s hours total [%(total_hours)s:%(total_minutes)s])
msg = """
[Daily Report] %(date)s
WFH: %(wfh_info)s
Hi,
Daily Report for Date: %(date)s
%(tasks)s
%(notes)s
Kind regards,
%(user_full_name)s
""".strip() % {
"date": date,
"user_full_name": user_full_name,
"start": start,
"end": end,
"duration": duration,
"wfh_info": wfh_info,
"tasks": format_tasks(tasks) if tasks else "",
"notes": format_notes(notes) if notes else "",
"total_hours": total_hours,
"total_minutes": total_minutes,
}
print("Total time for all tasks (%s): %s - %s:%s" % (len(tasks), total_time_minutes_from_tasks, total_hours, total_minutes))
return msg
def _load_sheet_data(api, timesheet_url, arg_date=None):
try:
date = arrow.get(arg_date, 'YYYYMM')
except Exception: # pylint: disable=W0703
now = arrow.now()
date = now.format('YYYYMM')
rows = load_first_sheet_rows(api, timesheet_url, date)
date_str = str(date.format('YYYYMM'))
return (rows, date_str)
def export_csv(api, timesheet_url, arg_date):
rows, date = _load_sheet_data(api, timesheet_url, arg_date)
filtered = [row for row in rows if row and str(row[COL_DATE]).startswith(date)]
if filtered is None or not filtered:
return None
csv_filename = os.path.join(os.getcwd(), "%s.csv" % (arg_date))
print("")
print("Found (%d) entries for date %s!" % (len(filtered), date))
print("Writing to %s" % (csv_filename))
with open(csv_filename, mode='w') as f:
f = csv.writer(f, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
# f.writerow(['<NAME>', 'Accounting', 'November'])
f.writerow(["username", "date", "task", "duration", "work_type", "details"])
def w(task, duration_minutes, details = ""):
work_type = "Meeting" if "meeting" in details.lower() else "Development"
# Needed CSV columns
# username|date|task|duration|work_type|details
f.writerow(["daniel", arrow.get(str(date), 'YYYYMMDD').format('YYYY.MM.DD'), task, "%dm" % (duration_minutes), work_type, details])
# regex: ([a-zA-Z].+-\d+)(.*)((?<=\[).+(?=\]))
# text: SCAN-4167 As a developer, I want to update AIScanRobo every week [1h]
# 3 groups:
# SCAN-4167
# As a developer, I want to update AIScanRobo every week [
# 1h
r = re.compile(r"([a-zA-Z].+-\d+)(.*)((?<=\[).+(?=\]))")
for row in filtered:
max_cols = len(row)
time = row[COL_TIME_FIXED] if max_cols >= COL_TIME_FIXED else None
time_start = row[COL_TIME_START] if max_cols >= COL_TIME_START else None
time_end = row[COL_TIME_END] if max_cols >= COL_TIME_END else None
date = row[COL_DATE] if max_cols >= COL_DATE else None
if time_start is None or time_end is None or date is None:
continue
tasks = []
for idx in range(COL_TASKS_START, max_cols):
task = row[idx].strip()
if task:
tasks.append(task)
if len(tasks) == 0:
print("%s: no tasks found! %s" % (date, time_start))
continue
print("%s: %d tasks found!" % (date, len(tasks)))
for task in tasks:
t = task.split('\n')[0] if '\n' in task else task
try:
g = r.match(t).groups()
except Exception as ex:
print("ERROR: %s - %s" % (t, str(ex)))
continue
if DEBUG:
print("task: %s" % (t))
print("groups: %s" % len(g))
[task_number, task_details, duration] = g
hours, half_hours = calc(duration.replace("h", ""), split_char=".")
minutes = (hours * 60) + (6 * half_hours)
if DEBUG:
print("time: %s, %s $ %s $ %s" % (hours, half_hours, duration, minutes))
details = "%s %s" % (task_number, task_details[:-1].strip())
w(task_number, minutes, details.strip())
print("")
print("CSV output to: %s" % (csv_filename))
def calc_daily_hours_for_month(api, timesheet_url, arg_date):
rows, date = _load_sheet_data(api, timesheet_url, arg_date)
filtered = [row for row in rows if row and str(row[COL_DATE]).startswith(date)]
if filtered is None or not filtered:
return None
print("")
print("Found (%d) entries for date %s!" % (len(filtered), date))
minutes = 0
days = 0
for row in filtered:
max_cols = len(row)
time = row[COL_TIME_FIXED] if max_cols >= COL_TIME_FIXED else None
time_start = row[COL_TIME_START] if max_cols >= COL_TIME_START else None
time_end = row[COL_TIME_END] if max_cols >= COL_TIME_END else None
date = row[COL_DATE] if max_cols >= COL_DATE else None
worked_at = row[COL_MOVE] if max_cols >= COL_MOVE else None
notes = row[COL_NOTES] if max_cols >= COL_NOTES else ""
if time_start is None or time_end is None or date is None:
continue
start_hours, start_minutes = calc(time_start)
end_hours, end_minutes = calc(time_end)
if start_hours == 0:
print("%s: Day off because of %s" % (date, "whatever" if time_start == 0 else time_start))
continue
extra_info = ""
the_date = arrow.get(str(date), 'YYYYMMDD')
if the_date.weekday() in [SATURDAY, SUNDAY]:
extra_info += " - Weekend work"
half_day = 'half' in row[COL_WORK_FROM_HOME]
if half_day:
extra_info += " - half day PTO"
if worked_at in ['o', 'O'] or "OFFICE" in notes.upper():
extra_info += " - Commute to office"
minutes_day = abs(end_hours - start_hours) * 60
minutes_day += end_minutes - start_minutes
minutes += minutes_day
hours_day = int(minutes_day / 60)
hours_day_without_lunch = hours_day - 1
minutes_day = minutes_day % 60
total_time_for_date = str(hours_day).zfill(2) + ':' + str(minutes_day).zfill(2)
days += 1
no_lunch = str(hours_day_without_lunch).zfill(2) + ':' + str(minutes_day).zfill(2)
print("%s: %s to %s = %s (without lunch: %s)%s" % (date, str(time_start).zfill(2), str(time_end).zfill(2), total_time_for_date, no_lunch, extra_info))
    hours = str(minutes // 60).zfill(2)  # integer division keeps "HH" formatting on Python 3
    minutes = str(minutes % 60).zfill(2)
lunch_hours = str(int(float(hours)) - days).zfill(2)
print("")
print("Total days worked: %s" % str(days))
print("Total hours: %s:%s (with 1 hour lunch: %s:%s)" % (hours, minutes, lunch_hours, minutes))
print("")
def calc_stats(api, timesheet_url, arg_date=None):
rows, date = _load_sheet_data(api, timesheet_url, arg_date)
# find the rows for the given month
filtered = [row for row in rows if row and str(row[COL_DATE]).startswith(date)]
if filtered is None or not filtered:
return None
if not AS_CSV:
print("")
print("Found (%d) entries for date %s!" % (len(filtered), date))
dates, hours = [], []
half_days = {}
first = None
last = None
for row in filtered:
max_cols = len(row)
time = row[COL_TIME_FIXED] if max_cols >= COL_TIME_FIXED else None
tasks = []
for idx in range(COL_TASKS_START, max_cols):
task = row[idx].strip()
if task:
tasks.append(task)
day_type = row[COL_TIME_START] if max_cols >= COL_TIME_START else None
date = row[COL_DATE] if max_cols >= COL_DATE else None
if day_type is None:
continue
if day_type in SPECIAL_VALUES:
time = day_type
hours.append(time)
dates.append(date)
continue
elif not tasks:
continue
# If it was a half day, meaning I took half a day off, then only count half the time
half_day = 'half' in row[COL_WORK_FROM_HOME]
if half_day:
half_days[date] = time
hours.append(time)
dates.append(date)
if first is None:
first = row
else:
last = row
total_hours, total_minutes, total_time = 0, 0, ""
for index, hour in enumerate(hours):
date = dates[index]
local_hours, local_minutes = calc(hour, date in half_days)
total_hours += local_hours
total_minutes += local_minutes
if total_minutes >= 60:
total_hours += (total_minutes / 60)
total_minutes = total_minutes % 60
total_time = "%d:%d hours:minutes" % (total_hours, total_minutes)
expected = 0
actual_h, actual_m = 0, 0
if not AS_CSV:
print("*" * 50)
print("")
print("Valid hours entries: %s\t[required vs actual]" % len(hours))
deduct_work_hours = 0
work_hours = 0
work_minutes = 0
days = 0
expected_hours_accumulated_total = 0
for index, worked_date in enumerate(dates):
days += 1
if hours[index] in SPECIAL_VALUES:
if not AS_CSV:
print(" %s: Off, because %s" % (worked_date, hours[index]))
else:
pass
else:
half_day = worked_date in half_days
# each workday has 8 hours of work, but on half days it is only half of 8, aka 4.
work_hours_for_the_day = 8 if not half_day else 4
expected_hours_accumulated_total += 8 - (8 - work_hours_for_the_day)
expected_minutes_accumulated_total = expected_hours_accumulated_total * 60
# hours[index] is the actual time worked, e.g. 6:30 means 6 hours and 30 minutes
local_h, local_m = calc(hours[index])
work_hours += local_h
work_minutes += local_m
actual_h = work_hours
# 330 minutes = 6 hours and 30 minutes
actual_h += int(work_minutes / 60)
actual_m = work_minutes % 60
if AS_CSV:
print("%s;%s;" % (worked_date, hours[index]))
else:
print(" %s: %s\t[%s:00 vs %s:%s] %s" % (worked_date, hours[index], expected_hours_accumulated_total,
str(actual_h).zfill(2), str(actual_m).zfill(2),
"Half day" if half_day else ""))
if not AS_CSV:
print("")
print("First:", "<first> not found" if first is None else first[COL_DATE])
print("Last:", "<last> not found" if last is None else last[COL_DATE])
print("")
print("Total time in %s: %s" % (date, total_time))
print("")
print("*" * 50)
def main():
# print("Checking environment variable TIMESHEET_URL for spreadsheet URL...")
timesheet_url = os.environ.get('TIMESHEET_URL', "").strip()
if not timesheet_url:
raise Exception("Please set the TIMESHEET_URL environment variable accordingly.")
# print("Checking environment variable USER_FULL_NAME for spreadsheet URL...")
user_full_name = os.environ.get('USER_FULL_NAME', "").strip()
if not user_full_name:
print("Warning: USER_FULL_NAME environment variable not set!")
user_full_name = "<NAME>"
print("")
print("Usage: python timesheet.py [command|date] [date]")
print("Example: python timesheet.py stats 202011")
print("Example: python timesheet.py 20201130")
print("")
print("Available commands:")
print("- stats: show summed up hours and minutes for the given/current month")
print(" use \"CSV=1 python timesheet.py stats\" to format the output")
print(" as CSV")
print("- daily: same as stats, except ready to email to HR")
print("- csv: task breakdown for the month and time spend on each task")
print("")
print("""Tip: use "DEBUG=1 timesheet <parameter>" to enable debug output""")
print("")
print("Trying to load client-secrets.json file ...")
secrets_file, cache_file = get_client_secret_filenames()
sheets = Sheets.from_files(secrets_file, cache_file, no_webserver=False)
print("Success.")
date = None if len(sys.argv) < 3 else sys.argv[2].strip()
arg = "read today" if len(sys.argv) < 2 else sys.argv[1].strip()
if arg == "stats":
calc_stats(sheets, timesheet_url, date or arrow.now().format('YYYYMM'))
elif arg == "daily":
calc_daily_hours_for_month(sheets, timesheet_url, date or arrow.now().format('YYYYMM'))
elif arg == "csv":
export_csv(sheets, timesheet_url, date or arrow.now().format('YYYYMM'))
else:
date_to_use = "read today" if arg == '' else arg
load_sheet_and_read_data(sheets, timesheet_url, date_to_use, user_full_name)
print("Done.")
if __name__ == "__main__":
main()
| [
"os.path.exists",
"gsheets.Sheets.from_files",
"re.compile",
"csv.writer",
"os.path.join",
"os.environ.get",
"arrow.now",
"os.getcwd",
"os.path.realpath",
"arrow.get"
] | [((237, 265), 'os.environ.get', 'os.environ.get', (['"""DEBUG"""', '"""0"""'], {}), "('DEBUG', '0')\n", (251, 265), False, 'import os\n'), ((282, 308), 'os.environ.get', 'os.environ.get', (['"""CSV"""', '"""0"""'], {}), "('CSV', '0')\n", (296, 308), False, 'import os\n'), ((1149, 1198), 'os.path.join', 'os.path.join', (['CURRENT_PATH', '"""client-secrets.json"""'], {}), "(CURRENT_PATH, 'client-secrets.json')\n", (1161, 1198), False, 'import os\n'), ((1215, 1270), 'os.path.join', 'os.path.join', (['CURRENT_PATH', '"""client-secrets-cache.json"""'], {}), "(CURRENT_PATH, 'client-secrets-cache.json')\n", (1227, 1270), False, 'import os\n'), ((2137, 2148), 'arrow.now', 'arrow.now', ([], {}), '()\n', (2146, 2148), False, 'import arrow\n'), ((5669, 5723), 're.compile', 're.compile', (['"""([a-zA-Z].+-\\\\d+)(.*)((?<=\\\\[).+(?=\\\\]))"""'], {}), "('([a-zA-Z].+-\\\\d+)(.*)((?<=\\\\[).+(?=\\\\]))')\n", (5679, 5723), False, 'import re\n'), ((20335, 20398), 'gsheets.Sheets.from_files', 'Sheets.from_files', (['secrets_file', 'cache_file'], {'no_webserver': '(False)'}), '(secrets_file, cache_file, no_webserver=False)\n', (20352, 20398), False, 'from gsheets import Sheets\n'), ((200, 226), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (216, 226), False, 'import os\n'), ((1283, 1307), 'os.path.exists', 'os.path.exists', (['filename'], {}), '(filename)\n', (1297, 1307), False, 'import os\n'), ((1487, 1511), 'os.path.exists', 'os.path.exists', (['filename'], {}), '(filename)\n', (1501, 1511), False, 'import os\n'), ((8824, 8853), 'arrow.get', 'arrow.get', (['arg_date', '"""YYYYMM"""'], {}), "(arg_date, 'YYYYMM')\n", (8833, 8853), False, 'import arrow\n'), ((9393, 9404), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (9402, 9404), False, 'import os\n'), ((9614, 9684), 'csv.writer', 'csv.writer', (['f'], {'delimiter': '""","""', 'quotechar': '"""\\""""', 'quoting': 'csv.QUOTE_MINIMAL'}), '(f, delimiter=\',\', quotechar=\'"\', quoting=csv.QUOTE_MINIMAL)\n', (9624, 9684), False, 'import csv\n'), ((10484, 10538), 're.compile', 're.compile', (['"""([a-zA-Z].+-\\\\d+)(.*)((?<=\\\\[).+(?=\\\\]))"""'], {}), "('([a-zA-Z].+-\\\\d+)(.*)((?<=\\\\[).+(?=\\\\]))')\n", (10494, 10538), False, 'import re\n'), ((1347, 1387), 'os.path.join', 'os.path.join', (['"""~"""', '"""client-secrets.json"""'], {}), "('~', 'client-secrets.json')\n", (1359, 1387), False, 'import os\n'), ((1428, 1474), 'os.path.join', 'os.path.join', (['"""~"""', '"""client-secrets-cache.json"""'], {}), "('~', 'client-secrets-cache.json')\n", (1440, 1474), False, 'import os\n'), ((1730, 1741), 'arrow.now', 'arrow.now', ([], {}), '()\n', (1739, 1741), False, 'import arrow\n'), ((3779, 3802), 'arrow.get', 'arrow.get', (['val', '"""HH:mm"""'], {}), "(val, 'HH:mm')\n", (3788, 3802), False, 'import arrow\n'), ((8915, 8926), 'arrow.now', 'arrow.now', ([], {}), '()\n', (8924, 8926), False, 'import arrow\n'), ((19084, 19119), 'os.environ.get', 'os.environ.get', (['"""TIMESHEET_URL"""', '""""""'], {}), "('TIMESHEET_URL', '')\n", (19098, 19119), False, 'import os\n'), ((19348, 19384), 'os.environ.get', 'os.environ.get', (['"""USER_FULL_NAME"""', '""""""'], {}), "('USER_FULL_NAME', '')\n", (19362, 19384), False, 'import os\n'), ((2215, 2249), 'arrow.get', 'arrow.get', (['commandline', '"""YYYYMMDD"""'], {}), "(commandline, 'YYYYMMDD')\n", (2224, 2249), False, 'import arrow\n'), ((3863, 3885), 'arrow.get', 'arrow.get', (['val', '"""H:mm"""'], {}), "(val, 'H:mm')\n", (3872, 3885), False, 'import arrow\n'), ((20627, 20638), 'arrow.now', 
'arrow.now', ([], {}), '()\n', (20636, 20638), False, 'import arrow\n'), ((20748, 20759), 'arrow.now', 'arrow.now', ([], {}), '()\n', (20757, 20759), False, 'import arrow\n'), ((20851, 20862), 'arrow.now', 'arrow.now', ([], {}), '()\n', (20860, 20862), False, 'import arrow\n')] |
# -*- coding: utf-8 -*-
from cms.models import Page, Title, CMSPlugin, Placeholder
from cms.utils import get_language_from_request
from django.http import Http404
from django.shortcuts import get_object_or_404
def revert_plugins(request, version_id, obj):
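    """
    Revert a CMS page to the state stored in a django-reversion revision:
    restore placeholders, re-attach the reverted plugins to them, update the
    titles, and delete plugins that no longer exist in the revision.
    (Descriptive summary added for clarity; inferred from the code below.)
    """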
from reversion.models import Version
version = get_object_or_404(Version, pk=version_id)
revs = [related_version.object_version for related_version in version.revision.version_set.all()]
cms_plugin_list = []
placeholders = {}
plugin_list = []
titles = []
others = []
page = obj
lang = get_language_from_request(request)
for rev in revs:
obj = rev.object
if obj.__class__ == Placeholder:
placeholders[obj.pk] = obj
if obj.__class__ == CMSPlugin:
cms_plugin_list.append(obj)
elif hasattr(obj, 'cmsplugin_ptr_id'):
plugin_list.append(obj)
elif obj.__class__ == Page:
pass
#page = obj #Page.objects.get(pk=obj.pk)
elif obj.__class__ == Title:
titles.append(obj)
else:
others.append(rev)
if not page.has_change_permission(request):
raise Http404
current_plugins = list(CMSPlugin.objects.filter(placeholder__page=page))
for pk, placeholder in placeholders.items():
# admin has already created the placeholders/ get them instead
try:
placeholders[pk] = page.placeholders.get(slot=placeholder.slot)
except Placeholder.DoesNotExist:
placeholders[pk].save()
page.placeholders.add(placeholders[pk])
for plugin in cms_plugin_list:
# connect plugins to the correct placeholder
plugin.placeholder = placeholders[plugin.placeholder_id]
plugin.save(no_signals=True)
for plugin in cms_plugin_list:
plugin.save()
for p in plugin_list:
if int(p.cmsplugin_ptr_id) == int(plugin.pk):
plugin.set_base_attr(p)
p.save()
for old in current_plugins:
if old.pk == plugin.pk:
plugin.save()
current_plugins.remove(old)
for title in titles:
title.page = page
try:
title.save()
        except Exception:
title.pk = Title.objects.get(page=page, language=title.language).pk
title.save()
for other in others:
other.object.save()
for plugin in current_plugins:
        plugin.delete()
| [
"cms.utils.get_language_from_request",
"django.shortcuts.get_object_or_404",
"cms.models.Title.objects.get",
"cms.models.CMSPlugin.objects.filter"
] | [((313, 354), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Version'], {'pk': 'version_id'}), '(Version, pk=version_id)\n', (330, 354), False, 'from django.shortcuts import get_object_or_404\n'), ((583, 617), 'cms.utils.get_language_from_request', 'get_language_from_request', (['request'], {}), '(request)\n', (608, 617), False, 'from cms.utils import get_language_from_request\n'), ((1231, 1279), 'cms.models.CMSPlugin.objects.filter', 'CMSPlugin.objects.filter', ([], {'placeholder__page': 'page'}), '(placeholder__page=page)\n', (1255, 1279), False, 'from cms.models import Page, Title, CMSPlugin, Placeholder\n'), ((2294, 2347), 'cms.models.Title.objects.get', 'Title.objects.get', ([], {'page': 'page', 'language': 'title.language'}), '(page=page, language=title.language)\n', (2311, 2347), False, 'from cms.models import Page, Title, CMSPlugin, Placeholder\n')] |
import os, sys, cdms2, vcs, vcs.testing.regression as regression
dataset = cdms2.open(os.path.join(vcs.sample_data,"clt.nc"))
data = dataset("clt")
canvas = regression.init()
isoline = canvas.createisoline()
isoline.label="y"
texts=[]
colors = []
for i in range(10):
text = canvas.createtext()
text.color = 50 + 12 * i
text.height = 12
colors.append(100 + 12 * i)
if i%2 == 0:
texts.append(text.name)
else:
texts.append(text)
isoline.text = texts
# First test using isoline.text[...].color
canvas.plot(data, isoline, bg=1)
baseline = os.path.splitext(sys.argv[1])
baselineImage = "%s%s"%baseline
ret = regression.run_wo_terminate(canvas, "test_vcs_isoline_labels.png", baselineImage)
# Now set isoline.linecolors and test again.
canvas.clear()
isoline.linecolors = colors
canvas.plot(data, isoline, bg=1)
baselineImage = "%s%d%s"%(baseline[0], 2, baseline[1])
testImage = os.path.abspath("test_vcs_isoline_labels2.png")
ret += regression.run_wo_terminate(canvas, testImage, baselineImage)
# Now set isoline.textcolors and test again.
canvas.clear()
isoline.textcolors = colors
canvas.plot(data, isoline, bg=1)
baselineImage = "%s%d%s"%(baseline[0], 3, baseline[1])
testImage = os.path.abspath("test_vcs_isoline_labels3.png")
ret += regression.run_wo_terminate(canvas, testImage, baselineImage)
sys.exit(ret)
| [
"os.path.splitext",
"vcs.testing.regression.init",
"os.path.join",
"sys.exit",
"os.path.abspath",
"vcs.testing.regression.run_wo_terminate"
] | [((158, 175), 'vcs.testing.regression.init', 'regression.init', ([], {}), '()\n', (173, 175), True, 'import os, sys, cdms2, vcs, vcs.testing.regression as regression\n'), ((573, 602), 'os.path.splitext', 'os.path.splitext', (['sys.argv[1]'], {}), '(sys.argv[1])\n', (589, 602), False, 'import os, sys, cdms2, vcs, vcs.testing.regression as regression\n'), ((641, 726), 'vcs.testing.regression.run_wo_terminate', 'regression.run_wo_terminate', (['canvas', '"""test_vcs_isoline_labels.png"""', 'baselineImage'], {}), "(canvas, 'test_vcs_isoline_labels.png',\n baselineImage)\n", (668, 726), True, 'import os, sys, cdms2, vcs, vcs.testing.regression as regression\n'), ((912, 959), 'os.path.abspath', 'os.path.abspath', (['"""test_vcs_isoline_labels2.png"""'], {}), "('test_vcs_isoline_labels2.png')\n", (927, 959), False, 'import os, sys, cdms2, vcs, vcs.testing.regression as regression\n'), ((967, 1028), 'vcs.testing.regression.run_wo_terminate', 'regression.run_wo_terminate', (['canvas', 'testImage', 'baselineImage'], {}), '(canvas, testImage, baselineImage)\n', (994, 1028), True, 'import os, sys, cdms2, vcs, vcs.testing.regression as regression\n'), ((1219, 1266), 'os.path.abspath', 'os.path.abspath', (['"""test_vcs_isoline_labels3.png"""'], {}), "('test_vcs_isoline_labels3.png')\n", (1234, 1266), False, 'import os, sys, cdms2, vcs, vcs.testing.regression as regression\n'), ((1274, 1335), 'vcs.testing.regression.run_wo_terminate', 'regression.run_wo_terminate', (['canvas', 'testImage', 'baselineImage'], {}), '(canvas, testImage, baselineImage)\n', (1301, 1335), True, 'import os, sys, cdms2, vcs, vcs.testing.regression as regression\n'), ((1337, 1350), 'sys.exit', 'sys.exit', (['ret'], {}), '(ret)\n', (1345, 1350), False, 'import os, sys, cdms2, vcs, vcs.testing.regression as regression\n'), ((87, 126), 'os.path.join', 'os.path.join', (['vcs.sample_data', '"""clt.nc"""'], {}), "(vcs.sample_data, 'clt.nc')\n", (99, 126), False, 'import os, sys, cdms2, vcs, vcs.testing.regression as regression\n')] |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 5 05:47:03 2018
@author: zg
"""
import numpy as np
#from scipy import io
import scipy.io
#import pickle
from sklearn.model_selection import StratifiedKFold
#import sklearn
from scipy.sparse import spdiags
from scipy.spatial import distance
#import matplotlib.pyplot as plt
from sklearn.ensemble import BaggingClassifier
from sklearn import svm
#from sklearn import metrics
from sklearn.metrics import roc_auc_score
from sklearn import tree
import copy
import numpy.matlib
from sklearn.exceptions import NotFittedError
#import FuzzyRwrBagging as frb
#from joblib import Parallel, delayed
#import multiprocessing
def RWR(A, nSteps, laziness, p0 = None):
'''
% the random walk algorithm.
% A is the input net matrix, with the diag to be 0.
% nSteps: how many steps to walk
    % laziness: the probability to go back (restart at the start node).
% p0: the initial probability. usually it is a zero matrix with the diag to
% be 1.
%
% for example, A could be:
% A = [0,2,2,0,0,0,0;...
% 2,0,1,1,0,0,0;...
% 2,1,0,0,1,0,0;...
% 0,1,0,0,0,1,1;...
% 0,0,1,0,0,0,0;...
% 0,0,0,1,0,0,1;...
% 0,0,0,1,0,1,0]
%
% if nSteps is 1000 and laziness is 0.3, p0 is default, the result is:
% [0.449, 0.207, 0.220, 0.064, 0.154, 0.034, 0.034;...
% 0.207, 0.425, 0.167, 0.132, 0.117, 0.071, 0.071;...
% 0.220, 0.167, 0.463, 0.052, 0.324, 0.028, 0.028;...
% 0.048, 0.099, 0.039, 0.431, 0.027, 0.232, 0.232;...
% 0.038, 0.029, 0.081, 0.009, 0.356, 0.004, 0.004;...
% 0.017, 0.035, 0.014, 0.154, 0.009, 0.425, 0.203;...
% 0.017, 0.035, 0.014, 0.154, 0.009, 0.203, 0.425]
%
    % Each column represents the probability distribution for one start node;
    % each element in the column is the probability of ending up at that node.
% This algorithm will converge. For example, for the above matrix, nSteps =
% 100, 1000 or 10000, will give the same result.
'''
n = len(A)
    if p0 is None:
        p0 = np.eye(n)
'''
% In the example above, spdiags(sum(A)'.^(-1), 0, n, n) will be
% 0.2500 0 0 0 0 0 0
% 0 0.2500 0 0 0 0 0
% 0 0 0.2500 0 0 0 0
% 0 0 0 0.3333 0 0 0
% 0 0 0 0 1.0000 0 0
% 0 0 0 0 0 0.5000 0
% 0 0 0 0 0 0 0.5000
% W will be:
% 0 0.5000 0.5000 0 0 0 0
% 0.5000 0 0.2500 0.3333 0 0 0
% 0.5000 0.2500 0 0 1.0000 0 0
% 0 0.2500 0 0 0 0.5000 0.5000
% 0 0 0.2500 0 0 0 0
% 0 0 0 0.3333 0 0 0.5000
% 0 0 0 0.3333 0 0.5000 0
'''
#W = A * spdiags(sum(A)'.^(-1), 0, n, n);
#W = spdiags(np.power(sum(np.float64(A)) , -1).T , 0, n, n).toarray()
W = A.dot( spdiags(np.power(sum(np.float64(A)) , -1)[np.newaxis], \
0, n, n).toarray() )
p = p0
pl2norm = np.inf
unchanged = 0
for i in range(1, nSteps+1):
if i % 100 == 0:
print(' done rwr ' + str(i-1) )
pnew = (1-laziness) * W.dot(p) + laziness * p0
l2norm = max(np.sqrt(sum((pnew - p) ** 2) ) )
p = pnew
if l2norm < np.finfo(float).eps:
break
else:
if l2norm == pl2norm:
unchanged = unchanged +1
if unchanged > 10:
break
else:
unchanged = 0
pl2norm = l2norm
return p
# test RWR()
'''
A = np.array([[0,2,2,0,0,0,0],\
[2,0,1,1,0,0,0],\
[2,1,0,0,1,0,0],\
[0,1,0,0,0,1,1],\
[0,0,1,0,0,0,0],\
[0,0,0,1,0,0,1],\
[0,0,0,1,0,1,0]])
nSteps = 1000
lazi = 0.3
RWR(A, nSteps, lazi, None)
'''
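# Added side note (sketch, not part of the original pipeline): the loop in RWR()
# iterates p <- (1-laziness)*W*p + laziness*p0; its fixed point can also be
# written in closed form as p = laziness * inv(I - (1-laziness)*W) * p0, which
# gives a cheap sanity check on small matrices such as the example A above:
'''
A = np.float64(A)
W = A / A.sum(axis=0)  # column-normalized transition matrix
p_closed = 0.3 * np.linalg.inv(np.eye(len(A)) - (1 - 0.3) * W)
# p_closed should agree with RWR(A, 1000, 0.3) up to numerical tolerance
'''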
# test
#dst = distance.euclidean(A)
# correct, the same as in Matlab
def f_sim_2_aRankNet(sim, k=3):
'''
% Convert the similarity matrix to a network graph where each node
% has k edges to other nodes (aRank).
'''
    # delete the diagonal values.
# sim = sim-diag(diag(sim) );
np.fill_diagonal(sim, 0)
# [~, I] = sort(sim-diag(diag(sim) ) );
I = np.argsort(sim, kind='mergesort') + 1
# [~, I2] = sort(I);
I2 = (np.argsort(I, kind='mergesort').T + 1).T
# for every column, just keep the top k edges.
#aRankNet = (I2 >length(sim)-k);
aRankNet = I2 > (len(sim) - k)
    # make the adjacency matrix symmetric (undirected graph)
# aRankNet = max(aRankNet, aRankNet');
aRankNet = np.logical_or(aRankNet, aRankNet.T)
# remove the diagonal 1s.
# aRankNet = aRankNet-diag(diag(aRankNet) );
np.fill_diagonal(aRankNet, False)
return aRankNet
# test
#sim = np.array([[0, 0.5566, 0.6448, 0.3289], \
# [0.5566, 0, -0.0842, -0.0170], \
# [0.6448, -0.0842, 0, 0.8405], \
# [0.3289, -0.0170, 0.8405, 0]])
#
#f_sim_2_aRankNet(sim,1)
#f_sim_2_aRankNet(sim,2)
#f_sim_2_aRankNet(sim,3)
#
#array([[False, True, True, False],
# [ True, False, False, False],
# [ True, False, False, True],
# [False, False, True, False]])
#
#array([[False, True, True, True],
# [ True, False, False, False],
# [ True, False, False, True],
# [ True, False, True, False]])
#
#array([[False, True, True, True],
# [ True, False, False, True],
# [ True, False, False, True],
# [ True, True, True, False]])
def f_find_centers_rwMat(rw_mat, k):
'''
% on the rw_mat matrix, find some nodes as the centroids for soft
    % clustering. If we just randomly pick some nodes as centroids, that is
% not good for fuzzy clusters.
% k is the number of centroids.
'''
ixs = []
# 1. find the most connected center node as the first centroid.
a = np.sum(rw_mat, axis=1) # axis=1 for rows; 0 for col
# % most connected node.
ix = np.argmax(a)
ixs.append(ix)
# % 2. iteratively find the rest nodes
for i in range(1, k):
tmp = rw_mat[:, ixs]
b = np.sum(tmp, axis=1)
b[ixs] = np.inf
# % find the farthest node
ix = np.argmin(b)
ixs.append(ix)
return ixs
# test
#tmp = f_find_centers_rwMat(rw_mat, 10)
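# Added note: this is essentially a farthest-first traversal on the RWR
# affinity matrix - start from the best-connected node, then repeatedly add
# the node that is least reachable from the centers already chosen.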
def getCutoff(rw_mat, avgNeighborsSize):
tmp = rw_mat.flatten('F')
a = np.flip(np.sort(tmp), 0)
len1 = len(rw_mat)
#cutoffs = []
all_neibs = int( avgNeighborsSize * len1 )
print( all_neibs)
ct = a[all_neibs]
return ct
#test
#>>> a = np.array([[1,2], [3,4]])
#>>> a.flatten()
#array([1, 2, 3, 4])
#>>> a.flatten('F')
#array([1, 3, 2, 4])
'''
a = np.array( range(0,100) )
b = np.matlib.repmat(a, 100, 1)
ct = getCutoff(b, 70)
'''
def f_len_of_each_ele(c1):
#% Assume c1 is a 1-dimension cell array, and each element is a 1d double
#% array. This function counts the length of each double array.
lens = np.zeros(len(c1))
for i in range(0, len(c1)):
lens[i] = len(c1[i])
return lens
def f_eu_dist(X):
'''
calculate the euclidean distance between instances
'''
sim = np.zeros(( len(X), len(X) ))
for i in range(0, len(X)):
for j in range(i+1, len(X)):
tmp = distance.euclidean(X[i], X[j])
sim[i][j] = tmp
sim[j][i] = tmp
sim = -sim
np.fill_diagonal(sim, 0)
return sim
#test
#sim = f_eu_dist(X)
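# Added note (sketch): the double loop above is equivalent to the vectorized
# SciPy form, which is much faster for large X:
#   from scipy.spatial.distance import pdist, squareform
#   sim = -squareform(pdist(X, metric='euclidean'))   # diagonal is already 0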
def f_eu_dist2(X1, X2):
'''
calculate the euclidean distance between instances from two datasets
'''
sim = np.zeros(( len(X1), len(X2) ))
for i in range(0, len(X1) ):
for j in range(0, len(X2) ):
tmp = distance.euclidean(X1[i], X2[j])
sim[i][j] = tmp
sim = -sim
return sim
#test
#sim = f_eu_dist2(X_tr, X_te)
def f_fuzzy_rwr_clusters(X, k=100, each_clus_sz=None):
# X: data
# k: number of clusters
'''
    The return variable clus stores the instance indices for each cluster.
    However, with this data structure it is not easy to look up, for a given
    instance, which clusters it belongs to; thus we also need to convert clus
    to a true-false matrix (see f_clus_to_tfs).
'''
    if each_clus_sz is None:
# on average, how many clusters does one inst belongs to.
#overlap_factor = 2;
# the estimated size of each cluster. default is half the number of
# instances.
each_clus_sz=len(X)/3
print('RWR-based fuzzy clustering starts...')
print(' NO. clusters = '+str(k)+'; avg. cluster size = '+str(each_clus_sz) )
# sim = squareform(pdist(X));
# sim = -sim;
sim = np.zeros((len(X), len(X) ) )
for i in range(0, len(X)):
for j in range(i+1, len(X)):
tmp = distance.euclidean(X[i], X[j])
sim[i][j] = tmp
sim[j][i] = tmp
sim = -sim
print(' done calculating the Euclidean distance matrix')
# ---------------------------------------------------------------
aRank_k_neighbors = np.ceil(np.log10(len(sim)) )
ori_graph = f_sim_2_aRankNet(sim, aRank_k_neighbors)
print(' done calculating the A-rank KNN graph')
# % -------- RWR --------
nSteps = 1000
lazi = 0.3
rw = RWR(ori_graph, nSteps, lazi)
# remove probability of returning start node
np.fill_diagonal(rw, 0)
rw_mat = rw
print(' done RWR')
# ---------------------------------------------------------------
ixs_centers = f_find_centers_rwMat(rw_mat, k)
ct = getCutoff(rw_mat, each_clus_sz)
rw_net = rw_mat > ct
    # % set the diagonal to 1
np.fill_diagonal(rw_net, True)
clus = []
for i in range(0, k):
tmp = np.argwhere(rw_net[:, ixs_centers[i] ] ).flatten()
clus.append(tmp)
# ---------------------------------------------------------------
# % sort the clusters
lens = f_len_of_each_ele(clus)
ix = np.argsort(lens)[::-1]
clus_ordered = [clus[i] for i in ix]
print(' center inst. index of each cluster: ')
ixs_centers = np.array(ixs_centers)
print(ixs_centers[ix])
print(' size of each cluster: ')
print(lens[ix])
print(' done RWR clustering')
return clus_ordered
#test
#clus = f_fuzzy_rwr_clusters(X, 100)
# pass
def f_clus_to_tfs(clus, n_inst):
#% convert the cluster information from cell array to mat. But for each
#% instance, the rank of clusters information will be lost - you won't know
#% what is the top 1/2/3 cluster it belongs to.
#%
#% clus e.g:
#% 1x5 cell
#% 1x195 double 1x193 double 1x169 double 1x161 double 1x62 double
#%
#% tfs e.g:
#% 295x5 double
#% 1 0 0 0 0
#% 1 1 1 1 0
#% 1 1 1 0 0
#% 1 1 0 0 0
#% 1 1 1 1 0
#% ...
#% 1 1 1 1 1
#% 1 0 0 0 0
#% 1 1 1 0 0
tfs = np.zeros((n_inst, len(clus)), dtype=bool)
for i in range(0, len(clus)):
tfs[clus[i], i] = True
return tfs
# test
#tfs = f_clus_to_tfs(clus, len(X))
# pass
def f_tfs_2_instClus(tfs):
'''
    convert the boolean table representation of the clustering result into,
    for each instance, the list of clusters it belongs to.
'''
inst_clus = []
for i in range(0, len(tfs)):
row = list( np.where(tfs[i, :] ) [0] )
inst_clus.append(row)
return inst_clus
# test
#inst_clus = f_tfs_2_instClus(tfs)
#def f_bg_svm_tr_te(X_tr, y_tr, X_te, y_te):
# #bagging = BaggingClassifier(base_estimator = svm.LinearSVC(), \
# bagging = BaggingClassifier(base_estimator = tree.DecisionTreeClassifier(), \
# random_state=None, n_estimators = 100 )
# bagging.fit(X_tr, y_tr)
#
# y_pred = bagging.predict_proba(X_te)
# y_pred = y_pred[:, 1].flatten()
#
# auc = roc_auc_score(y_te.flatten(), y_pred)
#
# return [y_pred, auc]
# test
'''
X_tr = X
y_tr = y
X_te = X
y_te = y
[y_pred, auc] = f_bg_svm_tr_te(X_tr, y_tr, X_te, y_te)
'''
#def f_bg_tr_te(X_tr, y_tr, X_te, y_te, BaseBagging):
# '''
# corresponds to f_weka_bg_svm_tr_te() in Matlab version
# '''
# #bagging = BaggingClassifier(base_estimator = svm.LinearSVC(), \
# bagging = BaggingClassifier(BaseBagging, \
# random_state=None, n_estimators = 100 )
# bagging.fit(X_tr, y_tr)
#
# y_pred = bagging.predict_proba(X_te)
# y_pred = y_pred[:, 1].flatten()
#
# auc = roc_auc_score(y_te.flatten(), y_pred)
#
# return [y_pred, auc]
def f_tr(X_tr, y_tr, model):
model_inner = copy.deepcopy(model)
model_inner.fit(X_tr, y_tr)
return model_inner
def f_te(X_te, model):
y_pred = model.predict_proba(X_te)
y_pred = y_pred[:, 1].flatten()
return y_pred
def f_tr_te(X_tr, y_tr, X_te, model):
'''
corresponds to f_weka_bg_svm_tr_te() in Matlab version
'''
#bagging = BaggingClassifier(base_estimator = svm.LinearSVC(), \
#bagging = BaggingClassifier(BaseBagging, \
# random_state=None, n_estimators = 100 )
model_inner = copy.deepcopy(model)
model_inner.fit(X_tr, y_tr)
y_pred = model_inner.predict_proba(X_te)
y_pred = y_pred[:, 1].flatten()
#auc = roc_auc_score(y_te.flatten(), y_pred)
return y_pred
def f_k_fo(X, y, model, k_fold=10):
'''
corresponds to f_weka_bg_svm_arff_k_fo_3_parfor() in Matlab version
'''
y = y.flatten()
y_pred = np.zeros(y.size)
skf = StratifiedKFold(n_splits=k_fold, random_state=None, shuffle=True)
skf.get_n_splits(X, y)
for train_index, test_index in skf.split(X, y):
#print("TRAIN: ", train_index, " TEST: ", test_index)
X_tr, X_te = X[train_index], X[test_index]
#y_tr, y_te = y[train_index], y[test_index]
y_tr = y[train_index]
if np.unique(y_tr).size == 1:
y_pred_fo = np.zeros( len(test_index) )
#print len(X_te)
#print len(test_index)
#print y_pred_fo
y_pred_fo.fill(np.unique(y_tr)[0] )
#print y_pred_fo
else:
y_pred_fo = f_tr_te(X_tr, y_tr, X_te, model)
y_pred[test_index] = y_pred_fo
#auc = roc_auc_score(y.flatten(), y_pred)
return y_pred
# test
#pa = '/Volumes/Macintosh_HD/Users/zg/bio/3_ensembF/3_scripts/2017_4_4/'
##X = scipy.io.loadmat(pa+'/data/data_all_pickle/30/data.mat')['X'] # 30:breast cancer
##y = scipy.io.loadmat(pa+'/data/data_all_pickle/30/data.mat')['y']
#X = scipy.io.loadmat(pa+'/data/data_all_pickle/11/data.mat')['X'] # 11:mesothelioma
#y = scipy.io.loadmat(pa+'/data/data_all_pickle/11/data.mat')['y']
#
#model = BaggingClassifier(base_estimator = tree.DecisionTreeClassifier(), \
# random_state=None, n_estimators = 100 )
#y_pred = f_k_fo(X, y, model, k_fold=10)
#
#print roc_auc_score(y.flatten(), y_pred)
# the easy dataset mesothelioma get 1.0 CV result.
# breast cancer get 0.599
# all results are correct.
def f_quantileNorm(templete, target):
'''
    Quantile normalization: templete provides the reference distribution; each
    value in target is replaced by the templete value of the same rank, so the
    two arrays may start with very different ranges. Note that target is
    modified in place. templete and target should be 1-d arrays of length n.
    (corresponds to f_my_quantileNorm() in the Matlab version)
'''
ix_target = np.argsort(target, kind='mergesort')
ix_templete = np.argsort(templete, kind='mergesort')
target[ix_target] = templete[ix_templete]
new = target
return new
# test
#templete = X[:, 0]
#target = X[:, 1]
#new = f_quantileNorm(templete, target)
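# Added worked example (values assumed for illustration): rank matching sends
# the smallest target value to the smallest templete value, and so on.
'''
templete = np.array([0.1, 0.5, 0.9, 0.7])
target = np.array([10., 40., 20., 30.])
f_quantileNorm(templete, target)  # -> array([0.1, 0.9, 0.5, 0.7]); target is modified in place
'''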
#def f_bg_k_fo_3(X, y, k_fold=10):
# '''
# corresponds to f_weka_bgSvm_arff_k_fo_3_parfor() in Matlab version
# corresponds to f_k_fo()
# '''
# y_pred = np.zeros((y.size, 1))
#
# skf = StratifiedKFold(n_splits=k_fold)
# skf.get_n_splits(X, y)
#
# for train_index, test_index in skf.split(X, y):
# #print("TRAIN:", train_index, "TEST:", test_index)
# X_tr, X_te = X[train_index], X[test_index]
# y_tr, y_te = y[train_index], y[test_index]
def f_use_each_clus_forWhole(X, y, clus, y_pred_whole, model, fo_inner):
'''
% using each cluster data to predict the whole instances, while self
% prediction using 10-fold CV.
corresponds to f_use_each_clus_forWhole_bg_svm() in Matlab version
'''
n_clusters = len(clus)
y_pred_multi = np.zeros((y.size, n_clusters) )
models = []
for j in range(0, n_clusters):
# for each cluster
Xj = X[clus[j].flatten(), :]
yj = y[clus[j].flatten() ]
model_a_clust = copy.deepcopy(model)
print(' Cluster '+str(j)+' started...')
#if len(yj) > 10:
if len(yj) > 15 and np.unique(yj).size != 1:
# ------------------ for self ------------------
#if np.unique(yj).size == 1:
# y_pred = np.zeros(yj.size)
# y_pred.fill(np.unique(yj)[0])
#else:
try:
y_pred = f_k_fo(Xj, yj, model, fo_inner)
# quantileNorm
templete = y_pred_whole[clus[j].flatten()]
target = y_pred
y_pred = f_quantileNorm(templete, target)
# copy the normed prediction to the whole data.
y_pred_multi[clus[j].flatten(), j] = y_pred
print(' c-'+str(j)+' done predicting local instances')
# ------------------ for other -----------------
ix_other = set(range(0, y.size)) - set(clus[j].flatten())
ix_other = list(ix_other)
#print ix_other
X_other = X[ix_other , :]
#y_other = y[ix_other ]
# predict
#y_pred = f_tr_te(Xj, yj, X_other, model)
#if np.unique(yj).size != 1:
model_a_clust.fit(Xj, yj)
y_pred = model_a_clust.predict_proba(X_other)
y_pred = y_pred[:, 1].flatten()
# quantileNorm
templete = y_pred_whole[ix_other]
target = y_pred
y_pred = f_quantileNorm(templete, target)
#else:
# y_pred = np.zeros(X_other.size)
# y_pred.fill(np.unique(yj)[0])
# copy to the whole array
y_pred_multi[ix_other, j] = y_pred
print(' c-'+str(j)+' done predicting remote instances')
except ValueError as e:
print(e)
print(' skip this cluster')
y_pred = np.zeros(y.size)
y_pred.fill(np.nan)
y_pred_multi[:, j] = y_pred
else:
if len(yj) <= 15:
print (' '+str(len(yj))+' insts in cluster, <= 15, skip...')
y_pred = np.zeros(y.size)
y_pred.fill(np.nan)
y_pred_multi[:, j] = y_pred
if np.unique(yj).size == 1:
print (' warning, #unique class label(s) == 1')
y_pred = np.zeros(y.size)
y_pred.fill(np.unique(yj)[0])
y_pred_multi[:, j] = y_pred
model_a_clust = np.unique(yj)[0]
models.append(model_a_clust)
return [y_pred_multi, models]
# test
#[y_pred_multi, models] = f_use_each_clus_forWhole(X, y, clus, y_pred_whole, model)
#def f_dec_tab_4_bg_svm(X, y, clus):
# '''
# Calculate the decision table
# % This version changed from the cluster-cluster dec_mat to instance-cluster
# % dec_mat. This solution will avoid the case that if one cluster decision
# % is wrong, the entire cluster prediction is wrong, which is the reason
# % of instability. However, we cannot use a systematic evaluation criteria
# % such as AUC, I will try using the predicted prob at first.
#
# % This version 3 adds the support for fuzzy clustering - one instance may
# % belongs to more than one cluster.
# % This updated version also outputs the predicted values of y.
# % support more than 3 clusters
# % normalization take place in y_pred_self and y_pred_other, thus do not
# % need normalization when predict y_pred_ICE.
# % ixsp is another cluster form.
#
# corresponds to f_dec_tab_4_bg_svm() in Matlab version
# '''
# #n_clusters = len(clus)
# ## dec_mat stores the prediction error.
# #pred_mat=np.zeros((y.size, n_clusters+1)) #the extra col is for whole pred
# #
# ## k_fold of inner cross-validation
# #fo_inner = 10
# # --------------------------- WHOLE -------------------------
#
# # --------------------------- SELF -------------------------
def f_err_mat(X, y, clus, model):
'''
Calculate the decision table
corresponds to f_dec_tab_4_bg_svm() in Matlab version
'''
n_clusters = len(clus)
# err_mat stores the prediction error.
pred_prob_mat=np.zeros((y.size, n_clusters+1)) #the extra col is for whole pred
# col 0 to col n_clusters-1 store the predictions by each cluster
# the last col stores the pred by whole data
#models = []
# k_fold of inner cross-validation
fo_inner = 5
# --------------------------- WHOLE -------------------------
# Predict each cluster using the whole data.
model_whole = copy.deepcopy(model)
y_pred_whole = f_k_fo(X, y, model_whole, fo_inner)
model_whole.fit(X, y) # fit a model using all data rather than only a fold
pred_prob_mat[:, n_clusters] = y_pred_whole
print (' Done evaluation using whole instances')
print (' Start to evaluate each cluster ')
# --------------------------- SELF -------------------------
# predict the whole instances using each cluster data, while self
# prediction using 10-fold CV.
[y_pred_multi, models] = f_use_each_clus_forWhole(X, y, clus, \
y_pred_whole, model, fo_inner)
print (' Done evaluation using each cluster')
models.append(model_whole)
pred_prob_mat[:, 0:n_clusters] = y_pred_multi
# make a tmp array a stores y
tmp = np.matlib.repmat(y.reshape((y.size, 1)), 1, n_clusters+1)
err_mat = abs(pred_prob_mat - tmp )
print (' Done calculating error table and fitting ICE models')
return [err_mat, models]
"""
#mat = scipy.io.loadmat('/Volumes/Macintosh_HD/Users/zg/bio/3_ensembF/'+\
# '3_scripts/2017_4_4/data/names.mat')['names']
#mat = io.loadmat('/Users/zg/Desktop/a.mat')['names']
#test
pa = '/Volumes/Macintosh_HD/Users/zg/bio/3_ensembF/3_scripts/2017_4_4/'
X = scipy.io.loadmat(pa+'/data/data_all_pickle/30/data.mat')['X'] # 30:breast cancer
y = scipy.io.loadmat(pa+'/data/data_all_pickle/30/data.mat')['y']
#X = scipy.io.loadmat(pa+'/data/data_all_pickle/11/data.mat')['X'] # 11:mesothelioma
#y = scipy.io.loadmat(pa+'/data/data_all_pickle/11/data.mat')['y']
n_clus = 3
clus = f_fuzzy_rwr_clusters(X, n_clus)
tfs = f_clus_to_tfs(clus, len(X))
y = y.astype(float)
#model = BaggingClassifier(base_estimator = tree.DecisionTreeClassifier(), \
#model = BaggingClassifier(base_estimator = svm.LinearSVR(), \
#model = BaggingClassifier(base_estimator = svm.LinearSVC(), \
model = BaggingClassifier(base_estimator = svm.SVC(kernel='linear'), \
random_state=None, n_estimators = 100 )
[err_mat, models] = f_err_mat(X, y, clus, model)
"""
def f_err_2_decMat(err_mat, tfs, adv_whole=0.4, adv_self=0.5):
'''
Convert the err table to decision table.
'''
dec_mat = np.zeros(( len(err_mat), err_mat[0].size-1 ), dtype=bool)
# dec_ixs: for each instance, which clusters should be used.
dec_ixs = []
inst_clus = f_tfs_2_instClus(tfs)
for i in range(0, len(err_mat)):
# Matlab code:
#dec_row = dec_mat(cur_nb_ix, :);
#dec_row(:, end ) = dec_row(:, end ) - adv_whole;
#dec_row(:, clus_id) = dec_row(:, clus_id) - adv_self;
row = np.copy( err_mat[i, :] )
#print row
row[-1] = row[-1] - adv_whole
inst_i_clus = inst_clus[i]
if len(inst_i_clus) > 0:
row[inst_i_clus] = row[inst_i_clus] - adv_self
#print row
ix_good_clus = list( np.where( row < row[-1] ) [0] )
#print ix_good_clus
if len(ix_good_clus) > 0:
dec_mat[i, ix_good_clus] = True
dec_ixs.append(ix_good_clus)
else:
dec_ixs.append([])
return [dec_mat, dec_ixs]
#[dec_mat, dec_ixs] = f_err_2_decMat(err_mat, tfs)
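# Added worked example (numbers assumed): a cluster column is kept for an
# instance only if its error, minus the self-cluster advantage s where it
# applies, beats the whole-data error minus the advantage w.
'''
err_mat = np.array([[0.2, 0.9, 0.7],   # instance 0: cluster0, cluster1, whole
                    [0.8, 0.1, 0.6]])  # instance 1
tfs = np.array([[True, False],       # instance 0 belongs to cluster 0
                [False, True]])      # instance 1 belongs to cluster 1
# instance 0: adjusted row = [0.2-0.5, 0.9, 0.7-0.4] -> only cluster 0 beats 0.3
# instance 1: adjusted row = [0.8, 0.1-0.5, 0.6-0.4] -> only cluster 1 beats 0.2
f_err_2_decMat(err_mat, tfs)  # -> dec_ixs == [[0], [1]]
'''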
def f_ICE_tr_te_all_clus(X_tr, X_te, clus, models, doNorm=True):
'''
Use the training data to predict the testing data.
Use whole training data to predict
Use each cluster of training data to predict the testing data.
'''
y_pred_all = np.zeros(( len(X_te), len(clus) + 1 ))
# the first col is the prediction using the whole data
model_whole = models[-1]
y_pred_all[:, 0] = f_te(X_te, model_whole)
#y_pred_all[:, 0] = f_tr_te(X_tr, y_tr, X_te, model)
#print 'whole model good '
# start from the second col, the result is by each cluster
for i in range(0, len(clus)):
#Xi = X_tr[clus[i].flatten(), :]
#yi = y_tr[clus[i].flatten() ]
model_i = models[i]
#model_a_clust = copy.deepcopy(model)
try:
y_pred_te = f_te(X_te, model_i)
        except Exception:
if model_i == 0:
y_pred_te = np.zeros(len(X_te))
elif model_i == 1:
y_pred_te = np.ones(len(X_te))
else:
y_pred_te = np.zeros(len(X_te))
y_pred_te.fill(np.nan)
#except NotFittedError as e:
# print(repr(e))
# y_pred_te = np.zeros(len(X_te))
# y_pred_te.fill(np.nan)
#print 'model '+str(i)+' good '
#y_pred_te = f_tr_te(Xi, yi, X_te, model)
if doNorm == True:
templete = y_pred_all[:, 0]
target = y_pred_te
y_pred = f_quantileNorm(templete, target)
else:
y_pred = y_pred_te
y_pred_all[:, i+1] = y_pred
return y_pred_all
# test
#y_pred_all = f_ICE_tr_te_all_clus(X, X, clus, model)
def f_ICE_fit(X_tr, y_tr, n_clus, model, w=0.4, s=0.5):
    '''
    Fit the ICE model: run RWR-based fuzzy clustering on X_tr, train one model
    per cluster plus a whole-data model, and build the per-instance decision
    table (which clusters to trust for each training instance).
    '''
# rwr based fuzzy clustering
clus = f_fuzzy_rwr_clusters(X_tr, n_clus)
#print clus[0]
tfs = f_clus_to_tfs(clus, len(X_tr))
    # train models and calculate the error-decision tables
y_tr = y_tr.astype(float)
#model = BaggingClassifier(base_estimator = svm.SVC(kernel='linear'), \
# random_state=None, n_estimators = 100 )
[err_mat, models] = f_err_mat(X_tr, y_tr, clus, model)
[dec_mat, dec_ixs] = f_err_2_decMat(err_mat, tfs, w, s)
    print ('  Done calculating decision table')
return [clus, models, dec_ixs]
#def_deal_miss_v_1(d):
'''
deal with missing values by replacing them by mean.
'''
def f_ICE_fit_2(X_tr, y_tr, n_clus, model, w=0.4, s=0.5):
'''
    This version uses the error matrix to re-cluster the instances.
'''
# rwr based fuzzy clustering
clus = f_fuzzy_rwr_clusters(X_tr, n_clus)
#print clus[0]
tfs = f_clus_to_tfs(clus, len(X_tr))
    # train models and calculate the error-decision tables
y_tr = y_tr.astype(float)
#model = BaggingClassifier(base_estimator = svm.SVC(kernel='linear'), \
# random_state=None, n_estimators = 100 )
[err_mat, models] = f_err_mat(X_tr, y_tr, clus, model)
# ******************** re-clustering ********************
n_iter = 2
for i in range(0, n_iter):
clus = f_fuzzy_rwr_clusters(err_mat, n_clus)
tfs = f_clus_to_tfs(clus, len(X_tr))
[err_mat, models] = f_err_mat(X_tr, y_tr, clus, model)
# *******************************************************
[dec_mat, dec_ixs] = f_err_2_decMat(err_mat, tfs, w, s)
    print ('  Done calculating decision table')
return [clus, models, dec_ixs]
def f_ICE_pred(X_tr, y_tr, X_te, clus, dec_ixs, models,N=5,alpha=1,beta=1):
'''
    clus and inst_clus contain the same information in two layouts: clus lists
    the instance ids belonging to each cluster, while inst_clus stores, for
    each instance, which cluster(s) it belongs to.
    dec_ixs stores the good cluster(s) for each instance, which may include
    even a remote cluster; an entry in dec_ixs is only a subset of clusters,
    not the whole set.
'''
# the first col is the prediction using the whole data
# start from the second col, the result is by each cluster
y_pred_all = f_ICE_tr_te_all_clus(X_tr, X_te, clus, models)
y_pred_ICE = np.zeros( len(X_te) )
neighbour_mat = f_eu_dist2(X_tr, X_te)
# ---------- for each testing instance ----------
#n_partials = np.zeros( len(X_te) )
#n_wholes = np.zeros( len(X_te) )
for j in range(0, len(X_te) ):
# for each testing instance
# find the top 10 neighbors for each test instance
neighbour_col = neighbour_mat[:, j].flatten()
ix = np.argsort(neighbour_col )
ix = ix[::-1]
ix_top_neighbors = ix[0:N]
#print 'testing inst ' + str(j)
#print ' ix of top neighbors:'
#print ix_top_neighbors
# ---------- find all neighbors' picks ----------
clus_ids_to_use = []
nei_labels = []
for cur_nb in range(0, N):
# for each neighbour
# find each neighbour's pick
cur_nb_ix = ix_top_neighbors[cur_nb]
clus_id_to_use = list( dec_ixs[cur_nb_ix] )
clus_ids_to_use = clus_ids_to_use + clus_id_to_use
# also find neighbor's label. maybe will be used later as KNN pred
# instead of using whole to pred.
nei_labels = nei_labels + list( y_tr[cur_nb_ix] )
#print ' clus_ids_to_use:'
#print clus_ids_to_use
# cluster id + 1 to make the ix fit the col id in y_pred_all
a = clus_ids_to_use
a = list( np.array(a) + 1 )
clus_ids_to_use = a
# number of partial models used
n_partial = len(clus_ids_to_use)
# number of whole models used, based on parameters alpha, beta and N.
n_whole = int( round( alpha*n_partial + beta*N ) )
clus_ids_to_use = clus_ids_to_use + [0] * n_whole
#print ' clus_ids_to_use:'
#print clus_ids_to_use
#print nei_labels
y_pred_ICE[j] = np.nanmean(y_pred_all[j, clus_ids_to_use])
print ('Done predicting testing instances.')
return y_pred_ICE
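# Added note: per test instance the returned score is a nanmean over the
# selected partial-model columns plus n_whole repeats of the whole-data column,
# where n_whole = round(alpha*n_partial + beta*N); e.g. with N=5, alpha=beta=1
# and 3 cluster picks gathered from the neighbours, the whole-data prediction
# enters the average round(1*3 + 1*5) = 8 times.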
# test
# pa = '/Volumes/Macintosh_HD/Users/zg/bio/3_ensembF/3_scripts/2017_4_4/'
# pa = '/Users/zg/Dropbox/bio/ICE_2018/'
# pa = './'
pa = 'C:/Users/zg/Dropbox/bio/ICE_2018/'
n_clus = 100
w = 0.4
s = 0.5
N = 5
alpha = 1
beta = 1
k_fold = 10
aucs_ICE = []
aucs_whole = []
# f_res = pa + 'data/res_ICE_bg_svm_1_iter.txt'
#f_res = pa + 'data/res_ICE_bg_svm_py.txt'
f_res = pa + 'data/res_ICE_SVM_py.txt'
f = open(f_res, 'w')
#for j in range(1, 50):
for j in range(1, 49):
try:
X = scipy.io.loadmat(pa+'data/data_all/'+str(j)+'/data.mat')['X'] # 30:breast cancer
y = scipy.io.loadmat(pa+'data/data_all/'+str(j)+'/data.mat')['y']
#X = scipy.io.loadmat(pa+'/data/data_all_pickle/30/data.mat')['X'] # 30:breast cancer
#y = scipy.io.loadmat(pa+'/data/data_all_pickle/30/data.mat')['y']
#X = scipy.io.loadmat(pa+'/data/data_all_pickle/37/data.mat')['X'] # 37:congress
#y = scipy.io.loadmat(pa+'/data/data_all_pickle/37/data.mat')['y']
#imgplot = plt.imshow(ori_graph, interpolation='nearest', aspect='auto')
#plt.show()
#sim = np.corrcoef(X)
#np.fill_diagonal(sim, 0)
#n_clus = 100
#model = BaggingClassifier(base_estimator = svm.SVC(kernel='linear'), \
# random_state=None, n_estimators = 100 )
model = svm.SVC(kernel='linear', probability = True)
skf = StratifiedKFold(n_splits=k_fold)
skf.get_n_splits(X, y)
y_preds_ICE = np.zeros( y.size )
y_preds_whole = np.zeros( y.size )
fold_i = 1
for train_index, test_index in skf.split(X, y):
# print("TRAIN:", train_index, "TEST:", test_index)
X_tr, X_te = X[train_index], X[test_index]
y_tr, y_te = y[train_index], y[test_index]
[clus, models, dec_ixs] = f_ICE_fit(X_tr, y_tr, n_clus, model, w, s)
#[clus, models, dec_ixs] = f_ICE_fit_2(X_tr, y_tr, n_clus, model, w, s)
y_pred_ICE = f_ICE_pred(X_tr, y_tr, X_te, clus, dec_ixs, models,N,alpha,beta)
y_preds_ICE[test_index] = y_pred_ICE
y_pred_whole = f_tr_te(X_tr, y_tr, X_te, model)
y_preds_whole[test_index] = y_pred_whole
print( j)
print( 'fold ' + str(fold_i) + ' finished')
fold_i = fold_i + 1
auc_ICE = roc_auc_score(y.flatten(), y_preds_ICE.flatten() )
auc_whole = roc_auc_score(y.flatten(), y_preds_whole.flatten() )
print (auc_ICE, auc_whole)
aucs_ICE.append(auc_ICE)
aucs_whole.append(auc_whole)
f.write(str(j) + '\t' + str(auc_ICE) + ' \t ' + str(auc_whole) + '\n')
    except Exception as e:
        # skip datasets that fail to load or train, but report why
        print('dataset ' + str(j) + ' skipped: ' + str(e))
        continue
| [
"sklearn.model_selection.StratifiedKFold",
"numpy.array",
"numpy.argsort",
"numpy.nanmean",
"copy.deepcopy",
"numpy.where",
"numpy.float64",
"numpy.sort",
"numpy.argmin",
"numpy.eye",
"numpy.argmax",
"numpy.fill_diagonal",
"numpy.finfo",
"sklearn.svm.SVC",
"numpy.copy",
"numpy.unique",
"numpy.logical_or",
"numpy.sum",
"numpy.zeros",
"numpy.argwhere",
"scipy.spatial.distance.euclidean"
] | [((4652, 4676), 'numpy.fill_diagonal', 'np.fill_diagonal', (['sim', '(0)'], {}), '(sim, 0)\n', (4668, 4676), True, 'import numpy as np\n'), ((5076, 5111), 'numpy.logical_or', 'np.logical_or', (['aRankNet', 'aRankNet.T'], {}), '(aRankNet, aRankNet.T)\n', (5089, 5111), True, 'import numpy as np\n'), ((5200, 5233), 'numpy.fill_diagonal', 'np.fill_diagonal', (['aRankNet', '(False)'], {}), '(aRankNet, False)\n', (5216, 5233), True, 'import numpy as np\n'), ((6446, 6468), 'numpy.sum', 'np.sum', (['rw_mat'], {'axis': '(1)'}), '(rw_mat, axis=1)\n', (6452, 6468), True, 'import numpy as np\n'), ((6536, 6548), 'numpy.argmax', 'np.argmax', (['a'], {}), '(a)\n', (6545, 6548), True, 'import numpy as np\n'), ((7966, 7990), 'numpy.fill_diagonal', 'np.fill_diagonal', (['sim', '(0)'], {}), '(sim, 0)\n', (7982, 7990), True, 'import numpy as np\n'), ((9931, 9954), 'numpy.fill_diagonal', 'np.fill_diagonal', (['rw', '(0)'], {}), '(rw, 0)\n', (9947, 9954), True, 'import numpy as np\n'), ((10224, 10254), 'numpy.fill_diagonal', 'np.fill_diagonal', (['rw_net', '(True)'], {}), '(rw_net, True)\n', (10240, 10254), True, 'import numpy as np\n'), ((10679, 10700), 'numpy.array', 'np.array', (['ixs_centers'], {}), '(ixs_centers)\n', (10687, 10700), True, 'import numpy as np\n'), ((13295, 13315), 'copy.deepcopy', 'copy.deepcopy', (['model'], {}), '(model)\n', (13308, 13315), False, 'import copy\n'), ((13802, 13822), 'copy.deepcopy', 'copy.deepcopy', (['model'], {}), '(model)\n', (13815, 13822), False, 'import copy\n'), ((14187, 14203), 'numpy.zeros', 'np.zeros', (['y.size'], {}), '(y.size)\n', (14195, 14203), True, 'import numpy as np\n'), ((14219, 14284), 'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', ([], {'n_splits': 'k_fold', 'random_state': 'None', 'shuffle': '(True)'}), '(n_splits=k_fold, random_state=None, shuffle=True)\n', (14234, 14284), False, 'from sklearn.model_selection import StratifiedKFold\n'), ((16031, 16067), 'numpy.argsort', 'np.argsort', (['target'], {'kind': '"""mergesort"""'}), "(target, kind='mergesort')\n", (16041, 16067), True, 'import numpy as np\n'), ((16086, 16124), 'numpy.argsort', 'np.argsort', (['templete'], {'kind': '"""mergesort"""'}), "(templete, kind='mergesort')\n", (16096, 16124), True, 'import numpy as np\n'), ((17127, 17157), 'numpy.zeros', 'np.zeros', (['(y.size, n_clusters)'], {}), '((y.size, n_clusters))\n', (17135, 17157), True, 'import numpy as np\n'), ((21893, 21927), 'numpy.zeros', 'np.zeros', (['(y.size, n_clusters + 1)'], {}), '((y.size, n_clusters + 1))\n', (21901, 21927), True, 'import numpy as np\n'), ((22300, 22320), 'copy.deepcopy', 'copy.deepcopy', (['model'], {}), '(model)\n', (22313, 22320), False, 'import copy\n'), ((2091, 2100), 'numpy.eye', 'np.eye', (['n'], {}), '(n)\n', (2097, 2100), True, 'import numpy as np\n'), ((4734, 4767), 'numpy.argsort', 'np.argsort', (['sim'], {'kind': '"""mergesort"""'}), "(sim, kind='mergesort')\n", (4744, 4767), True, 'import numpy as np\n'), ((6683, 6702), 'numpy.sum', 'np.sum', (['tmp'], {'axis': '(1)'}), '(tmp, axis=1)\n', (6689, 6702), True, 'import numpy as np\n'), ((6784, 6796), 'numpy.argmin', 'np.argmin', (['b'], {}), '(b)\n', (6793, 6796), True, 'import numpy as np\n'), ((6971, 6983), 'numpy.sort', 'np.sort', (['tmp'], {}), '(tmp)\n', (6978, 6983), True, 'import numpy as np\n'), ((10535, 10551), 'numpy.argsort', 'np.argsort', (['lens'], {}), '(lens)\n', (10545, 10551), True, 'import numpy as np\n'), ((17338, 17358), 'copy.deepcopy', 'copy.deepcopy', (['model'], {}), '(model)\n', (17351, 17358), False, 
'import copy\n'), ((24935, 24957), 'numpy.copy', 'np.copy', (['err_mat[i, :]'], {}), '(err_mat[i, :])\n', (24942, 24957), True, 'import numpy as np\n'), ((30166, 30191), 'numpy.argsort', 'np.argsort', (['neighbour_col'], {}), '(neighbour_col)\n', (30176, 30191), True, 'import numpy as np\n'), ((31645, 31687), 'numpy.nanmean', 'np.nanmean', (['y_pred_all[j, clus_ids_to_use]'], {}), '(y_pred_all[j, clus_ids_to_use])\n', (31655, 31687), True, 'import numpy as np\n'), ((33134, 33176), 'sklearn.svm.SVC', 'svm.SVC', ([], {'kernel': '"""linear"""', 'probability': '(True)'}), "(kernel='linear', probability=True)\n", (33141, 33176), False, 'from sklearn import svm\n'), ((33202, 33234), 'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', ([], {'n_splits': 'k_fold'}), '(n_splits=k_fold)\n', (33217, 33234), False, 'from sklearn.model_selection import StratifiedKFold\n'), ((33297, 33313), 'numpy.zeros', 'np.zeros', (['y.size'], {}), '(y.size)\n', (33305, 33313), True, 'import numpy as np\n'), ((33340, 33356), 'numpy.zeros', 'np.zeros', (['y.size'], {}), '(y.size)\n', (33348, 33356), True, 'import numpy as np\n'), ((7860, 7890), 'scipy.spatial.distance.euclidean', 'distance.euclidean', (['X[i]', 'X[j]'], {}), '(X[i], X[j])\n', (7878, 7890), False, 'from scipy.spatial import distance\n'), ((8283, 8315), 'scipy.spatial.distance.euclidean', 'distance.euclidean', (['X1[i]', 'X2[j]'], {}), '(X1[i], X2[j])\n', (8301, 8315), False, 'from scipy.spatial import distance\n'), ((9365, 9395), 'scipy.spatial.distance.euclidean', 'distance.euclidean', (['X[i]', 'X[j]'], {}), '(X[i], X[j])\n', (9383, 9395), False, 'from scipy.spatial import distance\n'), ((3839, 3854), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (3847, 3854), True, 'import numpy as np\n'), ((4812, 4843), 'numpy.argsort', 'np.argsort', (['I'], {'kind': '"""mergesort"""'}), "(I, kind='mergesort')\n", (4822, 4843), True, 'import numpy as np\n'), ((10314, 10352), 'numpy.argwhere', 'np.argwhere', (['rw_net[:, ixs_centers[i]]'], {}), '(rw_net[:, ixs_centers[i]])\n', (10325, 10352), True, 'import numpy as np\n'), ((12060, 12079), 'numpy.where', 'np.where', (['tfs[i, :]'], {}), '(tfs[i, :])\n', (12068, 12079), True, 'import numpy as np\n'), ((14585, 14600), 'numpy.unique', 'np.unique', (['y_tr'], {}), '(y_tr)\n', (14594, 14600), True, 'import numpy as np\n'), ((19729, 19745), 'numpy.zeros', 'np.zeros', (['y.size'], {}), '(y.size)\n', (19737, 19745), True, 'import numpy as np\n'), ((19969, 19985), 'numpy.zeros', 'np.zeros', (['y.size'], {}), '(y.size)\n', (19977, 19985), True, 'import numpy as np\n'), ((25210, 25233), 'numpy.where', 'np.where', (['(row < row[-1])'], {}), '(row < row[-1])\n', (25218, 25233), True, 'import numpy as np\n'), ((31185, 31196), 'numpy.array', 'np.array', (['a'], {}), '(a)\n', (31193, 31196), True, 'import numpy as np\n'), ((14784, 14799), 'numpy.unique', 'np.unique', (['y_tr'], {}), '(y_tr)\n', (14793, 14799), True, 'import numpy as np\n'), ((17472, 17485), 'numpy.unique', 'np.unique', (['yj'], {}), '(yj)\n', (17481, 17485), True, 'import numpy as np\n'), ((19468, 19484), 'numpy.zeros', 'np.zeros', (['y.size'], {}), '(y.size)\n', (19476, 19484), True, 'import numpy as np\n'), ((19854, 19867), 'numpy.unique', 'np.unique', (['yj'], {}), '(yj)\n', (19863, 19867), True, 'import numpy as np\n'), ((20126, 20139), 'numpy.unique', 'np.unique', (['yj'], {}), '(yj)\n', (20135, 20139), True, 'import numpy as np\n'), ((20014, 20027), 'numpy.unique', 'np.unique', (['yj'], {}), '(yj)\n', (20023, 20027), True, 'import numpy as 
np\n'), ((3443, 3456), 'numpy.float64', 'np.float64', (['A'], {}), '(A)\n', (3453, 3456), True, 'import numpy as np\n')] |
#########################
#########################
# Need to account for limit in input period
#########################
#########################
# Baseline M67 long script -- NO crowding
# New script copied from Quest -- we want to take p and ecc from each population (all, obs, rec) and write them to separate files
# Doing this so we don't have to re-run the analysis each time
# A separate script can then be written for the p-ecc plots
# Quest paths are used in this version of the script
import pandas as pd
import numpy as np
import os
from astropy.coordinates import SkyCoord
from astropy import units, constants
from astropy.modeling import models, fitting
import scipy.stats
from scipy.integrate import quad
#for Quest
import matplotlib
matplotlib.use('Agg')
doIndividualPlots = True
from matplotlib import pyplot as plt
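# Helper: count the number of lines in a file; used in the __main__ block to skip
# input files that are too short to contain a header plus data rows.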
def file_len(fname):
i = 0
with open(fname) as f:
for i, l in enumerate(f):
pass
return i + 1
def getPhs(sigma, m1=1*units.solMass, m2=1*units.solMass, m3=0.5*units.solMass):
Phs = np.pi*constants.G/np.sqrt(2.)*(m1*m2/m3)**(3./2.)*(m1 + m2)**(-0.5)*sigma**(-3.)
return Phs.decompose().to(units.day)
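# getPhs evaluates the hard-soft boundary period
#   P_hs = (pi*G/sqrt(2)) * (m1*m2/m3)**(3/2) * (m1+m2)**(-1/2) * sigma**(-3)
# for a binary (m1, m2) perturbed by a typical cluster star m3 at velocity dispersion sigma.
# Minimal usage sketch (illustrative values only, kept commented out so the script is unchanged):
# print(getPhs(1.*units.km/units.s))  # boundary period in days for two 1 Msun stars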
#similar to the field binary fraction, but limited by the hard-soft boundary
def fitRagfb():
x = [0.05, 0.1, 1, 8, 15] #estimates of midpoints in bins, and using this: https://sites.uni.edu/morgans/astro/course/Notes/section2/spectralmasses.html
y = [0.20, 0.35, 0.50, 0.70, 0.75]
init = models.PowerLaw1D(amplitude=0.5, x_0=1, alpha=-1.)
fitter = fitting.LevMarLSQFitter()
fit = fitter(init, x, y)
return fit
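# fitRagfb fits an astropy PowerLaw1D model to the binary-fraction estimates above
# (presumably the Raghavan et al. field multiplicity data, per the name); calling the
# returned model with a primary mass in Msun gives the interpolated binary fraction.
# Illustrative usage (commented out; the real call is in the __main__ block below):
# fbFit = fitRagfb()
# print(fbFit(1.0))  # approximate field binary fraction for a 1 Msun primary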
def RagNormal(x, cdf = False):
mean = 5.03
std = 2.28
if (cdf):
return scipy.stats.norm.cdf(x,mean,std)
return scipy.stats.norm.pdf(x,mean,std)
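# RagNormal is a log-normal period distribution in log10(P/days) (mean 5.03, sigma 2.28);
# with cdf=True it returns the cumulative fraction of binaries with log10(P) below x.
# Sketch of the normalization used in the __main__ block (commented out, illustrative only):
# frac_P_lt_10yr = quad(RagNormal, -20, np.log10(365*10.))[0] / quad(RagNormal, -20, 20)[0]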
def saveHist(histAll, histObs, histRec, bin_edges, xtitle, fname, filters = ['u_', 'g_', 'r_', 'i_', 'z_', 'y_','all']):
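	# Plot the all/observable/recovered samples as normalized step histograms (one
	# recovered curve per filter), save the figure as a PDF under ./plots/, and
	# write the raw bin values to a matching CSV under ./eblsst_files/.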
c1 = '#5687A6' #Dali Blue (Andrew's AAS Poster)
	c2 = '#A62B1F' #Dali Red
c3 = '#BF8A26' #Dali Beige
fig,ax1 = plt.subplots(figsize=(8,6), sharex=True)#can change to include cdf with ax1, ax2
histAll = np.insert(histAll,0,0)
histObs = np.insert(histObs,0,0)
for f in filters:
histRec[f] = np.insert(histRec[f],0,0)
#PDF
ax1.step(bin_edges, histAll/np.sum(histAll), color=c1)
ax1.step(bin_edges, histObs/np.sum(histObs), color=c2)
for f in filters:
lw = 1
if (f == 'all'):
lw = 0.5
ax1.step(bin_edges, histRec[f]/np.sum(histRec[f]), color=c3, linewidth=lw)
ax1.set_ylabel('PDF')
ax1.set_yscale('log')
	ax1.set_title('M67 - Baseline', fontsize = 16)
ax1.set_xlabel(xtitle)
#CDF
#cdfAll = []
#cdfObs = []
#cdfRec = dict()
#for f in filters:
# cdfRec[f] = []
# for i in range(len(histAll)):
# cdfAll.append(np.sum(histAll[:i])/np.sum(histAll))
# for i in range(len(histObs)):
# cdfObs.append(np.sum(histObs[:i])/np.sum(histObs))
# for f in filters:
# for i in range(len(histRec[f])):
# cdfRec[f].append(np.sum(histRec[f][:i])/np.sum(histRec[f]))
#ax2.step(bin_edges, cdfAll, color=c1)
#ax2.step(bin_edges, cdfObs, color=c2)
#for f in filters:
# lw = 1
# if (f == 'all'):
# lw = 0.5
# ax2.step(bin_edges, cdfRec[f], color=c3, linewidth=lw)
#ax2.set_ylabel('CDF')
#ax2.set_xlabel(xtitle)
fig.subplots_adjust(hspace=0)
fig.savefig('./plots/' + fname+'.pdf',format='pdf', bbox_inches = 'tight')
#write to a text file
with open('./eblsst_files/' + fname+'.csv','w') as fl:
outline = 'binEdges,histAll,histObs'
for f in filters:
outline += ','+f+'histRec'
outline += '\n'
fl.write(outline)
for i in range(len(bin_edges)):
outline = str(bin_edges[i])+','+str(histAll[i])+','+str(histObs[i])
for f in filters:
outline += ','+str(histRec[f][i])
outline += '\n'
fl.write(outline)
if __name__ == "__main__":
filters = ['u_', 'g_', 'r_', 'i_', 'z_', 'y_', 'all']
#get the Raghavan binary fraction fit
fbFit= fitRagfb()
print(fbFit)
#to normalize
intAll, err = quad(RagNormal, -20, 20)
intCut, err = quad(RagNormal, -20, np.log10(365*10.))
intNorm = intCut/intAll
#cutoff in percent error for "recovered"
Pcut = 0.1
#assumed mean stellar mass
mMean = 0.5
#minimum number of lines to consider in file
Nlim = 3
if (doIndividualPlots):
fmass, axmass = plt.subplots()
fqrat, axqrat = plt.subplots()
fecc, axecc = plt.subplots()
flper, axlper = plt.subplots()
fdist, axdist = plt.subplots()
fmag, axmag = plt.subplots()
frad, axrad = plt.subplots()
#bins for all the histograms
Nbins = 25
mbins = np.arange(0,10, 0.1, dtype='float')
qbins = np.arange(0,1, 0.1, dtype='float')
ebins = np.arange(0, 1.05, 0.05, dtype='float')
lpbins = np.arange(-2, 10, 0.5, dtype='float')
dbins = np.arange(0, 40, 1, dtype='float')
magbins = np.arange(11, 25, 1, dtype='float')
rbins = np.arange(0, 100, 0.2, dtype='float')
#blanks for the histograms
#All
m1hAll = np.zeros_like(mbins)[1:]
qhAll = np.zeros_like(qbins)[1:]
ehAll = np.zeros_like(ebins)[1:]
lphAll = np.zeros_like(lpbins)[1:]
dhAll = np.zeros_like(dbins)[1:]
maghAll = np.zeros_like(magbins)[1:]
rhAll = np.zeros_like(rbins)[1:]
#Observable
m1hObs = np.zeros_like(mbins)[1:]
qhObs = np.zeros_like(qbins)[1:]
ehObs = np.zeros_like(ebins)[1:]
lphObs = np.zeros_like(lpbins)[1:]
dhObs = np.zeros_like(dbins)[1:]
maghObs = np.zeros_like(magbins)[1:]
rhObs = np.zeros_like(rbins)[1:]
#Recovered
m1hRec = dict()
qhRec = dict()
ehRec = dict()
lphRec = dict()
dhRec = dict()
maghRec = dict()
rhRec = dict()
for f in filters:
m1hRec[f] = np.zeros_like(mbins)[1:]
qhRec[f] = np.zeros_like(qbins)[1:]
ehRec[f] = np.zeros_like(ebins)[1:]
lphRec[f] = np.zeros_like(lpbins)[1:]
dhRec[f] = np.zeros_like(dbins)[1:]
maghRec[f] = np.zeros_like(magbins)[1:]
rhRec[f] = np.zeros_like(rbins)[1:]
RA = []
Dec = []
recFrac = []
recN = []
rawN = []
obsN = []
fileN = []
fileObsN = []
fileRecN = []
allNPrsa = []
obsNPrsa = []
recNPrsa = []
# Lists for period and eccentricity for Andrew's circularization plots
eccAll = []
eccObs = []
eccRec = []
pAll = []
pObs = []
pRec = []
# Using prsa dataframes for these lists because of period cutoff at 1000 days
	# Dataframes to write to files later; three files in total, one for each sub-population (all, obs, rec) - append everything to these
peccAll = pd.DataFrame(columns = ['e', 'p'])
peccObs = pd.DataFrame(columns = ['e', 'p'])
peccRec = pd.DataFrame(columns = ['e', 'p'])
#Read in all the data and make the histograms
d = "./input_files/"
files = os.listdir(d)
IDs = []
for i, f in enumerate(files):
print(round(i/len(files),4), f)
fl = file_len(d+f)
if (fl >= 4):
#read in the header
header = pd.read_csv(d+f, nrows=1)
######################
#NEED TO ACCOUNT FOR THE BINARY FRACTION when combining histograms
#####################
Nmult = header['clusterMass'][0]/mMean
#Nmult = 1.
RA.append(header['OpSimRA'])
Dec.append(header['OpSimDec'])
#read in rest of the file
data = pd.read_csv(d+f, header = 2).fillna(-999)
rF = 0.
rN = 0.
Nrec = 0.
Nobs = 0.
raN = 0.
obN = 0.
fiN = 0.
fioN = 0.
firN = 0.
NallPrsa = 0.
NobsPrsa = 0.
NrecPrsa = 0.
Nall = len(data.index)/intNorm ###is this correct? (and the only place I need to normalize?)
prsa = data.loc[(data['appMagMean_r'] <= 19.5) & (data['appMagMean_r'] > 15.8) & (data['p'] < 1000) & (data['p'] > 0.5)]
# Appending for Andrew
eccAll.append(prsa['e'].values)
pAll.append(prsa['p'].values)
NallPrsa = len(prsa.index)
if (Nall >= Nlim):
#create histograms
#All
m1hAll0, m1b = np.histogram(data["m1"], bins=mbins)
qhAll0, qb = np.histogram(data["m2"]/data["m1"], bins=qbins)
ehAll0, eb = np.histogram(data["e"], bins=ebins)
lphAll0, lpb = np.histogram(np.ma.log10(data["p"].values).filled(-999), bins=lpbins)
dhAll0, db = np.histogram(data["d"], bins=dbins)
maghAll0, magb = np.histogram(data["appMagMean_r"], bins=magbins)
rhAll0, rb = np.histogram(data["r2"]/data["r1"], bins=rbins)
if (doIndividualPlots):
axmass.step(m1b[0:-1], m1hAll0/np.sum(m1hAll0), color='black', alpha=0.1)
axqrat.step(qb[0:-1], qhAll0/np.sum(qhAll0), color='black', alpha=0.1)
axecc.step(eb[0:-1], ehAll0/np.sum(ehAll0), color='black', alpha=0.1)
axlper.step(lpb[0:-1], lphAll0/np.sum(lphAll0), color='black', alpha=0.1)
axdist.step(db[0:-1], dhAll0/np.sum(dhAll0), color='black', alpha=0.1)
axmag.step(magb[0:-1], maghAll0/np.sum(maghAll0), color='black', alpha=0.1)
axrad.step(rb[0:-1], rhAll0/np.sum(rhAll0), color='black', alpha=0.1)
#account for the binary fraction, as a function of mass
dm1 = np.diff(m1b)
m1val = m1b[:-1] + dm1/2.
fb = np.sum(m1hAll0/len(data.index)*fbFit(m1val))
#account for the hard-soft boundary
Phs = getPhs(header['clusterVdisp'].iloc[0]*units.km/units.s).to(units.day).value
fb *= RagNormal(np.log10(Phs), cdf = True)
print("fb, Phs = ", fb, Phs)
Nmult *= fb
m1hAll += m1hAll0/Nall*Nmult
qhAll += qhAll0/Nall*Nmult
ehAll += ehAll0/Nall*Nmult
lphAll += lphAll0/Nall*Nmult
dhAll += dhAll0/Nall*Nmult
maghAll += maghAll0/Nall*Nmult
rhAll += rhAll0/Nall*Nmult
#Obs
obs = data.loc[data['LSM_PERIOD'] != -999]
Nobs = len(obs.index)
prsaObs = data.loc[(data['appMagMean_r'] <= 19.5) & (data['appMagMean_r'] > 15.8) & (data['p'] < 1000) & (data['p'] >0.5) & (data['LSM_PERIOD'] != -999)]
NobsPrsa = len(prsaObs.index)
# Appending for Andrew's files
eccObs.append(prsaObs['e'].values)
pObs.append(prsaObs['p'].values)
if (Nobs >= Nlim):
m1hObs0, m1b = np.histogram(obs["m1"], bins=mbins)
qhObs0, qb = np.histogram(obs["m2"]/obs["m1"], bins=qbins)
ehObs0, eb = np.histogram(obs["e"], bins=ebins)
lphObs0, lpb = np.histogram(np.ma.log10(obs["p"].values).filled(-999), bins=lpbins)
dhObs0, db = np.histogram(obs["d"], bins=dbins)
maghObs0, magb = np.histogram(obs["appMagMean_r"], bins=magbins)
rhObs0, rb = np.histogram(obs["r2"]/obs["r1"], bins=rbins)
m1hObs += m1hObs0/Nall*Nmult
qhObs += qhObs0/Nall*Nmult
ehObs += ehObs0/Nall*Nmult
lphObs += lphObs0/Nall*Nmult
dhObs += dhObs0/Nall*Nmult
maghObs += maghObs0/Nall*Nmult
rhObs += rhObs0/Nall*Nmult
#Rec
recCombined = pd.DataFrame()
prsaRecCombined = pd.DataFrame()
for filt in filters:
key = filt+'LSS_PERIOD'
if (filt == 'all'):
key = 'LSM_PERIOD'
fullP = abs(data[key] - data['p'])/data['p']
halfP = abs(data[key] - 0.5*data['p'])/(0.5*data['p'])
twiceP = abs(data[key] - 2.*data['p'])/(2.*data['p'])
rec = data.loc[(data[key] != -999) & ( (fullP < Pcut) | (halfP < Pcut) | (twiceP < Pcut))]
prsaRec = data.loc[(data['appMagMean_r'] <= 19.5) & (data['appMagMean_r'] >15.8) & (data['p'] < 1000) & (data['p'] >0.5) & (data['LSM_PERIOD'] != -999) & ( (fullP < Pcut) | (halfP < Pcut) | (twiceP < Pcut))]
Nrec = len(rec.index)
#I'd like to account for all filters here to have more accurate numbers
recCombined = recCombined.append(rec)
prsaRecCombined = prsaRecCombined.append(prsaRec)
# Going to use prsaRecCombined for ecc-p plots to account for all filters
eccRec.append(prsaRec['e'].values)
pRec.append(prsaRec['p'].values)
if (filt == 'all'):
recCombined.drop_duplicates(inplace=True)
prsaRecCombined.drop_duplicates(inplace=True)
if (Nrec >= Nlim):
m1hRec0, m1b = np.histogram(rec["m1"], bins=mbins)
qhRec0, qb = np.histogram(rec["m2"]/rec["m1"], bins=qbins)
ehRec0, eb = np.histogram(rec["e"], bins=ebins)
lphRec0, lpb = np.histogram(np.ma.log10(rec["p"].values).filled(-999), bins=lpbins)
dhRec0, db = np.histogram(rec["d"], bins=dbins)
maghRec0, magb = np.histogram(rec["appMagMean_r"], bins=magbins)
rhRec0, rb = np.histogram(rec["r2"]/rec["r1"], bins=rbins)
m1hRec[filt] += m1hRec0/Nall*Nmult
qhRec[filt] += qhRec0/Nall*Nmult
ehRec[filt] += ehRec0/Nall*Nmult
lphRec[filt] += lphRec0/Nall*Nmult
dhRec[filt] += dhRec0/Nall*Nmult
maghRec[filt] += maghRec0/Nall*Nmult
rhRec[filt] += rhRec0/Nall*Nmult
#for the mollweide
if (filt == 'all'):
Nrec = len(recCombined.index)
rF = Nrec/Nall
rN = Nrec/Nall*Nmult
raN = Nmult
obN = Nobs/Nall*Nmult
fiN = Nall
fioN = Nobs
firN = Nrec
NrecPrsa = len(prsaRecCombined.index)
NrecPrsa = NrecPrsa/Nall*Nmult
NobsPrsa = NobsPrsa/Nall*Nmult
NallPrsa = NallPrsa/Nall*Nmult
recFrac.append(rF)
recN.append(rN)
rawN.append(raN)
obsN.append(obN)
fileN.append(fiN)
fileObsN.append(fioN)
fileRecN.append(firN)
allNPrsa.append(NallPrsa)
obsNPrsa.append(NobsPrsa)
recNPrsa.append(NrecPrsa)
#print(np.sum(lphRec), np.sum(recN), np.sum(lphRec)/np.sum(recN), np.sum(lphRec0), Nrec, np.sum(lphRec0)/Nrec, np.sum(lphObs), np.sum(obsN), np.sum(lphObs)/np.sum(obsN))
# Concatenating p and ecc lists
eccAll = np.concatenate(eccAll)
eccObs = np.concatenate(eccObs)
eccRec = np.concatenate(eccRec)
pAll = np.concatenate(pAll)
pObs = np.concatenate(pObs)
pRec = np.concatenate(pRec)
# print('Ecc lists:', eccAll, eccObs, eccRec)
# print('P lists:', pAll, pObs, pRec)
# Appending lists with all the p/ecc values to our dataframes
# All dataframe
peccAll['e'] = eccAll
peccAll['p'] = pAll
# Observable dataframe
peccObs['e'] = eccObs
peccObs['p'] = pObs
# Recovered dataframe
peccRec['e'] = eccRec
peccRec['p'] = pRec
# print('Final Dataframes:', peccAll, peccObs, peccRec)
# print(peccRec.columns)
# 3 letter code corresponds to scenario (OC/GC, baseline/colossus, crowding/no crowding)
peccAll.to_csv('./pecc/all-M67BN-ecc-p.csv', header = ['e', 'p'])
peccObs.to_csv('./pecc/obs-M67BN-ecc-p.csv', header = ['e', 'p'])
peccRec.to_csv('./pecc/rec-M67BN-ecc-p.csv', header = ['e', 'p'])
#plot and save the histograms
saveHist(m1hAll, m1hObs, m1hRec, m1b, 'm1 (Msolar)', 'EBLSST_m1hist')
saveHist(qhAll, qhObs, qhRec, qb, 'q (m2/m1)', 'EBLSST_qhist')
saveHist(ehAll, ehObs, ehRec, eb, 'e', 'EBLSST_ehist')
saveHist(lphAll, lphObs, lphRec, lpb, 'log(P [days])', 'EBLSST_lphist')
saveHist(dhAll, dhObs, dhRec, db, 'd (kpc)', 'EBLSST_dhist')
saveHist(maghAll, maghObs, maghRec, magb, 'mag', 'EBLSST_maghist')
saveHist(rhAll, rhObs, rhRec, rb, 'r2/r1', 'EBLSST_rhist')
#make the mollweide
coords = SkyCoord(RA, Dec, unit=(units.degree, units.degree),frame='icrs')
lGal = coords.galactic.l.wrap_at(180.*units.degree).degree
bGal = coords.galactic.b.wrap_at(180.*units.degree).degree
RAwrap = coords.ra.wrap_at(180.*units.degree).degree
Decwrap = coords.dec.wrap_at(180.*units.degree).degree
f, ax = plt.subplots(subplot_kw={'projection': "mollweide"}, figsize=(8,5))
ax.grid(True)
#ax.set_xlabel(r"$l$",fontsize=16)
#ax.set_ylabel(r"$b$",fontsize=16)
#mlw = ax.scatter(lGal.ravel()*np.pi/180., bGal.ravel()*np.pi/180., c=np.log10(np.array(recFrac)*100.), cmap='viridis_r', s = 4)
ax.set_xlabel("RA",fontsize=16)
ax.set_ylabel("Dec",fontsize=16)
mlw = ax.scatter(np.array(RAwrap).ravel()*np.pi/180., np.array(Decwrap).ravel()*np.pi/180., c=np.array(recFrac)*100., cmap='viridis_r', s = 4)
cbar = f.colorbar(mlw, shrink=0.7)
cbar.set_label(r'% recovered')
f.savefig('./plots/' + 'mollweide_pct.pdf',format='pdf', bbox_inches = 'tight')
f, ax = plt.subplots(subplot_kw={'projection': "mollweide"}, figsize=(8,5))
ax.grid(True)
#ax.set_xlabel(r"$l$",fontsize=16)
#ax.set_ylabel(r"$b$",fontsize=16)
#mlw = ax.scatter(lGal.ravel()*np.pi/180., bGal.ravel()*np.pi/180., c=np.log10(np.array(recN)), cmap='viridis_r', s = 4)
ax.set_xlabel("RA",fontsize=16)
ax.set_ylabel("Dec",fontsize=16)
mlw = ax.scatter(np.array(RAwrap).ravel()*np.pi/180., np.array(Decwrap).ravel()*np.pi/180., c=np.log10(np.array(recN)), cmap='viridis_r', s = 4)
cbar = f.colorbar(mlw, shrink=0.7)
cbar.set_label(r'log10(N) recovered')
f.savefig('./plots/' + 'mollweide_N.pdf',format='pdf', bbox_inches = 'tight')
if (doIndividualPlots):
fmass.savefig('./plots/' + 'massPDFall.pdf',format='pdf', bbox_inches = 'tight')
fqrat.savefig('./plots/' + 'qPDFall.pdf',format='pdf', bbox_inches = 'tight')
fecc.savefig('./plots/' + 'eccPDFall.pdf',format='pdf', bbox_inches = 'tight')
flper.savefig('./plots/' + 'lperPDFall.pdf',format='pdf', bbox_inches = 'tight')
fdist.savefig('./plots/' + 'distPDFall.pdf',format='pdf', bbox_inches = 'tight')
fmag.savefig('./plots/' + 'magPDFall.pdf',format='pdf', bbox_inches = 'tight')
frad.savefig('./plots/' + 'radPDFall.pdf',format='pdf', bbox_inches = 'tight')
print("###################")
print("number of binaries in input files (raw, log):",np.sum(fileN), np.log10(np.sum(fileN)))
print("number of binaries in tested with gatspy (raw, log):",np.sum(fileObsN), np.log10(np.sum(fileObsN)))
print("number of binaries in recovered with gatspy (raw, log):",np.sum(fileRecN), np.log10(np.sum(fileRecN)))
print("recovered/observable*100 with gatspy:",np.sum(fileRecN)/np.sum(fileObsN)*100.)
print("###################")
print("total in sample (raw, log):",np.sum(rawN), np.log10(np.sum(rawN)))
print("total observable (raw, log):",np.sum(obsN), np.log10(np.sum(obsN)))
print("total recovered (raw, log):",np.sum(recN), np.log10(np.sum(recN)))
print("recovered/observable*100:",np.sum(recN)/np.sum(obsN)*100.)
print("###################")
print("total in Prsa 15.8<r<19.5 P<1000d sample (raw, log):",np.sum(allNPrsa), np.log10(np.sum(allNPrsa)))
print("total observable in Prsa 15.8<r<19.5 P<1000d sample (raw, log):",np.sum(obsNPrsa), np.log10(np.sum(obsNPrsa)))
print("total recovered in Prsa 15.8<r<19.5 P<1000d sample (raw, log):",np.sum(recNPrsa), np.log10(np.sum(recNPrsa)))
print("Prsa 15.8<r<19.5 P<1000d rec/obs*100:",np.sum(recNPrsa)/np.sum(obsNPrsa)*100.)
| [
"numpy.log10",
"numpy.sqrt",
"pandas.read_csv",
"numpy.array",
"numpy.arange",
"numpy.histogram",
"astropy.modeling.models.PowerLaw1D",
"os.listdir",
"numpy.diff",
"numpy.concatenate",
"pandas.DataFrame",
"matplotlib.use",
"scipy.integrate.quad",
"astropy.modeling.fitting.LevMarLSQFitter",
"numpy.insert",
"astropy.coordinates.SkyCoord",
"numpy.sum",
"numpy.ma.log10",
"numpy.zeros_like",
"matplotlib.pyplot.subplots"
] | [((710, 731), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (724, 731), False, 'import matplotlib\n'), ((1380, 1431), 'astropy.modeling.models.PowerLaw1D', 'models.PowerLaw1D', ([], {'amplitude': '(0.5)', 'x_0': '(1)', 'alpha': '(-1.0)'}), '(amplitude=0.5, x_0=1, alpha=-1.0)\n', (1397, 1431), False, 'from astropy.modeling import models, fitting\n'), ((1441, 1466), 'astropy.modeling.fitting.LevMarLSQFitter', 'fitting.LevMarLSQFitter', ([], {}), '()\n', (1464, 1466), False, 'from astropy.modeling import models, fitting\n'), ((1895, 1936), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(8, 6)', 'sharex': '(True)'}), '(figsize=(8, 6), sharex=True)\n', (1907, 1936), True, 'from matplotlib import pyplot as plt\n'), ((1988, 2012), 'numpy.insert', 'np.insert', (['histAll', '(0)', '(0)'], {}), '(histAll, 0, 0)\n', (1997, 2012), True, 'import numpy as np\n'), ((2022, 2046), 'numpy.insert', 'np.insert', (['histObs', '(0)', '(0)'], {}), '(histObs, 0, 0)\n', (2031, 2046), True, 'import numpy as np\n'), ((3834, 3858), 'scipy.integrate.quad', 'quad', (['RagNormal', '(-20)', '(20)'], {}), '(RagNormal, -20, 20)\n', (3838, 3858), False, 'from scipy.integrate import quad\n'), ((4396, 4432), 'numpy.arange', 'np.arange', (['(0)', '(10)', '(0.1)'], {'dtype': '"""float"""'}), "(0, 10, 0.1, dtype='float')\n", (4405, 4432), True, 'import numpy as np\n'), ((4441, 4476), 'numpy.arange', 'np.arange', (['(0)', '(1)', '(0.1)'], {'dtype': '"""float"""'}), "(0, 1, 0.1, dtype='float')\n", (4450, 4476), True, 'import numpy as np\n'), ((4485, 4524), 'numpy.arange', 'np.arange', (['(0)', '(1.05)', '(0.05)'], {'dtype': '"""float"""'}), "(0, 1.05, 0.05, dtype='float')\n", (4494, 4524), True, 'import numpy as np\n'), ((4535, 4572), 'numpy.arange', 'np.arange', (['(-2)', '(10)', '(0.5)'], {'dtype': '"""float"""'}), "(-2, 10, 0.5, dtype='float')\n", (4544, 4572), True, 'import numpy as np\n'), ((4582, 4616), 'numpy.arange', 'np.arange', (['(0)', '(40)', '(1)'], {'dtype': '"""float"""'}), "(0, 40, 1, dtype='float')\n", (4591, 4616), True, 'import numpy as np\n'), ((4628, 4663), 'numpy.arange', 'np.arange', (['(11)', '(25)', '(1)'], {'dtype': '"""float"""'}), "(11, 25, 1, dtype='float')\n", (4637, 4663), True, 'import numpy as np\n'), ((4673, 4710), 'numpy.arange', 'np.arange', (['(0)', '(100)', '(0.2)'], {'dtype': '"""float"""'}), "(0, 100, 0.2, dtype='float')\n", (4682, 4710), True, 'import numpy as np\n'), ((6161, 6193), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['e', 'p']"}), "(columns=['e', 'p'])\n", (6173, 6193), True, 'import pandas as pd\n'), ((6207, 6239), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['e', 'p']"}), "(columns=['e', 'p'])\n", (6219, 6239), True, 'import pandas as pd\n'), ((6253, 6285), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['e', 'p']"}), "(columns=['e', 'p'])\n", (6265, 6285), True, 'import pandas as pd\n'), ((6367, 6380), 'os.listdir', 'os.listdir', (['d'], {}), '(d)\n', (6377, 6380), False, 'import os\n'), ((13000, 13022), 'numpy.concatenate', 'np.concatenate', (['eccAll'], {}), '(eccAll)\n', (13014, 13022), True, 'import numpy as np\n'), ((13033, 13055), 'numpy.concatenate', 'np.concatenate', (['eccObs'], {}), '(eccObs)\n', (13047, 13055), True, 'import numpy as np\n'), ((13066, 13088), 'numpy.concatenate', 'np.concatenate', (['eccRec'], {}), '(eccRec)\n', (13080, 13088), True, 'import numpy as np\n'), ((13098, 13118), 'numpy.concatenate', 'np.concatenate', (['pAll'], {}), '(pAll)\n', (13112, 13118), True, 'import numpy as 
np\n'), ((13127, 13147), 'numpy.concatenate', 'np.concatenate', (['pObs'], {}), '(pObs)\n', (13141, 13147), True, 'import numpy as np\n'), ((13156, 13176), 'numpy.concatenate', 'np.concatenate', (['pRec'], {}), '(pRec)\n', (13170, 13176), True, 'import numpy as np\n'), ((14422, 14488), 'astropy.coordinates.SkyCoord', 'SkyCoord', (['RA', 'Dec'], {'unit': '(units.degree, units.degree)', 'frame': '"""icrs"""'}), "(RA, Dec, unit=(units.degree, units.degree), frame='icrs')\n", (14430, 14488), False, 'from astropy.coordinates import SkyCoord\n'), ((14729, 14797), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'subplot_kw': "{'projection': 'mollweide'}", 'figsize': '(8, 5)'}), "(subplot_kw={'projection': 'mollweide'}, figsize=(8, 5))\n", (14741, 14797), True, 'from matplotlib import pyplot as plt\n'), ((15385, 15453), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'subplot_kw': "{'projection': 'mollweide'}", 'figsize': '(8, 5)'}), "(subplot_kw={'projection': 'mollweide'}, figsize=(8, 5))\n", (15397, 15453), True, 'from matplotlib import pyplot as plt\n'), ((2079, 2106), 'numpy.insert', 'np.insert', (['histRec[f]', '(0)', '(0)'], {}), '(histRec[f], 0, 0)\n', (2088, 2106), True, 'import numpy as np\n'), ((3895, 3915), 'numpy.log10', 'np.log10', (['(365 * 10.0)'], {}), '(365 * 10.0)\n', (3903, 3915), True, 'import numpy as np\n'), ((4137, 4151), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (4149, 4151), True, 'from matplotlib import pyplot as plt\n'), ((4170, 4184), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (4182, 4184), True, 'from matplotlib import pyplot as plt\n'), ((4201, 4215), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (4213, 4215), True, 'from matplotlib import pyplot as plt\n'), ((4234, 4248), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (4246, 4248), True, 'from matplotlib import pyplot as plt\n'), ((4267, 4281), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (4279, 4281), True, 'from matplotlib import pyplot as plt\n'), ((4298, 4312), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (4310, 4312), True, 'from matplotlib import pyplot as plt\n'), ((4329, 4343), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (4341, 4343), True, 'from matplotlib import pyplot as plt\n'), ((4756, 4776), 'numpy.zeros_like', 'np.zeros_like', (['mbins'], {}), '(mbins)\n', (4769, 4776), True, 'import numpy as np\n'), ((4790, 4810), 'numpy.zeros_like', 'np.zeros_like', (['qbins'], {}), '(qbins)\n', (4803, 4810), True, 'import numpy as np\n'), ((4824, 4844), 'numpy.zeros_like', 'np.zeros_like', (['ebins'], {}), '(ebins)\n', (4837, 4844), True, 'import numpy as np\n'), ((4859, 4880), 'numpy.zeros_like', 'np.zeros_like', (['lpbins'], {}), '(lpbins)\n', (4872, 4880), True, 'import numpy as np\n'), ((4894, 4914), 'numpy.zeros_like', 'np.zeros_like', (['dbins'], {}), '(dbins)\n', (4907, 4914), True, 'import numpy as np\n'), ((4930, 4952), 'numpy.zeros_like', 'np.zeros_like', (['magbins'], {}), '(magbins)\n', (4943, 4952), True, 'import numpy as np\n'), ((4966, 4986), 'numpy.zeros_like', 'np.zeros_like', (['rbins'], {}), '(rbins)\n', (4979, 4986), True, 'import numpy as np\n'), ((5014, 5034), 'numpy.zeros_like', 'np.zeros_like', (['mbins'], {}), '(mbins)\n', (5027, 5034), True, 'import numpy as np\n'), ((5048, 5068), 'numpy.zeros_like', 'np.zeros_like', (['qbins'], {}), '(qbins)\n', (5061, 5068), True, 'import numpy as np\n'), ((5082, 5102), 'numpy.zeros_like', 'np.zeros_like', 
(['ebins'], {}), '(ebins)\n', (5095, 5102), True, 'import numpy as np\n'), ((5117, 5138), 'numpy.zeros_like', 'np.zeros_like', (['lpbins'], {}), '(lpbins)\n', (5130, 5138), True, 'import numpy as np\n'), ((5152, 5172), 'numpy.zeros_like', 'np.zeros_like', (['dbins'], {}), '(dbins)\n', (5165, 5172), True, 'import numpy as np\n'), ((5188, 5210), 'numpy.zeros_like', 'np.zeros_like', (['magbins'], {}), '(magbins)\n', (5201, 5210), True, 'import numpy as np\n'), ((5224, 5244), 'numpy.zeros_like', 'np.zeros_like', (['rbins'], {}), '(rbins)\n', (5237, 5244), True, 'import numpy as np\n'), ((16721, 16734), 'numpy.sum', 'np.sum', (['fileN'], {}), '(fileN)\n', (16727, 16734), True, 'import numpy as np\n'), ((16823, 16839), 'numpy.sum', 'np.sum', (['fileObsN'], {}), '(fileObsN)\n', (16829, 16839), True, 'import numpy as np\n'), ((16934, 16950), 'numpy.sum', 'np.sum', (['fileRecN'], {}), '(fileRecN)\n', (16940, 16950), True, 'import numpy as np\n'), ((17134, 17146), 'numpy.sum', 'np.sum', (['rawN'], {}), '(rawN)\n', (17140, 17146), True, 'import numpy as np\n'), ((17210, 17222), 'numpy.sum', 'np.sum', (['obsN'], {}), '(obsN)\n', (17216, 17222), True, 'import numpy as np\n'), ((17285, 17297), 'numpy.sum', 'np.sum', (['recN'], {}), '(recN)\n', (17291, 17297), True, 'import numpy as np\n'), ((17482, 17498), 'numpy.sum', 'np.sum', (['allNPrsa'], {}), '(allNPrsa)\n', (17488, 17498), True, 'import numpy as np\n'), ((17601, 17617), 'numpy.sum', 'np.sum', (['obsNPrsa'], {}), '(obsNPrsa)\n', (17607, 17617), True, 'import numpy as np\n'), ((17719, 17735), 'numpy.sum', 'np.sum', (['recNPrsa'], {}), '(recNPrsa)\n', (17725, 17735), True, 'import numpy as np\n'), ((2141, 2156), 'numpy.sum', 'np.sum', (['histAll'], {}), '(histAll)\n', (2147, 2156), True, 'import numpy as np\n'), ((2197, 2212), 'numpy.sum', 'np.sum', (['histObs'], {}), '(histObs)\n', (2203, 2212), True, 'import numpy as np\n'), ((2315, 2333), 'numpy.sum', 'np.sum', (['histRec[f]'], {}), '(histRec[f])\n', (2321, 2333), True, 'import numpy as np\n'), ((5410, 5430), 'numpy.zeros_like', 'np.zeros_like', (['mbins'], {}), '(mbins)\n', (5423, 5430), True, 'import numpy as np\n'), ((5448, 5468), 'numpy.zeros_like', 'np.zeros_like', (['qbins'], {}), '(qbins)\n', (5461, 5468), True, 'import numpy as np\n'), ((5486, 5506), 'numpy.zeros_like', 'np.zeros_like', (['ebins'], {}), '(ebins)\n', (5499, 5506), True, 'import numpy as np\n'), ((5525, 5546), 'numpy.zeros_like', 'np.zeros_like', (['lpbins'], {}), '(lpbins)\n', (5538, 5546), True, 'import numpy as np\n'), ((5564, 5584), 'numpy.zeros_like', 'np.zeros_like', (['dbins'], {}), '(dbins)\n', (5577, 5584), True, 'import numpy as np\n'), ((5604, 5626), 'numpy.zeros_like', 'np.zeros_like', (['magbins'], {}), '(magbins)\n', (5617, 5626), True, 'import numpy as np\n'), ((5644, 5664), 'numpy.zeros_like', 'np.zeros_like', (['rbins'], {}), '(rbins)\n', (5657, 5664), True, 'import numpy as np\n'), ((6528, 6555), 'pandas.read_csv', 'pd.read_csv', (['(d + f)'], {'nrows': '(1)'}), '(d + f, nrows=1)\n', (6539, 6555), True, 'import pandas as pd\n'), ((16745, 16758), 'numpy.sum', 'np.sum', (['fileN'], {}), '(fileN)\n', (16751, 16758), True, 'import numpy as np\n'), ((16850, 16866), 'numpy.sum', 'np.sum', (['fileObsN'], {}), '(fileObsN)\n', (16856, 16866), True, 'import numpy as np\n'), ((16961, 16977), 'numpy.sum', 'np.sum', (['fileRecN'], {}), '(fileRecN)\n', (16967, 16977), True, 'import numpy as np\n'), ((17157, 17169), 'numpy.sum', 'np.sum', (['rawN'], {}), '(rawN)\n', (17163, 17169), True, 'import numpy as np\n'), ((17233, 
17245), 'numpy.sum', 'np.sum', (['obsN'], {}), '(obsN)\n', (17239, 17245), True, 'import numpy as np\n'), ((17308, 17320), 'numpy.sum', 'np.sum', (['recN'], {}), '(recN)\n', (17314, 17320), True, 'import numpy as np\n'), ((17509, 17525), 'numpy.sum', 'np.sum', (['allNPrsa'], {}), '(allNPrsa)\n', (17515, 17525), True, 'import numpy as np\n'), ((17628, 17644), 'numpy.sum', 'np.sum', (['obsNPrsa'], {}), '(obsNPrsa)\n', (17634, 17644), True, 'import numpy as np\n'), ((17746, 17762), 'numpy.sum', 'np.sum', (['recNPrsa'], {}), '(recNPrsa)\n', (17752, 17762), True, 'import numpy as np\n'), ((7455, 7491), 'numpy.histogram', 'np.histogram', (["data['m1']"], {'bins': 'mbins'}), "(data['m1'], bins=mbins)\n", (7467, 7491), True, 'import numpy as np\n'), ((7509, 7558), 'numpy.histogram', 'np.histogram', (["(data['m2'] / data['m1'])"], {'bins': 'qbins'}), "(data['m2'] / data['m1'], bins=qbins)\n", (7521, 7558), True, 'import numpy as np\n'), ((7574, 7609), 'numpy.histogram', 'np.histogram', (["data['e']"], {'bins': 'ebins'}), "(data['e'], bins=ebins)\n", (7586, 7609), True, 'import numpy as np\n'), ((7716, 7751), 'numpy.histogram', 'np.histogram', (["data['d']"], {'bins': 'dbins'}), "(data['d'], bins=dbins)\n", (7728, 7751), True, 'import numpy as np\n'), ((7773, 7821), 'numpy.histogram', 'np.histogram', (["data['appMagMean_r']"], {'bins': 'magbins'}), "(data['appMagMean_r'], bins=magbins)\n", (7785, 7821), True, 'import numpy as np\n'), ((7839, 7888), 'numpy.histogram', 'np.histogram', (["(data['r2'] / data['r1'])"], {'bins': 'rbins'}), "(data['r2'] / data['r1'], bins=rbins)\n", (7851, 7888), True, 'import numpy as np\n'), ((8528, 8540), 'numpy.diff', 'np.diff', (['m1b'], {}), '(m1b)\n', (8535, 8540), True, 'import numpy as np\n'), ((15176, 15193), 'numpy.array', 'np.array', (['recFrac'], {}), '(recFrac)\n', (15184, 15193), True, 'import numpy as np\n'), ((15833, 15847), 'numpy.array', 'np.array', (['recN'], {}), '(recN)\n', (15841, 15847), True, 'import numpy as np\n'), ((17027, 17043), 'numpy.sum', 'np.sum', (['fileRecN'], {}), '(fileRecN)\n', (17033, 17043), True, 'import numpy as np\n'), ((17044, 17060), 'numpy.sum', 'np.sum', (['fileObsN'], {}), '(fileObsN)\n', (17050, 17060), True, 'import numpy as np\n'), ((17358, 17370), 'numpy.sum', 'np.sum', (['recN'], {}), '(recN)\n', (17364, 17370), True, 'import numpy as np\n'), ((17371, 17383), 'numpy.sum', 'np.sum', (['obsN'], {}), '(obsN)\n', (17377, 17383), True, 'import numpy as np\n'), ((17812, 17828), 'numpy.sum', 'np.sum', (['recNPrsa'], {}), '(recNPrsa)\n', (17818, 17828), True, 'import numpy as np\n'), ((17829, 17845), 'numpy.sum', 'np.sum', (['obsNPrsa'], {}), '(obsNPrsa)\n', (17835, 17845), True, 'import numpy as np\n'), ((1005, 1017), 'numpy.sqrt', 'np.sqrt', (['(2.0)'], {}), '(2.0)\n', (1012, 1017), True, 'import numpy as np\n'), ((6833, 6861), 'pandas.read_csv', 'pd.read_csv', (['(d + f)'], {'header': '(2)'}), '(d + f, header=2)\n', (6844, 6861), True, 'import pandas as pd\n'), ((8771, 8784), 'numpy.log10', 'np.log10', (['Phs'], {}), '(Phs)\n', (8779, 8784), True, 'import numpy as np\n'), ((9512, 9547), 'numpy.histogram', 'np.histogram', (["obs['m1']"], {'bins': 'mbins'}), "(obs['m1'], bins=mbins)\n", (9524, 9547), True, 'import numpy as np\n'), ((9566, 9613), 'numpy.histogram', 'np.histogram', (["(obs['m2'] / obs['m1'])"], {'bins': 'qbins'}), "(obs['m2'] / obs['m1'], bins=qbins)\n", (9578, 9613), True, 'import numpy as np\n'), ((9630, 9664), 'numpy.histogram', 'np.histogram', (["obs['e']"], {'bins': 'ebins'}), "(obs['e'], bins=ebins)\n", 
(9642, 9664), True, 'import numpy as np\n'), ((9772, 9806), 'numpy.histogram', 'np.histogram', (["obs['d']"], {'bins': 'dbins'}), "(obs['d'], bins=dbins)\n", (9784, 9806), True, 'import numpy as np\n'), ((9829, 9876), 'numpy.histogram', 'np.histogram', (["obs['appMagMean_r']"], {'bins': 'magbins'}), "(obs['appMagMean_r'], bins=magbins)\n", (9841, 9876), True, 'import numpy as np\n'), ((9895, 9942), 'numpy.histogram', 'np.histogram', (["(obs['r2'] / obs['r1'])"], {'bins': 'rbins'}), "(obs['r2'] / obs['r1'], bins=rbins)\n", (9907, 9942), True, 'import numpy as np\n'), ((10203, 10217), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (10215, 10217), True, 'import pandas as pd\n'), ((10241, 10255), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (10253, 10255), True, 'import pandas as pd\n'), ((15099, 15115), 'numpy.array', 'np.array', (['RAwrap'], {}), '(RAwrap)\n', (15107, 15115), True, 'import numpy as np\n'), ((15136, 15153), 'numpy.array', 'np.array', (['Decwrap'], {}), '(Decwrap)\n', (15144, 15153), True, 'import numpy as np\n'), ((15747, 15763), 'numpy.array', 'np.array', (['RAwrap'], {}), '(RAwrap)\n', (15755, 15763), True, 'import numpy as np\n'), ((15784, 15801), 'numpy.array', 'np.array', (['Decwrap'], {}), '(Decwrap)\n', (15792, 15801), True, 'import numpy as np\n'), ((7642, 7671), 'numpy.ma.log10', 'np.ma.log10', (["data['p'].values"], {}), "(data['p'].values)\n", (7653, 7671), True, 'import numpy as np\n'), ((7952, 7967), 'numpy.sum', 'np.sum', (['m1hAll0'], {}), '(m1hAll0)\n', (7958, 7967), True, 'import numpy as np\n'), ((8029, 8043), 'numpy.sum', 'np.sum', (['qhAll0'], {}), '(qhAll0)\n', (8035, 8043), True, 'import numpy as np\n'), ((8104, 8118), 'numpy.sum', 'np.sum', (['ehAll0'], {}), '(ehAll0)\n', (8110, 8118), True, 'import numpy as np\n'), ((8182, 8197), 'numpy.sum', 'np.sum', (['lphAll0'], {}), '(lphAll0)\n', (8188, 8197), True, 'import numpy as np\n'), ((8259, 8273), 'numpy.sum', 'np.sum', (['dhAll0'], {}), '(dhAll0)\n', (8265, 8273), True, 'import numpy as np\n'), ((8338, 8354), 'numpy.sum', 'np.sum', (['maghAll0'], {}), '(maghAll0)\n', (8344, 8354), True, 'import numpy as np\n'), ((8415, 8429), 'numpy.sum', 'np.sum', (['rhAll0'], {}), '(rhAll0)\n', (8421, 8429), True, 'import numpy as np\n'), ((11393, 11428), 'numpy.histogram', 'np.histogram', (["rec['m1']"], {'bins': 'mbins'}), "(rec['m1'], bins=mbins)\n", (11405, 11428), True, 'import numpy as np\n'), ((11449, 11496), 'numpy.histogram', 'np.histogram', (["(rec['m2'] / rec['m1'])"], {'bins': 'qbins'}), "(rec['m2'] / rec['m1'], bins=qbins)\n", (11461, 11496), True, 'import numpy as np\n'), ((11515, 11549), 'numpy.histogram', 'np.histogram', (["rec['e']"], {'bins': 'ebins'}), "(rec['e'], bins=ebins)\n", (11527, 11549), True, 'import numpy as np\n'), ((11661, 11695), 'numpy.histogram', 'np.histogram', (["rec['d']"], {'bins': 'dbins'}), "(rec['d'], bins=dbins)\n", (11673, 11695), True, 'import numpy as np\n'), ((11720, 11767), 'numpy.histogram', 'np.histogram', (["rec['appMagMean_r']"], {'bins': 'magbins'}), "(rec['appMagMean_r'], bins=magbins)\n", (11732, 11767), True, 'import numpy as np\n'), ((11788, 11835), 'numpy.histogram', 'np.histogram', (["(rec['r2'] / rec['r1'])"], {'bins': 'rbins'}), "(rec['r2'] / rec['r1'], bins=rbins)\n", (11800, 11835), True, 'import numpy as np\n'), ((9698, 9726), 'numpy.ma.log10', 'np.ma.log10', (["obs['p'].values"], {}), "(obs['p'].values)\n", (9709, 9726), True, 'import numpy as np\n'), ((11585, 11613), 'numpy.ma.log10', 'np.ma.log10', (["rec['p'].values"], {}), 
"(rec['p'].values)\n", (11596, 11613), True, 'import numpy as np\n')] |
from django.urls import path
from rest_framework_simplejwt.views import (
TokenObtainPairView,
TokenRefreshView,
TokenVerifyView
)
urlpatterns = [
path('obtain/', TokenObtainPairView.as_view(), name='token_obtain_pair'),
path('refresh/', TokenRefreshView.as_view(), name='token_refresh'),
path('verify/', TokenVerifyView.as_view(), name='token_verify'),
]
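# Illustrative request flow for these endpoints (the URL prefix depends on where this
# module is include()'d in the project's root URLconf; sketch only, not part of the config):
#   POST <prefix>/obtain/   {"username": ..., "password": ...}  -> {"access": ..., "refresh": ...}
#   POST <prefix>/refresh/  {"refresh": ...}                     -> {"access": ...}
#   POST <prefix>/verify/   {"token": ...}                       -> HTTP 200 if the token is valid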
| [
"rest_framework_simplejwt.views.TokenVerifyView.as_view",
"rest_framework_simplejwt.views.TokenObtainPairView.as_view",
"rest_framework_simplejwt.views.TokenRefreshView.as_view"
] | [((180, 209), 'rest_framework_simplejwt.views.TokenObtainPairView.as_view', 'TokenObtainPairView.as_view', ([], {}), '()\n', (207, 209), False, 'from rest_framework_simplejwt.views import TokenObtainPairView, TokenRefreshView, TokenVerifyView\n'), ((259, 285), 'rest_framework_simplejwt.views.TokenRefreshView.as_view', 'TokenRefreshView.as_view', ([], {}), '()\n', (283, 285), False, 'from rest_framework_simplejwt.views import TokenObtainPairView, TokenRefreshView, TokenVerifyView\n'), ((330, 355), 'rest_framework_simplejwt.views.TokenVerifyView.as_view', 'TokenVerifyView.as_view', ([], {}), '()\n', (353, 355), False, 'from rest_framework_simplejwt.views import TokenObtainPairView, TokenRefreshView, TokenVerifyView\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the knowledge base."""
import unittest
from plaso.containers import artifacts
from plaso.engine import knowledge_base
from tests import test_lib as shared_test_lib
class KnowledgeBaseTest(shared_test_lib.BaseTestCase):
"""Tests for the knowledge base."""
# pylint: disable=protected-access
_MACOS_PATHS = [
'/Users/dude/Library/Application Data/Google/Chrome/Default/Extensions',
('/Users/dude/Library/Application Data/Google/Chrome/Default/Extensions/'
'apdfllckaahabafndbhieahigkjlhalf'),
'/private/var/log/system.log',
'/Users/frank/Library/Application Data/Google/Chrome/Default',
'/Users/hans/Library/Application Data/Google/Chrome/Default',
('/Users/frank/Library/Application Data/Google/Chrome/Default/'
'Extensions/pjkljhegncpnkpknbcohdijeoejaedia'),
'/Users/frank/Library/Application Data/Google/Chrome/Default/Extensions']
_MACOS_USERS = [
{'name': 'root', 'path': '/var/root', 'sid': '0'},
{'name': 'frank', 'path': '/Users/frank', 'sid': '4052'},
{'name': 'hans', 'path': '/Users/hans', 'sid': '4352'},
{'name': 'dude', 'path': '/Users/dude', 'sid': '1123'}]
_WINDOWS_PATHS = [
'C:\\Users\\Dude\\SomeFolder\\Chrome\\Default\\Extensions',
('C:\\Users\\Dude\\SomeNoneStandardFolder\\Chrome\\Default\\Extensions\\'
'hmjkmjkepdijhoojdojkdfohbdgmmhki'),
('C:\\Users\\frank\\AppData\\Local\\Google\\Chrome\\Extensions\\'
'blpcfgokakmgnkcojhhkbfbldkacnbeo'),
'C:\\Users\\frank\\AppData\\Local\\Google\\Chrome\\Extensions',
('C:\\Users\\frank\\AppData\\Local\\Google\\Chrome\\Extensions\\'
'icppfcnhkcmnfdhfhphakoifcfokfdhg'),
'C:\\Windows\\System32',
'C:\\Stuff/with path separator\\Folder']
_WINDOWS_USERS = [
{'name': 'dude', 'path': 'C:\\Users\\dude', 'sid': 'S-1'},
{'name': 'frank', 'path': 'C:\\Users\\frank', 'sid': 'S-2'}]
def _SetUserAccounts(self, knowledge_base_object, users):
"""Sets the user accounts in the knowledge base.
Args:
knowledge_base_object (KnowledgeBase): knowledge base.
users (list[dict[str,str])): users.
"""
for user in users:
identifier = user.get('sid', user.get('uid', None))
if not identifier:
continue
user_account = artifacts.UserAccountArtifact(
identifier=identifier, user_directory=user.get('path', None),
username=user.get('name', None))
knowledge_base_object.AddUserAccount(user_account)
def testCodepageProperty(self):
"""Tests the codepage property."""
knowledge_base_object = knowledge_base.KnowledgeBase()
self.assertEqual(knowledge_base_object.codepage, 'cp1252')
def testHostnameProperty(self):
"""Tests the hostname property."""
knowledge_base_object = knowledge_base.KnowledgeBase()
self.assertEqual(knowledge_base_object.hostname, '')
def testOperatingSystemProperty(self):
"""Tests the operating_system property."""
knowledge_base_object = knowledge_base.KnowledgeBase()
operating_system = knowledge_base_object.GetValue('operating_system')
self.assertIsNone(operating_system)
knowledge_base_object.SetValue('operating_system', 'Windows')
operating_system = knowledge_base_object.GetValue('operating_system')
self.assertEqual(operating_system, 'Windows')
def testTimezoneProperty(self):
"""Tests the timezone property."""
knowledge_base_object = knowledge_base.KnowledgeBase()
self.assertEqual(knowledge_base_object.timezone.zone, 'UTC')
def testUserAccountsProperty(self):
"""Tests the user accounts property."""
knowledge_base_object = knowledge_base.KnowledgeBase()
self.assertEqual(len(knowledge_base_object.user_accounts), 0)
user_account = artifacts.UserAccountArtifact(
identifier='1000', user_directory='/home/testuser',
username='testuser')
knowledge_base_object.AddUserAccount(user_account)
self.assertEqual(len(knowledge_base_object.user_accounts), 1)
def testYearProperty(self):
"""Tests the year property."""
knowledge_base_object = knowledge_base.KnowledgeBase()
self.assertEqual(knowledge_base_object.year, 0)
def testAddUserAccount(self):
"""Tests the AddUserAccount function."""
knowledge_base_object = knowledge_base.KnowledgeBase()
user_account = artifacts.UserAccountArtifact(
identifier='1000', user_directory='/home/testuser',
username='testuser')
knowledge_base_object.AddUserAccount(user_account)
with self.assertRaises(KeyError):
knowledge_base_object.AddUserAccount(user_account)
def testAddEnvironmentVariable(self):
"""Tests the AddEnvironmentVariable function."""
knowledge_base_object = knowledge_base.KnowledgeBase()
environment_variable = artifacts.EnvironmentVariableArtifact(
case_sensitive=False, name='SystemRoot', value='C:\\Windows')
knowledge_base_object.AddEnvironmentVariable(environment_variable)
with self.assertRaises(KeyError):
knowledge_base_object.AddEnvironmentVariable(environment_variable)
def testGetEnvironmentVariable(self):
"""Tests the GetEnvironmentVariable functions."""
knowledge_base_object = knowledge_base.KnowledgeBase()
environment_variable = artifacts.EnvironmentVariableArtifact(
case_sensitive=False, name='SystemRoot', value='C:\\Windows')
knowledge_base_object.AddEnvironmentVariable(environment_variable)
test_environment_variable = knowledge_base_object.GetEnvironmentVariable(
'SystemRoot')
self.assertIsNotNone(test_environment_variable)
test_environment_variable = knowledge_base_object.GetEnvironmentVariable(
'sYsTeMrOoT')
self.assertIsNotNone(test_environment_variable)
test_environment_variable = knowledge_base_object.GetEnvironmentVariable(
'Bogus')
self.assertIsNone(test_environment_variable)
def testGetEnvironmentVariables(self):
"""Tests the GetEnvironmentVariables function."""
knowledge_base_object = knowledge_base.KnowledgeBase()
environment_variable = artifacts.EnvironmentVariableArtifact(
case_sensitive=False, name='SystemRoot', value='C:\\Windows')
knowledge_base_object.AddEnvironmentVariable(environment_variable)
environment_variable = artifacts.EnvironmentVariableArtifact(
case_sensitive=False, name='WinDir', value='C:\\Windows')
knowledge_base_object.AddEnvironmentVariable(environment_variable)
environment_variables = knowledge_base_object.GetEnvironmentVariables()
self.assertEqual(len(environment_variables), 2)
def testGetHostname(self):
"""Tests the GetHostname function."""
knowledge_base_object = knowledge_base.KnowledgeBase()
hostname = knowledge_base_object.GetHostname()
self.assertEqual(hostname, '')
# TODO: add tests for GetMountPoint.
def testGetSourceConfigurationArtifacts(self):
"""Tests the GetSourceConfigurationArtifacts function."""
knowledge_base_object = knowledge_base.KnowledgeBase()
hostname_artifact = artifacts.HostnameArtifact(name='myhost.mydomain')
knowledge_base_object.SetHostname(hostname_artifact)
user_account = artifacts.UserAccountArtifact(
identifier='1000', user_directory='/home/testuser',
username='testuser')
knowledge_base_object.AddUserAccount(user_account)
source_configurations = (
knowledge_base_object.GetSourceConfigurationArtifacts())
self.assertEqual(len(source_configurations), 1)
self.assertIsNotNone(source_configurations[0])
system_configuration = source_configurations[0].system_configuration
self.assertIsNotNone(system_configuration)
self.assertIsNotNone(system_configuration.hostname)
self.assertEqual(system_configuration.hostname.name, 'myhost.mydomain')
def testGetSystemConfigurationArtifact(self):
"""Tests the _GetSystemConfigurationArtifact function."""
knowledge_base_object = knowledge_base.KnowledgeBase()
hostname_artifact = artifacts.HostnameArtifact(name='myhost.mydomain')
knowledge_base_object.SetHostname(hostname_artifact)
user_account = artifacts.UserAccountArtifact(
identifier='1000', user_directory='/home/testuser',
username='testuser')
knowledge_base_object.AddUserAccount(user_account)
system_configuration = (
knowledge_base_object._GetSystemConfigurationArtifact())
self.assertIsNotNone(system_configuration)
self.assertIsNotNone(system_configuration.hostname)
self.assertEqual(system_configuration.hostname.name, 'myhost.mydomain')
# TODO: add tests for GetTextPrepend.
def testGetUsernameByIdentifier(self):
"""Tests the GetUsernameByIdentifier function."""
knowledge_base_object = knowledge_base.KnowledgeBase()
user_account = artifacts.UserAccountArtifact(
identifier='1000', user_directory='/home/testuser',
username='testuser')
knowledge_base_object.AddUserAccount(user_account)
    username = knowledge_base_object.GetUsernameByIdentifier('1000')
    self.assertEqual(username, 'testuser')
    username = knowledge_base_object.GetUsernameByIdentifier(1000)
    self.assertEqual(username, '')
    username = knowledge_base_object.GetUsernameByIdentifier('1001')
    self.assertEqual(username, '')
def testGetUsernameForPath(self):
"""Tests the GetUsernameForPath function."""
knowledge_base_object = knowledge_base.KnowledgeBase()
self._SetUserAccounts(knowledge_base_object, self._MACOS_USERS)
username = knowledge_base_object.GetUsernameForPath(
self._MACOS_PATHS[0])
self.assertEqual(username, 'dude')
username = knowledge_base_object.GetUsernameForPath(
self._MACOS_PATHS[4])
self.assertEqual(username, 'hans')
username = knowledge_base_object.GetUsernameForPath(
self._WINDOWS_PATHS[0])
self.assertIsNone(username)
knowledge_base_object = knowledge_base.KnowledgeBase()
self._SetUserAccounts(knowledge_base_object, self._WINDOWS_USERS)
username = knowledge_base_object.GetUsernameForPath(
self._WINDOWS_PATHS[0])
self.assertEqual(username, 'dude')
username = knowledge_base_object.GetUsernameForPath(
self._WINDOWS_PATHS[2])
self.assertEqual(username, 'frank')
username = knowledge_base_object.GetUsernameForPath(
self._MACOS_PATHS[2])
self.assertIsNone(username)
def testGetSetValue(self):
"""Tests the Get and SetValue functions."""
knowledge_base_object = knowledge_base.KnowledgeBase()
expected_value = 'test value'
knowledge_base_object.SetValue('Test', expected_value)
value = knowledge_base_object.GetValue('Test')
self.assertEqual(value, expected_value)
value = knowledge_base_object.GetValue('tEsT')
self.assertEqual(value, expected_value)
value = knowledge_base_object.GetValue('Bogus')
self.assertIsNone(value)
def testHasUserAccounts(self):
"""Tests the HasUserAccounts function."""
knowledge_base_object = knowledge_base.KnowledgeBase()
self.assertFalse(knowledge_base_object.HasUserAccounts())
user_account = artifacts.UserAccountArtifact(
identifier='1000', user_directory='/home/testuser',
username='testuser')
knowledge_base_object.AddUserAccount(user_account)
self.assertTrue(knowledge_base_object.HasUserAccounts())
def testReadSystemConfigurationArtifact(self):
"""Tests the ReadSystemConfigurationArtifact function."""
knowledge_base_object = knowledge_base.KnowledgeBase()
system_configuration = artifacts.SystemConfigurationArtifact()
system_configuration.hostname = artifacts.HostnameArtifact(
name='myhost.mydomain')
user_account = artifacts.UserAccountArtifact(
identifier='1000', user_directory='/home/testuser',
username='testuser')
system_configuration.user_accounts.append(user_account)
knowledge_base_object.ReadSystemConfigurationArtifact(system_configuration)
hostname = knowledge_base_object.GetHostname()
self.assertEqual(hostname, 'myhost.mydomain')
def testSetActiveSession(self):
"""Tests the SetActiveSession function."""
knowledge_base_object = knowledge_base.KnowledgeBase()
knowledge_base_object.SetActiveSession('ddda05bedf324cbd99fa8c24b8a0037a')
self.assertEqual(
knowledge_base_object._active_session,
'ddda05bedf324cbd99fa8c24b8a0037a')
knowledge_base_object.SetActiveSession(
knowledge_base_object._DEFAULT_ACTIVE_SESSION)
self.assertEqual(
knowledge_base_object._active_session,
knowledge_base_object._DEFAULT_ACTIVE_SESSION)
def testSetCodepage(self):
"""Tests the SetCodepage function."""
knowledge_base_object = knowledge_base.KnowledgeBase()
knowledge_base_object.SetCodepage('cp1252')
with self.assertRaises(ValueError):
knowledge_base_object.SetCodepage('bogus')
def testSetHostname(self):
"""Tests the SetHostname function."""
knowledge_base_object = knowledge_base.KnowledgeBase()
hostname_artifact = artifacts.HostnameArtifact(name='myhost.mydomain')
knowledge_base_object.SetHostname(hostname_artifact)
# TODO: add tests for SetMountPoint.
# TODO: add tests for SetTextPrepend.
def testSetTimeZone(self):
"""Tests the SetTimeZone function."""
knowledge_base_object = knowledge_base.KnowledgeBase()
time_zone_artifact = artifacts.TimeZoneArtifact(
localized_name='Eastern (standaardtijd)', mui_form='@tzres.dll,-112',
name='Eastern Standard Time')
knowledge_base_object.AddAvailableTimeZone(time_zone_artifact)
# Set an IANA time zone name.
knowledge_base_object.SetTimeZone('Europe/Zurich')
self.assertEqual(knowledge_base_object._time_zone.zone, 'Europe/Zurich')
# Set a Windows time zone name.
knowledge_base_object.SetTimeZone('Eastern Standard Time')
self.assertEqual(knowledge_base_object._time_zone.zone, 'America/New_York')
# Set a localized Windows time zone name.
knowledge_base_object.SetTimeZone('Eastern (standaardtijd)')
self.assertEqual(knowledge_base_object._time_zone.zone, 'America/New_York')
# Set a MUI form Windows time zone name.
knowledge_base_object.SetTimeZone('@tzres.dll,-112')
self.assertEqual(knowledge_base_object._time_zone.zone, 'America/New_York')
with self.assertRaises(ValueError):
knowledge_base_object.SetTimeZone('Bogus')
if __name__ == '__main__':
unittest.main()
| [
"plaso.engine.knowledge_base.KnowledgeBase",
"plaso.containers.artifacts.HostnameArtifact",
"plaso.containers.artifacts.SystemConfigurationArtifact",
"plaso.containers.artifacts.UserAccountArtifact",
"unittest.main",
"plaso.containers.artifacts.EnvironmentVariableArtifact",
"plaso.containers.artifacts.TimeZoneArtifact"
] | [((14511, 14526), 'unittest.main', 'unittest.main', ([], {}), '()\n', (14524, 14526), False, 'import unittest\n'), ((2660, 2690), 'plaso.engine.knowledge_base.KnowledgeBase', 'knowledge_base.KnowledgeBase', ([], {}), '()\n', (2688, 2690), False, 'from plaso.engine import knowledge_base\n'), ((2857, 2887), 'plaso.engine.knowledge_base.KnowledgeBase', 'knowledge_base.KnowledgeBase', ([], {}), '()\n', (2885, 2887), False, 'from plaso.engine import knowledge_base\n'), ((3063, 3093), 'plaso.engine.knowledge_base.KnowledgeBase', 'knowledge_base.KnowledgeBase', ([], {}), '()\n', (3091, 3093), False, 'from plaso.engine import knowledge_base\n'), ((3503, 3533), 'plaso.engine.knowledge_base.KnowledgeBase', 'knowledge_base.KnowledgeBase', ([], {}), '()\n', (3531, 3533), False, 'from plaso.engine import knowledge_base\n'), ((3711, 3741), 'plaso.engine.knowledge_base.KnowledgeBase', 'knowledge_base.KnowledgeBase', ([], {}), '()\n', (3739, 3741), False, 'from plaso.engine import knowledge_base\n'), ((3829, 3936), 'plaso.containers.artifacts.UserAccountArtifact', 'artifacts.UserAccountArtifact', ([], {'identifier': '"""1000"""', 'user_directory': '"""/home/testuser"""', 'username': '"""testuser"""'}), "(identifier='1000', user_directory=\n '/home/testuser', username='testuser')\n", (3858, 3936), False, 'from plaso.containers import artifacts\n'), ((4165, 4195), 'plaso.engine.knowledge_base.KnowledgeBase', 'knowledge_base.KnowledgeBase', ([], {}), '()\n', (4193, 4195), False, 'from plaso.engine import knowledge_base\n'), ((4355, 4385), 'plaso.engine.knowledge_base.KnowledgeBase', 'knowledge_base.KnowledgeBase', ([], {}), '()\n', (4383, 4385), False, 'from plaso.engine import knowledge_base\n'), ((4406, 4513), 'plaso.containers.artifacts.UserAccountArtifact', 'artifacts.UserAccountArtifact', ([], {'identifier': '"""1000"""', 'user_directory': '"""/home/testuser"""', 'username': '"""testuser"""'}), "(identifier='1000', user_directory=\n '/home/testuser', username='testuser')\n", (4435, 4513), False, 'from plaso.containers import artifacts\n'), ((4799, 4829), 'plaso.engine.knowledge_base.KnowledgeBase', 'knowledge_base.KnowledgeBase', ([], {}), '()\n', (4827, 4829), False, 'from plaso.engine import knowledge_base\n'), ((4858, 4962), 'plaso.containers.artifacts.EnvironmentVariableArtifact', 'artifacts.EnvironmentVariableArtifact', ([], {'case_sensitive': '(False)', 'name': '"""SystemRoot"""', 'value': '"""C:\\\\Windows"""'}), "(case_sensitive=False, name=\n 'SystemRoot', value='C:\\\\Windows')\n", (4895, 4962), False, 'from plaso.containers import artifacts\n'), ((5274, 5304), 'plaso.engine.knowledge_base.KnowledgeBase', 'knowledge_base.KnowledgeBase', ([], {}), '()\n', (5302, 5304), False, 'from plaso.engine import knowledge_base\n'), ((5333, 5437), 'plaso.containers.artifacts.EnvironmentVariableArtifact', 'artifacts.EnvironmentVariableArtifact', ([], {'case_sensitive': '(False)', 'name': '"""SystemRoot"""', 'value': '"""C:\\\\Windows"""'}), "(case_sensitive=False, name=\n 'SystemRoot', value='C:\\\\Windows')\n", (5370, 5437), False, 'from plaso.containers import artifacts\n'), ((6088, 6118), 'plaso.engine.knowledge_base.KnowledgeBase', 'knowledge_base.KnowledgeBase', ([], {}), '()\n', (6116, 6118), False, 'from plaso.engine import knowledge_base\n'), ((6147, 6251), 'plaso.containers.artifacts.EnvironmentVariableArtifact', 'artifacts.EnvironmentVariableArtifact', ([], {'case_sensitive': '(False)', 'name': '"""SystemRoot"""', 'value': '"""C:\\\\Windows"""'}), "(case_sensitive=False, name=\n 'SystemRoot', 
value='C:\\\\Windows')\n", (6184, 6251), False, 'from plaso.containers import artifacts\n'), ((6355, 6454), 'plaso.containers.artifacts.EnvironmentVariableArtifact', 'artifacts.EnvironmentVariableArtifact', ([], {'case_sensitive': '(False)', 'name': '"""WinDir"""', 'value': '"""C:\\\\Windows"""'}), "(case_sensitive=False, name='WinDir',\n value='C:\\\\Windows')\n", (6392, 6454), False, 'from plaso.containers import artifacts\n'), ((6760, 6790), 'plaso.engine.knowledge_base.KnowledgeBase', 'knowledge_base.KnowledgeBase', ([], {}), '()\n', (6788, 6790), False, 'from plaso.engine import knowledge_base\n'), ((7058, 7088), 'plaso.engine.knowledge_base.KnowledgeBase', 'knowledge_base.KnowledgeBase', ([], {}), '()\n', (7086, 7088), False, 'from plaso.engine import knowledge_base\n'), ((7114, 7164), 'plaso.containers.artifacts.HostnameArtifact', 'artifacts.HostnameArtifact', ([], {'name': '"""myhost.mydomain"""'}), "(name='myhost.mydomain')\n", (7140, 7164), False, 'from plaso.containers import artifacts\n'), ((7242, 7349), 'plaso.containers.artifacts.UserAccountArtifact', 'artifacts.UserAccountArtifact', ([], {'identifier': '"""1000"""', 'user_directory': '"""/home/testuser"""', 'username': '"""testuser"""'}), "(identifier='1000', user_directory=\n '/home/testuser', username='testuser')\n", (7271, 7349), False, 'from plaso.containers import artifacts\n'), ((8008, 8038), 'plaso.engine.knowledge_base.KnowledgeBase', 'knowledge_base.KnowledgeBase', ([], {}), '()\n', (8036, 8038), False, 'from plaso.engine import knowledge_base\n'), ((8064, 8114), 'plaso.containers.artifacts.HostnameArtifact', 'artifacts.HostnameArtifact', ([], {'name': '"""myhost.mydomain"""'}), "(name='myhost.mydomain')\n", (8090, 8114), False, 'from plaso.containers import artifacts\n'), ((8192, 8299), 'plaso.containers.artifacts.UserAccountArtifact', 'artifacts.UserAccountArtifact', ([], {'identifier': '"""1000"""', 'user_directory': '"""/home/testuser"""', 'username': '"""testuser"""'}), "(identifier='1000', user_directory=\n '/home/testuser', username='testuser')\n", (8221, 8299), False, 'from plaso.containers import artifacts\n'), ((8806, 8836), 'plaso.engine.knowledge_base.KnowledgeBase', 'knowledge_base.KnowledgeBase', ([], {}), '()\n', (8834, 8836), False, 'from plaso.engine import knowledge_base\n'), ((8857, 8964), 'plaso.containers.artifacts.UserAccountArtifact', 'artifacts.UserAccountArtifact', ([], {'identifier': '"""1000"""', 'user_directory': '"""/home/testuser"""', 'username': '"""testuser"""'}), "(identifier='1000', user_directory=\n '/home/testuser', username='testuser')\n", (8886, 8964), False, 'from plaso.containers import artifacts\n'), ((9461, 9491), 'plaso.engine.knowledge_base.KnowledgeBase', 'knowledge_base.KnowledgeBase', ([], {}), '()\n', (9489, 9491), False, 'from plaso.engine import knowledge_base\n'), ((9965, 9995), 'plaso.engine.knowledge_base.KnowledgeBase', 'knowledge_base.KnowledgeBase', ([], {}), '()\n', (9993, 9995), False, 'from plaso.engine import knowledge_base\n'), ((10551, 10581), 'plaso.engine.knowledge_base.KnowledgeBase', 'knowledge_base.KnowledgeBase', ([], {}), '()\n', (10579, 10581), False, 'from plaso.engine import knowledge_base\n'), ((11058, 11088), 'plaso.engine.knowledge_base.KnowledgeBase', 'knowledge_base.KnowledgeBase', ([], {}), '()\n', (11086, 11088), False, 'from plaso.engine import knowledge_base\n'), ((11172, 11279), 'plaso.containers.artifacts.UserAccountArtifact', 'artifacts.UserAccountArtifact', ([], {'identifier': '"""1000"""', 'user_directory': '"""/home/testuser"""', 
'username': '"""testuser"""'}), "(identifier='1000', user_directory=\n '/home/testuser', username='testuser')\n", (11201, 11279), False, 'from plaso.containers import artifacts\n'), ((11549, 11579), 'plaso.engine.knowledge_base.KnowledgeBase', 'knowledge_base.KnowledgeBase', ([], {}), '()\n', (11577, 11579), False, 'from plaso.engine import knowledge_base\n'), ((11608, 11647), 'plaso.containers.artifacts.SystemConfigurationArtifact', 'artifacts.SystemConfigurationArtifact', ([], {}), '()\n', (11645, 11647), False, 'from plaso.containers import artifacts\n'), ((11684, 11734), 'plaso.containers.artifacts.HostnameArtifact', 'artifacts.HostnameArtifact', ([], {'name': '"""myhost.mydomain"""'}), "(name='myhost.mydomain')\n", (11710, 11734), False, 'from plaso.containers import artifacts\n'), ((11764, 11871), 'plaso.containers.artifacts.UserAccountArtifact', 'artifacts.UserAccountArtifact', ([], {'identifier': '"""1000"""', 'user_directory': '"""/home/testuser"""', 'username': '"""testuser"""'}), "(identifier='1000', user_directory=\n '/home/testuser', username='testuser')\n", (11793, 11871), False, 'from plaso.containers import artifacts\n'), ((12237, 12267), 'plaso.engine.knowledge_base.KnowledgeBase', 'knowledge_base.KnowledgeBase', ([], {}), '()\n', (12265, 12267), False, 'from plaso.engine import knowledge_base\n'), ((12785, 12815), 'plaso.engine.knowledge_base.KnowledgeBase', 'knowledge_base.KnowledgeBase', ([], {}), '()\n', (12813, 12815), False, 'from plaso.engine import knowledge_base\n'), ((13055, 13085), 'plaso.engine.knowledge_base.KnowledgeBase', 'knowledge_base.KnowledgeBase', ([], {}), '()\n', (13083, 13085), False, 'from plaso.engine import knowledge_base\n'), ((13111, 13161), 'plaso.containers.artifacts.HostnameArtifact', 'artifacts.HostnameArtifact', ([], {'name': '"""myhost.mydomain"""'}), "(name='myhost.mydomain')\n", (13137, 13161), False, 'from plaso.containers import artifacts\n'), ((13399, 13429), 'plaso.engine.knowledge_base.KnowledgeBase', 'knowledge_base.KnowledgeBase', ([], {}), '()\n', (13427, 13429), False, 'from plaso.engine import knowledge_base\n'), ((13456, 13586), 'plaso.containers.artifacts.TimeZoneArtifact', 'artifacts.TimeZoneArtifact', ([], {'localized_name': '"""Eastern (standaardtijd)"""', 'mui_form': '"""@tzres.dll,-112"""', 'name': '"""Eastern Standard Time"""'}), "(localized_name='Eastern (standaardtijd)',\n mui_form='@tzres.dll,-112', name='Eastern Standard Time')\n", (13482, 13586), False, 'from plaso.containers import artifacts\n')] |
import numpy as np
from itertools import product
from typing import List
from src.config import ConfigChess
from src.chess.board import Board
from src.chess.move import Move
def get_all_possible_moves() -> List[Move]:
all_possible_moves = set()
array = np.zeros((ConfigChess.board_size, ConfigChess.board_size)).astype("int8")
for i, j, piece in product(
range(ConfigChess.board_size), range(ConfigChess.board_size), ["Q", "N"]
):
array[i][j] = Board.piece_symbol_to_int(piece)
all_possible_moves.update(
set(map(lambda move: Move(uci=move.uci()), Board(array=array).legal_moves))
)
array[i][j] = 0
# underpromotion moves
array[1, :] = Board.piece_symbol_to_int("P")
all_possible_moves.update(
set(map(lambda move: Move(uci=move.uci()), Board(array=array).legal_moves))
)
array[0, :] = Board.piece_symbol_to_int("p")
all_possible_moves.update(
set(map(lambda move: Move(uci=move.uci()), Board(array=array).legal_moves))
)
# no need to add castling moves: they have already be added with queen moves under UCI notation
return sorted(list(all_possible_moves))
| [
"numpy.zeros",
"src.chess.board.Board.piece_symbol_to_int",
"src.chess.board.Board"
] | [((715, 745), 'src.chess.board.Board.piece_symbol_to_int', 'Board.piece_symbol_to_int', (['"""P"""'], {}), "('P')\n", (740, 745), False, 'from src.chess.board import Board\n'), ((885, 915), 'src.chess.board.Board.piece_symbol_to_int', 'Board.piece_symbol_to_int', (['"""p"""'], {}), "('p')\n", (910, 915), False, 'from src.chess.board import Board\n'), ((480, 512), 'src.chess.board.Board.piece_symbol_to_int', 'Board.piece_symbol_to_int', (['piece'], {}), '(piece)\n', (505, 512), False, 'from src.chess.board import Board\n'), ((264, 322), 'numpy.zeros', 'np.zeros', (['(ConfigChess.board_size, ConfigChess.board_size)'], {}), '((ConfigChess.board_size, ConfigChess.board_size))\n', (272, 322), True, 'import numpy as np\n'), ((828, 846), 'src.chess.board.Board', 'Board', ([], {'array': 'array'}), '(array=array)\n', (833, 846), False, 'from src.chess.board import Board\n'), ((998, 1016), 'src.chess.board.Board', 'Board', ([], {'array': 'array'}), '(array=array)\n', (1003, 1016), False, 'from src.chess.board import Board\n'), ((603, 621), 'src.chess.board.Board', 'Board', ([], {'array': 'array'}), '(array=array)\n', (608, 621), False, 'from src.chess.board import Board\n')] |
from random import gauss
class MultiRotor:
"""Simple vertical dynamics for a multirotor vehicle."""
GRAVITY = -9.81
def __init__(
self, altitude=10, velocity=0, mass=1.54, emc=10.0, dt=0.05, noise=0.1
):
"""
Args:
altitude (float): initial altitude of the vehicle
velocity (float): initial velocity of the vehicle
mass (float): mass of the vehicle
emc (float): electromechanical constant for the vehicle
dt (float): simulation time step
noise (float): standard deviation of normally distributed simulation noise
"""
self.y0 = altitude
self.y1 = velocity
self.mass = mass
self.emc = emc
self.dt = dt
self.noise = noise
def step(self, effort):
"""Advance the multirotor simulation and apply motor forces.
Args:
effort (float): related to the upward thrust of the vehicle,
it must be >= 0
Return:
The current state (altitude, velocity) of the vehicle.
"""
effort = max(0, effort)
scaled_effort = self.emc / self.mass * effort
net_acceleration = MultiRotor.GRAVITY - 0.75 * self.y1 + scaled_effort
        # Don't let the vehicle fall through the ground
if self.y0 <= 0 and net_acceleration < 0:
y0dot = 0
y1dot = 0
else:
y0dot = self.y1
y1dot = net_acceleration
self.y0 += y0dot * self.dt
self.y1 += y1dot * self.dt
self.y0 += gauss(0, self.noise)
return self.y0, self.y1
def get_altitude(self):
"""Return the current altitude."""
return self.y0
def get_delta_time(self):
"""Return the simulation time step."""
return self.dt
| [
"random.gauss"
] | [((1595, 1615), 'random.gauss', 'gauss', (['(0)', 'self.noise'], {}), '(0, self.noise)\n', (1600, 1615), False, 'from random import gauss\n')] |
import sys
import os.path
import timeit
sys.path.insert( 0, os.path.normpath(os.path.join( os.path.dirname( __file__ ), '..') ))
from aql_tests import skip, AqlTestCase, runLocalTests
from aql.util_types import UniqueList, SplitListType, List, ValueListType
#//===========================================================================//
class TestListTypes( AqlTestCase ):
def test_unique_list(self):
ul = UniqueList( [1,2,3,2,1,3] ); ul.selfTest()
self.assertEqual( ul, [2,3,1])
self.assertEqual( list(ul), [1,2,3])
ul = UniqueList()
ul.append( 1 ); ul.selfTest()
ul.append( 3 ); ul.selfTest()
ul.append( 1 ); ul.selfTest()
ul.append( 2 ); ul.selfTest()
ul.append( 3 ); ul.selfTest()
ul.append( 1 ); ul.selfTest()
self.assertEqual( list(ul), [1,3,2])
ul.append_front( 2 ); ul.selfTest()
self.assertEqual( list(ul), [2,1,3])
ul.extend( [4,1,2,2,5] ); ul.selfTest()
self.assertEqual( list(ul), [2,1,3,4,5])
ul.extend_front( [1,2,2,3,1,1,5,5] ); ul.selfTest()
self.assertEqual( list(ul), [1,2,3,5,4])
self.assertEqual( list(ul), [1,2,3,5,4])
ul.remove( 1 ); ul.selfTest()
self.assertEqual( list(ul), [2,3,5,4])
ul.remove( 5 ); ul.selfTest()
self.assertEqual( list(ul), [2,3,4])
ul.remove( 55 ); ul.selfTest()
self.assertEqual( list(ul), [2,3,4])
self.assertEqual( ul.pop(), 4 ); ul.selfTest()
self.assertEqual( ul.pop_front(), 2 ); ul.selfTest()
self.assertEqual( ul.pop_front(), 3 ); ul.selfTest()
ul += [1,2,2,2,3,1,2,4,3,3,5,4,5,5]; ul.selfTest()
self.assertEqual( list(ul), [1,2,3,4,5])
ul -= [2,2,2,4,33]; ul.selfTest()
self.assertEqual( list(ul), [1,3,5])
self.assertEqual( ul[0], 1)
self.assertEqual( ul[2], 5)
self.assertEqual( ul[1], 3)
self.assertIn( 1, ul)
self.assertEqual( list(reversed(ul)), [5,3,1])
ul.reverse(); ul.selfTest()
self.assertEqual( ul, [5,3,1] )
ul.reverse(); ul.selfTest()
self.assertEqual( str(ul), "[1, 3, 5]" )
self.assertEqual( ul, UniqueList([1, 3, 5]) )
self.assertEqual( ul, UniqueList(ul) )
self.assertLess( UniqueList([1,2,2,2,3]), UniqueList([1,2,1,1,1,4]) )
self.assertLess( UniqueList([1,2,2,2,3]), [1,2,1,1,1,4] )
#//===========================================================================//
def test_splitlist(self):
l = SplitListType( List, ", \t\n\r" )("1,2, 3,,, \n\r\t4")
self.assertEqual( l, ['1','2','3','4'] )
self.assertEqual( l, "1,2,3,4" )
self.assertEqual( l, "1 2 3 4" )
self.assertEqual( str(l), "1,2,3,4" )
l += "7, 8"
self.assertEqual( l, ['1','2','3','4','7','8'] )
l -= "2, 3"
self.assertEqual( l, ['1','4','7','8'] )
l -= "5"
self.assertEqual( l, ['1','4','7','8'] )
l.extend_front( "10,12" )
self.assertEqual( l, ['10','12','1','4','7','8'] )
l.extend( "0,-1" )
self.assertEqual( l, ['10','12','1','4','7','8', '0', '-1'] )
#//===========================================================================//
def test_valuelist(self):
l = SplitListType( ValueListType( List, int ), ", \t\n\r" )("1,2, 3,,, \n\r\t4")
self.assertEqual( l, [1,2,3,4] )
self.assertEqual( l, "1,2,3,4" )
self.assertEqual( l, "1 2 3 4" )
self.assertEqual( str(l), "1,2,3,4" )
l += [7, 8]
self.assertEqual( l, ['1','2','3','4','7','8'] )
l += 78
self.assertEqual( l, ['1','2','3','4','7','8', 78] )
l -= 78
self.assertEqual( l, ['1','2','3','4','7','8'] )
l -= "2, 3"
self.assertEqual( l, ['1','4','7','8'] )
l -= "5"
self.assertEqual( l, ['1','4','7','8'] )
l.extend_front( "10,12" )
self.assertEqual( l, ['10','12','1','4','7','8'] )
l.extend( "0,-1" )
self.assertEqual( l, [10,12,1,4,7,8,0,-1] )
l[0] = "5"
self.assertEqual( l, [5,12,1,4,7,8,0,-1] )
#//===========================================================================//
def test_list(self):
l = List([1,2,3,4])
self.assertEqual( l, [1,2,3,4] )
l += [7, 8]
self.assertEqual( l, [1,2,3,4,7,8] )
l += 78
self.assertEqual( l, [1,2,3,4,7,8,78] )
l -= 78
self.assertEqual( l, [1,2,3,4,7,8] )
l -= [2, 3]
self.assertEqual( l, [1,4,7,8] )
l -= 5
self.assertEqual( l, [1,4,7,8] )
l.extend_front( [10,12] )
self.assertEqual( l, [10,12,1,4,7,8] )
l.extend( [0,-1] )
self.assertEqual( l, [10,12,1,4,7,8, 0, -1] )
#//===========================================================================//
if __name__ == "__main__":
runLocalTests()
| [
"aql.util_types.UniqueList",
"aql_tests.runLocalTests",
"aql.util_types.List",
"aql.util_types.ValueListType",
"aql.util_types.SplitListType"
] | [((4754, 4769), 'aql_tests.runLocalTests', 'runLocalTests', ([], {}), '()\n', (4767, 4769), False, 'from aql_tests import skip, AqlTestCase, runLocalTests\n'), ((424, 454), 'aql.util_types.UniqueList', 'UniqueList', (['[1, 2, 3, 2, 1, 3]'], {}), '([1, 2, 3, 2, 1, 3])\n', (434, 454), False, 'from aql.util_types import UniqueList, SplitListType, List, ValueListType\n'), ((562, 574), 'aql.util_types.UniqueList', 'UniqueList', ([], {}), '()\n', (572, 574), False, 'from aql.util_types import UniqueList, SplitListType, List, ValueListType\n'), ((4141, 4159), 'aql.util_types.List', 'List', (['[1, 2, 3, 4]'], {}), '([1, 2, 3, 4])\n', (4145, 4159), False, 'from aql.util_types import UniqueList, SplitListType, List, ValueListType\n'), ((2152, 2173), 'aql.util_types.UniqueList', 'UniqueList', (['[1, 3, 5]'], {}), '([1, 3, 5])\n', (2162, 2173), False, 'from aql.util_types import UniqueList, SplitListType, List, ValueListType\n'), ((2202, 2216), 'aql.util_types.UniqueList', 'UniqueList', (['ul'], {}), '(ul)\n', (2212, 2216), False, 'from aql.util_types import UniqueList, SplitListType, List, ValueListType\n'), ((2240, 2267), 'aql.util_types.UniqueList', 'UniqueList', (['[1, 2, 2, 2, 3]'], {}), '([1, 2, 2, 2, 3])\n', (2250, 2267), False, 'from aql.util_types import UniqueList, SplitListType, List, ValueListType\n'), ((2265, 2295), 'aql.util_types.UniqueList', 'UniqueList', (['[1, 2, 1, 1, 1, 4]'], {}), '([1, 2, 1, 1, 1, 4])\n', (2275, 2295), False, 'from aql.util_types import UniqueList, SplitListType, List, ValueListType\n'), ((2314, 2341), 'aql.util_types.UniqueList', 'UniqueList', (['[1, 2, 2, 2, 3]'], {}), '([1, 2, 2, 2, 3])\n', (2324, 2341), False, 'from aql.util_types import UniqueList, SplitListType, List, ValueListType\n'), ((2481, 2512), 'aql.util_types.SplitListType', 'SplitListType', (['List', "', \\t\\n\\r'"], {}), "(List, ', \\t\\n\\r')\n", (2494, 2512), False, 'from aql.util_types import UniqueList, SplitListType, List, ValueListType\n'), ((3225, 3249), 'aql.util_types.ValueListType', 'ValueListType', (['List', 'int'], {}), '(List, int)\n', (3238, 3249), False, 'from aql.util_types import UniqueList, SplitListType, List, ValueListType\n')] |
# config params
KB = 1024
MB = 1024*KB
GB = 1024*MB
# name of meta root dir
META_DIR = ".metasync"
# batching time for daemon
SYNC_WAIT = 3
# blob size
BLOB_UNIT = 32*MB
# Increase of Paxos proposal number
PAXOS_PNUM_INC = 10
# authentication directory
import os
AUTH_DIR = os.path.join(os.path.expanduser("~"), ".metasync")
| [
"os.path.expanduser"
] | [((294, 317), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (312, 317), False, 'import os\n')] |
import unittest
from py.tests.utils import test
from py import valid_parentheses as vp
class TestValidParentheses(unittest.TestCase):
@test(vp.Solution.is_valid)
def test_valid_parentheses(self) -> None:
test("()", result=True)
test("()[]{}", result=True)
test("(]", result=False)
test("([)]", result=False)
test("{[]}", result=True)
test("", result=True)
test(")()", result=False)
test("(())((())))", result=False)
| [
"py.tests.utils.test"
] | [((142, 168), 'py.tests.utils.test', 'test', (['vp.Solution.is_valid'], {}), '(vp.Solution.is_valid)\n', (146, 168), False, 'from py.tests.utils import test\n'), ((223, 246), 'py.tests.utils.test', 'test', (['"""()"""'], {'result': '(True)'}), "('()', result=True)\n", (227, 246), False, 'from py.tests.utils import test\n'), ((264, 291), 'py.tests.utils.test', 'test', (['"""()[]{}"""'], {'result': '(True)'}), "('()[]{}', result=True)\n", (268, 291), False, 'from py.tests.utils import test\n'), ((305, 329), 'py.tests.utils.test', 'test', (['"""(]"""'], {'result': '(False)'}), "('(]', result=False)\n", (309, 329), False, 'from py.tests.utils import test\n'), ((347, 373), 'py.tests.utils.test', 'test', (['"""([)]"""'], {'result': '(False)'}), "('([)]', result=False)\n", (351, 373), False, 'from py.tests.utils import test\n'), ((389, 414), 'py.tests.utils.test', 'test', (['"""{[]}"""'], {'result': '(True)'}), "('{[]}', result=True)\n", (393, 414), False, 'from py.tests.utils import test\n'), ((430, 451), 'py.tests.utils.test', 'test', (['""""""'], {'result': '(True)'}), "('', result=True)\n", (434, 451), False, 'from py.tests.utils import test\n'), ((471, 496), 'py.tests.utils.test', 'test', (['""")()"""'], {'result': '(False)'}), "(')()', result=False)\n", (475, 496), False, 'from py.tests.utils import test\n'), ((513, 546), 'py.tests.utils.test', 'test', (['"""(())((())))"""'], {'result': '(False)'}), "('(())((())))', result=False)\n", (517, 546), False, 'from py.tests.utils import test\n')] |
from django import forms
from fobi.base import FormFieldPlugin, form_element_plugin_registry
from .forms import HouseholdTenureForm
class HouseholdTenurePlugin(FormFieldPlugin):
"""HouseholdTenurePlugin."""
uid = "household_tenure"
name = "What year did you move into your current address?"
form = HouseholdTenureForm
group = "Intercept" # Group to which the plugin belongs to
def get_form_field_instances(self, request=None, form_entry=None,
form_element_entries=None, **kwargs):
field_kwargs = {
'required': self.data.required,
'label': self.data.label,
'widget': forms.widgets.NumberInput(attrs={}),
}
return [(self.data.name, forms.IntegerField, field_kwargs)]
form_element_plugin_registry.register(HouseholdTenurePlugin)
| [
"django.forms.widgets.NumberInput",
"fobi.base.form_element_plugin_registry.register"
] | [((793, 853), 'fobi.base.form_element_plugin_registry.register', 'form_element_plugin_registry.register', (['HouseholdTenurePlugin'], {}), '(HouseholdTenurePlugin)\n', (830, 853), False, 'from fobi.base import FormFieldPlugin, form_element_plugin_registry\n'), ((675, 710), 'django.forms.widgets.NumberInput', 'forms.widgets.NumberInput', ([], {'attrs': '{}'}), '(attrs={})\n', (700, 710), False, 'from django import forms\n')] |
from bottle import TEMPLATE_PATH, route, run, template, redirect, get, post, request, response, auth_basic, Bottle, abort, error, static_file
import bottle
import controller
from controller import dobi_parcele_za_prikaz, dobi_info_parcele, dodaj_gosta_na_rezervacijo, naredi_rezervacijo, dobi_rezervacijo_po_id, zakljuci_na_datum_in_placaj, dobi_postavke_racuna
import datetime as dt
@bottle.get('/')
def root():
redirect('/domov')
@bottle.get('/domov')
def index():
parcele = dobi_parcele_za_prikaz(dt.date.today())
return template("domov", parcele=parcele, hide_header_back=True)
@bottle.get("/parcela/<id_parcele>")
def parcela(id_parcele):
    'Check the current status of the parcel'
rez, gostje = dobi_info_parcele(id_parcele, dt.date.today())
if rez is not None:
stanje = "Parcela je trenutno zasedena"
else:
stanje = "Parcela je trenutno na voljo"
return template('parcela', id_parcela=id_parcele, rezervacija=rez, stanje=stanje, gostje=gostje)
@bottle.get("/naredi-rezervacijo/<id_parcele>")
def nova_rezervacija(id_parcele=None):
print(id_parcele)
today = dt.date.today()
tomorrow = today + dt.timedelta(days=1)
return template('nova_rezervacija', id_parcele=id_parcele, today=today, tomorrow=tomorrow)
@bottle.post("/naredi-rezervacijo")
def naredi_novo_rezervacijo():
" V modelu naredi novo rezervacijo in ji doda prvega gosta"
# Preberemo lastnosti iz forme
ime = request.forms.ime#get("")
priimek = request.forms.priimek#get("")
emso = request.forms.emso#get("")
drzava = request.forms.drzava#get("")
id_parcele = request.forms.id_parcele#get("")
od = request.forms.zacetek#get("")
do = request.forms.konec#get("")
print(ime, priimek)
try:
datum_od = dt.datetime.fromisoformat(od).date()
datum_do = dt.datetime.fromisoformat(do).date()
except Exception as e:
print(e)
print("Napaka pri pretvorbi datumov")
return redirect("/naredi-rezervacijo")
rezervacija = naredi_rezervacijo(id_parcele)
dodaj_gosta_na_rezervacijo(rezervacija.id_rezervacije, {
"EMSO":emso,
"ime":ime,
"priimek":priimek,
"drzava":drzava,
}, datum_od, datum_do)
return redirect(f"/parcela/{id_parcele}")
@bottle.get("/dodaj-gosta/<id_rezervacije>")
def get_dodaj_gosta_na_rezervacijo(id_rezervacije):
today = dt.date.today()
tomorrow = today + dt.timedelta(days=1)
rezervacija = dobi_rezervacijo_po_id(id_rezervacije)
if not rezervacija:
return template("error", sporocilo="Rezervacija ne obstaja!", naslov="Napaka")
return template("dodajanje_gosta", id_rezervacije=id_rezervacije, today=today, tomorrow=tomorrow)
@bottle.post("/dodaj-gosta-na-rezervacijo")
def post_dodaj_gosta_na_rezervacijo():
" V modelu rezervaciji doda gosta"
# Preberemo lastnosti iz forme
ime = request.forms.ime
priimek = request.forms.priimek
emso = request.forms.emso#get("")
drzava = request.forms.drzava#get("")
id_rezervacije = request.forms.rez#get("")
od = request.forms.zacetek#get("")
do = request.forms.konec#get("")
try:
datum_od = dt.datetime.fromisoformat(od).date()
datum_do = dt.datetime.fromisoformat(do).date()
except Exception as e:
print(e)
print("Napaka pri pretvorbi datumov")
return redirect("/dodaj-gosta")
rezervacija = dobi_rezervacijo_po_id(id_rezervacije)
if not rezervacija:
return template("error", sporocilo="Rezervacija ne obstaja!", naslov="Napaka")
dodaj_gosta_na_rezervacijo(rezervacija.id_rezervacije, {
"EMSO":emso,
"ime":ime,
"priimek":priimek,
"drzava":drzava,
},datum_od,datum_do)
print(id_rezervacije)
return redirect(f"/parcela/{rezervacija.id_parcele}")
@bottle.get("/predracun/<id_rezervacije>")
def predracun(id_rezervacije):
rezervacija = dobi_rezervacijo_po_id(id_rezervacije)
if not rezervacija:
return template("error", sporocilo="Rezervacija ne obstaja!", naslov="Napaka")
today = dt.date.today()
gostje = rezervacija.gostje
sestevek, postavke = dobi_postavke_racuna(rezervacija)
slovar_cen = {}
slovar_kolicin = {}
for gost in gostje:
slovar_kolicin[gost] = len(gost.nocitve)
slovar_cen[gost] = format(gost.cena_nocitve() * slovar_kolicin.get(gost), '.2f')
return template("racun", id_rezervacije=id_rezervacije, sestevek=format(sestevek, '.2f'), gostje=gostje, today=today.strftime("%d/%m/%Y"), slovar_cen=slovar_cen, slovar_kolicin=slovar_kolicin)
@bottle.get("/zakljuci/<id_rezervacije>")
def racun(id_rezervacije):
rezervacija = dobi_rezervacijo_po_id(id_rezervacije)
if not rezervacija:
return template("error", sporocilo="Rezervacija ne obstaja!", naslov="Napaka")
today = dt.date.today()
gostje = rezervacija.gostje
sestevek, postavke = zakljuci_na_datum_in_placaj(rezervacija, dt.date.today())
slovar_cen = {}
slovar_kolicin = {}
for gost in gostje:
slovar_kolicin[gost] = len(gost.nocitve)
slovar_cen[gost] = format(gost.cena_nocitve() * slovar_kolicin.get(gost), '.2f')
return template("racun", id_rezervacije=id_rezervacije, sestevek=format(sestevek, '.2f'), gostje=gostje, today=today.strftime("%d/%m/%Y"), slovar_cen=slovar_cen, slovar_kolicin=slovar_kolicin)
@bottle.error(404)
def napaka404(a):
return template("error", sporocilo="Stran ne obstaja!", naslov="404")
@bottle.error(500)
def napaka500(a):
return template("error", sporocilo="Napaka streznika!", naslov="500")
bottle.run(reloader=True, debug=True)
| [
"controller.naredi_rezervacijo",
"bottle.template",
"controller.dobi_postavke_racuna",
"controller.dobi_rezervacijo_po_id",
"bottle.post",
"controller.dodaj_gosta_na_rezervacijo",
"datetime.date.today",
"datetime.timedelta",
"datetime.datetime.fromisoformat",
"bottle.get",
"bottle.run",
"bottle.error",
"bottle.redirect"
] | [((386, 401), 'bottle.get', 'bottle.get', (['"""/"""'], {}), "('/')\n", (396, 401), False, 'import bottle\n'), ((439, 459), 'bottle.get', 'bottle.get', (['"""/domov"""'], {}), "('/domov')\n", (449, 459), False, 'import bottle\n'), ((598, 633), 'bottle.get', 'bottle.get', (['"""/parcela/<id_parcele>"""'], {}), "('/parcela/<id_parcele>')\n", (608, 633), False, 'import bottle\n'), ((989, 1035), 'bottle.get', 'bottle.get', (['"""/naredi-rezervacijo/<id_parcele>"""'], {}), "('/naredi-rezervacijo/<id_parcele>')\n", (999, 1035), False, 'import bottle\n'), ((1266, 1300), 'bottle.post', 'bottle.post', (['"""/naredi-rezervacijo"""'], {}), "('/naredi-rezervacijo')\n", (1277, 1300), False, 'import bottle\n'), ((2284, 2327), 'bottle.get', 'bottle.get', (['"""/dodaj-gosta/<id_rezervacije>"""'], {}), "('/dodaj-gosta/<id_rezervacije>')\n", (2294, 2327), False, 'import bottle\n'), ((2734, 2776), 'bottle.post', 'bottle.post', (['"""/dodaj-gosta-na-rezervacijo"""'], {}), "('/dodaj-gosta-na-rezervacijo')\n", (2745, 2776), False, 'import bottle\n'), ((3844, 3885), 'bottle.get', 'bottle.get', (['"""/predracun/<id_rezervacije>"""'], {}), "('/predracun/<id_rezervacije>')\n", (3854, 3885), False, 'import bottle\n'), ((4609, 4649), 'bottle.get', 'bottle.get', (['"""/zakljuci/<id_rezervacije>"""'], {}), "('/zakljuci/<id_rezervacije>')\n", (4619, 4649), False, 'import bottle\n'), ((5393, 5410), 'bottle.error', 'bottle.error', (['(404)'], {}), '(404)\n', (5405, 5410), False, 'import bottle\n'), ((5505, 5522), 'bottle.error', 'bottle.error', (['(500)'], {}), '(500)\n', (5517, 5522), False, 'import bottle\n'), ((5616, 5653), 'bottle.run', 'bottle.run', ([], {'reloader': '(True)', 'debug': '(True)'}), '(reloader=True, debug=True)\n', (5626, 5653), False, 'import bottle\n'), ((418, 436), 'bottle.redirect', 'redirect', (['"""/domov"""'], {}), "('/domov')\n", (426, 436), False, 'from bottle import TEMPLATE_PATH, route, run, template, redirect, get, post, request, response, auth_basic, Bottle, abort, error, static_file\n'), ((538, 595), 'bottle.template', 'template', (['"""domov"""'], {'parcele': 'parcele', 'hide_header_back': '(True)'}), "('domov', parcele=parcele, hide_header_back=True)\n", (546, 595), False, 'from bottle import TEMPLATE_PATH, route, run, template, redirect, get, post, request, response, auth_basic, Bottle, abort, error, static_file\n'), ((896, 989), 'bottle.template', 'template', (['"""parcela"""'], {'id_parcela': 'id_parcele', 'rezervacija': 'rez', 'stanje': 'stanje', 'gostje': 'gostje'}), "('parcela', id_parcela=id_parcele, rezervacija=rez, stanje=stanje,\n gostje=gostje)\n", (904, 989), False, 'from bottle import TEMPLATE_PATH, route, run, template, redirect, get, post, request, response, auth_basic, Bottle, abort, error, static_file\n'), ((1109, 1124), 'datetime.date.today', 'dt.date.today', ([], {}), '()\n', (1122, 1124), True, 'import datetime as dt\n'), ((1180, 1268), 'bottle.template', 'template', (['"""nova_rezervacija"""'], {'id_parcele': 'id_parcele', 'today': 'today', 'tomorrow': 'tomorrow'}), "('nova_rezervacija', id_parcele=id_parcele, today=today, tomorrow=\n tomorrow)\n", (1188, 1268), False, 'from bottle import TEMPLATE_PATH, route, run, template, redirect, get, post, request, response, auth_basic, Bottle, abort, error, static_file\n'), ((2025, 2055), 'controller.naredi_rezervacijo', 'naredi_rezervacijo', (['id_parcele'], {}), '(id_parcele)\n', (2043, 2055), False, 'from controller import dobi_parcele_za_prikaz, dobi_info_parcele, dodaj_gosta_na_rezervacijo, naredi_rezervacijo, 
dobi_rezervacijo_po_id, zakljuci_na_datum_in_placaj, dobi_postavke_racuna\n'), ((2060, 2204), 'controller.dodaj_gosta_na_rezervacijo', 'dodaj_gosta_na_rezervacijo', (['rezervacija.id_rezervacije', "{'EMSO': emso, 'ime': ime, 'priimek': priimek, 'drzava': drzava}", 'datum_od', 'datum_do'], {}), "(rezervacija.id_rezervacije, {'EMSO': emso, 'ime':\n ime, 'priimek': priimek, 'drzava': drzava}, datum_od, datum_do)\n", (2086, 2204), False, 'from controller import dobi_parcele_za_prikaz, dobi_info_parcele, dodaj_gosta_na_rezervacijo, naredi_rezervacijo, dobi_rezervacijo_po_id, zakljuci_na_datum_in_placaj, dobi_postavke_racuna\n'), ((2247, 2281), 'bottle.redirect', 'redirect', (['f"""/parcela/{id_parcele}"""'], {}), "(f'/parcela/{id_parcele}')\n", (2255, 2281), False, 'from bottle import TEMPLATE_PATH, route, run, template, redirect, get, post, request, response, auth_basic, Bottle, abort, error, static_file\n'), ((2392, 2407), 'datetime.date.today', 'dt.date.today', ([], {}), '()\n', (2405, 2407), True, 'import datetime as dt\n'), ((2475, 2513), 'controller.dobi_rezervacijo_po_id', 'dobi_rezervacijo_po_id', (['id_rezervacije'], {}), '(id_rezervacije)\n', (2497, 2513), False, 'from controller import dobi_parcele_za_prikaz, dobi_info_parcele, dodaj_gosta_na_rezervacijo, naredi_rezervacijo, dobi_rezervacijo_po_id, zakljuci_na_datum_in_placaj, dobi_postavke_racuna\n'), ((2641, 2735), 'bottle.template', 'template', (['"""dodajanje_gosta"""'], {'id_rezervacije': 'id_rezervacije', 'today': 'today', 'tomorrow': 'tomorrow'}), "('dodajanje_gosta', id_rezervacije=id_rezervacije, today=today,\n tomorrow=tomorrow)\n", (2649, 2735), False, 'from bottle import TEMPLATE_PATH, route, run, template, redirect, get, post, request, response, auth_basic, Bottle, abort, error, static_file\n'), ((3430, 3468), 'controller.dobi_rezervacijo_po_id', 'dobi_rezervacijo_po_id', (['id_rezervacije'], {}), '(id_rezervacije)\n', (3452, 3468), False, 'from controller import dobi_parcele_za_prikaz, dobi_info_parcele, dodaj_gosta_na_rezervacijo, naredi_rezervacijo, dobi_rezervacijo_po_id, zakljuci_na_datum_in_placaj, dobi_postavke_racuna\n'), ((3584, 3728), 'controller.dodaj_gosta_na_rezervacijo', 'dodaj_gosta_na_rezervacijo', (['rezervacija.id_rezervacije', "{'EMSO': emso, 'ime': ime, 'priimek': priimek, 'drzava': drzava}", 'datum_od', 'datum_do'], {}), "(rezervacija.id_rezervacije, {'EMSO': emso, 'ime':\n ime, 'priimek': priimek, 'drzava': drzava}, datum_od, datum_do)\n", (3610, 3728), False, 'from controller import dobi_parcele_za_prikaz, dobi_info_parcele, dodaj_gosta_na_rezervacijo, naredi_rezervacijo, dobi_rezervacijo_po_id, zakljuci_na_datum_in_placaj, dobi_postavke_racuna\n'), ((3795, 3841), 'bottle.redirect', 'redirect', (['f"""/parcela/{rezervacija.id_parcele}"""'], {}), "(f'/parcela/{rezervacija.id_parcele}')\n", (3803, 3841), False, 'from bottle import TEMPLATE_PATH, route, run, template, redirect, get, post, request, response, auth_basic, Bottle, abort, error, static_file\n'), ((3935, 3973), 'controller.dobi_rezervacijo_po_id', 'dobi_rezervacijo_po_id', (['id_rezervacije'], {}), '(id_rezervacije)\n', (3957, 3973), False, 'from controller import dobi_parcele_za_prikaz, dobi_info_parcele, dodaj_gosta_na_rezervacijo, naredi_rezervacijo, dobi_rezervacijo_po_id, zakljuci_na_datum_in_placaj, dobi_postavke_racuna\n'), ((4097, 4112), 'datetime.date.today', 'dt.date.today', ([], {}), '()\n', (4110, 4112), True, 'import datetime as dt\n'), ((4170, 4203), 'controller.dobi_postavke_racuna', 'dobi_postavke_racuna', (['rezervacija'], 
{}), '(rezervacija)\n', (4190, 4203), False, 'from controller import dobi_parcele_za_prikaz, dobi_info_parcele, dodaj_gosta_na_rezervacijo, naredi_rezervacijo, dobi_rezervacijo_po_id, zakljuci_na_datum_in_placaj, dobi_postavke_racuna\n'), ((4695, 4733), 'controller.dobi_rezervacijo_po_id', 'dobi_rezervacijo_po_id', (['id_rezervacije'], {}), '(id_rezervacije)\n', (4717, 4733), False, 'from controller import dobi_parcele_za_prikaz, dobi_info_parcele, dodaj_gosta_na_rezervacijo, naredi_rezervacijo, dobi_rezervacijo_po_id, zakljuci_na_datum_in_placaj, dobi_postavke_racuna\n'), ((4857, 4872), 'datetime.date.today', 'dt.date.today', ([], {}), '()\n', (4870, 4872), True, 'import datetime as dt\n'), ((5440, 5502), 'bottle.template', 'template', (['"""error"""'], {'sporocilo': '"""Stran ne obstaja!"""', 'naslov': '"""404"""'}), "('error', sporocilo='Stran ne obstaja!', naslov='404')\n", (5448, 5502), False, 'from bottle import TEMPLATE_PATH, route, run, template, redirect, get, post, request, response, auth_basic, Bottle, abort, error, static_file\n'), ((5552, 5614), 'bottle.template', 'template', (['"""error"""'], {'sporocilo': '"""Napaka streznika!"""', 'naslov': '"""500"""'}), "('error', sporocilo='Napaka streznika!', naslov='500')\n", (5560, 5614), False, 'from bottle import TEMPLATE_PATH, route, run, template, redirect, get, post, request, response, auth_basic, Bottle, abort, error, static_file\n'), ((510, 525), 'datetime.date.today', 'dt.date.today', ([], {}), '()\n', (523, 525), True, 'import datetime as dt\n'), ((738, 753), 'datetime.date.today', 'dt.date.today', ([], {}), '()\n', (751, 753), True, 'import datetime as dt\n'), ((1148, 1168), 'datetime.timedelta', 'dt.timedelta', ([], {'days': '(1)'}), '(days=1)\n', (1160, 1168), True, 'import datetime as dt\n'), ((2431, 2451), 'datetime.timedelta', 'dt.timedelta', ([], {'days': '(1)'}), '(days=1)\n', (2443, 2451), True, 'import datetime as dt\n'), ((2553, 2624), 'bottle.template', 'template', (['"""error"""'], {'sporocilo': '"""Rezervacija ne obstaja!"""', 'naslov': '"""Napaka"""'}), "('error', sporocilo='Rezervacija ne obstaja!', naslov='Napaka')\n", (2561, 2624), False, 'from bottle import TEMPLATE_PATH, route, run, template, redirect, get, post, request, response, auth_basic, Bottle, abort, error, static_file\n'), ((3508, 3579), 'bottle.template', 'template', (['"""error"""'], {'sporocilo': '"""Rezervacija ne obstaja!"""', 'naslov': '"""Napaka"""'}), "('error', sporocilo='Rezervacija ne obstaja!', naslov='Napaka')\n", (3516, 3579), False, 'from bottle import TEMPLATE_PATH, route, run, template, redirect, get, post, request, response, auth_basic, Bottle, abort, error, static_file\n'), ((4013, 4084), 'bottle.template', 'template', (['"""error"""'], {'sporocilo': '"""Rezervacija ne obstaja!"""', 'naslov': '"""Napaka"""'}), "('error', sporocilo='Rezervacija ne obstaja!', naslov='Napaka')\n", (4021, 4084), False, 'from bottle import TEMPLATE_PATH, route, run, template, redirect, get, post, request, response, auth_basic, Bottle, abort, error, static_file\n'), ((4773, 4844), 'bottle.template', 'template', (['"""error"""'], {'sporocilo': '"""Rezervacija ne obstaja!"""', 'naslov': '"""Napaka"""'}), "('error', sporocilo='Rezervacija ne obstaja!', naslov='Napaka')\n", (4781, 4844), False, 'from bottle import TEMPLATE_PATH, route, run, template, redirect, get, post, request, response, auth_basic, Bottle, abort, error, static_file\n'), ((4971, 4986), 'datetime.date.today', 'dt.date.today', ([], {}), '()\n', (4984, 4986), True, 'import datetime as 
dt\n'), ((1970, 2001), 'bottle.redirect', 'redirect', (['"""/naredi-rezervacijo"""'], {}), "('/naredi-rezervacijo')\n", (1978, 2001), False, 'from bottle import TEMPLATE_PATH, route, run, template, redirect, get, post, request, response, auth_basic, Bottle, abort, error, static_file\n'), ((3385, 3409), 'bottle.redirect', 'redirect', (['"""/dodaj-gosta"""'], {}), "('/dodaj-gosta')\n", (3393, 3409), False, 'from bottle import TEMPLATE_PATH, route, run, template, redirect, get, post, request, response, auth_basic, Bottle, abort, error, static_file\n'), ((1772, 1801), 'datetime.datetime.fromisoformat', 'dt.datetime.fromisoformat', (['od'], {}), '(od)\n', (1797, 1801), True, 'import datetime as dt\n'), ((1828, 1857), 'datetime.datetime.fromisoformat', 'dt.datetime.fromisoformat', (['do'], {}), '(do)\n', (1853, 1857), True, 'import datetime as dt\n'), ((3187, 3216), 'datetime.datetime.fromisoformat', 'dt.datetime.fromisoformat', (['od'], {}), '(od)\n', (3212, 3216), True, 'import datetime as dt\n'), ((3243, 3272), 'datetime.datetime.fromisoformat', 'dt.datetime.fromisoformat', (['do'], {}), '(do)\n', (3268, 3272), True, 'import datetime as dt\n')] |
#!/usr/bin/env python
import vtk
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# Use the painter to draw using colors.
# This is not a pipeline object. It will support pipeline objects.
# Please do not use this object directly.
imageCanvas = vtk.vtkImageCanvasSource2D()
imageCanvas.SetNumberOfScalarComponents(3)
imageCanvas.SetScalarTypeToUnsignedChar()
imageCanvas.SetExtent(0,320,0,320,0,0)
imageCanvas.SetDrawColor(0,0,0)
imageCanvas.FillBox(0,511,0,511)
# r, g, b
imageCanvas.SetDrawColor(255,0,0)
imageCanvas.FillBox(0,50,0,100)
imageCanvas.SetDrawColor(128,128,0)
imageCanvas.FillBox(50,100,0,100)
imageCanvas.SetDrawColor(0,255,0)
imageCanvas.FillBox(100,150,0,100)
imageCanvas.SetDrawColor(0,128,128)
imageCanvas.FillBox(150,200,0,100)
imageCanvas.SetDrawColor(0,0,255)
imageCanvas.FillBox(200,250,0,100)
imageCanvas.SetDrawColor(128,0,128)
imageCanvas.FillBox(250,300,0,100)
# intensity scale
imageCanvas.SetDrawColor(5,5,5)
imageCanvas.FillBox(0,50,110,210)
imageCanvas.SetDrawColor(55,55,55)
imageCanvas.FillBox(50,100,110,210)
imageCanvas.SetDrawColor(105,105,105)
imageCanvas.FillBox(100,150,110,210)
imageCanvas.SetDrawColor(155,155,155)
imageCanvas.FillBox(150,200,110,210)
imageCanvas.SetDrawColor(205,205,205)
imageCanvas.FillBox(200,250,110,210)
imageCanvas.SetDrawColor(255,255,255)
imageCanvas.FillBox(250,300,110,210)
# saturation scale
imageCanvas.SetDrawColor(245,0,0)
imageCanvas.FillBox(0,50,220,320)
imageCanvas.SetDrawColor(213,16,16)
imageCanvas.FillBox(50,100,220,320)
imageCanvas.SetDrawColor(181,32,32)
imageCanvas.FillBox(100,150,220,320)
imageCanvas.SetDrawColor(149,48,48)
imageCanvas.FillBox(150,200,220,320)
imageCanvas.SetDrawColor(117,64,64)
imageCanvas.FillBox(200,250,220,320)
imageCanvas.SetDrawColor(85,80,80)
imageCanvas.FillBox(250,300,220,320)
convert = vtk.vtkImageRGBToHSV()
convert.SetInputConnection(imageCanvas.GetOutputPort())
convertBack = vtk.vtkImageHSVToRGB()
convertBack.SetInputConnection(convert.GetOutputPort())
cast = vtk.vtkImageCast()
cast.SetInputConnection(convertBack.GetOutputPort())
cast.SetOutputScalarTypeToFloat()
cast.ReleaseDataFlagOff()
viewer = vtk.vtkImageViewer()
viewer.SetInputConnection(convertBack.GetOutputPort())
#viewer SetInputConnection [imageCanvas GetOutputPort]
viewer.SetColorWindow(256)
viewer.SetColorLevel(127.5)
viewer.SetSize(320,320)
viewer.Render()
# --- end of script --
| [
"vtk.util.misc.vtkGetDataRoot",
"vtk.vtkImageViewer",
"vtk.vtkImageHSVToRGB",
"vtk.vtkImageRGBToHSV",
"vtk.vtkImageCanvasSource2D",
"vtk.vtkImageCast"
] | [((90, 106), 'vtk.util.misc.vtkGetDataRoot', 'vtkGetDataRoot', ([], {}), '()\n', (104, 106), False, 'from vtk.util.misc import vtkGetDataRoot\n'), ((272, 300), 'vtk.vtkImageCanvasSource2D', 'vtk.vtkImageCanvasSource2D', ([], {}), '()\n', (298, 300), False, 'import vtk\n'), ((1831, 1853), 'vtk.vtkImageRGBToHSV', 'vtk.vtkImageRGBToHSV', ([], {}), '()\n', (1851, 1853), False, 'import vtk\n'), ((1924, 1946), 'vtk.vtkImageHSVToRGB', 'vtk.vtkImageHSVToRGB', ([], {}), '()\n', (1944, 1946), False, 'import vtk\n'), ((2010, 2028), 'vtk.vtkImageCast', 'vtk.vtkImageCast', ([], {}), '()\n', (2026, 2028), False, 'import vtk\n'), ((2151, 2171), 'vtk.vtkImageViewer', 'vtk.vtkImageViewer', ([], {}), '()\n', (2169, 2171), False, 'import vtk\n')] |
# Copyright 2020 - 2021 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from typing import Tuple
import numpy as np
import torch
from ignite.engine import Engine
from monai.handlers import SurfaceDistance
def create_spherical_seg_3d(
radius: float = 20.0, centre: Tuple[int, int, int] = (49, 49, 49), im_shape: Tuple[int, int, int] = (99, 99, 99)
) -> np.ndarray:
"""
Return a 3D image with a sphere inside. Voxel values will be
1 inside the sphere, and 0 elsewhere.
Args:
radius: radius of sphere (in terms of number of voxels, can be partial)
centre: location of sphere centre.
im_shape: shape of image to create
See also:
:py:meth:`~create_test_image_3d`
"""
# Create image
image = np.zeros(im_shape, dtype=np.int32)
spy, spx, spz = np.ogrid[
-centre[0] : im_shape[0] - centre[0], -centre[1] : im_shape[1] - centre[1], -centre[2] : im_shape[2] - centre[2]
]
circle = (spx * spx + spy * spy + spz * spz) <= radius * radius
image[circle] = 1
image[~circle] = 0
return image
sampler_sphere = torch.Tensor(create_spherical_seg_3d(radius=20, centre=(20, 20, 20))).unsqueeze(0).unsqueeze(0)
# test input a list of channel-first tensor
sampler_sphere_gt = [torch.Tensor(create_spherical_seg_3d(radius=20, centre=(10, 20, 20))).unsqueeze(0)]
sampler_sphere_zeros = torch.zeros_like(sampler_sphere)
TEST_SAMPLE_1 = [sampler_sphere, sampler_sphere_gt]
TEST_SAMPLE_2 = [sampler_sphere_gt, sampler_sphere_gt]
TEST_SAMPLE_3 = [sampler_sphere_zeros, sampler_sphere_gt]
TEST_SAMPLE_4 = [sampler_sphere_zeros, sampler_sphere_zeros]
class TestHandlerSurfaceDistance(unittest.TestCase):
# TODO test multi node Surface Distance
def test_compute(self):
sur_metric = SurfaceDistance(include_background=True)
def _val_func(engine, batch):
pass
engine = Engine(_val_func)
sur_metric.attach(engine, "surface_distance")
y_pred, y = TEST_SAMPLE_1
sur_metric.update([y_pred, y])
self.assertAlmostEqual(sur_metric.compute(), 4.17133, places=4)
y_pred, y = TEST_SAMPLE_2
sur_metric.update([y_pred, y])
self.assertAlmostEqual(sur_metric.compute(), 2.08566, places=4)
y_pred, y = TEST_SAMPLE_3
sur_metric.update([y_pred, y])
self.assertAlmostEqual(sur_metric.compute(), float("inf"))
y_pred, y = TEST_SAMPLE_4
sur_metric.update([y_pred, y])
self.assertAlmostEqual(sur_metric.compute(), float("inf"))
def test_shape_mismatch(self):
sur_metric = SurfaceDistance(include_background=True)
with self.assertRaises((AssertionError, ValueError)):
y_pred = TEST_SAMPLE_1[0]
y = torch.ones((1, 1, 10, 10, 10))
sur_metric.update([y_pred, y])
if __name__ == "__main__":
unittest.main()
| [
"monai.handlers.SurfaceDistance",
"ignite.engine.Engine",
"numpy.zeros",
"unittest.main",
"torch.zeros_like",
"torch.ones"
] | [((1895, 1927), 'torch.zeros_like', 'torch.zeros_like', (['sampler_sphere'], {}), '(sampler_sphere)\n', (1911, 1927), False, 'import torch\n'), ((1285, 1319), 'numpy.zeros', 'np.zeros', (['im_shape'], {'dtype': 'np.int32'}), '(im_shape, dtype=np.int32)\n', (1293, 1319), True, 'import numpy as np\n'), ((3383, 3398), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3396, 3398), False, 'import unittest\n'), ((2304, 2344), 'monai.handlers.SurfaceDistance', 'SurfaceDistance', ([], {'include_background': '(True)'}), '(include_background=True)\n', (2319, 2344), False, 'from monai.handlers import SurfaceDistance\n'), ((2419, 2436), 'ignite.engine.Engine', 'Engine', (['_val_func'], {}), '(_val_func)\n', (2425, 2436), False, 'from ignite.engine import Engine\n'), ((3119, 3159), 'monai.handlers.SurfaceDistance', 'SurfaceDistance', ([], {'include_background': '(True)'}), '(include_background=True)\n', (3134, 3159), False, 'from monai.handlers import SurfaceDistance\n'), ((3276, 3306), 'torch.ones', 'torch.ones', (['(1, 1, 10, 10, 10)'], {}), '((1, 1, 10, 10, 10))\n', (3286, 3306), False, 'import torch\n')] |
#!/usr/bin/env python3
import sys
import logging
import yaml
import pandas as pd
import numpy as np
from collections import defaultdict
from sklearn.model_selection import train_test_split
from sklearn.ensemble import IsolationForest
from sklearn.impute import SimpleImputer
from anoflows.hpo import find_best_flows
from data_loading import load_data
logging.getLogger().setLevel(logging.INFO)
if len(sys.argv) == 1:
logging.error("YAML data specification missing from the command line arguments")
exit(1)
spec_file = sys.argv[1]
df, spec = load_data(spec_file)
max_rows = min(len(df), spec.get("max_rows", 40000))
novelty_detection = spec.get("novelty", True)
normal_classes = spec["normal_classes"]
precision = defaultdict(list)
for rounds in range(spec.get("rounds", 1)):
# random sampling
df = df.sample(n=max_rows, replace=False)
label_col = spec["label_column"]
y = df[label_col].values
other = df.drop(label_col, inplace=False, axis=1)
X = other.values
# imputing
X = SimpleImputer(copy=False).fit_transform(X)
# train/test split
X_train, X_test, y_train, y_test = \
train_test_split(X, y, shuffle=False, test_size=0.5)
if novelty_detection:
keep = np.where(np.isin(y_train, normal_classes))[0]
X_train = X_train[keep, :]
y_train = y_train[keep]
# training
#flows, loss = find_best_flows(X_train, device='cpu', n_trials=1)
from anoflows.anoflow_bagging import AnoFlowBagging
flows = AnoFlowBagging()
flows.fit(X_train)
iforest = IsolationForest().fit(X_train)
# prediction
pred = {
"anoflows": flows.likelihood(X_test),
"iforest": iforest.decision_function(X_test)
}
# evaluation
y_true = np.where(np.isin(y_test, spec["anomaly_classes"]))[0]
ref = np.zeros(len(y_test))
ref[y_true] = 1
k = len(y_true)
for name, y_pred in pred.items():
anomaly_indices = y_pred.argsort()[:k]
prec = ref[anomaly_indices].sum() / k
logging.info("%s: %.1f%% (%d anomalies / %d rows)" % (name, 100*prec, k, len(y_test)))
precision[name].append(prec)
logging.info("* SUMMARY %s", spec_file)
for name, prec in precision.items():
prec = 100 * np.array(prec)
mean = np.mean(prec)
std = np.std(prec)
logging.info("%s; mean=%.1f%% std=%.1f%%" % (name, mean, std))
| [
"logging.getLogger",
"numpy.mean",
"anoflows.anoflow_bagging.AnoFlowBagging",
"sklearn.model_selection.train_test_split",
"sklearn.ensemble.IsolationForest",
"logging.info",
"numpy.isin",
"numpy.array",
"collections.defaultdict",
"sklearn.impute.SimpleImputer",
"numpy.std",
"logging.error",
"data_loading.load_data"
] | [((556, 576), 'data_loading.load_data', 'load_data', (['spec_file'], {}), '(spec_file)\n', (565, 576), False, 'from data_loading import load_data\n'), ((729, 746), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (740, 746), False, 'from collections import defaultdict\n'), ((2144, 2183), 'logging.info', 'logging.info', (['"""* SUMMARY %s"""', 'spec_file'], {}), "('* SUMMARY %s', spec_file)\n", (2156, 2183), False, 'import logging\n'), ((427, 512), 'logging.error', 'logging.error', (['"""YAML data specification missing from the command line arguments"""'], {}), "('YAML data specification missing from the command line arguments'\n )\n", (440, 512), False, 'import logging\n'), ((1141, 1193), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'shuffle': '(False)', 'test_size': '(0.5)'}), '(X, y, shuffle=False, test_size=0.5)\n', (1157, 1193), False, 'from sklearn.model_selection import train_test_split\n'), ((1502, 1518), 'anoflows.anoflow_bagging.AnoFlowBagging', 'AnoFlowBagging', ([], {}), '()\n', (1516, 1518), False, 'from anoflows.anoflow_bagging import AnoFlowBagging\n'), ((2264, 2277), 'numpy.mean', 'np.mean', (['prec'], {}), '(prec)\n', (2271, 2277), True, 'import numpy as np\n'), ((2288, 2300), 'numpy.std', 'np.std', (['prec'], {}), '(prec)\n', (2294, 2300), True, 'import numpy as np\n'), ((2305, 2367), 'logging.info', 'logging.info', (["('%s; mean=%.1f%% std=%.1f%%' % (name, mean, std))"], {}), "('%s; mean=%.1f%% std=%.1f%%' % (name, mean, std))\n", (2317, 2367), False, 'import logging\n'), ((356, 375), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (373, 375), False, 'import logging\n'), ((2238, 2252), 'numpy.array', 'np.array', (['prec'], {}), '(prec)\n', (2246, 2252), True, 'import numpy as np\n'), ((1025, 1050), 'sklearn.impute.SimpleImputer', 'SimpleImputer', ([], {'copy': '(False)'}), '(copy=False)\n', (1038, 1050), False, 'from sklearn.impute import SimpleImputer\n'), ((1556, 1573), 'sklearn.ensemble.IsolationForest', 'IsolationForest', ([], {}), '()\n', (1571, 1573), False, 'from sklearn.ensemble import IsolationForest\n'), ((1763, 1803), 'numpy.isin', 'np.isin', (['y_test', "spec['anomaly_classes']"], {}), "(y_test, spec['anomaly_classes'])\n", (1770, 1803), True, 'import numpy as np\n'), ((1244, 1276), 'numpy.isin', 'np.isin', (['y_train', 'normal_classes'], {}), '(y_train, normal_classes)\n', (1251, 1276), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
#
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Extracts random constraints from reference files."""
import argparse
import random
import sys
from sacrebleu import extract_ngrams
def get_phrase(words, index, length):
assert index < len(words) - length + 1
phr = " ".join(words[index : index + length])
for i in range(index, index + length):
words.pop(index)
return phr
def main(args):
if args.seed:
random.seed(args.seed)
for line in sys.stdin:
constraints = []
def add_constraint(constraint):
constraints.append(constraint)
source = line.rstrip()
if "\t" in line:
source, target = line.split("\t")
if args.add_sos:
target = f"<s> {target}"
if args.add_eos:
target = f"{target} </s>"
if len(target.split()) >= args.len:
words = [target]
num = args.number
choices = {}
for i in range(num):
if len(words) == 0:
break
segmentno = random.choice(range(len(words)))
segment = words.pop(segmentno)
tokens = segment.split()
phrase_index = random.choice(range(len(tokens)))
choice = " ".join(
tokens[phrase_index : min(len(tokens), phrase_index + args.len)]
)
for j in range(
phrase_index, min(len(tokens), phrase_index + args.len)
):
tokens.pop(phrase_index)
if phrase_index > 0:
words.append(" ".join(tokens[0:phrase_index]))
if phrase_index + 1 < len(tokens):
words.append(" ".join(tokens[phrase_index:]))
choices[target.find(choice)] = choice
# mask out with spaces
target = target.replace(choice, " " * len(choice), 1)
for key in sorted(choices.keys()):
add_constraint(choices[key])
print(source, *constraints, sep="\t")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--number", "-n", type=int, default=1, help="number of phrases")
parser.add_argument("--len", "-l", type=int, default=1, help="phrase length")
parser.add_argument(
"--add-sos", default=False, action="store_true", help="add <s> token"
)
parser.add_argument(
"--add-eos", default=False, action="store_true", help="add </s> token"
)
parser.add_argument("--seed", "-s", default=0, type=int)
args = parser.parse_args()
    main(args)
| [
"argparse.ArgumentParser",
"random.seed"
] | [((2429, 2454), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2452, 2454), False, 'import argparse\n'), ((600, 622), 'random.seed', 'random.seed', (['args.seed'], {}), '(args.seed)\n', (611, 622), False, 'import random\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import argparse
import csv
import furl
import json
import re
import sys
from collections import defaultdict
def filter_records_without_url(records: []) -> []:
return [r for r in records if any(r.get("urls"))]
def build_furl(url: str) -> furl.furl:
try:
furl_obj = furl.furl(url)
if not furl_obj.host:
furl_obj = furl.furl("http://" + url)
return furl_obj
except ValueError:
return furl.furl("https://invalid-url.xyz")
def determine_host(url: str) -> str:
furl_obj = build_furl(url)
return re.sub(r"^www[0-9]*\.", "", furl_obj.host)
def build_hosts_to_urls(records: []) -> {str: {str}}:
result = defaultdict(set)
for record in records:
for url in record.get("urls"):
host = determine_host(url.get("url"))
result[host].add(url.get("url"))
return result
def print_most_common_url_hosts(hosts_to_urls: {}, n: int):
hosts = [h for h in hosts_to_urls.keys() if len(hosts_to_urls[h]) > n]
hosts = sorted(hosts, key=lambda h: len(hosts_to_urls[h]))
for host in hosts:
print("% 6d\t%s" % (len(hosts_to_urls[host]), host))
def print_urls_for_host(hosts_to_urls: {}, host: str):
urls = hosts_to_urls.get(host, [])
for url in urls:
print(url)
if not any(urls):
print(f"No urls for host: '{host}'", file=sys.stderr)
def print_how_often_url_patterns_cooccur(records: [{}], pattern1: str, pattern2: str):
    # It should be OK to only pattern-match the hosts here...
ids1 = {r.get("id") for r in records if record_has_matching_url(r, pattern1)}
ids2 = {r.get("id") for r in records if record_has_matching_url(r, pattern2)}
ids_both = ids1.intersection(ids2)
for host, number in {pattern1: len(ids1), pattern2: len(ids2), "both": len(ids_both)}.items():
print(f"{host}: {number}")
def record_has_matching_url(record: {}, pattern: str) -> bool:
return any(record_get_urls_matching(record, pattern))
def record_get_urls_matching(record: {}, pattern: str) -> [{}]:
result = []
for url in record.get("urls"):
if any(re.findall(pattern, url.get("url"))):
result.append(url)
return result
def record_remove_urls_not_matching(record: {}, pattern: str):
record["urls"] = record_get_urls_matching(record, pattern)
def earliest_year(year_strings: [str]) -> str:
years = []
for year_s in year_strings:
try:
years.append(int(year_s))
except ValueError:
print(f"Not a string that is a year: '{year_s}'", file=sys.stderr)
continue
return str(sorted(years)[0]) if any(years) else ""
def main(args: argparse.Namespace):
with open(args.scrape_file, "r") as file:
records = json.load(file)
records = filter_records_without_url(records)
# filter urls by the user-provided filter list
if args.desc_filters:
with open(args.desc_filters, "r") as file:
filters = file.read().splitlines()
for record in records:
record["urls"] = [url for url in record.get("urls") if url.get("desc") not in filters]
records = filter_records_without_url(records)
# print unique hosts or urls, then exit
if args.print_host_urls or args.print_common_hosts >= 0:
hosts_to_urls = build_hosts_to_urls(records)
if args.print_common_hosts >= 0:
print_most_common_url_hosts(hosts_to_urls, n=args.print_common_hosts)
elif args.print_host_urls:
print_urls_for_host(hosts_to_urls, host=args.print_host_urls)
exit(0)
# check in how many records the two given hosts co-occur, then exit
if args.patterns_cooccur:
host1, host2 = args.patterns_cooccur.split(",")
print_how_often_url_patterns_cooccur(records, host1, host2)
exit(0)
# do some selection based on a url pattern, remove all non-matching urls from the record
if args.select_by_url:
pattern = args.select_by_url
records = [r for r in records if record_has_matching_url(r, pattern)]
for record in records:
record_remove_urls_not_matching(record, pattern)
    # sort the records by id to be extra sure that we get the same order every time this is called
# print each line as a csv column
records = sorted(records, key=lambda r: r.get("id"))
writer = csv.writer(sys.stdout, delimiter=",", quoting=csv.QUOTE_ALL)
for record in records:
to_print = []
if args.print_id:
to_print.append(record.get("id", ""))
if args.print_url:
to_print.append(record.get("urls")[0].get("url") if any(record.get("urls")) else "")
if args.print_pub_date:
to_print.append(earliest_year(record.get("publicationDates", [])))
if args.print_languages:
to_print.append("|".join(record.get("languages", [])))
writer.writerow(to_print)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="Process a file with zenon json records and print some information about them.")
parser.add_argument("scrape_file", type=str, help="The file that contains the zenon dumps as json.")
parser.add_argument("--desc-filters", type=str, help="A file to filter urls by. Excludes urls with 'desc' fields matching a line in the file.")
# these are arguments to print some specific information
parser.add_argument("--print-common-hosts", type=int, default=-1, help="Print hosts that appear more than n times in the records urls, then exit.")
parser.add_argument("--print-host-urls", type=str, help="Print all urls for the host, then exit.")
parser.add_argument("--patterns-cooccur", type=str, help="Format: 'pattern1,pattern2', print how often these occur in single records url fields, then exit.")
# these are meant to work together select by a url pattern then print information about the records
parser.add_argument("--select-by-url", type=str, help="Give a pattern for a url to select records by.")
parser.add_argument("--print-url", action="store_true", help="Print the first of each urls for the selected records. (Ignores other urls present on the records if --select-url is given.)")
parser.add_argument("--print-pub-date", action="store_true", help="Print the earliest publication year for each of the selected records.")
parser.add_argument("--print-id", action="store_true", help="Print the selected records' ids")
parser.add_argument("--print-languages", action="store_true", help="Print the selected records' languages")
main(parser.parse_args())
| [
"argparse.ArgumentParser",
"csv.writer",
"collections.defaultdict",
"json.load",
"re.sub",
"furl.furl"
] | [((608, 650), 're.sub', 're.sub', (['"""^www[0-9]*\\\\."""', '""""""', 'furl_obj.host'], {}), "('^www[0-9]*\\\\.', '', furl_obj.host)\n", (614, 650), False, 'import re\n'), ((720, 736), 'collections.defaultdict', 'defaultdict', (['set'], {}), '(set)\n', (731, 736), False, 'from collections import defaultdict\n'), ((4425, 4485), 'csv.writer', 'csv.writer', (['sys.stdout'], {'delimiter': '""","""', 'quoting': 'csv.QUOTE_ALL'}), "(sys.stdout, delimiter=',', quoting=csv.QUOTE_ALL)\n", (4435, 4485), False, 'import csv\n'), ((5021, 5147), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Process a file with zenon json records and print some information about them."""'}), "(description=\n 'Process a file with zenon json records and print some information about them.'\n )\n", (5044, 5147), False, 'import argparse\n'), ((333, 347), 'furl.furl', 'furl.furl', (['url'], {}), '(url)\n', (342, 347), False, 'import furl\n'), ((2810, 2825), 'json.load', 'json.load', (['file'], {}), '(file)\n', (2819, 2825), False, 'import json\n'), ((401, 427), 'furl.furl', 'furl.furl', (["('http://' + url)"], {}), "('http://' + url)\n", (410, 427), False, 'import furl\n'), ((490, 526), 'furl.furl', 'furl.furl', (['"""https://invalid-url.xyz"""'], {}), "('https://invalid-url.xyz')\n", (499, 526), False, 'import furl\n')] |
# ==================================================================================================
# Copyright 2012 Twitter, Inc.
# --------------------------------------------------------------------------------------------------
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this work except in compliance with the License.
# You may obtain a copy of the License in the LICENSE file, or at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==================================================================================================
__author__ = '<NAME>'
from twitter.pants.targets.exportable_jvm_library import ExportableJvmLibrary
class JavaAntlrLibrary(ExportableJvmLibrary):
"""Defines a target that builds java stubs from an Antlr grammar file."""
def __init__(self,
name,
sources,
provides = None,
dependencies = None,
excludes = None,
compiler = 'antlr3'):
"""name: The name of this module target, addressable via pants via the portion of the spec
following the colon
sources: A list of paths containing the Antlr source files this module's jar is compiled from
    provides: An optional Dependency object indicating the ivy artifact to export
dependencies: An optional list of Dependency objects specifying the binary (jar) dependencies of
this module.
excludes: An optional list of dependency exclude patterns to filter all of this module's
transitive dependencies against.
compiler: The name of the compiler used to compile the ANTLR files.
Currently only supports 'antlr3' and 'antlr4'"""
ExportableJvmLibrary.__init__(self,
name,
sources,
provides,
dependencies,
excludes)
self.add_labels('codegen')
if compiler not in ['antlr3', 'antlr4']:
raise ValueError("Illegal value for 'compiler': {}".format(compiler))
self.compiler = compiler
def _as_jar_dependency(self):
return ExportableJvmLibrary._as_jar_dependency(self).with_sources()
| [
"twitter.pants.targets.exportable_jvm_library.ExportableJvmLibrary.__init__",
"twitter.pants.targets.exportable_jvm_library.ExportableJvmLibrary._as_jar_dependency"
] | [((2028, 2116), 'twitter.pants.targets.exportable_jvm_library.ExportableJvmLibrary.__init__', 'ExportableJvmLibrary.__init__', (['self', 'name', 'sources', 'provides', 'dependencies', 'excludes'], {}), '(self, name, sources, provides, dependencies,\n excludes)\n', (2057, 2116), False, 'from twitter.pants.targets.exportable_jvm_library import ExportableJvmLibrary\n'), ((2511, 2556), 'twitter.pants.targets.exportable_jvm_library.ExportableJvmLibrary._as_jar_dependency', 'ExportableJvmLibrary._as_jar_dependency', (['self'], {}), '(self)\n', (2550, 2556), False, 'from twitter.pants.targets.exportable_jvm_library import ExportableJvmLibrary\n')] |
# -*- coding: utf-8 -*-
#!/usr/bin/env python
#
# Copyright 2018-2020 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
import json
import os
from datetime import datetime, timedelta
from world import world
from nose.tools import eq_, assert_less
from bigml.api import HTTP_CREATED
from bigml.api import HTTP_ACCEPTED
from bigml.api import FINISHED
from bigml.api import FAULTY
from bigml.api import get_status
from read_pca_steps import i_get_the_pca
#@step(r'the pca name is "(.*)"')
def i_check_pca_name(step, name):
pca_name = world.pca['name']
eq_(name, pca_name)
#@step(r'I create a PCA from a dataset$')
def i_create_a_pca_from_dataset(step):
dataset = world.dataset.get('resource')
resource = world.api.create_pca(dataset, {'name': 'new PCA'})
world.status = resource['code']
eq_(world.status, HTTP_CREATED)
world.location = resource['location']
world.pca = resource['object']
world.pcas.append(resource['resource'])
#@step(r'I create a PCA from a dataset$')
def i_create_a_pca_with_params(step, params):
params = json.loads(params)
dataset = world.dataset.get('resource')
resource = world.api.create_pca(dataset, params)
world.status = resource['code']
eq_(world.status, HTTP_CREATED)
world.location = resource['location']
world.pca = resource['object']
world.pcas.append(resource['resource'])
def i_create_a_pca(step):
i_create_a_pca_from_dataset(step)
#@step(r'I update the PCA name to "(.*)"$')
def i_update_pca_name(step, name):
resource = world.api.update_pca(world.pca['resource'],
{'name': name})
world.status = resource['code']
eq_(world.status, HTTP_ACCEPTED)
world.location = resource['location']
world.pca = resource['object']
#@step(r'I wait until the PCA status code is either (\d) or (-\d) less than (\d+)')
def wait_until_pca_status_code_is(step, code1, code2, secs):
start = datetime.utcnow()
delta = int(secs) * world.delta
pca_id = world.pca['resource']
i_get_the_pca(step, pca_id)
status = get_status(world.pca)
while (status['code'] != int(code1) and
status['code'] != int(code2)):
time.sleep(3)
assert_less(datetime.utcnow() - start, timedelta(seconds=delta))
i_get_the_pca(step, pca_id)
status = get_status(world.pca)
eq_(status['code'], int(code1))
#@step(r'I wait until the PCA is ready less than (\d+)')
def the_pca_is_finished_in_less_than(step, secs):
wait_until_pca_status_code_is(step, FINISHED, FAULTY, secs)
| [
"json.loads",
"nose.tools.eq_",
"world.world.api.update_pca",
"datetime.datetime.utcnow",
"bigml.api.get_status",
"time.sleep",
"datetime.timedelta",
"world.world.api.create_pca",
"world.world.dataset.get",
"read_pca_steps.i_get_the_pca",
"world.world.pcas.append"
] | [((1076, 1095), 'nose.tools.eq_', 'eq_', (['name', 'pca_name'], {}), '(name, pca_name)\n', (1079, 1095), False, 'from nose.tools import eq_, assert_less\n'), ((1192, 1221), 'world.world.dataset.get', 'world.dataset.get', (['"""resource"""'], {}), "('resource')\n", (1209, 1221), False, 'from world import world\n'), ((1237, 1287), 'world.world.api.create_pca', 'world.api.create_pca', (['dataset', "{'name': 'new PCA'}"], {}), "(dataset, {'name': 'new PCA'})\n", (1257, 1287), False, 'from world import world\n'), ((1328, 1359), 'nose.tools.eq_', 'eq_', (['world.status', 'HTTP_CREATED'], {}), '(world.status, HTTP_CREATED)\n', (1331, 1359), False, 'from nose.tools import eq_, assert_less\n'), ((1441, 1480), 'world.world.pcas.append', 'world.pcas.append', (["resource['resource']"], {}), "(resource['resource'])\n", (1458, 1480), False, 'from world import world\n'), ((1584, 1602), 'json.loads', 'json.loads', (['params'], {}), '(params)\n', (1594, 1602), False, 'import json\n'), ((1617, 1646), 'world.world.dataset.get', 'world.dataset.get', (['"""resource"""'], {}), "('resource')\n", (1634, 1646), False, 'from world import world\n'), ((1662, 1699), 'world.world.api.create_pca', 'world.api.create_pca', (['dataset', 'params'], {}), '(dataset, params)\n', (1682, 1699), False, 'from world import world\n'), ((1740, 1771), 'nose.tools.eq_', 'eq_', (['world.status', 'HTTP_CREATED'], {}), '(world.status, HTTP_CREATED)\n', (1743, 1771), False, 'from nose.tools import eq_, assert_less\n'), ((1853, 1892), 'world.world.pcas.append', 'world.pcas.append', (["resource['resource']"], {}), "(resource['resource'])\n", (1870, 1892), False, 'from world import world\n'), ((2054, 2113), 'world.world.api.update_pca', 'world.api.update_pca', (["world.pca['resource']", "{'name': name}"], {}), "(world.pca['resource'], {'name': name})\n", (2074, 2113), False, 'from world import world\n'), ((2190, 2222), 'nose.tools.eq_', 'eq_', (['world.status', 'HTTP_ACCEPTED'], {}), '(world.status, HTTP_ACCEPTED)\n', (2193, 2222), False, 'from nose.tools import eq_, assert_less\n'), ((2459, 2476), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (2474, 2476), False, 'from datetime import datetime, timedelta\n'), ((2552, 2579), 'read_pca_steps.i_get_the_pca', 'i_get_the_pca', (['step', 'pca_id'], {}), '(step, pca_id)\n', (2565, 2579), False, 'from read_pca_steps import i_get_the_pca\n'), ((2593, 2614), 'bigml.api.get_status', 'get_status', (['world.pca'], {}), '(world.pca)\n', (2603, 2614), False, 'from bigml.api import get_status\n'), ((2712, 2725), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (2722, 2725), False, 'import time\n'), ((2813, 2840), 'read_pca_steps.i_get_the_pca', 'i_get_the_pca', (['step', 'pca_id'], {}), '(step, pca_id)\n', (2826, 2840), False, 'from read_pca_steps import i_get_the_pca\n'), ((2861, 2882), 'bigml.api.get_status', 'get_status', (['world.pca'], {}), '(world.pca)\n', (2871, 2882), False, 'from bigml.api import get_status\n'), ((2776, 2800), 'datetime.timedelta', 'timedelta', ([], {'seconds': 'delta'}), '(seconds=delta)\n', (2785, 2800), False, 'from datetime import datetime, timedelta\n'), ((2749, 2766), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (2764, 2766), False, 'from datetime import datetime, timedelta\n')] |
'''
<NAME> 2/20/21
sandwich-maker.py uses pyinputplus to validate user input for sandwich preferences
'''
import pyinputplus as ip
def get_cost(food_name):
'''gets the cost of items in sandwich_builder'''
food_dict = {
'sourdough':1.75,
'rye':2.0,
'wheat':1.50,
'white':1.25,
'chicken':2.0,
'turkey':1.50,
'ham':2.0,
'tofu':1.25,
'cheddar':2.0,
'swiss':2.5,
'mozzarella':2.5,
'yes':0.25, # toppings return 'yes' in sandwich_builder(), so I made them all cost 0.25
'no':0 # saying no to a topping costs nothing
}
return food_dict[food_name]
def sandwich_builder():
print('Enter your sandwich preferences below:\n')
bread_prompt = 'What bread type would you like? (sourdough, rye, wheat, or white)\n'
bread_type = ip.inputChoice(['sourdough', 'rye', 'wheat', 'white'], prompt=bread_prompt)
protein_prompt = 'What type of protein would you like? (chicken, turkey, ham, or tofu)\n'
protein_type = ip.inputChoice(['chicken', 'turkey', 'ham', 'tofu'], prompt=protein_prompt)
mayo = ip.inputYesNo(prompt='Would you like mayo?\n')
mustard = ip.inputYesNo(prompt='Would you like mustard?\n')
tomato = ip.inputYesNo(prompt='Would you like tomato?\n')
lettuce = ip.inputYesNo(prompt='Would you like lettuce?\n')
like_cheese = ip.inputYesNo(prompt='Do you like cheese on your sandwich?\n')
    if like_cheese == 'yes':
cheese_prompt = 'What kind of cheese would you like? (cheddar, swiss, mozzarella)\n'
cheese_type = ip.inputChoice(['cheddar', 'swiss', 'mozzarella'], prompt=cheese_prompt)
sandwich = []
cost = 0
sandwich.extend([bread_type, protein_type, cheese_type, mayo, mustard, tomato, lettuce])
for item in sandwich:
cost += get_cost(item)
else:
sandwich = []
cost = 0
sandwich.extend([bread_type, protein_type, mayo, mustard, tomato, lettuce])
for item in sandwich:
cost += get_cost(item)
how_many_prompt = 'How many sandwiches would you like?\n'
how_many = ip.inputInt(min=1, prompt=how_many_prompt)
print('\nFinal cost: ${}'.format(round(cost * how_many * 1.06, 2)))
sandwich_builder() | [
"pyinputplus.inputYesNo",
"pyinputplus.inputInt",
"pyinputplus.inputChoice"
] | [((862, 937), 'pyinputplus.inputChoice', 'ip.inputChoice', (["['sourdough', 'rye', 'wheat', 'white']"], {'prompt': 'bread_prompt'}), "(['sourdough', 'rye', 'wheat', 'white'], prompt=bread_prompt)\n", (876, 937), True, 'import pyinputplus as ip\n'), ((1054, 1129), 'pyinputplus.inputChoice', 'ip.inputChoice', (["['chicken', 'turkey', 'ham', 'tofu']"], {'prompt': 'protein_prompt'}), "(['chicken', 'turkey', 'ham', 'tofu'], prompt=protein_prompt)\n", (1068, 1129), True, 'import pyinputplus as ip\n'), ((1145, 1191), 'pyinputplus.inputYesNo', 'ip.inputYesNo', ([], {'prompt': '"""Would you like mayo?\n"""'}), "(prompt='Would you like mayo?\\n')\n", (1158, 1191), True, 'import pyinputplus as ip\n'), ((1206, 1255), 'pyinputplus.inputYesNo', 'ip.inputYesNo', ([], {'prompt': '"""Would you like mustard?\n"""'}), "(prompt='Would you like mustard?\\n')\n", (1219, 1255), True, 'import pyinputplus as ip\n'), ((1270, 1318), 'pyinputplus.inputYesNo', 'ip.inputYesNo', ([], {'prompt': '"""Would you like tomato?\n"""'}), "(prompt='Would you like tomato?\\n')\n", (1283, 1318), True, 'import pyinputplus as ip\n'), ((1333, 1382), 'pyinputplus.inputYesNo', 'ip.inputYesNo', ([], {'prompt': '"""Would you like lettuce?\n"""'}), "(prompt='Would you like lettuce?\\n')\n", (1346, 1382), True, 'import pyinputplus as ip\n'), ((1406, 1468), 'pyinputplus.inputYesNo', 'ip.inputYesNo', ([], {'prompt': '"""Do you like cheese on your sandwich?\n"""'}), "(prompt='Do you like cheese on your sandwich?\\n')\n", (1419, 1468), True, 'import pyinputplus as ip\n'), ((2187, 2229), 'pyinputplus.inputInt', 'ip.inputInt', ([], {'min': '(1)', 'prompt': 'how_many_prompt'}), '(min=1, prompt=how_many_prompt)\n', (2198, 2229), True, 'import pyinputplus as ip\n'), ((1616, 1688), 'pyinputplus.inputChoice', 'ip.inputChoice', (["['cheddar', 'swiss', 'mozzarella']"], {'prompt': 'cheese_prompt'}), "(['cheddar', 'swiss', 'mozzarella'], prompt=cheese_prompt)\n", (1630, 1688), True, 'import pyinputplus as ip\n')] |
notice = """
Cone Demo
-----------------------------------
| Copyright 2022 by <NAME> |
| [<EMAIL>] |
|-----------------------------------|
| We make absolutely no warranty |
| of any kind, expressed or implied |
|-----------------------------------|
| This graphics library outputs |
| to a bitmap file. |
-----------------------------------
"""
from Python_BMP.BITMAPlib import (
newBMP,
centercoord,
plot3Dsolid,
getRGBfactors,
rotvec3D,
conevertandsurface,
saveBMP
)
import subprocess as proc
from os import path
def main():
print(notice)
imgedt = 'mspaint' # replace with another editor if Unix
rootdir = path.dirname(__file__) # get path of this script
mx = my = 250 # x=y square bmp
file = 'HelloCone.bmp' # some random file name as string
bmp = newBMP(mx, my, 24) # RGB bmp
cenpt = centercoord(bmp) # helper method to get center of a bitmap
cf = getRGBfactors() # color info with presets
d, translationvector = 400, [0, 0, 200] # be careful with these variables or object goes offscreen
isSolid = True # toggle solid or outline
showoutline = False # can show outline even if solid
cf = getRGBfactors() # color list
color = cf['brightyellow'] # color of solid
outlinecolor = 0 # outline color
rotation = rotvec3D(25,240,70) # rotation vector (x,y,z) in degrees
vcen = (1,0,0) # x y z coords
r = 40 # radius of cone
zlen = 40 # height of cone
deganglestep = 5 # how finely we tile flat surfaces around the cone
obj3D = conevertandsurface(vcen, r, zlen, deganglestep)# A solid is defined by vertices and surfaces
plot3Dsolid(bmp, obj3D, isSolid, color,
showoutline, outlinecolor,
rotation, translationvector, d, cenpt)
saveBMP(file, bmp) # save file
print('Saved to %s in %s\nAll done close %s to finish' % \
(file, rootdir, imgedt))
ret = proc.call([imgedt, file])
if __name__=="__main__":
main()
| [
"Python_BMP.BITMAPlib.conevertandsurface",
"Python_BMP.BITMAPlib.rotvec3D",
"Python_BMP.BITMAPlib.newBMP",
"Python_BMP.BITMAPlib.centercoord",
"os.path.dirname",
"Python_BMP.BITMAPlib.getRGBfactors",
"subprocess.call",
"Python_BMP.BITMAPlib.saveBMP",
"Python_BMP.BITMAPlib.plot3Dsolid"
] | [((773, 795), 'os.path.dirname', 'path.dirname', (['__file__'], {}), '(__file__)\n', (785, 795), False, 'from os import path\n'), ((943, 961), 'Python_BMP.BITMAPlib.newBMP', 'newBMP', (['mx', 'my', '(24)'], {}), '(mx, my, 24)\n', (949, 961), False, 'from Python_BMP.BITMAPlib import newBMP, centercoord, plot3Dsolid, getRGBfactors, rotvec3D, conevertandsurface, saveBMP\n'), ((989, 1005), 'Python_BMP.BITMAPlib.centercoord', 'centercoord', (['bmp'], {}), '(bmp)\n', (1000, 1005), False, 'from Python_BMP.BITMAPlib import newBMP, centercoord, plot3Dsolid, getRGBfactors, rotvec3D, conevertandsurface, saveBMP\n'), ((1062, 1077), 'Python_BMP.BITMAPlib.getRGBfactors', 'getRGBfactors', ([], {}), '()\n', (1075, 1077), False, 'from Python_BMP.BITMAPlib import newBMP, centercoord, plot3Dsolid, getRGBfactors, rotvec3D, conevertandsurface, saveBMP\n'), ((1338, 1353), 'Python_BMP.BITMAPlib.getRGBfactors', 'getRGBfactors', ([], {}), '()\n', (1351, 1353), False, 'from Python_BMP.BITMAPlib import newBMP, centercoord, plot3Dsolid, getRGBfactors, rotvec3D, conevertandsurface, saveBMP\n'), ((1482, 1503), 'Python_BMP.BITMAPlib.rotvec3D', 'rotvec3D', (['(25)', '(240)', '(70)'], {}), '(25, 240, 70)\n', (1490, 1503), False, 'from Python_BMP.BITMAPlib import newBMP, centercoord, plot3Dsolid, getRGBfactors, rotvec3D, conevertandsurface, saveBMP\n'), ((1741, 1788), 'Python_BMP.BITMAPlib.conevertandsurface', 'conevertandsurface', (['vcen', 'r', 'zlen', 'deganglestep'], {}), '(vcen, r, zlen, deganglestep)\n', (1759, 1788), False, 'from Python_BMP.BITMAPlib import newBMP, centercoord, plot3Dsolid, getRGBfactors, rotvec3D, conevertandsurface, saveBMP\n'), ((1843, 1952), 'Python_BMP.BITMAPlib.plot3Dsolid', 'plot3Dsolid', (['bmp', 'obj3D', 'isSolid', 'color', 'showoutline', 'outlinecolor', 'rotation', 'translationvector', 'd', 'cenpt'], {}), '(bmp, obj3D, isSolid, color, showoutline, outlinecolor, rotation,\n translationvector, d, cenpt)\n', (1854, 1952), False, 'from Python_BMP.BITMAPlib import newBMP, centercoord, plot3Dsolid, getRGBfactors, rotvec3D, conevertandsurface, saveBMP\n'), ((1992, 2010), 'Python_BMP.BITMAPlib.saveBMP', 'saveBMP', (['file', 'bmp'], {}), '(file, bmp)\n', (1999, 2010), False, 'from Python_BMP.BITMAPlib import newBMP, centercoord, plot3Dsolid, getRGBfactors, rotvec3D, conevertandsurface, saveBMP\n'), ((2148, 2173), 'subprocess.call', 'proc.call', (['[imgedt, file]'], {}), '([imgedt, file])\n', (2157, 2173), True, 'import subprocess as proc\n')] |
import sqlite3
import subprocess, datetime
from flask import Flask, request, session, g, redirect, url_for, \
abort, render_template, flash
from contextlib import closing
from tquery import get_latest_record
from config import *
app = Flask(__name__)
app.config.from_object(__name__)
# DB helper functions
def connect_db():
return sqlite3.connect(app.config['DATABASE'])
def init_db():
"""Initializes the sqlite3 database. This function must be imported and
executed from the Python interpreter before the application is first run."""
with closing(connect_db()) as db:
with app.open_resource('schema.sql', mode='r') as f:
db.cursor().executescript(f.read())
db.commit()
# Auto-open and close DB when serving requests
@app.before_request
def before_request():
g.db = connect_db()
@app.teardown_request
def teardown_request(exception):
db = getattr(g, 'db', None)
if db is not None:
db.close()
@app.route('/', methods=['GET', 'POST'])
def welcome_page():
if 'username' in session and session['username']:
return redirect(url_for('submit_page'))
error = None
if request.method == 'POST': # someone's logging in
if not request.form['username'] in app.config['USERNAMES']:
error = 'username'
elif request.form['password'] != app.config['PASSWORD']:
error = 'password'
else: # successful login
session['username'] = request.form['username']
flash('Hi ' + session['username'] + '!')
return redirect(url_for('submit_page'))
return render_template('welcome_page.html', commands=command_history(),
error=error, last_record=last_record())
@app.route('/submit', methods=['GET', 'POST'])
def submit_page():
error = None
if not session.get('username'):
abort(401)
if request.method == 'POST': # command is being issued to AC
user_mode = request.form['mode']
user_temperature = request.form['temperature']
validation_codes = validate_AC_command(user_mode, user_temperature)
if (validation_codes['mode_error'] or
validation_codes['temperature_error']):
error=validation_codes
else:
subprocess.call(['/usr/bin/irsend','SEND_ONCE', 'lgac',
validation_codes['command']])
g.db.execute('insert into commands (command, ts, user) values (?, ?, ?)',
[validation_codes['command'],
datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
session['username']])
g.db.commit()
flash('Command submitted')
return render_template('submit_page.html', commands=command_history(),
error=error, last_record=last_record())
@app.route('/logout')
def logout():
session.pop('logged_in', None)
flash('You were logged out')
return redirect(url_for('welcome_page'))
def validate_AC_command(user_mode, user_temperature):
"""Validates and sanitizes user-input command; translates command
into irsend call."""
codes = dict()
if user_mode not in app.config['ACMODES']:
codes['mode_error'] = True
else:
codes['mode_error'] = False
    if user_mode != 'off' and user_temperature not in app.config['ACTEMPERATURES']:
codes['temperature_error'] = True
else:
codes['temperature_error'] = False
if not codes['mode_error'] and not codes['temperature_error']:
codes['mode'] = user_mode
codes['temperature'] = user_temperature
if codes['mode'] == 'off':
command_postfix = 'off'
elif codes['mode'] == 'heat':
command_postfix = 'heat' + codes['temperature']
else:
command_postfix = codes['temperature']
codes['command'] = command_postfix
return codes
def command_history():
"""Returns a list of dictionaries, each containing a command issued
to the AC previously. The list is ordered chronologically, from newest
to oldest."""
cur = g.db.execute('select command, ts, user from commands order by id desc')
command_history = []
for row in cur.fetchall():
if row[0][0] == 'h':
cmd = 'heat to ' + row[0][4:]
elif row[0] == 'off':
cmd = 'off'
else:
cmd = 'cool to ' + row[0]
command_history.append(dict(command=cmd, ts=row[1], user=row[2]))
return command_history
def last_record():
"""Returns the last temperature and humidity record data.
    The returned object is a dict with keys date, time, fahrenheit, celsius
    and humidity.
"""
db_record = get_latest_record()
out_record = dict()
out_record['date'] = db_record[0].strftime("%Y-%m-%d")
out_record['time'] = db_record[0].strftime("%H:%M")
out_record['celsius'] = db_record[1]
out_record['fahrenheit'] = int(round(out_record['celsius']*9/5.0 + 32))
out_record['humidity'] = int(round(db_record[2]))
return out_record
if __name__ == '__main__':
app.run(host='0.0.0.0')
| [
"flask.flash",
"sqlite3.connect",
"flask.Flask",
"flask.session.get",
"flask.abort",
"tquery.get_latest_record",
"flask.url_for",
"flask.g.db.commit",
"datetime.datetime.now",
"subprocess.call",
"flask.g.db.execute",
"flask.session.pop"
] | [((272, 287), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (277, 287), False, 'from flask import Flask, request, session, g, redirect, url_for, abort, render_template, flash\n'), ((373, 412), 'sqlite3.connect', 'sqlite3.connect', (["app.config['DATABASE']"], {}), "(app.config['DATABASE'])\n", (388, 412), False, 'import sqlite3\n'), ((2950, 2980), 'flask.session.pop', 'session.pop', (['"""logged_in"""', 'None'], {}), "('logged_in', None)\n", (2961, 2980), False, 'from flask import Flask, request, session, g, redirect, url_for, abort, render_template, flash\n'), ((2985, 3013), 'flask.flash', 'flash', (['"""You were logged out"""'], {}), "('You were logged out')\n", (2990, 3013), False, 'from flask import Flask, request, session, g, redirect, url_for, abort, render_template, flash\n'), ((4199, 4270), 'flask.g.db.execute', 'g.db.execute', (['"""select command, ts, user from commands order by id desc"""'], {}), "('select command, ts, user from commands order by id desc')\n", (4211, 4270), False, 'from flask import Flask, request, session, g, redirect, url_for, abort, render_template, flash\n'), ((4802, 4821), 'tquery.get_latest_record', 'get_latest_record', ([], {}), '()\n', (4819, 4821), False, 'from tquery import get_latest_record\n'), ((1877, 1900), 'flask.session.get', 'session.get', (['"""username"""'], {}), "('username')\n", (1888, 1900), False, 'from flask import Flask, request, session, g, redirect, url_for, abort, render_template, flash\n'), ((1910, 1920), 'flask.abort', 'abort', (['(401)'], {}), '(401)\n', (1915, 1920), False, 'from flask import Flask, request, session, g, redirect, url_for, abort, render_template, flash\n'), ((3034, 3057), 'flask.url_for', 'url_for', (['"""welcome_page"""'], {}), "('welcome_page')\n", (3041, 3057), False, 'from flask import Flask, request, session, g, redirect, url_for, abort, render_template, flash\n'), ((1150, 1172), 'flask.url_for', 'url_for', (['"""submit_page"""'], {}), "('submit_page')\n", (1157, 1172), False, 'from flask import Flask, request, session, g, redirect, url_for, abort, render_template, flash\n'), ((2317, 2408), 'subprocess.call', 'subprocess.call', (["['/usr/bin/irsend', 'SEND_ONCE', 'lgac', validation_codes['command']]"], {}), "(['/usr/bin/irsend', 'SEND_ONCE', 'lgac', validation_codes[\n 'command']])\n", (2332, 2408), False, 'import subprocess, datetime\n'), ((2714, 2727), 'flask.g.db.commit', 'g.db.commit', ([], {}), '()\n', (2725, 2727), False, 'from flask import Flask, request, session, g, redirect, url_for, abort, render_template, flash\n'), ((2740, 2766), 'flask.flash', 'flash', (['"""Command submitted"""'], {}), "('Command submitted')\n", (2745, 2766), False, 'from flask import Flask, request, session, g, redirect, url_for, abort, render_template, flash\n'), ((1546, 1586), 'flask.flash', 'flash', (["('Hi ' + session['username'] + '!')"], {}), "('Hi ' + session['username'] + '!')\n", (1551, 1586), False, 'from flask import Flask, request, session, g, redirect, url_for, abort, render_template, flash\n'), ((1615, 1637), 'flask.url_for', 'url_for', (['"""submit_page"""'], {}), "('submit_page')\n", (1622, 1637), False, 'from flask import Flask, request, session, g, redirect, url_for, abort, render_template, flash\n'), ((2599, 2622), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2620, 2622), False, 'import subprocess, datetime\n')] |
"""Helper functions to tests."""
import numpy as np
def norm(vs: np.array) -> float:
"""Compute the norm of a vector."""
return np.sqrt(np.dot(vs, vs))
def create_random_matrix(size: int) -> np.array:
"""Create a numpy random matrix."""
return np.random.normal(size=size ** 2).reshape(size, size)
def create_symmetic_matrix(size: int) -> np.array:
"""Create a numpy symmetric matrix."""
xs = create_random_matrix(size)
return xs + xs.T
def check_eigenpairs(
matrix: np.ndarray, eigenvalues: np.ndarray,
eigenvectors: np.ndarray) -> bool:
"""Check that the eigenvalue equation holds."""
for i, value in enumerate(eigenvalues):
residue = np.dot(
matrix, eigenvectors[:, i]) - value * eigenvectors[:, i]
assert norm(residue) < 1e-8
| [
"numpy.random.normal",
"numpy.dot"
] | [((147, 161), 'numpy.dot', 'np.dot', (['vs', 'vs'], {}), '(vs, vs)\n', (153, 161), True, 'import numpy as np\n'), ((265, 297), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(size ** 2)'}), '(size=size ** 2)\n', (281, 297), True, 'import numpy as np\n'), ((705, 739), 'numpy.dot', 'np.dot', (['matrix', 'eigenvectors[:, i]'], {}), '(matrix, eigenvectors[:, i])\n', (711, 739), True, 'import numpy as np\n')] |
#pylint:disable=no-member
import cv2 as cv
import numpy as np
img = cv.imread('/Users/webileapp/Desktop/niharika_files/projects/opencv_course_master/Resources/Photos/cats.jpg')
cv.imshow('Cats', img)
#
blank = np.zeros(img.shape[:2], dtype='uint8')
cv.imshow('Blank', blank)
gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
cv.imshow('Gray', gray)
#
blur = cv.GaussianBlur(gray, (5,5), cv.BORDER_DEFAULT)
cv.imshow('Blur', blur)
canny = cv.Canny(blur, 125, 175)
cv.imshow('Canny Edges', canny)
#
ret, thresh = cv.threshold(gray, 125, 255, cv.THRESH_BINARY)
cv.imshow('Thresh', thresh)
#
contours, hierarchies = cv.findContours(canny, cv.RETR_LIST, cv.CHAIN_APPROX_SIMPLE)
print(f'{len(contours)} contour(s) found!')
#
cv.drawContours(blank, contours, -1, (200,120,100), 1)
cv.imshow('Contours Drawn', blank)
cv.waitKey(0) | [
"cv2.drawContours",
"cv2.threshold",
"cv2.Canny",
"cv2.imshow",
"numpy.zeros",
"cv2.waitKey",
"cv2.cvtColor",
"cv2.findContours",
"cv2.GaussianBlur",
"cv2.imread"
] | [((70, 188), 'cv2.imread', 'cv.imread', (['"""/Users/webileapp/Desktop/niharika_files/projects/opencv_course_master/Resources/Photos/cats.jpg"""'], {}), "(\n '/Users/webileapp/Desktop/niharika_files/projects/opencv_course_master/Resources/Photos/cats.jpg'\n )\n", (79, 188), True, 'import cv2 as cv\n'), ((179, 201), 'cv2.imshow', 'cv.imshow', (['"""Cats"""', 'img'], {}), "('Cats', img)\n", (188, 201), True, 'import cv2 as cv\n'), ((212, 250), 'numpy.zeros', 'np.zeros', (['img.shape[:2]'], {'dtype': '"""uint8"""'}), "(img.shape[:2], dtype='uint8')\n", (220, 250), True, 'import numpy as np\n'), ((251, 276), 'cv2.imshow', 'cv.imshow', (['"""Blank"""', 'blank'], {}), "('Blank', blank)\n", (260, 276), True, 'import cv2 as cv\n'), ((285, 320), 'cv2.cvtColor', 'cv.cvtColor', (['img', 'cv.COLOR_BGR2GRAY'], {}), '(img, cv.COLOR_BGR2GRAY)\n', (296, 320), True, 'import cv2 as cv\n'), ((321, 344), 'cv2.imshow', 'cv.imshow', (['"""Gray"""', 'gray'], {}), "('Gray', gray)\n", (330, 344), True, 'import cv2 as cv\n'), ((354, 402), 'cv2.GaussianBlur', 'cv.GaussianBlur', (['gray', '(5, 5)', 'cv.BORDER_DEFAULT'], {}), '(gray, (5, 5), cv.BORDER_DEFAULT)\n', (369, 402), True, 'import cv2 as cv\n'), ((402, 425), 'cv2.imshow', 'cv.imshow', (['"""Blur"""', 'blur'], {}), "('Blur', blur)\n", (411, 425), True, 'import cv2 as cv\n'), ((435, 459), 'cv2.Canny', 'cv.Canny', (['blur', '(125)', '(175)'], {}), '(blur, 125, 175)\n', (443, 459), True, 'import cv2 as cv\n'), ((460, 491), 'cv2.imshow', 'cv.imshow', (['"""Canny Edges"""', 'canny'], {}), "('Canny Edges', canny)\n", (469, 491), True, 'import cv2 as cv\n'), ((508, 554), 'cv2.threshold', 'cv.threshold', (['gray', '(125)', '(255)', 'cv.THRESH_BINARY'], {}), '(gray, 125, 255, cv.THRESH_BINARY)\n', (520, 554), True, 'import cv2 as cv\n'), ((555, 582), 'cv2.imshow', 'cv.imshow', (['"""Thresh"""', 'thresh'], {}), "('Thresh', thresh)\n", (564, 582), True, 'import cv2 as cv\n'), ((609, 669), 'cv2.findContours', 'cv.findContours', (['canny', 'cv.RETR_LIST', 'cv.CHAIN_APPROX_SIMPLE'], {}), '(canny, cv.RETR_LIST, cv.CHAIN_APPROX_SIMPLE)\n', (624, 669), True, 'import cv2 as cv\n'), ((716, 772), 'cv2.drawContours', 'cv.drawContours', (['blank', 'contours', '(-1)', '(200, 120, 100)', '(1)'], {}), '(blank, contours, -1, (200, 120, 100), 1)\n', (731, 772), True, 'import cv2 as cv\n'), ((771, 805), 'cv2.imshow', 'cv.imshow', (['"""Contours Drawn"""', 'blank'], {}), "('Contours Drawn', blank)\n", (780, 805), True, 'import cv2 as cv\n'), ((807, 820), 'cv2.waitKey', 'cv.waitKey', (['(0)'], {}), '(0)\n', (817, 820), True, 'import cv2 as cv\n')] |
"""
Micro webapp based on WebOb, Jinja2, WSGI with a simple router
"""
import os
import hmac
import hashlib
import mimetypes
from wsgiref.simple_server import WSGIServer, WSGIRequestHandler
from webob import Request
from webob import Response
from jinja2 import Environment, FileSystemLoader
class MicroServer(object):
"""Small web server."""
def __init__(self):
"""Initializes the class and configures the paths
and the Jinja2 environment so it can find and render pages."""
if self.static_root is None:
self.static_root = 'static'
if self.templates_root is None:
self.templates_root = 'templates'
if self.routes is None:
self.routes = {}
# Set up the paths and environment for Jinja. This is how it finds the templates.
self.template_path = os.path.join(os.path.dirname(__file__), self.templates_root)
self.env = Environment(autoescape=True, loader=FileSystemLoader(self.template_path))
# Figure out what directory the server is running it as save the path.
# The path will be used later to find the site's resources.
self.current_dir = os.path.dirname(os.path.realpath(__file__))
def __call__(self, environ, start_response):
"""This method is called by the HTTPServer when
there is a request to be handled."""
# Create the WebOb Request and Response objects for
# used to read the request and write the response.
self.request = Request(environ)
self.response = Response()
# Find a handler for the path if there is one.
handler = self.routes.get(self.request.path_info)
# If there is call it. If not call the static handler.
if handler:
handler()
else:
self.static()
return self.response(environ, start_response)
def static(self, resource=''):
"""Handles request for static pages. It is the default handler."""
# Build a file path using either the resource parameter or the path in the request.
if resource:
file_path = os.path.join(self.current_dir, self.static_root, resource)
else:
file_path = os.path.join(self.current_dir, self.static_root, self.request.path_info[1:])
print("File path:", file_path)
# Try to open the file. If we can then guess its type and write its
# content to the response object to send it to the client.
# If we can't find the file then return an error to the client.
try:
file_type = mimetypes.guess_type(file_path)[0]
self.response.content_type = file_type
data = open(file_path, 'rb').read()
self.response.body_file.write(data)
except Exception as e:
self.response.status = 404
self.response.write(str(e))
def render_template(self, template_name, template_values={}):
"""Renders Jinja2 templates into HTML"""
# Find the template and render it to HTML
# then write it to the response object to send it to the client.
template = self.env.get_template(template_name)
html = template.render(template_values)
self.response.write(html)
def get_signature(self, passphrase, *parts):
"""Creates a hash from strings based on a passphrase."""
cookiehash = hmac.new(passphrase.encode(), digestmod=hashlib.sha1)
for part in parts:
cookiehash.update(part.encode())
return cookiehash.hexdigest()
def run(self, port):
"""Starts the HTTP server and tells it what port to listen on"""
# Create the WSGI HTTP server. Set the port it should listen on.
# And start the server.
server = WSGIServer(('', 8000), WSGIRequestHandler)
server.set_app(self)
print("Serving on http://localhost:8000/ ...")
server.serve_forever()
| [
"wsgiref.simple_server.WSGIServer",
"webob.Request",
"os.path.join",
"os.path.realpath",
"os.path.dirname",
"mimetypes.guess_type",
"jinja2.FileSystemLoader",
"webob.Response"
] | [((1522, 1538), 'webob.Request', 'Request', (['environ'], {}), '(environ)\n', (1529, 1538), False, 'from webob import Request\n'), ((1563, 1573), 'webob.Response', 'Response', ([], {}), '()\n', (1571, 1573), False, 'from webob import Response\n'), ((3802, 3844), 'wsgiref.simple_server.WSGIServer', 'WSGIServer', (["('', 8000)", 'WSGIRequestHandler'], {}), "(('', 8000), WSGIRequestHandler)\n", (3812, 3844), False, 'from wsgiref.simple_server import WSGIServer, WSGIRequestHandler\n'), ((867, 892), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (882, 892), False, 'import os\n'), ((1199, 1225), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (1215, 1225), False, 'import os\n'), ((2139, 2197), 'os.path.join', 'os.path.join', (['self.current_dir', 'self.static_root', 'resource'], {}), '(self.current_dir, self.static_root, resource)\n', (2151, 2197), False, 'import os\n'), ((2236, 2312), 'os.path.join', 'os.path.join', (['self.current_dir', 'self.static_root', 'self.request.path_info[1:]'], {}), '(self.current_dir, self.static_root, self.request.path_info[1:])\n', (2248, 2312), False, 'import os\n'), ((970, 1006), 'jinja2.FileSystemLoader', 'FileSystemLoader', (['self.template_path'], {}), '(self.template_path)\n', (986, 1006), False, 'from jinja2 import Environment, FileSystemLoader\n'), ((2606, 2637), 'mimetypes.guess_type', 'mimetypes.guess_type', (['file_path'], {}), '(file_path)\n', (2626, 2637), False, 'import mimetypes\n')] |
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage
from django.core.urlresolvers import reverse
from django.shortcuts import render
from django.http import HttpResponseRedirect
from core.models import Post, Category, Tag
from backend.forms import PostForm, CategoryForm, TagForm
# Create your views here.
@login_required()
def index(request):
context = {}
context['nav_active'] = 'index'
return render(request, 'backend/index.html', context)
@login_required()
def posts(request):
context = {}
context['nav_active'] = 'posts'
post_list = Post.objects.all()
paginator = Paginator(list(reversed(post_list)), 10)
page = request.GET.get('page')
try:
posts = paginator.page(page)
except PageNotAnInteger:
posts = paginator.page(1)
except EmptyPage:
posts = paginator.page(paginator.num_pages)
context['posts'] = posts
return render(request, 'backend/posts.html', context)
@login_required()
def add_post(request):
context = {}
context['nav_active'] = 'posts'
form = PostForm()
if request.method == 'POST':
form = PostForm(request.POST, request.FILES)
if form.is_valid():
form.save()
messages.success(request, 'Post created.')
return HttpResponseRedirect(reverse('user_panel_posts'))
context['form'] = form
return render(request, 'backend/edit_post.html', context)
@login_required()
def edit_post(request, post_id):
context = {}
context['nav_active'] = 'posts'
post = Post.objects.get(pk=post_id)
context['post'] = post
form = PostForm(instance=post)
if request.method == 'POST':
form = PostForm(request.POST, request.FILES, instance=post)
if form.is_valid():
form.save()
messages.success(request, 'Post updated.')
return HttpResponseRedirect(reverse('user_panel_posts'))
context['form'] = form
return render(request, 'backend/edit_post.html', context)
@login_required()
def delete_post(request, post_id):
context = {}
context['nav_active'] = 'posts'
post = Post.objects.get(pk=post_id)
post.delete()
messages.success(request, 'Post deleted.')
return HttpResponseRedirect(reverse('user_panel_posts'))
@login_required()
def categories(request):
context = {}
context['nav_active'] = 'categories'
categories_list = Category.objects.all()
paginator = Paginator(list(reversed(categories_list)), 10)
page = request.GET.get('page')
try:
categories = paginator.page(page)
except PageNotAnInteger:
categories = paginator.page(1)
except EmptyPage:
categories = paginator.page(paginator.num_pages)
context['categories'] = categories
return render(request, 'backend/categories.html', context)
@login_required()
def add_category(request):
context = {}
context['nav_active'] = 'categories'
form = CategoryForm()
if request.method == 'POST':
form = CategoryForm(request.POST, request.FILES)
if form.is_valid():
form.save()
messages.success(request, 'Category created.')
return HttpResponseRedirect(reverse('user_panel_categories'))
context['form'] = form
return render(request, 'backend/edit_category.html', context)
@login_required()
def edit_category(request, category_id):
context = {}
context['nav_active'] = 'categories'
category = Category.objects.get(pk=category_id)
context['category'] = category
form = CategoryForm(instance=category)
if request.method == 'POST':
form = CategoryForm(request.POST, request.FILES, instance=category)
if form.is_valid():
form.save()
messages.success(request, 'Category updated.')
return HttpResponseRedirect(reverse('user_panel_categories'))
context['form'] = form
return render(request, 'backend/edit_category.html', context)
@login_required()
def delete_category(request, category_id):
context = {}
context['nav_active'] = 'categories'
category = Category.objects.get(pk=category_id)
category.delete()
messages.success(request, 'Category deleted.')
return HttpResponseRedirect(reverse('user_panel_categories'))
@login_required()
def tags(request):
context = {}
context['nav_active'] = 'tags'
tags_list = Tag.objects.all()
paginator = Paginator(list(reversed(tags_list)), 10)
page = request.GET.get('page')
try:
tags = paginator.page(page)
except PageNotAnInteger:
tags = paginator.page(1)
except EmptyPage:
tags = paginator.page(paginator.num_pages)
context['tags'] = tags
return render(request, 'backend/tags.html', context)
@login_required()
def add_tag(request):
context = {}
context['nav_active'] = 'tags'
form = TagForm()
if request.method == 'POST':
form = TagForm(request.POST, request.FILES)
if form.is_valid():
form.save()
messages.success(request, 'Tag created.')
return HttpResponseRedirect(reverse('user_panel_tags'))
context['form'] = form
return render(request, 'backend/edit_tag.html', context)
@login_required()
def edit_tag(request, tag_id):
context = {}
context['nav_active'] = 'tags'
tag = Tag.objects.get(pk=tag_id)
context['tag'] = tag
form = TagForm(instance=tag)
if request.method == 'POST':
form = TagForm(request.POST, request.FILES, instance=tag)
if form.is_valid():
form.save()
messages.success(request, 'Tag updated.')
return HttpResponseRedirect(reverse('user_panel_tags'))
context['form'] = form
return render(request, 'backend/edit_tag.html', context)
@login_required()
def delete_tag(request, tag_id):
context = {}
context['nav_active'] = 'tags'
tag = Tag.objects.get(pk=tag_id)
tag.delete()
messages.success(request, 'Tag deleted.')
return HttpResponseRedirect(reverse('user_panel_tags')) | [
"django.shortcuts.render",
"core.models.Post.objects.get",
"backend.forms.PostForm",
"core.models.Category.objects.all",
"core.models.Tag.objects.get",
"django.core.urlresolvers.reverse",
"core.models.Post.objects.all",
"backend.forms.TagForm",
"core.models.Tag.objects.all",
"django.contrib.auth.decorators.login_required",
"django.contrib.messages.success",
"core.models.Category.objects.get",
"backend.forms.CategoryForm"
] | [((424, 440), 'django.contrib.auth.decorators.login_required', 'login_required', ([], {}), '()\n', (438, 440), False, 'from django.contrib.auth.decorators import login_required\n'), ((575, 591), 'django.contrib.auth.decorators.login_required', 'login_required', ([], {}), '()\n', (589, 591), False, 'from django.contrib.auth.decorators import login_required\n'), ((1070, 1086), 'django.contrib.auth.decorators.login_required', 'login_required', ([], {}), '()\n', (1084, 1086), False, 'from django.contrib.auth.decorators import login_required\n'), ((1544, 1560), 'django.contrib.auth.decorators.login_required', 'login_required', ([], {}), '()\n', (1558, 1560), False, 'from django.contrib.auth.decorators import login_required\n'), ((2124, 2140), 'django.contrib.auth.decorators.login_required', 'login_required', ([], {}), '()\n', (2138, 2140), False, 'from django.contrib.auth.decorators import login_required\n'), ((2400, 2416), 'django.contrib.auth.decorators.login_required', 'login_required', ([], {}), '()\n', (2414, 2416), False, 'from django.contrib.auth.decorators import login_required\n'), ((2950, 2966), 'django.contrib.auth.decorators.login_required', 'login_required', ([], {}), '()\n', (2964, 2966), False, 'from django.contrib.auth.decorators import login_required\n'), ((3454, 3470), 'django.contrib.auth.decorators.login_required', 'login_required', ([], {}), '()\n', (3468, 3470), False, 'from django.contrib.auth.decorators import login_required\n'), ((4096, 4112), 'django.contrib.auth.decorators.login_required', 'login_required', ([], {}), '()\n', (4110, 4112), False, 'from django.contrib.auth.decorators import login_required\n'), ((4410, 4426), 'django.contrib.auth.decorators.login_required', 'login_required', ([], {}), '()\n', (4424, 4426), False, 'from django.contrib.auth.decorators import login_required\n'), ((4895, 4911), 'django.contrib.auth.decorators.login_required', 'login_required', ([], {}), '()\n', (4909, 4911), False, 'from django.contrib.auth.decorators import login_required\n'), ((5362, 5378), 'django.contrib.auth.decorators.login_required', 'login_required', ([], {}), '()\n', (5376, 5378), False, 'from django.contrib.auth.decorators import login_required\n'), ((5927, 5943), 'django.contrib.auth.decorators.login_required', 'login_required', ([], {}), '()\n', (5941, 5943), False, 'from django.contrib.auth.decorators import login_required\n'), ((526, 572), 'django.shortcuts.render', 'render', (['request', '"""backend/index.html"""', 'context'], {}), "(request, 'backend/index.html', context)\n", (532, 572), False, 'from django.shortcuts import render\n'), ((682, 700), 'core.models.Post.objects.all', 'Post.objects.all', ([], {}), '()\n', (698, 700), False, 'from core.models import Post, Category, Tag\n'), ((1021, 1067), 'django.shortcuts.render', 'render', (['request', '"""backend/posts.html"""', 'context'], {}), "(request, 'backend/posts.html', context)\n", (1027, 1067), False, 'from django.shortcuts import render\n'), ((1175, 1185), 'backend.forms.PostForm', 'PostForm', ([], {}), '()\n', (1183, 1185), False, 'from backend.forms import PostForm, CategoryForm, TagForm\n'), ((1491, 1541), 'django.shortcuts.render', 'render', (['request', '"""backend/edit_post.html"""', 'context'], {}), "(request, 'backend/edit_post.html', context)\n", (1497, 1541), False, 'from django.shortcuts import render\n'), ((1659, 1687), 'core.models.Post.objects.get', 'Post.objects.get', ([], {'pk': 'post_id'}), '(pk=post_id)\n', (1675, 1687), False, 'from core.models import Post, Category, Tag\n'), 
((1727, 1750), 'backend.forms.PostForm', 'PostForm', ([], {'instance': 'post'}), '(instance=post)\n', (1735, 1750), False, 'from backend.forms import PostForm, CategoryForm, TagForm\n'), ((2071, 2121), 'django.shortcuts.render', 'render', (['request', '"""backend/edit_post.html"""', 'context'], {}), "(request, 'backend/edit_post.html', context)\n", (2077, 2121), False, 'from django.shortcuts import render\n'), ((2241, 2269), 'core.models.Post.objects.get', 'Post.objects.get', ([], {'pk': 'post_id'}), '(pk=post_id)\n', (2257, 2269), False, 'from core.models import Post, Category, Tag\n'), ((2293, 2335), 'django.contrib.messages.success', 'messages.success', (['request', '"""Post deleted."""'], {}), "(request, 'Post deleted.')\n", (2309, 2335), False, 'from django.contrib import messages\n'), ((2523, 2545), 'core.models.Category.objects.all', 'Category.objects.all', ([], {}), '()\n', (2543, 2545), False, 'from core.models import Post, Category, Tag\n'), ((2896, 2947), 'django.shortcuts.render', 'render', (['request', '"""backend/categories.html"""', 'context'], {}), "(request, 'backend/categories.html', context)\n", (2902, 2947), False, 'from django.shortcuts import render\n'), ((3064, 3078), 'backend.forms.CategoryForm', 'CategoryForm', ([], {}), '()\n', (3076, 3078), False, 'from backend.forms import PostForm, CategoryForm, TagForm\n'), ((3397, 3451), 'django.shortcuts.render', 'render', (['request', '"""backend/edit_category.html"""', 'context'], {}), "(request, 'backend/edit_category.html', context)\n", (3403, 3451), False, 'from django.shortcuts import render\n'), ((3586, 3622), 'core.models.Category.objects.get', 'Category.objects.get', ([], {'pk': 'category_id'}), '(pk=category_id)\n', (3606, 3622), False, 'from core.models import Post, Category, Tag\n'), ((3670, 3701), 'backend.forms.CategoryForm', 'CategoryForm', ([], {'instance': 'category'}), '(instance=category)\n', (3682, 3701), False, 'from backend.forms import PostForm, CategoryForm, TagForm\n'), ((4039, 4093), 'django.shortcuts.render', 'render', (['request', '"""backend/edit_category.html"""', 'context'], {}), "(request, 'backend/edit_category.html', context)\n", (4045, 4093), False, 'from django.shortcuts import render\n'), ((4230, 4266), 'core.models.Category.objects.get', 'Category.objects.get', ([], {'pk': 'category_id'}), '(pk=category_id)\n', (4250, 4266), False, 'from core.models import Post, Category, Tag\n'), ((4294, 4340), 'django.contrib.messages.success', 'messages.success', (['request', '"""Category deleted."""'], {}), "(request, 'Category deleted.')\n", (4310, 4340), False, 'from django.contrib import messages\n'), ((4515, 4532), 'core.models.Tag.objects.all', 'Tag.objects.all', ([], {}), '()\n', (4530, 4532), False, 'from core.models import Post, Category, Tag\n'), ((4847, 4892), 'django.shortcuts.render', 'render', (['request', '"""backend/tags.html"""', 'context'], {}), "(request, 'backend/tags.html', context)\n", (4853, 4892), False, 'from django.shortcuts import render\n'), ((4998, 5007), 'backend.forms.TagForm', 'TagForm', ([], {}), '()\n', (5005, 5007), False, 'from backend.forms import PostForm, CategoryForm, TagForm\n'), ((5310, 5359), 'django.shortcuts.render', 'render', (['request', '"""backend/edit_tag.html"""', 'context'], {}), "(request, 'backend/edit_tag.html', context)\n", (5316, 5359), False, 'from django.shortcuts import render\n'), ((5473, 5499), 'core.models.Tag.objects.get', 'Tag.objects.get', ([], {'pk': 'tag_id'}), '(pk=tag_id)\n', (5488, 5499), False, 'from core.models import Post, 
Category, Tag\n'), ((5537, 5558), 'backend.forms.TagForm', 'TagForm', ([], {'instance': 'tag'}), '(instance=tag)\n', (5544, 5558), False, 'from backend.forms import PostForm, CategoryForm, TagForm\n'), ((5875, 5924), 'django.shortcuts.render', 'render', (['request', '"""backend/edit_tag.html"""', 'context'], {}), "(request, 'backend/edit_tag.html', context)\n", (5881, 5924), False, 'from django.shortcuts import render\n'), ((6040, 6066), 'core.models.Tag.objects.get', 'Tag.objects.get', ([], {'pk': 'tag_id'}), '(pk=tag_id)\n', (6055, 6066), False, 'from core.models import Post, Category, Tag\n'), ((6089, 6130), 'django.contrib.messages.success', 'messages.success', (['request', '"""Tag deleted."""'], {}), "(request, 'Tag deleted.')\n", (6105, 6130), False, 'from django.contrib import messages\n'), ((1235, 1272), 'backend.forms.PostForm', 'PostForm', (['request.POST', 'request.FILES'], {}), '(request.POST, request.FILES)\n', (1243, 1272), False, 'from backend.forms import PostForm, CategoryForm, TagForm\n'), ((1800, 1852), 'backend.forms.PostForm', 'PostForm', (['request.POST', 'request.FILES'], {'instance': 'post'}), '(request.POST, request.FILES, instance=post)\n', (1808, 1852), False, 'from backend.forms import PostForm, CategoryForm, TagForm\n'), ((2369, 2396), 'django.core.urlresolvers.reverse', 'reverse', (['"""user_panel_posts"""'], {}), "('user_panel_posts')\n", (2376, 2396), False, 'from django.core.urlresolvers import reverse\n'), ((3128, 3169), 'backend.forms.CategoryForm', 'CategoryForm', (['request.POST', 'request.FILES'], {}), '(request.POST, request.FILES)\n', (3140, 3169), False, 'from backend.forms import PostForm, CategoryForm, TagForm\n'), ((3751, 3811), 'backend.forms.CategoryForm', 'CategoryForm', (['request.POST', 'request.FILES'], {'instance': 'category'}), '(request.POST, request.FILES, instance=category)\n', (3763, 3811), False, 'from backend.forms import PostForm, CategoryForm, TagForm\n'), ((4374, 4406), 'django.core.urlresolvers.reverse', 'reverse', (['"""user_panel_categories"""'], {}), "('user_panel_categories')\n", (4381, 4406), False, 'from django.core.urlresolvers import reverse\n'), ((5057, 5093), 'backend.forms.TagForm', 'TagForm', (['request.POST', 'request.FILES'], {}), '(request.POST, request.FILES)\n', (5064, 5093), False, 'from backend.forms import PostForm, CategoryForm, TagForm\n'), ((5608, 5658), 'backend.forms.TagForm', 'TagForm', (['request.POST', 'request.FILES'], {'instance': 'tag'}), '(request.POST, request.FILES, instance=tag)\n', (5615, 5658), False, 'from backend.forms import PostForm, CategoryForm, TagForm\n'), ((6164, 6190), 'django.core.urlresolvers.reverse', 'reverse', (['"""user_panel_tags"""'], {}), "('user_panel_tags')\n", (6171, 6190), False, 'from django.core.urlresolvers import reverse\n'), ((1338, 1380), 'django.contrib.messages.success', 'messages.success', (['request', '"""Post created."""'], {}), "(request, 'Post created.')\n", (1354, 1380), False, 'from django.contrib import messages\n'), ((1918, 1960), 'django.contrib.messages.success', 'messages.success', (['request', '"""Post updated."""'], {}), "(request, 'Post updated.')\n", (1934, 1960), False, 'from django.contrib import messages\n'), ((3235, 3281), 'django.contrib.messages.success', 'messages.success', (['request', '"""Category created."""'], {}), "(request, 'Category created.')\n", (3251, 3281), False, 'from django.contrib import messages\n'), ((3877, 3923), 'django.contrib.messages.success', 'messages.success', (['request', '"""Category updated."""'], {}), 
"(request, 'Category updated.')\n", (3893, 3923), False, 'from django.contrib import messages\n'), ((5159, 5200), 'django.contrib.messages.success', 'messages.success', (['request', '"""Tag created."""'], {}), "(request, 'Tag created.')\n", (5175, 5200), False, 'from django.contrib import messages\n'), ((5724, 5765), 'django.contrib.messages.success', 'messages.success', (['request', '"""Tag updated."""'], {}), "(request, 'Tag updated.')\n", (5740, 5765), False, 'from django.contrib import messages\n'), ((1422, 1449), 'django.core.urlresolvers.reverse', 'reverse', (['"""user_panel_posts"""'], {}), "('user_panel_posts')\n", (1429, 1449), False, 'from django.core.urlresolvers import reverse\n'), ((2002, 2029), 'django.core.urlresolvers.reverse', 'reverse', (['"""user_panel_posts"""'], {}), "('user_panel_posts')\n", (2009, 2029), False, 'from django.core.urlresolvers import reverse\n'), ((3323, 3355), 'django.core.urlresolvers.reverse', 'reverse', (['"""user_panel_categories"""'], {}), "('user_panel_categories')\n", (3330, 3355), False, 'from django.core.urlresolvers import reverse\n'), ((3965, 3997), 'django.core.urlresolvers.reverse', 'reverse', (['"""user_panel_categories"""'], {}), "('user_panel_categories')\n", (3972, 3997), False, 'from django.core.urlresolvers import reverse\n'), ((5242, 5268), 'django.core.urlresolvers.reverse', 'reverse', (['"""user_panel_tags"""'], {}), "('user_panel_tags')\n", (5249, 5268), False, 'from django.core.urlresolvers import reverse\n'), ((5807, 5833), 'django.core.urlresolvers.reverse', 'reverse', (['"""user_panel_tags"""'], {}), "('user_panel_tags')\n", (5814, 5833), False, 'from django.core.urlresolvers import reverse\n')] |
import dataclasses
import io
import multiprocessing as _mp
import uuid
import zipfile
from concurrent.futures import Future
from multiprocessing.connection import Connection
from typing import List, Optional, Tuple
import numpy
from tiktorch import log
from tiktorch.rpc import Shutdown
from tiktorch.rpc import mp as _mp_rpc
from tiktorch.rpc.mp import MPServer
from tiktorch.server.reader import eval_model_zip
from .backend import base
from .rpc_interface import IRPCModelSession
@dataclasses.dataclass
class ModelInfo:
# TODO: Test for model info
name: str
input_axes: str
output_axes: str
valid_shapes: List[List[Tuple[str, int]]]
halo: List[Tuple[str, int]]
offset: List[Tuple[str, int]]
scale: List[Tuple[str, float]]
class ModelSessionProcess(IRPCModelSession):
def __init__(self, model_zip: bytes, devices: List[str]) -> None:
with zipfile.ZipFile(io.BytesIO(model_zip)) as model_file:
self._model = eval_model_zip(model_file, devices)
self._datasets = {}
self._worker = base.SessionBackend(self._model)
def forward(self, input_tensor: numpy.ndarray) -> Future:
res = self._worker.forward(input_tensor)
return res
def create_dataset(self, mean, stddev):
id_ = uuid.uuid4().hex
self._datasets[id_] = {"mean": mean, "stddev": stddev}
return id_
def get_model_info(self) -> ModelInfo:
return ModelInfo(
self._model.name,
self._model.input_axes,
self._model.output_axes,
valid_shapes=[self._model.input_shape],
halo=self._model.halo,
scale=self._model.scale,
offset=self._model.offset,
)
def shutdown(self) -> Shutdown:
self._worker.shutdown()
return Shutdown()
def _run_model_session_process(
conn: Connection, model_zip: bytes, devices: List[str], log_queue: Optional[_mp.Queue] = None
):
try:
# from: https://github.com/pytorch/pytorch/issues/973#issuecomment-346405667
import resource
rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
resource.setrlimit(resource.RLIMIT_NOFILE, (4096, rlimit[1]))
except ModuleNotFoundError:
pass # probably running on windows
if log_queue:
log.configure(log_queue)
session_proc = ModelSessionProcess(model_zip, devices)
srv = MPServer(session_proc, conn)
srv.listen()
def start_model_session_process(
model_zip: bytes, devices: List[str], log_queue: Optional[_mp.Queue] = None
) -> Tuple[_mp.Process, IRPCModelSession]:
client_conn, server_conn = _mp.Pipe()
proc = _mp.Process(
target=_run_model_session_process,
name="ModelSessionProcess",
kwargs={"conn": server_conn, "devices": devices, "log_queue": log_queue, "model_zip": model_zip},
)
proc.start()
return proc, _mp_rpc.create_client(IRPCModelSession, client_conn)
| [
"tiktorch.server.reader.eval_model_zip",
"tiktorch.rpc.mp.create_client",
"multiprocessing.Process",
"resource.getrlimit",
"tiktorch.log.configure",
"io.BytesIO",
"uuid.uuid4",
"resource.setrlimit",
"tiktorch.rpc.mp.MPServer",
"tiktorch.rpc.Shutdown",
"multiprocessing.Pipe"
] | [((2405, 2433), 'tiktorch.rpc.mp.MPServer', 'MPServer', (['session_proc', 'conn'], {}), '(session_proc, conn)\n', (2413, 2433), False, 'from tiktorch.rpc.mp import MPServer\n'), ((2640, 2650), 'multiprocessing.Pipe', '_mp.Pipe', ([], {}), '()\n', (2648, 2650), True, 'import multiprocessing as _mp\n'), ((2662, 2842), 'multiprocessing.Process', '_mp.Process', ([], {'target': '_run_model_session_process', 'name': '"""ModelSessionProcess"""', 'kwargs': "{'conn': server_conn, 'devices': devices, 'log_queue': log_queue,\n 'model_zip': model_zip}"}), "(target=_run_model_session_process, name='ModelSessionProcess',\n kwargs={'conn': server_conn, 'devices': devices, 'log_queue': log_queue,\n 'model_zip': model_zip})\n", (2673, 2842), True, 'import multiprocessing as _mp\n'), ((1812, 1822), 'tiktorch.rpc.Shutdown', 'Shutdown', ([], {}), '()\n', (1820, 1822), False, 'from tiktorch.rpc import Shutdown\n'), ((2094, 2136), 'resource.getrlimit', 'resource.getrlimit', (['resource.RLIMIT_NOFILE'], {}), '(resource.RLIMIT_NOFILE)\n', (2112, 2136), False, 'import resource\n'), ((2145, 2206), 'resource.setrlimit', 'resource.setrlimit', (['resource.RLIMIT_NOFILE', '(4096, rlimit[1])'], {}), '(resource.RLIMIT_NOFILE, (4096, rlimit[1]))\n', (2163, 2206), False, 'import resource\n'), ((2310, 2334), 'tiktorch.log.configure', 'log.configure', (['log_queue'], {}), '(log_queue)\n', (2323, 2334), False, 'from tiktorch import log\n'), ((2900, 2952), 'tiktorch.rpc.mp.create_client', '_mp_rpc.create_client', (['IRPCModelSession', 'client_conn'], {}), '(IRPCModelSession, client_conn)\n', (2921, 2952), True, 'from tiktorch.rpc import mp as _mp_rpc\n'), ((972, 1007), 'tiktorch.server.reader.eval_model_zip', 'eval_model_zip', (['model_file', 'devices'], {}), '(model_file, devices)\n', (986, 1007), False, 'from tiktorch.server.reader import eval_model_zip\n'), ((1283, 1295), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (1293, 1295), False, 'import uuid\n'), ((908, 929), 'io.BytesIO', 'io.BytesIO', (['model_zip'], {}), '(model_zip)\n', (918, 929), False, 'import io\n')] |
import unittest
import tests.settings_mock as settings_mock
from tests.activity.classes_mock import FakeLogger
from workflow.workflow_IngestAcceptedSubmission import workflow_IngestAcceptedSubmission
class TestWorkflowIngestAcceptedSubmission(unittest.TestCase):
def setUp(self):
self.workflow = workflow_IngestAcceptedSubmission(
settings_mock, FakeLogger(), None, None, None, None
)
def test_init(self):
self.assertEqual(self.workflow.name, "IngestAcceptedSubmission")
| [
"tests.activity.classes_mock.FakeLogger"
] | [((372, 384), 'tests.activity.classes_mock.FakeLogger', 'FakeLogger', ([], {}), '()\n', (382, 384), False, 'from tests.activity.classes_mock import FakeLogger\n')] |
from urllib import urlencode
import urlparse
from django.shortcuts import Http404, redirect
from django.contrib.auth.views import logout
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.contrib.auth.decorators import login_required
from vumi.utils import load_class_by_string
from go.base.utils import vumi_api
def token(request, token):
# We only need the redis manager here, but it's saner to get a whole
# vumi_api and not worry about all the setup magic.
api = vumi_api()
token_data = api.token_manager.get(token)
if not token_data:
raise Http404
user_id = int(token_data['user_id'])
redirect_to = token_data['redirect_to']
system_token = token_data['system_token']
# If we're authorized and we're the same user_id then redirect to
# where we need to be
if not user_id or request.user.id == user_id:
path, _, qs = redirect_to.partition('?')
params = urlparse.parse_qs(qs)
# since the token can be custom we prepend the size of the user_token
# to the token being forwarded so the view handling the `redirect_to`
# can lookup the token and verify the system token.
params.update({'token': '%s-%s%s' % (len(token), token, system_token)})
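        # Worked example with hypothetical values: for a user token 'abc123'
        # (length 6) and system token 'XYZ', the forwarded value becomes
        # '6-abc123XYZ', i.e. '<len>-<user_token><system_token>', so the view
        # handling `redirect_to` can split off the user token and verify the
        # system token.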
return redirect('%s?%s' % (path, urlencode(params)))
# If we got here then we need authentication and the user's either not
# logged in or is logged in with a wrong account.
if request.user.is_authenticated():
logout(request)
messages.info(request, 'Wrong account for this token.')
return redirect('%s?%s' % (reverse('auth_login'), urlencode({
'next': reverse('token', kwargs={'token': token}),
})))
@login_required
def token_task(request):
api = request.user_api.api
token = request.GET.get('token')
token_data = api.token_manager.verify_get(token)
if not token_data:
raise Http404
params = token_data['extra_params']
callback_name = params['callback_name']
callback_args = params['callback_args']
callback_kwargs = params['callback_kwargs']
return_to = params['return_to']
message = params['message']
message_level = params['message_level']
callback = load_class_by_string(callback_name)
callback(*callback_args, **callback_kwargs)
messages.add_message(request, message_level, message)
return redirect(return_to)
| [
"urlparse.parse_qs",
"go.base.utils.vumi_api",
"django.contrib.messages.info",
"django.core.urlresolvers.reverse",
"django.shortcuts.redirect",
"urllib.urlencode",
"django.contrib.messages.add_message",
"django.contrib.auth.views.logout",
"vumi.utils.load_class_by_string"
] | [((526, 536), 'go.base.utils.vumi_api', 'vumi_api', ([], {}), '()\n', (534, 536), False, 'from go.base.utils import vumi_api\n'), ((2263, 2298), 'vumi.utils.load_class_by_string', 'load_class_by_string', (['callback_name'], {}), '(callback_name)\n', (2283, 2298), False, 'from vumi.utils import load_class_by_string\n'), ((2351, 2404), 'django.contrib.messages.add_message', 'messages.add_message', (['request', 'message_level', 'message'], {}), '(request, message_level, message)\n', (2371, 2404), False, 'from django.contrib import messages\n'), ((2416, 2435), 'django.shortcuts.redirect', 'redirect', (['return_to'], {}), '(return_to)\n', (2424, 2435), False, 'from django.shortcuts import Http404, redirect\n'), ((973, 994), 'urlparse.parse_qs', 'urlparse.parse_qs', (['qs'], {}), '(qs)\n', (990, 994), False, 'import urlparse\n'), ((1530, 1545), 'django.contrib.auth.views.logout', 'logout', (['request'], {}), '(request)\n', (1536, 1545), False, 'from django.contrib.auth.views import logout\n'), ((1554, 1609), 'django.contrib.messages.info', 'messages.info', (['request', '"""Wrong account for this token."""'], {}), "(request, 'Wrong account for this token.')\n", (1567, 1609), False, 'from django.contrib import messages\n'), ((1641, 1662), 'django.core.urlresolvers.reverse', 'reverse', (['"""auth_login"""'], {}), "('auth_login')\n", (1648, 1662), False, 'from django.core.urlresolvers import reverse\n'), ((1332, 1349), 'urllib.urlencode', 'urlencode', (['params'], {}), '(params)\n', (1341, 1349), False, 'from urllib import urlencode\n'), ((1692, 1733), 'django.core.urlresolvers.reverse', 'reverse', (['"""token"""'], {'kwargs': "{'token': token}"}), "('token', kwargs={'token': token})\n", (1699, 1733), False, 'from django.core.urlresolvers import reverse\n')] |
"""Read, write, create Brainvoyager VMR file format."""
import struct
import numpy as np
from bvbabel.utils import (read_variable_length_string,
write_variable_length_string)
# =============================================================================
def read_vmr(filename):
"""Read Brainvoyager VMR file.
Parameters
----------
filename : string
Path to file.
Returns
-------
header : dictionary
Pre-data and post-data headers.
data : 3D numpy.array
Image data.
"""
header = dict()
with open(filename, 'rb') as f:
# ---------------------------------------------------------------------
# VMR Pre-Data Header
# ---------------------------------------------------------------------
# NOTE(Developer Guide 2.6): VMR files contain anatomical 3D data sets,
# typically containing the whole brain (head) of subjects. The
# intensity values are stored as a series of bytes. See the V16 format
# for a version storing each intensity value with two bytes (short
# integers). The VMR format contains a small header followed by the
# actual data followed by a second, more extensive, header. The current
# version of VMR files is "4", which is only slightly different from
# version 3 (as indicated below). Version 3 added offset values to
# format 2 in order to represent large data sets efficiently, e.g. in
# the context of advanced segmentation processing. Compared to the
# original file version "1", file versions 2 and higher contain
# additional header information after the actual data ("post-data
# header"). This allows to read VMR data sets with minimal header
# checking if the extended information is not needed. The information
# in the post-data header contains position information (if available)
# and stores a series of spatial transformations, which might have been
# performed to the original data set ("history record"). The
# post-header data can be probably ignored for custom routines, but is
# important in BrainVoyager QX for spatial transformation and
# coregistration routines as well as for proper visualization.
# Expected binary data: unsigned short int (2 bytes)
data, = struct.unpack('<H', f.read(2))
header["File version"] = data
data, = struct.unpack('<H', f.read(2))
header["DimX"] = data
data, = struct.unpack('<H', f.read(2))
header["DimY"] = data
data, = struct.unpack('<H', f.read(2))
header["DimZ"] = data
# ---------------------------------------------------------------------
# VMR Data
# ---------------------------------------------------------------------
# NOTE(Developer Guide 2.6): Each data element (intensity value) is
# represented in 1 byte. The data is organized in three loops:
# DimZ
# DimY
# DimX
#
# The axes terminology follows the internal BrainVoyager (BV) format.
# The mapping to Talairach axes is as follows:
# BV (X front -> back) [axis 2 after np.reshape] = Y in Tal space
# BV (Y top -> bottom) [axis 1 after np.reshape] = Z in Tal space
# BV (Z left -> right) [axis 0 after np.reshape] = X in Tal space
# Expected binary data: unsigned char (1 byte)
data_img = np.zeros((header["DimZ"] * header["DimY"] * header["DimX"]),
dtype="<B")
for i in range(data_img.size):
data_img[i], = struct.unpack('<B', f.read(1))
data_img = np.reshape(
data_img, (header["DimZ"], header["DimY"], header["DimX"]))
data_img = np.transpose(data_img, (0, 2, 1)) # BV to Tal
data_img = data_img[::-1, ::-1, ::-1] # Flip BV axes
# ---------------------------------------------------------------------
# VMR Post-Data Header
# ---------------------------------------------------------------------
# NOTE(Developer Guide 2.6): The first four entries of the post-data
# header are new since file version "3" and contain offset values for
# each dimension as well as a value indicating the size of a cube with
# iso-dimensions to which the data set will be internally "expanded"
# for certain operations. The axes labels are in terms of
# BrainVoyager's internal format. These four entries are followed by
# scan position information from the original file headers, e.g. from
# DICOM files. The coordinate axes labels in these entries are not in
# terms of BrainVoyager's internal conventions but follow the DICOM
# standard. Then follows eventually a section listing spatial
# transformations which have been eventually performed to create the
# current VMR (e.g. ACPC transformation). Finally, additional
        # information further describes the data set, including the assumed
# left-right convention, the reference space (e.g. Talairach after
# normalization) and voxel resolution.
if header["File version"] >= 3:
# NOTE(Developer Guide 2.6): These four entries have been added in
# file version "3" with BrainVoyager QX 1.7. All other entries are
# identical to file version "2".
# Expected binary data: short int (2 bytes)
data, = struct.unpack('<h', f.read(2))
header["OffsetX"] = data
data, = struct.unpack('<h', f.read(2))
header["OffsetY"] = data
data, = struct.unpack('<h', f.read(2))
header["OffsetZ"] = data
data, = struct.unpack('<h', f.read(2))
header["FramingCubeDim"] = data
# Expected binary data: int (4 bytes)
data, = struct.unpack('<i', f.read(4))
header["PosInfosVerified"] = data
data, = struct.unpack('<i', f.read(4))
header["CoordinateSystem"] = data
# Expected binary data: float (4 bytes)
data, = struct.unpack('<f', f.read(4))
header["Slice1CenterX"] = data # First slice center X coordinate
data, = struct.unpack('<f', f.read(4))
header["Slice1CenterY"] = data # First slice center Y coordinate
data, = struct.unpack('<f', f.read(4))
header["Slice1CenterZ"] = data # First slice center Z coordinate
data, = struct.unpack('<f', f.read(4))
header["SliceNCenterX"] = data # Last slice center X coordinate
data, = struct.unpack('<f', f.read(4))
header["SliceNCenterY"] = data # Last slice center Y coordinate
data, = struct.unpack('<f', f.read(4))
header["SliceNCenterZ"] = data # Last slice center Z coordinate
data, = struct.unpack('<f', f.read(4))
header["RowDirX"] = data # Slice row direction vector X component
data, = struct.unpack('<f', f.read(4))
header["RowDirY"] = data # Slice row direction vector Y component
data, = struct.unpack('<f', f.read(4))
header["RowDirZ"] = data # Slice row direction vector Z component
data, = struct.unpack('<f', f.read(4))
header["ColDirX"] = data # Slice column direction vector X component
data, = struct.unpack('<f', f.read(4))
header["ColDirY"] = data # Slice column direction vector Y component
data, = struct.unpack('<f', f.read(4))
header["ColDirZ"] = data # Slice column direction vector Z component
# Expected binary data: int (4 bytes)
data, = struct.unpack('<i', f.read(4))
header["NRows"] = data # Nr of rows of slice image matrix
data, = struct.unpack('<i', f.read(4))
header["NCols"] = data # Nr of columns of slice image matrix
# Expected binary data: float (4 bytes)
data, = struct.unpack('<f', f.read(4))
header["FoVRows"] = data # Field of view extent in row direction [mm]
data, = struct.unpack('<f', f.read(4))
header["FoVCols"] = data # Field of view extent in column dir. [mm]
data, = struct.unpack('<f', f.read(4))
header["SliceThickness"] = data # Slice thickness [mm]
data, = struct.unpack('<f', f.read(4))
header["GapThickness"] = data # Gap thickness [mm]
# Expected binary data: int (4 bytes)
data, = struct.unpack('<i', f.read(4))
header["NrOfPastSpatialTransformations"] = data
if header["NrOfPastSpatialTransformations"] != 0:
# NOTE(Developer Guide 2.6): For each past transformation, the
# information specified in the following table is stored. The
# "type of transformation" is a value determining how many
# subsequent values define the transformation:
# "1": Rigid body+scale (3 translation, 3 rotation, 3 scale)
# "2": Affine transformation (16 values, 4x4 matrix)
# "4": Talairach transformation
# "5": Un-Talairach transformation (1 - 5 -> BV axes)
header["PastTransformation"] = []
for i in range(header["NrOfPastSpatialTransformations"]):
header["PastTransformation"].append(dict())
# Expected binary data: variable-length string
data = read_variable_length_string(f)
header["PastTransformation"][i]["Name"] = data
# Expected binary data: int (4 bytes)
data, = struct.unpack('<i', f.read(4))
header["PastTransformation"][i]["Type"] = data
# Expected binary data: variable-length string
data = read_variable_length_string(f)
header["PastTransformation"][i]["SourceFileName"] = data
# Expected binary data: int (4 bytes)
data, = struct.unpack('<i', f.read(4))
header["PastTransformation"][i]["NrOfValues"] = data
# Store transformation values as a list
trans_values = []
for j in range(header["PastTransformation"][i]["NrOfValues"]):
# Expected binary data: float (4 bytes)
data, = struct.unpack('<f', f.read(4))
trans_values.append(data)
header["PastTransformation"][i]["Values"] = trans_values
# Expected binary data: char (1 byte)
data, = struct.unpack('<B', f.read(1))
header["LeftRightConvention"] = data # modified in v4
data, = struct.unpack('<B', f.read(1))
header["ReferenceSpaceVMR"] = data # new in v4
# Expected binary data: float (4 bytes)
data, = struct.unpack('<f', f.read(4))
header["VoxelSizeX"] = data # Voxel resolution along X axis
data, = struct.unpack('<f', f.read(4))
header["VoxelSizeY"] = data # Voxel resolution along Y axis
data, = struct.unpack('<f', f.read(4))
header["VoxelSizeZ"] = data # Voxel resolution along Z axis
# Expected binary data: char (1 byte)
data, = struct.unpack('<B', f.read(1))
header["VoxelResolutionVerified"] = data
data, = struct.unpack('<B', f.read(1))
header["VoxelResolutionInTALmm"] = data
# Expected binary data: int (4 bytes)
data, = struct.unpack('<i', f.read(4))
header["VMROrigV16MinValue"] = data # 16-bit data min intensity
data, = struct.unpack('<i', f.read(4))
header["VMROrigV16MeanValue"] = data # 16-bit data mean intensity
data, = struct.unpack('<i', f.read(4))
header["VMROrigV16MaxValue"] = data # 16-bit data max intensity
return header, data_img
# =============================================================================
def write_vmr(filename, header, data_img):
"""Protocol to write Brainvoyager VMR file.
Parameters
----------
filename : string
Output filename.
header : dictionary
Header of VMR file.
data_img : numpy.array, 3D
Image.
"""
with open(filename, 'wb') as f:
# ---------------------------------------------------------------------
# VMR Pre-Data Header
# ---------------------------------------------------------------------
# Expected binary data: unsigned short int (2 bytes)
data = header["File version"]
f.write(struct.pack('<H', data))
data = header["DimX"]
f.write(struct.pack('<H', data))
data = header["DimY"]
f.write(struct.pack('<H', data))
data = header["DimZ"]
f.write(struct.pack('<H', data))
# ---------------------------------------------------------------------
# VMR Data
# ---------------------------------------------------------------------
# Convert axes from Nifti standard back to BV standard
data_img = data_img[::-1, ::-1, ::-1] # Flip BV axes
data_img = np.transpose(data_img, (0, 2, 1)) # BV to Tal
# Expected binary data: unsigned char (1 byte)
data_img = data_img.flatten()
for i in range(data_img.size):
f.write(struct.pack('<B', data_img[i]))
# ---------------------------------------------------------------------
# VMR Post-Data Header
# ---------------------------------------------------------------------
if header["File version"] >= 3:
# Expected binary data: short int (2 bytes)
data = header["OffsetX"]
f.write(struct.pack('<h', data))
data = header["OffsetY"]
f.write(struct.pack('<h', data))
data = header["OffsetZ"]
f.write(struct.pack('<h', data))
data = header["FramingCubeDim"]
f.write(struct.pack('<h', data))
# Expected binary data: int (4 bytes)
data = header["PosInfosVerified"]
f.write(struct.pack('<i', data))
data = header["CoordinateSystem"]
f.write(struct.pack('<i', data))
# Expected binary data: float (4 bytes)
data = header["Slice1CenterX"]
f.write(struct.pack('<f', data))
data = header["Slice1CenterY"]
f.write(struct.pack('<f', data))
data = header["Slice1CenterZ"]
f.write(struct.pack('<f', data))
data = header["SliceNCenterX"]
f.write(struct.pack('<f', data))
data = header["SliceNCenterY"]
f.write(struct.pack('<f', data))
data = header["SliceNCenterZ"]
f.write(struct.pack('<f', data))
data = header["RowDirX"]
f.write(struct.pack('<f', data))
data = header["RowDirY"]
f.write(struct.pack('<f', data))
data = header["RowDirZ"]
f.write(struct.pack('<f', data))
data = header["ColDirX"]
f.write(struct.pack('<f', data))
data = header["ColDirY"]
f.write(struct.pack('<f', data))
data = header["ColDirZ"]
f.write(struct.pack('<f', data))
# Expected binary data: int (4 bytes)
data = header["NRows"]
f.write(struct.pack('<i', data))
data = header["NCols"]
f.write(struct.pack('<i', data))
# Expected binary data: float (4 bytes)
data = header["FoVRows"]
f.write(struct.pack('<f', data))
data = header["FoVCols"]
f.write(struct.pack('<f', data))
data = header["SliceThickness"]
f.write(struct.pack('<f', data))
data = header["GapThickness"]
f.write(struct.pack('<f', data))
# Expected binary data: int (4 bytes)
data = header["NrOfPastSpatialTransformations"]
f.write(struct.pack('<i', data))
if header["NrOfPastSpatialTransformations"] != 0:
for i in range(header["NrOfPastSpatialTransformations"]):
# Expected binary data: variable-length string
data = header["PastTransformation"][i]["Name"]
write_variable_length_string(f, data)
# Expected binary data: int (4 bytes)
data = header["PastTransformation"][i]["Type"]
f.write(struct.pack('<i', data))
# Expected binary data: variable-length string
data = header["PastTransformation"][i]["SourceFileName"]
write_variable_length_string(f, data)
# Expected binary data: int (4 bytes)
data = header["PastTransformation"][i]["NrOfValues"]
f.write(struct.pack('<i', data))
# Transformation values are stored as a list
trans_values = header["PastTransformation"][i]["Values"]
for j in range(header["PastTransformation"][i]["NrOfValues"]):
# Expected binary data: float (4 bytes)
f.write(struct.pack('<f', trans_values[j]))
# Expected binary data: char (1 byte)
data = header["LeftRightConvention"]
f.write(struct.pack('<B', data))
data = header["ReferenceSpaceVMR"]
f.write(struct.pack('<B', data))
# Expected binary data: float (4 bytes)
data = header["VoxelSizeX"]
f.write(struct.pack('<f', data))
data = header["VoxelSizeY"]
f.write(struct.pack('<f', data))
data = header["VoxelSizeZ"]
f.write(struct.pack('<f', data))
# Expected binary data: char (1 byte)
data = header["VoxelResolutionVerified"]
f.write(struct.pack('<B', data))
data = header["VoxelResolutionInTALmm"]
f.write(struct.pack('<B', data))
# Expected binary data: int (4 bytes)
data = header["VMROrigV16MinValue"]
f.write(struct.pack('<i', data))
data = header["VMROrigV16MeanValue"]
f.write(struct.pack('<i', data))
data = header["VMROrigV16MaxValue"]
f.write(struct.pack('<i', data))
return print("VMR saved.")
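# =============================================================================
# Minimal usage sketch (illustrative only; the file names below are
# hypothetical and not part of bvbabel): read a VMR, inspect its dimensions,
# then write an unmodified copy back to disk.
#
#   header, data = read_vmr("sub-01_T1w.vmr")
#   print(header["DimX"], header["DimY"], header["DimZ"], data.shape)
#   write_vmr("sub-01_T1w_copy.vmr", header, data)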
| [
"bvbabel.utils.write_variable_length_string",
"numpy.reshape",
"struct.pack",
"numpy.zeros",
"numpy.transpose",
"bvbabel.utils.read_variable_length_string"
] | [((3535, 3605), 'numpy.zeros', 'np.zeros', (["(header['DimZ'] * header['DimY'] * header['DimX'])"], {'dtype': '"""<B"""'}), "(header['DimZ'] * header['DimY'] * header['DimX'], dtype='<B')\n", (3543, 3605), True, 'import numpy as np\n'), ((3752, 3822), 'numpy.reshape', 'np.reshape', (['data_img', "(header['DimZ'], header['DimY'], header['DimX'])"], {}), "(data_img, (header['DimZ'], header['DimY'], header['DimX']))\n", (3762, 3822), True, 'import numpy as np\n'), ((3856, 3889), 'numpy.transpose', 'np.transpose', (['data_img', '(0, 2, 1)'], {}), '(data_img, (0, 2, 1))\n', (3868, 3889), True, 'import numpy as np\n'), ((13096, 13129), 'numpy.transpose', 'np.transpose', (['data_img', '(0, 2, 1)'], {}), '(data_img, (0, 2, 1))\n', (13108, 13129), True, 'import numpy as np\n'), ((12534, 12557), 'struct.pack', 'struct.pack', (['"""<H"""', 'data'], {}), "('<H', data)\n", (12545, 12557), False, 'import struct\n'), ((12605, 12628), 'struct.pack', 'struct.pack', (['"""<H"""', 'data'], {}), "('<H', data)\n", (12616, 12628), False, 'import struct\n'), ((12676, 12699), 'struct.pack', 'struct.pack', (['"""<H"""', 'data'], {}), "('<H', data)\n", (12687, 12699), False, 'import struct\n'), ((12747, 12770), 'struct.pack', 'struct.pack', (['"""<H"""', 'data'], {}), "('<H', data)\n", (12758, 12770), False, 'import struct\n'), ((14056, 14079), 'struct.pack', 'struct.pack', (['"""<i"""', 'data'], {}), "('<i', data)\n", (14067, 14079), False, 'import struct\n'), ((14139, 14162), 'struct.pack', 'struct.pack', (['"""<i"""', 'data'], {}), "('<i', data)\n", (14150, 14162), False, 'import struct\n'), ((14268, 14291), 'struct.pack', 'struct.pack', (['"""<f"""', 'data'], {}), "('<f', data)\n", (14279, 14291), False, 'import struct\n'), ((14348, 14371), 'struct.pack', 'struct.pack', (['"""<f"""', 'data'], {}), "('<f', data)\n", (14359, 14371), False, 'import struct\n'), ((14428, 14451), 'struct.pack', 'struct.pack', (['"""<f"""', 'data'], {}), "('<f', data)\n", (14439, 14451), False, 'import struct\n'), ((14508, 14531), 'struct.pack', 'struct.pack', (['"""<f"""', 'data'], {}), "('<f', data)\n", (14519, 14531), False, 'import struct\n'), ((14588, 14611), 'struct.pack', 'struct.pack', (['"""<f"""', 'data'], {}), "('<f', data)\n", (14599, 14611), False, 'import struct\n'), ((14668, 14691), 'struct.pack', 'struct.pack', (['"""<f"""', 'data'], {}), "('<f', data)\n", (14679, 14691), False, 'import struct\n'), ((14742, 14765), 'struct.pack', 'struct.pack', (['"""<f"""', 'data'], {}), "('<f', data)\n", (14753, 14765), False, 'import struct\n'), ((14816, 14839), 'struct.pack', 'struct.pack', (['"""<f"""', 'data'], {}), "('<f', data)\n", (14827, 14839), False, 'import struct\n'), ((14890, 14913), 'struct.pack', 'struct.pack', (['"""<f"""', 'data'], {}), "('<f', data)\n", (14901, 14913), False, 'import struct\n'), ((14964, 14987), 'struct.pack', 'struct.pack', (['"""<f"""', 'data'], {}), "('<f', data)\n", (14975, 14987), False, 'import struct\n'), ((15038, 15061), 'struct.pack', 'struct.pack', (['"""<f"""', 'data'], {}), "('<f', data)\n", (15049, 15061), False, 'import struct\n'), ((15112, 15135), 'struct.pack', 'struct.pack', (['"""<f"""', 'data'], {}), "('<f', data)\n", (15123, 15135), False, 'import struct\n'), ((15231, 15254), 'struct.pack', 'struct.pack', (['"""<i"""', 'data'], {}), "('<i', data)\n", (15242, 15254), False, 'import struct\n'), ((15303, 15326), 'struct.pack', 'struct.pack', (['"""<i"""', 'data'], {}), "('<i', data)\n", (15314, 15326), False, 'import struct\n'), ((15426, 15449), 'struct.pack', 'struct.pack', 
(['"""<f"""', 'data'], {}), "('<f', data)\n", (15437, 15449), False, 'import struct\n'), ((15500, 15523), 'struct.pack', 'struct.pack', (['"""<f"""', 'data'], {}), "('<f', data)\n", (15511, 15523), False, 'import struct\n'), ((15581, 15604), 'struct.pack', 'struct.pack', (['"""<f"""', 'data'], {}), "('<f', data)\n", (15592, 15604), False, 'import struct\n'), ((15660, 15683), 'struct.pack', 'struct.pack', (['"""<f"""', 'data'], {}), "('<f', data)\n", (15671, 15683), False, 'import struct\n'), ((15804, 15827), 'struct.pack', 'struct.pack', (['"""<i"""', 'data'], {}), "('<i', data)\n", (15815, 15827), False, 'import struct\n'), ((17115, 17138), 'struct.pack', 'struct.pack', (['"""<B"""', 'data'], {}), "('<B', data)\n", (17126, 17138), False, 'import struct\n'), ((17199, 17222), 'struct.pack', 'struct.pack', (['"""<B"""', 'data'], {}), "('<B', data)\n", (17210, 17222), False, 'import struct\n'), ((17325, 17348), 'struct.pack', 'struct.pack', (['"""<f"""', 'data'], {}), "('<f', data)\n", (17336, 17348), False, 'import struct\n'), ((17402, 17425), 'struct.pack', 'struct.pack', (['"""<f"""', 'data'], {}), "('<f', data)\n", (17413, 17425), False, 'import struct\n'), ((17479, 17502), 'struct.pack', 'struct.pack', (['"""<f"""', 'data'], {}), "('<f', data)\n", (17490, 17502), False, 'import struct\n'), ((17616, 17639), 'struct.pack', 'struct.pack', (['"""<B"""', 'data'], {}), "('<B', data)\n", (17627, 17639), False, 'import struct\n'), ((17705, 17728), 'struct.pack', 'struct.pack', (['"""<B"""', 'data'], {}), "('<B', data)\n", (17716, 17728), False, 'import struct\n'), ((17837, 17860), 'struct.pack', 'struct.pack', (['"""<i"""', 'data'], {}), "('<i', data)\n", (17848, 17860), False, 'import struct\n'), ((17923, 17946), 'struct.pack', 'struct.pack', (['"""<i"""', 'data'], {}), "('<i', data)\n", (17934, 17946), False, 'import struct\n'), ((18008, 18031), 'struct.pack', 'struct.pack', (['"""<i"""', 'data'], {}), "('<i', data)\n", (18019, 18031), False, 'import struct\n'), ((9457, 9487), 'bvbabel.utils.read_variable_length_string', 'read_variable_length_string', (['f'], {}), '(f)\n', (9484, 9487), False, 'from bvbabel.utils import read_variable_length_string, write_variable_length_string\n'), ((9811, 9841), 'bvbabel.utils.read_variable_length_string', 'read_variable_length_string', (['f'], {}), '(f)\n', (9838, 9841), False, 'from bvbabel.utils import read_variable_length_string, write_variable_length_string\n'), ((13296, 13326), 'struct.pack', 'struct.pack', (['"""<B"""', 'data_img[i]'], {}), "('<B', data_img[i])\n", (13307, 13326), False, 'import struct\n'), ((13673, 13696), 'struct.pack', 'struct.pack', (['"""<h"""', 'data'], {}), "('<h', data)\n", (13684, 13696), False, 'import struct\n'), ((13755, 13778), 'struct.pack', 'struct.pack', (['"""<h"""', 'data'], {}), "('<h', data)\n", (13766, 13778), False, 'import struct\n'), ((13837, 13860), 'struct.pack', 'struct.pack', (['"""<h"""', 'data'], {}), "('<h', data)\n", (13848, 13860), False, 'import struct\n'), ((13926, 13949), 'struct.pack', 'struct.pack', (['"""<h"""', 'data'], {}), "('<h', data)\n", (13937, 13949), False, 'import struct\n'), ((16100, 16137), 'bvbabel.utils.write_variable_length_string', 'write_variable_length_string', (['f', 'data'], {}), '(f, data)\n', (16128, 16137), False, 'from bvbabel.utils import read_variable_length_string, write_variable_length_string\n'), ((16458, 16495), 'bvbabel.utils.write_variable_length_string', 'write_variable_length_string', (['f', 'data'], {}), '(f, data)\n', (16486, 16495), False, 'from bvbabel.utils 
import read_variable_length_string, write_variable_length_string\n'), ((16280, 16303), 'struct.pack', 'struct.pack', (['"""<i"""', 'data'], {}), "('<i', data)\n", (16291, 16303), False, 'import struct\n'), ((16644, 16667), 'struct.pack', 'struct.pack', (['"""<i"""', 'data'], {}), "('<i', data)\n", (16655, 16667), False, 'import struct\n'), ((16971, 17005), 'struct.pack', 'struct.pack', (['"""<f"""', 'trans_values[j]'], {}), "('<f', trans_values[j])\n", (16982, 17005), False, 'import struct\n')] |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
test pretrained models
"""
from __future__ import print_function
import mxnet as mx
from common import find_mxnet, modelzoo
from score import score
VAL_DATA='data/val-5k-256.rec'
def download_data():
return mx.test_utils.download(
'http://data.mxnet.io/data/val-5k-256.rec', VAL_DATA)
def test_imagenet1k_resnet(**kwargs):
models = ['imagenet1k-resnet-50', 'imagenet1k-resnet-152']
accs = [.77, .78]
for (m, g) in zip(models, accs):
acc = mx.metric.create('acc')
(speed,) = score(model=m, data_val=VAL_DATA,
rgb_mean='0,0,0', metrics=acc, **kwargs)
r = acc.get()[1]
print('Tested %s, acc = %f, speed = %f img/sec' % (m, r, speed))
assert r > g and r < g + .1
def test_imagenet1k_inception_bn(**kwargs):
acc = mx.metric.create('acc')
m = 'imagenet1k-inception-bn'
g = 0.75
(speed,) = score(model=m,
data_val=VAL_DATA,
rgb_mean='123.68,116.779,103.939', metrics=acc, **kwargs)
r = acc.get()[1]
print('Tested %s acc = %f, speed = %f img/sec' % (m, r, speed))
assert r > g and r < g + .1
if __name__ == '__main__':
gpus = mx.test_utils.list_gpus()
assert len(gpus) > 0
batch_size = 16 * len(gpus)
gpus = ','.join([str(i) for i in gpus])
kwargs = {'gpus':gpus, 'batch_size':batch_size, 'max_num_examples':500}
download_data()
test_imagenet1k_resnet(**kwargs)
test_imagenet1k_inception_bn(**kwargs)
| [
"score.score",
"mxnet.test_utils.download",
"mxnet.metric.create",
"mxnet.test_utils.list_gpus"
] | [((1002, 1078), 'mxnet.test_utils.download', 'mx.test_utils.download', (['"""http://data.mxnet.io/data/val-5k-256.rec"""', 'VAL_DATA'], {}), "('http://data.mxnet.io/data/val-5k-256.rec', VAL_DATA)\n", (1024, 1078), True, 'import mxnet as mx\n'), ((1595, 1618), 'mxnet.metric.create', 'mx.metric.create', (['"""acc"""'], {}), "('acc')\n", (1611, 1618), True, 'import mxnet as mx\n'), ((1681, 1776), 'score.score', 'score', ([], {'model': 'm', 'data_val': 'VAL_DATA', 'rgb_mean': '"""123.68,116.779,103.939"""', 'metrics': 'acc'}), "(model=m, data_val=VAL_DATA, rgb_mean='123.68,116.779,103.939',\n metrics=acc, **kwargs)\n", (1686, 1776), False, 'from score import score\n'), ((1975, 2000), 'mxnet.test_utils.list_gpus', 'mx.test_utils.list_gpus', ([], {}), '()\n', (1998, 2000), True, 'import mxnet as mx\n'), ((1263, 1286), 'mxnet.metric.create', 'mx.metric.create', (['"""acc"""'], {}), "('acc')\n", (1279, 1286), True, 'import mxnet as mx\n'), ((1306, 1380), 'score.score', 'score', ([], {'model': 'm', 'data_val': 'VAL_DATA', 'rgb_mean': '"""0,0,0"""', 'metrics': 'acc'}), "(model=m, data_val=VAL_DATA, rgb_mean='0,0,0', metrics=acc, **kwargs)\n", (1311, 1380), False, 'from score import score\n')] |
# (c) Copyright [2018-2022] Micro Focus or one of its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# |_ |~) _ _| _ /~\ _ |.
# |_)\/ |_)(_|(_|| \_/|_|(_|||
# /
# ____________ ______
# / __ `\ / /
# | \/ / / /
# |______ / / /
# |____/ / /
# _____________ / /
# \ / / /
# \ / / /
# \_______/ / /
# ______ / /
# \ / / /
# \ / / /
# \/ / /
# / /
# / /
# \ /
# \ /
# \/
# _
# \ / _ __|_. _ _ |_)
# \/ (/_| | |(_(_|| \/
# /
# VerticaPy is a Python library with scikit-like functionality for conducting
# data science projects on data stored in Vertica, taking advantage Vertica’s
# speed and built-in analytics and machine learning features. It supports the
# entire data science life cycle, uses a ‘pipeline’ mechanism to sequentialize
# data transformation operations, and offers beautiful graphical options.
#
# VerticaPy aims to do all of the above. The idea is simple: instead of moving
# data around for processing, VerticaPy brings the logic to the data.
#
#
# Modules
#
# Standard Python Modules
import math, re, decimal, warnings, datetime
from collections.abc import Iterable
from typing import Union
# VerticaPy Modules
import verticapy
from verticapy.utilities import *
from verticapy.toolbox import *
from verticapy.errors import *
##
#
# __ __ ______ ______ __ __ __ __ __ __ __
# /\ \ / / /\ ___\ /\ __ \ /\ \ /\ \/\ \ /\ "-./ \ /\ "-.\ \
# \ \ \'/ \ \ \____ \ \ \/\ \ \ \ \____ \ \ \_\ \ \ \ \-./\ \ \ \ \-. \
# \ \__| \ \_____\ \ \_____\ \ \_____\ \ \_____\ \ \_\ \ \_\ \ \_\\"\_\
# \/_/ \/_____/ \/_____/ \/_____/ \/_____/ \/_/ \/_/ \/_/ \/_/
#
#
# ---#
class vColumn(str_sql):
"""
---------------------------------------------------------------------------
Python object that stores all user transformations. If the vDataFrame
represents the entire relation, a vColumn can be seen as one column of that
relation. vColumns simplify several processes with its abstractions.
Parameters
----------
alias: str
vColumn alias.
transformations: list, optional
List of the different transformations. Each transformation must be similar
to the following: (function, type, category)
parent: vDataFrame, optional
Parent of the vColumn. One vDataFrame can have multiple children vColumns
whereas one vColumn can only have one parent.
catalog: dict, optional
Catalog where each key corresponds to an aggregation. vColumns will memorize
    the already computed aggregations to improve performance. The catalog will
be updated when the parent vDataFrame is modified.
Attributes
----------
alias, str : vColumn alias.
catalog, dict : Catalog of pre-computed aggregations.
parent, vDataFrame : Parent of the vColumn.
transformations, str : List of the different transformations.
"""
#
# Special Methods
#
# ---#
def __init__(
self, alias: str, transformations: list = [], parent=None, catalog: dict = {}
):
self.parent, self.alias, self.transformations = (
parent,
alias,
[elem for elem in transformations],
)
self.catalog = {
"cov": {},
"pearson": {},
"spearman": {},
"spearmand": {},
"kendall": {},
"cramer": {},
"biserial": {},
"regr_avgx": {},
"regr_avgy": {},
"regr_count": {},
"regr_intercept": {},
"regr_r2": {},
"regr_slope": {},
"regr_sxx": {},
"regr_sxy": {},
"regr_syy": {},
}
for elem in catalog:
self.catalog[elem] = catalog[elem]
# ---#
def __getitem__(self, index):
if isinstance(index, slice):
assert index.step in (1, None), ValueError(
"vColumn doesn't allow slicing having steps different than 1."
)
index_stop = index.stop
index_start = index.start
if not (isinstance(index_start, int)):
index_start = 0
if index_start < 0:
index_start += self.parent.shape()[0]
if isinstance(index_stop, int):
if index_stop < 0:
index_stop += self.parent.shape()[0]
limit = index_stop - index_start
if limit <= 0:
limit = 0
limit = " LIMIT {}".format(limit)
else:
limit = ""
query = "(SELECT {} FROM {}{} OFFSET {}{}) VERTICAPY_SUBTABLE".format(
self.alias,
self.parent.__genSQL__(),
self.parent.__get_last_order_by__(),
index_start,
limit,
)
return vDataFrameSQL(query)
elif isinstance(index, int):
cast = "::float" if self.category() == "float" else ""
if index < 0:
index += self.parent.shape()[0]
query = "SELECT {}{} FROM {}{} OFFSET {} LIMIT 1".format(
self.alias,
cast,
self.parent.__genSQL__(),
self.parent.__get_last_order_by__(),
index,
)
return executeSQL(
query=query,
title="Getting the vColumn element.",
method="fetchfirstelem",
)
else:
return getattr(self, index)
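    # Illustrative indexing examples (hypothetical vDataFrame `vdf`):
    #   vdf["age"][0:10]  # slice -> vDataFrame restricted to 10 rows of this column
    #   vdf["age"][3]     # int   -> the single value at row offset 3
    # Any other index falls back to attribute lookup via getattr.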
# ---#
def __len__(self):
return int(self.count())
# ---#
def __nonzero__(self):
return self.count() > 0
# ---#
def __repr__(self):
return self.head(limit=verticapy.options["max_rows"]).__repr__()
# ---#
def _repr_html_(self):
return self.head(limit=verticapy.options["max_rows"])._repr_html_()
# ---#
def __setattr__(self, attr, val):
self.__dict__[attr] = val
#
# Methods
#
# ---#
def aad(self):
"""
---------------------------------------------------------------------------
Aggregates the vColumn using 'aad' (Average Absolute Deviation).
Returns
-------
float
aad
See Also
--------
vDataFrame.aggregate : Computes the vDataFrame input aggregations.
"""
return self.aggregate(["aad"]).values[self.alias][0]
# ---#
def abs(self):
"""
---------------------------------------------------------------------------
Applies the absolute value function to the input vColumn.
Returns
-------
vDataFrame
self.parent
See Also
--------
vDataFrame[].apply : Applies a function to the input vColumn.
"""
return self.apply(func="ABS({})")
# ---#
def add(self, x: float):
"""
---------------------------------------------------------------------------
Adds the input element to the vColumn.
Parameters
----------
x: float
If the vColumn type is date like (date, datetime ...), the parameter 'x'
will represent the number of seconds, otherwise it will represent a number.
Returns
-------
vDataFrame
self.parent
See Also
--------
vDataFrame[].apply : Applies a function to the input vColumn.
"""
check_types([("x", x, [int, float])])
if self.isdate():
return self.apply(func="TIMESTAMPADD(SECOND, {}, {})".format(x, "{}"))
else:
return self.apply(func="{} + ({})".format("{}", x))
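    # Illustrative examples (hypothetical vDataFrame `vdf`):
    #   vdf["price"].add(5)        # numerical column: becomes price + 5
    #   vdf["order_ts"].add(3600)  # date-like column: shifted by 3600 seconds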
# ---#
def add_copy(self, name: str):
"""
---------------------------------------------------------------------------
Adds a copy vColumn to the parent vDataFrame.
Parameters
----------
name: str
Name of the copy.
Returns
-------
vDataFrame
self.parent
See Also
--------
vDataFrame.eval : Evaluates a customized expression.
"""
check_types([("name", name, [str])])
name = quote_ident(name.replace('"', "_"))
assert name.replace('"', ""), EmptyParameter(
"The parameter 'name' must not be empty"
)
assert not (self.parent.is_colname_in(name)), NameError(
f"A vColumn has already the alias {name}.\nBy changing the parameter 'name', you'll be able to solve this issue."
)
new_vColumn = vColumn(
name,
parent=self.parent,
transformations=[item for item in self.transformations],
catalog=self.catalog,
)
setattr(self.parent, name, new_vColumn)
setattr(self.parent, name[1:-1], new_vColumn)
self.parent._VERTICAPY_VARIABLES_["columns"] += [name]
self.parent.__add_to_history__(
"[Add Copy]: A copy of the vColumn {} named {} was added to the vDataFrame.".format(
self.alias, name
)
)
return self.parent
# ---#
def aggregate(self, func: list):
"""
---------------------------------------------------------------------------
Aggregates the vColumn using the input functions.
Parameters
----------
func: list
        List of the different aggregations.
aad : average absolute deviation
            approx_unique : approximate cardinality
count : number of non-missing elements
cvar : conditional value at risk
dtype : vColumn type
iqr : interquartile range
kurtosis : kurtosis
jb : Jarque-Bera index
mad : median absolute deviation
max : maximum
mean : average
median : median
min : minimum
            mode         : most frequent element
percent : percent of non-missing elements
q% : q quantile (ex: 50% for the median)
prod : product
range : difference between the max and the min
sem : standard error of the mean
skewness : skewness
sum : sum
std : standard deviation
            topk         : kth most frequent element (ex: top1 for the mode)
            topk_percent : kth most frequent element density
unique : cardinality (count distinct)
var : variance
            Other aggregations may work if they are supported by
            the DB version you are using.
Returns
-------
tablesample
An object containing the result. For more information, see
utilities.tablesample.
See Also
--------
vDataFrame.analytic : Adds a new vColumn to the vDataFrame by using an advanced
analytical function on a specific vColumn.
"""
return self.parent.aggregate(func=func, columns=[self.alias]).transpose()
agg = aggregate
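    # Illustrative example (hypothetical vDataFrame `vdf`); the result is a
    # tablesample indexed by the requested aggregations:
    #   vdf["age"].aggregate(["min", "50%", "max", "unique"])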
# ---#
def apply(self, func: str, copy_name: str = ""):
"""
---------------------------------------------------------------------------
Applies a function to the vColumn.
Parameters
----------
func: str,
Function in pure SQL used to transform the vColumn.
The function variable must be composed of two flower brackets {}. For
example to apply the function: x -> x^2 + 2 use "POWER({}, 2) + 2".
copy_name: str, optional
If not empty, a copy will be created using the input Name.
Returns
-------
vDataFrame
self.parent
See Also
--------
vDataFrame.apply : Applies functions to the input vColumns.
vDataFrame.applymap : Applies a function to all the vColumns.
vDataFrame.eval : Evaluates a customized expression.
"""
if isinstance(func, str_sql):
func = str(func)
check_types([("func", func, [str]), ("copy_name", copy_name, [str])])
try:
try:
ctype = get_data_types(
"SELECT {} AS apply_test_feature FROM {} WHERE {} IS NOT NULL LIMIT 0".format(
func.replace("{}", self.alias),
self.parent.__genSQL__(),
self.alias,
),
"apply_test_feature",
)
except:
ctype = get_data_types(
"SELECT {} AS apply_test_feature FROM {} WHERE {} IS NOT NULL LIMIT 0".format(
func.replace("{}", self.alias),
self.parent.__genSQL__(),
self.alias,
),
"apply_test_feature",
)
category = get_category_from_vertica_type(ctype=ctype)
all_cols, max_floor = self.parent.get_columns(), 0
for column in all_cols:
try:
if (quote_ident(column) in func) or (
re.search(
re.compile("\\b{}\\b".format(column.replace('"', ""))), func
)
):
max_floor = max(
len(self.parent[column].transformations), max_floor
)
except:
pass
max_floor -= len(self.transformations)
if copy_name:
self.add_copy(name=copy_name)
for k in range(max_floor):
self.parent[copy_name].transformations += [
("{}", self.ctype(), self.category())
]
self.parent[copy_name].transformations += [(func, ctype, category)]
self.parent[copy_name].catalog = self.catalog
self.parent.__add_to_history__(
"[Apply]: The vColumn '{}' was transformed with the func 'x -> {}'.".format(
copy_name.replace('"', ""), func.replace("{}", "x"),
)
)
else:
for k in range(max_floor):
self.transformations += [("{}", self.ctype(), self.category())]
self.transformations += [(func, ctype, category)]
self.parent.__update_catalog__(erase=True, columns=[self.alias])
self.parent.__add_to_history__(
"[Apply]: The vColumn '{}' was transformed with the func 'x -> {}'.".format(
self.alias.replace('"', ""), func.replace("{}", "x"),
)
)
return self.parent
except Exception as e:
raise QueryError(
"{}\nError when applying the func 'x -> {}' to '{}'".format(
e, func.replace("{}", "x"), self.alias.replace('"', "")
)
)
# ---#
def apply_fun(self, func: str, x: float = 2):
"""
---------------------------------------------------------------------------
Applies a default function to the vColumn.
Parameters
----------
func: str
Function to use to transform the vColumn.
abs : absolute value
acos : trigonometric inverse cosine
asin : trigonometric inverse sine
atan : trigonometric inverse tangent
cbrt : cube root
ceil : value up to the next whole number
cos : trigonometric cosine
cosh : hyperbolic cosine
cot : trigonometric cotangent
exp : exponential function
floor : value down to the next whole number
ln : natural logarithm
log : logarithm
log10 : base 10 logarithm
mod : remainder of a division operation
pow : number raised to the power of another number
round : rounds a value to a specified number of decimal places
sign : arithmetic sign
sin : trigonometric sine
sinh : hyperbolic sine
sqrt : arithmetic square root
tan : trigonometric tangent
tanh : hyperbolic tangent
x: int/float, optional
If the function has two arguments (example, power or mod), 'x' represents
the second argument.
Returns
-------
vDataFrame
self.parent
See Also
--------
vDataFrame[].apply : Applies a function to the vColumn.
"""
check_types(
[
(
"func",
func,
[
"abs",
"acos",
"asin",
"atan",
"cbrt",
"ceil",
"cos",
"cosh",
"cot",
"exp",
"floor",
"ln",
"log",
"log10",
"mod",
"pow",
"round",
"sign",
"sin",
"sinh",
"sqrt",
"tan",
"tanh",
],
),
("x", x, [int, float]),
]
)
if func not in ("log", "mod", "pow", "round"):
expr = "{}({})".format(func.upper(), "{}")
else:
expr = "{}({}, {})".format(func.upper(), "{}", x)
return self.apply(func=expr)
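    # Illustrative examples (hypothetical vDataFrame `vdf`):
    #   vdf["x"].apply_fun("sqrt")      # one-argument function
    #   vdf["x"].apply_fun("pow", x=3)  # two-argument function, exponent given by x
    #   vdf["x"].apply_fun("mod", x=7)  # remainder of the division by 7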
# ---#
def astype(self, dtype: str):
"""
---------------------------------------------------------------------------
Converts the vColumn to the input type.
Parameters
----------
dtype: str
New type.
Returns
-------
vDataFrame
self.parent
See Also
--------
vDataFrame.astype : Converts the vColumns to the input type.
"""
check_types([("dtype", dtype, [str])])
try:
query = "SELECT {}::{} AS {} FROM {} WHERE {} IS NOT NULL LIMIT 20".format(
self.alias, dtype, self.alias, self.parent.__genSQL__(), self.alias
)
executeSQL(query, title="Testing the Type casting.")
self.transformations += [
(
"{}::{}".format("{}", dtype),
dtype,
get_category_from_vertica_type(ctype=dtype),
)
]
self.parent.__add_to_history__(
"[AsType]: The vColumn {} was converted to {}.".format(
self.alias, dtype
)
)
return self.parent
except Exception as e:
raise ConversionError(
"{}\nThe vColumn {} can not be converted to {}".format(
e, self.alias, dtype
)
)
# ---#
def avg(self):
"""
---------------------------------------------------------------------------
Aggregates the vColumn using 'avg' (Average).
Returns
-------
float
average
See Also
--------
vDataFrame.aggregate : Computes the vDataFrame input aggregations.
"""
return self.aggregate(["avg"]).values[self.alias][0]
mean = avg
# ---#
def bar(
self,
method: str = "density",
of: str = "",
max_cardinality: int = 6,
nbins: int = 0,
h: float = 0,
ax=None,
**style_kwds,
):
"""
---------------------------------------------------------------------------
Draws the bar chart of the vColumn based on an aggregation.
Parameters
----------
method: str, optional
The method to use to aggregate the data.
count : Number of elements.
density : Percentage of the distribution.
mean : Average of the vColumn 'of'.
min : Minimum of the vColumn 'of'.
max : Maximum of the vColumn 'of'.
sum : Sum of the vColumn 'of'.
q% : q Quantile of the vColumn 'of' (ex: 50% to get the median).
            It can also be a customized aggregation (ex: AVG(column1) + 5).
of: str, optional
The vColumn to use to compute the aggregation.
max_cardinality: int, optional
Maximum number of the vColumn distinct elements to be used as categorical
(No h will be picked or computed)
nbins: int, optional
        Number of bins. If empty, an optimized number of bins will be computed.
h: float, optional
Interval width of the bar. If empty, an optimized h will be computed.
ax: Matplotlib axes object, optional
The axes to plot on.
**style_kwds
Any optional parameter to pass to the Matplotlib functions.
Returns
-------
ax
Matplotlib axes object
See Also
--------
vDataFrame[].hist : Draws the histogram of the vColumn based on an aggregation.
"""
check_types(
[
("method", method, [str]),
("of", of, [str]),
("max_cardinality", max_cardinality, [int, float]),
("nbins", nbins, [int, float]),
("h", h, [int, float]),
]
)
if of:
self.parent.are_namecols_in(of)
of = self.parent.format_colnames(of)
from verticapy.plot import bar
return bar(self, method, of, max_cardinality, nbins, h, ax=ax, **style_kwds)
# ---#
def boxplot(
self,
by: str = "",
h: float = 0,
max_cardinality: int = 8,
cat_priority: list = [],
ax=None,
**style_kwds,
):
"""
---------------------------------------------------------------------------
Draws the box plot of the vColumn.
Parameters
----------
by: str, optional
vColumn to use to partition the data.
h: float, optional
Interval width if the vColumn is numerical or of type date like. Optimized
h will be computed if the parameter is empty or invalid.
max_cardinality: int, optional
Maximum number of vColumn distinct elements to be used as categorical.
The less frequent elements will be gathered together to create a new
category : 'Others'.
cat_priority: list, optional
List of the different categories to consider when drawing the box plot.
The other categories will be filtered.
ax: Matplotlib axes object, optional
The axes to plot on.
**style_kwds
Any optional parameter to pass to the Matplotlib functions.
Returns
-------
ax
Matplotlib axes object
See Also
--------
vDataFrame.boxplot : Draws the Box Plot of the input vColumns.
"""
if isinstance(cat_priority, str) or not (isinstance(cat_priority, Iterable)):
cat_priority = [cat_priority]
check_types(
[
("by", by, [str]),
("max_cardinality", max_cardinality, [int, float]),
("h", h, [int, float]),
("cat_priority", cat_priority, [list]),
]
)
if by:
self.parent.are_namecols_in(by)
by = self.parent.format_colnames(by)
from verticapy.plot import boxplot
return boxplot(self, by, h, max_cardinality, cat_priority, ax=ax, **style_kwds)
# ---#
def category(self):
"""
---------------------------------------------------------------------------
Returns the category of the vColumn. The category will be one of the following:
date / int / float / text / binary / spatial / uuid / undefined
Returns
-------
str
vColumn category.
See Also
--------
vDataFrame[].ctype : Returns the vColumn database type.
"""
return self.transformations[-1][2]
# ---#
def clip(self, lower=None, upper=None):
"""
---------------------------------------------------------------------------
    Clips the vColumn by transforming the values less than the lower bound to
the lower bound itself and the values higher than the upper bound to the upper
bound itself.
Parameters
----------
lower: float, optional
Lower bound.
upper: float, optional
Upper bound.
Returns
-------
vDataFrame
self.parent
See Also
--------
vDataFrame[].fill_outliers : Fills the vColumn outliers using the input method.
"""
check_types([("lower", lower, [float, int]), ("upper", upper, [float, int])])
assert (lower != None) or (upper != None), ParameterError(
"At least 'lower' or 'upper' must have a numerical value"
)
lower_when = (
"WHEN {} < {} THEN {} ".format("{}", lower, lower)
if (isinstance(lower, (float, int)))
else ""
)
upper_when = (
"WHEN {} > {} THEN {} ".format("{}", upper, upper)
if (isinstance(upper, (float, int)))
else ""
)
func = "(CASE {}{}ELSE {} END)".format(lower_when, upper_when, "{}")
self.apply(func=func)
return self.parent
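    # Worked example (hypothetical vDataFrame `vdf`): vdf["score"].clip(lower=0, upper=100)
    # applies the CASE expression built above, i.e.
    #   (CASE WHEN "score" < 0 THEN 0 WHEN "score" > 100 THEN 100 ELSE "score" END)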
# ---#
def count(self):
"""
---------------------------------------------------------------------------
Aggregates the vColumn using 'count' (Number of non-Missing elements).
Returns
-------
int
number of non-Missing elements.
See Also
--------
vDataFrame.aggregate : Computes the vDataFrame input aggregations.
"""
return self.aggregate(["count"]).values[self.alias][0]
# ---#
def cut(
self,
breaks: list,
labels: list = [],
include_lowest: bool = True,
right: bool = True,
):
"""
---------------------------------------------------------------------------
Discretizes the vColumn using the input list.
Parameters
----------
breaks: list
List of values used to cut the vColumn.
labels: list, optional
Labels used to name the new categories. If empty, names will be generated.
include_lowest: bool, optional
If set to True, the lowest element of the list will be included.
right: bool, optional
How the intervals should be closed. If set to True, the intervals will be
closed on the right.
Returns
-------
vDataFrame
self.parent
See Also
--------
vDataFrame[].apply : Applies a function to the input vColumn.
"""
check_types(
[
("breaks", breaks, [list]),
("labels", labels, [list]),
("include_lowest", include_lowest, [bool]),
("right", right, [bool]),
]
)
assert self.isnum() or self.isdate(), TypeError(
"cut only works on numerical / date-like vColumns."
)
assert len(breaks) >= 2, ParameterError(
"Length of parameter 'breaks' must be greater or equal to 2."
)
assert len(breaks) == len(labels) + 1 or not (labels), ParameterError(
"Length of parameter breaks must be equal to the length of parameter 'labels' + 1 or parameter 'labels' must be empty."
)
conditions, column = [], self.alias
for idx in range(len(breaks) - 1):
first_elem, second_elem = breaks[idx], breaks[idx + 1]
if right:
op1, op2, close_l, close_r = "<", "<=", "]", "]"
else:
op1, op2, close_l, close_r = "<=", "<", "[", "["
if idx == 0 and include_lowest:
op1, close_l = "<=", "["
elif idx == 0:
op1, close_l = "<", "]"
if labels:
label = labels[idx]
else:
label = f"{close_l}{first_elem};{second_elem}{close_r}"
conditions += [
f"'{first_elem}' {op1} {column} AND {column} {op2} '{second_elem}' THEN '{label}'"
]
expr = "CASE WHEN " + " WHEN ".join(conditions) + " END"
        return self.apply(func=expr)
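    # Illustrative example (hypothetical vDataFrame `vdf`):
    #   vdf["age"].cut(breaks=[0, 18, 65, 120], labels=["minor", "adult", "senior"])
    # builds a CASE expression mapping [0;18] -> 'minor', ]18;65] -> 'adult'
    # and ]65;120] -> 'senior' (lowest bound included, intervals closed on the right).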
# ---#
def ctype(self):
"""
---------------------------------------------------------------------------
Returns the vColumn DB type.
Returns
-------
str
vColumn DB type.
"""
return self.transformations[-1][1].lower()
dtype = ctype
# ---#
def date_part(self, field: str):
"""
---------------------------------------------------------------------------
Extracts a specific TS field from the vColumn (only if the vColumn type is
date like). The vColumn will be transformed.
Parameters
----------
field: str
The field to extract. It must be one of the following:
CENTURY / DAY / DECADE / DOQ / DOW / DOY / EPOCH / HOUR / ISODOW / ISOWEEK /
ISOYEAR / MICROSECONDS / MILLENNIUM / MILLISECONDS / MINUTE / MONTH / QUARTER /
SECOND / TIME ZONE / TIMEZONE_HOUR / TIMEZONE_MINUTE / WEEK / YEAR
Returns
-------
vDataFrame
self.parent
See Also
--------
vDataFrame[].slice : Slices the vColumn using a time series rule.
"""
return self.apply(func="DATE_PART('{}', {})".format(field, "{}"))
# ---#
def decode(self, *argv):
"""
---------------------------------------------------------------------------
Encodes the vColumn using a user-defined encoding.
Parameters
----------
argv: object
Any amount of expressions.
The expression generated will look like:
even: CASE ... WHEN vColumn = argv[2 * i] THEN argv[2 * i + 1] ... END
odd : CASE ... WHEN vColumn = argv[2 * i] THEN argv[2 * i + 1] ... ELSE argv[n] END
Returns
-------
vDataFrame
self.parent
See Also
--------
vDataFrame.case_when : Creates a new feature by evaluating some conditions.
vDataFrame[].discretize : Discretizes the vColumn.
vDataFrame[].label_encode : Encodes the vColumn with Label Encoding.
vDataFrame[].get_dummies : Encodes the vColumn with One-Hot Encoding.
vDataFrame[].mean_encode : Encodes the vColumn using the mean encoding of a response.
"""
import verticapy.stats as st
return self.apply(func=st.decode(str_sql("{}"), *argv))
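    # Illustrative example (hypothetical vDataFrame `vdf`):
    #   vdf["sex"].decode("male", 0, "female", 1, 2)
    # expands, as described in the docstring above, to:
    #   CASE WHEN sex = 'male' THEN 0 WHEN sex = 'female' THEN 1 ELSE 2 END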
# ---#
def density(
self,
by: str = "",
bandwidth: float = 1.0,
kernel: str = "gaussian",
nbins: int = 200,
xlim: tuple = None,
ax=None,
**style_kwds,
):
"""
---------------------------------------------------------------------------
Draws the vColumn Density Plot.
Parameters
----------
by: str, optional
vColumn to use to partition the data.
bandwidth: float, optional
The bandwidth of the kernel.
kernel: str, optional
The method used for the plot.
gaussian : Gaussian kernel.
logistic : Logistic kernel.
sigmoid : Sigmoid kernel.
silverman : Silverman kernel.
nbins: int, optional
Maximum number of points to use to evaluate the approximate density function.
Increasing this parameter will increase the precision but will also increase
the time of the learning and scoring phases.
xlim: tuple, optional
Set the x limits of the current axes.
ax: Matplotlib axes object, optional
The axes to plot on.
**style_kwds
Any optional parameter to pass to the Matplotlib functions.
Returns
-------
ax
Matplotlib axes object
See Also
--------
vDataFrame[].hist : Draws the histogram of the vColumn based on an aggregation.
"""
check_types(
[
("by", by, [str]),
("kernel", kernel, ["gaussian", "logistic", "sigmoid", "silverman"]),
("bandwidth", bandwidth, [int, float]),
("nbins", nbins, [float, int]),
]
)
if by:
self.parent.are_namecols_in(by)
by = self.parent.format_colnames(by)
from verticapy.plot import gen_colors
from matplotlib.lines import Line2D
colors = gen_colors()
if not xlim:
xmin = self.min()
xmax = self.max()
else:
xmin, xmax = xlim
custom_lines = []
columns = self.parent[by].distinct()
for idx, column in enumerate(columns):
param = {"color": colors[idx % len(colors)]}
ax = self.parent.search(
"{} = '{}'".format(self.parent[by].alias, column)
)[self.alias].density(
bandwidth=bandwidth,
kernel=kernel,
nbins=nbins,
xlim=(xmin, xmax),
ax=ax,
**updated_dict(param, style_kwds, idx),
)
custom_lines += [
Line2D(
[0],
[0],
color=updated_dict(param, style_kwds, idx)["color"],
lw=4,
),
]
ax.set_title("KernelDensity")
ax.legend(
custom_lines,
columns,
title=by,
loc="center left",
bbox_to_anchor=[1, 0.5],
)
ax.set_xlabel(self.alias)
return ax
kernel = kernel.lower()
from verticapy.learn.neighbors import KernelDensity
schema = verticapy.options["temp_schema"]
if not (schema):
schema = "public"
name = gen_tmp_name(schema=schema, name="kde")
if isinstance(xlim, (tuple, list)):
xlim_tmp = [xlim]
else:
xlim_tmp = []
model = KernelDensity(
name,
bandwidth=bandwidth,
kernel=kernel,
nbins=nbins,
xlim=xlim_tmp,
store=False,
)
try:
result = model.fit(self.parent.__genSQL__(), [self.alias]).plot(
ax=ax, **style_kwds
)
model.drop()
return result
except:
model.drop()
raise
# ---#
def describe(
self, method: str = "auto", max_cardinality: int = 6, numcol: str = ""
):
"""
---------------------------------------------------------------------------
Aggregates the vColumn using multiple statistical aggregations:
min, max, median, unique... depending on the input method.
Parameters
----------
method: str, optional
The describe method.
auto : Sets the method to 'numerical' if the vColumn is numerical,
'categorical' otherwise.
categorical : Uses only categorical aggregations during the computation.
cat_stats : Computes statistics of a numerical column for each vColumn
category. In this case, the parameter 'numcol' must be defined.
numerical : Uses popular numerical aggregations during the computation.
max_cardinality: int, optional
Cardinality threshold to use to determine if the vColumn will be considered
as categorical.
numcol: str, optional
Numerical vColumn to use when the parameter method is set to 'cat_stats'.
Returns
-------
tablesample
An object containing the result. For more information, see
utilities.tablesample.
See Also
--------
vDataFrame.aggregate : Computes the vDataFrame input aggregations.
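Example
-------
Minimal usage sketches, assuming "vdf" is an existing vDataFrame with a
numerical vColumn "age" and a categorical vColumn "pclass" (illustrative names):
# numerical summary of "age"
vdf["age"].describe(method="numerical")
# statistics of "age" computed for each category of "pclass"
vdf["pclass"].describe(method="cat_stats", numcol="age")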
"""
check_types(
[
("method", method, ["auto", "numerical", "categorical", "cat_stats"]),
("max_cardinality", max_cardinality, [int, float]),
("numcol", numcol, [str]),
]
)
method = method.lower()
assert (method != "cat_stats") or (numcol), ParameterError(
"The parameter 'numcol' must be a vDataFrame column if the method is 'cat_stats'"
)
distinct_count, is_numeric, is_date = (
self.nunique(),
self.isnum(),
self.isdate(),
)
if (is_date) and not (method == "categorical"):
result = self.aggregate(["count", "min", "max"])
index = result.values["index"]
result = result.values[self.alias]
elif (method == "cat_stats") and (numcol != ""):
numcol = self.parent.format_colnames(numcol)
assert self.parent[numcol].category() in ("float", "int"), TypeError(
"The column 'numcol' must be numerical"
)
cast = "::int" if (self.parent[numcol].isbool()) else ""
query, cat = [], self.distinct()
if len(cat) == 1:
lp, rp = "(", ")"
else:
lp, rp = "", ""
for category in cat:
tmp_query = """SELECT
'{0}' AS 'index',
COUNT({1}) AS count,
100 * COUNT({1}) / {2} AS percent,
AVG({3}{4}) AS mean,
STDDEV({3}{4}) AS std,
MIN({3}{4}) AS min,
APPROXIMATE_PERCENTILE ({3}{4}
USING PARAMETERS percentile = 0.1) AS 'approx_10%',
APPROXIMATE_PERCENTILE ({3}{4}
USING PARAMETERS percentile = 0.25) AS 'approx_25%',
APPROXIMATE_PERCENTILE ({3}{4}
USING PARAMETERS percentile = 0.5) AS 'approx_50%',
APPROXIMATE_PERCENTILE ({3}{4}
USING PARAMETERS percentile = 0.75) AS 'approx_75%',
APPROXIMATE_PERCENTILE ({3}{4}
USING PARAMETERS percentile = 0.9) AS 'approx_90%',
MAX({3}{4}) AS max
FROM vdf_table""".format(
category, self.alias, self.parent.shape()[0], numcol, cast,
)
tmp_query += (
" WHERE {} IS NULL".format(self.alias)
if (category in ("None", None))
else " WHERE {} = '{}'".format(
bin_spatial_to_str(self.category(), self.alias), category,
)
)
query += [lp + tmp_query + rp]
query = "WITH vdf_table AS (SELECT * FROM {}) {}".format(
self.parent.__genSQL__(), " UNION ALL ".join(query)
)
title = "Describes the statics of {} partitioned by {}.".format(
numcol, self.alias
)
values = to_tablesample(query, title=title).values
elif (
((distinct_count < max_cardinality + 1) and (method != "numerical"))
or not (is_numeric)
or (method == "categorical")
):
query = """(SELECT
{0} || '',
COUNT(*)
FROM vdf_table
GROUP BY {0}
ORDER BY COUNT(*) DESC
LIMIT {1})""".format(
self.alias, max_cardinality
)
if distinct_count > max_cardinality:
query += (
"UNION ALL (SELECT 'Others', SUM(count) FROM (SELECT COUNT(*) AS count"
" FROM vdf_table WHERE {0} IS NOT NULL GROUP BY {0} ORDER BY COUNT(*)"
" DESC OFFSET {1}) VERTICAPY_SUBTABLE) ORDER BY count DESC"
).format(self.alias, max_cardinality + 1)
query = "WITH vdf_table AS (SELECT * FROM {}) {}".format(
self.parent.__genSQL__(), query
)
query_result = executeSQL(
query=query,
title="Computing the descriptive statistics of {}.".format(self.alias),
method="fetchall",
)
result = [distinct_count, self.count()] + [item[1] for item in query_result]
index = ["unique", "count"] + [item[0] for item in query_result]
else:
result = (
self.parent.describe(
method="numerical", columns=[self.alias], unique=False
)
.transpose()
.values[self.alias]
)
result = [distinct_count] + result
index = [
"unique",
"count",
"mean",
"std",
"min",
"approx_25%",
"approx_50%",
"approx_75%",
"max",
]
if method != "cat_stats":
values = {
"index": ["name", "dtype"] + index,
"value": [self.alias, self.ctype()] + result,
}
if ((is_date) and not (method == "categorical")) or (
method == "numerical"
):
self.parent.__update_catalog__({"index": index, self.alias: result})
for elem in values:
for i in range(len(values[elem])):
if isinstance(values[elem][i], decimal.Decimal):
values[elem][i] = float(values[elem][i])
return tablesample(values)
# ---#
def discretize(
self,
method: str = "auto",
h: float = 0,
nbins: int = -1,
k: int = 6,
new_category: str = "Others",
RFmodel_params: dict = {},
response: str = "",
return_enum_trans: bool = False,
):
"""
---------------------------------------------------------------------------
Discretizes the vColumn using the input method.
Parameters
----------
method: str, optional
The method to use to discretize the vColumn.
auto : Uses method 'same_width' for numerical vColumns, casts
the other types to varchar.
same_freq : Computes bins with the same number of elements.
same_width : Computes regular width bins.
smart : Uses the Random Forest on a response column to find the most
relevant interval to use for the discretization.
topk : Keeps the topk most frequent categories and merges the others
into one unique category.
h: float, optional
The interval size used to convert the vColumn. If this parameter
is equal to 0, an optimized interval will be computed.
nbins: int, optional
Number of bins used for the discretization (must be > 1)
k: int, optional
The integer k of the 'topk' method.
new_category: str, optional
The name of the merging category when using the 'topk' method.
RFmodel_params: dict, optional
Dictionary of the Random Forest model parameters used to compute the best splits
when 'method' is set to 'smart'. A RF Regressor will be trained if the response
is numerical (except ints and bools), a RF Classifier otherwise.
Example: Write {"n_estimators": 20, "max_depth": 10} to train a Random Forest with
20 trees and a maximum depth of 10.
response: str, optional
Response vColumn when method is set to 'smart'.
return_enum_trans: bool, optional
If set to True, returns the transformation instead of the vDataFrame parent and
does not apply it. This parameter is mainly useful for testing, as it allows
you to inspect the final transformation.
Returns
-------
vDataFrame
self.parent
See Also
--------
vDataFrame[].decode : Encodes the vColumn with a user-defined encoding.
vDataFrame[].get_dummies : Encodes the vColumn with One-Hot Encoding.
vDataFrame[].label_encode : Encodes the vColumn with Label Encoding.
vDataFrame[].mean_encode : Encodes the vColumn using the mean encoding of a response.
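Example
-------
Minimal usage sketches, assuming "vdf" is an existing vDataFrame with a
numerical vColumn "age" (illustrative name):
# 5 bins, each containing roughly the same number of elements
vdf["age"].discretize(method="same_freq", nbins=5)
# alternatively, only preview the transformation without applying it
vdf["age"].discretize(method="same_width", h=10, return_enum_trans=True)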
"""
check_types(
[
("RFmodel_params", RFmodel_params, [dict]),
("return_enum_trans", return_enum_trans, [bool]),
("h", h, [int, float]),
("response", response, [str]),
("nbins", nbins, [int, float]),
(
"method",
method,
["auto", "smart", "same_width", "same_freq", "topk"],
),
("return_enum_trans", return_enum_trans, [bool]),
]
)
method = method.lower()
if self.isnum() and method == "smart":
schema = verticapy.options["temp_schema"]
if not (schema):
schema = "public"
tmp_view_name = gen_tmp_name(schema=schema, name="view")
tmp_model_name = gen_tmp_name(schema=schema, name="model")
assert nbins >= 2, ParameterError(
"Parameter 'nbins' must be greater or equals to 2 in case of discretization using the method 'smart'."
)
assert response, ParameterError(
"Parameter 'response' can not be empty in case of discretization using the method 'smart'."
)
self.parent.are_namecols_in(response)
response = self.parent.format_colnames(response)
drop(tmp_view_name, method="view")
self.parent.to_db(tmp_view_name)
from verticapy.learn.ensemble import (
RandomForestClassifier,
RandomForestRegressor,
)
drop(tmp_model_name, method="model")
if self.parent[response].category() == "float":
model = RandomForestRegressor(tmp_model_name)
else:
model = RandomForestClassifier(tmp_model_name)
model.set_params({"n_estimators": 20, "max_depth": 8, "nbins": 100})
model.set_params(RFmodel_params)
parameters = model.get_params()
try:
model.fit(tmp_view_name, [self.alias], response)
query = [
"(SELECT READ_TREE(USING PARAMETERS model_name = '{}', tree_id = {}, format = 'tabular'))".format(
tmp_model_name, i
)
for i in range(parameters["n_estimators"])
]
query = "SELECT split_value FROM (SELECT split_value, MAX(weighted_information_gain) FROM ({}) VERTICAPY_SUBTABLE WHERE split_value IS NOT NULL GROUP BY 1 ORDER BY 2 DESC LIMIT {}) VERTICAPY_SUBTABLE ORDER BY split_value::float".format(
" UNION ALL ".join(query), nbins - 1
)
result = executeSQL(
query=query,
title="Computing the optimized histogram nbins using Random Forest.",
method="fetchall",
)
result = [elem[0] for elem in result]
except:
drop(tmp_view_name, method="view")
drop(tmp_model_name, method="model")
raise
drop(tmp_view_name, method="view")
drop(tmp_model_name, method="model")
result = [self.min()] + result + [self.max()]
elif method == "topk":
assert k >= 2, ParameterError(
"Parameter 'k' must be greater or equals to 2 in case of discretization using the method 'topk'"
)
distinct = self.topk(k).values["index"]
trans = (
"(CASE WHEN {} IN ({}) THEN {} || '' ELSE '{}' END)".format(
bin_spatial_to_str(self.category()),
", ".join(
[
"'{}'".format(str(elem).replace("'", "''"))
for elem in distinct
]
),
bin_spatial_to_str(self.category()),
new_category.replace("'", "''"),
),
"varchar",
"text",
)
elif self.isnum() and method == "same_freq":
assert nbins >= 2, ParameterError(
"Parameter 'nbins' must be greater or equals to 2 in case of discretization using the method 'same_freq'"
)
count = self.count()
nb = int(float(count / int(nbins)))
assert nb != 0, Exception(
"Not enough values to compute the Equal Frequency discretization"
)
total, query, nth_elems = nb, [], []
while total < int(float(count / int(nbins))) * int(nbins):
nth_elems += [str(total)]
total += nb
where = "WHERE _verticapy_row_nb_ IN ({})".format(
", ".join(["1"] + nth_elems + [str(count)])
)
query = "SELECT {} FROM (SELECT {}, ROW_NUMBER() OVER (ORDER BY {}) AS _verticapy_row_nb_ FROM {} WHERE {} IS NOT NULL) VERTICAPY_SUBTABLE {}".format(
self.alias,
self.alias,
self.alias,
self.parent.__genSQL__(),
self.alias,
where,
)
result = executeSQL(
query=query,
title="Computing the equal frequency histogram bins.",
method="fetchall",
)
result = [elem[0] for elem in result]
elif self.isnum() and method in ("same_width", "auto"):
if not (h) or h <= 0:
if nbins <= 0:
h = self.numh()
else:
h = (self.max() - self.min()) * 1.01 / nbins
if h > 0.01:
h = round(h, 2)
elif h > 0.0001:
h = round(h, 4)
elif h > 0.000001:
h = round(h, 6)
if self.category() == "int":
h = int(max(math.floor(h), 1))
floor_end = -1 if (self.category() == "int") else ""
if (h > 1) or (self.category() == "float"):
trans = (
"'[' || FLOOR({} / {}) * {} || ';' || (FLOOR({} / {}) * {} + {}{}) || ']'".format(
"{}", h, h, "{}", h, h, h, floor_end
),
"varchar",
"text",
)
else:
trans = ("FLOOR({}) || ''", "varchar", "text")
else:
trans = ("{} || ''", "varchar", "text")
if (self.isnum() and method == "same_freq") or (
self.isnum() and method == "smart"
):
n = len(result)
trans = "(CASE "
for i in range(1, n):
trans += "WHEN {} BETWEEN {} AND {} THEN '[{};{}]' ".format(
"{}", result[i - 1], result[i], result[i - 1], result[i]
)
trans += " ELSE NULL END)"
trans = (trans, "varchar", "text")
if return_enum_trans:
return trans
else:
self.transformations += [trans]
sauv = {}
for elem in self.catalog:
sauv[elem] = self.catalog[elem]
self.parent.__update_catalog__(erase=True, columns=[self.alias])
try:
if "count" in sauv:
self.catalog["count"] = sauv["count"]
self.catalog["percent"] = (
100 * sauv["count"] / self.parent.shape()[0]
)
except:
pass
self.parent.__add_to_history__(
"[Discretize]: The vColumn {} was discretized.".format(self.alias)
)
return self.parent
# ---#
def distinct(self, **kwargs):
"""
---------------------------------------------------------------------------
Returns the distinct categories of the vColumn.
Returns
-------
list
Distinct categories of the vColumn.
See Also
--------
vDataFrame.topk : Returns the vColumn most frequent elements.
"""
if "agg" not in kwargs:
query = "SELECT {} AS {} FROM {} WHERE {} IS NOT NULL GROUP BY {} ORDER BY {}".format(
bin_spatial_to_str(self.category(), self.alias),
self.alias,
self.parent.__genSQL__(),
self.alias,
self.alias,
self.alias,
)
else:
query = "SELECT {} FROM (SELECT {} AS {}, {} AS verticapy_agg FROM {} WHERE {} IS NOT NULL GROUP BY 1) x ORDER BY verticapy_agg DESC".format(
self.alias,
bin_spatial_to_str(self.category(), self.alias),
self.alias,
kwargs["agg"],
self.parent.__genSQL__(),
self.alias,
)
query_result = executeSQL(
query=query,
title="Computing the distinct categories of {}.".format(self.alias),
method="fetchall",
)
return [item for sublist in query_result for item in sublist]
# ---#
def div(self, x: float):
"""
---------------------------------------------------------------------------
Divides the vColumn by the input element.
Parameters
----------
x: float
Input number.
Returns
-------
vDataFrame
self.parent
See Also
--------
vDataFrame[].apply : Applies a function to the input vColumn.
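Example
-------
A minimal usage sketch, assuming "vdf" is an existing vDataFrame with a
numerical vColumn "fare" (illustrative name):
vdf["fare"].div(100)  # every value of "fare" is divided by 100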
"""
check_types([("x", x, [int, float])])
assert x != 0, ValueError("Division by 0 is forbidden!")
return self.apply(func="{} / ({})".format("{}", x))
# ---#
def drop(self, add_history: bool = True):
"""
---------------------------------------------------------------------------
Drops the vColumn from the vDataFrame. Dropping a vColumn means simply
not selecting it in the final generated SQL code.
Note: Dropping a vColumn can make the vDataFrame "heavier" if it is used
to compute other vColumns.
Parameters
----------
add_history: bool, optional
If set to True, the information will be stored in the vDataFrame history.
Returns
-------
vDataFrame
self.parent
See Also
--------
vDataFrame.drop: Drops the input vColumns from the vDataFrame.
"""
check_types([("add_history", add_history, [bool])])
try:
parent = self.parent
force_columns = [
column for column in self.parent._VERTICAPY_VARIABLES_["columns"]
]
force_columns.remove(self.alias)
executeSQL(
"SELECT * FROM {} LIMIT 10".format(
self.parent.__genSQL__(force_columns=force_columns)
),
print_time_sql=False,
)
self.parent._VERTICAPY_VARIABLES_["columns"].remove(self.alias)
delattr(self.parent, self.alias)
except:
self.parent._VERTICAPY_VARIABLES_["exclude_columns"] += [self.alias]
if add_history:
self.parent.__add_to_history__(
"[Drop]: vColumn {} was deleted from the vDataFrame.".format(self.alias)
)
return parent
# ---#
def drop_outliers(
self, threshold: float = 4.0, use_threshold: bool = True, alpha: float = 0.05
):
"""
---------------------------------------------------------------------------
Drops outliers in the vColumn.
Parameters
----------
threshold: float, optional
Uses the Gaussian distribution to identify outliers. After normalizing
the data (Z-Score), if the absolute value of the record is greater than
the threshold, it will be considered as an outlier.
use_threshold: bool, optional
Uses the threshold instead of the 'alpha' parameter.
alpha: float, optional
Number representing the outliers threshold. Values less than
quantile(alpha) or greater than quantile(1-alpha) will be dropped.
Returns
-------
vDataFrame
self.parent
See Also
--------
vDataFrame.fill_outliers : Fills the outliers in the vColumn.
vDataFrame.outliers : Adds a new vColumn labeled with 0 and 1
(1 meaning global outlier).
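Example
-------
Minimal usage sketches, assuming "vdf" is an existing vDataFrame with a
numerical vColumn "fare" (illustrative name):
# keeps only the records whose absolute Z-Score is lower than 3
vdf["fare"].drop_outliers(threshold=3.0)
# alternatively, drops the records outside of the [5%, 95%] quantile range
vdf["fare"].drop_outliers(use_threshold=False, alpha=0.05)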
"""
check_types(
[
("alpha", alpha, [int, float]),
("use_threshold", use_threshold, [bool]),
("threshold", threshold, [int, float]),
]
)
if use_threshold:
result = self.aggregate(func=["std", "avg"]).transpose().values
self.parent.filter(
"ABS({} - {}) / {} < {}".format(
self.alias, result["avg"][0], result["std"][0], threshold
)
)
else:
p_alpha, p_1_alpha = (
self.parent.quantile([alpha, 1 - alpha], [self.alias])
.transpose()
.values[self.alias]
)
self.parent.filter(
"({} BETWEEN {} AND {})".format(self.alias, p_alpha, p_1_alpha)
)
return self.parent
# ---#
def dropna(self):
"""
---------------------------------------------------------------------------
Filters the vDataFrame where the vColumn is missing.
Returns
-------
vDataFrame
self.parent
See Also
--------
vDataFrame.filter: Filters the data using the input expression.
"""
self.parent.filter("{} IS NOT NULL".format(self.alias))
return self.parent
# ---#
def fill_outliers(
self,
method: str = "winsorize",
threshold: float = 4.0,
use_threshold: bool = True,
alpha: float = 0.05,
):
"""
---------------------------------------------------------------------------
Fills the vColumns outliers using the input method.
Parameters
----------
method: str, optional
Method to use to fill the vColumn outliers.
mean : Replaces the upper and lower outliers by their respective
average.
null : Replaces the outliers by the NULL value.
winsorize : Clips the vColumn using quantile(alpha) as the lower bound and
quantile(1-alpha) as the upper bound if 'use_threshold' is set to False;
otherwise, uses the lower and upper Z-Scores as bounds.
threshold: float, optional
Uses the Gaussian distribution to define the outliers. After normalizing the
data (Z-Score), if the absolute value of the record is greater than the
threshold it will be considered as an outlier.
use_threshold: bool, optional
Uses the threshold instead of the 'alpha' parameter.
alpha: float, optional
Number representing the outliers threshold. Values less than quantile(alpha)
or greater than quantile(1-alpha) will be filled.
Returns
-------
vDataFrame
self.parent
See Also
--------
vDataFrame[].drop_outliers : Drops outliers in the vColumn.
vDataFrame.outliers : Adds a new vColumn labeled with 0 and 1
(1 meaning global outlier).
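Example
-------
A minimal usage sketch, assuming "vdf" is an existing vDataFrame with a
numerical vColumn "fare" (illustrative name):
# winsorizes "fare" using the 5% and 95% quantiles as bounds
vdf["fare"].fill_outliers(method="winsorize", use_threshold=False, alpha=0.05)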
"""
if isinstance(method, str):
method = method.lower()
check_types(
[
("method", method, ["winsorize", "null", "mean"]),
("alpha", alpha, [int, float]),
("use_threshold", use_threshold, [bool]),
("threshold", threshold, [int, float]),
]
)
if use_threshold:
result = self.aggregate(func=["std", "avg"]).transpose().values
p_alpha, p_1_alpha = (
-threshold * result["std"][0] + result["avg"][0],
threshold * result["std"][0] + result["avg"][0],
)
else:
query = "SELECT PERCENTILE_CONT({}) WITHIN GROUP (ORDER BY {}) OVER (), PERCENTILE_CONT(1 - {}) WITHIN GROUP (ORDER BY {}) OVER () FROM {} LIMIT 1".format(
alpha, self.alias, alpha, self.alias, self.parent.__genSQL__()
)
p_alpha, p_1_alpha = executeSQL(
query=query,
title="Computing the quantiles of {}.".format(self.alias),
method="fetchrow",
)
if method == "winsorize":
self.clip(lower=p_alpha, upper=p_1_alpha)
elif method == "null":
self.apply(
func="(CASE WHEN ({} BETWEEN {} AND {}) THEN {} ELSE NULL END)".format(
"{}", p_alpha, p_1_alpha, "{}"
)
)
elif method == "mean":
query = "WITH vdf_table AS (SELECT * FROM {}) (SELECT AVG({}) FROM vdf_table WHERE {} < {}) UNION ALL (SELECT AVG({}) FROM vdf_table WHERE {} > {})".format(
self.parent.__genSQL__(),
self.alias,
self.alias,
p_alpha,
self.alias,
self.alias,
p_1_alpha,
)
mean_alpha, mean_1_alpha = [
item[0]
for item in executeSQL(
query=query,
title="Computing the average of the {}'s lower and upper outliers.".format(
self.alias
),
method="fetchall",
)
]
if mean_alpha == None:
mean_alpha = "NULL"
if mean_1_alpha == None:
mean_alpha = "NULL"
self.apply(
func="(CASE WHEN {} < {} THEN {} WHEN {} > {} THEN {} ELSE {} END)".format(
"{}", p_alpha, mean_alpha, "{}", p_1_alpha, mean_1_alpha, "{}"
)
)
return self.parent
# ---#
def fillna(
self,
val=None,
method: str = "auto",
expr: str = "",
by: list = [],
order_by: list = [],
):
"""
---------------------------------------------------------------------------
Fills missing elements in the vColumn with a user-specified rule.
Parameters
----------
val: int/float/str, optional
Value to use to impute the vColumn.
method: str, optional
Method to use to impute the missing values.
auto : Mean for the numerical and Mode for the categorical vColumns.
bfill : Back Propagation of the next element (Constant Interpolation).
ffill : Forward propagation of the previous element (Constant Interpolation).
mean : Average.
median : Median.
mode : Mode (most frequent element).
0ifnull : 0 when the vColumn is null, 1 otherwise.
expr: str, optional
SQL expression.
by: list, optional
vColumns used in the partition.
order_by: list, optional
List of the vColumns to use to sort the data when using TS methods.
Returns
-------
vDataFrame
self.parent
See Also
--------
vDataFrame[].dropna : Drops the vColumn missing values.
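Example
-------
Minimal usage sketches, assuming "vdf" is an existing vDataFrame with a
numerical vColumn "age" and categorical vColumns "pclass" and "sex"
(illustrative names):
# imputes missing values of "age" with its global average
vdf["age"].fillna(method="mean")
# alternatively, imputes with the average computed inside each partition
vdf["age"].fillna(method="mean", by=["pclass", "sex"])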
"""
if isinstance(by, str):
by = [by]
if isinstance(order_by, str):
order_by = [order_by]
check_types(
[
(
"method",
method,
[
"auto",
"mode",
"0ifnull",
"mean",
"avg",
"median",
"ffill",
"pad",
"bfill",
"backfill",
],
),
("expr", expr, [str]),
("by", by, [list]),
("order_by", order_by, [list]),
]
)
method = method.lower()
self.parent.are_namecols_in([elem for elem in order_by] + by)
by = self.parent.format_colnames(by)
if method == "auto":
method = "mean" if (self.isnum() and self.nunique(True) > 6) else "mode"
total = self.count()
if (method == "mode") and (val == None):
val = self.mode(dropna=True)
if val == None:
warning_message = "The vColumn {} has no mode (only missing values).\nNothing was filled.".format(
self.alias
)
warnings.warn(warning_message, Warning)
return self.parent
if isinstance(val, str):
val = val.replace("'", "''")
if val != None:
new_column = "COALESCE({}, '{}')".format("{}", val)
elif expr:
new_column = "COALESCE({}, {})".format("{}", expr)
elif method == "0ifnull":
new_column = "DECODE({}, NULL, 0, 1)"
elif method in ("mean", "avg", "median"):
fun = "MEDIAN" if (method == "median") else "AVG"
if by == []:
if fun == "AVG":
val = self.avg()
elif fun == "MEDIAN":
val = self.median()
new_column = "COALESCE({}, {})".format("{}", val)
elif (len(by) == 1) and (self.parent[by[0]].nunique() < 50):
try:
if fun == "MEDIAN":
fun = "APPROXIMATE_MEDIAN"
query = "SELECT {}, {}({}) FROM {} GROUP BY {};".format(
by[0], fun, self.alias, self.parent.__genSQL__(), by[0]
)
result = executeSQL(
query,
title="Computing the different aggregations.",
method="fetchall",
)
for idx, elem in enumerate(result):
result[idx][0] = (
"NULL"
if (elem[0] == None)
else "'{}'".format(str(elem[0]).replace("'", "''"))
)
result[idx][1] = "NULL" if (elem[1] == None) else str(elem[1])
new_column = "COALESCE({}, DECODE({}, {}, NULL))".format(
"{}",
by[0],
", ".join(
["{}, {}".format(elem[0], elem[1]) for elem in result]
),
)
executeSQL(
"SELECT {} FROM {} LIMIT 1".format(
new_column.format(self.alias), self.parent.__genSQL__()
),
print_time_sql=False,
)
except:
new_column = "COALESCE({}, {}({}) OVER (PARTITION BY {}))".format(
"{}", fun, "{}", ", ".join(by)
)
else:
new_column = "COALESCE({}, {}({}) OVER (PARTITION BY {}))".format(
"{}", fun, "{}", ", ".join(by)
)
elif method in ("ffill", "pad", "bfill", "backfill"):
assert order_by, ParameterError(
"If the method is in ffill|pad|bfill|backfill then 'order_by' must be a list of at least one element to use to order the data"
)
desc = "" if (method in ("ffill", "pad")) else " DESC"
partition_by = (
"PARTITION BY {}".format(
", ".join([quote_ident(column) for column in by])
)
if (by)
else ""
)
order_by_ts = ", ".join([quote_ident(column) + desc for column in order_by])
new_column = "COALESCE({}, LAST_VALUE({} IGNORE NULLS) OVER ({} ORDER BY {}))".format(
"{}", "{}", partition_by, order_by_ts
)
if method in ("mean", "median") or isinstance(val, float):
category, ctype = "float", "float"
elif method == "0ifnull":
category, ctype = "int", "bool"
else:
category, ctype = self.category(), self.ctype()
copy_trans = [elem for elem in self.transformations]
total = self.count()
if method not in ["mode", "0ifnull"]:
max_floor = 0
all_partition = by
if method in ["ffill", "pad", "bfill", "backfill"]:
all_partition += [elem for elem in order_by]
for elem in all_partition:
if len(self.parent[elem].transformations) > max_floor:
max_floor = len(self.parent[elem].transformations)
max_floor -= len(self.transformations)
for k in range(max_floor):
self.transformations += [("{}", self.ctype(), self.category())]
self.transformations += [(new_column, ctype, category)]
try:
sauv = {}
for elem in self.catalog:
sauv[elem] = self.catalog[elem]
self.parent.__update_catalog__(erase=True, columns=[self.alias])
total = abs(self.count() - total)
except Exception as e:
self.transformations = [elem for elem in copy_trans]
raise QueryError("{}\nAn Error happened during the filling.".format(e))
if total > 0:
try:
if "count" in sauv:
self.catalog["count"] = int(sauv["count"]) + total
self.catalog["percent"] = (
100 * (int(sauv["count"]) + total) / self.parent.shape()[0]
)
except:
pass
total = int(total)
conj = "s were " if total > 1 else " was "
if verticapy.options["print_info"]:
print("{} element{}filled.".format(total, conj))
self.parent.__add_to_history__(
"[Fillna]: {} {} missing value{} filled.".format(
total, self.alias, conj,
)
)
else:
if verticapy.options["print_info"]:
print("Nothing was filled.")
self.transformations = [elem for elem in copy_trans]
for elem in sauv:
self.catalog[elem] = sauv[elem]
return self.parent
# ---#
def geo_plot(self, *args, **kwargs):
"""
---------------------------------------------------------------------------
Draws the Geospatial object.
Parameters
----------
*args / **kwargs
Any optional parameter to pass to the geopandas plot function.
For more information, see:
https://geopandas.readthedocs.io/en/latest/docs/reference/api/
geopandas.GeoDataFrame.plot.html
Returns
-------
ax
Matplotlib axes object
"""
columns = [self.alias]
check = True
if len(args) > 0:
column = args[0]
elif "column" in kwargs:
column = kwargs["column"]
else:
check = False
if check:
self.parent.are_namecols_in(column)
column = self.parent.format_colnames(column)
columns += [column]
if not ("cmap" in kwargs):
from verticapy.plot import gen_cmap
kwargs["cmap"] = gen_cmap()[0]
else:
if not ("color" in kwargs):
from verticapy.plot import gen_colors
kwargs["color"] = gen_colors()[0]
if not ("legend" in kwargs):
kwargs["legend"] = True
if not ("figsize" in kwargs):
kwargs["figsize"] = (14, 10)
return self.parent[columns].to_geopandas(self.alias).plot(*args, **kwargs)
# ---#
def get_dummies(
self,
prefix: str = "",
prefix_sep: str = "_",
drop_first: bool = True,
use_numbers_as_suffix: bool = False,
):
"""
---------------------------------------------------------------------------
Encodes the vColumn with the One-Hot Encoding algorithm.
Parameters
----------
prefix: str, optional
Prefix of the dummies.
prefix_sep: str, optional
Prefix delimiter of the dummies.
drop_first: bool, optional
Drops the first dummy to avoid the creation of correlated features.
use_numbers_as_suffix: bool, optional
Uses numbers as suffix instead of the vColumns categories.
Returns
-------
vDataFrame
self.parent
See Also
--------
vDataFrame[].decode : Encodes the vColumn with a user-defined encoding.
vDataFrame[].discretize : Discretizes the vColumn.
vDataFrame[].label_encode : Encodes the vColumn with Label Encoding.
vDataFrame[].mean_encode : Encodes the vColumn using the mean encoding of a response.
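Example
-------
A minimal usage sketch, assuming "vdf" is an existing vDataFrame with a
categorical vColumn "embarked" (illustrative name):
# creates one dummy per category of "embarked", minus one since drop_first=True
vdf["embarked"].get_dummies(prefix_sep="_")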
"""
check_types(
[
("prefix", prefix, [str]),
("prefix_sep", prefix_sep, [str]),
("drop_first", drop_first, [bool]),
("use_numbers_as_suffix", use_numbers_as_suffix, [bool]),
]
)
distinct_elements = self.distinct()
if distinct_elements not in ([0, 1], [1, 0]) or self.isbool():
all_new_features = []
prefix = (
self.alias.replace('"', "") + prefix_sep.replace('"', "_")
if not (prefix)
else prefix.replace('"', "_") + prefix_sep.replace('"', "_")
)
n = 1 if drop_first else 0
for k in range(len(distinct_elements) - n):
name = (
'"{}{}"'.format(prefix, k)
if (use_numbers_as_suffix)
else '"{}{}"'.format(
prefix, str(distinct_elements[k]).replace('"', "_")
)
)
assert not (self.parent.is_colname_in(name)), NameError(
f"A vColumn has already the alias of one of the dummies ({name}).\n"
"It can be the result of using previously the method on the vColumn "
"or simply because of ambiguous columns naming.\nBy changing one of "
"the parameters ('prefix', 'prefix_sep'), you'll be able to solve this "
"issue."
)
for k in range(len(distinct_elements) - n):
name = (
'"{}{}"'.format(prefix, k)
if (use_numbers_as_suffix)
else '"{}{}"'.format(
prefix, str(distinct_elements[k]).replace('"', "_")
)
)
name = (
name.replace(" ", "_")
.replace("/", "_")
.replace(",", "_")
.replace("'", "_")
)
expr = "DECODE({}, '{}', 1, 0)".format(
"{}", str(distinct_elements[k]).replace("'", "''")
)
transformations = self.transformations + [(expr, "bool", "int")]
new_vColumn = vColumn(
name,
parent=self.parent,
transformations=transformations,
catalog={
"min": 0,
"max": 1,
"count": self.parent.shape()[0],
"percent": 100.0,
"unique": 2,
"approx_unique": 2,
"prod": 0,
},
)
setattr(self.parent, name, new_vColumn)
setattr(self.parent, name.replace('"', ""), new_vColumn)
self.parent._VERTICAPY_VARIABLES_["columns"] += [name]
all_new_features += [name]
conj = "s were " if len(all_new_features) > 1 else " was "
self.parent.__add_to_history__(
"[Get Dummies]: One hot encoder was applied to the vColumn {}\n{} feature{}created: {}".format(
self.alias, len(all_new_features), conj, ", ".join(all_new_features)
)
+ "."
)
return self.parent
one_hot_encode = get_dummies
# ---#
def head(self, limit: int = 5):
"""
---------------------------------------------------------------------------
Returns the head of the vColumn.
Parameters
----------
limit: int, optional
Number of elements to display.
Returns
-------
tablesample
An object containing the result. For more information, see
utilities.tablesample.
See Also
--------
vDataFrame[].tail : Returns the tail of the vColumn.
"""
return self.iloc(limit=limit)
# ---#
def hist(
self,
method: str = "density",
of: str = "",
max_cardinality: int = 6,
nbins: int = 0,
h: float = 0,
ax=None,
**style_kwds,
):
"""
---------------------------------------------------------------------------
Draws the histogram of the vColumn based on an aggregation.
Parameters
----------
method: str, optional
The method to use to aggregate the data.
count : Number of elements.
density : Percentage of the distribution.
mean : Average of the vColumn 'of'.
min : Minimum of the vColumn 'of'.
max : Maximum of the vColumn 'of'.
sum : Sum of the vColumn 'of'.
q% : q Quantile of the vColumn 'of' (ex: 50% to get the median).
It can also be a customized aggregation (ex: AVG(column1) + 5).
of: str, optional
The vColumn to use to compute the aggregation.
max_cardinality: int, optional
Maximum number of the vColumn distinct elements to be used as categorical
(No h will be picked or computed)
nbins: int, optional
Number of bins. If empty, an optimized number of bins will be computed.
h: float, optional
Interval width of the bar. If empty, an optimized h will be computed.
ax: Matplotlib axes object, optional
The axes to plot on.
**style_kwds
Any optional parameter to pass to the Matplotlib functions.
Returns
-------
ax
Matplotlib axes object
See Also
--------
vDataFrame[].bar : Draws the Bar Chart of vColumn based on an aggregation.
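Example
-------
A minimal usage sketch, assuming "vdf" is an existing vDataFrame with
numerical vColumns "age" and "fare" (illustrative names):
# histogram of "age" where each bar shows the average of "fare" in that bin
vdf["age"].hist(method="mean", of="fare")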
"""
check_types(
[
("method", method, [str]),
("of", of, [str]),
("max_cardinality", max_cardinality, [int, float]),
("h", h, [int, float]),
("nbins", nbins, [int, float]),
]
)
if of:
self.parent.are_namecols_in(of)
of = self.parent.format_colnames(of)
from verticapy.plot import hist
return hist(self, method, of, max_cardinality, nbins, h, ax=ax, **style_kwds)
# ---#
def iloc(self, limit: int = 5, offset: int = 0):
"""
---------------------------------------------------------------------------
Returns a part of the vColumn (delimited by an offset and a limit).
Parameters
----------
limit: int, optional
Number of elements to display.
offset: int, optional
Number of elements to skip.
Returns
-------
tablesample
An object containing the result. For more information, see
utilities.tablesample.
See Also
--------
vDataFrame[].head : Returns the head of the vColumn.
vDataFrame[].tail : Returns the tail of the vColumn.
"""
check_types([("limit", limit, [int, float]), ("offset", offset, [int, float])])
if offset < 0:
offset = max(0, self.parent.shape()[0] - limit)
title = "Reads {}.".format(self.alias)
tail = to_tablesample(
"SELECT {} AS {} FROM {}{} LIMIT {} OFFSET {}".format(
bin_spatial_to_str(self.category(), self.alias),
self.alias,
self.parent.__genSQL__(),
self.parent.__get_last_order_by__(),
limit,
offset,
),
title=title,
)
tail.count = self.parent.shape()[0]
tail.offset = offset
tail.dtype[self.alias] = self.ctype()
tail.name = self.alias
return tail
# ---#
def isbool(self):
"""
---------------------------------------------------------------------------
Returns True if the vColumn is boolean, False otherwise.
Returns
-------
bool
True if the vColumn is boolean.
See Also
--------
vDataFrame[].isdate : Returns True if the vColumn category is date.
vDataFrame[].isnum : Returns True if the vColumn is numerical.
"""
return self.ctype().lower() in ("bool", "boolean")
# ---#
def isdate(self):
"""
---------------------------------------------------------------------------
Returns True if the vColumn category is date, False otherwise.
Returns
-------
bool
True if the vColumn category is date.
See Also
--------
vDataFrame[].isbool : Returns True if the vColumn is boolean.
vDataFrame[].isnum : Returns True if the vColumn is numerical.
"""
return self.category() == "date"
# ---#
def isin(self, val: list, *args):
"""
---------------------------------------------------------------------------
Checks whether specific records are in the vColumn and returns the new
vDataFrame of the search.
Parameters
----------
val: list
List of the different records. For example, to check if Badr and Fouad
are in the vColumn, you can write the following list: ["Fouad", "Badr"]
Returns
-------
vDataFrame
The vDataFrame of the search.
See Also
--------
vDataFrame.isin : Checks whether specific records are in the vDataFrame.
"""
if isinstance(val, str) or not (isinstance(val, Iterable)):
val = [val]
val += list(args)
check_types([("val", val, [list])])
val = {self.alias: val}
return self.parent.isin(val)
# ---#
def isnum(self):
"""
---------------------------------------------------------------------------
Returns True if the vColumn is numerical, False otherwise.
Returns
-------
bool
True if the vColumn is numerical.
See Also
--------
vDataFrame[].isbool : Returns True if the vColumn is boolean.
vDataFrame[].isdate : Returns True if the vColumn category is date.
"""
return self.category() in ("float", "int")
# ---#
def iv_woe(self, y: str, nbins: int = 10):
"""
---------------------------------------------------------------------------
Computes the Information Value (IV) / Weight Of Evidence (WOE) Table. It measures
the predictive power of an independent variable in relation to the dependent
variable.
Parameters
----------
y: str
Response vColumn.
nbins: int, optional
Maximum number of bins used for the discretization (must be > 1)
Returns
-------
tablesample
An object containing the result. For more information, see
utilities.tablesample.
See Also
--------
vDataFrame.iv_woe : Computes the Information Value (IV) Table.
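Example
-------
A minimal usage sketch, assuming "vdf" is an existing vDataFrame with a
numerical vColumn "age" and a binary response vColumn "survived"
(illustrative names):
vdf["age"].iv_woe("survived", nbins=10)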
"""
check_types([("y", y, [str]), ("nbins", nbins, [int])])
self.parent.are_namecols_in(y)
y = self.parent.format_colnames(y)
assert self.parent[y].nunique() == 2, TypeError(
"vColumn {} must be binary to use iv_woe.".format(y)
)
response_cat = self.parent[y].distinct()
response_cat.sort()
assert response_cat == [0, 1], TypeError(
"vColumn {} must be binary to use iv_woe.".format(y)
)
self.parent[y].distinct()
trans = self.discretize(
method="same_width" if self.isnum() else "topk",
nbins=nbins,
k=nbins,
new_category="Others",
return_enum_trans=True,
)[0].replace("{}", self.alias)
query = "SELECT {} AS {}, {} AS ord, {}::int AS {} FROM {}".format(
trans, self.alias, self.alias, y, y, self.parent.__genSQL__(),
)
query = "SELECT {}, MIN(ord) AS ord, SUM(1 - {}) AS non_events, SUM({}) AS events FROM ({}) x GROUP BY 1".format(
self.alias, y, y, query,
)
query = "SELECT {}, ord, non_events, events, non_events / NULLIFZERO(SUM(non_events) OVER ()) AS pt_non_events, events / NULLIFZERO(SUM(events) OVER ()) AS pt_events FROM ({}) x".format(
self.alias, query,
)
query = "SELECT {} AS index, non_events, events, pt_non_events, pt_events, CASE WHEN non_events = 0 OR events = 0 THEN 0 ELSE ZEROIFNULL(LN(pt_non_events / NULLIFZERO(pt_events))) END AS woe, CASE WHEN non_events = 0 OR events = 0 THEN 0 ELSE (pt_non_events - pt_events) * ZEROIFNULL(LN(pt_non_events / NULLIFZERO(pt_events))) END AS iv FROM ({}) x ORDER BY ord".format(
self.alias, query,
)
title = "Computing WOE & IV of {} (response = {}).".format(self.alias, y)
result = to_tablesample(query, title=title)
result.values["index"] += ["total"]
result.values["non_events"] += [sum(result["non_events"])]
result.values["events"] += [sum(result["events"])]
result.values["pt_non_events"] += [""]
result.values["pt_events"] += [""]
result.values["woe"] += [""]
result.values["iv"] += [sum(result["iv"])]
return result
# ---#
def kurtosis(self):
"""
---------------------------------------------------------------------------
Aggregates the vColumn using 'kurtosis'.
Returns
-------
float
kurtosis
See Also
--------
vDataFrame.aggregate : Computes the vDataFrame input aggregations.
"""
return self.aggregate(["kurtosis"]).values[self.alias][0]
kurt = kurtosis
# ---#
def label_encode(self):
"""
---------------------------------------------------------------------------
Encodes the vColumn using a bijection from the different categories to
[0, n - 1] (n being the vColumn cardinality).
Returns
-------
vDataFrame
self.parent
See Also
--------
vDataFrame[].decode : Encodes the vColumn with a user-defined encoding.
vDataFrame[].discretize : Discretizes the vColumn.
vDataFrame[].get_dummies : Encodes the vColumn with One-Hot Encoding.
vDataFrame[].mean_encode : Encodes the vColumn using the mean encoding of a response.
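Example
-------
A minimal usage sketch, assuming "vdf" is an existing vDataFrame with a
categorical vColumn "embarked" (illustrative name):
# replaces each category of "embarked" by an integer in [0, n - 1]
vdf["embarked"].label_encode()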
"""
if self.category() in ["date", "float"]:
warning_message = (
"label_encode is only available for categorical variables."
)
warnings.warn(warning_message, Warning)
else:
distinct_elements = self.distinct()
expr = ["DECODE({}"]
text_info = "\n"
for k in range(len(distinct_elements)):
expr += [
"'{}', {}".format(str(distinct_elements[k]).replace("'", "''"), k)
]
text_info += "\t{} => {}".format(distinct_elements[k], k)
expr = ", ".join(expr) + ", {})".format(len(distinct_elements))
self.transformations += [(expr, "int", "int")]
self.parent.__update_catalog__(erase=True, columns=[self.alias])
self.catalog["count"] = self.parent.shape()[0]
self.catalog["percent"] = 100
self.parent.__add_to_history__(
"[Label Encoding]: Label Encoding was applied to the vColumn {} using the following mapping:{}".format(
self.alias, text_info
)
)
return self.parent
# ---#
def mad(self):
"""
---------------------------------------------------------------------------
Aggregates the vColumn using 'mad' (median absolute deviation).
Returns
-------
float
mad
See Also
--------
vDataFrame.aggregate : Computes the vDataFrame input aggregations.
"""
return self.aggregate(["mad"]).values[self.alias][0]
# ---#
def max(self):
"""
---------------------------------------------------------------------------
Aggregates the vColumn using 'max' (Maximum).
Returns
-------
float/str
maximum
See Also
--------
vDataFrame.aggregate : Computes the vDataFrame input aggregations.
"""
return self.aggregate(["max"]).values[self.alias][0]
# ---#
def mean_encode(self, response: str):
"""
---------------------------------------------------------------------------
Encodes the vColumn using the average of the response partitioned by the
different vColumn categories.
Parameters
----------
response: str
Response vColumn.
Returns
-------
vDataFrame
self.parent
See Also
--------
vDataFrame[].decode : Encodes the vColumn using a user-defined encoding.
vDataFrame[].discretize : Discretizes the vColumn.
vDataFrame[].label_encode : Encodes the vColumn with Label Encoding.
vDataFrame[].get_dummies : Encodes the vColumn with One-Hot Encoding.
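Example
-------
A minimal usage sketch, assuming "vdf" is an existing vDataFrame with a
categorical vColumn "embarked" and a numerical response vColumn "survived"
(illustrative names):
# replaces each category of "embarked" by the average of "survived" in that category
vdf["embarked"].mean_encode("survived")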
"""
check_types([("response", response, [str])])
self.parent.are_namecols_in(response)
response = self.parent.format_colnames(response)
assert self.parent[response].isnum(), TypeError(
"The response column must be numerical to use a mean encoding"
)
max_floor = len(self.parent[response].transformations) - len(
self.transformations
)
for k in range(max_floor):
self.transformations += [("{}", self.ctype(), self.category())]
self.transformations += [
("AVG({}) OVER (PARTITION BY {})".format(response, "{}"), "int", "float")
]
self.parent.__update_catalog__(erase=True, columns=[self.alias])
self.parent.__add_to_history__(
"[Mean Encode]: The vColumn {} was transformed using a mean encoding with {} as Response Column.".format(
self.alias, response
)
)
if verticapy.options["print_info"]:
print("The mean encoding was successfully done.")
return self.parent
# ---#
def median(
self, approx: bool = True,
):
"""
---------------------------------------------------------------------------
Aggregates the vColumn using 'median'.
Parameters
----------
approx: bool, optional
If set to True, the approximate median is returned. By setting this
parameter to False, the function's performance can drastically decrease.
Returns
-------
float/str
median
See Also
--------
vDataFrame.aggregate : Computes the vDataFrame input aggregations.
"""
return self.quantile(0.5, approx=approx)
# ---#
def memory_usage(self):
"""
---------------------------------------------------------------------------
Returns the vColumn memory usage.
Returns
-------
float
vColumn memory usage (byte)
See Also
--------
vDataFrame.memory_usage : Returns the vDataFrame memory usage.
"""
import sys
total = (
sys.getsizeof(self)
+ sys.getsizeof(self.alias)
+ sys.getsizeof(self.transformations)
+ sys.getsizeof(self.catalog)
)
for elem in self.catalog:
total += sys.getsizeof(elem)
return total
# ---#
def min(self):
"""
---------------------------------------------------------------------------
Aggregates the vColumn using 'min' (Minimum).
Returns
-------
float/str
minimum
See Also
--------
vDataFrame.aggregate : Computes the vDataFrame input aggregations.
"""
return self.aggregate(["min"]).values[self.alias][0]
# ---#
def mode(self, dropna: bool = False, n: int = 1):
"""
---------------------------------------------------------------------------
Returns the nth most frequent element.
Parameters
----------
dropna: bool, optional
If set to True, NULL values will not be considered during the computation.
n: int, optional
Integer corresponding to the offset. For example, if n = 1 then this
method will return the mode of the vColumn.
Returns
-------
str/float/int
vColumn nth most frequent element.
See Also
--------
vDataFrame.aggregate : Computes the vDataFrame input aggregations.
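Example
-------
A minimal usage sketch, assuming "vdf" is an existing vDataFrame with a
categorical vColumn "embarked" (illustrative name):
vdf["embarked"].mode()                   # most frequent element, NULLs included
vdf["embarked"].mode(dropna=True, n=2)   # second most frequent non-NULL element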
"""
check_types([("dropna", dropna, [bool]), ("n", n, [int, float])])
if n == 1:
pre_comp = self.parent.__get_catalog_value__(self.alias, "top")
if pre_comp != "VERTICAPY_NOT_PRECOMPUTED":
if not (dropna) and (pre_comp != None):
return pre_comp
assert n >= 1, ParameterError("Parameter 'n' must be greater than or equal to 1")
where = " WHERE {} IS NOT NULL ".format(self.alias) if (dropna) else " "
result = executeSQL(
"SELECT {} FROM (SELECT {}, COUNT(*) AS _verticapy_cnt_ FROM {}{}GROUP BY {} ORDER BY _verticapy_cnt_ DESC LIMIT {}) VERTICAPY_SUBTABLE ORDER BY _verticapy_cnt_ ASC LIMIT 1".format(
self.alias, self.alias, self.parent.__genSQL__(), where, self.alias, n
),
title="Computing the mode.",
method="fetchall",
)
top = None if not (result) else result[0][0]
if not (dropna):
n = "" if (n == 1) else str(int(n))
if isinstance(top, decimal.Decimal):
top = float(top)
self.parent.__update_catalog__(
{"index": ["top{}".format(n)], self.alias: [top]}
)
return top
# ---#
def mul(self, x: float):
"""
---------------------------------------------------------------------------
Multiplies the vColumn by the input element.
Parameters
----------
x: float
Input number.
Returns
-------
vDataFrame
self.parent
See Also
--------
vDataFrame[].apply : Applies a function to the input vColumn.
"""
check_types([("x", x, [int, float])])
return self.apply(func="{} * ({})".format("{}", x))
# ---#
def nlargest(self, n: int = 10):
"""
---------------------------------------------------------------------------
Returns the n largest vColumn elements.
Parameters
----------
n: int, optional
Number of elements to return.
Returns
-------
tablesample
An object containing the result. For more information, see
utilities.tablesample.
See Also
--------
vDataFrame[].nsmallest : Returns the n smallest elements in the vColumn.
"""
check_types([("n", n, [int, float])])
query = "SELECT * FROM {} WHERE {} IS NOT NULL ORDER BY {} DESC LIMIT {}".format(
self.parent.__genSQL__(), self.alias, self.alias, n
)
title = "Reads {} {} largest elements.".format(self.alias, n)
return to_tablesample(query, title=title)
# ---#
def normalize(
self, method: str = "zscore", by: list = [], return_trans: bool = False
):
"""
---------------------------------------------------------------------------
Normalizes the vColumn using the input method.
Parameters
----------
method: str, optional
Method to use to normalize.
zscore : Normalization using the Z-Score (avg and std).
(x - avg) / std
robust_zscore : Normalization using the Robust Z-Score (median and mad).
(x - median) / (1.4826 * mad)
minmax : Normalization using the MinMax (min and max).
(x - min) / (max - min)
by: list, optional
vColumns used in the partition.
return_trans: bool, optional
If set to True, the method will return the transformation used instead of
the parent vDataFrame. This parameter is used for testing purposes.
Returns
-------
vDataFrame
self.parent
See Also
--------
vDataFrame.outliers : Computes the vDataFrame Global Outliers.
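Example
-------
Minimal usage sketches, assuming "vdf" is an existing vDataFrame with a
numerical vColumn "fare" and a categorical vColumn "pclass" (illustrative names):
# global Z-Score normalization
vdf["fare"].normalize(method="zscore")
# alternatively, Min-Max normalization computed inside each "pclass" partition
vdf["fare"].normalize(method="minmax", by=["pclass"])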
"""
if isinstance(by, str):
by = [by]
check_types(
[
("method", method, ["zscore", "robust_zscore", "minmax"]),
("by", by, [list]),
("return_trans", return_trans, [bool]),
]
)
method = method.lower()
self.parent.are_namecols_in(by)
by = self.parent.format_colnames(by)
nullifzero, n = 1, len(by)
if self.isbool():
warning_message = "Normalize doesn't work on booleans".format(self.alias)
warnings.warn(warning_message, Warning)
elif self.isnum():
if method == "zscore":
if n == 0:
nullifzero = 0
avg, stddev = self.aggregate(["avg", "std"]).values[self.alias]
if stddev == 0:
warning_message = "Can not normalize {} using a Z-Score - The Standard Deviation is null !".format(
self.alias
)
warnings.warn(warning_message, Warning)
return self
elif (n == 1) and (self.parent[by[0]].nunique() < 50):
try:
result = executeSQL(
"SELECT {}, AVG({}), STDDEV({}) FROM {} GROUP BY {}".format(
by[0],
self.alias,
self.alias,
self.parent.__genSQL__(),
by[0],
),
title="Computing the different categories to normalize.",
method="fetchall",
)
for i in range(len(result)):
if result[i][2] == None:
pass
elif math.isnan(result[i][2]):
result[i][2] = None
avg = "DECODE({}, {}, NULL)".format(
by[0],
", ".join(
[
"{}, {}".format(
"'{}'".format(str(elem[0]).replace("'", "''"))
if elem[0] != None
else "NULL",
elem[1] if elem[1] != None else "NULL",
)
for elem in result
if elem[1] != None
]
),
)
stddev = "DECODE({}, {}, NULL)".format(
by[0],
", ".join(
[
"{}, {}".format(
"'{}'".format(str(elem[0]).replace("'", "''"))
if elem[0] != None
else "NULL",
elem[2] if elem[2] != None else "NULL",
)
for elem in result
if elem[2] != None
]
),
)
executeSQL(
"SELECT {}, {} FROM {} LIMIT 1".format(
avg, stddev, self.parent.__genSQL__()
),
print_time_sql=False,
)
except:
avg, stddev = (
"AVG({}) OVER (PARTITION BY {})".format(
self.alias, ", ".join(by)
),
"STDDEV({}) OVER (PARTITION BY {})".format(
self.alias, ", ".join(by)
),
)
else:
avg, stddev = (
"AVG({}) OVER (PARTITION BY {})".format(
self.alias, ", ".join(by)
),
"STDDEV({}) OVER (PARTITION BY {})".format(
self.alias, ", ".join(by)
),
)
if return_trans:
return "({} - {}) / {}({})".format(
self.alias, avg, "NULLIFZERO" if (nullifzero) else "", stddev
)
else:
final_transformation = [
(
"({} - {}) / {}({})".format(
"{}", avg, "NULLIFZERO" if (nullifzero) else "", stddev
),
"float",
"float",
)
]
elif method == "robust_zscore":
if n > 0:
warning_message = "The method 'robust_zscore' is available only if the parameter 'by' is empty\nIf you want to normalize by grouping by elements, please use a method in zscore|minmax"
warnings.warn(warning_message, Warning)
return self
mad, med = self.aggregate(["mad", "approx_median"]).values[self.alias]
mad *= 1.4826
if mad != 0:
if return_trans:
return "({} - {}) / ({})".format(self.alias, med, mad)
else:
final_transformation = [
(
"({} - {}) / ({})".format("{}", med, mad),
"float",
"float",
)
]
else:
warning_message = "Can not normalize {} using a Robust Z-Score - The MAD is null !".format(
self.alias
)
warnings.warn(warning_message, Warning)
return self
elif method == "minmax":
if n == 0:
nullifzero = 0
cmin, cmax = self.aggregate(["min", "max"]).values[self.alias]
if cmax - cmin == 0:
warning_message = "Can not normalize {} using the MIN and the MAX. MAX = MIN !".format(
self.alias
)
warnings.warn(warning_message, Warning)
return self
elif n == 1:
try:
result = executeSQL(
"SELECT {}, MIN({}), MAX({}) FROM {} GROUP BY {}".format(
by[0],
self.alias,
self.alias,
self.parent.__genSQL__(),
by[0],
),
title="Computing the different categories {} to normalize.".format(
by[0]
),
method="fetchall",
)
cmin = "DECODE({}, {}, NULL)".format(
by[0],
", ".join(
[
"{}, {}".format(
"'{}'".format(str(elem[0]).replace("'", "''"))
if elem[0] != None
else "NULL",
elem[1] if elem[1] != None else "NULL",
)
for elem in result
if elem[1] != None
]
),
)
cmax = "DECODE({}, {}, NULL)".format(
by[0],
", ".join(
[
"{}, {}".format(
"'{}'".format(str(elem[0]).replace("'", "''"))
if elem[0] != None
else "NULL",
elem[2] if elem[2] != None else "NULL",
)
for elem in result
if elem[2] != None
]
),
)
executeSQL(
"SELECT {}, {} FROM {} LIMIT 1".format(
cmax, cmin, self.parent.__genSQL__()
),
print_time_sql=False,
)
except:
cmax, cmin = (
"MAX({}) OVER (PARTITION BY {})".format(
self.alias, ", ".join(by)
),
"MIN({}) OVER (PARTITION BY {})".format(
self.alias, ", ".join(by)
),
)
else:
cmax, cmin = (
"MAX({}) OVER (PARTITION BY {})".format(
self.alias, ", ".join(by)
),
"MIN({}) OVER (PARTITION BY {})".format(
self.alias, ", ".join(by)
),
)
if return_trans:
return "({} - {}) / {}({} - {})".format(
self.alias,
cmin,
"NULLIFZERO" if (nullifzero) else "",
cmax,
cmin,
)
else:
final_transformation = [
(
"({} - {}) / {}({} - {})".format(
"{}",
cmin,
"NULLIFZERO" if (nullifzero) else "",
cmax,
cmin,
),
"float",
"float",
)
]
if method != "robust_zscore":
max_floor = 0
for elem in by:
if len(self.parent[elem].transformations) > max_floor:
max_floor = len(self.parent[elem].transformations)
max_floor -= len(self.transformations)
for k in range(max_floor):
self.transformations += [("{}", self.ctype(), self.category())]
self.transformations += final_transformation
sauv = {}
for elem in self.catalog:
sauv[elem] = self.catalog[elem]
self.parent.__update_catalog__(erase=True, columns=[self.alias])
try:
if "count" in sauv:
self.catalog["count"] = sauv["count"]
self.catalog["percent"] = (
100 * sauv["count"] / self.parent.shape()[0]
)
for elem in sauv:
if "top" in elem:
if "percent" in elem:
self.catalog[elem] = sauv[elem]
elif elem == None:
self.catalog[elem] = None
elif method == "robust_zscore":
self.catalog[elem] = (sauv[elem] - sauv["approx_50%"]) / (
1.4826 * sauv["mad"]
)
elif method == "zscore":
self.catalog[elem] = (sauv[elem] - sauv["mean"]) / sauv[
"std"
]
elif method == "minmax":
self.catalog[elem] = (sauv[elem] - sauv["min"]) / (
sauv["max"] - sauv["min"]
)
except:
pass
if method == "robust_zscore":
self.catalog["median"] = 0
self.catalog["mad"] = 1 / 1.4826
elif method == "zscore":
self.catalog["mean"] = 0
self.catalog["std"] = 1
elif method == "minmax":
self.catalog["min"] = 0
self.catalog["max"] = 1
self.parent.__add_to_history__(
"[Normalize]: The vColumn '{}' was normalized with the method '{}'.".format(
self.alias, method
)
)
else:
raise TypeError("The vColumn must be numerical for Normalization")
return self.parent
# ---#
def nsmallest(self, n: int = 10):
"""
---------------------------------------------------------------------------
Returns the n smallest elements in the vColumn.
Parameters
----------
n: int, optional
Number of elements to return.
Returns
-------
tablesample
An object containing the result. For more information, see
utilities.tablesample.
See Also
--------
vDataFrame[].nlargest : Returns the n largest vColumn elements.
"""
check_types([("n", n, [int, float])])
query = "SELECT * FROM {} WHERE {} IS NOT NULL ORDER BY {} ASC LIMIT {}".format(
self.parent.__genSQL__(), self.alias, self.alias, n
)
title = "Reads {} {} smallest elements.".format(n, self.alias)
return to_tablesample(query, title=title)
# ---#
def numh(self, method: str = "auto"):
"""
---------------------------------------------------------------------------
Computes the optimal vColumn bar width.
Parameters
----------
method: str, optional
Method to use to compute the optimal h.
auto : Combination of Freedman Diaconis and Sturges.
freedman_diaconis : Freedman Diaconis [2 * IQR / n ** (1 / 3)]
sturges : Sturges [CEIL(log2(n)) + 1]
Returns
-------
float
optimal bar width.
"""
check_types(
[("method", method, ["sturges", "freedman_diaconis", "fd", "auto"])]
)
method = method.lower()
if method == "auto":
pre_comp = self.parent.__get_catalog_value__(self.alias, "numh")
if pre_comp != "VERTICAPY_NOT_PRECOMPUTED":
return pre_comp
assert self.isnum() or self.isdate(), ParameterError(
"numh is only available on type numeric|date"
)
if self.isnum():
result = (
self.parent.describe(
method="numerical", columns=[self.alias], unique=False
)
.transpose()
.values[self.alias]
)
count, vColumn_min, vColumn_025, vColumn_075, vColumn_max = (
result[0],
result[3],
result[4],
result[6],
result[7],
)
elif self.isdate():
min_date = self.min()
table = "(SELECT DATEDIFF('second', '{}'::timestamp, {}) AS {} FROM {}) VERTICAPY_OPTIMAL_H_TABLE".format(
min_date, self.alias, self.alias, self.parent.__genSQL__()
)
query = "SELECT COUNT({}) AS NAs, MIN({}) AS min, APPROXIMATE_PERCENTILE({} USING PARAMETERS percentile = 0.25) AS Q1, APPROXIMATE_PERCENTILE({} USING PARAMETERS percentile = 0.75) AS Q3, MAX({}) AS max FROM {}".format(
self.alias, self.alias, self.alias, self.alias, self.alias, table
)
result = executeSQL(
query,
title="Different aggregations to compute the optimal h.",
method="fetchrow",
)
count, vColumn_min, vColumn_025, vColumn_075, vColumn_max = result
sturges = max(
float(vColumn_max - vColumn_min) / int(math.floor(math.log(count, 2) + 2)),
1e-99,
)
fd = max(2.0 * (vColumn_075 - vColumn_025) / (count) ** (1.0 / 3.0), 1e-99)
if method.lower() == "sturges":
best_h = sturges
elif method.lower() in ("freedman_diaconis", "fd"):
best_h = fd
else:
best_h = max(sturges, fd)
self.parent.__update_catalog__({"index": ["numh"], self.alias: [best_h]})
if self.category() == "int":
best_h = max(math.floor(best_h), 1)
return best_h
# ---#
def nunique(self, approx: bool = True):
"""
---------------------------------------------------------------------------
Aggregates the vColumn using 'unique' (cardinality).
Parameters
----------
approx: bool, optional
If set to True, the approximate cardinality is returned. By setting
this parameter to False, the function's performance can drastically
decrease.
Returns
-------
int
vColumn cardinality (or approximate cardinality).
See Also
--------
vDataFrame.aggregate : Computes the vDataFrame input aggregations.
"""
check_types([("approx", approx, [bool])])
if approx:
return self.aggregate(func=["approx_unique"]).values[self.alias][0]
else:
return self.aggregate(func=["unique"]).values[self.alias][0]
# ---#
def pie(
self,
method: str = "density",
of: str = "",
max_cardinality: int = 6,
h: float = 0,
pie_type: str = "auto",
ax=None,
**style_kwds,
):
"""
---------------------------------------------------------------------------
Draws the pie chart of the vColumn based on an aggregation.
Parameters
----------
method: str, optional
The method to use to aggregate the data.
count : Number of elements.
density : Percentage of the distribution.
mean : Average of the vColumn 'of'.
min : Minimum of the vColumn 'of'.
max : Maximum of the vColumn 'of'.
sum : Sum of the vColumn 'of'.
q% : q Quantile of the vColumn 'of' (ex: 50% to get the median).
        It can also be a customized aggregation (ex: AVG(column1) + 5).
of: str, optional
The vColumn to use to compute the aggregation.
max_cardinality: int, optional
Maximum number of the vColumn distinct elements to be used as categorical
(No h will be picked or computed)
h: float, optional
Interval width of the bar. If empty, an optimized h will be computed.
pie_type: str, optional
The type of pie chart.
auto : Regular pie chart.
donut : Donut chart.
rose : Rose chart.
ax: Matplotlib axes object, optional
The axes to plot on.
**style_kwds
Any optional parameter to pass to the Matplotlib functions.
Returns
-------
ax
Matplotlib axes object
See Also
--------
vDataFrame.donut : Draws the donut chart of the vColumn based on an aggregation.
"""
if isinstance(pie_type, str):
pie_type = pie_type.lower()
check_types(
[
("method", method, [str]),
("of", of, [str]),
("max_cardinality", max_cardinality, [int, float]),
("h", h, [int, float]),
("pie_type", pie_type, ["auto", "donut", "rose"]),
]
)
donut = True if pie_type == "donut" else False
rose = True if pie_type == "rose" else False
if of:
self.parent.are_namecols_in(of)
of = self.parent.format_colnames(of)
from verticapy.plot import pie
return pie(
            self, method, of, max_cardinality, h, donut, rose, ax=ax, **style_kwds,
)
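    # Hypothetical usage sketch (not from the original source), assuming a
    # vDataFrame "vdf" with a categorical vColumn "segment" and a numeric
    # vColumn "price":
    #   vdf["segment"].pie()                                          # density
    #   vdf["segment"].pie(method="mean", of="price", pie_type="donut")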
# ---#
def plot(
self,
ts: str,
by: str = "",
start_date: Union[str, datetime.datetime, datetime.date] = "",
end_date: Union[str, datetime.datetime, datetime.date] = "",
area: bool = False,
step: bool = False,
ax=None,
**style_kwds,
):
"""
---------------------------------------------------------------------------
Draws the Time Series of the vColumn.
Parameters
----------
ts: str
TS (Time Series) vColumn to use to order the data. The vColumn type must be
date like (date, datetime, timestamp...) or numerical.
by: str, optional
vColumn to use to partition the TS.
start_date: str / date, optional
Input Start Date. For example, time = '03-11-1993' will filter the data when
        'ts' is earlier than November 3rd, 1993.
end_date: str / date, optional
Input End Date. For example, time = '03-11-1993' will filter the data when
        'ts' is later than November 3rd, 1993.
area: bool, optional
If set to True, draw an Area Plot.
step: bool, optional
If set to True, draw a Step Plot.
ax: Matplotlib axes object, optional
The axes to plot on.
**style_kwds
Any optional parameter to pass to the Matplotlib functions.
Returns
-------
ax
Matplotlib axes object
See Also
--------
vDataFrame.plot : Draws the time series.
"""
check_types(
[
("ts", ts, [str]),
("by", by, [str]),
("start_date", start_date, [str, datetime.datetime, datetime.date]),
("end_date", end_date, [str, datetime.datetime, datetime.date]),
("area", area, [bool]),
("step", step, [bool]),
]
)
self.parent.are_namecols_in(ts)
ts = self.parent.format_colnames(ts)
if by:
self.parent.are_namecols_in(by)
by = self.parent.format_colnames(by)
from verticapy.plot import ts_plot
return ts_plot(
self, ts, by, start_date, end_date, area, step, ax=ax, **style_kwds,
)
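    # Hypothetical usage sketch (not from the original source), assuming a
    # vDataFrame "vdf" with a timestamp vColumn "order_date" and a numeric
    # vColumn "sales":
    #   vdf["sales"].plot(ts="order_date")
    #   vdf["sales"].plot(ts="order_date", by="region", start_date="2015-01-01")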
# ---#
def product(self):
"""
---------------------------------------------------------------------------
Aggregates the vColumn using 'product'.
Returns
-------
float
product
See Also
--------
vDataFrame.aggregate : Computes the vDataFrame input aggregations.
"""
return self.aggregate(func=["prod"]).values[self.alias][0]
prod = product
# ---#
def quantile(self, x: float, approx: bool = True):
"""
---------------------------------------------------------------------------
Aggregates the vColumn using an input 'quantile'.
Parameters
----------
x: float
A float between 0 and 1 that represents the quantile.
For example: 0.25 represents Q1.
approx: bool, optional
If set to True, the approximate quantile is returned. By setting this
parameter to False, the function's performance can drastically decrease.
Returns
-------
float
quantile (or approximate quantile).
See Also
--------
vDataFrame.aggregate : Computes the vDataFrame input aggregations.
"""
check_types([("x", x, [int, float], ("approx", approx, [bool]))])
prefix = "approx_" if approx else ""
return self.aggregate(func=[prefix + "{}%".format(x * 100)]).values[self.alias][
0
]
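    # Hypothetical usage sketch (not from the original source), assuming a
    # numeric vColumn "age"; the aggregation name is built as
    # prefix + "{}%".format(x * 100), e.g. "approx_25.0%" for x = 0.25.
    #   q1     = vdf["age"].quantile(0.25)               # approximate Q1
    #   median = vdf["age"].quantile(0.5, approx=False)  # exact median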
# ---#
def range_plot(
self,
ts: str,
q: tuple = (0.25, 0.75),
start_date: Union[str, datetime.datetime, datetime.date] = "",
end_date: Union[str, datetime.datetime, datetime.date] = "",
plot_median: bool = False,
ax=None,
**style_kwds,
):
"""
---------------------------------------------------------------------------
Draws the range plot of the vColumn. The aggregations used are the median
and two input quantiles.
Parameters
----------
ts: str
TS (Time Series) vColumn to use to order the data. The vColumn type must be
date like (date, datetime, timestamp...) or numerical.
q: tuple, optional
Tuple including the 2 quantiles used to draw the Plot.
start_date: str / date, optional
Input Start Date. For example, time = '03-11-1993' will filter the data when
        'ts' is earlier than November 3rd, 1993.
end_date: str / date, optional
Input End Date. For example, time = '03-11-1993' will filter the data when
        'ts' is later than November 3rd, 1993.
plot_median: bool, optional
If set to True, the Median will be drawn.
ax: Matplotlib axes object, optional
The axes to plot on.
**style_kwds
Any optional parameter to pass to the Matplotlib functions.
Returns
-------
ax
Matplotlib axes object
See Also
--------
vDataFrame.plot : Draws the time series.
"""
check_types(
[
("ts", ts, [str]),
("q", q, [tuple]),
(
"start_date",
start_date,
[str, datetime.datetime, datetime.date, int, float],
),
(
"end_date",
end_date,
[str, datetime.datetime, datetime.date, int, float],
),
("plot_median", plot_median, [bool]),
]
)
self.parent.are_namecols_in(ts)
ts = self.parent.format_colnames(ts)
from verticapy.plot import range_curve_vdf
return range_curve_vdf(
self, ts, q, start_date, end_date, plot_median, ax=ax, **style_kwds,
)
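    # Hypothetical usage sketch (not from the original source): the 10%-90%
    # band of "sales" over "order_date", with the median overlaid.
    #   vdf["sales"].range_plot(ts="order_date", q=(0.1, 0.9), plot_median=True)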
# ---#
def rename(self, new_name: str):
"""
---------------------------------------------------------------------------
Renames the vColumn by dropping the current vColumn and creating a copy with
the specified name.
\u26A0 Warning : SQL code generation will be slower if the vDataFrame has been
transformed multiple times, so it's better practice to use
this method when first preparing your data.
Parameters
----------
new_name: str
The new vColumn alias.
Returns
-------
vDataFrame
self.parent
See Also
--------
vDataFrame.add_copy : Creates a copy of the vColumn.
"""
check_types([("new_name", new_name, [str])])
old_name = quote_ident(self.alias)
new_name = new_name.replace('"', "")
assert not (self.parent.is_colname_in(new_name)), NameError(
f"A vColumn has already the alias {new_name}.\nBy changing the parameter 'new_name', you'll be able to solve this issue."
)
self.add_copy(new_name)
parent = self.drop(add_history=False)
parent.__add_to_history__(
"[Rename]: The vColumn {} was renamed '{}'.".format(old_name, new_name)
)
return parent
# ---#
def round(self, n: int):
"""
---------------------------------------------------------------------------
        Rounds the vColumn by keeping only the input number of digits after the decimal point.
Parameters
----------
n: int
            Number of digits to keep after the decimal point.
Returns
-------
vDataFrame
self.parent
See Also
--------
vDataFrame[].apply : Applies a function to the input vColumn.
"""
check_types([("n", n, [int, float])])
return self.apply(func="ROUND({}, {})".format("{}", n))
# ---#
def sem(self):
"""
---------------------------------------------------------------------------
Aggregates the vColumn using 'sem' (standard error of mean).
Returns
-------
float
sem
See Also
--------
vDataFrame.aggregate : Computes the vDataFrame input aggregations.
"""
return self.aggregate(["sem"]).values[self.alias][0]
# ---#
def skewness(self):
"""
---------------------------------------------------------------------------
Aggregates the vColumn using 'skewness'.
Returns
-------
float
skewness
See Also
--------
vDataFrame.aggregate : Computes the vDataFrame input aggregations.
"""
return self.aggregate(["skewness"]).values[self.alias][0]
skew = skewness
# ---#
def slice(self, length: int, unit: str = "second", start: bool = True):
"""
---------------------------------------------------------------------------
Slices and transforms the vColumn using a time series rule.
Parameters
----------
length: int
Slice size.
unit: str, optional
            Slice size unit. For example, it can be 'minute', 'hour'...
start: bool, optional
If set to True, the record will be sliced using the floor of the slicing
instead of the ceiling.
Returns
-------
vDataFrame
self.parent
See Also
--------
vDataFrame[].date_part : Extracts a specific TS field from the vColumn.
"""
check_types(
[
("length", length, [int, float]),
("unit", unit, [str]),
("start", start, [bool]),
]
)
start_or_end = "START" if (start) else "END"
return self.apply(
func="TIME_SLICE({}, {}, '{}', '{}')".format(
"{}", length, unit.upper(), start_or_end
)
)
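    # Hypothetical usage sketch (not from the original source), assuming a
    # timestamp vColumn "login_ts"; the call wraps Vertica's TIME_SLICE:
    #   vdf["login_ts"].slice(30, unit="minute")            # floor of each slice
    #   vdf["login_ts"].slice(1, unit="hour", start=False)  # ceiling instead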
# ---#
def spider(
self,
by: str = "",
method: str = "density",
of: str = "",
max_cardinality: Union[int, tuple] = (6, 6),
h: Union[int, float, tuple] = (None, None),
ax=None,
**style_kwds,
):
"""
---------------------------------------------------------------------------
Draws the spider plot of the input vColumn based on an aggregation.
Parameters
----------
by: str, optional
vColumn to use to partition the data.
method: str, optional
The method to use to aggregate the data.
count : Number of elements.
density : Percentage of the distribution.
mean : Average of the vColumn 'of'.
min : Minimum of the vColumn 'of'.
max : Maximum of the vColumn 'of'.
sum : Sum of the vColumn 'of'.
q% : q Quantile of the vColumn 'of' (ex: 50% to get the median).
        It can also be a customized aggregation (ex: AVG(column1) + 5).
of: str, optional
The vColumn to use to compute the aggregation.
h: int/float/tuple, optional
Interval width of the vColumns 1 and 2 bars. It is only valid if the
vColumns are numerical. Optimized h will be computed if the parameter
is empty or invalid.
max_cardinality: int/tuple, optional
Maximum number of distinct elements for vColumns 1 and 2 to be used as
categorical (No h will be picked or computed)
ax: Matplotlib axes object, optional
The axes to plot on.
**style_kwds
Any optional parameter to pass to the Matplotlib functions.
Returns
-------
ax
Matplotlib axes object
See Also
--------
vDataFrame.bar : Draws the Bar Chart of the input vColumns based on an aggregation.
"""
check_types(
[
("by", by, [str]),
("method", method, [str]),
("of", of, [str]),
("max_cardinality", max_cardinality, [list]),
("h", h, [list, float, int]),
]
)
if by:
self.parent.are_namecols_in(by)
by = self.parent.format_colnames(by)
columns = [self.alias, by]
else:
columns = [self.alias]
if of:
self.parent.are_namecols_in(of)
of = self.parent.format_colnames(of)
from verticapy.plot import spider as spider_plot
return spider_plot(
self.parent, columns, method, of, max_cardinality, h, ax=ax, **style_kwds,
)
# ---#
def std(self):
"""
---------------------------------------------------------------------------
Aggregates the vColumn using 'std' (Standard Deviation).
Returns
-------
float
std
See Also
--------
vDataFrame.aggregate : Computes the vDataFrame input aggregations.
"""
return self.aggregate(["stddev"]).values[self.alias][0]
stddev = std
# ---#
def store_usage(self):
"""
---------------------------------------------------------------------------
        Returns the vColumn expected store usage (unit: bytes).
Returns
-------
int
vColumn expected store usage.
See Also
--------
vDataFrame.expected_store_usage : Returns the vDataFrame expected store usage.
"""
pre_comp = self.parent.__get_catalog_value__(self.alias, "store_usage")
if pre_comp != "VERTICAPY_NOT_PRECOMPUTED":
return pre_comp
store_usage = executeSQL(
"SELECT ZEROIFNULL(SUM(LENGTH({}::varchar))) FROM {}".format(
bin_spatial_to_str(self.category(), self.alias),
self.parent.__genSQL__(),
),
title="Computing the Store Usage of the vColumn {}.".format(self.alias),
method="fetchfirstelem",
)
self.parent.__update_catalog__(
{"index": ["store_usage"], self.alias: [store_usage]}
)
return store_usage
# ---#
def str_contains(self, pat: str):
"""
---------------------------------------------------------------------------
Verifies if the regular expression is in each of the vColumn records.
The vColumn will be transformed.
Parameters
----------
pat: str
Regular expression.
Returns
-------
vDataFrame
self.parent
See Also
--------
vDataFrame[].str_count : Computes the number of matches for the regular expression
in each record of the vColumn.
vDataFrame[].extract : Extracts the regular expression in each record of the
vColumn.
vDataFrame[].str_replace : Replaces the regular expression matches in each of the
vColumn records by an input value.
vDataFrame[].str_slice : Slices the vColumn.
"""
check_types([("pat", pat, [str])])
return self.apply(
func="REGEXP_COUNT({}, '{}') > 0".format("{}", pat.replace("'", "''"))
)
# ---#
def str_count(self, pat: str):
"""
---------------------------------------------------------------------------
Computes the number of matches for the regular expression in each record of
the vColumn. The vColumn will be transformed.
Parameters
----------
pat: str
regular expression.
Returns
-------
vDataFrame
self.parent
See Also
--------
vDataFrame[].str_contains : Verifies if the regular expression is in each of the
vColumn records.
vDataFrame[].extract : Extracts the regular expression in each record of the
vColumn.
vDataFrame[].str_replace : Replaces the regular expression matches in each of the
vColumn records by an input value.
vDataFrame[].str_slice : Slices the vColumn.
"""
check_types([("pat", pat, [str])])
return self.apply(
func="REGEXP_COUNT({}, '{}')".format("{}", pat.replace("'", "''"))
)
# ---#
def str_extract(self, pat: str):
"""
---------------------------------------------------------------------------
Extracts the regular expression in each record of the vColumn.
The vColumn will be transformed.
Parameters
----------
pat: str
regular expression.
Returns
-------
vDataFrame
self.parent
See Also
--------
vDataFrame[].str_contains : Verifies if the regular expression is in each of the
vColumn records.
vDataFrame[].str_count : Computes the number of matches for the regular expression
in each record of the vColumn.
vDataFrame[].str_replace : Replaces the regular expression matches in each of the
vColumn records by an input value.
vDataFrame[].str_slice : Slices the vColumn.
"""
check_types([("pat", pat, [str])])
return self.apply(
func="REGEXP_SUBSTR({}, '{}')".format("{}", pat.replace("'", "''"))
)
# ---#
def str_replace(self, to_replace: str, value: str = ""):
"""
---------------------------------------------------------------------------
        Replaces the regular expression matches in each of the vColumn records with an
input value. The vColumn will be transformed.
Parameters
----------
to_replace: str
Regular expression to replace.
value: str, optional
New value.
Returns
-------
vDataFrame
self.parent
See Also
--------
vDataFrame[].str_contains : Verifies if the regular expression is in each of the
vColumn records.
vDataFrame[].str_count : Computes the number of matches for the regular expression
in each record of the vColumn.
vDataFrame[].extract : Extracts the regular expression in each record of the
vColumn.
vDataFrame[].str_slice : Slices the vColumn.
"""
check_types([("to_replace", to_replace, [str]), ("value", value, [str])])
return self.apply(
func="REGEXP_REPLACE({}, '{}', '{}')".format(
"{}", to_replace.replace("'", "''"), value.replace("'", "''")
)
)
# ---#
def str_slice(self, start: int, step: int):
"""
---------------------------------------------------------------------------
Slices the vColumn. The vColumn will be transformed.
Parameters
----------
start: int
Start of the slicing.
step: int
Size of the slicing.
Returns
-------
vDataFrame
self.parent
See Also
--------
vDataFrame[].str_contains : Verifies if the regular expression is in each of the
vColumn records.
vDataFrame[].str_count : Computes the number of matches for the regular expression
in each record of the vColumn.
vDataFrame[].extract : Extracts the regular expression in each record of the
vColumn.
vDataFrame[].str_replace : Replaces the regular expression matches in each of the
vColumn records by an input value.
"""
check_types([("start", start, [int, float]), ("step", step, [int, float])])
return self.apply(func="SUBSTR({}, {}, {})".format("{}", start, step))
# ---#
def sub(self, x: float):
"""
---------------------------------------------------------------------------
Subtracts the input element from the vColumn.
Parameters
----------
x: float
If the vColumn type is date like (date, datetime ...), the parameter 'x'
will represent the number of seconds, otherwise it will represent a number.
Returns
-------
vDataFrame
self.parent
See Also
--------
vDataFrame[].apply : Applies a function to the input vColumn.
"""
check_types([("x", x, [int, float])])
if self.isdate():
return self.apply(func="TIMESTAMPADD(SECOND, -({}), {})".format(x, "{}"))
else:
return self.apply(func="{} - ({})".format("{}", x))
# ---#
def sum(self):
"""
---------------------------------------------------------------------------
Aggregates the vColumn using 'sum'.
Returns
-------
float
sum
See Also
--------
vDataFrame.aggregate : Computes the vDataFrame input aggregations.
"""
return self.aggregate(["sum"]).values[self.alias][0]
# ---#
def tail(self, limit: int = 5):
"""
---------------------------------------------------------------------------
Returns the tail of the vColumn.
Parameters
----------
limit: int, optional
Number of elements to display.
Returns
-------
tablesample
An object containing the result. For more information, see
utilities.tablesample.
See Also
--------
vDataFrame[].head : Returns the head of the vColumn.
"""
return self.iloc(limit=limit, offset=-1)
# ---#
def topk(self, k: int = -1, dropna: bool = True):
"""
---------------------------------------------------------------------------
        Returns the k most frequent elements and their distribution as percentages.
Parameters
----------
k: int, optional
            Number of most frequent elements to return.
dropna: bool, optional
If set to True, NULL values will not be considered during the computation.
Returns
-------
tablesample
An object containing the result. For more information, see
utilities.tablesample.
See Also
--------
vDataFrame[].describe : Computes the vColumn descriptive statistics.
"""
check_types([("k", k, [int, float]), ("dropna", dropna, [bool])])
topk = "" if (k < 1) else "LIMIT {}".format(k)
dropna = " WHERE {} IS NOT NULL".format(self.alias) if (dropna) else ""
query = "SELECT {} AS {}, COUNT(*) AS _verticapy_cnt_, 100 * COUNT(*) / {} AS percent FROM {}{} GROUP BY {} ORDER BY _verticapy_cnt_ DESC {}".format(
bin_spatial_to_str(self.category(), self.alias),
self.alias,
self.parent.shape()[0],
self.parent.__genSQL__(),
dropna,
self.alias,
topk,
)
result = executeSQL(
query,
title="Computing the top{} categories of {}.".format(
k if k > 0 else "", self.alias
),
method="fetchall",
)
values = {
"index": [item[0] for item in result],
"count": [int(item[1]) for item in result],
"percent": [float(round(item[2], 3)) for item in result],
}
return tablesample(values)
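    # Hypothetical usage sketch (not from the original source), assuming a
    # categorical vColumn "country"; the result is a tablesample with "index",
    # "count" and "percent" entries:
    #   top5 = vdf["country"].topk(k=5)
    #   top5_with_nulls = vdf["country"].topk(k=5, dropna=False)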
# ---#
def value_counts(self, k: int = 30):
"""
---------------------------------------------------------------------------
        Returns the k most frequent elements, how often they occur, and other
statistical information.
Parameters
----------
k: int, optional
            Number of most frequent elements to return.
Returns
-------
tablesample
An object containing the result. For more information, see
utilities.tablesample.
See Also
--------
vDataFrame[].describe : Computes the vColumn descriptive statistics.
"""
return self.describe(method="categorical", max_cardinality=k)
# ---#
def var(self):
"""
---------------------------------------------------------------------------
Aggregates the vColumn using 'var' (Variance).
Returns
-------
float
var
See Also
--------
vDataFrame.aggregate : Computes the vDataFrame input aggregations.
"""
return self.aggregate(["variance"]).values[self.alias][0]
variance = var
| [
"verticapy.plot.ts_plot",
"verticapy.learn.ensemble.RandomForestClassifier",
"math.floor",
"verticapy.plot.bar",
"verticapy.plot.spider",
"sys.getsizeof",
"math.log",
"verticapy.learn.neighbors.KernelDensity",
"verticapy.plot.gen_cmap",
"verticapy.plot.range_curve_vdf",
"verticapy.learn.ensemble.RandomForestRegressor",
"verticapy.plot.boxplot",
"verticapy.plot.pie",
"warnings.warn",
"verticapy.plot.gen_colors",
"verticapy.plot.hist",
"math.isnan"
] | [((21802, 21871), 'verticapy.plot.bar', 'bar', (['self', 'method', 'of', 'max_cardinality', 'nbins', 'h'], {'ax': 'ax'}), '(self, method, of, max_cardinality, nbins, h, ax=ax, **style_kwds)\n', (21805, 21871), False, 'from verticapy.plot import bar\n'), ((23663, 23735), 'verticapy.plot.boxplot', 'boxplot', (['self', 'by', 'h', 'max_cardinality', 'cat_priority'], {'ax': 'ax'}), '(self, by, h, max_cardinality, cat_priority, ax=ax, **style_kwds)\n', (23670, 23735), False, 'from verticapy.plot import boxplot\n'), ((34053, 34154), 'verticapy.learn.neighbors.KernelDensity', 'KernelDensity', (['name'], {'bandwidth': 'bandwidth', 'kernel': 'kernel', 'nbins': 'nbins', 'xlim': 'xlim_tmp', 'store': '(False)'}), '(name, bandwidth=bandwidth, kernel=kernel, nbins=nbins, xlim=\n xlim_tmp, store=False)\n', (34066, 34154), False, 'from verticapy.learn.neighbors import KernelDensity\n'), ((78840, 78910), 'verticapy.plot.hist', 'hist', (['self', 'method', 'of', 'max_cardinality', 'nbins', 'h'], {'ax': 'ax'}), '(self, method, of, max_cardinality, nbins, h, ax=ax, **style_kwds)\n', (78844, 78910), False, 'from verticapy.plot import hist\n'), ((116534, 116611), 'verticapy.plot.pie', 'pie', (['self', 'method', 'of', 'max_cardinality', 'h', 'donut', 'rose'], {'ax': 'None'}), '(self, method, of, max_cardinality, h, donut, rose, ax=None, **style_kwds)\n', (116537, 116611), False, 'from verticapy.plot import pie\n'), ((118679, 118755), 'verticapy.plot.ts_plot', 'ts_plot', (['self', 'ts', 'by', 'start_date', 'end_date', 'area', 'step'], {'ax': 'ax'}), '(self, ts, by, start_date, end_date, area, step, ax=ax, **style_kwds)\n', (118686, 118755), False, 'from verticapy.plot import ts_plot\n'), ((122295, 122384), 'verticapy.plot.range_curve_vdf', 'range_curve_vdf', (['self', 'ts', 'q', 'start_date', 'end_date', 'plot_median'], {'ax': 'ax'}), '(self, ts, q, start_date, end_date, plot_median, ax=ax, **\n style_kwds)\n', (122310, 122384), False, 'from verticapy.plot import range_curve_vdf\n'), ((128550, 128641), 'verticapy.plot.spider', 'spider_plot', (['self.parent', 'columns', 'method', 'of', 'max_cardinality', 'h'], {'ax': 'ax'}), '(self.parent, columns, method, of, max_cardinality, h, ax=ax, **\n style_kwds)\n', (128561, 128641), True, 'from verticapy.plot import spider as spider_plot\n'), ((32354, 32366), 'verticapy.plot.gen_colors', 'gen_colors', ([], {}), '()\n', (32364, 32366), False, 'from verticapy.plot import gen_colors\n'), ((86794, 86833), 'warnings.warn', 'warnings.warn', (['warning_message', 'Warning'], {}), '(warning_message, Warning)\n', (86807, 86833), False, 'import math, re, decimal, warnings, datetime\n'), ((91370, 91397), 'sys.getsizeof', 'sys.getsizeof', (['self.catalog'], {}), '(self.catalog)\n', (91383, 91397), False, 'import sys\n'), ((91463, 91482), 'sys.getsizeof', 'sys.getsizeof', (['elem'], {}), '(elem)\n', (91476, 91482), False, 'import sys\n'), ((96570, 96609), 'warnings.warn', 'warnings.warn', (['warning_message', 'Warning'], {}), '(warning_message, Warning)\n', (96583, 96609), False, 'import math, re, decimal, warnings, datetime\n'), ((46007, 46044), 'verticapy.learn.ensemble.RandomForestRegressor', 'RandomForestRegressor', (['tmp_model_name'], {}), '(tmp_model_name)\n', (46028, 46044), False, 'from verticapy.learn.ensemble import RandomForestClassifier, RandomForestRegressor\n'), ((46087, 46125), 'verticapy.learn.ensemble.RandomForestClassifier', 'RandomForestClassifier', (['tmp_model_name'], {}), '(tmp_model_name)\n', (46109, 46125), False, 'from verticapy.learn.ensemble import 
RandomForestClassifier, RandomForestRegressor\n'), ((64488, 64527), 'warnings.warn', 'warnings.warn', (['warning_message', 'Warning'], {}), '(warning_message, Warning)\n', (64501, 64527), False, 'import math, re, decimal, warnings, datetime\n'), ((91320, 91355), 'sys.getsizeof', 'sys.getsizeof', (['self.transformations'], {}), '(self.transformations)\n', (91333, 91355), False, 'import sys\n'), ((113257, 113275), 'math.floor', 'math.floor', (['best_h'], {}), '(best_h)\n', (113267, 113275), False, 'import math, re, decimal, warnings, datetime\n'), ((71394, 71404), 'verticapy.plot.gen_cmap', 'gen_cmap', ([], {}), '()\n', (71402, 71404), False, 'from verticapy.plot import gen_cmap\n'), ((71551, 71563), 'verticapy.plot.gen_colors', 'gen_colors', ([], {}), '()\n', (71561, 71563), False, 'from verticapy.plot import gen_colors\n'), ((91246, 91265), 'sys.getsizeof', 'sys.getsizeof', (['self'], {}), '(self)\n', (91259, 91265), False, 'import sys\n'), ((91280, 91305), 'sys.getsizeof', 'sys.getsizeof', (['self.alias'], {}), '(self.alias)\n', (91293, 91305), False, 'import sys\n'), ((97070, 97109), 'warnings.warn', 'warnings.warn', (['warning_message', 'Warning'], {}), '(warning_message, Warning)\n', (97083, 97109), False, 'import math, re, decimal, warnings, datetime\n'), ((101480, 101519), 'warnings.warn', 'warnings.warn', (['warning_message', 'Warning'], {}), '(warning_message, Warning)\n', (101493, 101519), False, 'import math, re, decimal, warnings, datetime\n'), ((102343, 102382), 'warnings.warn', 'warnings.warn', (['warning_message', 'Warning'], {}), '(warning_message, Warning)\n', (102356, 102382), False, 'import math, re, decimal, warnings, datetime\n'), ((112765, 112783), 'math.log', 'math.log', (['count', '(2)'], {}), '(count, 2)\n', (112773, 112783), False, 'import math, re, decimal, warnings, datetime\n'), ((102841, 102880), 'warnings.warn', 'warnings.warn', (['warning_message', 'Warning'], {}), '(warning_message, Warning)\n', (102854, 102880), False, 'import math, re, decimal, warnings, datetime\n'), ((50296, 50309), 'math.floor', 'math.floor', (['h'], {}), '(h)\n', (50306, 50309), False, 'import math, re, decimal, warnings, datetime\n'), ((97966, 97990), 'math.isnan', 'math.isnan', (['result[i][2]'], {}), '(result[i][2])\n', (97976, 97990), False, 'import math, re, decimal, warnings, datetime\n')] |
import os
import logging
from tempfile import mkstemp, mkdtemp
from shutil import rmtree
from zipfile import ZipFile, ZIP_DEFLATED
from datetime import datetime
from boto.s3.connection import S3Connection
from boto.s3.key import Key
__version__ = "0.1.8"
__author__ = "<NAME>"
__email__ = "<EMAIL>"
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
LOGGING_DEFAULTS = {"level": logging.INFO,
"format": "%(asctime)s [%(levelname)s]: %(message)s"}
def setup_logging(**kwargs):
"""Convenience function for setting up some sane logging defaults"""
opts = dict(LOGGING_DEFAULTS.items() + kwargs.items())
logging.basicConfig(**opts)
class ZipBackup(object):
"""
A compressed ZIP file backup
Note: large inclusion operations can sometimes take time as files
are compressed on the fly. This prevents all the files being copied
to a temporary location (and using unnecessary extra space) and
storing up the need for a potentially large compression at the end.
"""
def __init__(self, name):
self.name = name
_, self._path = mkstemp()
logger.debug("Created temporary file %s" % self._path)
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self.close()
def close(self):
os.remove(self._path)
logger.debug("Removed temporary file %s" % self._path)
def include_directory(self, path, preserve_paths=False, name=None):
"""Add the contents of a directory to the backup"""
path = os.path.abspath(path)
logger.debug("Adding directory %s" % path)
with ZipFile(self._path, 'a', ZIP_DEFLATED, allowZip64=True) as zipfile:
for base,dirs,files in os.walk(path):
logger.debug("Walking directory %s" % path)
for file in files:
filename = os.path.join(base, file)
try:
zipfile.write(filename,
self._get_filename_for_archive(
path, filename, preserve_paths, name))
logger.info("Added file %s" % filename)
except:
logger.warn("Could not add file %s" % file, exc_info=True)
logger.debug("Finished directory %s" % path)
def save_to_s3(self, bucket, access_key, secret_key, **kwargs):
"""Save the backup to Amazon S3"""
logger.info("Saving to S3 in '%s' bucket" % bucket)
conn = S3Connection(access_key, secret_key, **kwargs)
bucket = conn.get_bucket(bucket)
key = Key(bucket)
key.key = '%<KEY>' % \
(self.name, datetime.now().strftime("%Y%m%d%H%M%S"))
key.set_contents_from_filename(self._path)
logger.info("Saving to S3 done %s" % key.key)
def include_new_dir(self, name):
"""Add a new empty directory to the backup"""
return BackupIncludedDirectory(name, self)
def _get_filename_for_archive(self, directory, filename,
preserve_paths, name):
if not preserve_paths:
filename = filename.replace(directory, "")
if name is not None:
filename = name + os.sep + filename
return filename
class BackupIncludedDirectory(object):
"""A new directory which is subsequently added to the backup"""
def __init__(self, name, owner):
self.name = name
self.path = mkdtemp()
self._owner = owner
logger.debug("Created temporary directory %s" % self.path)
def __str__(self):
return self.path
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self._owner.include_directory(self.path, preserve_paths=False,
name=self.name)
rmtree(self.path)
logger.debug("Removed temporary directory %s" % self.path)
| [
"logging.getLogger",
"logging.basicConfig",
"logging.NullHandler",
"zipfile.ZipFile",
"os.path.join",
"boto.s3.connection.S3Connection",
"boto.s3.key.Key",
"datetime.datetime.now",
"tempfile.mkdtemp",
"shutil.rmtree",
"os.path.abspath",
"tempfile.mkstemp",
"os.walk",
"os.remove"
] | [((310, 337), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (327, 337), False, 'import logging\n'), ((356, 377), 'logging.NullHandler', 'logging.NullHandler', ([], {}), '()\n', (375, 377), False, 'import logging\n'), ((663, 690), 'logging.basicConfig', 'logging.basicConfig', ([], {}), '(**opts)\n', (682, 690), False, 'import logging\n'), ((1133, 1142), 'tempfile.mkstemp', 'mkstemp', ([], {}), '()\n', (1140, 1142), False, 'from tempfile import mkstemp, mkdtemp\n'), ((1372, 1393), 'os.remove', 'os.remove', (['self._path'], {}), '(self._path)\n', (1381, 1393), False, 'import os\n'), ((1609, 1630), 'os.path.abspath', 'os.path.abspath', (['path'], {}), '(path)\n', (1624, 1630), False, 'import os\n'), ((2591, 2637), 'boto.s3.connection.S3Connection', 'S3Connection', (['access_key', 'secret_key'], {}), '(access_key, secret_key, **kwargs)\n', (2603, 2637), False, 'from boto.s3.connection import S3Connection\n'), ((2693, 2704), 'boto.s3.key.Key', 'Key', (['bucket'], {}), '(bucket)\n', (2696, 2704), False, 'from boto.s3.key import Key\n'), ((3564, 3573), 'tempfile.mkdtemp', 'mkdtemp', ([], {}), '()\n', (3571, 3573), False, 'from tempfile import mkstemp, mkdtemp\n'), ((3974, 3991), 'shutil.rmtree', 'rmtree', (['self.path'], {}), '(self.path)\n', (3980, 3991), False, 'from shutil import rmtree\n'), ((1695, 1750), 'zipfile.ZipFile', 'ZipFile', (['self._path', '"""a"""', 'ZIP_DEFLATED'], {'allowZip64': '(True)'}), "(self._path, 'a', ZIP_DEFLATED, allowZip64=True)\n", (1702, 1750), False, 'from zipfile import ZipFile, ZIP_DEFLATED\n'), ((1798, 1811), 'os.walk', 'os.walk', (['path'], {}), '(path)\n', (1805, 1811), False, 'import os\n'), ((1939, 1963), 'os.path.join', 'os.path.join', (['base', 'file'], {}), '(base, file)\n', (1951, 1963), False, 'import os\n'), ((2760, 2774), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2772, 2774), False, 'from datetime import datetime\n')] |
# Generated by Django 2.0.2 on 2019-03-08 13:03
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Delivery',
fields=[
('create_time', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
('update_time', models.DateTimeField(auto_now=True, verbose_name='更新时间')),
('is_delete', models.BooleanField(default=False, verbose_name='删除标记')),
('id', models.AutoField(primary_key=True, serialize=False, verbose_name='投递ID')),
('delivery_status', models.CharField(choices=[('DD', '待定'), ('YQ', '邀请面试'), ('WJ', '婉拒')], default='DD', max_length=2, verbose_name='投递状态')),
],
options={
'verbose_name': '面试',
'verbose_name_plural': '面试',
},
),
]
| [
"django.db.models.DateTimeField",
"django.db.models.AutoField",
"django.db.models.CharField",
"django.db.models.BooleanField"
] | [((313, 373), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)', 'verbose_name': '"""创建时间"""'}), "(auto_now_add=True, verbose_name='创建时间')\n", (333, 373), False, 'from django.db import migrations, models\n'), ((408, 464), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)', 'verbose_name': '"""更新时间"""'}), "(auto_now=True, verbose_name='更新时间')\n", (428, 464), False, 'from django.db import migrations, models\n'), ((497, 552), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)', 'verbose_name': '"""删除标记"""'}), "(default=False, verbose_name='删除标记')\n", (516, 552), False, 'from django.db import migrations, models\n'), ((578, 650), 'django.db.models.AutoField', 'models.AutoField', ([], {'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""投递ID"""'}), "(primary_key=True, serialize=False, verbose_name='投递ID')\n", (594, 650), False, 'from django.db import migrations, models\n'), ((689, 812), 'django.db.models.CharField', 'models.CharField', ([], {'choices': "[('DD', '待定'), ('YQ', '邀请面试'), ('WJ', '婉拒')]", 'default': '"""DD"""', 'max_length': '(2)', 'verbose_name': '"""投递状态"""'}), "(choices=[('DD', '待定'), ('YQ', '邀请面试'), ('WJ', '婉拒')],\n default='DD', max_length=2, verbose_name='投递状态')\n", (705, 812), False, 'from django.db import migrations, models\n')] |
"""Test deCONZ diagnostics."""
from unittest.mock import patch
from pydeconz.websocket import STATE_RUNNING
from homeassistant.const import Platform
from .test_gateway import DECONZ_CONFIG, setup_deconz_integration
from tests.components.diagnostics import get_diagnostics_for_config_entry
async def test_entry_diagnostics(
hass, hass_client, aioclient_mock, mock_deconz_websocket
):
"""Test config entry diagnostics."""
config_entry = await setup_deconz_integration(hass, aioclient_mock)
await mock_deconz_websocket(state=STATE_RUNNING)
await hass.async_block_till_done()
with patch(
"homeassistant.helpers.system_info.async_get_system_info",
return_value={"get_system_info": "fake data"},
):
assert await get_diagnostics_for_config_entry(
hass, hass_client, config_entry
) == {
"home_assistant": {"get_system_info": "fake data"},
"config_entry": dict(config_entry.data),
"deconz_config": DECONZ_CONFIG,
"websocket_state": STATE_RUNNING,
"deconz_ids": {},
"entities": {
str(Platform.ALARM_CONTROL_PANEL): [],
str(Platform.BINARY_SENSOR): [],
str(Platform.CLIMATE): [],
str(Platform.COVER): [],
str(Platform.FAN): [],
str(Platform.LIGHT): [],
str(Platform.LOCK): [],
str(Platform.NUMBER): [],
str(Platform.SENSOR): [],
str(Platform.SIREN): [],
str(Platform.SWITCH): [],
},
"events": {},
"alarm_systems": {},
"groups": {},
"lights": {},
"scenes": {},
"sensors": {},
}
| [
"tests.components.diagnostics.get_diagnostics_for_config_entry",
"unittest.mock.patch"
] | [((610, 725), 'unittest.mock.patch', 'patch', (['"""homeassistant.helpers.system_info.async_get_system_info"""'], {'return_value': "{'get_system_info': 'fake data'}"}), "('homeassistant.helpers.system_info.async_get_system_info',\n return_value={'get_system_info': 'fake data'})\n", (615, 725), False, 'from unittest.mock import patch\n'), ((767, 832), 'tests.components.diagnostics.get_diagnostics_for_config_entry', 'get_diagnostics_for_config_entry', (['hass', 'hass_client', 'config_entry'], {}), '(hass, hass_client, config_entry)\n', (799, 832), False, 'from tests.components.diagnostics import get_diagnostics_for_config_entry\n')] |
from __future__ import print_function # For Py2/3 compatibility
import async_eel
import random
import asyncio
loop = asyncio.get_event_loop()
@async_eel.expose
async def py_random():
return random.random()
async def print_num(n):
"""callback of js_random"""
print('Got this from Javascript:', n)
async def main():
try:
async_eel.init('web')
await async_eel.start('callbacks.html', size=(400, 300))
# Call Javascript function, and pass explicit callback function
await async_eel.js_random()(print_num)
# Do the same with an inline callback
await async_eel.js_random()(lambda n: print('2Got this from Javascript:', n))
except Exception:
import traceback
traceback.print_exc()
if __name__ == '__main__':
asyncio.run_coroutine_threadsafe(main(), loop)
loop.run_forever()
| [
"async_eel.init",
"async_eel.js_random",
"traceback.print_exc",
"random.random",
"asyncio.get_event_loop",
"async_eel.start"
] | [((119, 143), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (141, 143), False, 'import asyncio\n'), ((198, 213), 'random.random', 'random.random', ([], {}), '()\n', (211, 213), False, 'import random\n'), ((351, 372), 'async_eel.init', 'async_eel.init', (['"""web"""'], {}), "('web')\n", (365, 372), False, 'import async_eel\n'), ((387, 437), 'async_eel.start', 'async_eel.start', (['"""callbacks.html"""'], {'size': '(400, 300)'}), "('callbacks.html', size=(400, 300))\n", (402, 437), False, 'import async_eel\n'), ((746, 767), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (765, 767), False, 'import traceback\n'), ((525, 546), 'async_eel.js_random', 'async_eel.js_random', ([], {}), '()\n', (544, 546), False, 'import async_eel\n'), ((619, 640), 'async_eel.js_random', 'async_eel.js_random', ([], {}), '()\n', (638, 640), False, 'import async_eel\n')] |
from dataset.baseset import BaseSet
import random, cv2
import numpy as np
class iNaturalist(BaseSet):
def __init__(self, mode='train', cfg=None, transform=None):
super(iNaturalist, self).__init__(mode, cfg, transform)
random.seed(0)
self.class_dict = self._get_class_dict()
def __getitem__(self, index):
if self.cfg.TRAIN.SAMPLER.TYPE == "weighted sampler" and self.mode == 'train':
assert self.cfg.TRAIN.SAMPLER.WEIGHTED_SAMPLER.TYPE in ["balance", 'square', 'progressive']
if self.cfg.TRAIN.SAMPLER.WEIGHTED_SAMPLER.TYPE == "balance":
sample_class = random.randint(0, self.num_classes - 1)
elif self.cfg.TRAIN.SAMPLER.WEIGHTED_SAMPLER.TYPE == "square":
sample_class = np.random.choice(np.arange(self.num_classes), p=self.square_p)
else:
sample_class = np.random.choice(np.arange(self.num_classes), p=self.progress_p)
sample_indexes = self.class_dict[sample_class]
index = random.choice(sample_indexes)
now_info = self.data[index]
img = self._get_image(now_info)
image = self.transform(img)
meta = dict()
image_label = now_info['category_id'] # 0-index
return image, image_label, meta
| [
"random.choice",
"random.randint",
"random.seed",
"numpy.arange"
] | [((248, 262), 'random.seed', 'random.seed', (['(0)'], {}), '(0)\n', (259, 262), False, 'import random, cv2\n'), ((1059, 1088), 'random.choice', 'random.choice', (['sample_indexes'], {}), '(sample_indexes)\n', (1072, 1088), False, 'import random, cv2\n'), ((651, 690), 'random.randint', 'random.randint', (['(0)', '(self.num_classes - 1)'], {}), '(0, self.num_classes - 1)\n', (665, 690), False, 'import random, cv2\n'), ((816, 843), 'numpy.arange', 'np.arange', (['self.num_classes'], {}), '(self.num_classes)\n', (825, 843), True, 'import numpy as np\n'), ((930, 957), 'numpy.arange', 'np.arange', (['self.num_classes'], {}), '(self.num_classes)\n', (939, 957), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
import mock
from nose.tools import * # noqa (PEP8 asserts)
import hmac
import hashlib
from StringIO import StringIO
from django.core.exceptions import ValidationError
from django.db import IntegrityError
import furl
from framework.auth import get_or_create_user
from framework.auth.core import Auth
from osf.models import OSFUser, AbstractNode
from addons.wiki.models import WikiVersion
from osf.exceptions import BlacklistedEmailError
from website import settings
from website.conferences import views
from website.conferences import utils, message
from website.util import api_url_for, web_url_for
from tests.base import OsfTestCase, fake
from osf_tests.factories import ConferenceFactory, ProjectFactory, UserFactory
def assert_absolute(url):
parsed_domain = furl.furl(settings.DOMAIN)
parsed_url = furl.furl(url)
assert_equal(parsed_domain.host, parsed_url.host)
def assert_equal_urls(first, second):
parsed_first = furl.furl(first)
parsed_first.port = None
parsed_second = furl.furl(second)
parsed_second.port = None
assert_equal(parsed_first, parsed_second)
def create_fake_conference_nodes(n, conference):
nodes = []
for i in range(n):
node = ProjectFactory(is_public=True)
conference.submissions.add(node)
node.save()
nodes.append(node)
return nodes
def create_fake_conference_nodes_bad_data(conference, n, bad_n, endpoint):
nodes = []
for i in range(n):
node = ProjectFactory(is_public=True)
conference.submissions.add(node)
# inject bad data
if i < bad_n:
# Delete only contributor
node.contributor_set.filter(user=node.contributors.first()).delete()
node.save()
nodes.append(node)
return nodes
class TestConferenceUtils(OsfTestCase):
def test_get_or_create_user_exists(self):
user = UserFactory()
fetched, created = get_or_create_user(user.fullname, user.username, is_spam=True)
assert_false(created)
assert_equal(user._id, fetched._id)
assert_false('is_spam' in fetched.system_tags)
def test_get_or_create_user_not_exists(self):
fullname = '<NAME>'
username = '<EMAIL>'
fetched, created = get_or_create_user(fullname, username, is_spam=False)
fetched.save() # in order to access m2m fields, e.g. tags
assert_true(created)
assert_equal(fetched.fullname, fullname)
assert_equal(fetched.username, username)
assert_false('is_spam' in fetched.system_tags)
def test_get_or_create_user_is_spam(self):
fullname = '<NAME>'
username = '<EMAIL>'
fetched, created = get_or_create_user(fullname, username, is_spam=True)
fetched.save() # in order to access m2m fields, e.g. tags
assert_true(created)
assert_equal(fetched.fullname, fullname)
assert_equal(fetched.username, username)
assert_true('is_spam' in fetched.system_tags)
def test_get_or_create_user_with_blacklisted_domain(self):
fullname = 'Kanye West'
username = '<EMAIL>'
with assert_raises(BlacklistedEmailError) as e:
get_or_create_user(fullname, username, is_spam=True)
assert_equal(e.exception.message, 'Invalid Email')
class ContextTestCase(OsfTestCase):
MAILGUN_API_KEY = 'mailkimp'
@classmethod
def setUpClass(cls):
super(ContextTestCase, cls).setUpClass()
settings.MAILGUN_API_KEY, cls._MAILGUN_API_KEY = cls.MAILGUN_API_KEY, settings.MAILGUN_API_KEY
@classmethod
def tearDownClass(cls):
super(ContextTestCase, cls).tearDownClass()
settings.MAILGUN_API_KEY = cls._MAILGUN_API_KEY
def make_context(self, method='POST', **kwargs):
data = {
'X-Mailgun-Sscore': 0,
'timestamp': '123',
'token': 'secret',
'signature': hmac.new(
key=settings.MAILGUN_API_KEY,
msg='{}{}'.format('123', 'secret'),
digestmod=hashlib.sha256,
).hexdigest(),
}
data.update(kwargs.pop('data', {}))
data = {
key: value
for key, value in data.items()
if value is not None
}
return self.app.app.test_request_context(method=method, data=data, **kwargs)
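    # The 'signature' above mirrors Mailgun's webhook verification scheme: an
    # HMAC-SHA256 of timestamp + token, keyed with the Mailgun API key. A
    # standalone equivalent of the same computation:
    #   hmac.new(key=settings.MAILGUN_API_KEY,
    #            msg='{}{}'.format('123', 'secret'),
    #            digestmod=hashlib.sha256).hexdigest()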
class TestProvisionNode(ContextTestCase):
def setUp(self):
super(TestProvisionNode, self).setUp()
self.node = ProjectFactory()
self.user = self.node.creator
self.conference = ConferenceFactory()
self.body = 'dragon on my back'
self.content = 'dragon attack'
self.attachment = StringIO(self.content)
self.recipient = '{0}{1}-<EMAIL>'.format(
'test-' if settings.DEV_MODE else '',
self.conference.endpoint,
)
def make_context(self, **kwargs):
data = {
'attachment-count': '1',
'attachment-1': (self.attachment, 'attachment-1'),
'X-Mailgun-Sscore': 0,
'recipient': self.recipient,
'stripped-text': self.body,
}
data.update(kwargs.pop('data', {}))
return super(TestProvisionNode, self).make_context(data=data, **kwargs)
def test_provision(self):
with self.make_context():
msg = message.ConferenceMessage()
utils.provision_node(self.conference, msg, self.node, self.user)
assert_true(self.node.is_public)
assert_in(self.conference.admins.first(), self.node.contributors)
assert_in('emailed', self.node.system_tags)
assert_in(self.conference.endpoint, self.node.system_tags)
assert self.node in self.conference.submissions.all()
assert_not_in('spam', self.node.system_tags)
def test_provision_private(self):
self.conference.public_projects = False
self.conference.save()
with self.make_context():
msg = message.ConferenceMessage()
utils.provision_node(self.conference, msg, self.node, self.user)
assert_false(self.node.is_public)
assert_in(self.conference.admins.first(), self.node.contributors)
assert_in('emailed', self.node.system_tags)
assert_not_in('spam', self.node.system_tags)
def test_provision_spam(self):
with self.make_context(data={'X-Mailgun-Sscore': message.SSCORE_MAX_VALUE + 1}):
msg = message.ConferenceMessage()
utils.provision_node(self.conference, msg, self.node, self.user)
assert_false(self.node.is_public)
assert_in(self.conference.admins.first(), self.node.contributors)
assert_in('emailed', self.node.system_tags)
assert_in('spam', self.node.system_tags)
@mock.patch('website.conferences.utils.waterbutler_api_url_for')
@mock.patch('website.conferences.utils.requests.put')
def test_upload(self, mock_put, mock_get_url):
mock_get_url.return_value = 'http://queen.com/'
file_name = 'hammer-to-fall'
self.attachment.filename = file_name
self.attachment.content_type = 'application/json'
utils.upload_attachment(self.user, self.node, self.attachment)
mock_get_url.assert_called_with(
self.node._id,
'osfstorage',
_internal=True,
base_url=self.node.osfstorage_region.waterbutler_url,
cookie=self.user.get_or_create_cookie(),
name=file_name
)
mock_put.assert_called_with(
mock_get_url.return_value,
data=self.content,
)
@mock.patch('website.conferences.utils.waterbutler_api_url_for')
@mock.patch('website.conferences.utils.requests.put')
def test_upload_no_file_name(self, mock_put, mock_get_url):
mock_get_url.return_value = 'http://queen.com/'
self.attachment.filename = ''
self.attachment.content_type = 'application/json'
utils.upload_attachment(self.user, self.node, self.attachment)
mock_get_url.assert_called_with(
self.node._id,
'osfstorage',
_internal=True,
base_url=self.node.osfstorage_region.waterbutler_url,
cookie=self.user.get_or_create_cookie(),
name=settings.MISSING_FILE_NAME,
)
mock_put.assert_called_with(
mock_get_url.return_value,
data=self.content,
)
@mock.patch('website.conferences.utils.upload_attachments')
def test_add_poster_by_email(self, mock_upload_attachments):
conference = ConferenceFactory()
with self.make_context(data={'from': '<EMAIL>', 'subject': 'It\'s PARTY TIME!'}):
msg = message.ConferenceMessage()
views.add_poster_by_email(conference, msg)
user = OSFUser.objects.get(username='<EMAIL>')
assert user.email == '<EMAIL>'
        assert user.fullname == user._id  # users shouldn't be able to use email as fullname, so we use the guid.
class TestMessage(ContextTestCase):
PUSH_CONTEXT = False
def test_verify_signature_valid(self):
with self.make_context():
msg = message.ConferenceMessage()
msg.verify_signature()
def test_verify_signature_invalid(self):
with self.make_context(data={'signature': 'fake'}):
self.app.app.preprocess_request()
msg = message.ConferenceMessage()
with assert_raises(message.ConferenceError):
msg.verify_signature()
def test_is_spam_false_missing_headers(self):
ctx = self.make_context(
method='POST',
data={'X-Mailgun-Sscore': message.SSCORE_MAX_VALUE - 1},
)
with ctx:
msg = message.ConferenceMessage()
assert not msg.is_spam
def test_is_spam_false_all_headers(self):
ctx = self.make_context(
method='POST',
data={
'X-Mailgun-Sscore': message.SSCORE_MAX_VALUE - 1,
'X-Mailgun-Dkim-Check-Result': message.DKIM_PASS_VALUES[0],
'X-Mailgun-Spf': message.SPF_PASS_VALUES[0],
},
)
with ctx:
msg = message.ConferenceMessage()
assert not msg.is_spam
def test_is_spam_true_sscore(self):
ctx = self.make_context(
method='POST',
data={'X-Mailgun-Sscore': message.SSCORE_MAX_VALUE + 1},
)
with ctx:
msg = message.ConferenceMessage()
assert msg.is_spam
def test_is_spam_true_dkim(self):
ctx = self.make_context(
method='POST',
data={'X-Mailgun-Dkim-Check-Result': message.DKIM_PASS_VALUES[0][::-1]},
)
with ctx:
msg = message.ConferenceMessage()
assert msg.is_spam
def test_is_spam_true_spf(self):
ctx = self.make_context(
method='POST',
data={'X-Mailgun-Spf': message.SPF_PASS_VALUES[0][::-1]},
)
with ctx:
msg = message.ConferenceMessage()
assert msg.is_spam
def test_subject(self):
ctx = self.make_context(
method='POST',
data={'subject': 'RE: Hip Hopera'},
)
with ctx:
msg = message.ConferenceMessage()
assert_equal(msg.subject, 'Hip Hopera')
def test_recipient(self):
address = '<EMAIL>'
ctx = self.make_context(
method='POST',
data={'recipient': address},
)
with ctx:
msg = message.ConferenceMessage()
assert_equal(msg.recipient, address)
def test_text(self):
text = 'welcome to my nuclear family'
ctx = self.make_context(
method='POST',
data={'stripped-text': text},
)
with ctx:
msg = message.ConferenceMessage()
assert_equal(msg.text, text)
def test_sender_name(self):
names = [
(' Fred', 'Fred'),
(u'Me䬟', u'Me䬟'),
(u'<EMAIL>', u'<EMAIL>'),
(u'Fred <<EMAIL>>', u'Fred'),
(u'"Fred" <<EMAIL>>', u'Fred'),
]
for name in names:
with self.make_context(data={'from': name[0]}):
msg = message.ConferenceMessage()
assert_equal(msg.sender_name, name[1])
def test_sender_email(self):
emails = [
(u'<EMAIL>', u'<EMAIL>'),
(u'<EMAIL>', u'<EMAIL>')
]
for email in emails:
with self.make_context(data={'from': email[0]}):
msg = message.ConferenceMessage()
assert_equal(msg.sender_email, email[1])
def test_route_invalid_pattern(self):
with self.make_context(data={'recipient': '<EMAIL>'}):
self.app.app.preprocess_request()
msg = message.ConferenceMessage()
with assert_raises(message.ConferenceError):
msg.route
def test_route_invalid_test(self):
recipient = '{0}<EMAIL>'.format('' if settings.DEV_MODE else 'stage-')
with self.make_context(data={'recipient': recipient}):
self.app.app.preprocess_request()
msg = message.ConferenceMessage()
with assert_raises(message.ConferenceError):
msg.route
def test_route_valid_alternate(self):
conf = ConferenceFactory(endpoint='chocolate', active=True)
conf.name = 'Chocolate Conference'
conf.field_names['submission2'] = 'data'
conf.save()
recipient = '{0}<EMAIL>'.format('test-' if settings.DEV_MODE else '')
with self.make_context(data={'recipient': recipient}):
self.app.app.preprocess_request()
msg = message.ConferenceMessage()
assert_equal(msg.conference_name, 'chocolate')
assert_equal(msg.conference_category, 'data')
conf.__class__.delete(conf)
def test_route_valid_b(self):
recipient = '{0}<EMAIL>'.format('test-' if settings.DEV_MODE else '')
with self.make_context(data={'recipient': recipient}):
self.app.app.preprocess_request()
msg = message.ConferenceMessage()
assert_equal(msg.conference_name, 'conf')
assert_equal(msg.conference_category, 'poster')
def test_alternate_route_invalid(self):
recipient = '{0}<EMAIL>'.format('test-' if settings.DEV_MODE else '')
with self.make_context(data={'recipient': recipient}):
self.app.app.preprocess_request()
msg = message.ConferenceMessage()
with assert_raises(message.ConferenceError):
msg.route
def test_attachments_count_zero(self):
with self.make_context(data={'attachment-count': '0'}):
msg = message.ConferenceMessage()
assert_equal(msg.attachments, [])
def test_attachments_count_one(self):
content = 'slightly mad'
sio = StringIO(content)
ctx = self.make_context(
method='POST',
data={
'attachment-count': 1,
'attachment-1': (sio, 'attachment-1'),
},
)
with ctx:
msg = message.ConferenceMessage()
assert_equal(len(msg.attachments), 1)
assert_equal(msg.attachments[0].read(), content)
class TestConferenceEmailViews(OsfTestCase):
def test_redirect_to_meetings_url(self):
url = '/presentations/'
res = self.app.get(url)
assert_equal(res.status_code, 302)
res = res.follow()
assert_equal(res.request.path, '/meetings/')
def test_conference_submissions(self):
AbstractNode.objects.all().delete()
conference1 = ConferenceFactory()
conference2 = ConferenceFactory()
# Create conference nodes
create_fake_conference_nodes(
3,
conference1,
)
create_fake_conference_nodes(
2,
conference2,
)
url = api_url_for('conference_submissions')
res = self.app.get(url)
assert_equal(res.json['success'], True)
def test_conference_plain_returns_200(self):
conference = ConferenceFactory()
url = web_url_for('conference_results__plain', meeting=conference.endpoint)
res = self.app.get(url)
assert_equal(res.status_code, 200)
def test_conference_data(self):
conference = ConferenceFactory()
# Create conference nodes
n_conference_nodes = 3
create_fake_conference_nodes(
n_conference_nodes,
conference,
)
# Create a non-conference node
ProjectFactory()
url = api_url_for('conference_data', meeting=conference.endpoint)
res = self.app.get(url)
assert_equal(res.status_code, 200)
assert_equal(len(res.json), n_conference_nodes)
# Regression for OSF-8864 to confirm bad project data does not make whole conference break
def test_conference_bad_data(self):
conference = ConferenceFactory()
# Create conference nodes
n_conference_nodes = 3
n_conference_nodes_bad = 1
create_fake_conference_nodes_bad_data(
conference,
n_conference_nodes,
n_conference_nodes_bad,
conference,
)
# Create a non-conference node
ProjectFactory()
url = api_url_for('conference_data', meeting=conference.endpoint)
res = self.app.get(url)
assert_equal(res.status_code, 200)
assert_equal(len(res.json), n_conference_nodes - n_conference_nodes_bad)
def test_conference_data_url_upper(self):
conference = ConferenceFactory()
# Create conference nodes
n_conference_nodes = 3
create_fake_conference_nodes(
n_conference_nodes,
conference,
)
# Create a non-conference node
ProjectFactory()
url = api_url_for('conference_data', meeting=conference.endpoint.upper())
res = self.app.get(url)
assert_equal(res.status_code, 200)
assert_equal(len(res.json), n_conference_nodes)
def test_conference_data_tag_upper(self):
conference = ConferenceFactory()
# Create conference nodes
n_conference_nodes = 3
create_fake_conference_nodes(
n_conference_nodes,
conference,
)
# Create a non-conference node
ProjectFactory()
url = api_url_for('conference_data', meeting=conference.endpoint)
res = self.app.get(url)
assert_equal(res.status_code, 200)
assert_equal(len(res.json), n_conference_nodes)
def test_conference_results(self):
conference = ConferenceFactory()
url = web_url_for('conference_results', meeting=conference.endpoint)
res = self.app.get(url)
assert_equal(res.status_code, 200)
def test_confererence_results_endpoint_is_case_insensitive(self):
ConferenceFactory(endpoint='StudySwap')
url = web_url_for('conference_results', meeting='studyswap')
res = self.app.get(url)
assert_equal(res.status_code, 200)
class TestConferenceModel(OsfTestCase):
def test_endpoint_is_required(self):
with assert_raises(IntegrityError):
ConferenceFactory(endpoint=None, name=fake.company()).save()
def test_name_is_required(self):
with assert_raises(IntegrityError):
ConferenceFactory(endpoint='spsp2014', name=None).save()
def test_default_field_names(self):
conf = ConferenceFactory(endpoint='cookie', name='Cookies Conference')
conf.save()
assert_equal(conf.field_names['submission1'], 'poster')
assert_equal(conf.field_names['mail_subject'], 'Presentation title')
def test_conference_valid_submissions(self):
conf = ConferenceFactory(endpoint='Hamburgers', name='Hamburger conference')
conf.save()
# 3 good nodes added
create_fake_conference_nodes(3, conf)
# Deleted node added
deleted_node = ProjectFactory(is_public=True)
deleted_node.is_deleted = True
deleted_node.save()
conf.submissions.add(deleted_node)
# Private node added
private_node = ProjectFactory(is_public=False)
conf.submissions.add(private_node)
assert_equal(conf.submissions.count(), 5)
assert_equal(conf.valid_submissions.count(), 3)
class TestConferenceIntegration(ContextTestCase):
@mock.patch('website.conferences.views.send_mail')
@mock.patch('website.conferences.utils.upload_attachments')
def test_integration(self, mock_upload, mock_send_mail):
fullname = '<NAME>'
username = '<EMAIL>'
title = 'good songs'
conference = ConferenceFactory()
body = 'dragon on my back'
content = 'dragon attack'
recipient = '{0}{1}-<EMAIL>'.format(
'test-' if settings.DEV_MODE else '',
conference.endpoint,
)
self.app.post(
api_url_for('meeting_hook'),
{
'X-Mailgun-Sscore': 0,
'timestamp': '123',
'token': 'secret',
'signature': hmac.new(
key=settings.MAILGUN_API_KEY,
msg='{}{}'.format('123', 'secret'),
digestmod=hashlib.sha256,
).hexdigest(),
'attachment-count': '1',
'from': '{0} <{1}>'.format(fullname, username),
'recipient': recipient,
'subject': title,
'stripped-text': body,
},
upload_files=[
('attachment-1', 'attachment-1', content),
],
)
assert_true(mock_upload.called)
users = OSFUser.objects.filter(username=username)
assert_equal(users.count(), 1)
nodes = AbstractNode.objects.filter(title=title)
assert_equal(nodes.count(), 1)
node = nodes[0]
assert_equal(WikiVersion.objects.get_for_node(node, 'home').content, body)
assert_true(mock_send_mail.called)
call_args, call_kwargs = mock_send_mail.call_args
assert_absolute(call_kwargs['conf_view_url'])
assert_absolute(call_kwargs['set_password_url'])
assert_absolute(call_kwargs['profile_url'])
assert_absolute(call_kwargs['file_url'])
assert_absolute(call_kwargs['node_url'])
@mock.patch('website.conferences.views.send_mail')
def test_integration_inactive(self, mock_send_mail):
conference = ConferenceFactory(active=False)
fullname = '<NAME>'
username = '<EMAIL>'
title = 'good songs'
body = 'dragon on my back'
recipient = '{0}{1}-<EMAIL>'.format(
'test-' if settings.DEV_MODE else '',
conference.endpoint,
)
res = self.app.post(
api_url_for('meeting_hook'),
{
'X-Mailgun-Sscore': 0,
'timestamp': '123',
'token': 'secret',
'signature': hmac.new(
key=settings.MAILGUN_API_KEY,
msg='{}{}'.format('123', 'secret'),
digestmod=hashlib.sha256,
).hexdigest(),
'attachment-count': '1',
'from': '{0} <{1}>'.format(fullname, username),
'recipient': recipient,
'subject': title,
'stripped-text': body,
},
expect_errors=True,
)
assert_equal(res.status_code, 406)
call_args, call_kwargs = mock_send_mail.call_args
assert_equal(call_args, (username, views.CONFERENCE_INACTIVE))
assert_equal(call_kwargs['fullname'], fullname)
assert_equal_urls(
call_kwargs['presentations_url'],
web_url_for('conference_view', _absolute=True),
)
@mock.patch('website.conferences.views.send_mail')
@mock.patch('website.conferences.utils.upload_attachments')
def test_integration_wo_full_name(self, mock_upload, mock_send_mail):
username = '<EMAIL>'
title = 'no full name only email'
conference = ConferenceFactory()
body = 'dragon on my back'
content = 'dragon attack'
recipient = '{0}{1}-<EMAIL>'.format(
'test-' if settings.DEV_MODE else '',
conference.endpoint,
)
self.app.post(
api_url_for('meeting_hook'),
{
'X-Mailgun-Sscore': 0,
'timestamp': '123',
'token': 'secret',
'signature': hmac.new(
key=settings.MAILGUN_API_KEY,
msg='{}{}'.format('123', 'secret'),
digestmod=hashlib.sha256,
).hexdigest(),
'attachment-count': '1',
'from': username,
'recipient': recipient,
'subject': title,
'stripped-text': body,
},
upload_files=[
('attachment-1', 'attachment-1', content),
],
)
assert_true(mock_upload.called)
users = OSFUser.objects.filter(username=username)
assert_equal(users.count(), 1)
nodes = AbstractNode.objects.filter(title=title)
assert_equal(nodes.count(), 1)
node = nodes[0]
assert_equal(WikiVersion.objects.get_for_node(node, 'home').content, body)
assert_true(mock_send_mail.called)
call_args, call_kwargs = mock_send_mail.call_args
assert_absolute(call_kwargs['conf_view_url'])
assert_absolute(call_kwargs['set_password_url'])
assert_absolute(call_kwargs['profile_url'])
assert_absolute(call_kwargs['file_url'])
assert_absolute(call_kwargs['node_url'])
@mock.patch('website.conferences.views.send_mail')
@mock.patch('website.conferences.utils.upload_attachments')
def test_create_conference_node_with_same_name_as_existing_node(self, mock_upload, mock_send_mail):
conference = ConferenceFactory()
user = UserFactory()
title = 'Long Live Greg'
ProjectFactory(creator=user, title=title)
body = 'Greg is a good plant'
content = 'Long may they reign.'
recipient = '{0}{1}-<EMAIL>'.format(
'test-' if settings.DEV_MODE else '',
conference.endpoint,
)
self.app.post(
api_url_for('meeting_hook'),
{
'X-Mailgun-Sscore': 0,
'timestamp': '123',
'token': 'secret',
'signature': hmac.new(
key=settings.MAILGUN_API_KEY,
msg='{}{}'.format('123', 'secret'),
digestmod=hashlib.sha256,
).hexdigest(),
'attachment-count': '1',
'from': '{0} <{1}>'.format(user.fullname, user.username),
'recipient': recipient,
'subject': title,
'stripped-text': body,
},
upload_files=[
('attachment-1', 'attachment-1', content),
],
)
assert AbstractNode.objects.filter(title=title, creator=user).count() == 2
assert mock_upload.called
assert mock_send_mail.called
| [
"website.util.web_url_for",
"osf.models.AbstractNode.objects.all",
"addons.wiki.models.WikiVersion.objects.get_for_node",
"StringIO.StringIO",
"mock.patch",
"osf_tests.factories.UserFactory",
"website.conferences.utils.upload_attachment",
"osf.models.OSFUser.objects.filter",
"website.conferences.utils.provision_node",
"website.conferences.message.ConferenceMessage",
"osf_tests.factories.ConferenceFactory",
"osf.models.AbstractNode.objects.filter",
"furl.furl",
"website.util.api_url_for",
"framework.auth.get_or_create_user",
"osf_tests.factories.ProjectFactory",
"website.conferences.views.add_poster_by_email",
"tests.base.fake.company",
"osf.models.OSFUser.objects.get"
] | [((799, 825), 'furl.furl', 'furl.furl', (['settings.DOMAIN'], {}), '(settings.DOMAIN)\n', (808, 825), False, 'import furl\n'), ((843, 857), 'furl.furl', 'furl.furl', (['url'], {}), '(url)\n', (852, 857), False, 'import furl\n'), ((971, 987), 'furl.furl', 'furl.furl', (['first'], {}), '(first)\n', (980, 987), False, 'import furl\n'), ((1037, 1054), 'furl.furl', 'furl.furl', (['second'], {}), '(second)\n', (1046, 1054), False, 'import furl\n'), ((6792, 6855), 'mock.patch', 'mock.patch', (['"""website.conferences.utils.waterbutler_api_url_for"""'], {}), "('website.conferences.utils.waterbutler_api_url_for')\n", (6802, 6855), False, 'import mock\n'), ((6861, 6913), 'mock.patch', 'mock.patch', (['"""website.conferences.utils.requests.put"""'], {}), "('website.conferences.utils.requests.put')\n", (6871, 6913), False, 'import mock\n'), ((7633, 7696), 'mock.patch', 'mock.patch', (['"""website.conferences.utils.waterbutler_api_url_for"""'], {}), "('website.conferences.utils.waterbutler_api_url_for')\n", (7643, 7696), False, 'import mock\n'), ((7702, 7754), 'mock.patch', 'mock.patch', (['"""website.conferences.utils.requests.put"""'], {}), "('website.conferences.utils.requests.put')\n", (7712, 7754), False, 'import mock\n'), ((8461, 8519), 'mock.patch', 'mock.patch', (['"""website.conferences.utils.upload_attachments"""'], {}), "('website.conferences.utils.upload_attachments')\n", (8471, 8519), False, 'import mock\n'), ((20628, 20677), 'mock.patch', 'mock.patch', (['"""website.conferences.views.send_mail"""'], {}), "('website.conferences.views.send_mail')\n", (20638, 20677), False, 'import mock\n'), ((20683, 20741), 'mock.patch', 'mock.patch', (['"""website.conferences.utils.upload_attachments"""'], {}), "('website.conferences.utils.upload_attachments')\n", (20693, 20741), False, 'import mock\n'), ((22638, 22687), 'mock.patch', 'mock.patch', (['"""website.conferences.views.send_mail"""'], {}), "('website.conferences.views.send_mail')\n", (22648, 22687), False, 'import mock\n'), ((24164, 24213), 'mock.patch', 'mock.patch', (['"""website.conferences.views.send_mail"""'], {}), "('website.conferences.views.send_mail')\n", (24174, 24213), False, 'import mock\n'), ((24219, 24277), 'mock.patch', 'mock.patch', (['"""website.conferences.utils.upload_attachments"""'], {}), "('website.conferences.utils.upload_attachments')\n", (24229, 24277), False, 'import mock\n'), ((26142, 26191), 'mock.patch', 'mock.patch', (['"""website.conferences.views.send_mail"""'], {}), "('website.conferences.views.send_mail')\n", (26152, 26191), False, 'import mock\n'), ((26197, 26255), 'mock.patch', 'mock.patch', (['"""website.conferences.utils.upload_attachments"""'], {}), "('website.conferences.utils.upload_attachments')\n", (26207, 26255), False, 'import mock\n'), ((1235, 1265), 'osf_tests.factories.ProjectFactory', 'ProjectFactory', ([], {'is_public': '(True)'}), '(is_public=True)\n', (1249, 1265), False, 'from osf_tests.factories import ConferenceFactory, ProjectFactory, UserFactory\n'), ((1500, 1530), 'osf_tests.factories.ProjectFactory', 'ProjectFactory', ([], {'is_public': '(True)'}), '(is_public=True)\n', (1514, 1530), False, 'from osf_tests.factories import ConferenceFactory, ProjectFactory, UserFactory\n'), ((1906, 1919), 'osf_tests.factories.UserFactory', 'UserFactory', ([], {}), '()\n', (1917, 1919), False, 'from osf_tests.factories import ConferenceFactory, ProjectFactory, UserFactory\n'), ((1947, 2009), 'framework.auth.get_or_create_user', 'get_or_create_user', (['user.fullname', 'user.username'], {'is_spam': 
'(True)'}), '(user.fullname, user.username, is_spam=True)\n', (1965, 2009), False, 'from framework.auth import get_or_create_user\n'), ((2274, 2327), 'framework.auth.get_or_create_user', 'get_or_create_user', (['fullname', 'username'], {'is_spam': '(False)'}), '(fullname, username, is_spam=False)\n', (2292, 2327), False, 'from framework.auth import get_or_create_user\n'), ((2709, 2761), 'framework.auth.get_or_create_user', 'get_or_create_user', (['fullname', 'username'], {'is_spam': '(True)'}), '(fullname, username, is_spam=True)\n', (2727, 2761), False, 'from framework.auth import get_or_create_user\n'), ((4505, 4521), 'osf_tests.factories.ProjectFactory', 'ProjectFactory', ([], {}), '()\n', (4519, 4521), False, 'from osf_tests.factories import ConferenceFactory, ProjectFactory, UserFactory\n'), ((4586, 4605), 'osf_tests.factories.ConferenceFactory', 'ConferenceFactory', ([], {}), '()\n', (4603, 4605), False, 'from osf_tests.factories import ConferenceFactory, ProjectFactory, UserFactory\n'), ((4711, 4733), 'StringIO.StringIO', 'StringIO', (['self.content'], {}), '(self.content)\n', (4719, 4733), False, 'from StringIO import StringIO\n'), ((7169, 7231), 'website.conferences.utils.upload_attachment', 'utils.upload_attachment', (['self.user', 'self.node', 'self.attachment'], {}), '(self.user, self.node, self.attachment)\n', (7192, 7231), False, 'from website.conferences import utils, message\n'), ((7979, 8041), 'website.conferences.utils.upload_attachment', 'utils.upload_attachment', (['self.user', 'self.node', 'self.attachment'], {}), '(self.user, self.node, self.attachment)\n', (8002, 8041), False, 'from website.conferences import utils, message\n'), ((8606, 8625), 'osf_tests.factories.ConferenceFactory', 'ConferenceFactory', ([], {}), '()\n', (8623, 8625), False, 'from osf_tests.factories import ConferenceFactory, ProjectFactory, UserFactory\n'), ((8834, 8873), 'osf.models.OSFUser.objects.get', 'OSFUser.objects.get', ([], {'username': '"""<EMAIL>"""'}), "(username='<EMAIL>')\n", (8853, 8873), False, 'from osf.models import OSFUser, AbstractNode\n'), ((13432, 13484), 'osf_tests.factories.ConferenceFactory', 'ConferenceFactory', ([], {'endpoint': '"""chocolate"""', 'active': '(True)'}), "(endpoint='chocolate', active=True)\n", (13449, 13484), False, 'from osf_tests.factories import ConferenceFactory, ProjectFactory, UserFactory\n'), ((15016, 15033), 'StringIO.StringIO', 'StringIO', (['content'], {}), '(content)\n', (15024, 15033), False, 'from StringIO import StringIO\n'), ((15797, 15816), 'osf_tests.factories.ConferenceFactory', 'ConferenceFactory', ([], {}), '()\n', (15814, 15816), False, 'from osf_tests.factories import ConferenceFactory, ProjectFactory, UserFactory\n'), ((15839, 15858), 'osf_tests.factories.ConferenceFactory', 'ConferenceFactory', ([], {}), '()\n', (15856, 15858), False, 'from osf_tests.factories import ConferenceFactory, ProjectFactory, UserFactory\n'), ((16084, 16121), 'website.util.api_url_for', 'api_url_for', (['"""conference_submissions"""'], {}), "('conference_submissions')\n", (16095, 16121), False, 'from website.util import api_url_for, web_url_for\n'), ((16273, 16292), 'osf_tests.factories.ConferenceFactory', 'ConferenceFactory', ([], {}), '()\n', (16290, 16292), False, 'from osf_tests.factories import ConferenceFactory, ProjectFactory, UserFactory\n'), ((16307, 16376), 'website.util.web_url_for', 'web_url_for', (['"""conference_results__plain"""'], {'meeting': 'conference.endpoint'}), "('conference_results__plain', meeting=conference.endpoint)\n", (16318, 
16376), False, 'from website.util import api_url_for, web_url_for\n'), ((16510, 16529), 'osf_tests.factories.ConferenceFactory', 'ConferenceFactory', ([], {}), '()\n', (16527, 16529), False, 'from osf_tests.factories import ConferenceFactory, ProjectFactory, UserFactory\n'), ((16747, 16763), 'osf_tests.factories.ProjectFactory', 'ProjectFactory', ([], {}), '()\n', (16761, 16763), False, 'from osf_tests.factories import ConferenceFactory, ProjectFactory, UserFactory\n'), ((16779, 16838), 'website.util.api_url_for', 'api_url_for', (['"""conference_data"""'], {'meeting': 'conference.endpoint'}), "('conference_data', meeting=conference.endpoint)\n", (16790, 16838), False, 'from website.util import api_url_for, web_url_for\n'), ((17127, 17146), 'osf_tests.factories.ConferenceFactory', 'ConferenceFactory', ([], {}), '()\n', (17144, 17146), False, 'from osf_tests.factories import ConferenceFactory, ProjectFactory, UserFactory\n'), ((17468, 17484), 'osf_tests.factories.ProjectFactory', 'ProjectFactory', ([], {}), '()\n', (17482, 17484), False, 'from osf_tests.factories import ConferenceFactory, ProjectFactory, UserFactory\n'), ((17500, 17559), 'website.util.api_url_for', 'api_url_for', (['"""conference_data"""'], {'meeting': 'conference.endpoint'}), "('conference_data', meeting=conference.endpoint)\n", (17511, 17559), False, 'from website.util import api_url_for, web_url_for\n'), ((17784, 17803), 'osf_tests.factories.ConferenceFactory', 'ConferenceFactory', ([], {}), '()\n', (17801, 17803), False, 'from osf_tests.factories import ConferenceFactory, ProjectFactory, UserFactory\n'), ((18021, 18037), 'osf_tests.factories.ProjectFactory', 'ProjectFactory', ([], {}), '()\n', (18035, 18037), False, 'from osf_tests.factories import ConferenceFactory, ProjectFactory, UserFactory\n'), ((18320, 18339), 'osf_tests.factories.ConferenceFactory', 'ConferenceFactory', ([], {}), '()\n', (18337, 18339), False, 'from osf_tests.factories import ConferenceFactory, ProjectFactory, UserFactory\n'), ((18557, 18573), 'osf_tests.factories.ProjectFactory', 'ProjectFactory', ([], {}), '()\n', (18571, 18573), False, 'from osf_tests.factories import ConferenceFactory, ProjectFactory, UserFactory\n'), ((18589, 18648), 'website.util.api_url_for', 'api_url_for', (['"""conference_data"""'], {'meeting': 'conference.endpoint'}), "('conference_data', meeting=conference.endpoint)\n", (18600, 18648), False, 'from website.util import api_url_for, web_url_for\n'), ((18841, 18860), 'osf_tests.factories.ConferenceFactory', 'ConferenceFactory', ([], {}), '()\n', (18858, 18860), False, 'from osf_tests.factories import ConferenceFactory, ProjectFactory, UserFactory\n'), ((18876, 18938), 'website.util.web_url_for', 'web_url_for', (['"""conference_results"""'], {'meeting': 'conference.endpoint'}), "('conference_results', meeting=conference.endpoint)\n", (18887, 18938), False, 'from website.util import api_url_for, web_url_for\n'), ((19093, 19132), 'osf_tests.factories.ConferenceFactory', 'ConferenceFactory', ([], {'endpoint': '"""StudySwap"""'}), "(endpoint='StudySwap')\n", (19110, 19132), False, 'from osf_tests.factories import ConferenceFactory, ProjectFactory, UserFactory\n'), ((19147, 19201), 'website.util.web_url_for', 'web_url_for', (['"""conference_results"""'], {'meeting': '"""studyswap"""'}), "('conference_results', meeting='studyswap')\n", (19158, 19201), False, 'from website.util import api_url_for, web_url_for\n'), ((19685, 19748), 'osf_tests.factories.ConferenceFactory', 'ConferenceFactory', ([], {'endpoint': '"""cookie"""', 
'name': '"""Cookies Conference"""'}), "(endpoint='cookie', name='Cookies Conference')\n", (19702, 19748), False, 'from osf_tests.factories import ConferenceFactory, ProjectFactory, UserFactory\n'), ((19975, 20044), 'osf_tests.factories.ConferenceFactory', 'ConferenceFactory', ([], {'endpoint': '"""Hamburgers"""', 'name': '"""Hamburger conference"""'}), "(endpoint='Hamburgers', name='Hamburger conference')\n", (19992, 20044), False, 'from osf_tests.factories import ConferenceFactory, ProjectFactory, UserFactory\n'), ((20194, 20224), 'osf_tests.factories.ProjectFactory', 'ProjectFactory', ([], {'is_public': '(True)'}), '(is_public=True)\n', (20208, 20224), False, 'from osf_tests.factories import ConferenceFactory, ProjectFactory, UserFactory\n'), ((20388, 20419), 'osf_tests.factories.ProjectFactory', 'ProjectFactory', ([], {'is_public': '(False)'}), '(is_public=False)\n', (20402, 20419), False, 'from osf_tests.factories import ConferenceFactory, ProjectFactory, UserFactory\n'), ((20910, 20929), 'osf_tests.factories.ConferenceFactory', 'ConferenceFactory', ([], {}), '()\n', (20927, 20929), False, 'from osf_tests.factories import ConferenceFactory, ProjectFactory, UserFactory\n'), ((21986, 22027), 'osf.models.OSFUser.objects.filter', 'OSFUser.objects.filter', ([], {'username': 'username'}), '(username=username)\n', (22008, 22027), False, 'from osf.models import OSFUser, AbstractNode\n'), ((22083, 22123), 'osf.models.AbstractNode.objects.filter', 'AbstractNode.objects.filter', ([], {'title': 'title'}), '(title=title)\n', (22110, 22123), False, 'from osf.models import OSFUser, AbstractNode\n'), ((22766, 22797), 'osf_tests.factories.ConferenceFactory', 'ConferenceFactory', ([], {'active': '(False)'}), '(active=False)\n', (22783, 22797), False, 'from osf_tests.factories import ConferenceFactory, ProjectFactory, UserFactory\n'), ((24444, 24463), 'osf_tests.factories.ConferenceFactory', 'ConferenceFactory', ([], {}), '()\n', (24461, 24463), False, 'from osf_tests.factories import ConferenceFactory, ProjectFactory, UserFactory\n'), ((25490, 25531), 'osf.models.OSFUser.objects.filter', 'OSFUser.objects.filter', ([], {'username': 'username'}), '(username=username)\n', (25512, 25531), False, 'from osf.models import OSFUser, AbstractNode\n'), ((25587, 25627), 'osf.models.AbstractNode.objects.filter', 'AbstractNode.objects.filter', ([], {'title': 'title'}), '(title=title)\n', (25614, 25627), False, 'from osf.models import OSFUser, AbstractNode\n'), ((26381, 26400), 'osf_tests.factories.ConferenceFactory', 'ConferenceFactory', ([], {}), '()\n', (26398, 26400), False, 'from osf_tests.factories import ConferenceFactory, ProjectFactory, UserFactory\n'), ((26416, 26429), 'osf_tests.factories.UserFactory', 'UserFactory', ([], {}), '()\n', (26427, 26429), False, 'from osf_tests.factories import ConferenceFactory, ProjectFactory, UserFactory\n'), ((26471, 26512), 'osf_tests.factories.ProjectFactory', 'ProjectFactory', ([], {'creator': 'user', 'title': 'title'}), '(creator=user, title=title)\n', (26485, 26512), False, 'from osf_tests.factories import ConferenceFactory, ProjectFactory, UserFactory\n'), ((3203, 3255), 'framework.auth.get_or_create_user', 'get_or_create_user', (['fullname', 'username'], {'is_spam': '(True)'}), '(fullname, username, is_spam=True)\n', (3221, 3255), False, 'from framework.auth import get_or_create_user\n'), ((5371, 5398), 'website.conferences.message.ConferenceMessage', 'message.ConferenceMessage', ([], {}), '()\n', (5396, 5398), False, 'from website.conferences import utils, 
message\n'), ((5411, 5475), 'website.conferences.utils.provision_node', 'utils.provision_node', (['self.conference', 'msg', 'self.node', 'self.user'], {}), '(self.conference, msg, self.node, self.user)\n', (5431, 5475), False, 'from website.conferences import utils, message\n'), ((5995, 6022), 'website.conferences.message.ConferenceMessage', 'message.ConferenceMessage', ([], {}), '()\n', (6020, 6022), False, 'from website.conferences import utils, message\n'), ((6035, 6099), 'website.conferences.utils.provision_node', 'utils.provision_node', (['self.conference', 'msg', 'self.node', 'self.user'], {}), '(self.conference, msg, self.node, self.user)\n', (6055, 6099), False, 'from website.conferences import utils, message\n'), ((6464, 6491), 'website.conferences.message.ConferenceMessage', 'message.ConferenceMessage', ([], {}), '()\n', (6489, 6491), False, 'from website.conferences import utils, message\n'), ((6504, 6568), 'website.conferences.utils.provision_node', 'utils.provision_node', (['self.conference', 'msg', 'self.node', 'self.user'], {}), '(self.conference, msg, self.node, self.user)\n', (6524, 6568), False, 'from website.conferences import utils, message\n'), ((8735, 8762), 'website.conferences.message.ConferenceMessage', 'message.ConferenceMessage', ([], {}), '()\n', (8760, 8762), False, 'from website.conferences import utils, message\n'), ((8775, 8817), 'website.conferences.views.add_poster_by_email', 'views.add_poster_by_email', (['conference', 'msg'], {}), '(conference, msg)\n', (8800, 8817), False, 'from website.conferences import views\n'), ((9188, 9215), 'website.conferences.message.ConferenceMessage', 'message.ConferenceMessage', ([], {}), '()\n', (9213, 9215), False, 'from website.conferences import utils, message\n'), ((9421, 9448), 'website.conferences.message.ConferenceMessage', 'message.ConferenceMessage', ([], {}), '()\n', (9446, 9448), False, 'from website.conferences import utils, message\n'), ((9771, 9798), 'website.conferences.message.ConferenceMessage', 'message.ConferenceMessage', ([], {}), '()\n', (9796, 9798), False, 'from website.conferences import utils, message\n'), ((10224, 10251), 'website.conferences.message.ConferenceMessage', 'message.ConferenceMessage', ([], {}), '()\n', (10249, 10251), False, 'from website.conferences import utils, message\n'), ((10503, 10530), 'website.conferences.message.ConferenceMessage', 'message.ConferenceMessage', ([], {}), '()\n', (10528, 10530), False, 'from website.conferences import utils, message\n'), ((10792, 10819), 'website.conferences.message.ConferenceMessage', 'message.ConferenceMessage', ([], {}), '()\n', (10817, 10819), False, 'from website.conferences import utils, message\n'), ((11065, 11092), 'website.conferences.message.ConferenceMessage', 'message.ConferenceMessage', ([], {}), '()\n', (11090, 11092), False, 'from website.conferences import utils, message\n'), ((11307, 11334), 'website.conferences.message.ConferenceMessage', 'message.ConferenceMessage', ([], {}), '()\n', (11332, 11334), False, 'from website.conferences import utils, message\n'), ((11593, 11620), 'website.conferences.message.ConferenceMessage', 'message.ConferenceMessage', ([], {}), '()\n', (11618, 11620), False, 'from website.conferences import utils, message\n'), ((11890, 11917), 'website.conferences.message.ConferenceMessage', 'message.ConferenceMessage', ([], {}), '()\n', (11915, 11917), False, 'from website.conferences import utils, message\n'), ((12906, 12933), 'website.conferences.message.ConferenceMessage', 'message.ConferenceMessage', 
([], {}), '()\n', (12931, 12933), False, 'from website.conferences import utils, message\n'), ((13263, 13290), 'website.conferences.message.ConferenceMessage', 'message.ConferenceMessage', ([], {}), '()\n', (13288, 13290), False, 'from website.conferences import utils, message\n'), ((13802, 13829), 'website.conferences.message.ConferenceMessage', 'message.ConferenceMessage', ([], {}), '()\n', (13827, 13829), False, 'from website.conferences import utils, message\n'), ((14223, 14250), 'website.conferences.message.ConferenceMessage', 'message.ConferenceMessage', ([], {}), '()\n', (14248, 14250), False, 'from website.conferences import utils, message\n'), ((14615, 14642), 'website.conferences.message.ConferenceMessage', 'message.ConferenceMessage', ([], {}), '()\n', (14640, 14642), False, 'from website.conferences import utils, message\n'), ((14852, 14879), 'website.conferences.message.ConferenceMessage', 'message.ConferenceMessage', ([], {}), '()\n', (14877, 14879), False, 'from website.conferences import utils, message\n'), ((15268, 15295), 'website.conferences.message.ConferenceMessage', 'message.ConferenceMessage', ([], {}), '()\n', (15293, 15295), False, 'from website.conferences import utils, message\n'), ((21172, 21199), 'website.util.api_url_for', 'api_url_for', (['"""meeting_hook"""'], {}), "('meeting_hook')\n", (21183, 21199), False, 'from website.util import api_url_for, web_url_for\n'), ((23098, 23125), 'website.util.api_url_for', 'api_url_for', (['"""meeting_hook"""'], {}), "('meeting_hook')\n", (23109, 23125), False, 'from website.util import api_url_for, web_url_for\n'), ((24100, 24146), 'website.util.web_url_for', 'web_url_for', (['"""conference_view"""'], {'_absolute': '(True)'}), "('conference_view', _absolute=True)\n", (24111, 24146), False, 'from website.util import api_url_for, web_url_for\n'), ((24706, 24733), 'website.util.api_url_for', 'api_url_for', (['"""meeting_hook"""'], {}), "('meeting_hook')\n", (24717, 24733), False, 'from website.util import api_url_for, web_url_for\n'), ((26766, 26793), 'website.util.api_url_for', 'api_url_for', (['"""meeting_hook"""'], {}), "('meeting_hook')\n", (26777, 26793), False, 'from website.util import api_url_for, web_url_for\n'), ((12318, 12345), 'website.conferences.message.ConferenceMessage', 'message.ConferenceMessage', ([], {}), '()\n', (12343, 12345), False, 'from website.conferences import utils, message\n'), ((12651, 12678), 'website.conferences.message.ConferenceMessage', 'message.ConferenceMessage', ([], {}), '()\n', (12676, 12678), False, 'from website.conferences import utils, message\n'), ((15739, 15765), 'osf.models.AbstractNode.objects.all', 'AbstractNode.objects.all', ([], {}), '()\n', (15763, 15765), False, 'from osf.models import OSFUser, AbstractNode\n'), ((22208, 22254), 'addons.wiki.models.WikiVersion.objects.get_for_node', 'WikiVersion.objects.get_for_node', (['node', '"""home"""'], {}), "(node, 'home')\n", (22240, 22254), False, 'from addons.wiki.models import WikiVersion\n'), ((25712, 25758), 'addons.wiki.models.WikiVersion.objects.get_for_node', 'WikiVersion.objects.get_for_node', (['node', '"""home"""'], {}), "(node, 'home')\n", (25744, 25758), False, 'from addons.wiki.models import WikiVersion\n'), ((19572, 19621), 'osf_tests.factories.ConferenceFactory', 'ConferenceFactory', ([], {'endpoint': '"""spsp2014"""', 'name': 'None'}), "(endpoint='spsp2014', name=None)\n", (19589, 19621), False, 'from osf_tests.factories import ConferenceFactory, ProjectFactory, UserFactory\n'), ((27550, 27604), 
'osf.models.AbstractNode.objects.filter', 'AbstractNode.objects.filter', ([], {'title': 'title', 'creator': 'user'}), '(title=title, creator=user)\n', (27577, 27604), False, 'from osf.models import OSFUser, AbstractNode\n'), ((19455, 19469), 'tests.base.fake.company', 'fake.company', ([], {}), '()\n', (19467, 19469), False, 'from tests.base import OsfTestCase, fake\n')] |
import socketserver
import socket
import sys
import threading
import json
import queue
import time
import datetime
import traceback
class TCPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
def server_bind(self):
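        # SO_REUSEADDR lets the listener rebind to the same address right
        # after a restart instead of failing with "Address already in use".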
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.socket.bind(self.server_address)
class Listener(threading.Thread):
def run(self):
kwargs = self._kwargs
print("Listener: Started: %s" % kwargs)
Handler = self._kwargs["handler"]
server = self._kwargs["server"]
class Server(socketserver.BaseRequestHandler):
def handle(self):
print("Listener: Connection request received: %s" % kwargs)
Handler(server, self.request)
self.server = TCPServer((kwargs["host"], kwargs["port"]), Server)
self.server.serve_forever()
def stop(self):
self.server.shutdown()
self.server.server_close()
class Connector(threading.Thread):
def __init__(self, *arg, **kw):
self.is_stopping = False
threading.Thread.__init__(self, *arg, **kw)
def run(self):
print("Connector: Started: %s" % self._kwargs)
while not self.is_stopping:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
try:
sock.connect((self._kwargs["host"], self._kwargs["port"]))
print("Connector: Connected: %s" % self._kwargs)
self._kwargs["handler"](self._kwargs["server"], sock)
except Exception as e:
print(e)
traceback.print_exc()
finally:
sock.close()
time.sleep(1)
def stop(self):
self.is_stopping = True
class Handler(object):
encoding = "utf-8"
binary = False
filemode = "r"
def __init__(self, server, conn):
self.server = server
self.conn = conn
self.makefile()
self.handle()
def makefile(self):
args = {"mode": self.filemode + ["", "b"][self.binary]}
if not self.binary:
args["encoding"] = self.encoding
self.file = self.conn.makefile(**args)
def handle(self):
"""self.conn is a socket object, self.file a file wrapper for that
socket"""
def __hash__(self):
return id(self)
class ReceiveHandler(Handler):
filemode = "r"
class SendHandler(Handler):
filemode = "w"
class Server(object):
def __init__(self, handlers):
self.handlers = handlers
self.config = None
self.servers = {}
def configure(self, config):
self.config = config
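        # Diff the desired connections against the currently running servers:
        # start what is new in the config, stop what has been removed from it.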
connections = {self.connection_key(connection): connection for connection in config["connections"]}
to_create = connections.keys() - self.servers.keys()
to_destroy = self.servers.keys() - connections.keys()
for key in to_create:
server = self.start_connection(connections[key])
server.start()
self.servers[key] = server
for key in to_destroy:
server = self.servers.pop(key)
server.stop()
def connection_key(self, connection):
return json.dumps(connection, sort_keys=True, separators=(',', ':'))
def start_connection(self, connection):
handler = self.handlers[connection["handler"]]
addr = connection["address"].split(":")
assert addr[0] == "tcp"
host = "0.0.0.0"
port = 1024
if len(addr) == 2:
port = addr[1]
if len(addr) == 3:
host, port = addr[1:]
port = int(port)
connhandler = {"listen": Listener, "connect": Connector}[connection["type"]]
return connhandler(kwargs={"server": self, "host": host, "port": port, "handler": handler})
def run(config, handlers):
server = Server(handlers)
server.configure(config)
return server
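
# ---------------------------------------------------------------------------
# Editor's usage sketch (an assumption, not part of the original module): it
# shows how run() might be wired up with a line-printing ReceiveHandler. The
# handler name "echo", the host and the port are made up; the
# "tcp:<host>:<port>" address format follows Server.start_connection() above.
if __name__ == "__main__":
    class EchoHandler(ReceiveHandler):
        def handle(self):
            # self.file is a text-mode wrapper around the accepted socket
            for line in self.file:
                print("echo: %s" % line.strip())

    # The Listener thread keeps serving until the process is stopped.
    run(
        {"connections": [
            {"handler": "echo", "type": "listen", "address": "tcp:127.0.0.1:9000"},
        ]},
        {"echo": EchoHandler},
    )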
| [
"threading.Thread.__init__",
"socket.socket",
"json.dumps",
"time.sleep",
"traceback.print_exc"
] | [((1097, 1140), 'threading.Thread.__init__', 'threading.Thread.__init__', (['self', '*arg'], {}), '(self, *arg, **kw)\n', (1122, 1140), False, 'import threading\n'), ((3331, 3392), 'json.dumps', 'json.dumps', (['connection'], {'sort_keys': '(True)', 'separators': "(',', ':')"}), "(connection, sort_keys=True, separators=(',', ':'))\n", (3341, 3392), False, 'import json\n'), ((1270, 1319), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (1283, 1319), False, 'import socket\n'), ((1752, 1765), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (1762, 1765), False, 'import time\n'), ((1668, 1689), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (1687, 1689), False, 'import traceback\n')] |
# -*- coding: ascii -*-
import sys
import json
def check(data):
OnOffstart = data.find(b"OnOff")
if OnOffstart != -1:
fxName=""
OnOffblockSize = 0x30
for j in range(12):
if data[OnOffstart + j + OnOffblockSize] == 0x00:
break
fxName = fxName + chr(data[OnOffstart + j + OnOffblockSize])
        tD = {
            "fxname": fxName
        }
mmax = []
mdefault = []
name = []
mpedal = []
numParameters = 0
#print("OnOffStart at {}".format(OnOffstart))
try:
# this is WAY too large, let except break the loop
for j in range(0, 2000):
"""
if not ( data[OnOffstart + (j+1) * OnOffblockSize - 1] == 0x00
and data[OnOffstart + (j+1) * OnOffblockSize - 2] == 0x00):
# ZD2 format has a length and PRME offset. ZDL has none of this.
print("End of the parameters")
break;
if not ( data[OnOffstart + (j) * OnOffblockSize + 0x18 ] == 0x00
and data[OnOffstart + (j) * OnOffblockSize + 0x19] == 0x00
and data[OnOffstart + (j) * OnOffblockSize + 0x1A] == 0x00
and data[OnOffstart + (j) * OnOffblockSize + 0x1B] == 0x00 ):
print("Empty next slot")
break
"""
currName = ""
for i in range(12):
if data[OnOffstart + j * OnOffblockSize + i] == 0x00:
break
currName = currName + chr(data[OnOffstart + j * OnOffblockSize + i])
if data[OnOffstart + j * OnOffblockSize + i] & 0x80:
raise Exception("Non binary char")
if currName == "":
break
name.append(currName)
mmax.append( data[OnOffstart + j * OnOffblockSize + 12] +
data[OnOffstart + j * OnOffblockSize + 13] * 256)
mdefault.append(data[OnOffstart + j * OnOffblockSize + 16] +
data[OnOffstart + j * OnOffblockSize + 17] * 256);
if data[OnOffstart + j * OnOffblockSize + 0x2C]:
mpedal.append(True)
else:
mpedal.append(False)
#print(mmax[j])
#print(mdefault[j])
"""
print("[{}] {} {} {} {}".format(
OnOffstart + (j+1) * OnOffblockSize,
hex(data[OnOffstart + (j+1) * OnOffblockSize]),
hex(data[OnOffstart + (j+1) * OnOffblockSize + 1]),
hex(data[OnOffstart + (j+1) * OnOffblockSize + 2]),
hex(data[OnOffstart + (j+1) * OnOffblockSize + 3])) )
"""
#print("increment params")
numParameters = numParameters + 1
except:
pass
#print("Found {} parameters.".format(numParameters))
tD['Parameters'] = []
# 0 is the OnOff state
# 1 is the name
# so actual paramters start from index 2, but clearly there are 2 less
for i in range(numParameters - 2):
#print(i)
tD['Parameters'].append({'name': name[i+2], 'mmax': mmax[i + 2], 'mdefault': mdefault[i + 2], 'pedal': mpedal[i+2]})
#json.dump(tD, sys.stdout, indent=4)
f = open(fxName+'.json', "w")
json.dump(tD, f, indent=4)
f.close()
return fxName+'.OnOff'
# handles a zoom firmware
if __name__ == "__main__":
if len(sys.argv) == 2:
f = open(sys.argv[1], "rb")
data = f.read()
f.close()
check(data)
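# For reference, the generated <fxName>.json file has this shape (the field
# values below are illustrative only, not taken from a real firmware image):
# {
#     "fxname": "ZNR",
#     "Parameters": [
#         {"name": "THRSHOLD", "mmax": 100, "mdefault": 60, "pedal": false},
#         ...
#     ]
# }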
| [
"json.dump"
] | [((3647, 3673), 'json.dump', 'json.dump', (['tD', 'f'], {'indent': '(4)'}), '(tD, f, indent=4)\n', (3656, 3673), False, 'import json\n')] |
# Generate a random password from a-z, A-Z, 0-9 and the underscore
import random
source = ''
lower_char = [chr(x) for x in range(ord('a'), ord('z') + 1)]
upper_char = [chr(x) for x in range(ord('A'), ord('Z') + 1)]
number_char = [chr(x) for x in range(ord('0'), ord('9') + 1)]
source += "".join(lower_char)
source += "".join(upper_char)
source += "".join(number_char)
source += "_"
print(source)
# Randomly draw a 20-character string and require it to contain an underscore
while True:
s = "".join(random.sample(source, 20))
if '_' in s:
print(s)
break
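# Editor's note (not in the original snippet): random.sample() draws without
# replacement, so the 20 characters are all distinct. For security-sensitive
# passwords the standard "secrets" module is usually preferred, e.g.:
#     import secrets
#     password = "".join(secrets.choice(source) for _ in range(20))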
| [
"random.sample"
] | [((400, 425), 'random.sample', 'random.sample', (['source', '(20)'], {}), '(source, 20)\n', (413, 425), False, 'import random\n')] |
from django.test import TestCase
from rest_framework.test import APIRequestFactory
from .models import GBIC, GBICType
from .views import GBICListViewSet
# Create your tests here.
class GBICTest(TestCase):
def test_gbic_view_set(self):
request = APIRequestFactory().get("")
gbic_detail = GBICListViewSet.as_view(actions={'get': 'retrieve'})
gbic_type_test = GBICType.objects.create(description='muito_bom')
gbic_test = GBIC.objects.create(
serial='showdaxuxa',
patrimony_number='666',
gbic_type=gbic_type_test
)
response = gbic_detail(request, pk=gbic_test.pk)
self.assertEqual(response.status_code, 200)
def test_deleted_gbic_view_set(self):
request = APIRequestFactory().get("")
gbic_detail = GBICListViewSet.as_view(actions={'get': 'retrieve'})
gbic_type_test = GBICType.objects.create(description='muitoruim')
gbic_test = GBIC.objects.create(
serial='showdomilhao',
patrimony_number='777',
gbic_type=gbic_type_test
)
gbic_test.delete()
response = gbic_detail(request, pk=gbic_test.pk)
self.assertEqual(response.status_code, 404)
| [
"rest_framework.test.APIRequestFactory"
] | [((260, 279), 'rest_framework.test.APIRequestFactory', 'APIRequestFactory', ([], {}), '()\n', (277, 279), False, 'from rest_framework.test import APIRequestFactory\n'), ((765, 784), 'rest_framework.test.APIRequestFactory', 'APIRequestFactory', ([], {}), '()\n', (782, 784), False, 'from rest_framework.test import APIRequestFactory\n')] |
import inspect
from typing import List, Union, Set, Any
import numpy as np
from fruits.cache import Cache, CoquantileCache
from fruits.scope import force_input_shape, FitTransform
from fruits.core.callback import AbstractCallback
from fruits.signature.iss import SignatureCalculator, CachePlan
from fruits.words.word import Word
from fruits.sieving.abstract import FeatureSieve
from fruits.preparation.abstract import DataPreparateur
class Fruit:
"""Feature Extractor using iterated sums.
A Fruit consists of a number of
:class:`~fruits.core.fruit.FruitBranch` objects.
At the end of the pipeline, each branch returns their own features
and they will be concatenated by this class.
A simple example (using two branches):
.. code-block:: python
fruit = fruits.Fruit("My Fruit")
# optional: add preparateurs for preprocessing
fruit.add(fruits.preparation.INC)
# add words for iterated sums calculation
fruit.add(fruits.words.creation.simplewords_by_weight(4))
# choose sieves
fruit.add(fruits.sieving.PPV(0.5))
fruit.add(fruits.sieving.END)
# add a new branch without INC
fruit.fork()
fruit.add(fruits.words.creation.simplewords_by_weight(4))
fruit.add(fruits.sieving.PPV(0.5))
fruit.add(fruits.sieving.END)
# configure the fruit
fruit.configure(mode="extended")
# fit the fruit on a time series dataset
fruit.fit(X_train)
# transform the dataset
X_train_transformed = fruit.transform(X_train)
X_test_tranformed = fruit.transform(X_test)
# use the transformed results (features) in a classifier
...
The ``fruit`` above will result in ``2*8*2=32`` features per time
series.
"""
def __init__(self, name: str = ""):
self.name: str = name
# list of FruitBranches
self._branches: List[FruitBranch] = []
# pointer for the current branch index
self._cbi: int = 0
self._fitted: bool = False
@property
def name(self) -> str:
"""Simple identifier for the Fruit object."""
return self._name
@name.setter
def name(self, name: str):
self._name = name
def fork(self, branch: "FruitBranch" = None):
"""Adds a new branch to the pipeline. If none is given, an
empty FruitBranch will be created and switched to.
:type branch: FruitBranch, optional
"""
if branch is None:
branch = FruitBranch()
self._branches.append(branch)
self._cbi = len(self._branches) - 1
self._fitted = False
def branch(self, index: int = None):
"""Returns the currently selected branch or the branch with the
given index.
:rtype: FruitBranch
"""
if index is None:
return self._branches[self._cbi]
return self._branches[index]
def branches(self) -> list:
"""Returns all branches of this Fruit object.
:rtype: list
"""
return self._branches
def switch_branch(self, index: int):
"""Switches to the branch with the given index.
:param index: Integer in ``[0, 1, ..., len(self.branches())-1]``
:type index: int
"""
if not (0 <= index < len(self._branches)):
raise IndexError("Index has to be in [0, len(self.branches()))")
self._cbi = index
def add(self, *objects: Union[FitTransform, Word, type]):
"""Adds one or multiple object(s) to the currently selected
branch.
:param objects: One or more objects of the following types:
- :class:`~fruits.preparation.abstract.DataPreparateur`
- :class:`~fruits.words.word.Word`
- :class:`~fruits.sieving.abstract.FeatureSieve`
:type objects: Union[FitTransform, Word]
"""
if len(self._branches) == 0:
self.fork()
self._branches[self._cbi].add(*objects)
self._fitted = False
def nfeatures(self) -> int:
"""Returns the total number of features of all branches
combined.
:rtype: int
"""
return sum([branch.nfeatures() for branch in self._branches])
def configure(self, **kwargs: Any):
"""Makes changes to the default configuration of a all branches
if arguments differ from ``None``.
:param kwargs: For possible options, have a look at
:meth:`fruits.core.fruit.FruitBranch.configure`.
:type kwargs: Any
"""
for branch in self._branches:
branch.configure(**kwargs)
def fit(self, X: np.ndarray):
"""Fits all branches to the given data.
:param X: (Multidimensional) time series dataset as an array
of three dimensions. Have a look at
:meth:`~fruits.scope.force_input_shape`.
:type X: np.ndarray
"""
for branch in self._branches:
branch.fit(X)
self._fitted = True
def transform(self, X: np.ndarray,
callbacks: List[AbstractCallback] = []) -> np.ndarray:
"""Returns a two dimensional array of all features from all
branches this Fruit object contains.
:param X: (Multidimensional) time series dataset as an array
of three dimensions. Have a look at
:meth:`~fruits.scope.force_input_shape`.
:type X: np.ndarray
:param callbacks: List of callbacks. To write your own callback,
override the class
            :class:`~fruits.core.callback.AbstractCallback`,
            defaults to []
:type callbacks: List[AbstractCallback], optional
:rtype: np.ndarray
:raises: RuntimeError if Fruit.fit wasn't called
"""
if not self._fitted:
raise RuntimeError("Missing call of self.fit")
result = np.zeros((X.shape[0], self.nfeatures()))
index = 0
for branch in self._branches:
for callback in callbacks:
callback.on_next_branch()
k = branch.nfeatures()
result[:, index:index+k] = branch.transform(X, callbacks)
index += k
result = np.nan_to_num(result, copy=False, nan=0.0)
return result
def fit_transform(self, X: np.ndarray) -> np.ndarray:
"""Fits all branches to the given dataset and returns the
transformed results of X from all branches.
:param X: (Multidimensional) time series dataset
:type X: np.ndarray
:returns: Two dimensional feature array
:rtype: np.ndarray
"""
self.fit(X)
return self.transform(X)
def summary(self) -> str:
"""Returns a summary of this object. The summary contains a
summary for each FruitBranch in this Fruit object.
:rtype: str
"""
summary = "{:=^80}".format(f"Summary of fruits.Fruit: '{self.name}'")
summary += f"\nBranches: {len(self.branches())}"
summary += f"\nFeatures: {self.nfeatures()}"
for branch in self.branches():
summary += "\n\n" + branch.summary()
summary += "\n{:=^80}".format(f"End of Summary")
return summary
def copy(self) -> "Fruit":
"""Creates a shallow copy of this Fruit object.
This also creates shallow copies of all branches in this object.
:rtype: Fruit
"""
copy_ = Fruit(self.name+" (Copy)")
for branch in self._branches:
copy_.fork(branch.copy())
return copy_
def deepcopy(self) -> "Fruit":
"""Creates a deep copy of this Fruit object.
This also creates deep copies of all branches in this object.
:rtype: Fruit
"""
copy_ = Fruit(self.name+" (Copy)")
for branch in self._branches:
copy_.fork(branch.deepcopy())
return copy_
class FruitBranch:
"""One branch of a Fruit object.
A FruitBranch object extracts values from time series data that are
somehow representative of the input data.
The user can customize any of the following three steps.
- Preparing data:
Apply functions at the start of the extraction procedure.
There are many so called
:class:`~fruits.preparation.abstract.DataPreparateur`
objects in fruits available for preprocessing. The
preparateurs will be applied sequentially to the input data.
- Calculating Iterated Sums:
The preprocessed data is now used to calculate the iterated
sums signature for different
:class:`~fruits.words.word.Word` objects the user can
specify.
- Extracting Features:
Each :class:`~fruits.sieving.abstract.FeatureSieve` added to
the branch will be fitted on the iterated sums from the
previous step. The branch then returns an array of numbers
(the transformed results from those sieves), i.e. the
features for each time series.
"""
def __init__(self):
# lists of used classes for data processing
self._preparateurs: list = []
self._words: list = []
self._sieves: list = []
# calculator options used in the ISS calculation
self._calculator_options: dict = {"batch_size": 1, "mode": "single"}
# list with inner lists containing sieves
# all sieves in one list are trained on one specific output
# of an ISS-result
self._sieves_extended: list = []
# configurations for fitting
self._fitted: bool = False
self._fit_sample_size: Union[float, int] = 1
# cache that is calculated at fitting and also used in the
# transformation process
self._cache: Cache
def configure(self,
mode: str = None,
batch_size: int = None,
fit_sample_size: Union[float, int] = None):
"""Makes changes to the default configuration of a fruit branch
if arguments differ from ``None``.
:param mode: See
:meth:`fruits.signature.iss.SignatureCalculator.transform`,
defaults to None
:type mode: str, optional
:param batch_size: See
:meth:`~ruits.signature.iss.SignatureCalculator.transform`,
defaults to None
:type batch_size: int, optional
:param fit_sample_size: Size of the random time series sample
that is used for fitting. This is represented as a float
which will be multiplied by ``X.shape[0]`` or ``1`` for one
            random time series, defaults to 1
:type fit_sample_size: Union[float, int]
"""
if mode is not None:
self._calculator_options["mode"] = mode
if batch_size is not None:
self._calculator_options["batch_size"] = batch_size
if fit_sample_size is not None:
self._fit_sample_size = fit_sample_size
def add_preparateur(self, preparateur: DataPreparateur):
"""Adds a preparateur to the branch.
:type preparateur: DataPreparateur
"""
if not isinstance(preparateur, DataPreparateur):
raise TypeError
self._preparateurs.append(preparateur)
self._fitted = False
def get_preparateurs(self) -> List[DataPreparateur]:
"""Returns a list of all preparateurs added to the
branch.
:rtype: List[DataPreparateur]
"""
return self._preparateurs
def clear_preparateurs(self):
"""Removes all preparateurs that were added to this branch."""
self._preparateurs = []
self._fitted = False
def add_word(self, word: Word):
"""Adds a word to the branch.
:type word: Word
"""
if not isinstance(word, Word):
raise TypeError
self._words.append(word)
self._fitted = False
def get_words(self) -> List[Word]:
"""Returns a list of all words in the branch.
:rtype: List[Word]
"""
return self._words
def clear_words(self):
"""Removes all words that were added to this branch."""
self._words = []
self._sieves_extended = []
self._fitted = False
def add_sieve(self, sieve: FeatureSieve):
"""Appends a new feature sieve to the FruitBranch.
:type sieve: FeatureSieve
"""
if not isinstance(sieve, FeatureSieve):
raise TypeError
self._sieves.append(sieve)
self._fitted = False
def get_sieves(self) -> List[FeatureSieve]:
"""Returns a list of all feature sieves added to the branch.
:rtype: List[FeatureSieve]
"""
return self._sieves
def clear_sieves(self):
"""Removes all feature sieves that were added to this branch."""
self._sieves = []
self._sieve_prerequisites = None
self._sieves_extended = []
self._fitted = False
def add(self, *objects: Union[FitTransform, Word, type]):
"""Adds one or multiple object(s) to the branch.
:type objects: One or more objects of the following types:
- :class:`~fruits.preparation.abstract.DataPreparateur`
- :class:`~fruits.words.word.Word`
- :class:`~fruits.sieving.abstract.FeatureSieve`
"""
objects_flattened = np.array(objects, dtype=object).flatten()
for obj in objects_flattened:
if inspect.isclass(obj):
obj = obj()
if isinstance(obj, DataPreparateur):
self.add_preparateur(obj)
elif isinstance(obj, Word):
self.add_word(obj)
elif isinstance(obj, FeatureSieve):
self.add_sieve(obj)
else:
raise TypeError("Cannot add variable of type"+str(type(obj)))
def clear(self):
"""Clears all settings, configurations and calculated results
the branch has.
After the branch is cleared, it has the same settings as a newly
created FruitBranch object.
"""
self.clear_preparateurs()
self.clear_words()
self.clear_sieves()
self._calculator_options = {"batch_size": 1, "mode": "single"}
def nfeatures(self) -> int:
"""Returns the total number of features the current
configuration produces.
:rtype: int
"""
if self._calculator_options["mode"] == "extended":
return (
sum([s.nfeatures() for s in self._sieves])
* CachePlan(self._words).n_iterated_sums(
list(range(len(self._words)))
)
)
else:
return (
sum([s.nfeatures() for s in self._sieves])
* len(self._words)
)
def _compile(self):
# checks if the FruitBranch is configured correctly and ready
# for fitting
if not self._words:
raise RuntimeError("No words specified for ISS calculation")
if not self._sieves:
raise RuntimeError("No FeatureSieve objects specified")
def _collect_cache_keys(self) -> Set[str]:
# collects cache keys of all FitTransformers in the branch
keys: Set[str] = set()
for prep in self._preparateurs:
prep_keys = prep._get_cache_keys()
if 'coquantile' in prep_keys:
keys = keys.union(prep_keys['coquantile'])
for sieve in self._sieves:
sieve_keys = sieve._get_cache_keys()
if 'coquantile' in sieve_keys:
keys = keys.union(sieve_keys['coquantile'])
return keys
def _get_cache(self, X: np.ndarray):
# returns the already processed cache needed in this branch
self._cache = CoquantileCache()
self._cache.process(X, list(self._collect_cache_keys()))
def _select_fit_sample(self, X: np.ndarray) -> np.ndarray:
# returns a sample of the data used for fitting
if (isinstance(self._fit_sample_size, int)
and self._fit_sample_size == 1):
ind = np.random.randint(0, X.shape[0])
return X[ind:ind+1, :, :]
else:
s = int(self._fit_sample_size * X.shape[0])
if s < 1:
s = 1
indices = np.random.choice(X.shape[0], size=s, replace=False)
return X[indices, :, :]
def fit(self, X: np.ndarray):
"""Fits the branch to the given dataset. What this action
explicitly does depends on the FruitBranch configuration.
:param X: (Multidimensional) time series dataset as an array
of three dimensions. Have a look at
:meth:`~fruits.scope.force_input_shape`.
:type X: np.ndarray
"""
self._compile()
self._get_cache(X)
prepared_data = self._select_fit_sample(X)
for prep in self._preparateurs:
prep.fit(prepared_data)
prepared_data = prep.transform(prepared_data, cache=self._cache)
self._sieves_extended = []
iss_calculations = SignatureCalculator().transform(
prepared_data,
words=self._words,
**self._calculator_options
)[0]
for iterated_data in iss_calculations:
iterated_data = iterated_data.reshape(iterated_data.shape[0]
* iterated_data.shape[1],
iterated_data.shape[2])
sieves_copy = [sieve.copy() for sieve in self._sieves]
for sieve in sieves_copy:
sieve.fit(iterated_data[:, :])
self._sieves_extended.append(sieves_copy)
self._fitted = True
def transform(self, X: np.ndarray,
callbacks: List[AbstractCallback] = []) -> np.ndarray:
"""Transforms the given time series dataset. The results are
the calculated features for the different time series.
:param X: (Multidimensional) time series dataset as an array
of three dimensions. Have a look at
:meth:`~fruits.scope.force_input_shape`.
:type X: np.ndarray
:param callbacks: List of callbacks. To write your own callback,
override the class
:class:`~fruits.core.callback.AbstractCallback`.,
defaults to []
:type callbacks: List[AbstractCallback], optional
:rtype: np.ndarray
:raises: RuntimeError if ``self.fit`` wasn't called
"""
if not self._fitted:
raise RuntimeError("Missing call of self.fit")
self._get_cache(X)
prepared_data = force_input_shape(X)
for prep in self._preparateurs:
prepared_data = prep.transform(prepared_data, cache=self._cache)
for callback in callbacks:
callback.on_preparateur(prepared_data)
for callback in callbacks:
callback.on_preparation_end(prepared_data)
sieved_data = np.zeros((prepared_data.shape[0],
self.nfeatures()))
k = 0
iss_calculations = SignatureCalculator().transform(
prepared_data,
words=self._words,
**self._calculator_options
)[0]
for i, iterated_data in enumerate(iss_calculations):
for callback in callbacks:
callback.on_iterated_sum(iterated_data)
for sieve in self._sieves_extended[i]:
nf = sieve.nfeatures()
new_features = nf * iterated_data.shape[1]
for it in range(iterated_data.shape[1]):
sieved_data[:, k+it*nf:k+(it+1)*nf] = sieve.transform(
iterated_data[:, it, :],
cache=self._cache,
)
for callback in callbacks:
callback.on_sieve(sieved_data[k:k+new_features])
k += new_features
for callback in callbacks:
callback.on_sieving_end(sieved_data)
return sieved_data
def fit_transform(self, X: np.ndarray) -> np.ndarray:
"""This function does the same that calling ``self.fit(X)`` and
``self.transform(X)`` consecutively does.
:param X: (Multidimensional) time series dataset as an array
of three dimensions. Have a look at
            :meth:`~fruits.scope.force_input_shape`.
:type X: np.ndarray
:returns: Array of features.
:rtype: np.ndarray
"""
self.fit(X)
return self.transform(X)
def summary(self) -> str:
"""Returns a summary of this object. The summary contains all
added preparateurs, words and sieves.
:rtype: str
"""
summary = "{:-^80}".format("fruits.FruitBranch")
summary += f"\nNumber of features: {self.nfeatures()}"
summary += f"\n\nPreparateurs ({len(self._preparateurs)}): "
if len(self._preparateurs) == 0:
summary += "-"
else:
summary += "\n\t+ " + \
"\n\t+ ".join([str(x) for x in self._preparateurs])
summary += f"\nIterators ({len(self._words)}): "
if len(self._words) == 0:
summary += "-"
elif len(self._words) > 10:
summary += "\n\t+ " + \
"\n\t+ ".join([str(x) for x in self._words[:9]])
summary += "\n\t..."
else:
summary += "\n\t+ " + \
"\n\t+ ".join([str(x) for x in self._words])
summary += f"\nSieves ({len(self._sieves)}): "
if len(self._sieves) == 0:
summary += "-"
else:
for x in self._sieves:
lines = x.summary().split("\n")
summary += "\n\t+ " + lines[0]
summary += "\n\t "
summary += "\n\t ".join(lines[1:])
return summary
def copy(self) -> "FruitBranch":
"""Returns a shallow copy of this FruitBranch object.
:returns: Copy of the branch with same settings but all
calculations done erased.
:rtype: FruitBranch
"""
copy_ = FruitBranch()
for preparateur in self._preparateurs:
copy_.add(preparateur)
for iterator in self._words:
copy_.add(iterator)
for sieve in self._sieves:
copy_.add(sieve)
return copy_
def deepcopy(self) -> "FruitBranch":
"""Returns a deep copy of this FruitBranch object.
:returns: Deepcopy of the branch with same settings but all
calculations done erased.
:rtype: FruitBranch
"""
copy_ = FruitBranch()
for preparateur in self._preparateurs:
copy_.add(preparateur.copy())
for iterator in self._words:
copy_.add(iterator.copy())
for sieve in self._sieves:
copy_.add(sieve.copy())
copy_._calculator_options = self._calculator_options.copy()
return copy_
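
# ---------------------------------------------------------------------------
# Editor's sketch (kept as a comment; the names below are assumptions taken
# from the docstrings in this module, e.g. fruits.preparation.INC,
# fruits.words.creation.simplewords_by_weight and fruits.sieving.PPV, and are
# not verified here): a single FruitBranch can also be used on its own.
#
#     import numpy as np
#     import fruits
#
#     X_train = np.random.rand(10, 2, 50)   # three-dimensional input,
#                                           # see fruits.scope.force_input_shape
#     branch = fruits.core.fruit.FruitBranch()
#     branch.add(fruits.preparation.INC)
#     branch.add(fruits.words.creation.simplewords_by_weight(4))
#     branch.add(fruits.sieving.PPV(0.5))
#     branch.configure(mode="extended")
#     features = branch.fit_transform(X_train)  # shape (10, branch.nfeatures())
# ---------------------------------------------------------------------------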
| [
"fruits.signature.iss.CachePlan",
"numpy.random.choice",
"fruits.signature.iss.SignatureCalculator",
"numpy.array",
"numpy.random.randint",
"fruits.scope.force_input_shape",
"inspect.isclass",
"fruits.cache.CoquantileCache",
"numpy.nan_to_num"
] | [((6257, 6299), 'numpy.nan_to_num', 'np.nan_to_num', (['result'], {'copy': '(False)', 'nan': '(0.0)'}), '(result, copy=False, nan=0.0)\n', (6270, 6299), True, 'import numpy as np\n'), ((15954, 15971), 'fruits.cache.CoquantileCache', 'CoquantileCache', ([], {}), '()\n', (15969, 15971), False, 'from fruits.cache import Cache, CoquantileCache\n'), ((18848, 18868), 'fruits.scope.force_input_shape', 'force_input_shape', (['X'], {}), '(X)\n', (18865, 18868), False, 'from fruits.scope import force_input_shape, FitTransform\n'), ((13590, 13610), 'inspect.isclass', 'inspect.isclass', (['obj'], {}), '(obj)\n', (13605, 13610), False, 'import inspect\n'), ((16275, 16307), 'numpy.random.randint', 'np.random.randint', (['(0)', 'X.shape[0]'], {}), '(0, X.shape[0])\n', (16292, 16307), True, 'import numpy as np\n'), ((16482, 16533), 'numpy.random.choice', 'np.random.choice', (['X.shape[0]'], {'size': 's', 'replace': '(False)'}), '(X.shape[0], size=s, replace=False)\n', (16498, 16533), True, 'import numpy as np\n'), ((13495, 13526), 'numpy.array', 'np.array', (['objects'], {'dtype': 'object'}), '(objects, dtype=object)\n', (13503, 13526), True, 'import numpy as np\n'), ((17267, 17288), 'fruits.signature.iss.SignatureCalculator', 'SignatureCalculator', ([], {}), '()\n', (17286, 17288), False, 'from fruits.signature.iss import SignatureCalculator, CachePlan\n'), ((19319, 19340), 'fruits.signature.iss.SignatureCalculator', 'SignatureCalculator', ([], {}), '()\n', (19338, 19340), False, 'from fruits.signature.iss import SignatureCalculator, CachePlan\n'), ((14699, 14721), 'fruits.signature.iss.CachePlan', 'CachePlan', (['self._words'], {}), '(self._words)\n', (14708, 14721), False, 'from fruits.signature.iss import SignatureCalculator, CachePlan\n')] |
"""Provide trimming of input reads from Fastq or BAM files.
"""
import os
import sys
import tempfile
from bcbio.utils import (file_exists, safe_makedir,
replace_suffix, append_stem, is_pair,
replace_directory, map_wrap)
from bcbio.log import logger
from bcbio.bam import fastq
from bcbio.provenance import do
from Bio.Seq import Seq
from itertools import izip, repeat
from bcbio.distributed.transaction import file_transaction
from bcbio.pipeline import config_utils
SUPPORTED_ADAPTERS = {
"illumina": ["AACACTCTTTCCCT", "AGATCGGAAGAGCG"],
"truseq": ["AGATCGGAAGAG"],
"polya": ["AAAAAAAAAAAAA"],
"nextera": ["AATGATACGGCGA", "CAAGCAGAAGACG"]}
QUALITY_FLAGS = {5: ['"E"', '"&"'],
20: ['"T"', '"5"']}
def trim_adapters(fastq_files, dirs, config):
QUALITY_CUTOFF = 5
to_trim = _get_sequences_to_trim(config)
resources = config_utils.get_resources("AlienTrimmer", config)
try:
jarpath = config_utils.get_program("AlienTrimmer", config, "dir")
# fall back on Cutadapt if AlienTrimmer is not installed
# XXX: remove after it has been live for a while
except:
return trim_read_through(fastq_files, dirs, config)
jarfile = config_utils.get_jar("AlienTrimmer", jarpath)
jvm_opts = " ".join(resources.get("jvm_opts", ["-Xms750m", "-Xmx2g"]))
base_cmd = ("java -jar {jvm_opts} {jarfile} -k 10 ")
fastq1 = fastq_files[0]
supplied_quality_format = _get_quality_format(config)
cores = config["algorithm"].get("num_cores", 0)
out_files = _get_read_through_trimmed_outfiles(fastq_files, dirs)
fastq1_out = out_files[0]
if supplied_quality_format == "illumina":
quality_flag = QUALITY_FLAGS[QUALITY_CUTOFF][0]
else:
quality_flag = QUALITY_FLAGS[QUALITY_CUTOFF][1]
quality_flag = '-q ' + quality_flag
if len(fastq_files) == 1:
if file_exists(fastq1_out):
return [fastq1_out]
base_cmd += ("-i {fastq1} -o {tx_fastq1_out} -c {temp_file} "
"{quality_flag}")
message = "Trimming %s from %s with AlienTrimmer." % (to_trim, fastq1)
else:
fastq2 = fastq_files[1]
fastq2_out = out_files[1]
if all(map(file_exists, [fastq1_out, fastq2_out])):
return [fastq1_out, fastq2_out]
base_cmd += ("-if {fastq1} -ir {fastq2} -of {tx_fastq1_out} "
"-or {tx_fastq2_out} -c {temp_file} {quality_flag}")
message = ("Trimming %s from %s and %s with AlienTrimmer."
% (to_trim, fastq1, fastq2))
with tempfile.NamedTemporaryFile(delete=False) as temp:
temp_file = temp.name
for adapter in to_trim:
temp.write(adapter + "\n")
temp.close()
if len(fastq_files) == 1:
with file_transaction(fastq1_out) as tx_fastq1_out:
do.run(base_cmd.format(**locals()), message)
return [fastq1_out]
else:
with file_transaction([fastq1_out, fastq2_out]) as tx_out_files:
tx_fastq1_out = tx_out_files[0]
tx_fastq2_out = tx_out_files[1]
do.run(base_cmd.format(**locals()), message)
return [fastq1_out, fastq2_out]
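# For illustration only (paths are placeholders): with the default jvm_opts and
# Sanger-quality data, the single-end AlienTrimmer command assembled above
# resembles
#     java -jar -Xms750m -Xmx2g /path/AlienTrimmer.jar -k 10 \
#         -i lane1.fastq -o trim/lane1_trimmed.fastq -c /tmp/adapters.txt -q "&"
# where "&" is the Phred cutoff of 5 encoded in Sanger ASCII (offset 33).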
def trim_read_through(fastq_files, dirs, lane_config):
"""
for small insert sizes, the read length can be longer than the insert
resulting in the reverse complement of the 3' adapter being sequenced.
    this takes adapter sequences and trims only the reverse complement
of the adapter
MYSEQUENCEAAAARETPADA -> MYSEQUENCEAAAA (no polyA trim)
"""
quality_format = _get_quality_format(lane_config)
to_trim = _get_sequences_to_trim(lane_config)
out_files = _get_read_through_trimmed_outfiles(fastq_files, dirs)
fixed_files = append_stem(out_files, ".fixed")
if all(map(file_exists, fixed_files)):
return fixed_files
logger.info("Trimming %s from the 3' end of reads in %s using "
"cutadapt." % (", ".join(to_trim),
", ".join(fastq_files)))
cores = lane_config["algorithm"].get("num_cores", 1)
out_files = _cutadapt_trim(fastq_files, quality_format,
to_trim, out_files, cores)
fixed_files = remove_short_reads(out_files, dirs, lane_config)
return fixed_files
def remove_short_reads(fastq_files, dirs, lane_config):
"""
    remove reads from a single fastq file or a pair of fastq files which
    fall below a length threshold (min_read_length, 20 bases by default)
"""
min_length = int(lane_config["algorithm"].get("min_read_length", 20))
supplied_quality_format = _get_quality_format(lane_config)
if supplied_quality_format == "illumina":
quality_format = "fastq-illumina"
else:
quality_format = "fastq-sanger"
if is_pair(fastq_files):
fastq1, fastq2 = fastq_files
out_files = fastq.filter_reads_by_length(fastq1, fastq2, quality_format, min_length)
else:
out_files = [fastq.filter_single_reads_by_length(fastq_files[0],
quality_format, min_length)]
map(os.remove, fastq_files)
return out_files
def _get_read_through_trimmed_outfiles(fastq_files, dirs):
out_dir = os.path.join(dirs["work"], "trim")
safe_makedir(out_dir)
out_files = replace_directory(append_stem(fastq_files, "_trimmed"),
out_dir)
return out_files
def _get_sequences_to_trim(lane_config):
builtin_adapters = _get_builtin_adapters(lane_config)
polya = builtin_adapters.get("polya", [None])[0]
# allow for trimming of custom sequences for advanced users
custom_trim = lane_config["algorithm"].get("custom_trim", [])
builtin_adapters = {k: v for k, v in builtin_adapters.items() if
k != "polya"}
trim_sequences = custom_trim
    # for unstranded RNA-seq libraries, both polyA and polyT can appear
# at the 3' end as well
if polya:
trim_sequences += [polya, str(Seq(polya).reverse_complement())]
# also trim the reverse complement of the adapters
for _, v in builtin_adapters.items():
trim_sequences += [str(Seq(sequence)) for sequence in v]
trim_sequences += [str(Seq(sequence).reverse_complement()) for
sequence in v]
return trim_sequences
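# Illustrative check of the reverse-complement handling above; AGATCGGAAGAG is the
# built-in truseq adapter, and the snippet itself is an example, not called anywhere:
#     from Bio.Seq import Seq
#     str(Seq("AGATCGGAAGAG").reverse_complement())  # -> 'CTCTTCCGATCT'
# so a truseq library gets trimmed for both orientations of the adapter.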
def _cutadapt_trim(fastq_files, quality_format, adapters, out_files, cores):
"""Trimming with cutadapt, using version installed with bcbio-nextgen.
Uses the system executable to find the version next to our Anaconda Python.
TODO: Could we use cutadapt as a library to avoid this?
"""
if quality_format == "illumina":
quality_base = "64"
else:
quality_base = "33"
    # --times=2 tries twice to remove adapters, which will allow things like:
# realsequenceAAAAAAadapter to remove both the poly-A and the adapter
# this behavior might not be what we want; we could also do two or
# more passes of cutadapt
cutadapt = os.path.join(os.path.dirname(sys.executable), "cutadapt")
base_cmd = [cutadapt, "--times=" + "2", "--quality-base=" + quality_base,
"--quality-cutoff=5", "--format=fastq", "--minimum-length=0"]
adapter_cmd = map(lambda x: "--adapter=" + x, adapters)
base_cmd.extend(adapter_cmd)
if all(map(file_exists, out_files)):
return out_files
with file_transaction(out_files) as tmp_out_files:
if isinstance(tmp_out_files, basestring):
tmp_out_files = [tmp_out_files]
map(_run_cutadapt_on_single_file, izip(repeat(base_cmd), fastq_files,
tmp_out_files))
return out_files
@map_wrap
def _run_cutadapt_on_single_file(base_cmd, fastq_file, out_file):
stat_file = replace_suffix(out_file, ".trim_stats.txt")
with open(stat_file, "w") as stat_handle:
cmd = list(base_cmd)
cmd.extend(["--output=" + out_file, fastq_file])
do.run(cmd, "Running cutadapt on %s." % (fastq_file), None)
def _get_quality_format(lane_config):
SUPPORTED_FORMATS = ["illumina", "standard"]
quality_format = lane_config["algorithm"].get("quality_format",
"standard").lower()
if quality_format not in SUPPORTED_FORMATS:
logger.error("quality_format is set to an unsupported format. "
"Supported formats are %s."
% (", ".join(SUPPORTED_FORMATS)))
exit(1)
return quality_format
def _get_builtin_adapters(lane_config):
chemistries = lane_config["algorithm"].get("adapters", [])
adapters = {chemistry: SUPPORTED_ADAPTERS[chemistry] for
chemistry in chemistries if chemistry in SUPPORTED_ADAPTERS}
return adapters
| [
"bcbio.utils.safe_makedir",
"bcbio.utils.file_exists",
"bcbio.utils.is_pair",
"Bio.Seq.Seq",
"bcbio.provenance.do.run",
"os.path.join",
"bcbio.utils.append_stem",
"bcbio.pipeline.config_utils.get_program",
"bcbio.bam.fastq.filter_reads_by_length",
"bcbio.bam.fastq.filter_single_reads_by_length",
"os.path.dirname",
"bcbio.utils.replace_suffix",
"tempfile.NamedTemporaryFile",
"bcbio.pipeline.config_utils.get_resources",
"bcbio.distributed.transaction.file_transaction",
"bcbio.pipeline.config_utils.get_jar",
"itertools.repeat"
] | [((917, 967), 'bcbio.pipeline.config_utils.get_resources', 'config_utils.get_resources', (['"""AlienTrimmer"""', 'config'], {}), "('AlienTrimmer', config)\n", (943, 967), False, 'from bcbio.pipeline import config_utils\n'), ((1251, 1296), 'bcbio.pipeline.config_utils.get_jar', 'config_utils.get_jar', (['"""AlienTrimmer"""', 'jarpath'], {}), "('AlienTrimmer', jarpath)\n", (1271, 1296), False, 'from bcbio.pipeline import config_utils\n'), ((3797, 3829), 'bcbio.utils.append_stem', 'append_stem', (['out_files', '""".fixed"""'], {}), "(out_files, '.fixed')\n", (3808, 3829), False, 'from bcbio.utils import file_exists, safe_makedir, replace_suffix, append_stem, is_pair, replace_directory, map_wrap\n'), ((4803, 4823), 'bcbio.utils.is_pair', 'is_pair', (['fastq_files'], {}), '(fastq_files)\n', (4810, 4823), False, 'from bcbio.utils import file_exists, safe_makedir, replace_suffix, append_stem, is_pair, replace_directory, map_wrap\n'), ((5251, 5285), 'os.path.join', 'os.path.join', (["dirs['work']", '"""trim"""'], {}), "(dirs['work'], 'trim')\n", (5263, 5285), False, 'import os\n'), ((5290, 5311), 'bcbio.utils.safe_makedir', 'safe_makedir', (['out_dir'], {}), '(out_dir)\n', (5302, 5311), False, 'from bcbio.utils import file_exists, safe_makedir, replace_suffix, append_stem, is_pair, replace_directory, map_wrap\n'), ((7808, 7851), 'bcbio.utils.replace_suffix', 'replace_suffix', (['out_file', '""".trim_stats.txt"""'], {}), "(out_file, '.trim_stats.txt')\n", (7822, 7851), False, 'from bcbio.utils import file_exists, safe_makedir, replace_suffix, append_stem, is_pair, replace_directory, map_wrap\n'), ((995, 1050), 'bcbio.pipeline.config_utils.get_program', 'config_utils.get_program', (['"""AlienTrimmer"""', 'config', '"""dir"""'], {}), "('AlienTrimmer', config, 'dir')\n", (1019, 1050), False, 'from bcbio.pipeline import config_utils\n'), ((1916, 1939), 'bcbio.utils.file_exists', 'file_exists', (['fastq1_out'], {}), '(fastq1_out)\n', (1927, 1939), False, 'from bcbio.utils import file_exists, safe_makedir, replace_suffix, append_stem, is_pair, replace_directory, map_wrap\n'), ((2609, 2650), 'tempfile.NamedTemporaryFile', 'tempfile.NamedTemporaryFile', ([], {'delete': '(False)'}), '(delete=False)\n', (2636, 2650), False, 'import tempfile\n'), ((4882, 4954), 'bcbio.bam.fastq.filter_reads_by_length', 'fastq.filter_reads_by_length', (['fastq1', 'fastq2', 'quality_format', 'min_length'], {}), '(fastq1, fastq2, quality_format, min_length)\n', (4910, 4954), False, 'from bcbio.bam import fastq\n'), ((5346, 5382), 'bcbio.utils.append_stem', 'append_stem', (['fastq_files', '"""_trimmed"""'], {}), "(fastq_files, '_trimmed')\n", (5357, 5382), False, 'from bcbio.utils import file_exists, safe_makedir, replace_suffix, append_stem, is_pair, replace_directory, map_wrap\n'), ((7044, 7075), 'os.path.dirname', 'os.path.dirname', (['sys.executable'], {}), '(sys.executable)\n', (7059, 7075), False, 'import os\n'), ((7413, 7440), 'bcbio.distributed.transaction.file_transaction', 'file_transaction', (['out_files'], {}), '(out_files)\n', (7429, 7440), False, 'from bcbio.distributed.transaction import file_transaction\n'), ((7992, 8049), 'bcbio.provenance.do.run', 'do.run', (['cmd', "('Running cutadapt on %s.' % fastq_file)", 'None'], {}), "(cmd, 'Running cutadapt on %s.' 
% fastq_file, None)\n", (7998, 8049), False, 'from bcbio.provenance import do\n'), ((2827, 2855), 'bcbio.distributed.transaction.file_transaction', 'file_transaction', (['fastq1_out'], {}), '(fastq1_out)\n', (2843, 2855), False, 'from bcbio.distributed.transaction import file_transaction\n'), ((2982, 3024), 'bcbio.distributed.transaction.file_transaction', 'file_transaction', (['[fastq1_out, fastq2_out]'], {}), '([fastq1_out, fastq2_out])\n', (2998, 3024), False, 'from bcbio.distributed.transaction import file_transaction\n'), ((4986, 5065), 'bcbio.bam.fastq.filter_single_reads_by_length', 'fastq.filter_single_reads_by_length', (['fastq_files[0]', 'quality_format', 'min_length'], {}), '(fastq_files[0], quality_format, min_length)\n', (5021, 5065), False, 'from bcbio.bam import fastq\n'), ((6187, 6200), 'Bio.Seq.Seq', 'Seq', (['sequence'], {}), '(sequence)\n', (6190, 6200), False, 'from Bio.Seq import Seq\n'), ((7600, 7616), 'itertools.repeat', 'repeat', (['base_cmd'], {}), '(base_cmd)\n', (7606, 7616), False, 'from itertools import izip, repeat\n'), ((6024, 6034), 'Bio.Seq.Seq', 'Seq', (['polya'], {}), '(polya)\n', (6027, 6034), False, 'from Bio.Seq import Seq\n'), ((6252, 6265), 'Bio.Seq.Seq', 'Seq', (['sequence'], {}), '(sequence)\n', (6255, 6265), False, 'from Bio.Seq import Seq\n')] |
import numpy as np
from sklearn.metrics import fbeta_score, roc_curve, auc, confusion_matrix
from sklearn.decomposition import PCA
from sklearn import random_projection
from sklearn import svm
from sklearn.ensemble import IsolationForest
import matplotlib.pyplot as plt
from keras.layers import Dense, Input, Dropout
from keras.models import Model
from keras import regularizers
from keras.models import Sequential
from keras.optimizers import Adam
from keras.regularizers import l2
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import AdaBoostClassifier
import xgboost as xgb
def one_class_svm(x_train, x_test, x_attacks, svm_results):
# SVM Hyper-parameters
nus = [0.01]
gammas = ['auto']
dimensions = [int(i*x_test.shape[1]) for i in [0.25, 0.35, 0.5, 0.75, 0.9, 1]]
dimensions = list(filter(lambda x: x > 0, dimensions))
for n in dimensions:
x_reduced_pca, test_reduced_pca, attack_reduced_pca = reduce_dimensionality(n, x_train, x_test, 'PCA',
attack=x_attacks)
for nu in nus:
for gamma in gammas:
# Fit classifier with PCA reduced data
classifier = svm.OneClassSVM(kernel='rbf', gamma=gamma, nu=nu, cache_size=7000)
classifier.fit(x_reduced_pca)
fb, area, tnr, tpr_train, tpr_test = unsupervised_evaluation(classifier, x_reduced_pca,
test_reduced_pca,
attack_reduced_pca)
svm_results = svm_results.append({'nu': nu, 'gamma': gamma, 'n_components': n, 'TPR_train': tpr_train,
'TPR_test': tpr_test, 'TNR': tnr, 'model': 'svm', 'auc': area,
'f_beta': fb, 'projection': 'PCA'}, ignore_index=True)
                # Fit classifier on the original (non-reduced) data
classifier = svm.OneClassSVM(kernel='rbf', gamma=gamma, nu=nu, cache_size=7000)
classifier.fit(x_train)
fb, area, tnr, tpr_train, tpr_test = unsupervised_evaluation(classifier, x_train,
x_test, x_attacks)
svm_results = svm_results.append({'nu': nu, 'gamma': gamma, 'n_components': x_test.shape[1],
'TPR_train': tpr_train,
'TPR_test': tpr_test, 'TNR': tnr, 'model': 'svm', 'auc': area,
'f_beta': fb, 'projection': 'None'}, ignore_index=True)
return svm_results
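# Context note (sklearn semantics): nu is an upper bound on the fraction of
# training errors and a lower bound on the fraction of support vectors, so
# nu=0.01 lets roughly 1% of the legitimate training samples fall outside the
# learned boundary. Illustrative call, assuming svm_results is an existing
# pandas DataFrame of results:
#     svm_results = one_class_svm(x_train, x_test, x_attacks, svm_results)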
def isolation_forest(x_train, x_test, x_attacks, isolation_results):
# Isolation Forest Hyper-parameters
estimators = [200, 100]
contaminations = [0.01]
dimensions = [int(i*x_test.shape[1]) for i in [0.25, 0.5, 0.9, 1]]
dimensions = list(filter(lambda x: x > 0, dimensions))
for n in dimensions:
x_reduced_pca, test_reduced_pca, attack_reduced_pca = reduce_dimensionality(n, x_train, x_test, 'PCA',
attack=x_attacks)
x_reduced_rp, test_reduced_rp, attack_reduced_rp = reduce_dimensionality(n, x_train, x_test, 'RP',
attack=x_attacks)
max_features = list(range(1, n + 1, 4))
for estimator in estimators:
for contamination in contaminations:
for max_feature in max_features:
classifier = IsolationForest(n_estimators=estimator,
contamination=contamination,
max_features=max_feature,
n_jobs=7)
classifier.fit(x_reduced_pca)
fb, area, tnr, tpr_train, tpr_test = unsupervised_evaluation(classifier, x_reduced_pca,
test_reduced_pca, attack_reduced_pca)
isolation_results = isolation_results.append({'estimators': estimator, 'contamination': contamination,
'n_components': n, 'max_features': max_feature,
'TPR_train': tpr_train,
'TPR_test': tpr_test,
'TNR': tnr,
'model': 'isolation_forest',
'auc': area,
'f_beta': fb,
'projection': 'PCA'}, ignore_index=True)
classifier = IsolationForest(n_estimators=estimator,
contamination=contamination,
max_features=max_feature,
n_jobs=7)
classifier.fit(x_reduced_rp)
fb, area, tnr, tpr_train, tpr_test = unsupervised_evaluation(classifier, x_reduced_rp,
test_reduced_rp, attack_reduced_rp)
isolation_results = isolation_results.append({'estimators': estimator, 'contamination': contamination,
'n_components': n, 'max_features': max_feature,
'TPR_train': tpr_train,
'TPR_test': tpr_test,
'TNR': tnr,
'model': 'isolation_forest',
'auc': area,
'f_beta': fb,
'projection': 'RP'}, ignore_index=True)
return isolation_results
def autoencoder(x_train, x_test, x_attacks, ae_svm_results):
latent_dim = 3
input_vector = Input(shape=(x_train.shape[1],))
encoded = Dense(latent_dim, activation='relu')(input_vector)
decoded = Dense(x_train.shape[1], activity_regularizer=regularizers.l1(10e-5))(encoded)
autoencoder = Model(input_vector, decoded)
encoder = Model(input_vector, encoded)
autoencoder.compile(optimizer=Adam(lr=0.001), loss='mse')
network_history = autoencoder.fit(x_train, x_train, shuffle=True, batch_size=16, epochs=10,
validation_data=(x_test, x_test), verbose=True)
plot_history(network_history, 'AE history')
print('Mean loss on train: {}'.format(autoencoder.evaluate(x_train, x_train, batch_size=8, verbose=False)))
print('Mean loss on test: {}'.format(autoencoder.evaluate(x_test, x_test, batch_size=8, verbose=False)))
print('Mean loss on attacks: {}'.format(autoencoder.evaluate(x_attacks, x_attacks, batch_size=8, verbose=False)))
x_train_red = encoder.predict(x_train, batch_size=8)
x_test_red = encoder.predict(x_test, batch_size=8)
x_attacks_red = encoder.predict(x_attacks, batch_size=8)
nus = [0.01]
gammas = [x_train_red.shape[1], 2*x_train_red.shape[1], x_train_red.shape[1]/2, 'auto']
for nu in nus:
for gamma in gammas:
classifier = svm.OneClassSVM(kernel='rbf', gamma=gamma, nu=nu, cache_size=7000)
classifier.fit(x_train_red)
fb, area, tnr, tpr_train, tpr_test = unsupervised_evaluation(classifier, x_train_red,
x_test_red, x_attacks_red)
ae_svm_results = ae_svm_results.append({'nu': nu, 'gamma': gamma, 'n_components': latent_dim,
'TPR_train': tpr_train, 'TPR_test': tpr_test, 'TNR': tnr,
'model': 'ae-svm', 'auc': area, 'f_beta': fb}, ignore_index=True)
return ae_svm_results
def unsupervised_evaluation(classifier, train_set, test_set, attack_set, beta=20):
y_pred_train = classifier.predict(train_set)
y_pred_test = classifier.predict(test_set)
y_pred_outliers = classifier.predict(attack_set)
n_accurate_train = y_pred_train[y_pred_train == 1].size
n_accurate_test = y_pred_test[y_pred_test == 1].size
n_accurate_outliers = y_pred_outliers[y_pred_outliers == -1].size
fpr, tpr, _ = roc_curve(np.concatenate([np.ones(y_pred_test.shape[0]), -1*np.ones(y_pred_outliers.shape[0])]),
np.concatenate([y_pred_test, y_pred_outliers]), pos_label=1)
fb = fbeta_score(np.concatenate([np.ones(y_pred_test.shape[0]), -1*np.ones(y_pred_outliers.shape[0])]),
np.concatenate([y_pred_test, y_pred_outliers]), beta=beta, pos_label=1)
tnr = n_accurate_outliers/attack_set.shape[0]
tpr_test = n_accurate_test/test_set.shape[0]
tpr_train = n_accurate_train/train_set.shape[0]
area = auc(fpr, tpr)
return fb, area, tnr, tpr_train, tpr_test
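# Label convention used above: OneClassSVM and IsolationForest predict +1 for
# inliers and -1 for outliers, so test samples are taken as +1 and attacks as -1
# when building the ROC curve. Tiny illustrative check with made-up predictions:
#     y_pred_outliers = np.array([-1, -1, 1, -1])
#     tnr = y_pred_outliers[y_pred_outliers == -1].size / y_pred_outliers.shape[0]  # 0.75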
def neural_network(x_train, y_train, x_test, y_test):
model = Sequential()
model.add(Dense(128, input_shape=(x_train.shape[1],), activation='relu', kernel_regularizer=l2(0.01)))
model.add(Dropout(0.1))
model.add(Dense(64, activation='relu', kernel_regularizer=l2(0.01)))
model.add(Dropout(0.2))
model.add(Dense(128, kernel_initializer='glorot_uniform', activation='sigmoid'))
model.add(Dropout(0.4))
model.add(Dense(64, kernel_initializer='glorot_uniform', activation='tanh'))
model.add(Dropout(0.5))
model.add(Dense(32, kernel_initializer='glorot_uniform', activation='tanh'))
model.add(Dropout(0.4))
model.add(Dense(128, kernel_initializer='glorot_uniform', activation='tanh'))
model.add(Dropout(0.3))
model.add(Dense(1, kernel_initializer='normal', activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='sgd', metrics=['accuracy'])
network_history = model.fit(x_train, y_train, batch_size=128, epochs=10, verbose=0,
validation_data=(x_test, y_test))
plot_history_with_acc(network_history)
return model
def random_forest(x_train, y_train, x_test, y_test, random_forest_results):
# Random forest Hyper-parameters
estimators = [150, 200]
dimensions = [int(i*x_test.shape[1]) for i in [1]]
for estimator in estimators:
for n in dimensions:
x_reduced_pca, test_reduced_pca = reduce_dimensionality(n, x_train, x_test, 'PCA')
x_reduced_rp, test_reduced_rp = reduce_dimensionality(n, x_train, x_test, 'RP')
classifier = RandomForestClassifier(n_estimators=estimator, n_jobs=7)
classifier.fit(x_reduced_pca, y_train)
fb, area, tnr, tpr = supervised_evaluation(classifier, test_reduced_pca, y_test)
random_forest_results = random_forest_results.append({'estimators': estimator,
'n_components': n,
'TPR': tpr,
'TNR': tnr,
'model': 'random_forest',
'auc': area,
'f_beta': fb,
'projection': 'PCA'}, ignore_index=True)
classifier = RandomForestClassifier(n_estimators=estimator, n_jobs=7)
classifier.fit(x_reduced_rp, y_train)
fb, area, tnr, tpr = supervised_evaluation(classifier, test_reduced_rp, y_test)
random_forest_results = random_forest_results.append({'estimators': estimator,
'n_components': n,
'TPR': tpr,
'TNR': tnr,
'model': 'random_forest',
'auc': area,
'f_beta': fb,
'projection': 'RP'}, ignore_index=True)
classifier = RandomForestClassifier(n_estimators=estimator, n_jobs=7)
classifier.fit(x_train, y_train)
fb, area, tnr, tpr = supervised_evaluation(classifier, x_test, y_test)
random_forest_results = random_forest_results.append({'estimators': estimator,
'n_components': x_test.shape[1],
'TPR': tpr,
'TNR': tnr,
'model': 'random_forest',
'auc': area,
'f_beta': fb,
'projection': 'None'}, ignore_index=True)
return random_forest_results
def ada_boost(x_train, y_train, x_test, y_test, ada_boost_results):
# AdaBoost Hyper-parameters
learning_rates = [0.55]
dimensions = [int(i*x_test.shape[1]) for i in [1]]
for n in dimensions:
x_reduced_pca, test_reduced_pca = reduce_dimensionality(n, x_train, x_test, 'PCA')
x_reduced_rp, test_reduced_rp = reduce_dimensionality(n, x_train, x_test, 'RP')
for lr in learning_rates:
classifier = AdaBoostClassifier(learning_rate=lr)
classifier.fit(x_reduced_pca, y_train)
fb, area, tnr, tpr = supervised_evaluation(classifier, test_reduced_pca, y_test)
ada_boost_results = ada_boost_results.append({'LR': lr,
'n_components': n,
'TPR': tpr,
'TNR': tnr,
'model': 'ada_boost',
'auc': area,
'f_beta': fb,
'projection': 'PCA'}, ignore_index=True)
classifier = AdaBoostClassifier(learning_rate=lr)
classifier.fit(x_reduced_rp, y_train)
fb, area, tnr, tpr = supervised_evaluation(classifier, test_reduced_rp, y_test)
ada_boost_results = ada_boost_results.append({'LR': lr,
'n_components': n,
'TPR': tpr,
'TNR': tnr,
'model': 'ada_boost',
'auc': area,
'f_beta': fb,
'projection': 'RP'}, ignore_index=True)
return ada_boost_results
def svm_classifier(x_train, y_train, x_test, y_test, svm_results):
# SVC Hyper-parameters
dimensions = [int(i*x_test.shape[1]) for i in [1]]
for n in dimensions:
x_reduced_pca, test_reduced_pca = reduce_dimensionality(n, x_train, x_test, 'PCA')
x_reduced_rp, test_reduced_rp = reduce_dimensionality(n, x_train, x_test, 'RP')
classifier = svm.SVC(gamma='auto', cache_size=7000)
classifier.fit(x_reduced_pca, y_train)
fb, area, tnr, tpr = supervised_evaluation(classifier, test_reduced_pca, y_test)
svm_results = svm_results.append({
'n_components': n,
'TPR': tpr,
'TNR': tnr,
'model': 'svm',
'auc': area,
'f_beta': fb,
'projection': 'PCA'}, ignore_index=True)
classifier = svm.SVC(gamma='auto', cache_size=7000)
classifier.fit(x_reduced_rp, y_train)
fb, area, tnr, tpr = supervised_evaluation(classifier, test_reduced_rp, y_test)
svm_results = svm_results.append({
'n_components': n,
'TPR': tpr,
'TNR': tnr,
'model': 'svm',
'auc': area,
'f_beta': fb,
'projection': 'RP'}, ignore_index=True)
return svm_results
def xg_boost(x_train, y_train, x_test, y_test, xg_boost_results):
# XGBoost Hyper-parameters
dimensions = [int(i*x_test.shape[1]) for i in [1]]
for n in dimensions:
x_reduced_pca, test_reduced_pca = reduce_dimensionality(n, x_train, x_test, 'PCA')
x_reduced_rp, test_reduced_rp = reduce_dimensionality(n, x_train, x_test, 'RP')
classifier = xgb.XGBClassifier()
grid = {'max_depth': 10}
classifier.set_params(**grid)
classifier.fit(x_reduced_pca, y_train)
fb, area, tnr, tpr = supervised_evaluation(classifier, test_reduced_pca, y_test)
xg_boost_results = xg_boost_results.append({
'n_components': n,
'TPR': tpr,
'TNR': tnr,
'model': 'xgboost',
'auc': area,
'f_beta': fb,
'projection': 'PCA'}, ignore_index=True)
classifier = xgb.XGBClassifier()
grid = {'max_depth': 10}
classifier.set_params(**grid)
classifier.fit(x_reduced_rp, y_train)
fb, area, tnr, tpr = supervised_evaluation(classifier, test_reduced_rp, y_test)
xg_boost_results = xg_boost_results.append({
'n_components': n,
'TPR': tpr,
'TNR': tnr,
'model': 'xgboost',
'auc': area,
'f_beta': fb,
'projection': 'RP'}, ignore_index=True)
classifier = xgb.XGBClassifier()
grid = {'max_depth': 10}
classifier.set_params(**grid)
classifier.fit(x_train, y_train)
fb, area, tnr, tpr = supervised_evaluation(classifier, x_test, y_test)
xg_boost_results = xg_boost_results.append({
'n_components': x_test.shape[1],
'TPR': tpr,
'TNR': tnr,
'model': 'xgboost',
'auc': area,
'f_beta': fb,
'projection': 'None'}, ignore_index=True)
return xg_boost_results
def supervised_evaluation(classifier, x_test, y_test, beta=20, nn=False):
if not nn:
y_pred = classifier.predict(x_test)
confusion_matrix(y_test, y_pred)
fpr, tpr, _ = roc_curve(y_test, y_pred)
fb = fbeta_score(y_test, y_pred, beta=beta, pos_label=1)
area = auc(fpr, tpr)
tpr = tpr[1]
tnr = 1 - fpr[1]
return fb, area, tnr, tpr
def plot_roc(classifier, test, attacks, title):
y_pred_test = classifier.predict(test)
y_pred_outliers = classifier.predict(attacks)
fpr, tpr, _ = roc_curve(np.concatenate([np.ones(y_pred_test.shape[0]),
-1*np.ones(y_pred_outliers.shape[0])]),
np.concatenate([y_pred_test, y_pred_outliers]), pos_label=1)
roc_auc = auc(fpr, tpr)
plt.figure()
lw = 2
plt.plot(fpr, tpr, color='darkorange',
lw=lw, label='ROC curve (area = %0.2f)' % roc_auc)
plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic: {}'.format(title))
plt.legend(loc='lower right')
plt.show()
def plot_roc_supervised(classifier, x_test, y_test, title, nn=False):
y_pred = classifier.predict(x_test)
fpr, tpr, _ = roc_curve(y_test, y_pred)
if nn:
y_pred = [round(x[0]) for x in y_pred]
print(confusion_matrix(y_test, y_pred))
roc_auc = auc(fpr, tpr)
plt.figure()
lw = 2
plt.plot(fpr, tpr, color='darkorange',
lw=lw, label='ROC curve (area = %0.2f)' % roc_auc)
plt.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic {}'.format(title))
plt.legend(loc='lower right')
plt.show()
def plot_history(network_history, title):
plt.figure(figsize=(10, 5))
plt.title(title)
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.semilogy(network_history.history['loss'])
plt.semilogy(network_history.history['val_loss'])
plt.legend(['Training', 'Validation'])
plt.show()
def plot_history_with_acc(network_history, title='Loss and Accuracy'):
plt.figure(figsize=(15, 10))
plt.subplot(211)
plt.title(title)
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.semilogy(network_history.history['loss'])
plt.semilogy(network_history.history['val_loss'])
plt.legend(['Training', 'Validation'])
plt.subplot(212)
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.plot(network_history.history['acc'])
plt.plot(network_history.history['val_acc'])
plt.legend(['Training', 'Validation'], loc='lower right')
plt.show()
def reduce_dimensionality(n_components, train, test, method, attack=None):
if method == 'PCA':
matrix = PCA(n_components=n_components)
elif method == 'RP':
matrix = random_projection.SparseRandomProjection(n_components=n_components, random_state=7)
else:
print('unknown projection method, choose either RP or PCA')
return None
train = matrix.fit_transform(train)
test = matrix.transform(test)
if attack is None:
return train, test
attack = matrix.transform(attack)
return train, test, attack
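# Illustrative usage (the number of components and array shapes are arbitrary):
#     x_tr, x_te, x_at = reduce_dimensionality(10, x_train, x_test, 'PCA',
#                                              attack=x_attacks)
# The projection is fitted on the training split only and then applied to the
# test/attack splits, so no information from those sets leaks into the fit.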
| [
"matplotlib.pyplot.ylabel",
"sklearn.metrics.auc",
"sklearn.ensemble.AdaBoostClassifier",
"sklearn.metrics.roc_curve",
"keras.layers.Dense",
"matplotlib.pyplot.semilogy",
"sklearn.decomposition.PCA",
"sklearn.random_projection.SparseRandomProjection",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"keras.models.Model",
"numpy.concatenate",
"matplotlib.pyplot.ylim",
"sklearn.svm.OneClassSVM",
"keras.regularizers.l1",
"sklearn.metrics.confusion_matrix",
"keras.optimizers.Adam",
"numpy.ones",
"sklearn.ensemble.RandomForestClassifier",
"keras.models.Sequential",
"sklearn.metrics.fbeta_score",
"keras.regularizers.l2",
"matplotlib.pyplot.title",
"matplotlib.pyplot.xlim",
"keras.layers.Dropout",
"matplotlib.pyplot.legend",
"xgboost.XGBClassifier",
"matplotlib.pyplot.show",
"sklearn.svm.SVC",
"sklearn.ensemble.IsolationForest",
"keras.layers.Input",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.subplot"
] | [((6712, 6744), 'keras.layers.Input', 'Input', ([], {'shape': '(x_train.shape[1],)'}), '(shape=(x_train.shape[1],))\n', (6717, 6744), False, 'from keras.layers import Dense, Input, Dropout\n'), ((6920, 6948), 'keras.models.Model', 'Model', (['input_vector', 'decoded'], {}), '(input_vector, decoded)\n', (6925, 6948), False, 'from keras.models import Model\n'), ((6963, 6991), 'keras.models.Model', 'Model', (['input_vector', 'encoded'], {}), '(input_vector, encoded)\n', (6968, 6991), False, 'from keras.models import Model\n'), ((9640, 9653), 'sklearn.metrics.auc', 'auc', (['fpr', 'tpr'], {}), '(fpr, tpr)\n', (9643, 9653), False, 'from sklearn.metrics import fbeta_score, roc_curve, auc, confusion_matrix\n'), ((9768, 9780), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (9778, 9780), False, 'from keras.models import Sequential\n'), ((19241, 19260), 'xgboost.XGBClassifier', 'xgb.XGBClassifier', ([], {}), '()\n', (19258, 19260), True, 'import xgboost as xgb\n'), ((20524, 20537), 'sklearn.metrics.auc', 'auc', (['fpr', 'tpr'], {}), '(fpr, tpr)\n', (20527, 20537), False, 'from sklearn.metrics import fbeta_score, roc_curve, auc, confusion_matrix\n'), ((20542, 20554), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (20552, 20554), True, 'import matplotlib.pyplot as plt\n'), ((20570, 20664), 'matplotlib.pyplot.plot', 'plt.plot', (['fpr', 'tpr'], {'color': '"""darkorange"""', 'lw': 'lw', 'label': "('ROC curve (area = %0.2f)' % roc_auc)"}), "(fpr, tpr, color='darkorange', lw=lw, label=\n 'ROC curve (area = %0.2f)' % roc_auc)\n", (20578, 20664), True, 'import matplotlib.pyplot as plt\n'), ((20677, 20738), 'matplotlib.pyplot.plot', 'plt.plot', (['[0, 1]', '[0, 1]'], {'color': '"""navy"""', 'lw': 'lw', 'linestyle': '"""--"""'}), "([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')\n", (20685, 20738), True, 'import matplotlib.pyplot as plt\n'), ((20743, 20763), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0.0, 1.0]'], {}), '([0.0, 1.0])\n', (20751, 20763), True, 'import matplotlib.pyplot as plt\n'), ((20768, 20789), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0.0, 1.05]'], {}), '([0.0, 1.05])\n', (20776, 20789), True, 'import matplotlib.pyplot as plt\n'), ((20794, 20827), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""False Positive Rate"""'], {}), "('False Positive Rate')\n", (20804, 20827), True, 'import matplotlib.pyplot as plt\n'), ((20832, 20864), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""True Positive Rate"""'], {}), "('True Positive Rate')\n", (20842, 20864), True, 'import matplotlib.pyplot as plt\n'), ((20938, 20967), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower right"""'}), "(loc='lower right')\n", (20948, 20967), True, 'import matplotlib.pyplot as plt\n'), ((20972, 20982), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (20980, 20982), True, 'import matplotlib.pyplot as plt\n'), ((21114, 21139), 'sklearn.metrics.roc_curve', 'roc_curve', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (21123, 21139), False, 'from sklearn.metrics import fbeta_score, roc_curve, auc, confusion_matrix\n'), ((21258, 21271), 'sklearn.metrics.auc', 'auc', (['fpr', 'tpr'], {}), '(fpr, tpr)\n', (21261, 21271), False, 'from sklearn.metrics import fbeta_score, roc_curve, auc, confusion_matrix\n'), ((21276, 21288), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (21286, 21288), True, 'import matplotlib.pyplot as plt\n'), ((21304, 21398), 'matplotlib.pyplot.plot', 'plt.plot', (['fpr', 'tpr'], {'color': '"""darkorange"""', 'lw': 'lw', 'label': "('ROC curve 
(area = %0.2f)' % roc_auc)"}), "(fpr, tpr, color='darkorange', lw=lw, label=\n 'ROC curve (area = %0.2f)' % roc_auc)\n", (21312, 21398), True, 'import matplotlib.pyplot as plt\n'), ((21411, 21472), 'matplotlib.pyplot.plot', 'plt.plot', (['[0, 1]', '[0, 1]'], {'color': '"""navy"""', 'lw': 'lw', 'linestyle': '"""--"""'}), "([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')\n", (21419, 21472), True, 'import matplotlib.pyplot as plt\n'), ((21477, 21497), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0.0, 1.0]'], {}), '([0.0, 1.0])\n', (21485, 21497), True, 'import matplotlib.pyplot as plt\n'), ((21502, 21523), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0.0, 1.05]'], {}), '([0.0, 1.05])\n', (21510, 21523), True, 'import matplotlib.pyplot as plt\n'), ((21528, 21561), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""False Positive Rate"""'], {}), "('False Positive Rate')\n", (21538, 21561), True, 'import matplotlib.pyplot as plt\n'), ((21566, 21598), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""True Positive Rate"""'], {}), "('True Positive Rate')\n", (21576, 21598), True, 'import matplotlib.pyplot as plt\n'), ((21671, 21700), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower right"""'}), "(loc='lower right')\n", (21681, 21700), True, 'import matplotlib.pyplot as plt\n'), ((21705, 21715), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (21713, 21715), True, 'import matplotlib.pyplot as plt\n'), ((21764, 21791), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 5)'}), '(figsize=(10, 5))\n', (21774, 21791), True, 'import matplotlib.pyplot as plt\n'), ((21796, 21812), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (21805, 21812), True, 'import matplotlib.pyplot as plt\n'), ((21817, 21837), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epochs"""'], {}), "('Epochs')\n", (21827, 21837), True, 'import matplotlib.pyplot as plt\n'), ((21842, 21860), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Loss"""'], {}), "('Loss')\n", (21852, 21860), True, 'import matplotlib.pyplot as plt\n'), ((21865, 21910), 'matplotlib.pyplot.semilogy', 'plt.semilogy', (["network_history.history['loss']"], {}), "(network_history.history['loss'])\n", (21877, 21910), True, 'import matplotlib.pyplot as plt\n'), ((21915, 21964), 'matplotlib.pyplot.semilogy', 'plt.semilogy', (["network_history.history['val_loss']"], {}), "(network_history.history['val_loss'])\n", (21927, 21964), True, 'import matplotlib.pyplot as plt\n'), ((21969, 22007), 'matplotlib.pyplot.legend', 'plt.legend', (["['Training', 'Validation']"], {}), "(['Training', 'Validation'])\n", (21979, 22007), True, 'import matplotlib.pyplot as plt\n'), ((22012, 22022), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (22020, 22022), True, 'import matplotlib.pyplot as plt\n'), ((22100, 22128), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(15, 10)'}), '(figsize=(15, 10))\n', (22110, 22128), True, 'import matplotlib.pyplot as plt\n'), ((22133, 22149), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(211)'], {}), '(211)\n', (22144, 22149), True, 'import matplotlib.pyplot as plt\n'), ((22154, 22170), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (22163, 22170), True, 'import matplotlib.pyplot as plt\n'), ((22175, 22195), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epochs"""'], {}), "('Epochs')\n", (22185, 22195), True, 'import matplotlib.pyplot as plt\n'), ((22200, 22218), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Loss"""'], {}), "('Loss')\n", (22210, 22218), True, 
'import matplotlib.pyplot as plt\n'), ((22223, 22268), 'matplotlib.pyplot.semilogy', 'plt.semilogy', (["network_history.history['loss']"], {}), "(network_history.history['loss'])\n", (22235, 22268), True, 'import matplotlib.pyplot as plt\n'), ((22273, 22322), 'matplotlib.pyplot.semilogy', 'plt.semilogy', (["network_history.history['val_loss']"], {}), "(network_history.history['val_loss'])\n", (22285, 22322), True, 'import matplotlib.pyplot as plt\n'), ((22327, 22365), 'matplotlib.pyplot.legend', 'plt.legend', (["['Training', 'Validation']"], {}), "(['Training', 'Validation'])\n", (22337, 22365), True, 'import matplotlib.pyplot as plt\n'), ((22371, 22387), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(212)'], {}), '(212)\n', (22382, 22387), True, 'import matplotlib.pyplot as plt\n'), ((22392, 22412), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epochs"""'], {}), "('Epochs')\n", (22402, 22412), True, 'import matplotlib.pyplot as plt\n'), ((22417, 22439), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Accuracy"""'], {}), "('Accuracy')\n", (22427, 22439), True, 'import matplotlib.pyplot as plt\n'), ((22444, 22484), 'matplotlib.pyplot.plot', 'plt.plot', (["network_history.history['acc']"], {}), "(network_history.history['acc'])\n", (22452, 22484), True, 'import matplotlib.pyplot as plt\n'), ((22489, 22533), 'matplotlib.pyplot.plot', 'plt.plot', (["network_history.history['val_acc']"], {}), "(network_history.history['val_acc'])\n", (22497, 22533), True, 'import matplotlib.pyplot as plt\n'), ((22538, 22595), 'matplotlib.pyplot.legend', 'plt.legend', (["['Training', 'Validation']"], {'loc': '"""lower right"""'}), "(['Training', 'Validation'], loc='lower right')\n", (22548, 22595), True, 'import matplotlib.pyplot as plt\n'), ((22600, 22610), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (22608, 22610), True, 'import matplotlib.pyplot as plt\n'), ((6759, 6795), 'keras.layers.Dense', 'Dense', (['latent_dim'], {'activation': '"""relu"""'}), "(latent_dim, activation='relu')\n", (6764, 6795), False, 'from keras.layers import Dense, Input, Dropout\n'), ((9214, 9260), 'numpy.concatenate', 'np.concatenate', (['[y_pred_test, y_pred_outliers]'], {}), '([y_pred_test, y_pred_outliers])\n', (9228, 9260), True, 'import numpy as np\n'), ((9404, 9450), 'numpy.concatenate', 'np.concatenate', (['[y_pred_test, y_pred_outliers]'], {}), '([y_pred_test, y_pred_outliers])\n', (9418, 9450), True, 'import numpy as np\n'), ((9903, 9915), 'keras.layers.Dropout', 'Dropout', (['(0.1)'], {}), '(0.1)\n', (9910, 9915), False, 'from keras.layers import Dense, Input, Dropout\n'), ((10005, 10017), 'keras.layers.Dropout', 'Dropout', (['(0.2)'], {}), '(0.2)\n', (10012, 10017), False, 'from keras.layers import Dense, Input, Dropout\n'), ((10034, 10103), 'keras.layers.Dense', 'Dense', (['(128)'], {'kernel_initializer': '"""glorot_uniform"""', 'activation': '"""sigmoid"""'}), "(128, kernel_initializer='glorot_uniform', activation='sigmoid')\n", (10039, 10103), False, 'from keras.layers import Dense, Input, Dropout\n'), ((10119, 10131), 'keras.layers.Dropout', 'Dropout', (['(0.4)'], {}), '(0.4)\n', (10126, 10131), False, 'from keras.layers import Dense, Input, Dropout\n'), ((10148, 10213), 'keras.layers.Dense', 'Dense', (['(64)'], {'kernel_initializer': '"""glorot_uniform"""', 'activation': '"""tanh"""'}), "(64, kernel_initializer='glorot_uniform', activation='tanh')\n", (10153, 10213), False, 'from keras.layers import Dense, Input, Dropout\n'), ((10229, 10241), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), 
'(0.5)\n', (10236, 10241), False, 'from keras.layers import Dense, Input, Dropout\n'), ((10258, 10323), 'keras.layers.Dense', 'Dense', (['(32)'], {'kernel_initializer': '"""glorot_uniform"""', 'activation': '"""tanh"""'}), "(32, kernel_initializer='glorot_uniform', activation='tanh')\n", (10263, 10323), False, 'from keras.layers import Dense, Input, Dropout\n'), ((10339, 10351), 'keras.layers.Dropout', 'Dropout', (['(0.4)'], {}), '(0.4)\n', (10346, 10351), False, 'from keras.layers import Dense, Input, Dropout\n'), ((10368, 10434), 'keras.layers.Dense', 'Dense', (['(128)'], {'kernel_initializer': '"""glorot_uniform"""', 'activation': '"""tanh"""'}), "(128, kernel_initializer='glorot_uniform', activation='tanh')\n", (10373, 10434), False, 'from keras.layers import Dense, Input, Dropout\n'), ((10450, 10462), 'keras.layers.Dropout', 'Dropout', (['(0.3)'], {}), '(0.3)\n', (10457, 10462), False, 'from keras.layers import Dense, Input, Dropout\n'), ((10479, 10538), 'keras.layers.Dense', 'Dense', (['(1)'], {'kernel_initializer': '"""normal"""', 'activation': '"""sigmoid"""'}), "(1, kernel_initializer='normal', activation='sigmoid')\n", (10484, 10538), False, 'from keras.layers import Dense, Input, Dropout\n'), ((16503, 16541), 'sklearn.svm.SVC', 'svm.SVC', ([], {'gamma': '"""auto"""', 'cache_size': '(7000)'}), "(gamma='auto', cache_size=7000)\n", (16510, 16541), False, 'from sklearn import svm\n'), ((17166, 17204), 'sklearn.svm.SVC', 'svm.SVC', ([], {'gamma': '"""auto"""', 'cache_size': '(7000)'}), "(gamma='auto', cache_size=7000)\n", (17173, 17204), False, 'from sklearn import svm\n'), ((18210, 18229), 'xgboost.XGBClassifier', 'xgb.XGBClassifier', ([], {}), '()\n', (18227, 18229), True, 'import xgboost as xgb\n'), ((18729, 18748), 'xgboost.XGBClassifier', 'xgb.XGBClassifier', ([], {}), '()\n', (18746, 18748), True, 'import xgboost as xgb\n'), ((19862, 19894), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (19878, 19894), False, 'from sklearn.metrics import fbeta_score, roc_curve, auc, confusion_matrix\n'), ((19917, 19942), 'sklearn.metrics.roc_curve', 'roc_curve', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (19926, 19942), False, 'from sklearn.metrics import fbeta_score, roc_curve, auc, confusion_matrix\n'), ((19956, 20007), 'sklearn.metrics.fbeta_score', 'fbeta_score', (['y_test', 'y_pred'], {'beta': 'beta', 'pos_label': '(1)'}), '(y_test, y_pred, beta=beta, pos_label=1)\n', (19967, 20007), False, 'from sklearn.metrics import fbeta_score, roc_curve, auc, confusion_matrix\n'), ((20023, 20036), 'sklearn.metrics.auc', 'auc', (['fpr', 'tpr'], {}), '(fpr, tpr)\n', (20026, 20036), False, 'from sklearn.metrics import fbeta_score, roc_curve, auc, confusion_matrix\n'), ((20449, 20495), 'numpy.concatenate', 'np.concatenate', (['[y_pred_test, y_pred_outliers]'], {}), '([y_pred_test, y_pred_outliers])\n', (20463, 20495), True, 'import numpy as np\n'), ((21209, 21241), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (21225, 21241), False, 'from sklearn.metrics import fbeta_score, roc_curve, auc, confusion_matrix\n'), ((22729, 22759), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': 'n_components'}), '(n_components=n_components)\n', (22732, 22759), False, 'from sklearn.decomposition import PCA\n'), ((7026, 7040), 'keras.optimizers.Adam', 'Adam', ([], {'lr': '(0.001)'}), '(lr=0.001)\n', (7030, 7040), False, 'from keras.optimizers import Adam\n'), ((7980, 8046), 
'sklearn.svm.OneClassSVM', 'svm.OneClassSVM', ([], {'kernel': '"""rbf"""', 'gamma': 'gamma', 'nu': 'nu', 'cache_size': '(7000)'}), "(kernel='rbf', gamma=gamma, nu=nu, cache_size=7000)\n", (7995, 8046), False, 'from sklearn import svm\n'), ((11317, 11373), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': 'estimator', 'n_jobs': '(7)'}), '(n_estimators=estimator, n_jobs=7)\n', (11339, 11373), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((12236, 12292), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': 'estimator', 'n_jobs': '(7)'}), '(n_estimators=estimator, n_jobs=7)\n', (12258, 12292), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((13152, 13208), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': 'estimator', 'n_jobs': '(7)'}), '(n_estimators=estimator, n_jobs=7)\n', (13174, 13208), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((14491, 14527), 'sklearn.ensemble.AdaBoostClassifier', 'AdaBoostClassifier', ([], {'learning_rate': 'lr'}), '(learning_rate=lr)\n', (14509, 14527), False, 'from sklearn.ensemble import AdaBoostClassifier\n'), ((15307, 15343), 'sklearn.ensemble.AdaBoostClassifier', 'AdaBoostClassifier', ([], {'learning_rate': 'lr'}), '(learning_rate=lr)\n', (15325, 15343), False, 'from sklearn.ensemble import AdaBoostClassifier\n'), ((22802, 22889), 'sklearn.random_projection.SparseRandomProjection', 'random_projection.SparseRandomProjection', ([], {'n_components': 'n_components', 'random_state': '(7)'}), '(n_components=n_components,\n random_state=7)\n', (22842, 22889), False, 'from sklearn import random_projection\n'), ((1260, 1326), 'sklearn.svm.OneClassSVM', 'svm.OneClassSVM', ([], {'kernel': '"""rbf"""', 'gamma': 'gamma', 'nu': 'nu', 'cache_size': '(7000)'}), "(kernel='rbf', gamma=gamma, nu=nu, cache_size=7000)\n", (1275, 1326), False, 'from sklearn import svm\n'), ((2091, 2157), 'sklearn.svm.OneClassSVM', 'svm.OneClassSVM', ([], {'kernel': '"""rbf"""', 'gamma': 'gamma', 'nu': 'nu', 'cache_size': '(7000)'}), "(kernel='rbf', gamma=gamma, nu=nu, cache_size=7000)\n", (2106, 2157), False, 'from sklearn import svm\n'), ((6869, 6892), 'keras.regularizers.l1', 'regularizers.l1', (['(0.0001)'], {}), '(0.0001)\n', (6884, 6892), False, 'from keras import regularizers\n'), ((9115, 9144), 'numpy.ones', 'np.ones', (['y_pred_test.shape[0]'], {}), '(y_pred_test.shape[0])\n', (9122, 9144), True, 'import numpy as np\n'), ((9312, 9341), 'numpy.ones', 'np.ones', (['y_pred_test.shape[0]'], {}), '(y_pred_test.shape[0])\n', (9319, 9341), True, 'import numpy as np\n'), ((9878, 9886), 'keras.regularizers.l2', 'l2', (['(0.01)'], {}), '(0.01)\n', (9880, 9886), False, 'from keras.regularizers import l2\n'), ((9980, 9988), 'keras.regularizers.l2', 'l2', (['(0.01)'], {}), '(0.01)\n', (9982, 9988), False, 'from keras.regularizers import l2\n'), ((20306, 20335), 'numpy.ones', 'np.ones', (['y_pred_test.shape[0]'], {}), '(y_pred_test.shape[0])\n', (20313, 20335), True, 'import numpy as np\n'), ((3784, 3892), 'sklearn.ensemble.IsolationForest', 'IsolationForest', ([], {'n_estimators': 'estimator', 'contamination': 'contamination', 'max_features': 'max_feature', 'n_jobs': '(7)'}), '(n_estimators=estimator, contamination=contamination,\n max_features=max_feature, n_jobs=7)\n', (3799, 3892), False, 'from sklearn.ensemble import IsolationForest\n'), ((5203, 5311), 'sklearn.ensemble.IsolationForest', 'IsolationForest', ([], 
{'n_estimators': 'estimator', 'contamination': 'contamination', 'max_features': 'max_feature', 'n_jobs': '(7)'}), '(n_estimators=estimator, contamination=contamination,\n max_features=max_feature, n_jobs=7)\n', (5218, 5311), False, 'from sklearn.ensemble import IsolationForest\n'), ((9149, 9182), 'numpy.ones', 'np.ones', (['y_pred_outliers.shape[0]'], {}), '(y_pred_outliers.shape[0])\n', (9156, 9182), True, 'import numpy as np\n'), ((9346, 9379), 'numpy.ones', 'np.ones', (['y_pred_outliers.shape[0]'], {}), '(y_pred_outliers.shape[0])\n', (9353, 9379), True, 'import numpy as np\n'), ((20384, 20417), 'numpy.ones', 'np.ones', (['y_pred_outliers.shape[0]'], {}), '(y_pred_outliers.shape[0])\n', (20391, 20417), True, 'import numpy as np\n')] |
# encoding: utf-8
import unittest
import os
import sys
sys.path.append(os.getcwd())
from notifo import Notifo, send_message
class TestNotifyUser(unittest.TestCase):
def setUp(self):
self.provider = "test_provider"
self.provider_banned = "test_provider_msg_banned"
self.user = "test_user"
self.sender = "test_user2"
self.banned = "test_user_banned"
self.banned_token = "<KEY>"
self.sender_token = "x633a05b18f7f65bf461ffb3900c6eb70eaafb0ed"
self.provider_token = "<KEY>"
self.provider_banned_token = "<KEY>"
self.user_token = "<KEY>"
def test_message(self):
res = send_message(self.sender, self.sender_token,
to=self.user, msg="foo test")
self.assertEqual(2201, res["response_code"])
def test_message_with_object(self):
res = Notifo(self.sender, self.sender_token).send_message(
to=self.user, msg="foo test")
self.assertEqual(2201, res["response_code"])
def test_message_banned(self):
res = send_message(self.banned, self.banned_token,
to=self.user, msg="foo test")
self.assertEqual(403, res["response_code"])
def test_message_provider(self):
res = send_message(self.provider, self.provider_token,
to=self.user, msg="foo test")
self.assertEqual(2201, res["response_code"])
def test_message_provider_banned(self):
res = send_message(self.provider_banned, self.provider_banned_token,
to=self.user, msg="foo test")
self.assertEqual(403, res["response_code"])
if __name__ == '__main__':
unittest.main()
| [
"unittest.main",
"notifo.Notifo",
"notifo.send_message",
"os.getcwd"
] | [((71, 82), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (80, 82), False, 'import os\n'), ((1738, 1753), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1751, 1753), False, 'import unittest\n'), ((662, 736), 'notifo.send_message', 'send_message', (['self.sender', 'self.sender_token'], {'to': 'self.user', 'msg': '"""foo test"""'}), "(self.sender, self.sender_token, to=self.user, msg='foo test')\n", (674, 736), False, 'from notifo import Notifo, send_message\n'), ((1095, 1169), 'notifo.send_message', 'send_message', (['self.banned', 'self.banned_token'], {'to': 'self.user', 'msg': '"""foo test"""'}), "(self.banned, self.banned_token, to=self.user, msg='foo test')\n", (1107, 1169), False, 'from notifo import Notifo, send_message\n'), ((1306, 1384), 'notifo.send_message', 'send_message', (['self.provider', 'self.provider_token'], {'to': 'self.user', 'msg': '"""foo test"""'}), "(self.provider, self.provider_token, to=self.user, msg='foo test')\n", (1318, 1384), False, 'from notifo import Notifo, send_message\n'), ((1529, 1625), 'notifo.send_message', 'send_message', (['self.provider_banned', 'self.provider_banned_token'], {'to': 'self.user', 'msg': '"""foo test"""'}), "(self.provider_banned, self.provider_banned_token, to=self.user,\n msg='foo test')\n", (1541, 1625), False, 'from notifo import Notifo, send_message\n'), ((877, 915), 'notifo.Notifo', 'Notifo', (['self.sender', 'self.sender_token'], {}), '(self.sender, self.sender_token)\n', (883, 915), False, 'from notifo import Notifo, send_message\n')] |
from typing import List
from presidio_analyzer import EntityRecognizer, RecognizerResult, AnalysisExplanation
from presidio_analyzer.nlp_engine import NlpArtifacts
from hebsafeharbor.common.terms_recognizer import TermsRecognizer
class LexiconBasedRecognizer(EntityRecognizer):
"""
    A class which extends the EntityRecognizer (@Presidio) and recognizes entities based on a lexicon
"""
DEFAULT_CONFIDENCE_LEVEL = 0.7 # expected confidence level for this recognizer
def __init__(self, name: str, supported_entity: str, phrase_list: List[str], supported_language: str = "he",
allowed_prepositions: List[str] = None):
"""
Initializes Hebrew LexiconBasedRecognizer
:param name: recognizer's name
:param supported_entity: entity type to be associated with the entities recognized by the lexicon based
recognizer
:param phrase_list: lexicon's phrases
:param supported_language: the language that the recognizer supports. Hebrew is the default
:param allowed_prepositions: prepositions that allowed to be recognized as part of the entity (in addition to
the lexicon phrase itself). Empty list (which means prepositions are not allowed) is the default
"""
super().__init__(name=name, supported_entities=[supported_entity], supported_language=supported_language)
self.terms_recognizer = TermsRecognizer(phrase_list)
self.allowed_prepositions = allowed_prepositions if allowed_prepositions else []
def load(self) -> None:
"""No loading is required."""
pass
def analyze(
self, text: str, entities: List[str], nlp_artifacts: NlpArtifacts
) -> List[RecognizerResult]:
"""
Recognize entities based on lexicon
:param text: text for recognition
:param entities: supported entities
:param nlp_artifacts: artifacts of the nlp engine
        :return: list of entities recognized based on the lexicon
"""
results = []
terms_offsets = self.terms_recognizer(text, prefixes=self.allowed_prepositions)
# Iterate over the Automaton offsets and create Recognizer result for each of them
for start_offset, length in terms_offsets:
result = RecognizerResult(
entity_type=self.supported_entities[0],
start=start_offset,
end=start_offset + length,
score=self.DEFAULT_CONFIDENCE_LEVEL,
analysis_explanation=AnalysisExplanation(self.name, self.DEFAULT_CONFIDENCE_LEVEL),
recognition_metadata={RecognizerResult.RECOGNIZER_NAME_KEY: self.name}
)
results.append(result)
return results
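# Illustrative usage sketch (the lexicon, text and nlp_artifacts below are
# hypothetical placeholders, not defined in this module):
#     recognizer = LexiconBasedRecognizer(name="city_recognizer",
#                                          supported_entity="CITY",
#                                          phrase_list=["tel aviv", "haifa"])
#     results = recognizer.analyze(text, entities=["CITY"], nlp_artifacts=artifacts)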
| [
"presidio_analyzer.AnalysisExplanation",
"hebsafeharbor.common.terms_recognizer.TermsRecognizer"
] | [((1415, 1443), 'hebsafeharbor.common.terms_recognizer.TermsRecognizer', 'TermsRecognizer', (['phrase_list'], {}), '(phrase_list)\n', (1430, 1443), False, 'from hebsafeharbor.common.terms_recognizer import TermsRecognizer\n'), ((2536, 2597), 'presidio_analyzer.AnalysisExplanation', 'AnalysisExplanation', (['self.name', 'self.DEFAULT_CONFIDENCE_LEVEL'], {}), '(self.name, self.DEFAULT_CONFIDENCE_LEVEL)\n', (2555, 2597), False, 'from presidio_analyzer import EntityRecognizer, RecognizerResult, AnalysisExplanation\n')] |
# encoding: utf-8
#
# Copyright (C) 2018 ycmd contributors
#
# This file is part of ycmd.
#
# ycmd is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ycmd is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ycmd. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from hamcrest.core.base_matcher import BaseMatcher
from hamcrest import ( assert_that,
contains,
contains_string,
equal_to,
has_entries,
has_entry,
matches_regexp )
from pprint import pprint
import requests
import os.path
from ycmd.tests.clangd import ( IsolatedYcmd,
SharedYcmd,
PathToTestFile,
RunAfterInitialized )
from ycmd.tests.test_utils import ( BuildRequest,
ChunkMatcher,
CombineRequest,
LineColMatcher,
LocationMatcher,
ErrorMatcher,
WithRetry,
WaitUntilCompleterServerReady )
from ycmd.utils import ReadFile
# This test is isolated to trigger objcpp hooks, rather than fetching completer
# from cache.
@IsolatedYcmd()
def Subcommands_DefinedSubcommands_test( app ):
file_path = PathToTestFile( 'GoTo_Clang_ZeroBasedLineAndColumn_test.cc' )
RunAfterInitialized( app, {
'request': {
'completer_target': 'filetype_default',
'line_num': 10,
'column_num': 3,
'filetype': 'objcpp',
'filepath': file_path
},
'expect': {
'response': requests.codes.ok,
'data': contains( *sorted( [ 'ExecuteCommand',
'FixIt',
'Format',
'GetDoc',
'GetDocImprecise',
'GetType',
'GetTypeImprecise',
'GoTo',
'GoToDeclaration',
'GoToDefinition',
'GoToImprecise',
'GoToInclude',
'GoToReferences',
'RefactorRename',
'RestartServer' ] ) )
},
'route': '/defined_subcommands',
} )
@SharedYcmd
def Subcommands_GoTo_ZeroBasedLineAndColumn_test( app ):
file_path = PathToTestFile( 'GoTo_Clang_ZeroBasedLineAndColumn_test.cc' )
RunAfterInitialized( app, {
'request': {
'contents': ReadFile( file_path ),
'completer_target': 'filetype_default',
'command_arguments': [ 'GoToDefinition' ],
'line_num': 10,
'column_num': 3,
'filetype': 'cpp',
'filepath': file_path
},
'expect': {
'response': requests.codes.ok,
'data': {
'filepath': os.path.abspath( file_path ),
'line_num': 2,
'column_num': 8
}
},
'route': '/run_completer_command',
} )
@SharedYcmd
def RunGoToTest_all( app, folder, command, test ):
filepath = PathToTestFile( folder, test[ 'req' ][ 0 ] )
common_request = {
'completer_target' : 'filetype_default',
'filepath' : filepath,
'command_arguments': [ command ],
'contents' : ReadFile( filepath ),
'filetype' : 'cpp'
}
request = common_request
request.update( {
'line_num' : test[ 'req' ][ 1 ],
'column_num': test[ 'req' ][ 2 ],
} )
response = test[ 'res' ]
if isinstance( response, list ):
expect = {
'response': requests.codes.ok,
'data': contains( *[
LocationMatcher(
PathToTestFile( folder, os.path.normpath( location[ 0 ] ) ),
location[ 1 ],
location[ 2 ]
) for location in response
] )
}
elif isinstance( response, tuple ):
expect = {
'response': requests.codes.ok,
'data': LocationMatcher(
PathToTestFile( folder, os.path.normpath( response[ 0 ] ) ),
response[ 1 ],
response[ 2 ]
)
}
else:
expect = {
'response': requests.codes.internal_server_error,
'data': ErrorMatcher( RuntimeError, test[ 'res' ] )
}
RunAfterInitialized( app, {
'request': request,
'route' : '/run_completer_command',
'expect' : expect
} )
def Subcommands_GoTo_all_test():
tests = [
# Local::x -> definition/declaration of x
{ 'req': ( 'goto.cc', 23, 21 ), 'res': ( 'goto.cc', 4, 9 ) },
# Local::in_line -> definition/declaration of Local::in_line
{ 'req': ( 'goto.cc', 24, 26 ), 'res': ( 'goto.cc', 6, 10 ) },
# Local -> definition/declaration of Local
{ 'req': ( 'goto.cc', 24, 16 ), 'res': ( 'goto.cc', 2, 11 ) },
# Local::out_of_line -> definition of Local::out_of_line
{ 'req': ( 'goto.cc', 25, 27 ), 'res': ( 'goto.cc', 14, 13 ) },
# GoToDeclaration alternates between definition and declaration
{ 'req': ( 'goto.cc', 14, 13 ), 'res': ( 'goto.cc', 11, 10 ) },
{ 'req': ( 'goto.cc', 11, 10 ), 'res': ( 'goto.cc', 14, 13 ) },
# test -> definition and declaration of test
{ 'req': ( 'goto.cc', 21, 5 ), 'res': ( 'goto.cc', 19, 5 ) },
{ 'req': ( 'goto.cc', 19, 5 ), 'res': ( 'goto.cc', 21, 5 ) },
# Unicøde
{ 'req': ( 'goto.cc', 34, 9 ), 'res': ( 'goto.cc', 32, 26 ) },
# Another_Unicøde
{ 'req': ( 'goto.cc', 36, 17 ), 'res': ( 'goto.cc', 32, 54 ) },
{ 'req': ( 'goto.cc', 36, 25 ), 'res': ( 'goto.cc', 32, 54 ) },
{ 'req': ( 'goto.cc', 38, 3 ), 'res': ( 'goto.cc', 36, 28 ) },
# Expected failures
{ 'req': ( 'goto.cc', 13, 1 ), 'res': 'Cannot jump to location' },
{ 'req': ( 'goto.cc', 16, 6 ), 'res': 'Cannot jump to location' },
]
for test in tests:
for cmd in [ 'GoToDefinition', 'GoTo', 'GoToImprecise' ]:
yield RunGoToTest_all, '', cmd, test
def Subcommands_GoToDeclaration_all_test():
tests = [
# Local::x -> definition/declaration of x
{ 'req': ( 'goto.cc', 23, 21 ), 'res': ( 'goto.cc', 4, 9 ) },
# Local::in_line -> definition/declaration of Local::in_line
{ 'req': ( 'goto.cc', 24, 26 ), 'res': ( 'goto.cc', 6, 10 ) },
# Local -> definition/declaration of Local
{ 'req': ( 'goto.cc', 24, 16 ), 'res': ( 'goto.cc', 2, 11 ) },
# Local::out_of_line -> declaration of Local::out_of_line
{ 'req': ( 'goto.cc', 25, 27 ), 'res': ( 'goto.cc', 11, 10 ) },
# GoToDeclaration alternates between definition and declaration
{ 'req': ( 'goto.cc', 14, 13 ), 'res': ( 'goto.cc', 11, 10 ) },
{ 'req': ( 'goto.cc', 11, 10 ), 'res': ( 'goto.cc', 14, 13 ) },
# test -> definition and declaration of test
{ 'req': ( 'goto.cc', 21, 5 ), 'res': ( 'goto.cc', 19, 5 ) },
{ 'req': ( 'goto.cc', 19, 5 ), 'res': ( 'goto.cc', 21, 5 ) },
# Unicøde
{ 'req': ( 'goto.cc', 34, 9 ), 'res': ( 'goto.cc', 32, 26 ) },
# Another_Unicøde
{ 'req': ( 'goto.cc', 36, 17 ), 'res': ( 'goto.cc', 32, 54 ) },
{ 'req': ( 'goto.cc', 36, 25 ), 'res': ( 'goto.cc', 32, 54 ) },
{ 'req': ( 'goto.cc', 38, 3 ), 'res': ( 'goto.cc', 36, 28 ) },
# Expected failures
{ 'req': ( 'goto.cc', 13, 1 ), 'res': 'Cannot jump to location' },
{ 'req': ( 'goto.cc', 16, 6 ), 'res': 'Cannot jump to location' },
]
for test in tests:
yield RunGoToTest_all, '', 'GoToDeclaration', test
def Subcommands_GoToInclude_test():
tests = [
{ 'req': ( 'main.cpp', 1, 6 ), 'res': ( 'a.hpp', 1, 1 ) },
{ 'req': ( 'main.cpp', 2, 14 ), 'res': ( 'system/a.hpp', 1, 1 ) },
{ 'req': ( 'main.cpp', 3, 1 ), 'res': ( 'quote/b.hpp', 1, 1 ) },
# FIXME: should fail since b.hpp is included with angled brackets but its
# folder is added with -iquote.
{ 'req': ( 'main.cpp', 4, 10 ), 'res': ( 'quote/b.hpp', 1, 1 ) },
{ 'req': ( 'main.cpp', 5, 11 ), 'res': ( 'system/c.hpp', 1, 1 ) },
{ 'req': ( 'main.cpp', 6, 11 ), 'res': ( 'system/c.hpp', 1, 1 ) },
# Expected failures
{ 'req': ( 'main.cpp', 7, 1 ), 'res': 'Cannot jump to location' },
{ 'req': ( 'main.cpp', 10, 13 ), 'res': 'Cannot jump to location' },
]
for test in tests:
for cmd in [ 'GoToInclude', 'GoTo', 'GoToImprecise' ]:
yield RunGoToTest_all, 'test-include', cmd, test
def Subcommands_GoToReferences_test():
tests = [
# Function
{ 'req': ( 'goto.cc', 14, 21 ), 'res': [ ( 'goto.cc', 11, 10 ),
( 'goto.cc', 14, 13 ),
( 'goto.cc', 25, 22 ) ] },
# Namespace
{ 'req': ( 'goto.cc', 24, 17 ), 'res': [ ( 'goto.cc', 2, 11 ),
( 'goto.cc', 14, 6 ),
( 'goto.cc', 23, 14 ),
( 'goto.cc', 24, 15 ),
( 'goto.cc', 25, 15 ) ] },
# Expected failure
{ 'req': ( 'goto.cc', 27, 8 ), 'res': 'Cannot jump to location' },
]
for test in tests:
yield RunGoToTest_all, '', 'GoToReferences', test
@SharedYcmd
def RunGetSemanticTest( app,
filepath,
filetype,
test,
command,
response = requests.codes.ok ):
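  # test[ 0 ] holds per-test request overrides (line/column), test[ 1 ] the
  # expected value. For successful responses the expectation is matched against
  # the 'message' entry, either as a plain substring or as a hamcrest matcher;
  # for non-OK responses test[ 1 ] is used as the full response matcher.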
contents = ReadFile( filepath )
common_args = {
'completer_target' : 'filetype_default',
'command_arguments': command,
'line_num' : 10,
'column_num' : 3,
'filepath' : filepath,
'contents' : contents,
'filetype' : filetype
}
args = test[ 0 ]
if response == requests.codes.ok:
if not isinstance( test[ 1 ], BaseMatcher ):
expected = has_entry( 'message', contains_string( test[ 1 ] ) )
else:
expected = has_entry( 'message', test[ 1 ] )
else:
expected = test[ 1 ]
request = common_args
request.update( args )
test = { 'request': request,
'route': '/run_completer_command',
'expect': { 'response': response,
'data': expected } }
RunAfterInitialized( app, test )
def Subcommands_GetType_test():
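  # Entries that are commented out below are not asserted; only the active
  # ( position, expected type ) pairs are exercised, for both GetType and
  # GetTypeImprecise.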
tests = [
# Basic pod types
[ { 'line_num': 24, 'column_num': 3 }, 'Foo' ],
# [ { 'line_num': 12, 'column_num': 2 }, 'Foo' ],
[ { 'line_num': 12, 'column_num': 8 }, 'Foo' ],
[ { 'line_num': 12, 'column_num': 9 }, 'Foo' ],
[ { 'line_num': 12, 'column_num': 10 }, 'Foo' ],
# [ { 'line_num': 13, 'column_num': 3 }, 'int' ],
[ { 'line_num': 13, 'column_num': 7 }, 'int' ],
# [ { 'line_num': 15, 'column_num': 7 }, 'char' ],
# Function
# [ { 'line_num': 22, 'column_num': 2 }, 'int main()' ],
[ { 'line_num': 22, 'column_num': 6 }, 'int main()' ],
# Declared and canonical type
# On Ns::
[ { 'line_num': 25, 'column_num': 3 }, 'namespace Ns' ],
# On Type (Type)
# [ { 'line_num': 25, 'column_num': 8 },
# 'Ns::Type => Ns::BasicType<char>' ],
# On "a" (Ns::Type)
# [ { 'line_num': 25, 'column_num': 15 },
# 'Ns::Type => Ns::BasicType<char>' ],
# [ { 'line_num': 26, 'column_num': 13 },
# 'Ns::Type => Ns::BasicType<char>' ],
# Cursor on decl for refs & pointers
[ { 'line_num': 39, 'column_num': 3 }, 'Foo' ],
[ { 'line_num': 39, 'column_num': 11 }, 'Foo &' ],
[ { 'line_num': 39, 'column_num': 15 }, 'Foo' ],
[ { 'line_num': 40, 'column_num': 3 }, 'Foo' ],
[ { 'line_num': 40, 'column_num': 11 }, 'Foo *' ],
[ { 'line_num': 40, 'column_num': 18 }, 'Foo' ],
# [ { 'line_num': 42, 'column_num': 3 }, 'const Foo &' ],
[ { 'line_num': 42, 'column_num': 16 }, 'const struct Foo &' ],
# [ { 'line_num': 43, 'column_num': 3 }, 'const Foo *' ],
[ { 'line_num': 43, 'column_num': 16 }, 'const struct Foo *' ],
# Cursor on usage
[ { 'line_num': 45, 'column_num': 13 }, 'const struct Foo' ],
# [ { 'line_num': 45, 'column_num': 19 }, 'const int' ],
[ { 'line_num': 46, 'column_num': 13 }, 'const struct Foo *' ],
# [ { 'line_num': 46, 'column_num': 20 }, 'const int' ],
[ { 'line_num': 47, 'column_num': 12 }, 'Foo' ],
[ { 'line_num': 47, 'column_num': 17 }, 'int' ],
[ { 'line_num': 48, 'column_num': 12 }, 'Foo *' ],
[ { 'line_num': 48, 'column_num': 18 }, 'int' ],
# Auto in declaration
# [ { 'line_num': 28, 'column_num': 3 }, 'struct Foo &' ],
# [ { 'line_num': 28, 'column_num': 11 }, 'struct Foo &' ],
[ { 'line_num': 28, 'column_num': 18 }, 'struct Foo' ],
# [ { 'line_num': 29, 'column_num': 3 }, 'Foo *' ],
# [ { 'line_num': 29, 'column_num': 11 }, 'Foo *' ],
[ { 'line_num': 29, 'column_num': 18 }, 'Foo' ],
# [ { 'line_num': 31, 'column_num': 3 }, 'const Foo &' ],
# [ { 'line_num': 31, 'column_num': 16 }, 'const Foo &' ],
# [ { 'line_num': 32, 'column_num': 3 }, 'const Foo *' ],
# [ { 'line_num': 32, 'column_num': 16 }, 'const Foo *' ],
# Auto in usage
# [ { 'line_num': 34, 'column_num': 14 }, 'const Foo' ],
# [ { 'line_num': 34, 'column_num': 21 }, 'const int' ],
# [ { 'line_num': 35, 'column_num': 14 }, 'const Foo *' ],
# [ { 'line_num': 35, 'column_num': 22 }, 'const int' ],
[ { 'line_num': 36, 'column_num': 13 }, 'Foo' ],
[ { 'line_num': 36, 'column_num': 19 }, 'int' ],
# [ { 'line_num': 37, 'column_num': 13 }, 'Foo *' ],
[ { 'line_num': 37, 'column_num': 20 }, 'int' ],
# Unicode
[ { 'line_num': 51, 'column_num': 13 }, 'Unicøde *' ],
# Bound methods
# On Win32, methods pick up an __attribute__((thiscall)) to annotate their
    # calling convention. This shows up in the type, which isn't ideal, but it
    # would be prohibitively complex to strip out.
[ { 'line_num': 53, 'column_num': 15 },
matches_regexp(
r'int bar\(int i\)(?: __attribute__\(\(thiscall\)\))?' ) ],
[ { 'line_num': 54, 'column_num': 18 },
matches_regexp(
r'int bar\(int i\)(?: __attribute__\(\(thiscall\)\))?' ) ],
]
for subcommand in [ 'GetType', 'GetTypeImprecise' ]:
for test in tests:
yield ( RunGetSemanticTest,
PathToTestFile( 'GetType_Clang_test.cc' ),
'cpp',
test,
[ subcommand ] )
def Subcommands_GetDoc_test():
tests = [
# from local file
[ { 'line_num': 5, 'column_num': 10 }, 'docstring', requests.codes.ok ],
# from header
[ { 'line_num': 6, 'column_num': 10 }, 'docstring', requests.codes.ok ],
# no docstring
[ { 'line_num': 7, 'column_num': 7 }, 'int x = 3', requests.codes.ok ],
# no hover
[ { 'line_num': 8, 'column_num': 1 },
ErrorMatcher( RuntimeError, 'No hover information.' ),
requests.codes.server_error ]
]
for subcommand in [ 'GetDoc', 'GetDocImprecise' ]:
for test in tests:
yield ( RunGetSemanticTest,
PathToTestFile( 'GetDoc_Clang_test.cc' ),
'cpp',
test,
[ subcommand ],
test[ 2 ] )
@SharedYcmd
def RunFixItTest( app, line, column, lang, file_path, check ):
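  # Requests the detailed diagnostic at the given position, then issues the
  # FixIt subcommand and hands the JSON response to the supplied checker.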
contents = ReadFile( file_path )
language_options = {
'cpp11': {
'filetype' : 'cpp',
},
'cuda': {
'filetype' : 'cuda',
},
'objective-c': {
'filetype' : 'objc',
},
}
args = {
'completer_target' : 'filetype_default',
'contents' : contents,
'filepath' : file_path,
'command_arguments': [ 'FixIt' ],
'line_num' : line,
'column_num' : column,
}
args.update( language_options[ lang ] )
test = { 'request': args, 'route': '/detailed_diagnostic' }
# First get diags.
diags = RunAfterInitialized( app, test )
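  # Keep polling while the returned message still refers to "diagnostics"
  # (i.e. no concrete diagnostic text yet): drain /receive_messages and
  # re-request the detailed diagnostic.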
while 'message' in diags and 'diagnostics' in diags[ 'message' ].lower():
receive_diags = { 'request': args, 'route': '/receive_messages' }
RunAfterInitialized( app, receive_diags )
diags = RunAfterInitialized( app, test )
results = app.post_json( '/run_completer_command',
BuildRequest( **args ) ).json
pprint( results )
check( results )
def FixIt_Check_cpp11_Ins( results ):
# First fixit
# switch(A()) { // expected-error{{explicit conversion to}}
assert_that( results, has_entries( {
'fixits': contains( has_entries( {
'chunks': contains(
has_entries( {
'replacement_text': equal_to( 'static_cast<int>(' ),
'range': has_entries( {
'start': has_entries( { 'line_num': 16, 'column_num': 10 } ),
'end' : has_entries( { 'line_num': 16, 'column_num': 10 } ),
} ),
} ),
has_entries( {
'replacement_text': equal_to( ')' ),
'range': has_entries( {
'start': has_entries( { 'line_num': 16, 'column_num': 13 } ),
'end' : has_entries( { 'line_num': 16, 'column_num': 13 } ),
} ),
} )
),
'location': has_entries( { 'line_num': 16, 'column_num': 0 } )
} ) )
} ) )
def FixIt_Check_cpp11_InsMultiLine( results ):
  # Similar to FixIt_Check_cpp11_Ins, but the inserts are split across lines.
#
assert_that( results, has_entries( {
'fixits': contains( has_entries( {
'chunks': contains(
has_entries( {
'replacement_text': equal_to( 'static_cast<int>(' ),
'range': has_entries( {
'start': has_entries( { 'line_num': 26, 'column_num': 7 } ),
'end' : has_entries( { 'line_num': 26, 'column_num': 7 } ),
} ),
} ),
has_entries( {
'replacement_text': equal_to( ')' ),
'range': has_entries( {
'start': has_entries( { 'line_num': 28, 'column_num': 2 } ),
'end' : has_entries( { 'line_num': 28, 'column_num': 2 } ),
} ),
} )
),
'location': has_entries( { 'line_num': 25, 'column_num': 14 } )
} ) )
} ) )
def FixIt_Check_cpp11_Del( results ):
# Removal of ::
assert_that( results, has_entries( {
'fixits': contains( has_entries( {
'chunks': contains(
has_entries( {
'replacement_text': equal_to( '' ),
'range': has_entries( {
'start': has_entries( { 'line_num': 35, 'column_num': 7 } ),
'end' : has_entries( { 'line_num': 35, 'column_num': 9 } ),
} ),
} )
),
'location': has_entries( { 'line_num': 35, 'column_num': 7 } )
} ) )
} ) )
def FixIt_Check_cpp11_Repl( results ):
assert_that( results, has_entries( {
'fixits': contains( has_entries( {
'chunks': contains(
has_entries( {
'replacement_text': equal_to( 'foo' ),
'range': has_entries( {
'start': has_entries( { 'line_num': 40, 'column_num': 6 } ),
'end' : has_entries( { 'line_num': 40, 'column_num': 9 } ),
} ),
} )
),
'location': has_entries( { 'line_num': 40, 'column_num': 6 } )
} ) )
} ) )
def FixIt_Check_cpp11_DelAdd( results ):
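  # Two alternative fixits are offered on line 48: the first deletes one
  # character and inserts a '~', the second replaces columns 15-17 with
  # '= default;'.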
assert_that( results, has_entries( {
'fixits': contains(
has_entries( {
'chunks': contains(
has_entries( {
'replacement_text': equal_to( '' ),
'range': has_entries( {
'start': has_entries( { 'line_num': 48, 'column_num': 3 } ),
'end' : has_entries( { 'line_num': 48, 'column_num': 4 } ),
} ),
} ),
has_entries( {
'replacement_text': equal_to( '~' ),
'range': has_entries( {
'start': has_entries( { 'line_num': 48, 'column_num': 9 } ),
'end' : has_entries( { 'line_num': 48, 'column_num': 9 } ),
} ),
} ),
),
'location': has_entries( { 'line_num': 48, 'column_num': 3 } )
} ),
has_entries( {
'chunks': contains(
has_entries( {
'replacement_text': equal_to( '= default;' ),
'range': has_entries( {
'start': has_entries( { 'line_num': 48, 'column_num': 15 } ),
'end' : has_entries( { 'line_num': 48, 'column_num': 17 } ),
} ),
} ),
),
'location': has_entries( { 'line_num': 48, 'column_num': 3 } )
} ),
)
} ) )
def FixIt_Check_objc( results ):
assert_that( results, has_entries( {
'fixits': contains( has_entries( {
'chunks': contains(
has_entries( {
'replacement_text': equal_to( 'id' ),
'range': has_entries( {
'start': has_entries( { 'line_num': 5, 'column_num': 3 } ),
'end' : has_entries( { 'line_num': 5, 'column_num': 3 } ),
} ),
} )
),
'location': has_entries( { 'line_num': 5, 'column_num': 3 } )
} ) )
} ) )
def FixIt_Check_objc_NoFixIt( results ):
# and finally, a warning with no fixits
assert_that( results, equal_to( { 'fixits': [] } ) )
def FixIt_Check_cpp11_MultiFirst( results ):
assert_that( results, has_entries( {
'fixits': contains(
# first fix-it at 54,16
has_entries( {
'chunks': contains(
has_entries( {
'replacement_text': equal_to( 'foo' ),
'range': has_entries( {
'start': has_entries( { 'line_num': 54, 'column_num': 16 } ),
'end' : has_entries( { 'line_num': 54, 'column_num': 19 } ),
} ),
} )
),
'location': has_entries( { 'line_num': 54, 'column_num': 15 } )
} ),
# second fix-it at 54,52
has_entries( {
'chunks': contains(
has_entries( {
'replacement_text': equal_to( '' ),
'range': has_entries( {
'start': has_entries( { 'line_num': 54, 'column_num': 52 } ),
'end' : has_entries( { 'line_num': 54, 'column_num': 53 } ),
} ),
} ),
has_entries( {
'replacement_text': equal_to( '~' ),
'range': has_entries( {
'start': has_entries( { 'line_num': 54, 'column_num': 58 } ),
'end' : has_entries( { 'line_num': 54, 'column_num': 58 } ),
} ),
} ),
),
'location': has_entries( { 'line_num': 54, 'column_num': 15 } )
} ),
has_entries( {
'chunks': contains(
has_entries( {
'replacement_text': equal_to( '= default;' ),
'range': has_entries( {
'start': has_entries( { 'line_num': 54, 'column_num': 64 } ),
'end' : has_entries( { 'line_num': 54, 'column_num': 67 } ),
} ),
} )
),
'location': has_entries( { 'line_num': 54, 'column_num': 15 } )
} ),
)
} ) )
def FixIt_Check_cpp11_MultiSecond( results ):
assert_that( results, has_entries( {
'fixits': contains(
# first fix-it at 54,16
has_entries( {
'chunks': contains(
has_entries( {
'replacement_text': equal_to( 'foo' ),
'range': has_entries( {
'start': has_entries( { 'line_num': 54, 'column_num': 16 } ),
'end' : has_entries( { 'line_num': 54, 'column_num': 19 } ),
} ),
} )
),
'location': has_entries( { 'line_num': 54, 'column_num': 51 } )
} ),
# second fix-it at 54,52
has_entries( {
'chunks': contains(
has_entries( {
'replacement_text': equal_to( '' ),
'range': has_entries( {
'start': has_entries( { 'line_num': 54, 'column_num': 52 } ),
'end' : has_entries( { 'line_num': 54, 'column_num': 53 } ),
} ),
} ),
has_entries( {
'replacement_text': equal_to( '~' ),
'range': has_entries( {
'start': has_entries( { 'line_num': 54, 'column_num': 58 } ),
'end' : has_entries( { 'line_num': 54, 'column_num': 58 } ),
} ),
} ),
),
'location': has_entries( { 'line_num': 54, 'column_num': 51 } )
} ),
has_entries( {
'chunks': contains(
has_entries( {
'replacement_text': equal_to( '= default;' ),
'range': has_entries( {
'start': has_entries( { 'line_num': 54, 'column_num': 64 } ),
'end' : has_entries( { 'line_num': 54, 'column_num': 67 } ),
} ),
} )
),
'location': has_entries( { 'line_num': 54, 'column_num': 51 } )
} ),
)
} ) )
def FixIt_Check_unicode_Ins( results ):
assert_that( results, has_entries( {
'fixits': contains( has_entries( {
'chunks': contains(
has_entries( {
'replacement_text': equal_to( '=' ),
'range': has_entries( {
'start': has_entries( { 'line_num': 21, 'column_num': 9 } ),
'end' : has_entries( { 'line_num': 21, 'column_num': 11 } ),
} ),
} )
),
'location': has_entries( { 'line_num': 21, 'column_num': 16 } )
} ) )
} ) )
def FixIt_Check_cpp11_Note( results ):
assert_that( results, has_entries( {
'fixits': contains(
# First note: put parens around it
has_entries( {
'text': contains_string( 'parentheses around the assignment' ),
'chunks': contains(
ChunkMatcher( '(',
LineColMatcher( 59, 8 ),
LineColMatcher( 59, 8 ) ),
ChunkMatcher( ')',
LineColMatcher( 61, 12 ),
LineColMatcher( 61, 12 ) )
),
'location': LineColMatcher( 60, 1 ),
} ),
# Second note: change to ==
has_entries( {
'text': contains_string( '==' ),
'chunks': contains(
ChunkMatcher( '==',
LineColMatcher( 60, 8 ),
LineColMatcher( 60, 9 ) )
),
'location': LineColMatcher( 60, 1 ),
} ),
# Unresolved, requires /resolve_fixit request
has_entries( {
'text': 'Extract subexpression to variable',
'resolve': True,
'command': has_entries( { 'command': 'clangd.applyTweak' } )
} )
)
} ) )
def FixIt_Check_cpp11_SpellCheck( results ):
assert_that( results, has_entries( {
'fixits': contains(
# Change to SpellingIsNotMyStrongPoint
has_entries( {
'text': contains_string( "change 'SpellingIsNotMyStringPiont' to "
"'SpellingIsNotMyStrongPoint'" ),
'chunks': contains(
ChunkMatcher( 'SpellingIsNotMyStrongPoint',
LineColMatcher( 72, 9 ),
LineColMatcher( 72, 35 ) )
),
'location': LineColMatcher( 72, 9 ),
} ) )
} ) )
def FixIt_Check_cuda( results ):
assert_that( results, has_entries( {
'fixits': contains(
has_entries( {
'text': contains_string(
"change 'int' to 'void'" ),
'chunks': contains(
ChunkMatcher( 'void',
LineColMatcher( 3, 12 ),
LineColMatcher( 3, 15 ) )
),
'location': LineColMatcher( 3, 12 ),
} ) )
} ) )
def FixIt_Check_SubexprExtract_Resolved( results ):
assert_that( results, has_entries( {
'fixits': contains( has_entries( {
'text': 'Extract subexpression to variable',
'chunks': contains(
ChunkMatcher( 'auto dummy = foo(i + 3);\n ',
LineColMatcher( 84, 3 ),
LineColMatcher( 84, 3 ) ),
ChunkMatcher( 'dummy',
LineColMatcher( 84, 10 ),
LineColMatcher( 84, 22 ) ),
)
} ) )
} ) )
def FixIt_Check_RawStringReplace_Resolved( results ):
assert_that( results, has_entries( {
'fixits': contains( has_entries( {
'text': 'Convert to raw string',
'chunks': contains(
ChunkMatcher( 'R"(\\\\r\\asd\n\\v)"',
LineColMatcher( 80, 19 ),
LineColMatcher( 80, 36 ) ),
)
} ) )
} ) )
def FixIt_Check_MacroExpand_Resolved( results ):
assert_that( results, has_entries( {
'fixits': contains( has_entries( {
'text': "Expand macro 'DECLARE_INT'",
'chunks': contains(
ChunkMatcher( 'int i',
LineColMatcher( 83, 3 ),
LineColMatcher( 83, 17 ) ),
)
} ) )
} ) )
def FixIt_Check_AutoExpand_Resolved( results ):
assert_that( results, has_entries( {
'fixits': contains( has_entries( {
'text': "Expand auto type",
'chunks': contains(
ChunkMatcher( 'const char *',
LineColMatcher( 80, 1 ),
LineColMatcher( 80, 6 ) ),
)
} ) )
} ) )
def Subcommands_FixIt_all_test():
cfile = PathToTestFile( 'FixIt_Clang_cpp11.cpp' )
mfile = PathToTestFile( 'objc', 'FixIt_Clang_objc.m' )
cufile = PathToTestFile( 'cuda', 'fixit_test.cu' )
ufile = PathToTestFile( 'unicode.cc' )
tests = [
    # Line, Column, Lang, File, Checker
[ 16, 0, 'cpp11', cfile, FixIt_Check_cpp11_Ins ],
[ 25, 14, 'cpp11', cfile, FixIt_Check_cpp11_InsMultiLine ],
[ 35, 7, 'cpp11', cfile, FixIt_Check_cpp11_Del ],
[ 40, 6, 'cpp11', cfile, FixIt_Check_cpp11_Repl ],
[ 48, 3, 'cpp11', cfile, FixIt_Check_cpp11_DelAdd ],
[ 5, 3, 'objective-c', mfile, FixIt_Check_objc ],
[ 7, 1, 'objective-c', mfile, FixIt_Check_objc_NoFixIt ],
[ 3, 12, 'cuda', cufile, FixIt_Check_cuda ],
# multiple errors on a single line; both with fixits
[ 54, 15, 'cpp11', cfile, FixIt_Check_cpp11_MultiFirst ],
# should put closest fix-it first?
[ 54, 51, 'cpp11', cfile, FixIt_Check_cpp11_MultiSecond ],
# unicode in line for fixit
[ 21, 16, 'cpp11', ufile, FixIt_Check_unicode_Ins ],
# FixIt attached to a "child" diagnostic (i.e. a Note)
[ 60, 1, 'cpp11', cfile, FixIt_Check_cpp11_Note ],
# FixIt due to forced spell checking
[ 72, 9, 'cpp11', cfile, FixIt_Check_cpp11_SpellCheck ],
]
for test in tests:
yield RunFixItTest, test[ 0 ], test[ 1 ], test[ 2 ], test[ 3 ], test[ 4 ]
@WithRetry
@SharedYcmd
def RunRangedFixItTest( app, rng, expected ):
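  # Requests FixIts for a whole range rather than a single position, then
  # resolves the first returned fixit via /resolve_fixit and checks the result.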
contents = ReadFile( PathToTestFile( 'FixIt_Clang_cpp11.cpp' ) )
args = {
'completer_target' : 'filetype_default',
'contents' : contents,
'filepath' : PathToTestFile( 'FixIt_Clang_cpp11.cpp' ),
'command_arguments': [ 'FixIt' ],
'range' : rng,
'filetype' : 'cpp'
}
app.post_json( '/event_notification',
CombineRequest( args, {
'event_name': 'FileReadyToParse',
} ),
expect_errors = True )
WaitUntilCompleterServerReady( app, 'cpp' )
response = app.post_json( '/run_completer_command',
BuildRequest( **args ) ).json
args[ 'fixit' ] = response[ 'fixits' ][ 0 ]
response = app.post_json( '/resolve_fixit',
BuildRequest( **args ) ).json
print( 'Resolved fixit response = ' )
print( response )
expected( response )
def Subcommands_FixIt_Ranged_test():
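  # Each range selects a region of FixIt_Clang_cpp11.cpp; the paired checker
  # validates the resolved edit for that region.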
expand_auto_range = {
'start': { 'line_num': 80, 'column_num': 1 },
'end': { 'line_num': 80, 'column_num': 4 },
}
subexpression_extract_range = {
'start': { 'line_num': 84, 'column_num': 14 },
'end': { 'line_num': 84, 'column_num': 20 },
}
macro_expand_range = {
'start': { 'line_num': 83, 'column_num': 3 },
'end': { 'line_num': 83, 'column_num': 13 },
}
raw_string_range = {
'start': { 'line_num': 80, 'column_num': 19 },
'end': { 'line_num': 80, 'column_num': 35 },
}
tests = [
[ expand_auto_range, FixIt_Check_AutoExpand_Resolved ],
[ macro_expand_range, FixIt_Check_MacroExpand_Resolved ],
[ subexpression_extract_range, FixIt_Check_SubexprExtract_Resolved ],
[ raw_string_range, FixIt_Check_RawStringReplace_Resolved ],
]
for test in tests:
yield RunRangedFixItTest, test[ 0 ], test[ 1 ]
@WithRetry
@SharedYcmd
def Subcommands_FixIt_AlreadyResolved_test( app ):
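  # Passing a fixit that already carries its edits to /resolve_fixit should
  # return it unchanged, so the resolved response must equal the original one.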
filename = PathToTestFile( 'FixIt_Clang_cpp11.cpp' )
request = {
'completer_target' : 'filetype_default',
'contents' : ReadFile( filename ),
'filepath' : filename,
'command_arguments': [ 'FixIt' ],
'line_num' : 16,
'column_num' : 1,
'filetype' : 'cpp'
}
app.post_json( '/event_notification',
CombineRequest( request, {
'event_name': 'FileReadyToParse',
} ),
expect_errors = True )
WaitUntilCompleterServerReady( app, 'cpp' )
expected = app.post_json( '/run_completer_command',
BuildRequest( **request ) ).json
print( 'expected = ' )
print( expected )
request[ 'fixit' ] = expected[ 'fixits' ][ 0 ]
actual = app.post_json( '/resolve_fixit',
BuildRequest( **request ) ).json
print( 'actual = ' )
print( actual )
assert_that( actual, equal_to( expected ) )
@SharedYcmd
def Subcommands_RefactorRename_test( app ):
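  # Renames the symbol at line 17, column 4 of basic.cpp to 'Bar' and checks
  # every replacement chunk of the resulting fixit.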
test = {
'request': {
'filetype': 'cpp',
'completer_target': 'filetype_default',
'contents': ReadFile( PathToTestFile( 'basic.cpp' ) ),
'filepath': PathToTestFile( 'basic.cpp' ),
'command_arguments': [ 'RefactorRename', 'Bar' ],
'line_num': 17,
'column_num': 4,
},
'expect': {
'response': requests.codes.ok,
'data': has_entries( {
'fixits': contains( has_entries( {
'chunks': contains(
ChunkMatcher( 'Bar',
LineColMatcher( 1, 8 ),
LineColMatcher( 1, 11 ) ),
ChunkMatcher( 'Bar',
LineColMatcher( 9, 3 ),
LineColMatcher( 9, 6 ) ),
ChunkMatcher( '\n\n',
LineColMatcher( 12, 2 ),
LineColMatcher( 15, 1 ) ),
ChunkMatcher( 'Bar',
LineColMatcher( 15, 8 ),
LineColMatcher( 15, 11 ) ),
ChunkMatcher( ' ',
LineColMatcher( 15, 46 ),
LineColMatcher( 16, 1 ) ),
ChunkMatcher( 'Bar',
LineColMatcher( 17, 3 ),
LineColMatcher( 17, 6 ) ),
ChunkMatcher( '',
LineColMatcher( 17, 14 ),
LineColMatcher( 17, 15 ) ),
ChunkMatcher( ' ',
LineColMatcher( 17, 17 ),
LineColMatcher( 17, 17 ) ),
ChunkMatcher( ' ',
LineColMatcher( 17, 19 ),
LineColMatcher( 17, 19 ) ),
)
} ) )
} )
},
'route': '/run_completer_command'
}
RunAfterInitialized( app, test )
((20966, 21013), 'hamcrest.has_entries', 'has_entries', (["{'line_num': 48, 'column_num': 17}"], {}), "({'line_num': 48, 'column_num': 17})\n", (20977, 21013), False, 'from hamcrest import assert_that, contains, contains_string, equal_to, has_entries, has_entry, matches_regexp\n'), ((21423, 21468), 'hamcrest.has_entries', 'has_entries', (["{'line_num': 5, 'column_num': 3}"], {}), "({'line_num': 5, 'column_num': 3})\n", (21434, 21468), False, 'from hamcrest import assert_that, contains, contains_string, equal_to, has_entries, has_entry, matches_regexp\n'), ((21495, 21540), 'hamcrest.has_entries', 'has_entries', (["{'line_num': 5, 'column_num': 3}"], {}), "({'line_num': 5, 'column_num': 3})\n", (21506, 21540), False, 'from hamcrest import assert_that, contains, contains_string, equal_to, has_entries, has_entry, matches_regexp\n'), ((22132, 22179), 'hamcrest.has_entries', 'has_entries', (["{'line_num': 54, 'column_num': 16}"], {}), "({'line_num': 54, 'column_num': 16})\n", (22143, 22179), False, 'from hamcrest import assert_that, contains, contains_string, equal_to, has_entries, has_entry, matches_regexp\n'), ((22208, 22255), 'hamcrest.has_entries', 'has_entries', (["{'line_num': 54, 'column_num': 19}"], {}), "({'line_num': 54, 'column_num': 19})\n", (22219, 22255), False, 'from hamcrest import assert_that, contains, contains_string, equal_to, has_entries, has_entry, matches_regexp\n'), ((22598, 22645), 'hamcrest.has_entries', 'has_entries', (["{'line_num': 54, 'column_num': 52}"], {}), "({'line_num': 54, 'column_num': 52})\n", (22609, 22645), False, 'from hamcrest import assert_that, contains, contains_string, equal_to, has_entries, has_entry, matches_regexp\n'), ((22674, 22721), 'hamcrest.has_entries', 'has_entries', (["{'line_num': 54, 'column_num': 53}"], {}), "({'line_num': 54, 'column_num': 53})\n", (22685, 22721), False, 'from hamcrest import assert_that, contains, contains_string, equal_to, has_entries, has_entry, matches_regexp\n'), ((22892, 22939), 'hamcrest.has_entries', 'has_entries', (["{'line_num': 54, 'column_num': 58}"], {}), "({'line_num': 54, 'column_num': 58})\n", (22903, 22939), False, 'from hamcrest import assert_that, contains, contains_string, equal_to, has_entries, has_entry, matches_regexp\n'), ((22968, 23015), 'hamcrest.has_entries', 'has_entries', (["{'line_num': 54, 'column_num': 58}"], {}), "({'line_num': 54, 'column_num': 58})\n", (22979, 23015), False, 'from hamcrest import assert_that, contains, contains_string, equal_to, has_entries, has_entry, matches_regexp\n'), ((23338, 23385), 'hamcrest.has_entries', 'has_entries', (["{'line_num': 54, 'column_num': 64}"], {}), "({'line_num': 54, 'column_num': 64})\n", (23349, 23385), False, 'from hamcrest import assert_that, contains, contains_string, equal_to, has_entries, has_entry, matches_regexp\n'), ((23414, 23461), 'hamcrest.has_entries', 'has_entries', (["{'line_num': 54, 'column_num': 67}"], {}), "({'line_num': 54, 'column_num': 67})\n", (23425, 23461), False, 'from hamcrest import assert_that, contains, contains_string, equal_to, has_entries, has_entry, matches_regexp\n'), ((23931, 23978), 'hamcrest.has_entries', 'has_entries', (["{'line_num': 54, 'column_num': 16}"], {}), "({'line_num': 54, 'column_num': 16})\n", (23942, 23978), False, 'from hamcrest import assert_that, contains, contains_string, equal_to, has_entries, has_entry, matches_regexp\n'), ((24007, 24054), 'hamcrest.has_entries', 'has_entries', (["{'line_num': 54, 'column_num': 19}"], {}), "({'line_num': 54, 'column_num': 19})\n", (24018, 24054), False, 
'from hamcrest import assert_that, contains, contains_string, equal_to, has_entries, has_entry, matches_regexp\n'), ((24397, 24444), 'hamcrest.has_entries', 'has_entries', (["{'line_num': 54, 'column_num': 52}"], {}), "({'line_num': 54, 'column_num': 52})\n", (24408, 24444), False, 'from hamcrest import assert_that, contains, contains_string, equal_to, has_entries, has_entry, matches_regexp\n'), ((24473, 24520), 'hamcrest.has_entries', 'has_entries', (["{'line_num': 54, 'column_num': 53}"], {}), "({'line_num': 54, 'column_num': 53})\n", (24484, 24520), False, 'from hamcrest import assert_that, contains, contains_string, equal_to, has_entries, has_entry, matches_regexp\n'), ((24691, 24738), 'hamcrest.has_entries', 'has_entries', (["{'line_num': 54, 'column_num': 58}"], {}), "({'line_num': 54, 'column_num': 58})\n", (24702, 24738), False, 'from hamcrest import assert_that, contains, contains_string, equal_to, has_entries, has_entry, matches_regexp\n'), ((24767, 24814), 'hamcrest.has_entries', 'has_entries', (["{'line_num': 54, 'column_num': 58}"], {}), "({'line_num': 54, 'column_num': 58})\n", (24778, 24814), False, 'from hamcrest import assert_that, contains, contains_string, equal_to, has_entries, has_entry, matches_regexp\n'), ((25137, 25184), 'hamcrest.has_entries', 'has_entries', (["{'line_num': 54, 'column_num': 64}"], {}), "({'line_num': 54, 'column_num': 64})\n", (25148, 25184), False, 'from hamcrest import assert_that, contains, contains_string, equal_to, has_entries, has_entry, matches_regexp\n'), ((25213, 25260), 'hamcrest.has_entries', 'has_entries', (["{'line_num': 54, 'column_num': 67}"], {}), "({'line_num': 54, 'column_num': 67})\n", (25224, 25260), False, 'from hamcrest import assert_that, contains, contains_string, equal_to, has_entries, has_entry, matches_regexp\n'), ((25676, 25722), 'hamcrest.has_entries', 'has_entries', (["{'line_num': 21, 'column_num': 9}"], {}), "({'line_num': 21, 'column_num': 9})\n", (25687, 25722), False, 'from hamcrest import assert_that, contains, contains_string, equal_to, has_entries, has_entry, matches_regexp\n'), ((25749, 25796), 'hamcrest.has_entries', 'has_entries', (["{'line_num': 21, 'column_num': 11}"], {}), "({'line_num': 21, 'column_num': 11})\n", (25760, 25796), False, 'from hamcrest import assert_that, contains, contains_string, equal_to, has_entries, has_entry, matches_regexp\n')] |
"""
Config class containing all the settings for running the sentiment scoring tool.
"""
import jsonpickle
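# A minimal usage sketch (added for illustration, not part of the original
# module; the file name "config.json" and the field value are assumptions):
#
#     config = Config()
#     config.elasticsearch_host = "localhost:9200"
#     with open("config.json", "w") as file:
#         file.write(jsonpickle.encode(config))
#     loaded = Config.load("config.json")  # round-trips via jsonpickle.decode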
class Config(object):
"""Container for sentiment scoring tool settings.
"""
def __init__(self):
"""Initializes the Config instance.
"""
#Elasticsearch settings
self.elasticsearch_host = ""
self.elasticsearch_verify_certs = False
self.elasticsearch_index_name = ""
self.elasticsearch_batch_size = 500
self.elasticsearch_timeout_secs = 30
#Processing settings
self.sentiment_modelpath = ""
self.sentiment_max_seq_length = 512
self.sleep_idle_secs = 5
self.sleep_not_idle_secs = 0.01
self.log_level = "ERROR"
@staticmethod
def load(filepath):
"""Loads the config from a JSON file.
Args:
filepath: path of the JSON file.
"""
with open(filepath, "r") as file:
json = file.read()
config = jsonpickle.decode(json)
return config | [
"jsonpickle.decode"
] | [((987, 1010), 'jsonpickle.decode', 'jsonpickle.decode', (['json'], {}), '(json)\n', (1004, 1010), False, 'import jsonpickle\n')] |
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Testing Invert op in DE
"""
import numpy as np
import mindspore.dataset as ds
import mindspore.dataset.transforms.py_transforms
import mindspore.dataset.vision.py_transforms as F
import mindspore.dataset.vision.c_transforms as C
from mindspore import log as logger
from util import visualize_list, save_and_check_md5, diff_mse
DATA_DIR = "../data/dataset/testImageNetData/train/"
GENERATE_GOLDEN = False
def test_invert_py(plot=False):
"""
Test Invert python op
"""
logger.info("Test Invert Python op")
# Original Images
data_set = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
transforms_original = mindspore.dataset.transforms.py_transforms.Compose([F.Decode(),
F.Resize((224, 224)),
F.ToTensor()])
ds_original = data_set.map(operations=transforms_original, input_columns="image")
ds_original = ds_original.batch(512)
for idx, (image, _) in enumerate(ds_original):
if idx == 0:
images_original = np.transpose(image.asnumpy(), (0, 2, 3, 1))
else:
images_original = np.append(images_original,
np.transpose(image.asnumpy(), (0, 2, 3, 1)),
axis=0)
# Color Inverted Images
data_set = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
transforms_invert = mindspore.dataset.transforms.py_transforms.Compose([F.Decode(),
F.Resize((224, 224)),
F.Invert(),
F.ToTensor()])
ds_invert = data_set.map(operations=transforms_invert, input_columns="image")
ds_invert = ds_invert.batch(512)
for idx, (image, _) in enumerate(ds_invert):
if idx == 0:
images_invert = np.transpose(image.asnumpy(), (0, 2, 3, 1))
else:
images_invert = np.append(images_invert,
np.transpose(image.asnumpy(), (0, 2, 3, 1)),
axis=0)
num_samples = images_original.shape[0]
mse = np.zeros(num_samples)
for i in range(num_samples):
mse[i] = np.mean((images_invert[i] - images_original[i]) ** 2)
logger.info("MSE= {}".format(str(np.mean(mse))))
if plot:
visualize_list(images_original, images_invert)
def test_invert_c(plot=False):
"""
Test Invert Cpp op
"""
logger.info("Test Invert cpp op")
# Original Images
data_set = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
transforms_original = [C.Decode(), C.Resize(size=[224, 224])]
ds_original = data_set.map(operations=transforms_original, input_columns="image")
ds_original = ds_original.batch(512)
for idx, (image, _) in enumerate(ds_original):
if idx == 0:
images_original = image.asnumpy()
else:
images_original = np.append(images_original,
image.asnumpy(),
axis=0)
# Invert Images
data_set = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
transform_invert = [C.Decode(), C.Resize(size=[224, 224]),
C.Invert()]
ds_invert = data_set.map(operations=transform_invert, input_columns="image")
ds_invert = ds_invert.batch(512)
for idx, (image, _) in enumerate(ds_invert):
if idx == 0:
images_invert = image.asnumpy()
else:
images_invert = np.append(images_invert,
image.asnumpy(),
axis=0)
if plot:
visualize_list(images_original, images_invert)
num_samples = images_original.shape[0]
mse = np.zeros(num_samples)
for i in range(num_samples):
mse[i] = diff_mse(images_invert[i], images_original[i])
logger.info("MSE= {}".format(str(np.mean(mse))))
def test_invert_py_c(plot=False):
"""
Test Invert Cpp op and python op
"""
logger.info("Test Invert cpp and python op")
# Invert Images in cpp
data_set = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
data_set = data_set.map(operations=[C.Decode(), C.Resize((224, 224))], input_columns=["image"])
ds_c_invert = data_set.map(operations=C.Invert(), input_columns="image")
ds_c_invert = ds_c_invert.batch(512)
for idx, (image, _) in enumerate(ds_c_invert):
if idx == 0:
images_c_invert = image.asnumpy()
else:
images_c_invert = np.append(images_c_invert,
image.asnumpy(),
axis=0)
# invert images in python
data_set = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
data_set = data_set.map(operations=[C.Decode(), C.Resize((224, 224))], input_columns=["image"])
transforms_p_invert = mindspore.dataset.transforms.py_transforms.Compose([lambda img: img.astype(np.uint8),
F.ToPIL(),
F.Invert(),
np.array])
ds_p_invert = data_set.map(operations=transforms_p_invert, input_columns="image")
ds_p_invert = ds_p_invert.batch(512)
for idx, (image, _) in enumerate(ds_p_invert):
if idx == 0:
images_p_invert = image.asnumpy()
else:
images_p_invert = np.append(images_p_invert,
image.asnumpy(),
axis=0)
num_samples = images_c_invert.shape[0]
mse = np.zeros(num_samples)
for i in range(num_samples):
mse[i] = diff_mse(images_p_invert[i], images_c_invert[i])
logger.info("MSE= {}".format(str(np.mean(mse))))
if plot:
visualize_list(images_c_invert, images_p_invert, visualize_mode=2)
def test_invert_one_channel():
"""
Test Invert cpp op with one channel image
"""
logger.info("Test Invert C Op With One Channel Images")
c_op = C.Invert()
try:
data_set = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
data_set = data_set.map(operations=[C.Decode(), C.Resize((224, 224)),
lambda img: np.array(img[:, :, 0])], input_columns=["image"])
data_set.map(operations=c_op, input_columns="image")
except RuntimeError as e:
logger.info("Got an exception in DE: {}".format(str(e)))
assert "The shape" in str(e)
def test_invert_md5_py():
"""
Test Invert python op with md5 check
"""
logger.info("Test Invert python op with md5 check")
# Generate dataset
data_set = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
transforms_invert = mindspore.dataset.transforms.py_transforms.Compose([F.Decode(),
F.Invert(),
F.ToTensor()])
data = data_set.map(operations=transforms_invert, input_columns="image")
# Compare with expected md5 from images
filename = "invert_01_result_py.npz"
save_and_check_md5(data, filename, generate_golden=GENERATE_GOLDEN)
def test_invert_md5_c():
"""
Test Invert cpp op with md5 check
"""
logger.info("Test Invert cpp op with md5 check")
# Generate dataset
data_set = ds.ImageFolderDataset(dataset_dir=DATA_DIR, shuffle=False)
transforms_invert = [C.Decode(),
C.Resize(size=[224, 224]),
C.Invert(),
F.ToTensor()]
data = data_set.map(operations=transforms_invert, input_columns="image")
# Compare with expected md5 from images
filename = "invert_01_result_c.npz"
save_and_check_md5(data, filename, generate_golden=GENERATE_GOLDEN)
if __name__ == "__main__":
test_invert_py(plot=False)
test_invert_c(plot=False)
test_invert_py_c(plot=False)
test_invert_one_channel()
test_invert_md5_py()
test_invert_md5_c()
| [
"mindspore.log.info",
"util.save_and_check_md5",
"numpy.mean",
"util.diff_mse",
"mindspore.dataset.vision.py_transforms.Resize",
"mindspore.dataset.vision.py_transforms.ToPIL",
"util.visualize_list",
"mindspore.dataset.vision.c_transforms.Resize",
"mindspore.dataset.vision.py_transforms.Invert",
"mindspore.dataset.vision.c_transforms.Invert",
"mindspore.dataset.vision.py_transforms.Decode",
"numpy.array",
"numpy.zeros",
"mindspore.dataset.ImageFolderDataset",
"mindspore.dataset.vision.c_transforms.Decode",
"mindspore.dataset.vision.py_transforms.ToTensor"
] | [((1160, 1196), 'mindspore.log.info', 'logger.info', (['"""Test Invert Python op"""'], {}), "('Test Invert Python op')\n", (1171, 1196), True, 'from mindspore import log as logger\n'), ((1235, 1293), 'mindspore.dataset.ImageFolderDataset', 'ds.ImageFolderDataset', ([], {'dataset_dir': 'DATA_DIR', 'shuffle': '(False)'}), '(dataset_dir=DATA_DIR, shuffle=False)\n', (1256, 1293), True, 'import mindspore.dataset as ds\n'), ((2102, 2160), 'mindspore.dataset.ImageFolderDataset', 'ds.ImageFolderDataset', ([], {'dataset_dir': 'DATA_DIR', 'shuffle': '(False)'}), '(dataset_dir=DATA_DIR, shuffle=False)\n', (2123, 2160), True, 'import mindspore.dataset as ds\n'), ((3041, 3062), 'numpy.zeros', 'np.zeros', (['num_samples'], {}), '(num_samples)\n', (3049, 3062), True, 'import numpy as np\n'), ((3365, 3398), 'mindspore.log.info', 'logger.info', (['"""Test Invert cpp op"""'], {}), "('Test Invert cpp op')\n", (3376, 3398), True, 'from mindspore import log as logger\n'), ((3437, 3495), 'mindspore.dataset.ImageFolderDataset', 'ds.ImageFolderDataset', ([], {'dataset_dir': 'DATA_DIR', 'shuffle': '(False)'}), '(dataset_dir=DATA_DIR, shuffle=False)\n', (3458, 3495), True, 'import mindspore.dataset as ds\n'), ((4023, 4081), 'mindspore.dataset.ImageFolderDataset', 'ds.ImageFolderDataset', ([], {'dataset_dir': 'DATA_DIR', 'shuffle': '(False)'}), '(dataset_dir=DATA_DIR, shuffle=False)\n', (4044, 4081), True, 'import mindspore.dataset as ds\n'), ((4707, 4728), 'numpy.zeros', 'np.zeros', (['num_samples'], {}), '(num_samples)\n', (4715, 4728), True, 'import numpy as np\n'), ((4972, 5016), 'mindspore.log.info', 'logger.info', (['"""Test Invert cpp and python op"""'], {}), "('Test Invert cpp and python op')\n", (4983, 5016), True, 'from mindspore import log as logger\n'), ((5060, 5118), 'mindspore.dataset.ImageFolderDataset', 'ds.ImageFolderDataset', ([], {'dataset_dir': 'DATA_DIR', 'shuffle': '(False)'}), '(dataset_dir=DATA_DIR, shuffle=False)\n', (5081, 5118), True, 'import mindspore.dataset as ds\n'), ((5680, 5738), 'mindspore.dataset.ImageFolderDataset', 'ds.ImageFolderDataset', ([], {'dataset_dir': 'DATA_DIR', 'shuffle': '(False)'}), '(dataset_dir=DATA_DIR, shuffle=False)\n', (5701, 5738), True, 'import mindspore.dataset as ds\n'), ((6698, 6719), 'numpy.zeros', 'np.zeros', (['num_samples'], {}), '(num_samples)\n', (6706, 6719), True, 'import numpy as np\n'), ((7062, 7117), 'mindspore.log.info', 'logger.info', (['"""Test Invert C Op With One Channel Images"""'], {}), "('Test Invert C Op With One Channel Images')\n", (7073, 7117), True, 'from mindspore import log as logger\n'), ((7130, 7140), 'mindspore.dataset.vision.c_transforms.Invert', 'C.Invert', ([], {}), '()\n', (7138, 7140), True, 'import mindspore.dataset.vision.c_transforms as C\n'), ((7697, 7748), 'mindspore.log.info', 'logger.info', (['"""Test Invert python op with md5 check"""'], {}), "('Test Invert python op with md5 check')\n", (7708, 7748), True, 'from mindspore import log as logger\n'), ((7788, 7846), 'mindspore.dataset.ImageFolderDataset', 'ds.ImageFolderDataset', ([], {'dataset_dir': 'DATA_DIR', 'shuffle': '(False)'}), '(dataset_dir=DATA_DIR, shuffle=False)\n', (7809, 7846), True, 'import mindspore.dataset as ds\n'), ((8282, 8349), 'util.save_and_check_md5', 'save_and_check_md5', (['data', 'filename'], {'generate_golden': 'GENERATE_GOLDEN'}), '(data, filename, generate_golden=GENERATE_GOLDEN)\n', (8300, 8349), False, 'from util import visualize_list, save_and_check_md5, diff_mse\n'), ((8435, 8483), 'mindspore.log.info', 'logger.info', (['"""Test 
Invert cpp op with md5 check"""'], {}), "('Test Invert cpp op with md5 check')\n", (8446, 8483), True, 'from mindspore import log as logger\n'), ((8523, 8581), 'mindspore.dataset.ImageFolderDataset', 'ds.ImageFolderDataset', ([], {'dataset_dir': 'DATA_DIR', 'shuffle': '(False)'}), '(dataset_dir=DATA_DIR, shuffle=False)\n', (8544, 8581), True, 'import mindspore.dataset as ds\n'), ((8914, 8981), 'util.save_and_check_md5', 'save_and_check_md5', (['data', 'filename'], {'generate_golden': 'GENERATE_GOLDEN'}), '(data, filename, generate_golden=GENERATE_GOLDEN)\n', (8932, 8981), False, 'from util import visualize_list, save_and_check_md5, diff_mse\n'), ((3113, 3166), 'numpy.mean', 'np.mean', (['((images_invert[i] - images_original[i]) ** 2)'], {}), '((images_invert[i] - images_original[i]) ** 2)\n', (3120, 3166), True, 'import numpy as np\n'), ((3242, 3288), 'util.visualize_list', 'visualize_list', (['images_original', 'images_invert'], {}), '(images_original, images_invert)\n', (3256, 3288), False, 'from util import visualize_list, save_and_check_md5, diff_mse\n'), ((3524, 3534), 'mindspore.dataset.vision.c_transforms.Decode', 'C.Decode', ([], {}), '()\n', (3532, 3534), True, 'import mindspore.dataset.vision.c_transforms as C\n'), ((3536, 3561), 'mindspore.dataset.vision.c_transforms.Resize', 'C.Resize', ([], {'size': '[224, 224]'}), '(size=[224, 224])\n', (3544, 3561), True, 'import mindspore.dataset.vision.c_transforms as C\n'), ((4107, 4117), 'mindspore.dataset.vision.c_transforms.Decode', 'C.Decode', ([], {}), '()\n', (4115, 4117), True, 'import mindspore.dataset.vision.c_transforms as C\n'), ((4119, 4144), 'mindspore.dataset.vision.c_transforms.Resize', 'C.Resize', ([], {'size': '[224, 224]'}), '(size=[224, 224])\n', (4127, 4144), True, 'import mindspore.dataset.vision.c_transforms as C\n'), ((4170, 4180), 'mindspore.dataset.vision.c_transforms.Invert', 'C.Invert', ([], {}), '()\n', (4178, 4180), True, 'import mindspore.dataset.vision.c_transforms as C\n'), ((4606, 4652), 'util.visualize_list', 'visualize_list', (['images_original', 'images_invert'], {}), '(images_original, images_invert)\n', (4620, 4652), False, 'from util import visualize_list, save_and_check_md5, diff_mse\n'), ((4779, 4825), 'util.diff_mse', 'diff_mse', (['images_invert[i]', 'images_original[i]'], {}), '(images_invert[i], images_original[i])\n', (4787, 4825), False, 'from util import visualize_list, save_and_check_md5, diff_mse\n'), ((6770, 6818), 'util.diff_mse', 'diff_mse', (['images_p_invert[i]', 'images_c_invert[i]'], {}), '(images_p_invert[i], images_c_invert[i])\n', (6778, 6818), False, 'from util import visualize_list, save_and_check_md5, diff_mse\n'), ((6894, 6960), 'util.visualize_list', 'visualize_list', (['images_c_invert', 'images_p_invert'], {'visualize_mode': '(2)'}), '(images_c_invert, images_p_invert, visualize_mode=2)\n', (6908, 6960), False, 'from util import visualize_list, save_and_check_md5, diff_mse\n'), ((7170, 7228), 'mindspore.dataset.ImageFolderDataset', 'ds.ImageFolderDataset', ([], {'dataset_dir': 'DATA_DIR', 'shuffle': '(False)'}), '(dataset_dir=DATA_DIR, shuffle=False)\n', (7191, 7228), True, 'import mindspore.dataset as ds\n'), ((8608, 8618), 'mindspore.dataset.vision.c_transforms.Decode', 'C.Decode', ([], {}), '()\n', (8616, 8618), True, 'import mindspore.dataset.vision.c_transforms as C\n'), ((8645, 8670), 'mindspore.dataset.vision.c_transforms.Resize', 'C.Resize', ([], {'size': '[224, 224]'}), '(size=[224, 224])\n', (8653, 8670), True, 'import mindspore.dataset.vision.c_transforms as 
C\n'), ((8697, 8707), 'mindspore.dataset.vision.c_transforms.Invert', 'C.Invert', ([], {}), '()\n', (8705, 8707), True, 'import mindspore.dataset.vision.c_transforms as C\n'), ((8734, 8746), 'mindspore.dataset.vision.py_transforms.ToTensor', 'F.ToTensor', ([], {}), '()\n', (8744, 8746), True, 'import mindspore.dataset.vision.py_transforms as F\n'), ((1373, 1383), 'mindspore.dataset.vision.py_transforms.Decode', 'F.Decode', ([], {}), '()\n', (1381, 1383), True, 'import mindspore.dataset.vision.py_transforms as F\n'), ((1463, 1483), 'mindspore.dataset.vision.py_transforms.Resize', 'F.Resize', (['(224, 224)'], {}), '((224, 224))\n', (1471, 1483), True, 'import mindspore.dataset.vision.py_transforms as F\n'), ((1563, 1575), 'mindspore.dataset.vision.py_transforms.ToTensor', 'F.ToTensor', ([], {}), '()\n', (1573, 1575), True, 'import mindspore.dataset.vision.py_transforms as F\n'), ((2238, 2248), 'mindspore.dataset.vision.py_transforms.Decode', 'F.Decode', ([], {}), '()\n', (2246, 2248), True, 'import mindspore.dataset.vision.py_transforms as F\n'), ((2326, 2346), 'mindspore.dataset.vision.py_transforms.Resize', 'F.Resize', (['(224, 224)'], {}), '((224, 224))\n', (2334, 2346), True, 'import mindspore.dataset.vision.py_transforms as F\n'), ((2424, 2434), 'mindspore.dataset.vision.py_transforms.Invert', 'F.Invert', ([], {}), '()\n', (2432, 2434), True, 'import mindspore.dataset.vision.py_transforms as F\n'), ((2512, 2524), 'mindspore.dataset.vision.py_transforms.ToTensor', 'F.ToTensor', ([], {}), '()\n', (2522, 2524), True, 'import mindspore.dataset.vision.py_transforms as F\n'), ((5262, 5272), 'mindspore.dataset.vision.c_transforms.Invert', 'C.Invert', ([], {}), '()\n', (5270, 5272), True, 'import mindspore.dataset.vision.c_transforms as C\n'), ((6030, 6039), 'mindspore.dataset.vision.py_transforms.ToPIL', 'F.ToPIL', ([], {}), '()\n', (6037, 6039), True, 'import mindspore.dataset.vision.py_transforms as F\n'), ((6119, 6129), 'mindspore.dataset.vision.py_transforms.Invert', 'F.Invert', ([], {}), '()\n', (6127, 6129), True, 'import mindspore.dataset.vision.py_transforms as F\n'), ((7924, 7934), 'mindspore.dataset.vision.py_transforms.Decode', 'F.Decode', ([], {}), '()\n', (7932, 7934), True, 'import mindspore.dataset.vision.py_transforms as F\n'), ((8012, 8022), 'mindspore.dataset.vision.py_transforms.Invert', 'F.Invert', ([], {}), '()\n', (8020, 8022), True, 'import mindspore.dataset.vision.py_transforms as F\n'), ((8100, 8112), 'mindspore.dataset.vision.py_transforms.ToTensor', 'F.ToTensor', ([], {}), '()\n', (8110, 8112), True, 'import mindspore.dataset.vision.py_transforms as F\n'), ((3204, 3216), 'numpy.mean', 'np.mean', (['mse'], {}), '(mse)\n', (3211, 3216), True, 'import numpy as np\n'), ((4863, 4875), 'numpy.mean', 'np.mean', (['mse'], {}), '(mse)\n', (4870, 4875), True, 'import numpy as np\n'), ((5159, 5169), 'mindspore.dataset.vision.c_transforms.Decode', 'C.Decode', ([], {}), '()\n', (5167, 5169), True, 'import mindspore.dataset.vision.c_transforms as C\n'), ((5171, 5191), 'mindspore.dataset.vision.c_transforms.Resize', 'C.Resize', (['(224, 224)'], {}), '((224, 224))\n', (5179, 5191), True, 'import mindspore.dataset.vision.c_transforms as C\n'), ((5779, 5789), 'mindspore.dataset.vision.c_transforms.Decode', 'C.Decode', ([], {}), '()\n', (5787, 5789), True, 'import mindspore.dataset.vision.c_transforms as C\n'), ((5791, 5811), 'mindspore.dataset.vision.c_transforms.Resize', 'C.Resize', (['(224, 224)'], {}), '((224, 224))\n', (5799, 5811), True, 'import 
mindspore.dataset.vision.c_transforms as C\n'), ((6856, 6868), 'numpy.mean', 'np.mean', (['mse'], {}), '(mse)\n', (6863, 6868), True, 'import numpy as np\n'), ((7273, 7283), 'mindspore.dataset.vision.c_transforms.Decode', 'C.Decode', ([], {}), '()\n', (7281, 7283), True, 'import mindspore.dataset.vision.c_transforms as C\n'), ((7285, 7305), 'mindspore.dataset.vision.c_transforms.Resize', 'C.Resize', (['(224, 224)'], {}), '((224, 224))\n', (7293, 7305), True, 'import mindspore.dataset.vision.c_transforms as C\n'), ((7363, 7385), 'numpy.array', 'np.array', (['img[:, :, 0]'], {}), '(img[:, :, 0])\n', (7371, 7385), True, 'import numpy as np\n')] |
from parameterized import parameterized
from numpy.testing import TestCase
from .. import candy
class TestCollectCandies(TestCase):
@parameterized.expand(
[(5, 5, 12,
[[2, 1, 1, 1, 1], [2, 2, 1, 1, 1], [1, 2, 1, 1, 1],
[2, 2, 1, 1, 3], [2, 2, 2, 2, 2]])]
)
def test_candy(self, n, m, t, candies):
collector = candy.CollectCandies(n, m, t, candies)
for pos, expected in [[(1, 1), [(0, 1), (2, 1), (1, 0), (1, 2)]],
[(0, 0), [(1, 0), (0, 1)]],
[(4, 4), [(3, 4), (4, 3)]]]:
self.assertListEqual(
collector.get_next_positions(pos), expected + [pos])
self.assertEqual(collector.get_max_sum(), 27)
| [
"parameterized.parameterized.expand"
] | [((140, 266), 'parameterized.parameterized.expand', 'parameterized.expand', (['[(5, 5, 12, [[2, 1, 1, 1, 1], [2, 2, 1, 1, 1], [1, 2, 1, 1, 1], [2, 2, 1, 1,\n 3], [2, 2, 2, 2, 2]])]'], {}), '([(5, 5, 12, [[2, 1, 1, 1, 1], [2, 2, 1, 1, 1], [1, 2, \n 1, 1, 1], [2, 2, 1, 1, 3], [2, 2, 2, 2, 2]])])\n', (160, 266), False, 'from parameterized import parameterized\n')] |
from __future__ import absolute_import, unicode_literals, print_function
import mock
import unittest
import d43_aws_tools as aws_tools
from boto3.dynamodb.conditions import Attr
class DynamoDBHandlerTests(unittest.TestCase):
@classmethod
def setUpClass(cls):
with mock.patch("d43_aws_tools.dynamodb_handler.boto3", mock.MagicMock()):
cls.handler = aws_tools.dynamodb_handler.DynamoDBHandler("table_name")
cls.handler.table = mock.MagicMock()
def setUp(self):
self.handler.table.reset_mock()
def test_get_item(self):
"""Test a successful invocation of `get_item`."""
expected = dict(field1="1", field2="2")
self.handler.table.get_item.return_value = {
"Item": expected
}
self.assertEqual(self.handler.get_item("key"), expected)
def test_get_item_malformed(self):
"""Test an unsuccessful invocation of `get_item`."""
self.handler.table.get_item.return_value = {
"TheWrongKey": dict(field1="1", field2="2")
}
self.assertIsNone(self.handler.get_item("key"))
def test_insert_item(self):
"""Test a successful invocation of `insert_item`."""
data = dict(x="x", y="y", three=3)
self.handler.insert_item(data)
self.handler.table.put_item.assert_called_once_with(Item=data)
def test_update_item(self):
"""Test a successful invocation of `update_item`."""
key = {"id": 1}
data = {"age": 40, "name": "<NAME>"}
self.handler.update_item(key, data)
self.handler.table.update_item.assert_called_once()
_, kwargs = self.handler.table.update_item.call_args
self.assertIn("Key", kwargs)
self.assertEqual(kwargs["Key"], key)
self.assertIn("UpdateExpression", kwargs)
# ignore whitespace and order of assignments
expr = kwargs["UpdateExpression"].replace(" ", "")
self.assertTrue(expr.startswith("SET"))
self.assertIn("age=:age", expr)
self.assertIn("#item_name=:name", expr)
self.assertIn("ExpressionAttributeValues", kwargs)
self.assertEqual(kwargs["ExpressionAttributeValues"],
{":age": 40, ":name": "<NAME>"})
self.assertIn("ExpressionAttributeNames", kwargs)
self.assertEqual(kwargs["ExpressionAttributeNames"],
{"#item_name": "name"})
def test_delete_item(self):
"""Test a successful invocation of `delete_item`."""
key = {"id": 1234}
self.handler.delete_item(key)
self.handler.table.delete_item.assert_called_once_with(Key=key)
def test_query_item(self):
""" Test a successful invocation of `query_item`."""
for cond in ("ne", "lt", "lte", "gt", "gte",
"begins_with", "is_in", "contains"):
self.handler.table.reset_mock()
query = {
"age": {
"condition": "eq",
"value": 25
},
"full_name": {
"condition": cond,
"value": "<NAME>"
}
}
data = {"age": 30, "full_name": "<NAME>"}
self.handler.table.scan.return_value = {"Items": data}
self.assertEqual(self.handler.query_items(query), data)
self.handler.table.scan.assert_called_once()
def test_query_bool_item(self):
""" Test a successful invocation of `query_item`. with a False boolean query"""
for cond in ("ne", "lt", "lte", "gt", "gte",
"begins_with", "is_in", "contains"):
self.handler.table.reset_mock()
query = {
"ready": False
}
data = {"age": 30, "full_name": "<NAME>", "ready": False}
self.handler.table.scan.return_value = {"Items": data}
self.assertEqual(self.handler.query_items(query), data)
self.handler.table.scan.assert_called_once()
            err_msg = 'query_items: Expecting FilterExpression parameter for table.scan() but none found'
try:
self.handler.table.scan.assert_called_once_with()
# If the scan ran without an argument this is a failure
self.assertTrue(False, err_msg)
except Exception as e:
if err_msg in str(e):
raise e
def test_query_item_no_query(self):
"""Test a invocation of `query_item` with no query."""
data = {"age": 30, "full_name": "<NAME>"}
self.handler.table.scan.return_value = {"Items": data}
self.assertEqual(self.handler.query_items(), data)
self.handler.table.scan.assert_called_once_with()
| [
"d43_aws_tools.dynamodb_handler.DynamoDBHandler",
"mock.MagicMock"
] | [((463, 479), 'mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (477, 479), False, 'import mock\n'), ((378, 434), 'd43_aws_tools.dynamodb_handler.DynamoDBHandler', 'aws_tools.dynamodb_handler.DynamoDBHandler', (['"""table_name"""'], {}), "('table_name')\n", (420, 434), True, 'import d43_aws_tools as aws_tools\n'), ((333, 349), 'mock.MagicMock', 'mock.MagicMock', ([], {}), '()\n', (347, 349), False, 'import mock\n')] |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple model for image classification.
The model is multiple
conv/locally_connected/wide_conv/low_rank_locally_connected layers followed
by a fully connected layer. Changes to the model architecture can be made by
modifying simple_model_config.py file.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import os
import tensorflow.compat.v1 as tf
from low_rank_local_connectivity import layers
from low_rank_local_connectivity import utils
MOMENTUM = 0.9
EPS = 1e-5
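# A minimal construction sketch (illustrative assumption only; the real config
# object is built from simple_model_config.py and must expose the attributes
# read in SimpleNetwork.__init__ below, e.g. num_filters_list,
# kernel_size_list, strides_list, layer_types, num_channels, num_classes,
# rank, batch_norm, coord_conv, global_avg_pooling, kernel_initializer):
#
#     model = SimpleNetwork(config)
#     logits, endpoints = model(images, is_training=True)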
class SimpleNetwork(tf.keras.Model):
"""Locally Connected Network."""
def __init__(self, config, variable_scope='simple_network'):
super(SimpleNetwork, self).__init__()
self.variable_scope = variable_scope
self.config = copy.deepcopy(config)
filters_list = self.config.num_filters_list
depth = len(filters_list)
self.pass_is_training_list = []
self.layers_list = []
if self.config.num_channels < 1:
raise ValueError('num_channels should be > 0')
input_channels = self.config.num_channels
if self.config.coord_conv:
# Add two coordinate conv channels.
input_channels = input_channels + 2
if len(self.config.layer_types) < depth:
self.config.layer_types.extend(
['conv2d'] * (depth - len(self.config.layer_types)))
chin = input_channels
for i, (kernel_size, num_filters, strides, layer_type) in enumerate(zip(
self.config.kernel_size_list,
filters_list,
self.config.strides_list,
self.config.layer_types)):
padding = 'valid'
if layer_type == 'conv2d':
chout = num_filters
layer = tf.keras.layers.Conv2D(
filters=chout,
kernel_size=kernel_size,
strides=(strides, strides),
padding=padding,
activation=None,
use_bias=not self.config.batch_norm,
kernel_initializer=self.config.kernel_initializer,
name=os.path.join(self.variable_scope, 'layer%d' %i, layer_type))
elif layer_type == 'wide_conv2d':
# Conv. layer with equivalent params to low rank locally connected.
if self.config.rank < 1:
raise ValueError('rank should be > 0 for %s layer.' % layer_type)
chout = int((self.config.rank * chin + num_filters) / float(
chin + num_filters) * num_filters)
layer = tf.keras.layers.Conv2D(
filters=chout if i < (depth-1)
else int(num_filters * self.config.rank),
kernel_size=kernel_size, strides=(strides, strides),
padding=padding,
activation=None,
use_bias=not self.config.batch_norm,
kernel_initializer=self.config.kernel_initializer,
name=os.path.join(self.variable_scope, 'layer%d' %i, layer_type))
elif layer_type == 'locally_connected2d':
# Full locally connected layer.
chout = num_filters
layer = tf.keras.layers.LocallyConnected2D(
filters=chout,
kernel_size=(kernel_size, kernel_size),
strides=(strides, strides),
padding=padding,
activation=None,
use_bias=True, # not self.config.batch_norm,
name=os.path.join(self.variable_scope, 'layer%d' %i, layer_type),
kernel_initializer=self.config.kernel_initializer)
elif layer_type == 'low_rank_locally_connected2d':
if self.config.rank < 1:
raise ValueError('rank should be > 0 for %s layer.' % layer_type)
chout = num_filters
layer = layers.LowRankLocallyConnected2D(
filters=chout,
kernel_size=(kernel_size, kernel_size),
strides=(strides, strides),
padding=padding,
activation=None,
use_bias=not self.config.batch_norm,
name=os.path.join(self.variable_scope, 'layer%d' %i, layer_type),
kernel_initializer=self.config.kernel_initializer,
combining_weights_initializer=(
self.config.combining_weights_initializer),
spatial_rank=self.config.rank,
normalize_weights=self.config.normalize_weights,
input_dependent=config.input_dependent,
share_row_combining_weights=self.config.share_row_combining_weights,
share_col_combining_weights=self.config.share_col_combining_weights)
else:
raise ValueError('Can not recognize layer %s type.' % layer_type)
chin = chout
self.layers_list.append(layer)
self.pass_is_training_list.append(False)
if self.config.batch_norm:
layer = tf.keras.layers.BatchNormalization(
trainable=True, momentum=MOMENTUM, epsilon=EPS)
self.layers_list.append(layer)
self.pass_is_training_list.append(True)
layer = tf.keras.layers.ReLU()
self.layers_list.append(layer)
self.pass_is_training_list.append(False)
if self.config.global_avg_pooling:
self.layers_list.append(tf.keras.layers.GlobalAveragePooling2D())
else:
self.layers_list.append(tf.keras.layers.Flatten())
self.pass_is_training_list.append(False)
self.layers_list.append(tf.keras.layers.Dense(
units=self.config.num_classes, activation=None, use_bias=True,
name='logits'))
self.pass_is_training_list.append(False)
def __call__(self, images, is_training):
endpoints = {}
if self.config.coord_conv:
# Append position channels.
net = tf.concat([images, utils.position_channels(images)], axis=3)
else:
net = images
for i, (pass_is_training, layer) in enumerate(
zip(self.pass_is_training_list, self.layers_list)):
net = layer(net, training=is_training) if pass_is_training else layer(net)
endpoints['layer%d' % i] = net
tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, layer.updates)
self.add_update(layer.updates)
logits = net
return logits, endpoints
| [
"tensorflow.compat.v1.keras.layers.Flatten",
"os.path.join",
"tensorflow.compat.v1.keras.layers.ReLU",
"tensorflow.compat.v1.add_to_collection",
"copy.deepcopy",
"low_rank_local_connectivity.utils.position_channels",
"tensorflow.compat.v1.keras.layers.Dense",
"tensorflow.compat.v1.keras.layers.GlobalAveragePooling2D",
"tensorflow.compat.v1.keras.layers.BatchNormalization"
] | [((1396, 1417), 'copy.deepcopy', 'copy.deepcopy', (['config'], {}), '(config)\n', (1409, 1417), False, 'import copy\n'), ((5475, 5497), 'tensorflow.compat.v1.keras.layers.ReLU', 'tf.keras.layers.ReLU', ([], {}), '()\n', (5495, 5497), True, 'import tensorflow.compat.v1 as tf\n'), ((5836, 5939), 'tensorflow.compat.v1.keras.layers.Dense', 'tf.keras.layers.Dense', ([], {'units': 'self.config.num_classes', 'activation': 'None', 'use_bias': '(True)', 'name': '"""logits"""'}), "(units=self.config.num_classes, activation=None,\n use_bias=True, name='logits')\n", (5857, 5939), True, 'import tensorflow.compat.v1 as tf\n'), ((6465, 6525), 'tensorflow.compat.v1.add_to_collection', 'tf.add_to_collection', (['tf.GraphKeys.UPDATE_OPS', 'layer.updates'], {}), '(tf.GraphKeys.UPDATE_OPS, layer.updates)\n', (6485, 6525), True, 'import tensorflow.compat.v1 as tf\n'), ((5276, 5362), 'tensorflow.compat.v1.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {'trainable': '(True)', 'momentum': 'MOMENTUM', 'epsilon': 'EPS'}), '(trainable=True, momentum=MOMENTUM,\n epsilon=EPS)\n', (5310, 5362), True, 'import tensorflow.compat.v1 as tf\n'), ((5652, 5692), 'tensorflow.compat.v1.keras.layers.GlobalAveragePooling2D', 'tf.keras.layers.GlobalAveragePooling2D', ([], {}), '()\n', (5690, 5692), True, 'import tensorflow.compat.v1 as tf\n'), ((5734, 5759), 'tensorflow.compat.v1.keras.layers.Flatten', 'tf.keras.layers.Flatten', ([], {}), '()\n', (5757, 5759), True, 'import tensorflow.compat.v1 as tf\n'), ((6158, 6189), 'low_rank_local_connectivity.utils.position_channels', 'utils.position_channels', (['images'], {}), '(images)\n', (6181, 6189), False, 'from low_rank_local_connectivity import utils\n'), ((2606, 2666), 'os.path.join', 'os.path.join', (['self.variable_scope', "('layer%d' % i)", 'layer_type'], {}), "(self.variable_scope, 'layer%d' % i, layer_type)\n", (2618, 2666), False, 'import os\n'), ((3397, 3457), 'os.path.join', 'os.path.join', (['self.variable_scope', "('layer%d' % i)", 'layer_type'], {}), "(self.variable_scope, 'layer%d' % i, layer_type)\n", (3409, 3457), False, 'import os\n'), ((3879, 3939), 'os.path.join', 'os.path.join', (['self.variable_scope', "('layer%d' % i)", 'layer_type'], {}), "(self.variable_scope, 'layer%d' % i, layer_type)\n", (3891, 3939), False, 'import os\n'), ((4490, 4550), 'os.path.join', 'os.path.join', (['self.variable_scope', "('layer%d' % i)", 'layer_type'], {}), "(self.variable_scope, 'layer%d' % i, layer_type)\n", (4502, 4550), False, 'import os\n')] |
import datetime
import json
import os
from pathlib import Path
from types import SimpleNamespace
from typing import List
from typing import NamedTuple, Union, Optional, Callable
from uuid import uuid3, NAMESPACE_DNS
from dateutil.parser import parse
_VIDEO_SUFFIXES = [".mkv", ".mp4"]
_IMAGE_SUFFIXES = [".jpg"]
_PERMITTED_EXTENSIONS = _VIDEO_SUFFIXES + _IMAGE_SUFFIXES
class PathDetails(NamedTuple):
path: Path
event_id: Optional[int]
camera_id: Optional[int]
timestamp: datetime.datetime
camera_name: str
is_image: bool
is_lowres: bool
class Event(SimpleNamespace):
event_id: str
timestamp: Union[datetime.datetime, str]
camera_name: str
high_res_image_path: str
low_res_image_path: str
high_res_video_path: str
low_res_video_path: str
def get_sorted_paths(path: Path) -> List[Path]:
return sorted(Path(path).iterdir(), key=os.path.getmtime)
def format_timestamp_for_go(timestamp: Union[datetime.datetime, str]) -> str:
if isinstance(timestamp, str):
timestamp = parse(timestamp)
us = timestamp.strftime("%f")
tz_raw = timestamp.strftime("%z")
tz = "{}:{}".format(tz_raw[0:3], tz_raw[3:])
return timestamp.strftime(f"%Y-%m-%dT%H:%M:%S.{us}00{tz}")
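# Worked example (illustrative, added): datetime(2021, 5, 3, 14, 30, 15, 123456,
# tzinfo=timezone(timedelta(hours=10))) is rendered as
# "2021-05-03T14:30:15.12345600+10:00" -- the six microsecond digits plus a
# literal "00", with the "+1000" offset reformatted as "+10:00" to match the
# Go-side timestamp layout.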
def parse_paths(paths: List[Path], tzinfo: datetime.tzinfo, parse_method: Callable) -> List[PathDetails]:
return [
y
for y in [parse_method(path=x, tzinfo=tzinfo) for x in paths if x is not None]
if y is not None
]
def build_event_for_some_path_details(some_path_details: List[PathDetails], path: Path):
if len(some_path_details) != 4:
raise ValueError(
f"expected some_path_details to be 4 long (and related); instead it was {len(some_path_details)} long"
)
event_ids = list(set([x.event_id for x in some_path_details]))
if len(event_ids) != 1:
raise ValueError(
f"expected all PathDetails to have a common event_id; instead they were {event_ids}"
)
camera_ids = list(set([x.camera_id for x in some_path_details]))
if len(camera_ids) != 1:
raise ValueError(
f"expected all PathDetails to have a common camera_id; instead they were {camera_ids}"
)
camera_names = list(set([x.camera_name for x in some_path_details]))
if len(camera_names) != 1:
raise ValueError(
f"expected all PathDetails to have a common camera_name; instead they were {camera_names}"
)
high_res_image_paths = list(
set([x.path for x in some_path_details if x.is_image and not x.is_lowres])
)
if len(high_res_image_paths) != 1:
raise ValueError(
f"expected to find 1 high_res_image_path from PathDetails; instead found {high_res_image_paths}"
)
low_res_image_paths = list(
set([x.path for x in some_path_details if x.is_image and x.is_lowres])
)
if len(low_res_image_paths) != 1:
raise ValueError(
f"expected to find 1 low_res_image_path from PathDetails; instead found {low_res_image_paths}"
)
high_res_video_paths = list(
set([x.path for x in some_path_details if not x.is_image and not x.is_lowres])
)
if len(high_res_video_paths) != 1:
raise ValueError(
f"expected to find 1 high_res_video_path from PathDetails; instead found {high_res_video_paths}"
)
low_res_video_paths = list(
set([x.path for x in some_path_details if not x.is_image and x.is_lowres])
)
if len(low_res_video_paths) != 1:
raise ValueError(
f"expected to find 1 low_res_video_path from PathDetails; instead found {low_res_video_paths}"
)
timestamp = sorted([x.timestamp for x in some_path_details])[0]
high_res_image_path = high_res_image_paths[0]
low_res_image_path = low_res_image_paths[0]
high_res_video_path = high_res_video_paths[0]
low_res_video_path = low_res_video_paths[0]
# in Go:
# eventId := uuid.NewSHA1(
# uuid.NameSpaceDNS,
# []byte(fmt.Sprintf("%v, %v, %v, %v, %v", timestamp, highResImagePath, lowResImagePath, highResVideoPath, lowResVideoPath)),
# )
event_id = uuid3(
NAMESPACE_DNS,
f"{format_timestamp_for_go(timestamp)}, {high_res_image_path}, {low_res_image_path}, {high_res_video_path}, {low_res_video_path}",
)
return Event(
event_id=str(event_id),
timestamp=timestamp,
camera_name=camera_names[0],
high_res_image_path=str(path / high_res_image_path),
low_res_image_path=str(path / low_res_image_path),
high_res_video_path=str(path / high_res_video_path),
low_res_video_path=str(path / low_res_video_path),
)
def relate_path_details(
some_path_details: List[PathDetails],
get_key_methods: List[Callable]
) -> List[List[PathDetails]]:
some_path_details_by_key = {}
for path_details in some_path_details:
keys = [x(path_details) for x in get_key_methods]
for key in keys:
some_path_details_by_key.setdefault(key, [])
some_path_details_by_key[key] += [path_details]
viable_some_path_details_by_key = {
k: v for k, v in some_path_details_by_key.items() if len(v) == 4
}
deduplicated_path_details = []
for some_path_details in viable_some_path_details_by_key.values():
if some_path_details not in deduplicated_path_details:
deduplicated_path_details += [some_path_details]
return deduplicated_path_details
def build_events_for_related_path_details(
related_path_details: List[List[PathDetails]], path: Path
) -> List[Event]:
events: List[Event] = []
for some_path_details in related_path_details:
events += [
build_event_for_some_path_details(
some_path_details=some_path_details, path=path
)
]
sorted_events = sorted(events, key=lambda x: x.timestamp)
for event in sorted_events:
event.timestamp = format_timestamp_for_go(timestamp=event.timestamp)
return sorted_events
def build_json_lines_from_events(events: List[Event]) -> str:
return "\n".join(
[
json.dumps(
{
"event_id": x.event_id,
"timestamp": x.timestamp,
"camera_name": x.camera_name,
"high_res_image_path": x.high_res_image_path,
"low_res_image_path": x.low_res_image_path,
"high_res_video_path": x.high_res_video_path,
"low_res_video_path": x.low_res_video_path,
}
)
for x in events
]
)
def write_to_file(path: Path, data: str):
with open(str(path), "w") as f:
f.write(data)
def rebuild_event_store(root_path: Path, tzinfo: datetime.tzinfo, json_path: Path, parse_method: Callable, get_key_methods: List[Callable]):
print(f"getting sorted paths from {root_path}...")
sorted_paths = get_sorted_paths(path=root_path)
print(f"got {len(sorted_paths)} sorted paths")
print("parsing sorted paths...")
some_path_details = parse_paths(paths=sorted_paths, tzinfo=tzinfo, parse_method=parse_method)
print(f"got {len(some_path_details)} parsed paths")
print("relating parsed paths...")
related_path_details = relate_path_details(some_path_details=some_path_details,
get_key_methods=get_key_methods)
print(f"got {len(related_path_details)} related paths")
print("building events...")
events = build_events_for_related_path_details(
related_path_details=related_path_details, path=root_path
)
print(f"built {len(events)} events")
print("building json lines...")
json_lines = build_json_lines_from_events(events=events)
print(f"built {len(json_lines)} bytes")
print(f"writing to {json_path}")
write_to_file(path=json_path, data=json_lines)
print("done.")
| [
"dateutil.parser.parse",
"json.dumps",
"pathlib.Path"
] | [((1047, 1063), 'dateutil.parser.parse', 'parse', (['timestamp'], {}), '(timestamp)\n', (1052, 1063), False, 'from dateutil.parser import parse\n'), ((6220, 6505), 'json.dumps', 'json.dumps', (["{'event_id': x.event_id, 'timestamp': x.timestamp, 'camera_name': x.\n camera_name, 'high_res_image_path': x.high_res_image_path,\n 'low_res_image_path': x.low_res_image_path, 'high_res_video_path': x.\n high_res_video_path, 'low_res_video_path': x.low_res_video_path}"], {}), "({'event_id': x.event_id, 'timestamp': x.timestamp, 'camera_name':\n x.camera_name, 'high_res_image_path': x.high_res_image_path,\n 'low_res_image_path': x.low_res_image_path, 'high_res_video_path': x.\n high_res_video_path, 'low_res_video_path': x.low_res_video_path})\n", (6230, 6505), False, 'import json\n'), ((868, 878), 'pathlib.Path', 'Path', (['path'], {}), '(path)\n', (872, 878), False, 'from pathlib import Path\n')] |
# Generated by Django 2.2.1 on 2019-09-27 14:23
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('schoolio', '0004_auto_20190927_0405'),
]
operations = [
migrations.AlterField(
model_name='student_assessment',
name='assessment_mark',
field=models.IntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='student_assessment',
name='assessment_score',
field=models.IntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='student_assessment',
name='understanding_level',
field=models.CharField(blank=True, max_length=150, null=True),
),
]
| [
"django.db.models.CharField",
"django.db.models.IntegerField"
] | [((358, 400), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (377, 400), False, 'from django.db import migrations, models\n'), ((544, 586), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (563, 586), False, 'from django.db import migrations, models\n'), ((733, 788), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(150)', 'null': '(True)'}), '(blank=True, max_length=150, null=True)\n', (749, 788), False, 'from django.db import migrations, models\n')] |
import random
print("Title : Eat, Drink, And Be Sick")
noun = []
for i in range(4):
n = input("Enter noun : ")
noun.append(n)
plural = []
for i in range(6):
pn = input("Enter plural noun : ")
plural.append(pn)
adjective = []
for i in range(2):
a = input("Enter adjective : ")
adjective.append(a)
adverb = input("Enter adverb : ")
letter = input("Enter any letter : ")
body_part = input("Enter any body part : ")
print("An inspector from the Department of Health and ", random.choice(noun) , " Services paid a surprise visit to our " , random.choice(adjective) , " school cafeteria.")
print("The lunch special, prepared by our " , random.choice(adjective) , "dietician, was spaghetti and " , random.choice(noun) , " balls with a choice of either a " , random.choice(noun) , " salad or French " , random.choice(plural) , ".")
print("The inspector found the meat-" , random.choice(plural) , " to be overcooked and discovered a live " , random.choice(noun) , " in the fries,causing him to have a " + body_part + " ache.")
print("In response, he threw up all over his " , random.choice(plural) , ".")
print("In his report, the inspector " + adverb + " recommended that the school cafeteria serve only nutritious " , random.choice(plural) , " as well as low-calorie " , random.choice(plural) , " and that all of the saturated " , random.choice(plural) , " be eliminated.")
print("He rated the cafeteria a " + letter + "-minus.")
| [
"random.choice"
] | [((493, 512), 'random.choice', 'random.choice', (['noun'], {}), '(noun)\n', (506, 512), False, 'import random\n'), ((559, 583), 'random.choice', 'random.choice', (['adjective'], {}), '(adjective)\n', (572, 583), False, 'import random\n'), ((654, 678), 'random.choice', 'random.choice', (['adjective'], {}), '(adjective)\n', (667, 678), False, 'import random\n'), ((715, 734), 'random.choice', 'random.choice', (['noun'], {}), '(noun)\n', (728, 734), False, 'import random\n'), ((775, 794), 'random.choice', 'random.choice', (['noun'], {}), '(noun)\n', (788, 794), False, 'import random\n'), ((819, 840), 'random.choice', 'random.choice', (['plural'], {}), '(plural)\n', (832, 840), False, 'import random\n'), ((888, 909), 'random.choice', 'random.choice', (['plural'], {}), '(plural)\n', (901, 909), False, 'import random\n'), ((957, 976), 'random.choice', 'random.choice', (['noun'], {}), '(noun)\n', (970, 976), False, 'import random\n'), ((1091, 1112), 'random.choice', 'random.choice', (['plural'], {}), '(plural)\n', (1104, 1112), False, 'import random\n'), ((1235, 1256), 'random.choice', 'random.choice', (['plural'], {}), '(plural)\n', (1248, 1256), False, 'import random\n'), ((1288, 1309), 'random.choice', 'random.choice', (['plural'], {}), '(plural)\n', (1301, 1309), False, 'import random\n'), ((1348, 1369), 'random.choice', 'random.choice', (['plural'], {}), '(plural)\n', (1361, 1369), False, 'import random\n')] |
import numpy
g = open('/home/srallaba/mgc/transposed/arctic_a0404.mgc','w')
x = numpy.loadtxt('/home/srallaba/mgc_spaces/arctic_a0404.mgc')
numpy.savetxt(g, numpy.transpose(x))
g.close()
| [
"numpy.loadtxt",
"numpy.transpose"
] | [((82, 141), 'numpy.loadtxt', 'numpy.loadtxt', (['"""/home/srallaba/mgc_spaces/arctic_a0404.mgc"""'], {}), "('/home/srallaba/mgc_spaces/arctic_a0404.mgc')\n", (95, 141), False, 'import numpy\n'), ((159, 177), 'numpy.transpose', 'numpy.transpose', (['x'], {}), '(x)\n', (174, 177), False, 'import numpy\n')] |
from rest_framework.response import Response
from rest_framework.views import APIView
from django_redis import get_redis_connection
from goods.models import SKU
from decimal import Decimal
from rest_framework.generics import CreateAPIView,ListAPIView
from rest_framework.mixins import ListModelMixin
from orders.serializers import OrderShowSerializer, OrderSaveSerializer, OrderListSerializer, CommentSerializers, \
CommentSaveSerializers, CommentShowSerializers
from users.models import User
from orders.models import OrderInfo,OrderGoods
from orders.utils import PageNum
from rest_framework.filters import OrderingFilter
# Display order information
class OrdersShowView(APIView):
def get(self, request):
        # Get the current user object
user = request.user
        # Open a redis connection to the cart database
        conn = get_redis_connection('cart')
        # Get the cart hash data: sku_id -> count
        sku_id_count = conn.hgetall('cart_%s' %user.id)  # {10:1}
        # Convert the byte keys/values to int
cart = {}
for sku_id, count in sku_id_count.items():
cart[int(sku_id)] = int(count)
        # Get the set of selected sku ids
        sku_ids = conn.smembers('cart_selected_%s' %user.id)
        # Query the SKU objects for all selected items
        skus = SKU.objects.filter(id__in=sku_ids)
        # Attach a count attribute to each SKU object (the SKU table has no count field, so it is set manually)
for sku in skus:
sku.count = cart[sku.id]
        # Flat-rate shipping fee
        freight = Decimal(10.00)
        # Serialize the freight and SKU objects for the response
ser = OrderShowSerializer({'freight': freight, 'skus': skus})
return Response(ser.data)
# 保存订单信息
class OrderSaveView(ListModelMixin, CreateAPIView):
serializer_class = OrderSaveSerializer
# 订单列表数据获取
class OrderListView(ListAPIView):
pagination_class = PageNum
serializer_class = OrderListSerializer
def get_queryset(self):
user = self.request.user
order = OrderInfo.objects.filter(user = user)
return order
# 评论-获取商品信息
class OrderComment(ListAPIView):
serializer_class = CommentSerializers
def get_queryset(self):
order_id = self.kwargs['order_id']
skus = OrderGoods.objects.filter(order_id = order_id, is_commented=False)
return skus
# 保存评论
class SaveSkuComment(CreateAPIView):
serializer_class = CommentSaveSerializers
# 商品详情中的评论展示
class ShowComment(ListAPIView):
serializer_class = CommentShowSerializers
def get_queryset(self):
# 从kwargs中获取sku_id
sku_id = self.kwargs['sku_id']
# 获取商品信息
orders = OrderGoods.objects.filter(sku_id=sku_id, is_commented = True)
for sku in orders:
skuinfo = OrderInfo.objects.get(order_id=sku.order_id)
user = User.objects.get(id = skuinfo.user_id)
# 获取用户名,判断是否匿名
sku.username = user.username
if sku.is_anonymous == True:
sku.username = '****'
return orders
| [
"django_redis.get_redis_connection",
"orders.serializers.OrderShowSerializer",
"goods.models.SKU.objects.filter",
"orders.models.OrderGoods.objects.filter",
"orders.models.OrderInfo.objects.get",
"orders.models.OrderInfo.objects.filter",
"rest_framework.response.Response",
"users.models.User.objects.get",
"decimal.Decimal"
] | [((777, 805), 'django_redis.get_redis_connection', 'get_redis_connection', (['"""cart"""'], {}), "('cart')\n", (797, 805), False, 'from django_redis import get_redis_connection\n'), ((1156, 1190), 'goods.models.SKU.objects.filter', 'SKU.objects.filter', ([], {'id__in': 'sku_ids'}), '(id__in=sku_ids)\n', (1174, 1190), False, 'from goods.models import SKU\n'), ((1334, 1347), 'decimal.Decimal', 'Decimal', (['(10.0)'], {}), '(10.0)\n', (1341, 1347), False, 'from decimal import Decimal\n'), ((1383, 1438), 'orders.serializers.OrderShowSerializer', 'OrderShowSerializer', (["{'freight': freight, 'skus': skus}"], {}), "({'freight': freight, 'skus': skus})\n", (1402, 1438), False, 'from orders.serializers import OrderShowSerializer, OrderSaveSerializer, OrderListSerializer, CommentSerializers, CommentSaveSerializers, CommentShowSerializers\n'), ((1454, 1472), 'rest_framework.response.Response', 'Response', (['ser.data'], {}), '(ser.data)\n', (1462, 1472), False, 'from rest_framework.response import Response\n'), ((1778, 1813), 'orders.models.OrderInfo.objects.filter', 'OrderInfo.objects.filter', ([], {'user': 'user'}), '(user=user)\n', (1802, 1813), False, 'from orders.models import OrderInfo, OrderGoods\n'), ((2012, 2076), 'orders.models.OrderGoods.objects.filter', 'OrderGoods.objects.filter', ([], {'order_id': 'order_id', 'is_commented': '(False)'}), '(order_id=order_id, is_commented=False)\n', (2037, 2076), False, 'from orders.models import OrderInfo, OrderGoods\n'), ((2410, 2469), 'orders.models.OrderGoods.objects.filter', 'OrderGoods.objects.filter', ([], {'sku_id': 'sku_id', 'is_commented': '(True)'}), '(sku_id=sku_id, is_commented=True)\n', (2435, 2469), False, 'from orders.models import OrderInfo, OrderGoods\n'), ((2521, 2565), 'orders.models.OrderInfo.objects.get', 'OrderInfo.objects.get', ([], {'order_id': 'sku.order_id'}), '(order_id=sku.order_id)\n', (2542, 2565), False, 'from orders.models import OrderInfo, OrderGoods\n'), ((2585, 2621), 'users.models.User.objects.get', 'User.objects.get', ([], {'id': 'skuinfo.user_id'}), '(id=skuinfo.user_id)\n', (2601, 2621), False, 'from users.models import User\n')] |
import argparse
import errno
import logging
import os
import platform
import signal
import sys
from collections import OrderedDict
from contextlib import closing
from distutils.version import StrictVersion
from functools import partial
from gettext import gettext
from itertools import chain
from pathlib import Path
from time import sleep
from typing import List
import requests
from socks import __version__ as socks_version
from websocket import __version__ as websocket_version
import streamlink.logger as logger
from streamlink import NoPluginError, PluginError, StreamError, Streamlink, __version__ as streamlink_version
from streamlink.cache import Cache
from streamlink.exceptions import FatalPluginError
from streamlink.plugin import Plugin, PluginOptions
from streamlink.stream import StreamIO, StreamProcess
from streamlink.utils.named_pipe import NamedPipe
from streamlink_cli.argparser import build_parser
from streamlink_cli.compat import DeprecatedPath, is_win32, stdout
from streamlink_cli.console import ConsoleOutput, ConsoleUserInputRequester
from streamlink_cli.constants import CONFIG_FILES, DEFAULT_STREAM_METADATA, LOG_DIR, PLUGIN_DIRS, STREAM_SYNONYMS
from streamlink_cli.output import FileOutput, Output, PlayerOutput
from streamlink_cli.utils import Formatter, HTTPServer, datetime, ignored, progress, stream_to_url
ACCEPTABLE_ERRNO = (errno.EPIPE, errno.EINVAL, errno.ECONNRESET)
try:
ACCEPTABLE_ERRNO += (errno.WSAECONNABORTED,)
except AttributeError:
pass # Not windows
QUIET_OPTIONS = ("json", "stream_url", "subprocess_cmdline", "quiet")
args = None
console: ConsoleOutput = None
output: Output = None
plugin: Plugin = None
stream_fd: StreamIO = None
streamlink: Streamlink = None
log = logging.getLogger("streamlink.cli")
def get_formatter(plugin: Plugin):
return Formatter(
{
"url": lambda: args.url,
"author": lambda: plugin.get_author(),
"category": lambda: plugin.get_category(),
"game": lambda: plugin.get_category(),
"title": lambda: plugin.get_title(),
"time": lambda: datetime.now()
},
{
"time": lambda dt, fmt: dt.strftime(fmt)
}
)
def check_file_output(filename, force):
"""Checks if file already exists and ask the user if it should
be overwritten if it does."""
log.debug("Checking file output")
if os.path.isfile(filename) and not force:
if sys.stdin.isatty():
answer = console.ask(f"File {filename} already exists! Overwrite it? [y/N] ")
if answer.lower() != "y":
sys.exit()
else:
log.error(f"File {filename} already exists, use --force to overwrite it.")
sys.exit()
return FileOutput(filename)
def create_output(formatter: Formatter):
"""Decides where to write the stream.
Depending on arguments it can be one of these:
- The stdout pipe
- A subprocess' stdin pipe
- A named pipe that the subprocess reads from
- A regular file
"""
if (args.output or args.stdout) and (args.record or args.record_and_pipe):
console.exit("Cannot use record options with other file output options.")
if args.output:
if args.output == "-":
out = FileOutput(fd=stdout)
else:
out = check_file_output(formatter.filename(args.output, args.fs_safe_rules), args.force)
elif args.stdout:
out = FileOutput(fd=stdout)
elif args.record_and_pipe:
record = check_file_output(formatter.filename(args.record_and_pipe, args.fs_safe_rules), args.force)
out = FileOutput(fd=stdout, record=record)
else:
http = namedpipe = record = None
if not args.player:
console.exit("The default player (VLC) does not seem to be "
"installed. You must specify the path to a player "
"executable with --player.")
if args.player_fifo:
try:
namedpipe = NamedPipe()
except OSError as err:
console.exit(f"Failed to create pipe: {err}")
elif args.player_http:
http = create_http_server()
if args.record:
record = check_file_output(formatter.filename(args.record, args.fs_safe_rules), args.force)
log.info(f"Starting player: {args.player}")
out = PlayerOutput(
args.player,
args=args.player_args,
quiet=not args.verbose_player,
kill=not args.player_no_close,
namedpipe=namedpipe,
http=http,
record=record,
title=formatter.title(args.title, defaults=DEFAULT_STREAM_METADATA) if args.title else args.url
)
return out
def create_http_server(*_args, **_kwargs):
"""Creates a HTTP server listening on a given host and port.
If host is empty, listen on all available interfaces, and if port is 0,
listen on a random high port.
"""
try:
http = HTTPServer()
http.bind(*_args, **_kwargs)
except OSError as err:
console.exit(f"Failed to create HTTP server: {err}")
return http
def iter_http_requests(server, player):
"""Repeatedly accept HTTP connections on a server.
    Runs forever when serving externally, or only while the given player is running.
"""
while not player or player.running:
try:
yield server.open(timeout=2.5)
except OSError:
continue
def output_stream_http(plugin, initial_streams, formatter: Formatter, external=False, port=0):
"""Continuously output the stream over HTTP."""
global output
if not external:
if not args.player:
console.exit("The default player (VLC) does not seem to be "
"installed. You must specify the path to a player "
"executable with --player.")
server = create_http_server()
player = output = PlayerOutput(
args.player,
args=args.player_args,
filename=server.url,
quiet=not args.verbose_player,
title=formatter.title(args.title, defaults=DEFAULT_STREAM_METADATA) if args.title else args.url
)
try:
log.info(f"Starting player: {args.player}")
if player:
player.open()
except OSError as err:
console.exit(f"Failed to start player: {args.player} ({err})")
else:
server = create_http_server(host=None, port=port)
player = None
log.info("Starting server, access with one of:")
for url in server.urls:
log.info(" " + url)
for req in iter_http_requests(server, player):
user_agent = req.headers.get("User-Agent") or "unknown player"
log.info(f"Got HTTP request from {user_agent}")
stream_fd = prebuffer = None
while not stream_fd and (not player or player.running):
try:
streams = initial_streams or fetch_streams(plugin)
initial_streams = None
for stream_name in (resolve_stream_name(streams, s) for s in args.stream):
if stream_name in streams:
stream = streams[stream_name]
break
else:
log.info("Stream not available, will re-fetch streams in 10 sec")
sleep(10)
continue
except PluginError as err:
log.error(f"Unable to fetch new streams: {err}")
continue
try:
log.info(f"Opening stream: {stream_name} ({type(stream).shortname()})")
stream_fd, prebuffer = open_stream(stream)
except StreamError as err:
log.error(err)
if stream_fd and prebuffer:
log.debug("Writing stream to player")
read_stream(stream_fd, server, prebuffer, formatter)
server.close(True)
player.close()
server.close()
def output_stream_passthrough(stream, formatter: Formatter):
"""Prepares a filename to be passed to the player."""
global output
filename = f'"{stream_to_url(stream)}"'
output = PlayerOutput(
args.player,
args=args.player_args,
filename=filename,
call=True,
quiet=not args.verbose_player,
title=formatter.title(args.title, defaults=DEFAULT_STREAM_METADATA) if args.title else args.url
)
try:
log.info(f"Starting player: {args.player}")
output.open()
except OSError as err:
console.exit(f"Failed to start player: {args.player} ({err})")
return False
return True
def open_stream(stream):
"""Opens a stream and reads 8192 bytes from it.
This is useful to check if a stream actually has data
before opening the output.
"""
global stream_fd
# Attempts to open the stream
try:
stream_fd = stream.open()
except StreamError as err:
raise StreamError(f"Could not open stream: {err}")
# Read 8192 bytes before proceeding to check for errors.
# This is to avoid opening the output unnecessarily.
try:
log.debug("Pre-buffering 8192 bytes")
prebuffer = stream_fd.read(8192)
except OSError as err:
stream_fd.close()
raise StreamError(f"Failed to read data from stream: {err}")
if not prebuffer:
stream_fd.close()
raise StreamError("No data returned from stream")
return stream_fd, prebuffer
def output_stream(stream, formatter: Formatter):
"""Open stream, create output and finally write the stream to output."""
global output
success_open = False
for i in range(args.retry_open):
try:
stream_fd, prebuffer = open_stream(stream)
success_open = True
break
except StreamError as err:
log.error(f"Try {i + 1}/{args.retry_open}: Could not open stream {stream} ({err})")
if not success_open:
console.exit(f"Could not open stream {stream}, tried {args.retry_open} times, exiting")
output = create_output(formatter)
try:
output.open()
except OSError as err:
if isinstance(output, PlayerOutput):
console.exit(f"Failed to start player: {args.player} ({err})")
else:
console.exit(f"Failed to open output: {output.filename} ({err})")
with closing(output):
log.debug("Writing stream to output")
read_stream(stream_fd, output, prebuffer, formatter)
return True
def read_stream(stream, output, prebuffer, formatter: Formatter, chunk_size=8192):
"""Reads data from stream and then writes it to the output."""
is_player = isinstance(output, PlayerOutput)
is_http = isinstance(output, HTTPServer)
is_fifo = is_player and output.namedpipe
show_progress = (
isinstance(output, FileOutput)
and output.fd is not stdout
and (sys.stdout.isatty() or args.force_progress)
)
show_record_progress = (
hasattr(output, "record")
and isinstance(output.record, FileOutput)
and output.record.fd is not stdout
and (sys.stdout.isatty() or args.force_progress)
)
stream_iterator = chain(
[prebuffer],
iter(partial(stream.read, chunk_size), b"")
)
if show_progress:
stream_iterator = progress(
stream_iterator,
prefix=os.path.basename(output.filename)
)
elif show_record_progress:
stream_iterator = progress(
stream_iterator,
prefix=os.path.basename(output.record.filename)
)
try:
for data in stream_iterator:
# We need to check if the player process still exists when
# using named pipes on Windows since the named pipe is not
# automatically closed by the player.
if is_win32 and is_fifo:
output.player.poll()
if output.player.returncode is not None:
log.info("Player closed")
break
try:
output.write(data)
except OSError as err:
if is_player and err.errno in ACCEPTABLE_ERRNO:
log.info("Player closed")
elif is_http and err.errno in ACCEPTABLE_ERRNO:
log.info("HTTP connection closed")
else:
console.exit(f"Error when writing to output: {err}, exiting")
break
except OSError as err:
console.exit(f"Error when reading from stream: {err}, exiting")
finally:
stream.close()
log.info("Stream ended")
def handle_stream(plugin, streams, stream_name):
"""Decides what to do with the selected stream.
Depending on arguments it can be one of these:
- Output internal command-line
     - Output JSON representation
- Continuously output the stream over HTTP
- Output stream data to selected output
"""
stream_name = resolve_stream_name(streams, stream_name)
stream = streams[stream_name]
# Print internal command-line if this stream
# uses a subprocess.
if args.subprocess_cmdline:
if isinstance(stream, StreamProcess):
try:
cmdline = stream.cmdline()
except StreamError as err:
console.exit(err)
console.msg(cmdline)
else:
console.exit("The stream specified cannot be translated to a command")
# Print JSON representation of the stream
elif args.json:
console.msg_json(
stream,
metadata=plugin.get_metadata()
)
elif args.stream_url:
try:
console.msg(stream.to_url())
except TypeError:
console.exit("The stream specified cannot be translated to a URL")
# Output the stream
else:
# Find any streams with a '_alt' suffix and attempt
# to use these in case the main stream is not usable.
alt_streams = list(filter(lambda k: stream_name + "_alt" in k,
sorted(streams.keys())))
file_output = args.output or args.stdout
formatter = get_formatter(plugin)
for stream_name in [stream_name] + alt_streams:
stream = streams[stream_name]
stream_type = type(stream).shortname()
if stream_type in args.player_passthrough and not file_output:
log.info(f"Opening stream: {stream_name} ({stream_type})")
success = output_stream_passthrough(stream, formatter)
elif args.player_external_http:
return output_stream_http(plugin, streams, formatter, external=True,
port=args.player_external_http_port)
elif args.player_continuous_http and not file_output:
return output_stream_http(plugin, streams, formatter)
else:
log.info(f"Opening stream: {stream_name} ({stream_type})")
success = output_stream(stream, formatter)
if success:
break
def fetch_streams(plugin):
"""Fetches streams using correct parameters."""
return plugin.streams(stream_types=args.stream_types,
sorting_excludes=args.stream_sorting_excludes)
def fetch_streams_with_retry(plugin, interval, count):
"""Attempts to fetch streams repeatedly
    until some are returned or the retry limit is hit."""
try:
streams = fetch_streams(plugin)
except PluginError as err:
log.error(err)
streams = None
if not streams:
log.info(f"Waiting for streams, retrying every {interval} second(s)")
attempts = 0
while not streams:
sleep(interval)
try:
streams = fetch_streams(plugin)
except FatalPluginError:
raise
except PluginError as err:
log.error(err)
if count > 0:
attempts += 1
if attempts >= count:
break
return streams
def resolve_stream_name(streams, stream_name):
"""Returns the real stream name of a synonym."""
if stream_name in STREAM_SYNONYMS and stream_name in streams:
for name, stream in streams.items():
if stream is streams[stream_name] and name not in STREAM_SYNONYMS:
return name
return stream_name
def format_valid_streams(plugin, streams):
"""Formats a dict of streams.
Filters out synonyms and displays them next to
the stream they point to.
Streams are sorted according to their quality
(based on plugin.stream_weight).
"""
delimiter = ", "
validstreams = []
for name, stream in sorted(streams.items(),
key=lambda stream: plugin.stream_weight(stream[0])):
if name in STREAM_SYNONYMS:
continue
def synonymfilter(n):
return stream is streams[n] and n is not name
synonyms = list(filter(synonymfilter, streams.keys()))
if len(synonyms) > 0:
joined = delimiter.join(synonyms)
name = f"{name} ({joined})"
validstreams.append(name)
return delimiter.join(validstreams)
def handle_url():
"""The URL handler.
Attempts to resolve the URL to a plugin and then attempts
to fetch a list of available streams.
Proceeds to handle stream if user specified a valid one,
otherwise output list of valid streams.
"""
try:
plugin = streamlink.resolve_url(args.url)
setup_plugin_options(streamlink, plugin)
log.info(f"Found matching plugin {plugin.module} for URL {args.url}")
if args.retry_max or args.retry_streams:
retry_streams = 1
retry_max = 0
if args.retry_streams:
retry_streams = args.retry_streams
if args.retry_max:
retry_max = args.retry_max
streams = fetch_streams_with_retry(plugin, retry_streams,
retry_max)
else:
streams = fetch_streams(plugin)
except NoPluginError:
console.exit(f"No plugin can handle URL: {args.url}")
except PluginError as err:
console.exit(err)
if not streams:
console.exit(f"No playable streams found on this URL: {args.url}")
if args.default_stream and not args.stream and not args.json:
args.stream = args.default_stream
if args.stream:
validstreams = format_valid_streams(plugin, streams)
for stream_name in args.stream:
if stream_name in streams:
log.info(f"Available streams: {validstreams}")
handle_stream(plugin, streams, stream_name)
return
err = f"The specified stream(s) '{', '.join(args.stream)}' could not be found"
if args.json:
console.msg_json(
plugin=plugin.module,
metadata=plugin.get_metadata(),
streams=streams,
error=err
)
else:
console.exit(f"{err}.\n Available streams: {validstreams}")
elif args.json:
console.msg_json(
plugin=plugin.module,
metadata=plugin.get_metadata(),
streams=streams
)
elif args.stream_url:
try:
console.msg(streams[list(streams)[-1]].to_manifest_url())
except TypeError:
console.exit("The stream specified cannot be translated to a URL")
else:
validstreams = format_valid_streams(plugin, streams)
console.msg(f"Available streams: {validstreams}")
def print_plugins():
"""Outputs a list of all plugins Streamlink has loaded."""
pluginlist = list(streamlink.get_plugins().keys())
pluginlist_formatted = ", ".join(sorted(pluginlist))
if args.json:
console.msg_json(pluginlist)
else:
console.msg(f"Loaded plugins: {pluginlist_formatted}")
def load_plugins(dirs: List[Path], showwarning: bool = True):
"""Attempts to load plugins from a list of directories."""
for directory in dirs:
if directory.is_dir():
success = streamlink.load_plugins(str(directory))
if success and type(directory) is DeprecatedPath:
log.info(f"Loaded plugins from deprecated path, see CLI docs for how to migrate: {directory}")
elif showwarning:
log.warning(f"Plugin path {directory} does not exist or is not a directory!")
def setup_args(parser: argparse.ArgumentParser, config_files: List[Path] = None, ignore_unknown: bool = False):
"""Parses arguments."""
global args
arglist = sys.argv[1:]
# Load arguments from config files
configs = [f"@{config_file}" for config_file in config_files or []]
args, unknown = parser.parse_known_args(configs + arglist)
if unknown and not ignore_unknown:
msg = gettext("unrecognized arguments: %s")
parser.error(msg % " ".join(unknown))
# Force lowercase to allow case-insensitive lookup
if args.stream:
args.stream = [stream.lower() for stream in args.stream]
if not args.url and args.url_param:
args.url = args.url_param
def setup_config_args(parser, ignore_unknown=False):
config_files = []
if args.config:
# We want the config specified last to get highest priority
for config_file in map(lambda path: Path(path).expanduser(), reversed(args.config)):
if config_file.is_file():
config_files.append(config_file)
else:
# Only load first available default config
for config_file in filter(lambda path: path.is_file(), CONFIG_FILES):
if type(config_file) is DeprecatedPath:
log.info(f"Loaded config from deprecated path, see CLI docs for how to migrate: {config_file}")
config_files.append(config_file)
break
if streamlink and args.url:
# Only load first available plugin config
with ignored(NoPluginError):
plugin = streamlink.resolve_url(args.url)
for config_file in CONFIG_FILES:
config_file = config_file.with_name(f"{config_file.name}.{plugin.module}")
if not config_file.is_file():
continue
if type(config_file) is DeprecatedPath:
log.info(f"Loaded plugin config from deprecated path, see CLI docs for how to migrate: {config_file}")
config_files.append(config_file)
break
if config_files:
setup_args(parser, config_files, ignore_unknown=ignore_unknown)
def setup_signals():
# Handle SIGTERM just like SIGINT
signal.signal(signal.SIGTERM, signal.default_int_handler)
def setup_http_session():
"""Sets the global HTTP settings, such as proxy and headers."""
if args.http_proxy:
streamlink.set_option("http-proxy", args.http_proxy)
if args.https_proxy:
streamlink.set_option("https-proxy", args.https_proxy)
if args.http_cookie:
streamlink.set_option("http-cookies", dict(args.http_cookie))
if args.http_header:
streamlink.set_option("http-headers", dict(args.http_header))
if args.http_query_param:
streamlink.set_option("http-query-params", dict(args.http_query_param))
if args.http_ignore_env:
streamlink.set_option("http-trust-env", False)
if args.http_no_ssl_verify:
streamlink.set_option("http-ssl-verify", False)
if args.http_disable_dh:
streamlink.set_option("http-disable-dh", True)
if args.http_ssl_cert:
streamlink.set_option("http-ssl-cert", args.http_ssl_cert)
if args.http_ssl_cert_crt_key:
streamlink.set_option("http-ssl-cert", tuple(args.http_ssl_cert_crt_key))
if args.http_timeout:
streamlink.set_option("http-timeout", args.http_timeout)
def setup_plugins(extra_plugin_dir=None):
"""Loads any additional plugins."""
load_plugins(PLUGIN_DIRS, showwarning=False)
if extra_plugin_dir:
load_plugins([Path(path).expanduser() for path in extra_plugin_dir])
def setup_streamlink():
"""Creates the Streamlink session."""
global streamlink
streamlink = Streamlink({"user-input-requester": ConsoleUserInputRequester(console)})
def setup_options():
"""Sets Streamlink options."""
if args.interface:
streamlink.set_option("interface", args.interface)
if args.ipv4:
streamlink.set_option("ipv4", args.ipv4)
if args.ipv6:
streamlink.set_option("ipv6", args.ipv6)
if args.ringbuffer_size:
streamlink.set_option("ringbuffer-size", args.ringbuffer_size)
if args.mux_subtitles:
streamlink.set_option("mux-subtitles", args.mux_subtitles)
if args.hds_live_edge:
streamlink.set_option("hds-live-edge", args.hds_live_edge)
if args.hls_live_edge:
streamlink.set_option("hls-live-edge", args.hls_live_edge)
if args.hls_playlist_reload_attempts:
streamlink.set_option("hls-playlist-reload-attempts", args.hls_playlist_reload_attempts)
if args.hls_playlist_reload_time:
streamlink.set_option("hls-playlist-reload-time", args.hls_playlist_reload_time)
if args.hls_segment_ignore_names:
streamlink.set_option("hls-segment-ignore-names", args.hls_segment_ignore_names)
if args.hls_segment_key_uri:
streamlink.set_option("hls-segment-key-uri", args.hls_segment_key_uri)
if args.hls_audio_select:
streamlink.set_option("hls-audio-select", args.hls_audio_select)
if args.hls_start_offset:
streamlink.set_option("hls-start-offset", args.hls_start_offset)
if args.hls_duration:
streamlink.set_option("hls-duration", args.hls_duration)
if args.hls_live_restart:
streamlink.set_option("hls-live-restart", args.hls_live_restart)
if args.rtmp_rtmpdump:
streamlink.set_option("rtmp-rtmpdump", args.rtmp_rtmpdump)
elif args.rtmpdump:
streamlink.set_option("rtmp-rtmpdump", args.rtmpdump)
if args.rtmp_proxy:
streamlink.set_option("rtmp-proxy", args.rtmp_proxy)
# deprecated
if args.hds_segment_attempts:
streamlink.set_option("hds-segment-attempts", args.hds_segment_attempts)
if args.hds_segment_threads:
streamlink.set_option("hds-segment-threads", args.hds_segment_threads)
if args.hds_segment_timeout:
streamlink.set_option("hds-segment-timeout", args.hds_segment_timeout)
if args.hds_timeout:
streamlink.set_option("hds-timeout", args.hds_timeout)
if args.hls_segment_attempts:
streamlink.set_option("hls-segment-attempts", args.hls_segment_attempts)
if args.hls_segment_threads:
streamlink.set_option("hls-segment-threads", args.hls_segment_threads)
if args.hls_segment_timeout:
streamlink.set_option("hls-segment-timeout", args.hls_segment_timeout)
if args.hls_timeout:
streamlink.set_option("hls-timeout", args.hls_timeout)
if args.http_stream_timeout:
streamlink.set_option("http-stream-timeout", args.http_stream_timeout)
if args.rtmp_timeout:
streamlink.set_option("rtmp-timeout", args.rtmp_timeout)
# generic stream- arguments take precedence over deprecated stream-type arguments
if args.stream_segment_attempts:
streamlink.set_option("stream-segment-attempts", args.stream_segment_attempts)
if args.stream_segment_threads:
streamlink.set_option("stream-segment-threads", args.stream_segment_threads)
if args.stream_segment_timeout:
streamlink.set_option("stream-segment-timeout", args.stream_segment_timeout)
if args.stream_timeout:
streamlink.set_option("stream-timeout", args.stream_timeout)
if args.ffmpeg_ffmpeg:
streamlink.set_option("ffmpeg-ffmpeg", args.ffmpeg_ffmpeg)
if args.ffmpeg_verbose:
streamlink.set_option("ffmpeg-verbose", args.ffmpeg_verbose)
if args.ffmpeg_verbose_path:
streamlink.set_option("ffmpeg-verbose-path", args.ffmpeg_verbose_path)
if args.ffmpeg_fout:
streamlink.set_option("ffmpeg-fout", args.ffmpeg_fout)
if args.ffmpeg_video_transcode:
streamlink.set_option("ffmpeg-video-transcode", args.ffmpeg_video_transcode)
if args.ffmpeg_audio_transcode:
streamlink.set_option("ffmpeg-audio-transcode", args.ffmpeg_audio_transcode)
if args.ffmpeg_copyts:
streamlink.set_option("ffmpeg-copyts", args.ffmpeg_copyts)
if args.ffmpeg_start_at_zero:
streamlink.set_option("ffmpeg-start-at-zero", args.ffmpeg_start_at_zero)
streamlink.set_option("subprocess-errorlog", args.subprocess_errorlog)
streamlink.set_option("subprocess-errorlog-path", args.subprocess_errorlog_path)
streamlink.set_option("locale", args.locale)
def setup_plugin_args(session, parser):
"""Sets Streamlink plugin options."""
plugin_args = parser.add_argument_group("Plugin options")
for pname, plugin in session.plugins.items():
defaults = {}
group = plugin_args.add_argument_group(pname.capitalize())
for parg in plugin.arguments:
if not parg.is_global:
group.add_argument(parg.argument_name(pname), **parg.options)
defaults[parg.dest] = parg.default
else:
pargdest = parg.dest
for action in parser._actions:
# find matching global argument
if pargdest != action.dest:
continue
defaults[pargdest] = action.default
# add plugin to global argument
plugins = getattr(action, "plugins", [])
plugins.append(pname)
setattr(action, "plugins", plugins)
plugin.options = PluginOptions(defaults)
def setup_plugin_options(session, plugin):
"""Sets Streamlink plugin options."""
pname = plugin.module
required = OrderedDict({})
for parg in plugin.arguments:
if parg.options.get("help") == argparse.SUPPRESS:
continue
value = getattr(args, parg.dest if parg.is_global else parg.namespace_dest(pname))
session.set_plugin_option(pname, parg.dest, value)
if not parg.is_global:
if parg.required:
required[parg.name] = parg
# if the value is set, check to see if any of the required arguments are not set
if parg.required or value:
try:
for rparg in plugin.arguments.requires(parg.name):
required[rparg.name] = rparg
except RuntimeError:
log.error(f"{pname} plugin has a configuration error and the arguments cannot be parsed")
break
if required:
for req in required.values():
if not session.get_plugin_option(pname, req.dest):
prompt = f"{req.prompt or f'Enter {pname} {req.name}'}: "
session.set_plugin_option(
pname,
req.dest,
console.askpass(prompt) if req.sensitive else console.ask(prompt)
)
def log_root_warning():
if hasattr(os, "getuid"):
if os.geteuid() == 0:
log.info("streamlink is running as root! Be careful!")
def log_current_versions():
"""Show current installed versions"""
if not logger.root.isEnabledFor(logging.DEBUG):
return
# macOS
if sys.platform == "darwin":
os_version = f"macOS {platform.mac_ver()[0]}"
# Windows
elif sys.platform == "win32":
os_version = f"{platform.system()} {platform.release()}"
# Linux / other
else:
os_version = platform.platform()
log.debug(f"OS: {os_version}")
log.debug(f"Python: {platform.python_version()}")
log.debug(f"Streamlink: {streamlink_version}")
log.debug(f"Requests({requests.__version__}), "
f"Socks({socks_version}), "
f"Websocket({websocket_version})")
def log_current_arguments(session, parser):
global args
if not logger.root.isEnabledFor(logging.DEBUG):
return
sensitive = set()
for pname, plugin in session.plugins.items():
for parg in plugin.arguments:
if parg.sensitive:
sensitive.add(parg.argument_name(pname))
log.debug("Arguments:")
for action in parser._actions:
if not hasattr(args, action.dest):
continue
value = getattr(args, action.dest)
if action.default != value:
name = next( # pragma: no branch
(option for option in action.option_strings if option.startswith("--")),
action.option_strings[0]
) if action.option_strings else action.dest
log.debug(f" {name}={value if name not in sensitive else '*' * 8}")
def check_version(force=False):
cache = Cache(filename="cli.json")
latest_version = cache.get("latest_version")
if force or not latest_version:
res = requests.get("https://pypi.python.org/pypi/streamlink/json")
data = res.json()
latest_version = data.get("info").get("version")
cache.set("latest_version", latest_version, (60 * 60 * 24))
version_info_printed = cache.get("version_info_printed")
if not force and version_info_printed:
return
installed_version = StrictVersion(streamlink.version)
latest_version = StrictVersion(latest_version)
if latest_version > installed_version:
log.info(f"A new version of Streamlink ({latest_version}) is available!")
cache.set("version_info_printed", True, (60 * 60 * 6))
elif force:
log.info(f"Your Streamlink version ({installed_version}) is up to date!")
if force:
sys.exit()
def setup_logger_and_console(stream=sys.stdout, filename=None, level="info", json=False):
global console
if filename == "-":
filename = LOG_DIR / f"{datetime.now()}.log"
elif filename:
filename = Path(filename).expanduser().resolve()
if filename:
filename.parent.mkdir(parents=True, exist_ok=True)
streamhandler = logger.basicConfig(
stream=stream,
filename=filename,
level=level,
style="{",
format=("[{asctime}]" if level == "trace" else "") + "[{name}][{levelname}] {message}",
datefmt="%H:%M:%S" + (".%f" if level == "trace" else "")
)
console = ConsoleOutput(streamhandler.stream, json)
def main():
error_code = 0
parser = build_parser()
setup_args(parser, ignore_unknown=True)
# call argument set up as early as possible to load args from config files
setup_config_args(parser, ignore_unknown=True)
# Console output should be on stderr if we are outputting
# a stream to stdout.
if args.stdout or args.output == "-" or args.record_and_pipe:
console_out = sys.stderr
else:
console_out = sys.stdout
# We don't want log output when we are printing JSON or a command-line.
silent_log = any(getattr(args, attr) for attr in QUIET_OPTIONS)
log_level = args.loglevel if not silent_log else "none"
log_file = args.logfile if log_level != "none" else None
setup_logger_and_console(console_out, log_file, log_level, args.json)
setup_signals()
setup_streamlink()
# load additional plugins
setup_plugins(args.plugin_dirs)
setup_plugin_args(streamlink, parser)
# call setup args again once the plugin specific args have been added
setup_args(parser)
setup_config_args(parser)
# update the logging level if changed by a plugin specific config
log_level = args.loglevel if not silent_log else "none"
logger.root.setLevel(log_level)
setup_http_session()
log_root_warning()
log_current_versions()
log_current_arguments(streamlink, parser)
if args.version_check or args.auto_version_check:
with ignored(Exception):
check_version(force=args.version_check)
if args.plugins:
print_plugins()
elif args.can_handle_url:
try:
streamlink.resolve_url(args.can_handle_url)
except NoPluginError:
error_code = 1
except KeyboardInterrupt:
error_code = 130
elif args.can_handle_url_no_redirect:
try:
streamlink.resolve_url_no_redirect(args.can_handle_url_no_redirect)
except NoPluginError:
error_code = 1
except KeyboardInterrupt:
error_code = 130
elif args.url:
try:
setup_options()
handle_url()
except KeyboardInterrupt:
# Close output
if output:
output.close()
console.msg("Interrupted! Exiting...")
error_code = 130
finally:
if stream_fd:
try:
log.info("Closing currently open stream...")
stream_fd.close()
except KeyboardInterrupt:
error_code = 130
elif args.help:
parser.print_help()
else:
usage = parser.format_usage()
console.msg(
f"{usage}\n"
f"Use -h/--help to see the available options or read the manual at https://streamlink.github.io"
)
sys.exit(error_code)
def parser_helper():
session = Streamlink()
parser = build_parser()
setup_plugin_args(session, parser)
return parser
| [
"logging.getLogger",
"streamlink.logger.root.setLevel",
"streamlink.utils.named_pipe.NamedPipe",
"streamlink.logger.root.isEnabledFor",
"streamlink.cache.Cache",
"streamlink.Streamlink",
"time.sleep",
"platform.release",
"contextlib.closing",
"sys.exit",
"streamlink_cli.utils.ignored",
"streamlink_cli.utils.datetime.now",
"pathlib.Path",
"streamlink_cli.console.ConsoleOutput",
"platform.platform",
"gettext.gettext",
"platform.system",
"streamlink.logger.basicConfig",
"streamlink_cli.output.FileOutput",
"collections.OrderedDict",
"streamlink_cli.utils.stream_to_url",
"streamlink_cli.argparser.build_parser",
"streamlink.plugin.PluginOptions",
"platform.mac_ver",
"requests.get",
"os.path.isfile",
"sys.stdout.isatty",
"sys.stdin.isatty",
"distutils.version.StrictVersion",
"platform.python_version",
"signal.signal",
"streamlink_cli.utils.HTTPServer",
"streamlink_cli.console.ConsoleUserInputRequester",
"streamlink.StreamError",
"os.geteuid",
"functools.partial",
"os.path.basename"
] | [((1732, 1767), 'logging.getLogger', 'logging.getLogger', (['"""streamlink.cli"""'], {}), "('streamlink.cli')\n", (1749, 1767), False, 'import logging\n'), ((2765, 2785), 'streamlink_cli.output.FileOutput', 'FileOutput', (['filename'], {}), '(filename)\n', (2775, 2785), False, 'from streamlink_cli.output import FileOutput, Output, PlayerOutput\n'), ((22981, 23038), 'signal.signal', 'signal.signal', (['signal.SIGTERM', 'signal.default_int_handler'], {}), '(signal.SIGTERM, signal.default_int_handler)\n', (22994, 23038), False, 'import signal\n'), ((30271, 30286), 'collections.OrderedDict', 'OrderedDict', (['{}'], {}), '({})\n', (30282, 30286), False, 'from collections import OrderedDict\n'), ((33269, 33295), 'streamlink.cache.Cache', 'Cache', ([], {'filename': '"""cli.json"""'}), "(filename='cli.json')\n", (33274, 33295), False, 'from streamlink.cache import Cache\n'), ((33753, 33786), 'distutils.version.StrictVersion', 'StrictVersion', (['streamlink.version'], {}), '(streamlink.version)\n', (33766, 33786), False, 'from distutils.version import StrictVersion\n'), ((33808, 33837), 'distutils.version.StrictVersion', 'StrictVersion', (['latest_version'], {}), '(latest_version)\n', (33821, 33837), False, 'from distutils.version import StrictVersion\n'), ((34522, 34756), 'streamlink.logger.basicConfig', 'logger.basicConfig', ([], {'stream': 'stream', 'filename': 'filename', 'level': 'level', 'style': '"""{"""', 'format': "(('[{asctime}]' if level == 'trace' else '') +\n '[{name}][{levelname}] {message}')", 'datefmt': "('%H:%M:%S' + ('.%f' if level == 'trace' else ''))"}), "(stream=stream, filename=filename, level=level, style='{',\n format=('[{asctime}]' if level == 'trace' else '') +\n '[{name}][{levelname}] {message}', datefmt='%H:%M:%S' + ('.%f' if level ==\n 'trace' else ''))\n", (34540, 34756), True, 'import streamlink.logger as logger\n'), ((34814, 34855), 'streamlink_cli.console.ConsoleOutput', 'ConsoleOutput', (['streamhandler.stream', 'json'], {}), '(streamhandler.stream, json)\n', (34827, 34855), False, 'from streamlink_cli.console import ConsoleOutput, ConsoleUserInputRequester\n'), ((34902, 34916), 'streamlink_cli.argparser.build_parser', 'build_parser', ([], {}), '()\n', (34914, 34916), False, 'from streamlink_cli.argparser import build_parser\n'), ((36078, 36109), 'streamlink.logger.root.setLevel', 'logger.root.setLevel', (['log_level'], {}), '(log_level)\n', (36098, 36109), True, 'import streamlink.logger as logger\n'), ((37685, 37705), 'sys.exit', 'sys.exit', (['error_code'], {}), '(error_code)\n', (37693, 37705), False, 'import sys\n'), ((37743, 37755), 'streamlink.Streamlink', 'Streamlink', ([], {}), '()\n', (37753, 37755), False, 'from streamlink import NoPluginError, PluginError, StreamError, Streamlink, __version__ as streamlink_version\n'), ((37769, 37783), 'streamlink_cli.argparser.build_parser', 'build_parser', ([], {}), '()\n', (37781, 37783), False, 'from streamlink_cli.argparser import build_parser\n'), ((2403, 2427), 'os.path.isfile', 'os.path.isfile', (['filename'], {}), '(filename)\n', (2417, 2427), False, 'import os\n'), ((2454, 2472), 'sys.stdin.isatty', 'sys.stdin.isatty', ([], {}), '()\n', (2470, 2472), False, 'import sys\n'), ((5045, 5057), 'streamlink_cli.utils.HTTPServer', 'HTTPServer', ([], {}), '()\n', (5055, 5057), False, 'from streamlink_cli.utils import Formatter, HTTPServer, datetime, ignored, progress, stream_to_url\n'), ((9557, 9600), 'streamlink.StreamError', 'StreamError', (['"""No data returned from stream"""'], {}), "('No data returned from 
stream')\n", (9568, 9600), False, 'from streamlink import NoPluginError, PluginError, StreamError, Streamlink, __version__ as streamlink_version\n'), ((10534, 10549), 'contextlib.closing', 'closing', (['output'], {}), '(output)\n', (10541, 10549), False, 'from contextlib import closing\n'), ((15946, 15961), 'time.sleep', 'sleep', (['interval'], {}), '(interval)\n', (15951, 15961), False, 'from time import sleep\n'), ((21175, 21212), 'gettext.gettext', 'gettext', (['"""unrecognized arguments: %s"""'], {}), "('unrecognized arguments: %s')\n", (21182, 21212), False, 'from gettext import gettext\n'), ((30119, 30142), 'streamlink.plugin.PluginOptions', 'PluginOptions', (['defaults'], {}), '(defaults)\n', (30132, 30142), False, 'from streamlink.plugin import Plugin, PluginOptions\n'), ((31740, 31779), 'streamlink.logger.root.isEnabledFor', 'logger.root.isEnabledFor', (['logging.DEBUG'], {}), '(logging.DEBUG)\n', (31764, 31779), True, 'import streamlink.logger as logger\n'), ((32449, 32488), 'streamlink.logger.root.isEnabledFor', 'logger.root.isEnabledFor', (['logging.DEBUG'], {}), '(logging.DEBUG)\n', (32473, 32488), True, 'import streamlink.logger as logger\n'), ((33396, 33456), 'requests.get', 'requests.get', (['"""https://pypi.python.org/pypi/streamlink/json"""'], {}), "('https://pypi.python.org/pypi/streamlink/json')\n", (33408, 33456), False, 'import requests\n'), ((34148, 34158), 'sys.exit', 'sys.exit', ([], {}), '()\n', (34156, 34158), False, 'import sys\n'), ((2742, 2752), 'sys.exit', 'sys.exit', ([], {}), '()\n', (2750, 2752), False, 'import sys\n'), ((3292, 3313), 'streamlink_cli.output.FileOutput', 'FileOutput', ([], {'fd': 'stdout'}), '(fd=stdout)\n', (3302, 3313), False, 'from streamlink_cli.output import FileOutput, Output, PlayerOutput\n'), ((3465, 3486), 'streamlink_cli.output.FileOutput', 'FileOutput', ([], {'fd': 'stdout'}), '(fd=stdout)\n', (3475, 3486), False, 'from streamlink_cli.output import FileOutput, Output, PlayerOutput\n'), ((8271, 8292), 'streamlink_cli.utils.stream_to_url', 'stream_to_url', (['stream'], {}), '(stream)\n', (8284, 8292), False, 'from streamlink_cli.utils import Formatter, HTTPServer, datetime, ignored, progress, stream_to_url\n'), ((9112, 9156), 'streamlink.StreamError', 'StreamError', (['f"""Could not open stream: {err}"""'], {}), "(f'Could not open stream: {err}')\n", (9123, 9156), False, 'from streamlink import NoPluginError, PluginError, StreamError, Streamlink, __version__ as streamlink_version\n'), ((9439, 9493), 'streamlink.StreamError', 'StreamError', (['f"""Failed to read data from stream: {err}"""'], {}), "(f'Failed to read data from stream: {err}')\n", (9450, 9493), False, 'from streamlink import NoPluginError, PluginError, StreamError, Streamlink, __version__ as streamlink_version\n'), ((11076, 11095), 'sys.stdout.isatty', 'sys.stdout.isatty', ([], {}), '()\n', (11093, 11095), False, 'import sys\n'), ((11295, 11314), 'sys.stdout.isatty', 'sys.stdout.isatty', ([], {}), '()\n', (11312, 11314), False, 'import sys\n'), ((11409, 11441), 'functools.partial', 'partial', (['stream.read', 'chunk_size'], {}), '(stream.read, chunk_size)\n', (11416, 11441), False, 'from functools import partial\n'), ((22283, 22305), 'streamlink_cli.utils.ignored', 'ignored', (['NoPluginError'], {}), '(NoPluginError)\n', (22290, 22305), False, 'from streamlink_cli.utils import Formatter, HTTPServer, datetime, ignored, progress, stream_to_url\n'), ((24556, 24590), 'streamlink_cli.console.ConsoleUserInputRequester', 'ConsoleUserInputRequester', (['console'], {}), 
'(console)\n', (24581, 24590), False, 'from streamlink_cli.console import ConsoleOutput, ConsoleUserInputRequester\n'), ((31571, 31583), 'os.geteuid', 'os.geteuid', ([], {}), '()\n', (31581, 31583), False, 'import os\n'), ((32060, 32079), 'platform.platform', 'platform.platform', ([], {}), '()\n', (32077, 32079), False, 'import platform\n'), ((36301, 36319), 'streamlink_cli.utils.ignored', 'ignored', (['Exception'], {}), '(Exception)\n', (36308, 36319), False, 'from streamlink_cli.utils import Formatter, HTTPServer, datetime, ignored, progress, stream_to_url\n'), ((2108, 2122), 'streamlink_cli.utils.datetime.now', 'datetime.now', ([], {}), '()\n', (2120, 2122), False, 'from streamlink_cli.utils import Formatter, HTTPServer, datetime, ignored, progress, stream_to_url\n'), ((2618, 2628), 'sys.exit', 'sys.exit', ([], {}), '()\n', (2626, 2628), False, 'import sys\n'), ((3641, 3677), 'streamlink_cli.output.FileOutput', 'FileOutput', ([], {'fd': 'stdout', 'record': 'record'}), '(fd=stdout, record=record)\n', (3651, 3677), False, 'from streamlink_cli.output import FileOutput, Output, PlayerOutput\n'), ((11560, 11593), 'os.path.basename', 'os.path.basename', (['output.filename'], {}), '(output.filename)\n', (11576, 11593), False, 'import os\n'), ((32153, 32178), 'platform.python_version', 'platform.python_version', ([], {}), '()\n', (32176, 32178), False, 'import platform\n'), ((7490, 7499), 'time.sleep', 'sleep', (['(10)'], {}), '(10)\n', (7495, 7499), False, 'from time import sleep\n'), ((11719, 11759), 'os.path.basename', 'os.path.basename', (['output.record.filename'], {}), '(output.record.filename)\n', (11735, 11759), False, 'import os\n'), ((31872, 31890), 'platform.mac_ver', 'platform.mac_ver', ([], {}), '()\n', (31888, 31890), False, 'import platform\n'), ((31968, 31985), 'platform.system', 'platform.system', ([], {}), '()\n', (31983, 31985), False, 'import platform\n'), ((31988, 32006), 'platform.release', 'platform.release', ([], {}), '()\n', (32004, 32006), False, 'import platform\n'), ((34327, 34341), 'streamlink_cli.utils.datetime.now', 'datetime.now', ([], {}), '()\n', (34339, 34341), False, 'from streamlink_cli.utils import Formatter, HTTPServer, datetime, ignored, progress, stream_to_url\n'), ((4037, 4048), 'streamlink.utils.named_pipe.NamedPipe', 'NamedPipe', ([], {}), '()\n', (4046, 4048), False, 'from streamlink.utils.named_pipe import NamedPipe\n'), ((21685, 21695), 'pathlib.Path', 'Path', (['path'], {}), '(path)\n', (21689, 21695), False, 'from pathlib import Path\n'), ((24357, 24367), 'pathlib.Path', 'Path', (['path'], {}), '(path)\n', (24361, 24367), False, 'from pathlib import Path\n'), ((34386, 34400), 'pathlib.Path', 'Path', (['filename'], {}), '(filename)\n', (34390, 34400), False, 'from pathlib import Path\n')] |