from numpy import genfromtxt
import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler
''' load_dataset: Loads and merges cleaned-up data
@param filename: feature CSV to load, or "combined" to merge the unit-cell and material-average files
@param threshold: emittance threshold below which a data point is considered a positive example;
set threshold = -1 for non-binary models
@return tuple of numpy arrays for MPIDs, features, and labels
'''
def load_dataset(filename, threshold=0.2):
# Get data
Y_full = pd.read_csv('emittance_labels.csv')
X_full = None
if filename == "combined":
X_unit_cell = pd.read_csv('unit_cell_data_16.csv')
# X_avg = pd.read_csv('material_average_data.csv')
X_avg = pd.read_csv('material_average_data_plus.csv')
X_full = pd.merge(X_unit_cell, X_avg, on='MPID')
else:
X_full = pd.read_csv(filename)
total = pd.merge(X_full, Y_full, on="MPID")
# Random state below is a seed - change this when we go to run for real
total = np.array(total.sample(frac=1, random_state=229).reset_index(drop=True))
total = np.array([total[i] for i in range(len(total)) if total[i, -1] != float('inf')])
MPIDs = np.array(total[:, 0])
X = np.array(total[:, 1:-1])
# Replace NaN/-1/0's with average col value as needed
nan_locs = np.isnan(X)
X[nan_locs] = -1
# print(len(X[0]))
# print(X)
_, colnum = X.shape
nonexistent = -1
if filename == 'material_average_data_plus.csv':
nonexistent = 0
for col in range(colnum):
adj_col = X[:, col]
mask = adj_col != nonexistent
        mean = np.mean(adj_col[mask]) if mask.any() else 0.0  # mean of existing (non-placeholder) values only
adj_col[adj_col == nonexistent] = mean
X[:, col] = adj_col
# if filename == 'material_average_data.csv' or 'combined':
# scaler = StandardScaler()
# scaler.fit(X[-9:])
# X[-9:] = scaler.transform(X[-9:])
# Scale data
# if filename == 'material_average_data.csv' or 'combined':
# scaler = StandardScaler()
# scaler.fit(X[-8:]) # scale everything except MPID and atomic number
# X = scaler.transform(X)
# if filename == 'material_average_data_plus.csv':
# scaler = StandardScaler()
# scaler.fit(X[-16:]) # scale everything except MPID
# X = scaler.transform(X)
Y = np.array(total[:, -1])
print(max(Y))
print(min(Y))
test = [x for x in Y if x <= 0.2]
print(len(test))
if threshold != -1:
Y = [1 if y_i <= threshold else 0 for y_i in Y]
return (MPIDs, X, Y)
''' split_data: Splits the loaded data into training, validation, and test sets
@param tup: tuple of MPIDs, X's, Y's as returned by load_dataset
@return tuple of numpy arrays for training, validation, and test sets
'''
def split_data(tup, train_split = 0.8, valid_split = 0.1, test_split = 0.1):
MPIDs, X, Y = tup
assert (train_split + valid_split + test_split == 1),"The proportion of data dedicated to train, validation, and test sets does not sum to 1."
training_threshold = train_split
valid_threshold = train_split + valid_split
X_train = X[:int(len(X)*training_threshold)]
Y_train = Y[:int(len(Y)*training_threshold)]
MPIDs_train = MPIDs[:int(len(MPIDs)*training_threshold)]
X_valid = X[int(len(X)*training_threshold):int(len(X)*valid_threshold)]
    Y_valid = Y[int(len(Y)*training_threshold):int(len(Y)*valid_threshold)]
    MPIDs_valid = MPIDs[int(len(MPIDs)*training_threshold):int(len(MPIDs)*valid_threshold)]
X_test = X[int(len(X)*valid_threshold):]
Y_test = Y[int(len(Y)*valid_threshold):]
MPIDs_test = MPIDs[int(len(MPIDs)*valid_threshold):]
return (X_train, Y_train, MPIDs_train, X_valid, Y_valid, MPIDs_valid, X_test, Y_test, MPIDs_test)
def accuracy_metric(Y_predictions, Y_actual):
true_positives = 0.0
true_negatives = 0.0
false_positives = 0.0
false_negatives = 0.0
for i, prediction in enumerate(Y_predictions):
if Y_actual[i] == 1 and Y_predictions[i] == 1:
true_positives += 1
if Y_actual[i] == 0 and Y_predictions[i] == 0:
true_negatives += 1
if Y_actual[i] == 1 and Y_predictions[i] == 0:
false_negatives += 1
if Y_actual[i] == 0 and Y_predictions[i] == 1:
false_positives += 1
accuracy = (true_positives + true_negatives) / len(Y_actual)
    precision = true_positives / (true_positives + false_positives)
recall = true_positives / (true_positives + false_negatives)
F1 = 2 * (precision * recall) / (precision + recall)
print("Correctly Predicted Proportion : " + str(accuracy))
print("Precision : " + str(precision))
print("Recall : " + str(recall))
print("F1 : " + str(F1))
def augment_data(X, Y, num_permutations):
atoms = X[:, -64:]
# print(X)
# atoms = X[:, -6:]
XT = atoms.T
m, n = np.shape(XT)[0] // 2, np.shape(XT)[1]
all_new_inputs = None
all_labels = None
for i in range(num_permutations):
perm = XT.reshape(m, -1, n)[np.random.permutation(m)].reshape(-1,n)
new_data = np.concatenate((X[:, :-64], perm.T), axis=1)
# print(new_data)
if i == 0:
all_new_inputs = new_data
all_labels = Y
else:
# print('Concatenating!')
all_new_inputs = np.concatenate((all_new_inputs, new_data), axis=0)
all_labels = np.concatenate((all_labels, Y), axis=0)
    return (np.concatenate((X, all_new_inputs), axis=0), np.concatenate((Y, all_labels), axis=0))
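
# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# Assumes the CSV files referenced by load_dataset() ('emittance_labels.csv',
# 'unit_cell_data_16.csv', 'material_average_data_plus.csv') are present, and
# that both label classes occur in the validation split.
if __name__ == "__main__":
    mpids, X, Y = load_dataset("combined", threshold=0.2)
    (X_train, Y_train, _,
     X_valid, Y_valid, _,
     X_test, Y_test, _) = split_data((mpids, X, Y))
    # Score a trivial all-positive baseline to exercise accuracy_metric.
    accuracy_metric([1] * len(Y_valid), Y_valid)
    # augment_data(X_train, Y_train, num_permutations=2) would additionally
    # permute the trailing per-atom feature blocks to enlarge the training set.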
# Volatility
# Copyright (C) 2007-2013 Volatility Foundation
#
# This file is part of Volatility.
#
# Volatility is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Volatility is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Volatility. If not, see <http://www.gnu.org/licenses/>.
#
import zlib
import volatility.obj as obj
import volatility.plugins.addrspaces.standard as standard
class HPAKVTypes(obj.ProfileModification):
def modification(self, profile):
profile.vtypes.update({
'HPAK_HEADER' : [ 0x20, {
'Magic' : [ 0, ['String', dict(length = 4)]],
}],
'HPAK_SECTION': [ 0xE0, {
'Header' : [ 0, ['String', dict(length = 32)]],
'Compressed' : [ 0x8C, ['unsigned int']],
'Length' : [ 0x98, ['unsigned long long']],
'Offset' : [ 0xA8, ['unsigned long long']],
'NextSection' : [ 0xB0, ['unsigned long long']],
'CompressedSize' : [ 0xB8, ['unsigned long long']],
'Name' : [ 0xD4, ['String', dict(length = 12)]],
}],
})
profile.object_classes.update({'HPAK_HEADER': HPAK_HEADER})
class HPAK_HEADER(obj.CType):
"""A class for B.S. Hairy headers"""
def Sections(self):
## The initial section object
section = obj.Object("HPAK_SECTION",
offset = self.obj_vm.profile.get_obj_size("HPAK_HEADER"),
vm = self.obj_vm)
## Iterate through the sections
while section.is_valid():
yield section
section = section.NextSection.dereference_as("HPAK_SECTION")
class HPAKAddressSpace(standard.FileAddressSpace):
""" This AS supports the HPAK format """
order = 30
def __init__(self, base, config, **kwargs):
## We must have an AS below us
self.as_assert(base, "No base Address Space")
standard.FileAddressSpace.__init__(self, base, config, layered = True, **kwargs)
self.header = obj.Object("HPAK_HEADER", offset = 0, vm = base)
## Check the magic
self.as_assert(self.header.Magic == 'HPAK', "Invalid magic found")
self.physmem = None
## cycle though looking for the PHYSDUMP header
for section in self.header.Sections():
if str(section.Header) == "HPAKSECTHPAK_SECTION_PHYSDUMP":
self.physmem = section
break
self.as_assert(self.physmem is not None, "Cannot find the PHYSDUMP section")
def read(self, addr, length):
return self.base.read(addr + self.physmem.Offset, length)
def zread(self, addr, length):
return self.base.zread(addr + self.physmem.Offset, length)
def is_valid_address(self, addr):
return self.base.is_valid_address(addr + self.physmem.Offset)
def get_header(self):
return self.header
def convert_to_raw(self, outfd):
"""The standard imageinfo plugin won't work on
hpak images so we provide this method. It wraps
the zlib compression if necessary"""
zlibdec = zlib.decompressobj(16 + zlib.MAX_WBITS)
if self.physmem.Compressed == 1:
length = self.physmem.CompressedSize
else:
length = self.physmem.Length
chunk_size = 4096
chunks = length / chunk_size
def get_chunk(addr, size):
data = self.base.read(addr, size)
if self.physmem.Compressed == 1:
data = zlibdec.decompress(data)
return data
for i in range(chunks):
addr = self.physmem.Offset + i * chunk_size
data = get_chunk(addr, chunk_size)
outfd.write(data)
leftover = length % chunk_size
if leftover > 0:
data = get_chunk(addr + chunk_size, leftover)
outfd.write(data)
return True
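
## Illustrative sketch (added; not part of the original plugin). Once Volatility
## has stacked an HPAKAddressSpace on top of a file-backed base space, the
## physical memory section can be exported with convert_to_raw(); the usual
## Volatility configuration/plugin plumbing is omitted here.
##
##   with open("physmem.raw", "wb") as outfd:
##       hpak_as.convert_to_raw(outfd)   # hpak_as: an HPAKAddressSpace instance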
#!/usr/bin/env python
# --------------------------------------------------------
# Fast/er/ R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by <NAME>
# --------------------------------------------------------
"""Generate RPN proposals."""
import _init_paths
import numpy as np
from fast_rcnn.config import cfg, cfg_from_file, cfg_from_list, get_output_dir
from datasets.factory import get_imdb
from rpn.generate import imdb_proposals
import cPickle
import caffe
import argparse
import pprint
import time, os, sys
import multiprocessing as mp
from easydict import EasyDict
def parse_args():
"""
Parse input arguments
"""
parser = argparse.ArgumentParser(description='Test a Fast R-CNN network')
parser.add_argument('--gpu', type=int, nargs='+',
default=[0], help="List of device ids.")
parser.add_argument('--def', dest='prototxt',
help='prototxt file defining the network',
default=None, type=str)
parser.add_argument('--net', dest='caffemodel',
help='model to test',
default=None, type=str)
parser.add_argument('--cfg', dest='cfg_file',
help='optional config file', default=None, type=str)
parser.add_argument('--wait', dest='wait',
help='wait until net file exists',
default=True, type=bool)
parser.add_argument('--imdb', dest='imdb_name',
help='dataset to test',
default='voc_2007_test', type=str)
parser.add_argument('--set', dest='set_cfgs',
help='set config keys', default=None,
nargs=argparse.REMAINDER)
if len(sys.argv) == 1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
return args
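
# Example invocation (added comment; the script name and model paths are placeholders):
#   python <this_script>.py --gpu 0 1 --def path/to/rpn_test.prototxt \
#       --net path/to/rpn.caffemodel --imdb voc_2007_test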
def rpn_generate_single_gpu(prototxt, caffemodel, imdb, rank, gpus, output_dir):
cfg.GPU_ID = gpus[rank]
caffe.set_mode_gpu()
caffe.set_device(cfg.GPU_ID)
net = caffe.Net(prototxt, caffemodel, caffe.TEST)
imdb_boxes = imdb_proposals(net, imdb, rank, len(gpus), output_dir)
if __name__ == '__main__':
args = parse_args()
print('Called with args:')
print(args)
if args.cfg_file is not None:
cfg_from_file(args.cfg_file)
if args.set_cfgs is not None:
cfg_from_list(args.set_cfgs)
# RPN test settings
cfg.TEST.RPN_PRE_NMS_TOP_N = -1
cfg.TEST.RPN_POST_NMS_TOP_N = 300
print('Using config:')
pprint.pprint(cfg)
while not os.path.exists(args.caffemodel) and args.wait:
print('Waiting for {} to exist...'.format(args.caffemodel))
time.sleep(10)
fake_net = EasyDict()
fake_net.name = os.path.splitext(os.path.basename(args.caffemodel))[0]
gpus = args.gpu
imdb = get_imdb(args.imdb_name)
output_dir = get_output_dir(imdb, fake_net)
output_dir = os.path.join(output_dir, "proposals_test")
if not os.path.exists(output_dir):
os.makedirs(output_dir)
procs=[]
for rank in range(len(gpus)):
p = mp.Process(target=rpn_generate_single_gpu,
args=(args.prototxt, args.caffemodel, imdb, rank, gpus, output_dir))
p.daemon = True
p.start()
procs.append(p)
for p in procs:
p.join()
# -*- coding: utf-8 -*-
#
# Copyright (C) 2016 <NAME>, <EMAIL>
#
# This module is part of python-sqlparse and is released under
# the BSD License: http://www.opensource.org/licenses/bsd-license.php
from sqlparse import sql, tokens as T
from sqlparse.utils import split_unquoted_newlines
class StripWhitespaceFilter(object):
def _stripws(self, tlist):
func_name = '_stripws_{cls}'.format(cls=type(tlist).__name__)
func = getattr(self, func_name.lower(), self._stripws_default)
func(tlist)
@staticmethod
def _stripws_default(tlist):
last_was_ws = False
is_first_char = True
for token in list(tlist.tokens):
if token.is_whitespace:
if last_was_ws or is_first_char:
tlist.tokens.remove(token)
continue # continue to remove multiple ws on first char
else:
token.value = ' '
last_was_ws = token.is_whitespace
is_first_char = False
def _stripws_identifierlist(self, tlist):
# Removes newlines before commas, see issue140
last_nl = None
for token in list(tlist.tokens):
if last_nl and token.ttype is T.Punctuation and token.value == ',':
tlist.tokens.remove(last_nl)
last_nl = token if token.is_whitespace else None
# # Add space after comma.
# next_ = tlist.token_next(token, skip_ws=False)
# if (next_ is not None and not next_.is_whitespace and
# token.ttype is T.Punctuation and token.value == ','):
# tlist.insert_after(token, sql.Token(T.Whitespace, ' '))
return self._stripws_default(tlist)
def _stripws_parenthesis(self, tlist):
while tlist.tokens[1].is_whitespace:
tlist.tokens.pop(1)
while tlist.tokens[-2].is_whitespace:
tlist.tokens.pop(-2)
self._stripws_default(tlist)
def process(self, stmt, depth=0):
[self.process(sgroup, depth + 1) for sgroup in stmt.get_sublists()]
self._stripws(stmt)
if depth == 0 and stmt.tokens and stmt.tokens[-1].is_whitespace:
stmt.tokens.pop(-1)
return stmt
class SpacesAroundOperatorsFilter(object):
@staticmethod
def _process(tlist):
ttypes = (T.Operator, T.Comparison)
tidx, token = tlist.token_next_by(t=ttypes)
while token:
nidx, next_ = tlist.token_next(tidx, skip_ws=False)
if next_ and next_.ttype != T.Whitespace:
tlist.insert_after(tidx, sql.Token(T.Whitespace, ' '))
pidx, prev_ = tlist.token_prev(tidx, skip_ws=False)
if prev_ and prev_.ttype != T.Whitespace:
tlist.insert_before(tidx, sql.Token(T.Whitespace, ' '))
tidx += 1 # has to shift since token inserted before it
# assert tlist.token_index(token) == tidx
tidx, token = tlist.token_next_by(t=ttypes, idx=tidx)
def process(self, stmt):
[self.process(sgroup) for sgroup in stmt.get_sublists()]
SpacesAroundOperatorsFilter._process(stmt)
return stmt
# ---------------------------
# postprocess
class SerializerUnicode(object):
@staticmethod
def process(stmt):
lines = split_unquoted_newlines(stmt)
return '\n'.join(line.rstrip() for line in lines)
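
# Hedged usage sketch (added; not part of python-sqlparse). These filters are
# normally driven by sqlparse's formatter pipeline, but they can also be applied
# directly to a parsed statement:
if __name__ == '__main__':
    import sqlparse
    stmt = sqlparse.parse('select   a ,  b   from   t  where  x=1')[0]
    StripWhitespaceFilter().process(stmt)
    SpacesAroundOperatorsFilter().process(stmt)
    print(str(stmt))  # repeated whitespace collapsed, spaces ensured around '='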
# Repository: eupston/Deepbeat-beatbox2midi
import numpy as np
from pyqtgraph.Qt import QtCore, QtGui
import pyqtgraph.opengl as gl
import pyqtgraph as pg
import sys
from opensimplex import OpenSimplex
from PyQt5.QtCore import Qt
from PyQt5 import QtGui
import time
class Terrain(object):
def __init__(self):
"""
Initialize the graphics window and mesh
"""
# setup the view window
self.w = gl.GLViewWidget()
# self.w.setGeometry(0, 110, 1000, 1000)
self.w.setCameraPosition(distance=30, elevation=10)
self.w.setWindowFlags(Qt.FramelessWindowHint)
self.w.setAttribute(Qt.WA_TranslucentBackground, True)
self.w.setSizePolicy(QtGui.QSizePolicy.Expanding,QtGui.QSizePolicy.Expanding)
# constants and arrays
self.nsteps = 1
self.ypoints = range(-20, 22, self.nsteps)
self.xpoints = range(-20, 22, self.nsteps)
self.nfaces = len(self.ypoints)
self.offset = 0
# perlin noise object
self.tmp = OpenSimplex()
# create the veritices array
verts = np.array([
[
x, y, 1.5 * self.tmp.noise2d(x=n / 5, y=m / 5)
] for n, x in enumerate(self.xpoints) for m, y in enumerate(self.ypoints)
], dtype=np.float32)
# create the faces and colors arrays
faces = []
colors = []
for m in range(self.nfaces - 1):
yoff = m * self.nfaces
for n in range(self.nfaces - 1):
faces.append([n + yoff, yoff + n + self.nfaces, yoff + n + self.nfaces + 1])
faces.append([n + yoff, yoff + n + 1, yoff + n + self.nfaces + 1])
colors.append([0, 0, 0, 0])
colors.append([0, 0, 0, 0])
faces = np.array(faces)
colors = np.array(colors)
# create the mesh item
self.m1 = gl.GLMeshItem(
vertexes=verts,
faces=faces, faceColors=colors,
smooth=False, drawEdges=False,
)
# self.m1.setGLOptions('opaque')
self.m1.setGLOptions('additive')
self.w.addItem(self.m1)
def getwidget(self):
return self.w
def update(self):
"""
update the mesh and shift the noise each time
"""
verts = np.array([
[
x, y, 2.5 * self.tmp.noise2d(x=n / 5 + self.offset, y=m / 5 + self.offset)
] for n, x in enumerate(self.xpoints) for m, y in enumerate(self.ypoints)
], dtype=np.float32)
faces = []
colors = []
for m in range(self.nfaces - 1):
yoff = m * self.nfaces
for n in range(self.nfaces - 1):
faces.append([n + yoff, yoff + n + self.nfaces, yoff + n + self.nfaces + 1])
faces.append([n + yoff, yoff + n + 1, yoff + n + self.nfaces + 1])
colors.append([n / self.nfaces, 1 - n / self.nfaces, m / self.nfaces, 0.7])
colors.append([n / self.nfaces, 1 - n / self.nfaces, m / self.nfaces, 0.8])
faces = np.array(faces, dtype=np.uint32)
colors = np.array(colors, dtype=np.float32)
self.m1.setMeshData(
vertexes=verts, faces=faces, faceColors=colors
)
self.offset -= 0.10
def animate(self):
self.timer = QtCore.QTimer()
self.timer.timeout.connect(self.update)
self.timer.start(10)
def stop_animate(self):
        self.timer.stop()
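
# Hedged usage sketch (added; not part of the original module): run the terrain
# as a standalone window. QApplication is imported from QtWidgets explicitly,
# since the plain PyQt5 QtGui import above does not expose it.
if __name__ == '__main__':
    from PyQt5.QtWidgets import QApplication
    app = QApplication(sys.argv)
    terrain = Terrain()
    terrain.getwidget().show()
    terrain.animate()
    sys.exit(app.exec_())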
# Repository: OpenSourceEconomics/handout-eckstein-keane-wolpin-models
"""Figures for the handout.
This module creates all figures for the handout. They are all used in the illustrative example.
"""
import colorsys
import os
from pathlib import Path
import matplotlib as mpl
import matplotlib.colors as mc
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import respy as rp
PROJECT_DIR = Path(os.environ["PROJECT_DIR"])
def make_grayscale_cmap(cmap):
"""Return a grayscale version of given colormap.
Parameters:
-----------
cmap: matplotlib.colors.LinearSegmentedColormap
Matplotlib color map (see
https://matplotlib.org/tutorials/colors/colormaps.html for available
color maps).
Returns:
--------
    cmap: matplotlib.colors.LinearSegmentedColormap
        Grayscale version of the given color map.
"""
cmap = plt.cm.get_cmap(cmap)
colors = cmap(np.arange(cmap.N))
# Conversion of RGBA to grayscale lum by rgb_weight
# rgb_weight given by http://alienryderflex.com/hsp.html
rgb_weight = [0.299, 0.587, 0.114]
lum = np.sqrt(np.dot(colors[:, :3] ** 2, rgb_weight))
colors[:, :3] = lum[:, np.newaxis]
return cmap.from_list(cmap.name + "_grayscale", colors, cmap.N)
def make_color_lighter(color, amount=0.5):
"""Return a brightened (darkened) color.
Parameters:
-----------
color: matplotlib color string, hex string, RGB tuple
Name of color that will be brightened.
amount: positive float
Amount the color should be brightened (<1) or darkened (>1).
Returns:
--------
_color: matplotlib color string, hex string, RGB tuple
Brightened-up color (same format).
"""
try:
_color = mc.cnames[color]
except Exception:
_color = color
_color = colorsys.rgb_to_hls(*mc.to_rgb(_color))
return colorsys.hls_to_rgb(_color[0], 1 - amount * (1 - _color[1]), _color[2])
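
# Illustrative note (added): make_grayscale_cmap("copper") yields a
# luminance-preserving grayscale copy of matplotlib's "copper" colormap and is
# used below to build the "bw" color scheme; make_color_lighter("tab:blue", 0.5)
# would return an RGB tuple for a 50%-lightened tab:blue.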
def plot_decisions_by_age(df_subset, color="color"):
"""Share of individuals in each occupation at any period (age)."""
fig, ax = plt.subplots()
shares = df_subset.loc[("empirical", slice(10)), labels] * 100
shares.plot.bar(
stacked=True, ax=ax, width=0.8, color=list(color_scheme[color].values())[:-1]
)
ax.set_xlabel("Age", labelpad=20, fontsize=30)
ax.set_xticklabels(np.arange(16, 27, 1), rotation="horizontal")
ax.set_ylabel("Share (in %)", labelpad=20, fontsize=30)
ax.set_ylim(0, 100)
ax.yaxis.get_major_ticks()[0].set_visible(False)
ax.legend(
labels=[label.split("_")[0].capitalize() for label in labels],
loc="lower center",
bbox_to_anchor=(0.5, 1.04),
ncol=5,
prop={"size": 22.5},
)
plt.savefig(f"fig-data-choice-all{color_scheme[color]['extension']}")
def plot_average_wage(df_subset, color="colors"):
"""Average of wages at any period."""
fig, ax = plt.subplots()
for label in ["blue_collar", "white_collar", "military"]:
y = df_subset.loc[("empirical", slice(10)), label].values / 1000
# We do not report wages if less than ten observations.
if label == "military":
y[-1] = np.nan
str_ = label.replace("_collar", "").capitalize()
ax.plot(range(11), y, color=color_scheme[color][label], label=str_)
ax.set_ylim(5, 30)
ax.set_xlabel("Age", labelpad=20, fontsize=30)
ax.legend(prop={"size": 26})
ax.xaxis.set_ticks(range(11))
ax.set_xticklabels(np.arange(16, 27, 1), rotation="horizontal")
ax.yaxis.get_major_ticks()[0].set_visible(False)
ax.set_ylabel("Wage (in $1,000)", labelpad=20, fontsize=30)
fig.savefig(f"fig-data-wage-occupations{color_scheme[color]['extension']}")
def plot_mechanism_subsidy(subsidies, levels, color="color"):
"""Effect tuition subsidy on average final schooling."""
fig, ax = plt.subplots()
ax.fill_between(
subsidies,
levels,
color=color_scheme[color]["blue_collar"],
)
ax.yaxis.get_major_ticks()[0].set_visible(False)
ax.set_ylabel("Average final schooling", labelpad=20, fontsize=30)
ax.set_ylim([10, 16])
ax.xaxis.set_major_formatter(mpl.ticker.StrMethodFormatter("{x:,.0f}"))
ax.set_xlabel("Tuition subsidy", labelpad=20, fontsize=30)
ax.set_xlim([None, 4150])
fig.savefig(f"fig-policy-forecast{color_scheme[color]['extension']}")
def plot_mechanism_time(deltas, levels, color="color"):
"""Effect time preferences on average final schooling."""
fig, ax = plt.subplots()
ax.fill_between(deltas, levels, color=color_scheme[color]["blue_collar"])
ax.yaxis.get_major_ticks()[0].set_visible(False)
ax.set_ylabel("Average final schooling", labelpad=20, fontsize=30)
ax.set_ylim([10, 16])
ax.xaxis.set_major_locator(plt.MaxNLocator(5))
ax.set_xlabel(r"$\delta$", labelpad=20, fontsize=30)
ax.set_xlim([0.9075, 0.9575])
fig.savefig(f"fig-economic-mechanism{color_scheme[color]['extension']}")
def plot_model_fit(df, color="color"):
for label in ["blue_collar", "white_collar", "military", "school", "home", "all"]:
fig, ax = plt.subplots()
if label == "blue_collar":
df_subset = df["probs"]
fname = f"fig-model-fit-choice-blue{color_scheme[color]['extension']}"
elif label == "white_collar":
df_subset = df["probs"]
fname = f"fig-model-fit-choice-white{color_scheme[color]['extension']}"
elif label == "military":
df_subset = df["probs"]
fname = f"fig-model-fit-choice-military{color_scheme[color]['extension']}"
elif label == "home":
df_subset = df["probs"]
fname = f"fig-model-fit-choice-home{color_scheme[color]['extension']}"
elif label == "school":
df_subset = df["probs"]
fname = f"fig-model-fit-choice-school{color_scheme[color]['extension']}"
else:
df_subset = df["mean"]
fname = f"fig-model-fit-wage-all{color_scheme[color]['extension']}"
y_empirical = df_subset.loc[("empirical", slice(10)), label].values
y_simulation = df_subset.loc[("simulated", slice(10)), label].values
if label == "blue_collar":
y_empirical = y_empirical * 100
y_simulation *= 100
elif label == "white_collar":
y_empirical = y_empirical * 100
y_simulation *= 100
elif label == "military":
y_empirical = y_empirical * 100
y_simulation *= 100
elif label == "home":
y_empirical = y_empirical * 100
y_simulation *= 100
elif label == "school":
y_empirical = y_empirical * 100
y_simulation *= 100
else:
y_empirical = y_empirical / 1000
y_simulation /= 1_000
ax.plot(
range(11),
y_empirical,
label="Empirical",
color=color_scheme[color]["blue_collar"],
)
ax.plot(
range(11),
y_simulation,
label="Simulated",
color=color_scheme[color]["school"],
)
if label == "school":
ax.legend(loc="upper right", prop={"size": 26})
else:
ax.legend(loc="upper left", prop={"size": 26})
ax.set_xlabel("Age", labelpad=20, fontsize=30)
ax.xaxis.set_ticks(range(11))
ax.set_xticklabels(np.arange(16, 27, 1), rotation="horizontal")
ax.yaxis.get_major_ticks()[0].set_visible(False)
if label == "blue_collar":
ax.set_ylabel("Share (in %)", labelpad=20, fontsize=30)
ax.set_ylim(0, 100)
elif label == "white_collar":
ax.set_ylabel("Share (in %)", labelpad=20, fontsize=30)
ax.set_ylim(0, 100)
elif label == "military":
ax.set_ylabel("Share (in %)", labelpad=20, fontsize=30)
ax.set_ylim(0, 100)
elif label == "home":
ax.set_ylabel("Share (in %)", labelpad=20, fontsize=30)
ax.set_ylim(0, 100)
elif label == "school":
ax.set_ylabel("Share (in %)", labelpad=20, fontsize=30)
ax.set_ylim(0, 100)
if label == "all":
ax.set_ylim(5, 30)
ax.set_ylabel("Wage (in $1,000)", labelpad=20, fontsize=30)
fig.savefig(fname.replace("_", "-"))
# Define the color schemes for "color" and "bw"
_cmap = make_grayscale_cmap("copper")
color_scheme = {
"bw": {
"blue_collar": _cmap(0.29),
"white_collar": _cmap(0.16),
"military": _cmap(0.51),
"school": _cmap(0.93),
"home": _cmap(0.76),
"extension": "-bw",
},
"color": {
"blue_collar": "tab:blue",
"white_collar": "tab:red",
"military": "tab:purple",
"school": "tab:orange",
"home": "tab:green",
"extension": "",
},
}
# Ordering OSE convention: blue-collar, white-collar, military, school, home
labels = ["blue_collar", "white_collar", "military", "school", "home"]
# We plot the model fit in and out of the support.
df_descriptives = pd.read_pickle("../material/data-descriptives.pkl")
# We start with the empirical data only.
for col_scheme in ["color", "bw"]:
plot_decisions_by_age(df_descriptives["probs"], col_scheme)
plot_average_wage(df_descriptives["mean"], col_scheme)
# We than combine the descriptives from the empirical and simulated data.
plot_model_fit(df_descriptives, col_scheme)
# We plot the counterfactual predictions of the model.
df_exploration = pd.read_pickle("../material/model-exploration.pkl")
subsidies = (
df_exploration.loc["subsidy", :].index.get_level_values("Change").to_numpy(np.float)
)
levels = df_exploration.loc[("subsidy", slice(None)), "level"].to_numpy(np.float)
plot_mechanism_subsidy(subsidies, levels)
plot_mechanism_subsidy(subsidies, levels, "bw")
deltas = (
df_exploration.loc["delta", :].index.get_level_values("Change").to_numpy(np.float)
)
deltas = deltas[deltas != 0.96]
levels = df_exploration.loc[("delta", slice(None)), "level"].to_numpy(np.float)
levels = levels[levels != 16.5442]
plot_mechanism_time(deltas, levels)
plot_mechanism_time(deltas, levels, "bw")
# TODO: This part was added after the fact for the presentation. Students turned out to be
# particularly interested in the sample size over time.
df = rp.get_example_model("kw_97_extended", with_data=True)[2]
fig, ax = plt.subplots()
ax.xaxis.set_ticks(range(11))
ax.get_yaxis().set_major_formatter(
mpl.ticker.FuncFormatter(lambda x, p: format(int(x), ","))
)
ax.set_xticklabels(np.arange(16, 27, 1), rotation="horizontal")
ax.yaxis.get_major_ticks()[0].set_visible(False)
ax.set_xlabel("Age", labelpad=20, fontsize=30)
ax.set_ylabel("Sample size", labelpad=20, fontsize=30)
ax.set_ylim(0, 1400)
ax.bar(range(11), df.groupby("Period")["Choice"].count())
fig.savefig("fig-data-sample-size.pdf")
# Repository: paul-ang/nas-segm-pytorch
"""REINFORCE and PPO for controller training"""
import random
import torch
import torch.nn as nn
from helpers.storage import RolloutStorage
from helpers.utils import parse_geno_log
class REINFORCE(object):
"""REINFORCE gradient estimator
"""
def __init__(self, controller, lr, baseline_decay, max_grad_norm=2.0):
"""
Args:
controller (Controller): RNN architecture generator
lr (float): learning rate for controller optimizer
baseline_decay (float): moving average baseline decay
max_grad_norm (float): controller gradient clip
"""
self.baseline = None
self.decay = baseline_decay
self.controller = controller
self.optimizer = torch.optim.Adam(controller.parameters(), lr=lr)
self.max_grad_norm = max_grad_norm
def update(self, sample):
"""Perform one gradient step for controller and update baseline.
Args:
sample (tuple): (reward, action, log_prob)
reward (float): current reward
action (list): representation of current architecture
log_prob (float): log probability of current architecture
Returns:
loss (torch.FloatTensor): controller loss
entropy (torch.FloatTensor): entropy of current architecture
"""
reward, action, _, _ = sample
_, _, entropy, log_prob = self.controller.evaluate(action)
with torch.no_grad():
if self.baseline is None:
self.baseline = reward
else:
self.baseline = self.decay * self.baseline + (1 - self.decay) * reward
adv = reward - self.baseline
loss = -log_prob * adv
self.optimizer.zero_grad()
loss.backward()
nn.utils.clip_grad_norm_(self.controller.parameters(), self.max_grad_norm)
self.optimizer.step()
return loss, entropy
def state_dict(self):
return {
"baseline": self.baseline,
"controller": self.controller.state_dict(),
"optimizer": self.optimizer.state_dict(),
}
def load_state_dict(self, states):
self.controller.load_state_dict(states["controller"])
self.baseline = states["baseline"]
self.optimizer.load_state_dict(states["optimizer"])
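
# Illustrative sketch (added comment; `controller` and the sampled tuple are
# assumptions from the surrounding project, not defined in this module):
#
#   estimator = REINFORCE(controller, lr=1e-4, baseline_decay=0.95)
#   loss, entropy = estimator.update((reward, action, log_prob, entropy))
#
# update() re-evaluates the sampled architecture, subtracts the moving-average
# baseline from the reward, and takes one Adam step on the controller.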
class PPO(object):
"""Proximal Policy Optimization with rollout buffer
part of the update code modified from:
https://github.com/ikostrikov/pytorch-a2c-ppo-acktr-gail/blob/master/a2c_ppo_acktr/algo/ppo.py
"""
def __init__(
self,
controller,
clip_param,
lr,
baseline_decay,
action_size=18,
ppo_epoch=1,
num_mini_batch=100,
max_grad_norm=2.0,
entropy_coef=0,
num_steps=100,
num_processes=1,
):
"""
Args:
controller (Controller): RNN architecture generator
clip_param (float): PPO clip parameter epsilon
lr (float): learning rate for controller optimizer
baseline_decay (float): moving average baseline decay
action_size (int): length of architecture representation
ppo_epoch (int): number of epochs to train
num_mini_batch (int): number of mini batches in the rollout buffer
max_grad_norm (float): controller gradient clip
entropy_coef (float): gradient coefficient for entropy regularization
num_steps (int): number of steps to train
num_processes (int): samples per step
"""
self.ppo_epoch = ppo_epoch
self.controller = controller
self.optimizer = torch.optim.Adam(controller.parameters(), lr=lr)
self.num_mini_batch = num_mini_batch
self.clip_param = clip_param
self.max_grad_norm = max_grad_norm
self.entropy_coef = entropy_coef
self.rollouts = RolloutStorage(num_steps, num_processes, action_size)
self.baseline = None
self.decay = baseline_decay
def state_dict(self):
return {
"baseline": self.baseline,
"rollouts": self.rollouts,
"controller": self.controller.state_dict(),
"optimizer": self.optimizer.state_dict(),
}
def load_state_dict(self, states):
self.controller.load_state_dict(states["controller"])
self.optimizer.load_state_dict(states["optimizer"])
self.baseline = states["baseline"]
if "rollouts" not in states:
# continue from old checkpoint format
# fill in rollouts
with open("genotypes.out") as ro_file:
lines = ro_file.readlines()
# randomly pick
random.shuffle(lines)
records = lines[: self.rollouts.num_steps]
for record in records:
reward, action = parse_geno_log(record)
with torch.no_grad():
_, _, _, log_prob = self.controller.evaluate(action)
self.update((reward, action, log_prob), is_train=False)
print(self.rollouts.actions)
else:
self.rollouts = states["rollouts"]
def update(self, sample, is_train=True):
reward, action, log_prob = sample
if self.baseline is None:
self.baseline = reward
else:
self.baseline = self.decay * self.baseline + (1 - self.decay) * reward
self.rollouts.insert(action, log_prob, reward)
if not is_train:
return -1, -1
advantages = self.rollouts.rewards - self.baseline
loss_epoch = 0
entropy_epoch = 0
for _ in range(self.ppo_epoch):
data_generator = self.rollouts.generator(advantages, self.num_mini_batch)
for sample in data_generator:
(
actions_batch,
rewards_batch,
old_actions_log_probs_batch,
adv_targ,
) = sample
action_log_probs, entropy = self.controller.evaluate_actions(
actions_batch
)
ratio = torch.exp(
action_log_probs
- torch.from_numpy(old_actions_log_probs_batch).float()
)
adv_targ_th = torch.from_numpy(adv_targ).float()
surr1 = ratio * adv_targ_th
surr2 = (
torch.clamp(ratio, 1.0 - self.clip_param, 1.0 + self.clip_param)
* adv_targ_th
)
action_loss = -torch.min(surr1, surr2).mean()
self.optimizer.zero_grad()
dist_entropy = entropy.mean()
(action_loss - dist_entropy * self.entropy_coef).backward()
nn.utils.clip_grad_norm_(
self.controller.parameters(), self.max_grad_norm
)
self.optimizer.step()
loss_epoch += action_loss.item()
entropy_epoch += dist_entropy.item()
num_updates = self.ppo_epoch * self.num_mini_batch
return loss_epoch / num_updates, entropy_epoch / num_updates
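
# Illustrative sketch (added comment; `controller` and the sampled tuple come
# from the surrounding project): PPO stores each (action, log_prob, reward)
# sample in its rollout buffer and optimizes the clipped surrogate over
# mini-batches drawn from that buffer.
#
#   ppo = PPO(controller, clip_param=0.1, lr=1e-4, baseline_decay=0.95)
#   loss, entropy = ppo.update((reward, action, log_prob))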
# coding: utf-8
import os,glob,re
import numpy as np
import tensorflow as tf
from numpy.random import randint,choice
from metrics import *
from augment import *
from multiprocessing import Pool
import itertools
import sys
from scipy.special import logit
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
os.environ['HDF5_USE_FILE_LOCKING'] = 'FALSE'
config = tf.ConfigProto()
sess = tf.Session(config=config)
folder="../data3d"
dim=(320,320,1)
epochs=5
drop=0.1
layers={}
rgl=tf.keras.regularizers.l1(10e-10)
inp,inps=[],[]
for i in range(2):
inp.append(tf.keras.layers.Input(shape=dim))
x=tf.keras.layers.Conv2D(32, (3,3), activation='relu', activity_regularizer=rgl)(inp[i])
x=tf.keras.layers.MaxPooling2D((2,2), padding='valid')(x)
inps.append(x)
layers[0]=tf.keras.layers.add(inps)
layers[1]=tf.keras.layers.Conv2D(32, (3,3), activation='relu', activity_regularizer=rgl)(layers[0])
layers[1.1]=tf.keras.layers.Dropout(drop)(layers[1])
layers[1.9]=tf.keras.layers.MaxPooling2D((4,4))(layers[1.1])
layers[2]=tf.keras.layers.Conv2D(32, (3,3), activation='relu', activity_regularizer=rgl)(layers[1.9])
layers[2.1]=tf.keras.layers.Dropout(drop)(layers[2])
layers[2.9]=tf.keras.layers.MaxPooling2D((4,4))(layers[2.1])
layers[3]=tf.keras.layers.Conv2D(32, (3,3), activation='relu', activity_regularizer=rgl)(layers[2.9])
layers[3.1]=tf.keras.layers.Dropout(drop)(layers[3])
layers[3.9]=tf.keras.layers.MaxPooling2D((4,4))(layers[3.1])
layers[3.95]=tf.keras.layers.Flatten()(layers[3.9])
layers[4]=tf.keras.layers.Dense(1, activation='sigmoid', activity_regularizer=rgl)(layers[3.95])
model = tf.keras.models.Model(inp, layers[4])
model.summary()
bce = tf.keras.losses.CategoricalCrossentropy()
model.compile(optimizer=tf.keras.optimizers.Adamax(), loss='mae', metrics=['accuracy'])
checkpoint_path = "verif.ckpt"
checkpoint_dir = os.path.dirname(checkpoint_path)
#model.load_weights(checkpoint_path)
cp_callback = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_path,
save_weights_only=True,verbose=1)
def process(i):
try:
a=np.load("{}/{}.npy".format(folder,i))
a=np.mean(a,axis=0)
return a
except:
print(i)
def prepare(data):
data=[np.where(im<0, 0, im) for im in data]
data=[a[:-1,:-1] for a in data]
data=[a.reshape(dim) for a in data]
return data
def mse(A,B):
return ((A - B)**2).mean()
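
# Illustrative note (added): process() averages a saved volume from
# ../data3d/<index>.npy over its first axis, prepare() clips/reshapes images to
# `dim`, and mse() compares two prepared images, e.g.
#   pair = prepare([process(5665), process(31259)])
#   print(mse(pair[0][:, :, 0], pair[1][:, :, 0]))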
indices=np.load('indices.npy')
def train():
while True:
print('start')
prange=indices
data=[process(x) for x in prange]
data=prepare(data)
print('2222222') # noise
noise=[np.random.normal(loc=0,scale=2,size=dim) for x in prange]
data2=[data[i]+noise[i] for i in range(len(prange))]
print('3333333') # displacement
data3=[np.roll(m, np.random.randint(100), axis=np.random.randint(2)) for m in data]
print("44444444") # different
data4=np.random.permutation(data)
print("55555555") # scale
scales=[np.random.choice(np.arange(0.25, 2.01, 0.25)) for m in data ]
data5=[clipped_zoom(m[:,:,0], scales[i]) for i,m in enumerate(data) ]
data5=[a.reshape(dim) for a in data5]
print(data5[0].shape)
print("66666666")
data_a=[a for a in data for _ in range(4)]
data_b=[l[i] for i in range(len(data)) for l in [data2,data3,data4,data5]]
data_o=[x for i in range(len(data)) for x in range(4)]
data_a=np.array(data_a)
data_b=np.array(data_b)
data_o=np.array(data_o)
print("##################################################################")
history = model.fit([data_a,data_b], data_o, epochs=epochs, batch_size=1 ,validation_split=0.1, callbacks=[cp_callback])
def eval():
prange=indices[:2000:1000]
prange=[5665,31259]
print(prange)
data=[process(x) for x in prange]
data=prepare(data)
in1=data
in2=[data[0] for i in range(2)]
in1=np.array(in1)
in2=np.array(in2)
res=model.predict([in1,in2],verbose=1)
res=[logit(x) for x in res]
res=[.05*x+.5 for x in res]
res=[x-res[0] for x in res]
res=[min(x,1) for x in res]
for i,r in enumerate(res):
m=mse(in1[i][:,:,0],in2[i][:,:,0])
print(i,r,m)
return res
train()
# -*- coding: utf-8 -*-
"""
foulefactoryapilib.models.account_writer_service_model
This file was automatically generated by APIMATIC v2.0 ( https://apimatic.io ) on 09/16/2016
"""
import dateutil.parser
from .base_model import BaseModel
class AccountWriterServiceModel(BaseModel):
"""Implementation of the 'AccountWriterServiceModel' model.
TODO: type model description here.
Attributes:
id_gender (int): TODO: type description here.
first_name (string): TODO: type description here.
name (string): TODO: type description here.
email (string): TODO: type description here.
phone (string): TODO: type description here.
birthday (DateTime): TODO: type description here.
address_1 (string): TODO: type description here.
city (string): TODO: type description here.
postal_code (string): TODO: type description here.
country_code (string): TODO: type description here.
nationality (string): TODO: type description here.
optin (bool): TODO: type description here.
company (string): TODO: type description here.
address_2 (string): TODO: type description here.
bill_address_1 (string): TODO: type description here.
bill_address_2 (string): TODO: type description here.
bill_city (string): TODO: type description here.
bill_postal_code (string): TODO: type description here.
"""
def __init__(self,
id_gender = None,
first_name = None,
name = None,
email = None,
phone = None,
birthday = None,
address_1 = None,
city = None,
postal_code = None,
country_code = None,
nationality = None,
optin = None,
company = None,
address_2 = None,
bill_address_1 = None,
bill_address_2 = None,
bill_city = None,
bill_postal_code = None):
"""Constructor for the AccountWriterServiceModel class"""
# Initialize members of the class
self.id_gender = id_gender
self.first_name = first_name
self.name = name
self.email = email
self.phone = phone
self.birthday = birthday
self.address_1 = address_1
self.city = city
self.postal_code = postal_code
self.country_code = country_code
self.nationality = nationality
self.optin = optin
self.company = company
self.address_2 = address_2
self.bill_address_1 = bill_address_1
self.bill_address_2 = bill_address_2
self.bill_city = bill_city
self.bill_postal_code = bill_postal_code
# Create a mapping from Model property names to API property names
self.names = {
"id_gender" : "IdGender",
"first_name" : "FirstName",
"name" : "Name",
"email" : "Email",
"phone" : "Phone",
"birthday" : "Birthday",
"address_1" : "Address1",
"city" : "City",
"postal_code" : "PostalCode",
"country_code" : "CountryCode",
"nationality" : "Nationality",
"optin" : "Optin",
"company" : "Company",
"address_2" : "Address2",
"bill_address_1" : "BillAddress1",
"bill_address_2" : "BillAddress2",
"bill_city" : "BillCity",
"bill_postal_code" : "BillPostalCode",
}
@classmethod
def from_dictionary(cls,
dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object as
obtained from the deserialization of the server's response. The keys
MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
        if dictionary is None:
return None
else:
# Extract variables from the dictionary
id_gender = dictionary.get("IdGender")
first_name = dictionary.get("FirstName")
name = dictionary.get("Name")
email = dictionary.get("Email")
phone = dictionary.get("Phone")
birthday = dateutil.parser.parse(dictionary.get("Birthday")) if dictionary.get("Birthday") else None
address_1 = dictionary.get("Address1")
city = dictionary.get("City")
postal_code = dictionary.get("PostalCode")
country_code = dictionary.get("CountryCode")
nationality = dictionary.get("Nationality")
optin = dictionary.get("Optin")
company = dictionary.get("Company")
address_2 = dictionary.get("Address2")
bill_address_1 = dictionary.get("BillAddress1")
bill_address_2 = dictionary.get("BillAddress2")
bill_city = dictionary.get("BillCity")
bill_postal_code = dictionary.get("BillPostalCode")
# Return an object of this model
return cls(id_gender,
first_name,
name,
email,
phone,
birthday,
address_1,
city,
postal_code,
country_code,
nationality,
optin,
company,
address_2,
bill_address_1,
bill_address_2,
bill_city,
bill_postal_code)
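
# Hedged usage sketch (added for illustration; the field values are placeholders):
if __name__ == "__main__":
    sample = {
        "IdGender": 1,
        "FirstName": "Jane",
        "Name": "Doe",
        "Email": "jane.doe@example.com",
        "Birthday": "1990-05-17T00:00:00Z",
        "CountryCode": "FR",
    }
    account = AccountWriterServiceModel.from_dictionary(sample)
    print("{} {} {}".format(account.first_name, account.country_code, account.birthday))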
import torch.nn as nn
import torch.nn.functional as F
import torch
#import mkl
import os, sys, time, numpy as np, librosa, scipy, pandas as pd,pdb
from tqdm import tqdm
from util import *
# def progress_bar(epoch, epochs, step, n_step, time, loss, mode):
# line = []
# line = f'\rEpoch {epoch}/ {epochs}'
# loss = loss/step
# if step==n_step:
# progress = '='*30
# else :
# n = int(30*step/n_step)
# progress = '='*n + '>' + '.'*(29-n)
# eta = time*(n_step-step)/step
# line += f'[{progress}] - {step}/{n_step} |Time :{int(time)}s |ETA :{int(eta)}s '
# if step==n_step:
# line += '\n'
# sys.stdout.write(line)
# sys.stdout.flush()
class Trainer:
def __init__(self, model, epochs, epoch, best_loss, optimizer,
criterion, device, loader, writer, model_path, score_path, args):
# self.step = 0
self.epoch = epoch
self.epoch_count = 0
self.epochs = epochs
self.best_loss = best_loss
self.model = model.to(device)
self.optimizer = optimizer
self.device = device
self.loader = loader
self.criterion = criterion
self.train_loss = 0
self.val_loss = 0
self.writer = writer
self.model_path = model_path
self.score_path = score_path
self.task = args.task
if args.mode=='train':
self.train_step = len(loader['train'])
self.val_step = len(loader['val'])
self.args = args
config_path = f'{args.pwg_path}/config.yml'
model_path = f'{args.pwg_path}/checkpoint-400000steps.pkl'
self.config = get_config(config_path)
self.mel_basis = torch.nn.Parameter(torch.load(f'{args.pwg_path}/mel_basis.pt')).to(device)
self.mean = torch.nn.Parameter(torch.load(f'{args.pwg_path}/mean.pt')).to(device)
self.scale = torch.nn.Parameter(torch.load(f'{args.pwg_path}/scale.pt')).to(device)
self.g_model = get_model(model_path,self.config).to(device)
for param in self.g_model.parameters():
param.requires_grad = False
self.get_output= get_output(self.config,self.g_model,device)
def save_checkpoint(self,):
state_dict = {
'epoch': self.epoch,
'model': self.model.state_dict(),
'optimizer': self.optimizer.state_dict(),
'best_loss': self.best_loss
}
check_folder(self.model_path)
torch.save(state_dict, self.model_path)
def _train_epoch(self):
self.train_loss = 0
self.model.train()
t_start = time.time()
step = 0
if self.args.encode_loss:
self._train_step = getattr(self,f'_train_step_mode_{self.task}_encode')
else:
self._train_step = getattr(self,f'_train_step_mode_{self.task}')
for data in self.loader['train']:
step += 1
self._train_step(data)
progress_bar(self.epoch,self.epochs,step,self.train_step,time.time()-t_start,loss=self.train_loss,mode='train')
self.train_loss /= len(self.loader['train'])
print(f'train_loss:{self.train_loss}')
# @torch.no_grad()
def _val_epoch(self):
self.val_loss = 0
self.model.eval()
t_start = time.time()
step = 0
if self.args.encode_loss:
self._val_step = getattr(self,f'_val_step_mode_{self.task}_encode')
else:
self._val_step = getattr(self,f'_val_step_mode_{self.task}')
for data in self.loader['val']:
step += 1
self._val_step(data)
progress_bar(self.epoch,self.epochs,step,self.val_step,time.time()-t_start,loss=self.val_loss,mode='val')
self.val_loss /= len(self.loader['val'])
print(f'val_loss:{self.val_loss}')
if self.best_loss > self.val_loss:
self.epoch_count = 0
print(f"Save model to '{self.model_path}'")
self.save_checkpoint()
self.best_loss = self.val_loss
def train(self):
model_name = self.model.__class__.__name__
while self.epoch < self.epochs and self.epoch_count<15:
self._train_epoch()
self._val_epoch()
self.writer.add_scalars(f'{self.args.task}/{model_name}_{self.args.optim}_{self.args.loss_fn}', {'train': self.train_loss},self.epoch)
self.writer.add_scalars(f'{self.args.task}/{model_name}_{self.args.optim}_{self.args.loss_fn}', {'val': self.val_loss},self.epoch)
self.epoch += 1
self.epoch_count += 1
self.writer.close()
def write_score(self,test_file,test_path,write_wav=False):
self.model.eval()
step = 2 if self.args.task=='denoise' else 1
outname = test_file.replace(f'{test_path}','').replace('/','_')
if self.args.task=='denoise':
noisy,sr = librosa.load(test_file,sr=16000)
wavname = test_file.split('/')[-1].split('.')[0]
c_file = os.path.join(self.args.test_clean,wavname.split('_')[0],test_file.split('/')[-1])
clean,sr = librosa.load(c_file,sr=16000)
n_data,n_phase,n_len = make_spectrum(y = noisy)
n_data = torch.from_numpy(n_data).t()
mat = read_emma(c_file.replace('.wav','.mat'),step,self.args.task)
n_data = pad_data(n_data,mat).to(self.device).unsqueeze(0).type(torch.float32)
spec = n_data[:,:,:257]
emma = n_data[:,:,257:]
pred = self.model(spec,emma).cpu().detach().numpy()
enhanced = recons_spec_phase(pred.squeeze().transpose(),n_phase,n_len)
elif self.args.task=='synthesis':
clean,sr = librosa.load(test_file,sr=16000)
cdata = get_mel(clean,self.config,self.mel_basis.cpu(),self.mean.cpu(),self.scale.cpu())[0]
mat = read_emma(test_file.replace('.wav','.mat'),step,self.args.task)
cdata = pad_data(cdata,mat).to(self.device).unsqueeze(0).type(torch.float32)
emma = cdata[:,:,513:]
pred = self.model(emma)[1] if self.args.encode_loss else self.model(emma)
spc = torch.expm1(pred)
mel = torch.matmul(spc, self.mel_basis)
mel = torch.log10(torch.max(mel,mel.new_ones(mel.size())*1e-10))
mel = mel.sub(self.mean[None,:]).div(self.scale[None,:])
enhanced = self.get_output(mel).squeeze().cpu().detach().numpy()
s_pesq, s_stoi = cal_score(clean,enhanced[:len(clean)])
with open(self.score_path, 'a') as f:
f.write(f'{outname},{s_pesq},{s_stoi}\n')
if write_wav:
method = self.model.__class__.__name__
wav_path = test_file.replace(f'{test_path}',f'./Enhanced/{method}')
check_folder(wav_path)
enhanced = enhanced/abs(enhanced).max()
librosa.output.write_wav(wav_path,enhanced,sr)
def test(self):
# load model
#mkl.set_num_threads(1)
self.model.eval()
checkpoint = torch.load(self.model_path)
self.model.load_state_dict(checkpoint['model'])
test_path = self.args.test_noisy if self.args.task=='denoise' else self.args.test_clean
test_folders = get_filepaths(test_path)
check_folder(self.score_path)
if os.path.exists(self.score_path):
os.remove(self.score_path)
with open(self.score_path, 'a') as f:
f.write('Filename,PESQ,STOI\n')
for test_file in tqdm(test_folders):
self.write_score(test_file,test_path,write_wav=True)
data = pd.read_csv(self.score_path)
pesq_mean = data['PESQ'].to_numpy().astype('float').mean()
stoi_mean = data['STOI'].to_numpy().astype('float').mean()
with open(self.score_path, 'a') as f:
f.write(','.join(('Average',str(pesq_mean),str(stoi_mean)))+'\n')
def _train_step_mode_denoise(self, data):
device = self.device
noisy, clean = data
noisy, clean = noisy.to(device).type(torch.float32), clean.to(device).type(torch.float32)
emma = clean[:,:,257:]
spec = clean[:,:,:257]
pred = self.model(noisy,emma)
loss = self.criterion(pred, spec)
self.train_loss += loss.item()
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
def _val_step_mode_denoise(self, data):
device = self.device
noisy, clean = data
noisy, clean = noisy.to(device).type(torch.float32), clean.to(device).type(torch.float32)
emma = clean[:,:,257:]
spec = clean[:,:,:257]
pred = self.model(noisy,emma)
loss = self.criterion(pred, spec)
self.val_loss += loss.item()
def _train_step_mode_synthesis(self, data):
device = self.device
emma, spec = data[:,:,513:],data[:,:,:513]
emma, spec = emma.to(device).type(torch.float32), spec.to(device).type(torch.float32)
pred = self.model(emma)
loss = self.criterion(pred, spec)
self.train_loss += loss.item()
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
def _val_step_mode_synthesis(self, data):
device = self.device
emma, spec = data[:,:,513:],data[:,:,:513]
emma, spec = emma.to(device).type(torch.float32), spec.to(device).type(torch.float32)
pred = self.model(emma)
loss = self.criterion(pred, spec)
self.val_loss += loss.item()
def _train_step_mode_synthesis_encode(self, data):
device = self.device
emma, spec = data[:,:,513:],data[:,:,:513]
emma, spec = emma.to(device).type(torch.float32), spec.to(device).type(torch.float32)
# stage 1
_,pred = self.model(spec)
loss = self.criterion(pred, spec)
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
# stage 2
enc_emma, pred = self.model(emma)
enc_spec, _ = self.model(spec)
loss = self.criterion(pred, spec)+self.criterion(enc_emma, enc_spec)
self.train_loss += self.criterion(pred, spec).item()
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
def _val_step_mode_synthesis_encode(self, data):
device = self.device
emma, spec = data[:,:,513:],data[:,:,:513]
emma, spec = emma.to(device).type(torch.float32), spec.to(device).type(torch.float32)
_, pred = self.model(emma)
loss = self.criterion(pred, spec)
self.val_loss += loss.item()
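
# Illustrative sketch (added comment; the model, loaders, optimizer, criterion,
# writer and args objects are assumptions that normally come from the project's
# entry-point script):
#
#   trainer = Trainer(model, epochs=100, epoch=0, best_loss=float('inf'),
#                     optimizer=optimizer, criterion=criterion, device=device,
#                     loader={'train': train_loader, 'val': val_loader},
#                     writer=writer, model_path='best.ckpt',
#                     score_path='scores.csv', args=args)
#   trainer.train()   # stops once validation loss has not improved for ~15 epochs
#   trainer.test()    # writes per-file PESQ/STOI scores (and enhanced wavs)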
# mmdet2trt/converters/generalized_attention.py
import math
import mmdet2trt
import numpy as np
import torch
import torch.nn.functional as F
from torch2trt_dynamic.torch2trt_dynamic import tensorrt_converter
def get_position_embedding(self,
x_q,
x_kv,
q_stride,
kv_stride,
feat_dim,
wave_length=1000):
h_idxs = mmdet2trt.ops.util_ops.arange_by_input(x_q, 2)
h_idxs = h_idxs.unsqueeze(1) * q_stride
w_idxs = mmdet2trt.ops.util_ops.arange_by_input(x_q, 3)
w_idxs = w_idxs.unsqueeze(1) * q_stride
h_kv_idxs = mmdet2trt.ops.util_ops.arange_by_input(x_kv, 2)
h_kv_idxs = h_kv_idxs.unsqueeze(1) * kv_stride
w_kv_idxs = mmdet2trt.ops.util_ops.arange_by_input(x_kv, 3)
w_kv_idxs = w_kv_idxs.unsqueeze(1) * kv_stride
# (h, h_kv, 1)
h_diff = h_idxs.unsqueeze(1) - h_kv_idxs.unsqueeze(0)
h_diff *= self.position_magnitude
# (w, w_kv, 1)
w_diff = w_idxs.unsqueeze(1) - w_kv_idxs.unsqueeze(0)
w_diff *= self.position_magnitude
feat_range = torch.arange(0, feat_dim / 4, device=x_q.device)
dim_mat = x_q.new_tensor([wave_length])
dim_mat = dim_mat**((4. / feat_dim) * feat_range)
dim_mat = dim_mat.view((1, 1, -1))
embedding_x = torch.cat(
((w_diff / dim_mat).sin(), (w_diff / dim_mat).cos()), dim=2)
embedding_y = torch.cat(
((h_diff / dim_mat).sin(), (h_diff / dim_mat).cos()), dim=2)
return embedding_x, embedding_y
@tensorrt_converter(
'mmcv.cnn.bricks.GeneralizedAttention.forward', is_real=False)
def convert_GeneralizeAttention(ctx):
self = ctx.method_args[0]
x_input = ctx.method_args[1]
output = ctx.method_return
num_heads = self.num_heads
# use empirical_attention
if self.q_downsample is not None:
x_q = self.q_downsample(x_input)
else:
x_q = x_input
n, _, h, w = x_q.shape
if self.kv_downsample is not None:
x_kv = self.kv_downsample(x_input)
else:
x_kv = x_input
_, _, h_kv, w_kv = x_kv.shape
if self.attention_type[0] or self.attention_type[1]:
proj_query = self.query_conv(x_q).view(
(n, num_heads, self.qk_embed_dim, h * w))
proj_query = proj_query.permute(0, 1, 3, 2)
if self.attention_type[0] or self.attention_type[2]:
proj_key = self.key_conv(x_kv).view(
(n, num_heads, self.qk_embed_dim, h_kv * w_kv))
if self.attention_type[1] or self.attention_type[3]:
position_embed_x, position_embed_y = get_position_embedding(
self, x_q, x_kv, self.q_stride, self.kv_stride,
self.position_embedding_dim)
# (n, num_heads, w, w_kv, dim)
position_feat_x = self.appr_geom_fc_x(position_embed_x).\
view(1, w, w_kv, num_heads, self.qk_embed_dim).\
permute(0, 3, 1, 2, 4)
# (n, num_heads, h, h_kv, dim)
position_feat_y = self.appr_geom_fc_y(position_embed_y).\
view(1, h, h_kv, num_heads, self.qk_embed_dim).\
permute(0, 3, 1, 2, 4)
position_feat_x /= math.sqrt(2)
position_feat_y /= math.sqrt(2)
# accelerate for saliency only
if (np.sum(self.attention_type) == 1) and self.attention_type[2]:
appr_bias = self.appr_bias.\
view(1, num_heads, 1, self.qk_embed_dim)
energy = torch.matmul(appr_bias, proj_key).\
view(n, num_heads, 1, h_kv * w_kv)
h = 1
w = 1
else:
# (n, num_heads, h*w, h_kv*w_kv), query before key, 540mb for
if not self.attention_type[0]:
energy = x_input.new_zeros(n, num_heads, h, w, h_kv, w_kv)
# attention_type[0]: appr - appr
# attention_type[1]: appr - position
# attention_type[2]: bias - appr
# attention_type[3]: bias - position
if self.attention_type[0] or self.attention_type[2]:
if self.attention_type[0] and self.attention_type[2]:
appr_bias = self.appr_bias.\
view(1, num_heads, 1, self.qk_embed_dim)
energy = torch.matmul(proj_query + appr_bias, proj_key).\
view(n, num_heads, h, w, h_kv, w_kv)
elif self.attention_type[0]:
energy = torch.matmul(proj_query, proj_key).\
view(n, num_heads, h, w, h_kv, w_kv)
elif self.attention_type[2]:
appr_bias = self.appr_bias.\
view(1, num_heads, 1, self.qk_embed_dim)
energy += torch.matmul(appr_bias, proj_key).\
view(n, num_heads, 1, 1, h_kv, w_kv)
if self.attention_type[1] or self.attention_type[3]:
if self.attention_type[1] and self.attention_type[3]:
geom_bias = self.geom_bias.\
view(1, num_heads, 1, self.qk_embed_dim)
proj_query_reshape = (proj_query + geom_bias).\
view(n, num_heads, h, w, self.qk_embed_dim)
energy_x = torch.matmul(
proj_query_reshape.permute(0, 1, 3, 2, 4),
position_feat_x.permute(0, 1, 2, 4, 3))
energy_x = energy_x.\
permute(0, 1, 3, 2, 4).unsqueeze(4)
energy_y = torch.matmul(proj_query_reshape,
position_feat_y.permute(0, 1, 2, 4, 3))
energy_y = energy_y.unsqueeze(5)
energy += energy_x + energy_y
elif self.attention_type[1]:
proj_query_reshape = proj_query.\
view(n, num_heads, h, w, self.qk_embed_dim)
proj_query_reshape = proj_query_reshape.\
permute(0, 1, 3, 2, 4)
position_feat_x_reshape = position_feat_x.\
permute(0, 1, 2, 4, 3)
position_feat_y_reshape = position_feat_y.\
permute(0, 1, 2, 4, 3)
energy_x = torch.matmul(proj_query_reshape,
position_feat_x_reshape)
energy_x = energy_x.permute(0, 1, 3, 2, 4).unsqueeze(4)
energy_y = torch.matmul(proj_query_reshape,
position_feat_y_reshape)
energy_y = energy_y.unsqueeze(5)
energy += energy_x + energy_y
elif self.attention_type[3]:
geom_bias = self.geom_bias.\
view(1, num_heads, self.qk_embed_dim, 1)
position_feat_x_reshape = position_feat_x.\
view(n, num_heads, w*w_kv, self.qk_embed_dim)
position_feat_y_reshape = position_feat_y.\
view(n, num_heads, h * h_kv, self.qk_embed_dim)
energy_x = torch.matmul(position_feat_x_reshape, geom_bias)
energy_x = energy_x.view(n, num_heads, 1, w, 1, w_kv)
energy_y = torch.matmul(position_feat_y_reshape, geom_bias)
energy_y = energy_y.view(n, num_heads, h, 1, h_kv, 1)
energy += energy_x + energy_y
energy = energy.view(n, num_heads, h * w, h_kv * w_kv)
if self.spatial_range >= 0:
cur_local_constraint_map = \
self.local_constraint_map[:h, :w, :h_kv, :w_kv].\
contiguous().\
view(1, 1, h*w, h_kv*w_kv)
energy = energy.masked_fill_(cur_local_constraint_map, float('-inf'))
attention = F.softmax(energy, 3)
proj_value = self.value_conv(x_kv)
proj_value_reshape = proj_value.\
view((n, num_heads, self.v_dim, h_kv * w_kv)).\
permute(0, 1, 3, 2)
out = torch.matmul(attention, proj_value_reshape).\
permute(0, 1, 3, 2).\
contiguous().\
view(n, self.v_dim * self.num_heads, h, w)
out = self.proj_conv(out)
# output is downsampled, upsample back to input size
if self.q_downsample is not None:
out = F.interpolate(
out, size=x_input.shape[2:], mode='bilinear', align_corners=False)
out = self.gamma * out + x_input
output._trt = out._trt
ctx.method_return = output
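
# Note (added): this converter is not invoked directly. The @tensorrt_converter
# registration above lets torch2trt_dynamic intercept calls to
# mmcv.cnn.bricks.GeneralizedAttention.forward during model conversion and
# replace them with the tensor operations built in convert_GeneralizeAttention.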
import os
import re
from googleplaces import GooglePlaces, types, lang
API_KEY = '<KEY>'
google_places = GooglePlaces(API_KEY)
monthAbbreviations = {'jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul', 'aug', 'sep', 'oct', 'nov', 'dec'}
class InfoExtractor:
date = ''
time = ''
locations = []
titles = []
def extractWords(self, list):
global time
global locations
global date
for i in range(0, len(list)):
word = str(list[i])
#checking date
if re.match('[0-9]{1,2}/[0-9]{1,2}', word) is not None or re.match('[0-9]{1,2}\.[0-9]{1,2}', word) or re.match('[0-9]{1,2}\-[0-9]{1,2}', word):
if list[i+1] != 'am' and list[i+1] != 'pm' and list[i][-2:] != 'am' and list[i][-2:] != 'pm': # check if i+1 exists
date = word
continue
if word[:3].lower() in monthAbbreviations:
if list[i+1] is not None:
date = list[i] + " "
date += list[i+1]
continue
#checking time
if word == 'am' or word == 'pm':
if re.match('\d{1,2}\:\d{2}\-\d{1,2}\:\d{2}', list[i-1]) is not None:
time = str(list[i-1][0:list[i-1].index('-')]) + word
continue
elif re.match('\d{1,2}\-\d{1,2}', list[i-1]) is not None:
time = str(list[i-1][0:list[i-1].index('-')]) + word
continue
elif re.match('\d{1,2}\:\d{2}', list[i-1]) is not None:
if i > 2 and list[i-2] == '-':
time = str(list[i-3]) + word
else:
time = str(list[i-1]) + word
continue
elif re.match('\d{1,2}', list[i-1]) is not None:
if i > 2 and list[i-2] == '-':
time = str(list[i-3]) + word
else:
time = str(list[i-1]) + word
continue
elif word[-2:] == 'am' or word[-2:] == 'pm':
extracted_time = word[:-2]
if re.match('\d{1,2}\:\d{2}\-\d{1,2}\:\d{2}', extracted_time) is not None:
time = extracted_time + word[-2:]
continue
elif re.match('\d{1,2}\-\d{1,2}', extracted_time) is not None:
time = extracted_time + word[-2:]
continue
elif re.match('\d{1,2}\:\d{2}', extracted_time) is not None:
if i > 1 and list[i-1] == '-':
time = str(list[i-2]) + word[-2:]
else:
time = extracted_time + word[-2:]
continue
elif re.match('\d{1,2}', extracted_time) is not None:
if i > 1 and list[i-1] == '-':
time = str(list[i-2]) + word[-2:]
else:
time = extracted_time + word[-2:]
continue
#checking location
# query_result = google_places.nearby_search(location='West Lafayette, United States', keyword=list[i])
# if len(query_result.places) > 0:
# location = query_result.places[0].name
# if i < len(list)-1:
# self.locations.append(query_result.places[0].name + ' Room ' + list[i+1])
# else:
# self.locations.append(query_result.places[0].name)
self.locations = ['aahan']
if(i < 6):
self.titles.append(list[i])
def getDate(self):
return date
def getTime(self):
return time
def getLocations(self):
return self.locations
def getTitles(self):
return self.titles
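# Hedged usage sketch (added for illustration, not part of the original script): the
# token list below is made up; extractWords expects an already-tokenized message and
# stores the detected date/time in module-level globals, which the getters then return.
if __name__ == '__main__':
    extractor = InfoExtractor()
    extractor.extractWords(['CS', 'Seminar', 'on', '10/12', 'at', '3:30', 'pm', 'LWSN', 'B134'])
    print(extractor.getDate())    # expected: '10/12'
    print(extractor.getTime())    # expected: '3:30pm'
    print(extractor.getTitles())  # rough title: the first few tokens of the message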
|
<gh_stars>100-1000
"""
Copyright 2019 Samsung SDS
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from brightics.common.repr import BrtcReprBuilder, strip_margin, dict2MD
from brightics.function.utils import _model_dict
from brightics.common.groupby import _function_by_group
from brightics.common.utils import check_required_parameters
from brightics.common.utils import get_default_from_parameters_if_required
from brightics.common.validation import raise_runtime_error
from brightics.common.validation import validate, greater_than_or_equal_to, greater_than, less_than_or_equal_to
from gensim.corpora import Dictionary
import pandas as pd
import operator
def bow(table, group_by=None, **params):
check_required_parameters(_bow, params, ['table'])
params = get_default_from_parameters_if_required(params, _bow)
param_validation_check = [greater_than_or_equal_to(params, 0, 'no_below'),
less_than_or_equal_to(params, 1.0, 'no_above'),
greater_than(params, 0.0, 'no_above'),
greater_than_or_equal_to(params, 1, 'keep_n')]
validate(*param_validation_check)
if group_by is not None:
return _function_by_group(_bow, table, group_by=group_by, **params)
else:
return _bow(table, **params)
def _bow(table, input_col, add_words=None, no_below=1, no_above=0.8, keep_n=10000):
word_list = table[input_col].tolist()
dictionary = Dictionary(word_list)
    if add_words is not None:
dictionary.add_documents([add_words])
dictionary.filter_extremes(no_below=no_below, no_above=no_above, keep_n=keep_n, keep_tokens=None)
params = {
'Input Column': input_col,
'Minimum Number of Occurrence': no_below,
'Maximum Fraction of Occurrence': no_above,
'Keep N most Frequent': keep_n
}
empty_description = ''
if len(list(dictionary.dfs.values())) == 0:
out_table = pd.DataFrame([], columns=['token', 'document_frequency'])
empty_description = 'Out table is empty since parameter \"Minimum Number of Occurrence\" is greater than the maximum of document frequency.'
else:
out_table = pd.DataFrame.from_dict(dictionary.token2id, orient='index').drop([0], axis=1)
out_table.insert(loc=0, column='token', value=dictionary.token2id.keys())
token_cnt = sorted(dictionary.dfs.items(), key=operator.itemgetter(0))
dfs_list = []
for i in range(len(dictionary.dfs)):
dfs_list.append(token_cnt[i][1])
out_table['document_frequency'] = dfs_list
rb = BrtcReprBuilder()
rb.addMD(strip_margin("""
|# Bag of Words Result
|### Parameters
|
| {display_params}
|
| {description}
|
""".format(display_params=dict2MD(params), description=empty_description)))
model = _model_dict('bow')
model['dict_table'] = out_table
model['dictionary'] = dictionary
model['add_words'] = add_words
model['_repr_brtc_'] = rb.get()
return {'model' : model, 'out_table': out_table}
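# Hedged usage sketch (added for illustration): _bow expects the input column to hold
# already-tokenized documents (lists of strings); the column name and tokens below are
# made up, and the call assumes a working Brightics environment.
if __name__ == '__main__':
    sample = pd.DataFrame({'tokens': [['apache', 'license'], ['apache', 'spark']]})
    result = bow(sample, input_col='tokens', no_below=1, no_above=1.0, keep_n=100)
    print(result['out_table'])  # token / document_frequency table built above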
|
from __future__ import absolute_import
import cgi
import cStringIO as StringIO
from itertools import islice
import logging
import socket, time, urllib, urlparse
import warnings
from .schema import SolrSchema, SolrError
from .search import LuceneQuery, MltSolrSearch, SolrSearch, params_from_dict
MAX_LENGTH_GET_URL = 2048
# Jetty default is 4096; Tomcat default is 8192; picking 2048 to be conservative.
class SolrConnection(object):
def __init__(self, url, http_connection, retry_timeout, max_length_get_url):
if http_connection:
self.http_connection = http_connection
else:
import httplib2
self.http_connection = httplib2.Http()
self.url = url.rstrip("/") + "/"
self.update_url = self.url + "update/"
self.select_url = self.url + "select/"
self.mlt_url = self.url + "mlt/"
self.retry_timeout = retry_timeout
self.max_length_get_url = max_length_get_url
def request(self, *args, **kwargs):
try:
return self.http_connection.request(*args, **kwargs)
except socket.error:
if self.retry_timeout < 0:
raise
time.sleep(self.retry_timeout)
return self.http_connection.request(*args, **kwargs)
def commit(self, waitSearcher=None, expungeDeletes=None, softCommit=None):
response = self.update('<commit/>', commit=True,
waitSearcher=waitSearcher, expungeDeletes=expungeDeletes, softCommit=softCommit)
def optimize(self, waitSearcher=None, maxSegments=None):
response = self.update('<optimize/>', optimize=True,
waitSearcher=waitSearcher, maxSegments=maxSegments)
# For both commit & optimize above, we use the XML body instead
# of the URL parameter, because if we're using POST (which we
# should) then only the former works.
def rollback(self):
response = self.update("<rollback/>")
def update(self, update_doc, **kwargs):
body = update_doc
if body:
headers = {"Content-Type":"text/xml; charset=utf-8"}
else:
headers = {}
url = self.url_for_update(**kwargs)
r, c = self.request(url, method="POST", body=body,
headers=headers)
if r.status != 200:
raise SolrError(r, c)
def url_for_update(self, commit=None, commitWithin=None, softCommit=None, optimize=None, waitSearcher=None, expungeDeletes=None, maxSegments=None):
extra_params = {}
if commit is not None:
extra_params['commit'] = "true" if commit else "false"
        if commitWithin is not None:
            try:
                commit_within = float(commitWithin)
            except (TypeError, ValueError):
                raise ValueError("commitWithin should be a number in milliseconds")
            if commit_within < 0:
                raise ValueError("commitWithin should be a non-negative number in milliseconds")
            extra_params['commitWithin'] = str(commit_within)
if softCommit is not None:
extra_params['softCommit'] = "true" if softCommit else "false"
if optimize is not None:
extra_params['optimize'] = "true" if optimize else "false"
if waitSearcher is not None:
extra_params['waitSearcher'] = "true" if waitSearcher else "false"
if expungeDeletes is not None:
extra_params['expungeDeletes'] = "true" if expungeDeletes else "false"
        if maxSegments is not None:
            try:
                max_segments = int(maxSegments)
            except (TypeError, ValueError):
                raise ValueError("maxSegments should be an integer")
            if max_segments <= 0:
                raise ValueError("maxSegments should be a positive number")
            extra_params['maxSegments'] = str(max_segments)
if 'expungeDeletes' in extra_params and 'commit' not in extra_params:
raise ValueError("Can't do expungeDeletes without commit")
if 'maxSegments' in extra_params and 'optimize' not in extra_params:
raise ValueError("Can't do maxSegments without optimize")
if extra_params:
return "%s?%s" % (self.update_url, urllib.urlencode(sorted(extra_params.items())))
else:
return self.update_url
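    # Hedged illustration (added): given the sorted urlencode above, a call such as
    #   conn.url_for_update(commit=True, waitSearcher=False)
    # on a connection created with url="http://localhost:8983/solr" would yield
    #   http://localhost:8983/solr/update/?commit=true&waitSearcher=false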
def select(self, params):
qs = urllib.urlencode(params)
url = "%s?%s" % (self.select_url, qs)
if len(url) > self.max_length_get_url:
warnings.warn("Long query URL encountered - POSTing instead of "
"GETting. This query will not be cached at the HTTP layer")
url = self.select_url
kwargs = dict(
method="POST",
body=qs,
headers={"Content-Type": "application/x-www-form-urlencoded"},
)
else:
kwargs = dict(method="GET")
r, c = self.request(url, **kwargs)
if r.status != 200:
raise SolrError(r, c)
return c
def mlt(self, params, content=None):
"""Perform a MoreLikeThis query using the content specified
There may be no content if stream.url is specified in the params.
"""
qs = urllib.urlencode(params)
base_url = "%s?%s" % (self.mlt_url, qs)
if content is None:
kwargs = {'uri': base_url, 'method': "GET"}
else:
get_url = "%s&stream.body=%s" % (base_url, urllib.quote_plus(content))
if len(get_url) <= self.max_length_get_url:
kwargs = {'uri': get_url, 'method': "GET"}
else:
kwargs = {'uri': base_url, 'method': "POST",
'body': content, 'headers': {"Content-Type": "text/plain; charset=utf-8"}}
r, c = self.request(**kwargs)
if r.status != 200:
raise SolrError(r, c)
return c
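    # Hedged usage sketch (added): for a MoreLikeThis handler registered at /mlt, a call like
    #   conn.mlt(params={'mlt.fl': 'title', 'rows': 5}, content="free text to match")
    # sends the content inline via stream.body while the GET URL stays short enough,
    # and otherwise POSTs it as text/plain, exactly as the branches above do.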
class SolrInterface(object):
readable = True
writeable = True
remote_schema_file = "admin/file/?file=schema.xml"
def __init__(self, url, schemadoc=None, http_connection=None, mode='', retry_timeout=-1, max_length_get_url=MAX_LENGTH_GET_URL):
self.conn = SolrConnection(url, http_connection, retry_timeout, max_length_get_url)
self.schemadoc = schemadoc
if mode == 'r':
self.writeable = False
elif mode == 'w':
self.readable = False
self.init_schema()
def init_schema(self):
if self.schemadoc:
schemadoc = self.schemadoc
else:
r, c = self.conn.request(
urlparse.urljoin(self.conn.url, self.remote_schema_file))
if r.status != 200:
raise EnvironmentError("Couldn't retrieve schema document from server - received status code %s\n%s" % (r.status, c))
schemadoc = StringIO.StringIO(c)
self.schema = SolrSchema(schemadoc)
def add(self, docs, chunk=100, **kwargs):
if not self.writeable:
raise TypeError("This Solr instance is only for reading")
if hasattr(docs, "items") or not hasattr(docs, "__iter__"):
docs = [docs]
# to avoid making messages too large, we break the message every
# chunk docs.
for doc_chunk in grouper(docs, chunk):
update_message = self.schema.make_update(doc_chunk)
self.conn.update(str(update_message), **kwargs)
def delete(self, docs=None, queries=None, **kwargs):
if not self.writeable:
raise TypeError("This Solr instance is only for reading")
if not docs and not queries:
raise SolrError("No docs or query specified for deletion")
elif docs is not None and (hasattr(docs, "items") or not hasattr(docs, "__iter__")):
docs = [docs]
delete_message = self.schema.make_delete(docs, queries)
self.conn.update(str(delete_message), **kwargs)
def commit(self, *args, **kwargs):
if not self.writeable:
raise TypeError("This Solr instance is only for reading")
self.conn.commit(*args, **kwargs)
def optimize(self, *args, **kwargs):
if not self.writeable:
raise TypeError("This Solr instance is only for reading")
self.conn.optimize(*args, **kwargs)
def rollback(self):
if not self.writeable:
raise TypeError("This Solr instance is only for reading")
self.conn.rollback()
def delete_all(self):
if not self.writeable:
raise TypeError("This Solr instance is only for reading")
        # When deletion is fixed to escape query strings, this will need to be fixed.
self.delete(queries=self.Q(**{"*":"*"}))
def search(self, **kwargs):
if not self.readable:
raise TypeError("This Solr instance is only for writing")
params = params_from_dict(**kwargs)
return self.schema.parse_response(self.conn.select(params))
def query(self, *args, **kwargs):
if not self.readable:
raise TypeError("This Solr instance is only for writing")
q = SolrSearch(self)
if len(args) + len(kwargs) > 0:
return q.query(*args, **kwargs)
else:
return q
def mlt_search(self, content=None, **kwargs):
if not self.readable:
raise TypeError("This Solr instance is only for writing")
params = params_from_dict(**kwargs)
return self.schema.parse_response(self.conn.mlt(params, content=content))
def mlt_query(self, fields=None, content=None, content_charset=None, url=None, query_fields=None,
**kwargs):
"""Perform a similarity query on MoreLikeThisHandler
The MoreLikeThisHandler is expected to be registered at the '/mlt'
endpoint in the solrconfig.xml file of the server.
fields is the list of field names to compute similarity upon. If not
provided, we just use the default search field.
query_fields can be used to adjust boosting values on a subset of those
fields.
Other MoreLikeThis specific parameters can be passed as kwargs without
the 'mlt.' prefix.
"""
if not self.readable:
raise TypeError("This Solr instance is only for writing")
q = MltSolrSearch(self, content=content, content_charset=content_charset, url=url)
return q.mlt(fields=fields, query_fields=query_fields, **kwargs)
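    # Hedged usage sketch (added, not from the library's own docs): assuming a core with
    # 'title' and 'body' fields,
    #   si = SolrInterface("http://localhost:8983/solr")
    #   similar = si.mlt_query(fields=["title", "body"], content="some seed text")
    # builds a MoreLikeThis query object that can be refined further before being executed.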
def Q(self, *args, **kwargs):
q = LuceneQuery(self.schema)
q.add(args, kwargs)
return q
def grouper(iterable, n):
"grouper('ABCDEFG', 3) --> [['ABC'], ['DEF'], ['G']]"
i = iter(iterable)
g = list(islice(i, 0, n))
while g:
yield g
g = list(islice(i, 0, n))
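# Hedged illustration (added): grouper lazily yields successive chunks, e.g.
#   [''.join(g) for g in grouper('ABCDEFG', 3)] == ['ABC', 'DEF', 'G']
# which is what SolrInterface.add relies on to split large batches of documents.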
|
<reponame>danfunk/r01_dropout<gh_stars>0
import numpy as np
from sklearn.model_selection import KFold
from sklearn import svm, linear_model, metrics
from sklearn.ensemble import RandomForestClassifier
from feature_generation import file_read_and_feature_extract, mindtrails_feature_vector_generation, \
    templeton_feature_vector_generation
# Note: svm_multi_task_training and svm_multi_task_prediction are called below but are
# assumed to be provided by another module in this project (their import is not shown here).
def model_prediction_testing():
prediction_session_index = 2
platform_list = ['mindtrails', 'templeton']
for platform in platform_list:
if platform == 'mindtrails':
session_list = ['PRE', 'SESSION1', 'SESSION2', 'SESSION3', 'SESSION4', 'SESSION5', 'SESSION6', 'SESSION7',
'SESSION8']
demographic_dict, QOL_dict, OASIS_dict, RR_dict, BBSIQ_dict, DASS21_AS_dict, DASS21_DS_dict, trial_dict, \
dwell_time_dict, session_completion_dict, dropout_label, control_normal_dict = file_read_and_feature_extract(
platform)
prediction_session = session_list[prediction_session_index]
training_set_session = session_list[0: prediction_session_index]
participant_list = []
for e in dropout_label:
if (int(e) > 419) or (int(e) < 20):
if (e in control_normal_dict['training']) and (
dropout_label[e][session_list[prediction_session_index - 1]] == '0'):
participant_list.append(e)
feature_vector, truth_vector, feature_item_list = mindtrails_feature_vector_generation(training_set_session,
prediction_session,
participant_list,
demographic_dict,
QOL_dict, OASIS_dict,
RR_dict, BBSIQ_dict,
DASS21_AS_dict,
DASS21_DS_dict,
trial_dict,
dwell_time_dict,
dropout_label)
elif platform == 'templeton':
session_list = ['preTest', 'firstSession', 'secondSession', 'thridSession', 'fourthSession']
demographic_dict, affect_dict, credibility_dict, mental_dict, whatibelieve_dict, relatability_dict, \
expectancy_dict, phq4_dict, trial_dict, session_completion_dict, dropout_label = file_read_and_feature_extract(
platform)
prediction_session = session_list[prediction_session_index]
training_set_session = session_list[0: prediction_session_index]
participant_list = []
for e in dropout_label:
if dropout_label[e][session_list[prediction_session_index - 1]] == '0':
participant_list.append(e)
feature_vector, truth_vector, feature_item_list = templeton_feature_vector_generation(training_set_session,
prediction_session,
participant_list,
demographic_dict,
affect_dict,
credibility_dict,
mental_dict,
whatibelieve_dict,
relatability_dict,
expectancy_dict,
phq4_dict, trial_dict,
dropout_label)
        print('dataset ==>', platform)
        print('number of participants for predict session ==>', prediction_session, len(feature_vector))
        print('feature dimension ==>', len(feature_vector[0]), len(feature_item_list))
X = range(len(feature_vector))
kf = KFold(n_splits=10, random_state=None, shuffle=True)
kf.get_n_splits(X)
f1_score_svm_list = []
f1_score_lr_list = []
f1_score_rf_list = []
f1_score_multi_svm_list = []
for train_index, test_index in kf.split(X):
data_train = []
truth_train = []
data_test = []
truth_test = []
participant_list_train = []
participant_list_test = []
for i in train_index:
data_train.append(feature_vector[i])
truth_train.append(truth_vector[i])
participant_list_train.append(participant_list[i])
for i in test_index:
data_test.append(feature_vector[i])
truth_test.append(truth_vector[i])
participant_list_test.append(participant_list[i])
svm_model = svm.LinearSVC(C=1, tol=1e-3)
svm_model.fit(data_train, truth_train)
lr_model = linear_model.LogisticRegression()
lr_model.fit(data_train, truth_train)
rf_model = RandomForestClassifier(
n_estimators=10, criterion="gini", max_features="auto", max_depth=2, min_samples_split=2,
min_samples_leaf=1, random_state=0, bootstrap=True, min_weight_fraction_leaf=0.0,
n_jobs=1, oob_score=False, verbose=0, warm_start=False
)
rf_model.fit(data_train, truth_train)
svm_prediction = svm_model.predict(data_test)
lr_prediction = lr_model.predict(data_test)
rf_prediction = rf_model.predict(data_test)
truth_prediction_svm = []
truth_prediction_lr = []
truth_prediction_rf = []
truth_test_new = []
for i in range(len(svm_prediction)):
truth_prediction_svm.append(int(svm_prediction[i]))
truth_prediction_lr.append(int(lr_prediction[i]))
truth_prediction_rf.append(int(rf_prediction[i]))
truth_test_new.append(int(truth_test[i]))
fscore_svm = metrics.f1_score(truth_test_new, truth_prediction_svm, average='micro')
fscore_lr = metrics.f1_score(truth_test_new, truth_prediction_lr, average='micro')
fscore_rf = metrics.f1_score(truth_test_new, truth_prediction_rf, average='micro')
f1_score_svm_list.append(fscore_svm)
f1_score_lr_list.append(fscore_lr)
f1_score_rf_list.append(fscore_rf)
multi_svm_model = svm_multi_task_training(data_train, truth_train, participant_list_train, demographic_dict)
truth_prediction_multi_svm = []
truth_test_new = []
for i in range(len(data_test)):
participant_id = participant_list_test[i]
testing_feature_vector = data_test[i]
prediction_value = svm_multi_task_prediction(testing_feature_vector, demographic_dict, participant_id, multi_svm_model)
truth_prediction_multi_svm.append(prediction_value)
truth_test_new.append(int(truth_test[i]))
fscore_multi_svm = metrics.f1_score(truth_test_new, truth_prediction_multi_svm, average='micro')
f1_score_multi_svm_list.append(fscore_multi_svm)
mean_f1score_svm = np.mean(f1_score_svm_list)
std_f1score_svm = np.std(f1_score_svm_list)
mean_f1score_lr = np.mean(f1_score_lr_list)
std_f1score_lr = np.std(f1_score_lr_list)
mean_f1score_rf = np.mean(f1_score_rf_list)
std_f1score_rf = np.std(f1_score_rf_list)
mean_f1score_multi_svm = np.mean(f1_score_multi_svm_list)
std_f1score_multi_svm = np.std(f1_score_multi_svm_list)
        print('prediction_session ==>', prediction_session)
        print('SVM classifier ==> ', 'f1 score mean', mean_f1score_svm, 'f1 score std', std_f1score_svm)
        print('Logistic Regression classifier ==>', 'f1 score mean', mean_f1score_lr, 'f1 score std', std_f1score_lr)
        print('Random Forest classifier ==> ', 'f1 score mean', mean_f1score_rf, 'f1 score std', std_f1score_rf)
        print('Multi-SVM classifier ==> ', 'f1 score mean', mean_f1score_multi_svm, 'f1 score std', std_f1score_multi_svm)
        print('\n')
model_prediction_testing()
|
<reponame>carlabguillen/spack
# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Chill(AutotoolsPackage):
"""A polyheadral compiler for autotuning"""
homepage = "http://github.com/CtopCsUtahEdu/chill"
url = "https://github.com/CtopCsUtahEdu/chill/archive/v0.3.tar.gz"
git = "https://github.com/CtopCsUtahEdu/chill.git"
maintainers = ['dhuth']
version('master', branch='master')
version('0.3', sha256='574b622368a6bfaadbe9c1fa02fabefdc6c006069246f67d299f943b7e1d8aa3')
depends_on('[email protected] cxxstd=11', type='build')
depends_on('[email protected]: +cxx11', type=('build', 'run'))
depends_on('autoconf', type='build')
depends_on('[email protected]:', type='build')
depends_on('libtool', type='build')
depends_on('m4', type='build')
depends_on('iegenlib', type=('build', 'run'))
depends_on('[email protected]:', type='build')
depends_on('flex', type='build')
    # Does not currently work with Python 3
depends_on('[email protected]:2.8')
depends_on('isl', type='build')
depends_on('gmp', type='build')
patch('Add-ISLHOME-option.patch')
patch('Add-GMPHOME-option.patch')
patch('Add-GCC-libquadmath-for-rose.patch')
build_directory = 'spack-build'
def autoreconf(self, spec, prefix):
bash = which('bash')
bash('./bootstrap')
def setup_build_environment(self, env):
rose_home = self.spec['rose'].prefix
boost_home = self.spec['boost'].prefix
iegen_home = self.spec['iegenlib'].prefix
isl_home = self.spec['isl'].prefix
gmp_home = self.spec['gmp'].prefix
env.set('ROSEHOME', rose_home)
env.set('BOOSTHOME', boost_home)
env.set('IEGENHOME', iegen_home)
env.set('ISLHOME', isl_home)
env.set('GMPHOME', gmp_home)
env.append_path('LD_LIBRARY_PATH', rose_home.lib)
env.append_path('LD_LIBRARY_PATH', boost_home.lib)
env.append_path('LD_LIBRARY_PATH', iegen_home.lib)
env.append_path('LD_LIBRARY_PATH', isl_home.lib)
env.append_path('LD_LIBRARY_PATH', gmp_home.lib)
def setup_run_environment(self, env):
rose_home = self.spec['rose'].prefix
boost_home = self.spec['boost'].prefix
iegen_home = self.spec['iegenlib'].prefix
isl_home = self.spec['isl'].prefix
gmp_home = self.spec['gmp'].prefix
env.append_path('LD_LIBRARY_PATH', rose_home.lib)
env.append_path('LD_LIBRARY_PATH', boost_home.lib)
env.append_path('LD_LIBRARY_PATH', iegen_home.lib)
env.append_path('LD_LIBRARY_PATH', isl_home.lib)
env.append_path('LD_LIBRARY_PATH', gmp_home.lib)
def configure_args(self):
args = ['--with-rose={0}'.format(self.spec['rose'].prefix),
'--with-boost={0}'.format(self.spec['boost'].prefix),
'--with-iegen={0}'.format(self.spec['iegenlib'].prefix),
'--with-isl={0}'.format(self.spec['isl'].prefix),
'--with-gmp={0}'.format(self.spec['gmp'].prefix)]
return args
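    # Hedged usage note (added): once this recipe is visible to a Spack instance, the
    # package would typically be built and inspected with the standard CLI, e.g.
    #   spack install chill
    #   spack spec chill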
|
"""
Common code for the pyslim test cases.
"""
import os
import json
import random
import base64
import pyslim
import tskit
import msprime
import pytest
import attr
import numpy as np
class PyslimTestCase:
'''
Base class for test cases in pyslim.
'''
def verify_haplotype_equality(self, ts, slim_ts):
assert ts.num_sites == slim_ts.num_sites
for j, v1, v2 in zip(range(ts.num_sites), ts.variants(),
slim_ts.variants()):
g1 = [v1.alleles[x] for x in v1.genotypes]
g2 = [v2.alleles[x] for x in v2.genotypes]
assert np.array_equal(g1, g2)
def assertTablesEqual(self, t1, t2, label=''):
# make it easy to see what's wrong
if hasattr(t1, "metadata_schema"):
if t1.metadata_schema != t2.metadata_schema:
print(f"{label} :::::::::: t1 ::::::::::::")
print(t1.metadata_schema)
print(f"{label} :::::::::: t2 ::::::::::::")
print(t2.metadata_schema)
assert t1.metadata_schema == t2.metadata_schema
if t1.num_rows != t2.num_rows:
print(f"{label}: t1.num_rows {t1.num_rows} != {t2.num_rows} t2.num_rows")
for k, (e1, e2) in enumerate(zip(t1, t2)):
if e1 != e2:
print(f"{label} :::::::::: t1 ({k}) ::::::::::::")
print(e1)
print(f"{label} :::::::::: t2 ({k}) ::::::::::::")
print(e2)
assert e1 == e2
assert t1.num_rows == t2.num_rows
assert t1 == t2
def assertMetadataEqual(self, t1, t2):
# check top-level metadata, first the parsed version:
assert t1.metadata_schema == t2.metadata_schema
assert t1.metadata == t2.metadata
# and now check the underlying bytes
# TODO: use the public interface if https://github.com/tskit-dev/tskit/issues/832 happens
md1 = t1._ll_tables.metadata
md2 = t2._ll_tables.metadata
assert md1 == md2
def verify_trees_equal(self, ts1, ts2):
# check that trees are equal by checking MRCAs between randomly
# chosen nodes with matching slim_ids
random.seed(23)
assert ts1.sequence_length == ts2.sequence_length
if isinstance(ts1, tskit.TableCollection):
ts1 = ts1.tree_sequence()
if isinstance(ts2, tskit.TableCollection):
ts2 = ts2.tree_sequence()
map1 = {}
for j, n in enumerate(ts1.nodes()):
if n.metadata is not None:
map1[n.metadata['slim_id']] = j
map2 = {}
for j, n in enumerate(ts2.nodes()):
if n.metadata is not None:
map2[n.metadata['slim_id']] = j
assert set(map1.keys()) == set(map2.keys())
sids = list(map1.keys())
for sid in sids:
n1 = ts1.node(map1[sid])
n2 = ts2.node(map2[sid])
assert n1.time == n2.time
assert n1.metadata == n2.metadata
i1 = ts1.individual(n1.individual)
i2 = ts2.individual(n2.individual)
assert i1.metadata == i2.metadata
for _ in range(10):
pos = random.uniform(0, ts1.sequence_length)
t1 = ts1.at(pos)
t2 = ts2.at(pos)
for _ in range(10):
a, b = random.choices(sids, k=2)
assert t1.tmrca(map1[a], map1[b]) == t2.tmrca(map2[a], map2[b])
def assertTableCollectionsEqual(self, t1, t2,
skip_provenance=False, check_metadata_schema=True,
reordered_individuals=False):
if isinstance(t1, tskit.TreeSequence):
t1 = t1.dump_tables()
if isinstance(t2, tskit.TreeSequence):
t2 = t2.dump_tables()
t1_samples = [(n.metadata['slim_id'], j) for j, n in enumerate(t1.nodes) if (n.flags & tskit.NODE_IS_SAMPLE)]
t1_samples.sort()
t2_samples = [(n.metadata['slim_id'], j) for j, n in enumerate(t2.nodes) if (n.flags & tskit.NODE_IS_SAMPLE)]
t2_samples.sort()
t1.simplify([j for (_, j) in t1_samples], record_provenance=False)
t2.simplify([j for (_, j) in t2_samples], record_provenance=False)
if skip_provenance is True:
t1.provenances.clear()
t2.provenances.clear()
if skip_provenance == -1:
assert t1.provenances.num_rows + 1 == t2.provenances.num_rows
t2.provenances.truncate(t1.provenances.num_rows)
assert t1.provenances.num_rows == t2.provenances.num_rows
if check_metadata_schema:
# this is redundant now, but will help diagnose if things go wrong
assert t1.metadata_schema.schema == t2.metadata_schema.schema
assert t1.populations.metadata_schema.schema == t2.populations.metadata_schema.schema
assert t1.individuals.metadata_schema.schema == t2.individuals.metadata_schema.schema
assert t1.nodes.metadata_schema.schema == t2.nodes.metadata_schema.schema
assert t1.edges.metadata_schema.schema == t2.edges.metadata_schema.schema
assert t1.sites.metadata_schema.schema == t2.sites.metadata_schema.schema
assert t1.mutations.metadata_schema.schema == t2.mutations.metadata_schema.schema
assert t1.migrations.metadata_schema.schema == t2.migrations.metadata_schema.schema
if not check_metadata_schema:
# need to pull out metadata to compare as dicts before zeroing the schema
m1 = t1.metadata
m2 = t2.metadata
ms = tskit.MetadataSchema(None)
for t in (t1, t2):
t.metadata_schema = ms
t.populations.metadata_schema = ms
t.individuals.metadata_schema = ms
t.nodes.metadata_schema = ms
t.edges.metadata_schema = ms
t.sites.metadata_schema = ms
t.mutations.metadata_schema = ms
t.migrations.metadata_schema = ms
t1.metadata = b''
t2.metadata = b''
assert m1 == m2
if reordered_individuals:
ind1 = {i.metadata['pedigree_id']: j for j, i in enumerate(t1.individuals)}
ind2 = {i.metadata['pedigree_id']: j for j, i in enumerate(t2.individuals)}
for pid in ind1:
                if pid not in ind2:
print("not in t2:", ind1[pid])
assert pid in ind2
if t1.individuals[ind1[pid]] != t2.individuals[ind2[pid]]:
print("t1:", t1.individuals[ind1[pid]])
print("t2:", t2.individuals[ind2[pid]])
assert t1.individuals[ind1[pid]] == t2.individuals[ind2[pid]]
for pid in ind2:
                if pid not in ind1:
print("not in t1:", ind2[pid])
assert pid in ind1
t1.individuals.clear()
t2.individuals.clear()
# go through one-by-one so we know which fails
self.assertTablesEqual(t1.populations, t2.populations, "populations")
self.assertTablesEqual(t1.individuals, t2.individuals, "individuals")
self.assertTablesEqual(t1.nodes, t2.nodes, "nodes")
self.assertTablesEqual(t1.edges, t2.edges, "edges")
self.assertTablesEqual(t1.sites, t2.sites, "sites")
self.assertTablesEqual(t1.mutations, t2.mutations, "mutations")
self.assertTablesEqual(t1.migrations, t2.migrations, "migrations")
self.assertTablesEqual(t1.provenances, t2.provenances, "provenances")
self.assertMetadataEqual(t1, t2)
assert t1.sequence_length == t2.sequence_length
assert t1 == t2
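# Hedged usage sketch (added): a concrete test module would typically mix these helpers
# into its own pytest-style class; the fixture name below is purely illustrative.
#   class TestRoundTrip(PyslimTestCase):
#       def test_tables_survive_dump_load(self, recipe_ts):
#           tables = recipe_ts.dump_tables()
#           self.assertTableCollectionsEqual(tables, recipe_ts)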
|
<reponame>YangHee-Min/spinalcordtoolbox
#!/usr/bin/env python
# -*- coding: utf-8
#########################################################################################
#
# Function to segment the multiple sclerosis lesions using convolutional neural networks
#
# ---------------------------------------------------------------------------------------
# Copyright (c) 2017 Polytechnique Montreal <www.neuro.polymtl.ca>
# Authors: <NAME>
# Modified: 2018-06-11
#
# About the license: see the file LICENSE.TXT
#########################################################################################
from __future__ import print_function, absolute_import, division
import os
import sys
import numpy as np
from msct_parser import Parser
import sct_utils as sct
from spinalcordtoolbox.image import Image
from spinalcordtoolbox.deepseg_lesion.core import deep_segmentation_MSlesion
def get_parser():
"""Initialize the parser."""
parser = Parser(__file__)
parser.usage.set_description("""MS lesion Segmentation using convolutional networks. \n\nReference: <NAME>, <NAME>, et al. Automatic segmentation of the spinal cord and intramedullary multiple sclerosis lesions with convolutional neural networks (2018). arxiv.org/abs/1805.06349""")
parser.add_option(name="-i",
type_value="image_nifti",
description="input image.",
mandatory=True,
example="t1.nii.gz")
parser.add_option(name="-c",
type_value="multiple_choice",
description="type of image contrast. \nt2: T2w scan with isotropic or anisotropic resolution. \nt2_ax: T2w scan with axial orientation and thick slices. \nt2s: T2*w scan with axial orientation and thick slices.",
mandatory=True,
example=['t2', 't2_ax', 't2s'])
parser.add_option(name="-centerline",
type_value="multiple_choice",
description="Method used for extracting the centerline.\nsvm: automatic centerline detection, based on Support Vector Machine algorithm.\ncnn: automatic centerline detection, based on Convolutional Neural Network.\nviewer: semi-automatic centerline generation, based on manual selection of a few points using an interactive viewer, then approximation with NURBS.\nfile: use an existing centerline by specifying its filename with flag -file_centerline (e.g. -file_centerline t2_centerline_manual.nii.gz).\n",
mandatory=False,
example=['svm', 'cnn', 'viewer', 'file'],
default_value="svm")
parser.add_option(name="-file_centerline",
type_value="image_nifti",
description="Input centerline file (to use with flag -centerline manual).",
mandatory=False,
example="t2_centerline_manual.nii.gz")
parser.add_option(name="-brain",
type_value="multiple_choice",
description="indicate if the input image is expected to contain brain sections:\n1: contains brain section\n0: no brain section.\nTo indicate this parameter could speed the segmentation process. Note that this flag is only effective with -centerline cnn.",
mandatory=False,
example=["0", "1"],
default_value="1")
parser.add_option(name="-ofolder",
type_value="folder_creation",
description="output folder.",
mandatory=False,
example="My_Output_Folder/",
default_value="")
parser.add_option(name="-r",
type_value="multiple_choice",
description="remove temporary files.",
mandatory=False,
example=['0', '1'],
default_value='1')
parser.add_option(name="-v",
type_value="multiple_choice",
description="1: display on (default), 0: display off, 2: extended",
mandatory=False,
example=["0", "1", "2"],
default_value="1")
parser.add_option(name='-igt',
type_value='image_nifti',
description='File name of ground-truth segmentation.',
mandatory=False)
return parser
def main():
"""Main function."""
sct.init_sct()
parser = get_parser()
args = sys.argv[1:]
arguments = parser.parse(args)
fname_image = arguments['-i']
contrast_type = arguments['-c']
ctr_algo = arguments["-centerline"]
brain_bool = bool(int(arguments["-brain"]))
if "-brain" not in args and contrast_type in ['t2s', 't2_ax']:
brain_bool = False
if '-ofolder' not in args:
output_folder = os.getcwd()
else:
output_folder = arguments["-ofolder"]
if ctr_algo == 'file' and "-file_centerline" not in args:
sct.printv('Please use the flag -file_centerline to indicate the centerline filename.', 1, 'error')
sys.exit(1)
if "-file_centerline" in args:
manual_centerline_fname = arguments["-file_centerline"]
ctr_algo = 'file'
else:
manual_centerline_fname = None
remove_temp_files = int(arguments['-r'])
verbose = int(arguments.get('-v'))
sct.init_sct(log_level=verbose, update=True) # Update log level
algo_config_stg = '\nMethod:'
algo_config_stg += '\n\tCenterline algorithm: ' + str(ctr_algo)
algo_config_stg += '\n\tAssumes brain section included in the image: ' + str(brain_bool) + '\n'
sct.printv(algo_config_stg)
im_image = Image(fname_image)
im_seg, im_labels_viewer, im_ctr = deep_segmentation_MSlesion(im_image, contrast_type, ctr_algo=ctr_algo, ctr_file=manual_centerline_fname,
brain_bool=brain_bool, remove_temp_files=remove_temp_files, verbose=verbose)
# Save segmentation
fname_seg = os.path.abspath(os.path.join(output_folder, sct.extract_fname(fname_image)[1] + '_lesionseg' +
sct.extract_fname(fname_image)[2]))
im_seg.save(fname_seg)
if ctr_algo == 'viewer':
# Save labels
fname_labels = os.path.abspath(os.path.join(output_folder, sct.extract_fname(fname_image)[1] + '_labels-centerline' +
sct.extract_fname(fname_image)[2]))
im_labels_viewer.save(fname_labels)
if verbose == 2:
# Save ctr
fname_ctr = os.path.abspath(os.path.join(output_folder, sct.extract_fname(fname_image)[1] + '_centerline' +
sct.extract_fname(fname_image)[2]))
im_ctr.save(fname_ctr)
sct.display_viewer_syntax([fname_image, fname_seg], colormaps=['gray', 'red'], opacities=['', '0.7'])
if __name__ == "__main__":
main()
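# Hedged usage sketch (added): from the command line this script is typically invoked
# with the flags defined in get_parser() above, e.g.
#   python sct_deepseg_lesion.py -i t2.nii.gz -c t2 -ofolder results/
# (the script filename here is assumed from the module's purpose).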
|
<reponame>lahsuk/sanic-cors
# -*- coding: utf-8 -*-
"""
test
~~~~
Sanic-CORS is a simple extension to Sanic allowing you to support cross
origin resource sharing (CORS) using a simple decorator.
:copyright: (c) 2020 by <NAME> (based on flask-cors by <NAME>).
:license: MIT, see LICENSE for more details.
"""
from ..base_test import SanicCorsTestCase
from sanic import Sanic
from sanic.response import text
import re
import unittest
from sanic_cors import *
from sanic_cors.core import *
letters = 'abcdefghijklmnopqrstuvwxyz' # string.letters is not PY3 compatible
class OriginsTestCase(SanicCorsTestCase):
def setUp(self):
self.app = Sanic(__name__)
@self.app.route('/', methods=['GET', 'HEAD', 'OPTIONS'])
@cross_origin(self.app)
def wildcard(request):
return text('Welcome!')
@self.app.route('/test_always_send')
@cross_origin(self.app, always_send=True)
def test_always_send(request):
return text('Welcome!')
@self.app.route('/test_always_send_no_wildcard')
@cross_origin(self.app, always_send=True, send_wildcard=False)
def test_always_send_no_wildcard(request):
return text('Welcome!')
@self.app.route('/test_send_wildcard_with_origin', methods=['GET', 'HEAD', 'OPTIONS'])
@cross_origin(self.app, send_wildcard=True)
def test_send_wildcard_with_origin(request):
return text('Welcome!')
@self.app.route('/test_list')
@cross_origin(self.app, origins=["http://foo.com", "http://bar.com"])
def test_list(request):
return text('Welcome!')
@self.app.route('/test_string')
@cross_origin(self.app, origins="http://foo.com")
def test_string(request):
return text('Welcome!')
@self.app.route('/test_set')
@cross_origin(self.app, origins=set(["http://foo.com", "http://bar.com"]))
def test_set(request):
return text('Welcome!')
@self.app.route('/test_subdomain_regex', methods=['GET', 'HEAD', 'OPTIONS'])
@cross_origin(self.app, origins=r"http?://\w*\.?example\.com:?\d*/?.*")
def test_subdomain_regex(request):
return text('')
@self.app.route('/test_compiled_subdomain_regex', methods=['GET', 'HEAD', 'OPTIONS'])
@cross_origin(self.app, origins=re.compile(r"http?://\w*\.?example\.com:?\d*/?.*"))
def test_compiled_subdomain_regex(request):
return text('')
@self.app.route('/test_regex_list', methods=['GET', 'HEAD', 'OPTIONS'])
@cross_origin(self.app, origins=[r".*.example.com", r".*.otherexample.com"])
def test_regex_list(request):
return text('')
@self.app.route('/test_regex_mixed_list', methods=['GET', 'HEAD', 'OPTIONS'])
@cross_origin(self.app, origins=["http://example.com", r".*.otherexample.com"])
def test_regex_mixed_list(request):
return text('')
@self.app.route('/test_multiple_protocols')
@cross_origin(self.app, origins="https?://example.com")
def test_multiple_protocols(request):
return text('')
def test_defaults_no_origin(self):
''' If there is no Origin header in the request, the
Access-Control-Allow-Origin header should be '*' by default.
'''
for resp in self.iter_responses('/'):
self.assertEqual(resp.headers.get(ACL_ORIGIN), '*')
def test_defaults_with_origin(self):
''' If there is an Origin header in the request, the
Access-Control-Allow-Origin header should be included.
'''
for resp in self.iter_responses('/', origin='http://example.com'):
self.assertEqual(resp.status, 200)
self.assertEqual(resp.headers.get(ACL_ORIGIN), 'http://example.com')
def test_always_send_no_wildcard(self):
'''
        If send_wildcard=False, but there is '*' in the
        allowed origins, we should send it anyway.
'''
for resp in self.iter_responses('/'):
self.assertEqual(resp.status, 200)
self.assertEqual(resp.headers.get(ACL_ORIGIN), '*')
def test_always_send_no_wildcard_origins(self):
for resp in self.iter_responses('/'):
self.assertEqual(resp.status, 200)
self.assertEqual(resp.headers.get(ACL_ORIGIN), '*')
def test_send_wildcard_with_origin(self):
''' If there is an Origin header in the request, the
Access-Control-Allow-Origin header should be included.
'''
for resp in self.iter_responses('/test_send_wildcard_with_origin', origin='http://example.com'):
self.assertEqual(resp.status, 200)
self.assertEqual(resp.headers.get(ACL_ORIGIN), '*')
def test_list_serialized(self):
''' If there is an Origin header in the request, the
Access-Control-Allow-Origin header should be echoed.
'''
resp = self.get('/test_list', origin='http://bar.com')
self.assertEqual(resp.headers.get(ACL_ORIGIN), 'http://bar.com')
def test_string_serialized(self):
''' If there is an Origin header in the request,
the Access-Control-Allow-Origin header should be echoed back.
'''
resp = self.get('/test_string', origin='http://foo.com')
self.assertEqual(resp.headers.get(ACL_ORIGIN), 'http://foo.com')
def test_set_serialized(self):
''' If there is an Origin header in the request,
the Access-Control-Allow-Origin header should be echoed back.
'''
resp = self.get('/test_set', origin='http://bar.com')
allowed = resp.headers.get(ACL_ORIGIN)
        # Order is not guaranteed
self.assertEqual(allowed, 'http://bar.com')
def test_not_matching_origins(self):
for resp in self.iter_responses('/test_list', origin="http://bazz.com"):
self.assertFalse(ACL_ORIGIN in resp.headers)
def test_subdomain_regex(self):
for sub in letters:
domain = "http://%s.example.com" % sub
for resp in self.iter_responses('/test_subdomain_regex',
headers={'origin': domain}):
self.assertEqual(domain, resp.headers.get(ACL_ORIGIN))
def test_compiled_subdomain_regex(self):
for sub in letters:
domain = "http://%s.example.com" % sub
for resp in self.iter_responses('/test_compiled_subdomain_regex',
headers={'origin': domain}):
self.assertEqual(domain, resp.headers.get(ACL_ORIGIN))
def test_regex_list(self):
for parent in 'example.com', 'otherexample.com':
for sub in letters:
domain = "http://%s.%s.com" % (sub, parent)
for resp in self.iter_responses('/test_regex_list',
headers={'origin': domain}):
self.assertEqual(domain, resp.headers.get(ACL_ORIGIN))
def test_regex_mixed_list(self):
'''
        Tests the corner case that occurs when the send_always setting is True
and no Origin header in the request, it is not possible to match
the regular expression(s) to determine the correct
Access-Control-Allow-Origin header to be returned. Instead, the
list of origins is serialized, and any strings which seem like
regular expressions (e.g. are not a '*' and contain either '*'
or '?') will be skipped.
Thus, the list of returned Access-Control-Allow-Origin header
        is guaranteed to be 'null', the origin or "*", as per the w3
http://www.w3.org/TR/cors/#access-control-allow-origin-response-header
'''
for sub in letters:
domain = "http://%s.otherexample.com" % sub
for resp in self.iter_responses('/test_regex_mixed_list',
origin=domain):
self.assertEqual(domain, resp.headers.get(ACL_ORIGIN))
self.assertEquals("http://example.com",
self.get('/test_regex_mixed_list', origin='http://example.com').headers.get(ACL_ORIGIN))
def test_multiple_protocols(self):
import logging
logging.getLogger('sanic_cors').level = logging.DEBUG
resp = self.get('/test_multiple_protocols', origin='https://example.com')
self.assertEqual('https://example.com', resp.headers.get(ACL_ORIGIN))
if __name__ == "__main__":
unittest.main()
|
<gh_stars>100-1000
from pathlib import Path
from typing import Dict
from rotkehlchen.config import default_data_directory
from rotkehlchen.constants.resolver import strethaddress_to_identifier
from rotkehlchen.globaldb.handler import GlobalDBHandler
from rotkehlchen.utils.misc import timestamp_to_date, ts_now
class ContextManager():
"""Manages the parsing context of the assets template"""
def __init__(self) -> None:
self.id_to_variable: Dict[str, str] = {}
self.globaldb = GlobalDBHandler(default_data_directory())
def add_asset_initialization(self, var_name: str, identifier: str) -> str:
generated_text = ''
asset_data = self.globaldb.get_asset_data(identifier=identifier, form_with_incomplete_data=False) # noqa: E501
var_forked = 'None'
if asset_data.forked:
if asset_data.forked in self.id_to_variable:
var_forked = self.id_to_variable[asset_data.forked]
else:
var_forked = f'{identifier.upper()}_forked'
generated_text += self.add_asset_initialization(var_forked, asset_data.forked.identifier) # noqa: E501
var_swappedfor = 'None'
if asset_data.swapped_for:
if asset_data.swapped_for in self.id_to_variable:
var_swappedfor = self.id_to_variable[asset_data.swapped_for]
else:
var_swappedfor = f'{identifier.upper()}_swappedfor'
generated_text += self.add_asset_initialization(var_swappedfor, asset_data.swapped_for.identifier) # noqa:E501
name = f'"{asset_data.name}"' if asset_data.name else None
symbol = f'\'{asset_data.symbol}\'' if asset_data.symbol else None
started = f'Timestamp({asset_data.started})' if asset_data.started else None
coingecko = f'\'{asset_data.coingecko}\'' if asset_data.coingecko else None
cryptocompare = f'\'{asset_data.cryptocompare}\'' if asset_data.cryptocompare else None
generated_text += (
f'{var_name} = Asset.initialize(\n'
f' identifier=\'{identifier}\',\n'
f' asset_type=AssetType.{asset_data.asset_type.name},\n'
f' name={name},\n'
f' symbol={symbol},\n'
f' started={started},\n'
f' forked={var_forked},\n'
f' swapped_for={var_swappedfor},\n'
f' coingecko={coingecko},\n'
f' cryptocompare={cryptocompare},\n'
f')\n'
f'CONSTANT_ASSETS.append({var_name})\n'
)
if identifier in self.id_to_variable:
raise ValueError(f'Asset with identifier {identifier} and var_name {var_name} is defined twice') # noqa: E501
self.id_to_variable[identifier] = var_name
return generated_text
def add_ethtoken_initialization(self, var_name: str, address: str) -> str:
generated_text = ''
token = self.globaldb.get_ethereum_token(address=address)
var_swappedfor = 'None'
if token.swapped_for:
if token.swapped_for in self.id_to_variable:
var_swappedfor = self.id_to_variable[token.swapped_for]
else:
var_swappedfor = f'{strethaddress_to_identifier(address).upper()}_swappedfor'
generated_text += self.add_asset_initialization(var_swappedfor, token.swapped_for.identifier) # noqa: E501
name = f'"{token.name}"' if token.name else None
symbol = f'\'{token.symbol}\'' if token.symbol else None
started = f'Timestamp({token.started})' if token.started else None
coingecko = f'\'{token.coingecko}\'' if token.coingecko else None
cryptocompare = f'\'{token.cryptocompare}\'' if token.cryptocompare else None
protocol = f'\'{token.protocol}\'' if token.protocol else None
if token.underlying_tokens is not None:
raise ValueError(
f'Found token {address} with underlying tokens. Not supported '
f'at constants asset generation yet. Can implement when needed.',
)
generated_text += (
f'{var_name} = EthereumToken.initialize(\n'
f' address=string_to_ethereum_address(\'{address}\'),\n'
f' decimals={token.decimals},\n'
f' name={name},\n'
f' symbol={symbol},\n'
f' started={started},\n'
f' swapped_for={var_swappedfor},\n'
f' coingecko={coingecko},\n'
f' cryptocompare={cryptocompare},\n'
f' protocol={protocol},\n'
f')\n'
f'CONSTANT_ASSETS.append({var_name})\n'
)
identifier = strethaddress_to_identifier(address)
if identifier in self.id_to_variable:
raise ValueError(f'Token with identifier {identifier} and varname {var_name} is defined twice') # noqa: E501
self.id_to_variable[identifier] = var_name
return generated_text
def main() -> None:
"""Goes through the assets template, reads the built-in assets DB and generates
assets.py with initialization of all constant assets"""
root_dir = Path(__file__).resolve().parent.parent.parent
constants_dir = root_dir / 'rotkehlchen' / 'constants'
template_file = constants_dir / 'assets.py.template'
date = timestamp_to_date(ts_now())
generated_text = (
f'# This python file was generated automatically by\n'
f'# {__file__} at {date}.\n'
f'# Do not edit manually!\n'
f'\n'
)
ctx = ContextManager()
with open(template_file, 'r') as f:
for line in f:
line = line.strip('\n\r')
if 'Asset(\'' in line:
initial_split = line.split(' = Asset(\'')
var_name = initial_split[0]
identifier = initial_split[1].split('\'')[0]
generated_text += ctx.add_asset_initialization(var_name, identifier)
continue
if 'EthereumToken(\'' in line:
initial_split = line.split(' = EthereumToken(\'')
var_name = initial_split[0]
identifier = initial_split[1].split('\'')[0]
generated_text += ctx.add_ethtoken_initialization(var_name, identifier)
continue
# else just copy text
generated_text += line + '\n'
assets_file = constants_dir / 'assets.py'
with open(assets_file, 'w') as f:
f.write(generated_text)
    print('constants/assets.py generated successfully!')
if __name__ == "__main__":
main()
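# Hedged illustration (added): the template lines that main() rewrites look like
#   A_BTC = Asset('BTC')
#   A_EXAMPLE = EthereumToken('0x...')   # '0x...' stands in for a real token address
# and each such line is expanded into a full Asset.initialize(...) or
# EthereumToken.initialize(...) block using data from the bundled global DB.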
|
import scipy.io.wavfile
import numpy as np
import matplotlib.pyplot as plt
import time
import librosa
from scipy.fftpack import fft
import multiprocessing
audData, rate = librosa.core.load("../SoundSamples/journey_no_noise_8k.wav", sr = None)
sampData_floor11, rate = librosa.core.load("../SoundSamples/eleven_8k_short.wav", sr = None)
sampData_beep, rate = librosa.core.load("../SoundSamples/beep_8k_short.wav", sr = None)
audtime = np.arange(0, len(audData), 1)
samptime_floor11 = np.arange(0, len(sampData_floor11), 1)
samptime_beep = np.arange(0, len(sampData_beep), 1)
"""
Define here your window sizes
"""
WINDOW_SIZE_FLOOR11 = 100
WINDOW_SIZE_BEEP = 20
"""
Define here your minimum values
"""
MIN_VALUE_FLOOR11 = 2.01
MIN_VALUE_BEEP = 0.1
#cross_cor_floor11 = np.zeros([len(audtime)])
#cross_cor_beep = np.zeros([len(audtime)])
def computer_floor11(cross_cor_floor11, hits_floor11, count_floor11):
print("starting p1......")
timer = time.time()
i = 0
counter = 0
pointer = 0
buffer = np.zeros([len(samptime_floor11)])
array = np.zeros([len(audtime)])
array1 = np.zeros([len(audtime)])
while i <= len(audtime)-1:
buffer[pointer] = audData[i]
if counter > WINDOW_SIZE_FLOOR11:
for k in range(len(buffer)):
array[i-WINDOW_SIZE_FLOOR11-1:i] += buffer[k]*sampData_floor11[k]
counter = 0
if abs(array[i-5]) > MIN_VALUE_FLOOR11:
array1[i-WINDOW_SIZE_FLOOR11-1:i] = 1
if array1[i-WINDOW_SIZE_FLOOR11-1] == 1 and array1[i-WINDOW_SIZE_FLOOR11-2] == 0:
count_floor11.value += 1
#print("hit")
i += 1
pointer += 1
counter += 1
if pointer >= len(buffer)-1:
pointer = 0
cross_cor_floor11[:] = array
hits_floor11[:] = array1
print("p1 finished in:",time.time()-timer)
def computer_beep(cross_cor_beep, hits_beep, count_beeps):
print("starting p2.....")
timer = time.time()
g = 0
counter = 0
pointer = 0
buffer = np.zeros([len(samptime_beep)])
array = np.zeros([len(audtime)])
array1 = np.zeros([len(audtime)])
while g <= len(audtime)-1:
buffer[pointer] = audData[g]
if counter > WINDOW_SIZE_BEEP:
for k in range(len(buffer)):
array[g-WINDOW_SIZE_BEEP-1:g] += buffer[k]*sampData_beep[k]
counter=0
if abs(array[g-5]) > MIN_VALUE_BEEP:
array1[g-WINDOW_SIZE_BEEP-1:g] = 1
if array1[g-WINDOW_SIZE_BEEP-1] == 1 and array1[g-WINDOW_SIZE_BEEP-2] == 0:
count_beeps.value += 1
#print("hit")
g += 1
pointer += 1
counter += 1
if pointer >= len(buffer)-1:
pointer = 0
cross_cor_beep[:] = array
hits_beep[:] = array1
print("p2 finished in:",time.time()-timer)
def plot(array1, array2):
plt.figure(0)
plt.subplot(311)
plt.plot(audtime/1000, audData, linewidth=0.5, alpha=0.9, label = 'Journey no noise')
plt.xlabel('Time (s)')
plt.ylabel('Amplitude')
plt.legend()
plt.subplot(312)
plt.plot(audtime/1000, array1, linewidth=0.8, alpha=0.9, color = 'r')
plt.xlabel('Time (s)')
plt.ylabel('Hit beep')
plt.subplot(313)
plt.plot(audtime/1000, array2, linewidth=0.8, alpha=0.9)
plt.xlabel('Time (s)')
plt.ylabel('Hit floor11')
plt.show()
if __name__ =="__main__":
cross_cor_floor11 = multiprocessing.Array('f', len(audtime))
cross_cor_beep = multiprocessing.Array('f', len(audtime))
hits_floor11 = multiprocessing.Array('f', len(audtime))
hits_beep = multiprocessing.Array('f', len(audtime))
count_beeps = multiprocessing.Value('i', 0)
count_floor11 = multiprocessing.Value('i', 0)
p1 = multiprocessing.Process(target=computer_floor11, args = (cross_cor_floor11, hits_floor11, count_floor11))
p2 = multiprocessing.Process(target=computer_beep, args = (cross_cor_beep, hits_beep, count_beeps))
p1.start()
p2.start()
p1.join()
p2.join()
np.save("../.npy/cross_cor_total_floor11.npy", cross_cor_floor11[:])
np.save("../.npy/cross_cor_total_beep.npy", cross_cor_beep[:])
#cross_cor_beep = np.load("cross_cor_total_beep.npy")
#cross_cor_floor11 = np.load("cross_cor_total_floor11.npy")
plot(cross_cor_beep[:], cross_cor_floor11[:])
plot(hits_beep[:], hits_floor11[:])
print("beep count : ", count_beeps.value)
print("floor11 count : ", count_floor11.value)
"""
if __name__ =="__main__":
thread1 = threading.Thread(target=computer_floor11)
thread2 = threading.Thread(target=computer_beep)
thread1.start()
thread2.start()
while True:
if Done1 and Done2 == True:
plot()
break
"""
|
<gh_stars>1-10
import numpy
import random
import spacy
from spacy import displacy
from spacy.util import minibatch, compounding
from spacy.training import Example
from spacy.scorer import Scorer
from sklearn.base import BaseEstimator
from utilities import load_cleaned_data, split_data, DROPOUT, ITERATIONS, draw_prf_graph, plot_training_loss_graph, \
draw_train_eval_compare_graph, save_list_to_pickle, load_list_from_pickle, LEARN_RATE
import pickle
numpy.random.seed(0)
def load_spacy():
nlp = spacy.load("en_core_web_sm")
# Getting the pipeline component
ner = nlp.get_pipe("ner")
return ner, nlp
class NerModel(BaseEstimator):
def __init__(self, ner, nlp, n_iter=64, dropout=0.1, lr=0.001, **model_hyper_parameters):
super().__init__()
self.ner = ner
self.nlp = nlp
self.n_iter = n_iter
self.dropout = dropout
self.lr = lr
def clear_model(self):
self.nlp = spacy.load("en_core_web_sm")
self.ner = self.nlp.get_pipe("ner")
def fit(self, train_data, eval_data):
""" train the Named Entity Recognition model
:param eval_data: evaluation data for testing after every epoch
:param train_data: processed training data
:return: evaluation fscore of the final epoch
"""
# Adding labels to the NER
for _, annotations in train_data:
for ent in annotations.get("entities"):
self.ner.add_label(ent[2])
# Disable pipeline components that are not changed
pipe_exceptions = ["ner"]
unaffected_pipes = [pipe for pipe in self.nlp.pipe_names if pipe not in pipe_exceptions]
scorer = Scorer()
# Store the PRF scores for every iteration
train_scores = []
eval_scores = []
# Store losses after every iteration
# Each loss is itself an average of losses within a single iteration
loss_list = []
# Train the NER model
with self.nlp.select_pipes(enable=pipe_exceptions, disable=unaffected_pipes):
# Create a list of Examples objects
examples = []
for text, annots in train_data:
examples.append(Example.from_dict(self.nlp.make_doc(text), annots))
# Create an optimizer for the pipeline component, and set lr
optimizer = self.nlp.create_optimizer()
# optimizer = nlp.initialize()
            # NOTE: Cannot use nlp.initialize (v3) (aka nlp.begin_training for v2) on pretrained models.
# Use nlp.create_optimizer for training on existing model (We used pretrained en_core_web_sm).
# ref: https://stackoverflow.com/a/66369163/6475377
optimizer.learn_rate = self.lr
for iteration in range(ITERATIONS):
# print("Iteration: ", iteration)
# shuffling examples before every iteration
random.shuffle(examples)
losses = {}
# optimizer = self.nlp.resume_training()
# batch up the examples using spaCy's minibatch
batches = minibatch(examples, size=compounding(4.0, 32.0, 1.001))
for count, batch in enumerate(batches):
self.nlp.update(
batch,
drop=DROPOUT, # dropout - make it harder to memorise data
losses=losses,
sgd=optimizer
)
loss = losses["ner"] / (count + 1)
print(f"Loss at epoch {iteration}: ", loss)
loss_list.append(loss)
# After training every iteration, calculate scores
example_list = []
for text, annot in train_data:
# Create a Doc of our text
# doc_gold_text = nlp.make_doc(text)
pred_value = self.nlp(text)
# reference = (Example.from_dict(doc_gold_text, annot))
gold_standard = {"entities": annot["entities"]}
# Store prediction and gold standard ref. for each sentence
# (to be used by Scorer.score)
example_list.append(Example.from_dict(pred_value, gold_standard))
# Generate per-entity scores by comparing predicted with gold-standard values
scores = scorer.score(examples=example_list)
train_scores.append(scores)
# Evaluate on eval_data
eval_scores.append(self.evaluate(test_data=eval_data))
self.nlp.to_disk("./saved_model")
draw_prf_graph(train_scores, keyword="train")
draw_prf_graph(eval_scores, keyword="eval")
draw_train_eval_compare_graph(train_scores, eval_scores)
plot_training_loss_graph(loss_list, "Losses with epochs")
# Just write the last epoch's eval fscore in txt file
eval_fscore = []
for i, eval_score in enumerate(eval_scores):
for key, cat in eval_score.items():
if key == "ents_f": eval_fscore.append(cat)
# with open("img/k_cv_scores.txt", 'a') as f:
# f.write("%s\n" % str(eval_fscore[-1]))
return eval_fscore[-1]
def evaluate(self, test_data):
""" evaluate the trained NER model
:param test_data: processed test data
:return: None
"""
# for example in test_data:
# print(example[0])
# doc = self.nlp(example[0])
# print("Entities", [(ent.text, ent.label_) for ent in doc.ents])
scorer = Scorer(self.nlp)
example_list = []
random.shuffle(test_data)
# Get the PRF scores for test_data
for text, annot in test_data:
# Create a Doc of our text
doc_gold_text = self.nlp.make_doc(text)
# Create gold-standard using the Doc of text
# and original (correct) entities
gold_standard = {"text": doc_gold_text, "entities": annot["entities"]}
# Get the predictions of current test data sentence
pred_value = self.nlp(text)
# Create and append to the example list (of type Example) the prediction
# as well as the gold standard (reference)
example_list.append(Example.from_dict(pred_value, gold_standard))
# Generate per-entity scores by comparing predicted with gold-standard values
scores = scorer.score(examples=example_list)
# print("All scores: ", scores)
#
# print("\nents_p (aka Precision): ", scores['ents_p'])
# print("ents_r (aka Recall): ", scores['ents_r'])
# print("ents_f (aka fscore): ", scores['ents_f'])
#
# print("\nINSTR: ", scores['ents_per_type']['INSTR'])
# print("QLTY: ", scores['ents_per_type']['QLTY'])
# print("EDGE: ", scores['ents_per_type']['EDGE'])
# print("\n")
return scores
def test(self, test_data):
"""
Perform final testing on unseen test_data
:param test_data: the unseen test data
:return:
"""
# TODO
def predict(self, X):
""" make inferences on unseen data
:param X: sentence to make inferences on
:return: None
"""
self.nlp = spacy.load("./saved_model")
doc = self.nlp(X)
print("Entities", [(ent.text, ent.label_) for ent in doc.ents])
# def predict_proba(self):
# '''
#
# :return:
# '''
def k_cross_validation(self, data, k=10):
print(f"{k}-fold Cross Validation")
random.shuffle(data)
num_groups = int(len(data) / k)
print(f"Size of each eval set: {num_groups}\n")
batches = minibatch(data, size=num_groups)
for count, batch in enumerate(batches):
# Discard the last batch if it has very few example sentences
if len(batch) > num_groups / 2:
print(f"Fold no.: {count + 1}")
train_data = [x for x in data if x not in batch]
test_data = batch
print(f"Train, Test :: {len(train_data)}, {len(test_data)}")
fscore = self.fit(train_data=train_data, eval_data=test_data)
print(f"fscore: {fscore}\n")
self.clear_model()
if __name__ == '__main__':
print("spaCy version: ", spacy.__version__)
ner, nlp = load_spacy()
# DATA = load_cleaned_data()
# TRAIN_DATA, EVAL_DATA, TEST_DATA = split_data(DATA)
# save_list_to_pickle(TRAIN_DATA, "TRAIN_DATA")
# save_list_to_pickle(EVAL_DATA, "EVAL_DATA")
# save_list_to_pickle(TEST_DATA, "TEST_DATA")
# Load pickled data list from data folder
TRAIN_DATA = load_list_from_pickle("TRAIN_DATA")
EVAL_DATA = load_list_from_pickle("EVAL_DATA")
TEST_DATA = load_list_from_pickle("TEST_DATA")
# Create the NER model class consisting of fit and evaluate methods.
ner_model = NerModel(ner, nlp, n_iter=ITERATIONS, dropout=DROPOUT, lr=LEARN_RATE)
# We're gonna use TEST (5% + 5% = 10%) for evaluation
TEST = EVAL_DATA + TEST_DATA
print("Size of total TRAIN data: ", len(TRAIN_DATA))
print("Size of total TEST (Evaluation) data: ", len(TEST))
ner_model.fit(TRAIN_DATA, TEST)
# Perform k-fold Cross Validation
# data = TRAIN_DATA + EVAL_DATA + TEST_DATA
# ner_model.k_cross_validation(data, k=10)
# sentence = 'I really like the distortion in this guitar'
# ner.predict(sentence)
|
from rdflib import Graph, Namespace
from rdflib.namespace import XSD, RDF, RDFS, OWL, SH
from rdflib.namespace import NamespaceManager
from rdflib.term import URIRef, Literal, BNode
import collections
import json
from rdflib.collection import Collection
import pkg_resources
from .generator import Generator
"""
current assumptions:
- rdfs:range / unionOf - this is only being used on either all Classes or all Datatypes - is this correct?
- restrictions on a given property do not require domain or range declaration to be present
"""
class schema(Generator):
def __init__(self, graph: Graph, prefixes=None):
self.G = graph
self.CLASSES = collections.OrderedDict()
self.PROPS = collections.OrderedDict()
self.REST = collections.OrderedDict()
self.datatypes = [
XSD.string,
XSD.boolean,
XSD.time,
XSD.date,
XSD.dateTime,
XSD.integer,
XSD.decimal,
XSD.nonNegativeInteger,
XSD.negativeInteger,
RDFS.Literal,
XSD.positiveInteger,
XSD.nonPositiveInteger,
]
path = "prefixes/namespaces.json"
filepath = pkg_resources.resource_filename(__name__, path)
self.namespaces = NamespaceManager(graph=Graph())
self.namespaces.bind("sh", SH)
with open(filepath, "r", encoding="utf-8") as fin:
for prefix, namespace in json.load(fin).items():
self.namespaces.bind(prefix, namespace)
if prefixes:
with open(prefixes, "r", encoding="utf-8") as fin:
for prefix, namespace in json.load(fin).items():
self.namespaces.bind(prefix, namespace)
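    # The optional `prefixes` argument is a path to a JSON file mapping prefix
    # strings to namespace IRIs; an illustrative (hypothetical) file:
    #   {"ex": "http://example.org/", "foaf": "http://xmlns.com/foaf/0.1/"}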
def extract_props(self):
properties = []
self.PROPS = {}
# gather properties
property_types = [
OWL.DatatypeProperty,
OWL.ObjectProperty,
OWL.AnnotationProperty,
OWL.TransitiveProperty,
OWL.FunctionalProperty,
RDF.Property,
OWL.InverseFunctionalProperty,
OWL.SymmetricProperty,
]
for types in property_types:
for s, p, o in self.G.triples((None, RDF.type, types)):
properties.append(s)
for p in sorted(properties):
self.PROPS[p] = {}
# gather property values
count = 0
for prop in self.PROPS.keys():
count = count + 1
s = URIRef(prop)
self.PROPS[prop]["domain"] = None
self.PROPS[prop]["domain_union"] = None
self.PROPS[prop]["range"] = None
self.PROPS[prop]["range_union"] = None
self.PROPS[prop]["range_value"] = None
self.PROPS[prop]["e_prop"] = []
self.PROPS[prop]["label"] = self.sh_label_gen(prop)
self.PROPS[prop]["shape_name"] = None
self.PROPS[prop]["definition"] = None
self.PROPS[prop]["type"] = []
for obje in self.G.objects(subject=prop, predicate=RDF.type):
self.PROPS[prop]["type"].append(obje)
for sub, pred, ob in self.G.triples((s, RDFS.domain, None)):
if type(ob) != BNode:
self.PROPS[prop]["domain"] = ob
else:
for sub1, pred1, ob1 in self.G.triples((ob, None, None)):
if pred1 == OWL.unionOf:
c = Collection(self.G, ob1)
self.PROPS[prop]["domain_union"] = c
for sub, pred, ob in self.G.triples((s, RDFS.range, None)):
if type(ob) != BNode:
self.PROPS[prop]["range"] = ob
else:
for sub1, pred1, ob1 in self.G.triples((ob, None, None)):
if pred1 == OWL.oneOf:
c = Collection(self.G, ob1)
self.PROPS[prop]["range_value"] = c
if pred1 == OWL.unionOf:
c = Collection(self.G, ob1)
self.PROPS[prop]["range_union"] = c
for equal in self.G.objects(subject=s, predicate=OWL.equivalentProperty):
self.PROPS[prop]["e_prop"].append(equal)
for defin in self.G.objects(subject=s, predicate=RDFS.comment):
self.PROPS[prop]["definition"] = defin
for name in self.G.objects(subject=s, predicate=RDFS.label):
self.PROPS[prop]["shape_name"] = name
if self.PROPS[prop]["shape_name"] is None:
self.PROPS[prop]["shape_name"] = self.sh_label_gen(prop)
def extract_classes(self):
classes = []
for s, p, o in self.G.triples((None, RDF.type, OWL.Class)):
if type(s) != BNode:
classes.append(s)
else:
pass
for s, p, o in self.G.triples((None, RDF.type, RDFS.Class)):
if type(s) != BNode:
classes.append(s)
else:
pass
for c in sorted(classes):
self.CLASSES[c] = {}
for c in self.CLASSES.keys():
self.CLASSES[c]["label"] = self.sh_label_gen(c)
self.CLASSES[c]["definition"] = None
s = URIRef(c)
for name in self.G.objects(subject=s, predicate=RDFS.label):
self.CLASSES[c]["shape_name"] = name
if self.CLASSES[c]["shape_name"] is None:
self.CLASSES[c]["shape_name"] = self.sh_label_gen(c)
for defin in self.G.objects(subject=s, predicate=RDFS.comment):
self.CLASSES[c]["definition"] = defin
def extract_restrictions(self):
"""
need equivalent classes
"""
restrictions = []
for s, p, o in self.G.triples((None, OWL.onProperty, None)):
restriction = s
for s in self.G.subjects(object=restriction, predicate=None):
if type(s) != BNode:
for o in self.G.objects(
subject=restriction, predicate=OWL.onProperty
):
if type(o) != BNode:
restrictions.append(restriction)
for r in sorted(restrictions):
self.REST[r] = {}
for rest in self.REST.keys():
for o in self.G.objects(subject=rest, predicate=OWL.onProperty):
self.REST[rest]["onProp"] = o
for s in self.G.subjects(object=rest, predicate=None):
self.REST[rest]["onClass"] = s
rest_type = []
rest_val = []
for s, p, o in self.G.triples((rest, OWL.maxCardinality, None)):
rest_type.append(p)
rest_val.append(o)
            for s, p, o in self.G.triples((rest, OWL.minCardinality, None)):
                rest_type.append(p)
                rest_val.append(o)
for s, p, o in self.G.triples((rest, OWL.cardinality, None)):
rest_type.append(p)
rest_val.append(o)
for s, p, o in self.G.triples((rest, OWL.allValuesFrom, None)):
rest_type.append(p)
rest_val.append(o)
for s, p, o in self.G.triples((rest, OWL.someValuesFrom, None)):
rest_type.append(p)
rest_val.append(o)
for s, p, o in self.G.triples((rest, OWL.hasValue, None)):
rest_type.append(p)
rest_val.append(o)
for s, p, o in self.G.triples((rest, OWL.minQualifiedCardinality, None)):
rest_type.append(p)
rest_val.append(o)
self.REST[rest]["type"] = rest_type[0]
self.REST[rest]["value"] = rest_val[0]
def gen_graph(self, namespace=None, implicit_class_target=False):
self.extract_props()
self.extract_classes()
self.extract_restrictions()
ng = Graph(namespace_manager=self.namespaces)
if namespace is not None:
if self.uri_validator(namespace[0]):
uri = namespace[0]
if namespace[0][-1] not in ["#", "/", "\\"]:
uri = namespace[0] + "/"
EX = Namespace(uri)
ng.bind(namespace[1], EX)
else:
print("##malformed URI, using http://example.org/ instead...")
EX = Namespace("http://www.example.org/")
ng.bind("ex", EX)
else:
EX = Namespace("http://www.example.org/")
ng.bind("ex", EX)
# add class Node Shapes
for c in self.CLASSES.keys():
subject = c
clabel = self.CLASSES[c]["label"]
if not implicit_class_target:
subject = EX[clabel]
ng.add((subject, SH.targetClass, c))
else:
ng.add((subject, RDF.type, RDFS.Class))
# Copy rdfs:subClassOf
for t in self.G.triples((subject, RDFS.subClassOf, None)):
ng.add(t)
ng.add((subject, RDF.type, SH.NodeShape))
# ng.add((EX[clabel], SH.name, Literal(self.CLASSES[c]['shape_name']+' Node shape')))
ng.add((subject, SH.nodeKind, SH.BlankNodeOrIRI))
if self.CLASSES[c]["definition"] is not None:
ng.add(
(subject, SH.description, Literal((self.CLASSES[c]["definition"])))
)
for p in self.PROPS.keys():
label = self.PROPS[p]["label"]
# ng.add((EX[label], SH.name, Literal(str(self.PROPS[p]['shape_name']) +' Property shape')))
# copy rdfs:label as property shape names
for o in self.G.objects(subject=p, predicate=RDFS.label):
ng.add((EX[label], SH.name, o))
ng.add((EX[label], RDF.type, SH.PropertyShape))
ng.add((EX[label], SH.path, p))
if OWL.FunctionalProperty in self.PROPS[p]["type"]:
ng.add((EX[label], SH.maxCount, Literal(1)))
if OWL.InverseFunctionalProperty in self.PROPS[p]["type"]:
ng.add((EX[label], SH.path, BNode(p + "inverse")))
ng.add((BNode(p + "inverse"), SH.inversePath, p))
ng.add((BNode(p + "inverse"), SH.maxCount, Literal(1)))
if self.PROPS[p]["range_value"] is not None:
rang = self.PROPS[p]["range_value"]
st = BNode()
ng.add((EX[label], SH["in"], st))
Collection(ng, st, [Literal(x) for x in rang])
if self.PROPS[p]["range"] is not None:
rang = self.PROPS[p]["range"]
if rang in self.datatypes:
ng.add((EX[label], SH.datatype, rang))
else:
ng.add((EX[label], SH["class"], rang))
if self.PROPS[p]["e_prop"] is not None:
for x in self.PROPS[p]["e_prop"]:
ng.add((EX[label], SH.equals, x))
# create range unions using sh:or
if self.PROPS[p]["range_union"] is not None:
rang = self.PROPS[p]["range_union"]
if set(rang).issubset(self.datatypes):
st = BNode(label + str(0) + "a")
ng.add((EX[label], EX["or"], st))
for x, y in enumerate(rang):
if x == 0:
ng.add((st, RDF.first, BNode(label + str(x) + "_name")))
ng.add((BNode(label + str(x) + "_name"), SH["datatype"], y))
ng.add((st, RDF.rest, BNode(label + str(x + 1) + "a")))
else:
ng.add(
(
BNode(label + str(x) + "a"),
RDF.first,
BNode(label + str(x) + "_name"),
)
)
ng.add((BNode(label + str(x) + "_name"), SH["datatype"], y))
if x + 1 == len(rang):
ng.add((BNode(label + str(x) + "a"), RDF.rest, RDF.nil))
else:
ng.add(
(
BNode(label + str(x) + "a"),
RDF.rest,
BNode(label + str(x + 1) + "a"),
)
)
else:
st = BNode(label + str(0) + "a")
ng.add((EX[label], EX["or"], st))
for x, y in enumerate(rang):
if x == 0:
ng.add((st, RDF.first, BNode(label + str(x) + "_name")))
ng.add((BNode(label + str(x) + "_name"), SH["class"], y))
ng.add((st, RDF.rest, BNode(label + str(x + 1) + "a")))
else:
ng.add(
(
BNode(label + str(x) + "a"),
RDF.first,
BNode(label + str(x) + "_name"),
)
)
ng.add((BNode(label + str(x) + "_name"), SH["class"], y))
if x + 1 == len(rang):
ng.add((BNode(label + str(x) + "a"), RDF.rest, RDF.nil))
else:
ng.add(
(
BNode(label + str(x) + "a"),
RDF.rest,
BNode(label + str(x + 1) + "a"),
)
)
if self.PROPS[p]["definition"] is not None:
ng.add(
(EX[label], SH.description, Literal((self.PROPS[p]["definition"])))
)
if self.PROPS[p]["domain"] is not None:
subject = self.PROPS[p]["domain"]
if subject in self.CLASSES.keys():
plabel = self.PROPS[p]["label"]
if implicit_class_target:
ng.add((subject, SH.property, EX[plabel]))
else:
dlabel = self.CLASSES[subject]["label"]
ng.add((EX[dlabel], SH.property, EX[plabel]))
if self.PROPS[p]["domain_union"] is not None:
for d in self.PROPS[p]["domain_union"]:
if d in self.CLASSES.keys():
plabel = self.PROPS[p]["label"]
if implicit_class_target:
ng.add((d, SH.property, EX[plabel]))
else:
dlabel = self.CLASSES[d]["label"]
ng.add((EX[dlabel], SH.property, EX[plabel]))
for r in self.REST.keys():
blank = BNode()
# if self.REST[r]['onProp'] == p: #and self.REST[r]['onClass'] == self.PROPS[p]['domain']:
ng.add((EX[self.sh_label_gen(self.REST[r]["onClass"])], SH.property, blank))
ng.add((blank, SH.path, self.REST[r]["onProp"]))
if self.REST[r]["type"] in [OWL.cardinality]:
ng.add(
(
blank,
SH.minCount,
Literal(self.REST[r]["value"], datatype=XSD.integer),
)
)
ng.add(
(
blank,
SH.maxCount,
Literal(self.REST[r]["value"], datatype=XSD.integer),
)
)
elif self.REST[r]["type"] in [OWL.minCardinality]:
ng.add(
(
blank,
SH.minCount,
Literal(self.REST[r]["value"], datatype=XSD.integer),
)
)
elif self.REST[r]["type"] in [OWL.maxCardinality]:
ng.add(
(
blank,
SH.maxCount,
Literal(self.REST[r]["value"], datatype=XSD.integer),
)
)
elif self.REST[r]["type"] in [OWL.allValuesFrom]:
if type(self.REST[r]["value"]) == BNode:
for sub1, pred1, ob1 in self.G.triples(
(self.REST[r]["value"], None, None)
):
if pred1 == OWL.unionOf:
union_c = Collection(self.G, ob1)
dummy = r + self.REST[r]["value"]
nest = BNode(dummy + str(0) + "a")
ng.add((blank, SH["or"], nest))
for x, y in enumerate(union_c):
if x == 0:
ng.add(
(
nest,
RDF.first,
BNode(dummy + str(x) + "_name"),
)
)
ng.add(
(
BNode(dummy + str(x) + "_name"),
SH["class"],
y,
)
)
ng.add(
(
nest,
RDF.rest,
BNode(dummy + str(x + 1) + "a"),
)
)
else:
ng.add(
(
BNode(dummy + str(x) + "a"),
RDF.first,
BNode(dummy + str(x) + "_name"),
)
)
ng.add(
(
BNode(dummy + str(x) + "_name"),
SH["class"],
y,
)
)
                                    if x + 1 == len(union_c):
ng.add(
(BNode(dummy + str(x) + "a"), RDF.rest, RDF.nil)
)
else:
ng.add(
(
BNode(dummy + str(x) + "a"),
RDF.rest,
BNode(dummy + str(x + 1) + "a"),
)
)
elif type(self.REST[r]["value"]) in self.datatypes:
ng.add((blank, SH["datatype"], self.REST[r]["value"]))
else:
ng.add((blank, SH["class"], self.REST[r]["value"]))
elif self.REST[r]["type"] in [OWL.someValuesFrom]:
ng.add(
(blank, SH["qualifiedMinCount"], Literal(1, datatype=XSD.integer))
)
ng.add((blank, SH["qualifiedValueShape"], BNode("count" + str(r))))
ng.add((BNode("count" + str(r)), SH["class"], self.REST[r]["value"]))
else:
pass
return ng
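# Minimal usage sketch (hypothetical file names; assumes the Generator base class
# supplies sh_label_gen() and uri_validator() as used above):
#   g = Graph().parse("ontology.ttl", format="turtle")
#   shapes = schema(g).gen_graph(namespace=("http://example.org/shapes", "exsh"))
#   print(shapes.serialize(format="turtle"))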
|
# Source: Ikomia-dev/ikomia-oakd
from pathlib import Path
import depthai as dai
import numpy as np
import cv2
import sys
# Importing from parent folder
sys.path.insert(0, str(Path(__file__).parent.parent.parent)) # move to parent path
from utils.compute import to_planar, get_landmark_3d, get_vector_intersection
from utils.visualize import LandmarksCubeVisualizer, LandmarksDepthVisualizer
from utils.draw import drawROI, displayFPS
from utils.OakRunner import OakRunner
frame_width, frame_height = 300, 300
visualizeLandmarksCube = True # visualization mode: cube with the centered face, or the cameras' fields of view
# Function called before entering inside the process loop, useful to set few arguments
def init(runner, device):
runner.custom_arguments["required_confidence"] = 0.2
colors = [(255,255,255), (255,255,255), (0,255,255), (255,0,255), (255,0,255)]
pairs = [(0,2), (1,2), (3,4)]
if(visualizeLandmarksCube):
runner.custom_arguments["visualizer"] = LandmarksCubeVisualizer(window_width=frame_width, window_height=frame_height, cameras_positions=[runner.left_camera_location, runner.right_camera_location], size=1, colors=colors, pairs=pairs)
else:
runner.custom_arguments["visualizer"] = LandmarksDepthVisualizer(frame_width*2, frame_height, [runner.left_camera_location, runner.right_camera_location], colors=colors, pairs=pairs)
runner.custom_arguments["visualizer"].start()
# Function called inside the process loop, useful to apply any treatment
def process(runner):
spatial_vectors = dict()
for side in ["left", "right"]:
spatial_vectors[side] = []
frame = runner.output_queues[side+"_cam"].get().getCvFrame()
faces_data = runner.output_queues["nn_"+side+"_faces"].get().getFirstLayerFp16()
if(faces_data[2] > runner.custom_arguments["required_confidence"]):
# Get pixels instead of percentages
xmin = int(faces_data[3]*frame_width) if faces_data[3]>0 else 0
ymin = int(faces_data[4]*frame_height) if faces_data[4]>0 else 0
xmax = int(faces_data[5]*frame_width) if faces_data[5]<1 else frame_width
ymax = int(faces_data[6]*frame_height) if faces_data[6]<1 else frame_height
# Compute the face to get landmarks
land_data = dai.NNData()
planar_cropped_face = to_planar(frame[ymin:ymax, xmin:xmax], (48, 48))
land_data.setLayer("0", planar_cropped_face)
runner.input_queues["nn_"+side+"_landmarks"].send(land_data)
output = runner.output_queues["nn_"+side+"_landmarks"].get().getFirstLayerFp16()
landmarks = np.array(output).reshape(5,2)
# print(landmarks)
# Draw detections
drawROI(frame, (xmin,ymin), (xmax,ymax), color=(0,200,230))
for x,y in landmarks:
cv2.circle(frame, (int(x*(xmax-xmin))+xmin,int(y*(ymax-ymin))+ymin), 2, (0,0,255))
# Set spatial vectors
spatial_landmarks = [get_landmark_3d((x,y)) for x,y in landmarks]
camera_location = runner.left_camera_location if(side == "left") else runner.right_camera_location
for i in range(5):
spatial_vectors[side].append([spatial_landmarks[i][j] - camera_location[j] for j in range(3)])
spatial_vectors[side] = np.array(spatial_vectors[side]) # convert list to numpy array
displayFPS(frame, runner.getFPS())
cv2.imshow(side, frame)
    # Determine depth by intersecting the left/right camera rays to locate landmarks in space
landmark_spatial_locations = []
if(len(spatial_vectors["left"])>4 and len(spatial_vectors["right"])>4):
for i in range(5):
landmark_spatial_locations.append(get_vector_intersection(spatial_vectors["left"][i], runner.left_camera_location, spatial_vectors["right"][i], runner.right_camera_location))
runner.custom_arguments["visualizer"].setLandmarks(landmark_spatial_locations)
runner = OakRunner()
for side in ["left", "right"]:
if(side == "left"):
runner.setLeftCamera(frame_width, frame_height)
face_manip = runner.getLeftCameraManip()
else:
runner.setRightCamera(frame_width, frame_height)
face_manip = runner.getRightCameraManip()
    face_manip.initialConfig.setFrameType(dai.RawImgFrame.Type.BGR888p) # Switch to BGR frame type (content is still grayscale)
runner.addNeuralNetworkModel(stream_name="nn_"+side+"_faces", path=str(Path(__file__).parent) + "/../../../_models/face_detection.blob", handle_mono_depth=False)
face_manip.out.link(runner.neural_networks["nn_"+side+"_faces"].input) # link transformed video stream to neural network entry
runner.addNeuralNetworkModel(stream_name="nn_"+side+"_landmarks", path=str(Path(__file__).parent) + "/../../../_models/tiny_face_landmarks.blob", handle_mono_depth=False)
runner.run(process=process, init=init)
|
#!/usr/bin/env python3
# Source: guyfleeman/gem5-website
# This is a job launch script for running gem5 microbenchmark experiments
import os
import sys
from uuid import UUID
from gem5art.artifact.artifact import Artifact
from gem5art.run import gem5Run
from gem5art.tasks.tasks import run_gem5_instance
"""packer = Artifact.registerArtifact(
command = '''wget https://releases.hashicorp.com/packer/1.4.3/packer_1.4.3_linux_amd64.zip;
unzip packer_1.4.3_linux_amd64.zip;
''',
typ = 'binary',
name = 'packer',
path = 'disk-image/packer',
cwd = 'disk-image',
documentation = 'Program to build disk images. Downloaded sometime in August from hashicorp.'
)"""
experiments_repo = Artifact.registerArtifact(
command = 'git clone https://github.com/darchr/microbenchmark-experiments.git',
typ = 'git repo',
name = 'microbenchmark-tests',
path = './',
cwd = '../',
documentation = 'main experiments repo to run microbenchmarks with gem5'
)
gem5_repo = Artifact.registerArtifact(
command = 'git clone https://github.com/darchr/gem5',
typ = 'git repo',
name = 'gem5',
path = 'gem5/',
cwd = './',
documentation = 'git repo with gem5 master branch on Sep 23rd'
)
m5_binary = Artifact.registerArtifact(
command = 'make -f Makefile.x86',
typ = 'binary',
name = 'm5',
path = 'gem5/util/m5/m5',
cwd = 'gem5/util/m5',
inputs = [gem5_repo,],
documentation = 'm5 utility'
)
"""disk_image = Artifact.registerArtifact(
command = 'packer build template.json',
typ = 'disk image',
name = 'boot-disk',
cwd = 'disk-image',
path = 'disk-image/boot-exit/boot-exit-image/boot-exit',
inputs = [packer, experiments_repo, m5_binary,],
documentation = 'Ubuntu with m5 binary installed and root auto login'
)"""
gem5_binary = Artifact.registerArtifact(
command = 'scons build/X86/gem5.opt',
typ = 'gem5 binary',
name = 'gem5',
cwd = 'gem5/',
path = 'gem5/build/X86/gem5.opt',
inputs = [gem5_repo,],
documentation = 'default gem5 x86'
)
"""linux_repo = Artifact.registerArtifact(
command = '''git clone https://github.com/torvalds/linux.git;
mv linux linux-stable''',
typ = 'git repo',
name = 'linux-stable',
path = 'linux-stable/',
cwd = './',
documentation = 'linux kernel source code repo from Sep 23rd'
)"""
"""linuxes = ['5.2.3', '4.14.134', '4.9.186', '4.4.186']
linux_binaries = {
version: Artifact.registerArtifact(
name = f'vmlinux-{version}',
typ = 'kernel',
path = f'linux-stable/vmlinux-{version}',
cwd = 'linux-stable/',
command = f'''git checkout v{version};
cp ../linux-configs/config.{version} .config;
make -j8;
cp vmlinux vmlinux-{version};
'''.format(v='5.2.3'),
inputs = [experiments_repo, linux_repo,],
documentation = f"Kernel binary for {version} with simple "
"config file",
)
for version in linuxes
}"""
if __name__ == "__main__":
boot_types = ['init', 'systemd']
num_cpus = ['1', '2', '4', '8']
cpu_types = ['Simple']
    # mem_types = ['classic']  # or 'ruby'
    mem_types = ['Slow']  # alternatives: 'SingleCycle', 'Inf'
    bm_list = ['MC', 'MCS', 'CCa', 'CCe', 'CCh', 'CCh_st']
    # Architecture to run with.
    arch = 'X86'  # or 'ARM'
    if arch == 'X86':
        bm = 'bench.X86'
    elif arch == 'ARM':
        bm = 'bench.ARM'
path = 'microbench'
for bms in bm_list:
for cpu in cpu_types:
for mem in mem_types:
run = gem5Run.createSERun(
'gem5/build/X86/gem5.opt',
'configs-boot-tests/run_config1.py',
gem5_binary, gem5_repo, experiments_repo,
cpu, mem, os.path.join(path,bms,bm))
#run_gem5_instance.apply_async((run,))
run.run()
|
# File: src/commons/big_query/copy_job_async/copy_job/copy_job_request.py
from src.commons.big_query.big_query_table import BigQueryTable
from src.commons.big_query.copy_job_async.post_copy_action_request import \
PostCopyActionRequest
class CopyJobRequest(object):
def __init__(self, task_name_suffix, copy_job_type_id,
source_big_query_table, target_big_query_table,
create_disposition, write_disposition,
retry_count=0, post_copy_action_request=None,
):
self.__task_name_suffix = task_name_suffix
self.__copy_job_type_id = copy_job_type_id
self.__source_big_query_table = source_big_query_table
self.__target_big_query_table = target_big_query_table
self.__retry_count = retry_count
self.__post_copy_action_request = post_copy_action_request
self.__create_disposition = create_disposition
self.__write_disposition = write_disposition
@property
def task_name_suffix(self):
return self.__task_name_suffix
@property
def copy_job_type_id(self):
return self.__copy_job_type_id
@property
def retry_count(self):
return self.__retry_count
@property
def source_big_query_table(self):
return self.__source_big_query_table
@property
def target_big_query_table(self):
return self.__target_big_query_table
@property
def create_disposition(self):
return self.__create_disposition
@property
def write_disposition(self):
return self.__write_disposition
@property
def post_copy_action_request(self):
return self.__post_copy_action_request
def __str__(self):
return 'task_name_suffix: {}, copyJobTypeId: {}, sourceTable: {}, ' \
'targetTable: {}, retryCount: {}, postCopyActionRequest: {}, ' \
'create_disposition: {}, write_disposition: {}'.format(
self.__task_name_suffix, self.__copy_job_type_id,
self.__source_big_query_table, self.__target_big_query_table,
self.__retry_count, self.__post_copy_action_request,
self.__create_disposition, self.__write_disposition)
def __repr__(self):
return self.__str__()
def __eq__(self, o):
return type(o) is CopyJobRequest \
and self.__task_name_suffix == o.__task_name_suffix \
and self.__copy_job_type_id == o.__copy_job_type_id \
and self.__source_big_query_table == o.__source_big_query_table \
and self.__target_big_query_table == o.__target_big_query_table \
and self.__retry_count == o.__retry_count \
and self.__post_copy_action_request == o.__post_copy_action_request \
and self.__create_disposition == o.__create_disposition \
and self.__write_disposition == o.__write_disposition
def __ne__(self, other):
return not (self == other)
def to_json(self):
return dict(task_name_suffix=self.task_name_suffix,
copy_job_type_id=self.copy_job_type_id,
source_big_query_table=self.__source_big_query_table,
target_big_query_table=self.__target_big_query_table,
retry_count=self.__retry_count,
post_copy_action_request=self.__post_copy_action_request,
create_disposition=self.__create_disposition,
write_disposition=self.__write_disposition)
@classmethod
def from_json(cls, json):
source_big_query_table = BigQueryTable.from_json(json["source_big_query_table"])
target_big_query_table = BigQueryTable.from_json(json["target_big_query_table"])
post_copy_action_request = PostCopyActionRequest.from_json(json["post_copy_action_request"])
return CopyJobRequest(
task_name_suffix=json["task_name_suffix"],
copy_job_type_id=json["copy_job_type_id"],
source_big_query_table=source_big_query_table,
target_big_query_table=target_big_query_table,
create_disposition=json["create_disposition"],
write_disposition=json["write_disposition"],
retry_count=json["retry_count"],
post_copy_action_request=post_copy_action_request)
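# Shape of the dict expected by from_json above (values are illustrative only; the
# nested payloads are whatever BigQueryTable.from_json and
# PostCopyActionRequest.from_json accept):
#   {"task_name_suffix": "...", "copy_job_type_id": "...",
#    "source_big_query_table": {...}, "target_big_query_table": {...},
#    "create_disposition": "CREATE_IF_NEEDED", "write_disposition": "WRITE_EMPTY",
#    "retry_count": 0, "post_copy_action_request": {...}}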
|
#!/usr/bin/env python
# Source: orionzhou/biolib - formats/vcf.ase.py
# -*- coding: utf-8 -*-
import os
import os.path as op
import sys
import logging
from jcvi.apps.base import sh, mkdir
from jcvi.formats.base import must_open
def main(args):
pre = args.fo
if op.isfile(f"{pre}.bcf"):
if not args.force:
logging.warning(f"{pre}.bcf already exists - skip")
sys.exit(0)
else:
logging.warning(f"{pre}.bcf already exists - overwriting")
sh(f"rm {pre}.*")
cmd = ''
if args.s2 == '' or args.s2 == 'self':
samples = args.s1
cmd = (
f'bcftools view --trim-alt-alleles -s {samples} -Ou {args.vcf} |',
            f'bcftools filter -i \'N_PASS(GQ>={args.gq} & GT!="mis")=1 && FORMAT/GT[0]="AA"\' |',
            'bcftools annotate -x QUAL,INFO,^FORMAT/GT |',
            'bioawk -t \'{if(!/^#/){$10=substr($10,1,1)"|"substr($10,1,1)}; print}\' |',
f'bcftools reheader -h {args.header} > {pre}.vcf'
)
else:
samples = f"{args.s1},{args.s2}"
ref_rule = ''
if args.s1.endswith("B73"):
ref_rule = '&& FORMAT/GT[0]="RR"'
elif args.s2.endswith("B73"):
ref_rule = '&& FORMAT/GT[1]="RR"'
cmd = (
f'bcftools view --trim-alt-alleles -s {samples} -Ou {args.vcf} |',
f'bcftools filter -i \'N_PASS(GQ>={args.gq} & GT!="mis")=2 && N_PASS(GT="RR")=1 && N_PASS(GT="AA")=1 {ref_rule}\' |',
'bcftools annotate -x QUAL,INFO,^FORMAT/GT |',
'bioawk -t \'{if(!/^#/){$10=substr($10,1,1)"|"substr($11,1,1)}; $11=""; print}\' |',
f'bcftools reheader -h {args.header} > {pre}.vcf'
)
sh(''.join(cmd))
sh(f"bgzip -f {pre}.vcf")
sh(f"bcftools index -t {pre}.vcf.gz")
sh(f"bcftools view {pre}.vcf.gz -Ob -o {pre}.bcf")
sh(f"bcftools index {pre}.bcf")
sh(f"bcftools stats -s - {pre}.bcf > {pre}.txt")
sh(f"bcftools query -f '%CHROM\\t%POS\t[%TGT]\\n' {pre}.bcf | sed 's/|/,/' > {pre}.tsv")
if __name__ == "__main__":
import argparse
ps = argparse.ArgumentParser(
formatter_class = argparse.ArgumentDefaultsHelpFormatter,
description = "create variant files for nextflow/ASE pipeline"
)
ps.add_argument('s1', help = 'sample 1')
ps.add_argument('fo', help = 'output file prefix')
ps.add_argument('--s2', default='', help = 'sample 2')
ps.add_argument('--vcf', default=f"{os.environ['s3']}/zhoup-nfo/zm.vt10/04.snp.vcf.gz", help='input (joint) VCF')
ps.add_argument('--gq', default=30, help='minimum Genotype Quality score')
ps.add_argument('--header', default=f"{os.environ['maize']}/assets/dummy_header.vcf", help='vcf header')
ps.add_argument('-f', '--force', action='store_true', help='overwrite existing files?')
args = ps.parse_args()
main(args)
|
#!/usr/bin/env python
# Copyright 2016 Samsung Electronics Co., Ltd.
# Copyright 2016 University of Szeged.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import shutil
import subprocess
import sys
from os import makedirs, uname
from settings import *
BUILD_DIR = path.join(PROJECT_DIR, 'build')
def default_toolchain():
(sysname, _, _, _, machine) = uname()
toolchain = path.join(PROJECT_DIR, 'cmake', 'toolchain_%s_%s.cmake' % (sysname.lower(), machine.lower()))
return toolchain if path.isfile(toolchain) else None
def add_build_args(parser):
parser.add_argument('--verbose', '-v', action='store_const', const='ON', default='OFF', help='Increase verbosity')
parser.add_argument('--unittests', action='store_const', const='ON', default='OFF', help='Build unittests too')
parser.add_argument('--clean', action='store_true', default=False, help='Clean build')
parser.add_argument('--builddir', action='store', default=BUILD_DIR, help='Specify output directory (default: %(default)s)')
parser.add_argument('--strip', choices=['on', 'off'], default='on', help='Strip release binary (default: %(default)s)')
parser.add_argument('--all-in-one', choices=['on', 'off'], default='off', help='All-in-one build (default: %(default)s)')
parser.add_argument('--debug', action='store_const', const='Debug', default='Release', dest='build_type', help='Debug build')
parser.add_argument('--lto', choices=['on', 'off'], default='on', help='Enable link-time optimizations (default: %(default)s)')
parser.add_argument('--profile', choices=['full', 'minimal'], default='full', help='Specify the profile (default: %(default)s)')
parser.add_argument('--error-messages', choices=['on', 'off'], default='off', help='Enable error messages (default: %(default)s)')
parser.add_argument('--valgrind', choices=['on', 'off'], default='off', help='Enable Valgrind support (default: %(default)s)')
parser.add_argument('--valgrind-freya', choices=['on', 'off'], default='off', help='Enable Valgrind-Freya support (default: %(default)s)')
parser.add_argument('--show-opcodes', choices=['on', 'off'], default='off', help='Enable parser byte-code dumps (default: %(default)s)')
parser.add_argument('--show-regexp-opcodes', choices=['on', 'off'], default='off', help='Enable regexp byte-code dumps (default: %(default)s)')
parser.add_argument('--mem-stats', choices=['on', 'off'], default='off', help='Enable memory statistics (default: %(default)s)')
parser.add_argument('--mem-stress-test', choices=['on', 'off'], default='off', help='Enable mem-stress test (default: %(default)s)')
parser.add_argument('--snapshot-save', choices=['on', 'off'], default='on', help='Allow to save snapshot files (default: %(default)s)')
parser.add_argument('--snapshot-exec', choices=['on', 'off'], default='on', help='Allow to execute snapshot files (default: %(default)s)')
parser.add_argument('--cmake-param', action='append', default=[], help='Add custom arguments to CMake')
parser.add_argument('--compile-flag', action='append', default=[], help='Add custom compile flag')
parser.add_argument('--linker-flag', action='append', default=[], help='Add custom linker flag')
parser.add_argument('--toolchain', action='store', default=default_toolchain(), help='Add toolchain file (default: %(default)s)')
parser.add_argument('--jerry-libc', choices=['on', 'off'], default='on', help='Use jerry-libc (default: %(default)s)')
parser.add_argument('--compiler-default-libc', choices=['on', 'off'], default='off', help='Use compiler-default libc (default: %(default)s)')
parser.add_argument('--jerry-core', choices=['on', 'off'], default='on', help='Use jerry-core (default: %(default)s)')
parser.add_argument('--jerry-libm', choices=['on', 'off'], default='on', help='Use jerry-libm (default: %(default)s)')
parser.add_argument('--jerry-cmdline', choices=['on', 'off'], default='on', help='Use jerry commandline tool (default: %(default)s)')
def get_arguments():
parser = argparse.ArgumentParser()
add_build_args(parser)
return parser.parse_args()
def generate_build_options(arguments):
build_options = []
build_options.append('-DJERRY_LIBC=%s' % arguments.jerry_libc.upper())
build_options.append('-DJERRY_CORE=%s' % arguments.jerry_core.upper())
build_options.append('-DJERRY_LIBM=%s' % arguments.jerry_libm.upper())
build_options.append('-DJERRY_CMDLINE=%s' % arguments.jerry_cmdline.upper())
build_options.append('-DCOMPILER_DEFAULT_LIBC=%s' % arguments.compiler_default_libc.upper())
build_options.append('-DCMAKE_VERBOSE_MAKEFILE=%s' % arguments.verbose)
build_options.append('-DCMAKE_BUILD_TYPE=%s' % arguments.build_type)
build_options.append('-DFEATURE_PROFILE=%s' % arguments.profile)
build_options.append('-DFEATURE_ERROR_MESSAGES=%s' % arguments.error_messages.upper())
build_options.append('-DFEATURE_VALGRIND=%s' % arguments.valgrind.upper())
build_options.append('-DFEATURE_VALGRIND_FREYA=%s' % arguments.valgrind_freya.upper())
build_options.append('-DFEATURE_PARSER_DUMP=%s' % arguments.show_opcodes.upper())
build_options.append('-DFEATURE_REGEXP_DUMP=%s' % arguments.show_regexp_opcodes.upper())
build_options.append('-DFEATURE_MEM_STATS=%s' % arguments.mem_stats.upper())
build_options.append('-DFEATURE_MEM_STRESS_TEST=%s' % arguments.mem_stress_test.upper())
build_options.append('-DFEATURE_SNAPSHOT_SAVE=%s' % arguments.snapshot_save.upper())
build_options.append('-DFEATURE_SNAPSHOT_EXEC=%s' % arguments.snapshot_exec.upper())
build_options.append('-DENABLE_ALL_IN_ONE=%s' % arguments.all_in_one.upper())
build_options.append('-DENABLE_LTO=%s' % arguments.lto.upper())
build_options.append('-DENABLE_STRIP=%s' % arguments.strip.upper())
build_options.append('-DUNITTESTS=%s' % arguments.unittests)
build_options.extend(arguments.cmake_param)
build_options.append('-DEXTERNAL_COMPILE_FLAGS=' + ' '.join(arguments.compile_flag))
build_options.append('-DEXTERNAL_LINKER_FLAGS=' + ' '.join(arguments.linker_flag))
if arguments.toolchain:
build_options.append('-DCMAKE_TOOLCHAIN_FILE=%s' % arguments.toolchain)
return build_options
def configure_output_dir(arguments):
global BUILD_DIR
if os.path.isabs(arguments.builddir):
BUILD_DIR = arguments.builddir
else:
BUILD_DIR = path.join(PROJECT_DIR, arguments.builddir)
if arguments.clean and os.path.exists(BUILD_DIR):
shutil.rmtree(BUILD_DIR)
if not os.path.exists(BUILD_DIR):
makedirs(BUILD_DIR)
def configure_build(arguments):
configure_output_dir(arguments)
build_options = generate_build_options(arguments)
cmake_cmd = ['cmake', '-B' + BUILD_DIR, '-H' + PROJECT_DIR]
cmake_cmd.extend(build_options)
return subprocess.call(cmake_cmd)
def build_jerry(arguments):
    return subprocess.call(['make', '--no-print-directory', '-j', '-C', BUILD_DIR])
def print_result(ret):
print('=' * 30)
if ret:
print('Build failed with exit code: %s' % (ret))
else:
print('Build succeeded!')
print('=' * 30)
def main():
arguments = get_arguments()
ret = configure_build(arguments)
if not ret:
ret = build_jerry(arguments)
print_result(ret)
sys.exit(ret)
if __name__ == "__main__":
main()
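# Example invocation (illustrative path and flags; all options shown are defined
# in add_build_args above):
#   python tools/build.py --debug --lto=off --builddir=build-debug --toolchain=cmake/toolchain_linux_x86_64.cmake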
|
# File: tests/test_stack.py
import os
from subprocess import STDOUT
from tempfile import TemporaryDirectory
from unittest import mock
from unittest.mock import call
import kubernetes
import pytest
import yaml
from k8s_app_abstraction.models.pod_controllers import (
Daemonset,
Deployment,
Statefulset,
)
from k8s_app_abstraction.models.stack import Stack
def test_basic():
"""Instantiate a stack with an empty definition"""
definition = ""
stack = Stack.new("my-stack", definition)
assert stack.name == "my-stack"
assert stack.dict()["name"] == "my-stack"
def test_stack_api():
stack = Stack(
name="my-stack",
deployments=[Deployment(name="a-deploy", image="bar")],
daemonsets=[Daemonset(name="a-daemonset", image="bar")],
statefulsets=[Statefulset(name="a-statefulset", image="bar")],
)
assert len(stack.deployments) == 1
assert len(stack.daemonsets) == 1
assert len(stack.statefulsets) == 1
assert len(stack.cronjobs) == 0
assert stack.deployments[0].name == "a-deploy"
assert stack.deployments[0].image == "bar"
assert stack.deployments[0].replicas == 1
assert stack.daemonsets[0].name == "a-daemonset"
assert stack.daemonsets[0].image == "bar"
assert stack.statefulsets[0].name == "a-statefulset"
assert stack.statefulsets[0].image == "bar"
assert stack.statefulsets[0].replicas == 1
def test_stack_to_yaml():
stack = Stack(
name="my-stack",
deployments=[Deployment(name="a-deploy", image="bar", replicas=2)],
daemonsets=[Daemonset(name="a-daemonset", image="bar")],
statefulsets=[Statefulset(name="a-statefulset", image="bar")],
)
stack_yaml = stack.to_yaml()
generated = {
f"{item['kind']}/{item['metadata']['name']}": item
for item in yaml.safe_load_all(stack_yaml)
}
deploy = generated["Deployment/my-stack-a-deploy"]
assert deploy["apiVersion"] == "apps/v1"
assert deploy["spec"]["replicas"] == 2
daemonset = generated["DaemonSet/my-stack-a-daemonset"]
assert daemonset["apiVersion"] == "apps/v1"
def test_stack_to_chart():
stack = Stack(
name="my-stack",
deployments=[Deployment(name="a-deploy", image="bar", replicas=2)],
daemonsets=[Daemonset(name="a-daemonset", image="bar")],
statefulsets=[Statefulset(name="a-statefulset", image="bar")],
)
files = dict(stack.chart.generate_files())
assert list(files.keys()) == [
"Chart.yaml",
"templates/deployment-a-deploy.yml",
"templates/daemonset-a-daemonset.yml",
"templates/statefulset-a-statefulset.yml",
]
def test_stack_dump():
stack = Stack(
name="my-stack",
deployments=[Deployment(name="a-deploy", image="bar", replicas=2)],
daemonsets=[Daemonset(name="a-daemonset", image="bar")],
statefulsets=[Statefulset(name="a-statefulset", image="bar")],
)
with TemporaryDirectory() as location:
stack.chart.dump(location)
assert set(os.listdir(location)) == {"templates", "Chart.yaml"}
assert set(os.listdir(os.path.join(location, "templates"))) == {
"deployment-a-deploy.yml",
"daemonset-a-daemonset.yml",
"statefulset-a-statefulset.yml",
}
@mock.patch("k8s_app_abstraction.models.stack.config")
@mock.patch("k8s_app_abstraction.models.stack.client")
def test_stack_install(mock_client, mock_config):
# Force compatibility with mocked kubernetes api and library version
kver = kubernetes.__version__.split(".")[0]
mock_client.VersionApi().get_code().git_version = f"v1.{kver}.0"
mock_client.VersionApi().get_code().minor = kver
stack = Stack(
name="my-stack",
deployments=[Deployment(name="a-deploy", image="bar", replicas=2)],
daemonsets=[Daemonset(name="a-daemonset", image="bar")],
statefulsets=[Statefulset(name="a-statefulset", image="bar")],
)
with TemporaryDirectory() as location:
with mock.patch(
"k8s_app_abstraction.models.stack.check_output"
) as check_output:
stack.chart.rollout(location)
check_output.assert_called_once_with(
["helm", "upgrade", "--install", "my-stack", location], stderr=STDOUT
)
|
"""
Django settings for api project.
Generated by 'django-admin startproject' using Django 1.8.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
import os
from urlparse import urlparse
from website import settings as osf_settings
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
DATABASES = {
'default': {
'CONN_MAX_AGE': 0,
'ENGINE': 'osf.db.backends.postgresql', # django.db.backends.postgresql
'NAME': os.environ.get('OSF_DB_NAME', 'osf'),
'USER': os.environ.get('OSF_DB_USER', 'postgres'),
'PASSWORD': os.environ.get('OSF_DB_PASSWORD', ''),
'HOST': os.environ.get('OSF_DB_HOST', '127.0.0.1'),
'PORT': os.environ.get('OSF_DB_PORT', '5432'),
'ATOMIC_REQUESTS': True,
'TEST': {
'SERIALIZE': False,
},
},
}
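# The connection above is driven entirely by environment variables; an
# illustrative local setup (hypothetical values):
#   export OSF_DB_NAME=osf OSF_DB_USER=postgres OSF_DB_PASSWORD='' OSF_DB_HOST=127.0.0.1 OSF_DB_PORT=5432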
DATABASE_ROUTERS = ['osf.db.router.PostgreSQLFailoverRouter', ]
PASSWORD_HASHERS = [
'django.contrib.auth.hashers.BCryptSHA256PasswordHasher',
'django.contrib.auth.hashers.BCryptPasswordHasher',
]
AUTH_USER_MODEL = 'osf.OSFUser'
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = osf_settings.SECRET_KEY
AUTHENTICATION_BACKENDS = (
'api.base.authentication.backends.ODMBackend',
'guardian.backends.ObjectPermissionBackend',
)
# SECURITY WARNING: don't run with debug turned on in production!
DEV_MODE = osf_settings.DEV_MODE
DEBUG = osf_settings.DEBUG_MODE
DEBUG_PROPAGATE_EXCEPTIONS = True
# session:
SESSION_COOKIE_NAME = 'api'
SESSION_COOKIE_SECURE = osf_settings.SECURE_MODE
SESSION_COOKIE_HTTPONLY = osf_settings.SESSION_COOKIE_HTTPONLY
# csrf:
CSRF_COOKIE_NAME = 'api-csrf'
CSRF_COOKIE_SECURE = osf_settings.SECURE_MODE
CSRF_COOKIE_HTTPONLY = osf_settings.SECURE_MODE
ALLOWED_HOSTS = [
'.osf.io',
]
# Application definition
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.messages',
'django.contrib.sessions',
'django.contrib.staticfiles',
'django.contrib.admin',
# 3rd party
'django_celery_beat',
'django_celery_results',
'rest_framework',
'corsheaders',
'raven.contrib.django.raven_compat',
'django_extensions',
'guardian',
'storages',
'waffle',
'elasticsearch_metrics',
# OSF
'osf',
# Addons
'addons.osfstorage',
'addons.bitbucket',
'addons.box',
'addons.dataverse',
'addons.dropbox',
'addons.figshare',
'addons.forward',
'addons.github',
'addons.gitlab',
'addons.googledrive',
'addons.mendeley',
'addons.onedrive',
'addons.owncloud',
'addons.s3',
'addons.twofactor',
'addons.wiki',
'addons.zotero',
)
# local development using https
if osf_settings.SECURE_MODE and DEBUG:
INSTALLED_APPS += ('sslserver',)
# TODO: Are there more granular ways to configure reporting specifically related to the API?
RAVEN_CONFIG = {
'tags': {'App': 'api'},
'dsn': osf_settings.SENTRY_DSN,
'release': osf_settings.VERSION,
}
BULK_SETTINGS = {
'DEFAULT_BULK_LIMIT': 100,
}
MAX_PAGE_SIZE = 100
REST_FRAMEWORK = {
'PAGE_SIZE': 10,
'DEFAULT_RENDERER_CLASSES': (
'api.base.renderers.JSONAPIRenderer',
'api.base.renderers.JSONRendererWithESISupport',
'api.base.renderers.BrowsableAPIRendererNoForms',
),
'DEFAULT_PARSER_CLASSES': (
'api.base.parsers.JSONAPIParser',
'api.base.parsers.JSONAPIParserForRegularJSON',
'rest_framework.parsers.FormParser',
'rest_framework.parsers.MultiPartParser',
),
'EXCEPTION_HANDLER': 'api.base.exceptions.json_api_exception_handler',
'DEFAULT_CONTENT_NEGOTIATION_CLASS': 'api.base.content_negotiation.JSONAPIContentNegotiation',
'DEFAULT_VERSIONING_CLASS': 'api.base.versioning.BaseVersioning',
'DEFAULT_VERSION': '2.0',
'ALLOWED_VERSIONS': (
'2.0',
'2.1',
'2.2',
'2.3',
'2.4',
'2.5',
'2.6',
'2.7',
'2.8',
'2.9',
'2.10',
'2.11',
'2.12',
'2.13',
'2.14',
'2.15',
'2.16',
'2.17',
),
'DEFAULT_FILTER_BACKENDS': ('api.base.filters.OSFOrderingFilter',),
'DEFAULT_PAGINATION_CLASS': 'api.base.pagination.JSONAPIPagination',
'ORDERING_PARAM': 'sort',
'DEFAULT_AUTHENTICATION_CLASSES': (
# Custom auth classes
'api.base.authentication.drf.OSFBasicAuthentication',
'api.base.authentication.drf.OSFSessionAuthentication',
'api.base.authentication.drf.OSFCASAuthentication',
),
'DEFAULT_THROTTLE_CLASSES': (
'rest_framework.throttling.UserRateThrottle',
'api.base.throttling.NonCookieAuthThrottle',
),
'DEFAULT_THROTTLE_RATES': {
'user': '10000/day',
'non-cookie-auth': '100/hour',
'add-contributor': '10/second',
'create-guid': '1000/hour',
'root-anon-throttle': '1000/hour',
'test-user': '2/hour',
'test-anon': '1/hour',
'send-email': '2/minute',
},
}
# Settings related to CORS Headers addon: allow API to receive authenticated requests from OSF
# The CORS plugin only matches on the "netloc" part of the URL, so as a workaround we add that to the whitelist
CORS_ORIGIN_ALLOW_ALL = False
CORS_ORIGIN_WHITELIST = (
urlparse(osf_settings.DOMAIN).netloc,
osf_settings.DOMAIN,
)
# This needs to remain True to allow cross origin requests that are in CORS_ORIGIN_WHITELIST to
# use cookies.
CORS_ALLOW_CREDENTIALS = True
# Set dynamically on app init
ORIGINS_WHITELIST = ()
MIDDLEWARE = (
'api.base.middleware.DjangoGlobalMiddleware',
'api.base.middleware.CeleryTaskMiddleware',
'api.base.middleware.PostcommitTaskMiddleware',
# A profiling middleware. ONLY FOR DEV USE
# Uncomment and add "prof" to url params to recieve a profile for that url
# 'api.base.middleware.ProfileMiddleware',
# 'django.contrib.sessions.middleware.SessionMiddleware',
'api.base.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
# 'django.contrib.auth.middleware.AuthenticationMiddleware',
# 'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
# 'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
'waffle.middleware.WaffleMiddleware',
)
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
},
]
ROOT_URLCONF = 'api.base.urls'
WSGI_APPLICATION = 'api.base.wsgi.application'
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# https://django-storages.readthedocs.io/en/latest/backends/gcloud.html
if os.environ.get('GOOGLE_APPLICATION_CREDENTIALS', False):
# Required to interact with Google Cloud Storage
DEFAULT_FILE_STORAGE = 'api.base.storage.RequestlessURLGoogleCloudStorage'
GS_BUCKET_NAME = os.environ.get('GS_BUCKET_NAME', 'cos-osf-stage-cdn-us')
GS_FILE_OVERWRITE = os.environ.get('GS_FILE_OVERWRITE', False)
elif osf_settings.DEV_MODE or osf_settings.DEBUG_MODE:
DEFAULT_FILE_STORAGE = 'api.base.storage.DevFileSystemStorage'
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, 'static/vendor')
API_BASE = 'v2/'
API_PRIVATE_BASE = '_/'
STATIC_URL = '/static/'
NODE_CATEGORY_MAP = osf_settings.NODE_CATEGORY_MAP
DEBUG_TRANSACTIONS = DEBUG
JWT_SECRET = 'osf_api_cas_login_jwt_secret_32b'
JWE_SECRET = 'osf_api_cas_login_jwe_secret_32b'
ENABLE_VARNISH = osf_settings.ENABLE_VARNISH
ENABLE_ESI = osf_settings.ENABLE_ESI
VARNISH_SERVERS = osf_settings.VARNISH_SERVERS
ESI_MEDIA_TYPES = osf_settings.ESI_MEDIA_TYPES
ADDONS_FOLDER_CONFIGURABLE = ['box', 'dropbox', 's3', 'googledrive', 'figshare', 'owncloud', 'onedrive']
ADDONS_OAUTH = ADDONS_FOLDER_CONFIGURABLE + ['dataverse', 'github', 'bitbucket', 'gitlab', 'mendeley', 'zotero', 'forward']
BYPASS_THROTTLE_TOKEN = '<PASSWORD>'
OSF_SHELL_USER_IMPORTS = None
# Settings for use in the admin
OSF_URL = 'https://osf.io'
SELECT_FOR_UPDATE_ENABLED = True
# Disable anonymous user permissions in django-guardian
ANONYMOUS_USER_NAME = None
# If set to True, automated tests with extra queries will fail.
NPLUSONE_RAISE = False
# salt used for generating hashids
HASHIDS_SALT = 'pinkhimalayan'
# django-elasticsearch-metrics
ELASTICSEARCH_DSL = {
'default': {
'hosts': os.environ.get('ELASTIC6_URI', '127.0.0.1:9201'),
'retry_on_timeout': True,
},
}
# Store yearly indices for time-series metrics
ELASTICSEARCH_METRICS_DATE_FORMAT = '%Y'
WAFFLE_CACHE_NAME = 'waffle_cache'
STORAGE_USAGE_CACHE_NAME = 'storage_usage'
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
STORAGE_USAGE_CACHE_NAME: {
'BACKEND': 'django.core.cache.backends.db.DatabaseCache',
'LOCATION': 'osf_cache_table',
},
WAFFLE_CACHE_NAME: {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
}
|
# Source: masumhabib/quest - tests/mini_regression/tmfsc_4Contact.py
# Simulation parameters
import numpy as np
import os
from math import sqrt
from math import pi
from math import exp as exponential
# Geometry
# =============================================================================
def setupGeom(self):
"""Sets up the geometry."""
factor = 1
ly = 3.9*1000.0/factor # width
lx = 7.9*1000.0/factor # length
wc = 0.3*1000.0/factor # contact width
wc2 = wc
lc = 0.05*1000.0/factor # contact length
xdc = 1.95*1000.0/factor # horizontal distance between two contacts
ydc = ly / 2.0
dg = 50/factor # gate split
xg = 5.0*1000.0/factor # position of gate from left
splitLen = 80.0 # gate Split nm
juncShift = 0.0 # shift of physical junction
x1 = -lx/2; y1 = -ly/2
x2 = -xdc; y2 = y1;
x3 = x2+2*xdc; y3 = y2;
x4 = lx/2; y4 = y3;
x5 = x4; y5 = y4+ydc;
x6 = x5; y6 = y5+ydc;
x7 = xdc; y7 = y6;
x8 = x7-2*xdc; y8 = y7;
x9 = -lx/2; y9 = y8;
x10 = x9; y10 = y9-ydc;
self.dc = xdc*2
self.addPoint(x1, y1)
self.addPoint(x2-wc2/2, y2)
self.addPoint(x2-wc2/2, y2-lc)
self.addPoint(x2+wc2/2, y2-lc)
self.addPoint(x2+wc2/2, y2)
self.addPoint(x3-wc/2, y3)
self.addPoint(x3-wc/2, y3-lc)
self.addPoint(x3+wc/2, y3-lc)
self.addPoint(x3+wc/2, y3)
self.addPoint(x4, y4)
self.addPoint(x6, y6)
self.addPoint(x7+wc/2, y7)
self.addPoint(x7+wc/2, y7+lc)
self.addPoint(x7-wc/2, y7+lc)
self.addPoint(x7-wc/2, y7)
self.addPoint(x8+wc/2, y8)
self.addPoint(x8+wc/2, y8+lc)
self.addPoint(x8-wc/2, y8+lc)
self.addPoint(x8-wc/2, y8)
self.addPoint(x9, y9)
npts = self.addPoint(x1, y1) + 1
self.mprint("\n-D- Number of vertices in geometry", npts)
#add reflecting edges
for ip in range(npts):
self.addEdge(ip, (ip+1) % npts);
#add contacts
for ip in range(1, npts):
# Absorbing Side Contact
if ip == 2 or ip == 6 or ip == 12 or ip == 16 or ip ==9 or ip == 19:
# Without Absorbing Side Contact
#if ip == 2 or ip == 6 or ip == 12 or ip == 16:
self.setEdgeType(ip, EDGE_ABSORB)
# Add the transmitting edge
ipt1 = self.addPoint(0+juncShift, -ly/2)
ipt2 = self.addPoint(0+juncShift, ly/2)
self.addEdge(ipt1, ipt2, EDGE_TRANSMIT, splitLen)
# add gates
self.addGate((-lx/2-lc,-ly/2-lc), (0+juncShift, -ly/2-lc),
(0+juncShift, ly/2+lc), (-lx/2-lc, ly/2+lc), 0.0)
self.addGate((0+juncShift,-ly/2-lc), (lx/2+lc, -ly/2-lc),
(lx/2+lc, ly/2+lc), (0+juncShift, ly/2+lc), -1.0)
HallBar.setupGeom = setupGeom
# Biasing scheme
# =============================================================================
def setupBias(self, Ef, B, V1, V2, m = 1, singleResonance = True,
Efmax= None, NEf = 1, Bmax = None, NB = 1, V1max = None, NV1 = 1, V2max=None, NV2=1):
""" Sets up the bias points """
EEf = []
BB = []
VV1 = []
VV2 = []
if NEf == 1:
EEf = np.array([Ef])
else:
assert Efmax is not None, "Efmax is None"
EEf = np.linspace(Ef, Efmax, NEf)
if NB == 1:
BB = np.array([B])
else:
assert Bmax is not None, "Vmax is None"
BB = np.linspace(B, Bmax, NB)
if singleResonance == True:
B0 = abs(self.EF-V1)/vf/nm/self.dc*2.0
BB = m*np.array([B0])
if NV1 == 1:
VV1 = np.array([V1])
else:
assert V1max is not None, "V1max is None"
VV1 = np.linspace(V1, V1max, NV1)
if NV2 == 1:
VV2 = np.array([V2])
else:
assert V2max is not None, "V2max is None"
VV2 = np.linspace(V2, V2max, NV2)
self.bias.clear()
self.bias.append(EEf, 'Ef')
self.bias.append(BB, 'B')
self.bias.append(VV1, 'V')
self.bias.append(VV2, 'V')
HallBar.setupBias = setupBias
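# Illustrative sweep (hypothetical values): hold Ef and the gate biases fixed and
# sweep the magnetic field over 20 points instead of using the single-resonance B0:
#   hallbar.setupBias(Ef=0.0, B=0.0, V1=0.0928, V2=0.0928, Bmax=0.1, NB=20, singleResonance=False)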
hallbar.clear()
## Simulation Parameters
# output directory
hallbar.outDir = 'dummyOut/'
hallbar.DebugLevel = 0
hallbar.verbosity = 1
hallbar.OccupationTol = 1E-4
# Minimum number of electron injection
hallbar.MinmNoInjection = 100
# If EdgeRefRghnsEff is set to any other value than 1.0,
# it will activate crude model
hallbar.EdgeRefRghnsEff = 1.0
# If InjecAngleSpread is not set, cosine distribution will be used
hallbar.InjecAngleSpread = pi/15
hallbar.setupGeom()
hallbar.mprint("\n-D- OccupationTol", hallbar.OccupationTol)
hallbar.mprint("\n-D- Injection spread angle:", hallbar.sim.InjecAngleSpread)
## Biasing Parameters
V1 = 0.0928
V2min = 0.0928
V2max = 0.0928
Bmin = 0.0476
Bmax = 0.0476
hallbar.mprint("\n-D- V1:", V1)
hallbar.mprint("\n-D- V2min,max:", V2min, V2max)
hallbar.mprint("\n-D- Bmin,max:", Bmin, Bmax)
hallbar.setupBias(Ef=0.0, B=Bmin, V1=V1, V2=V2min, Efmax= 2.0, NEf = 10, Bmax=Bmax, V2max=V2max, NB=1, NV2=1, singleResonance=False)
# for setting custom gate color
# gateColors = np.array( [[1, 1, 1], [0.816, 0.812, 0.792]] )
# hallbar.drawGeom(gateBorder = 1.0, gateColors = gateColors)
hallbar.printBiasList()
hallbar.drawGeom(gateBorder = 1.0)
hallbar.enableCyclotronCalc()
# Single Bias point calculation - for seeing trajectory
hallbar.calcSingleTrans(saveTrajectory=True, contId=0)
hallbar.drawTrajectory(color='#343B9C', marker='', width=0.3)
# All Bias point calculation - no trajectory
#hallbar.calcAllTrans(dl=10, nth=20, contId=%CONTID%)
# saving trajectory image
hallbar.saveTraj("trajectory.png", dpi=600)
# Save to a file all bias point or single bias point data
#hallbar.save()
|
import os
from datetime import datetime
import time
import argparse
import torch
from torch.utils.data import DataLoader
from torchvision import transforms
import torch.distributed as dist
from torch.utils.data.distributed import DistributedSampler
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.tensorboard import SummaryWriter
from torch.cuda.amp import autocast as autocast, GradScaler
from byol import BYOL
from dataset import KeratitisUnlabeled
from utils import get_model
from functools import reduce
parser = argparse.ArgumentParser()
parser.add_argument('--model', type=str, default='densenet121')
parser.add_argument('--save_dir', type=str, default='./byol_save')
parser.add_argument('--batch_size', type=int, default=128)
parser.add_argument('--num_gpu', type=int, default=4)
parser.add_argument('--local_rank', type=int)
parser.add_argument('--use_amp', type=bool, default=True)
args = parser.parse_args()
def save_dict_to_text(mydict, path):
stream = [str(k)+': '+str(v)+'\n' for k,v in mydict.items()]
stream = reduce(lambda x, y: x+y, stream)
with open(path, 'w') as f:
f.writelines(stream)
def main():
torch.distributed.init_process_group(backend='nccl')
local_rank = dist.get_rank()
torch.cuda.set_device(local_rank)
# To make sure each process has a different seed
print(f"loacal_rank {local_rank}: cuda_seed: {torch.cuda.initial_seed()} seed: {torch.initial_seed()}")
transform=transforms.Compose([transforms.Resize([224, 224], interpolation=2), transforms.ToTensor()])
train_dataset = KeratitisUnlabeled(transform=transform)
num_steps = len(train_dataset) // args.batch_size
if local_rank == 0:
writer = SummaryWriter(log_dir=args.save_dir)
print(f"Dataset length: {len(train_dataset)}")
print(f"Total steps per epoch: {num_steps}")
train_sampler = DistributedSampler(train_dataset)
dataloader = DataLoader(train_dataset, batch_size=args.batch_size//args.num_gpu,
num_workers=8, sampler=train_sampler)
model = get_model(args.model, 4).cuda(args.local_rank)
learner = BYOL(
model,
image_size = 224,
hidden_layer = 'avgpool',
projection_size = 256, # the projection size
projection_hidden_size = 4096, # the hidden dimension of the MLP for both the projection and prediction
moving_average_decay = 0.99, # the moving average decay factor for the target encoder, already set at what paper recommends
use_momentum = False)
learner = DDP(learner, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True)
#opt = torch.optim.SGD(learner.parameters(), lr=0.02* args.batch_size / 256, momentum=0.9, nesterov=True)
#scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=1000)
optimizer = torch.optim.AdamW(learner.parameters(), lr=1e-4 * args.batch_size / 256, weight_decay=1e-6)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1000, gamma=0.5)
if args.use_amp:
scaler = GradScaler()
if local_rank == 0:
print('AMP activated')
if local_rank == 0: # Save ckpt_0 for debug
print(f"Model type = {args.model}")
save_dict_to_text(vars(args), f'{args.save_dir}/args.text')
torch.save(learner.module.online_encoder.net.state_dict(), f'{args.save_dir}/byol_{0}.pth')
for epoch in range(1, 1000+1):
train_sampler.set_epoch(epoch)
if local_rank == 0:
start_time = time.time()
for step, data in enumerate(dataloader):
images = data[0]
images = images.cuda(non_blocking=True)
optimizer.zero_grad()
if args.use_amp:
with autocast():
loss = learner(images)
loss_value = loss.detach().cpu()
scaler.scale(loss).backward()
#total_norm = torch.nn.utils.clip_grad_norm_(learner.parameters(), 2*65536)
scaler.step(optimizer)
scaler.update()
else:
loss = learner(images)
loss_value = loss.detach().cpu()
loss.backward()
#torch.nn.utils.clip_grad_norm_(learner.parameters(), 2)
optimizer.step()
if local_rank == 0:
writer.add_scalar("Loss", loss_value, step + epoch*num_steps)
writer.add_scalar("LR", optimizer.param_groups[0]['lr'], step + epoch*num_steps)
if step%50 == 0:
print("%s:" % str(datetime.now())[:19] , end=" ")
print(f"Epoch {epoch}, Step {step}, Loss: {loss_value}, ", end="")
print("time_used: {:.3}".format(time.time()-start_time))
start_time = time.time()
        # Step the LR schedule on every rank; only rank 0 writes checkpoints
        scheduler.step()
        if local_rank == 0 and epoch % 10 == 0:
            torch.save(learner.module.online_encoder.net.state_dict(), f'{args.save_dir}/byol_{epoch}.pth')
if __name__ == '__main__':
main()
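# Example launch (hypothetical script name; the --local_rank argument implies the
# torch.distributed launcher, one process per GPU):
#   python -m torch.distributed.launch --nproc_per_node=4 byol_train.py --model densenet121 --batch_size 128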
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple
from google.api_core import gapic_v1 # type: ignore
from google.api_core import grpc_helpers_async # type: ignore
from google.api_core import operations_v1 # type: ignore
from google import auth # type: ignore
from google.auth import credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from grpc.experimental import aio # type: ignore
from google.cloud.datastore_admin_v1.types import datastore_admin
from google.cloud.datastore_admin_v1.types import index
from google.longrunning import operations_pb2 as operations # type: ignore
from .base import DatastoreAdminTransport, DEFAULT_CLIENT_INFO
from .grpc import DatastoreAdminGrpcTransport
class DatastoreAdminGrpcAsyncIOTransport(DatastoreAdminTransport):
"""gRPC AsyncIO backend transport for DatastoreAdmin.
Google Cloud Datastore Admin API
The Datastore Admin API provides several admin services for
Cloud Datastore.
-----------------------------------------------------------------------------
## Concepts
Project, namespace, kind, and entity as defined in the Google
Cloud Datastore API.
Operation: An Operation represents work being performed in the
background.
EntityFilter: Allows specifying a subset of entities in a
project. This is specified as a combination of kinds and
namespaces (either or both of which may be all).
-----------------------------------------------------------------------------
## Services
# Export/Import
The Export/Import service provides the ability to copy all or a
subset of entities to/from Google Cloud Storage.
Exported data may be imported into Cloud Datastore for any
Google Cloud Platform project. It is not restricted to the
export source project. It is possible to export from one project
and then import into another.
Exported data can also be loaded into Google BigQuery for
analysis.
Exports and imports are performed asynchronously. An Operation
resource is created for each export/import. The state (including
any errors encountered) of the export/import may be queried via
the Operation resource.
# Index
The index service manages Cloud Datastore composite indexes.
Index creation and deletion are performed asynchronously. An
Operation resource is created for each such asynchronous
operation. The state of the operation (including any errors
encountered) may be queried via the Operation resource.
# Operation
The Operations collection provides a record of actions performed
for the specified project (including any operations in
progress). Operations are not created directly but through calls
on other collections or resources.
An operation that is not yet done may be cancelled. The request
to cancel is asynchronous and the operation may continue to run
for some time after the request to cancel is made.
An operation that is done may be deleted so that it is no longer
listed as part of the Operation collection.
ListOperations returns all pending operations, but not completed
operations.
Operations are created by service DatastoreAdmin,
but are accessed via service google.longrunning.Operations.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_grpc_channel: aio.Channel
_stubs: Dict[str, Callable] = {}
@classmethod
def create_channel(
cls,
host: str = "datastore.googleapis.com",
credentials: credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
) -> aio.Channel:
"""Create and return a gRPC AsyncIO channel object.
Args:
            host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
aio.Channel: A gRPC AsyncIO channel object.
"""
scopes = scopes or cls.AUTH_SCOPES
return grpc_helpers_async.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
**kwargs,
)
def __init__(
self,
*,
host: str = "datastore.googleapis.com",
credentials: credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
channel: aio.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
quota_project_id=None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]): The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
channel (Optional[aio.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
                ``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for grpc channel. It is ignored if ``channel`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._ssl_channel_credentials = ssl_channel_credentials
if channel:
# Sanity check: Ensure that channel and credentials are not both
# provided.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
elif api_mtls_endpoint:
warnings.warn(
"api_mtls_endpoint and client_cert_source are deprecated",
DeprecationWarning,
)
host = (
api_mtls_endpoint
if ":" in api_mtls_endpoint
else api_mtls_endpoint + ":443"
)
if credentials is None:
credentials, _ = auth.default(
scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
)
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
ssl_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
ssl_credentials = SslCredentials().ssl_credentials
# create a new channel. The provided one is ignored.
self._grpc_channel = type(self).create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
ssl_credentials=ssl_credentials,
scopes=scopes or self.AUTH_SCOPES,
quota_project_id=quota_project_id,
)
self._ssl_channel_credentials = ssl_credentials
else:
host = host if ":" in host else host + ":443"
if credentials is None:
credentials, _ = auth.default(
scopes=self.AUTH_SCOPES, quota_project_id=quota_project_id
)
# create a new channel. The provided one is ignored.
self._grpc_channel = type(self).create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
ssl_credentials=ssl_channel_credentials,
scopes=scopes or self.AUTH_SCOPES,
quota_project_id=quota_project_id,
)
# Run the base constructor.
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes or self.AUTH_SCOPES,
quota_project_id=quota_project_id,
client_info=client_info,
)
self._stubs = {}
@property
def grpc_channel(self) -> aio.Channel:
"""Create the channel designed to connect to this service.
This property caches on the instance; repeated calls return
the same channel.
"""
# Return the channel from cache.
return self._grpc_channel
@property
def operations_client(self) -> operations_v1.OperationsAsyncClient:
"""Create the client designed to process long-running operations.
This property caches on the instance; repeated calls return the same
client.
"""
# Sanity check: Only create a new client if we do not already have one.
if "operations_client" not in self.__dict__:
self.__dict__["operations_client"] = operations_v1.OperationsAsyncClient(
self.grpc_channel
)
# Return the client from cache.
return self.__dict__["operations_client"]
@property
def export_entities(
self,
) -> Callable[
[datastore_admin.ExportEntitiesRequest], Awaitable[operations.Operation]
]:
r"""Return a callable for the export entities method over gRPC.
Exports a copy of all or a subset of entities from
Google Cloud Datastore to another storage system, such
as Google Cloud Storage. Recent updates to entities may
not be reflected in the export. The export occurs in the
background and its progress can be monitored and managed
via the Operation resource that is created. The output
of an export may only be used once the associated
operation is done. If an export operation is cancelled
before completion it may leave partial data behind in
Google Cloud Storage.
Returns:
Callable[[~.ExportEntitiesRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "export_entities" not in self._stubs:
self._stubs["export_entities"] = self.grpc_channel.unary_unary(
"/google.datastore.admin.v1.DatastoreAdmin/ExportEntities",
request_serializer=datastore_admin.ExportEntitiesRequest.serialize,
response_deserializer=operations.Operation.FromString,
)
return self._stubs["export_entities"]
@property
def import_entities(
self,
) -> Callable[
[datastore_admin.ImportEntitiesRequest], Awaitable[operations.Operation]
]:
r"""Return a callable for the import entities method over gRPC.
Imports entities into Google Cloud Datastore.
Existing entities with the same key are overwritten. The
import occurs in the background and its progress can be
monitored and managed via the Operation resource that is
created. If an ImportEntities operation is cancelled, it
is possible that a subset of the data has already been
imported to Cloud Datastore.
Returns:
Callable[[~.ImportEntitiesRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "import_entities" not in self._stubs:
self._stubs["import_entities"] = self.grpc_channel.unary_unary(
"/google.datastore.admin.v1.DatastoreAdmin/ImportEntities",
request_serializer=datastore_admin.ImportEntitiesRequest.serialize,
response_deserializer=operations.Operation.FromString,
)
return self._stubs["import_entities"]
@property
def get_index(
self,
) -> Callable[[datastore_admin.GetIndexRequest], Awaitable[index.Index]]:
r"""Return a callable for the get index method over gRPC.
Gets an index.
Returns:
Callable[[~.GetIndexRequest],
Awaitable[~.Index]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_index" not in self._stubs:
self._stubs["get_index"] = self.grpc_channel.unary_unary(
"/google.datastore.admin.v1.DatastoreAdmin/GetIndex",
request_serializer=datastore_admin.GetIndexRequest.serialize,
response_deserializer=index.Index.deserialize,
)
return self._stubs["get_index"]
@property
def list_indexes(
self,
) -> Callable[
[datastore_admin.ListIndexesRequest],
Awaitable[datastore_admin.ListIndexesResponse],
]:
r"""Return a callable for the list indexes method over gRPC.
Lists the indexes that match the specified filters.
Datastore uses an eventually consistent query to fetch
the list of indexes and may occasionally return stale
results.
Returns:
Callable[[~.ListIndexesRequest],
Awaitable[~.ListIndexesResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_indexes" not in self._stubs:
self._stubs["list_indexes"] = self.grpc_channel.unary_unary(
"/google.datastore.admin.v1.DatastoreAdmin/ListIndexes",
request_serializer=datastore_admin.ListIndexesRequest.serialize,
response_deserializer=datastore_admin.ListIndexesResponse.deserialize,
)
return self._stubs["list_indexes"]
__all__ = ("DatastoreAdminGrpcAsyncIOTransport",)
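# --- Hedged usage sketch (added for illustration, not part of the generated module) ---
# Driving the transport directly: when no channel is passed, __init__ builds a
# gRPC AsyncIO channel via create_channel(), falling back to Application Default
# Credentials. Each RPC property returns a lazily created unary-unary stub that
# can be awaited with the corresponding request. The project and index ids below
# are illustrative assumptions.
async def _get_index_sketch(project_id: str, index_id: str) -> index.Index:
    transport = DatastoreAdminGrpcAsyncIOTransport()  # builds its own channel with ADC
    request = datastore_admin.GetIndexRequest(project_id=project_id, index_id=index_id)
    return await transport.get_index(request)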
|
<gh_stars>0
import torch, pickle, os
from torch.utils.data import Dataset
use_cuda = torch.cuda.is_available()
import numpy as np
import pandas as pd
from sklearn.utils import shuffle as sk_shuffle
import random as rd
class pretrainDataset:
def __init__(self, file_path, training=True):
with open(file_path, 'rb') as file:
self.data = pickle.load(file)['data'].astype(np.float32)
self.data = self.data[:int(self.data.shape[0] * 0.8)] if training else self.data[int(self.data.shape[0] * 0.8):]
def __len__(self):
return self.data.shape[0]
def __getitem__(self, index):
return (self.data.getrow(index).toarray().reshape(-1), self.data.getrow(index).toarray().reshape(-1))
class finetuneDataset(Dataset):
"""Face Landmarks dataset."""
def __init__(self, ratings_path, movies_data_path, folder='train', random_state=1):
assert os.path.isfile(ratings_path)
# Loading data
self.folder_path = '.'.join(ratings_path.split('.')[:-1])
self.chunkSize = 1e5
self.offset = 0
self.shuffle = False
if random_state is not None:
rd.seed(random_state)
self.shuffle = True
with open(movies_data_path, 'rb') as file:
raw_data = pickle.load(file)
self.movies_data = raw_data['data']
self.movies_ids = dict([(id, i) for i, id in enumerate(raw_data['ids'])])
if not os.path.isdir(self.folder_path):
os.mkdir(self.folder_path)
train_folder_path = self.folder_path + '/train/'
valid_folder_path = self.folder_path + '/valid/'
test_folder_path = self.folder_path + '/test/'
os.mkdir(train_folder_path)
os.mkdir(valid_folder_path)
os.mkdir(test_folder_path)
ratings_df = pd.read_csv(ratings_path)
ratings_df = sk_shuffle(ratings_df).reset_index(drop=True)
input_size = self.movies_data.shape[1]
nb_users = ratings_df['userId'].max()
with open(self.folder_path + '/params.pkl', 'wb') as pickler:
pickle.dump((int(input_size), int(nb_users), 46000), pickler)
while self.chunkSize > ratings_df.shape[0] * 0.8:
self.chunkSize /= 2
self.currentPart = 0
            dataToSave = []
            subfolder_path = train_folder_path  # default split; switched at the valid/test boundaries below
for i, line in ratings_df.iterrows():
if int(line['movieId']) in self.movies_ids:
# Train set
if i < ratings_df.shape[0] * 0.8:
subfolder_path = train_folder_path
# Valid set
elif i < ratings_df.shape[0] * 0.9:
if subfolder_path == train_folder_path:
if len(dataToSave) > 0:
with open(subfolder_path + 'chunk_' + str(self.currentPart) + '.npy', 'wb') as new_file:
pickle.dump(dataToSave, new_file)
dataToSave = []
self.currentPart = 0
subfolder_path = valid_folder_path
# Test set
else:
if subfolder_path == valid_folder_path:
if len(dataToSave) > 0:
with open(subfolder_path + 'chunk_' + str(self.currentPart) + '.npy', 'wb') as new_file:
pickle.dump(dataToSave, new_file)
dataToSave = []
self.currentPart = 0
subfolder_path = test_folder_path
if len(dataToSave) < self.chunkSize:
dataToSave.append((int(line['userId']), int(line['movieId']), float(line['rating'])))
else:
with open(subfolder_path + 'chunk_' + str(self.currentPart) + '.npy', 'wb') as new_file:
pickle.dump(dataToSave, new_file)
dataToSave = []
self.currentPart += 1
if len(dataToSave) != 0:
print(len(dataToSave))
with open(subfolder_path + 'chunk_' + str(self.currentPart) + '.npy', 'wb') as new_file:
pickle.dump(dataToSave, new_file)
self.folder_path += '/' + folder
files = os.listdir(self.folder_path)
files_nb = sorted([int(name.split('_')[1].split('.')[0]) for name in files])
        if len(files) > 1:
            with open(self.folder_path + '/chunk_' + str(files_nb[0]) + '.npy', 'rb') as pickler:
                self.chunkSize = int(len(pickle.load(pickler)))
            with open(self.folder_path + '/chunk_' + str(files_nb[-1]) + '.npy', 'rb') as pickler:
                self.length = int(len(pickle.load(pickler)) + (len(files) - 1) * self.chunkSize)
        else:
            # A single chunk file: its size is both the chunk size and the dataset length.
            with open(self.folder_path + '/chunk_' + str(files_nb[0]) + '.npy', 'rb') as pickler:
                self.chunkSize = self.length = int(len(pickle.load(pickler)))
self.currentPart = 0
with open(self.folder_path + '/chunk_' + str(self.currentPart) + '.npy', 'rb') as pickler:
self.currentData = pickle.load(pickler)
if self.shuffle:
indexes = list(range(len(self.currentData)))
rd.shuffle(indexes)
self.currentData = [self.currentData[i] for i in indexes]
def __len__(self):
return self.length
def __getitem__(self, idx):
idx += self.offset
# go to next file
if idx // self.chunkSize != self.currentPart:
self.currentPart = int(idx // self.chunkSize)
with open(self.folder_path + '/chunk_' + str(self.currentPart) + '.npy', 'rb') as pickler:
self.currentData = pickle.load(pickler)
if self.shuffle:
indexes = list(range(len(self.currentData)))
rd.shuffle(indexes)
self.currentData = [self.currentData[i] for i in indexes]
i = int(idx - self.currentPart * self.chunkSize)
userID = self.currentData[i][0] - 1
movieID = self.movies_ids[self.currentData[i][1]]
movieData = self.movies_data[movieID].toarray().astype(np.float32).reshape(-1)
return ((movieData, userID, movieID), np.float32(self.currentData[i][2]))
def resume(self, offset):
self.offset = offset
self.currentPart = int(offset // self.chunkSize)
with open(self.folder_path + '/chunk_' + str(self.currentPart) + '.npy', 'rb') as pickler:
self.currentData = pickle.load(pickler)
if self.shuffle:
indexes = list(range(len(self.currentData)))
rd.shuffle(indexes)
self.currentData = [self.currentData[i] for i in indexes]
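# --- Hedged usage sketch (added for illustration, not part of the original module) ---
# __getitem__ pages through the chunk files sequentially, so the dataset is meant
# to be read in order; shuffling is handled per-chunk inside the class itself.
# The file paths are the same illustrative ones used in the __main__ block below.
def _finetune_loader_sketch():
    from torch.utils.data import DataLoader
    dataset = finetuneDataset("./data/ratings_small.csv", "./data/data.npy", folder='train')
    # shuffle=False on purpose: reshuffling indices would defeat the chunk paging
    loader = DataLoader(dataset, batch_size=64, shuffle=False, num_workers=0)
    (movie_data, user_id, movie_id), rating = next(iter(loader))
    return movie_data.shape, rating.shape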
if __name__ == '__main__':
finetuneDataset("./data/ratings_small.csv", "./data/data.npy", training=False)
print(len(finetuneDataset("./data/ratings_small.csv", "./data/data.npy", training=False)))
print(len(finetuneDataset("./data/ratings_small.csv", "./data/data.npy", training=True)))
|
<reponame>zhanzju/bsl
{
'variables': {
'bsltf_sources': [
'bsltf_allocbitwisemoveabletesttype.cpp',
'bsltf_alloctesttype.cpp',
'bsltf_bitwisemoveabletesttype.cpp',
'bsltf_convertiblevaluewrapper.cpp',
'bsltf_degeneratefunctor.cpp',
'bsltf_enumeratedtesttype.cpp',
'bsltf_evilbooleantype.cpp',
'bsltf_nonassignabletesttype.cpp',
'bsltf_noncopyconstructibletesttype.cpp',
'bsltf_nondefaultconstructibletesttype.cpp',
'bsltf_nonequalcomparabletesttype.cpp',
'bsltf_nontypicaloverloadstesttype.cpp',
'bsltf_simpletesttype.cpp',
'bsltf_stdstatefulallocator.cpp',
'bsltf_stdtestallocator.cpp',
'bsltf_templatetestfacility.cpp',
'bsltf_testvaluesarray.cpp',
'bsltf_uniontesttype.cpp',
],
'bsltf_tests': [
'bsltf_allocbitwisemoveabletesttype.t',
'bsltf_alloctesttype.t',
'bsltf_bitwisemoveabletesttype.t',
'bsltf_convertiblevaluewrapper.t',
'bsltf_degeneratefunctor.t',
'bsltf_enumeratedtesttype.t',
'bsltf_evilbooleantype.t',
'bsltf_nonassignabletesttype.t',
'bsltf_noncopyconstructibletesttype.t',
'bsltf_nondefaultconstructibletesttype.t',
'bsltf_nonequalcomparabletesttype.t',
'bsltf_nontypicaloverloadstesttype.t',
'bsltf_simpletesttype.t',
'bsltf_stdstatefulallocator.t',
'bsltf_stdtestallocator.t',
'bsltf_templatetestfacility.t',
'bsltf_testvaluesarray.t',
'bsltf_uniontesttype.t',
],
'bsltf_tests_paths': [
'<(PRODUCT_DIR)/bsltf_allocbitwisemoveabletesttype.t',
'<(PRODUCT_DIR)/bsltf_alloctesttype.t',
'<(PRODUCT_DIR)/bsltf_bitwisemoveabletesttype.t',
'<(PRODUCT_DIR)/bsltf_convertiblevaluewrapper.t',
'<(PRODUCT_DIR)/bsltf_degeneratefunctor.t',
'<(PRODUCT_DIR)/bsltf_enumeratedtesttype.t',
'<(PRODUCT_DIR)/bsltf_evilbooleantype.t',
'<(PRODUCT_DIR)/bsltf_nonassignabletesttype.t',
'<(PRODUCT_DIR)/bsltf_noncopyconstructibletesttype.t',
'<(PRODUCT_DIR)/bsltf_nondefaultconstructibletesttype.t',
'<(PRODUCT_DIR)/bsltf_nonequalcomparabletesttype.t',
'<(PRODUCT_DIR)/bsltf_nontypicaloverloadstesttype.t',
'<(PRODUCT_DIR)/bsltf_simpletesttype.t',
'<(PRODUCT_DIR)/bsltf_stdstatefulallocator.t',
'<(PRODUCT_DIR)/bsltf_stdtestallocator.t',
'<(PRODUCT_DIR)/bsltf_templatetestfacility.t',
'<(PRODUCT_DIR)/bsltf_testvaluesarray.t',
'<(PRODUCT_DIR)/bsltf_uniontesttype.t',
],
'bsltf_pkgdeps': [
'../bslalg/bslalg.gyp:bslalg',
'../bslma/bslma.gyp:bslma',
'../bslmf/bslmf.gyp:bslmf',
'../bsls/bsls.gyp:bsls',
'../bslscm/bslscm.gyp:bslscm',
],
},
'targets': [
{
'target_name': 'bsltf_sources',
'type': 'none',
'direct_dependent_settings': {
'sources': [ '<@(bsltf_sources)' ],
'include_dirs': [ '.' ],
},
},
{
'target_name': 'bsltf_tests_build',
'type': 'none',
'dependencies': [ '<@(bsltf_tests)' ],
},
{
'target_name': 'bsltf_tests_run',
'type': 'none',
'dependencies': [ 'bsltf_tests_build' ],
'sources': [ '<@(bsltf_tests_paths)' ],
'rules': [
{
'rule_name': 'run_unit_tests',
'extension': 't',
'inputs': [ '<@(bsltf_tests_paths)' ],
'outputs': [ '<(INTERMEDIATE_DIR)/<(RULE_INPUT_ROOT).t.ran' ],
'action': [ '<(python_path)', '<(DEPTH)/tools/run_unit_tests.py',
'<(RULE_INPUT_PATH)',
'<@(_outputs)',
'--abi=<(ABI_bits)',
'--lib=<(library)'
],
'msvs_cygwin_shell': 0,
},
],
},
{
'target_name': 'bsltf',
'type': '<(library)',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bsltf_pkgdeps)',
'bsltf_sources', ],
'export_dependent_settings': [ '<@(bsltf_pkgdeps)' ],
'direct_dependent_settings': { 'include_dirs': [ '.' ] },
# Mac OS X empty LD_DYLIB_INSTALL_NAME causes executable and shared
# libraries linking against dylib to store same path for use at runtime
'xcode_settings': { 'LD_DYLIB_INSTALL_NAME': '' },
},
{
'target_name': 'bsltf_allocbitwisemoveabletesttype.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bsltf_pkgdeps)', 'bsltf' ],
'include_dirs': [ '.' ],
'sources': [ 'bsltf_allocbitwisemoveabletesttype.t.cpp' ],
},
{
'target_name': 'bsltf_alloctesttype.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bsltf_pkgdeps)', 'bsltf' ],
'include_dirs': [ '.' ],
'sources': [ 'bsltf_alloctesttype.t.cpp' ],
},
{
'target_name': 'bsltf_bitwisemoveabletesttype.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bsltf_pkgdeps)', 'bsltf' ],
'include_dirs': [ '.' ],
'sources': [ 'bsltf_bitwisemoveabletesttype.t.cpp' ],
},
{
'target_name': 'bsltf_convertiblevaluewrapper.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bsltf_pkgdeps)', 'bsltf' ],
'include_dirs': [ '.' ],
'sources': [ 'bsltf_convertiblevaluewrapper.t.cpp' ],
},
{
'target_name': 'bsltf_degeneratefunctor.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bsltf_pkgdeps)', 'bsltf' ],
'include_dirs': [ '.' ],
'sources': [ 'bsltf_degeneratefunctor.t.cpp' ],
},
{
'target_name': 'bsltf_enumeratedtesttype.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bsltf_pkgdeps)', 'bsltf' ],
'include_dirs': [ '.' ],
'sources': [ 'bsltf_enumeratedtesttype.t.cpp' ],
},
{
'target_name': 'bsltf_evilbooleantype.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bsltf_pkgdeps)', 'bsltf' ],
'include_dirs': [ '.' ],
'sources': [ 'bsltf_evilbooleantype.t.cpp' ],
},
{
'target_name': 'bsltf_nonassignabletesttype.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bsltf_pkgdeps)', 'bsltf' ],
'include_dirs': [ '.' ],
'sources': [ 'bsltf_nonassignabletesttype.t.cpp' ],
},
{
'target_name': 'bsltf_noncopyconstructibletesttype.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bsltf_pkgdeps)', 'bsltf' ],
'include_dirs': [ '.' ],
'sources': [ 'bsltf_noncopyconstructibletesttype.t.cpp' ],
},
{
'target_name': 'bsltf_nondefaultconstructibletesttype.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bsltf_pkgdeps)', 'bsltf' ],
'include_dirs': [ '.' ],
'sources': [ 'bsltf_nondefaultconstructibletesttype.t.cpp' ],
},
{
'target_name': 'bsltf_nonequalcomparabletesttype.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bsltf_pkgdeps)', 'bsltf' ],
'include_dirs': [ '.' ],
'sources': [ 'bsltf_nonequalcomparabletesttype.t.cpp' ],
},
{
'target_name': 'bsltf_nontypicaloverloadstesttype.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bsltf_pkgdeps)', 'bsltf' ],
'include_dirs': [ '.' ],
'sources': [ 'bsltf_nontypicaloverloadstesttype.t.cpp' ],
},
{
'target_name': 'bsltf_simpletesttype.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bsltf_pkgdeps)', 'bsltf' ],
'include_dirs': [ '.' ],
'sources': [ 'bsltf_simpletesttype.t.cpp' ],
},
{
'target_name': 'bsltf_stdstatefulallocator.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bsltf_pkgdeps)', 'bsltf' ],
'include_dirs': [ '.' ],
'sources': [ 'bsltf_stdstatefulallocator.t.cpp' ],
},
{
'target_name': 'bsltf_stdtestallocator.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bsltf_pkgdeps)', 'bsltf' ],
'include_dirs': [ '.' ],
'sources': [ 'bsltf_stdtestallocator.t.cpp' ],
},
{
'target_name': 'bsltf_templatetestfacility.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bsltf_pkgdeps)', 'bsltf' ],
'include_dirs': [ '.' ],
'sources': [ 'bsltf_templatetestfacility.t.cpp' ],
},
{
'target_name': 'bsltf_testvaluesarray.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bsltf_pkgdeps)', 'bsltf' ],
'include_dirs': [ '.' ],
'sources': [ 'bsltf_testvaluesarray.t.cpp' ],
},
{
'target_name': 'bsltf_uniontesttype.t',
'type': 'executable',
'dependencies': [ '../bsl_deps.gyp:bsl_grpdeps',
'<@(bsltf_pkgdeps)', 'bsltf' ],
'include_dirs': [ '.' ],
'sources': [ 'bsltf_uniontesttype.t.cpp' ],
},
],
}
|
# -*- coding: utf-8 -*-
# @Time : 2019-02-22 23:50
# @Author : <NAME>
# @Email : <EMAIL>
import requests
import json
import smtplib
import time
import os
from email.mime.text import MIMEText
from email.header import Header
from db.db_manager import DatabaseManager
from config import mail_host, mail_user, mail_pass
def convert_chinese_runtime_to_second(chinese_runtime):
chinese_time_units = ['天', '小时', '分钟', '秒']
convert_second_times = [86400, 3600, 60, 1]
total_second = 0
for i, chinese_time_unit in enumerate(chinese_time_units):
idx = chinese_runtime.find(chinese_time_unit)
if idx != -1:
num = int(chinese_runtime[:idx])
total_second += convert_second_times[i] * num
chinese_runtime = chinese_runtime[idx + len(chinese_time_unit):]
return total_second
def get_current_user_process_info_dict(db):
node_gpu_msg_list = db.get_node_msg_list()
user_process_info_dict = {}
for node_gpu_msg in node_gpu_msg_list:
hostname = node_gpu_msg['hostname']
for gpu in node_gpu_msg['gpus']:
gpu_id = gpu['index']
for process_info in gpu['processes']:
user_container_name = process_info['username']
username = user_container_name.split('-node')[0]
pid = process_info['pid']
runtime = process_info['runtime']
runtime = convert_chinese_runtime_to_second(runtime)
card_id = '%s_%s' % (hostname, gpu_id)
p_info = {'hostname': hostname, 'gpu_id': gpu_id, 'pid': pid, 'runtime': runtime}
if username not in user_process_info_dict:
user_process_info_dict[username] = {card_id: [p_info]}
elif card_id not in user_process_info_dict[username]:
user_process_info_dict[username][card_id] = [p_info]
else:
user_process_info_dict[username][card_id].append(p_info)
return user_process_info_dict
def get_sorted_card_info_list(process_info):
sorted_card_info_list = []
for card_id, p_info_list in process_info.items():
sorted_card_info_list.append(p_info_list)
sorted_card_info_list = sorted(sorted_card_info_list, key=lambda x: max(x, key=lambda x: x['runtime'])['runtime'], reverse=True)
return sorted_card_info_list
def kill_process_by_p_info(p_info, history_p_info_dict):
fused_pid = '%s-%s' % (p_info['hostname'], p_info['pid'])
if fused_pid in history_p_info_dict:
return
os.system("ssh %s kill -9 %s" % (p_info['hostname'], p_info['pid']))
def send_email(user_info, used_card_num, soft_max_card_num, hard_max_card_num, soft_time):
sender = '<EMAIL>'
receiver = user_info['email']
username = user_info['username']
if receiver in ['test', 'NA', '']:
return
    receivers = [receiver]  # recipient list; can be set to a QQ mailbox or any other address
mail_content = '''
Hi, %s:
你的用卡数量(%d) 已经超过规定数量(%d), 请及时下线不必要的程序, 超过的部分将于%d秒后自动关闭.
注意, 如果你的用卡数量超过%d张, 多出的部分将会被立即关闭.
Best Wishes
AI集群运维团队
''' % (username, used_card_num, soft_max_card_num, soft_time, hard_max_card_num)
message = MIMEText(mail_content, 'plain', 'utf-8')
message['From'] = Header("AI集群管理", 'utf-8')
message['To'] = Header("AI集群用户", 'utf-8')
message['Subject'] = Header('AI集群过载提醒', 'utf-8')
try:
smtpObj = smtplib.SMTP()
        smtpObj.connect(mail_host, 587)  # 587 is the SMTP submission port, used here with STARTTLS
smtpObj.ehlo()
smtpObj.starttls()
smtpObj.ehlo()
smtpObj.login(mail_user, mail_pass)
smtpObj.sendmail(sender, receivers, message.as_string())
print("%s 邮件发送成功" % receiver)
return True
except smtplib.SMTPException:
print("Error: %s 无法发送邮件" % receiver)
time.sleep(10)
send_email(user_info, used_card_num, soft_max_card_num, hard_max_card_num, soft_time)
def get_history_p_info_dict(db):
current_user_process_info_dict = get_current_user_process_info_dict(db)
history_p_info_dict = {}
for user, process_info in current_user_process_info_dict.items():
for card_id, p_info_list in process_info.items():
for p_info in p_info_list:
hostname = p_info['hostname']
pid = p_info['pid']
fused_pid = '%s-%s' % (hostname, pid)
history_p_info_dict[fused_pid] = 1
return history_p_info_dict
def convert_user_info_list_to_dict(user_info_list):
user_info_dict = {}
for user_info in user_info_list:
user_info_dict[user_info['username']] = user_info
return user_info_dict
def main():
soft_max_card_num = 8
hard_max_card_num = 20
soft_time = 3600
db = DatabaseManager()
middle_user_dict = {}
history_p_info_dict = get_history_p_info_dict(db)
while True:
current_user_process_info_dict = get_current_user_process_info_dict(db)
user_info_list = db.get_all_user_info()
user_info_dict = convert_user_info_list_to_dict(user_info_list)
for user, process_info in current_user_process_info_dict.items():
if user not in user_info_dict:
continue
permission_num = len(user_info_dict[user]['permission'])
if permission_num <= 10:
continue
card_num = len(process_info)
"""
normal
"""
if card_num <= soft_max_card_num:
middle_user_dict[user] = {'count': 0, 'card_num': card_num}
continue
"""
middle
"""
if card_num <= hard_max_card_num:
if user not in middle_user_dict:
middle_user_dict[user] = {'count': 1, 'card_num': card_num}
else:
middle_user_dict[user]['count'] += 1
middle_user_dict[user]['card_num'] = card_num
sorted_card_info_list = get_sorted_card_info_list(process_info)
for card_info in sorted_card_info_list[soft_max_card_num:]:
for p_info in card_info:
if p_info['runtime'] > soft_time:
kill_process_by_p_info(p_info, history_p_info_dict)
continue
"""
hard
"""
if user not in middle_user_dict:
middle_user_dict[user] = {'count': 1, 'card_num': card_num}
else:
middle_user_dict[user]['count'] += 1
middle_user_dict[user]['card_num'] = card_num
sorted_card_info_list = get_sorted_card_info_list(process_info)
for card_info in sorted_card_info_list[hard_max_card_num:]:
for p_info in card_info:
kill_process_by_p_info(p_info, history_p_info_dict)
"""
send email
"""
for user_info in user_info_list:
uname = user_info['username']
if uname in middle_user_dict and middle_user_dict[uname]['count'] == 1:
send_email(user_info, middle_user_dict[uname]['card_num'], soft_max_card_num, hard_max_card_num, soft_time)
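# --- Hedged usage sketch (added for illustration, not part of the original script) ---
# convert_chinese_runtime_to_second() parses runtimes reported with Chinese units
# (day/hour/minute/second). A quick sanity check that runs without the cluster DB:
def _runtime_parse_sketch():
    assert convert_chinese_runtime_to_second('1天2小时') == 86400 + 2 * 3600
    assert convert_chinese_runtime_to_second('30分钟15秒') == 30 * 60 + 15
    return True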
if __name__ == '__main__':
main()
|
<filename>tests/test_computations.py
# Copyright 2014 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
from numpy import array
from numpy.testing import assert_array_equal
from mine.mine import *
class computations_test(unittest.TestCase):
def setUp(self):
# Example from wiki https://en.wikipedia.org/wiki/Marginal_distribution
        # Also appears in Cover et al.'s book, p. 18
self.wiki_example = array([[4. / 32, 2. / 32, 1. / 32, 1. / 32],
[2. / 32, 4. / 32, 1. / 32, 1. / 32],
[2. / 32, 2. / 32, 2. / 32, 2. / 32],
[8. / 32, 0., 0., 0.]])
# Example from Guttler's presentation
self.guttler_example = array([[1. / 20, 1. / 20, 1. / 20],
[3. / 20, 5. / 20, 2. / 20],
[3. / 20, 2. / 20, 2. / 20]])
assert np.sum(self.wiki_example) == 1.
def test_H(self):
X = get_x_distribution(self.wiki_example)
assert H(X) == 7. / 4
Y = get_y_distribution(self.wiki_example)
assert H(Y) == 2.
# Joint entropy
assert H(self.wiki_example) == 27. / 8
def test_getXDistribution(self):
assert_array_equal(get_x_distribution(self.wiki_example), array([1. / 2, 1. / 4, 1. / 8, 1. / 8]))
def test_getYDistribution(self):
assert_array_equal(get_y_distribution(self.wiki_example), array([1. / 4, 1. / 4, 1. / 4, 1. / 4]))
def test_I(self):
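        # Sanity check: I(X;Y) = H(X) + H(Y) - H(X,Y) = 7/4 + 2 - 27/8 = 3/8 for the wiki example.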
assert I(self.wiki_example) == 3. / 8
def test_HP(self):
"""
4 | | |x|
3 | | x | |
2 | |x | |
1 |x x| | |
0 x| | x| |
0|1 2|3 4 5|6|
"""
assert HP(np.array([-1, 0, 2, 5, 6])) == H(np.array([1. / 7, 2. / 7, 3. / 7, 1. / 7]))
assert HP(np.array([0, 2, 5])) == H(np.array([2. / 5, 3. / 5]))
assert HP(np.array([-1, 0])) == H(np.array([1. / 1]))
assert HP(np.array([-1, 5, 6])) == H(np.array([6. / 7, 1. / 7]))
def test_HQ(self):
"""
4 x
3 x
2 x
*----+---+-----+-
1 x
*----+---+-----+-
0 x x
0 1 2 3 4 5 6
"""
assert HQ(
{
(0, 0): 0,
(5, 0): 0,
(1, 1): 1,
(3, 2): 2,
(4, 3): 2,
(6, 4): 2
}
) == H(np.array([2. / 6, 1. / 6, 3. / 6]))
"""
4 x
3 x
2 x
*----+---+-----+-
1 x x
0 x x
0 1 2 3 4 5 6
"""
assert HQ(
{
(0, 0): 0,
(5, 0): 0,
(1, 1): 0,
(2, 1): 0,
(3, 2): 1,
(4, 3): 1,
(6, 4): 1
}
) == H(np.array([4. / 7, 3. / 7]))
def test_HPQ(self):
"""
4 | | |x
3 | | x |
2 | |x |
*----+---+-----+-
1 |x x| |
*----+---+-----+-
0 x| | x|
0|1 2|3 4 5 6
| | |
"""
P_ordinals = [-1, 0, 2, 5]
Q_map = {
(0, 0): 0,
(5, 0): 0,
(1, 1): 1,
(2, 1): 1,
(3, 2): 2,
(4, 3): 2,
(6, 4): 2
}
self.assertEqual(HPQ(P_ordinals, Q_map),
H(np.array([0. / 6, 0. / 6, 2. / 6, 0. / 6, 2. / 6, 0. / 6, 1. / 6, 0. / 6, 1. / 6])))
if __name__ == '__main__':
unittest.main()
|
<filename>required_scripts/addRC_to_Delly_VCF_f4d178e.py
from multiprocessing import Process, Queue, cpu_count
import pysam, sys
import getopt
import fileinput
from sys import argv # Used to bring in the feature argv, variables or arguments
def main(scriptname, argv):
vcf = '' ; tbam = '' ; nbam = '' ; slop = 1000; ## Default values
try:
opts, args = getopt.getopt(argv,"hi:t:n:s:",["vcf=","tbam=", "nbam=", "slop="])
print(opts)
except getopt.GetoptError:
print("USAGE: \n" + scriptname + '-i <inputfile VCF file[M]> -t <Tumor BAM file [M]> -n <normal BAM file [M]> -s <slop[O]>\n')
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print("USAGE: \n" + scriptname + ' -i <inputfile VCF file[M]> -t <Tumor BAM file [M]> -n <normal BAM file [M]> -s <slop[O]>\n')
sys.exit()
elif opt in ("-i", "--vcf"):
vcf = arg
elif opt in ("-t", "--tbam"):
tbam = arg
elif opt in ("-n", "--nbam"):
nbam = arg
elif opt in ("-s", "--slop"):
slop = int(arg)
## Filename can be full path or relative assuming the right current folder
print('VCF is: ', vcf ) ;
print('TBAM is: ', tbam)
print('NBAM is: ', nbam)
print('SLOP is: ', slop)
newheader,LVAR = process_files_and_get_dist_of_discordant_pairs_and_return_new_vcf_records(vcf, tbam, nbam, slop)
write_new_vcf(vcf, newheader, LVAR)
def getDiscordantReadsDistribution_parallel_version(bam, chr, pos, slop, chr2, pos2, myqueue):
res = getDiscordantReadsDistribution(bam, chr, pos, slop, chr2, pos2)
myqueue.put((res))
def getDiscordantReadsDistribution(bam, chr, pos, slop, chr2, pos2):
bamf = pysam.AlignmentFile(bam, "rb")
iter = bamf.fetch(chr, max(pos-slop,1), pos+slop) ## TODO check the max length of the chr and pos+slop should not go out of range
pos2lowend=pos2-slop
pos2highend=pos2+slop
# print(str(chr) + " <--> " + str(pos-slop) + " <--> " + str(pos) + " <--> " + str(pos+slop))
# print(str(chr) + ' <--> ' + str(chr2))
## init list of coordinates
CR = []
for x in iter: ## x is a read line or aka an alignment record
        if x.has_tag('MQ'):  ## some alignment records lack the mate's MQ tag even when the pair is aligned; known BWA issue
mq_mate=x.get_tag('MQ')
else:
mq_mate=60 ## we assume that if the mate quality is not present it is still a good one. TODO: Re-Calculate MQ if possible
mq=x.mapping_quality
## here the heart of the read selection:
if not x.is_unmapped and not x.mate_is_unmapped and not x.is_proper_pair and not x.is_duplicate and mq>=1 and mq_mate>=1:
# print(str(x))
# print(str(pos2lowend), "<=", str(x.next_reference_start),"<=",str(pos2highend))
# print(pos2lowend <= x.next_reference_start <= pos2highend)
if pos2lowend <= x.next_reference_start <= pos2highend:
CR.append(x.reference_start)
if not CR:
# print("No Discordant Pairs")
return(-1,0) ## This mean there was no discordant reads in that region ; we can return 0,0 or -1,0
else:
return(max(CR)-min(CR),len(CR))
def process_files_and_get_dist_of_discordant_pairs_and_return_new_vcf_records(vcf, tbam, nbam, slop):
tumor_bam_fn=tbam ; #"MMRF_1816_1_BM_CD138pos_T1_KHWGL_L06685.bwa.final.bam"
normal_bam_fn=nbam ; # "MMRF_1816_1_PB_Whole_C2_KHWGL_L06684.bwa.final.bam"
#read the input VCF file using pysam (note a bug was reported on github (issue #350) but does not impact us here as we do not use the full record information)
myvcf=pysam.VariantFile(vcf,"r")
# Add the new fields to the header.
myvcf.header.formats.add("RCALT","1","Integer","Read Count supporting ALT SV RC=(DV+RV) captured by Delly")
myvcf.header.formats.add("RDISTDISC1","1","Integer","Distribution size of Discordant pair at CHR:POS ; value -1 means no discordant reads found in captured interval")
myvcf.header.formats.add("RDISTDISC2","1","Integer","Distribution size of Discordant pair at CHR2:THEEND ; value -1 means no discordant reads found in captured interval")
myvcf.header.formats.add("RCDIS1","1","Integer","Number of Recaptured Discordant Pairs in Left Region with which we calculated the range distribution RDISTDISC1")
myvcf.header.formats.add("RCDIS2","1","Integer","Number of Recaptured Discordant Pairs in Right Region with which we calculated the range distribution RDISTDISC2")
LVAR = [] ## init list for updated variants
    slop_ori = slop  ## we need to keep the original value of the slop variable
for variant in myvcf: # loop over the list of variants records
variant.samples[0]['RCALT']=variant.samples[0]['DV']+variant.samples[0]['RV']
variant.samples[1]['RCALT']=variant.samples[1]['DV']+variant.samples[1]['RV']
# print(variant)
## we check if there is an overlap between the left and right region, if so we reduce the slop
## but we can not have the slop less than 1
if variant.chrom == variant.info['CHR2']:
if variant.alts[0]!="<INV>":
while variant.info['ENDPOSSV']-slop <= variant.pos+slop:
## TODO: avoid the out of range error here by putting: max(variant.info['ENDPOSSV']-slop,1)
slop=max(slop-1,1)
else:
while variant.info['ENDPOSSV']+slop <= variant.pos-slop:
## TODO: avoid the out of range error here by putting: max(variant.pos-slop,1)
slop=max(slop-1,1)
if cpu_count() > 4:
myqueue1 = Queue() ; myqueue2 = Queue() ;myqueue3 = Queue() ;myqueue4 = Queue()
p1 = Process(target=getDiscordantReadsDistribution_parallel_version, args=(tumor_bam_fn,variant.chrom,variant.pos,slop,variant.info["CHR2"],variant.info["ENDPOSSV"],myqueue1))
p2 = Process(target=getDiscordantReadsDistribution_parallel_version, args=(tumor_bam_fn,variant.info["CHR2"],variant.info["ENDPOSSV"],slop,variant.chrom,variant.pos,myqueue2))
p3 = Process(target=getDiscordantReadsDistribution_parallel_version, args=(normal_bam_fn,variant.chrom,variant.pos,slop,variant.info["CHR2"],variant.info["ENDPOSSV"],myqueue3))
p4 = Process(target=getDiscordantReadsDistribution_parallel_version, args=(normal_bam_fn,variant.info["CHR2"],variant.info["ENDPOSSV"],slop,variant.chrom,variant.pos,myqueue4))
p1.start() ; p2.start() ; p3.start() ; p4.start();
p1.join() ; p2.join() ; p3.join() ; p4.join() ;
TUMDISTCHR = myqueue1.get()
TUMDISTCHR2 = myqueue2.get()
NORMDISTCHR = myqueue3.get()
NORMDISTCHR2 = myqueue4.get()
myqueue1.close() ; myqueue2.close() ; myqueue3.close() ; myqueue4.close()
else:
TUMDISTCHR = getDiscordantReadsDistribution(tumor_bam_fn, variant.chrom, variant.pos, slop, variant.info['CHR2'], variant.info['ENDPOSSV'])
TUMDISTCHR2 = getDiscordantReadsDistribution(tumor_bam_fn, variant.info['CHR2'], variant.info['ENDPOSSV'], slop, variant.chrom, variant.pos)
NORMDISTCHR = getDiscordantReadsDistribution(normal_bam_fn, variant.chrom, variant.pos, slop, variant.info['CHR2'], variant.info['ENDPOSSV'])
NORMDISTCHR2 = getDiscordantReadsDistribution(normal_bam_fn, variant.info['CHR2'], variant.info['ENDPOSSV'], slop, variant.chrom, variant.pos)
variant.samples[0]['RDISTDISC1']=TUMDISTCHR[0]
variant.samples[0]['RDISTDISC2']=TUMDISTCHR2[0]
variant.samples[1]['RDISTDISC1']=NORMDISTCHR[0]
variant.samples[1]['RDISTDISC2']=NORMDISTCHR2[0]
variant.samples[0]['RCDIS1']=TUMDISTCHR[1]
variant.samples[0]['RCDIS2']=TUMDISTCHR2[1]
variant.samples[1]['RCDIS1']=NORMDISTCHR[1]
variant.samples[1]['RCDIS2']=NORMDISTCHR2[1]
LVAR.append(variant)
        slop = slop_ori  ## reinitialize the slop here in case we entered one of the while loops under << if variant.chrom == variant.info['CHR2']: >>
return(myvcf.header, LVAR)
def write_new_vcf(vcf, newheader,LVAR):
""" function to write the updated records in a new vcf"""
with open(vcf+"_addDist.vcf", "w") as f:
f.write(str(newheader))
for record in LVAR:
f.write(str(record))
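# --- Hedged usage sketch (added for illustration, not part of the original script) ---
# Typical invocation, with illustrative file names (the VCF must carry Delly's
# CHR2/ENDPOSSV INFO fields and DV/RV FORMAT fields used above); the annotated
# output is written next to the input as <vcf>_addDist.vcf:
#
#   python addRC_to_Delly_VCF_f4d178e.py -i delly_calls.vcf -t tumor.bam -n normal.bam -s 1000
#
def _discordant_distribution_sketch():
    # BAM name and coordinates below are illustrative assumptions.
    span, count = getDiscordantReadsDistribution("tumor.bam", "1", 1000000, 1000, "8", 2000000)
    return span, count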
if __name__ == "__main__":
main(argv[0], argv[1:])
|
<reponame>julinas/town-sim-py
# BSD 3-Clause License
#
# Copyright (c) 2019, Augmented Design Lab
# All rights reserved.
import math
from lot import Lot
from util import Type, get_line
def get_closest_point(node, lots, road_segments, road_type, leave_lot, correction=5):
# check if road can leave lot
(x, y) = (node.x, node.y)
nodes = road_segments
# if not leave_lot:
# if node.lot is None:
# return None
# nodes = set(road_segments) & node.lot.get_nodes()
# filter out bridges
nodes = [n for n in nodes if Type.BRIDGE not in n.type]
if len(nodes) == 0:
print("leave_lot = {} no road segments".format(leave_lot))
return None, None
dists = [math.hypot(n.x - x, n.y - y) for n in nodes]
node2 = nodes[dists.index(min(dists))]
(x2, y2) = (node2.x, node2.y)
# if node.lot is not None and road_type is not Type.MINOR_ROAD:
# if abs(x - x2) < correction:
# x = x2
# node = node.landscape.array[x][y]
# elif abs(y - y2) < correction:
# y = y2
# node = node.landscape.array[x][y]
if node.lot is None:
if road_type is not Type.MINOR_ROAD and abs(x2 - x) > 10 and abs(y2 - y) > 10:
if node2.lot is not None:
(cx2, cy2) = node2.lot.center
(x, y) = (x + x - cx2, y + y - cy2)
if x >= node.landscape.x:
x = node.landscape.x - 1
if x < 0:
x = 0
if y >= node.landscape.y:
y = node.landscape.y - 1
if y < 0:
y = 0
# else:
# (x2, y2) = (node2.x, node2.y)
if abs(x2 - x) > 10 and abs(y2 - y) > 10: # and road_type is Type.MAJOR_ROAD:
if not node.landscape.add_lot([(x2, y2), (x, y)]):
print("leave_lot = {} add lot failed".format(leave_lot))
return None, None
# else:
# print("leave_lot = {} proposed lot is too small{} or road is not MAJOR_ROAD{}".format(leave_lot, abs(x2 - x) > 10 and abs(y2 - y) > 10, road_type is Type.MAJOR_ROAD))
# return None
else:
return None, None
points = get_line((x, y), (node2.x, node2.y))
if len(points) <=2:
return None, None
if not leave_lot:
for (i, j) in points:
if Type.WATER in node.landscape.array[i][j].type:
return None, None
return (node2.x, node2.y), points
def get_point_to_close_gap_minor(x1, y1, landscape, points):
# connects 2nd end of minor roads to the nearest major or minor road
(x_, y_) = points[1]
x = x1 - x_
y = y1 - y_
(x2, y2) = (x1 + x, y1 + y)
while True:
if x2 >= landscape.x or y2 >= landscape.y or x2 < 0 or y2 < 0:
break
landtype = landscape.array[x2][y2].type
if Type.GREEN in landtype or Type.FOREST in landtype or Type.WATER in landtype:
break
if Type.MAJOR_ROAD in landtype or Type.MINOR_ROAD in landtype and Type.BYPASS not in landtype:
return (x2, y2)
(x2, y2) = (x2 + x, y2 + y)
return None
def get_point_to_close_gap_major(node, x1, y1, landscape, points):
# extends a major road to the edge of a lot
if node.lot is None:
return None
(x_, y_) = points[1]
x = x1 - x_
y = y1 - y_
(x2, y2) = (x1 + x, y1 + y)
border = node.lot.border
while True:
if x2 >= landscape.x or y2 >= landscape.y or x2 < 0 or y2 < 0:
break
landtype = landscape.array[x2][y2].type
if Type.WATER in landtype:
break
if (x2, y2) in border:
landtype = landscape.array[x2][y2].type
return (x2, y2)
# elif Type.MAJOR_ROAD in landtype:
# return get_line((x1, y1), (x2, y2))
(x2, y2) = (x2 + x, y2 + y)
return None
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import multiprocessing
import os
import re
import string
from pathlib import Path
from typing import List
import scipy.io.wavfile as wav
from normalization_helpers import LATIN_TO_RU, NUMBERS_TO_ENG, NUMBERS_TO_RU, RU_ABBREVIATIONS
from nemo.collections import asr as nemo_asr
parser = argparse.ArgumentParser(description="Prepares text and audio files for segmentation")
parser.add_argument("--in_text", type=str, default=None, help='Path to a text file or a directory with .txt files')
parser.add_argument("--output_dir", type=str, required=True, help='Path to output directory')
parser.add_argument("--audio_dir", type=str, help='Path to folder with .mp3 audio files')
parser.add_argument('--sample_rate', type=int, default=16000, help='Sampling rate used during ASR model training')
parser.add_argument('--language', type=str, default='eng', choices=['eng', 'ru', 'other'])
parser.add_argument(
'--cut_prefix', type=int, default=0, help='Number of seconds to cut from the beginning of the audio files.',
)
parser.add_argument(
'--model', type=str, default='QuartzNet15x5Base-En', help='Pre-trained model name or path to model checkpoint'
)
parser.add_argument('--min_length', type=int, default=20, help='Minimal sentence length.')
def convert_mp3_to_wav(mp3_file: str, wav_file: str = None, sample_rate: int = 16000) -> str:
"""
Convert .mp3 to .wav and change sample rate if needed
Args:
        mp3_file: Path to .mp3 file
        wav_file: Path to the output .wav file (defaults to the .mp3 path with a .wav extension)
        sample_rate: Desired sample rate
Returns:
path to .wav file
"""
print(f"Converting {mp3_file} to .wav format with sample rate {sample_rate}")
if not mp3_file.endswith(".mp3"):
raise ValueError(f'.mp3 file expected but {mp3_file} passed')
if wav_file is None:
wav_file = mp3_file.replace(".mp3", ".wav")
os.system(f'ffmpeg -i {mp3_file} -ac 1 -af aresample=resampler=soxr -ar {sample_rate} {wav_file} -y')
return wav_file
def process_audio(mp3_file: str, wav_file: str = None, cut_prefix: int = 0, sample_rate: int = 16000):
"""Process audio file: .mp3 to .wav conversion and cut a few seconds from the beginning of the audio
Args:
mp3_file: path to the .mp3 file for processing
wav_file: path to the output .wav file
cut_prefix: number of seconds to cut from the beginning of the audio file
sample_rate: target sampling rate
"""
wav_audio = convert_mp3_to_wav(str(mp3_file), wav_file, sample_rate)
if cut_prefix > 0:
# cut a few seconds of audio from the beginning
sample_rate, signal = wav.read(wav_audio)
wav.write(wav_audio, data=signal[cut_prefix * sample_rate :], rate=sample_rate)
def split_text(
in_file: str,
out_file: str,
vocabulary: List[str] = None,
language='eng',
remove_square_brackets=True,
do_lower_case=True,
min_length=20,
):
"""
Breaks down the in_file into sentences. Each sentence will be on a separate line.
Also replaces numbers with a simple spoken equivalent based on NUMBERS_TO_<lang> map and removes punctuation
Args:
in_file: path to original transcript
out_file: path to the output file
vocabulary: ASR model vocabulary
language: text language
        remove_square_brackets: Set to True if square brackets [] should be removed from text.
            Text in square brackets often contains inaudible fragments like notes or translations
        do_lower_case: flag that determines whether to apply lower case to the in_file text
        min_length: sentences shorter than this (in characters) are merged into the previous sentence
"""
print(f'Splitting text in {in_file} into sentences.')
with open(in_file, "r") as f:
transcript = f.read()
# remove some symbols for better split into sentences
transcript = (
transcript.replace("\n", " ")
.replace("\t", " ")
.replace("…", "...")
.replace("»", "")
.replace("«", "")
.replace("\\", "")
.replace("”", "")
.replace("„", "")
)
# remove extra space
transcript = re.sub(r' +', ' ', transcript)
if remove_square_brackets:
transcript = re.sub(r'(\[.*?\])', ' ', transcript)
# Read and split transcript by utterance (roughly, sentences)
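    # The lookbehinds keep abbreviations such as "e.g." (\w\.\w.), titles like "Mr."
    # ([A-Z][a-z]\.) and single initials ([A-Z]\.) from triggering a split; the final
    # (?<=\.|\?|\!)\s part splits at whitespace that follows sentence-ending punctuation.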
split_pattern = "(?<!\w\.\w.)(?<![A-Z][a-z]\.)(?<![A-Z]\.)(?<=\.|\?|\!)\s"
if language == 'ru':
lower_case_ru_letters_unicode = '\u0430-\u04FF'
upper_case_ru_letters_unicode = '\u0410-\u042F'
        # remove space in the middle of a lower case abbreviation to avoid splitting it into separate sentences
matches = re.findall(r'[a-z\u0430-\u04FF]\.\s[a-z\u0430-\u04FF]\.', transcript)
for match in matches:
transcript = transcript.replace(match, match.replace('. ', '.'))
split_pattern = (
"(?<!\w\.\w.)(?<![A-Z"
+ upper_case_ru_letters_unicode
+ "][a-z"
+ lower_case_ru_letters_unicode
+ "]\.)(?<!["
+ upper_case_ru_letters_unicode
+ "]\.)(?<=\.|\?|\!)\s"
)
elif language not in ['ru', 'eng']:
print(f'Consider using {language} unicode letters for better sentence split.')
sentences = re.split(split_pattern, transcript)
sentences_comb = []
# adds a short sentence to the previous one
for i in range(len(sentences)):
if len(sentences[i]) < min_length and len(sentences_comb) > 0:
sentences_comb[-1] += ' ' + sentences[i].strip()
else:
sentences_comb.append(sentences[i].strip())
sentences = "\n".join([s.strip() for s in sentences_comb if s])
# save split text with original punctuation and case
out_dir, out_file_name = os.path.split(out_file)
with open(os.path.join(out_dir, out_file_name[:-4] + '_with_punct.txt'), "w") as f:
f.write(sentences)
# substitute common abbreviations before applying lower case
if language == 'ru':
for k, v in RU_ABBREVIATIONS.items():
sentences = sentences.replace(k, v)
if do_lower_case:
sentences = sentences.lower()
if language == 'eng':
for k, v in NUMBERS_TO_ENG.items():
sentences = sentences.replace(k, v)
        # remove non-ASCII characters
sentences = ''.join(i for i in sentences if ord(i) < 128)
elif language == 'ru':
if vocabulary and '-' not in vocabulary:
sentences = sentences.replace('-', ' ')
for k, v in NUMBERS_TO_RU.items():
sentences = sentences.replace(k, v)
# replace Latin characters with Russian
for k, v in LATIN_TO_RU.items():
sentences = sentences.replace(k, v)
# make sure to leave punctuation present in vocabulary
all_punct_marks = string.punctuation + "–—’“”"
if vocabulary:
for v in vocabulary:
all_punct_marks = all_punct_marks.replace(v, '')
sentences = re.sub("[" + all_punct_marks + "]", "", sentences).strip()
with open(out_file, "w") as f:
f.write(sentences)
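# --- Hedged usage sketch (added for illustration, not part of the original script) ---
# split_text() can be driven without an ASR model: with vocabulary=None it only
# normalizes the transcript, splits it into sentences and lowercases it. The file
# names below are illustrative assumptions.
def _split_text_sketch():
    split_text('transcript.txt', 'transcript_split.txt',
               vocabulary=None, language='eng', min_length=20)
    # Two files are produced: transcript_split.txt (normalized, lower case) and
    # transcript_split_with_punct.txt (original punctuation and case preserved).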
if __name__ == '__main__':
args = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
text_files = []
if args.in_text:
vocabulary = None
if args.model is None:
print(f"No model provided, vocabulary won't be used")
elif os.path.exists(args.model):
asr_model = nemo_asr.models.EncDecCTCModel.restore_from(args.model)
vocabulary = asr_model.cfg.decoder['params']['vocabulary']
elif args.model in nemo_asr.models.EncDecCTCModel.get_available_model_names():
asr_model = nemo_asr.models.EncDecCTCModel.from_pretrained(args.model)
vocabulary = asr_model.cfg.decoder['params']['vocabulary']
else:
raise ValueError(
f'Provide path to the pretrained checkpoint or choose from {nemo_asr.models.EncDecCTCModel.list_available_models()}'
)
if os.path.isdir(args.in_text):
text_files = Path(args.in_text).glob(("*.txt"))
else:
text_files.append(Path(args.in_text))
for text in text_files:
base_name = os.path.basename(text)[:-4]
out_text_file = os.path.join(args.output_dir, base_name + '.txt')
split_text(text, out_text_file, vocabulary=vocabulary, language=args.language, min_length=args.min_length)
print(f'Processed text saved at {args.output_dir}')
if args.audio_dir:
if not os.path.exists(args.audio_dir):
raise ValueError(f'{args.audio_dir} not found. "--audio_dir" should contain .mp3 files.')
audio_paths = list(Path(args.audio_dir).glob("*.mp3"))
workers = []
for i in range(len(audio_paths)):
wav_file = os.path.join(args.output_dir, audio_paths[i].name.replace(".mp3", ".wav"))
worker = multiprocessing.Process(
target=process_audio, args=(audio_paths[i], wav_file, args.cut_prefix, args.sample_rate),
)
workers.append(worker)
worker.start()
for w in workers:
w.join()
print('Done.')
|
# -*- coding: utf-8 -*-
# Copyright (C) 2019, QuantStack
# SPDX-License-Identifier: BSD-3-Clause
# conda env equivalent environment creation
from __future__ import absolute_import, print_function
from os.path import basename
import os
from conda._vendor.boltons.setutils import IndexedSet
from conda.base.context import context
from conda.core.solve import Solver
from conda.models.channel import Channel, prioritize_channels
from conda.models.match_spec import MatchSpec
from conda.core.link import UnlinkLinkTransaction, PrefixSetup
from conda.cli.install import handle_txn
from conda_env.installers import conda
from conda.core.prefix_data import PrefixData
from conda.core.solve import diff_for_unlink_link_precs
from conda.models.prefix_graph import PrefixGraph
from mamba.utils import get_env_index, to_package_record_from_subjson
import mamba.mamba_api as api
import tempfile
import threading
import sys
def mamba_install(prefix, specs, args, env, *_, **kwargs):
# TODO: support all various ways this happens
# Including 'nodefaults' in the channels list disables the defaults
channel_urls = [chan for chan in env.channels if chan != 'nodefaults']
if 'nodefaults' not in env.channels:
channel_urls.extend(context.channels)
_channel_priority_map = prioritize_channels(channel_urls)
index = get_env_index(_channel_priority_map)
channel_json = []
for x in index:
# add priority here
priority = len(_channel_priority_map) - _channel_priority_map[x.url_w_subdir][1]
subpriority = 0 if x.channel.platform == 'noarch' else 1
if os.path.exists(x.cache_path_solv):
cache_file = x.cache_path_solv
else:
cache_file = x.cache_path_json
channel_json.append((str(x.channel), cache_file, priority, subpriority))
specs = [MatchSpec(s) for s in specs]
mamba_solve_specs = [s.conda_build_form() for s in specs]
print("\n\nLooking for: {}\n\n".format(mamba_solve_specs))
# TODO!
installed_json_f = tempfile.NamedTemporaryFile('w', delete=False)
installed_json_f.write("") # stupid!
installed_json_f.flush()
solver_options = [(api.SOLVER_FLAG_ALLOW_DOWNGRADE, 1)]
to_link, to_unlink = api.solve(channel_json,
installed_json_f.name,
mamba_solve_specs,
solver_options,
api.SOLVER_INSTALL,
False,
context.quiet,
context.verbosity)
to_link_records, to_unlink_records = [], []
    installed_pkg_recs = list(PrefixData(prefix).iter_records())  # records currently installed in the prefix
    final_precs = IndexedSet(installed_pkg_recs)
def get_channel(c):
for x in index:
if str(x.channel) == c:
return x
for c, pkg in to_unlink:
for i_rec in installed_pkg_recs:
if i_rec.fn == pkg:
final_precs.remove(i_rec)
to_unlink_records.append(i_rec)
break
else:
print("No package record found!")
for c, pkg, jsn_s in to_link:
sdir = get_channel(c)
rec = to_package_record_from_subjson(sdir, pkg, jsn_s)
final_precs.add(rec)
to_link_records.append(rec)
unlink_precs, link_precs = diff_for_unlink_link_precs(prefix,
final_precs=IndexedSet(PrefixGraph(final_precs).graph),
specs_to_add=specs,
force_reinstall=context.force_reinstall)
pref_setup = PrefixSetup(
target_prefix = prefix,
unlink_precs = unlink_precs,
link_precs = link_precs,
remove_specs = [],
update_specs = specs,
neutered_specs = ()
)
conda_transaction = UnlinkLinkTransaction(pref_setup)
pfe = conda_transaction._get_pfe()
pfe.execute()
conda_transaction.execute()
try:
installed_json_f.close()
os.unlink(installed_json_f.name)
except:
pass
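# Replace conda_env's stock installer so that environment creation requests are
# solved by the mamba/libsolv path implemented above.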
conda.install = mamba_install
def main():
from conda_env.cli.main import main
sys.argv = sys.argv[0:1] + sys.argv[2:]
main() |
<filename>dragon/python/core/util/six.py
# Copyright (c) 2010-2019 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Utilities for writing code that runs on Python 2 and 3"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import types
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
if PY3:
integer_types = int,
string_types = str,
else:
integer_types = (int, long)
string_types = (str, unicode)
if PY3:
import collections.abc
collections_abc = collections.abc
else:
import collections
collections_abc = collections
if PY3:
def get_unbound_function(unbound):
return unbound
else:
def get_unbound_function(unbound):
return unbound.im_func
def with_metaclass(meta, *bases):
class MetaClass(meta):
def __new__(cls, name, this_bases, d):
return meta(name, bases, d)
@classmethod
def __prepare__(cls, name, this_bases):
return meta.__prepare__(name, bases)
return type.__new__(MetaClass, 'TemporaryClass', (), {})
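# Hedged usage sketch (the names Meta, Base and MyClass are illustrative only):
#
#     class MyClass(with_metaclass(Meta, Base)):
#         ...
#
# MyClass ends up created by metaclass Meta with base class Base on both Python 2
# and Python 3, without writing either version's metaclass syntax directly.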
def _import_module(name):
"""Import module, returning the module after the last dot."""
__import__(name)
return sys.modules[name]
class _LazyDescr(object):
def __init__(self, name):
self.name = name
def __get__(self, obj, tp):
result = self._resolve()
setattr(obj, self.name, result) # Invokes __set__.
try:
# This is a bit ugly, but it avoids running this again by
# removing this descriptor.
delattr(obj.__class__, self.name)
except AttributeError:
pass
return result
class MovedModule(_LazyDescr):
def __init__(self, name, old, new=None):
super(MovedModule, self).__init__(name)
if PY3:
if new is None:
new = name
self.mod = new
else:
self.mod = old
def _resolve(self):
return _import_module(self.mod)
def __getattr__(self, attr):
_module = self._resolve()
value = getattr(_module, attr)
setattr(self, attr, value)
return value
class _LazyModule(types.ModuleType):
def __init__(self, name):
super(_LazyModule, self).__init__(name)
self.__doc__ = self.__class__.__doc__
def __dir__(self):
attrs = ["__doc__", "__name__"]
attrs += [attr.name for attr in self._moved_attributes]
return attrs
# Subclasses should override this
_moved_attributes = []
class _MovedItems(_LazyModule):
"""Lazy loading of moved objects"""
__path__ = [] # mark as package
class _SixMetaPathImporter(object):
"""A meta path importer to import six.moves and its submodules.
This class implements a PEP302 finder and loader. It should be compatible
with Python 2.5 and all existing versions of Python3
"""
def __init__(self, six_module_name):
self.name = six_module_name
self.known_modules = {}
def _add_module(self, mod, *fullnames):
for fullname in fullnames:
self.known_modules[self.name + "." + fullname] = mod
def _get_module(self, fullname):
return self.known_modules[self.name + "." + fullname]
def find_module(self, fullname, path=None):
if fullname in self.known_modules:
return self
return None
def __get_module(self, fullname):
try:
return self.known_modules[fullname]
except KeyError:
raise ImportError("This loader does not know module " + fullname)
def load_module(self, fullname):
try:
# in case of a reload
return sys.modules[fullname]
except KeyError:
pass
mod = self.__get_module(fullname)
if isinstance(mod, MovedModule):
mod = mod._resolve()
else:
mod.__loader__ = self
sys.modules[fullname] = mod
return mod
def is_package(self, fullname):
"""
Return true, if the named module is a package.
We need this method to get correct spec objects with
Python 3.4 (see PEP451)
"""
return hasattr(self.__get_module(fullname), "__path__")
def get_code(self, fullname):
"""Return None
Required, if is_package is implemented"""
self.__get_module(fullname) # eventually raises ImportError
return None
get_source = get_code # same as get_code
_importer = _SixMetaPathImporter(__name__)
_moved_attributes = [MovedModule('pickle', 'cPickle', 'pickle')]
for attr in _moved_attributes:
setattr(_MovedItems, attr.name, attr)
if isinstance(attr, MovedModule):
_importer._add_module(attr, "moves." + attr.name)
del attr
_MovedItems._moved_attributes = _moved_attributes
moves = _MovedItems(__name__ + '.moves')
_importer._add_module(moves, 'moves')
|
<filename>temporary/cli_header.py
# cli.py Test of socket. Run on Pyboard D
import gc
gc.collect()
import usocket as socket
import uasyncio as asyncio
import ujson as json
import errno
gc.collect()
import network
import ubinascii
import machine
import uio
MY_ID = ubinascii.hexlify(machine.unique_id()).decode()
PORT = 8888
SERVER = '192.168.178.60'
ACK = -1
# Create message ID's. Initially 0 then 1 2 ... 254 255 1 2
def gmid():
mid = 0
while True:
yield mid
mid = (mid + 1) & 0xff
mid = mid if mid else 1
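# The sequence produced is 0, 1, 2, ..., 254, 255, 1, 2, ...: ID 0 is only ever used
# for the very first message, after which IDs cycle through 1-255.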
getmid = gmid()
async def run(loop):
s = network.WLAN()
print('Waiting for WiFi') # ESP8266 with stored connection
while not s.isconnected():
await asyncio.sleep_ms(200)
print('WiFi OK')
sock = socket.socket()
try:
serv = socket.getaddrinfo(SERVER, PORT)[0][-1] # server read
# If server is down OSError e.args[0] = 111 ECONNREFUSED
sock.connect(serv)
except OSError:
print('Connect fail.')
return
sock.setblocking(False)
loop.create_task(reader(sock))
loop.create_task(writer(sock))
async def reader(sock):
try:
print('Reader start')
last = -1
while True:
line = await readline(sock)
message = uio.StringIO(line)
preheader = bytearray(ubinascii.unhexlify(message.read(10)))
try:
data = json.load(message)
except Exception:
data = message.read()
finally:
message.close()
del message
mid = preheader[0]
print('Got', data)
            if last >= 0 and data[0] != last + 1:
raise OSError('Missed message')
last = data[0]
except Exception as e:
raise e
finally:
print("Reader stopped")
try:
print("Closing socket")
sock.close()
except:
pass
async def writer(sock):
print('Writer start')
data = [0, 'Message from client {!s}.'.format(MY_ID)]
try:
while True:
mid = next(getmid)
d = json.dumps(data)
preheader = bytearray(5)
preheader[0] = mid
preheader[1] = 0
            preheader[2] = (len(d) & 0xFF) - (1 if d.endswith("\n") else 0)
preheader[3] = (len(d) >> 8) & 0xFF # allows for 65535 message length
preheader[4] = 0 # special internal usages, e.g. for esp_link or ACKs
preheader = ubinascii.hexlify(preheader).decode()
d = '{}{}\n'.format(preheader, d)
await send(sock, d.encode('utf8'))
data[0] += 1
await asyncio.sleep_ms(253) # ???
except Exception as e:
raise e
finally:
print("Writer stopped")
async def readline(sock):
line = b''
while True:
if line.endswith(b'\n'):
return line.decode()
d = sock.readline()
if d == b'':
print("Connection closed")
raise OSError
if d is not None: # Something received
line = b''.join((line, d))
await asyncio.sleep(0)
async def send(sock, d): # Write a line to socket.
print("Sending", d)
while d:
try:
ns = sock.send(d)
except OSError as e:
err = e.args[0]
if err == errno.EAGAIN: # Would block: try later
print("EAGAIN send")
await asyncio.sleep_ms(100)
else:
d = d[ns:]
if d: # Partial write: trim data and pause
await asyncio.sleep_ms(20)
loop = asyncio.get_event_loop()
loop.create_task(run(loop))
loop.run_forever()
|
<gh_stars>0
# Copyright (c) 2020-2021, NVIDIA CORPORATION. All rights reserved.
#
# NVIDIA CORPORATION and its licensors retain all intellectual property
# and proprietary rights in and to this software, related documentation
# and any modifications thereto. Any use, reproduction, disclosure or
# distribution of this software and related documentation without an express
# license agreement from NVIDIA CORPORATION is strictly prohibited.
import os
import sys
import numpy as np
import torch
import nvdiffrast.torch as dr
import imageio
from PIL import Image, ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES=1
import pdb
#----------------------------------------------------------------------------
# Vector operations
#----------------------------------------------------------------------------
def dot(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
return torch.sum(x*y, -1, keepdim=True)
def reflect(x: torch.Tensor, n: torch.Tensor) -> torch.Tensor:
return 2*dot(x, n)*n - x
def length(x: torch.Tensor, eps: float =1e-20) -> torch.Tensor:
return torch.sqrt(torch.clamp(dot(x,x), min=eps)) # Clamp to avoid nan gradients because grad(sqrt(0)) = NaN
def safe_normalize(x: torch.Tensor, eps: float =1e-20) -> torch.Tensor:
return x / length(x, eps)
def to_hvec(x: torch.Tensor, w: float) -> torch.Tensor:
return torch.nn.functional.pad(x, pad=(0,1), mode='constant', value=w)
#----------------------------------------------------------------------------
# Tonemapping
#----------------------------------------------------------------------------
def tonemap_srgb(f: torch.Tensor) -> torch.Tensor:
return torch.where(f > 0.0031308, torch.pow(torch.clamp(f, min=0.0031308), 1.0/2.4)*1.055 - 0.055, 12.92*f)
#----------------------------------------------------------------------------
# sRGB color transforms
#----------------------------------------------------------------------------
def _rgb_to_srgb(f: torch.Tensor) -> torch.Tensor:
return torch.where(f <= 0.0031308, f * 12.92, torch.pow(torch.clamp(f, 0.0031308), 1.0/2.4)*1.055 - 0.055)
def rgb_to_srgb(f: torch.Tensor) -> torch.Tensor:
assert f.shape[-1] == 3 or f.shape[-1] == 4
out = torch.cat((_rgb_to_srgb(f[..., 0:3]), f[..., 3:4]), dim=-1) if f.shape[-1] == 4 else _rgb_to_srgb(f)
assert out.shape[0] == f.shape[0] and out.shape[1] == f.shape[1] and out.shape[2] == f.shape[2]
return out
def _srgb_to_rgb(f: torch.Tensor) -> torch.Tensor:
return torch.where(f <= 0.04045, f / 12.92, torch.pow((torch.clamp(f, 0.04045) + 0.055) / 1.055, 2.4))
def srgb_to_rgb(f: torch.Tensor) -> torch.Tensor:
assert f.shape[-1] == 3 or f.shape[-1] == 4
out = torch.cat((_srgb_to_rgb(f[..., 0:3]), f[..., 3:4]), dim=-1) if f.shape[-1] == 4 else _srgb_to_rgb(f)
assert out.shape[0] == f.shape[0] and out.shape[1] == f.shape[1] and out.shape[2] == f.shape[2]
return out
#----------------------------------------------------------------------------
# Displacement texture lookup
#----------------------------------------------------------------------------
def get_miplevels(texture: np.ndarray) -> float:
minDim = min(texture.shape[0], texture.shape[1])
return np.floor(np.log2(minDim))
# TODO: Handle wrapping maybe
def tex_2d(tex_map : torch.Tensor, coords : torch.Tensor, filter='nearest') -> torch.Tensor:
tex_map = tex_map[None, ...] # Add batch dimension
tex_map = tex_map.permute(0, 3, 1, 2) # NHWC -> NCHW
tex = torch.nn.functional.grid_sample(tex_map, coords[None, None, ...] * 2 - 1, mode=filter, align_corners=False)
tex = tex.permute(0, 2, 3, 1) # NCHW -> NHWC
return tex[0, 0, ...]
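# Shape note: an (H, W, C) texture sampled at (N, 2) uv coordinates in [0, 1] yields
# an (N, C) result (the batch and dummy-row dimensions added above are squeezed out).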
#----------------------------------------------------------------------------
# Image scaling
#----------------------------------------------------------------------------
def scale_img_hwc(x : torch.Tensor, size, mag='bilinear', min='area') -> torch.Tensor:
return scale_img_nhwc(x[None, ...], size, mag, min)[0]
def scale_img_nhwc(x : torch.Tensor, size, mag='bilinear', min='area') -> torch.Tensor:
assert (x.shape[1] >= size[0] and x.shape[2] >= size[1]) or (x.shape[1] < size[0] and x.shape[2] < size[1]), "Trying to magnify image in one dimension and minify in the other"
y = x.permute(0, 3, 1, 2) # NHWC -> NCHW
if x.shape[1] > size[0] and x.shape[2] > size[1]: # Minification, previous size was bigger
y = torch.nn.functional.interpolate(y, size, mode=min)
else: # Magnification
if mag == 'bilinear' or mag == 'bicubic':
y = torch.nn.functional.interpolate(y, size, mode=mag, align_corners=True)
else:
y = torch.nn.functional.interpolate(y, size, mode=mag)
return y.permute(0, 2, 3, 1).contiguous() # NCHW -> NHWC
def avg_pool_nhwc(x : torch.Tensor, size) -> torch.Tensor:
y = x.permute(0, 3, 1, 2) # NHWC -> NCHW
y = torch.nn.functional.avg_pool2d(y, size)
return y.permute(0, 2, 3, 1).contiguous() # NCHW -> NHWC
#----------------------------------------------------------------------------
# Behaves similar to tf.segment_sum
#----------------------------------------------------------------------------
def segment_sum(data: torch.Tensor, segment_ids: torch.Tensor) -> torch.Tensor:
num_segments = torch.unique_consecutive(segment_ids).shape[0]
# Repeats ids until same dimension as data
if len(segment_ids.shape) == 1:
s = torch.prod(torch.tensor(data.shape[1:], dtype=torch.int64, device='cuda')).long()
segment_ids = segment_ids.repeat_interleave(s).view(segment_ids.shape[0], *data.shape[1:])
assert data.shape == segment_ids.shape, "data.shape and segment_ids.shape should be equal"
shape = [num_segments] + list(data.shape[1:])
result = torch.zeros(*shape, dtype=torch.float32, device='cuda')
result = result.scatter_add(0, segment_ids, data)
return result
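# Illustrative example (assumes a CUDA device, since this helper allocates on 'cuda'):
#   data = torch.tensor([[1., 2.], [3., 4.], [5., 6.]], device='cuda')
#   segment_ids = torch.tensor([0, 0, 1], device='cuda')
#   segment_sum(data, segment_ids)  ->  tensor([[4., 6.], [5., 6.]], device='cuda:0')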
#----------------------------------------------------------------------------
# Projection and transformation matrix helpers.
#----------------------------------------------------------------------------
def projection(x=0.1, n=1.0, f=50.0):
return np.array([[n/x, 0, 0, 0],
[ 0, n/-x, 0, 0],
[ 0, 0, -(f+n)/(f-n), -(2*f*n)/(f-n)],
[ 0, 0, -1, 0]]).astype(np.float32)
def translate(x, y, z):
return np.array([[1, 0, 0, x],
[0, 1, 0, y],
[0, 0, 1, z],
[0, 0, 0, 1]]).astype(np.float32)
def rotate_x(a):
s, c = np.sin(a), np.cos(a)
return np.array([[1, 0, 0, 0],
[0, c, s, 0],
[0, -s, c, 0],
[0, 0, 0, 1]]).astype(np.float32)
def rotate_y(a):
s, c = np.sin(a), np.cos(a)
return np.array([[ c, 0, s, 0],
[ 0, 1, 0, 0],
[-s, 0, c, 0],
[ 0, 0, 0, 1]]).astype(np.float32)
def scale(s):
return np.array([[ s, 0, 0, 0],
[ 0, s, 0, 0],
[ 0, 0, s, 0],
[ 0, 0, 0, 1]]).astype(np.float32)
def lookAt(eye, at, up):
a = eye - at
b = up
w = a / np.linalg.norm(a)
u = np.cross(b, w)
u = u / np.linalg.norm(u)
v = np.cross(w, u)
translate = np.array([[1, 0, 0, -eye[0]],
[0, 1, 0, -eye[1]],
[0, 0, 1, -eye[2]],
[0, 0, 0, 1]]).astype(np.float32)
rotate = np.array([[u[0], u[1], u[2], 0],
[v[0], v[1], v[2], 0],
[w[0], w[1], w[2], 0],
[0, 0, 0, 1]]).astype(np.float32)
return np.matmul(rotate, translate)
def random_rotation_translation(t):
m = np.random.normal(size=[3, 3])
m[1] = np.cross(m[0], m[2])
m[2] = np.cross(m[0], m[1])
m = m / np.linalg.norm(m, axis=1, keepdims=True)
m = np.pad(m, [[0, 1], [0, 1]], mode='constant')
m[3, 3] = 1.0
m[:3, 3] = np.random.uniform(-t, t, size=[3])
return m
#----------------------------------------------------------------------------
# Cosine sample around a vector N
#----------------------------------------------------------------------------
def cosine_sample(N : np.ndarray) -> np.ndarray:
# construct local frame
N = N/np.linalg.norm(N)
dx0 = np.array([0, N[2], -N[1]])
dx1 = np.array([-N[2], 0, N[0]])
dx = dx0 if np.dot(dx0,dx0) > np.dot(dx1,dx1) else dx1
dx = dx/np.linalg.norm(dx)
dy = np.cross(N,dx)
dy = dy/np.linalg.norm(dy)
# cosine sampling in local frame
phi = 2.0*np.pi*np.random.uniform()
s = np.random.uniform()
costheta = np.sqrt(s)
sintheta = np.sqrt(1.0 - s)
# cartesian vector in local space
x = np.cos(phi)*sintheta
y = np.sin(phi)*sintheta
z = costheta
# local to world
return dx*x + dy*y + N*z
#----------------------------------------------------------------------------
# Cosine sampled light directions around the vector N
#----------------------------------------------------------------------------
def cosine_sample_texture(res, N : np.ndarray) -> torch.Tensor:
# construct local frame
N = N/np.linalg.norm(N)
dx0 = np.array([0, N[2], -N[1]])
dx1 = np.array([-N[2], 0, N[0]])
dx = dx0 if np.dot(dx0,dx0) > np.dot(dx1,dx1) else dx1
dx = dx/np.linalg.norm(dx)
dy = np.cross(N,dx)
dy = dy/np.linalg.norm(dy)
X = torch.tensor(dx, dtype=torch.float32, device='cuda')
Y = torch.tensor(dy, dtype=torch.float32, device='cuda')
Z = torch.tensor(N, dtype=torch.float32, device='cuda')
# cosine sampling in local frame
phi = 2.0*np.pi*torch.rand(res, res, 1, dtype=torch.float32, device='cuda')
s = torch.rand(res, res, 1, dtype=torch.float32, device='cuda')
costheta = torch.sqrt(s)
sintheta = torch.sqrt(1.0 - s)
# cartesian vector in local space
x = torch.cos(phi)*sintheta
y = torch.sin(phi)*sintheta
z = costheta
# local to world
return X*x + Y*y + Z*z
#----------------------------------------------------------------------------
# Bilinear downsample by 2x.
#----------------------------------------------------------------------------
def bilinear_downsample(x : torch.tensor) -> torch.Tensor:
w = torch.tensor([[1, 3, 3, 1], [3, 9, 9, 3], [3, 9, 9, 3], [1, 3, 3, 1]], dtype=torch.float32, device=x.device) / 64.0
w = w.expand(x.shape[-1], 1, 4, 4)
x = torch.nn.functional.conv2d(x.permute(0, 3, 1, 2), w, padding=1, stride=2, groups=x.shape[-1])
return x.permute(0, 2, 3, 1)
#----------------------------------------------------------------------------
# Bilinear downsample log(spp) steps
#----------------------------------------------------------------------------
def bilinear_downsample(x : torch.tensor, spp) -> torch.Tensor:
w = torch.tensor([[1, 3, 3, 1], [3, 9, 9, 3], [3, 9, 9, 3], [1, 3, 3, 1]], dtype=torch.float32, device=x.device) / 64.0
g = x.shape[-1]
w = w.expand(g, 1, 4, 4)
x = x.permute(0, 3, 1, 2) # NHWC -> NCHW
steps = int(np.log2(spp))
for _ in range(steps):
xp = torch.nn.functional.pad(x, (1,1,1,1), mode='replicate')
x = torch.nn.functional.conv2d(xp, w, padding=0, stride=2, groups=g)
return x.permute(0, 2, 3, 1).contiguous() # NCHW -> NHWC
#----------------------------------------------------------------------------
# Image display function using OpenGL.
#----------------------------------------------------------------------------
_glfw_window = None
def display_image(image, zoom=None, size=None, title=None): # HWC
# Import OpenGL and glfw.
import OpenGL.GL as gl
import glfw
# Zoom image if requested.
image = np.asarray(image)
if size is not None:
assert zoom is None
zoom = max(1, size // image.shape[0])
if zoom is not None:
image = image.repeat(zoom, axis=0).repeat(zoom, axis=1)
height, width, channels = image.shape
# Initialize window.
if title is None:
title = 'Debug window'
global _glfw_window
if _glfw_window is None:
glfw.init()
_glfw_window = glfw.create_window(width, height, title, None, None)
glfw.make_context_current(_glfw_window)
glfw.show_window(_glfw_window)
glfw.swap_interval(0)
else:
glfw.make_context_current(_glfw_window)
glfw.set_window_title(_glfw_window, title)
glfw.set_window_size(_glfw_window, width, height)
# Update window.
glfw.poll_events()
gl.glClearColor(0, 0, 0, 1)
gl.glClear(gl.GL_COLOR_BUFFER_BIT)
gl.glWindowPos2f(0, 0)
gl.glPixelStorei(gl.GL_UNPACK_ALIGNMENT, 1)
gl_format = {3: gl.GL_RGB, 2: gl.GL_RG, 1: gl.GL_LUMINANCE}[channels]
gl_dtype = {'uint8': gl.GL_UNSIGNED_BYTE, 'float32': gl.GL_FLOAT}[image.dtype.name]
gl.glDrawPixels(width, height, gl_format, gl_dtype, image[::-1])
glfw.swap_buffers(_glfw_window)
if glfw.window_should_close(_glfw_window):
return False
return True
#----------------------------------------------------------------------------
# Image save helper.
#----------------------------------------------------------------------------
def save_image(fn, x : np.ndarray) -> np.ndarray:
imageio.imwrite(fn, np.clip(np.rint(x * 255.0), 0, 255).astype(np.uint8))
def load_image(fn) -> np.ndarray:
#img = imageio.imread(fn)
#img = image.imread(fn)
img = Image.open(fn)
if img.mode != 'RGB':
img = img.convert('RGB')
img = np.asarray(img)
print(img.shape)
if img.dtype == np.float32: # HDR image
return img
else: # LDR image
return img.astype(np.float32) / 255
#----------------------------------------------------------------------------
def time_to_text(x):
if x > 3600:
return "%.2f h" % (x / 3600)
elif x > 60:
return "%.2f m" % (x / 60)
else:
return "%.2f s" % x
#----------------------------------------------------------------------------
def checkerboard(width, repetitions) -> np.ndarray:
tilesize = int(width//repetitions//2)
check = np.kron([[1, 0] * repetitions, [0, 1] * repetitions] * repetitions, np.ones((tilesize, tilesize)))*0.33 + 0.33
return np.stack((check, check, check), axis=-1)[None, ...]
|
<reponame>sanyaade-teachings/cep<gh_stars>100-1000
"""
This example demonstrates how to accelerate a facemesh estimation operation
using multiple oak-d cameras. First, frames are grab by one oak-d camera in constant
time. Then they are sent to 3 oak-d cameras for inference with an offset from each
other. The offset is measured and calculated such that after the first 5 frames,
each result can be retrieved in a constant time. This constant time is shorter
than the original inference time on one single oak-d camera.
"""
import time
import numpy as np
import queue
import threading
import cv2
import base64
from curt.command import CURTCommands
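# The offset scheduling described in the module docstring, sketched in plain Python.
# This is a hedged illustration only: it is not used by the example below and makes
# no CURT or oak-d calls. After the five warm-up requests issued in __main__, the
# handler queue keeps replaying the same worker order, so a facemesh result becomes
# available every frame interval even though one device needs several intervals per
# inference.
def _worker_schedule_sketch(num_frames, pattern=(1, 2, 1, 3, 1)):
    """Return which facemesh worker (1, 2 or 3) serves each frame index."""
    return [pattern[i % len(pattern)] for i in range(num_frames)]
# e.g. _worker_schedule_sketch(10) -> [1, 2, 1, 3, 1, 1, 2, 1, 3, 1]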
# Modify these to your own workers
# Format is "<host_name>/<module_type>/<service_name>/<worker_name>"
CHARLIE_PIPELINE_WORKER = "charlie/vision/oakd_service/oakd_pipeline"
OAKD2_PIPELINE_WORKER = "oakd2/vision/oakd_service/oakd_pipeline"
OAKD3_PIPELINE_WORKER = "oakd3/vision/oakd_service/oakd_pipeline"
RGB_CAMERA_WORKER = "charlie/vision/oakd_service/oakd_rgb_camera_input"
FACE_DETECTION_WORKER = "charlie/vision/oakd_service/oakd_face_detection"
FACEMESH_WORKER_1 = "charlie/vision/oakd_service/oakd_facemesh"
FACEMESH_WORKER_2 = "oakd2/vision/oakd_service/oakd_facemesh"
FACEMESH_WORKER_3 = "oakd3/vision/oakd_service/oakd_facemesh"
preview_width = 640
preview_height = 360
face_detection_nn_input_size = 300
facemesh_nn_input_size = 192
face_detection_confidence = 0.6
detect_largest_face_only = False
target_fps = 25
result_queue = queue.Queue()
frame_queue = queue.Queue()
frame_handler_queue = queue.Queue()
CURTCommands.initialize()
charlie_pipeline_config = [
["add_rgb_cam_node", preview_width, preview_heigth, False],
["add_rgb_cam_preview_node"],
[
"add_nn_node_pipeline",
"face_detection",
"face-detection-retail-0004_openvino_2021.2_6shave.blob",
face_detection_nn_input_size,
face_detection_nn_input_size,
],
["add_nn_node", "facemesh", "facemesh_sh6.blob", facemesh_nn_input_size, facemesh_nn_input_size],
]
oakd2_pipeline_config = [["add_nn_node", "facemesh", "facemesh_sh6.blob", facemesh_nn_input_size, facemesh_nn_input_size]]
oakd3_pipeline_config = [["add_nn_node", "facemesh", "facemesh_sh6.blob", facemesh_nn_input_size, facemesh_nn_input_size]]
charlie_pipeline_worker = CURTCommands.get_worker(
CHARLIE_PIPELINE_WORKER
)
oakd2_pipeline_worker = CURTCommands.get_worker(
OAKD2_PIPELINE_WORKER
)
oakd3_pipeline_worker = CURTCommands.get_worker(
OAKD3_PIPELINE_WORKER
)
config_handler_1 = CURTCommands.config_worker(charlie_pipeline_worker, charlie_pipeline_config)
config_handler_2 = CURTCommands.config_worker(oakd2_pipeline_worker, oakd2_pipeline_config)
config_handler_3 = CURTCommands.config_worker(oakd3_pipeline_worker, oakd3_pipeline_config)
success = CURTCommands.get_result(config_handler_1)["dataValue"]["data"]
success = CURTCommands.get_result(config_handler_2)["dataValue"]["data"]
success = CURTCommands.get_result(config_handler_3)["dataValue"]["data"]
def display_func():
global result_queue
global frame_queue
while True:
frame = frame_queue.get()
result = result_queue.get()
def grab_frame_func():
global frame_handler_queue
global frame_queue
while True:
frame_handler = frame_handler_queue.get()
# print(frame_handler.name)
image_data = CURTCommands.get_result(frame_handler)
# print(image_data["dataValue"]["worker"])
jpg_original = base64.b64decode(image_data["dataValue"]["data"])
jpg_as_np = np.frombuffer(jpg_original, dtype=np.uint8)
frame = cv2.imdecode(jpg_as_np, flags=1)
# print(frame.shape)
frame_queue.put(frame)
if __name__ == "__main__":
display_thread = threading.Thread(target=display_func, daemon=True)
grab_frame_thread = threading.Thread(target=grab_frame_func, daemon=True)
display_thread.start()
grab_frame_thread.start()
facemesh_handler_queue = queue.Queue()
rgb_camera_worker = CURTCommands.get_worker(
RGB_CAMERA_WORKER
)
face_detection_worker = CURTCommands.get_worker(
FACE_DETECTION_WORKER
)
facemesh_worker1 = CURTCommands.get_worker(
FACEMESH_WORKER_1
)
facemesh_worker2 = CURTCommands.get_worker(
FACEMESH_WORKER_2
)
facemesh_worker3 = CURTCommands.get_worker(
FACEMESH_WORKER_3
)
rgb_frame_handler = CURTCommands.request(
rgb_camera_worker, params=["get_rgb_frame"]
)
face_detection_handler = CURTCommands.request(
face_detection_worker, params=["detect_face_pipeline", face_detection_confidence, detect_largest_face_only]
)
facemesh_handler_1 = CURTCommands.request(
facemesh_worker1,
params=[rgb_frame_handler, face_detection_handler],
listen_to_handler=True,
)
frame_handler_queue.put(rgb_frame_handler)
facemesh_handler_queue.put([facemesh_handler_1, facemesh_worker1])
time.sleep(1.0 / target_fps)
rgb_frame_handler = CURTCommands.request(
rgb_camera_worker, params=["get_rgb_frame"]
)
face_detection_handler = CURTCommands.request(
face_detection_worker, params=["detect_face_pipeline", face_detection_confidence, detect_largest_face_only]
)
facemesh_handler_2 = CURTCommands.request(
facemesh_worker2, params=[rgb_frame_handler, face_detection_handler]
)
frame_handler_queue.put(rgb_frame_handler)
facemesh_handler_queue.put([facemesh_handler_2, facemesh_worker2])
time.sleep(1.0 / target_fps)
rgb_frame_handler = CURTCommands.request(
rgb_camera_worker, params=["get_rgb_frame"]
)
face_detection_handler = CURTCommands.request(
face_detection_worker, params=["detect_face_pipeline", face_detection_confidence, detect_largest_face_only]
)
facemesh_handler_1 = CURTCommands.request(
facemesh_worker1, params=[rgb_frame_handler, face_detection_handler]
)
frame_handler_queue.put(rgb_frame_handler)
facemesh_handler_queue.put([facemesh_handler_1, facemesh_worker1])
time.sleep(1.0 / target_fps)
rgb_frame_handler = CURTCommands.request(
rgb_camera_worker, params=["get_rgb_frame"]
)
face_detection_handler = CURTCommands.request(
face_detection_worker, params=["detect_face_pipeline", face_detection_confidence, detect_largest_face_only]
)
facemesh_handler_3 = CURTCommands.request(
facemesh_worker3, params=[rgb_frame_handler, face_detection_handler]
)
frame_handler_queue.put(rgb_frame_handler)
facemesh_handler_queue.put([facemesh_handler_3, facemesh_worker3])
time.sleep(1.0 / target_fps)
rgb_frame_handler = CURTCommands.request(
rgb_camera_worker, params=["get_rgb_frame"]
)
face_detection_handler = CURTCommands.request(
face_detection_worker, params=["detect_face_pipeline", face_detection_confidence, detect_largest_face_only]
)
facemesh_handler_1 = CURTCommands.request(
facemesh_worker1, params=[rgb_frame_handler, face_detection_handler]
)
frame_handler_queue.put(rgb_frame_handler)
facemesh_handler_queue.put([facemesh_handler_1, facemesh_worker1])
frame_count = 0
start_time = time.time()
while not facemesh_handler_queue.empty():
elapsed_time = time.time() - start_time
if elapsed_time >= 1:
print("FPS:", frame_count / elapsed_time)
frame_count = 0
start_time = time.time()
facemesh_handler, facemesh_worker = facemesh_handler_queue.get()
facemesh_result = CURTCommands.get_result(facemesh_handler)["dataValue"]["data"]
frame_count = frame_count + 1
result_queue.put(facemesh_result)
rgb_frame_handler = CURTCommands.request(
rgb_camera_worker, params=["get_rgb_frame"]
)
face_detection_handler = CURTCommands.request(
face_detection_worker, params=["detect_face_pipeline", face_detection_confidence, detect_largest_face_only]
)
facemesh_handler = CURTCommands.request(
facemesh_worker, params=[rgb_frame_handler, face_detection_handler]
)
frame_handler_queue.put(rgb_frame_handler)
facemesh_handler_queue.put([facemesh_handler, facemesh_worker])
time.sleep(1.0 / target_fps)
|
<reponame>rcamuccio/Zeus<gh_stars>0
# !/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Zeus Weather System
Date: 29 Mar 2020
Last update: 16 Jul 2021
"""
__author__ = "<NAME>"
__version__ = "2.0.0"
import json
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
import os
import requests
import time
import xml.etree.ElementTree as ET
from astropy.time import Time
from colored import fg, bg, attr
from datetime import datetime
from dateutil import tz
from twython import Twython
class Zeus:
def __init__(self):
self.__access_token = ""
self.__access_token_secret = ""
self.__consumer_key = ""
self.__consumer_secret = ""
self.__url_time = "https://api.sunrise-sunset.org/json?lat=33.5706&lng=-101.8553&formatted=0"
def get_cardinal_direction(self, wind_direction):
if wind_direction > 10 and wind_direction < 31:
cardinal_direction = "NNE"
elif wind_direction > 30 and wind_direction < 51:
cardinal_direction = "NE"
elif wind_direction > 50 and wind_direction < 71:
cardinal_direction = "ENE"
elif wind_direction > 70 and wind_direction < 101:
cardinal_direction = "E"
elif wind_direction > 100 and wind_direction < 121:
cardinal_direction = "ESE"
elif wind_direction > 120 and wind_direction < 141:
cardinal_direction = "SE"
elif wind_direction > 140 and wind_direction < 161:
cardinal_direction = "SSE"
elif wind_direction > 160 and wind_direction < 191:
cardinal_direction = "S"
elif wind_direction > 190 and wind_direction < 211:
cardinal_direction = "SSW"
elif wind_direction > 210 and wind_direction < 231:
cardinal_direction = "SW"
elif wind_direction > 230 and wind_direction < 251:
cardinal_direction = "WSW"
elif wind_direction > 250 and wind_direction < 281:
cardinal_direction = "W"
elif wind_direction > 280 and wind_direction < 301:
cardinal_direction = "WNW"
elif wind_direction > 300 and wind_direction < 321:
cardinal_direcion = "NW"
elif wind_direction > 320 and wind_direction < 341:
cardinal_direction = "NNW"
		elif wind_direction > 340 or wind_direction < 11:
cardinal_direction = "N"
return cardinal_direction
def get_color(self, parameter, value):
if parameter == "cloud_amount":
if value < 10:
color = "#003F7F"
elif value > 9 and value < 20:
color = "#135393"
elif value > 19 and value < 30:
color = "#2767A7"
elif value > 29 and value < 40:
color = "#4F8FCF"
elif value > 39 and value < 50:
color = "#63A3E3"
elif value > 49 and value < 60:
color = "#77B7F7"
elif value > 59 and value < 70:
color = "#9ADADA"
elif value > 69 and value < 80:
color = "#AEEEEE"
elif value > 79 and value < 90:
color = "#C2C2C2"
elif value > 89 and value < 100:
color = "#EAEAEA"
elif value > 99:
color = "#FBFBFB"
if parameter == "humidity":
if value < 25:
color = "#08035D"
elif value > 24 and value < 31:
color = "#0D4D8D"
elif value > 30 and value < 36:
color = "#3070B0"
elif value > 35 and value < 41:
color = "#4E8ECE"
elif value > 40 and value < 46:
color = "#71B1F1"
elif value > 45 and value < 51:
color = "#80C0C0"
elif value > 50 and value < 56:
color = "#09FEED"
elif value > 55 and value < 61:
color = "#55FAAD"
elif value > 60 and value < 66:
color = "#94FE6A"
elif value > 65 and value < 71:
color = "#EAFB16"
elif value > 70 and value < 76:
color = "#FEC600"
elif value > 75 and value < 81:
color = "#FC8602"
elif value > 80 and value < 86:
color = "#FE3401"
elif value > 85 and value < 91:
color = "#EA0000"
elif value > 90 and value < 96:
color = "#B70000"
elif value > 95 and value < 101:
color = "#E10000"
if parameter == "temperature":
if value < -40:
color = "#FC00FC"
elif value > -41 and value < -30:
color = "#000085"
elif value > -31 and value < -20:
color = "#0000B2"
elif value > -21 and value < -11:
color = "#0000EC"
elif value > -12 and value < -2:
color = "#0034FE"
elif value > -3 and value < 6:
color = "#0089FE"
elif value > 5 and value < 15:
color = "#00D4FE"
elif value > 14 and value < 24:
color = "#1EFEDE"
elif value > 23 and value < 33:
color = "#FBFBFB"
elif value > 32 and value < 42:
color = "#5EFE9E"
elif value > 41 and value < 51:
color = "#A2FE5A"
elif value > 50 and value < 60:
color = "#FEDE00"
elif value > 59 and value < 69:
color = "#FE9E00"
elif value > 68 and value < 78:
color = "#FE5A00"
elif value > 77 and value < 87:
color = "#FE1E00"
elif value > 86 and value < 96:
color = "#E20000"
elif value > 95 and value < 105:
color = "#A90000"
elif value > 104 and value < 114:
color = "#7E0000"
elif value > 113:
color = "#C6C6C6"
if parameter == "wind_speed":
if value < 6:
color = "#003F7F"
elif value > 5 and value < 12:
color = "#2C6CAC"
elif value > 11 and value < 17:
color = "#63A3E3"
elif value > 16 and value < 29:
color = "#95D5D5"
elif value > 28 and value < 46:
color = "#C7C7C7"
elif value > 45:
color = "#F9F9F9"
return color
def get_dst_index(self):
response = requests.get(url="https://services.swpc.noaa.gov/products/kyoto-dst.json")
response_text = json.loads(response.text)
time_tag = response_text[len(response_text) - 1][0]
dst = response_text[len(response_text) - 1][1]
return time_tag, dst
def get_forecast(self):
response = requests.get(url="https://forecast.weather.gov/MapClick.php?lat=33.5706&lon=-101.8553&FcstType=digitalDWML")
response_status_code = response.status_code
response_content_type = response.headers["content-type"]
response_encoding = response.encoding
xml_content = response.content
root = ET.fromstring(xml_content)
element_creation_date = root[0][0][0]
element_time_layout = root[1][2]
element_dew_point = root[1][3][0]
element_heat_index = root[1][3][1]
element_wind_speed = root[1][3][2]
element_cloud_amount = root[1][3][3]
element_precipitation = root[1][3][4]
element_humidity = root[1][3][5]
element_wind_direction = root[1][3][6]
element_temperature = root[1][3][7]
element_gust = root[1][3][8]
element_hourly_qpf = root[1][3][9]
element_conditions = root[1][3][10]
# Location
for element in root.iter("point"):
latitude = element.attrib["latitude"]
longitude = element.attrib["longitude"]
latitude_float = float(latitude)
longitude_float = float(longitude)
for element in root.iter("city"):
location_str = element.text
for element in root.iter("height"):
element_height_str = element.text
# Time
time_start_list = []
time_start_jd_list = []
for i in range(1, 337, 2):
time_start = element_time_layout[i]
time_start_str = time_start.text
if time_start_str.endswith("-05:00"):
time_start_str = time_start_str[:-6]
time_start_list.append(time_start_str)
time_start_obj = Time(time_start_str, format="isot", scale="utc")
time_start_jd = time_start_obj.jd
time_start_jd_list.append(time_start_jd)
# Weather conditions
dew_point_list = []
heat_index_list = []
wind_speed_list = []
cloud_amount_list = []
prob_of_precip_list = []
humidity_list = []
wind_direction_list = []
temperature_list = []
gust_list = []
hourly_qpf_list = []
for i in range(0, 168):
# Dew point list
dew_point = element_dew_point[i]
dew_point_str = dew_point.text
dew_point_int = int(dew_point_str)
dew_point_list.append(dew_point_int)
# Heat index list
heat_index = element_heat_index[i]
heat_index_str = heat_index.text
if heat_index_str == None:
heat_index_int = 0
else:
heat_index_int = int(heat_index_str)
heat_index_list.append(heat_index_int)
# Wind speed list
wind_speed = element_wind_speed[i]
wind_speed_str = wind_speed.text
wind_speed_int = int(wind_speed_str)
wind_speed_list.append(wind_speed_int)
# Cloud amount list
cloud_amount = element_cloud_amount[i]
cloud_amount_str = cloud_amount.text
cloud_amount_int = int(cloud_amount_str)
cloud_amount_list.append(cloud_amount_int)
# Probability of precipitation list
prob_of_precip = element_precipitation[i]
prob_of_precip_str = prob_of_precip.text
prob_of_precip_int = int(prob_of_precip_str)
prob_of_precip_list.append(prob_of_precip_int)
# Humidity list
humidity = element_humidity[i]
humidity_str = humidity.text
humidity_int = int(humidity_str)
humidity_list.append(humidity_int)
# Wind direction list
wind_direction = element_wind_direction[i]
wind_direction_str = wind_direction.text
wind_direction_int = int(wind_direction_str)
wind_direction_list.append(wind_direction_int)
# Temperature list
temperature = element_temperature[i]
temperature_str = temperature.text
temperature_int = int(temperature_str)
temperature_list.append(temperature_int)
# Gust list
gust = element_gust[i]
gust_str = gust.text
if gust_str == None:
gust_int = 0
else:
gust_int = int(gust_str)
gust_list.append(gust_int)
# Hourly QPF list
hourly_qpf = element_hourly_qpf[i]
hourly_qpf_str = hourly_qpf.text
if hourly_qpf_str == None:
hourly_qpf_float = 0.0
else:
hourly_qpf_float = float(hourly_qpf_str)
hourly_qpf_list.append(hourly_qpf_float)
response = requests.get(url="https://forecast.weather.gov/MapClick.php?lat=33.5706&lon=-101.8553&unit=0&lg=english&FcstType=dwml")
response_status_code = response.status_code
response_content_type = response.headers["content-type"]
response_encoding = response.encoding
xml_content = response.content
root = ET.fromstring(xml_content)
pressure = root[2][3][8][0]
pressure_str = pressure.text
visibility = root[2][3][3][2][0][0]
visibility_str = visibility.text
return {"latitude" : latitude_float,
"longitude" : longitude_float,
"location" : location_str,
"height" : element_height_str,
"time" : time_start_list,
"time_jd" : time_start_jd_list,
"dew_point" : dew_point_list,
"heat_index" : heat_index_list,
"wind_speed" : wind_speed_list,
"cloud_amount" : cloud_amount_list,
"prob_of_precip" : prob_of_precip_list,
"humidity" : humidity_list,
"wind_direction" : wind_direction_list,
"temperature" : temperature_list,
"gust" : gust_list,
"hourly_qpf" : hourly_qpf_list,
"pressure" : pressure_str,
"visibility" : visibility_str}
def get_k_index(self):
response = requests.get(url="https://services.swpc.noaa.gov/products/noaa-estimated-planetary-k-index-1-minute.json")
response_text = json.loads(response.text)
time_tag = response_text[len(response_text) - 1][0]
k_index = response_text[len(response_text) - 1][1]
return time_tag, k_index
def get_radio_flux(self):
response = requests.get(url="https://services.swpc.noaa.gov/products/10cm-flux-30-day.json")
response_text = json.loads(response.text)
time_tag = response_text[len(response_text) - 1][0]
flux = response_text[len(response_text) - 1][1]
return time_tag, flux
def get_solar_magnetic_field(self):
response = requests.get(url="https://services.swpc.noaa.gov/products/solar-wind/mag-5-minute.json")
response_text = json.loads(response.text)
time_tag = response_text[len(response_text) - 1][0]
b_total = response_text[len(response_text) - 1][6]
return time_tag, b_total
def get_solar_wind(self):
response = requests.get(url="https://services.swpc.noaa.gov/products/solar-wind/plasma-5-minute.json")
response_text = json.loads(response.text)
time_tag = response_text[len(response_text) - 1][0]
density = response_text[len(response_text) - 1][1]
speed = response_text[len(response_text) - 1][2]
temperature = response_text[len(response_text) - 1][3]
return time_tag, density, speed, temperature
def get_xray_flux(self):
response = requests.get(url="https://services.swpc.noaa.gov/json/goes/primary/xray-flares-latest.json")
response_text = json.loads(response.text)
current_time = response_text[0]["time_tag"]
current_class = response_text[0]["current_class"]
max_time = response_text[0]["max_time"]
max_class = response_text[0]["max_class"]
max_flux = response_text[0]["max_xrlong"]
return current_time, current_class, max_time, max_class, max_flux
def get_times(self):
from_zone = tz.tzutc()
to_zone = tz.tzlocal()
response_sunset = requests.get(url=self.__url_time)
response_sunset_status_code = response_sunset.status_code
response_sunset_content_type = response_sunset.headers["content-type"]
response_sunset_encoding = response_sunset.encoding
parse_response_sunset = json.loads(response_sunset.text)
# Begin astronomical twilight
astronomical_twilight_begin_str = parse_response_sunset["results"]["astronomical_twilight_begin"]
astronomical_twilight_begin_datetime = datetime.strptime(astronomical_twilight_begin_str, "%Y-%m-%dT%H:%M:%S+00:00")
astronomical_twilight_begin_datetime_utc = astronomical_twilight_begin_datetime.replace(tzinfo=from_zone)
astronomical_twilight_begin_datetime_local = astronomical_twilight_begin_datetime_utc.astimezone(to_zone)
# Begin nautical twilight
nautical_twilight_begin_str = parse_response_sunset["results"]["nautical_twilight_begin"]
nautical_twilight_begin_datetime = datetime.strptime(nautical_twilight_begin_str, "%Y-%m-%dT%H:%M:%S+00:00")
nautical_twilight_begin_datetime_utc = nautical_twilight_begin_datetime.replace(tzinfo=from_zone)
nautical_twilight_begin_datetime_local = nautical_twilight_begin_datetime_utc.astimezone(to_zone)
# Begin civil twilight
civil_twilight_begin_str = parse_response_sunset["results"]["civil_twilight_begin"]
civil_twilight_begin_datetime = datetime.strptime(civil_twilight_begin_str, "%Y-%m-%dT%H:%M:%S+00:00")
civil_twilight_begin_datetime_utc = civil_twilight_begin_datetime.replace(tzinfo=from_zone)
civil_twilight_begin_datetime_local = civil_twilight_begin_datetime_utc.astimezone(to_zone)
# Sunrise
sunrise_str = parse_response_sunset["results"]["sunrise"]
sunrise_datetime = datetime.strptime(sunrise_str, "%Y-%m-%dT%H:%M:%S+00:00")
sunrise_datetime_utc = sunrise_datetime.replace(tzinfo=from_zone)
sunrise_datetime_local = sunrise_datetime_utc.astimezone(to_zone)
# Solar noon
solar_noon_str = parse_response_sunset["results"]["solar_noon"]
solar_noon_datetime = datetime.strptime(solar_noon_str, "%Y-%m-%dT%H:%M:%S+00:00")
solar_noon_datetime_utc = solar_noon_datetime.replace(tzinfo=from_zone)
solar_noon_datetime_local = solar_noon_datetime_utc.astimezone(to_zone)
# Sunset
sunset_str = parse_response_sunset["results"]["sunset"]
sunset_datetime = datetime.strptime(sunset_str, "%Y-%m-%dT%H:%M:%S+00:00")
sunset_datetime_utc = sunset_datetime.replace(tzinfo=from_zone)
sunset_datetime_local = sunset_datetime_utc.astimezone(to_zone)
# End civil twilight
civil_twilight_end_str = parse_response_sunset["results"]["civil_twilight_end"]
civil_twilight_end_datetime = datetime.strptime(civil_twilight_end_str, "%Y-%m-%dT%H:%M:%S+00:00")
civil_twilight_end_datetime_utc = civil_twilight_end_datetime.replace(tzinfo=from_zone)
civil_twilight_end_datetime_local = civil_twilight_end_datetime_utc.astimezone(to_zone)
# End nautical twilight
nautical_twilight_end_str = parse_response_sunset["results"]["nautical_twilight_end"]
nautical_twilight_end_datetime = datetime.strptime(nautical_twilight_end_str, "%Y-%m-%dT%H:%M:%S+00:00")
nautical_twilight_end_datetime_utc = nautical_twilight_end_datetime.replace(tzinfo=from_zone)
nautical_twilight_end_datetime_local = nautical_twilight_end_datetime_utc.astimezone(to_zone)
# End astronomical twilight
astronomical_twilight_end_str = parse_response_sunset["results"]["astronomical_twilight_end"]
astronomical_twilight_end_datetime = datetime.strptime(astronomical_twilight_end_str, "%Y-%m-%dT%H:%M:%S+00:00")
astronomical_twilight_end_datetime_utc = astronomical_twilight_end_datetime.replace(tzinfo=from_zone)
astronomical_twilight_end_datetime_local = astronomical_twilight_end_datetime_utc.astimezone(to_zone)
return {"astronomical_twilight_begin" : astronomical_twilight_begin_datetime_local,
"nautical_twilight_begin" : nautical_twilight_begin_datetime_local,
"civil_twilight_begin" : civil_twilight_begin_datetime_local,
"sunrise" : sunrise_datetime_local,
"solar_noon" : solar_noon_datetime_local,
"sunset" : sunset_datetime_local,
"civil_twilight_end" : civil_twilight_end_datetime_local,
"nautical_twilight_end" : nautical_twilight_end_datetime_local,
"astronomical_twilight_end" : astronomical_twilight_end_datetime_local}
"""
def send_tweet(self, message="#ctmosays Hello world!"):
twitter = Twython(self.__consumer_key, self.__consumer_secret, self.__access_token, self.__access_token_secret)
#twitter.update_status(status = message)
print(" Tweeted: %s" % message)
return
"""
if __name__ == "__main__":
os.system("clear")
start_time = time.time()
zeus = Zeus()
print(" ZEUS Weather System v2.0")
res = attr("reset")
time_now = datetime.now()
times = zeus.get_times()
astronomical_twilight_begin = times["astronomical_twilight_begin"]
nautical_twilight_begin = times["nautical_twilight_begin"]
civil_twilight_begin = times["civil_twilight_begin"]
sunrise = times["sunrise"]
solar_noon = times["solar_noon"]
sunset = times["sunset"]
civil_twilight_end = times["civil_twilight_end"]
nautical_twilight_end = times["nautical_twilight_end"]
astronomical_twilight_end = times["astronomical_twilight_end"]
forecast = zeus.get_forecast()
latitude = forecast["latitude"]
longitude = forecast["longitude"]
location = forecast["location"]
height = forecast["height"]
temperature = forecast["temperature"][0]
dew_point = forecast["dew_point"][0]
heat_index = forecast["heat_index"][0]
wind_speed = forecast["wind_speed"][0]
wind_direction = forecast["wind_direction"][0]
cardinal_direction = zeus.get_cardinal_direction(wind_direction)
gust = forecast["gust"][0]
humidity = forecast["humidity"][0]
cloud_amount = forecast["cloud_amount"][0]
prob_of_precip = forecast["prob_of_precip"][0]
hourly_qpf = forecast["hourly_qpf"][0]
pressure = forecast["pressure"]
visibility = forecast["visibility"]
tencm_flux = zeus.get_radio_flux()
radio_flux_time = tencm_flux[0]
radio_flux = tencm_flux[1]
b_field = zeus.get_solar_magnetic_field()
b_time = b_field[0]
b_total = b_field[1]
solar_wind = zeus.get_solar_wind()
solar_wind_time = solar_wind[0]
solar_wind_density = solar_wind[1]
solar_wind_speed = solar_wind[2]
solar_wind_temperature = solar_wind[3]
kyoto_dst = zeus.get_dst_index()
dst_time = kyoto_dst[0]
dst = kyoto_dst[1]
planetary_k = zeus.get_k_index()
k_index_time = planetary_k[0]
k_index = planetary_k[1]
xray_flares = zeus.get_xray_flux()
xray_current_time = xray_flares[0]
xray_current_class = xray_flares[1]
xray_max_time = xray_flares[2]
xray_max_class = xray_flares[3]
xray_max_flux = xray_flares[4]
print()
print("\033[1m" + " [Location]" + "\033[0m", location, (latitude, longitude))
print(" ", height, "ft")
print()
print("\033[1m" + " [Time]" + "\033[0m", time_now.date(), time_now.strftime("%H:%M:%S"))
print()
print(" [BMAT]", astronomical_twilight_begin.strftime("%H:%M:%S"), "| [Rise]", sunrise.strftime("%H:%M:%S"), "| [EECT]", civil_twilight_end.strftime("%H:%M:%S"))
print(" [BMNT]", nautical_twilight_begin.strftime("%H:%M:%S"), "| [Noon]", solar_noon.strftime("%H:%M:%S"), "| [EENT]", nautical_twilight_end.strftime("%H:%M:%S"))
print(" [BMCT]", civil_twilight_begin.strftime("%H:%M:%S"), "| [Set] ", sunset.strftime("%H:%M:%S"), "| [EEAT]", astronomical_twilight_end.strftime("%H:%M:%S"))
print()
print("\033[1m" + " [Weather]" + "\033[0m" + " --------------------------------------------")
print()
print(" [Temperature] ", temperature, "F")
print(" [Heat Index] ", heat_index, "F")
print(" [Dew Point] ", dew_point, "F")
print()
print(" [Humidity] ", humidity, "%")
print(" [Pressure] ", pressure, "mmHg")
print(" [Visibility] ", visibility, "mi")
print()
print(" [Clouds] ", cloud_amount, "%")
print(" [Wind] ", cardinal_direction, wind_speed, "mph")
print(" [Gusts] ", gust, "mph")
print(" ", )
print(" [Precipitation] ", str(prob_of_precip) + "%")
print(" [QPF] ", hourly_qpf, "in/hr")
print()
print("\033[1m" + " [Solar]" + "\033[0m" + " ----------------------------------------------")
print()
print(" [Wind Speed] ", solar_wind_speed, "km/s")
print(" [Density] ", solar_wind_density, "protons/cm^3")
print(" [Temperature] ", solar_wind_temperature, "K")
print()
print(" [F10.7 Flux] ", radio_flux, "sfu at")
print(" [B Field] ", b_total, "nT")
print()
print(" [X-ray Current] ", xray_current_class)
print(" [X-ray Max] ", xray_max_class, " " + "{:0.2e}".format(xray_max_flux) + " W/m^2")
print(" [K_p Index] ", k_index)
print(" [DST Index] ", dst)
##############################################################################################
print()
print("\033[1m" + " [Forecast]" + "\033[0m" + " -------------------------------------------")
print()
block_day = " Date (0000 CDT) "
block_hour = " "
block_temperature = " Temperature (F) "
block_humidity = " Humidity (%) "
block_cloud_amount =" Clouds (%) "
block_wind_speed = " Wind (mph) "
days = 5
blocks = days * 24
i = 0
j = 0
while i < blocks:
month = datetime.fromisoformat(forecast["time"][i]).month
day = datetime.fromisoformat(forecast["time"][i]).day
hour = datetime.fromisoformat(forecast["time"][i]).hour
if hour == 0:
hour_str = "|"
block_hour += hour_str
block_day += str(month) + "/" + str(day)
j = 3
else:
if j > 0:
hour_str = " "
block_hour += hour_str
block_day += ""
j -= 1
else:
hour_str = " "
block_hour += hour_str
block_day += " "
# Build temperature blocks
temperature = forecast["temperature"][i]
color_temperature = zeus.get_color("temperature", temperature)
block_temperature += fg(color_temperature) + u"\u25A0" + res
# Build humidity blocks
humidity = forecast["humidity"][i]
color_humidity = zeus.get_color("humidity", humidity)
block_humidity += fg(color_humidity) + u"\u25A0" + res
# Build cloud amount blocks
cloud_amount = forecast["cloud_amount"][i]
color_cloud_amount = zeus.get_color("cloud_amount", cloud_amount)
block_cloud_amount += fg(color_cloud_amount) + u"\u25A0" + res
# Build wind speed blocks
wind_speed = forecast["wind_speed"][i]
color_wind_speed = zeus.get_color("wind_speed", wind_speed)
block_wind_speed += fg(color_wind_speed) + u"\u25A0" + res
i += 1
print(block_day)
print(block_hour)
print(block_temperature)
print(block_humidity)
print(block_hour)
print(block_cloud_amount)
print(block_wind_speed)
print(block_hour)
print()
##############################################################################################
end_time = time.time()
total_time = end_time - start_time
print(" ZEUS ended after", "%.1f" % total_time, "seconds")
print() |
import pandas
import xmltodict
import os
from urllib import parse
from app.modules.baseClient import BaseClient
from .models.BusStation import BusStation
from .models.BusRoute import BusRoute
from .models.BusStationAround import BusStationAround
from .models.UlsanArrival import UlsanBusArrival
from .KoreaBIS import KoreaBIS
from app.directory import directory
from app.modules.errors import *
from app.utils import haversine, get_list_from_ordered_dict, get_float
class UlsanBIS(BaseClient):
def __init__(self, token: str, korea_token: str = None):
super().__init__("http://openapi.its.ulsan.kr")
self.token = token
self.korea_token = korea_token or token
self.korea_client = KoreaBIS(korea_token, 26)
with open(
os.path.join(directory, "data", "ulsan_busstop.xml"),
"r", encoding='utf8'
) as fp:
self._station_data = xmltodict.parse(fp.read())
with open(
os.path.join(directory, "data", "ulsan_bus.xml"),
"r", encoding='utf8'
) as fp:
self._bus_data = xmltodict.parse(fp.read())
def request(self, **kwargs):
params = {
'serviceKey': self.token
}
return super(UlsanBIS, self).request(_default_params=params, _default_xml=True, **kwargs)
def get_station_data(self):
rows = []
for station in self._station_data['tableInfo']['list']['row']:
rows.append({
"id": station.get("STOPID"),
"name": station.get("STOPNAME"),
"posX": get_float(station.get("STOPX")),
"posY": get_float(station.get("STOPY")),
"displayId": 0
})
return pandas.DataFrame(rows, columns=['id', 'name', 'posX', 'posY', 'displayId'])
def get_bus_data(self):
rows = []
for station in self._bus_data['tableInfo']['list']['row']:
rows.append({
"id": station.get("BRTID"),
"departure": station.get("STOPSTID"),
"destination": station.get("STOPEDID"),
"name": station.get("BRTNO"),
"type": station.get("BRTTYPE"),
"direction": station.get("DIRECTION"),
"displayId": station.get("DISPLAYID")
})
return pandas.DataFrame(
rows,
columns=[
"id", "departure", "destination", "name", "type", "direction"
])
def get_station(self, name: str):
data = self.get_station_data()
result = data[data['name'].str.contains(name)].to_dict('records')
return [BusStation.from_ulsan(x) for x in result]
def get_station_around(
self,
pos_x: float,
pos_y: float,
radius: int = 500
):
data = self.get_station_data().to_dict('records')
result = []
for station in data:
station['distance'] = haversine(station['posX'], station['posY'], pos_x, pos_y)
if station['distance'] < radius:
result.append(
BusStationAround.from_ulsan(station)
)
return result
def get_route(self, station_id: int):
bus_ids = [str(x.id).lstrip("USB") for x in self.korea_client.get_route(
"USB{}".format(station_id)
)]
result = []
bus_data = self.get_bus_data()
for bus_id in bus_ids:
result += bus_data[bus_data['id'] == bus_id].to_dict("records")
return [BusRoute.from_ulsan(x) for x in result]
def get_arrival(self, station_id: int):
data = self.get(
path="/UlsanAPI/getBusArrivalInfo.xo",
params={
"stopid": station_id,
"pageNo": 1,
"numOfRows": 1000
}
)
result = data['tableInfo']
# HEAD AND BODY
body = result['list']
if body is None:
raise EmptyData()
item_list = body['row']
return [UlsanBusArrival(x) for x in get_list_from_ordered_dict(item_list)]
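# Hedged usage sketch (the API keys and stop id below are placeholders, not real values):
#   client = UlsanBIS(token="<ULSAN_ITS_KEY>", korea_token="<KOREA_BIS_KEY>")
#   stations = client.get_station("<stop name>")
#   arrivals = client.get_arrival(station_id=12345)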
|
<reponame>kding1225/PackDet<gh_stars>1-10
import itertools
import torch
import torch.nn.functional as F
from torch import nn
from core.layers import ConvBlock, NoopLayer
from core.utils.registry import Registry
from core.modeling.rpn.utils import meshgrid
MONTAGE_BOXES = Registry()
# register levels
MONTAGE_LEVELS = {
'type-1': [0, 1, 2, 3, 4],
'type-2': [0, 1, 2, 3, 4, 2, 3, 4],
'type-3': [0, 1, 2, 3, 4],
'type-4': [0, 1, 2, 3, 4, 2, 3, 4],
'type-5': [0, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4],
'type-6': [0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4],
'type-7': [0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4],
'type-8': [0, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4],
'type-9': [0, 1, 2, 2, 3, 4, 3, 4, 3, 4, 3, 4, 3, 4],
'type-stair1': [1, 1, 1, 1],
'type-stair2': [2, 2, 2, 2],
'type-stair3': [3, 3, 3, 3],
'type-stair4': [4, 4, 4, 4],
'type-sbs1': [1, 1, 1, 1],
'type-sbs2': [2, 2, 2, 2],
'type-sbs3': [3, 3, 3, 3],
'type-sbs4': [4, 4, 4, 4],
'type-grid1': [1, 1, 1, 1],
'type-grid2': [2, 2, 2, 2],
'type-grid3': [3, 3, 3, 3],
'type-grid4': [4, 4, 4, 4]
}
class BasicMontageBlock(torch.nn.Module):
"""
montage block to pack features from different scales
"""
def __init__(self, fpn_strides, montage_box, montage_levels, device='cpu'):
super(BasicMontageBlock, self).__init__()
self.fpn_strides = fpn_strides
self.device = device
self.montage_box = montage_box
self.levels = montage_levels
def forward(self, features, sizes, visualizer=None):
"""
put all features P3-P7 in a large feature map to get the montage feature map
"""
N, C = features[0].shape[:2]
boxes, mon_height, mon_width = self.montage_box(sizes, self.device)
mon_feature = features[0].new_zeros(N, C, mon_height, mon_width) # no-grad
locations_map = -features[0].new_ones((mon_height, mon_width, 2), dtype=torch.float)
scales_map = features[0].new_zeros((mon_height, mon_width), dtype=torch.long)
# copy features, locations and scales
all_locations = self.compute_locations(sizes)
for i, (level, feat, box, loc) in enumerate(zip(self.levels, features, boxes, all_locations)):
x0, y0, x1, y1 = box
mon_feature[..., y0:y1, x0:x1] = feat
locations_map[y0:y1, x0:x1, :] = loc
scales_map[y0:y1, x0:x1] = level
montage_info = dict(
locations_map=locations_map,
ranges=boxes,
scales_map=scales_map,
levels=self.levels
)
return mon_feature, montage_info
def compute_locations(self, sizes):
locations = []
for l, siz in enumerate(sizes):
h, w = siz
locations_per_level = self.compute_locations_per_level(
h, w, self.fpn_strides[l],
self.device
)
locations.append(locations_per_level)
        locations = [locations[l] + 0.5 * self.fpn_strides[l] for l in self.levels]
return locations
@staticmethod
def compute_locations_per_level(h, w, stride, device):
shift_y, shift_x = meshgrid(h, w, stride, device, dtype=torch.float32)
locations = torch.cat([shift_x[..., None], shift_y[..., None]], dim=2)
return locations
def plot_montage(boxes, height, width):
import numpy as np
import matplotlib.pyplot as plt
boxes = boxes.cpu().numpy()
scale_map = -np.ones((height, width), dtype=float)
for i, box in enumerate(boxes):
x0, y0, x1, y1 = box
scale_map[y0:y1, x0:x1] = i+1
plt.figure(figsize=(15, 10))
plt.imshow(scale_map)
plt.show()
# plt.savefig("scale_map.png")
return scale_map
# *********************** montage boxes ****************************
# different methods to generate boxes, when adding a new montage kind
# please register it to MONTAGE_BOXES
def montage_pos_type12(sizes, device, use_extra_features=False):
"""
type-1 and type-2 montage positioning, need image size divisible by 32
sizes: list[(h,w)]
"""
assert len(sizes) == 5
a0, b0, a1, b1, a2, b2, a3, b3, a4, b4 = list(itertools.chain(*sizes))
boxes = [
[0, 0, b0, a0],
[0, a0, b1, a0 + a1],
[b1, a0, b1 + b2, a0 + a2],
[b1 + b2, a0, b1 + b2 + b3, a0 + a3],
[b1 + b2 + b3, a0, b1 + b2 + b3 + b4, a0 + a4]
]
if use_extra_features:
boxes.extend([
[b1, a0 + a2, b1 + b2, a0 + a1],
[b1 + b2, a0 + a2, b1 + b2 + b3, a0 + a2 + a3],
[b1 + b2 + b3, a0 + a2, b1 + b2 + b3 + b4, a0 + a2 + a4]
])
boxes = torch.tensor(boxes).to(device).long()
mon_height, mon_width = a0 + a1, b0
return boxes, mon_height, mon_width
@MONTAGE_BOXES.register("type-1")
def montage_pos_type1(sizes, device='cpu'):
return montage_pos_type12(sizes, device, False)
@MONTAGE_BOXES.register("type-2")
def montage_pos_type2(sizes, device='cpu'):
return montage_pos_type12(sizes, device, True)
def montage_pos_type34e_(sizes, device, use_extra_features=False):
"""
type-3 and type-4 montage positioning, need image size divisible by 32
sizes: list[(h,w)]
"""
assert len(sizes) == 5
a0, b0, a1, b1, a2, b2, a3, b3, a4, b4 = list(itertools.chain(*sizes))
boxes = [
[b0 - b2 * 2 - b1, a1 - a2 - a3 - a4, 2*b0 - b2 * 2 - b1, a0 + a1 - a2 - a3 - a4], # 0
[b0 - b2 * 2-b1, a0 + a1 - a2 - a3 - a4, b0 - b2 * 2, a0 + a1 - a2 - a3 - a4+a1], # 1
[b0 - b2, a0 + a1 - a2, b0, a0 + a1], # 2
[b0 - b2 - b3, a0 + a1 - a2 - a3, b0 - b2, a0 + a1 - a2], # 3
[b0 - b2 - b3 - b4, a0 + a1 - a2 - a3 - a4, b0 - b2 - b3, a0 + a1 - a2 - a3], # 4
]
if use_extra_features:
boxes.extend([
[b0 - b2 * 2, a0 + a1 - a2, b0 - b2, a0 + a1], # 2'
[b0 - b3, a0 + a1 - a2 - a3, b0, a0 + a1 - a2], # 3'
[b0 - b3 - b4, a0 + a1 - a2 - a3 - a4, b0 - b3, a0 + a1 - a2 - a3] # 4'
])
boxes = torch.tensor(boxes).to(device).long()
xy_min = torch.min(boxes[:, :2], dim=0, keepdim=True)[0]
boxes[:, :2] = boxes[:, :2] - xy_min
boxes[:, 2:] = boxes[:, 2:] - xy_min
mon_height, mon_width = torch.max(boxes[:, 3]), torch.max(boxes[:, 2])
return boxes, mon_height, mon_width
def montage_pos_type34(sizes, device, use_extra_features=False):
"""
type-3 and type-4 montage positioning, need image size divisible by 32
sizes: list[(h,w)]
"""
assert len(sizes) == 5
a0, b0, a1, b1, a2, b2, a3, b3, a4, b4 = list(itertools.chain(*sizes))
boxes = [
[0, 0, b0, a0],
[0, a0, b1, a0 + a1],
[b0 - b2, a0 + a1 - a2, b0, a0 + a1],
[b0 - b2 - b3, a0 + a1 - a2 - a3, b0 - b2, a0 + a1 - a2],
[b0 - b2 - b3 - b4, a0 + a1 - a2 - a3 - a4, b0 - b2 - b3, a0 + a1 - a2 - a3],
]
if use_extra_features:
boxes.extend([
[b1, a0 + a1 - a2, b1 + b2, a0 + a1],
[b0 - b3, a0 + a1 - a2 - a3, b0, a0 + a1 - a2],
[b0 - b3 - b4, a0 + a1 - a2 - a3 - a4, b0 - b3, a0 + a1 - a2 - a3]
])
boxes = torch.tensor(boxes).to(device).long()
mon_height, mon_width = a0 + a1, b0
return boxes, mon_height, mon_width
@MONTAGE_BOXES.register("type-3")
def montage_pos_type3(sizes, device='cpu'):
return montage_pos_type34(sizes, device, False)
@MONTAGE_BOXES.register("type-4")
def montage_pos_type4(sizes, device='cpu'):
return montage_pos_type34(sizes, device, True)
@MONTAGE_BOXES.register("type-5")
def montage_pos_type5(sizes, device='cpu'):
"""
type-5 montage positioning, need image size divisible by 128
sizes: list[(h,w)]
"""
assert len(sizes) == 5
a0, b0, a1, b1, a2, b2, a3, b3, a4, b4 = list(itertools.chain(*sizes))
boxes = [
[0, 0, b0, a0],
[0, a0, b1, a0+a1],
[b1, a0, b1+b2, a0+a2],
[b1+b2, a0, b0, a0+a2],
[b1, a0+a2, b1+b2, a0+a1],
[b1+b2, a0+a2, b0-b3, a0+a1-a3],
[b0-b3, a0+a1-a2, b0, a0+a1-a3],
[b0-b2, a0+a1-a3, b0-b3, a0+a1],
[b0-b3, a0+a1-a3, b0-b4, a0+a1-a4],
[b0-b4, a0+a1-a3, b0, a0+a1-a4],
[b0-b3, a0+a1-a4, b0-b4, a0+a1],
[b0-b4, a0+a1-a4, b0, a0+a1]
]
boxes = torch.tensor(boxes).to(device).long()
mon_height, mon_width = a0 + a1, b0
return boxes, mon_height, mon_width
@MONTAGE_BOXES.register("type-6")
def montage_pos_type6(sizes, device='cpu'):
"""
type-6 montage positioning, need image size divisible by 128
sizes: list[(h,w)]
"""
assert len(sizes) == 5
a0, b0, a1, b1, a2, b2, a3, b3, a4, b4 = list(itertools.chain(*sizes))
a0_mul2 = a0*2
boxes = [
[0, 0, b0, a0],
[0, a0, b1, a0+a1],
[b1, a0, b0, a0+a1],
[0, a0+a1, b1, a0_mul2],
[b1, a0+a1, b1 + b2, a0 + a1 + a2],
[b1 + b2, a0+a1, b0, a0 + a1 + a2],
[b1, a0 + a1 + a2, b1 + b2, a0_mul2],
[b1 + b2, a0 + a1 + a2, b0 - b3, a0_mul2 - a3],
[b0 - b3, a0_mul2 - a2, b0, a0_mul2 - a3],
[b0 - b2, a0_mul2 - a3, b0 - b3, a0_mul2],
[b0 - b3, a0_mul2 - a3, b0 - b4, a0_mul2 - a4],
[b0 - b4, a0_mul2 - a3, b0, a0_mul2 - a4],
[b0 - b3, a0_mul2 - a4, b0 - b4, a0_mul2],
[b0 - b4, a0_mul2 - a4, b0, a0_mul2]
]
boxes = torch.tensor(boxes).to(device).long()
mon_height, mon_width = a0 + 2*a1, b0
return boxes, mon_height, mon_width
@MONTAGE_BOXES.register("type-7")
def montage_pos_type7(sizes, device='cpu'):
"""
type-7 montage positioning, need image size divisible by 128
sizes: list[(h,w)]
"""
assert len(sizes) == 5
a0, b0, a1, b1, a2, b2, a3, b3, a4, b4 = list(itertools.chain(*sizes))
a0_mul2, b0_mul2 = a0*2, b0*2
a01, b01 = a0 + a1, b0 + b1
boxes = [
[0, 0, b0, a0],
[b0, 0, b0_mul2, a0],
[0, a0, b0, 2*a0],
[b0, a0, b01, a01],
[b01, a0, b0_mul2, a01],
[b0, a01, b01, a0_mul2],
[b01, a01, b01 + b2, a01 + a2],
[b01 + b2, a01, b0_mul2, a01 + a2],
[b01, a01 + a2, b01 + b2, a0_mul2],
[b01 + b2, a01 + a2, b0_mul2 - b3, a0_mul2 - a3],
[b0_mul2 - b3, a0_mul2 - a2, b0_mul2, a0_mul2 - a3],
[b0_mul2 - b2, a0_mul2 - a3, b0_mul2 - b3, a0_mul2],
[b0_mul2 - b3, a0_mul2 - a3, b0_mul2 - b4, a0_mul2 - a4],
[b0_mul2 - b4, a0_mul2 - a3, b0_mul2, a0_mul2 - a4],
[b0_mul2 - b3, a0_mul2 - a4, b0_mul2 - b4, a0_mul2],
[b0_mul2 - b4, a0_mul2 - a4, b0_mul2, a0_mul2]
]
boxes = torch.tensor(boxes).to(device).long()
mon_height, mon_width = 2*a0, 2*b0
return boxes, mon_height, mon_width
@MONTAGE_BOXES.register("type-8")
def montage_pos_type8(sizes, device='cpu'):
"""
type-8, it only requires image size being divisible by 32
sizes: list[(h,w)]
"""
assert len(sizes) == 5
a0, b0, a1, b1, a2, b2, a3, b3, a4, b4 = list(itertools.chain(*sizes))
s2 = (b0-3*b2)//3
s3 = (b1-3*b3)//2
s4 = (b1-6*b4)//5
def spacing_boxes(x0, y0, a, b, s, n):
boxes = []
for i in range(n):
boxes.append([x0+i*b+i*s, y0, x0+(i+1)*b+i*s, y0+a])
return boxes
boxes = [
[0, 0, b0, a0],
[b0, 0, b0+b1, a1],
[b0, a1, b0+b1, a0]
]
boxes.extend(spacing_boxes(0, a0, a2, b2, s2, 3))
boxes.extend(spacing_boxes(b0, a0, a3, b3, s3, 3))
boxes.extend(spacing_boxes(b0, a0+a2-a4, a4, b4, s4, 6))
boxes = torch.tensor(boxes).to(device).long()
mon_height, mon_width = a0 + a2, b0 + b1
return boxes, mon_height, mon_width
@MONTAGE_BOXES.register("type-9")
def montage_pos_type9(sizes, device='cpu'):
"""
type-9, it only requires image size being divisible by 32
sizes: list[(h,w)]
"""
assert len(sizes) == 5
a0, b0, a1, b1, a2, b2, a3, b3, a4, b4 = list(itertools.chain(*sizes))
a3t = a3//2
a3b = a3 - a3t
b3l = b3//2
b3r = b3 - b3l
a4t = a4 // 2
a4b = a4 - a4t
b4l = b4 // 2
b4r = b4 - b4l
boxes = [
[0, 0, b0, a0], # 0
[b1, a0, b0, a0+a1], # 1
[0, a0+a2, b2, a0+a1], # 2
[b2, a0+a1, b1, a0+a1+a2], # 2
[b2, a0+a2-a3, b2+b3, a0+a2], # 3
[b2+b3, a0+a2-a3-a4, b2+b3+b4, a0+a2-a3], # 4
[b2//2-b3l, a0+a2//2-a3t, b2//2+b3r, a0+a2//2+a3b], # 3
[b2+b2//2-b4l, a0+a2+a2//2-a4t, b2+b2//2+b4r, a0+a2+a2//2+a4b], # 4
[0, a0+a1+a2-a3, b3, a0+a1+a2], # 3
[b3, a0+a1+a2-a3-a4, b3+b4, a0+a1+a2-a3], # 4
[b0-b3, a0+a1+a2-a3, b0, a0+a1+a2], # 3
[b0-b3-b4, a0+a1+a2-a3-a4, b0-b3, a0+a1+a2-a3], # 4
[b0-b2-b3, a0+a1+a2-a3, b0-b2, a0+a1+a2], # 3
[b0-b2-b3-b4, a0+a1+a2-a3-a4, b0-b2-b3, a0+a1+a2-a3], # 4
]
boxes = torch.tensor(boxes).to(device).long()
mon_height, mon_width = a0+a1+a2, b0
return boxes, mon_height, mon_width
def montage_pos_stair(sizes, alpha, scale_id, device='cpu'):
assert len(sizes) == 5
a0, b0 = sizes[0]
a, b = sizes[scale_id]
mon_height, mon_width = int(a0*alpha), int(b0*alpha)
cy, cx = mon_height//2, mon_width//2
boxes = [
[cx, cy-a, cx+b, cy],
[cx+b, cy-2*a, cx+2*b, cy-a],
[cx-b, cy, cx, cy+a],
[cx-2*b, cy+a, cx-b, cy+2*a]
]
boxes = torch.tensor(boxes).to(device).long()
return boxes, mon_height, mon_width
@MONTAGE_BOXES.register("type-stair1")
def montage_pos_stair1(sizes, device='cpu'):
return montage_pos_stair(sizes, 2, 1, device)
@MONTAGE_BOXES.register("type-stair2")
def montage_pos_stair2(sizes, device='cpu'):
return montage_pos_stair(sizes, 1, 2, device)
@MONTAGE_BOXES.register("type-stair3")
def montage_pos_stair3(sizes, device='cpu'):
return montage_pos_stair(sizes, 1, 3, device)
@MONTAGE_BOXES.register("type-stair4")
def montage_pos_stair4(sizes, device='cpu'):
return montage_pos_stair(sizes, 1, 4, device)
def montage_pos_side_by_side(sizes, alpha, scale_id, device='cpu'):
assert len(sizes) == 5
a0, b0 = sizes[0]
a, b = sizes[scale_id]
mon_height, mon_width = int(a0*alpha), int(b0*alpha)
cy, cx = mon_height//2, mon_width//2
boxes = [
[cx, cy, cx+b, cy+a],
[cx+b, cy, cx+2*b, cy+a],
[cx-b, cy, cx, cy+a],
[cx-2*b, cy, cx-b, cy+a]
]
boxes = torch.tensor(boxes).to(device).long()
return boxes, mon_height, mon_width
@MONTAGE_BOXES.register("type-sbs1")
def montage_pos_side_by_side1(sizes, device='cpu'):
return montage_pos_side_by_side(sizes, 2, 1, device)
@MONTAGE_BOXES.register("type-sbs2")
def montage_pos_side_by_side2(sizes, device='cpu'):
return montage_pos_side_by_side(sizes, 1, 2, device)
@MONTAGE_BOXES.register("type-sbs3")
def montage_pos_side_by_side3(sizes, device='cpu'):
return montage_pos_side_by_side(sizes, 1, 3, device)
@MONTAGE_BOXES.register("type-sbs4")
def montage_pos_side_by_side4(sizes, device='cpu'):
return montage_pos_side_by_side(sizes, 1, 4, device)
def montage_pos_grid(sizes, alpha, scale_id, device='cpu'):
assert len(sizes) == 5
a0, b0 = sizes[0]
a, b = sizes[scale_id]
mon_height, mon_width = int(a0 * alpha), int(b0 * alpha)
cy, cx = mon_height // 2, mon_width // 2
boxes = [
[cx-b, cy-a, cx, cy],
[cx, cy-a, cx+b, cy],
[cx-b, cy, cx, cy+a],
[cx, cy, cx+b, cy+a]
]
boxes = torch.tensor(boxes).to(device).long()
return boxes, mon_height, mon_width
@MONTAGE_BOXES.register("type-grid1")
def montage_pos_grid1(sizes, device='cpu'):
return montage_pos_grid(sizes, 2, 1, device)
@MONTAGE_BOXES.register("type-grid2")
def montage_pos_grid2(sizes, device='cpu'):
return montage_pos_grid(sizes, 1, 2, device)
@MONTAGE_BOXES.register("type-grid3")
def montage_pos_grid3(sizes, device='cpu'):
return montage_pos_grid(sizes, 1, 3, device)
@MONTAGE_BOXES.register("type-grid4")
def montage_pos_grid4(sizes, device='cpu'):
return montage_pos_grid(sizes, 1, 4, device)
# ************************ montage features *********************
# methods to generate features include extra features
class MontageFeatLayer1234(nn.Module):
"""
use conv-block to make extra features, each extra scale only occurs once
"""
def __init__(self, in_channels, m, n):
super(MontageFeatLayer1234, self).__init__()
self.feat_expasions = nn.ModuleList(
[NoopLayer()] * m + [
ConvBlock(in_channels, in_channels, kernel_size=3, stride=1, padding=1)
for _ in range(n)
])
def forward(self, features, levels):
new_features = []
for level, op in zip(levels, self.feat_expasions):
new_features.append(op(features[level]))
return new_features
class MontageFeatLayer567(nn.Module):
"""
use 1*3/3*1/3*3 convs to make extra features
"""
def __init__(self, in_channels, m, n):
super(MontageFeatLayer567, self).__init__()
module_list = [NoopLayer()]*m
for i in range(n):
module_list.extend(
[
NoopLayer(),
ConvBlock(in_channels, in_channels, kernel_size=(1, 3), stride=1, padding=(0, 1)),
ConvBlock(in_channels, in_channels, kernel_size=(3, 1), stride=1, padding=(1, 0)),
]
)
module_list.append(
ConvBlock(in_channels, in_channels, kernel_size=3, stride=1, padding=1)
)
self.feat_expasions = nn.ModuleList(module_list)
def forward(self, features, levels):
new_features = []
for level, op in zip(levels, self.feat_expasions):
new_features.append(op(features[level]))
return new_features
class MontageFeatLayer567_3x3conv(nn.Module):
"""
use 3x3 convs to make extra features
"""
def __init__(self, in_channels, m, n):
super(MontageFeatLayer567_3x3conv, self).__init__()
module_list = [NoopLayer()]*m
for i in range(n):
module_list.extend(
[
NoopLayer(),
ConvBlock(in_channels, in_channels, kernel_size=3, stride=1, padding=1),
ConvBlock(in_channels, in_channels, kernel_size=3, stride=1, padding=1),
]
)
module_list.append(
ConvBlock(in_channels, in_channels, kernel_size=3, stride=1, padding=1)
)
self.feat_expasions = nn.ModuleList(module_list)
def forward(self, features, levels):
new_features = []
level_prv = -1
x_prv = None
for level, op in zip(levels, self.feat_expasions):
if level == level_prv:
x = op(x_prv)
else:
x = op(features[level])
x_prv = x
new_features.append(x)
return new_features
class MontageFeatLayer(nn.Module):
def __init__(self, in_channels, levels, mode):
super(MontageFeatLayer, self).__init__()
self.mode = mode
self.levels = levels
assert mode in ['recursive', 'separate']
uniques, scales_cnt = torch.unique(torch.tensor(levels).int(), sorted=True, return_counts=True)
for i, n in zip(uniques, scales_cnt):
for j in range(n):
if j == 0:
layer = NoopLayer()
else:
layer = ConvBlock(in_channels, in_channels, kernel_size=3, stride=1, padding=1)
setattr(self, "mf_s%d_%d"%(i, j), layer)
print(self)
def forward(self, features):
last_idx_dict = dict()
if self.mode == 'recursive':
last_features_dict = dict()
new_features = []
for l in self.levels:
idx = last_idx_dict.get(l, -1)
idx += 1
last_idx_dict[l] = idx
op = getattr(self, "mf_s%d_%d"%(l, idx))
# print("mf_s%d_%d"%(l, idx))
if self.mode == 'recursive':
x = op(last_features_dict.get(l, features[l]))
last_features_dict[l] = x
else:
x = op(features[l])
new_features.append(x)
return new_features, self.levels
# *********************** build functions ***********************
def build_montage_layer(cfg, in_channels):
basic_montage_type = cfg.MODEL.RETINAPACK.BASIC_MONTAGE_TYPE
montage_box = MONTAGE_BOXES[basic_montage_type]
montage_levels = MONTAGE_LEVELS[basic_montage_type]
fpn_strides = cfg.MODEL.RETINAPACK.FPN_STRIDES
device = cfg.MODEL.DEVICE
return BasicMontageBlock(fpn_strides, montage_box, montage_levels, device)
def build_montage_feat_layer(cfg, in_channels):
basic_montage_type = cfg.MODEL.RETINAPACK.BASIC_MONTAGE_TYPE
levels = MONTAGE_LEVELS[basic_montage_type]
mode = cfg.MODEL.RETINAPACK.MONTAGE_FEAT_MODE
    return MontageFeatLayer(in_channels, levels, mode)
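# --- Illustrative usage sketch (assumes this module's imports resolve) -------
# Shows how a registered layout turns per-level feature-map sizes into packing
# boxes.  The toy sizes follow a stride-8..128 pyramid for a 512x512 input; any
# other list of five (h, w) pairs works the same way.
if __name__ == "__main__":
    toy_sizes = [(64, 64), (32, 32), (16, 16), (8, 8), (4, 4)]  # (h, w) of P3..P7
    boxes, mon_h, mon_w = MONTAGE_BOXES["type-1"](toy_sizes)
    print(boxes)         # one (x0, y0, x1, y1) box per pyramid level
    print(mon_h, mon_w)  # canvas size: (a0 + a1) x b0 = 96 x 64
    # plot_montage(boxes, mon_h, mon_w)  # optional visual check (needs matplotlib)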
import numpy as np
from sklearn.neighbors import NearestNeighbors
from sklearn.utils import check_array
from sklearn.exceptions import NotFittedError
from adapt.base import BaseAdaptEstimator, make_insert_doc
from adapt.utils import set_random_seed
@make_insert_doc()
class NearestNeighborsWeighting(BaseAdaptEstimator):
"""
NNW : Nearest Neighbors Weighting
    NNW reweights the source instances according to their number of
    neighbors in the target dataset.
Parameters
----------
n_neighbors : int, (default=5)
Number of neighbors to use by default for `kneighbors` queries.
radius : float, (default=1.0)
Range of parameter space to use by default for `radius_neighbors`
queries.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, (default='auto')
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use ``BallTree``
- 'kd_tree' will use ``KDTree``
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to ``fit`` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, (default=30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : str or callable, (default='minkowski')
The distance metric to use for the tree. The default metric is
minkowski, and with p=2 is equivalent to the standard Euclidean
metric. For a list of available metrics, see the documentation of
`sklearn.metrics.DistanceMetric`.
If metric is "precomputed", X is assumed to be a distance matrix and
must be square during fit. X may be a :term:`sparse graph`,
in which case only "nonzero" elements may be considered neighbors.
p : int, (default=2)
Parameter for the Minkowski metric from
sklearn.metrics.pairwise.pairwise_distances. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params : dict, (default=None)
Additional keyword arguments for the metric function.
n_jobs : int, (default=None)
The number of parallel jobs to run for neighbors search.
``None`` means 1 unless in a `joblib.parallel_backend` context.
``-1`` means using all processors.
Attributes
----------
weights_ : numpy array
Training instance weights.
estimator_ : object
Estimator.
See also
--------
KMM
KLIEP
References
----------
.. [1] `[1] <https://arxiv.org/pdf/2102.02291.pdf>`_ \
<NAME>. "Nearest neighbor-based importance weighting". In MLSP 2012.
"""
def __init__(self,
estimator=None,
Xt=None,
n_neighbors=5,
radius=1.0,
algorithm='auto',
leaf_size=30,
metric='minkowski',
p=2,
metric_params=None,
n_jobs=None,
copy=True,
verbose=1,
random_state=None,
**params):
names = self._get_param_names()
kwargs = {k: v for k, v in locals().items() if k in names}
kwargs.update(params)
super().__init__(**kwargs)
def fit_weights(self, Xs, Xt, **kwargs):
"""
Fit importance weighting.
Parameters
----------
Xs : array
Input source data.
Xt : array
Input target data.
kwargs : key, value argument
Not used, present here for adapt consistency.
Returns
-------
weights_ : sample weights
"""
Xs = check_array(Xs)
Xt = check_array(Xt)
set_random_seed(self.random_state)
nn_model = NearestNeighbors(n_neighbors=self.n_neighbors,
radius=self.radius,
algorithm=self.algorithm,
leaf_size=self.leaf_size,
metric=self.metric,
p=self.p,
metric_params=self.metric_params,
n_jobs=self.n_jobs)
nn_model.fit(Xs)
args = nn_model.kneighbors(Xt, return_distance=False)
args = args.ravel()
indices, weights = np.unique(args, return_counts=True)
self.weights_ = np.zeros(len(Xs))
self.weights_[indices] = weights
return self.weights_
def predict_weights(self):
"""
Return fitted source weights
Returns
-------
weights_ : sample weights
"""
if hasattr(self, "weights_"):
return self.weights_
else:
raise NotFittedError("Weights are not fitted yet, please "
"call 'fit_weights' or 'fit' first.") |
# repo: Rapid-Design-of-Systems-Laboratory/beluga-legacy
from beluga.visualization.renderers import BaseRenderer
from bokeh.plotting import *
from bokeh.palettes import *
from bokeh.models import HoverTool
import webbrowser
class Bokeh(BaseRenderer):
def __init__(self, filename='plot.html'):
self._figures = []
self.filename = filename
output_file(filename, title="beluga Output")
def _get_figure(self,f):
"""
Returns Figure instance from internal list using index
Raises:
ValueError if invalid index is used
"""
try:
fh = self._figures[f]
if fh is None:
raise ValueError('Invalid figure handle specified!')
return fh
        except (IndexError, TypeError):
raise ValueError('Invalid figure handle specified!')
def create_figure(self, width=600, height=600):
"""
Creates a new figure and returns a handle (index into array)
"""
self._figures.append(figure(width=width, height=height))
return len(self._figures)-1
def close_figure(self,f):
"""
Closes a specified figure
"""
pass
# close(self._get_figure(f))
def show_figure(self,f):
"""
Shows a specified figure
"""
show(self._get_figure(f))
def show_all(self):
"""
Show all rendered figures
"""
p = vplot(*self._figures)
show(p)
# for f in self._figures:
#
# show(f)
webbrowser.open(self.filename, new=2, autoraise=True)
def render_plot(self,f,p):
"""
Adds a line plot using the given data to the specified figure
"""
        plot = self._get_figure(f)
for line in p.plot_data:
dataset = [(data['x_data'],data['y_data']) for data in line['data']]
xlist, ylist = zip(*dataset)
if len(dataset) < 5:
mypalette=Spectral4[0:len(dataset)]
else:
mypalette=Spectral10[0:len(dataset)]
plot.multi_line(xs=xlist, ys=ylist, line_color=mypalette)
# NOT WORKING!
# if p._xlabel is not None and p._ylabel is not None:
# hover = HoverTool(
# tooltips=[
# (p._xlabel+":", "$x"),
# (p._ylabel+":", "$y"),
# ]
# )
# else:
# hover = HoverTool(
# tooltips=[
# ("($x, $y)")
# ]
# )
# plot.add_tools(hover)
if p._xlabel is not None:
plot.xaxis.axis_label = p._xlabel
if p._ylabel is not None:
plot.yaxis.axis_label = p._ylabel
if p._title is not None:
plot.title = p._title
def render_subplot(self,f,index,plot):
"""
Adds a subplot to the specified figure
"""
pass
if __name__ == '__main__':
from beluga.visualization.elements import Plot
import dill
r = Bokeh()
fig = r.create_figure()
with open('/Users/tantony/dev/tantony-beluga/examples/planarHypersonic/phu_2k5_eps2.dill','rb') as f:
out = dill.load(f)
p = Plot(-1,-1)
p.line('v/1000','h/1000')
p.xlabel('v (km/s)')
p.ylabel('h (km)')
p.title('Altitude vs. Velocity')
p.preprocess(out['solution'],out['problem_data'])
r.render_plot(fig,p)
r.show_figure(fig)
fig = r.create_figure()
p = Plot(-1,-1)
p.line('theta*re/1000','h/1000')
p.xlabel('downrange (km)')
p.ylabel('altitude (km)')
p.title('Altitude vs. Downrange')
p.preprocess(out['solution'],out['problem_data'])
r.render_plot(fig,p)
r.show_figure(fig)
import numpy
import sys
from Utils.conjugate_gradient_method import conjugate_solver
home_dir = '../../../'
sys.path.append(home_dir)
class ACCADELogisticExecutor:
def __init__(self, x_mat, y_vec):
self.s, self.d = x_mat.shape
self.x_mat = x_mat
self.y_vec = y_vec
self.w = numpy.zeros((self.d, 1))
self.p = numpy.zeros((self.d, 1))
self.gamma = None
self.g_tol = None
self.max_iter = None
self.is_search = None
self.num_etas = None
self.eta_list = None
def set_param(self, gamma, g_tol, max_iter, is_search, eta_list):
self.gamma = gamma
self.g_tol = g_tol
self.max_iter = max_iter
self.is_search = is_search
if is_search:
self.eta_list = eta_list
self.num_etas = len(eta_list)
def update_p(self, p):
self.p = p
def update_w(self):
self.w = numpy.subtract(self.w, self.p)
def get_p(self):
return self.p
def get_w(self):
return self.w
def set_w(self, w_vec):
self.w = w_vec
def get_data_size(self):
return self.s
def objective_function(self, w_vec):
"""
        f_j(w) = log(1 + exp(-y_j * (w dot x_j))) + (gamma/2) * ||w||_2^2
return the mean of f_j for all local data x_j
"""
z_vec = numpy.dot(self.x_mat, w_vec.reshape(self.d, 1))
z_vec = numpy.multiply(z_vec, self.y_vec)
loss_vec = numpy.log(1 + numpy.exp(-z_vec))
loss = numpy.mean(loss_vec)
reg = self.gamma / 2 * (numpy.linalg.norm(w_vec) ** 2)
return loss + reg
def search_objective_val(self):
objective_value_vec = numpy.zeros(self.num_etas + 1)
for i in range(self.num_etas):
objective_value_vec[i] = self.objective_function(self.w - self.eta_list[i] * self.p)
objective_value_vec[-1] = self.objective_function(self.w)
return objective_value_vec
def compute_gradient(self):
"""
Compute the gradient of the objective function using local data
"""
z_vec = numpy.dot(self.x_mat, self.w)
z_vec = numpy.multiply(z_vec, self.y_vec)
exp_z_vec = numpy.exp(z_vec)
vec_for_grad = numpy.multiply(-1 / (1 + exp_z_vec), self.y_vec)
grad_term = numpy.dot(self.x_mat.T, vec_for_grad)
return grad_term / self.s + self.gamma * self.w
def compute_newton(self):
z_vec = numpy.dot(self.x_mat, self.w)
z_vec = numpy.multiply(z_vec, self.y_vec)
exp_z_vec = numpy.exp(z_vec)
vec_for_hessian = numpy.sqrt(exp_z_vec) / (1 + exp_z_vec)
a_mat = numpy.multiply(self.x_mat, (vec_for_hessian / numpy.sqrt(self.s)))
p_vec = conjugate_solver(a_mat, self.compute_gradient(), self.gamma, tol=self.g_tol, max_iter=self.max_iter)
self.g_tol *= 0.5
return p_vec
def perform_local_update(self, steps):
w_old = numpy.copy(self.w)
for i in range(steps):
grad = self.compute_gradient()
self.update_p(self.compute_newton())
objective_value_vec = self.search_objective_val()
pg = -0.1 * numpy.sum(numpy.multiply(self.p, grad))
eta = 0
objective_value_old = objective_value_vec[-1]
for j in range(self.num_etas):
objective_value_new = objective_value_vec[j]
eta = self.eta_list[j]
if objective_value_new < objective_value_old + pg * eta:
break
self.update_p(numpy.multiply(eta, self.p))
self.update_w()
real_p_vec = w_old - self.w
self.set_w(w_old)
return real_p_vec
def compute_local_statistics(self):
p_exp = 0
for i in range(self.d):
p_exp += self.p[i]
p_exp /= self.d
p_var = 0
for i in range(self.d):
p_var += (self.p[i] - p_exp) ** 2
p_var /= self.d
return p_exp, p_var
def compute_local_w_statistics(self):
w_exp = 0
for i in range(self.d):
w_exp += self.w[i]
w_exp /= self.d
w_var = 0
for i in range(self.d):
            w_var += (self.w[i] - w_exp) ** 2
w_var /= self.d
return w_exp, w_var
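# --- Minimal sanity-check sketch (assumes the module's imports resolve) ------
# Builds a tiny synthetic problem and checks the objective and gradient at the
# zero initialisation; compute_newton() additionally needs conjugate_solver.
if __name__ == "__main__":
    rng = numpy.random.RandomState(0)
    x_mat = rng.normal(size=(100, 5))
    y_vec = numpy.sign(rng.normal(size=(100, 1)))  # labels in {-1, +1}
    executor = ACCADELogisticExecutor(x_mat, y_vec)
    executor.set_param(gamma=1e-3, g_tol=1e-6, max_iter=50,
                       is_search=True, eta_list=[1.0, 0.5, 0.1])
    print(executor.objective_function(executor.get_w()))  # log(2) ~ 0.6931 at w = 0
    print(executor.compute_gradient().shape)              # (5, 1)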
from pathlib import Path
import os
import time
import cv2
import numpy as np
from Model import Model
from EfficientDet.utils import preprocess_image
from EfficientDet.utils.anchors import anchors_for_shape
class EffModel(Model):
def __init__(self,
engine,
                 size=512,
num_classes=4):
super().__init__()
self.size = size
self.score_threshold = 0.5
self.num_classes = num_classes
assert size in [
512], f'Net size {size} not in [512]'
def setPreferableEngine(engine):
if engine == 'gpu':
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
else:
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
setPreferableEngine(engine)
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.ERROR)
from EfficientDet.model import efficientdet
phi = 0
weighted_bifpn = False
model_path = 'models/EfficientDet/EfficientDet-d0/EfficientDet-d0.weights'
        assert Path(model_path).is_file(), 'Model file not found'
_, self.net = efficientdet(phi=phi,
weighted_bifpn=weighted_bifpn,
num_classes=self.num_classes,
score_threshold=self.score_threshold)
self.net.load_weights(model_path, by_name=True)
def get_GFLOPS(self):
import tensorflow.keras.backend as K
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.ERROR)
graph = K.get_session().graph
flops = tf.profiler.profile(graph, options=tf.profiler.ProfileOptionBuilder.float_operation())
return flops.total_float_ops*1e-9
def preprocess(self, frames):
inputs, anchors, meta = [], [], []
for frame in frames:
image = frame[:, :, ::-1]
h, w = image.shape[:2]
image, scale, offset_h, offset_w = preprocess_image(
image, image_size=self.size)
inputs.append(image)
anchors.append(anchors_for_shape((self.size, self.size)))
meta.append((scale, h, w, offset_h, offset_w))
return [np.array(inputs), np.array(anchors)], meta
def inference(self, frames):
boxes, scores, labels = self.net.predict_on_batch(frames)
return boxes, scores, labels
def predict(self, frames):
st = time.time()
inputs, meta = self.preprocess(frames)
self.preprocess_time += (time.time() - st)
st = time.time()
outs = self.inference(inputs)
self.inference_time += (time.time() - st)
        st = time.time()
        boxes = []
for i in range(len(frames)):
box = self.postprocess(
frames[i], ((outs[0][i], outs[1][i], outs[2][i]), meta[i]))
boxes.append((frames[i], box))
self.postprocess_time += (time.time() - st)
self.count += len(frames)
return boxes
def postprocess(self, frame, outs):
(boxes, scores, labels), (scale, h, w, offset_h, offset_w) = outs
boxes[:, [0, 2]] = boxes[:, [0, 2]] - offset_w
boxes[:, [1, 3]] = boxes[:, [1, 3]] - offset_h
boxes /= scale
boxes[:, 0] = np.clip(boxes[:, 0], 0, w - 1)
boxes[:, 1] = np.clip(boxes[:, 1], 0, h - 1)
boxes[:, 2] = np.clip(boxes[:, 2], 0, w - 1)
boxes[:, 3] = np.clip(boxes[:, 3], 0, h - 1)
# select indices which have a score above the threshold
indices = np.where(scores > self.score_threshold)[0]
# select those detections
boxes = boxes[indices]
scores = scores[indices]
labels = labels[indices]
results = []
for box, score, label in zip(boxes, scores, labels):
xmin = int(round(box[0]))
ymin = int(round(box[1]))
xmax = int(round(box[2]))
ymax = int(round(box[3]))
results.append((int(label), score,
xmin, ymin, xmax, ymax))
return results
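# --- Standalone sketch of the coordinate mapping done in postprocess() -------
# Detections arrive in the padded, resized (size x size) space, so they are
# shifted back by the pad offsets, divided by the resize scale and clipped to
# the original frame.  The scale/offset values below are made up for
# illustration; the real ones come from EfficientDet.utils.preprocess_image.
if __name__ == "__main__":
    toy_boxes = np.array([[10.0, 120.0, 110.0, 220.0]])  # (x1, y1, x2, y2) in 512x512 space
    scale, offset_h, offset_w, h, w = 0.4, 112.0, 0.0, 720, 1280
    toy_boxes[:, [0, 2]] -= offset_w
    toy_boxes[:, [1, 3]] -= offset_h
    toy_boxes /= scale
    toy_boxes[:, 0::2] = np.clip(toy_boxes[:, 0::2], 0, w - 1)
    toy_boxes[:, 1::2] = np.clip(toy_boxes[:, 1::2], 0, h - 1)
    print(toy_boxes)  # the same box expressed in 1280x720 frame coordinates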
import requests
from bs4 import BeautifulSoup
from requests.exceptions import ConnectionError, TooManyRedirects
from raccoon_src.utils.web_server_validator import WebServerValidator
from raccoon_src.utils.request_handler import RequestHandler
from raccoon_src.utils.help_utils import HelpUtilities
from raccoon_src.utils.coloring import COLOR, COLORED_COMBOS
from raccoon_src.utils.exceptions import WebAppScannerException, WebServerValidatorException
from raccoon_src.utils.logger import Logger
class WebApplicationScanner:
def __init__(self, host):
self.host = host
self.request_handler = RequestHandler()
self.web_server_validator = WebServerValidator()
self.headers = None
self.robots = None
self.forms = None
self.fuzzable_urls = set()
self.emails = set()
log_file = HelpUtilities.get_output_path("{}/web_scan.txt".format(self.host.target))
self.target_dir = "/".join(log_file.split("/")[:-1])
self.logger = Logger(log_file)
def _detect_cms(self, tries=0):
"""
Detect CMS using whatcms.org.
Has a re-try mechanism because false negatives may occur
:param tries: Count of tries for CMS discovery
"""
# WhatCMS is under CloudFlare which detects and blocks proxied/Tor traffic, hence normal request.
page = requests.get(url="https://whatcms.org/?s={}".format(self.host.target))
soup = BeautifulSoup(page.text, "lxml")
found = soup.select(".panel.panel-success")
if found:
try:
cms = [a for a in soup.select("a") if "/c/" in a.get("href")][0]
self.logger.info("{} CMS detected: target is using {}{}{}".format(
COLORED_COMBOS.GOOD, COLOR.GREEN, cms.get("title"), COLOR.RESET))
except IndexError:
if tries >= 4:
return
else:
self._detect_cms(tries=tries + 1)
else:
if tries >= 4:
return
else:
self._detect_cms(tries=tries + 1)
def _cookie_info(self, jar):
for cookie in jar:
key = cookie.__dict__.get("name")
value = cookie.__dict__.get("value")
domain = cookie.__dict__.get("domain")
secure = cookie.__dict__.get("secure")
http_only = cookie.has_nonstandard_attr("HttpOnly")
try:
if domain in self.host.target or self.host.target in domain:
if not secure or not http_only:
current = "%s Cookie: {%s: %s} -" % (COLORED_COMBOS.GOOD, key, value)
if not secure and not http_only:
current += " both secure and HttpOnly flags are not set"
elif not secure:
current += " secure flag not set"
else:
current += " HttpOnly flag not set"
self.logger.info(current)
except TypeError:
continue
def _server_info(self):
if self.headers.get("server"):
self.logger.info("{} Web server detected: {}{}{}".format(
COLORED_COMBOS.GOOD, COLOR.GREEN, self.headers.get("server"), COLOR.RESET))
def _x_powered_by(self):
if self.headers.get("X-Powered-By"):
self.logger.info("{} X-Powered-By header detected: {}{}{}".format(
COLORED_COMBOS.GOOD, COLOR.GREEN, self.headers.get("X-Powered-By"), COLOR.RESET))
def _anti_clickjacking(self):
if not self.headers.get("X-Frame-Options"):
self.logger.info(
"{} X-Frame-Options header not detected - target might be vulnerable to clickjacking".format(
COLORED_COMBOS.GOOD)
)
def _xss_protection(self):
xss_header = self.headers.get("X-XSS-PROTECTION")
if xss_header and "1" in xss_header:
self.logger.info("{} Found X-XSS-PROTECTION header".format(COLORED_COMBOS.BAD))
def _cors_wildcard(self):
if self.headers.get("Access-Control-Allow-Origin") == "*":
self.logger.info("{} CORS wildcard detected".format(COLORED_COMBOS.GOOD))
def _robots(self):
res = self.request_handler.send(
"GET",
url="{}://{}:{}/robots.txt".format(
self.host.protocol,
self.host.target,
self.host.port
)
)
if res.status_code != 404 and res.text and "<!DOCTYPE html>" not in res.text:
self.logger.info("{} Found robots.txt".format(COLORED_COMBOS.GOOD))
with open("{}/robots.txt".format(self.target_dir), "w") as file:
file.write(res.text)
def _sitemap(self):
res = self.request_handler.send(
"GET",
url="{}://{}:{}/sitemap.xml".format(
self.host.protocol,
self.host.target,
self.host.port
)
)
if res.status_code != 404 and res.text and "<!DOCTYPE html>" not in res.text:
self.logger.info("{} Found sitemap.xml".format(COLORED_COMBOS.GOOD))
with open("{}/sitemap.xml".format(self.target_dir), "w") as file:
file.write(res.text)
def _analyze_hrefs(self, href):
if all(("?" in href, "=" in href, not href.startswith("mailto:"))):
if any(((self.host.naked and self.host.naked in href), self.host.target in href, href.startswith("/"))):
self.fuzzable_urls.add(href)
elif href.startswith("mailto:"):
self._add_to_emails(href)
def _log_fuzzable_urls(self):
base_target = "{}://{}:{}".format(self.host.protocol, self.host.target, self.host.port)
for url in self.fuzzable_urls:
if url.startswith("/"):
self.logger.debug("\t{}{}".format(base_target, url))
else:
self.logger.debug("\t{}".format(url))
def _log_emails(self):
for email in self.emails:
self.logger.debug("\t{}".format(email[7:]))
def _find_urls(self, soup):
urls = soup.select("a")
if urls:
for url in urls:
href = url.get("href")
if href:
self._analyze_hrefs(href)
if self.fuzzable_urls:
self.logger.info("{} {} fuzzable URLs discovered".format(
COLORED_COMBOS.NOTIFY, len(self.fuzzable_urls)))
self._log_fuzzable_urls()
if self.emails:
self.logger.info("{} {} email addresses discovered".format(
COLORED_COMBOS.NOTIFY, len(self.emails)))
self._log_emails()
def _find_forms(self, soup):
# TODO: Analyze interesting input names/ids/params
self.forms = soup.select("form")
if self.forms:
self.logger.info("{} {} HTML forms discovered".format(COLORED_COMBOS.NOTIFY, len(self.forms)))
for form in self.forms:
form_id = form.get("id")
form_class = form.get("class")
form_method = form.get("method")
form_action = form.get("action")
if form_action == "#":
continue
self.logger.debug("\tForm details: ID: {}, Class: {}, Method: {}, action: {}".format(
form_id, form_class, form_method, form_action
))
def _add_to_emails(self, href):
self.emails.add(href)
def get_web_application_info(self):
session = self.request_handler.get_new_session()
try:
with session:
# Test if target is serving HTTP requests
response = session.get(
timeout=20,
url="{}://{}:{}".format(
self.host.protocol,
self.host.target,
self.host.port
)
)
self.headers = response.headers
self._detect_cms()
self._robots()
self._sitemap()
self._server_info()
self._x_powered_by()
self._cors_wildcard()
self._xss_protection()
self._anti_clickjacking()
self._cookie_info(session.cookies)
soup = BeautifulSoup(response.text, "lxml")
self._find_urls(soup)
self._find_forms(soup)
except (ConnectionError, TooManyRedirects) as e:
raise WebAppScannerException("Couldn't get response from server.\n"
"Caused due to exception: {}".format(str(e)))
async def run_scan(self):
self.logger.info("{} Trying to collect {} web application data".format(COLORED_COMBOS.INFO, self.host))
try:
self.web_server_validator.validate_target_webserver(self.host)
self.get_web_application_info()
except WebServerValidatorException:
self.logger.info(
"{} Target does not seem to have an active web server on port: {}. "
"No web application data will be gathered.".format(COLORED_COMBOS.NOTIFY, self.host.port))
return
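# --- Minimal sketch of the "fuzzable URL" predicate in _analyze_hrefs() ------
# A link is collected when it carries query parameters, is not a mailto: link,
# and either points at the scanned host (naked or full name) or is relative.
# The host names below are hypothetical; the sketch itself is pure Python.
if __name__ == "__main__":
    target, naked = "www.example.com", "example.com"
    def looks_fuzzable(href):
        has_params = "?" in href and "=" in href and not href.startswith("mailto:")
        points_at_host = (naked and naked in href) or target in href or href.startswith("/")
        return has_params and points_at_host
    for href in ("/search?q=raccoon",
                 "https://www.example.com/?page=1",
                 "mailto:admin@example.com",
                 "https://elsewhere.org/?x=1"):
        print(href, looks_fuzzable(href))  # True, True, False, False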
# repo: PacktPublishing/Learning-Python-Artificial-Intelligence-by-Example
"""
Data generators for loading training, validation and test data sets
"""
import pandas as pd
import numpy as np
import cv2
import os
import scipy.misc
from keras.utils import Sequence
def get_image(image_path, crop_position=100, image_size=(66, 200, 3), debug=False):
"""
Load an image and return resized image for training and actual image for viewing
:param image_path: str
:param crop_position: int
:param image_size: tuple
    :param debug: bool
    :return: tuple of (original image, resized image for training)
"""
image = cv2.imread(image_path)
cropped = image[crop_position:, :]
resize_to = (int(cropped.shape[1] / 2), int(cropped.shape[0] / 2)) if not image_size \
else (image_size[1], image_size[0])
resized = cv2.resize(cropped, resize_to)
return image, resized
class DataGenerator(Sequence):
"""
Data Generator to load training, validation and test batches
"""
def __init__(self, df: pd.DataFrame, data_dir='./data', data_file='./data/data.txt',
image_size=(256, 455, 3), batch_size=32, limit_batches=0,
label=None, debug=False, log_images=False):
"""
:param df:
:param data_dir:
:param data_file:
:param batch_size:
"""
self.data_dir = data_dir
self.data_file = data_file
self.batch_size = batch_size
self.image_size = image_size
self.channels = 1
self.label = label
self.idx = 0
self.batch_count = 0
self.debug = debug
self.log_images = log_images
self.num_batches = int(np.floor(len(df) / self.batch_size))
self.limit_batches = limit_batches if limit_batches < self.num_batches and limit_batches else self.num_batches
self.df = df.reset_index().loc[:self.limit_batches * self.batch_size]
if debug:
print('DataGenerator(): num_batches = {}, batch_size = {}, len(df) = {}'
.format(self.limit_batches, self.batch_size, len(self.df)))
def __len__(self):
"""
Find number of batches per epoch
"""
return self.limit_batches
def __getitem__(self, batch_num):
"""
Generate one batch of data
:param batch_num:
:return:
"""
batch_data = self.df[batch_num * self.batch_size:(batch_num + 1) * self.batch_size]
X, y = self.__data_generation(batch_data.reset_index())
return X, y
def __data_generation(self, batch_data):
"""
Generates data containing batch_size samples
:param index: list
"""
X = np.zeros((self.batch_size, *self.image_size))
y = np.zeros((self.batch_size), dtype=float)
for i, sample in batch_data.iterrows():
# if self.debug:
# print('{}: Loading image {}, steering angle {}'.format(i, sample['image_name'], sample['angle']))
image_path = os.path.join(self.data_dir, sample['image_name'])
            image, resized = get_image(image_path, image_size=self.image_size)
# image = cv2.imread(image_path) # , cv2.IMREAD_GRAYSCALE)
# cropped = image[100:, :]
# resize_to = (int(cropped.shape[1] / 2), int(cropped.shape[0] / 2)) if not self.image_size \
# else (self.image_size[1], self.image_size[0])
# resized = cv2.resize(cropped, resize_to) # (self.image_size[1], self.image_size[0]))
X[i] = resized
            y[i] = float(sample['angle']) * np.pi / 180  # Force into radians
if self.log_images:
text = 'Frame: {} Angle: {}'.format(i, sample['angle'])
cv2.putText(resized, text, (10, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 1, cv2.LINE_AA)
file = './logs/images/{}-{}-{}'.format(self.label, i, sample['image_name'])
print('Writing debug image to {}'.format(file))
cv2.imwrite(file, resized)
return X, y
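# --- Minimal sketch exercising get_image() on a synthetic frame --------------
# Writes a fake 640x480 camera frame to a temporary file and checks the
# crop/resize behaviour; the real pipeline reads frames listed in data.txt.
if __name__ == "__main__":
    import tempfile
    tmp_path = os.path.join(tempfile.gettempdir(), "toy_frame.png")
    cv2.imwrite(tmp_path, np.zeros((480, 640, 3), dtype=np.uint8))
    original, resized = get_image(tmp_path, crop_position=100, image_size=(66, 200, 3))
    print(original.shape, resized.shape)  # (480, 640, 3) (66, 200, 3)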
# repo: ahti87/waldur-mastermind
import base64
import datetime
import logging
import os
import traceback
from io import BytesIO
import pdfkit
from dateutil.relativedelta import relativedelta
from django.conf import settings
from django.core.exceptions import ObjectDoesNotExist, ValidationError
from django.core.files.base import ContentFile
from django.core.files.storage import default_storage as storage
from django.db import transaction
from django.db.models import Sum
from django.template.loader import render_to_string
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from PIL import Image
from rest_framework import exceptions as rf_exceptions
from rest_framework import serializers, status
from waldur_core.core import models as core_models
from waldur_core.core import serializers as core_serializers
from waldur_core.core import utils as core_utils
from waldur_core.structure import filters as structure_filters
from waldur_core.structure import models as structure_models
from waldur_mastermind.common.utils import create_request
from waldur_mastermind.invoices import models as invoice_models
from waldur_mastermind.invoices import registrators
from waldur_mastermind.invoices.utils import get_full_days
from waldur_mastermind.marketplace import attribute_types
from . import models, plugins
logger = logging.getLogger(__name__)
def get_order_item_processor(order_item):
if order_item.resource:
offering = order_item.resource.offering
else:
offering = order_item.offering
if order_item.type == models.RequestTypeMixin.Types.CREATE:
return plugins.manager.get_processor(offering.type, 'create_resource_processor')
elif order_item.type == models.RequestTypeMixin.Types.UPDATE:
return plugins.manager.get_processor(offering.type, 'update_resource_processor')
elif order_item.type == models.RequestTypeMixin.Types.TERMINATE:
return plugins.manager.get_processor(offering.type, 'delete_resource_processor')
def process_order_item(order_item: models.OrderItem, user):
processor = get_order_item_processor(order_item)
if not processor:
order_item.error_message = (
'Skipping order item processing because processor is not found.'
)
order_item.set_state_erred()
order_item.save(update_fields=['state', 'error_message'])
return
try:
order_item.set_state_executing()
order_item.save(update_fields=['state'])
processor(order_item).process_order_item(user)
except Exception as e:
# Here it is necessary to catch all exceptions.
# If this is not done, then the order will remain in the executed status.
order_item.error_message = str(e)
order_item.error_traceback = traceback.format_exc()
order_item.set_state_erred()
        logger.error(f'Error processing order item {order_item}.')
order_item.save(update_fields=['state', 'error_message', 'error_traceback'])
def validate_order_item(order_item, request):
processor = get_order_item_processor(order_item)
if processor:
try:
processor(order_item).validate_order_item(request)
except NotImplementedError:
# It is okay if validation is not implemented yet
pass
def create_screenshot_thumbnail(screenshot):
pic = screenshot.image
fh = storage.open(pic.name, 'rb')
image = Image.open(fh)
image.thumbnail(settings.WALDUR_MARKETPLACE['THUMBNAIL_SIZE'], Image.ANTIALIAS)
fh.close()
thumb_extension = os.path.splitext(pic.name)[1]
thumb_extension = thumb_extension.lower()
thumb_name = os.path.basename(pic.name)
if thumb_extension in ['.jpg', '.jpeg']:
FTYPE = 'JPEG'
elif thumb_extension == '.gif':
FTYPE = 'GIF'
elif thumb_extension == '.png':
FTYPE = 'PNG'
else:
return
temp_thumb = BytesIO()
image.save(temp_thumb, FTYPE)
temp_thumb.seek(0)
screenshot.thumbnail.save(thumb_name, ContentFile(temp_thumb.read()), save=True)
temp_thumb.close()
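# --- Pure-PIL sketch of the thumbnail step above (no Django storage needed) --
# Mirrors the open -> thumbnail -> re-encode flow of create_screenshot_thumbnail
# on an in-memory image; the 120x120 size is an assumption, the real value
# comes from settings.WALDUR_MARKETPLACE['THUMBNAIL_SIZE'].  The sketch itself
# only needs Pillow, but importing this module requires a configured Django env.
if __name__ == "__main__":
    source = BytesIO()
    Image.new('RGB', (800, 600), color='white').save(source, 'PNG')
    source.seek(0)
    image = Image.open(source)
    image.thumbnail((120, 120), Image.ANTIALIAS)
    thumb = BytesIO()
    image.save(thumb, 'PNG')
    print(image.size)  # aspect ratio preserved: (120, 90)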
def create_order_pdf(order):
logo_path = settings.WALDUR_CORE['SITE_LOGO']
if logo_path:
with open(logo_path, 'rb') as image_file:
deployment_logo = base64.b64encode(image_file.read()).decode("utf-8")
else:
deployment_logo = None
context = dict(
order=order,
currency=settings.WALDUR_CORE['CURRENCY_NAME'],
deployment_name=settings.WALDUR_CORE['SITE_NAME'],
deployment_address=settings.WALDUR_CORE['SITE_ADDRESS'],
deployment_email=settings.WALDUR_CORE['SITE_EMAIL'],
deployment_phone=settings.WALDUR_CORE['SITE_PHONE'],
deployment_logo=deployment_logo,
)
html = render_to_string('marketplace/order.html', context)
pdf = pdfkit.from_string(html, False)
return pdf
def import_resource_metadata(resource):
instance = resource.scope
fields = {'action', 'action_details', 'state', 'runtime_state'}
for field in fields:
if field == 'state':
value = instance.get_state_display()
else:
value = getattr(instance, field, None)
        resource.backend_metadata[field] = value
if instance.backend_id:
resource.backend_id = instance.backend_id
resource.name = instance.name
resource.save(
update_fields=['backend_metadata', 'attributes', 'name', 'backend_id']
)
def get_service_provider_info(source):
try:
resource = models.Resource.objects.get(scope=source)
customer = resource.offering.customer
service_provider = getattr(customer, 'serviceprovider', None)
return {
'service_provider_name': customer.name,
'service_provider_uuid': ''
if not service_provider
else service_provider.uuid.hex,
}
except models.Resource.DoesNotExist:
return {}
def get_offering_details(offering):
if not isinstance(offering, models.Offering):
return {}
return {
'offering_type': offering.type,
'offering_name': offering.name,
'offering_uuid': offering.uuid.hex,
'service_provider_name': offering.customer.name,
'service_provider_uuid': offering.customer.uuid.hex,
}
def format_list(resources):
"""
Format comma-separated list of IDs from Django queryset.
"""
return ', '.join(map(str, sorted(resources.values_list('id', flat=True))))
def get_order_item_url(order_item):
return core_utils.format_homeport_link(
'projects/{project_uuid}/marketplace-order-item-details/{order_item_uuid}/',
order_item_uuid=order_item.uuid.hex,
project_uuid=order_item.order.project.uuid,
)
def fill_activated_field(apps, schema_editor):
# We cannot use RequestTypeMixin.Types.CREATE and OrderItem.States.Done because this function called in migrations
state_done = 3
type_create = 1
OrderItem = apps.get_model('marketplace', 'OrderItem')
for order_item in OrderItem.objects.filter(type=type_create, state=state_done):
if not order_item.activated and order_item.resource:
order_item.activated = order_item.resource.created
order_item.save()
def get_info_about_missing_usage_reports():
now = timezone.now()
billing_period = core_utils.month_start(now)
whitelist_types = [
offering_type
for offering_type in plugins.manager.get_offering_types()
if plugins.manager.enable_usage_notifications(offering_type)
]
offering_ids = models.OfferingComponent.objects.filter(
billing_type=models.OfferingComponent.BillingTypes.USAGE,
offering__type__in=whitelist_types,
).values_list('offering_id', flat=True)
resource_with_usages = models.ComponentUsage.objects.filter(
billing_period=billing_period
).values_list('resource', flat=True)
resources_without_usages = models.Resource.objects.filter(
state=models.Resource.States.OK, offering_id__in=offering_ids
).exclude(id__in=resource_with_usages)
result = []
for resource in resources_without_usages:
rows = list(
filter(lambda x: x['customer'] == resource.offering.customer, result)
)
if rows:
rows[0]['resources'].append(resource)
else:
result.append(
{'customer': resource.offering.customer, 'resources': [resource],}
)
return result
def get_public_resources_url(customer):
return core_utils.format_homeport_link(
'organizations/{organization_uuid}/marketplace-public-resources/',
organization_uuid=customer.uuid,
)
def validate_limits(limits, offering):
if not plugins.manager.can_update_limits(offering.type):
raise serializers.ValidationError(
{'limits': _('Limits update is not supported for this resource.')}
)
valid_component_types = set(
offering.components.filter(
billing_type=models.OfferingComponent.BillingTypes.LIMIT
).values_list('type', flat=True)
)
invalid_types = set(limits.keys()) - valid_component_types
if invalid_types:
raise serializers.ValidationError(
{'limits': _('Invalid types: %s') % ', '.join(invalid_types)}
)
# Validate max and min limit value.
components_map = {
component.type: component
for component in offering.components.filter(type__in=valid_component_types)
}
for key, value in limits.items():
component = components_map.get(key)
if not component:
continue
if component.max_value and value > component.max_value:
raise serializers.ValidationError(
_('The limit %s value cannot be more than %s.')
% (value, component.max_value)
)
if component.min_value and value < component.min_value:
raise serializers.ValidationError(
_('The limit %s value cannot be less than %s.')
% (value, component.min_value)
)
def validate_attributes(attributes, category):
category_attributes = models.Attribute.objects.filter(section__category=category)
required_attributes = category_attributes.filter(required=True).values_list(
'key', flat=True
)
missing_attributes = set(required_attributes) - set(attributes.keys())
if missing_attributes:
raise rf_exceptions.ValidationError(
{
'attributes': _(
'These attributes are required: %s'
% ', '.join(sorted(missing_attributes))
)
}
)
for attribute in category_attributes:
value = attributes.get(attribute.key)
if value is None:
# Use default attribute value if it is defined
if attribute.default is not None:
attributes[attribute.key] = attribute.default
continue
validator = attribute_types.get_attribute_type(attribute.type)
if not validator:
continue
try:
validator.validate(
value, list(attribute.options.values_list('key', flat=True))
)
except ValidationError as e:
raise rf_exceptions.ValidationError({attribute.key: e.message})
def create_offering_components(offering, custom_components=None):
fixed_components = plugins.manager.get_components(offering.type)
category_components = {
component.type: component
for component in models.CategoryComponent.objects.filter(
category=offering.category
)
}
for component_data in fixed_components:
models.OfferingComponent.objects.create(
offering=offering,
parent=category_components.get(component_data.type, None),
**component_data._asdict(),
)
if custom_components:
for component_data in custom_components:
models.OfferingComponent.objects.create(offering=offering, **component_data)
def get_resource_state(state):
SrcStates = core_models.StateMixin.States
DstStates = models.Resource.States
mapping = {
SrcStates.CREATION_SCHEDULED: DstStates.CREATING,
SrcStates.CREATING: DstStates.CREATING,
SrcStates.UPDATE_SCHEDULED: DstStates.UPDATING,
SrcStates.UPDATING: DstStates.UPDATING,
SrcStates.DELETION_SCHEDULED: DstStates.TERMINATING,
SrcStates.DELETING: DstStates.TERMINATING,
SrcStates.OK: DstStates.OK,
SrcStates.ERRED: DstStates.ERRED,
}
return mapping.get(state, DstStates.ERRED)
def get_marketplace_offering_uuid(serializer, scope):
try:
return models.Resource.objects.get(scope=scope).offering.uuid
except ObjectDoesNotExist:
return
def get_marketplace_offering_name(serializer, scope):
try:
return models.Resource.objects.get(scope=scope).offering.name
except ObjectDoesNotExist:
return
def get_marketplace_category_uuid(serializer, scope):
try:
return models.Resource.objects.get(scope=scope).offering.category.uuid
except ObjectDoesNotExist:
return
def get_marketplace_category_name(serializer, scope):
try:
return models.Resource.objects.get(scope=scope).offering.category.title
except ObjectDoesNotExist:
return
def get_marketplace_resource_uuid(serializer, scope):
try:
return models.Resource.objects.get(scope=scope).uuid
except ObjectDoesNotExist:
return
def get_marketplace_plan_uuid(serializer, scope):
try:
resource = models.Resource.objects.get(scope=scope)
if resource.plan:
return resource.plan.uuid
except ObjectDoesNotExist:
return
def get_marketplace_resource_state(serializer, scope):
try:
return models.Resource.objects.get(scope=scope).get_state_display()
except ObjectDoesNotExist:
return
def get_is_usage_based(serializer, scope):
try:
return models.Resource.objects.get(scope=scope).offering.is_usage_based
except ObjectDoesNotExist:
return
def get_is_limit_based(serializer, scope):
try:
return models.Resource.objects.get(scope=scope).offering.is_limit_based
except ObjectDoesNotExist:
return
def add_marketplace_offering(sender, fields, **kwargs):
fields['marketplace_offering_uuid'] = serializers.SerializerMethodField()
setattr(sender, 'get_marketplace_offering_uuid', get_marketplace_offering_uuid)
fields['marketplace_offering_name'] = serializers.SerializerMethodField()
setattr(sender, 'get_marketplace_offering_name', get_marketplace_offering_name)
fields['marketplace_category_uuid'] = serializers.SerializerMethodField()
setattr(sender, 'get_marketplace_category_uuid', get_marketplace_category_uuid)
fields['marketplace_category_name'] = serializers.SerializerMethodField()
setattr(sender, 'get_marketplace_category_name', get_marketplace_category_name)
fields['marketplace_resource_uuid'] = serializers.SerializerMethodField()
setattr(sender, 'get_marketplace_resource_uuid', get_marketplace_resource_uuid)
fields['marketplace_plan_uuid'] = serializers.SerializerMethodField()
setattr(sender, 'get_marketplace_plan_uuid', get_marketplace_plan_uuid)
fields['marketplace_resource_state'] = serializers.SerializerMethodField()
setattr(sender, 'get_marketplace_resource_state', get_marketplace_resource_state)
fields['is_usage_based'] = serializers.SerializerMethodField()
setattr(sender, 'get_is_usage_based', get_is_usage_based)
fields['is_limit_based'] = serializers.SerializerMethodField()
setattr(sender, 'get_is_limit_based', get_is_limit_based)
def get_offering_costs(offering, active_customers, start, end):
costs = []
date = start
while date <= end:
year = date.year
month = date.month
invoice_items = invoice_models.InvoiceItem.objects.filter(
details__offering_uuid=offering.uuid.hex,
project__customer__in=active_customers,
invoice__year=year,
invoice__month=month,
)
stats = {
'tax': 0,
'total': 0,
'price': 0,
'price_current': 0,
'period': '%s-%02d' % (year, month),
}
for item in invoice_items:
stats['tax'] += item.tax
stats['total'] += item.total
stats['price'] += item.price
stats['price_current'] += item.price_current
costs.append(stats)
date += relativedelta(months=1)
return costs
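# --- Standalone sketch of the month iteration used above ---------------------
# get_offering_costs and get_offering_component_stats both walk billing months
# from start to end with relativedelta and key each row as 'YYYY-MM'.
if __name__ == "__main__":
    start, end = datetime.date(2021, 11, 1), datetime.date(2022, 2, 1)
    date = start
    while date <= end:
        print('%s-%02d' % (date.year, date.month))  # 2021-11 ... 2022-02
        date += relativedelta(months=1)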
def get_offering_customers(offering, active_customers):
resources = models.Resource.objects.filter(
offering=offering, project__customer__in=active_customers,
)
customers_ids = resources.values_list('project__customer_id', flat=True)
return structure_models.Customer.objects.filter(id__in=customers_ids)
def get_start_and_end_dates_from_request(request):
serializer = core_serializers.DateRangeFilterSerializer(data=request.query_params)
serializer.is_valid(raise_exception=True)
today = datetime.date.today()
default_start = datetime.date(year=today.year - 1, month=today.month, day=1)
start_year, start_month = serializer.validated_data.get(
'start', (default_start.year, default_start.month)
)
end_year, end_month = serializer.validated_data.get(
'end', (today.year, today.month)
)
end = datetime.date(year=end_year, month=end_month, day=1)
start = datetime.date(year=start_year, month=start_month, day=1)
return start, end
def get_active_customers(request, view):
customers = structure_models.Customer.objects.all()
return structure_filters.AccountingStartDateFilter().filter_queryset(
request, customers, view
)
def get_offering_component_stats(offering, active_customers, start, end):
component_stats = []
resources = models.Resource.objects.filter(
offering=offering, project__customer__in=active_customers,
)
resources_ids = resources.values_list('id', flat=True)
date = start
while date <= end:
year = date.year
month = date.month
period = '%s-%02d' % (year, month)
        # for consistency with resource usage reporting, assume values at the beginning of the last day
period_visible = (
core_utils.month_end(date)
.replace(hour=0, minute=0, second=0, microsecond=0)
.isoformat()
)
invoice_items = invoice_models.InvoiceItem.objects.filter(
resource_id__in=resources_ids, invoice__year=year, invoice__month=month,
)
for item in invoice_items:
# Case when invoice item details includes plan component data.
plan_component_id = item.details.get('plan_component_id')
if not plan_component_id:
continue
try:
plan_component = models.PlanComponent.objects.get(pk=plan_component_id)
offering_component = plan_component.component
if (
offering_component.billing_type
== models.OfferingComponent.BillingTypes.LIMIT
):
component_stats.append(
{
'usage': item.quantity,
'description': offering_component.description,
'measured_unit': offering_component.measured_unit,
'type': offering_component.type,
'name': offering_component.name,
'period': period,
'date': period_visible,
'offering_component_id': offering_component.id,
}
)
if (
offering_component.billing_type
== models.OfferingComponent.BillingTypes.USAGE
):
if [
*filter(
lambda x: x['period'] == period
and x['offering_component_id'] == offering_component.id,
component_stats,
)
]:
continue
usages = models.ComponentUsage.objects.filter(
component=offering_component, billing_period=date
).aggregate(usage=Sum('usage'))['usage']
component_stats.append(
{
'usage': usages,
'description': offering_component.description,
'measured_unit': offering_component.measured_unit,
'type': offering_component.type,
'name': offering_component.name,
'period': period,
'date': period_visible,
'offering_component_id': offering_component.id,
}
)
if (
offering_component.billing_type
== models.OfferingComponent.BillingTypes.FIXED
):
other = [
*filter(
lambda x: x['period'] == period
and x['offering_component_id'] == offering_component.id,
component_stats,
)
]
if other:
other[0]['usage'] += item.get_factor()
continue
component_stats.append(
{
'usage': item.get_factor(),
'description': offering_component.description,
'measured_unit': offering_component.measured_unit,
'type': offering_component.type,
'name': offering_component.name,
'period': period,
'date': period_visible,
'offering_component_id': offering_component.id,
}
)
except models.PlanComponent.DoesNotExist:
logger.error(
'PlanComponent with id %s is not found.' % plan_component_id
)
date += relativedelta(months=1)
    # Drop the internal bookkeeping key before returning the stats.
    for stat in component_stats:
        stat.pop('offering_component_id', None)
return component_stats
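# A sketch of the shape of the returned stats (field values are illustrative,
# not real data); note that 'offering_component_id' is stripped before returning:
#
#     [
#         {
#             'usage': 10,
#             'description': 'CPU hours',
#             'measured_unit': 'hours',
#             'type': 'cpu',
#             'name': 'CPU',
#             'period': '2023-01',
#             'date': '2023-01-31T00:00:00',
#         },
#         ...
#     ]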
class MoveResourceException(Exception):
pass
@transaction.atomic
def move_resource(resource: models.Resource, project):
if project.customer.blocked:
        raise rf_exceptions.ValidationError('New customer must not be blocked')
old_project = resource.project
resource.project = project
resource.save(update_fields=['project'])
if resource.scope:
resource.scope.project = project
resource.scope.save(update_fields=['project'])
for service_settings in structure_models.ServiceSettings.objects.filter(
scope=resource.scope
):
models.Offering.objects.filter(scope=service_settings).update(
project=project
)
order_ids = resource.orderitem_set.values_list('order_id', flat=True)
for order in models.Order.objects.filter(pk__in=order_ids):
if order.items.exclude(resource=resource).exists():
            raise MoveResourceException(
                'Resource moving is not possible, '
                'because its orders also contain items for other resources.'
            )
order.project = project
order.save(update_fields=['project'])
for invoice_item in invoice_models.InvoiceItem.objects.filter(
resource=resource,
invoice__state=invoice_models.Invoice.States.PENDING,
project=old_project,
):
start_invoice = invoice_item.invoice
target_invoice, _ = registrators.RegistrationManager.get_or_create_invoice(
project.customer,
date=datetime.date(
year=start_invoice.year, month=start_invoice.month, day=1
),
)
if target_invoice.state != invoice_models.Invoice.States.PENDING:
raise MoveResourceException(
'Resource moving is not possible, '
'because invoice items moving is not possible.'
)
invoice_item.project = project
invoice_item.project_uuid = project.uuid.hex
invoice_item.project_name = project.name
invoice_item.invoice = target_invoice
invoice_item.save(
update_fields=['project', 'project_uuid', 'project_name', 'invoice']
)
start_invoice.update_current_cost()
target_invoice.update_current_cost()
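# A minimal usage sketch (names are hypothetical; both objects are assumed to
# exist already). The call is wrapped in transaction.atomic, so raising
# MoveResourceException rolls back every change made so far:
#
#     try:
#         move_resource(resource, target_project)
#     except MoveResourceException as e:
#         logger.warning('Cannot move resource %s: %s', resource.uuid.hex, e)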
def get_invoice_item_for_component_usage(component_usage):
if not component_usage.plan_period:
# Field plan_period is optional if component_usage is not connected with billing
return
else:
if component_usage.plan_period.end:
plan_period_end = component_usage.plan_period.end
else:
plan_period_end = core_utils.month_end(component_usage.billing_period)
if component_usage.plan_period.start:
plan_period_start = component_usage.plan_period.start
else:
plan_period_start = component_usage.billing_period
try:
item = invoice_models.InvoiceItem.objects.get(
invoice__year=component_usage.billing_period.year,
invoice__month=component_usage.billing_period.month,
resource=component_usage.resource,
start__gte=plan_period_start,
end__lte=plan_period_end,
details__offering_component_type=component_usage.component.type,
)
return item
except invoice_models.InvoiceItem.DoesNotExist:
pass
def serialize_resource_limit_period(period):
billing_periods = get_full_days(period['start'], period['end'])
return {
'start': period['start'].isoformat(),
'end': period['end'].isoformat(),
'quantity': period['quantity'],
'billing_periods': billing_periods,
'total': str(period['quantity'] * billing_periods),
}
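# An illustrative input/output pair (a sketch; it assumes get_full_days returns
# the number of calendar days covered by the period, which is not shown here):
#
#     period = {
#         'start': datetime.date(2023, 1, 1),
#         'end': datetime.date(2023, 1, 10),
#         'quantity': 5,
#     }
#     # with get_full_days(...) == 10 the result would be
#     # {'start': '2023-01-01', 'end': '2023-01-10', 'quantity': 5,
#     #  'billing_periods': 10, 'total': '50'}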
def check_customer_blocked_for_terminating(resource):
try:
project = resource.project
except structure_models.Project.DoesNotExist:
project = structure_models.Project.all_objects.get(pk=resource.project_id)
if project.customer.blocked:
raise rf_exceptions.ValidationError(_('Blocked organization is not available.'))
def schedule_resources_termination(resources):
from waldur_mastermind.marketplace import views
if not resources:
return
view = views.ResourceViewSet.as_view({'post': 'terminate'})
user = core_utils.get_system_robot()
if not user:
        logger.error(
            'Cannot terminate resources of projects with a due date: '
            'staff user with username system_robot does not exist.'
        )
return
for resource in resources:
response = create_request(view, user, {}, uuid=resource.uuid.hex)
if response.status_code != status.HTTP_200_OK:
logger.error(
'Terminating resource %s has failed. %s'
% (resource.uuid.hex, response.content)
)
def create_local_resource(order_item, scope):
resource = models.Resource(
project=order_item.order.project,
offering=order_item.offering,
plan=order_item.plan,
limits=order_item.limits,
attributes=order_item.attributes,
name=order_item.attributes.get('name') or '',
        scope=scope if scope and not isinstance(scope, str) else None,
        backend_id=scope if scope and isinstance(scope, str) else '',
)
resource.init_cost()
resource.save()
resource.init_quotas()
order_item.resource = resource
order_item.save(update_fields=['resource'])
return resource
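# A usage sketch for the scope handling above (order_item, instance and the
# backend id string are hypothetical): a string is stored in backend_id, while
# a real scope object is linked via the scope field.
#
#     create_local_resource(order_item, 'backend-vm-42')   # -> backend_id='backend-vm-42', scope=None
#     create_local_resource(order_item, instance)          # -> scope=instance, backend_id=''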
|
import anndata
import dask.array
import h5py
import numpy as np
import os
import pytest
import scipy.sparse
from sfaira.data import load_store
from sfaira.unit_tests.data_for_tests.loaders import PrepareData
@pytest.mark.parametrize("store_format", ["h5ad", "dao", "anndata"])
def test_fatal(store_format: str):
"""
Test if basic methods of stores abort.
"""
if store_format == "anndata":
stores = PrepareData().prepare_store_anndata()
else:
store_path = PrepareData().prepare_store(store_format=store_format)
stores = load_store(cache_path=store_path, store_format=store_format)
stores.subset(attr_key="organism", values=["Mus musculus"])
store = stores.stores["Mus musculus"]
# Test both single and multi-store:
for x in [store, stores]:
_ = x.n_obs
_ = x.n_vars
_ = x.var_names
_ = x.shape
_ = x.indices
_ = x.genome_container
@pytest.mark.parametrize("store_format", ["h5ad", "dao"])
def test_config(store_format: str):
"""
Test that data set config files can be set, written and recovered.
"""
store_path = PrepareData().prepare_store(store_format=store_format)
config_path = os.path.join(store_path, "config_lung")
store = load_store(cache_path=store_path, store_format=store_format)
store.subset(attr_key="organism", values=["Mus musculus"])
store.subset(attr_key="assay_sc", values=["10x technology"])
store.write_config(fn=config_path)
store2 = load_store(cache_path=store_path, store_format=store_format)
store2.load_config(fn=config_path + ".pickle")
assert np.all(store.indices.keys() == store2.indices.keys())
assert np.all([np.all(store.indices[k] == store2.indices[k])
for k in store.indices.keys()])
@pytest.mark.parametrize("store_format", ["h5ad", "dao"])
def test_store_data(store_format: str):
"""
Test if the data exposed by the store are the same as in the original Dataset instance after streamlining.
"""
data = PrepareData()
# Run standard streamlining workflow on dsg and compare to object relayed via store.
# Prepare dsg.
dsg = data.prepare_dsg(load=True)
# Prepare store.
# Rewriting store to avoid mismatch of randomly generated data in cache and store.
store_path = data.prepare_store(store_format=store_format, rewrite=False, rewrite_store=True)
store = load_store(cache_path=store_path, store_format=store_format)
store.subset(attr_key="doi_journal", values=["no_doi_mock1"])
dataset_id = store.adata_by_key[list(store.indices.keys())[0]].uns["id"]
adata_store = store.adata_by_key[dataset_id]
x_store = store.data_by_key[dataset_id]
adata_ds = dsg.datasets[dataset_id].adata
x_ds = adata_ds.X.todense()
if isinstance(x_store, dask.array.Array):
x_store = x_store.compute()
    if isinstance(x_store, h5py.Dataset):
        # Need to load the dense array into memory if it comes from a backed h5 file.
x_store = x_store[:, :]
if isinstance(x_store, anndata._core.sparse_dataset.SparseDataset):
# Need to load sparse matrix into memory if it comes from a backed anndata object.
x_store = x_store[:, :]
if isinstance(x_store, scipy.sparse.csr_matrix):
x_store = x_store.todense()
if isinstance(x_ds, anndata._core.sparse_dataset.SparseDataset):
# Need to load sparse matrix into memory if it comes from a backed anndata object.
x_ds = x_ds[:, :]
if isinstance(x_ds, scipy.sparse.csr_matrix):
x_ds = x_ds.todense()
# Check that non-zero elements are the same:
assert x_store.shape[0] == x_ds.shape[0]
assert x_store.shape[1] == x_ds.shape[1]
assert np.all(np.where(x_store > 0)[0] == np.where(x_ds > 0)[0]), (np.sum(x_store > 0), np.sum(x_ds > 0))
assert np.all(np.where(x_store > 0)[1] == np.where(x_ds > 0)[1]), (np.sum(x_store > 0), np.sum(x_ds > 0))
assert np.all(x_store - x_ds == 0.), (np.sum(x_store), np.sum(x_ds))
assert x_store.dtype == x_ds.dtype
# Note: Do not run test on sum across entire object if dtype is float32 as this can result in test failures because
# of float overflows.
# Check .obs
obs_store = adata_store.obs
obs_ds = adata_ds.obs
assert np.all(obs_store.columns == obs_ds.columns), (obs_store.columns, obs_ds.columns)
for k, v in obs_store.items():
assert np.all(np.asarray(v.values.tolist()) == np.asarray(obs_ds[k].values.tolist()))
# Check .var
var_store = adata_store.var
var_ds = adata_ds.var
assert np.all(var_store.columns == var_ds.columns), (var_store.columns, var_ds.columns)
for k, v in var_store.items():
assert np.all(np.asarray(v.values.tolist()) == np.asarray(var_ds[k].values.tolist()))
# Check .uns
uns_store = adata_store.uns
uns_ds = adata_ds.uns
assert np.all(uns_store.keys() == uns_ds.keys()), (uns_store.keys(), uns_ds.keys())
for k, v in uns_store.items():
assert np.all(v == uns_ds[k])
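# A small consolidation sketch of the conversion chain used in test_store_data
# above (assumption: the helper name _to_dense is ours, it does not exist in sfaira):
def _to_dense(x):
    """Convert a store matrix (dask, backed h5, backed sparse or csr) to dense."""
    if isinstance(x, dask.array.Array):
        x = x.compute()
    if isinstance(x, h5py.Dataset):
        # Load the backed array into memory.
        x = x[:, :]
    if isinstance(x, anndata._core.sparse_dataset.SparseDataset):
        # Load the backed sparse matrix into memory.
        x = x[:, :]
    if isinstance(x, scipy.sparse.csr_matrix):
        x = x.todense()
    return x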
|
# hardware/models.py
from datetime import timedelta
from app import hackathon_variables
from django.db import models
from django.utils import timezone
from user.models import User
class ItemType(models.Model):
"""Represents a kind of hardware"""
# Human readable name
name = models.CharField(max_length=50, unique=True)
# Image of the hardware
image = models.FileField(upload_to='hw_images/')
# Description of this hardware
# what is it used for? which items are contained in the package?
description = models.TextField()
def get_borrowable_items(self):
""" Get items not borrowed already """
availables = Item.objects.filter(item_type=self, available=True)
borrowings = Borrowing.objects.filter(item__item_type=self, return_time__isnull=True)
return availables.exclude(id__in=[x.item.id for x in borrowings])
def get_available_count(self):
ava_count = Item.objects.filter(item_type=self, available=True).count()
req_count = self.get_requested_count()
borrowed_count = self.get_borrowed_count()
return ava_count - req_count - borrowed_count
def get_requested_count(self):
return Request.objects.get_active_by_item_type(self).count()
def get_borrowed_count(self):
return Borrowing.objects.get_active_by_item_type(self).count()
def get_unavailable_count(self):
return Item.objects.filter(item_type=self, available=False).count()
def make_request(self, user):
req = Request(item_type=self, user=user)
req.save()
def __str__(self):
return self.name
class Item(models.Model):
"""Represents a real world object identified by label"""
# Hardware model/type
item_type = models.ForeignKey(ItemType, on_delete=models.CASCADE)
# Identifies a real world object
label = models.CharField(max_length=20, unique=True)
# Is the item available?
available = models.BooleanField(default=True)
# Any other relevant information about this item
comments = models.TextField(blank=True, null=True)
def can_be_borrowed(self):
return Borrowing.objects.filter(return_time__isnull=True, item=self).count() == 0
def __str__(self):
return '{} ({})'.format(self.label, self.item_type.name)
class BorrowingQuerySet(models.QuerySet):
def get_active(self):
return self.filter(return_time__isnull=True)
def get_returned(self):
return self.filter(return_time__isnull=False)
def get_active_by_item_type(self, item_type):
return self.filter(return_time__isnull=True, item__item_type=item_type)
def get_active_by_user(self, user):
return self.filter(return_time__isnull=True, user=user)
class Borrowing(models.Model):
"""
The 'item' has been borrowed to the 'user'
"""
objects = BorrowingQuerySet.as_manager()
user = models.ForeignKey(User, on_delete=models.DO_NOTHING)
item = models.ForeignKey(Item, on_delete=models.DO_NOTHING)
# Instant of creation
picked_up_time = models.DateTimeField(auto_now_add=True)
# If null: item has not been returned yet
return_time = models.DateTimeField(null=True, blank=True)
# Borrowing handled by
borrowing_by = models.ForeignKey(User, related_name='hardware_admin_borrowing', on_delete=models.DO_NOTHING)
# Return handled by (null until returned)
return_by = models.ForeignKey(User, related_name='hardware_admin_return', null=True, blank=True,
on_delete=models.SET_NULL)
def get_picked_up_time_ago(self):
return str(timezone.now() - self.picked_up_time)
def get_return_time_ago(self):
return str(timezone.now() - self.return_time)
def is_active(self):
return self.return_time is None
def __str__(self):
return '{} ({})'.format(self.item.item_type.name, self.user)
class RequestQuerySet(models.QuerySet):
def get_active(self):
delta = timedelta(minutes=hackathon_variables.HARDWARE_REQUEST_TIME)
threshold = timezone.now() - delta
return self.filter(borrowing__isnull=True, request_time__gte=threshold)
def get_borrowed(self):
return self.filter(borrowing__isnull=False)
def get_expired(self):
delta = timedelta(minutes=hackathon_variables.HARDWARE_REQUEST_TIME)
threshold = timezone.now() - delta
return self.filter(borrowing__isnull=True, request_time__lt=threshold)
def get_active_by_user(self, user):
delta = timedelta(minutes=hackathon_variables.HARDWARE_REQUEST_TIME)
threshold = timezone.now() - delta
return self.filter(borrowing__isnull=True, request_time__gte=threshold, user=user)
def get_active_by_item_type(self, item_type):
delta = timedelta(minutes=hackathon_variables.HARDWARE_REQUEST_TIME)
threshold = timezone.now() - delta
return self.filter(borrowing__isnull=True, request_time__gte=threshold, item_type=item_type)
class Request(models.Model):
"""
Represents reservation of an item
of type 'item_type' done by 'user'
"""
objects = RequestQuerySet.as_manager()
# Requested item type
item_type = models.ForeignKey(ItemType, on_delete=models.CASCADE)
# Hacker that made the request
user = models.ForeignKey(User, on_delete=models.CASCADE)
# Borrowing derived from this request
borrowing = models.ForeignKey(Borrowing, null=True, blank=True, on_delete=models.CASCADE)
# Instant of creation
request_time = models.DateTimeField(auto_now_add=True)
def is_active(self):
delta = timedelta(minutes=hackathon_variables.HARDWARE_REQUEST_TIME)
remaining = delta - (timezone.now() - self.request_time)
return not self.borrowing and remaining.total_seconds() > 0
def get_remaining_time(self):
delta = timedelta(minutes=hackathon_variables.HARDWARE_REQUEST_TIME)
remaining = delta - (timezone.now() - self.request_time)
if self.borrowing:
return "Borrowed"
elif remaining.total_seconds() < 0:
return "Expired"
else:
return str(remaining)
def __str__(self):
return '{} ({})'.format(self.item_type, self.user)
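# A sketch of the request/borrowing lifecycle implied by the models above
# (names are hypothetical; this is not executable outside a configured Django project):
#
#     item_type.make_request(user)                      # hacker reserves a unit
#     item = item_type.get_borrowable_items().first()   # admin picks a free item
#     borrowing = Borrowing.objects.create(
#         user=user, item=item, borrowing_by=admin_user)
#     request.borrowing = borrowing                     # mark the request as served
#     request.save()
#     ...
#     borrowing.return_time = timezone.now()            # hand-back closes the loop
#     borrowing.return_by = admin_user
#     borrowing.save()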
|
import unittest
import numpy as np
from skfem.mesh import MeshHex, MeshQuad, MeshTri
from skfem.element import ElementHex1, ElementQuad1, ElementHex2
from skfem.assembly import FacetBasis
from skfem.mapping.mapping_mortar import MappingMortar
class TestIsoparamNormals(unittest.TestCase):
"""Test that normals on x[i] == 0 are correct."""
mesh = MeshHex
elem = ElementHex1
def runTest(self):
m = self.mesh().refined()
e = self.elem()
fb = FacetBasis(m, e)
x = fb.global_coordinates().value
eps = 1e-6
for itr in range(m.p.shape[0]):
case = (x[itr] < eps) * (x[itr] > -eps)
for jtr in range(m.p.shape[0]):
normals = fb.normals.value[jtr][case]
if itr == jtr:
self.assertTrue((normals == -1).all())
else:
self.assertTrue((np.abs(normals) < 1e-14).all())
class TestIsoparamNormalsQuad(TestIsoparamNormals):
mesh = MeshQuad
elem = ElementQuad1
class TestIsoparamNormalsHex2(TestIsoparamNormals):
elem = ElementHex2
class TestInverseMapping(unittest.TestCase):
"""Test that inverse mapping works for non-rectangular elements."""
element = ElementQuad1
def initialize_meshes(self):
m0 = MeshQuad()
m = MeshQuad([[0, 1, 1, 0],
[0, .9, 1, 1]],
m0.t)
return m
def within_refelem(self, y):
return ((np.abs(y) < 1. + 1e-12).all()
and (np.abs(y) > 0. - 1e-12).all())
def runTest(self):
m = self.initialize_meshes()
e = self.element()
fb = FacetBasis(m, e)
x = fb.mapping.G(fb.X, find=fb.find)
Y0 = fb.mapping.invF(x, tind=fb.mesh.f2t[0, fb.find])
assert self.within_refelem(Y0)
class TestInverseMappingHex(TestInverseMapping):
element = ElementHex1
def initialize_meshes(self):
m0 = MeshHex()
m = MeshHex(np.array([[0., 0., 0.],
[0., 0., 1.],
[0., 1., 0.],
[1., 0.7, 0.7],
[0., 1., 1.],
[1., 0., 1.],
[1., 1., 0.],
[1., 1., 1.]]).T, m0.t)
return m
class TestInverseMappingHex2(TestInverseMappingHex):
"""This should be equivalent to TestInverseMappingHex."""
element = ElementHex2
class TestMortarPair(unittest.TestCase):
"""Check that mapped points match."""
mesh1_type = MeshTri
mesh2_type = MeshTri
nrefs1 = 2
nrefs2 = 3
translate_y = 0.0
def init_meshes(self):
m1 = self.mesh1_type().refined(self.nrefs1)
m2 = self.mesh2_type().refined(self.nrefs2).translated((1.0, self.translate_y))
return m1, m2
def runTest(self):
m1, m2 = self.init_meshes()
mp = MappingMortar.init_2D(m1, m2,
m1.facets_satisfying(lambda x: x[0] == 1.),
m2.facets_satisfying(lambda x: x[0] == 1.),
np.array([0., 1.]))
test_points = np.array([np.linspace(0., 1., 7)])
self.assertTrue((mp.G(test_points) -
mp.G(test_points) < 1e-10).all())
class TestMortarPairTriQuad(TestMortarPair):
mesh1_type = MeshTri
mesh2_type = MeshQuad
class TestMortarPairQuadQuad(TestMortarPair):
mesh1_type = MeshQuad
mesh2_type = MeshQuad
class TestMortarPairNoMatch1(TestMortarPair):
mesh1_type = MeshQuad
mesh2_type = MeshTri
translate_y = 0.1
class TestMortarPairNoMatch2(TestMortarPair):
mesh1_type = MeshQuad
mesh2_type = MeshTri
translate_y = -np.pi / 10.
if __name__ == '__main__':
unittest.main()
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmdet.models import HEADS
@HEADS.register_module()
class ImageSegHead(nn.Module):
def __init__(self, img_feat_dim, seg_pts_dim, num_classes, lidar_fc=[], concat_fc=[], class_weights=None):
super(ImageSegHead, self).__init__()
# self.in_channels = in_channels # feat_channels
self.num_classes = num_classes # include background
self.lidar_fc = [seg_pts_dim] + lidar_fc
self.concat_fc = [self.lidar_fc[-1] + img_feat_dim] + concat_fc
if class_weights:
self.class_weights = torch.tensor(class_weights).cuda()
else:
self.class_weights = None
before_fusion = []
for i, (in_dim, out_dim) in enumerate(zip(self.lidar_fc[:-1], self.lidar_fc[1:])):
before_fusion.append(nn.Linear(in_dim, out_dim))
if i == len(lidar_fc) - 1: # do not add activation in the last layer
break
before_fusion.append(nn.ReLU(inplace=True))
self.before_fusion = nn.Sequential(*before_fusion)
after_fusion = []
for i, (in_dim, out_dim) in enumerate(zip(self.concat_fc[:-1], self.concat_fc[1:])):
after_fusion.append(nn.Linear(in_dim, out_dim))
after_fusion.append(nn.ReLU(inplace=True))
self.after_fusion = nn.Sequential(*after_fusion)
self.head = nn.Linear(self.concat_fc[-1], num_classes)
def forward_fusion(self, img_feats, seg_pts, seg_pts_indices):
x = img_feats.permute(0, 2, 3, 1)
sample_feats = []
for i in range(x.shape[0]):
sample_feats.append(x[i][seg_pts_indices[i][:, 0], seg_pts_indices[i][:, 1]])
# sample_feats[i].shape=(img_indices[i].shape[0], 64)
sample_feats = torch.cat(sample_feats) # shape=(M, 64); M=total points in a batch
lidar_feat = torch.cat(seg_pts) # (M, pts_dim=4)
lidar_feat = self.before_fusion(lidar_feat)
fusion_feats = torch.cat([sample_feats, lidar_feat], 1) # (M, 64 + C)
return fusion_feats
def forward_logits(self, fusion_feats):
fusion_feats = self.after_fusion(fusion_feats)
seg_logits = self.head(fusion_feats) # (M, num_classes)
return seg_logits
def forward(self, img_feats, seg_pts, seg_pts_indices):
fusion_feats = self.forward_fusion(img_feats, seg_pts, seg_pts_indices)
seg_logits = self.forward_logits(fusion_feats)
return seg_logits
def loss(self, seg_logits, seg_label, ignore_background=False):
# seg_logits = self.forward(img_feats, img_indices, img_meta)
# seg_label[0].device: cuda:0
y = torch.cat(seg_label) # shape=(M,); dtype=torch.uint8
# y = y.type(torch.LongTensor).cuda()
if ignore_background:
seg_loss = F.cross_entropy(seg_logits, y, weight=self.class_weights, ignore_index=self.num_classes-1)
else:
seg_loss = F.cross_entropy(seg_logits, y, weight=self.class_weights)
return dict(seg_loss=seg_loss)
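# A shape walkthrough for ImageSegHead (a sketch with made-up dimensions, built
# directly rather than through the mmdet registry):
#
#     head = ImageSegHead(img_feat_dim=64, seg_pts_dim=4, num_classes=5,
#                         lidar_fc=[32], concat_fc=[128, 64])
#     img_feats = torch.randn(2, 64, 8, 16)                  # (B, C, H, W)
#     seg_pts = [torch.randn(10, 4), torch.randn(7, 4)]      # per-sample point features
#     seg_pts_indices = [torch.randint(0, 8, (10, 2)),       # (row, col) into the 8x16 map
#                        torch.randint(0, 8, (7, 2))]
#     logits = head(img_feats, seg_pts, seg_pts_indices)     # -> (17, 5), M = 10 + 7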
@HEADS.register_module()
class SepDiscHead(nn.Module):
def __init__(self, img_feat_dim, seg_pts_dim, num_classes, lidar_fc=[], concat_fc=[], class_weights=None):
super(SepDiscHead, self).__init__()
# self.in_channels = in_channels # feat_channels
self.num_classes = num_classes
self.lidar_fc = [seg_pts_dim] + lidar_fc
self.concat_fc = [self.lidar_fc[-1] + img_feat_dim] + concat_fc
if class_weights:
self.class_weights = torch.tensor(class_weights).cuda()
else:
self.class_weights = None
before_fusion = []
for i, (in_dim, out_dim) in enumerate(zip(self.lidar_fc[:-1], self.lidar_fc[1:])):
before_fusion.append(nn.Linear(in_dim, out_dim))
if i == len(lidar_fc) - 1: # do not add activation in the last layer
break
before_fusion.append(nn.ReLU(inplace=True))
self.before_fusion = nn.Sequential(*before_fusion)
after_fusion = []
for i, (in_dim, out_dim) in enumerate(zip(self.concat_fc[:-1], self.concat_fc[1:])):
after_fusion.append(nn.Linear(in_dim, out_dim))
after_fusion.append(nn.ReLU(inplace=True))
self.after_fusion = nn.Sequential(*after_fusion)
self.head = nn.Linear(self.concat_fc[-1], num_classes)
def forward_fusion(self, img_feats, seg_pts, seg_pts_indices):
x = img_feats.permute(0, 2, 3, 1)
sample_feats = []
for i in range(x.shape[0]):
sample_feats.append(x[i][seg_pts_indices[i][:, 0], seg_pts_indices[i][:, 1]])
# sample_feats[i].shape=(img_indices[i].shape[0], 64)
sample_feats = torch.cat(sample_feats) # shape=(M, 64); M=total points in a batch
lidar_feats = torch.cat(seg_pts) # (M, pts_dim=4)
lidar_feats = self.before_fusion(lidar_feats)
fusion_feats = torch.cat([sample_feats, lidar_feats], 1) # (M, 64 + C)
return lidar_feats, fusion_feats
def forward_logits(self, fusion_feats):
fusion_feats = self.after_fusion(fusion_feats)
seg_logits = self.head(fusion_feats) # (M, num_classes)
return seg_logits
    def forward(self, img_feats, seg_pts, seg_pts_indices):
        # forward_fusion returns (lidar_feats, fusion_feats); only the fused
        # features are needed to compute the segmentation logits here.
        lidar_feats, fusion_feats = self.forward_fusion(img_feats, seg_pts, seg_pts_indices)
        seg_logits = self.forward_logits(fusion_feats)
        return seg_logits
def loss(self, seg_logits, seg_label):
# seg_logits = self.forward(img_feats, img_indices, img_meta)
# seg_label[0].device: cuda:0
y = torch.cat(seg_label) # shape=(M,); dtype=torch.uint8
# y = y.type(torch.LongTensor).cuda()
seg_loss = F.cross_entropy(seg_logits, y, weight=self.class_weights)
return dict(seg_loss=seg_loss)
@HEADS.register_module()
class ImageSegHeadWoFusion(nn.Module):
def __init__(self, img_feat_dim, num_classes, class_weights=None):
super(ImageSegHeadWoFusion, self).__init__()
self.num_classes = num_classes
if class_weights:
self.class_weights = torch.tensor(class_weights).cuda()
else:
self.class_weights = None
self.head = nn.Linear(img_feat_dim, num_classes)
def forward(self, img_feats, seg_pts_indices):
x = img_feats.permute(0, 2, 3, 1)
sample_feats = []
for i in range(x.shape[0]):
sample_feats.append(x[i][seg_pts_indices[i][:, 0], seg_pts_indices[i][:, 1]])
# sample_feats[i].shape=(img_indices[i].shape[0], 64)
sample_feats = torch.cat(sample_feats) # shape=(M, 64)
seg_logits = self.head(sample_feats) # (M, num_classes)
return seg_logits
def loss(self, seg_logits, seg_label):
y = torch.cat(seg_label) # shape=(M,); dtype=torch.uint8
seg_loss = F.cross_entropy(seg_logits, y, weight=self.class_weights)
return dict(seg_loss=seg_loss)
|
#! /usr/bin/python3
"""
This file contains GUI code for configuring the SB Components LoRa HAT
Developed by - SB Components
http://sb-components.co.uk
"""
from lora_hat import LoraHat
import logging
import os
from tkinter import font
import tkinter as tk
from tkinter import messagebox
import webbrowser
if os.name == "posix":
COMPORT_BASE = "/dev/"
else:
COMPORT_BASE = ""
class MainApp(tk.Tk, LoraHat):
"""
This is a class for Creating Frames and Buttons for left and top frame
"""
port = "COM10"
current_baud = 9600
def __init__(self, *args, **kwargs):
global logo, img, xy_pos
tk.Tk.__init__(self, *args, **kwargs)
LoraHat.__init__(self)
self.screen_width = tk.Tk.winfo_screenwidth(self)
self.screen_height = tk.Tk.winfo_screenheight(self)
self.app_width = 800
self.app_height = 480
self.xpos = (self.screen_width / 2) - (self.app_width / 2)
self.ypos = (self.screen_height / 2) - (self.app_height / 2)
xy_pos = self.xpos, self.ypos
self.label_font = font.Font(family="Helvetica", size=10)
self.heading_font = font.Font(family="Helvetica", size=12)
self.geometry(
"%dx%d+%d+%d" % (self.app_width, self.app_height, self.xpos,
self.ypos))
if not self.screen_width > self.app_width:
self.attributes('-fullscreen', True)
self.title("LORA HAT")
self.config(bg="gray85")
self.label_font = font.Font(family="Helvetica", size=10)
self.heading_font = font.Font(family="Helvetica", size=12)
self.LARGE_FONT = ("Verdana", 12)
img = tk.PhotoImage(file=path + '/Images/settings.png')
logo = tk.PhotoImage(file=path + '/Images/sblogo.png')
self.top_frame_color = "dimgray"
self.left_frame_color = "gray21"
self.right_frame_color = "gray24"
self.top_frame = tk.Frame(self, height=int(self.app_height / 12), bd=2,
width=self.app_width,
bg=self.top_frame_color)
self.top_frame.pack(side="top", fill="both")
self.left_frame = tk.Frame(self, width=int(self.app_width / 4),
bg=self.left_frame_color)
self.left_frame.pack(side="left", fill="both", expand="True")
self.left_frame.pack_propagate(0)
self.right_frame = tk.Frame(self, bg=self.right_frame_color)
self.right_frame.pack(side="right", fill="both", expand=True)
self.right_frame.propagate(0)
self.rtx_frame = TransceiverFrame(parent=self.right_frame,
controller=self)
self.rtx_frame.tkraise()
# Top Bar
tk.Label(self.top_frame, bg="dimgray", fg="ghostwhite",
text="LORA HAT",
font=font.Font(family="Helvetica", size=20)).place(x=340)
url = "https://shop.sb-components.co.uk/"
LabelButton(self.top_frame, url=url, image=logo, height=30,
bg="dimgray", x_pos=580, y_pos=1)
self.left_frame_contents()
def left_frame_contents(self):
"""
This function creates the left frame widgets
"""
global logo
x_ref, y_ref = 10, 20
font_ = font.Font(family="Helvetica", size=11)
self.baud_var = tk.StringVar()
self.parity_var = tk.StringVar()
self.air_rate_var = tk.StringVar()
self.packet_var = tk.StringVar()
self.ch_rssi_var = tk.StringVar()
self.tx_power_var = tk.StringVar()
self.packet_rssi_var = tk.StringVar()
self.reply_var = tk.StringVar()
self.lbt_var = tk.StringVar()
self.wor_var = tk.StringVar()
self.wor_cycle_var = tk.StringVar()
self.address_var = tk.IntVar()
self.net_id_var = tk.IntVar()
self.channel_var = tk.IntVar()
self.encrypt_key_var = tk.IntVar()
self.transmission_mode_var = tk.StringVar()
self._com_port = tk.StringVar()
self._set_baud_rate_var = tk.IntVar()
self.baud_var.set("9600")
self.parity_var.set("8N1")
self.air_rate_var.set("2.4K")
self.packet_var.set("240 bytes")
self.ch_rssi_var.set("DISABLE")
self.tx_power_var.set("22dbm")
self.packet_rssi_var.set("DISABLE")
self.transmission_mode_var.set("Transparent")
self.reply_var.set("DISABLE")
self.lbt_var.set("DISABLE")
self.wor_var.set("WOR Receiver")
self.wor_cycle_var.set("2000 ms")
self._com_port.set(self.port)
self._set_baud_rate_var.set(self.current_baud)
self.address_var.set(0)
self.net_id_var.set(0)
self.channel_var.set(0)
self.encrypt_key_var.set(0)
self.baud_options = ["1200", "2400", "4800", "9600", "19200", "38400",
"57600", "115200"]
self._set_baud_rate_options = [1200, 2400, 4800, 9600, 19200, 38400,
57600, 115200]
self.parity_options = ["8N1", "8O1", "8E1"]
self.air_rate_options = ["0.3K", "1.2K", "2.4K", "4.8K", "9.6K",
"19.2K", "38.4K", "62.5K"]
self.packet_options = ["240 bytes", "128 bytes", "64 bytes",
"32 bytes"]
self.enable_disable_options = ["DISABLE", "ENABLE"]
self.tx_power_options = ["22dbm", "17dbm", "13dbm", "10dbm"]
self.transmission_mode_options = ["Transparent", "Fixed Point"]
self.wor_control_options = ["WOR Receiver", "WOR Transmitter"]
self.wor_cycle_options = ["500 ms", "1000 ms", "1500 ms", "2000 ms",
"2500 ms", "3000 ms", "3500 ms", "4000 ms"]
tk.Label(self.left_frame, fg="white", bg=self.left_frame_color,
font=font_,
text="Baud Rate").place(x=x_ref, y=y_ref)
tk.OptionMenu(self.left_frame, self.baud_var,
*self.baud_options).place(x=x_ref + 140, y=y_ref,
width=100,
height=23)
tk.Label(self.left_frame, fg="white", bg=self.left_frame_color,
font=font_,
text="Parity").place(x=x_ref, y=y_ref + 40)
tk.OptionMenu(self.left_frame, self.parity_var,
*self.parity_options).place(x=x_ref + 140, y=y_ref + 40,
width=100,
height=23)
tk.Label(self.left_frame, fg="white", bg=self.left_frame_color,
font=font_,
text="Air Data Rate").place(x=x_ref, y=y_ref + 80)
tk.OptionMenu(self.left_frame, self.air_rate_var,
*self.air_rate_options).place(x=x_ref + 140,
y=y_ref + 80,
width=100, height=23)
tk.Label(self.left_frame, fg="white", bg=self.left_frame_color,
font=font_,
text="Packet Size").place(x=x_ref, y=y_ref + 120)
tk.OptionMenu(self.left_frame, self.packet_var,
*self.packet_options).place(x=x_ref + 140, y=y_ref + 120,
width=100, height=23)
tk.Label(self.left_frame, fg="white", bg=self.left_frame_color,
font=font_,
text="Packet RSSI").place(x=x_ref, y=y_ref + 160)
tk.OptionMenu(self.left_frame, self.packet_rssi_var,
*self.enable_disable_options).place(x=x_ref + 140,
y=y_ref + 160,
width=100,
height=23)
tk.Label(self.left_frame, fg="white", bg=self.left_frame_color,
font=font_,
text="Transmit Power").place(x=x_ref, y=y_ref + 200)
tk.OptionMenu(self.left_frame, self.tx_power_var,
*self.tx_power_options).place(x=x_ref + 140,
y=y_ref + 200,
width=100,
height=23)
tk.Label(self.left_frame, fg="white", bg=self.left_frame_color,
font=font_,
text="Channel RSSI").place(x=x_ref, y=y_ref + 240)
tk.OptionMenu(self.left_frame, self.ch_rssi_var,
*self.enable_disable_options).place(x=x_ref + 140,
y=y_ref + 240,
width=100,
height=23)
tk.Label(self.left_frame, fg="white", bg=self.left_frame_color,
font=font_,
text="Transmission Mode").place(x=x_ref, y=y_ref + 280)
tk.OptionMenu(self.left_frame, self.transmission_mode_var,
*self.transmission_mode_options).place(x=x_ref + 140,
y=y_ref + 280,
width=100,
height=23)
tk.Label(self.left_frame, fg="white", bg=self.left_frame_color,
font=font_,
text="Reply").place(x=x_ref, y=y_ref + 320)
tk.OptionMenu(self.left_frame, self.reply_var,
*self.enable_disable_options).place(x=x_ref + 140,
y=y_ref + 320,
width=100,
height=23)
tk.Label(self.left_frame, fg="white", bg=self.left_frame_color,
font=font_,
text="LBT").place(x=x_ref, y=y_ref + 360)
tk.OptionMenu(self.left_frame, self.lbt_var,
*self.enable_disable_options).place(x=x_ref + 140,
y=y_ref + 360,
width=100,
height=23)
# Right Side
tk.Label(self.left_frame, fg="white", bg=self.left_frame_color,
font=font_,
text="WOR Control").place(x=x_ref + 270, y=y_ref)
tk.OptionMenu(self.left_frame, self.wor_var,
*self.wor_control_options).place(x=x_ref + 370,
y=y_ref,
width=110,
height=23)
tk.Label(self.left_frame, fg="white", bg=self.left_frame_color,
font=font_,
text="WOR Cycle").place(x=x_ref + 270, y=y_ref + 40)
tk.OptionMenu(self.left_frame, self.wor_cycle_var,
*self.wor_cycle_options).place(x=x_ref + 370,
y=y_ref + 40,
width=110,
height=23)
# Module Address
address_vcmd = (self.register(self.address_validate), '%P')
tk.Label(self.left_frame, fg="white", bg=self.left_frame_color,
font=font_,
text="Address").place(x=x_ref + 270, y=y_ref + 80)
tk.Entry(self.left_frame, fg="black", font=font_, width=12,
validate="key", validatecommand=address_vcmd,
textvariable=self.address_var).place(x=x_ref + 370,
y=y_ref + 80)
net_id_vcmd = (self.register(self.net_id_validate), '%P')
tk.Label(self.left_frame, fg="white", bg=self.left_frame_color,
font=font_,
text="Net ID").place(x=x_ref + 270, y=y_ref + 120)
tk.Entry(self.left_frame, fg="black", font=font_, width=12,
validate="key", validatecommand=net_id_vcmd,
textvariable=self.net_id_var).place(x=x_ref + 370,
y=y_ref + 120)
# Channel
channel_vcmd = (self.register(self.channel_validate), '%P')
tk.Label(self.left_frame, fg="white", bg=self.left_frame_color,
font=font_,
text="Channel").place(x=x_ref + 270, y=y_ref + 160)
tk.Entry(self.left_frame, fg="black", font=font_, width=12,
validate="key", validatecommand=channel_vcmd,
textvariable=self.channel_var).place(x=x_ref + 370,
y=y_ref + 160)
encrypt_vcmd = (self.register(self.encrypt_validate), '%P')
tk.Label(self.left_frame, fg="white", bg=self.left_frame_color,
font=font_,
text="Encrypt Key").place(x=x_ref + 270, y=y_ref + 200)
tk.Entry(self.left_frame, fg="black", font=font_, width=12,
validate="key", validatecommand=encrypt_vcmd,
textvariable=self.encrypt_key_var).place(x=x_ref + 370,
y=y_ref + 200)
self.write_button = tk.Button(self.left_frame, text='Write',
fg="white", bg="gray30", relief="raised",
font=self.LARGE_FONT, bd=2,
highlightthickness=0, width=8,
command=self.write_to_lora)
self.write_button.place(x=x_ref + 270, y=y_ref + 240)
self.read_button = tk.Button(self.left_frame, text='Read',
fg="white", bg="gray30", relief="raised",
font=self.LARGE_FONT, bd=2,
highlightthickness=0, width=8,
command=self.read_from_lora)
self.read_button.place(x=x_ref + 370, y=y_ref + 240)
tk.Label(self.left_frame, fg="white", bg=self.left_frame_color,
font=self.LARGE_FONT, text="Port").place(x=x_ref + 270,
y=y_ref + 290)
self.com_entry = tk.Entry(self.left_frame, fg="black",
font=self.label_font, width=14,
textvariable=self._com_port)
self.com_entry.place(x=x_ref + 370, y=y_ref + 290)
tk.Label(self.left_frame, fg="white", bg=self.left_frame_color,
font=self.LARGE_FONT, text="Baudrate").place(x=x_ref + 270,
y=y_ref + 330)
tk.OptionMenu(self.left_frame, self._set_baud_rate_var,
*self._set_baud_rate_options).place(x=x_ref + 370,
y=y_ref + 330,
width=110,
height=23)
self.connect_button = tk.Button(self.left_frame, text="Connect",
fg="white", bg=self.left_frame_color,
font=self.LARGE_FONT, width=9,
command=self.connect_lora_hat)
self.connect_button.place(x=x_ref + 370, y=y_ref + 370)
self.circle = tk.Canvas(self.left_frame, height=40, width=40,
bg=self.left_frame_color, bd=0,
highlightthickness=0)
self.indication = self.circle.create_oval(10, 10, 25, 25, fill="red")
self.circle.place(x=x_ref + 290, y=y_ref + 365)
def write_to_lora(self):
self.rtx_frame.talk_var.set(0)
if self.alive:
self.write_all_configuration()
else:
messagebox.showerror("Port Error",
"Serial port not connected!")
def read_from_lora(self):
self.rtx_frame.talk_var.set(0)
if self.alive:
self.read_configuration_data()
else:
messagebox.showerror("Port Error",
"Serial port not connected!")
def set_variables(self):
"""
Set Variables to send to LORA module
"""
self._module_address = self.address_var.get()
self._net_id = self.net_id_var.get()
self._baud_rate = self.baud_options.index(self.baud_var.get())
self._parity = self.parity_options.index(self.parity_var.get())
self._air_data_rate = self.air_rate_options.index(
self.air_rate_var.get())
self._packet_size = self.packet_options.index(self.packet_var.get())
self._packet_rssi = 0 if self.packet_rssi_var.get() == "DISABLE" else 1
self._transmit_power = \
self.tx_power_options.index(self.tx_power_var.get())
self._channel = self.channel_var.get()
self._channel_rssi = 0 if self.ch_rssi_var.get() == "DISABLE" else 1
        self._transmission_mode = self.transmission_mode_options.index(
            self.transmission_mode_var.get())
self._reply = 0 if self.reply_var.get() == "DISABLE" else 1
self._lbt = 0 if self.lbt_var.get() == "DISABLE" else 1
self._wor_tx_control = self.wor_control_options.index(
self.wor_var.get())
self._wor_cycle = self.wor_cycle_options.index(
self.wor_cycle_var.get())
self._encrypt_key = self.encrypt_key_var.get()
def get_values(self, data):
"""
Read data and set GUI variables
Parameters
----------
data: Data array from slave
Returns
-------
None
"""
self.address_var.set(data[3] << 8 | data[4])
self.net_id_var.set(data[5])
self.baud_var.set(self.baud_options[data[6] >> 5])
self.parity_var.set(self.parity_options[(data[6] & 0b11000) >> 3])
self.air_rate_var.set(self.air_rate_options[data[6] & 0b111])
self.packet_var.set(self.packet_options[data[7] >> 6])
self.packet_rssi_var.set("ENABLE" if (data[7] & 0b100000) else "DISABLE")
self.tx_power_var.set(self.tx_power_options[data[7] & 0b11])
self.channel_var.set(data[8])
self.ch_rssi_var.set("ENABLE" if (data[9] & 0x80) else "DISABLE")
        self.transmission_mode_var.set(
            self.transmission_mode_options[(data[9] & 0x40) >> 6])
        self.reply_var.set("ENABLE" if (data[9] & 0x20) else "DISABLE")
        self.lbt_var.set("ENABLE" if (data[9] & 0x10) else "DISABLE")
        # The WOR control bit must be shifted down before indexing the two-entry options list.
        self.wor_var.set(self.wor_control_options[(data[9] & 0x08) >> 3])
        self.wor_cycle_var.set(self.wor_cycle_options[data[9] & 0b111])
self.encrypt_key_var.set(data[10] << 8 | data[11])
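    # A worked example of the bit unpacking above (illustrative register value):
    # with data[6] == 0x62 == 0b0110_0010,
    #     data[6] >> 5            == 3  -> baud "9600"
    #     (data[6] & 0b11000) >> 3 == 0 -> parity "8N1"
    #     data[6] & 0b111          == 2 -> air data rate "2.4K"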
def connect_lora_hat(self):
"""
This function connects the serial port
"""
if self.connect_button.cget(
'text') == 'Connect' and self._com_port.get():
self.connect_hat(port=COMPORT_BASE + self._com_port.get(),
baud_rate=self._set_baud_rate_var.get())
if self.alive:
self.connect_button.config(relief="sunken", text="Disconnect")
self.circle.itemconfigure(self.indication, fill="green3")
self.com_entry.config(state="readonly")
else:
messagebox.showerror("Port Error",
"Couldn't Connect with {} ".format(self._com_port.get(), self._set_baud_rate_var.get()))
elif self.connect_button.cget('text') == 'Disconnect':
self.connect_button.config(relief="raised", text="Connect")
self.circle.itemconfigure(self.indication, fill="red")
self.com_entry.config(state="normal")
self.disconnect_hat()
def update_rx_data(self, data):
try:
if self.rtx_frame.talk_var.get():
data = data.decode("utf-8")
self.rtx_frame.rx_text.set(data + "\n")
else:
self.get_values(data)
self.rxData = []
        except Exception:
            # Ignore malformed frames so the GUI keeps running.
            pass
def address_validate(self, new_value):
try:
if not new_value or 0 <= int(new_value) <= 0xFFFF:
return True
else:
self.bell()
return False
except ValueError:
self.bell()
return False
def net_id_validate(self, new_value):
try:
if not new_value or 0 <= int(new_value) <= 0xFF:
return True
else:
self.bell()
return False
except ValueError:
self.bell()
return False
def channel_validate(self, new_value):
try:
if not new_value or 0 <= int(new_value) <= 80:
return True
else:
self.bell()
return False
except ValueError:
self.bell()
return False
def encrypt_validate(self, new_value):
try:
if not new_value or 0 <= int(new_value) <= 0xffff:
return True
else:
self.bell()
return False
except ValueError:
self.bell()
return False
class TransceiverFrame(tk.Frame):
"""
This is a class for Creating widgets for Matplotlib Graph
"""
def __init__(self, parent, controller):
tk.Frame.__init__(self, parent)
self.controller = controller
self.LARGE_FONT = self.controller.LARGE_FONT
self.bg_color = self.controller.right_frame_color
self.talk_var = tk.IntVar()
self.talk_var.set(0)
self.rx_text = tk.StringVar()
tk.Label(parent, fg="white", bg=self.bg_color, font=font.Font(
family="Helvetica", size=11), text="Talk Mode").place(x=10, y=15)
tk.Radiobutton(parent, text="On", variable=self.talk_var, value=1,
command=self.set_talk_mode).place(x=150, y=15)
tk.Radiobutton(parent, text="Off", variable=self.talk_var, value=0,
command=self.set_talk_mode).place(x=230, y=15)
# Receiver Label Box
self.rx_label = tk.Label(parent, justify="left", anchor="nw",
wraplength=270,
bg="gray80", fg="black",
bd=2, height=5, width=37, padx=10, pady=10,
textvariable=self.rx_text)
self.rx_label.place(x=10, y=60)
tk.Label(parent, fg="white", bg=self.bg_color, font=font.Font(
family="Helvetica", size=11), text="Rx Message").place(x=10, y=160)
# Transmitter Text Box
self.tx_text = tk.Text(parent, padx=10, pady=10, bg="gray80",
fg="black", height=6, width=33,
wrap="word",
relief="sunken", state="normal")
self.tx_text.place(x=10, y=200)
tk.Label(parent, fg="white", bg=self.bg_color, font=font.Font(
family="Helvetica", size=11), text="Type Tx Message").place(x=10,
y=320)
self.send_button = tk.Button(parent, text='Send',
fg="white", bg="gray30", relief="raised",
font=self.LARGE_FONT, bd=2,
highlightthickness=0, width=20,
command=self.send_msg)
self.send_button.place(x=50, y=360)
def set_talk_mode(self):
if self.talk_var.get():
self.controller.normal_mode()
else:
self.controller.deep_sleep_mode()
def send_msg(self):
if not self.talk_var.get():
self.talk_var.set(1)
self.controller.normal_mode()
if self.controller.alive:
msg = self.tx_text.get("1.0", "end")
self.controller.transmit_message(msg.encode("utf-8"))
else:
messagebox.showerror("Port Error",
"Serial port not connected!")
class LabelButton(object):
def __init__(self, master, image=None, height=40, width=250, bg="white",
url=None, x_pos=7, y_pos=700):
global logo
        if image is None:
            image = logo
        self.url = url
        self.label = tk.Label(master, image=image, height=height,
                              width=width, bg=bg)
self.label.place(x=x_pos, y=y_pos)
self.label.bind("<Button-1>", self.open_url)
def open_url(self, tmp):
webbrowser.open(self.url, new=1)
logo = None
img = None
path = os.path.abspath(os.path.dirname(__file__))
logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.DEBUG)
if __name__ == "__main__":
app = MainApp()
app.tk.call('wm', 'iconphoto', app._w, img)
app.resizable(0, 0)
app.mainloop()
|
from enum import Enum
import click
import requests
from bs4 import BeautifulSoup
import agent
from constants import ANIME_STATUS_MAP, ANIME_TYPE_MAP, MANGA_STATUS_MAP, MANGA_TYPE_MAP
import network
import ui
class ListSearchStatusCode(Enum):
""""An Enum represented the type of result of list searches"""
NO_RESULTS = 0
USER_CANCELLED = 1
def update_anime_list_entry(credentials, field_type, search_string, new_value=None):
"""Update the details of a users anime list entry
:param credentials: A tuple containing valid MAL account details in the format (username, password)
:param field_type: A string, the detail to update, must be either "episode", "status" or "score"
:param search_string: A string, the anime that the user wants to update
:param new_value: An int or None, the new value to set for the field_type
"""
# the valid types of fields to update
valid_field_types = ["episode", "status", "score"]
# ensure that the field_type is valid
if field_type not in valid_field_types:
raise ValueError("Invalid argument for {}, must be one of {}.".format(field_type, valid_field_types))
# get the BeautifulSoup tag corresponding to the user's search phrase
anime_entry = search_list(credentials[0], "anime", search_string)
# check that a valid match was returned
if anime_entry == ListSearchStatusCode.USER_CANCELLED:
agent.print_msg("I have cancelled the operation. Nothing was changed.")
return
elif anime_entry == network.StatusCode.CONNECTION_ERROR or anime_entry == network.StatusCode.OTHER_ERROR \
or anime_entry == ListSearchStatusCode.NO_RESULTS:
return
else:
xml_tag_format = "<{0}>{1}</{0}>"
xml_field_tags = ""
new_status = 0
# if we are incrementing the episode count for an anime
if field_type == "episode":
# we are are incrementing the count
if new_value is None:
current_ep_count = int(anime_entry.my_watched_episodes.get_text())
new_value = current_ep_count + 1
# check if the user has reached the last episode
if new_value == int(anime_entry.series_episodes.get_text()):
agent.print_msg("Episode {} is the last in the series.".format(new_value))
if click.confirm("Sammy> Do you wish to change the status to completed?"):
xml_field_tags += xml_tag_format.format("status", "2")
new_status = 2
# check if the user has a status of not watching
elif anime_entry.my_status.get_text() != "1":
if click.confirm("Sammy> Do you wish to change the status to watching?"):
xml_field_tags += xml_tag_format.format("status", "1")
new_status = 1
# set the number of episodes to number in series if status set to completed
elif field_type == "status" and new_value == 2 and anime_entry.series_episodes.get_text() != "0":
xml_field_tags += xml_tag_format.format("episode", anime_entry.series_episodes.get_text())
xml_field_tags += xml_tag_format.format(field_type, new_value)
# form the XML string
xml = '<?xml version="1.0" encoding="UTF-8"?><entry>{}</entry>'.format(xml_field_tags)
# prepare the URL
url = "https://myanimelist.net/api/animelist/update/{}.xml".format(anime_entry.series_animedb_id.get_text())
# send the async request to the server, uses GET due to bug in API handling POST requests
r = ui.threaded_action(network.make_request, msg="Updating", request=requests.get, url=url,
params={"data": xml}, auth=credentials)
# check if there was an error with the user's internet connection
if r == network.StatusCode.CONNECTION_ERROR:
agent.print_connection_error_msg()
return
# inform the user whether the request was successful or not
if r.status_code == 200:
anime_title = anime_entry.series_title.get_text()
updated_msg_format = 'I have updated "{}" to {} "{}".'
updated_msg = updated_msg_format.format(anime_title, field_type, new_value)
if field_type == "status":
updated_msg = updated_msg_format.format(anime_title, field_type, ANIME_STATUS_MAP[str(new_value)])
# check if the status was changed
elif new_status:
updated_msg += " Status set to \"{}\"".format(ANIME_STATUS_MAP[str(new_status)])
agent.print_msg(updated_msg)
else:
agent.print_msg("There was an error updating the anime. Please try again.")
def update_manga_list_entry(credentials, field_type, search_string, new_value=None):
"""Increment the chapter or volume count of a manga on the user's list
:param credentials: A tuple containing valid MAL account details in the format (username, password)
:param field_type: A string, the detail to update, must be either "chapter", "volume", "status" or "score"
:param search_string: A string, the manga that the user wants to update
:param new_value: An int or None, the new value to set for the field_type
"""
valid_field_types = ["chapter", "volume", "status", "score"]
# ensure that the field_type is valid
if field_type not in valid_field_types:
raise ValueError("Invalid argument for {}, must be one of {}.".format(field_type, valid_field_types))
if field_type == "score" and 1 > new_value > 10:
agent.print_msg("I'm sorry, but the new score value must be between 1 and 10.")
return
if new_value is not None and new_value < 0:
agent.print_msg("The value for {} cannot be less than 0.".format(field_type))
return
# get the BeautifulSoup tag corresponding to the user's search phrase
manga_entry = search_list(credentials[0], "manga", search_string)
# check that a valid match was returned
if manga_entry == ListSearchStatusCode.USER_CANCELLED:
agent.print_msg("I have cancelled the operation. Nothing was changed.")
return
elif manga_entry == network.StatusCode.CONNECTION_ERROR or manga_entry == network.StatusCode.OTHER_ERROR \
or manga_entry == ListSearchStatusCode.NO_RESULTS:
return
else:
manga_title = manga_entry.series_title.get_text()
xml_tag_format = "<{0}>{1}</{0}>"
xml_field_tags = ""
new_status = 0
# if we are changing the chapter or volume count for a manga
if field_type in ["chapter", "volume"]:
# we are incrementing the count
if new_value is None:
if field_type == "chapter":
current_value = int(manga_entry.my_read_chapters.get_text())
else:
current_value = int(manga_entry.my_read_volumes.get_text())
new_value = current_value + 1
series_chapters = int(manga_entry.series_chapters.get_text())
series_volumes = int(manga_entry.series_volumes.get_text())
if field_type == "chapters" and series_chapters != 0 and new_value > series_chapters:
agent.print_msg("There are only {} chapters in this series.".format(series_chapters))
return
elif field_type == "volumes" and series_volumes != 0 and new_value > series_volumes:
agent.print_msg("There are only {} volumes in this series.".format(series_volumes))
return
# check if the user has reached either the last chapter or last volume
if (new_value == series_chapters and field_type == "chapter" and series_chapters != 0) or \
(new_value == series_volumes and field_type == "volume" and series_volumes != 0):
agent.print_msg("{} {} is the last in the series.".format(field_type.title(), new_value))
if click.confirm("Sammy> Do you wish to change the status to completed?"):
# set both the chapter and volume counts to the number in the series
xml_field_tags += xml_tag_format.format("status", "2")
xml_field_tags += xml_tag_format.format("chapter", series_chapters)
xml_field_tags += xml_tag_format.format("volume", series_volumes)
new_status = 2
# check if the user has a status of not reading
elif manga_entry.my_status.get_text() != "1":
if click.confirm("Sammy> Do you wish to change the status to watching?"):
xml_field_tags += xml_tag_format.format("status", "1")
new_status = 1
# set the number of chapters and volumes to number in series if status set to completed
elif field_type == "status" and new_value == 2:
if manga_entry.series_chapters.get_text() != "0":
xml_field_tags += xml_tag_format.format("chapter", manga_entry.series_chapters.get_text())
if manga_entry.series_volumes.get_text() != "0":
xml_field_tags += xml_tag_format.format("volume", manga_entry.series_volumes.get_text())
if new_status != 2:
xml_field_tags += xml_tag_format.format(field_type, new_value)
# form the XML string
xml = '<?xml version="1.0" encoding="UTF-8"?><entry>{}</entry>'.format(xml_field_tags)
# prepare the url
url = "https://myanimelist.net/api/mangalist/update/{}.xml".format(manga_entry.series_mangadb_id.get_text())
# send the async request to the server, uses GET due to bug in API handling POST requests
r = ui.threaded_action(network.make_request, msg="Updating", request=requests.get, url=url,
params={"data": xml}, auth=credentials)
if r == network.StatusCode.CONNECTION_ERROR:
agent.print_connection_error_msg()
return
# inform the user whether the request was successful or not
if r.status_code == 200:
updated_msg_format = 'Updated "{}" to {} "{}".'
updated_msg = updated_msg_format.format(manga_title, field_type, new_value)
if field_type == "status":
updated_msg = updated_msg_format.format(manga_title, field_type, MANGA_STATUS_MAP[str(new_value)])
# check if the status was changed
elif new_status:
updated_msg += ' Status set to "{}"'.format(MANGA_STATUS_MAP[str(new_status)])
agent.print_msg(updated_msg)
else:
agent.print_msg("There was an error updating the manga. Please try again.")
def search_list(username, search_type, search_string):
"""Search a user's list for a manga or anime and return the matching entry
:param username: A string, the username of a MAL user
:param search_type: A string, must be either "anime" or "manga"
:param search_string: A string, the entry the user wants to update
:return: A beautiful soup tag or None if unsuccessful
"""
if search_type not in ["anime", "manga"]:
raise ValueError("Invalid argument for {}, must be either {} or {}.".format(search_type, "anime", "manga"))
click.echo()
# the base url of the user list xml data
url = "https://myanimelist.net/malappinfo.php"
# send the async request to the server
r = ui.threaded_action(network.make_request, msg="Searching your {} list".format(search_type), request=requests.get,
url=url, params={"u": username, "type": search_type}, stream=True)
# check if there was an error with the user's internet connection
if r == network.StatusCode.CONNECTION_ERROR:
agent.print_connection_error_msg()
return r
if r.status_code == 200:
r.raw.decode_content = True
soup = BeautifulSoup(r.raw, "xml")
matches = []
# iterate over the returned entries for the search type
for entry in soup.find_all(search_type):
# normalise the title and synonyms to lowercase
series_title_lower = entry.series_title.get_text().lower()
series_synonyms_lower = entry.series_synonyms.get_text().lower()
# if the whole search string matches the entry then add it to our list of matches
if search_string in series_title_lower or search_string in series_synonyms_lower:
matches.append(entry)
continue
# check if any of our tokens matches the entry
for token in search_string.split():
if token in series_title_lower or token in series_synonyms_lower:
matches.append(entry)
break
num_results = len(matches)
if num_results == 0:
agent.print_msg('I could not find "{}" on your {} list'.format(search_string, search_type))
return ListSearchStatusCode.NO_RESULTS
elif num_results == 1:
return matches[0]
else:
agent.print_msg("I found {} results. Did you mean:".format(num_results))
# iterate over the matches and print them out
for i in range(len(matches)):
title_format = "{}> {} ({})" if matches[i].series_synonyms.get_text() != "" else "{}> {}"
click.echo(title_format.format(i + 1, matches[i].series_title.get_text(),
matches[i].series_synonyms.get_text()))
click.echo("{}> [None of these]".format(num_results + 1))
# get a valid choice from the user
while True:
option = click.prompt("Please choose an option", type=int)
if 1 <= option <= num_results + 1:
break
else:
click.echo("You must enter a value between {} and {}".format(1, num_results + 1))
# check that the user didn't choose the none of these option before returning the match
if option != num_results + 1:
return matches[option - 1]
else:
return ListSearchStatusCode.USER_CANCELLED
else:
agent.print_msg("There was an error getting the entry on your list. Please try again.")
return network.StatusCode.OTHER_ERROR
def view_list(username, search_type):
"""View the anime and manga list of a user
:param username: A valid MAL username
:param search_type: A string, must be either "anime" or "manga"
"""
if search_type not in ["anime", "manga"]:
raise ValueError("Invalid argument for {}, must be either {} or {}.".format(search_type, "anime", "manga"))
# the base url of the user list xml data
url = "https://myanimelist.net/malappinfo.php"
# make the request to the server and get the results
r = ui.threaded_action(network.make_request, "Getting {} list".format(search_type),
request=requests.get, url=url, params={"u": username, "type": search_type}, stream=True)
# check if there was an error with the user's internet connection
if r == network.StatusCode.CONNECTION_ERROR:
agent.print_connection_error_msg()
elif r.status_code == 200:
r.raw.decode_content = True
soup = BeautifulSoup(r.raw, "xml")
# use a different layout depending on whether it is anime or manga
layout_string = "{}) {}" + "\n - {}: {}" * (4 if search_type == "anime" else 5)
i = 1
for entry in soup.find_all(search_type):
if search_type == "anime":
click.echo(layout_string.format(
i, entry.series_title.get_text(),
"Status", ANIME_STATUS_MAP[entry.my_status.get_text()],
"Score", entry.my_score.get_text(),
"Type", ANIME_TYPE_MAP[entry.series_type.get_text()],
"Progress", entry.my_watched_episodes.get_text() + "/" + entry.series_episodes.get_text()))
else:
click.echo(layout_string.format(
i, entry.series_title.get_text(),
"Status", MANGA_STATUS_MAP[entry.my_status.get_text()],
"Score", entry.my_score.get_text(),
"Type", MANGA_TYPE_MAP[entry.series_type.get_text()],
"Chapters", entry.my_read_chapters.get_text() + "/" + entry.series_chapters.get_text(),
"Volumes", entry.my_read_volumes.get_text() + "/" + entry.series_volumes.get_text()))
i += 1
else:
agent.print_msg("There was an error getting your {} list. Please try again.".format(search_type))
|
import datetime
import json
import os
import requests
import random
import threading
import logging
from flask import Flask
from flask import request
from pymongo import MongoClient
from routing import configuration
from routing import graph
from routing import osm_handler
from routing.utils import bring_closer
mongo_client = MongoClient()
db_client = mongo_client['osm']
map_graph = graph.Graph(db_client)
handler = osm_handler.OsmHandler(db_client)
config = configuration.Configuration()
logging.basicConfig(filename="server.log", level=logging.INFO)
app = Flask(__name__)
import string
def id_generator(size=10, chars=string.ascii_uppercase + string.digits):
return ''.join(random.choice(chars) for _ in range(size))
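# Usage sketch (output is random; the values shown are illustrative only):
#
#     id_generator()        # -> 'R7K2QZ01XB'
#     id_generator(size=4)  # -> '7QZ1'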
def process_back_search(id):
map_graph.background_search()
info = config.get_tmp_by_key(id)
info['data'] = {'isEnd': True}
config.set_tmp_by_key(id, info)
logging.info('Server. Back_search has finished.')
def process_backup_create(id):
handler.create_backup(config.get_bounds())
config.set_backup_info({
'exist': True,
'path': '../settings/backup.json',
'date': datetime.datetime.today().strftime("%d.%m.%Y %H:%M")
})
config.save_config()
info = config.get_tmp_by_key(id)
info['data'] = config.get_backup_info()
config.set_tmp_by_key(id, info)
logging.info('Server. Backup_create has finished.')
def process_backup_load(path, id):
bounds = handler.load_backup(path)
config.set_bounds(bounds)
config.save_config()
info = config.get_tmp_by_key(id)
info['data'] = config.get_backup_info()
config.set_tmp_by_key(id, info)
logging.info('Server. Backup_load has finished.')
def process_map(str_req, id):
r = requests.get(str_req, stream=True)
if r.status_code == 200:
with open(os.path.join('..', 'settings', 'tmp.osm'), 'wb') as f:
for chunk in r.iter_content(1024):
f.write(chunk)
bounds = handler.parse(open(os.path.join('..', 'settings', 'tmp.osm'), 'r', encoding='utf8'))
if os.path.isfile(os.path.join('..', 'settings', 'tmp.osm')):
os.remove(os.path.join('..', 'settings', 'tmp.osm'))
if bounds not in config.get_bounds():
config.add_bounds(bounds)
config.save_config()
info = config.get_tmp_by_key(id)
info['data'] = {'bounds': bounds}
config.set_tmp_by_key(id, info)
logging.info('Server. Process_map has finished.')
else:
        logging.error('Server.Process_map: Error downloading the map.')
@app.route("/api/0.5/fullroute")
# /api/0.5/fullroute?lat1=1.1&lon1=1.2&lat2=2.1&lon2=2.2
def route():
try:
lat1 = float(request.args.get('lat1'))
lon1 = float(request.args.get('lon1'))
lat2 = float(request.args.get('lat2'))
lon2 = float(request.args.get('lon2'))
except:
logging.error("Server.fullroute: Неверные аргументы запроса")
return json.dumps(
{
'error': True,
'data': {},
'msg': "Error in args"
})
try:
node1 = map_graph.find_nearest([lat1, lon1])
node2 = map_graph.find_nearest([lat2, lon2])
logging.info(f'Routing {node1}, {node2}')
right = map_graph.astar(node1, node2)
path_right = []
time_right = 0
length_right = 0
if right:
path_right = right['path']
time_right = right['dist']
for i, node in enumerate(path_right):
if i == len(path_right) - 1:
break
length_right = length_right + map_graph.distance_between(node, path_right[i + 1])
path_right = map_graph.clarify_path_to_loc(path_right) if path_right else []
if path_right:
if len(path_right) > 1:
start = bring_closer({'loc': [lat1, lon1], 'nodes': [[a['lat'], a['lon']] for a in path_right[0:2]]})
middle = path_right[1:len(path_right) - 1]
end = bring_closer({'loc': [lat2, lon2], 'nodes': [[a['lat'], a['lon']] for a in
path_right[len(path_right) - 1:len(path_right) - 3:-1]]})
end.reverse()
else:
                start = [{'lat': lat1, 'lon': lon1}]
                middle = path_right
                end = [{'lat': lat2, 'lon': lon2}]
path_right = start + middle + end
left = map_graph.astar(node1, node2, nodes_client_for_left=map_graph.db_client.nodes)
path_left = []
time_left = 0
length_left = 0
if left:
path_left = left['path']
time_left = left['dist']
for i, node in enumerate(path_left):
if i == len(path_left) - 1:
break
length_left = length_left + map_graph.distance_between(node, path_left[i + 1])
path_left = map_graph.clarify_path_to_loc(path_left) if path_left else []
if path_left:
if len(path_left) > 1:
start = bring_closer({'loc': [lat1, lon1], 'nodes': [[a['lat'], a['lon']] for a in path_left[0:2]]})
middle = path_left[1:len(path_left) - 1]
end = bring_closer({'loc': [lat2, lon2], 'nodes': [[a['lat'], a['lon']] for a in
path_left[len(path_left) - 1:len(path_left) - 3:-1]]})
end.reverse()
else:
                start = [{'lat': lat1, 'lon': lon1}]
                middle = path_left
                end = [{'lat': lat2, 'lon': lon2}]
path_left = start + middle + end
except ValueError as e:
return json.dumps({'error': True, 'data': {}, 'msg': str(e)})
logging.info(f"""Send this:
{{
'error': False,
'data': {{
'from': {{'lat': {lat1}, 'lon': {lon1}}},
'to': {{'lat': {lat2}, 'lon': {lon2}}},
'path_right': {path_right},
'distance_right': {length_right},
'time_right': {time_right},
'path_left':{path_left},
'distance_left': {length_left},
'time_left': {time_left}
}},
'msg': "Full routing"
}}
""")
return json.dumps(
{
'error': False,
'data': {
'from': {'lat': lat1, 'lon': lon1},
'to': {'lat': lat2, 'lon': lon2},
'path_right': path_right,
'distance_right': length_right,
'time_right': time_right,
'path_left': path_left,
'distance_left': length_left,
'time_left': time_left
},
'msg': "Full routing"
})
@app.route("/api/0.5/route_id")
# /api/0.5/route_id?id1=11&id2=22
def route_id():
try:
id1 = int(request.args.get('id1'))
id2 = int(request.args.get('id2'))
except:
return json.dumps({'error': True, 'data': {}, 'msg': "Error in args"})
try:
path = map_graph.astar(id1, id2)
except ValueError as e:
return json.dumps({'error': True, 'data': {}, 'msg': str(e)})
path = list(path) if path else []
return json.dumps(
{
'error': False,
'data': {'path': path},
'msg': "Routing by id"
})
@app.route("/api/0.5/fullroute_id")
# /api/0.5/fullroute_id?id1=11&id2=22
def fullroute_id():
try:
id1 = int(request.args.get('id1'))
id2 = int(request.args.get('id2'))
except:
logging.error("Server.fullroute_id: Неверные аргументы запроса")
return json.dumps({'error': True, 'data': {}, 'msg': "Error in args"})
try:
path = map_graph.astar(id1, id2)
except ValueError as e:
return json.dumps({'error': True, 'data': {}, 'msg': str(e)})
path = map_graph.clarify_path_to_loc(path) if path else []
return json.dumps(
{
'error': False,
'data': {'path': path},
'msg': "Full routing by id"
})
@app.route("/api/0.5/create_backup")
def create_backup():
id = id_generator()
thread = threading.Thread(target=process_backup_create, args=(id,))
config.add_tmp(id, {'thread': thread})
thread.start()
logging.info("Server.create_backup: Создание backup'a...")
return json.dumps(
{
'error': False,
'data': {'id': id},
'msg': "Backup is starting"
})
@app.route("/api/0.5/load_backup")
def load_backup():
info = config.get_backup_info()
if info['exist']:
id = id_generator()
thread = threading.Thread(target=process_backup_load, args=(info['path'],id))
config.add_tmp(id, {'thread': thread})
thread.start()
logging.info("Server.load_backup: Загрузка backup'a...")
return json.dumps(
{
'error': False,
'data': {'id': id},
'msg': "Backup is loading"
})
    logging.info('Server.load_backup: Backup does not exist')
return json.dumps(
{
'error': True,
'data': {},
'msg': "Backup doesn't exist"
})
@app.route("/api/0.5/load_map")
# /api/0.5/load_map?min_lat=1.1&min_lon=1.2&max_lat=2.1&max_lon=2.2
def load_map():
try:
min_lat = float(request.args.get('min_lat'))
min_lon = float(request.args.get('min_lon'))
max_lat = float(request.args.get('max_lat'))
max_lon = float(request.args.get('max_lon'))
except:
logging.error("Server.load_map: Неверные аргументы запроса")
return json.dumps({'error': True, 'data': {}, 'msg': "Error in args"})
str_req = 'https://overpass-api.de/api/map?bbox=' + str(min_lon) + ',' + str(min_lat) + ',' + str(max_lon) + ',' + str(max_lat)
id = id_generator()
thread = threading.Thread(target=process_map, args=(str_req,id))
config.add_tmp(id, {'thread': thread})
thread.start()
    logging.info('Server.load_map: Map download started.')
return json.dumps(
{
'error': False,
'data': {'id': id},
'msg': "Downloading has been started"
})
@app.route("/api/0.5/bounds")
def get_bounds():
logging.info(f"""Send this:
{{
'error': False,
'data': {{'bounds': {config.get_bounds()}}},
'msg': "Map's bounds"
}}
""")
return json.dumps(
{
'error': False,
'data': {'bounds': config.get_bounds()},
'msg': "Map's bounds"
})
@app.route("/api/0.5/back_search")
def back_search():
    logging.warning('Server. Background search started.')
id = id_generator()
thread = threading.Thread(target=process_back_search, args=(id,))
config.add_tmp(id, {'thread': thread})
thread.start()
return json.dumps({
'error': False,
'data': {'id': id},
'msg': "Searching has been started"
})
@app.route("/api/0.5/check")
# /api/0.5/check?id=string
def check():
try:
id = request.args.get('id')
except:
logging.error("Server.check: Неверные аргументы запроса")
return json.dumps({'error': True, 'data': {}, 'msg': "Error in args" })
info = config.get_tmp_by_key(id)
if not info:
        # the thread is not being tracked
return json.dumps({
'error': True,
'data': {'run': False, 'data': {}},
'msg': "Thread is not monitored."
})
    if info['thread'].is_alive():
        # the thread is still running
return json.dumps({
'error': False,
'data': {'run': True, 'data': {}},
'msg': "Thread is still running"
})
else:
if 'data' in info:
            # the thread has finished and its data exists
config.del_tmp(id)
return json.dumps({
'error': False,
'data': {'run': False, 'data': info['data']},
'msg': "Thread has finished"
})
else:
            # the thread has finished but no data exists
config.del_tmp(id)
return json.dumps({
'error': True,
'data': {'run': False, 'data': {}},
'msg': "Smth was wrong"
})
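# Client-side polling sketch for the asynchronous endpoints (load_map, create_backup,
# load_backup, back_search): each one returns a task id that is then polled through
# /api/0.5/check. The host and port below are assumptions for illustration only.
#
#   import requests, time
#   base = 'http://localhost:5000/api/0.5'
#   resp = requests.get(base + '/load_map', params={
#       'min_lat': 1.1, 'min_lon': 1.2, 'max_lat': 2.1, 'max_lon': 2.2}).json()
#   task_id = resp['data']['id']
#   while True:
#       status = requests.get(base + '/check', params={'id': task_id}).json()
#       if not status['data']['run']:
#           break
#       time.sleep(1)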
@app.route("/api/0.5/delete_graph")
def delete_graph():
    logging.warning('Server. Deleting the graph from the DB.')
map_graph.delete_graph()
return json.dumps({
'error': False,
'data': {},
'msg': "Graph has been deleted"
})
@app.route("/api/0.5/drop")
def drop():
    logging.warning('Server. Dropping the DB.')
db_client.nodes.drop()
db_client.ways.drop()
handler.create_indexes()
config.set_bounds([])
return json.dumps({
'error': False,
'data': {},
'msg': "DB has been dropped"
})
app.run(host=config.get_ip(), port=config.get_port(), debug=True)
|
# MIT License
#
# Copyright (c) 2021 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
File: pressure_ratio_friedly_example.py
Author: <NAME>
Date: March, 2021
Description: generates Fig. 2a in Part 2 of Physics of Thermionic Orificed Hollow Cathodes.
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
### Path to HDF5 file
path_to_results = '../../results/friedly.h5'
### Generate a dataframe out of results for the following parameters:
# Discharge current = 5-60 A
# Mass flow rate = 0.37 eqA (5.16 sccm)
# Neutral gas temperature = 2000, 3000, 4000 K
# Sheath voltage = 1-10 V
key_root = 'Xe/simulations/results/'
key_end = ['r20210306023131','r20210306174156','r20210306180132']
Tgvec = [2000,3000,4000]
# Create a list for each dataframe
dlist = []
for TgK, ke in zip(Tgvec,key_end):
# Create the key
# 'Xe/simulations/results/<temperature>/insert/r<UTC time results were written>'
key = key_root + str(TgK) + '/insert/' + ke
# Read the dataframe
d = pd.read_hdf(path_to_results,key=key)
dlist.append(d)
# Append everything to the first dataframe
for d in dlist[1:]:
dlist[0] = dlist[0].append(d)
# Aggregate dataframe
dfall = dlist[0].copy()
### Find the minimum and maximum bounds for each discharge current
Idvec = np.unique(dfall['dischargeCurrent'])
md = np.unique(dfall['massFlowRate_eqA'])[0]
min_ratio = np.zeros_like(Idvec)
max_ratio = np.zeros_like(Idvec)
for kk, Id in enumerate(Idvec):
dfx = dfall[dfall['dischargeCurrent'] == Id]
min_ratio[kk] = np.min(dfx['totalPressure']/dfx['magneticPressure'])
max_ratio[kk] = np.max(dfx['totalPressure']/dfx['magneticPressure'])
# Plot results
plt.loglog(Idvec/md,min_ratio,'k-')
plt.loglog(Idvec/md,max_ratio,'k-')
plt.fill_between(Idvec/md,min_ratio,max_ratio,color=(0.5,0.5,0.5,0.5))
#
## Plot experimental data
xp_data = np.array([
[13.5060666036119,95.6270431300905],
[27.0121332072238,37.3136722293613],
[40.5181998108357,20.824230636672],
[54.0242664144477,14.3747937333768],
[67.5303330180596,10.991317693348],
[81.0363996216715,8.85658208341987],
[94.5424662252834,7.1052844617966],
[108.048532828895,5.96712749131765],
[121.554599432507,5.12223642383175],
[135.060666036119,4.4641982374605],
[148.566732639731,3.8707840003619],
[162.072799243343,3.47665017360819],
])
plt.plot(xp_data[:,0],xp_data[:,1],'ko')
# Plot labels and limits
plt.xlim([10,200])
plt.ylim([1,300])
plt.xlabel("Id / mdot")
plt.ylabel("P / Pmag")
plt.show()
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 VMware, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import mock
from neutron.common import exceptions
from neutron.plugins.nicira.common import exceptions as nvp_exc
from neutron.plugins.nicira.common import utils
from neutron.plugins.nicira.nsxlib import lsn as lsnlib
from neutron.plugins.nicira import NvpApiClient
from neutron.tests import base
class LSNTestCase(base.BaseTestCase):
def setUp(self):
super(LSNTestCase, self).setUp()
self.mock_request_p = mock.patch.object(lsnlib, 'do_request')
self.mock_request = self.mock_request_p.start()
self.cluster = mock.Mock()
self.cluster.default_service_cluster_uuid = 'foo'
self.addCleanup(self.mock_request_p.stop)
def test_service_cluster_None(self):
self.mock_request.return_value = None
expected = lsnlib.service_cluster_exists(None, None)
self.assertFalse(expected)
def test_service_cluster_found(self):
self.mock_request.return_value = {
"results": [
{
"_href": "/ws.v1/service-cluster/foo_uuid",
"display_name": "foo_name",
"uuid": "foo_uuid",
"tags": [],
"_schema": "/ws.v1/schema/ServiceClusterConfig",
"gateways": []
}
],
"result_count": 1
}
expected = lsnlib.service_cluster_exists(None, 'foo_uuid')
self.assertTrue(expected)
def test_service_cluster_not_found(self):
self.mock_request.side_effect = exceptions.NotFound()
expected = lsnlib.service_cluster_exists(None, 'foo_uuid')
self.assertFalse(expected)
def test_lsn_for_network_create(self):
net_id = "foo_network_id"
tags = utils.get_tags(n_network_id=net_id)
obj = {"service_cluster_uuid": "foo", "tags": tags}
lsnlib.lsn_for_network_create(self.cluster, net_id)
self.mock_request.assert_called_once_with(
"POST", "/ws.v1/lservices-node",
json.dumps(obj), cluster=self.cluster)
def test_lsn_for_network_get(self):
net_id = "foo_network_id"
lsn_id = "foo_lsn_id"
self.mock_request.return_value = {
"results": [{"uuid": "foo_lsn_id"}],
"result_count": 1
}
result = lsnlib.lsn_for_network_get(self.cluster, net_id)
self.assertEqual(lsn_id, result)
self.mock_request.assert_called_once_with(
"GET",
("/ws.v1/lservices-node?fields=uuid&tag_scope="
"n_network_id&tag=%s" % net_id),
cluster=self.cluster)
def test_lsn_for_network_get_none(self):
net_id = "foo_network_id"
self.mock_request.return_value = {
"results": [{"uuid": "foo_lsn_id1"}, {"uuid": "foo_lsn_id2"}],
"result_count": 2
}
result = lsnlib.lsn_for_network_get(self.cluster, net_id)
self.assertIsNone(result)
def test_lsn_for_network_get_raise_not_found(self):
net_id = "foo_network_id"
self.mock_request.return_value = {
"results": [], "result_count": 0
}
self.assertRaises(exceptions.NotFound,
lsnlib.lsn_for_network_get,
self.cluster, net_id)
def test_lsn_delete(self):
lsn_id = "foo_id"
lsnlib.lsn_delete(self.cluster, lsn_id)
self.mock_request.assert_called_once_with(
"DELETE",
"/ws.v1/lservices-node/%s" % lsn_id, cluster=self.cluster)
def _test_lsn_port_host_entries_update(self, lsn_type, hosts_data):
lsn_id = 'foo_lsn_id'
lsn_port_id = 'foo_lsn_port_id'
lsnlib.lsn_port_host_entries_update(
self.cluster, lsn_id, lsn_port_id, lsn_type, hosts_data)
self.mock_request.assert_called_once_with(
'PUT',
'/ws.v1/lservices-node/%s/lport/%s/%s' % (lsn_id,
lsn_port_id,
lsn_type),
json.dumps({'hosts': hosts_data}),
cluster=self.cluster)
def test_lsn_port_dhcp_entries_update(self):
hosts_data = [{"ip_address": "11.22.33.44",
"mac_address": "aa:bb:cc:dd:ee:ff"},
{"ip_address": "44.33.22.11",
"mac_address": "ff:ee:dd:cc:bb:aa"}]
self._test_lsn_port_host_entries_update("dhcp", hosts_data)
def test_lsn_port_metadata_entries_update(self):
hosts_data = [{"ip_address": "11.22.33.44",
"device_id": "foo_vm_uuid"}]
self._test_lsn_port_host_entries_update("metadata-proxy", hosts_data)
def test_lsn_port_create(self):
port_data = {
"ip_address": "1.2.3.0/24",
"mac_address": "aa:bb:cc:dd:ee:ff",
"subnet_id": "foo_subnet_id"
}
port_id = "foo_port_id"
self.mock_request.return_value = {"uuid": port_id}
lsn_id = "foo_lsn_id"
result = lsnlib.lsn_port_create(self.cluster, lsn_id, port_data)
self.assertEqual(result, port_id)
tags = utils.get_tags(n_subnet_id=port_data["subnet_id"],
n_mac_address=port_data["mac_address"])
port_obj = {
"ip_address": port_data["ip_address"],
"mac_address": port_data["mac_address"],
"type": "LogicalServicesNodePortConfig",
"tags": tags
}
self.mock_request.assert_called_once_with(
"POST", "/ws.v1/lservices-node/%s/lport" % lsn_id,
json.dumps(port_obj), cluster=self.cluster)
def test_lsn_port_delete(self):
lsn_id = "foo_lsn_id"
lsn_port_id = "foo_port_id"
lsnlib.lsn_port_delete(self.cluster, lsn_id, lsn_port_id)
self.mock_request.assert_called_once_with(
"DELETE",
"/ws.v1/lservices-node/%s/lport/%s" % (lsn_id, lsn_port_id),
cluster=self.cluster)
def test_lsn_port_get_with_filters(self):
lsn_id = "foo_lsn_id"
port_id = "foo_port_id"
filters = {"tag": "foo_tag", "tag_scope": "foo_scope"}
self.mock_request.return_value = {
"results": [{"uuid": port_id}],
"result_count": 1
}
result = lsnlib._lsn_port_get(self.cluster, lsn_id, filters)
self.assertEqual(result, port_id)
self.mock_request.assert_called_once_with(
"GET",
("/ws.v1/lservices-node/%s/lport?fields=uuid&tag_scope=%s&"
"tag=%s" % (lsn_id, filters["tag_scope"], filters["tag"])),
cluster=self.cluster)
def test_lsn_port_get_with_filters_return_none(self):
self.mock_request.return_value = {
"results": [{"uuid": "foo1"}, {"uuid": "foo2"}],
"result_count": 2
}
result = lsnlib._lsn_port_get(self.cluster, "lsn_id", None)
self.assertIsNone(result)
def test_lsn_port_get_with_filters_raises_not_found(self):
self.mock_request.return_value = {"results": [], "result_count": 0}
self.assertRaises(exceptions.NotFound,
lsnlib._lsn_port_get,
self.cluster, "lsn_id", None)
def test_lsn_port_plug_network(self):
lsn_id = "foo_lsn_id"
lsn_port_id = "foo_lsn_port_id"
lswitch_port_id = "foo_lswitch_port_id"
lsnlib.lsn_port_plug_network(
self.cluster, lsn_id, lsn_port_id, lswitch_port_id)
self.mock_request.assert_called_once_with(
"PUT",
("/ws.v1/lservices-node/%s/lport/%s/"
"attachment") % (lsn_id, lsn_port_id),
json.dumps({"peer_port_uuid": lswitch_port_id,
"type": "PatchAttachment"}),
cluster=self.cluster)
def test_lsn_port_plug_network_raise_conflict(self):
lsn_id = "foo_lsn_id"
lsn_port_id = "foo_lsn_port_id"
lswitch_port_id = "foo_lswitch_port_id"
self.mock_request.side_effect = NvpApiClient.Conflict
self.assertRaises(
nvp_exc.LsnConfigurationConflict,
lsnlib.lsn_port_plug_network,
self.cluster, lsn_id, lsn_port_id, lswitch_port_id)
def _test_lsn_port_dhcp_configure(
self, lsn_id, lsn_port_id, is_enabled, opts):
lsnlib.lsn_port_dhcp_configure(
self.cluster, lsn_id, lsn_port_id, is_enabled, opts)
opt_array = ["%s=%s" % (key, val) for key, val in opts.iteritems()]
self.mock_request.assert_has_calls([
mock.call("PUT", "/ws.v1/lservices-node/%s/dhcp" % lsn_id,
json.dumps({"enabled": is_enabled}),
cluster=self.cluster),
mock.call("PUT",
("/ws.v1/lservices-node/%s/"
"lport/%s/dhcp") % (lsn_id, lsn_port_id),
json.dumps({"options": {"options": opt_array}}),
cluster=self.cluster)
])
def test_lsn_port_dhcp_configure_empty_opts(self):
lsn_id = "foo_lsn_id"
lsn_port_id = "foo_lsn_port_id"
is_enabled = False
opts = {}
self._test_lsn_port_dhcp_configure(
lsn_id, lsn_port_id, is_enabled, opts)
def test_lsn_port_dhcp_configure_with_opts(self):
lsn_id = "foo_lsn_id"
lsn_port_id = "foo_lsn_port_id"
is_enabled = True
opts = {"opt1": "val1", "opt2": "val2"}
self._test_lsn_port_dhcp_configure(
lsn_id, lsn_port_id, is_enabled, opts)
def _test_lsn_metadata_configure(
self, lsn_id, is_enabled, opts, expected_opts):
lsnlib.lsn_metadata_configure(
self.cluster, lsn_id, is_enabled, opts)
lsn_obj = {"enabled": is_enabled}
lsn_obj.update(expected_opts)
self.mock_request.assert_has_calls([
mock.call("PUT",
"/ws.v1/lservices-node/%s/metadata-proxy" % lsn_id,
json.dumps(lsn_obj),
cluster=self.cluster),
])
def test_lsn_port_metadata_configure_empty_secret(self):
lsn_id = "foo_lsn_id"
is_enabled = True
opts = {
"metadata_server_ip": "172.16.17.32",
"metadata_server_port": "8775"
}
expected_opts = {
"metadata_server_ip": "172.16.17.32",
"metadata_server_port": "8775",
"misc_options": []
}
self._test_lsn_metadata_configure(
lsn_id, is_enabled, opts, expected_opts)
def test_lsn_metadata_configure_with_secret(self):
lsn_id = "foo_lsn_id"
is_enabled = True
opts = {
"metadata_server_ip": "172.16.17.32",
"metadata_server_port": "8775",
"metadata_proxy_shared_secret": "foo_secret"
}
expected_opts = {
"metadata_server_ip": "172.16.17.32",
"metadata_server_port": "8775",
"misc_options": ["metadata_proxy_shared_secret=foo_secret"]
}
self._test_lsn_metadata_configure(
lsn_id, is_enabled, opts, expected_opts)
def _test_lsn_port_host_action(
self, lsn_port_action_func, extra_action, action, host):
lsn_id = "foo_lsn_id"
lsn_port_id = "foo_lsn_port_id"
lsn_port_action_func(self.cluster, lsn_id, lsn_port_id, host)
self.mock_request.assert_called_once_with(
"POST",
("/ws.v1/lservices-node/%s/lport/"
"%s/%s?action=%s") % (lsn_id, lsn_port_id, extra_action, action),
json.dumps(host), cluster=self.cluster)
def test_lsn_port_dhcp_host_add(self):
host = {
"ip_address": "1.2.3.4",
"mac_address": "aa:bb:cc:dd:ee:ff"
}
self._test_lsn_port_host_action(
lsnlib.lsn_port_dhcp_host_add, "dhcp", "add_host", host)
def test_lsn_port_dhcp_host_remove(self):
host = {
"ip_address": "1.2.3.4",
"mac_address": "aa:bb:cc:dd:ee:ff"
}
self._test_lsn_port_host_action(
lsnlib.lsn_port_dhcp_host_remove, "dhcp", "remove_host", host)
def test_lsn_port_metadata_host_add(self):
host = {
"ip_address": "1.2.3.4",
"instance_id": "foo_instance_id"
}
self._test_lsn_port_host_action(lsnlib.lsn_port_metadata_host_add,
"metadata-proxy", "add_host", host)
def test_lsn_port_metadata_host_remove(self):
host = {
"ip_address": "1.2.3.4",
"instance_id": "foo_instance_id"
}
self._test_lsn_port_host_action(lsnlib.lsn_port_metadata_host_remove,
"metadata-proxy", "remove_host", host)
|
<gh_stars>0
from models.networks import IntroVAE
from IAF.IAF import IAF_flow
import torch
from models.networks_v2 import *
from IAF.layers.utils import accumulate_kl_div, reset_kl_div
class DIF_net(IntroVAE):
def __init__(self,cdim=3,
hdim=512,
channels=[64, 128, 256, 512, 512, 512],
image_size=256,
flow_depth = 3,
flow_C=100,
tanh_flag=True):
super(DIF_net, self).__init__(cdim=cdim, hdim=hdim, channels=channels, image_size=image_size)
self.tanh_flag=tanh_flag
self.C = flow_C
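    # When tanh_flag is set, reparameterize() and sample() softly clamp the latent to
    # the open interval (-C, C) via C * tanh(z / C); for |z| much smaller than C the
    # mapping is close to the identity, so typical latents pass through almost unchanged.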
def reparameterize(self, mu, logvar):
std = logvar.mul(0.5).exp_()
eps = torch.randn_like(std)
z = eps.mul(std).add_(mu)
if self.tanh_flag:
return self.C*torch.tanh(z/self.C)
else:
return z
def sample(self,z):
if self.tanh_flag:
return self.decode(self.C * torch.tanh(z / self.C))
else:
return self.decode(z)
def sample_fake_eval(self,n):
z = torch.randn(n,self.hdim).cuda()
return self.sample(z)
def get_latent(self,x):
mu, logvar = self.encode(x)
z = self.reparameterize(mu, logvar)
return z
class DIF_net_flow(IntroVAE):
def __init__(self,cdim=3,
hdim=512,
channels=[64, 128, 256, 512, 512, 512],
image_size=256,
flow_depth = 3,
flow_C=100,
tanh_flag=True):
super(DIF_net_flow, self).__init__(cdim=cdim, hdim=hdim, channels=channels, image_size=image_size)
self.tanh_flag=tanh_flag
self.C = flow_C
self.flow = IAF_flow(hdim,flow_depth,tanh_flag,flow_C)
def forward(self, x):
mu, logvar = self.encode(x)
xi,z,flow_log_det = self.reparameterize(mu, logvar)
y = self.decode(z)
return mu, logvar, z, y, flow_log_det,xi
def reparameterize(self, mu, logvar):
std = logvar.mul(0.5).exp_()
eps = torch.randn_like(mu)
xi = eps.mul(std).add_(mu)
z,log_det = self.flow(xi,logvar)
return xi,z,log_det
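    # The IAF flow maps the reparameterized sample xi to z and returns the
    # log-determinant of that transform; in a flow-based VAE this log-det is
    # typically folded into the density term of the training objective.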
def flow_forward_only(self,xi,logvar=None):
output,_ = self.flow(xi, logvar)
return output
def encode_and_flow(self,x):
mu, logvar = self.encode(x)
xi,z,flow_log_det = self.reparameterize(mu, logvar)
return mu, logvar, z, flow_log_det,xi
def get_latent(self,x):
return self.encode_and_flow(x)
def sample(self,xi,logvar):
with torch.no_grad():
z,_ = self.flow(xi,logvar)
return self.decode(z.detach())
def sample_fake_eval(self, n):
z = torch.randn(n, self.hdim).cuda()
logvar = torch.zeros_like(z)
return self.sample(z,logvar)
class DIF_netv2(IntroVAEv2):
def __init__(self,cdim=3,
hdim=512,
channels=[64, 128, 256, 512, 512, 512],
image_size=256,
flow_depth = 3,
flow_C=100,
tanh_flag=True):
super(DIF_netv2, self).__init__(cdim=cdim, hdim=hdim, channels=channels, image_size=image_size)
self.tanh_flag=tanh_flag
self.C = flow_C
def reparameterize(self, mu, logvar):
std = logvar.mul(0.5).exp_()
eps = torch.randn_like(std)
z = eps.mul(std).add_(mu)
if self.tanh_flag:
return self.C*torch.tanh(z/self.C)
else:
return z
def sample(self,z):
if self.tanh_flag:
return self.decode(self.C * torch.tanh(z / self.C))
else:
return self.decode(z)
def sample_fake_eval(self,n):
z = torch.randn(n,self.hdim).cuda()
return self.sample(z)
def get_latent(self,x):
mu, logvar = self.encode(x)
z = self.reparameterize(mu, logvar)
return z
class DIF_net_flow_v2(IntroVAEv2):
def __init__(self,cdim=3,
hdim=512,
channels=[64, 128, 256, 512, 512, 512],
image_size=256,
flow_depth = 3,
flow_C=100,
tanh_flag=True):
        super(DIF_net_flow_v2, self).__init__(cdim=cdim, hdim=hdim, channels=channels, image_size=image_size)
self.tanh_flag=tanh_flag
self.C = flow_C
self.flow = IAF_flow(hdim,flow_depth,tanh_flag,flow_C)
def forward(self, x):
mu, logvar = self.encode(x)
xi,z,flow_log_det = self.reparameterize(mu, logvar)
y = self.decode(z)
return mu, logvar, z, y, flow_log_det,xi
def reparameterize(self, mu, logvar):
std = logvar.mul(0.5).exp_()
eps = torch.randn_like(mu)
xi = eps.mul(std).add_(mu)
z,log_det = self.flow(xi,logvar)
return xi,z,log_det
def flow_forward_only(self,xi,logvar=None):
output,_ = self.flow(xi, logvar)
return output
def encode_and_flow(self,x):
mu, logvar = self.encode(x)
xi,z,flow_log_det = self.reparameterize(mu, logvar)
return mu, logvar, z, flow_log_det,xi
def get_latent(self,x):
return self.encode_and_flow(x)
def sample(self,xi,logvar):
with torch.no_grad():
z,_ = self.flow(xi,logvar)
return self.decode(z.detach())
def sample_fake_eval(self, n):
z = torch.randn(n, self.hdim).cuda()
logvar = torch.zeros_like(z)
        return self.sample(z,logvar)
|
#! /usr/bin/python3
def HE():
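    # Specific-heat (calorimetry) helper: q = m * c * (T_final - T_initial).
    # The user picks which quantity to solve for and supplies the remaining values.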
q = 0
decimal1 = q
m = 0
decimal2 = m
c = 0
decimal3 = c
fi = 0
decimal4 = fi
ini = 0
decimal5 = ini
dt = 0
s = input('What are we solving for? (q, m, c or ΔT): ').lower()
if s == "q":
if decimal2 is str:
m = int(input('What is the mass in grams(m): '))
else:
m = float(input('What is the mass in grams(m): '))
if decimal4 is str:
fi = int(input('What is the final temperature in °C(ΔT): '))
else:
fi = float(input('What is the final temperature in °C(ΔT): '))
if decimal5 is str:
ini = int(input('What is the initial temperature in °C(ΔT): '))
else:
ini = float(input('What is the initial temperature in °C(ΔT): '))
if decimal3 is str:
c = int(input('What is the specific heat? (c): '))
else:
c = float(input('What is the specific heat? (c): '))
q = m * c*(fi - ini)
print(f'q = {q}J')
elif s == 'm':
if decimal1 is str:
q = int(input('What is the amount of heat in joules(q): '))
else:
q = float(input('What is the amount of heat in joules(q): '))
if decimal4 is str:
fi = int(input('What is the final temperature in °C(ΔT): '))
else:
fi = float(input('What is the final temperature in °C(ΔT): '))
if decimal5 is str:
ini = int(input('What is the initial temperature in °C(ΔT): '))
else:
ini = float(input('What is the initial temperature in °C(ΔT): '))
if decimal3 is str:
c = int(input('What is the specific heat? (c): '))
else:
c = float(input('What is the specific heat? (c): '))
m = q/(c*(fi - ini))
        print(f'm = {m}g')
elif s == 'c':
if decimal1 is str:
q = int(input('What is the amount of heat in joules(m): '))
else:
q = float(input('What is the amount of heat in joules(q): '))
if decimal4 is str:
            fi = int(input('What is the final temperature in °C(ΔT): '))
        else:
            fi = float(input('What is the final temperature in °C(ΔT): '))
        if decimal5 is str:
            ini = int(input('What is the initial temperature in °C(ΔT): '))
        else:
            ini = float(input('What is the initial temperature in °C(ΔT): '))
if decimal2 is str:
m = int(input('What is the mass in grams(m): '))
else:
m = float(input('What is the mass in grams(m): '))
c = q/((fi-ini)*m)
        print(f'c = {c}J/°C')
    elif s in ('t', 'δt', 'dt'):
if decimal1 is str:
q = int(input('What is the amount of heat in Joules (q): '))
else:
q = float(input('What is the amount of heat in Joules (q): '))
if decimal2 is str:
m = int(input('What is the mass in grams(m): '))
else:
m = float(input('What is the mass in grams(m): '))
if decimal3 is str:
c = int(input('What is the specific heat? (c): '))
else:
            c = float(input('What is the specific heat (c): '))
dt = q/(c*m)
        print(f'ΔT = {dt}°C')
def convert():
convert_or_no = input('Do we need to convert?: ').lower()
    if convert_or_no in ('yes', 'y'):
uc()
convert()
    elif convert_or_no in ('no', 'n'):
HE()
def uc():
    unknown = input('What are we converting, temperature or pressure: ').lower()
    if unknown in ('temp', 'temperature', 't'):
def TC():
c = 0
decimal = c
f = 0
decimal2 = f
k = 0
decimal3 = k
unknown1 = input(
'What is the starting temp in, C, K or F: ')
unknown2 = input(
'What are we converting to, C, K or F: ')
if unknown1 in 'C, c' and \
unknown2 in 'F, f':
if decimal2 is str:
c = int(input('What is the starting temp in c that you are converting?: '))
f = c * 1.8000 + 32
print('{} °F'.format(f))
else:
c = float(input('What is the starting temp in c that you are converting?: '))
f = c * 1.8000 + 32
print('{} °F'.format(f))
if unknown1 in 'C, c' and \
unknown2 in 'K, k':
if decimal3 is str:
c = int(input('What is the starting temp in c that you are converting?: '))
k = c + 273
print('{} °K'.format(k))
else:
c = float(input('What is the starting temp in c that you are converting?: '))
k = c + 273
print('{} °K'.format(k))
if unknown1 in 'K, k' and \
unknown2 in 'F, f':
if decimal2 is str:
k = int(input('What is the starting temp in k that you are converting?: '))
f = k * 1.8 - 459.67
print('{} °F'.format(f))
else:
k = float(input('What is the starting temp in k that you are converting?: '))
f = k * 1.8 - 459.67
print('{} °F'.format(f))
if unknown1 in 'K, k' and \
unknown2 in 'C, c':
if decimal is str:
k = int(input('What is the starting temp in k that you are converting?: '))
c = k - 273
print('{} °C'.format(c))
else:
k = float(input('What is the starting temp in k that you are converting?: '))
c = k - 273
print('{} °C'.format(c))
if unknown1 in 'F, f' and \
unknown2 in 'K, k':
if decimal3 is str:
f = int(input('What is the starting temp in k that you are converting?: '))
                    k = ((f - 32)/1.8)+273.15
print('{} °K'.format(k))
else:
f = float(input('What is the starting temp in k that you are converting?: '))
                    k = ((f - 32)/1.8)+273.15
print('{} °K'.format(k))
if unknown1 in 'F, f' and \
unknown2 in 'C, c':
if decimal is str:
f = int(input('What is the starting temp in f that you are converting?: '))
c = (f - 32)/1.8
print('{} °C'.format(c))
else:
f = float(input('What is the starting temp in f that you are converting?: '))
c = (f - 32)/1.8
print('{} °C'.format(c))
TC()
    if unknown in ('pressure', 'p'):
def PC():
atm = 0
decimal = atm
torr = 0
decimal2 = torr
mmhg = 0
decimal3 = mmhg
kpa2 = 0
decimal4 = kpa2
unknown1 = input('What are we starting with? (Kpa, Atm, Torr,Mmhg): ').lower()
unknown2 = input('What are we converting to? (Kpa, Atm, Torr,Mmhg): ').lower()
if unknown1 == 'kpa' and \
unknown2 == 'atm':
if decimal is str:
kpa = int(input('How much kpa are you converting?: '))
atm = kpa / 101.325
print(f'{atm} atm')
else:
kpa = float(input('How much kpa are you converting: '))
atm = kpa / 101.325
print(f'{atm} atm')
if unknown1 == 'kpa' and \
unknown2 == 'torr':
if decimal2 is str:
kpa = int(input('How much kpa are you converting?: '))
                    torr = kpa / 0.1333223684
print(f'{torr} torr')
else:
kpa = float(input('How much kpa are you converting: '))
torr = kpa / 0.1333223684
print(f'{torr} torr')
            if unknown1 == 'kpa' and \
unknown2 == 'mmhg':
if decimal3 is str:
kpa = int(input('How much kpa are you converting?: '))
mmhg = kpa / 0.1333223684
print(f'{mmhg} mmhg')
else:
kpa = float(input('How much kpa are you converting: '))
mmhg = kpa / 0.1333223684
print(f'{mmhg} mmhg')
if unknown1 in 'MMHG, MMHg, MMhg, Mmhg, mMHG, mmHg, mMhg, mmhg, mmhG' and \
unknown2 in 'KPA, KPa, Kpa,kPA, kPa, kpA, kpa' :
if decimal4 is str:
mmhg = int(input('How much mmhg are you converting?: '))
kpa2 = mmhg * 0.133322387415
print('{} kpa'.format(kpa2))
else:
mmhg = float(input('How much mmhg are you converting: '))
kpa2 = mmhg * 0.133322387415
print('{} kpa'.format(kpa2))
if unknown1 in 'MMHG, MMHg, MMhg, Mmhg, mMHG, mmHg, mMhg, mmhg, mmhG' and \
unknown2 in 'TORR, TORr, TOrr, Torr, tORR, tORr, tOrr, torr, torR' :
if decimal2 is str:
mmhg = int(input('How much mmhg are you converting?: '))
torr = mmhg * 1
print('{} torr'.format(torr))
else:
mmhg = float(input('How much mmhg are you converting: '))
torr = mmhg * 1
print('{} torr'.format(torr))
if unknown1 in 'MMHG, MMHg, MMhg, Mmhg, mMHG, mmHg, mMhg, mmhg, mmhG' and \
unknown2 in 'ATM, ATm, Atm, aTM, aTm, atM, atm' :
if decimal is str:
mmhg = int(input('How much mmhg are you converting?: '))
atm = mmhg / 760
print('{} atm'.format(atm))
else:
mmhg = float(input('How much mmhg are you converting: '))
atm = mmhg / 760
print('{} atm'.format(atm))
if unknown1 in 'ATM, ATm, Atm, aTM, aTm, atM, atm' and \
unknown2 in 'MMHG, MMHg, MMhg, Mmhg, mMHG, mmHg, mMhg, mmhg, mmhG' :
if decimal3 is str:
atm = int(input('How much atm are you converting?: '))
mmhg = atm * 760
print('{} mmhg'.format(mmhg))
else:
atm = float(input('How much atm are you converting: '))
mmhg = atm * 760
print('{} mmhg'.format(mmhg))
if unknown1 in 'ATM, ATm, Atm, aTM, aTm, atM, atm' and \
unknown2 in 'TORR, TORr, TOrr, Torr, tORR, tORr, tOrr, torr, torR':
if decimal2 is str:
atm = int(input('How much atm are you converting?: '))
torr = atm * 760
print('{} torr'.format(torr))
else:
atm = float(input('How much atm are you converting: '))
torr = atm * 760
print('{} torr'.format(torr))
if unknown1 in 'ATM, ATm, Atm, aTM, aTm, atM, atm' and \
unknown2 in 'KPA, KPa, Kpa,kPA, kPa, kpA, kpa':
if decimal4 is str:
atm = int(input('How much atm are you converting?: '))
kpa2 = atm * 101.325
print('{} kpa'.format(kpa2))
else:
atm = float(input('How much atm are you converting: '))
kpa2 = atm * 101.325
print('{} kpa'.format(kpa2))
if unknown1 in 'TORR, TORr, TOrr, Torr, tORR, tORr, tOrr, torr, torR' and \
unknown2 in 'KPA, KPa, Kpa,kPA, kPa, kpA, kpa':
if decimal4 is str:
torr = int(input('How much torr are you converting?: '))
kpa2 = torr * 0.1333223684
print('{} kpa'.format(kpa2))
else:
torr = float(input('How much torr are you converting: '))
kpa2 = torr * 0.1333223684
print('{} kpa'.format(kpa2))
if unknown1 in 'TORR, TORr, TOrr, Torr, tORR, tORr, tOrr, torr, torR' and \
unknown2 in 'MMHG, MMHg, MMhg, Mmhg, mMHG, mmHg, mMhg, mmhg, mmhG':
if decimal3 is str:
torr = int(input('How much torr are you converting?: '))
mmhg = torr / 1
print('{} mmhg'.format(mmhg))
else:
torr = float(input('How much torr are you converting: '))
mmhg = torr / 1
print('{} mmhg'.format(mmhg))
if unknown1 in 'TORR, TORr, TOrr, Torr, tORR, tORr, tOrr, torr, torR' and \
unknown2 in 'ATM, ATm, Atm, aTM, aTm, atM, atm':
if decimal is str:
torr = int(input('How much torr are you converting?: '))
atm = torr / 760
print('{} atm'.format(atm))
else:
torr = float(input('How much torr are you converting: '))
atm = torr / 760
print('{} atm'.format(atm))
PC()
convert()
|
import argparse
import numpy as np
import pandas as pd
import os
from tqdm import tqdm
import torch.nn as nn
from torch import optim
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
from torch.utils.data.distributed import DistributedSampler
import torch
import random
import pickle
from torch.cuda.amp import autocast, GradScaler
import time
from transformers import DebertaModel, DebertaPreTrainedModel, DebertaConfig, get_linear_schedule_with_warmup, DebertaTokenizer
from transformers.models.deberta.modeling_deberta import ContextPooler
class JRSDebertaDataset(Dataset):
def __init__(self, text_list, tokenizer, max_len):
self.text_list=text_list
self.tokenizer=tokenizer
self.max_len=max_len
def __len__(self):
return len(self.text_list)
def __getitem__(self, index):
tokenized = self.tokenizer(text=self.text_list[index],
padding='max_length',
truncation=True,
max_length=self.max_len,
return_tensors='pt')
return tokenized['input_ids'].squeeze(), tokenized['attention_mask'].squeeze(), tokenized['token_type_ids'].squeeze()
class JRSDebertaModel(DebertaPreTrainedModel):
def __init__(self, config):
super(JRSDebertaModel, self).__init__(config)
self.deberta = DebertaModel(config)
self.pooler = ContextPooler(config)
output_dim = self.pooler.output_dim
self.classifier = nn.Linear(output_dim, 1)
self.init_weights()
@autocast()
def forward(self, input_ids, attention_mask=None, token_type_ids=None):
outputs = self.deberta(input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids)
encoder_layer = outputs[0]
pooled_output = self.pooler(encoder_layer)
logits = self.classifier(pooled_output)
return logits
def main():
start_time = time.time()
###
df = pd.read_csv('../../../input/validation_data.csv')
more_toxic_list = df['more_toxic'].values
less_toxic_list = df['less_toxic'].values
print(len(more_toxic_list), len(less_toxic_list))
# parameters
max_len = 192
batch_size = 96
model_path = "microsoft/deberta-base"
# build model
more_toxic_pred = np.zeros((len(more_toxic_list), ), dtype=np.float32)
less_toxic_pred = np.zeros((len(less_toxic_list), ), dtype=np.float32)
config = DebertaConfig.from_pretrained(model_path)
tokenizer = DebertaTokenizer.from_pretrained(model_path)
model = JRSDebertaModel.from_pretrained('../train/weights/weights', config=config)
model = model.cuda()
if torch.cuda.device_count() > 1:
model = torch.nn.DataParallel(model)
model.eval()
# iterator for validation
dataset = JRSDebertaDataset(more_toxic_list, tokenizer, max_len)
generator = DataLoader(dataset=dataset,
batch_size=batch_size,
shuffle=False,
num_workers=8,
pin_memory=True)
for j, (batch_input_ids, batch_attention_mask, batch_token_type_ids) in enumerate(generator):
with torch.no_grad():
start = j*batch_size
end = start+batch_size
if j == len(generator)-1:
end = len(generator.dataset)
batch_input_ids = batch_input_ids.cuda()
batch_attention_mask = batch_attention_mask.cuda()
batch_token_type_ids = batch_token_type_ids.cuda()
with autocast():
logits = model(batch_input_ids, batch_attention_mask, batch_token_type_ids).view(-1)
more_toxic_pred[start:end] += logits.sigmoid().cpu().data.numpy()
# iterator for validation
dataset = JRSDebertaDataset(less_toxic_list, tokenizer, max_len)
generator = DataLoader(dataset=dataset,
batch_size=batch_size,
shuffle=False,
num_workers=8,
pin_memory=True)
for j, (batch_input_ids, batch_attention_mask, batch_token_type_ids) in enumerate(generator):
with torch.no_grad():
start = j*batch_size
end = start+batch_size
if j == len(generator)-1:
end = len(generator.dataset)
batch_input_ids = batch_input_ids.cuda()
batch_attention_mask = batch_attention_mask.cuda()
batch_token_type_ids = batch_token_type_ids.cuda()
with autocast():
logits = model(batch_input_ids, batch_attention_mask, batch_token_type_ids).view(-1)
less_toxic_pred[start:end] += logits.sigmoid().cpu().data.numpy()
###
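    # The value below is the fraction of validation pairs where the predicted toxicity
    # of the "more toxic" text exceeds that of the "less toxic" one, i.e. the model's
    # pairwise ranking accuracy on this validation set.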
print(less_toxic_pred.shape, more_toxic_pred.shape)
print(np.mean(less_toxic_pred<more_toxic_pred))
end_time = time.time()
print(end_time-start_time)
if __name__ == "__main__":
main()
|
<gh_stars>0
# --------------------------------------------------------
# Subcategory CNN
# Copyright (c) 2015 CVGL Stanford
# Licensed under The MIT License [see LICENSE for details]
# Written by <NAME>
# --------------------------------------------------------
import numpy as np
import math
from fast_rcnn.config import cfg
def get_boxes_grid(image_height, image_width):
"""
Return the boxes on image grid.
    Call this function when cfg.IS_MULTISCALE is True; otherwise call rdl_roidb.prepare_roidb(imdb) instead.
"""
    # Fixed a bug: changed cfg.TRAIN.SCALES to cfg.TRAIN.SCALES_BASE,
    # because a ratio around 1.0 is needed here, not the actual size.
# height and width of the feature map
if cfg.NET_NAME == 'CaffeNet':
height = np.floor((image_height * max(cfg.TRAIN.SCALES_BASE) - 1) / 4.0 + 1)
height = np.floor((height - 1) / 2.0 + 1 + 0.5)
height = np.floor((height - 1) / 2.0 + 1 + 0.5)
width = np.floor((image_width * max(cfg.TRAIN.SCALES_BASE) - 1) / 4.0 + 1)
width = np.floor((width - 1) / 2.0 + 1 + 0.5)
width = np.floor((width - 1) / 2.0 + 1 + 0.5)
elif cfg.NET_NAME == 'VGGnet':
height = np.floor(image_height * max(cfg.TRAIN.SCALES_BASE) / 2.0 + 0.5)
height = np.floor(height / 2.0 + 0.5)
height = np.floor(height / 2.0 + 0.5)
height = np.floor(height / 2.0 + 0.5)
width = np.floor(image_width * max(cfg.TRAIN.SCALES_BASE) / 2.0 + 0.5)
width = np.floor(width / 2.0 + 0.5)
width = np.floor(width / 2.0 + 0.5)
width = np.floor(width / 2.0 + 0.5)
else:
        assert False, 'The network architecture is not supported in utils.get_boxes_grid!'
# compute the grid box centers
h = np.arange(height)
w = np.arange(width)
y, x = np.meshgrid(h, w, indexing='ij')
centers = np.dstack((x, y))
centers = np.reshape(centers, (-1, 2))
num = centers.shape[0]
# compute width and height of grid box
area = cfg.TRAIN.KERNEL_SIZE * cfg.TRAIN.KERNEL_SIZE
aspect = cfg.TRAIN.ASPECTS # height / width
num_aspect = len(aspect)
widths = np.zeros((1, num_aspect), dtype=np.float32)
heights = np.zeros((1, num_aspect), dtype=np.float32)
for i in xrange(num_aspect):
widths[0,i] = math.sqrt(area / aspect[i])
heights[0,i] = widths[0,i] * aspect[i]
# construct grid boxes
centers = np.repeat(centers, num_aspect, axis=0)
widths = np.tile(widths, num).transpose()
heights = np.tile(heights, num).transpose()
x1 = np.reshape(centers[:,0], (-1, 1)) - widths * 0.5
x2 = np.reshape(centers[:,0], (-1, 1)) + widths * 0.5
y1 = np.reshape(centers[:,1], (-1, 1)) - heights * 0.5
y2 = np.reshape(centers[:,1], (-1, 1)) + heights * 0.5
boxes_grid = np.hstack((x1, y1, x2, y2)) / cfg.TRAIN.SPATIAL_SCALE
return boxes_grid, centers[:,0], centers[:,1]
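# Usage sketch (hypothetical image size; assumes cfg has been populated with the usual
# TRAIN.* settings before the call):
#   boxes_grid, cx, cy = get_boxes_grid(600, 800)
#   boxes_grid has shape (height * width * len(cfg.TRAIN.ASPECTS), 4) with rows in
#   (x1, y1, x2, y2) order, scaled by 1 / cfg.TRAIN.SPATIAL_SCALE.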
|
#!/usr/bin/env python
import io
import sys
opcodes = {
'NOP': (0xF1, ''),
'RET': (0xF2, ''),
'IL': (0xF3, ''),
'IU': (0xF4, ''),
'INT': (0x81, 'R'),
'LPC': (0x82, 'R'),
'LSP': (0x83, 'R'),
'LIP': (0x84, 'R'),
'LCR': (0x85, 'R'),
'NOT': (0x86, 'R'),
'PUS': (0x87, 'R'),
'POP': (0x88, 'R'),
'SIP': (0x89, 'R'),
'SSP': (0x8A, 'R'),
'SCR': (0x8B, 'R'),
'L': (0x01, 'RR'),
'LS': (0x02, 'RR'),
'ST': (0x03, 'RR'),
'A': (0x04, 'RR'),
'AU': (0x05, 'RR'),
'S': (0x06, 'RR'),
'SU': (0x07, 'RR'),
'M': (0x08, 'RR'),
'MU': (0x09, 'RR'),
'AND': (0x0A, 'RR'),
'OR': (0x0B, 'RR'),
'XOR': (0x0C, 'RR'),
'B': (0x0D, 'RR'),
'BAS': (0x0E, 'RR'),
'CP': (0x0F, 'RR'),
'CPU': (0x10, 'RR'),
'SHL': (0x11, 'RR'),
'SHR': (0x12, 'RR'),
'D': (0xC1, 'RRR'),
'DU': (0xC2, 'RRR'),
'BAL': (0xC3, 'RRR'),
'LSM': (0xC4, 'RRR'),
'STM': (0xC5, 'RRR'),
'LUM': (0xC6, 'RRR'),
'SUM': (0xC7, 'RRR'),
'LI': (0xE1,'RI')
}
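# Encoding sketch, derived from parseParams/parseOpcode below: each 'R' operand is one
# byte and an 'I' immediate is eight big-endian bytes, so the source line
#   LI 3, #258
# assembles to the byte sequence E1 03 00 00 00 00 00 00 01 02.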
def parseParams(params, datas):
res = []
pdatas = [x.strip() for x in ' '.join(datas).split(',')]
for i,c in enumerate(params):
if c == 'R':
res.append(int(pdatas[i]) & 0xFF)
elif c == 'I':
mode = pdatas[i][0]
if mode != '#' and mode != '$':
raise ValueError('Expected immediate #num or $num, got: %s' % pdatas[i])
if mode == '$':
imm = int(pdatas[i][1:], 16) & 0xFFFFFFFFFFFFFFFF
else:
imm = int(pdatas[i][1:]) & 0xFFFFFFFFFFFFFFFF
tmp = []
for x in range(0, 8):
tmp.append(imm & 0xFF)
imm >>= 8
res += reversed(tmp)
return res
def parseOpcode(opcode, datas):
res = []
res.append(opcode[0])
res += parseParams(opcode[1], datas[1:])
return res
def parseLine(res, line):
if not line:
return res
if line[0] == ';':
print('Comment: %s' % line)
return res
datas = [x.strip() for x in line.split(' ')]
if not datas:
return res
if datas[0] in opcodes:
res += parseOpcode(opcodes[datas[0]], datas)
return res
def parse(data):
res = []
for l in data:
res = parseLine(res, l.strip())
return res
if __name__ == '__main__':
if len(sys.argv) <= 2:
print('Usage: %s source.asm dest.bin' % sys.argv[0])
sys.exit(1)
with open(sys.argv[1], 'r') as fd:
data = fd.readlines()
res = parse(data)
with open(sys.argv[2], 'wb+') as fd:
fd.write(bytearray(res))
|
<reponame>zhenv5/fedlearner
# Copyright 2020 The FedLearner Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding: utf-8
# pylint: disable=protected-access
import time
from tensorflow.python.client import session
from tensorflow.python.framework import meta_graph, ops
from tensorflow.python.framework.versions import VERSION
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import checkpoint_management, session_manager
from tensorflow.python.training.basic_session_run_hooks \
import CheckpointSaverHook
assert VERSION.startswith("1.15."), "Monkey patch is only valid for TF 1.15."
def new_restore_checkpoint(self,
master,
saver=None,
checkpoint_dir=None,
checkpoint_filename_with_path=None,
wait_for_checkpoint=False,
max_wait_secs=7200,
config=None):
"""Creates a `Session`, and tries to restore a checkpoint if needed.
Args:
master: `String` representation of the TensorFlow master to use.
saver: A `Saver` object used to restore a model.
checkpoint_dir: Path to the checkpoint files. The latest checkpoint
in the dir will be used to restore.
checkpoint_filename_with_path: Full file name path to the checkpoint
file.
wait_for_checkpoint: Whether to wait for checkpoint to become
available.
max_wait_secs: Maximum time to wait for checkpoints to become
available.
config: Optional `ConfigProto` proto used to configure the session.
Returns:
A pair (sess, is_restored) where 'is_restored' is `True` if the
session could be restored, `False` otherwise.
Raises:
ValueError: If both checkpoint_dir and checkpoint_filename_with_path
are set.
"""
self._target = master
sess = session.Session(self._target, graph=self._graph, config=config)
if checkpoint_dir and checkpoint_filename_with_path:
raise ValueError("Can not provide both checkpoint_dir and "
"checkpoint_filename_with_path.")
# If variables & resources in PS has beed initialized, do not recover.
is_ready_for_local_init, _ = self._model_ready_for_local_init(sess)
if is_ready_for_local_init:
return sess, True
# If either saver or checkpoint_* is not specified, cannot restore. Just
# return.
if not saver or not (checkpoint_dir or checkpoint_filename_with_path):
return sess, False
if checkpoint_filename_with_path:
saver.restore(sess, checkpoint_filename_with_path)
return sess, True
# Waits up until max_wait_secs for checkpoint to become available.
wait_time = 0
ckpt = checkpoint_management.get_checkpoint_state(checkpoint_dir)
while not ckpt or not ckpt.model_checkpoint_path:
if wait_for_checkpoint and wait_time < max_wait_secs:
logging.info("Waiting for checkpoint to be available.")
time.sleep(self._recovery_wait_secs)
wait_time += self._recovery_wait_secs
ckpt = checkpoint_management.get_checkpoint_state(checkpoint_dir)
else:
return sess, False
# Loads the checkpoint.
saver.restore(sess, ckpt.model_checkpoint_path)
saver.recover_last_checkpoints(ckpt.all_model_checkpoint_paths)
return sess, True
session_manager.SessionManager._restore_checkpoint = new_restore_checkpoint
old_CheckpointSaverHook_after_create_session = \
CheckpointSaverHook.after_create_session
def _new_CheckpointSaverHook_after_create_session(self, sess, coord):
global_step = sess.run(self._global_step_tensor)
try:
ckpt_tensor = sess.graph.get_tensor_by_name('data_checkpoint:0')
self.data_checkpoint = sess.run(ckpt_tensor)
except KeyError as e:
logging.info("tensor data_checkpoint:0 doesn't exist")
# We do write graph and saver_def at the first call of before_run.
# We cannot do this in begin, since we let other hooks to change graph and
# add variables in begin. Graph is finalized after all begin calls.
logging.info('Skip the writing of [graph.pbtxt]')
# training_util.write_graph(
# ops.get_default_graph().as_graph_def(add_shapes=True),
# self._checkpoint_dir, "graph.pbtxt")
saver_def = self._get_saver().saver_def if self._get_saver() else None
graph = ops.get_default_graph()
meta_graph_def = meta_graph.create_meta_graph_def(
graph_def=graph.as_graph_def(add_shapes=True), saver_def=saver_def)
self._summary_writer.add_graph(graph)
self._summary_writer.add_meta_graph(meta_graph_def)
# The checkpoint saved here is the state at step "global_step".
logging.info('Skip the writing of [checkpoint@%d]', global_step)
# self._save(sess, global_step)
self._timer.update_last_triggered_step(global_step)
CheckpointSaverHook.after_create_session = \
_new_CheckpointSaverHook_after_create_session
|
# coding: utf-8
from __future__ import absolute_import
import datetime
import re
import importlib
import six
from huaweicloudsdkcore.client import Client, ClientBuilder
from huaweicloudsdkcore.exceptions import exceptions
from huaweicloudsdkcore.utils import http_utils
from huaweicloudsdkcore.sdk_stream_request import SdkStreamRequest
class DnsClient(Client):
"""
:param configuration: .Configuration object for this client
:param pool_threads: The number of threads to use for async requests
to the API. More threads means more concurrent API requests.
"""
PRIMITIVE_TYPES = (float, bool, bytes, six.text_type) + six.integer_types
NATIVE_TYPES_MAPPING = {
'int': int,
'long': int if six.PY3 else long,
'float': float,
'str': str,
'bool': bool,
'date': datetime.date,
'datetime': datetime.datetime,
'object': object,
}
def __init__(self):
super(DnsClient, self).__init__()
self.model_package = importlib.import_module("huaweicloudsdkdns.v2.model")
self.preset_headers = {'User-Agent': 'HuaweiCloud-SDK-Python'}
@classmethod
def new_builder(cls, clazz=None):
if clazz is None:
return ClientBuilder(cls)
if clazz.__name__ != "DnsClient":
raise TypeError("client type error, support client type is DnsClient")
return ClientBuilder(clazz)
def create_custom_line(self, request):
"""创建单个自定义线路
创建单个自定义线路
:param CreateCustomLineRequest request
:return: CreateCustomLineResponse
"""
return self.create_custom_line_with_http_info(request)
def create_custom_line_with_http_info(self, request):
"""创建单个自定义线路
创建单个自定义线路
:param CreateCustomLineRequest request
:return: CreateCustomLineResponse
"""
all_params = ['create_custom_lines']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v2.1/customlines',
method='POST',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='CreateCustomLineResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def delete_custom_line(self, request):
"""删除单个自定义线路
删除单个自定义线路
:param DeleteCustomLineRequest request
:return: DeleteCustomLineResponse
"""
return self.delete_custom_line_with_http_info(request)
def delete_custom_line_with_http_info(self, request):
"""删除单个自定义线路
删除单个自定义线路
:param DeleteCustomLineRequest request
:return: DeleteCustomLineResponse
"""
all_params = ['line_id']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'line_id' in local_var_params:
path_params['line_id'] = local_var_params['line_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v2.1/customlines/{line_id}',
method='DELETE',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='DeleteCustomLineResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def list_api_versions(self, request):
"""查询所有的云解析服务API版本号
查询所有的云解析服务API版本号列表
:param ListApiVersionsRequest request
:return: ListApiVersionsResponse
"""
return self.list_api_versions_with_http_info(request)
def list_api_versions_with_http_info(self, request):
"""查询所有的云解析服务API版本号
查询所有的云解析服务API版本号列表
:param ListApiVersionsRequest request
:return: ListApiVersionsResponse
"""
all_params = []
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='ListApiVersionsResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def list_custom_line(self, request):
"""查询自定义线路
查询自定义线路
:param ListCustomLineRequest request
:return: ListCustomLineResponse
"""
return self.list_custom_line_with_http_info(request)
def list_custom_line_with_http_info(self, request):
"""查询自定义线路
查询自定义线路
:param ListCustomLineRequest request
:return: ListCustomLineResponse
"""
all_params = ['line_id', 'name', 'limit', 'offset', 'show_detail']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
if 'line_id' in local_var_params:
query_params.append(('line_id', local_var_params['line_id']))
if 'name' in local_var_params:
query_params.append(('name', local_var_params['name']))
if 'limit' in local_var_params:
query_params.append(('limit', local_var_params['limit']))
if 'offset' in local_var_params:
query_params.append(('offset', local_var_params['offset']))
if 'show_detail' in local_var_params:
query_params.append(('show_detail', local_var_params['show_detail']))
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v2.1/customlines',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='ListCustomLineResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def list_name_servers(self, request):
"""查询名称服务器列表
查询名称服务器列表
:param ListNameServersRequest request
:return: ListNameServersResponse
"""
return self.list_name_servers_with_http_info(request)
def list_name_servers_with_http_info(self, request):
"""查询名称服务器列表
查询名称服务器列表
:param ListNameServersRequest request
:return: ListNameServersResponse
"""
all_params = ['type', 'region']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
if 'type' in local_var_params:
query_params.append(('type', local_var_params['type']))
if 'region' in local_var_params:
query_params.append(('region', local_var_params['region']))
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v2/nameservers',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='ListNameServersResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def show_api_info(self, request):
"""查询指定的云解析服务API版本号
查询指定的云解析服务API版本号
:param ShowApiInfoRequest request
:return: ShowApiInfoResponse
"""
return self.show_api_info_with_http_info(request)
def show_api_info_with_http_info(self, request):
"""查询指定的云解析服务API版本号
查询指定的云解析服务API版本号
:param ShowApiInfoRequest request
:return: ShowApiInfoResponse
"""
all_params = ['version']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'version' in local_var_params:
path_params['version'] = local_var_params['version']
query_params = []
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/{version}',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='ShowApiInfoResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def update_custom_line(self, request):
"""更新单个自定义线路
更新单个自定义线路
:param UpdateCustomLineRequest request
:return: UpdateCustomLineResponse
"""
return self.update_custom_line_with_http_info(request)
def update_custom_line_with_http_info(self, request):
"""更新单个自定义线路
更新单个自定义线路
:param UpdateCustomLineRequest request
:return: UpdateCustomLineResponse
"""
all_params = ['line_id', 'update_customs_line_req']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'line_id' in local_var_params:
path_params['line_id'] = local_var_params['line_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v2.1/customlines/{line_id}',
method='PUT',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='UpdateCustomLineResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def create_eip_record_set(self, request):
"""设置弹性IP的PTR记录
设置弹性IP的PTR记录
:param CreateEipRecordSetRequest request
:return: CreateEipRecordSetResponse
"""
return self.create_eip_record_set_with_http_info(request)
def create_eip_record_set_with_http_info(self, request):
"""设置弹性IP的PTR记录
设置弹性IP的PTR记录
:param CreateEipRecordSetRequest request
:return: CreateEipRecordSetResponse
"""
all_params = ['region', 'floatingip_id', 'create_ptr_req']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'region' in local_var_params:
path_params['region'] = local_var_params['region']
if 'floatingip_id' in local_var_params:
path_params['floatingip_id'] = local_var_params['floatingip_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v2/reverse/floatingips/{region}:{floatingip_id}',
method='PATCH',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='CreateEipRecordSetResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
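    # Usage sketch (illustrative): setting a PTR record for an elastic IP. The
    # region and floatingip_id attributes correspond to the path parameters
    # above; the body attribute stands in for the create_ptr_req payload and its
    # exact shape is an assumption about CreateEipRecordSetRequest.
    #
    #     request = CreateEipRecordSetRequest()
    #     request.region = "region-id"
    #     request.floatingip_id = "eip-uuid"
    #     request.body = create_ptr_req  # PTR domain name, ttl, ...
    #     response = client.create_eip_record_set(request)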
def list_ptr_records(self, request):
"""查询租户弹性IP的PTR记录列表
查询租户弹性IP的PTR记录列表
:param ListPtrRecordsRequest request
:return: ListPtrRecordsResponse
"""
return self.list_ptr_records_with_http_info(request)
def list_ptr_records_with_http_info(self, request):
"""查询租户弹性IP的PTR记录列表
查询租户弹性IP的PTR记录列表
:param ListPtrRecordsRequest request
:return: ListPtrRecordsResponse
"""
all_params = ['marker', 'limit', 'offset', 'enterprise_project_id', 'tags', 'status']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
if 'marker' in local_var_params:
query_params.append(('marker', local_var_params['marker']))
if 'limit' in local_var_params:
query_params.append(('limit', local_var_params['limit']))
if 'offset' in local_var_params:
query_params.append(('offset', local_var_params['offset']))
if 'enterprise_project_id' in local_var_params:
query_params.append(('enterprise_project_id', local_var_params['enterprise_project_id']))
if 'tags' in local_var_params:
query_params.append(('tags', local_var_params['tags']))
if 'status' in local_var_params:
query_params.append(('status', local_var_params['status']))
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v2/reverse/floatingips',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='ListPtrRecordsResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def restore_ptr_record(self, request):
"""将弹性IP的PTR记录恢复为默认值
将弹性IP的PTR记录恢复为默认值
:param RestorePtrRecordRequest request
:return: RestorePtrRecordResponse
"""
return self.restore_ptr_record_with_http_info(request)
def restore_ptr_record_with_http_info(self, request):
"""将弹性IP的PTR记录恢复为默认值
将弹性IP的PTR记录恢复为默认值
:param RestorePtrRecordRequest request
:return: RestorePtrRecordResponse
"""
all_params = ['region', 'floatingip_id', 'restore_ptr_req']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'region' in local_var_params:
path_params['region'] = local_var_params['region']
if 'floatingip_id' in local_var_params:
path_params['floatingip_id'] = local_var_params['floatingip_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v2/reverse/floatingips/{region}:{floatingip_id}',
method='PATCH',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='RestorePtrRecordResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def show_ptr_record_set(self, request):
"""查询单个弹性IP的PTR记录
查询单个弹性IP的PTR记录
:param ShowPtrRecordSetRequest request
:return: ShowPtrRecordSetResponse
"""
return self.show_ptr_record_set_with_http_info(request)
def show_ptr_record_set_with_http_info(self, request):
"""查询单个弹性IP的PTR记录
查询单个弹性IP的PTR记录
:param ShowPtrRecordSetRequest request
:return: ShowPtrRecordSetResponse
"""
all_params = ['region', 'floatingip_id']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'region' in local_var_params:
path_params['region'] = local_var_params['region']
if 'floatingip_id' in local_var_params:
path_params['floatingip_id'] = local_var_params['floatingip_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v2/reverse/floatingips/{region}:{floatingip_id}',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='ShowPtrRecordSetResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def update_ptr_record(self, request):
"""修改弹性IP的PTR记录
修改弹性IP的PTR记录
:param UpdatePtrRecordRequest request
:return: UpdatePtrRecordResponse
"""
return self.update_ptr_record_with_http_info(request)
def update_ptr_record_with_http_info(self, request):
"""修改弹性IP的PTR记录
修改弹性IP的PTR记录
:param UpdatePtrRecordRequest request
:return: UpdatePtrRecordResponse
"""
all_params = ['region', 'floatingip_id', 'update_ptr_req']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'region' in local_var_params:
path_params['region'] = local_var_params['region']
if 'floatingip_id' in local_var_params:
path_params['floatingip_id'] = local_var_params['floatingip_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v2/reverse/floatingips/{region}:{floatingip_id}',
method='PATCH',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='UpdatePtrRecordResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def create_record_set(self, request):
"""创建单个Record Set
创建单个Record Set
:param CreateRecordSetRequest request
:return: CreateRecordSetResponse
"""
return self.create_record_set_with_http_info(request)
def create_record_set_with_http_info(self, request):
"""创建单个Record Set
创建单个Record Set
:param CreateRecordSetRequest request
:return: CreateRecordSetResponse
"""
all_params = ['zone_id', 'create_record_set_req']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'zone_id' in local_var_params:
path_params['zone_id'] = local_var_params['zone_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v2/zones/{zone_id}/recordsets',
method='POST',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='CreateRecordSetResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
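    # Usage sketch (illustrative): creating a record set in a zone. zone_id is
    # the path parameter handled above; the body attribute is an assumption that
    # stands in for the create_record_set_req payload (name, type, records, ttl).
    #
    #     request = CreateRecordSetRequest()
    #     request.zone_id = "zone-uuid"
    #     request.body = create_record_set_req
    #     response = client.create_record_set(request)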
def create_record_set_with_line(self, request):
"""创建单个Record Set,仅适用于公网DNS
创建单个Record Set,仅适用于公网DNS
:param CreateRecordSetWithLineRequest request
:return: CreateRecordSetWithLineResponse
"""
return self.create_record_set_with_line_with_http_info(request)
def create_record_set_with_line_with_http_info(self, request):
"""创建单个Record Set,仅适用于公网DNS
创建单个Record Set,仅适用于公网DNS
:param CreateRecordSetWithLineRequest request
:return: CreateRecordSetWithLineResponse
"""
all_params = ['zone_id', 'create_record_set_with_line_req']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'zone_id' in local_var_params:
path_params['zone_id'] = local_var_params['zone_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v2.1/zones/{zone_id}/recordsets',
method='POST',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='CreateRecordSetWithLineResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def delete_record_set(self, request):
"""删除单个Record Set
删除单个Record Set
:param DeleteRecordSetRequest request
:return: DeleteRecordSetResponse
"""
return self.delete_record_set_with_http_info(request)
def delete_record_set_with_http_info(self, request):
"""删除单个Record Set
删除单个Record Set
:param DeleteRecordSetRequest request
:return: DeleteRecordSetResponse
"""
all_params = ['zone_id', 'recordset_id']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'zone_id' in local_var_params:
path_params['zone_id'] = local_var_params['zone_id']
if 'recordset_id' in local_var_params:
path_params['recordset_id'] = local_var_params['recordset_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v2/zones/{zone_id}/recordsets/{recordset_id}',
method='DELETE',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='DeleteRecordSetResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def delete_record_sets(self, request):
"""删除单个Record Set
删除单个Record Set
:param DeleteRecordSetsRequest request
:return: DeleteRecordSetsResponse
"""
return self.delete_record_sets_with_http_info(request)
def delete_record_sets_with_http_info(self, request):
"""删除单个Record Set
删除单个Record Set
:param DeleteRecordSetsRequest request
:return: DeleteRecordSetsResponse
"""
all_params = ['zone_id', 'recordset_id']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'zone_id' in local_var_params:
path_params['zone_id'] = local_var_params['zone_id']
if 'recordset_id' in local_var_params:
path_params['recordset_id'] = local_var_params['recordset_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v2.1/zones/{zone_id}/recordsets/{recordset_id}',
method='DELETE',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='DeleteRecordSetsResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def list_record_sets(self, request):
"""查询租户Record Set资源列表
查询租户Record Set资源列表
:param ListRecordSetsRequest request
:return: ListRecordSetsResponse
"""
return self.list_record_sets_with_http_info(request)
def list_record_sets_with_http_info(self, request):
"""查询租户Record Set资源列表
查询租户Record Set资源列表
:param ListRecordSetsRequest request
:return: ListRecordSetsResponse
"""
all_params = ['zone_type', 'marker', 'limit', 'offset', 'tags', 'status', 'type', 'name', 'id', 'records', 'sort_key', 'sort_dir']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
if 'zone_type' in local_var_params:
query_params.append(('zone_type', local_var_params['zone_type']))
if 'marker' in local_var_params:
query_params.append(('marker', local_var_params['marker']))
if 'limit' in local_var_params:
query_params.append(('limit', local_var_params['limit']))
if 'offset' in local_var_params:
query_params.append(('offset', local_var_params['offset']))
if 'tags' in local_var_params:
query_params.append(('tags', local_var_params['tags']))
if 'status' in local_var_params:
query_params.append(('status', local_var_params['status']))
if 'type' in local_var_params:
query_params.append(('type', local_var_params['type']))
if 'name' in local_var_params:
query_params.append(('name', local_var_params['name']))
if 'id' in local_var_params:
query_params.append(('id', local_var_params['id']))
if 'records' in local_var_params:
query_params.append(('records', local_var_params['records']))
if 'sort_key' in local_var_params:
query_params.append(('sort_key', local_var_params['sort_key']))
if 'sort_dir' in local_var_params:
query_params.append(('sort_dir', local_var_params['sort_dir']))
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v2/recordsets',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='ListRecordSetsResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
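    # Usage sketch (illustrative): filtering the tenant-wide record set listing.
    # The attributes mirror the query parameters assembled above (zone_type,
    # type, name, limit, ...); treating them as writable attributes of
    # ListRecordSetsRequest is an assumption, not verified against the SDK docs.
    #
    #     request = ListRecordSetsRequest()
    #     request.zone_type = "public"
    #     request.type = "A"
    #     request.limit = 50
    #     response = client.list_record_sets(request)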
def list_record_sets_by_zone(self, request):
"""查询单个Zone下Record Set列表
查询单个Zone下Record Set列表
:param ListRecordSetsByZoneRequest request
:return: ListRecordSetsByZoneResponse
"""
return self.list_record_sets_by_zone_with_http_info(request)
def list_record_sets_by_zone_with_http_info(self, request):
"""查询单个Zone下Record Set列表
查询单个Zone下Record Set列表
:param ListRecordSetsByZoneRequest request
:return: ListRecordSetsByZoneResponse
"""
all_params = ['zone_id', 'marker', 'limit', 'offset', 'tags', 'status', 'type', 'name', 'id', 'sort_key', 'sort_dir']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'zone_id' in local_var_params:
path_params['zone_id'] = local_var_params['zone_id']
query_params = []
if 'marker' in local_var_params:
query_params.append(('marker', local_var_params['marker']))
if 'limit' in local_var_params:
query_params.append(('limit', local_var_params['limit']))
if 'offset' in local_var_params:
query_params.append(('offset', local_var_params['offset']))
if 'tags' in local_var_params:
query_params.append(('tags', local_var_params['tags']))
if 'status' in local_var_params:
query_params.append(('status', local_var_params['status']))
if 'type' in local_var_params:
query_params.append(('type', local_var_params['type']))
if 'name' in local_var_params:
query_params.append(('name', local_var_params['name']))
if 'id' in local_var_params:
query_params.append(('id', local_var_params['id']))
if 'sort_key' in local_var_params:
query_params.append(('sort_key', local_var_params['sort_key']))
if 'sort_dir' in local_var_params:
query_params.append(('sort_dir', local_var_params['sort_dir']))
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v2/zones/{zone_id}/recordsets',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='ListRecordSetsByZoneResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def list_record_sets_with_line(self, request):
"""查询租户Record Set资源列表
查询租户Record Set资源列表
:param ListRecordSetsWithLineRequest request
:return: ListRecordSetsWithLineResponse
"""
return self.list_record_sets_with_line_with_http_info(request)
def list_record_sets_with_line_with_http_info(self, request):
"""查询租户Record Set资源列表
查询租户Record Set资源列表
:param ListRecordSetsWithLineRequest request
:return: ListRecordSetsWithLineResponse
"""
all_params = ['zone_type', 'marker', 'limit', 'offset', 'line_id', 'tags', 'status', 'type', 'name', 'id', 'records', 'sort_key', 'sort_dir', 'health_check_id', 'search_mode']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
if 'zone_type' in local_var_params:
query_params.append(('zone_type', local_var_params['zone_type']))
if 'marker' in local_var_params:
query_params.append(('marker', local_var_params['marker']))
if 'limit' in local_var_params:
query_params.append(('limit', local_var_params['limit']))
if 'offset' in local_var_params:
query_params.append(('offset', local_var_params['offset']))
if 'line_id' in local_var_params:
query_params.append(('line_id', local_var_params['line_id']))
if 'tags' in local_var_params:
query_params.append(('tags', local_var_params['tags']))
if 'status' in local_var_params:
query_params.append(('status', local_var_params['status']))
if 'type' in local_var_params:
query_params.append(('type', local_var_params['type']))
if 'name' in local_var_params:
query_params.append(('name', local_var_params['name']))
if 'id' in local_var_params:
query_params.append(('id', local_var_params['id']))
if 'records' in local_var_params:
query_params.append(('records', local_var_params['records']))
if 'sort_key' in local_var_params:
query_params.append(('sort_key', local_var_params['sort_key']))
if 'sort_dir' in local_var_params:
query_params.append(('sort_dir', local_var_params['sort_dir']))
if 'health_check_id' in local_var_params:
query_params.append(('health_check_id', local_var_params['health_check_id']))
if 'search_mode' in local_var_params:
query_params.append(('search_mode', local_var_params['search_mode']))
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v2.1/recordsets',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='ListRecordSetsWithLineResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def set_record_sets_status(self, request):
"""设置Record Set状态
设置Record Set状态
:param SetRecordSetsStatusRequest request
:return: SetRecordSetsStatusResponse
"""
return self.set_record_sets_status_with_http_info(request)
def set_record_sets_status_with_http_info(self, request):
"""设置Record Set状态
设置Record Set状态
:param SetRecordSetsStatusRequest request
:return: SetRecordSetsStatusResponse
"""
all_params = ['recordset_id', 'set_record_sets_status_req']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'recordset_id' in local_var_params:
path_params['recordset_id'] = local_var_params['recordset_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v2.1/recordsets/{recordset_id}/statuses/set',
method='PUT',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='SetRecordSetsStatusResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def show_record_set(self, request):
"""查询单个Record Set
查询单个Record Set
:param ShowRecordSetRequest request
:return: ShowRecordSetResponse
"""
return self.show_record_set_with_http_info(request)
def show_record_set_with_http_info(self, request):
"""查询单个Record Set
查询单个Record Set
:param ShowRecordSetRequest request
:return: ShowRecordSetResponse
"""
all_params = ['zone_id', 'recordset_id']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'zone_id' in local_var_params:
path_params['zone_id'] = local_var_params['zone_id']
if 'recordset_id' in local_var_params:
path_params['recordset_id'] = local_var_params['recordset_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v2/zones/{zone_id}/recordsets/{recordset_id}',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='ShowRecordSetResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def show_record_set_by_zone(self, request):
"""查询单个Zone下Record Set列表
查询单个Zone下Record Set列表
:param ShowRecordSetByZoneRequest request
:return: ShowRecordSetByZoneResponse
"""
return self.show_record_set_by_zone_with_http_info(request)
def show_record_set_by_zone_with_http_info(self, request):
"""查询单个Zone下Record Set列表
查询单个Zone下Record Set列表
:param ShowRecordSetByZoneRequest request
:return: ShowRecordSetByZoneResponse
"""
all_params = ['zone_id']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'zone_id' in local_var_params:
path_params['zone_id'] = local_var_params['zone_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v2.1/zones/{zone_id}/recordsets',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='ShowRecordSetByZoneResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def show_record_set_with_line(self, request):
"""查询单个Record Set,仅适用于公网DNS
查询单个Record Set,仅适用于公网DNS
:param ShowRecordSetWithLineRequest request
:return: ShowRecordSetWithLineResponse
"""
return self.show_record_set_with_line_with_http_info(request)
def show_record_set_with_line_with_http_info(self, request):
"""查询单个Record Set,仅适用于公网DNS
查询单个Record Set,仅适用于公网DNS
:param ShowRecordSetWithLineRequest request
:return: ShowRecordSetWithLineResponse
"""
all_params = ['zone_id', 'recordset_id']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'zone_id' in local_var_params:
path_params['zone_id'] = local_var_params['zone_id']
if 'recordset_id' in local_var_params:
path_params['recordset_id'] = local_var_params['recordset_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v2.1/zones/{zone_id}/recordsets/{recordset_id}',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='ShowRecordSetWithLineResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def update_record_set(self, request):
"""修改单个Record Set
修改单个Record Set
:param UpdateRecordSetRequest request
:return: UpdateRecordSetResponse
"""
return self.update_record_set_with_http_info(request)
def update_record_set_with_http_info(self, request):
"""修改单个Record Set
修改单个Record Set
:param UpdateRecordSetRequest request
:return: UpdateRecordSetResponse
"""
all_params = ['zone_id', 'recordset_id', 'update_record_set_req']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'zone_id' in local_var_params:
path_params['zone_id'] = local_var_params['zone_id']
if 'recordset_id' in local_var_params:
path_params['recordset_id'] = local_var_params['recordset_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v2/zones/{zone_id}/recordsets/{recordset_id}',
method='PUT',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='UpdateRecordSetResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
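    # Usage sketch (illustrative): updating one record set. zone_id and
    # recordset_id map to the path parameters above; the body attribute is an
    # assumption standing in for the update_record_set_req payload.
    #
    #     request = UpdateRecordSetRequest()
    #     request.zone_id = "zone-uuid"
    #     request.recordset_id = "recordset-uuid"
    #     request.body = update_record_set_req  # e.g. new records or ttl
    #     response = client.update_record_set(request)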
def update_record_sets(self, request):
"""修改单个Record Set
修改单个Record Set
:param UpdateRecordSetsRequest request
:return: UpdateRecordSetsResponse
"""
return self.update_record_sets_with_http_info(request)
def update_record_sets_with_http_info(self, request):
"""修改单个Record Set
修改单个Record Set
:param UpdateRecordSetsRequest request
:return: UpdateRecordSetsResponse
"""
all_params = ['zone_id', 'recordset_id', 'update_record_sets_req']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'zone_id' in local_var_params:
path_params['zone_id'] = local_var_params['zone_id']
if 'recordset_id' in local_var_params:
path_params['recordset_id'] = local_var_params['recordset_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v2.1/zones/{zone_id}/recordsets/{recordset_id}',
method='PUT',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='UpdateRecordSetsResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def batch_create_tag(self, request):
"""为指定实例批量添加或删除标签
为指定实例批量添加或删除标签
:param BatchCreateTagRequest request
:return: BatchCreateTagResponse
"""
return self.batch_create_tag_with_http_info(request)
def batch_create_tag_with_http_info(self, request):
"""为指定实例批量添加或删除标签
为指定实例批量添加或删除标签
:param BatchCreateTagRequest request
:return: BatchCreateTagResponse
"""
all_params = ['resource_type', 'resource_id', 'batch_hand_tags']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'resource_type' in local_var_params:
path_params['resource_type'] = local_var_params['resource_type']
if 'resource_id' in local_var_params:
path_params['resource_id'] = local_var_params['resource_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v2/{project_id}/{resource_type}/{resource_id}/tags/action',
method='POST',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='BatchCreateTagResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
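    # Usage sketch (illustrative): adding or removing tags in one call. The
    # resource_type and resource_id attributes map to the path parameters above;
    # the body attribute stands in for the batch_hand_tags payload (an action
    # plus a list of key/value tags) and is an assumption about the request class.
    #
    #     request = BatchCreateTagRequest()
    #     request.resource_type = "resource-type"
    #     request.resource_id = "resource-uuid"
    #     request.body = batch_hand_tags
    #     response = client.batch_create_tag(request)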
def create_tag(self, request):
"""为指定实例添加标签
为指定实例添加标签
:param CreateTagRequest request
:return: CreateTagResponse
"""
return self.create_tag_with_http_info(request)
def create_tag_with_http_info(self, request):
"""为指定实例添加标签
为指定实例添加标签
:param CreateTagRequest request
:return: CreateTagResponse
"""
all_params = ['resource_type', 'resource_id', 'create_tag_req']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'resource_type' in local_var_params:
path_params['resource_type'] = local_var_params['resource_type']
if 'resource_id' in local_var_params:
path_params['resource_id'] = local_var_params['resource_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v2/{project_id}/{resource_type}/{resource_id}/tags',
method='POST',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='CreateTagResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def delete_tag(self, request):
"""删除资源标签
删除资源标签
:param DeleteTagRequest request
:return: DeleteTagResponse
"""
return self.delete_tag_with_http_info(request)
def delete_tag_with_http_info(self, request):
"""删除资源标签
删除资源标签
:param DeleteTagRequest request
:return: DeleteTagResponse
"""
all_params = ['resource_type', 'resource_id', 'key']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'resource_type' in local_var_params:
path_params['resource_type'] = local_var_params['resource_type']
if 'resource_id' in local_var_params:
path_params['resource_id'] = local_var_params['resource_id']
if 'key' in local_var_params:
path_params['key'] = local_var_params['key']
query_params = []
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v2/{project_id}/{resource_type}/{resource_id}/tags/{key}',
method='DELETE',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='DeleteTagResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def list_tag(self, request):
"""使用标签查询资源实例
使用标签查询资源实例
:param ListTagRequest request
:return: ListTagResponse
"""
return self.list_tag_with_http_info(request)
def list_tag_with_http_info(self, request):
"""使用标签查询资源实例
使用标签查询资源实例
:param ListTagRequest request
:return: ListTagResponse
"""
all_params = ['resource_type', 'list_tag_req']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'resource_type' in local_var_params:
path_params['resource_type'] = local_var_params['resource_type']
query_params = []
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v2/{project_id}/{resource_type}/resource_instances/action',
method='POST',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='ListTagResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def list_tags(self, request):
"""查询指定实例类型的所有标签集合
查询指定实例类型的所有标签集合
:param ListTagsRequest request
:return: ListTagsResponse
"""
return self.list_tags_with_http_info(request)
def list_tags_with_http_info(self, request):
"""查询指定实例类型的所有标签集合
查询指定实例类型的所有标签集合
:param ListTagsRequest request
:return: ListTagsResponse
"""
all_params = ['resource_type']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'resource_type' in local_var_params:
path_params['resource_type'] = local_var_params['resource_type']
query_params = []
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v2/{project_id}/{resource_type}/tags',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='ListTagsResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def show_resource_tag(self, request):
"""查询指定实例的标签信息
查询指定实例的标签信息
:param ShowResourceTagRequest request
:return: ShowResourceTagResponse
"""
return self.show_resource_tag_with_http_info(request)
def show_resource_tag_with_http_info(self, request):
"""查询指定实例的标签信息
查询指定实例的标签信息
:param ShowResourceTagRequest request
:return: ShowResourceTagResponse
"""
all_params = ['resource_type', 'resource_id']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'resource_type' in local_var_params:
path_params['resource_type'] = local_var_params['resource_type']
if 'resource_id' in local_var_params:
path_params['resource_id'] = local_var_params['resource_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v2/{project_id}/{resource_type}/{resource_id}/tags',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='ShowResourceTagResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def associate_router(self, request):
"""在内网Zone上关联VPC
在内网Zone上关联VPC
:param AssociateRouterRequest request
:return: AssociateRouterResponse
"""
return self.associate_router_with_http_info(request)
def associate_router_with_http_info(self, request):
"""在内网Zone上关联VPC
在内网Zone上关联VPC
:param AssociateRouterRequest request
:return: AssociateRouterResponse
"""
all_params = ['zone_id', 'associate_router_req']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'zone_id' in local_var_params:
path_params['zone_id'] = local_var_params['zone_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v2/zones/{zone_id}/associaterouter',
method='POST',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='AssociateRouterResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
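    # Usage sketch (illustrative): associating a VPC with a private zone. zone_id
    # maps to the path parameter above; the body attribute stands in for the
    # associate_router_req payload (router/VPC information) and its exact shape
    # is an assumption.
    #
    #     request = AssociateRouterRequest()
    #     request.zone_id = "zone-uuid"
    #     request.body = associate_router_req
    #     response = client.associate_router(request)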
def create_private_zone(self, request):
"""创建单个内网Zone
创建单个内网Zone
:param CreatePrivateZoneRequest request
:return: CreatePrivateZoneResponse
"""
return self.create_private_zone_with_http_info(request)
def create_private_zone_with_http_info(self, request):
"""创建单个内网Zone
创建单个内网Zone
:param CreatePrivateZoneRequest request
:return: CreatePrivateZoneResponse
"""
all_params = ['create_private_zone_req']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v2/zones',
method='POST',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='CreatePrivateZoneResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
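    # Usage sketch (illustrative): creating a private zone. The request carries
    # only a body; the body attribute and the fields hinted at in the comment are
    # assumptions about the generated CreatePrivateZoneRequest class.
    #
    #     request = CreatePrivateZoneRequest()
    #     request.body = create_private_zone_req  # zone name, router/VPC, ...
    #     response = client.create_private_zone(request)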
def create_public_zone(self, request):
"""创建单个公网Zone
创建单个公网Zone
:param CreatePublicZoneRequest request
:return: CreatePublicZoneResponse
"""
return self.create_public_zone_with_http_info(request)
def create_public_zone_with_http_info(self, request):
"""创建单个公网Zone
创建单个公网Zone
:param CreatePublicZoneRequest request
:return: CreatePublicZoneResponse
"""
all_params = ['create_public_zone']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v2/zones',
method='POST',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='CreatePublicZoneResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def delete_private_zone(self, request):
"""删除单个内网Zone
删除单个内网Zone
:param DeletePrivateZoneRequest request
:return: DeletePrivateZoneResponse
"""
return self.delete_private_zone_with_http_info(request)
def delete_private_zone_with_http_info(self, request):
"""删除单个内网Zone
删除单个内网Zone
:param DeletePrivateZoneRequest request
:return: DeletePrivateZoneResponse
"""
all_params = ['zone_id']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'zone_id' in local_var_params:
path_params['zone_id'] = local_var_params['zone_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v2/zones/{zone_id}',
method='DELETE',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='DeletePrivateZoneResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def delete_public_zone(self, request):
"""删除单个公网Zone
删除单个公网Zone
:param DeletePublicZoneRequest request
:return: DeletePublicZoneResponse
"""
return self.delete_public_zone_with_http_info(request)
def delete_public_zone_with_http_info(self, request):
"""删除单个公网Zone
删除单个公网Zone
:param DeletePublicZoneRequest request
:return: DeletePublicZoneResponse
"""
all_params = ['zone_id']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'zone_id' in local_var_params:
path_params['zone_id'] = local_var_params['zone_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v2/zones/{zone_id}',
method='DELETE',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='DeletePublicZoneResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def disassociate_router(self, request):
"""在Private Zone上解关联VPC
在Private Zone上解关联VPC
:param DisassociateRouterRequest request
:return: DisassociateRouterResponse
"""
return self.disassociate_router_with_http_info(request)
def disassociate_router_with_http_info(self, request):
"""在Private Zone上解关联VPC
在Private Zone上解关联VPC
:param DisassociateRouterRequest request
:return: DisassociateRouterResponse
"""
all_params = ['zone_id', 'disassociaterouter_req']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'zone_id' in local_var_params:
path_params['zone_id'] = local_var_params['zone_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v2/zones/{zone_id}/disassociaterouter',
method='POST',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='DisassociateRouterResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def list_private_zones(self, request):
"""查询内网Zone的列表
查询内网Zone的列表
:param ListPrivateZonesRequest request
:return: ListPrivateZonesResponse
"""
return self.list_private_zones_with_http_info(request)
def list_private_zones_with_http_info(self, request):
"""查询内网Zone的列表
查询内网Zone的列表
:param ListPrivateZonesRequest request
:return: ListPrivateZonesResponse
"""
all_params = ['type', 'limit', 'marker', 'offset', 'tags', 'name', 'status', 'enterprise_project_id']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
if 'type' in local_var_params:
query_params.append(('type', local_var_params['type']))
if 'limit' in local_var_params:
query_params.append(('limit', local_var_params['limit']))
if 'marker' in local_var_params:
query_params.append(('marker', local_var_params['marker']))
if 'offset' in local_var_params:
query_params.append(('offset', local_var_params['offset']))
if 'tags' in local_var_params:
query_params.append(('tags', local_var_params['tags']))
if 'name' in local_var_params:
query_params.append(('name', local_var_params['name']))
if 'status' in local_var_params:
query_params.append(('status', local_var_params['status']))
if 'enterprise_project_id' in local_var_params:
query_params.append(('enterprise_project_id', local_var_params['enterprise_project_id']))
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v2/zones',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='ListPrivateZonesResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
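    # Usage sketch (illustrative): listing private zones. The "type" query
    # parameter above is what distinguishes private from public zones here;
    # setting it and the paging attributes directly on ListPrivateZonesRequest
    # is an assumption.
    #
    #     request = ListPrivateZonesRequest()
    #     request.type = "private"
    #     request.limit = 50
    #     response = client.list_private_zones(request)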
def list_public_zones(self, request):
"""查询公网Zone的列表
查询公网Zone的列表
:param ListPublicZonesRequest request
:return: ListPublicZonesResponse
"""
return self.list_public_zones_with_http_info(request)
def list_public_zones_with_http_info(self, request):
"""查询公网Zone的列表
查询公网Zone的列表
:param ListPublicZonesRequest request
:return: ListPublicZonesResponse
"""
all_params = ['type', 'limit', 'marker', 'offset', 'tags', 'name', 'status', 'enterprise_project_id']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
query_params = []
if 'type' in local_var_params:
query_params.append(('type', local_var_params['type']))
if 'limit' in local_var_params:
query_params.append(('limit', local_var_params['limit']))
if 'marker' in local_var_params:
query_params.append(('marker', local_var_params['marker']))
if 'offset' in local_var_params:
query_params.append(('offset', local_var_params['offset']))
if 'tags' in local_var_params:
query_params.append(('tags', local_var_params['tags']))
if 'name' in local_var_params:
query_params.append(('name', local_var_params['name']))
if 'status' in local_var_params:
query_params.append(('status', local_var_params['status']))
if 'enterprise_project_id' in local_var_params:
query_params.append(('enterprise_project_id', local_var_params['enterprise_project_id']))
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v2/zones',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='ListPublicZonesResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def show_private_zone(self, request):
"""查询单个内网Zone
查询单个内网Zone
:param ShowPrivateZoneRequest request
:return: ShowPrivateZoneResponse
"""
return self.show_private_zone_with_http_info(request)
def show_private_zone_with_http_info(self, request):
"""查询单个内网Zone
查询单个内网Zone
:param ShowPrivateZoneRequest request
:return: ShowPrivateZoneResponse
"""
all_params = ['zone_id']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'zone_id' in local_var_params:
path_params['zone_id'] = local_var_params['zone_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v2/zones/{zone_id}',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='ShowPrivateZoneResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def show_private_zone_name_server(self, request):
"""查询内网Zone的名称服务器
查询内网Zone的列表
:param ShowPrivateZoneNameServerRequest request
:return: ShowPrivateZoneNameServerResponse
"""
return self.show_private_zone_name_server_with_http_info(request)
def show_private_zone_name_server_with_http_info(self, request):
"""查询内网Zone的名称服务器
查询内网Zone的列表
:param ShowPrivateZoneNameServerRequest request
:return: ShowPrivateZoneNameServerResponse
"""
all_params = ['zone_id']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'zone_id' in local_var_params:
path_params['zone_id'] = local_var_params['zone_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v2/zones/{zone_id}/nameservers',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='ShowPrivateZoneNameServerResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def show_public_zone(self, request):
"""查询单个公网Zone
查询单个公网Zone
:param ShowPublicZoneRequest request
:return: ShowPublicZoneResponse
"""
return self.show_public_zone_with_http_info(request)
def show_public_zone_with_http_info(self, request):
"""查询单个公网Zone
查询单个公网Zone
:param ShowPublicZoneRequest request
:return: ShowPublicZoneResponse
"""
all_params = ['zone_id']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'zone_id' in local_var_params:
path_params['zone_id'] = local_var_params['zone_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v2/zones/{zone_id}',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='ShowPublicZoneResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def show_public_zone_name_server(self, request):
"""查询单个公网Zone的名称服务器
查询单个公网Zone的名称服务器
:param ShowPublicZoneNameServerRequest request
:return: ShowPublicZoneNameServerResponse
"""
return self.show_public_zone_name_server_with_http_info(request)
def show_public_zone_name_server_with_http_info(self, request):
"""查询单个公网Zone的名称服务器
查询单个公网Zone的名称服务器
:param ShowPublicZoneNameServerRequest request
:return: ShowPublicZoneNameServerResponse
"""
all_params = ['zone_id']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'zone_id' in local_var_params:
path_params['zone_id'] = local_var_params['zone_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v2/zones/{zone_id}/nameservers',
method='GET',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='ShowPublicZoneNameServerResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def update_private_zone(self, request):
"""修改单个Zone
修改单个Zone
:param UpdatePrivateZoneRequest request
:return: UpdatePrivateZoneResponse
"""
return self.update_private_zone_with_http_info(request)
def update_private_zone_with_http_info(self, request):
"""修改单个Zone
修改单个Zone
:param UpdatePrivateZoneRequest request
:return: UpdatePrivateZoneResponse
"""
all_params = ['zone_id', 'update_private_zone_info_req']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'zone_id' in local_var_params:
path_params['zone_id'] = local_var_params['zone_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v2/zones/{zone_id}',
method='PATCH',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='UpdatePrivateZoneResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def update_public_zone(self, request):
"""修改单个Zone
修改单个Zone
:param UpdatePublicZoneRequest request
:return: UpdatePublicZoneResponse
"""
return self.update_public_zone_with_http_info(request)
def update_public_zone_with_http_info(self, request):
"""修改单个Zone
修改单个Zone
:param UpdatePublicZoneRequest request
:return: UpdatePublicZoneResponse
"""
all_params = ['zone_id', 'update_public_zone_info']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'zone_id' in local_var_params:
path_params['zone_id'] = local_var_params['zone_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v2/zones/{zone_id}',
method='PATCH',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='UpdatePublicZoneResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def update_public_zone_status(self, request):
"""设置单个公网Zone状态,支持暂停、启用Zone
设置单个公网Zone状态,支持暂停、启用Zone
:param UpdatePublicZoneStatusRequest request
:return: UpdatePublicZoneStatusResponse
"""
return self.update_public_zone_status_with_http_info(request)
def update_public_zone_status_with_http_info(self, request):
"""设置单个公网Zone状态,支持暂停、启用Zone
设置单个公网Zone状态,支持暂停、启用Zone
:param UpdatePublicZoneStatusRequest request
:return: UpdatePublicZoneStatusResponse
"""
all_params = ['zone_id', 'update_public_zone_status']
local_var_params = {}
for attr in request.attribute_map:
if hasattr(request, attr):
local_var_params[attr] = getattr(request, attr)
collection_formats = {}
path_params = {}
if 'zone_id' in local_var_params:
path_params['zone_id'] = local_var_params['zone_id']
query_params = []
header_params = {}
form_params = {}
body_params = None
if 'body' in local_var_params:
body_params = local_var_params['body']
if isinstance(request, SdkStreamRequest):
body_params = request.get_file_stream()
response_headers = []
header_params['Content-Type'] = http_utils.select_header_content_type(
['application/json'])
auth_settings = []
return self.call_api(
resource_path='/v2/zones/{zone_id}/statuses',
method='PUT',
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body_params,
post_params=form_params,
response_type='UpdatePublicZoneStatusResponse',
response_headers=response_headers,
auth_settings=auth_settings,
collection_formats=collection_formats,
request_type=request.__class__.__name__)
def call_api(self, resource_path, method, path_params=None, query_params=None, header_params=None, body=None,
post_params=None, response_type=None, response_headers=None, auth_settings=None,
collection_formats=None, request_type=None):
"""Makes the HTTP request and returns deserialized data.
:param resource_path: Path to method endpoint.
:param method: Method to call.
:param path_params: Path parameters in the url.
:param query_params: Query parameters in the url.
:param header_params: Header parameters to be placed in the request header.
:param body: Request body.
:param post_params dict: Request post form parameters,
for `application/x-www-form-urlencoded`, `multipart/form-data`.
        :param auth_settings list: Auth settings names for the request.
        :param response_type: Response data type.
        :param response_headers: Headers to be added to the response data.
:param collection_formats: dict of collection formats for path, query,
header, and post parameters.
:param request_type: Request data type.
:return:
Return the response directly.
"""
return self.do_http_request(
method=method,
resource_path=resource_path,
path_params=path_params,
query_params=query_params,
header_params=header_params,
body=body,
post_params=post_params,
response_type=response_type,
response_headers=response_headers,
collection_formats=collection_formats,
request_type=request_type)
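# Usage sketch (illustrative only): each public method above delegates to its
# *_with_http_info twin, which collects path/query parameters by walking
# request.attribute_map and then hands everything to call_api(). Assuming a
# configured client instance and the generated request classes (client
# construction -- credentials, region, endpoint -- is outside this file), a
# call might look like:
#
#     request = ListPublicZonesRequest(limit=10, offset=0)
#     response = client.list_public_zones(request)
#
# Any request object that exposes attribute_map plus matching attributes will
# work with the parameter-collection loop used by these methods.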
#!/usr/bin/env python3
import lsimpy
from bench_utils import *
lsim = lsimpy.LSimContext()
HIGH = lsimpy.ValueTrue
LOW = lsimpy.ValueFalse
NONE = lsimpy.ValueUndefined
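# Each truth table below is a list of [inputs, expected_outputs] rows, where both
# entries map pin names to HIGH/LOW/NONE values. Rows are applied in order against
# the same circuit instance, so rows whose expected outputs repeat the previous
# state (e.g. R=LOW, S=LOW for the S-R latch) check that the latch holds its
# value. The actual evaluation is done by run_thruth_table() from bench_utils,
# which is not shown in this file.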
def test_sr_latch():
truth_table = [
[{'R': HIGH, 'S': LOW}, {'Q': LOW, '/Q': HIGH}],
[{'R': LOW, 'S': LOW}, {'Q': LOW, '/Q': HIGH}],
[{'R': LOW, 'S': HIGH}, {'Q': HIGH, '/Q': LOW}],
[{'R': LOW, 'S': LOW}, {'Q': HIGH, '/Q': LOW}],
[{'R': HIGH, 'S': HIGH}, {'Q': LOW, '/Q': LOW}]
]
run_thruth_table(lsim, "S-R Latch", truth_table)
def test_gated_sr_latch():
truth_table = [
[{'En': HIGH, 'R': HIGH, 'S': LOW}, {'Q': LOW, '/Q': HIGH}],
[{'En': LOW, 'R': HIGH, 'S': LOW}, {'Q': LOW, '/Q': HIGH}],
[{'En': LOW, 'R': LOW, 'S': LOW}, {'Q': LOW, '/Q': HIGH}],
[{'En': LOW, 'R': LOW, 'S': HIGH}, {'Q': LOW, '/Q': HIGH}],
[{'En': HIGH, 'R': LOW, 'S': HIGH}, {'Q': HIGH, '/Q': LOW}],
[{'En': LOW, 'R': LOW, 'S': LOW}, {'Q': HIGH, '/Q': LOW}],
[{'En': HIGH, 'R': LOW, 'S': LOW}, {'Q': HIGH, '/Q': LOW}],
[{'En': HIGH, 'R': HIGH, 'S': LOW}, {'Q': LOW, '/Q': HIGH}]
]
run_thruth_table(lsim, "Gated S-R Latch", truth_table)
def test_d_latch():
truth_table = [
[{'En': HIGH, 'D': LOW}, {'Q': LOW, '/Q': HIGH}],
[{'En': LOW, 'D': LOW}, {'Q': LOW, '/Q': HIGH}],
[{'En': LOW, 'D': LOW}, {'Q': LOW, '/Q': HIGH}],
[{'En': LOW, 'D': HIGH}, {'Q': LOW, '/Q': HIGH}],
[{'En': HIGH, 'D': HIGH}, {'Q': HIGH, '/Q': LOW}],
[{'En': LOW, 'D': LOW}, {'Q': HIGH, '/Q': LOW}],
[{'En': HIGH, 'D': LOW}, {'Q': LOW, '/Q': HIGH}]
]
run_thruth_table(lsim, "D Latch", truth_table)
def test_async_sr_latch():
truth_table = [
[{'En': LOW, 'Res': HIGH, 'Pre': LOW, 'R': HIGH, 'S': LOW}, {'Q': LOW, '/Q': HIGH}],
[{'En': LOW, 'Res': LOW, 'Pre': HIGH, 'R': HIGH, 'S': LOW}, {'Q': HIGH, '/Q': LOW}],
[{'En': LOW, 'Res': HIGH, 'Pre': LOW, 'R': HIGH, 'S': LOW}, {'Q': LOW, '/Q': HIGH}],
[{'En': LOW, 'Res': LOW, 'Pre': LOW, 'R': HIGH, 'S': LOW}, {'Q': LOW, '/Q': HIGH}],
[{'En': LOW, 'Res': LOW, 'Pre': LOW, 'R': LOW, 'S': LOW}, {'Q': LOW, '/Q': HIGH}],
[{'En': LOW, 'Res': LOW, 'Pre': LOW, 'R': LOW, 'S': HIGH}, {'Q': LOW, '/Q': HIGH}],
[{'En': HIGH, 'Res': LOW, 'Pre': LOW, 'R': LOW, 'S': HIGH}, {'Q': HIGH, '/Q': LOW}],
[{'En': LOW, 'Res': LOW, 'Pre': LOW, 'R': LOW, 'S': LOW}, {'Q': HIGH, '/Q': LOW}],
[{'En': HIGH, 'Res': LOW, 'Pre': LOW, 'R': LOW, 'S': LOW}, {'Q': HIGH, '/Q': LOW}],
[{'En': HIGH, 'Res': LOW, 'Pre': LOW, 'R': HIGH, 'S': LOW}, {'Q': LOW, '/Q': HIGH}]
]
run_thruth_table(lsim, "Async S-R Latch", truth_table)
def test_sr_flipflop():
truth_table = [
[{'Clk': LOW, 'R': HIGH, 'S': LOW}, {}],
[{'Clk': HIGH, 'R': HIGH, 'S': LOW}, {'Q': LOW, '/Q': HIGH}],
[{'Clk': HIGH, 'R': LOW, 'S': LOW}, {'Q': LOW, '/Q': HIGH}],
[{'Clk': HIGH, 'R': LOW, 'S': HIGH}, {'Q': LOW, '/Q': HIGH}],
[{'Clk': LOW, 'R': LOW, 'S': HIGH}, {'Q': LOW, '/Q': HIGH}],
[{'Clk': LOW, 'R': HIGH, 'S': LOW}, {'Q': LOW, '/Q': HIGH}],
[{'Clk': LOW, 'R': LOW, 'S': HIGH}, {'Q': LOW, '/Q': HIGH}],
[{'Clk': HIGH, 'R': LOW, 'S': HIGH}, {'Q': HIGH, '/Q': LOW}]
]
run_thruth_table(lsim, "S-R FlipFlop", truth_table)
def test_d_flipflop():
truth_table = [
[{'Clk': LOW, 'D': LOW}, {}],
[{'Clk': LOW, 'D': LOW}, {}],
[{'Clk': LOW, 'D': HIGH}, {}],
[{'Clk': HIGH, 'D': HIGH}, {'Q': HIGH, '/Q': LOW}],
[{'Clk': HIGH, 'D': HIGH}, {'Q': HIGH, '/Q': LOW}],
[{'Clk': HIGH, 'D': LOW}, {'Q': HIGH, '/Q': LOW}],
[{'Clk': LOW, 'D': LOW}, {'Q': HIGH, '/Q': LOW}],
[{'Clk': LOW, 'D': LOW}, {'Q': HIGH, '/Q': LOW}],
[{'Clk': LOW, 'D': HIGH}, {'Q': HIGH, '/Q': LOW}],
[{'Clk': HIGH, 'D': HIGH}, {'Q': HIGH, '/Q': LOW}],
[{'Clk': HIGH, 'D': LOW}, {'Q': HIGH, '/Q': LOW}],
[{'Clk': HIGH, 'D': LOW}, {'Q': HIGH, '/Q': LOW}],
[{'Clk': LOW, 'D': LOW}, {'Q': HIGH, '/Q': LOW}],
[{'Clk': LOW, 'D': LOW}, {'Q': HIGH, '/Q': LOW}],
[{'Clk': LOW, 'D': LOW}, {'Q': HIGH, '/Q': LOW}],
[{'Clk': HIGH, 'D': LOW}, {'Q': LOW, '/Q': HIGH}],
[{'Clk': HIGH, 'D': HIGH}, {'Q': LOW, '/Q': HIGH}],
[{'Clk': HIGH, 'D': HIGH}, {'Q': LOW, '/Q': HIGH}],
[{'Clk': LOW, 'D': HIGH}, {'Q': LOW, '/Q': HIGH}],
[{'Clk': LOW, 'D': LOW}, {'Q': LOW, '/Q': HIGH}],
[{'Clk': LOW, 'D': HIGH}, {'Q': LOW, '/Q': HIGH}],
[{'Clk': HIGH, 'D': HIGH}, {'Q': HIGH, '/Q': LOW}],
[{'Clk': HIGH, 'D': HIGH}, {'Q': HIGH, '/Q': LOW}],
[{'Clk': HIGH, 'D': LOW}, {'Q': HIGH, '/Q': LOW}]
]
run_thruth_table(lsim, "D FlipFlop", truth_table)
def test_jk_flipflop():
truth_table = [
[{'Clk': LOW, 'J': LOW, 'K': HIGH}, {}],
[{'Clk': HIGH, 'J': LOW, 'K': HIGH}, {'Q': LOW, '/Q': HIGH}],
[{'Clk': HIGH, 'J': HIGH, 'K': LOW}, {'Q': LOW, '/Q': HIGH}],
[{'Clk': LOW, 'J': HIGH, 'K': LOW}, {'Q': LOW, '/Q': HIGH}],
[{'Clk': LOW, 'J': HIGH, 'K': LOW}, {'Q': LOW, '/Q': HIGH}],
[{'Clk': HIGH, 'J': HIGH, 'K': LOW}, {'Q': HIGH, '/Q': LOW}],
[{'Clk': HIGH, 'J': LOW, 'K': LOW}, {'Q': HIGH, '/Q': LOW}],
[{'Clk': LOW, 'J': LOW, 'K': HIGH}, {'Q': HIGH, '/Q': LOW}],
[{'Clk': LOW, 'J': HIGH, 'K': HIGH}, {'Q': HIGH, '/Q': LOW}],
[{'Clk': HIGH, 'J': HIGH, 'K': HIGH}, {'Q': LOW, '/Q': HIGH}],
[{'Clk': LOW, 'J': HIGH, 'K': HIGH}, {'Q': LOW, '/Q': HIGH}],
[{'Clk': HIGH, 'J': HIGH, 'K': HIGH}, {'Q': HIGH, '/Q': LOW}]
]
run_thruth_table(lsim, "J-K FlipFlop", truth_table)
def test_async_sr_flipflop():
truth_table = [
[{'Clk': LOW, 'Res': HIGH, 'Pre': LOW, 'R': HIGH, 'S': LOW}, {'Q': LOW, '/Q': HIGH}],
[{'Clk': LOW, 'Res': LOW, 'Pre': HIGH, 'R': HIGH, 'S': LOW}, {'Q': HIGH, '/Q': LOW}],
[{'Clk': LOW, 'Res': HIGH, 'Pre': LOW, 'R': HIGH, 'S': LOW}, {'Q': LOW, '/Q': HIGH}],
[{'Clk': HIGH, 'Res': LOW, 'Pre': LOW, 'R': HIGH, 'S': LOW}, {'Q': LOW, '/Q': HIGH}],
[{'Clk': HIGH, 'Res': LOW, 'Pre': LOW, 'R': LOW, 'S': LOW}, {'Q': LOW, '/Q': HIGH}],
[{'Clk': HIGH, 'Res': LOW, 'Pre': LOW, 'R': LOW, 'S': HIGH}, {'Q': LOW, '/Q': HIGH}],
[{'Clk': LOW, 'Res': LOW, 'Pre': LOW, 'R': LOW, 'S': HIGH}, {'Q': LOW, '/Q': HIGH}],
[{'Clk': LOW, 'Res': LOW, 'Pre': LOW, 'R': HIGH, 'S': LOW}, {'Q': LOW, '/Q': HIGH}],
[{'Clk': LOW, 'Res': LOW, 'Pre': LOW, 'R': LOW, 'S': HIGH}, {'Q': LOW, '/Q': HIGH}],
[{'Clk': HIGH, 'Res': LOW, 'Pre': LOW, 'R': LOW, 'S': HIGH}, {'Q': HIGH, '/Q': LOW}]
]
run_thruth_table(lsim, "Async S-R FlipFlop", truth_table)
def test_async_d_flipflop():
truth_table = [
[{'Clk': LOW, 'Res': HIGH, 'Pre': LOW, 'D': LOW}, {'Q': LOW, '/Q': HIGH}],
[{'Clk': LOW, 'Res': LOW, 'Pre': HIGH, 'D': LOW}, {'Q': HIGH, '/Q': LOW}],
[{'Clk': LOW, 'Res': HIGH, 'Pre': LOW, 'D': LOW}, {'Q': LOW, '/Q': HIGH}],
[{'Clk': LOW, 'Res': LOW, 'Pre': LOW, 'D': HIGH}, {'Q': LOW, '/Q': HIGH}],
[{'Clk': HIGH, 'Res': LOW, 'Pre': LOW, 'D': HIGH}, {'Q': HIGH, '/Q': LOW}],
[{'Clk': HIGH, 'Res': LOW, 'Pre': LOW, 'D': HIGH}, {'Q': HIGH, '/Q': LOW}],
[{'Clk': HIGH, 'Res': LOW, 'Pre': LOW, 'D': LOW}, {'Q': HIGH, '/Q': LOW}],
[{'Clk': LOW, 'Res': LOW, 'Pre': LOW, 'D': LOW}, {'Q': HIGH, '/Q': LOW}],
[{'Clk': LOW, 'Res': LOW, 'Pre': LOW, 'D': LOW}, {'Q': HIGH, '/Q': LOW}],
[{'Clk': LOW, 'Res': LOW, 'Pre': LOW, 'D': HIGH}, {'Q': HIGH, '/Q': LOW}],
[{'Clk': HIGH, 'Res': LOW, 'Pre': LOW, 'D': HIGH}, {'Q': HIGH, '/Q': LOW}],
[{'Clk': HIGH, 'Res': LOW, 'Pre': LOW, 'D': LOW}, {'Q': HIGH, '/Q': LOW}],
[{'Clk': HIGH, 'Res': LOW, 'Pre': LOW, 'D': LOW}, {'Q': HIGH, '/Q': LOW}],
[{'Clk': LOW, 'Res': LOW, 'Pre': LOW, 'D': LOW}, {'Q': HIGH, '/Q': LOW}],
[{'Clk': LOW, 'Res': LOW, 'Pre': LOW, 'D': LOW}, {'Q': HIGH, '/Q': LOW}],
[{'Clk': LOW, 'Res': LOW, 'Pre': LOW, 'D': LOW}, {'Q': HIGH, '/Q': LOW}],
[{'Clk': HIGH, 'Res': LOW, 'Pre': LOW, 'D': LOW}, {'Q': LOW, '/Q': HIGH}],
[{'Clk': HIGH, 'Res': LOW, 'Pre': LOW, 'D': HIGH}, {'Q': LOW, '/Q': HIGH}],
[{'Clk': HIGH, 'Res': LOW, 'Pre': LOW, 'D': HIGH}, {'Q': LOW, '/Q': HIGH}],
[{'Clk': LOW, 'Res': LOW, 'Pre': LOW, 'D': HIGH}, {'Q': LOW, '/Q': HIGH}],
[{'Clk': LOW, 'Res': LOW, 'Pre': LOW, 'D': LOW}, {'Q': LOW, '/Q': HIGH}],
[{'Clk': LOW, 'Res': LOW, 'Pre': LOW, 'D': HIGH}, {'Q': LOW, '/Q': HIGH}],
[{'Clk': HIGH, 'Res': LOW, 'Pre': LOW, 'D': HIGH}, {'Q': HIGH, '/Q': LOW}],
[{'Clk': HIGH, 'Res': LOW, 'Pre': LOW, 'D': HIGH}, {'Q': HIGH, '/Q': LOW}],
[{'Clk': HIGH, 'Res': LOW, 'Pre': LOW, 'D': LOW}, {'Q': HIGH, '/Q': LOW}]
]
run_thruth_table(lsim, "Async D FlipFlop", truth_table)
def test_async_jk_flipflop():
truth_table = [
[{'Clk': LOW, 'Res': HIGH, 'Pre': LOW, 'J': LOW, 'K': HIGH}, {'Q': LOW, '/Q': HIGH}],
[{'Clk': LOW, 'Res': LOW, 'Pre': HIGH, 'J': LOW, 'K': HIGH}, {'Q': HIGH, '/Q': LOW}],
[{'Clk': LOW, 'Res': HIGH, 'Pre': LOW, 'J': LOW, 'K': HIGH}, {'Q': LOW, '/Q': HIGH}],
[{'Clk': HIGH, 'Res': LOW, 'Pre': LOW, 'J': LOW, 'K': HIGH}, {'Q': LOW, '/Q': HIGH}],
[{'Clk': HIGH, 'Res': LOW, 'Pre': LOW, 'J': HIGH, 'K': LOW}, {'Q': LOW, '/Q': HIGH}],
[{'Clk': LOW, 'Res': LOW, 'Pre': LOW, 'J': HIGH, 'K': LOW}, {'Q': LOW, '/Q': HIGH}],
[{'Clk': LOW, 'Res': LOW, 'Pre': LOW, 'J': HIGH, 'K': LOW}, {'Q': LOW, '/Q': HIGH}],
[{'Clk': HIGH, 'Res': LOW, 'Pre': LOW, 'J': HIGH, 'K': LOW}, {'Q': HIGH, '/Q': LOW}],
[{'Clk': HIGH, 'Res': LOW, 'Pre': LOW, 'J': LOW, 'K': LOW}, {'Q': HIGH, '/Q': LOW}],
[{'Clk': LOW, 'Res': LOW, 'Pre': LOW, 'J': LOW, 'K': HIGH}, {'Q': HIGH, '/Q': LOW}],
[{'Clk': LOW, 'Res': LOW, 'Pre': LOW, 'J': HIGH, 'K': HIGH}, {'Q': HIGH, '/Q': LOW}],
[{'Clk': HIGH, 'Res': LOW, 'Pre': LOW, 'J': HIGH, 'K': HIGH}, {'Q': LOW, '/Q': HIGH}],
[{'Clk': LOW, 'Res': LOW, 'Pre': LOW, 'J': HIGH, 'K': HIGH}, {'Q': LOW, '/Q': HIGH}],
[{'Clk': HIGH, 'Res': LOW, 'Pre': LOW, 'J': HIGH, 'K': HIGH}, {'Q': HIGH, '/Q': LOW}]
]
run_thruth_table(lsim, "Async J-K FlipFlop", truth_table)
def test_masterslave_d_flipflop():
truth_table = [
[{'Clk': LOW, 'Res': HIGH, 'Pre': LOW, 'D': LOW}, {'Q': LOW, '/Q': HIGH}],
[{'Clk': LOW, 'Res': LOW, 'Pre': HIGH, 'D': LOW}, {'Q': HIGH, '/Q': LOW}],
[{'Clk': LOW, 'Res': HIGH, 'Pre': LOW, 'D': LOW}, {'Q': LOW, '/Q': HIGH}],
[{'Clk': LOW, 'Res': LOW, 'Pre': LOW, 'D': HIGH}, {'Q': LOW, '/Q': HIGH}],
[{'Clk': HIGH, 'Res': LOW, 'Pre': LOW, 'D': HIGH}, {'Q': LOW, '/Q': HIGH}],
[{'Clk': HIGH, 'Res': LOW, 'Pre': LOW, 'D': HIGH}, {'Q': LOW, '/Q': HIGH}],
[{'Clk': HIGH, 'Res': LOW, 'Pre': LOW, 'D': HIGH}, {'Q': LOW, '/Q': HIGH}],
[{'Clk': LOW, 'Res': LOW, 'Pre': LOW, 'D': HIGH}, {'Q': HIGH, '/Q': LOW}],
[{'Clk': LOW, 'Res': LOW, 'Pre': LOW, 'D': HIGH}, {'Q': HIGH, '/Q': LOW}],
[{'Clk': LOW, 'Res': LOW, 'Pre': LOW, 'D': HIGH}, {'Q': HIGH, '/Q': LOW}],
[{'Clk': HIGH, 'Res': LOW, 'Pre': LOW, 'D': HIGH}, {'Q': HIGH, '/Q': LOW}],
[{'Clk': HIGH, 'Res': LOW, 'Pre': LOW, 'D': LOW}, {'Q': HIGH, '/Q': LOW}],
[{'Clk': HIGH, 'Res': LOW, 'Pre': LOW, 'D': LOW}, {'Q': HIGH, '/Q': LOW}],
[{'Clk': LOW, 'Res': LOW, 'Pre': LOW, 'D': LOW}, {'Q': LOW, '/Q': HIGH}],
[{'Clk': LOW, 'Res': LOW, 'Pre': LOW, 'D': LOW}, {'Q': LOW, '/Q': HIGH}],
[{'Clk': LOW, 'Res': LOW, 'Pre': LOW, 'D': LOW}, {'Q': LOW, '/Q': HIGH}],
[{'Clk': HIGH, 'Res': LOW, 'Pre': LOW, 'D': LOW}, {'Q': LOW, '/Q': HIGH}],
[{'Clk': HIGH, 'Res': LOW, 'Pre': LOW, 'D': HIGH}, {'Q': LOW, '/Q': HIGH}],
[{'Clk': HIGH, 'Res': LOW, 'Pre': LOW, 'D': HIGH}, {'Q': LOW, '/Q': HIGH}],
[{'Clk': LOW, 'Res': LOW, 'Pre': LOW, 'D': HIGH}, {'Q': HIGH, '/Q': LOW}],
[{'Clk': LOW, 'Res': LOW, 'Pre': LOW, 'D': LOW}, {'Q': HIGH, '/Q': LOW}],
[{'Clk': LOW, 'Res': LOW, 'Pre': LOW, 'D': HIGH}, {'Q': HIGH, '/Q': LOW}],
[{'Clk': HIGH, 'Res': LOW, 'Pre': LOW, 'D': HIGH}, {'Q': HIGH, '/Q': LOW}],
[{'Clk': HIGH, 'Res': LOW, 'Pre': LOW, 'D': HIGH}, {'Q': HIGH, '/Q': LOW}],
[{'Clk': HIGH, 'Res': LOW, 'Pre': LOW, 'D': LOW}, {'Q': HIGH, '/Q': LOW}]
]
run_thruth_table(lsim, "MasterSlave D FlipFlop", truth_table)
def test_async_d_flipflop_8bit():
truth_table = [
[{'Clk': LOW, 'Res': LOW, 'Pre': HIGH, 'D[0]': LOW, 'D[1]': LOW, 'D[2]': LOW, 'D[3]': LOW, 'D[4]': LOW, 'D[5]': LOW, 'D[6]': LOW, 'D[7]': LOW }, { 'Q[0]': HIGH, 'Q[1]': HIGH, 'Q[2]': HIGH, 'Q[3]': HIGH, 'Q[4]': HIGH, 'Q[5]': HIGH, 'Q[6]': HIGH, 'Q[7]': HIGH}],
[{'Clk': LOW, 'Res': HIGH, 'Pre': LOW, 'D[0]': LOW, 'D[1]': LOW, 'D[2]': LOW, 'D[3]': LOW, 'D[4]': LOW, 'D[5]': LOW, 'D[6]': LOW, 'D[7]': LOW }, { 'Q[0]': LOW, 'Q[1]': LOW, 'Q[2]': LOW, 'Q[3]': LOW, 'Q[4]': LOW, 'Q[5]': LOW, 'Q[6]': LOW, 'Q[7]': LOW}]
]
for v in range(256):
inputs = {f"D[{i:}]" : HIGH if ((v >> i) & 1) == 1 else LOW for i in range(8)}
inputs.update([('Clk', HIGH), ('Res', LOW), ('Pre', LOW)])
outputs = {f"Q[{i:}]" : HIGH if ((v >> i) & 1) == 1 else LOW for i in range(8)}
truth_table.append([inputs, outputs])
inputs = {f"D[{i:}]" : HIGH if ((v >> i) & 1) == 1 else LOW for i in range(8)}
inputs.update([('Clk', LOW), ('Res', LOW), ('Pre', LOW)])
truth_table.append([inputs, outputs])
run_thruth_table(lsim, "Async D FlipFlop 8b", truth_table)
def test_async_d_flipflop_8bit_tristate():
truth_table = [
[{'Clk': LOW, 'Res': LOW, 'Pre': HIGH, 'OE': HIGH, 'D[0]': LOW, 'D[1]': LOW, 'D[2]': LOW, 'D[3]': LOW, 'D[4]': LOW, 'D[5]': LOW, 'D[6]': LOW, 'D[7]': LOW }, { 'Q[0]': HIGH, 'Q[1]': HIGH, 'Q[2]': HIGH, 'Q[3]': HIGH, 'Q[4]': HIGH, 'Q[5]': HIGH, 'Q[6]': HIGH, 'Q[7]': HIGH}],
[{'Clk': LOW, 'Res': LOW, 'Pre': HIGH, 'OE': LOW, 'D[0]': LOW, 'D[1]': LOW, 'D[2]': LOW, 'D[3]': LOW, 'D[4]': LOW, 'D[5]': LOW, 'D[6]': LOW, 'D[7]': LOW }, { 'Q[0]': NONE, 'Q[1]': NONE, 'Q[2]': NONE, 'Q[3]': NONE, 'Q[4]': NONE, 'Q[5]': NONE, 'Q[6]': NONE, 'Q[7]': NONE}],
[{'Clk': LOW, 'Res': HIGH, 'Pre': LOW, 'OE': HIGH, 'D[0]': LOW, 'D[1]': LOW, 'D[2]': LOW, 'D[3]': LOW, 'D[4]': LOW, 'D[5]': LOW, 'D[6]': LOW, 'D[7]': LOW }, { 'Q[0]': LOW, 'Q[1]': LOW, 'Q[2]': LOW, 'Q[3]': LOW, 'Q[4]': LOW, 'Q[5]': LOW, 'Q[6]': LOW, 'Q[7]': LOW}],
[{'Clk': LOW, 'Res': HIGH, 'Pre': LOW, 'OE': LOW, 'D[0]': LOW, 'D[1]': LOW, 'D[2]': LOW, 'D[3]': LOW, 'D[4]': LOW, 'D[5]': LOW, 'D[6]': LOW, 'D[7]': LOW }, { 'Q[0]': NONE, 'Q[1]': NONE, 'Q[2]': NONE, 'Q[3]': NONE, 'Q[4]': NONE, 'Q[5]': NONE, 'Q[6]': NONE, 'Q[7]': NONE}]
]
outputs_none = {f"Q[{i:}]" : NONE for i in range(8)}
for v in range(256):
inputs = {f"D[{i:}]" : HIGH if ((v >> i) & 1) == 1 else LOW for i in range(8)}
inputs.update([('Clk', HIGH), ('Res', LOW), ('Pre', LOW), ('OE', LOW)])
truth_table.append([inputs, outputs_none])
inputs = {f"D[{i:}]" : HIGH if ((v >> i) & 1) == 1 else LOW for i in range(8)}
inputs.update([('Clk', LOW), ('Res', LOW), ('Pre', LOW), ('OE', LOW)])
truth_table.append([inputs, outputs_none])
inputs = {f"D[{i:}]" : HIGH if ((v >> i) & 1) == 1 else LOW for i in range(8)}
inputs.update([('Clk', LOW), ('Res', LOW), ('Pre', LOW), ('OE', HIGH)])
outputs = {f"Q[{i:}]" : HIGH if ((v >> i) & 1) == 1 else LOW for i in range(8)}
truth_table.append([inputs, outputs])
run_thruth_table(lsim, "Async D FlipFlop 8bt", truth_table)
def main():
if (not lsim.load_user_library("../../examples/cpu_8bit/lib_latches.lsim")):
print("Unable to load circuit\n")
exit(-1)
test_sr_latch()
test_gated_sr_latch()
test_d_latch()
test_async_sr_latch()
test_sr_flipflop()
test_d_flipflop()
test_jk_flipflop()
test_async_sr_flipflop()
test_async_d_flipflop()
test_async_jk_flipflop()
test_masterslave_d_flipflop()
test_async_d_flipflop_8bit()
test_async_d_flipflop_8bit_tristate()
print_stats()
if __name__ == "__main__":
    main()
from .expression import (
EqualityExpression,
InExpression,
BetweenExpression)
class DynamoDataType(object):
"""Abstract class for all DataTypes
A DynamoDataType defines a column on the Model. They should be classlevel attributes
that are used build models and supplement any sort of Request performed on the table.
This is the base class for all datatypes and defines many shared expressions used by
all of the different child classes.
"""
def __init__(self, default=None, column_name="", condition_type="", translator_cls=None):
"""Constructor for DynamoDataType
Parameters:
default: a default value for the column. It can be a value or function
column_name: a string defining the name of the column on the table
translator_cls: A Translator class used to translate data to/from DynamoDB
condition_type: A string representing one of the types defined by
DynamoDB for how the data is stored in the database. While DynamoDB
supports different datatypes, they are all represented by the following:
                ==== ===============
                Type Description
                ==== ===============
                N    All number types
                S    All string types
                B    ByteBuffer / Binary
                BOOL Booleans
                SS   A set of Strings
                NS   A set of Numbers
                BS   A set of ByteBuffers
                L    Lists of any datatypes
                M    Maps of key/values
                ==== ===============
"""
self.default = default
self.set_column_name(column_name)
self.condition_type = condition_type
self.translator = translator_cls(self) if translator_cls else None
def __eq__(self, value):
return EqualityExpression('=', self, value)
def __ne__(self, value):
return EqualityExpression('<>', self, value)
def __lt__(self, value):
return EqualityExpression('<', self, value)
def __le__(self, value):
return EqualityExpression('<=', self, value)
def __gt__(self, value):
return EqualityExpression('>', self, value)
def __ge__(self, value):
return EqualityExpression('>=', self, value)
def between(self, greater_than, less_than):
"""Build a BetweenExpression
BetweenExpression can be used with the filter() on the model or as a
KeyConditionExpression on the sort key of a query
Parameters:
greater_than: a value that the query is greater than or equal to
less_than: a value that the query is less than or equal to
Returns:
A BetweenExpression
For example::
Person.scan.filter(Person.age.between(10, 20))
"""
return BetweenExpression(self, greater_than, less_than)
def in_(self, *values):
"""Build an InExpression
InExpressions can only be used with filter() on the model, it cannot be part of
a KeyConditionExpression. The will filter for the table for values that match
exactly any of the values passed in as arguments
Parameters:
values: anything value to use to filter the table
Returns:
An InExpression
For example::
Person.scan.filter(Person.name.in_("Mom", "Dad"))
"""
return InExpression(self, values)
def set_column_name(self, val):
"""Update the column_name of this instance
Parameters:
value: a string for the new column name
"""
self.column_name = val
def build(self, val):
"""build the column value based on the val passed in
building is called automatically by the DynamoDataAttribute
when the model is initialized. It will use the default value when present if
the val passed in is None
Parameters:
val: A value that will be used to build the attribute on the instance
Returns:
The passed in val or the default when the default is set
"""
if val is None:
return self._get_default(val)
return val
def _get_default(self, val=None):
"""get the default value from the datatype"""
if self.default is not None:
if callable(self.default):
return self.default()
else:
return self.default
return None
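# Example (hypothetical column definitions, shown for illustration only):
#
#     created_at = DynamoDataType(default=lambda: "2020-01-01",
#                                 column_name="created_at", condition_type="S")
#     created_at.build(None)   # -> "2020-01-01" (falls back to the callable default)
#     created_at.build("2021") # -> "2021" (an explicit value wins)
#
# Comparison operators and between()/in_() return Expression objects rather than
# booleans, e.g. `created_at == "2021"` builds an EqualityExpression that a query
# or filter can consume.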
"""Providing a richer backup system than just every hour for 4 days.
In this file, we attempt to provide backups as follows:
1 backup per hour for 24 hours.
1 backup per day for 7 days
1 backup per week for 4 weeks
1 backup per month for 6 months.
"""
import subprocess
import datetime
import itertools
import os
test_monthly_backups = """
/home/lichess4545/backups/heltour-sql/monthly/heltour-2015-02-01-0021.sql.bz2
/home/lichess4545/backups/heltour-sql/monthly/heltour-2015-03-01-0021.sql.bz2
/home/lichess4545/backups/heltour-sql/monthly/heltour-2015-04-01-0021.sql.bz2
/home/lichess4545/backups/heltour-sql/monthly/heltour-2015-05-01-0021.sql.bz2
/home/lichess4545/backups/heltour-sql/monthly/heltour-2015-06-01-0021.sql.bz2
/home/lichess4545/backups/heltour-sql/monthly/heltour-2015-07-01-0021.sql.bz2
/home/lichess4545/backups/heltour-sql/monthly/heltour-2015-08-01-0021.sql.bz2
"""
test_weekly_backups = """
/home/lichess4545/backups/heltour-sql/weekly/heltour-2015-08-18-0021.sql.bz2
/home/lichess4545/backups/heltour-sql/weekly/heltour-2015-08-25-0021.sql.bz2
/home/lichess4545/backups/heltour-sql/weekly/heltour-2015-09-01-0021.sql.bz2
/home/lichess4545/backups/heltour-sql/weekly/heltour-2015-09-08-0021.sql.bz2
/home/lichess4545/backups/heltour-sql/weekly/heltour-2015-09-15-0021.sql.bz2
"""
test_daily_backups = """
/home/lichess4545/backups/heltour-sql/daily/heltour-2015-09-09-0021.sql.bz2
/home/lichess4545/backups/heltour-sql/daily/heltour-2015-09-09-0021.sql.bz2
/home/lichess4545/backups/heltour-sql/daily/heltour-2015-09-10-0021.sql.bz2
/home/lichess4545/backups/heltour-sql/daily/heltour-2015-09-11-0021.sql.bz2
/home/lichess4545/backups/heltour-sql/daily/heltour-2015-09-12-0021.sql.bz2
/home/lichess4545/backups/heltour-sql/daily/heltour-2015-09-13-0021.sql.bz2
/home/lichess4545/backups/heltour-sql/daily/heltour-2015-09-14-0021.sql.bz2
/home/lichess4545/backups/heltour-sql/daily/heltour-2015-09-15-0021.sql.bz2
/home/lichess4545/backups/heltour-sql/daily/heltour-2015-09-16-0021.sql.bz2
/home/lichess4545/backups/heltour-sql/daily/heltour-2015-09-17-0021.sql.bz2
"""
test_hourly_backups = """
/home/lichess4545/backups/heltour-sql/hourly/heltour-2015-09-14-2321.sql.bz2
/home/lichess4545/backups/heltour-sql/hourly/heltour-2015-09-16-0321.sql.bz2
/home/lichess4545/backups/heltour-sql/hourly/heltour-2015-09-16-1721.sql.bz2
/home/lichess4545/backups/heltour-sql/hourly/heltour-2015-09-15-0721.sql.bz2
/home/lichess4545/backups/heltour-sql/hourly/heltour-2015-09-18-0921.sql.bz2
/home/lichess4545/backups/heltour-sql/hourly/heltour-2015-09-15-1421.sql.bz2
/home/lichess4545/backups/heltour-sql/hourly/heltour-2015-09-14-0121.sql.bz2
/home/lichess4545/backups/heltour-sql/hourly/heltour-2015-09-17-0921.sql.bz2
/home/lichess4545/backups/heltour-sql/hourly/heltour-2015-09-18-0021.sql.bz2
/home/lichess4545/backups/heltour-sql/hourly/heltour-2015-09-17-1621.sql.bz2
/home/lichess4545/backups/heltour-sql/hourly/heltour-2015-09-16-1021.sql.bz2
/home/lichess4545/backups/heltour-sql/hourly/heltour-2015-09-15-0821.sql.bz2
/home/lichess4545/backups/heltour-sql/hourly/heltour-2015-09-14-1721.sql.bz2
/home/lichess4545/backups/heltour-sql/hourly/heltour-2015-09-16-2021.sql.bz2
/home/lichess4545/backups/heltour-sql/hourly/heltour-2015-09-16-1821.sql.bz2
/home/lichess4545/backups/heltour-sql/hourly/heltour-2015-09-16-0521.sql.bz2
/home/lichess4545/backups/heltour-sql/hourly/heltour-2015-09-13-1921.sql.bz2
/home/lichess4545/backups/heltour-sql/hourly/heltour-2015-09-15-0921.sql.bz2
/home/lichess4545/backups/heltour-sql/hourly/heltour-2015-09-16-2121.sql.bz2
/home/lichess4545/backups/heltour-sql/hourly/heltour-2015-09-17-0521.sql.bz2
/home/lichess4545/backups/heltour-sql/hourly/heltour-2015-09-16-0421.sql.bz2
/home/lichess4545/backups/heltour-sql/hourly/heltour-2015-09-16-1921.sql.bz2
/home/lichess4545/backups/heltour-sql/hourly/heltour-2015-09-17-0221.sql.bz2
/home/lichess4545/backups/heltour-sql/hourly/heltour-2015-09-16-0821.sql.bz2
/home/lichess4545/backups/heltour-sql/hourly/heltour-2015-09-13-2221.sql.bz2
/home/lichess4545/backups/heltour-sql/hourly/heltour-2015-09-16-1621.sql.bz2
/home/lichess4545/backups/heltour-sql/hourly/heltour-2015-09-15-0521.sql.bz2
/home/lichess4545/backups/heltour-sql/hourly/heltour-2015-09-14-0621.sql.bz2
/home/lichess4545/backups/heltour-sql/hourly/heltour-2015-09-16-0221.sql.bz2
/home/lichess4545/backups/heltour-sql/hourly/heltour-2015-09-18-0121.sql.bz2
/home/lichess4545/backups/heltour-sql/hourly/heltour-2015-09-18-0421.sql.bz2
/home/lichess4545/backups/heltour-sql/hourly/heltour-2015-09-17-0421.sql.bz2
/home/lichess4545/backups/heltour-sql/hourly/heltour-2015-09-15-0421.sql.bz2
/home/lichess4545/backups/heltour-sql/hourly/heltour-2015-09-17-0621.sql.bz2
/home/lichess4545/backups/heltour-sql/hourly/heltour-2015-09-18-1421.sql.bz2
/home/lichess4545/backups/heltour-sql/hourly/heltour-2015-09-16-0621.sql.bz2
/home/lichess4545/backups/heltour-sql/hourly/heltour-2015-09-16-1121.sql.bz2
/home/lichess4545/backups/heltour-sql/hourly/heltour-2015-09-15-2021.sql.bz2
/home/lichess4545/backups/heltour-sql/hourly/heltour-2015-09-14-1221.sql.bz2
/home/lichess4545/backups/heltour-sql/hourly/heltour-2015-09-15-1321.sql.bz2
/home/lichess4545/backups/heltour-sql/hourly/heltour-2015-09-18-1521.sql.bz2
/home/lichess4545/backups/heltour-sql/hourly/heltour-2015-09-15-2221.sql.bz2
/home/lichess4545/backups/heltour-sql/hourly/heltour-2015-09-17-0321.sql.bz2
/home/lichess4545/backups/heltour-sql/hourly/heltour-2015-09-14-0021.sql.bz2
/home/lichess4545/backups/heltour-sql/hourly/heltour-2015-09-14-2021.sql.bz2
/home/lichess4545/backups/heltour-sql/hourly/heltour-2015-09-13-2121.sql.bz2
/home/lichess4545/backups/heltour-sql/hourly/heltour-2015-09-17-1721.sql.bz2
/home/lichess4545/backups/heltour-sql/hourly/heltour-2015-09-18-1221.sql.bz2
/home/lichess4545/backups/heltour-sql/hourly/heltour-2015-09-15-0221.sql.bz2
/home/lichess4545/backups/heltour-sql/hourly/heltour-2015-09-14-1521.sql.bz2
/home/lichess4545/backups/heltour-sql/hourly/heltour-2015-09-14-0421.sql.bz2
/home/lichess4545/backups/heltour-sql/hourly/heltour-2015-09-17-1321.sql.bz2
/home/lichess4545/backups/heltour-sql/hourly/heltour-2015-09-14-0321.sql.bz2
/home/lichess4545/backups/heltour-sql/hourly/heltour-2015-09-16-0021.sql.bz2
/home/lichess4545/backups/heltour-sql/hourly/heltour-2015-09-17-1421.sql.bz2
/home/lichess4545/backups/heltour-sql/hourly/heltour-2015-09-14-1821.sql.bz2
/home/lichess4545/backups/heltour-sql/hourly/heltour-2015-09-17-1921.sql.bz2
/home/lichess4545/backups/heltour-sql/hourly/heltour-2015-09-14-0921.sql.bz2
/home/lichess4545/backups/heltour-sql/hourly/heltour-2015-09-13-2321.sql.bz2
/home/lichess4545/backups/heltour-sql/hourly/heltour-2015-09-15-1521.sql.bz2
/home/lichess4545/backups/heltour-sql/hourly/heltour-2015-09-14-0721.sql.bz2
/home/lichess4545/backups/heltour-sql/hourly/heltour-2015-09-16-1421.sql.bz2
/home/lichess4545/backups/heltour-sql/hourly/heltour-2015-09-15-1721.sql.bz2
/home/lichess4545/backups/heltour-sql/hourly/heltour-2015-09-13-2021.sql.bz2
/home/lichess4545/backups/heltour-sql/hourly/heltour-2015-09-15-0021.sql.bz2
/home/lichess4545/backups/heltour-sql/hourly/heltour-2015-09-14-0821.sql.bz2
/home/lichess4545/backups/heltour-sql/hourly/heltour-2015-09-17-1821.sql.bz2
/home/lichess4545/backups/heltour-sql/hourly/heltour-2015-09-14-1021.sql.bz2
/home/lichess4545/backups/heltour-sql/hourly/heltour-2015-09-15-1921.sql.bz2
/home/lichess4545/backups/heltour-sql/hourly/heltour-2015-09-14-2121.sql.bz2
/home/lichess4545/backups/heltour-sql/hourly/heltour-2015-09-16-0121.sql.bz2
/home/lichess4545/backups/heltour-sql/hourly/heltour-2015-09-17-2221.sql.bz2
/home/lichess4545/backups/heltour-sql/hourly/heltour-2015-09-17-1121.sql.bz2
/home/lichess4545/backups/heltour-sql/hourly/heltour-2015-09-15-2321.sql.bz2
/home/lichess4545/backups/heltour-sql/hourly/heltour-2015-09-17-0821.sql.bz2
/home/lichess4545/backups/heltour-sql/hourly/heltour-2015-09-16-1521.sql.bz2
/home/lichess4545/backups/heltour-sql/hourly/heltour-2015-09-15-0621.sql.bz2
/home/lichess4545/backups/heltour-sql/hourly/heltour-2015-09-16-0721.sql.bz2
/home/lichess4545/backups/heltour-sql/hourly/heltour-2015-09-17-2121.sql.bz2
/home/lichess4545/backups/heltour-sql/hourly/heltour-2015-09-15-1821.sql.bz2
/home/lichess4545/backups/heltour-sql/hourly/heltour-2015-09-15-1221.sql.bz2
/home/lichess4545/backups/heltour-sql/hourly/heltour-2015-09-17-2021.sql.bz2
/home/lichess4545/backups/heltour-sql/hourly/heltour-2015-09-14-1321.sql.bz2
/home/lichess4545/backups/heltour-sql/hourly/heltour-2015-09-13-1721.sql.bz2
/home/lichess4545/backups/heltour-sql/hourly/heltour-2015-09-18-1021.sql.bz2
/home/lichess4545/backups/heltour-sql/hourly/heltour-2015-09-18-0621.sql.bz2
/home/lichess4545/backups/heltour-sql/hourly/heltour-2015-09-15-0321.sql.bz2
/home/lichess4545/backups/heltour-sql/hourly/heltour-2015-09-18-1321.sql.bz2
/home/lichess4545/backups/heltour-sql/hourly/heltour-2015-09-16-2321.sql.bz2
/home/lichess4545/backups/heltour-sql/hourly/heltour-2015-09-15-1121.sql.bz2
/home/lichess4545/backups/heltour-sql/hourly/heltour-2015-09-18-0221.sql.bz2
/home/lichess4545/backups/heltour-sql/hourly/heltour-2015-09-14-1121.sql.bz2
/home/lichess4545/backups/heltour-sql/hourly/heltour-2015-09-14-1921.sql.bz2
/home/lichess4545/backups/heltour-sql/hourly/heltour-2015-09-18-0521.sql.bz2
/home/lichess4545/backups/heltour-sql/hourly/heltour-2015-09-16-2221.sql.bz2
/home/lichess4545/backups/heltour-sql/hourly/heltour-2015-09-17-0121.sql.bz2
/home/lichess4545/backups/heltour-sql/hourly/heltour-2015-09-18-0721.sql.bz2
/home/lichess4545/backups/heltour-sql/hourly/heltour-2015-09-14-0221.sql.bz2
/home/lichess4545/backups/heltour-sql/hourly/heltour-2015-09-17-2321.sql.bz2
/home/lichess4545/backups/heltour-sql/hourly/heltour-2015-09-14-1421.sql.bz2
/home/lichess4545/backups/heltour-sql/hourly/heltour-2015-09-17-0021.sql.bz2
/home/lichess4545/backups/heltour-sql/hourly/heltour-2015-09-17-1021.sql.bz2
/home/lichess4545/backups/heltour-sql/hourly/heltour-2015-09-15-1621.sql.bz2
/home/lichess4545/backups/heltour-sql/hourly/heltour-2015-09-14-1621.sql.bz2
/home/lichess4545/backups/heltour-sql/hourly/heltour-2015-09-18-0821.sql.bz2
/home/lichess4545/backups/heltour-sql/hourly/heltour-2015-09-16-1321.sql.bz2
/home/lichess4545/backups/heltour-sql/hourly/latest.sql.bz2
/home/lichess4545/backups/heltour-sql/hourly/heltour-2015-09-18-1121.sql.bz2
/home/lichess4545/backups/heltour-sql/hourly/heltour-2015-09-15-1021.sql.bz2
/home/lichess4545/backups/heltour-sql/hourly/heltour-2015-09-17-1521.sql.bz2
/home/lichess4545/backups/heltour-sql/hourly/heltour-2015-09-18-0321.sql.bz2
/home/lichess4545/backups/heltour-sql/hourly/heltour-2015-09-17-0721.sql.bz2
/home/lichess4545/backups/heltour-sql/hourly/heltour-2015-09-13-1821.sql.bz2
/home/lichess4545/backups/heltour-sql/hourly/heltour-2015-09-16-0921.sql.bz2
/home/lichess4545/backups/heltour-sql/hourly/heltour-2015-09-15-0121.sql.bz2
/home/lichess4545/backups/heltour-sql/hourly/heltour-2015-09-15-2121.sql.bz2
/home/lichess4545/backups/heltour-sql/hourly/heltour-2015-09-13-1621.sql.bz2
/home/lichess4545/backups/heltour-sql/hourly/heltour-2015-09-16-1221.sql.bz2
/home/lichess4545/backups/heltour-sql/hourly/heltour-2015-09-17-1221.sql.bz2
/home/lichess4545/backups/heltour-sql/hourly/heltour-2015-09-14-0521.sql.bz2
/home/lichess4545/backups/heltour-sql/hourly/heltour-2015-09-14-2221.sql.bz2
"""
DEBUG=False
#-------------------------------------------------------------------------------
def run(command):
if DEBUG:
print(command)
else:
return subprocess.getoutput(command)
#-------------------------------------------------------------------------------
def find_backups(target_directory, pattern="*.sql.bz2"):
"""Returns the set of backups from a given directory that match a pattern.
"""
return run("find %s -name \"%s\"" % (target_directory, pattern))
#-------------------------------------------------------------------------------
def parse_backups(backup_string, date_format="%Y-%m-%d-%H%M"):
"""Use this to parse the output of a find command.
returns a sorted tuple of datetime objects and paths.
"""
potential_paths = [path for path in backup_string.split("\n") if path]
paths = []
for path in potential_paths:
try:
backup_time = datetime.datetime.strptime(path, date_format)
paths.append((backup_time, path))
except ValueError:
continue
return sorted(paths)
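# Note: date_format is expected to be the *full* path pattern (see the formats
# built in the main block below), so strptime() parses the entire path, e.g.
#   "/home/lichess4545/backups/heltour-sql/hourly/heltour-2015-09-18-1121.sql.bz2"
# parsed with the hourly pattern yields datetime(2015, 9, 18, 11, 21). Paths that
# do not match (such as latest.sql.bz2) raise ValueError and are skipped.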
#-------------------------------------------------------------------------------
def monthly_cutoff(months_ago):
now = datetime.datetime.now()
start_of_month = now.replace(day=1, hour=0, minute=0)
return (start_of_month - datetime.timedelta(days=28*(months_ago-1))).replace(day=1, hour=0, minute=0, second=0, microsecond=0)
#-------------------------------------------------------------------------------
def weekly_cutoff(weeks_ago):
now = datetime.datetime.now()
start_of_week = now
while start_of_week.weekday() != 0:
start_of_week -= datetime.timedelta(days=1)
return start_of_week - datetime.timedelta(weeks=weeks_ago)
#-------------------------------------------------------------------------------
def daily_cutoff(days_ago):
now = datetime.datetime.now()
return now - datetime.timedelta(days=days_ago)
#-------------------------------------------------------------------------------
def hourly_cutoff(hours_ago):
now = datetime.datetime.now()
return now - datetime.timedelta(hours=hours_ago)
#-------------------------------------------------------------------------------
def beginning_of_month():
now = datetime.datetime.now()
return now.replace(day=1, hour=0, minute=0, second=0, microsecond=0)
#-------------------------------------------------------------------------------
def beginning_of_week():
now = datetime.datetime.now()
start_of_week = now
while start_of_week.weekday() != 0:
start_of_week -= datetime.timedelta(days=1)
return start_of_week.replace(hour=0, minute=0, second=0, microsecond=0)
#-------------------------------------------------------------------------------
def beginning_of_day():
now = datetime.datetime.now()
return now.replace(hour=0, minute=0, second=0, microsecond=0)
#-------------------------------------------------------------------------------
def remove_backups(files, cutoff_time):
"""Use this tool to remove backups older than given input from a directory.
"""
def older_than(item):
item_time, item = item
return item_time < cutoff_time
files_to_remove = filter(older_than, files)
for item in files_to_remove:
date_time, file_path = item
run("rm %s" % file_path)
#-------------------------------------------------------------------------------
def add_to_backups(current_files, potential_files, cutoff_time, target_dir):
"""Copies the appropriate file into this backup rotation.
"""
# First figure out if we have a backup in the current rotation that's after
# the cutoff time. If not, we will try and find one in the potential files.
for backup_time, backup in current_files:
if backup_time >= cutoff_time:
return
# If we get here, none of the backups are appropriate
for backup_time, backup in potential_files:
if backup_time >= cutoff_time:
run("cp %s %s" % (backup, target_dir))
return
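# Rotation driver: the main block below prunes each rotation past its retention
# window (hourly: 5 days, daily: 14 days, weekly: 8 weeks, monthly: 12 months)
# and then, if the current month/week/day has no backup in its rotation yet,
# promotes the oldest hourly backup taken since the start of that period by
# copying it into the corresponding directory.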
if __name__ == "__main__":
base_format = "/home/lichess4545/backups/heltour-sql/%s/heltour-%%Y-%%m-%%d-%%H%%M.sql.bz2"
hourly_format = base_format % "hourly"
daily_format = base_format % "daily"
weekly_format = base_format % "weekly"
monthly_format = base_format % "monthly"
base_directory = "/home/lichess4545/backups/heltour-sql/%s/"
hourly_directory = base_directory % "hourly"
daily_directory = base_directory % "daily"
weekly_directory = base_directory % "weekly"
monthly_directory = base_directory % "monthly"
if DEBUG:
hourly_find_output = test_hourly_backups
daily_find_output = test_daily_backups
weekly_find_output = test_weekly_backups
monthly_find_output = test_monthly_backups
else:
hourly_find_output = find_backups(hourly_directory)
daily_find_output = find_backups(daily_directory)
weekly_find_output = find_backups(weekly_directory)
monthly_find_output = find_backups(monthly_directory)
hourly_backups = parse_backups(hourly_find_output, date_format=hourly_format)
daily_backups = parse_backups(daily_find_output, date_format=daily_format)
weekly_backups = parse_backups(weekly_find_output, date_format=weekly_format)
monthly_backups = parse_backups(monthly_find_output, date_format=monthly_format)
if DEBUG: print("Monthly")
remove_backups(monthly_backups, monthly_cutoff(12))
if DEBUG: print(beginning_of_month())
add_to_backups(
monthly_backups,
hourly_backups,
beginning_of_month(),
"/home/lichess4545/backups/heltour-sql/monthly/",
)
if DEBUG: print("weekly")
if DEBUG: print(beginning_of_week())
remove_backups(weekly_backups, weekly_cutoff(8))
add_to_backups(
weekly_backups,
hourly_backups,
beginning_of_week(),
"/home/lichess4545/backups/heltour-sql/weekly/",
)
if DEBUG: print("daily")
if DEBUG: print(beginning_of_day())
remove_backups(daily_backups, daily_cutoff(14))
add_to_backups(
daily_backups,
hourly_backups,
beginning_of_day(),
"/home/lichess4545/backups/heltour-sql/daily/"
)
if DEBUG: print("hourly")
remove_backups(hourly_backups, hourly_cutoff(5*24))
#print parse_backups(test_hourly_backups, date_format=hourly_format)
# source repository: AntonBankevich/LJA
# (c) 2020 by Authors
# This file is a part of centroFlye program.
# Released under the BSD license (see LICENSE file)
import logging
from collections import defaultdict, Counter
from config.config import config
import networkx as nx
import numpy as np
from sequence_graph.seq_graph import SequenceGraph
logger = logging.getLogger("centroFlye.sequence_graph.db_graph")
class DeBruijnGraph(SequenceGraph):
coverage = 'coverage'
def __init__(self, nx_graph, nodeindex2label, nodelabel2index, k,
collapse=True):
super().__init__(nx_graph=nx_graph,
nodeindex2label=nodeindex2label,
nodelabel2index=nodelabel2index,
collapse=collapse)
self.k = k # length of an edge in the uncompressed graph
@classmethod
def _generate_label(cls, par_dict):
cov = par_dict[cls.coverage]
length = par_dict[cls.length]
edge_index = par_dict[cls.edge_index]
mean_cov = np.mean(cov)
label = f'index={edge_index}\nlen={length}\ncov={mean_cov:0.2f}'
return label
def _add_edge(self, color, string,
in_node, out_node,
in_data, out_data,
edge_len,
edge_index):
in_cov = in_data[self.coverage]
out_cov = out_data[self.coverage]
cov = sorted(in_cov + out_cov)
assert len(cov) == edge_len
label = self._generate_label({self.length: edge_len,
self.coverage: np.mean(cov),
self.edge_index: edge_index})
key = self.nx_graph.add_edge(in_node, out_node,
string=string,
coverage=cov,
label=label,
length=edge_len,
color=color,
edge_index=edge_index)
self.edge_index2edge[edge_index] = (in_node, out_node, key)
@classmethod
def from_kmers(cls, kmers, kmer_coverages=None,
min_tip_cov=1, collapse=True):
def add_kmer(kmer, edge_index, coverage=1, color='black'):
prefix, suffix = kmer[:-1], kmer[1:]
if prefix in nodelabel2index:
prefix_node_ind = nodelabel2index[prefix]
else:
prefix_node_ind = len(nodelabel2index)
nodelabel2index[prefix] = prefix_node_ind
nodeindex2label[prefix_node_ind] = prefix
if suffix in nodelabel2index:
suffix_node_ind = nodelabel2index[suffix]
else:
suffix_node_ind = len(nodelabel2index)
nodelabel2index[suffix] = suffix_node_ind
nodeindex2label[suffix_node_ind] = suffix
length = 1
coverage = [coverage]
label = cls._generate_label({cls.length: length,
cls.coverage: coverage,
cls.edge_index: edge_index})
nx_graph.add_edge(prefix_node_ind, suffix_node_ind,
string=kmer,
length=length,
coverage=coverage,
label=label,
color=color,
edge_index=edge_index)
def remove_lowcov_tips():
while True:
edges_to_remove = []
for s, e, key, data in nx_graph.edges(keys=True, data=True):
edge = (s, e, key)
cov = data[cls.coverage]
# We save coverage as list due to subsequent collapsing
# At this stage we did not collapse yet
assert len(cov) == 1
cov = cov[0]
indegree = nx_graph.in_degree(s)
outdegree = nx_graph.out_degree(e)
is_tip = (indegree == 0) or (outdegree == 0)
if is_tip and cov < min_tip_cov:
edges_to_remove.append((edge, indegree, outdegree))
if len(edges_to_remove) == 0:
break
for (s, e, key), indegree, outdegree in edges_to_remove:
nx_graph.remove_edge(s, e, key)
isolates = list(nx.isolates(nx_graph))
nx_graph.remove_nodes_from(isolates)
for isolate in isolates:
label = nodeindex2label[isolate]
del nodeindex2label[isolate]
del nodelabel2index[label]
nx_graph = nx.MultiDiGraph()
nodeindex2label = {}
nodelabel2index = {}
kmers = [tuple(kmer) for kmer in kmers]
edge_index = 0
for kmer in kmers:
if kmer_coverages is None:
add_kmer(kmer, edge_index=edge_index)
else:
add_kmer(kmer,
coverage=kmer_coverages[kmer],
edge_index=edge_index)
edge_index += 1
assert len(kmers)
k = len(kmers[0])
assert all(len(kmer) == k for kmer in kmers)
remove_lowcov_tips()
db_graph = cls(nx_graph=nx_graph,
nodeindex2label=nodeindex2label,
nodelabel2index=nodelabel2index,
k=k,
collapse=collapse)
return db_graph
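    # from_kmers() builds the graph by treating every k-mer as an edge between its
    # (k-1)-mer prefix and suffix nodes, repeatedly pruning tip edges whose coverage
    # is below min_tip_cov (dropping the isolated nodes this leaves behind), and
    # finally letting the SequenceGraph constructor collapse non-branching paths
    # when collapse=True.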
def get_complex_nodes(self):
complex_nodes = []
for node in self.nx_graph.nodes():
indegree = self.nx_graph.in_degree(node)
outdegree = self.nx_graph.out_degree(node)
if indegree > 1 and outdegree > 1:
complex_nodes.append(node)
return complex_nodes
def get_paths_thru_complex_nodes(self, kmer_index, min_mult=4):
complex_nodes = self.get_complex_nodes()
k = self.k
selected_kp1mers = {}
for node in complex_nodes:
for in_edge in self.nx_graph.in_edges(node,
keys=True, data=True):
for out_edge in self.nx_graph.out_edges(node,
keys=True, data=True):
in_kmer = in_edge[3][self.string][-k:]
out_kmer = out_edge[3][self.string][:k]
assert in_kmer[1:] == \
out_kmer[:-1] == \
self.nodeindex2label[node]
kp1 = in_kmer + (out_kmer[-1],)
if kp1 in kmer_index and len(kmer_index[kp1]) >= min_mult:
selected_kp1mers[kp1] = len(kmer_index[kp1])
return selected_kp1mers
def get_all_kmers(self):
kmers = {}
for s, e, key, data in self.nx_graph.edges(keys=True, data=True):
coverage = data[self.coverage]
string = data[self.string]
assert len(coverage) == len(string)-self.k+1
for i in range(len(string)-self.k+1):
kmer = string[i:i+self.k]
kmer = tuple(kmer)
kmers[kmer] = coverage[i]
return kmers
def get_unique_edges(self, paths=None):
def get_topologically_unique_edges(db):
db_cnds = nx.condensation(db.nx_graph)
edges = []
for s, e in db_cnds.edges:
scc1, scc2 = db_cnds.nodes[s], db_cnds.nodes[e]
scc1 = scc1['members']
scc2 = scc2['members']
for n1 in scc1:
for n2 in scc2:
key = 0
while True:
if db.nx_graph.has_edge(n1, n2, key=key):
edges.append((n1, n2, key))
key += 1
else:
break
edges = set(edges)
return edges
def get_mapping_unique_edges(chains):
l_ext, r_ext = defaultdict(list), defaultdict(list)
non_unique = set()
for r_id, r_chains in chains.items():
# use uniquely mapped reads
if len(r_chains) != 1:
continue
chain = r_chains[0]
path = [overlap.edge for overlap in chain.overlap_list]
edges_cnt = Counter(path)
for edge, cnt in edges_cnt.items():
if cnt > 1:
non_unique.add(edge)
for r_id, (path, e_st, e_en) in paths.items():
for i, edge in enumerate(path):
if edge in non_unique:
continue
ex_l_ext, ex_r_ext = l_ext[edge], r_ext[edge]
c_l_ext, c_r_ext = path[:i], path[i+1:]
min_l_ext = min(len(c_l_ext), len(ex_l_ext))
min_r_ext = min(len(c_r_ext), len(ex_r_ext))
if min_l_ext != 0 and \
c_l_ext[-min_l_ext:] != ex_l_ext[-min_l_ext:]:
non_unique.add(edge)
continue
if c_r_ext[:min_r_ext] != ex_r_ext[:min_r_ext]:
non_unique.add(edge)
continue
if len(c_l_ext) > len(ex_l_ext):
l_ext[edge] = c_l_ext
if len(c_r_ext) > len(ex_r_ext):
r_ext[edge] = c_r_ext
unique = set(l_ext.keys()) - non_unique
return unique
unique_edges = get_topologically_unique_edges(self)
if paths is not None:
unique_edges |= get_mapping_unique_edges(paths)
logger.info('Unique edges:')
for edge in unique_edges:
logger.info(f'\t{edge}')
return unique_edges
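    # get_unique_edges() combines two notions of uniqueness: edges between distinct
    # strongly connected components of the condensed graph (topologically unique)
    # and, when read alignments are supplied, edges that are never repeated within a
    # uniquely mapped read and whose left/right extensions agree across reads
    # (mapping unique).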
def map_strings(self, string_set, neutral_symbs,
only_unique_paths=False,
outdir=None,
n_threads=config['common']['threads'],
min_len=None):
if min_len is None:
logger.info(f'For De Bruijn graph aligning min_len = k = {self.k}')
min_len = self.k
return super().map_strings(string_set=string_set,
overlap_penalty=self.k,
neutral_symbs=neutral_symbs,
only_unique_paths=only_unique_paths,
outdir=outdir,
n_threads=n_threads,
min_len=min_len)
# src/spatialite/deps/geos/binding.gyp (from node-spatialite)
{
'target_defaults': {
'default_configuration': 'Debug',
'configurations': {
'Debug': {
'defines': [ 'DEBUG', '_DEBUG' ],
'msvs_settings': {
'VCCLCompilerTool': {
'RuntimeLibrary': 1, # static debug
'RuntimeTypeInfo': 'true'
},
},
},
'Release': {
'defines': [ 'NDEBUG' ],
'msvs_settings': {
'VCCLCompilerTool': {
            'RuntimeLibrary': 1, # 1 selects the static debug runtime (/MTd); the static release runtime would be 0 (/MT)
'RuntimeTypeInfo': 'true'
},
},
}
},
'defines': [
'GEOS_INLINE',
'HAVE_CONFIG_H'
],
'include_dirs': [
'config/<(OS)/<(target_arch)',
'geos/capi',
'geos/include'
],
'conditions': [
['OS == "win"', {
'defines': [
'WIN32'
]
}]
],
},
'targets': [
{
'target_name': 'geos',
'type': 'static_library',
'cflags!': [ '-fno-exceptions', '-fno-rtti' ],
'cflags_cc!': [ '-fno-exceptions', '-fno-rtti' ],
'msvs_settings': {
'VCCLCompilerTool': {
'AdditionalOptions': [ '/GR' ], # there's a bug in gyp where RuntimeTypeInfo doesn't work for Release builds
'RuntimeLibrary': 0,
'RuntimeTypeInfo': 'true'
},
},
'conditions': [
['OS=="mac"', {
'xcode_settings': {
'GCC_ENABLE_CPP_EXCEPTIONS': 'YES',
'GCC_ENABLE_CPP_RTTI': 'YES'
}
}]
],
'direct_dependent_settings': {
'include_dirs': [
'config/<(OS)/<(target_arch)',
'geos/capi',
'geos/include'
],
'defines': [
'GEOS_INLINE',
'HAVE_CONFIG_H'
],
},
'sources': [
'geos/capi/geos_c.cpp',
'geos/capi/geos_ts_c.cpp',
'geos/src/algorithm/Angle.cpp',
'geos/src/algorithm/BoundaryNodeRule.cpp',
'geos/src/algorithm/Centroid.cpp',
'geos/src/algorithm/CentroidArea.cpp',
'geos/src/algorithm/CentroidLine.cpp',
'geos/src/algorithm/CentroidPoint.cpp',
'geos/src/algorithm/CGAlgorithms.cpp',
'geos/src/algorithm/ConvexHull.cpp',
'geos/src/algorithm/distance/DiscreteHausdorffDistance.cpp',
'geos/src/algorithm/distance/DistanceToPoint.cpp',
'geos/src/algorithm/HCoordinate.cpp',
'geos/src/algorithm/InteriorPointArea.cpp',
'geos/src/algorithm/InteriorPointLine.cpp',
'geos/src/algorithm/InteriorPointPoint.cpp',
'geos/src/algorithm/LineIntersector.cpp',
'geos/src/algorithm/locate/IndexedPointInAreaLocator.cpp',
'geos/src/algorithm/locate/PointOnGeometryLocator.cpp',
'geos/src/algorithm/locate/SimplePointInAreaLocator.cpp',
'geos/src/algorithm/MCPointInRing.cpp',
'geos/src/algorithm/MinimumDiameter.cpp',
'geos/src/algorithm/NotRepresentableException.cpp',
'geos/src/algorithm/PointLocator.cpp',
'geos/src/algorithm/RayCrossingCounter.cpp',
'geos/src/algorithm/RobustDeterminant.cpp',
'geos/src/algorithm/SimplePointInRing.cpp',
'geos/src/algorithm/SIRtreePointInRing.cpp',
'geos/src/geom/Coordinate.cpp',
'geos/src/geom/CoordinateArraySequence.cpp',
'geos/src/geom/CoordinateArraySequenceFactory.cpp',
'geos/src/geom/CoordinateSequence.cpp',
'geos/src/geom/CoordinateSequenceFactory.cpp',
'geos/src/geom/Dimension.cpp',
'geos/src/geom/Envelope.cpp',
'geos/src/geom/Geometry.cpp',
'geos/src/geom/GeometryCollection.cpp',
'geos/src/geom/GeometryComponentFilter.cpp',
'geos/src/geom/GeometryFactory.cpp',
'geos/src/geom/GeometryList.cpp',
'geos/src/geom/IntersectionMatrix.cpp',
'geos/src/geom/LinearRing.cpp',
'geos/src/geom/LineSegment.cpp',
'geos/src/geom/LineString.cpp',
'geos/src/geom/Location.cpp',
'geos/src/geom/MultiLineString.cpp',
'geos/src/geom/MultiPoint.cpp',
'geos/src/geom/MultiPolygon.cpp',
'geos/src/geom/Point.cpp',
'geos/src/geom/Polygon.cpp',
'geos/src/geom/PrecisionModel.cpp',
'geos/src/geom/prep/AbstractPreparedPolygonContains.cpp',
'geos/src/geom/prep/BasicPreparedGeometry.cpp',
'geos/src/geom/prep/PreparedGeometry.cpp',
'geos/src/geom/prep/PreparedGeometryFactory.cpp',
'geos/src/geom/prep/PreparedLineString.cpp',
'geos/src/geom/prep/PreparedLineStringIntersects.cpp',
'geos/src/geom/prep/PreparedPoint.cpp',
'geos/src/geom/prep/PreparedPolygon.cpp',
'geos/src/geom/prep/PreparedPolygonContains.cpp',
'geos/src/geom/prep/PreparedPolygonContainsProperly.cpp',
'geos/src/geom/prep/PreparedPolygonCovers.cpp',
'geos/src/geom/prep/PreparedPolygonIntersects.cpp',
'geos/src/geom/prep/PreparedPolygonPredicate.cpp',
'geos/src/geom/Triangle.cpp',
'geos/src/geom/util/ComponentCoordinateExtracter.cpp',
'geos/src/geom/util/CoordinateOperation.cpp',
'geos/src/geom/util/GeometryCombiner.cpp',
'geos/src/geom/util/GeometryEditor.cpp',
'geos/src/geom/util/GeometryTransformer.cpp',
'geos/src/geom/util/ShortCircuitedGeometryVisitor.cpp',
'geos/src/geom/util/SineStarFactory.cpp',
'geos/src/geomgraph/Depth.cpp',
'geos/src/geomgraph/EdgeEnd.cpp',
'geos/src/geomgraph/EdgeEndStar.cpp',
'geos/src/geomgraph/EdgeIntersectionList.cpp',
'geos/src/geomgraph/EdgeList.cpp',
'geos/src/geomgraph/EdgeNodingValidator.cpp',
'geos/src/geomgraph/GeometryGraph.cpp',
'geos/src/geomgraph/GeomGraphDirectedEdge.cpp',
'geos/src/geomgraph/GeomGraphDirectedEdgeStar.cpp',
'geos/src/geomgraph/GeomGraphEdge.cpp',
'geos/src/geomgraph/GeomGraphEdgeRing.cpp',
'geos/src/geomgraph/GeomGraphNode.cpp',
'geos/src/geomgraph/GeomGraphNodeMap.cpp',
'geos/src/geomgraph/GeomGraphPlanarGraph.cpp',
'geos/src/geomgraph/GraphComponent.cpp',
'geos/src/geomgraph/index/GeomGraphSweepLineEvent.cpp',
'geos/src/geomgraph/index/MonotoneChainEdge.cpp',
'geos/src/geomgraph/index/MonotoneChainIndexer.cpp',
'geos/src/geomgraph/index/SegmentIntersector.cpp',
'geos/src/geomgraph/index/SimpleEdgeSetIntersector.cpp',
'geos/src/geomgraph/index/SimpleMCSweepLineIntersector.cpp',
'geos/src/geomgraph/index/SimpleSweepLineIntersector.cpp',
'geos/src/geomgraph/index/SweepLineSegment.cpp',
'geos/src/geomgraph/Label.cpp',
'geos/src/geomgraph/NodeFactory.cpp',
'geos/src/geomgraph/Position.cpp',
'geos/src/geomgraph/Quadrant.cpp',
'geos/src/geomgraph/TopologyLocation.cpp',
'geos/src/index/bintree/Bintree.cpp',
'geos/src/index/bintree/BinTreeInterval.cpp',
'geos/src/index/bintree/BinTreeKey.cpp',
'geos/src/index/bintree/BinTreeNode.cpp',
'geos/src/index/bintree/BinTreeNodeBase.cpp',
'geos/src/index/bintree/BinTreeRoot.cpp',
'geos/src/index/chain/MonotoneChain.cpp',
'geos/src/index/chain/MonotoneChainBuilder.cpp',
'geos/src/index/chain/MonotoneChainOverlapAction.cpp',
'geos/src/index/chain/MonotoneChainSelectAction.cpp',
'geos/src/index/intervalrtree/IntervalRTreeBranchNode.cpp',
'geos/src/index/intervalrtree/IntervalRTreeLeafNode.cpp',
'geos/src/index/intervalrtree/IntervalRTreeNode.cpp',
'geos/src/index/intervalrtree/SortedPackedIntervalRTree.cpp',
'geos/src/index/quadtree/DoubleBits.cpp',
'geos/src/index/quadtree/IntervalSize.cpp',
'geos/src/index/quadtree/Key.cpp',
'geos/src/index/quadtree/NodeBase.cpp',
'geos/src/index/quadtree/Quadtree.cpp',
'geos/src/index/quadtree/QuadTreeNode.cpp',
'geos/src/index/quadtree/Root.cpp',
'geos/src/index/strtree/AbstractNode.cpp',
'geos/src/index/strtree/AbstractSTRtree.cpp',
'geos/src/index/strtree/Interval.cpp',
'geos/src/index/strtree/ItemBoundable.cpp',
'geos/src/index/strtree/SIRtree.cpp',
'geos/src/index/strtree/STRtree.cpp',
'geos/src/index/sweepline/SweepLineEvent.cpp',
'geos/src/index/sweepline/SweepLineIndex.cpp',
'geos/src/index/sweepline/SweepLineInterval.cpp',
'geos/src/inlines.cpp',
'geos/src/io/ByteOrderDataInStream.cpp',
'geos/src/io/ByteOrderValues.cpp',
'geos/src/io/CLocalizer.cpp',
'geos/src/io/ParseException.cpp',
'geos/src/io/StringTokenizer.cpp',
'geos/src/io/Unload.cpp',
'geos/src/io/WKBReader.cpp',
'geos/src/io/WKBWriter.cpp',
'geos/src/io/WKTReader.cpp',
'geos/src/io/WKTWriter.cpp',
'geos/src/io/Writer.cpp',
'geos/src/linearref/ExtractLineByLocation.cpp',
'geos/src/linearref/LengthIndexedLine.cpp',
'geos/src/linearref/LengthIndexOfPoint.cpp',
'geos/src/linearref/LengthLocationMap.cpp',
'geos/src/linearref/LinearGeometryBuilder.cpp',
'geos/src/linearref/LinearIterator.cpp',
'geos/src/linearref/LinearLocation.cpp',
'geos/src/linearref/LocationIndexOfLine.cpp',
'geos/src/linearref/LocationIndexOfPoint.cpp',
'geos/src/noding/BasicSegmentString.cpp',
'geos/src/noding/FastNodingValidator.cpp',
'geos/src/noding/FastSegmentSetIntersectionFinder.cpp',
'geos/src/noding/GeometryNoder.cpp',
'geos/src/noding/IntersectionAdder.cpp',
'geos/src/noding/IntersectionFinderAdder.cpp',
'geos/src/noding/IteratedNoder.cpp',
'geos/src/noding/MCIndexNoder.cpp',
'geos/src/noding/MCIndexSegmentSetMutualIntersector.cpp',
'geos/src/noding/NodedSegmentString.cpp',
'geos/src/noding/NodingValidator.cpp',
'geos/src/noding/Octant.cpp',
'geos/src/noding/OrientedCoordinateArray.cpp',
'geos/src/noding/ScaledNoder.cpp',
'geos/src/noding/SegmentIntersectionDetector.cpp',
'geos/src/noding/SegmentNode.cpp',
'geos/src/noding/SegmentNodeList.cpp',
'geos/src/noding/SegmentString.cpp',
'geos/src/noding/SegmentStringUtil.cpp',
'geos/src/noding/SimpleNoder.cpp',
'geos/src/noding/SingleInteriorIntersectionFinder.cpp',
'geos/src/noding/snapround/HotPixel.cpp',
'geos/src/noding/snapround/MCIndexPointSnapper.cpp',
'geos/src/noding/snapround/MCIndexSnapRounder.cpp',
'geos/src/noding/snapround/SimpleSnapRounder.cpp',
'geos/src/operation/buffer/BufferBuilder.cpp',
'geos/src/operation/buffer/BufferInputLineSimplifier.cpp',
'geos/src/operation/buffer/BufferOp.cpp',
'geos/src/operation/buffer/BufferParameters.cpp',
'geos/src/operation/buffer/BufferSubgraph.cpp',
'geos/src/operation/buffer/OffsetCurveBuilder.cpp',
'geos/src/operation/buffer/OffsetCurveSetBuilder.cpp',
'geos/src/operation/buffer/OffsetSegmentGenerator.cpp',
'geos/src/operation/buffer/RightmostEdgeFinder.cpp',
'geos/src/operation/buffer/SubgraphDepthLocater.cpp',
'geos/src/operation/distance/ConnectedElementLocationFilter.cpp',
'geos/src/operation/distance/ConnectedElementPointFilter.cpp',
'geos/src/operation/distance/DistanceOp.cpp',
'geos/src/operation/distance/GeometryLocation.cpp',
'geos/src/operation/GeometryGraphOperation.cpp',
'geos/src/operation/IsSimpleOp.cpp',
'geos/src/operation/linemerge/EdgeString.cpp',
'geos/src/operation/linemerge/LineMergeDirectedEdge.cpp',
'geos/src/operation/linemerge/LineMergeEdge.cpp',
'geos/src/operation/linemerge/LineMergeGraph.cpp',
'geos/src/operation/linemerge/LineMerger.cpp',
'geos/src/operation/linemerge/LineSequencer.cpp',
'geos/src/operation/overlay/EdgeSetNoder.cpp',
'geos/src/operation/overlay/ElevationMatrix.cpp',
'geos/src/operation/overlay/ElevationMatrixCell.cpp',
'geos/src/operation/overlay/LineBuilder.cpp',
'geos/src/operation/overlay/MaximalEdgeRing.cpp',
'geos/src/operation/overlay/MinimalEdgeRing.cpp',
'geos/src/operation/overlay/OverlayNodeFactory.cpp',
'geos/src/operation/overlay/OverlayOp.cpp',
'geos/src/operation/overlay/PointBuilder.cpp',
'geos/src/operation/overlay/PolygonBuilder.cpp',
'geos/src/operation/overlay/snap/GeometrySnapper.cpp',
'geos/src/operation/overlay/snap/LineStringSnapper.cpp',
'geos/src/operation/overlay/snap/SnapIfNeededOverlayOp.cpp',
'geos/src/operation/overlay/snap/SnapOverlayOp.cpp',
'geos/src/operation/overlay/validate/FuzzyPointLocator.cpp',
'geos/src/operation/overlay/validate/OffsetPointGenerator.cpp',
'geos/src/operation/overlay/validate/OverlayResultValidator.cpp',
'geos/src/operation/polygonize/EdgeRing.cpp',
'geos/src/operation/polygonize/PolygonizeDirectedEdge.cpp',
'geos/src/operation/polygonize/PolygonizeEdge.cpp',
'geos/src/operation/polygonize/PolygonizeGraph.cpp',
'geos/src/operation/polygonize/Polygonizer.cpp',
'geos/src/operation/predicate/RectangleContains.cpp',
'geos/src/operation/predicate/RectangleIntersects.cpp',
'geos/src/operation/predicate/SegmentIntersectionTester.cpp',
'geos/src/operation/relate/EdgeEndBuilder.cpp',
'geos/src/operation/relate/EdgeEndBundle.cpp',
'geos/src/operation/relate/EdgeEndBundleStar.cpp',
'geos/src/operation/relate/RelateComputer.cpp',
'geos/src/operation/relate/RelateNode.cpp',
'geos/src/operation/relate/RelateNodeFactory.cpp',
'geos/src/operation/relate/RelateNodeGraph.cpp',
'geos/src/operation/relate/RelateOp.cpp',
'geos/src/operation/sharedpaths/SharedPathsOp.cpp',
'geos/src/operation/union/CascadedPolygonUnion.cpp',
'geos/src/operation/union/CascadedUnion.cpp',
'geos/src/operation/union/PointGeometryUnion.cpp',
'geos/src/operation/union/UnaryUnionOp.cpp',
'geos/src/operation/valid/ConnectedInteriorTester.cpp',
'geos/src/operation/valid/ConsistentAreaTester.cpp',
'geos/src/operation/valid/IndexedNestedRingTester.cpp',
'geos/src/operation/valid/IsValidOp.cpp',
'geos/src/operation/valid/QuadtreeNestedRingTester.cpp',
'geos/src/operation/valid/RepeatedPointTester.cpp',
'geos/src/operation/valid/SimpleNestedRingTester.cpp',
'geos/src/operation/valid/SweeplineNestedRingTester.cpp',
'geos/src/operation/valid/TopologyValidationError.cpp',
'geos/src/planargraph/algorithm/ConnectedSubgraphFinder.cpp',
'geos/src/planargraph/DirectedEdge.cpp',
'geos/src/planargraph/DirectedEdgeStar.cpp',
'geos/src/planargraph/Edge.cpp',
'geos/src/planargraph/Node.cpp',
'geos/src/planargraph/NodeMap.cpp',
'geos/src/planargraph/PlanarGraph.cpp',
'geos/src/planargraph/Subgraph.cpp',
'geos/src/precision/CommonBits.cpp',
'geos/src/precision/CommonBitsOp.cpp',
'geos/src/precision/CommonBitsRemover.cpp',
'geos/src/precision/EnhancedPrecisionOp.cpp',
'geos/src/precision/GeometryPrecisionReducer.cpp',
'geos/src/precision/PrecisionReducerCoordinateOperation.cpp',
'geos/src/precision/SimpleGeometryPrecisionReducer.cpp',
'geos/src/simplify/DouglasPeuckerLineSimplifier.cpp',
'geos/src/simplify/DouglasPeuckerSimplifier.cpp',
'geos/src/simplify/LineSegmentIndex.cpp',
'geos/src/simplify/TaggedLineSegment.cpp',
'geos/src/simplify/TaggedLinesSimplifier.cpp',
'geos/src/simplify/TaggedLineString.cpp',
'geos/src/simplify/TaggedLineStringSimplifier.cpp',
'geos/src/simplify/TopologyPreservingSimplifier.cpp',
'geos/src/triangulate/DelaunayTriangulationBuilder.cpp',
'geos/src/triangulate/IncrementalDelaunayTriangulator.cpp',
'geos/src/triangulate/quadedge/LastFoundQuadEdgeLocator.cpp',
'geos/src/triangulate/quadedge/LocateFailureException.cpp',
'geos/src/triangulate/quadedge/QuadEdge.cpp',
'geos/src/triangulate/quadedge/QuadEdgeLocator.cpp',
'geos/src/triangulate/quadedge/QuadEdgeSubdivision.cpp',
'geos/src/triangulate/quadedge/TrianglePredicate.cpp',
'geos/src/triangulate/quadedge/TriangleVisitor.cpp',
'geos/src/triangulate/quadedge/Vertex.cpp',
'geos/src/util/Assert.cpp',
'geos/src/util/GeometricShapeFactory.cpp',
'geos/src/util/Interrupt.cpp',
'geos/src/util/math.cpp',
'geos/src/util/Profiler.cpp'
]
},
]
}
|
<gh_stars>0
from operator import mul
from sympy.core.sympify import _sympify
from sympy.matrices.common import (NonInvertibleMatrixError,
NonSquareMatrixError, ShapeError)
from sympy.polys.constructor import construct_domain
class DDMError(Exception):
"""Base class for errors raised by DDM"""
pass
class DDMBadInputError(DDMError):
"""list of lists is inconsistent with shape"""
pass
class DDMDomainError(DDMError):
"""domains do not match"""
pass
class DDMShapeError(DDMError):
"""shapes are inconsistent"""
pass
class DDM(list):
"""Dense matrix based on polys domain elements
This is a list subclass and is a wrapper for a list of lists that supports
basic matrix arithmetic +, -, *, **.
"""
def __init__(self, rowslist, shape, domain):
super().__init__(rowslist)
self.shape = self.rows, self.cols = m, n = shape
self.domain = domain
if not (len(self) == m and all(len(row) == n for row in self)):
raise DDMBadInputError("Inconsistent row-list/shape")
def __str__(self):
cls = type(self).__name__
rows = list.__str__(self)
return '%s(%s, %s, %s)' % (cls, rows, self.shape, self.domain)
def __eq__(self, other):
if not isinstance(other, DDM):
return False
return (super().__eq__(other) and self.domain == other.domain)
def __ne__(self, other):
return not self.__eq__(other)
@classmethod
def zeros(cls, shape, domain):
z = domain.zero
m, n = shape
rowslist = ([z] * n for _ in range(m))
return DDM(rowslist, shape, domain)
@classmethod
def eye(cls, size, domain):
one = domain.one
ddm = cls.zeros((size, size), domain)
for i in range(size):
ddm[i][i] = one
return ddm
def copy(self):
copyrows = (row[:] for row in self)
return DDM(copyrows, self.shape, self.domain)
def __add__(a, b):
if not isinstance(b, DDM):
return NotImplemented
return a.add(b)
def __sub__(a, b):
if not isinstance(b, DDM):
return NotImplemented
return a.sub(b)
def __neg__(a):
return a.neg()
def __mul__(a, b):
if b in a.domain:
return a.mul(b)
else:
return NotImplemented
def __matmul__(a, b):
if isinstance(b, DDM):
return a.matmul(b)
else:
return NotImplemented
@classmethod
def _check(cls, a, op, b, ashape, bshape):
if a.domain != b.domain:
msg = "Domain mismatch: %s %s %s" % (a.domain, op, b.domain)
raise DDMDomainError(msg)
if ashape != bshape:
msg = "Shape mismatch: %s %s %s" % (a.shape, op, b.shape)
raise DDMShapeError(msg)
def add(a, b):
"""a + b"""
a._check(a, '+', b, a.shape, b.shape)
c = a.copy()
ddm_iadd(c, b)
return c
def sub(a, b):
"""a - b"""
a._check(a, '-', b, a.shape, b.shape)
c = a.copy()
ddm_isub(c, b)
return c
def neg(a):
"""-a"""
b = a.copy()
ddm_ineg(b)
return b
def mul(a, b):
c = a.copy()
ddm_imul(c, b)
return c
def matmul(a, b):
"""a @ b (matrix product)"""
m, o = a.shape
o2, n = b.shape
a._check(a, '*', b, o, o2)
c = a.zeros((m, n), a.domain)
ddm_imatmul(c, a, b)
return c
def rref(a):
"""Reduced-row echelon form of a and list of pivots"""
b = a.copy()
pivots = ddm_irref(b, a.domain)
return b, pivots
def det(a):
"""Determinant of a"""
m, n = a.shape
if m != n:
raise DDMShapeError("Determinant of non-square matrix")
b = a.copy()
K = b.domain
deta = ddm_idet(b, K)
return deta
def inv(a, *, method='GE'):
"""Inverse of a"""
m, n = a.shape
if m != n:
raise DDMShapeError("Inverse of non-square matrix")
if method == 'GE':
return a._inv_ge()
elif method == 'LU':
return a._inv_lu()
elif method == 'charpoly':
return a._inv_charpoly()
else:
raise DDMError('No such method "%s"' % method)
def _inv_ge(a):
"""Inverse using Gaussian elimination"""
ainv = a.copy()
K = a.domain
ddm_iinv(ainv, a, K)
return ainv
def _inv_lu(a):
"""Inverse using LU decomposition"""
e = a.eye(a.shape[0], a.domain)
return a.lu_solve(e)
def _inv_charpoly(a):
coeffs = a.charpoly()
e = a.eye(a.shape[0], a.domain)
an, ais, a0 = coeffs[0], coeffs[1:-1], coeffs[-1]
bi = a*an
for ai in ais[:-1]:
bi = a @ (e*ai + bi)
bi = bi + e*ais[-1]
ainv = bi * (-1/a0)
return ainv
def lu(a):
"""L, U decomposition of a"""
m, n = a.shape
K = a.domain
U = a.copy()
L = a.eye(m, K)
swaps = ddm_ilu_split(L, U, K)
return L, U, swaps
def lu_solve(a, b):
"""x where a*x = b"""
m, n = a.shape
m2, o = b.shape
a._check(a, 'lu_solve', b, m, m2)
L, U, swaps = a.lu()
x = a.zeros((n, o), a.domain)
ddm_ilu_solve(x, L, U, swaps, b)
return x
def charpoly(a):
"""Coefficients of characteristic polynomial of a"""
K = a.domain
m, n = a.shape
if m != n:
raise DDMShapeError("Charpoly of non-square matrix")
vec = ddm_berk(a, K)
coeffs = [vec[i][0] for i in range(n+1)]
return coeffs
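# The ddm_* helpers below operate in-place on the raw list-of-lists data; the
# leading "i" in ddm_iadd/ddm_isub/... marks in-place mutation of the first argument.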
def ddm_iadd(a, b):
"""a += b"""
for ai, bi in zip(a, b):
for j, bij in enumerate(bi):
ai[j] += bij
def ddm_isub(a, b):
"""a -= b"""
for ai, bi in zip(a, b):
for j, bij in enumerate(bi):
ai[j] -= bij
def ddm_ineg(a):
"""a <-- -a"""
for ai in a:
for j, aij in enumerate(ai):
ai[j] = -aij
def ddm_imul(a, b):
for ai in a:
for j, aij in enumerate(ai):
ai[j] = b * aij
def ddm_imatmul(a, b, c):
"""a += b @ c"""
cT = list(zip(*c))
for bi, ai in zip(b, a):
for j, cTj in enumerate(cT):
ai[j] = sum(map(mul, bi, cTj), ai[j])
def ddm_irref(a, K):
"""a <-- rref(a)"""
return ddm_irref_score(a, ddm_pivot_scorer(K))
def ddm_pivot_scorer(K):
"""Return scoring function for selecting pivots over K"""
if K.is_FractionField:
def score(e):
n, d = e.numer, e.denom
return (-len(n), -len(d), n.is_ground, d.is_ground)
else:
score = bool
return score
def ddm_irref_score(a, score):
"""a <-- rref(a)"""
# a is (m x n)
m = len(a)
if not m:
return []
n = len(a[0])
i = 0
pivots = []
for j in range(n):
# nonzero pivots
nz_ip = [ip for ip in range(i, m) if a[ip][j]]
# No pivots
if not nz_ip:
continue
# Find a pivot from the ground domain if possible
ip = max(nz_ip, key=lambda ip: score(a[ip][j]))
# Swap pivot to the current row
a[i], a[ip] = a[ip], a[i]
# normalise row
ai = a[i]
aij = ai[j]
for l in range(j, n):
ai[l] /= aij # ai[j] = one
# eliminate above and below to the right
for k, ak in enumerate(a):
if k == i or not ak[j]:
continue
akj = ak[j]
ak[j] -= akj # ak[j] = zero
for l in range(j+1, n):
ak[l] -= akj * ai[l]
# next row
pivots.append(j)
i += 1
# no more rows?
if i >= m:
break
return pivots
def ddm_idet(a, K):
"""a <-- echelon(a); return det"""
# Fraction-free Gaussian elimination
# https://www.math.usm.edu/perry/Research/Thesis_DRL.pdf
# a is (m x n)
m = len(a)
if not m:
return K.one
n = len(a[0])
is_field = K.is_Field
# uf keeps track of the effect of row swaps and multiplies
uf = K.one
for j in range(n-1):
# if zero on the diagonal need to swap
if not a[j][j]:
for l in range(j+1, n):
if a[l][j]:
a[j], a[l] = a[l], a[j]
uf = -uf
break
else:
# unable to swap: det = 0
return K.zero
for i in range(j+1, n):
if a[i][j]:
if not is_field:
d = K.gcd(a[j][j], a[i][j])
b = a[j][j] // d
c = a[i][j] // d
else:
b = a[j][j]
c = a[i][j]
# account for multiplying row i by b
uf = b * uf
for k in range(j+1, n):
a[i][k] = b*a[i][k] - c*a[j][k]
# triangular det is product of diagonal
prod = K.one
for i in range(n):
prod = prod * a[i][i]
# incorporate swaps and multiplies
if not is_field:
D = prod // uf
else:
D = prod / uf
return D
def ddm_iinv(ainv, a, K):
if not K.is_Field:
raise ValueError('Not a field')
# a is (m x n)
m = len(a)
if not m:
return
n = len(a[0])
if m != n:
raise NonSquareMatrixError
eye = [[K.one if i==j else K.zero for j in range(n)] for i in range(n)]
Aaug = [row + eyerow for row, eyerow in zip(a, eye)]
pivots = ddm_irref(Aaug, K)
if pivots != list(range(n)):
raise NonInvertibleMatrixError('Matrix det == 0; not invertible.')
ainv[:] = [row[n:] for row in Aaug]
def ddm_ilu_split(L, U, K):
"""L, U <-- LU(U)"""
m = len(U)
if not m:
return []
n = len(U[0])
swaps = ddm_ilu(U)
zeros = [K.zero] * min(m, n)
for i in range(1, m):
j = min(i, n)
L[i][:j] = U[i][:j]
U[i][:j] = zeros[:j]
return swaps
def ddm_ilu(a):
"""a <-- LU(a)"""
m = len(a)
if not m:
return []
n = len(a[0])
swaps = []
for i in range(min(m, n)):
if not a[i][i]:
for ip in range(i+1, m):
if a[ip][i]:
swaps.append((i, ip))
a[i], a[ip] = a[ip], a[i]
break
else:
                # no nonzero pivot found in this column, so it is skipped, e.g. for
                # M = Matrix([[1, 0, 0, 0], [0, 0, 0, 0], [0, 0, 1, 1], [0, 0, 1, 2]])
continue
for j in range(i+1, m):
l_ji = a[j][i] / a[i][i]
a[j][i] = l_ji
for k in range(i+1, n):
a[j][k] -= l_ji * a[i][k]
return swaps
def ddm_ilu_solve(x, L, U, swaps, b):
"""x <-- solve(L*U*x = swaps(b))"""
m = len(U)
if not m:
return
n = len(U[0])
m2 = len(b)
if not m2:
        raise DDMShapeError("Shape mismatch")
o = len(b[0])
if m != m2:
        raise DDMShapeError("Shape mismatch")
if m < n:
raise NotImplementedError("Underdetermined")
if swaps:
b = [row[:] for row in b]
for i1, i2 in swaps:
b[i1], b[i2] = b[i2], b[i1]
# solve Ly = b
y = [[None] * o for _ in range(m)]
for k in range(o):
for i in range(m):
rhs = b[i][k]
for j in range(i):
rhs -= L[i][j] * y[j][k]
y[i][k] = rhs
if m > n:
for i in range(n, m):
for j in range(o):
if y[i][j]:
raise NonInvertibleMatrixError
# Solve Ux = y
for k in range(o):
for i in reversed(range(n)):
if not U[i][i]:
raise NonInvertibleMatrixError
rhs = y[i][k]
for j in range(i+1, n):
rhs -= U[i][j] * x[j][k]
x[i][k] = rhs / U[i][i]
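# ddm_berk: division-free (Berkowitz-style) computation of the characteristic
# polynomial coefficients, recursing on the matrix obtained by peeling off the
# first row and column of M.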
def ddm_berk(M, K):
m = len(M)
if not m:
return [[K.one]]
n = len(M[0])
if m != n:
raise DDMShapeError("Not square")
if n == 1:
return [[K.one], [-M[0][0]]]
a = M[0][0]
R = [M[0][1:]]
C = [[row[0]] for row in M[1:]]
A = [row[1:] for row in M[1:]]
q = ddm_berk(A, K)
T = [[K.zero] * n for _ in range(n+1)]
for i in range(n):
T[i][i] = K.one
T[i+1][i] = -a
for i in range(2, n+1):
if i == 2:
AnC = C
else:
C = AnC
AnC = [[K.zero] for row in C]
ddm_imatmul(AnC, A, C)
RAnC = [[K.zero]]
ddm_imatmul(RAnC, R, AnC)
for j in range(0, n+1-i):
T[i+j][j] = -RAnC[0][0]
qout = [[K.zero] for _ in range(n+1)]
ddm_imatmul(qout, T, q)
return qout
class DomainMatrix:
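    """Matrix whose entries live in a SymPy polys domain.

    Thin wrapper around DDM that keeps the shape and domain alongside the dense
    representation and converts to/from SymPy expressions.
    """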
def __init__(self, rows, shape, domain):
self.rep = DDM(rows, shape, domain)
self.shape = shape
self.domain = domain
@classmethod
def from_ddm(cls, ddm):
return cls(ddm, ddm.shape, ddm.domain)
@classmethod
def from_list_sympy(cls, nrows, ncols, rows):
assert len(rows) == nrows
assert all(len(row) == ncols for row in rows)
items_sympy = [_sympify(item) for row in rows for item in row]
domain, items_domain = cls.get_domain(items_sympy)
domain_rows = [[items_domain[ncols*r + c] for c in range(ncols)] for r in range(nrows)]
return DomainMatrix(domain_rows, (nrows, ncols), domain)
@classmethod
def from_Matrix(cls, M):
return cls.from_list_sympy(*M.shape, M.tolist())
@classmethod
def get_domain(cls, items_sympy, **kwargs):
K, items_K = construct_domain(items_sympy, **kwargs)
return K, items_K
def convert_to(self, K):
Kold = self.domain
new_rows = [[K.convert_from(e, Kold) for e in row] for row in self.rep]
return DomainMatrix(new_rows, self.shape, K)
def to_field(self):
K = self.domain.get_field()
return self.convert_to(K)
def unify(self, other):
K1 = self.domain
K2 = other.domain
if K1 == K2:
return self, other
K = K1.unify(K2)
if K1 != K:
self = self.convert_to(K)
if K2 != K:
other = other.convert_to(K)
return self, other
def to_Matrix(self):
from sympy.matrices.dense import MutableDenseMatrix
rows_sympy = [[self.domain.to_sympy(e) for e in row] for row in self.rep]
return MutableDenseMatrix(rows_sympy)
def __repr__(self):
rows_str = ['[%s]' % (', '.join(map(str, row))) for row in self.rep]
rowstr = '[%s]' % ', '.join(rows_str)
return 'DomainMatrix(%s, %r, %r)' % (rowstr, self.shape, self.domain)
def __add__(A, B):
if not isinstance(B, DomainMatrix):
return NotImplemented
return A.add(B)
def __sub__(A, B):
if not isinstance(B, DomainMatrix):
return NotImplemented
return A.sub(B)
def __neg__(A):
return A.neg()
def __mul__(A, B):
"""A * B"""
if isinstance(B, DomainMatrix):
return A.matmul(B)
elif B in A.domain:
return A.from_ddm(A.rep * B)
else:
return NotImplemented
def __rmul__(A, B):
if B in A.domain:
return A.from_ddm(A.rep * B)
else:
return NotImplemented
def __pow__(A, n):
"""A ** n"""
if not isinstance(n, int):
return NotImplemented
return A.pow(n)
def add(A, B):
if A.shape != B.shape:
raise ShapeError("shape")
if A.domain != B.domain:
raise ValueError("domain")
return A.from_ddm(A.rep.add(B.rep))
def sub(A, B):
if A.shape != B.shape:
raise ShapeError("shape")
if A.domain != B.domain:
raise ValueError("domain")
return A.from_ddm(A.rep.sub(B.rep))
def neg(A):
return A.from_ddm(A.rep.neg())
def matmul(A, B):
return A.from_ddm(A.rep.matmul(B.rep))
def pow(A, n):
if n < 0:
raise NotImplementedError('Negative powers')
elif n == 0:
m, n = A.shape
rows = [[A.domain.zero] * m for _ in range(m)]
for i in range(m):
rows[i][i] = A.domain.one
return type(A)(rows, A.shape, A.domain)
elif n == 1:
return A
elif n % 2 == 1:
return A * A**(n - 1)
else:
sqrtAn = A ** (n // 2)
return sqrtAn * sqrtAn
def rref(self):
if not self.domain.is_Field:
raise ValueError('Not a field')
rref_ddm, pivots = self.rep.rref()
return self.from_ddm(rref_ddm), tuple(pivots)
def inv(self, *, method='GE'):
if not self.domain.is_Field:
raise ValueError('Not a field')
m, n = self.shape
if m != n:
raise NonSquareMatrixError
inv = self.rep.inv(method=method)
return self.from_ddm(inv)
def det(self):
m, n = self.shape
if m != n:
raise NonSquareMatrixError
return self.rep.det()
def lu(self):
if not self.domain.is_Field:
raise ValueError('Not a field')
L, U, swaps = self.rep.lu()
return self.from_ddm(L), self.from_ddm(U), swaps
def lu_solve(self, rhs):
if self.shape[0] != rhs.shape[0]:
raise ShapeError("Shape")
if not self.domain.is_Field:
raise ValueError('Not a field')
sol = self.rep.lu_solve(rhs.rep)
return self.from_ddm(sol)
def charpoly(self):
m, n = self.shape
if m != n:
raise NonSquareMatrixError("not square")
return self.rep.charpoly()
def __eq__(A, B):
"""A == B"""
if not isinstance(B, DomainMatrix):
return NotImplemented
return A.rep == B.rep
|
<filename>dataset_tools/datasets/detection_dataset.py
import os
import json
from collections import OrderedDict
from . import _getters
class DetectionDataset(object):
def __init__(self, dataset_file, root_dir=None, classes=None):
"""
Dataset for detection and segmentation tasks
Either loads an existing dataset or prepares to create a new dataset
- If loading an existing dataset, root_dir and classes should be None
- If creating new dataset, root_dir and classes must have values
Args
dataset_file : Path to detection dataset json file
root_dir : Path to root directory of all image files in dataset (Only used when creating new dataset)
classes : list of strings representing all classes
"""
self.dataset_file = dataset_file
# Create data structures to store data
self.id_to_class_info = OrderedDict()
self.name_to_class_info = OrderedDict()
self.image_infos = OrderedDict()
self.ann_infos = OrderedDict()
self.img_to_ann = OrderedDict()
self.next_image_id = 0
new_dataset = (not os.path.isfile(self.dataset_file)) or root_dir or classes
if new_dataset:
assert root_dir is not None and classes is not None, 'If creating new dataset, both root_dir and classes must be provided'
self._init_new_dataset(root_dir, classes)
print('New dataset initialized')
else:
print('Loading dataset')
self._load_dataset()
print('Dataset loaded')
def _init_new_dataset(self, root_dir, classes):
assert os.path.isdir(root_dir), '{} is not a valid path for root_dir'.format(root_dir)
self.root_dir = root_dir
for class_id, class_name in enumerate(classes):
class_info = {
'id' : class_id,
'name' : class_name
}
self.id_to_class_info[class_id] = class_info
self.name_to_class_info[class_name] = class_info
def _load_dataset(self):
with open(self.dataset_file, 'r') as f:
data = json.load(f)
# save root dir
self.root_dir = data['root_dir']
# retrieve class information
for class_id, class_name in enumerate(data['classes']):
class_info = {
'id' : class_id,
'name': class_name
}
self.id_to_class_info[class_id] = class_info
self.name_to_class_info[class_name] = class_info
# Retrieve image information
for image_info in data['images']:
self.image_infos[image_info['id']] = image_info
self.next_image_id = max(self.image_infos.keys()) + 1
# Config annotation infos such that it is retrievable through annotation id
for ann_info in data['annotations']:
self.ann_infos[ann_info['id']] = ann_info
# Make the img_to_ann dict
for image_info in data['images']:
self.img_to_ann[image_info['id']] = []
for ann_info in data['annotations']:
self.img_to_ann[ann_info['image_id']].append(ann_info['id'])
def save_dataset(self, dataset_file=None, force_overwrite=False):
""" Save DetectionDataset to a json file
Args
dataset_file : Path to DetectionDataset json file (or None if saving to same file dataset is loaded from)
            force_overwrite : Flag to allow overwriting an existing dataset file
"""
if dataset_file is not None:
self.dataset_file = dataset_file
assert self.dataset_file is not None
# Initialize dict
json_dataset = OrderedDict()
# Save dataset info
json_dataset['root_dir'] = self.root_dir
json_dataset['classes'] = list(self.name_to_class_info.keys())
json_dataset['images'] = list(self.image_infos.values())
json_dataset['annotations'] = list(self.ann_infos.values())
# Save dataset into json file
if (not os.path.isfile(self.dataset_file)) or force_overwrite:
print('Saving dataset as an annotation file, this can take a while')
with open(self.dataset_file, 'w') as f:
json.dump(json_dataset, f)
print('Dataset saved')
else:
raise FileExistsError('Dataset not saved as it already exists, consider overwriting')
###########################################################################
#### Dataset misc functions
###########################################################################
#### Dataset getter and loaders
get_dataset_file = _getters.get_dataset_file
get_size = _getters.get_size
get_root_dir = _getters.get_root_dir
get_num_classes = _getters.get_num_classes
name_to_label = _getters.name_to_label
label_to_name = _getters.label_to_name
get_all_classes = _getters.get_all_classes
get_classes_dict = _getters.get_classes_dict
get_all_image_index = _getters.get_all_image_index
get_all_ann_index = _getters.get_all_ann_index
get_image_info = _getters.get_image_info
get_image_pil = _getters.get_image_pil
get_image_array = _getters.get_image_array
get_ann_info = _getters.get_ann_info
get_ann_array = _getters.get_ann_array
get_mask_pil = _getters.get_mask_pil
get_mask_array = _getters.get_mask_array
###########################################################################
#### Dataset setters
def set_image(
self,
image_path=None,
image_url=None,
image_id=None,
height=None,
width=None,
force_overwrite=False
):
""" Sets an image entry in the dataset
Required variables:
            image_path/image_url (at least 1 required)
Args
image_path : The path to the locally stored image relative to root_dir
image_url : The http public url to the image
image_id : An integer to use for the image id
height : The image pixel-wise height
width : The image pixel-wise width
force_overwrite : Flag to trigger the overwrite of image at image_id
Returns
image info (Dataset object will also be updated with this new image info)
"""
        assert (image_url is not None) or (image_path is not None), 'At least one of image_path or image_url must be provided'
# Identify image id
if image_id is None:
image_id = self.next_image_id
self.next_image_id += 1
else:
assert isinstance(image_id, int), 'Image id if provided must be an integer, got {}'.format(type(image_id))
assert (image_id not in self.image_infos) or force_overwrite, 'Image id {} already exists, consider overwrite'.format(image_id)
self.next_image_id = max(self.next_image_id, image_id) + 1
image_info = {
'id' : image_id,
'image_path' : image_path,
'image_url' : image_url,
'width' : width,
'height' : height
}
# Store all required info
self.image_infos[image_id] = image_info
self.img_to_ann[image_id] = []
return image_info
def set_ann(
self,
image_id,
bbox,
class_name=None,
class_id=None,
segmentation=None
):
""" Sets a single image detection annotation, set_classes has to be ran in advanced
Args
image_id : Image id associated to this detection
bbox : Bounding box for detection
class_name : Class name of object
class_id : Class id of object
segmentation: RLE of the object mask
"""
assert (class_name is not None) or (class_id is not None), 'Either class_name or class_id must be present'
if class_name is not None:
assert class_name in self.name_to_class_info
class_id = self.name_to_label(class_name)
else:
assert class_id in self.id_to_class_info
class_name = self.label_to_name(class_id)
# Prepare ann_info
ann_id = len(self.ann_infos)
ann_info = {
'id' : ann_id,
'image_id' : image_id,
'bbox' : list(bbox),
'class_id' : class_id,
'class_name' : class_name,
'segmentation': segmentation
}
# Store ann info
self.ann_infos[ann_id] = ann_info
self.img_to_ann[image_id] += [ann_id]
return ann_info
###########################################################################
#### Dataset editor
|
<filename>4-1.Seq2Seq/Seq2Seq-Tensor.py<gh_stars>1000+
'''
code by <NAME>(<NAME>) @graykode
reference : https://github.com/golbin/TensorFlow-Tutorials/blob/master/10%20-%20RNN/03%20-%20Seq2Seq.py
'''
import tensorflow as tf
import numpy as np
tf.reset_default_graph()
# S: Symbol that shows starting of decoding input
# E: Symbol that shows starting of decoding output
# P: Symbol that will fill in blank sequence if current batch data size is shorter than time steps
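# Example: with n_step = 5 the pair ['man', 'women'] becomes encoder input 'manPP',
# decoder input 'Swomen' and decoder target 'womenE' (see make_batch below).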
char_arr = [c for c in 'SEPabcdefghijklmnopqrstuvwxyz']
num_dic = {n: i for i, n in enumerate(char_arr)}
seq_data = [['man', 'women'], ['black', 'white'], ['king', 'queen'], ['girl', 'boy'], ['up', 'down'], ['high', 'low']]
# Seq2Seq Parameter
n_step = 5
n_hidden = 128
n_class = len(num_dic) # number of class(=number of vocab)
def make_batch(seq_data):
input_batch, output_batch, target_batch = [], [], []
for seq in seq_data:
for i in range(2):
seq[i] = seq[i] + 'P' * (n_step - len(seq[i]))
input = [num_dic[n] for n in seq[0]]
output = [num_dic[n] for n in ('S' + seq[1])]
target = [num_dic[n] for n in (seq[1] + 'E')]
input_batch.append(np.eye(n_class)[input])
output_batch.append(np.eye(n_class)[output])
target_batch.append(target)
return input_batch, output_batch, target_batch
# Model
enc_input = tf.placeholder(tf.float32, [None, None, n_class]) # [batch_size, max_len(=encoder_step), n_class]
dec_input = tf.placeholder(tf.float32, [None, None, n_class]) # [batch_size, max_len+1(=decoder_step) (because of 'S' or 'E'), n_class]
targets = tf.placeholder(tf.int64, [None, None]) # [batch_size, max_len+1], not one-hot
with tf.variable_scope('encode'):
enc_cell = tf.nn.rnn_cell.BasicRNNCell(n_hidden)
enc_cell = tf.nn.rnn_cell.DropoutWrapper(enc_cell, output_keep_prob=0.5)
_, enc_states = tf.nn.dynamic_rnn(enc_cell, enc_input, dtype=tf.float32)
# encoder state will go to decoder initial_state, enc_states : [batch_size, n_hidden(=128)]
with tf.variable_scope('decode'):
dec_cell = tf.nn.rnn_cell.BasicRNNCell(n_hidden)
dec_cell = tf.nn.rnn_cell.DropoutWrapper(dec_cell, output_keep_prob=0.5)
outputs, _ = tf.nn.dynamic_rnn(dec_cell, dec_input, initial_state=enc_states, dtype=tf.float32)
# outputs : [batch_size, max_len+1, n_hidden(=128)]
model = tf.layers.dense(outputs, n_class, activation=None) # model : [batch_size, max_len+1, n_class]
cost = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=model, labels=targets))
optimizer = tf.train.AdamOptimizer(0.001).minimize(cost)
# Training
sess = tf.Session()
sess.run(tf.global_variables_initializer())
input_batch, output_batch, target_batch = make_batch(seq_data)
for epoch in range(5000):
_, loss = sess.run([optimizer, cost], feed_dict={enc_input: input_batch, dec_input: output_batch, targets: target_batch})
if (epoch + 1)%1000 == 0:
print('Epoch:', '%04d' % (epoch + 1), 'cost =', '{:.6f}'.format(loss))
# Test
def translate(word):
seq_data = [word, 'P' * len(word)]
input_batch, output_batch, _ = make_batch([seq_data])
prediction = tf.argmax(model, 2)
result = sess.run(prediction, feed_dict={enc_input: input_batch, dec_input: output_batch})
decoded = [char_arr[i] for i in result[0]]
end = decoded.index('E')
translated = ''.join(decoded[:end])
return translated.replace('P','')
print('test')
print('man ->', translate('man'))
print('mans ->', translate('mans'))
print('king ->', translate('king'))
print('black ->', translate('black'))
print('upp ->', translate('upp'))
|
# Copyright (c) 2011, <NAME> <<EMAIL>>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the authors nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL ANDRES MOREIRA BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
from distutils.core import setup, Extension
from distutils import ccompiler
version = '0.5'
long_description = """
Python bindings for the snappy compression library from Google.
More details about Snappy library: http://code.google.com/p/snappy
"""
EXTRA_OPT=0
if "--extra-optimization" in sys.argv:
    # Enable extra compiler optimizations requested via the custom flag
EXTRA_OPT=1
sys.argv.remove("--extra-optimization")
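# Pick compiler flags per toolchain; the custom --extra-optimization switch above
# has already been stripped from sys.argv, so distutils never sees it.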
if ccompiler.get_default_compiler() == "msvc":
extra_compile_args = ["/Wall"]
if EXTRA_OPT:
extra_compile_args.insert(0, "/O2")
else:
extra_compile_args.insert(0, "/Ot")
else:
extra_compile_args = ["-Wall", "-DFORTIFY_SOURCE=2", "-fstack-protector"]
if EXTRA_OPT:
extra_compile_args.insert(0, "-march=native")
extra_compile_args.insert(0, "-O3")
else:
extra_compile_args.insert(0, "-O2")
snappymodule = Extension(
'_snappy',
libraries=['snappy'],
sources=['src/snappymodule.cc', 'src/crc32c.c'],
extra_compile_args = extra_compile_args
)
setup(
name='python-snappy',
version=version,
author='<NAME>',
author_email='<EMAIL>',
url='http://github.com/andrix/python-snappy',
description='Python library for the snappy compression library from Google',
long_description=long_description,
keywords='snappy, compression, google',
license='BSD',
classifiers=['Development Status :: 4 - Beta',
'Topic :: Internet',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries',
'Topic :: System :: Archiving :: Compression',
'License :: OSI Approved :: BSD License',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Operating System :: MacOS :: MacOS X',
# 'Operating System :: Microsoft :: Windows', -- Not tested yet
'Operating System :: POSIX',
'Programming Language :: Python :: 2.5',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.0',
'Programming Language :: Python :: 3.1',
'Programming Language :: Python :: 3.2',
],
packages=[],
package_dir={'': 'src'},
ext_modules=[snappymodule]
)
|
<reponame>abdellaui/des_pipeline_ui
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'main2.ui'
#
# Created by: PyQt5 UI code generator 5.9.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(1349, 835)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(MainWindow.sizePolicy().hasHeightForWidth())
MainWindow.setSizePolicy(sizePolicy)
MainWindow.setMinimumSize(QtCore.QSize(0, 0))
MainWindow.setMaximumSize(QtCore.QSize(16777215, 16777215))
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setMinimumSize(QtCore.QSize(850, 835))
self.centralwidget.setMaximumSize(QtCore.QSize(16777215, 16777215))
self.centralwidget.setObjectName("centralwidget")
self.horizontalLayout = QtWidgets.QHBoxLayout(self.centralwidget)
self.horizontalLayout.setObjectName("horizontalLayout")
self.scrollEingabe = QtWidgets.QScrollArea(self.centralwidget)
self.scrollEingabe.setMinimumSize(QtCore.QSize(500, 0))
self.scrollEingabe.setWidgetResizable(True)
self.scrollEingabe.setObjectName("scrollEingabe")
self.scrollAreaWidgetContents_2 = QtWidgets.QWidget()
self.scrollAreaWidgetContents_2.setGeometry(QtCore.QRect(0, 0, 498, 815))
self.scrollAreaWidgetContents_2.setObjectName("scrollAreaWidgetContents_2")
self.scrollEingabe.setWidget(self.scrollAreaWidgetContents_2)
self.horizontalLayout.addWidget(self.scrollEingabe)
self.scrollAusgabe = QtWidgets.QScrollArea(self.centralwidget)
self.scrollAusgabe.setMinimumSize(QtCore.QSize(825, 0))
self.scrollAusgabe.setWidgetResizable(True)
self.scrollAusgabe.setObjectName("scrollAusgabe")
self.scrollAreaWidgetContents = QtWidgets.QWidget()
self.scrollAreaWidgetContents.setGeometry(QtCore.QRect(0, 0, 823, 815))
self.scrollAreaWidgetContents.setObjectName("scrollAreaWidgetContents")
self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.scrollAreaWidgetContents)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.top = QtWidgets.QFrame(self.scrollAreaWidgetContents)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.top.sizePolicy().hasHeightForWidth())
self.top.setSizePolicy(sizePolicy)
self.top.setMinimumSize(QtCore.QSize(800, 225))
self.top.setMaximumSize(QtCore.QSize(800, 225))
self.top.setStyleSheet("background-image: url(:/Bild/img/top.png);")
self.top.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.top.setFrameShadow(QtWidgets.QFrame.Raised)
self.top.setObjectName("top")
self.eingang_ip_button = QtWidgets.QPushButton(self.top)
self.eingang_ip_button.setGeometry(QtCore.QRect(115, 111, 174, 59))
palette = QtGui.QPalette()
self.eingang_ip_button.setPalette(palette)
self.eingang_ip_button.setContextMenuPolicy(QtCore.Qt.DefaultContextMenu)
self.eingang_ip_button.setAutoFillBackground(False)
self.eingang_ip_button.setStyleSheet("background-image: url(:/Bild/img/btnbg.png);")
self.eingang_ip_button.setObjectName("eingang_ip_button")
self.klartext_button = QtWidgets.QPushButton(self.top)
self.klartext_button.setGeometry(QtCore.QRect(131, 25, 141, 31))
self.klartext_button.setObjectName("klartext_button")
self.schlussel_k_button = QtWidgets.QPushButton(self.top)
self.schlussel_k_button.setGeometry(QtCore.QRect(534, 24, 141, 31))
self.schlussel_k_button.setObjectName("schlussel_k_button")
self.pc_1_button = QtWidgets.QPushButton(self.top)
self.pc_1_button.setGeometry(QtCore.QRect(532, 110, 141, 31))
self.pc_1_button.setObjectName("pc_1_button")
self.verticalLayout_2.addWidget(self.top)
self.bottom = QtWidgets.QFrame(self.scrollAreaWidgetContents)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.bottom.sizePolicy().hasHeightForWidth())
self.bottom.setSizePolicy(sizePolicy)
self.bottom.setMinimumSize(QtCore.QSize(800, 295))
self.bottom.setMaximumSize(QtCore.QSize(800, 295))
self.bottom.setStyleSheet("background-image: url(:/Bild/img/bottom.png);")
self.bottom.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.bottom.setFrameShadow(QtWidgets.QFrame.Raised)
self.bottom.setObjectName("bottom")
self.ausgang_ip_1_button = QtWidgets.QPushButton(self.bottom)
self.ausgang_ip_1_button.setGeometry(QtCore.QRect(115, 116, 174, 60))
self.ausgang_ip_1_button.setObjectName("ausgang_ip_1_button")
self.verticalLayout_2.addWidget(self.bottom)
self.scrollAusgabe.setWidget(self.scrollAreaWidgetContents)
self.horizontalLayout.addWidget(self.scrollAusgabe)
MainWindow.setCentralWidget(self.centralwidget)
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
self.eingang_ip_button.setText(_translate("MainWindow", "Eingang IP"))
self.klartext_button.setText(_translate("MainWindow", "Klartext x"))
self.schlussel_k_button.setText(_translate("MainWindow", "Schlussel k"))
self.pc_1_button.setText(_translate("MainWindow", "PC-1"))
self.ausgang_ip_1_button.setText(_translate("MainWindow", "Ausgang IP_1"))
import Bilder_rc
|
<filename>tests/tests_config.py
#
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import os
from adh_deployment_manager.config import Config
from adh_deployment_manager.query import Parameters
import adh_deployment_manager.utils as utils
# define sample config used for running test against
_SAMPLE_CONFIG_PATH = "sample_config.yml"
_CONFIG = {
"queries_setup": [
{
"queries": ["sample_query_1_1", "sample_query_1_2"],
"parameters": {
"parameter_1": {
"type": "STRING",
"values": "1"
}
},
"wait": "each",
},
{
"queries": ["sample_query_2_1", "sample_query_2_2"],
"parameters": {
"parameter_atomic": {
"type": "STRING",
"values": "1"
},
"parameter_array": {
"type": "STRING",
"values": ["1", "2"]
}
},
"wait": "block",
"execution_mode": "batch"
},
{
"queries": ["sample_query_3"],
"execution_mode": "normal",
"replace": {
"key": "value"
}
},
]
}
# Define fixtures to be used by pytest
# Define common setup that contains list of queries
@pytest.fixture
def setup():
config = Config(_SAMPLE_CONFIG_PATH, os.path.dirname(__file__))
config.config = _CONFIG
return config.extract_queries_setup()
# define setup that does not contain valid key 'queries'
@pytest.fixture
def broken_setup():
return {
"queries_setup": [
{
"query": ["sample_query_1_1", "sample_query_1_2"],
"parameters": {
"parameter_1": {
"type": "STRING",
"values": "1"
}
},
"wait": "each",
},
]
}
### TESTS
# verify that reading config returns required set of elements to run script
def test_get_config(setup):
config = Config(_SAMPLE_CONFIG_PATH, os.path.dirname(__file__))
assert set(["developer_key", "bq_project", "bq_dataset",
"queries_setup"]).issubset(config.get_config().keys())
# extract_queries_setup returns correct 'wait' modes
@pytest.mark.parametrize(
"expected,query",
[
(True, "sample_query_1_1"), # wait on each query in the block
(True, "sample_query_1_2"), # wait on each query in the block
(False, "sample_query_2_1"), # don't wait for first query in the block
(True, "sample_query_2_2"), # wait on each query in the block
(False, "sample_query_3") # don't wait for query
])
def test_extract_queries_setup_wait_mode(setup, expected, query):
assert expected == setup[query]["wait"]
# extract_queries_setup returns None if replacements aren't specified
def test_extract_queries_setup_empty_replacements(setup):
assert setup["sample_query_1_1"].get("replacements") is None
# extract_queries_setup returns not empty result if replacements are specified
def test_extract_queries_setup_not_empty_replacements(setup):
assert setup["sample_query_3"].get("replacements") is not None
# extract_queries_setup returns dict if replacements are specified
def test_extract_queries_setup_replacements_type(setup):
assert isinstance(setup["sample_query_3"].get("replacements"), dict)
# extract_queries_setup return correct 'batch' modes
@pytest.mark.parametrize(
"expected,query",
[
(False,
"sample_query_1_1"), # batch mode not specified, False by default
(True, "sample_query_2_1") # batch mode specified as 'batch' -> True
])
def test_extract_queries_setup_execution_mode(setup, expected, query):
assert expected == setup[query]["batch_mode"]
# broken setup - raises KeyError
@pytest.mark.skip(reason="Implement KeyError expection in a method")
def test_extract_queries_setup_broken_setup(broken_setup):
config = Config(_SAMPLE_CONFIG_PATH, os.path.dirname(__file__))
config.queries = broken_setup
with pytest.raises(KeyError):
queries_setup = config.extract_queries_setup()
# _define_query_parameters returns correct values when parsing atomic parameters
def test_define_query_parameters_single():
parameters = {"parameter_atomic": {"type": "STRING", "values": "1"}}
defined_parameters = Parameters.define_query_parameters(parameters)
assert defined_parameters.get("PARAMETER_ATOMIC") == {
"type": {
"type": "STRING"
}
}
|
<gh_stars>1-10
"""
@author: <NAME>
@brief: graph utils for optimized code generation.
"""
import matplotlib.pyplot as plt
import sympy
"""
Collects the nodes whose in-degree is at least thresh_in and whose out-degree exceeds thresh_out, and returns them as (in_degree, node) pairs sorted by in-degree in descending order.
"""
def sorted_nodes_by_in_degree(expr_g,thresh_in,thresh_out=0):
G=expr_g._G_
n_list=list()
for n in G.nodes():
if(G.in_degree(n)>=thresh_in and G.out_degree(n) >thresh_out ):
n_list.append((G.in_degree(n),n))
n_list.sort(key=lambda x : x[0],reverse=True)
return n_list
"""
expr = (x+y + x*y)**2 + x*y
print(gutils.expr_term_rewrite(expr,x*y,Symbol('DENDRO_0')))
expr: sympy expression
sub_expr: sub expression to rewrite
replace_expr: replacement expression; every occurrence of sub_expr in expr is replaced by replace_expr and expr is rebuilt.
"""
def expr_term_rewrite(expr,sub_expr,replace_expr):
def _preorder_search(expr,arg_list,sub_expr,replace_expr):
#print("befor : ", arg_list)
for i,arg in enumerate(arg_list):
if(arg == sub_expr):
arg_list[i] = replace_expr
else:
aa_list = list(arg.args)
if len(aa_list) > 0:
_preorder_search(arg,aa_list,sub_expr,replace_expr)
aa = arg.func(* tuple(aa_list))
arg_list[i] = aa
#print("after: ",arg_list)
arg_list = list(expr.args)
_preorder_search(expr,arg_list,sub_expr,replace_expr)
return expr.func(*tuple(arg_list))
"""
returns True if sub_expr is contained in expr
"""
def is_sub_expr(expr,sub_expr):
for e in sympy.preorder_traversal(expr):
if e == sub_expr:
return True
return False
"""
Sub-expression elimination based on high in-node degree.
"""
def high_node_cse(expr_g, reuse_thresh, rename_prefix="DENDRO_", dep_thresh=0):
G = expr_g._G_
expr_dict = dict(expr_g._sympy_expr)
n_list=list()
for n in G.nodes():
if(G.in_degree(n)>=reuse_thresh and G.out_degree(n) >dep_thresh ):
n_list.append((G.in_degree(n),n))
n_list.sort(key=lambda x : x[0],reverse=True)
cse_list=list()
for node in n_list:
expr = sympy.parse_expr(str(node[1]))
cse_list.append(expr)
# original list of expressions that we will eliminate
cse_renamed_list = list(cse_list)
# tuple list containing replacement (j,i) expression_i is a subset in expression_j
replace_idx=list()
for i,expr_i in enumerate(cse_list):
for j,expr_j in enumerate(cse_list):
if (i < j and is_sub_expr(expr_j,expr_i)):
replace_idx.append((j,i))
# elimination of the CSE in the selected sub expressions.
for (expr_i,sub_i) in replace_idx:
cse_renamed_list[expr_i] = expr_term_rewrite(cse_renamed_list[expr_i],cse_renamed_list[sub_i],sympy.Symbol(rename_prefix + str(sub_i)))
# now replace sub expressions in the actual main expressions.
for (k,expr) in expr_dict.items():
print("expr renaming for ", k)
for i,sub_expr in enumerate(cse_list):
if is_sub_expr(expr,sub_expr):
expr=expr_term_rewrite(expr,sub_expr,sympy.Symbol(rename_prefix + str(i)))
expr_dict[k]=expr
#for (k,expr) in expr_dict.items():
# print("expr name : %s expression %s " %(k,expr))
cse_dict=dict()
for i,expr in enumerate(cse_renamed_list):
cse_dict[rename_prefix+str(i)]=expr
return [cse_dict,expr_dict]
"""
Generate sequential C code based on high-in-degree-node CSE.
expr_g : expression graph
replace_dict : variable renaming to perform during code generation.
custom_rename_func: function to perform any additional renames.
cse_mode (=None): if None, the in-node-degree CSE metric is used.
reuse_thresh: reuse threshold
"""
def generate_cpu_c_code(expr_g, reuse_thresh, fname, replace_dict={}, custom_rename_func=None, user_func={}, cse_mode=None):
cse_dict = dict()
exp_dict = dict()
fo = open(fname, "w")
if cse_mode==None:
[cse_dict,exp_dict] = high_node_cse(expr_g,reuse_thresh,rename_prefix="DENDRO_",dep_thresh=0)
print('// Dendro: printing temp variables',file=fo)
for (l,r) in cse_dict.items():
print("const double %s = " %l, end="",file=fo)
c_code=sympy.ccode(r,user_functions=user_func)
for (f,r) in replace_dict.items():
c_code=c_code.replace(str(f),str(r))
if custom_rename_func!= None:
c_code = custom_rename_func(c_code)
print(c_code+";",file=fo)
print('// Dendro: printing RHS variables\n\n\n',file=fo)
for (l,r) in exp_dict.items():
c_code=sympy.ccode(r,user_functions=user_func)
for (f,r) in replace_dict.items():
c_code=c_code.replace(str(f),str(r))
if custom_rename_func!= None:
c_code = custom_rename_func(c_code)
print(str(l) + " = " + c_code + ";",file=fo)
print("",file=fo)
fo.close()
"""
Computes the minimum number of registers needed to evaluate an expression.
"""
# def min_registers(expr):
# def __pre_traversal(self,expr,reg_count):
# if isinstance(expr.func, sympy.core.function.UndefinedFunction):
# sym_name=str(expr.func)
# for a in expr.args:
# sym_name = sym_name + '_' + str(a)
# reg_count.append(1)
# else if isinstance(expr.func, sympy.core.)
# else:
# node_list.append(expr)
# for arg in expr.args:
# if isinstance(arg.func, sympy.core.function.UndefinedFunction):
# f=arg.func
# sym_name=str(f)
# for a in arg.args:
# sym_name = sym_name + '_' + str(a)
# node_list.append(sympy.Symbol(sym_name))
# edge_list.append((expr,sympy.Symbol(sym_name)))
# else:
# edge_list.append((expr,arg))
# self.__pre_traversal(arg,node_list,edge_list)
|
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import glob
import itertools
import landvoc
import time
import io
import json
from llr.LandLibraryResource import LandLibraryResource
import llr.utils as llrutils
import utils
from langdetect import detect
from langdetect.lang_detect_exception import LangDetectException
LANDVOC = landvoc.LandVoc()
llrs=set()
formats_set= set()
def create_llr_from_lareferencia_record(lareferencia_record):
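    # Maps one LA Referencia record (parsed JSON) onto a LandLibraryResource;
    # returns None as soon as the record turns out not to be open access.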
llr = LandLibraryResource()
#ID. Only one.
internal_id = lareferencia_record["id"]
llr.set_id(u"LaReferencia:"+internal_id)
#title. One
title = lareferencia_record["title"]
llr.set_title(title)
#subtitle. Zero or One
if "subtitle" in lareferencia_record:
subtitle = lareferencia_record["subtitle"]
llr.set_subtitle(subtitle)
# description. Zero or One
if "summary" in lareferencia_record:
description = lareferencia_record["summary"][0]
if description:
llr.set_description(description)
#Language. Zero, one or more
langs_cleared = set()
if "languages" in lareferencia_record:
languages = lareferencia_record["languages"]
for lang in languages:
langs_cleared.add(llrutils.getISO639_1code_from_ISO639_3code(lang))
langs_cleared = set(filter(None,langs_cleared))
if not langs_cleared:
try:
potential_lang = detect(title.lower())
if potential_lang in ["es", "pt", "en"]:
langs_cleared.add(potential_lang)
except LangDetectException:
pass
llr.set_languages(langs_cleared)
#author. One or more
authors = lareferencia_record["primaryAuthors"]
if "secondaryAuthors" in lareferencia_record:
authors+=lareferencia_record["secondaryAuthors"]
llr.set_authors(authors)
#corporate_authors. Could be more than one
if "corporateAuthors" in lareferencia_record:
llr.set_corporate_authors(lareferencia_record["corporateAuthors"])
#publishers. Zero, one or more
if "dc.publisher.none.fl_str_mv" in lareferencia_record["rawData"]:
llr.set_publishers(filter(None,{utils.getPublisher(pub) for pub in lareferencia_record["rawData"]["dc.publisher.none.fl_str_mv"]}))
#type. One
types= set()
formats = lareferencia_record["formats"]
types.add(utils.getLLR_type(formats[0]))
if "dc.type.none.fl_str_mv" in lareferencia_record["rawData"]:
for f in lareferencia_record["rawData"]["dc.type.none.fl_str_mv"]:
if f=="Artículos de congreso":
types.add("Conference Papers & Reports")
if f=="Articulo evaluado por dos pares" or f=='artículos evaluados por pares' or f=='Artículo evaluado por pares ciegos y producto de investigación' or f=='Artículo evaluado por pares' or f=="Art?culo revisado por pares" or f=='Artículo revisado por pares':
types.add("Peer-reviewed publication")
llr.set_types(list(types))
#number of pages. Only one
#If there is a last page, there is an initial page
if "dc.description.lastpage.pt.fl_txt_mv" in lareferencia_record["rawData"]:
lastpage = lareferencia_record["rawData"]["dc.description.lastpage.pt.fl_txt_mv"][0]
initialpage = lareferencia_record["rawData"]["dc.description.initialpage.pt.fl_txt_mv"][0]
number_pages = int(lastpage) - int(initialpage)
if number_pages:
llr.set_number_pages(number_pages)
#date.
publicationDates = lareferencia_record["publicationDates"][0]
best_date = publicationDates
if "dc.date.none.fl_str_mv" in lareferencia_record["rawData"]:
for potentialDate in lareferencia_record["rawData"]["dc.date.none.fl_str_mv"]:
if publicationDates in potentialDate:
best_date = utils.clean_date(potentialDate.split("T")[0])
llr.set_date(utils.clean_date(best_date))
#original url. Only one
lareferencia_url = "http://www.lareferencia.info/vufind/Record/"+internal_id
llr.set_original_url(lareferencia_url)
#resource url. Only one. Remove locahost
resource_url = None
if "bitstream.url.fl_str_mv" in lareferencia_record["rawData"]:
potential_urls = lareferencia_record["rawData"]["bitstream.url.fl_str_mv"]
if len(potential_urls)==1 and ("://localhost" not in potential_urls[0]):
resource_url = potential_urls[0]
else:
for url in potential_urls:
if "://localhost" in url:
continue
if url.endswith(".pdf") or url.endswith(".PDF"):
resource_url = url
if not resource_url and ("://localhost" not in url):
                    resource_url = potential_urls[0]  # arbitrary: take the first one
elif "url" in lareferencia_record["rawData"]:
resource_url = lareferencia_record["rawData"]["url"][0]
llr.set_resource_url(resource_url)
#License
license_llr = None
copyright_details = None
if "dc.rights.none.fl_str_mv" in lareferencia_record["rawData"]:
for potential_license in lareferencia_record["rawData"]["dc.rights.none.fl_str_mv"]:
if not llrutils.checkOpenAccess(potential_license):
return None # STOP. Return None
if "info:eu-repo/semantics/openAccess" in potential_license:
copyright_details = "info:eu-repo/semantics/openAccess : Open Access, this refers to access without restrictions, and without financial incentives. Access to the resource is gained directly, without any obstacles." #From https://wiki.surfnet.nl/display/standards/info-eu-repo/#info-eu-repo-AccessRights if "rights_invalid_str_mv" in lareferencia_record["rawData"]:
if "rights_invalid_str_mv" in lareferencia_record["rawData"]:
for potential_license in lareferencia_record["rawData"]["rights_invalid_str_mv"]:
if not llrutils.checkOpenAccess(potential_license):
return None # STOP. Return None
if "Copyright" in potential_license:
copyright_details = potential_license
if "creativecommons.org" in potential_license:
license_llr = llrutils.getCCLicenseAcronym(potential_license)
if "info:eu-repo/semantics/openAccess" in potential_license and not copyright_details:
copyright_details = "info:eu-repo/semantics/openAccess : Open Access, this refers to access without restrictions, and without financial incentives. Access to the resource is gained directly, without any obstacles." #From https://wiki.surfnet.nl/display/standards/info-eu-repo/#info-eu-repo-AccessRights if "rights_invalid_str_mv" in lareferencia_record["rawData"]:
if "dc.rights.driver.fl_str_mv" in lareferencia_record["rawData"]:
for potential_license in lareferencia_record["rawData"]["dc.rights.driver.fl_str_mv"]:
if not llrutils.checkOpenAccess(potential_license):
return None # STOP. Return None
if "Copyright" in potential_license:
copyright_details = potential_license
if "creativecommons.org" in potential_license:
license_llr = llrutils.getCCLicenseAcronym(potential_license)
if "info:eu-repo/semantics/openAccess" in potential_license and not copyright_details:
copyright_details = "info:eu-repo/semantics/openAccess : Open Access, this refers to access without restrictions, and without financial incentives. Access to the resource is gained directly, without any obstacles." #From https://wiki.surfnet.nl/display/standards/info-eu-repo/#info-eu-repo-AccessRights if "rights_invalid_str_mv" in lareferencia_record["rawData"]:
llr.set_license(license_llr)
llr.set_copyright_details(copyright_details)
#data provider
llr.set_data_provider(u"LA Referencia")
#image
#llr.set_image("")
#keywords
potential_subjects=set()
for subject in lareferencia_record["subjects"]:
potential_subjects.add(LANDVOC.get_EnglishPrefLabel(subject[0],lang="es"))
potential_subjects.add(LANDVOC.get_EnglishPrefLabel(subject[0],lang="en"))
potential_subjects.add(LANDVOC.get_EnglishPrefLabel(subject[0],lang="pt"))
concepts = [unicode(s, "utf-8") for s in filter(None,potential_subjects)]
themes=LANDVOC.get_fixed_themes(concepts)
oacs=LANDVOC.get_fixed_oacs(concepts)
llr.set_concepts(concepts);
llr.set_themes(themes);
llr.set_overarching_categories(oacs);
#geographical focus. list
countries_focus = set()
countries_focus |= set(utils.getPlaceET_fromText_NLTK(llr.title)) | set(utils.getPlaceET_fromText_GeoText(llr.title))
countries_focus |= set(utils.getPlaceET_fromText_NLTK(llr.description)) | set(utils.getPlaceET_fromText_GeoText(llr.description))
for subject in lareferencia_record["subjects"]:
countries_focus |= set(utils.getPlaceET_fromText_NLTK(subject[0])) | set(utils.getPlaceET_fromText_GeoText(subject[0]))
llr.set_geographical_focus(countries_focus, set())
return llr
###################################################################################
def process_lareferencia_record(lareferencia_record):
record_title = lareferencia_record["title"]
if record_title not in all_records_title:
all_records_title.add(record_title) #debug
# TODO FILTER OUT
llrs.add(create_llr_from_lareferencia_record(lareferencia_record))
counter = 0
def process_lareferencia_json_file(filename):
    with open(filename) as fd:  # io.open was previously used here for encoding reasons
data = json.load(fd)
for record in data:
global counter
counter+=1
if (counter%100 ==0):
pass
#print "Analyzing record #"+str(counter)
process_lareferencia_record(record)
def generate_csv(llrs, filename):
with io.open(filename,'w', encoding='utf-8-sig') as csv_file: #UTF-8 BOM
#csv_file.write((u'\ufeff').encode('utf8')) #BOM
headers = u"ID;Title;Subtitle;Abstract/Description;Authors;Corporate authors;Publishers;Data provider;Publicaton date (YYYY/MM);Language;Related concepts;Themes;Overarching Categories;Geographical focus;Resource types;Link to the original website;Link to the publication;Thumbnail;License;Copyright details;Pages\n"
csv_file.write(headers)
for llr in llrs:
if (not llr.get_description()) or (len(llr.get_concepts())<1) or (len(llr.get_geographical_focus())<1) or (len(llr.get_concepts())==1 and (llr.get_concepts()[0] in ["assessment", "territory", "land", "poverty", "development"])):
pass
else:
csv_file.write(llr.as_csv_line())
csv_file.close()
lareferencia_files = glob.glob("results/*.json")
#top5 = itertools.islice(agris_files, 3)
#selected_resources = set()
all_records_title = set()
for lareferencia_file in lareferencia_files:
#print "----------------"
print lareferencia_file
process_lareferencia_json_file(lareferencia_file)
#print "ALL-Land-related resources="+str(len(all_records_title))
#print "FILTERED-Land-related resources="+str(len(selected_resources))
#print "LLRS="+str(len(llrs))
#print "----------------"
llrs = filter(None,llrs)
timestr = time.strftime("%Y%m%d-%H%M%S")
filename_output = timestr+"-lareferencia.csv"
generate_csv(llrs, filename_output)
|
<reponame>larrycameron80/python-novaclient
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Network interface.
"""
from novaclient import base
from novaclient import exceptions
from novaclient.openstack.common.gettextutils import _
class Network(base.Resource):
"""
A network.
"""
HUMAN_ID = False
NAME_ATTR = "label"
def __repr__(self):
return "<Network: %s>" % self.label
def delete(self):
self.manager.delete(self)
class NetworkManager(base.ManagerWithFind):
"""
Manage :class:`Network` resources.
"""
resource_class = Network
def list(self):
"""
Get a list of all networks.
:rtype: list of :class:`Network`.
"""
return self._list("/os-networks", "networks")
def get(self, network):
"""
Get a specific network.
:param network: The ID of the :class:`Network` to get.
:rtype: :class:`Network`
"""
return self._get("/os-networks/%s" % base.getid(network),
"network")
def delete(self, network):
"""
Delete a specific network.
:param network: The ID of the :class:`Network` to delete.
"""
self._delete("/os-networks/%s" % base.getid(network))
def create(self, **kwargs):
"""
Create (allocate) a network. The following parameters are
optional except for label; cidr or cidr_v6 must be specified, too.
:param label: str
:param bridge: str
:param bridge_interface: str
:param cidr: str
:param cidr_v6: str
:param dns1: str
:param dns2: str
:param fixed_cidr: str
:param gateway: str
:param gateway_v6: str
:param multi_host: str
:param priority: str
:param project_id: str
:param vlan: int
:param vlan_start: int
:param vpn_start: int
        :rtype: :class:`Network`
"""
body = {"network": kwargs}
return self._create('/os-networks', body, 'network')
def disassociate(self, network, disassociate_host=True,
disassociate_project=True):
"""
Disassociate a specific network from project and/or host.
:param network: The ID of the :class:`Network`.
:param disassociate_host: Whether to disassociate the host
:param disassociate_project: Whether to disassociate the project
"""
if disassociate_host and disassociate_project:
body = {"disassociate": None}
elif disassociate_project:
body = {"disassociate_project": None}
elif disassociate_host:
body = {"disassociate_host": None}
else:
raise exceptions.CommandError(
_("Must disassociate either host or project or both"))
self.api.client.post("/os-networks/%s/action" %
base.getid(network), body=body)
def associate_host(self, network, host):
"""
Associate a specific network with a host.
:param network: The ID of the :class:`Network`.
:param host: The name of the host to associate the network with
"""
self.api.client.post("/os-networks/%s/action" %
base.getid(network),
body={"associate_host": host})
def associate_project(self, network):
"""
Associate a specific network with a project.
The project is defined by the project authenticated against
:param network: The ID of the :class:`Network`.
"""
self.api.client.post("/os-networks/add", body={"id": network})
def add(self, network=None):
"""
Associates the current project with a network. Network can be chosen
automatically or provided explicitly.
:param network: The ID of the :class:`Network` to associate (optional).
"""
self.api.client.post(
"/os-networks/add",
body={"id": base.getid(network) if network else None})
|
# In[] Libs
import numpy as np
import matplotlib.pyplot as plt
from neural_network_classes.layers import Dense
from neural_network_classes.activations import ReLU, Softmax
from neural_network_classes.error_estimation import CategoricalCrossentropy, Accuracy
from utils.generate_dataset import generate_spiral_data, generate_vertical_data
# In[] Run the App
if __name__ == "__main__":
n_classes = 10
# X, y = generate_spiral_data(n_points_per_class=200, n_classes=n_classes, visualization=False)
X, y = generate_vertical_data(n_points_per_class=200, n_classes=n_classes, visualization=True)
    n_inputs_1 = X.shape[1]  # number of input features per sample (2 for these 2-D toy datasets)
n_neurons_1 = 10
n_inputs_2 = n_neurons_1
n_neurons_2 = n_classes*2
n_inputs_3 = n_neurons_2
n_neurons_3 = n_classes
dense_1 = Dense(n_inputs=n_inputs_1, n_neurons=n_neurons_1)
activation_1 = ReLU()
dense_2 = Dense(n_inputs=n_inputs_2, n_neurons=n_neurons_2)
activation_2 = Softmax()
dense_3 = Dense(n_inputs=n_inputs_3, n_neurons=n_neurons_3)
activation_3 = Softmax()
loss_func = CategoricalCrossentropy()
acc_func = Accuracy()
n_epochs = 100000
loss_main = 9999
best_dense_1_weights = dense_1.weights.copy()
best_dense_1_biases = dense_1.biases.copy()
best_dense_2_weights = dense_2.weights.copy()
best_dense_2_biases = dense_2.biases.copy()
best_dense_3_weights = dense_3.weights.copy()
best_dense_3_biases = dense_3.biases.copy()
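    # What follows is a random-search ("hill climbing") training loop: on every
    # iteration each layer's weights and biases receive a small random perturbation,
    # the network is run forward, and the perturbation is kept only if the
    # categorical cross-entropy loss improved; otherwise the previous best
    # parameters are restored. No gradients are computed anywhere in this script.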
for idx in range(n_epochs):
tmp = np.random.randn(n_inputs_1, n_neurons_1)
dense_1.weights += tmp / np.max(tmp)
tmp = np.random.randn(1, n_neurons_1)
dense_1.biases += tmp / np.max(tmp)
tmp = np.random.randn(n_inputs_2, n_neurons_2)
dense_2.weights += tmp / np.max(tmp)
tmp = np.random.randn(1, n_neurons_2)
dense_2.biases += tmp / np.max(tmp)
tmp = np.random.randn(n_inputs_3, n_neurons_3)
dense_3.weights += tmp / np.max(tmp)
tmp = np.random.randn(1, n_neurons_3)
dense_3.biases += tmp / np.max(tmp)
dense_1.forward(X)
activation_1.forward(dense_1.y)
dense_2.forward(activation_1.y)
activation_2.forward(dense_2.y)
dense_3.forward(activation_2.y)
activation_3.forward(dense_3.y)
loss = loss_func.calculate(activation_3.y, y)
acc = acc_func.calculate(activation_3.y, y)
if loss < loss_main:
print(f"Learning improved at round {idx}. Loss: {loss}, Acc: {acc}")
best_dense_1_weights = dense_1.weights.copy()
best_dense_1_biases = dense_1.biases.copy()
best_dense_2_weights = dense_2.weights.copy()
best_dense_2_biases = dense_2.biases.copy()
best_dense_3_weights = dense_3.weights.copy()
best_dense_3_biases = dense_3.biases.copy()
loss_main = loss
else:
dense_1.weights = best_dense_1_weights.copy()
dense_1.biases = best_dense_1_biases.copy()
dense_2.weights = best_dense_2_weights.copy()
dense_2.biases = best_dense_2_biases.copy()
dense_3.weights = best_dense_3_weights.copy()
dense_3.biases = best_dense_3_biases.copy()
|
import os
import re
import json
def ltom(list):
map = dict()
i = 0
for name in list:
map[name] = i
i += 1
return map
# Wake up is not on all thermostats, so should only be included when supported
# https://www.ecobee.com/home/developer/api/documentation/v1/objects/Climate.shtml
# Should get this list from the thermostat
# https://www.ecobee.com/home/developer/api/documentation/v1/objects/Program.shtml
# And add unknown since some code relies on that name existing.
climateList = [
'away',
'home',
'sleep',
'smart1',
'smart2',
'smart3',
'smart4',
'smart5',
'smart6',
'smart7',
'vacation',
'smartAway',
'smartHome',
'demandResponse',
'unknown',
'wakeup',
]
climateMap = ltom(climateList)
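# Example of the mapping built above (illustrative):
#   ltom(['away', 'home', 'sleep']) -> {'away': 0, 'home': 1, 'sleep': 2}
# so climateMap['sleep'] == 2 and getMapName(climateMap, 2) == 'sleep'.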
# Removes invalid characters for ISY Node description
def get_valid_node_name(name):
# Only allow utf-8 characters
# https://stackoverflow.com/questions/26541968/delete-every-non-utf-8-symbols-froms-string
name = bytes(name, 'utf-8').decode('utf-8','ignore')
# Remove <>`~!@#$%^&*(){}[]?/\;:"'` characters from name
return re.sub(r"[<>`~!@#$%^&*(){}[\]?/\\;:\"']+", "", name)
def toC(tempF):
# Round to the nearest .5
return round(((tempF - 32) / 1.8) * 2) / 2
def toF(tempC):
# Round to nearest whole degree
return int(round(tempC * 1.8) + 32)
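# Quick sanity check of the conversions above (values computed by hand):
#   toC(72)   -> 22.0  (72 F is ~22.2 C, rounded to the nearest 0.5)
#   toF(22.0) -> 72    (rounded to the nearest whole degree)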
def getMapName(map,val):
val = int(val)
for name in map:
if int(map[name]) == val:
return name
def is_int(s):
try:
int(s)
return True
except ValueError:
return False
def make_file_dir(file_path):
directory = os.path.dirname(file_path)
if not os.path.exists(directory):
# TODO: Trap this?
os.makedirs(directory)
return True
def get_profile_info(logger):
pvf = 'profile/version.txt'
try:
with open(pvf) as f:
pv = f.read().replace('\n', '')
f.close()
except Exception as err:
logger.error('get_profile_info: failed to read file {0}: {1}'.format(pvf,err), exc_info=True)
pv = 0
return { 'version': pv }
def get_server_data(logger):
# Read the SERVER info from the json.
sfile = 'server.json'
try:
with open(sfile) as data:
serverdata = json.load(data)
except Exception as err:
logger.error('get_server_data: failed to read file {0}: {1}'.format(sfile,err), exc_info=True)
return False
data.close()
# Get the version info
try:
version = serverdata['credits'][0]['version']
except (KeyError, ValueError):
logger.info('Version not found in server.json.')
version = '0.0.0.0'
# Split version into two floats.
sv = version.split(".");
v1 = 0;
v2 = 0;
    if len(sv) == 1:
        v1 = int(sv[0])
    elif len(sv) > 1:
        v1 = float("%s.%s" % (sv[0], str(sv[1])))
        if len(sv) == 3:
            v2 = int(sv[2])
        elif len(sv) > 3:
            v2 = float("%s.%s" % (sv[2], str(sv[3])))
serverdata['version'] = version
serverdata['version_major'] = v1
serverdata['version_minor'] = v2
return serverdata
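# Illustrative server.json shape assumed by get_server_data() above (field names
# other than 'credits' and 'version' are hypothetical):
#
#   {
#       "credits": [
#           {"name": "ecobee-nodeserver", "version": "2.1.3"}
#       ]
#   }
#
# With that file, serverdata['version_major'] == 2.1 and
# serverdata['version_minor'] == 3.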
def get_profile_info(logger):
pvf = 'profile/version.txt'
try:
with open(pvf) as f:
pv = f.read().replace('\n', '')
except Exception as err:
logger.error('get_profile_info: failed to read file {0}: {1}'.format(pvf,err), exc_info=True)
pv = 0
return { 'version': pv }
|
<reponame>RubenJ01/blues_bot.py
import discord
from discord.ext import commands
from calcs.agility import Agility
from calcs.alchemy import Alchemy
from calcs.experience import next_level_string
from calcs.tasks import Tasks
from calcs.wines import Wines
from calcs.wintertodt import Wintertodt
from calcs.zeah import Zeah
from helpers.urls import get_icon_url
class Calculators(commands.Cog):
""" Commonly used calculators """
def __init__(self, bot):
self.bot = bot
@commands.command(name='tasks',
description='''Slayer task calculator
This calculator is more accurate at higher levels''',
aliases=['task'],
case_insensitive=True)
async def tasks_command(self, ctx, num_of_tasks, *username):
""" Calculates estimated slayer tasks remaining """
async with ctx.typing():
safe_username = ' '.join(username)
user = Tasks(safe_username, num_of_tasks)
embed = discord.Embed(title="Slayer Task Calculator",
description=f'**{user.slayer_level}** slayer ({user.slayer_xp:,} xp) | {safe_username}')
embed.set_thumbnail(url=get_icon_url("slayer"))
embed.add_field(name="Average XP per task", value=f'{user.avg_xp_per_task():,}', inline=True)
embed.add_field(name="Tasks needed to level up",
value=f'{user.tasks_to_level_up() + 1:,.0f} ({user.xp_needed_to_level_up():,} xp)',
inline=True)
if user.slayer_level < 99:
embed.add_field(name="Tasks to level 99", value=f'{user.tasks_to_level_99()}', inline=True)
embed.add_field(name="Estimated total tasks", value=f'{user.estimated_total_tasks()}', inline=True)
embed.set_footer(text="This calculator is more accurate at higher slayer levels")
await ctx.send(f'{ctx.message.author.mention}', embed=embed)
return
@commands.command(name='wines',
description='Wine cooking calculator',
aliases=['wine'],
case_insensitive=True)
async def wines_command(self, ctx, *username):
""" Calculates wines needed to level 99 """
async with ctx.typing():
safe_username = ' '.join(username)
user = Wines(safe_username)
embed = discord.Embed(title="Wine cooking calculator",
description=f'**{user.cooking_level}** cooking ({user.cooking_xp:,} xp) | {safe_username}')
embed.set_thumbnail(url=get_icon_url("cooking"))
if (user.cooking_level < 99):
embed.add_field(name="Wines to reach level 99", value=f'{user.wines_to_level_99():,}', inline=True)
embed.add_field(name="Inventories", value=f'{user.invs_to_level_99():,}', inline=True)
else:
embed.add_field(name="Wines to reach 200m xp", value=f'{user.wines_to_200m():,}', inline=True)
embed.add_field(name="Inventories", value=f'{user.invs_to_200m():,}', inline=True)
await ctx.send(f'{ctx.message.author.mention}', embed=embed)
return
@commands.command(name='zeah',
description='Zeah runecrafting calculator',
aliases=['bloods', 'blood', 'souls', 'soul'],
case_insensitive=True)
async def zeah_command(self, ctx, *username):
""" Blood and soul rune calculator """
async with ctx.typing():
safe_username = ' '.join(username)
user = Zeah(safe_username)
embed = discord.Embed(title="Zeah runecrafting calculator",
description=f'**{user.runecraft_level}** Runecraft ({user.runecraft_xp:,} xp) | {safe_username}')
embed.set_thumbnail(url=get_icon_url("runecraft"))
embed.set_footer(text=f'{next_level_string(user.runecraft_xp, "runecraft")}')
if (user.runecraft_level < 77):
embed.add_field(name="Level too low",
value="You need a runecraft level of at least 77 to make blood runes", inline=True)
elif (user.runecraft_level < 90):
embed.add_field(name="Bloods to level up",
value=f'{user.bloods_to_level_up() + 1:,.0f}\n'
f'({user.blood_trips_to_level_up() + 1:,.0f} trips)',
inline=True)
embed.add_field(name="Bloods to level 99",
value=f'{user.bloods_to_level_99() + 1:,.0f}\n'
f'({user.blood_trips_to_level_99() + 1:,.0f} trips)',
inline=True)
else:
embed.add_field(name="Bloods to level up",
value=f'{user.bloods_to_level_up() + 1:,.0f}\n'
f'({user.blood_trips_to_level_up() + 1:,.0f} trips)',
inline=True)
embed.add_field(name="Souls to level up",
value=f'{user.souls_to_level_up() + 1:,.0f}\n'
f'({user.soul_trips_to_level_up() + 1:,.0f} trips)',
inline=True)
if (user.runecraft_level < 99):
embed.add_field(name="Bloods to level 99",
value=f'{user.bloods_to_level_99() + 1:,.0f}\n'
f'({user.blood_trips_to_level_99() + 1:,.0f} trips)',
inline=True)
embed.add_field(name="Souls to level 99",
value=f'{user.souls_to_level_99() + 1:,.0f}\n'
f'({user.soul_trips_to_level_99() + 1:,.0f} trips)',
inline=True)
await ctx.send(f'{ctx.message.author.mention}', embed=embed)
return
@commands.command(name='rooftop',
description='Rooftop agility course calculator',
aliases=[],
case_insensitive=True)
async def rooftop_command(self, ctx, *username):
""" Rooftop agility course calculator """
async with ctx.typing():
safe_username = ' '.join(username)
user = Agility(safe_username)
embed = discord.Embed(title="Rooftop agility calculator",
description=f'**{user.agility_level}** Agility ({user.agility_xp:,} xp) | {safe_username}')
embed.set_thumbnail(url=get_icon_url("agility"))
            if user.course is None:
embed.add_field(name="Level too low",
value="You need at least 10 agility to use the Draynor rooftop course", inline=False)
else:
embed.add_field(name="XP needed to level up", value=f'{user.xp_needed_to_level_up():,.0f}', inline=True)
embed.add_field(name=f'Laps to level up', value=f'{user.laps_to_level_up():,.0f} on {user.course}',
inline=True)
embed.add_field(name=f'Laps until {user.next_course}', value=f'{user.laps_to_next_course():,.0f}',
inline=True)
await ctx.send(f'{ctx.message.author.mention}', embed=embed)
@commands.command(name='wintertodt',
description='Calculates estimated Wintertodt kill count',
aliases=['wt'],
case_insensitive=True)
async def wintertodt_command(self, ctx, *username):
""" Wintertodt calculator """
async with ctx.typing():
url_safe_name = '+'.join(username)
safe_name = ' '.join(username)
wt = Wintertodt(url_safe_name)
embed = discord.Embed(title="Wintertodt calculator", description=f'{safe_name}')
embed.set_thumbnail(url=f'{get_icon_url("firemaking")}')
if int(wt.firemaking_level) < 50:
embed.add_field(name="Level too low",
value=f'You need at least **50** firemaking to attempt Wintertodt.\n'
f'You currently only have level **{wt.firemaking_level}**')
else:
embed.add_field(name="Firemaking level", value=f'**{int(wt.firemaking_level):,}**', inline=True)
embed.add_field(name="XP", value=f'{int(wt.firemaking_xp):,}', inline=True)
embed.add_field(name="Wintertodt kill count", value=f'{int(wt.kc_wintertodt):,}')
embed.add_field(name="Average XP per kill", value=f'{wt.average():,} xp')
embed.add_field(name="Kills to level up", value=f'{wt.kills_to_level_up():,.0f}')
embed.add_field(name="Kills to level 99",
value=f'{wt.kills_to_level_99():,}\n(Estimated {wt.estimated_total_kills():,} total)')
embed.set_footer(text=f'{next_level_string(int(wt.firemaking_xp), "firemaking")}')
await ctx.send(f'{ctx.message.author.mention}', embed=embed)
return
@commands.command(name='alch',
description='High alchemy calculator',
aliases=['ha'],
case_insensitive=True)
async def alchemy_command(self, ctx, *username):
""" High alch calculator """
async with ctx.typing():
url_safe_name = '+'.join(username)
safe_name = ' '.join(username)
alch = Alchemy(url_safe_name)
embed = discord.Embed(title="High alchemy calculator", description=f'{safe_name}')
embed.set_thumbnail(url=f'{alch.icon}')
if alch.magic_level < 55:
embed.add_field(name="Level too low", value=f'You need at least **55** Magic to use High Alchemy.\n' \
f'You are currently level **{alch.magic_level}**.')
else:
embed.add_field(name="Magic level", value=f'**{alch.magic_level}** ({alch.magic_xp:,} xp)',
inline=False)
embed.add_field(name="Alchs to level up", value=f'**{alch.alchs_to_level_up():,.0f}** Nature runes\n'
f'({alch.price_to_level_up():,.0f} gp)\n'
f'Approx. {alch.time_to_level_up():.1f} hrs')
if alch.magic_level < 99:
embed.add_field(name="Alchs to level 99", value=f'**{alch.alchs_to_level_99():,}** Nature runes\n'
f'({alch.price_to_level_99():,} gp)\n'
f'Approx. {alch.time_to_level_99():.1f} hrs')
embed.set_footer(text=f'{next_level_string(alch.magic_xp, "magic")}\n'
f'Nature rune cost: {alch.current_price} gp\n'
f'Time calculated with 1200 alchs/hr')
await ctx.send(f'{ctx.message.author.mention}', embed=embed)
return
# Cog setup
def setup(bot):
bot.add_cog(Calculators(bot))
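# Usage sketch (not part of the original cog): how this extension might be loaded
# into a bot. The module path 'cogs.calculators' and the TOKEN variable are
# assumptions for illustration only.
#
#     bot = commands.Bot(command_prefix='!')
#     bot.load_extension('cogs.calculators')   # calls setup(bot) above
#     bot.run(TOKEN)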
|
from .tolact import ShapeTool
from sciapp.object import *
from numpy.linalg import norm
import numpy as np
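# mark() collects "anchor" points for a shape: for every supported dtype it
# returns a list of (N, 2) point arrays (a 3x3 grid of corners, edge midpoints
# and centre for rectangles, the same grid rotated into place for ellipses, and
# the raw vertices for points/lines/polygons). Layers recurse into their children.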
def mark(shp, types = 'all'):
pts = []
if not (types=='all' or shp.dtype in types): return pts
if shp.dtype == 'point':
pts.append([shp.body])
if shp.dtype == 'points':
pts.append(shp.body)
if shp.dtype == 'line':
pts.append(shp.body)
if shp.dtype == 'lines':
pts.extend(shp.body)
if shp.dtype == 'polygon' and len(shp.body)==1:
pts.append(shp.body[0])
if shp.dtype == 'polygons':
for i in shp.body:
if len(i) != 1: continue
pts.append(i[0])
if shp.dtype == 'rectangle':
l,t,w,h = shp.body
ps = np.mgrid[l:l+w:3j, t:t+h:3j].T.reshape((-1,2))
pts.append(ps)
if shp.dtype == 'rectangles':
for i in range(len(shp.body)):
l,t,w,h = shp.body[i]
ps = np.mgrid[l:l+w:3j, t:t+h:3j].T.reshape((-1,2))
pts.append(ps)
if shp.dtype == 'ellipse':
x0, y0, l1, l2, ang = shp.body
mat = np.array([[np.cos(-ang),-np.sin(-ang)],
[np.sin(-ang),np.cos(-ang)]])
ps = np.mgrid[-l1:l1:3j, -l2:l2:3j].T.reshape((-1,2))
pts.append(mat.dot(ps.T).T + (x0, y0))
if shp.dtype == 'ellipses':
for i in range(len(shp.body)):
x0, y0, l1, l2, ang = shp.body[i]
mat = np.array([[np.cos(-ang),-np.sin(-ang)],
[np.sin(-ang),np.cos(-ang)]])
ps = np.mgrid[-l1:l1:3j, -l2:l2:3j].T.reshape((-1,2))
pts.append(mat.dot(ps.T).T + (x0, y0))
if shp.dtype == 'layer':
minl, obj = 1e8, None
for i in shp.body:
pts.extend(mark(i, types))
return pts
def pick_obj(shp, x, y, lim, types='all'):
obj, minl = None, lim
if not (types=='all' or shp.dtype in types):
        return obj, minl
if shp.dtype == 'layer':
for i in shp.body:
o, l = pick_obj(i, x, y, lim, types)
if l < minl:
obj, minl = o, l
elif shp.dtype in 'polygons':
b = shp.to_geom().contains(Point([x, y]).to_geom())
if b : return shp, 0
else:
d = shp.to_geom().distance(Point([x, y]).to_geom())
if d<minl: obj, minl = shp, d
return obj, minl
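# pick_point() returns (shape, handle, distance): the shape whose anchor is
# closest to (x, y) within `lim`, the matching handle (an actual point for
# point/line/polygon shapes, a name such as 'lt'/'o'/'rb' for rectangles and
# ellipses, or a (name, index) pair for the plural variants), and the distance
# value used for the comparison.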
def pick_point(shp, x, y, lim, types='all'):
m, obj, minl = None, None, lim
if not (types=='all' or shp.dtype in types):
return m, obj, minl
if shp.dtype == 'point':
l = ((shp.body-(x, y))**2).sum()
if l < minl:
m, obj, minl = shp, shp.body, l
if shp.dtype == 'points':
l = norm(shp.body-(x,y), axis=1)
n = np.argmin(l)
l = l[n]
if l < minl:
m, obj, minl = shp, shp.body[n], l
if shp.dtype == 'line':
l = norm(shp.body-(x,y), axis=1)
n = np.argmin(l)
l = l[n]
if l < minl:
m, obj, minl = shp, shp.body[n], l
if shp.dtype == 'lines':
for line in shp.body:
l = norm(line-(x,y), axis=1)
n = np.argmin(l)
l = l[n]
if l < minl:
m, obj, minl = shp, line[n], l
if shp.dtype == 'polygon' and len(shp.body)==1:
l = norm(shp.body[0]-(x,y), axis=1)
n = np.argmin(l)
l = l[n]
if l < minl:
m, obj, minl = shp, shp.body[0][n], l
if shp.dtype == 'polygons':
for i in shp.body:
if len(i) != 1: continue
l = norm(i[0]-(x,y), axis=1)
n = np.argmin(l)
l = l[n]
if l < minl:
m, obj, minl = shp, i[0][n], l
if shp.dtype == 'rectangle':
l,t,w,h = shp.body
pts = np.mgrid[l:l+w:3j, t:t+h:3j].T.reshape((-1,2))
names = ['lt','t','rt','l','o','r','lb','b','rb']
l = norm(pts-(x,y), axis=1)
n = np.argmin(l)
if l[n] < minl:
m, obj, minl = shp, names[n], l[n]
if shp.dtype == 'rectangles':
for i in range(len(shp.body)):
l,t,w,h = shp.body[i]
pts = np.mgrid[l:l+w:3j, t:t+h:3j].T.reshape((-1,2))
names = ['lt','t','rt','l','o','r','lb','b','rb']
l = norm(pts-(x,y), axis=1)
n = np.argmin(l)
if l[n] < minl:
m, obj, minl = shp, (names[n], i), l[n]
if shp.dtype == 'ellipse':
x0, y0, l1, l2, ang = shp.body
mat = np.array([[np.cos(-ang),-np.sin(-ang)],
[np.sin(-ang),np.cos(-ang)]])
pts = np.mgrid[-l1:l1:3j, -l2:l2:3j].T.reshape((-1,2))
pts = mat.dot(pts.T).T + (x0, y0)
names = ['lt','t','rt','l','o','r','lb','b','rb']
l = norm(pts-(x,y), axis=1)
n = np.argmin(l)
if l[n] < minl:
m, obj, minl = shp, names[n], l[n]
if shp.dtype == 'ellipses':
for i in range(len(shp.body)):
x0, y0, l1, l2, ang = shp.body[i]
mat = np.array([[np.cos(-ang),-np.sin(-ang)],
[np.sin(-ang),np.cos(-ang)]])
pts = np.mgrid[-l1:l1:3j, -l2:l2:3j].T.reshape((-1,2))
pts = mat.dot(pts.T).T + (x0, y0)
names = ['lt','t','rt','l','o','r','lb','b','rb']
l = norm(pts-(x,y), axis=1)
n = np.argmin(l)
if l[n] < minl:
m, obj, minl = shp, (names[n], i), l[n]
if shp.dtype == 'layer':
# minl, obj = 1e8, None
for i in shp.body:
h, o, l = pick_point(i, x, y, lim, types)
if l < minl:
m, obj, minl = h, o, l
return m, obj, minl
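# drag() moves a single handle previously returned by pick_point(): for
# rectangles it resizes the box edge by edge, for ellipses it re-solves the
# centre and axis lengths from the dragged edge point, and for plain point
# handles it simply writes the new (x, y) into the array in place.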
def drag(shp, pt, x, y, types='all'):
if not (types=='all' or shp.dtype in types): return
if shp.dtype == 'rectangle':
body = shp.body
if pt == 'o':body[:2] = (x, y) - body[2:]/2
if 'l' in pt:body[[0,2]] = x, body[0]+body[2]-x
if 'r' in pt:body[2] = x - body[0]
if 't' in pt:body[[1,3]] = y, body[1]+body[3]-y
if 'b' in pt:body[3] = y - body[1]
elif shp.dtype == 'rectangles':
pt, i = pt
body = shp.body[i]
if pt == 'o':body[:2] = (x, y) - body[2:]/2
if 'l' in pt:body[[0,2]] = x, body[0]+body[2]-x
if 'r' in pt:body[2] = x - body[0]
if 't' in pt:body[[1,3]] = y, body[1]+body[3]-y
if 'b' in pt:body[3] = y - body[1]
elif shp.dtype == 'ellipse':
if pt == 'o':
shp.body[:2] = x, y
return
x0, y0, l1, l2, ang = shp.body
v1, v2 = (np.array([[np.cos(-ang),-np.sin(-ang)],
[np.sin(-ang),np.cos(-ang)]]) * (l1, l2)).T
l, r, t, b = np.array([-v1, v1, -v2, v2]) + (x0, y0)
if 'l' in pt: l = v1.dot([x-x0, y-y0])*v1/l1**2+(x0, y0)
if 'r' in pt: r = v1.dot([x-x0, y-y0])*v1/l1**2+(x0, y0)
if 't' in pt: t = v2.dot([x-x0, y-y0])*v2/l2**2+(x0, y0)
if 'b' in pt: b = v2.dot([x-x0, y-y0])*v2/l2**2+(x0, y0)
k = np.linalg.inv(np.array([-v2,v1]).T).dot((l+r-t-b)/2)
shp.body[:2] = (l+r)/2 + v2*k[0]
shp.body[2:4] = np.dot(r-l, v1)/l1/2, np.dot(b-t, v2)/l2/2
elif shp.dtype == 'ellipses':
pt, i = pt
body = shp.body[i]
if pt == 'o':
body[:2] = x, y
return
x0, y0, l1, l2, ang = body
v1, v2 = (np.array([[np.cos(-ang),-np.sin(-ang)],
[np.sin(-ang),np.cos(-ang)]]) * (l1, l2)).T
l, r, t, b = np.array([-v1, v1, -v2, v2]) + (x0, y0)
if 'l' in pt: l = v1.dot([x-x0, y-y0])*v1/l1**2+(x0, y0)
if 'r' in pt: r = v1.dot([x-x0, y-y0])*v1/l1**2+(x0, y0)
if 't' in pt: t = v2.dot([x-x0, y-y0])*v2/l2**2+(x0, y0)
if 'b' in pt: b = v2.dot([x-x0, y-y0])*v2/l2**2+(x0, y0)
k = np.linalg.inv(np.array([-v2,v1]).T).dot((l+r-t-b)/2)
body[:2] = (l+r)/2 + v2*k[0]
body[2:4] = np.dot(r-l, v1)/l1/2, np.dot(b-t, v2)/l2/2
else: pt[:] = x, y
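# offset() translates a whole shape by (dx, dy): it shifts the (x, y) header of
# rectangle/ellipse/circle bodies, shifts raw ndarrays directly, and recurses
# into list bodies (e.g. layers and polygons).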
def offset(shp, dx, dy):
if shp.dtype in {'rectangle', 'ellipse', 'circle'}:
shp.body[:2] += dx, dy
elif shp.dtype in {'rectangles', 'ellipses', 'circles'}:
shp.body[:,:2] += dx, dy
elif isinstance(shp, np.ndarray):
shp += dx, dy
elif isinstance(shp.body, list):
for i in shp.body: offset(i, dx, dy)
class BaseEditor(ShapeTool):
def __init__(self, dtype='all'):
self.status, self.oldxy, self.p = '', None, None
self.pick_m, self.pick_obj = None, None
def mouse_down(self, shp, x, y, btn, **key):
self.p = x, y
if btn==2:
self.status = 'move'
self.oldxy = key['px'], key['py']
if btn==1 and self.status=='pick':
m, obj, l = pick_point(shp, x, y, 5)
self.pick_m, self.pick_obj = m, obj
if btn==1 and self.pick_m is None:
m, l = pick_obj(shp, x, y, 5)
self.pick_m, self.pick_obj = m, None
if btn==3:
obj, l = pick_obj(shp, x, y, 5)
if key['alt'] and not key['ctrl']:
if obj is None: del shp.body[:]
else: shp.body.remove(obj)
shp.dirty = True
if key['shift'] and not key['alt'] and not key['ctrl']:
layer = geom2shp(union(shp.to_geom()))
shp.body = layer.body
shp.dirty = True
if not (key['shift'] or key['alt'] or key['ctrl']):
key['canvas'].fit()
def mouse_up(self, shp, x, y, btn, **key):
self.status = ''
if btn==1:
self.pick_m = self.pick_obj = None
if not (key['alt'] and key['ctrl']): return
pts = mark(shp)
if len(pts)>0:
pts = Points(np.vstack(pts), color=(255,0,0))
key['canvas'].marks['anchor'] = pts
shp.dirty = True
def mouse_move(self, shp, x, y, btn, **key):
self.cursor = 'arrow'
if self.status == 'move':
ox, oy = self.oldxy
up = (1,-1)[key['canvas'].up]
key['canvas'].move(key['px']-ox, (key['py']-oy)*up)
self.oldxy = key['px'], key['py']
if key['alt'] and key['ctrl']:
self.status = 'pick'
if not 'anchor' in key['canvas'].marks:
pts = mark(shp)
if len(pts)>0:
pts = Points(np.vstack(pts), color=(255,0,0))
key['canvas'].marks['anchor'] = pts
if 'anchor' in key['canvas'].marks:
m, obj, l = pick_point(key['canvas'].marks['anchor'], x, y, 5)
if not m is None: self.cursor = 'hand'
elif 'anchor' in key['canvas'].marks:
self.status = ''
del key['canvas'].marks['anchor']
shp.dirty = True
if not self.pick_obj is None and not self.pick_m is None:
drag(self.pick_m, self.pick_obj, x, y)
pts = mark(self.pick_m)
if len(pts)>0:
pts = np.vstack(pts)
key['canvas'].marks['anchor'] = Points(pts, color=(255,0,0))
self.pick_m.dirty = True
shp.dirty = True
if self.pick_obj is None and not self.pick_m is None:
offset(self.pick_m, x-self.p[0], y-self.p[1])
pts = mark(self.pick_m)
if len(pts)>0:
pts = np.vstack(pts)
key['canvas'].marks['anchor'] = Points(pts, color=(255,0,0))
self.p = x, y
self.pick_m.dirty =shp.dirty = True
def mouse_wheel(self, shp, x, y, d, **key):
if d>0: key['canvas'].zoomout(x, y, coord='data')
        if d<0: key['canvas'].zoomin(x, y, coord='data')
|
# -*- coding: utf-8 -*-
# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
from io import StringIO
from synapse.logging._terse_json import JsonFormatter, TerseJsonFormatter
from synapse.logging.context import LoggingContext, LoggingContextFilter
from tests.logging import LoggerCleanupMixin
from tests.unittest import TestCase
class TerseJsonTestCase(LoggerCleanupMixin, TestCase):
def setUp(self):
self.output = StringIO()
def get_log_line(self):
# One log message, with a single trailing newline.
data = self.output.getvalue()
logs = data.splitlines()
self.assertEqual(len(logs), 1)
self.assertEqual(data.count("\n"), 1)
return json.loads(logs[0])
def test_terse_json_output(self):
"""
The Terse JSON formatter converts log messages to JSON.
"""
handler = logging.StreamHandler(self.output)
handler.setFormatter(TerseJsonFormatter())
logger = self.get_logger(handler)
logger.info("Hello there, %s!", "wally")
log = self.get_log_line()
# The terse logger should give us these keys.
expected_log_keys = [
"log",
"time",
"level",
"namespace",
]
self.assertCountEqual(log.keys(), expected_log_keys)
self.assertEqual(log["log"], "Hello there, wally!")
def test_extra_data(self):
"""
Additional information can be included in the structured logging.
"""
handler = logging.StreamHandler(self.output)
handler.setFormatter(TerseJsonFormatter())
logger = self.get_logger(handler)
logger.info(
"Hello there, %s!", "wally", extra={"foo": "bar", "int": 3, "bool": True}
)
log = self.get_log_line()
# The terse logger should give us these keys.
expected_log_keys = [
"log",
"time",
"level",
"namespace",
# The additional keys given via extra.
"foo",
"int",
"bool",
]
self.assertCountEqual(log.keys(), expected_log_keys)
# Check the values of the extra fields.
self.assertEqual(log["foo"], "bar")
self.assertEqual(log["int"], 3)
self.assertIs(log["bool"], True)
def test_json_output(self):
"""
        The plain JSON formatter converts log messages to JSON (without the timestamp added by the terse formatter).
"""
handler = logging.StreamHandler(self.output)
handler.setFormatter(JsonFormatter())
logger = self.get_logger(handler)
logger.info("Hello there, %s!", "wally")
log = self.get_log_line()
# The terse logger should give us these keys.
expected_log_keys = [
"log",
"level",
"namespace",
]
self.assertCountEqual(log.keys(), expected_log_keys)
self.assertEqual(log["log"], "Hello there, wally!")
def test_with_context(self):
"""
The logging context should be added to the JSON response.
"""
handler = logging.StreamHandler(self.output)
handler.setFormatter(JsonFormatter())
handler.addFilter(LoggingContextFilter())
logger = self.get_logger(handler)
with LoggingContext(request="test"):
logger.info("Hello there, %s!", "wally")
log = self.get_log_line()
# The terse logger should give us these keys.
expected_log_keys = [
"log",
"level",
"namespace",
"request",
]
self.assertCountEqual(log.keys(), expected_log_keys)
self.assertEqual(log["log"], "Hello there, wally!")
self.assertEqual(log["request"], "test")
|
<reponame>LiuHaolan/models
import oneflow as flow
from oneflow import nn
from typing import Any
__all__ = ["PoseNet", "posenet"]
class BasicConv2d(nn.Module):
def __init__(self, input_channels, output_channels, **kwargs):
super().__init__()
self.conv = nn.Conv2d(input_channels, output_channels, bias=False, **kwargs)
self.bn = nn.BatchNorm2d(output_channels)
self.relu = nn.ReLU(inplace=True)
def forward(self, x: flow.Tensor) -> flow.Tensor:
x = self.conv(x)
x = self.bn(x)
x = self.relu(x)
return x
class Stem(nn.Module):
def __init__(self, input_channels):
super().__init__()
self.conv1 = nn.Sequential(
BasicConv2d(input_channels, 32, kernel_size=3),
BasicConv2d(32, 32, kernel_size=3, padding=1),
BasicConv2d(32, 64, kernel_size=3, padding=1),
)
self.branch3x3_conv = BasicConv2d(64, 96, kernel_size=3, padding=1)
self.branch3x3_pool = nn.MaxPool2d(3, stride=1, padding=1)
self.branch7x7a = nn.Sequential(
BasicConv2d(160, 64, kernel_size=1),
BasicConv2d(64, 64, kernel_size=(7, 1), padding=(3, 0)),
BasicConv2d(64, 64, kernel_size=(1, 7), padding=(0, 3)),
BasicConv2d(64, 96, kernel_size=3, padding=1),
)
self.branch7x7b = nn.Sequential(
BasicConv2d(160, 64, kernel_size=1),
BasicConv2d(64, 96, kernel_size=3, padding=1),
)
self.branchpoola = nn.MaxPool2d(kernel_size=3, stride=1, padding=1)
self.branchpoolb = BasicConv2d(192, 192, kernel_size=3, stride=1, padding=1)
def forward(self, x: flow.Tensor) -> flow.Tensor:
x = self.conv1(x)
x = [self.branch3x3_conv(x), self.branch3x3_pool(x)]
x = flow.cat(x, 1)
x = [self.branch7x7a(x), self.branch7x7b(x)]
x = flow.cat(x, 1)
x = [self.branchpoola(x), self.branchpoolb(x)]
x = flow.cat(x, 1)
return x
class Mixed_5b(nn.Module):
def __init__(self, input_channels):
super().__init__()
self.Branch_2 = nn.Sequential(
BasicConv2d(input_channels, 64, kernel_size=1),
BasicConv2d(64, 96, kernel_size=3, padding=1),
BasicConv2d(96, 96, kernel_size=3, padding=1),
)
self.Branch_1 = nn.Sequential(
BasicConv2d(input_channels, 48, kernel_size=1, padding=1),
BasicConv2d(48, 64, kernel_size=5, padding=1),
)
self.Branch_0 = BasicConv2d(input_channels, 96, kernel_size=1)
self.Branch_3 = nn.Sequential(
nn.AvgPool2d(kernel_size=3, stride=1, padding=1),
BasicConv2d(input_channels, 64, kernel_size=1),
)
def forward(self, input: flow.Tensor) -> flow.Tensor:
x = input
x = [self.Branch_0(x), self.Branch_1(x), self.Branch_2(x), self.Branch_3(x)]
output = flow.cat(x, 1)
return output
class block35(nn.Module):
def __init__(self, input_channels):
super().__init__()
self.Branch_2 = nn.Sequential(
BasicConv2d(input_channels, 32, kernel_size=1),
BasicConv2d(32, 48, kernel_size=3, padding=1),
BasicConv2d(48, 64, kernel_size=3, padding=1),
)
self.Branch_1 = nn.Sequential(
BasicConv2d(input_channels, 32, kernel_size=1),
BasicConv2d(32, 32, kernel_size=3, padding=1),
)
self.Branch_0 = BasicConv2d(input_channels, 32, kernel_size=1)
self.Conv2d_1x1 = nn.Conv2d(128, 320, kernel_size=1)
self.relu = nn.ReLU(inplace=True)
def forward(self, in_blob: flow.Tensor) -> flow.Tensor:
residual = [
self.Branch_0(in_blob),
self.Branch_1(in_blob),
self.Branch_2(in_blob),
]
residual = flow.cat(residual, 1)
up = self.Conv2d_1x1(residual)
scaled_up = up * 1.0
in_blob += scaled_up
in_blob = self.relu(in_blob)
return in_blob
class block17(nn.Module):
def __init__(self, input_channels):
super().__init__()
self.Branch_1 = nn.Sequential(
BasicConv2d(input_channels, 128, kernel_size=1, padding=1),
BasicConv2d(128, 160, kernel_size=[1, 7], padding=1),
BasicConv2d(160, 192, kernel_size=[7, 1], padding=1),
)
self.Branch_0 = BasicConv2d(input_channels, 192, kernel_size=1)
self.Conv2d_1x1 = nn.Conv2d(384, input_channels, 1)
self.relu = nn.ReLU(inplace=True)
def forward(self, in_blob: flow.Tensor) -> flow.Tensor:
residual = [self.Branch_0(in_blob), self.Branch_1(in_blob)]
mixed = flow.cat(residual, 1)
up = self.Conv2d_1x1(mixed)
scaled_up = up * 1.0
in_blob += scaled_up
in_blob = self.relu(in_blob)
return in_blob
class block8(nn.Module):
def __init__(self, input_channels):
super().__init__()
self.Branch_1 = nn.Sequential(
BasicConv2d(input_channels, 192, kernel_size=1),
BasicConv2d(192, 224, kernel_size=[1, 3], padding=[0, 1]),
BasicConv2d(224, 256, kernel_size=[3, 1], padding=[1, 0]),
)
self.Branch_0 = BasicConv2d(input_channels, 192, kernel_size=1)
self.Conv2d_1x1 = BasicConv2d(448, 2080, kernel_size=1)
self.relu = nn.ReLU(inplace=True)
def forward(self, in_blob: flow.Tensor) -> flow.Tensor:
residual = [self.Branch_0(in_blob), self.Branch_1(in_blob)]
mixed = flow.cat(residual, 1)
up = self.Conv2d_1x1(mixed)
scaled_up = up * 1.0
in_blob += scaled_up
in_blob = self.relu(in_blob)
return in_blob
class Mixed_6a(nn.Module):
def __init__(self, input_channels):
super().__init__()
self.Branch_2 = nn.MaxPool2d(3, stride=2)
self.Branch_0 = nn.Sequential(
BasicConv2d(input_channels, 384, kernel_size=3, stride=2)
)
self.Branch_1 = nn.Sequential(
BasicConv2d(input_channels, 256, kernel_size=1),
BasicConv2d(256, 256, kernel_size=3, padding=1),
BasicConv2d(256, 384, kernel_size=3, stride=2),
)
def forward(self, x: flow.Tensor) -> flow.Tensor:
x = [
self.Branch_0(x),
self.Branch_1(x),
self.Branch_2(x),
]
x = flow.cat(x, 1)
return x
class Mixed_7a(nn.Module):
def __init__(self, input_channels):
super().__init__()
self.Branch_0 = nn.Sequential(
BasicConv2d(input_channels, 256, kernel_size=1),
BasicConv2d(256, 384, kernel_size=3, stride=2),
)
self.Branch_1 = nn.Sequential(
BasicConv2d(input_channels, 256, kernel_size=1),
BasicConv2d(256, 288, kernel_size=3, stride=2),
)
self.Branch_2 = nn.Sequential(
BasicConv2d(input_channels, 256, kernel_size=1),
BasicConv2d(256, 288, kernel_size=3, padding=1),
BasicConv2d(288, 320, kernel_size=3, stride=2),
)
self.Branch_3 = nn.MaxPool2d(3, stride=2)
def forward(self, x: flow.Tensor) -> flow.Tensor:
x = [self.Branch_0(x), self.Branch_1(x), self.Branch_2(x), self.Branch_3(x)]
x = flow.cat(x, 1)
return x
class PoseNet(nn.Module):
def __init__(self, num_classes: int = 5) -> None:
super(PoseNet, self).__init__()
self.conv2d_1a_3x3 = BasicConv2d(3, 32, kernel_size=3, stride=2)
self.conv2d_2a_3x3 = BasicConv2d(32, 32, kernel_size=3)
self.conv2d_2b_3x3 = BasicConv2d(32, 64, kernel_size=3, padding=1)
self.MaxPool_3a_3x3 = nn.MaxPool2d(3, stride=2)
self.conv2d_3b_1x1 = BasicConv2d(64, 80, kernel_size=1)
self.conv2d_4a_3x3 = BasicConv2d(80, 192, kernel_size=3)
self.MaxPool_5a_3x3 = nn.MaxPool2d(kernel_size=3, stride=2) # stem
self.Mixed_5b = self._generate_inception_module(192, 320, 1, Mixed_5b)
self.block35 = self._generate_inception_module(320, 320, 1, block35)
self.conv_ls1 = BasicConv2d(320, 320, kernel_size=3, stride=2, padding=1)
self.MaxPool_3x3_ls1 = nn.MaxPool2d(kernel_size=3, stride=2)
self.Mixed_6a = self._generate_inception_module(320, 1088, 1, Mixed_6a)
self.block17 = self._generate_inception_module(1088, 1088, 1, block17)
self.conv_ls2 = BasicConv2d(1088, 1088, kernel_size=3, stride=2)
self.Mixed_7a = self._generate_inception_module(1088, 2080, 1, Mixed_7a)
self.block8 = self._generate_inception_module(2080, 2080, 1, block8)
self.conv_ls3 = BasicConv2d(3488, 2080, kernel_size=1)
self.Conv2d_7b_1x1 = BasicConv2d(2080, 1536, kernel_size=1)
self.AvgPool_1a_8x8 = nn.AvgPool2d(kernel_size=[8, 8])
self.dense = nn.Linear(1536, num_classes)
self.relu = nn.ReLU(inplace=True)
def forward(self, inputs: flow.Tensor) -> flow.Tensor:
net = self.conv2d_1a_3x3(inputs)
net = self.conv2d_2a_3x3(net)
net = self.conv2d_2b_3x3(net)
net = self.MaxPool_3a_3x3(net)
net = self.conv2d_3b_1x1(net)
net = self.conv2d_4a_3x3(net)
net = self.MaxPool_5a_3x3(net) # stem
net = self.Mixed_5b(net)
net = self.block35(net)
netB1 = self.conv_ls1(net)
netB1 = self.MaxPool_3x3_ls1(netB1)
net = self.Mixed_6a(net)
net = self.block17(net)
netB2 = self.conv_ls2(net)
net = self.Mixed_7a(net)
net = self.block8(net)
netB3 = [netB1, netB2, net]
netAll = flow.cat(netB3, 1)
netAll = self.conv_ls3(netAll)
net = self.Conv2d_7b_1x1(netAll)
net = self.AvgPool_1a_8x8(net)
net = flow.reshape(net, [net.shape[0], -1])
hidden = self.dense(net)
hidden = self.relu(hidden)
return hidden
@staticmethod
def _generate_inception_module(input_channels, output_channels, block_num, block):
layers = nn.Sequential()
for l in range(block_num):
layers.add_module("{}_{}".format(block.__name__, l), block(input_channels))
input_channels = output_channels
return layers
def _posenet(arch: str, **kwargs: Any) -> PoseNet:
model = PoseNet(**kwargs)
return model
def posenet(**kwargs: Any) -> PoseNet:
return _posenet("posenet", **kwargs)
|
<gh_stars>0
# ------------------------------------------------------------------------------
# Copyright (c) Microsoft
# Licensed under the MIT License.
# Written by <NAME> (<EMAIL>)
# ------------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from sklearn import metrics
from core.inference import get_max_preds
def calc_dists(preds, target, normalize):
preds = preds.astype(np.float32)
target = target.astype(np.float32)
dists = np.zeros((preds.shape[1], preds.shape[0]))
for n in range(preds.shape[0]):
for c in range(preds.shape[1]):
if target[n, c, 0] > 1 and target[n, c, 1] > 1:
normed_preds = preds[n, c, :] / normalize[n]
normed_targets = target[n, c, :] / normalize[n]
dists[c, n] = np.linalg.norm(normed_preds - normed_targets)
else:
dists[c, n] = -1
return dists
def dist_acc(dists, thr=0.5):
''' Return percentage below threshold while ignoring values with a -1 '''
dist_cal = np.not_equal(dists, -1)
num_dist_cal = dist_cal.sum()
if num_dist_cal > 0:
return np.less(dists[dist_cal], thr).sum() * 1.0 / num_dist_cal
else:
return -1
def accuracy(output, target, hm_type='gaussian', thr=0.5):
'''
Calculate accuracy according to PCK,
but uses ground truth heatmap rather than x,y locations
First value to be returned is average accuracy across 'idxs',
followed by individual accuracies
'''
idx = list(range(output.shape[1]))
norm = 1.0
if hm_type == 'gaussian':
pred, _ = get_max_preds(output)
target, _ = get_max_preds(target)
h = output.shape[2]
w = output.shape[3]
norm = np.ones((pred.shape[0], 2)) * np.array([h, w]) / 10
dists = calc_dists(pred, target, norm)
acc = np.zeros((len(idx) + 1))
avg_acc = 0
cnt = 0
for i in range(len(idx)):
acc[i + 1] = dist_acc(dists[idx[i]])
if acc[i + 1] >= 0:
avg_acc = avg_acc + acc[i + 1]
cnt += 1
avg_acc = avg_acc / cnt if cnt != 0 else 0
if cnt != 0:
acc[0] = avg_acc
return acc, avg_acc, cnt, pred
def compute_metrics(pred, target):
"""Compute precision and recall for binary classification problem
pred: 1d numpy boolean array
target: 1d numpy boolean array
"""
tp = np.intersect1d(np.where(pred==True), np.where(target == True)).size
tn = np.intersect1d(np.where(pred==False), np.where(target == False)).size
fn = np.intersect1d(np.where(pred==False), np.where(target == True)).size
fp = np.intersect1d(np.where(pred==True), np.where(target == False)).size
acc = (tp+tn)/(tp+tn+fn+fp)
if tp > 0:
prec = tp / (tp+fp)
recall = tp / (tp+fn)
f1 = 2 * (prec * recall) / (prec + recall)
else:
prec = 0
recall = 0
f1 = 0
msg = 'Accuracy {:.2%}\t'.format(acc)
msg += 'Precision {:.2%} Recall {:.2%} F1 {:.2%}\n'.format(prec, recall, f1)
msg += 'TP {} TN {} FP {} FN {}\n'.format(tp, tn, fp, fn)
msg += '\n'
return acc, prec, recall, f1, msg
def metrics_notvisible(pred, target):
"""Compute accuracy, precision and recall to detect visible/not visible landmarks
    True - landmark is not visible / False - visible
    pred - numpy float array of shape (bs, n_ldm) of max values in heatmaps
    where 0 - no signal (not visible), >0 - signal on heatmap
    target - boolean array of shape (bs, n_ldm), True - not visible, False - visible
"""
message = ''
message += 'Analysing landmark visibility prediction scores over different thresholds\n'
num_ldm = target.size
num_not_visible = np.sum(target)
message += 'Not visible landmark {} out of {}, {:.2%}\n'.format(num_not_visible, num_ldm, num_not_visible/num_ldm)
vis_thr = np.linspace(0, 1, 11)
for thr in vis_thr:
pred_vis = pred <= thr
message += 'Threshold {:.2f}\n'.format(thr)
message += compute_metrics(pred_vis.ravel(), target.ravel())[-1]
#Evaluate with sklearn
fpr, tpr, _ = metrics.roc_curve((~target).astype(int).ravel(), pred.ravel())
roc_auc = metrics.auc(fpr, tpr)
message += 'ROC AUC {:.2f}\n'.format(roc_auc)
return message
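# Tiny self-check (not part of the original module) showing the expected
# behaviour of compute_metrics on hand-computed values.
if __name__ == "__main__":
    pred = np.array([True, True, False, False])
    target = np.array([True, False, False, True])
    acc, prec, recall, f1, msg = compute_metrics(pred, target)
    print(msg)   # accuracy 50%, precision 50%, recall 50%, F1 50%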
|
<gh_stars>1-10
"""Responsible for rendering markdown content to HTML."""
from typing import Callable, Optional, Mapping, Union, Tuple, List
import re
import warnings
from functools import wraps
import xml.etree.ElementTree as ET
from markdown import markdown, Markdown
from markdown.extensions import Extension
from markdown.treeprocessors import Treeprocessor
from mdx_partial_gfm import PartialGithubFlavoredMarkdownExtension
from arxiv.base import logging
from .domain import SourcePage
logger = logging.getLogger(__name__)
ALLOWED_JINJA = r"\$jinja\s*{{ '([{}%]+)' }}([^{]+){{ '([{}%]+)' }}\s*jinja\$"
def render(content: str, dereferencer: Optional[Callable] = None) -> str:
"""
Render markdown content to HTML.
Parameters
----------
content : str
Markdown content.
dereferencer : function
Used for generating URLs from internal paths and slugs. Should accept
a HREF value (str), and return a URL (str). Optional.
static_dereferencer : function
Used for generating URLs for static files. Should accept
a src value (str), and return a URL (str). Optional.
Returns
-------
str
Rendered HTML.
"""
extensions = ['markdown.extensions.tables',
'markdown.extensions.fenced_code',
'markdown.extensions.codehilite',
'markdown.extensions.toc',
'markdown.extensions.attr_list',
PartialGithubFlavoredMarkdownExtension(),
StyleClassExtension(tag="table",
classes=["table", "is-striped"])]
if dereferencer is not None:
extensions.append(ReferenceExtension(tag='a', attr='href',
dereferencer=dereferencer))
extensions.append(ReferenceExtension(tag='img', attr='src',
dereferencer=dereferencer))
# The GFM extension doesn't implement the changes related to positional
# arguments described in the Markdown v2.6 release notes.
# https://python-markdown.github.io/change_log/release-2.6/#positional-arguments-deprecated
with warnings.catch_warnings():
warnings.simplefilter("ignore")
return escape_braces(markdown(content, extensions=extensions))
def escape_braces(content: str) -> str:
"""
Curly braces in content must be escaped.
Otherwise, they are treated as Jinja2 syntax.
"""
def repl(match):
print(match.groups())
return "foo"
return re.sub(ALLOWED_JINJA, r"\1\2\3",
re.sub(r"([{}%]+)", r"{{ '\g<1>' }}", content))
class StyleClassProcessor(Treeprocessor):
"""Adds CSS classes to elements."""
def __init__(self, tag: str = "html", classes: List[str] = []) -> None:
"""Set the target tag and classes to add."""
self.tag = tag
self.classes = classes
def run(self, root: ET.ElementTree) -> None:
"""Add some CSS classes to a table when we find one."""
for child in root:
if child.tag == self.tag:
existing = child.attrib.get("class", "").split()
child.attrib["class"] = " ".join(existing + self.classes)
class ReferenceProcessor(Treeprocessor):
"""Convert internal links to full paths."""
def __init__(self, tag: str = "a", attr: str = "href",
dereferencer: Optional[Callable] = None) -> None:
"""Set the link dereferencer for use during processing."""
self.dereferencer = dereferencer
self.tag = tag
self.attr = attr
def run(self, root: ET.ElementTree) -> None:
"""Perform link conversion on ``root``."""
if self.dereferencer is not None:
self.translate_anchors(root)
def translate_anchors(self, element: ET.Element) -> None:
"""Traverse ``element`` looking for and updating anchor elements."""
for child in element:
if child.tag == self.tag:
value = child.attrib.get(self.attr)
try:
child.attrib[self.attr] = self.dereferencer(value)
except KeyError:
continue
self.translate_anchors(child)
class ReferenceExtension(Extension):
"""Adds :class:`.ReferenceProcessor` to the markdown processor."""
def __init__(self, tag: str = "a", attr: str = "href",
dereferencer: Optional[Callable] = None) -> None:
"""Set the link dereferencer for use during processing."""
self.tag = tag
self.attr = attr
self.dereferencer = dereferencer
def extendMarkdown(self, md: Markdown, md_globals: Mapping) -> None:
"""Add :class:`.ReferenceProcessor` to the markdown processor."""
inst = ReferenceProcessor(tag=self.tag, attr=self.attr,
dereferencer=self.dereferencer)
md.treeprocessors[f'{self.tag}_{self.attr}_reference_processor'] = inst
class StyleClassExtension(Extension):
"""Adds :class:`.ReferenceProcessor` to the markdown processor."""
def __init__(self, tag: str = "a", classes: List[str] = []) -> None:
"""Set the link dereferencer for use during processing."""
self.tag = tag
self.classes = classes
def extendMarkdown(self, md: Markdown, md_globals: Mapping) -> None:
"""Add :class:`.ReferenceProcessor` to the markdown processor."""
inst = StyleClassProcessor(tag=self.tag, classes=self.classes)
md.treeprocessors[f'{self.tag}_style_class_processor'] = inst
def get_linker(page: SourcePage, site_name: str) -> Callable:
    def linker(href: str) -> Tuple[str, Optional[str], Optional[str], Optional[str]]:
# We don't want to mess with things that are clearly not ours to
# fiddle with.
if not href \
or '://' in href \
or href.startswith('/') \
or href.startswith('#') \
or href.startswith('mailto:'):
return href, None, None, None
anchor = None
if '#' in href:
href, anchor = href.split('#', 1)
if href.endswith('.md'):
path = href[:-3]
route = f'{site_name}.from_sitemap'
kwarg = 'page_path'
elif '.' not in href.split('/')[-1]:
path = href
route = f'{site_name}.from_sitemap'
kwarg = 'page_path'
else:
path = href
route = f'{site_name}.static'
kwarg = 'filename'
base_path = '/'.join(page.page_path.split('/')[:-1])
target_path = '/'.join([base_path, path.rstrip('/')]).lstrip('/')
return route, kwarg, target_path, anchor
return linker
def get_deferencer(page: SourcePage, site_name: str) -> Callable:
def link_dereferencer(href: str) -> str:
route, kwarg, target_path, anchor = get_linker(page, site_name)(href)
if kwarg is None:
return route
if anchor is not None:
return "$jinja {{ url_for('%s', %s='%s', _anchor='%s') }} jinja$" \
% (route, kwarg, target_path, anchor)
return "$jinja {{ url_for('%s', %s='%s') }} jinja$" \
% (route, kwarg, target_path)
return link_dereferencer
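# Usage sketch (not part of the original module): rendering a small markdown
# document with a trivial dereferencer. The '/site/' prefix is an arbitrary
# illustration, not something the real dereferencers produce.
if __name__ == "__main__":
    html = render(
        "# Hello\n\nSee the [guide](guide.md) and ![logo](logo.png).",
        dereferencer=lambda href: "/site/" + href,
    )
    print(html)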
|
<filename>Final/problem7.py
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 27 15:09:50 2021
@author: akladke
"""
### Do not change the Location or Campus classes. ###
### Location class is the same as in lecture. ###
class Location(object):
def __init__(self, x, y):
self.x = x
self.y = y
def move(self, deltaX, deltaY):
return Location(self.x + deltaX, self.y + deltaY)
def getX(self):
return self.x
def getY(self):
return self.y
def dist_from(self, other):
xDist = self.x - other.x
yDist = self.y - other.y
return (xDist**2 + yDist**2)**0.5
def __eq__(self, other):
return (self.x == other.x and self.y == other.y)
def __str__(self):
return '<' + str(self.x) + ',' + str(self.y) + '>'
class Campus(object):
def __init__(self, center_loc):
self.center_loc = center_loc
def __str__(self):
return str(self.center_loc)
class MITCampus(Campus):
""" A MITCampus is a Campus that contains tents """
def __init__(self, center_loc, tent_loc = Location(0,0)):
""" Assumes center_loc and tent_loc are Location objects
Initializes a new Campus centered at location center_loc
with a tent at location tent_loc """
Campus.__init__(self, center_loc)
self.center_loc = center_loc
self.tent_loc = tent_loc
self.tents = [self.tent_loc]
def add_tent(self, new_tent_loc):
""" Assumes new_tent_loc is a Location
Adds new_tent_loc to the campus only if the tent is at least 0.5 distance
away from all other tents already there. Campus is unchanged otherwise.
Returns True if it could add the tent, False otherwise. """
        for tent in self.tents:
            if new_tent_loc == tent or new_tent_loc.dist_from(tent) < 0.5:
                return False
        self.tents.append(new_tent_loc)
        return True
def remove_tent(self, tent_loc):
""" Assumes tent_loc is a Location
Removes tent_loc from the campus.
Raises a ValueError if there is not a tent at tent_loc.
Does not return anything """
# Throws a value error if not in the tents list,
# which is the intended functionality
self.tents.remove(tent_loc)
def get_tents(self):
""" Returns a list of all tents on the campus. The list should contain
the string representation of the Location of a tent. The list should
be sorted by the x coordinate of the location. """
new_tents = []
for i in self.tents:
new_tents.append(str(i))
return sorted(new_tents)
c = MITCampus(Location(1,2))
print(c.add_tent(Location(1,2)))
print(c.add_tent(Location(0,0)))
print(c.add_tent(Location(2,3)))
print(c.add_tent(Location(2,3)))
print(c.get_tents())
c = MITCampus(Location(1,2),Location(-1,-2))
print(c.add_tent(Location(1,2))) # this should actually work!
print(c.add_tent(Location(-1,-2)))
print(c.add_tent(Location(-1,-2)))
print(c.add_tent(Location(-1,-2)))
print(c.add_tent(Location(-1,-2)))
print(sorted(c.get_tents()))
c = MITCampus(Location(1,2), Location(0,1))
c.add_tent(Location(-1,2))
c.add_tent(Location(1,-10))
c.add_tent(Location(1,10))
c.add_tent(Location(1,20))
c.add_tent(Location(1,40))
print(sorted(c.get_tents()))
#print(check_if_x_sorted(c.get_tents()))
print("--------------------------------------")
c = MITCampus(Location(1,2), Location(3,1))
print(c.add_tent(Location(2.5,1)))
c = MITCampus(Location(1,2), Location(3,1))
print(c.add_tent(Location(2.49,1)))
c = MITCampus(Location(1,2), Location(3,1))
print(c.add_tent(Location(2.51,1)))
print("--------------------------------------")
c = MITCampus(Location(1,2), Location(3,1))
c.add_tent(Location(1,1))
print(c.add_tent(Location(1.5,1)))
c = MITCampus(Location(1,2), Location(3,1))
c.add_tent(Location(1,1))
print(c.add_tent(Location(1.49,1)))
c = MITCampus(Location(1,2), Location(3,1))
c.add_tent(Location(1,1))
print(c.add_tent(Location(1.51,1)))
print("--------------------------------------")
c = MITCampus(Location(1,2), Location(0,1))
c.add_tent(Location(-1,2))
c.add_tent(Location(1,-10))
c.add_tent(Location(1,10))
c.add_tent(Location(1,20))
c.add_tent(Location(1,40))
print(sorted(c.get_tents()))
print(c.get_tents())
#x = sorted(c.get_tents())
#print(c.get_tents == x)
#print(check_if_x_sorted(c.get_tents()))
# l = Location(1, 2)
# m = Location(3, 1)
# print(l.getX())
|
<reponame>BIAOXYZ/bigchaindb
from bigchaindb.common.exceptions import (InvalidSignature, DoubleSpend,
InputDoesNotExist,
TransactionNotInValidBlock,
AssetIdMismatch, AmountError,
DuplicateTransaction)
from bigchaindb.common.transaction import Transaction
from bigchaindb.common.utils import (validate_txn_obj, validate_key)
from bigchaindb.common.schema import validate_transaction_schema
from bigchaindb.backend.schema import validate_language_key
class Transaction(Transaction):
def validate(self, bigchain, current_transactions=[]):
"""Validate transaction spend
Args:
bigchain (BigchainDB): an instantiated bigchaindb.BigchainDB object.
Returns:
The transaction (Transaction) if the transaction is valid else it
raises an exception describing the reason why the transaction is
invalid.
Raises:
ValidationError: If the transaction is invalid
"""
input_conditions = []
if self.operation == Transaction.CREATE:
duplicates = any(txn for txn in current_transactions if txn.id == self.id)
if bigchain.get_transaction(self.to_dict()['id']) or duplicates:
raise DuplicateTransaction('transaction `{}` already exists'
.format(self.id))
elif self.operation == Transaction.TRANSFER:
# store the inputs so that we can check if the asset ids match
input_txs = []
for input_ in self.inputs:
input_txid = input_.fulfills.txid
input_tx, status = bigchain.\
get_transaction(input_txid, include_status=True)
if input_tx is None:
for ctxn in current_transactions:
                        # assume the status is valid for previously validated
                        # transactions in the current round
if ctxn.id == input_txid:
input_tx = ctxn
status = bigchain.TX_VALID
if input_tx is None:
raise InputDoesNotExist("input `{}` doesn't exist"
.format(input_txid))
if status != bigchain.TX_VALID:
raise TransactionNotInValidBlock(
'input `{}` does not exist in a valid block'.format(
input_txid))
spent = bigchain.get_spent(input_txid, input_.fulfills.output,
current_transactions)
if spent and spent.id != self.id:
raise DoubleSpend('input `{}` was already spent'
.format(input_txid))
output = input_tx.outputs[input_.fulfills.output]
input_conditions.append(output)
input_txs.append(input_tx)
# Validate that all inputs are distinct
links = [i.fulfills.to_uri() for i in self.inputs]
if len(links) != len(set(links)):
raise DoubleSpend('tx "{}" spends inputs twice'.format(self.id))
# validate asset id
asset_id = Transaction.get_asset_id(input_txs)
if asset_id != self.asset['id']:
raise AssetIdMismatch(('The asset id of the input does not'
' match the asset id of the'
' transaction'))
input_amount = sum([input_condition.amount for input_condition in input_conditions])
output_amount = sum([output_condition.amount for output_condition in self.outputs])
if output_amount != input_amount:
raise AmountError(('The amount used in the inputs `{}`'
' needs to be same as the amount used'
' in the outputs `{}`')
.format(input_amount, output_amount))
if not self.inputs_valid(input_conditions):
raise InvalidSignature('Transaction signature is invalid.')
return self
@classmethod
def from_dict(cls, tx_body):
super().validate_id(tx_body)
validate_transaction_schema(tx_body)
validate_txn_obj('asset', tx_body['asset'], 'data', validate_key)
validate_txn_obj('metadata', tx_body, 'metadata', validate_key)
validate_language_key(tx_body['asset'], 'data')
return super().from_dict(tx_body)
@classmethod
def from_db(cls, bigchain, tx_dict_list):
"""Helper method that reconstructs a transaction dict that was returned
from the database. It checks what asset_id to retrieve, retrieves the
asset from the asset table and reconstructs the transaction.
Args:
bigchain (:class:`~bigchaindb.BigchainDB`): An instance
of BigchainDB used to perform database queries.
tx_dict_list (:list:`dict` or :obj:`dict`): The transaction dict or
list of transaction dict as returned from the database.
Returns:
:class:`~Transaction`
"""
return_list = True
if isinstance(tx_dict_list, dict):
tx_dict_list = [tx_dict_list]
return_list = False
tx_map = {}
tx_ids = []
for tx in tx_dict_list:
tx.update({'metadata': None})
tx_map[tx['id']] = tx
if tx['operation'] == Transaction.CREATE:
tx_ids.append(tx['id'])
assets = list(bigchain.get_assets(tx_ids))
for asset in assets:
tx = tx_map[asset['id']]
del asset['id']
tx.update({'asset': asset})
tx_ids = list(tx_map.keys())
metadata_list = list(bigchain.get_metadata(tx_ids))
for metadata in metadata_list:
tx = tx_map[metadata['id']]
tx.update({'metadata': metadata.get('metadata')})
if return_list:
tx_list = []
for tx_id, tx in tx_map.items():
tx_list.append(cls.from_dict(tx))
return tx_list
else:
tx = list(tx_map.values())[0]
return cls.from_dict(tx)
class FastTransaction:
"""A minimal wrapper around a transaction dictionary. This is useful for
when validation is not required but a routine expects something that looks
like a transaction, for example during block creation.
Note: immutability could also be provided
"""
def __init__(self, tx_dict):
self.data = tx_dict
@property
def id(self):
return self.data['id']
def to_dict(self):
return self.data
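# Minimal illustration of FastTransaction (the transaction dict below is hypothetical):
#   ft = FastTransaction({'id': 'abc123', 'operation': 'CREATE'})
#   ft.id        -> 'abc123'
#   ft.to_dict() -> {'id': 'abc123', 'operation': 'CREATE'}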
|
<reponame>NASA-IMPACT/covid-api
""" Dataset metadata generator lambda. """
import datetime
import json
import os
import re
from typing import Any, Dict, List, Optional, Union
import boto3
BASE_PATH = os.path.dirname(os.path.abspath(__file__))
DATASETS_JSON_FILEPATH = os.path.join(BASE_PATH, "datasets")
SITES_JSON_FILEPATH = os.path.join(BASE_PATH, "sites")
BUCKET_NAME = os.environ["DATA_BUCKET_NAME"]
DATASET_METADATA_FILENAME = os.environ["DATASET_METADATA_FILENAME"]
# Use this bucket to read dataset info from prod S3 bucket
bucket = boto3.resource("s3").Bucket(BUCKET_NAME)
# If running in AWS, save metadata file to same bucket
metadata_host_bucket = bucket
# If running locally, save metadata file to local S3 bucket
if os.environ.get("AWS_ENDPOINT_URL"):
metadata_host_bucket = boto3.resource(
"s3", endpoint_url=os.environ["AWS_ENDPOINT_URL"]
).Bucket(BUCKET_NAME)
DT_FORMAT = "%Y-%m-%d"
MT_FORMAT = "%Y%m"
def handler(event, context):
"""
Params:
-------
event (dict):
        context (dict):
Both params are standard lambda handler invocation params but not used within this
lambda's code.
Returns:
-------
(string): JSON-encoded dict with top level keys for each of the possible
queries that can be run against the `/datasets` endpoint (key: _all_ contains
result of the LIST operation, each of other keys contain the result of
GET /datasets/{spotlight_id | "global"})
"""
    # TODO: define TypedDicts for these!
datasets = _gather_json_data(DATASETS_JSON_FILEPATH)
sites = _gather_json_data(SITES_JSON_FILEPATH)
result = json.dumps(_gather_datasets_metadata(datasets, sites))
print(
f"Saving generated metadata to {DATASET_METADATA_FILENAME} in bucket {metadata_host_bucket.name}"
)
metadata_host_bucket.put_object(
Body=result, Key=DATASET_METADATA_FILENAME, ContentType="application/json",
)
return result
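# Rough shape of the metadata object returned by the handler (illustrative only;
# dataset id "no2" and spotlight id "ny" are examples, not an exhaustive list):
#   {
#       "_all":   {"no2": {"domain": ["2020-03-01T00:00:00Z", "..."]}},
#       "global": {"no2": {"domain": ["..."]}},
#       "ny":     {"no2": {"domain": ["..."]}},
#   }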
def _gather_datasets_metadata(datasets: List[dict], sites: List[dict]):
"""Reads through the s3 bucket to generate a file that contains
the datasets for each given spotlight option (_all, global, tk, ny, sf,
la, be, du, gh) and their respective domain for each spotlight
Params:
-------
datasets (List[dict]): list of dataset metadata objects (contains fields
like: s3_location, time_unit, swatch, exclusive_with, etc), to use
to generate the result of each of the possible `/datasets` endpoint
queries.
sites (List[dict]): list of site metadata objects
Returns:
--------
(dict): python object with result of each possible query against the `/datasets`
endpoint with each dataset's associated domain.
"""
metadata: Dict[str, dict] = {}
for dataset in datasets:
print(f"Processing dataset: {dataset['name']}")
if not dataset.get("s3_location"):
domain = []
else:
domain_args = {
"dataset_folder": dataset["s3_location"],
"is_periodic": dataset.get("is_periodic"),
"time_unit": dataset.get("time_unit"),
}
domain = _get_dataset_domain(**domain_args)
metadata.setdefault("_all", {}).update({dataset["id"]: {"domain": domain}})
if _is_global_dataset(dataset):
metadata.setdefault("global", {}).update(
{dataset["id"]: {"domain": domain}}
)
continue
for site in sites:
domain_args["spotlight_id"] = site["id"]
if site["id"] in ["du", "gh"]:
domain_args["spotlight_id"] = ["du", "gh", "EUPorts"]
# skip adding dataset to metadata object if no dates were found for the given
# spotlight (indicates dataset is not valid for that spotlight)
try:
domain = _get_dataset_domain(**domain_args)
except NoKeysFoundForSpotlight:
continue
metadata.setdefault(site["id"], {}).update(
{dataset["id"]: {"domain": domain}}
)
return metadata
def _gather_json_data(dirpath: str) -> List[dict]:
"""Gathers all JSON files from within a diven directory"""
results = []
for filename in os.listdir(dirpath):
if not filename.endswith(".json"):
continue
with open(os.path.join(dirpath, filename)) as f:
results.append(json.load(f))
return results
def _is_global_dataset(dataset: dict) -> bool:
"""Returns wether the given dataset is spotlight specific (FALSE)
or non-spotlight specific (TRUE)"""
return not any(
[
i in dataset["source"]["tiles"][0]
for i in ["{spotlightId}", "greatlakes", "togo"]
]
)
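# Illustrative behaviour of _is_global_dataset (hypothetical dataset stubs):
#   _is_global_dataset({"source": {"tiles": [".../global/{z}/{x}/{y}.png"]}})        -> True
#   _is_global_dataset({"source": {"tiles": [".../{spotlightId}/{z}/{x}/{y}.png"]}}) -> False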
def _gather_s3_keys(
spotlight_id: Optional[Union[str, List]] = None, prefix: Optional[str] = "",
) -> List[str]:
"""
    Returns a list of S3 keys. If no args are provided, the keys will represent
    the entire S3 bucket.
    Params:
    -------
    spotlight_id (Optional[str]):
        Id of a spotlight to filter keys by
    prefix (Optional[str]):
        S3 prefix under which to gather keys, used to specify a specific
        dataset folder to search within.
    Returns:
    -------
    List[str]
"""
keys = [x.key for x in bucket.objects.filter(Prefix=prefix)]
if not spotlight_id:
return keys
if isinstance(spotlight_id, list):
spotlight_id = "|".join([s for s in spotlight_id])
    # Compile with re.IGNORECASE; re.Pattern.search() takes a start position,
    # not flags, as its second argument
    pattern = re.compile(rf"""[^a-zA-Z0-9]({spotlight_id})[^a-zA-Z0-9]""", re.IGNORECASE)
    return list({key for key in keys if pattern.search(key)})
def _get_dataset_domain(
dataset_folder: str,
is_periodic: bool,
spotlight_id: Optional[Union[str, List]] = None,
time_unit: Optional[str] = "day",
):
"""
    Returns a domain for a given dataset as identified by a folder. If the
    dataset is periodic (`is_periodic` is truthy), only the min/max dates of the
    domain are returned; otherwise ALL dates available for that dataset/spotlight
    are returned.
    Params:
    ------
    dataset_folder (str): dataset folder to search within
    is_periodic (bool): whether the dataset is considered periodic
    spotlight_id (Optional[str or List]): id (or list of ids) of a spotlight to
        restrict the domain search to.
    time_unit (Optional[str] - one of ["day", "month"]):
        Whether the {date} part of the S3 filenames should be matched as
        YYYY_MM_DD (day) or YYYYMM (month)
    Return:
    ------
    List[str] of dates formatted as "%Y-%m-%dT%H:%M:%SZ"
"""
s3_keys_args: Dict[str, Any] = {"prefix": dataset_folder}
if spotlight_id:
s3_keys_args["spotlight_id"] = spotlight_id
keys = _gather_s3_keys(**s3_keys_args)
if not keys:
raise NoKeysFoundForSpotlight
dates = []
for key in keys:
# matches either dates like: YYYYMM or YYYY_MM_DD
        pattern = re.compile(
            r"[^a-zA-Z0-9]((?P<YEAR>\d{4})_(?P<MONTH>\d{2})_(?P<DAY>\d{2}))[^a-zA-Z0-9]",
            re.IGNORECASE,
        )
        if time_unit == "month":
            pattern = re.compile(
                r"[^a-zA-Z0-9](?P<YEAR>(\d{4}))(?P<MONTH>(\d{2}))[^a-zA-Z0-9]",
                re.IGNORECASE,
            )
        # re.Pattern.search() takes a start position as its second argument,
        # so the flag must be supplied at compile time
        result = pattern.search(key)
if not result:
continue
date = None
try:
date = datetime.datetime(
int(result.group("YEAR")),
int(result.group("MONTH")),
int(result.groupdict().get("DAY", 1)),
)
except ValueError:
# Invalid date value matched - skip date
continue
# Some files happen to have 6 consecutive digits (likely an ID of sorts)
# that sometimes gets matched as a date. This further restriction of
# matched timestamps will reduce the number of "false" positives (although
# ID's between 201011 and 203011 will slip by)
if not datetime.datetime(2010, 1, 1) < date < datetime.datetime(2030, 1, 1):
continue
dates.append(date.strftime("%Y-%m-%dT%H:%M:%SZ"))
if is_periodic and len(dates):
return [min(dates), max(dates)]
return sorted(set(dates))
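# Example of the S3 key naming the regexes above expect (keys are hypothetical):
#   "xco2/xco2_2020_03_01.tif" -> 2020-03-01 when time_unit == "day"
#   "no2/no2_202003_ny.tif"    -> 2020-03-01 when time_unit == "month" (day defaults to 1)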
class NoKeysFoundForSpotlight(Exception):
"""Exception to be thrown if no keys are found for a given spotlight"""
pass
if __name__ == "__main__":
handler(event={}, context={})
|
import sys
import os
sys.path.append(os.path.dirname(os.path.abspath(__file__)) + "/../")
try:
from utils.priority_queue import PriorityQueue
except:
raise
from pathfinding.heuristic import euclidean_cost
from math import sqrt, inf
from itertools import product
import numpy as np
def reconstruct_path_to_destination(prev, end):
"""
Constructs an in-order sequence of (x,y) coordinates (list of tuples)
to the end destination using the mapping from nodes to their predecessors
(prev).
"""
path = [end]
curr = end
while curr in prev.keys():
curr = prev[curr]
path.insert(0, curr)
return path
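# Illustration: with prev = {(1, 1): (0, 0), (2, 2): (1, 1)},
#   reconstruct_path_to_destination(prev, (2, 2)) == [(0, 0), (1, 1), (2, 2)]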
# A* Search
def get_successors(node, grid):
"""
The neighbors of a cell (node) in the grid are the 8-surrounding cells.
"""
successors = []
node_x, node_y = node
n_rows = len(grid)
n_cols = len(grid[0])
for dx, dy in product([-1,0,1],[-1,0,1]):
# skip the current node itself
if (dx == 0 and dy == 0):
continue
x = node_x + dx
y = node_y + dy
        if (0 <= x < n_rows and 0 <= y < n_cols):
            # x indexes rows and y indexes columns here, matching the bounds check
            # and the (row, col) cells generated in a_star_search
            cost = grid[x][y]
else:
# put infinite penalty on successors that would take us off the edge of the grid
cost = inf
successors.append( ((x, y), cost) )
return successors
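# Illustration on a 2x3 grid (arbitrary costs): get_successors((0, 0), [[1, 2, 3], [4, 5, 6]])
# returns 8 ((x, y), cost) pairs; (0, 1), (1, 0) and (1, 1) are in bounds, while the
# five off-grid neighbours come back with an inf cost.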
def node_with_min_fscore(open_set, f_cost): # open_set is a set (of cell) and f_cost is a dict (with cells as keys)
"""
Find the cell in open set with the smallest f score.
"""
    f_cost_open = {cell: f_cost[cell] for cell in open_set}
return min(f_cost_open, key=f_cost_open.get)
def a_star_search(grid, start, end, heuristic_cost=euclidean_cost):
"""
Implementation of A Star over a 2D grid. Returns a list of waypoints
as a list of (x,y) tuples.
Input:
: grid, 2D matrix
: start, (x,y) tuple, start position
: end, (x,y) tuple, end destination
Output:
: waypoints, list of (x,y) tuples
"""
# the set of cells already evaluated
closed_set = set()
# the set of cells already discovered
open_set = set()
open_set.add(start)
# for each cell, mapping to its least-cost incoming cell
prev = {}
# for each node, cost of reaching it from start (g_cost)
# for each node, cost of getting from start to dest via that node (f_cost)
# note: cell->dest component of f_cost will be estimated using a heuristic
g_cost = {}
f_cost = {}
for cell in product(range(len(grid)), range(len(grid[0]))):
g_cost[cell] = inf
f_cost[cell] = inf
g_cost[start] = 0
f_cost[start] = heuristic_cost(start, end)
while open_set:
# node in open set with min fscore
curr = node_with_min_fscore(open_set, f_cost)
# if we've reached the destination
if curr == end:
return reconstruct_path_to_destination(prev, curr)
open_set.remove(curr)
closed_set.add(curr)
for neighbor, cost in get_successors(curr, grid):
# ignore neighbors which have already been evaluated
if neighbor in closed_set:
continue
curr_g_score = g_cost[curr] + cost
# add neighbor to newly discovered nodes
if neighbor not in open_set:
open_set.add(neighbor)
# if we've already got a lower g_score for neighbor, then move on
elif curr_g_score >= g_cost[neighbor]:
continue
prev[neighbor] = curr
g_cost[neighbor] = curr_g_score
f_cost[neighbor] = g_cost[neighbor] + heuristic_cost(neighbor, end)
# if we get to this point, it's not possible to reach the end destination
    return []
|
<reponame>gilbertekalea/booking.com_crawler
# DO NOT EDIT THIS FILE!
#
# This file is generated from the CDP specification. If you need to make
# changes, edit the generator and regenerate all of the modules.
#
# CDP domain: DOMStorage (experimental)
from __future__ import annotations
from .util import event_class, T_JSON_DICT
from dataclasses import dataclass
import enum
import typing
@dataclass
class StorageId:
'''
DOM Storage identifier.
'''
#: Security origin for the storage.
security_origin: str
#: Whether the storage is local storage (not session storage).
is_local_storage: bool
def to_json(self):
json = dict()
json['securityOrigin'] = self.security_origin
json['isLocalStorage'] = self.is_local_storage
return json
@classmethod
def from_json(cls, json):
return cls(
security_origin=str(json['securityOrigin']),
is_local_storage=bool(json['isLocalStorage']),
)
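# Round-trip illustration (not part of the generated protocol module):
#   StorageId.from_json({'securityOrigin': 'https://example.com', 'isLocalStorage': True}).to_json()
#   == {'securityOrigin': 'https://example.com', 'isLocalStorage': True}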
class Item(list):
'''
DOM Storage item.
'''
def to_json(self) -> typing.List[str]:
return self
@classmethod
def from_json(cls, json: typing.List[str]) -> Item:
return cls(json)
def __repr__(self):
return 'Item({})'.format(super().__repr__())
def clear(
storage_id: StorageId
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
'''
:param storage_id:
'''
params: T_JSON_DICT = dict()
params['storageId'] = storage_id.to_json()
cmd_dict: T_JSON_DICT = {
'method': 'DOMStorage.clear',
'params': params,
}
json = yield cmd_dict
def disable() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
'''
Disables storage tracking, prevents storage events from being sent to the client.
'''
cmd_dict: T_JSON_DICT = {
'method': 'DOMStorage.disable',
}
json = yield cmd_dict
def enable() -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
'''
Enables storage tracking, storage events will now be delivered to the client.
'''
cmd_dict: T_JSON_DICT = {
'method': 'DOMStorage.enable',
}
json = yield cmd_dict
def get_dom_storage_items(
storage_id: StorageId
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,typing.List[Item]]:
'''
:param storage_id:
:returns:
'''
params: T_JSON_DICT = dict()
params['storageId'] = storage_id.to_json()
cmd_dict: T_JSON_DICT = {
'method': 'DOMStorage.getDOMStorageItems',
'params': params,
}
json = yield cmd_dict
return [Item.from_json(i) for i in json['entries']]
def remove_dom_storage_item(
storage_id: StorageId,
key: str
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
'''
:param storage_id:
:param key:
'''
params: T_JSON_DICT = dict()
params['storageId'] = storage_id.to_json()
params['key'] = key
cmd_dict: T_JSON_DICT = {
'method': 'DOMStorage.removeDOMStorageItem',
'params': params,
}
json = yield cmd_dict
def set_dom_storage_item(
storage_id: StorageId,
key: str,
value: str
) -> typing.Generator[T_JSON_DICT,T_JSON_DICT,None]:
'''
:param storage_id:
:param key:
:param value:
'''
params: T_JSON_DICT = dict()
params['storageId'] = storage_id.to_json()
params['key'] = key
params['value'] = value
cmd_dict: T_JSON_DICT = {
'method': 'DOMStorage.setDOMStorageItem',
'params': params,
}
json = yield cmd_dict
@event_class('DOMStorage.domStorageItemAdded')
@dataclass
class DomStorageItemAdded:
storage_id: StorageId
key: str
new_value: str
@classmethod
def from_json(cls, json: T_JSON_DICT) -> DomStorageItemAdded:
return cls(
storage_id=StorageId.from_json(json['storageId']),
key=str(json['key']),
new_value=str(json['newValue'])
)
@event_class('DOMStorage.domStorageItemRemoved')
@dataclass
class DomStorageItemRemoved:
storage_id: StorageId
key: str
@classmethod
def from_json(cls, json: T_JSON_DICT) -> DomStorageItemRemoved:
return cls(
storage_id=StorageId.from_json(json['storageId']),
key=str(json['key'])
)
@event_class('DOMStorage.domStorageItemUpdated')
@dataclass
class DomStorageItemUpdated:
storage_id: StorageId
key: str
old_value: str
new_value: str
@classmethod
def from_json(cls, json: T_JSON_DICT) -> DomStorageItemUpdated:
return cls(
storage_id=StorageId.from_json(json['storageId']),
key=str(json['key']),
old_value=str(json['oldValue']),
new_value=str(json['newValue'])
)
@event_class('DOMStorage.domStorageItemsCleared')
@dataclass
class DomStorageItemsCleared:
storage_id: StorageId
@classmethod
def from_json(cls, json: T_JSON_DICT) -> DomStorageItemsCleared:
return cls(
storage_id=StorageId.from_json(json['storageId'])
)
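# Hedged usage sketch (assumes an async CDP session wrapper such as trio-cdp whose
# execute() helper drives these command generators; the names below are illustrative):
#   storage = StorageId(security_origin='https://example.com', is_local_storage=True)
#   items = await session.execute(get_dom_storage_items(storage))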
|
"""Auto ARIMA transformer is a time series transformer that predicts target using ARIMA models."""
# For more information about the python ARIMA package
# please visit https://www.alkaline-ml.com/pmdarima/index.html
import importlib
import numpy as np
import pandas as pd
import datatable as dt
from sklearn.preprocessing import LabelEncoder, MinMaxScaler
from h2oaicore.systemutils import make_experiment_logger, loggerinfo, loggerwarning
from h2oaicore.transformer_utils import CustomTimeSeriesTransformer
from h2oaicore.separators import orig_feat_prefix, extra_prefix
class MyAutoArimaTransformer(CustomTimeSeriesTransformer):
_binary = False
_multiclass = False
_modules_needed_by_name = ['pmdarima==1.5']
_included_model_classes = None
_testing_can_skip_failure = False # ensure tested as if shouldn't fail
_lag_recipe_allowed = True
_causal_recipe_allowed = False
@staticmethod
def get_default_properties():
return dict(col_type="time_column", min_cols=1, max_cols=1, relative_importance=1)
@staticmethod
def can_use(accuracy, interpretability, **kwargs):
return False # by default auto arima is too slow, but if the only model selected this will still allow use
def fit(self, X: dt.Frame, y: np.array = None):
"""
Fits ARIMA models (1 per time group) using historical target values contained in y
:param X: Datatable frame containing the features
:param y: numpy array containing the historical values of the target
:return: self
"""
# Import the ARIMA python module
pm = importlib.import_module('pmdarima')
self.scalers = None
logger = self._get_experiment_logger()
# 0. Preliminary steps
tgc_wo_time = list(np.setdiff1d(self.tgc, self.time_column))
X = X[:, self.tgc].to_pandas()
# Fill NaNs or None
X = X.replace([None, np.nan], 0)
# Add target, Label encoder is only used for Classif. which we don't support...
if self.labels is not None:
y = LabelEncoder().fit(self.labels).transform(y)
X['y'] = np.array(y)
# 0. Fit general scaler to make predictions for unknown groups
X.rename(columns={self.time_column: "ds"}, inplace=True)
self.general_scaler = MinMaxScaler(feature_range=(1, 2)).fit(X[['y', 'ds']].groupby('ds').median().values)
# 1. Scale target for each individual group
# Go through groups and standard scale them
X['y_skl'] = self.scale_target_per_time_group(X, tgc_wo_time, logger)
# 2. Make time a pandas datetime series so that we can order it
X['ds'] = pd.to_datetime(X['ds'], format=self.datetime_formats[self.time_column])
# 3. Fit a model on averages
X_avg = X[['ds', 'y_skl']].groupby('ds').mean().reset_index()
order = np.argsort(X_avg['ds'])
try:
self.avg_model = pm.auto_arima(X_avg['y_skl'].values[order], error_action='ignore', seasonal=False)
except Exception as e:
loggerinfo(logger, "ARIMA: Average model error : {}".format(e))
self.avg_model = None
# 4. Fit model for Average Groups
self.models = {}
# Go through groups
for grp_col in tgc_wo_time:
print(f'fitting {grp_col}')
# Get the unique dates to be predicted
X_groups = X[['ds', 'y_skl', grp_col]].groupby(grp_col)
print(X.shape)
nb_groups = len(X_groups)
for _i_g, (key, X_grp) in enumerate(X_groups):
# Just say where we are in the fitting process
if (_i_g + 1) % max(1, nb_groups // 20) == 0:
loggerinfo(logger, "Auto ARIMA : %d%% of groups fitted" % (100 * (_i_g + 1) // nb_groups))
# Average over dates
X_grp = X_grp.groupby('ds')['y_skl'].mean().reset_index()
grp_hash = self.get_hash(grp_col, key)
# print("auto arima - fitting on data of shape: %s for group: %s" % (str(X.shape), grp_hash))
X_grp['ds'] = pd.to_datetime(X_grp['ds'], format=self.datetime_formats[self.time_column])
order = np.argsort(X_grp['ds'])
try:
model = pm.auto_arima(X_grp['y_skl'].values[order], error_action='ignore', seasonal=False)
except Exception as e:
loggerinfo(logger, "Auto ARIMA warning: {}".format(e))
model = None
self.models[grp_hash] = model
return self
def get_hash(self, col='', key=None):
# Create dict key to store the min max scaler
if isinstance(key, tuple):
key = [col] + list(key)
elif isinstance(key, list):
pass
else:
# Not tuple, not list
key = [col, key]
grp_hash = '_'.join(map(str, key))
return grp_hash
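    # Illustration of the group-hash convention (group column/keys are hypothetical):
    #   self.get_hash('store', ('A', 3)) -> 'store_A_3'
    #   self.get_hash('store', 'A')      -> 'store_A'
    #   self.get_hash(key=('A', 3))      -> '_A_3'  (empty col prefix)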
def scale_target_per_time_group(self, X, tgc_wo_time, logger):
loggerinfo(logger, 'Start of group scaling')
if len(tgc_wo_time) > 0:
X_groups = X.groupby(tgc_wo_time)
else:
X_groups = [([None], X)]
if self.scalers is None:
self.scalers = {}
scaled_ys = []
for key, X_grp in X_groups:
# Create dict key to store the min max scaler
grp_hash = self.get_hash(key)
# Scale target for current group
self.scalers[grp_hash] = MinMaxScaler(feature_range=(1, 2))
y_skl = self.scalers[grp_hash].fit_transform(X_grp[['y']].values)
# Put back in a DataFrame to keep track of original index
y_skl_df = pd.DataFrame(y_skl, columns=['y'])
# (0, 'A') (1, 4) (100, 1) (100, 1)
# print(grp_hash, X_grp.shape, y_skl.shape, y_skl_df.shape)
y_skl_df.index = X_grp.index
scaled_ys.append(y_skl_df)
else:
scaled_ys = []
for key, X_grp in X_groups:
# Create dict key to store the min max scaler
grp_hash = self.get_hash(key)
# Scale target for current group
y_skl = self.scalers[grp_hash].transform(X_grp[['y']].values)
# Put back in a DataFrame to keep track of original index
y_skl_df = pd.DataFrame(y_skl, columns=['y'])
# (0, 'A') (1, 4) (100, 1) (100, 1)
# print(grp_hash, X_grp.shape, y_skl.shape, y_skl_df.shape)
y_skl_df.index = X_grp.index
scaled_ys.append(y_skl_df)
loggerinfo(logger, 'End of group scaling')
return pd.concat(tuple(scaled_ys), axis=0)
def transform(self, X: dt.Frame):
"""
Uses fitted models (1 per time group) to predict the target
        If self.is_train exists, it means we are doing in-sample predictions;
        if it does not, ARIMA is used to predict the future
:param X: Datatable Frame containing the features
:return: ARIMA predictions
"""
logger = self._get_experiment_logger()
# 0. Preliminary steps
tgc_wo_time = list(np.setdiff1d(self.tgc, self.time_column))
X = X[:, self.tgc].to_pandas()
# Fill NaNs or None
X = X.replace([None, np.nan], 0)
X.rename(columns={self.time_column: "ds"}, inplace=True)
X['ds'] = pd.to_datetime(X['ds'], format=self.datetime_formats[self.time_column])
# 1. Predict with average model
if self.avg_model is not None:
X_time = X[['ds']].groupby('ds').first().reset_index()
if hasattr(self, 'is_train'):
yhat = self.avg_model.predict_in_sample()
else:
yhat = self.avg_model.predict(n_periods=self.pred_gap + X_time.shape[0])
# Assign predictions the same order the dates had
yhat = yhat[self.pred_gap:]
X_time.sort_values('ds', inplace=True)
X_time['yhat'] = yhat
X_time.sort_index(inplace=True)
# Merge back the average prediction to all similar timestamps
indices = X.index
X = pd.merge(
left=X,
right=X_time[['ds', 'yhat']],
on='ds',
how='left'
)
X.index = indices
else:
X['yhat'] = np.nan
y_avg_model = X['yhat'].values
y_predictions = pd.DataFrame(y_avg_model, columns=['average_pred'])
# 2. Predict for individual group
# Go through groups
for i_tgc, grp_col in enumerate(tgc_wo_time):
y_hat_tgc = np.zeros(X.shape[0])
# Get the unique dates to be predicted
X_groups = X[['ds', grp_col]].groupby(grp_col)
nb_groups = len(X_groups)
dfs = []
for _i_g, (key, X_grp) in enumerate(X_groups):
# Just say where we are in the fitting process
if (_i_g + 1) % max(1, nb_groups // 20) == 0:
loggerinfo(logger, "Auto ARIMA : %d%% of groups transformed" % (100 * (_i_g + 1) // nb_groups))
grp_hash = self.get_hash(grp_col, key)
try:
model = self.models[grp_hash]
except KeyError:
model = None
# Find unique datetime
X_time = X_grp[['ds']].groupby('ds').first().reset_index()
X_time['ds'] = pd.to_datetime(X_time['ds'], format=self.datetime_formats[self.time_column])
X_time = X_time.sort_values('ds')
if model is not None:
# Get predictions from ARIMA model, make sure we include prediction gaps
if hasattr(self, 'is_train'):
print(X_grp.shape, model.predict_in_sample().shape)
# It can happen that in_sample predictions are smaller than the training set used
pred = model.predict_in_sample()
tmp = np.zeros(X_time.shape[0])
tmp[:len(pred)] = pred
X_time['yhat'] = tmp
else:
                        # In ARIMA, you provide the number of periods you predict on,
                        # so include the prediction gap and then drop those leading values
yhat = model.predict(n_periods=self.pred_gap + X_time.shape[0])
X_time['yhat'] = yhat[self.pred_gap:]
# Now merge back the predictions into X_grp
indices = X_grp.index
X_grp = pd.merge(
left=X_grp,
right=X_time[['ds', 'yhat']],
on='ds',
how='left'
)
X_grp.index = indices
else:
X_grp = X_grp.copy()
X_grp['yhat'] = np.nan
dfs.append(X_grp['yhat'])
y_predictions[f'{grp_col}_pred'] = pd.concat(dfs, axis=0)
# Now we have to invert scale all this
for grp_col in tgc_wo_time:
# Add time group to the predictions, will be used to invert scaling
y_predictions[grp_col] = X[grp_col].copy()
# Fill NaN
y_predictions[f'{grp_col}_pred'] = y_predictions[f'{grp_col}_pred'].fillna(y_predictions['average_pred'])
        # Go through groups and recover the scaled target for known groups
if len(tgc_wo_time) > 0:
X_groups = y_predictions.groupby(tgc_wo_time)
else:
X_groups = [([None], y_predictions)]
for _f in [f'{grp_col}_pred' for grp_col in tgc_wo_time] + ['average_pred']:
inverted_ys = []
for key, X_grp in X_groups:
grp_hash = self.get_hash(key)
# Scale target for current group
if grp_hash in self.scalers.keys():
inverted_y = self.scalers[grp_hash].inverse_transform(X_grp[[_f]])
else:
inverted_y = self.general_scaler.inverse_transform(X_grp[[_f]])
# Put back in a DataFrame to keep track of original index
inverted_df = pd.DataFrame(inverted_y, columns=[_f])
inverted_df.index = X_grp.index
inverted_ys.append(inverted_df)
y_predictions[_f] = pd.concat(tuple(inverted_ys), axis=0).sort_index()[_f]
y_predictions.drop(tgc_wo_time, axis=1, inplace=True)
self._output_feature_names = [f'{self.display_name}{orig_feat_prefix}{self.time_column}{extra_prefix}{_f}'
for _f in y_predictions]
self._feature_desc = self._output_feature_names
return y_predictions
def fit_transform(self, X: dt.Frame, y: np.array = None):
"""
Fits the ARIMA models (1 per time group) and outputs the corresponding predictions
:param X: Datatable Frame
        :param y: Target to be used to fit the ARIMA model and predict in-sample
:return: in-sample ARIMA predictions
"""
# Flag the fact we are doing in-sample predictions
self.is_train = True
ret = self.fit(X, y).transform(X)
del self.is_train
return ret
def update_history(self, X: dt.Frame, y: np.array = None):
"""
Update the model fit with additional observed endog/exog values.
Updating an ARIMA adds new observations to the model, updating the MLE of the parameters
accordingly by performing several new iterations (maxiter) from the existing model parameters.
:param X: Datatable Frame containing input features
:param y: Numpy array containing new observations to update the ARIMA model
:return:
"""
X = X.to_pandas()
XX = X[self.tgc].copy()
XX['y'] = np.array(y)
tgc_wo_time = list(np.setdiff1d(self.tgc, self.time_column))
if len(tgc_wo_time) > 0:
XX_grp = XX.groupby(tgc_wo_time)
else:
XX_grp = [([None], XX)]
for key, X in XX_grp:
key = key if isinstance(key, list) else [key]
grp_hash = '_'.join(map(str, key))
# print("auto arima - update history with data of shape: %s for group: %s" % (str(X.shape), grp_hash))
order = np.argsort(X[self.time_column])
if grp_hash in self.models:
model = self.models[grp_hash]
if model is not None:
model.update(X['y'].values[order])
return self
def _get_experiment_logger(self):
# Get the logger if it exists
logger = None
if self.context and self.context.experiment_id:
logger = make_experiment_logger(
experiment_id=self.context.experiment_id,
tmp_dir=self.context.tmp_dir,
experiment_tmp_dir=self.context.experiment_tmp_dir
)
return logger
|
<filename>src/Application/PythonScriptModule/pymodules_old/lib/webdav/Connection.py<gh_stars>0
# pylint: disable-msg=W0142,W0102,R0901,R0904,E0203,E1101,C0103
#
# Copyright 2008 German Aerospace Center (DLR)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The contained class extends the HTTPConnection class for WebDAV support.
"""
from httplib import HTTPConnection, CannotSendRequest, BadStatusLine, ResponseNotReady
from copy import copy
import base64 # for basic authentication
import md5
import mimetypes
import os # file handling
import urllib
import types
import socket # to "catch" socket.error
from threading import RLock
from davlib import DAV
from qp_xml import Parser
from webdav.WebdavResponse import MultiStatusResponse, ResponseFormatError
from webdav import Constants
from webdav.logger import getDefaultLogger
__version__ = "$LastChangedRevision: 44 $"
class Connection(DAV):
"""
This class handles a connection to a WebDAV server.
This class is used internally. Client code should prefer classes
L{WebdavClient.ResourceStorer} and L{WebdavClient.CollectionStorer}.
@author: <NAME>
"""
# Constants
# The following switch activates a workaround for the Tamino webdav server:
# Tamino expects URLs which are passed in a HTTP header to be Latin-1 encoded
# instead of Utf-8 encoded.
# Set this switch to zero in order to communicate with conformant servers.
blockSize = 30000
MaxRetries = 10
def __init__(self, *args, **kwArgs):
DAV.__init__(self, *args, **kwArgs)
self.__authorizationInfo = None
self.logger = getDefaultLogger()
self.isConnectedToCatacomb = True
self.serverTypeChecked = False
self.lock = RLock()
def _request(self, method, url, body=None, extra_hdrs={}):
self.lock.acquire()
try:
# add the authorization header
extraHeaders = copy(extra_hdrs)
if self.__authorizationInfo:
# update (digest) authorization data
if hasattr(self.__authorizationInfo, "update"):
self.__authorizationInfo.update(method=method, uri=url)
extraHeaders["AUTHORIZATION"] = self.__authorizationInfo.authorization
# encode message parts
body = _toUtf8(body)
url = _urlEncode(url)
for key, value in extraHeaders.items():
extraHeaders[key] = _toUtf8(value)
if key == "Destination": # copy/move header
if self.isConnectedToCatacomb:
extraHeaders[key] = _toUtf8(value.replace(Constants.SHARP, Constants.QUOTED_SHARP))
else: # in case of TAMINO 4.4
extraHeaders[key] = _urlEncode(value)
# pass message to httplib class
for retry in range(0, Connection.MaxRetries): # retry loop
try:
self.logger.debug("REQUEST Send %s for %s" % (method, url))
self.logger.debug("REQUEST Body: " + repr(body))
for hdr in extraHeaders.items():
self.logger.debug("REQUEST Header: " + repr(hdr))
self.request(method, url, body, extraHeaders)
response = self.getresponse()
break # no retry needed
except (CannotSendRequest, socket.error, BadStatusLine, ResponseNotReady), exc:
# Workaround, start: reconnect and retry...
self.logger.debug("Exception: " + str(exc) + " Retry ... ")
self.close()
try:
self.connect()
except (CannotSendRequest, socket.error, BadStatusLine, ResponseNotReady), exc:
raise WebdavError("Cannot perform request. Connection failed.")
if retry == Connection.MaxRetries - 1:
raise WebdavError("Cannot perform request.")
return self.__evaluateResponse(method, response)
finally:
self.lock.release()
def __evaluateResponse(self, method, response):
""" Evaluates the response of the WebDAV server. """
status, reason = response.status, response.reason
self.logger.debug("Method: " + method + " Status %d: " % status + reason)
        if status >= Constants.CODE_LOWEST_ERROR:  # error has occurred?
self.logger.debug("ERROR Response: " + response.read())
# identify authentication CODE_UNAUTHORIZED, throw appropriate exception
if status == Constants.CODE_UNAUTHORIZED:
self.logger.debug("EXEPTION: raise AuthorizationError")
raise AuthorizationError(reason, status, response.msg["www-authenticate"])
response.close()
raise WebdavError(reason, status)
if status == Constants.CODE_MULTISTATUS:
content = response.read()
            ## check for UTF-8 encoding
response.root = Parser().parse(content)
try:
response.msr = MultiStatusResponse(response.root)
except ResponseFormatError:
raise WebdavError("Invalid WebDAV response.")
response.close()
self.logger.debug("RESPONSE (Multi-Status): " + unicode(response.msr))
elif method == 'LOCK' and status == Constants.CODE_SUCCEEDED:
response.parse_lock_response()
response.close()
elif method != 'GET' and method != 'PUT':
self.logger.debug("RESPONSE Body: " + response.read())
response.close()
return response
def addBasicAuthorization(self, user, password, realm=None):
if user and len(user) > 0:
self.__authorizationInfo = _BasicAuthenticationInfo(realm=realm, user=user, password=password)
def addDigestAuthorization(self, user, password, realm, qop, nonce, uri = None, method = None):
if user and len(user) > 0:
# username, realm, password, uri, method, qop are required
self.__authorizationInfo = _DigestAuthenticationInfo(realm=realm, user=user, password=password, uri=uri, method=method, qop=qop, nonce=nonce)
def putFile(self, path, srcfile, header={}):
self.lock.acquire()
try:
# Assemble header
size = os.fstat(srcfile.fileno()).st_size
header["Content-length"] = str(size)
contentType, contentEnc = mimetypes.guess_type(path)
if contentType:
header['Content-Type'] = contentType
if contentEnc:
header['Content-Encoding'] = contentEnc
if self.__authorizationInfo:
header["AUTHORIZATION"] = self.__authorizationInfo.authorization
# send first request
path = _urlEncode(path)
try:
HTTPConnection.request(self, 'PUT', path, "", header)
self._blockCopySocket(srcfile, self, Connection.blockSize)
srcfile.close()
response = self.getresponse()
except (CannotSendRequest, socket.error, BadStatusLine, ResponseNotReady), exc:
self.logger.debug("Exception: " + str(exc) + " Retry ... ")
raise WebdavError("Cannot perform request.")
status, reason = (response.status, response.reason)
self.logger.debug("Status %d: %s" % (status, reason))
try:
                if status >= Constants.CODE_LOWEST_ERROR:  # error has occurred?
raise WebdavError(reason, status)
finally:
self.logger.debug("RESPONSE Body: " + response.read())
response.close()
return response
finally:
self.lock.release()
def _blockCopySocket(self, source, toSocket, blockSize):
transferedBytes = 0
block = source.read(blockSize)
#while source.readinto(block, blockSize):
while len(block):
toSocket.send(block)
self.logger.debug("Wrote %d bytes." % len(block))
transferedBytes += len(block)
block = source.read(blockSize)
self.logger.info("Transfered %d bytes." % transferedBytes)
def __str__(self):
return self.protocol + "://" + self.host + ':' + str(self.port)
class _BasicAuthenticationInfo(object):
def __init__(self, **kwArgs):
self.__dict__.update(kwArgs)
self.cookie = base64.b64encode("%s:%s" % (self.user, self.password)).strip()
self.authorization = "Basic " + self.cookie
self.password = <PASSWORD> # protect password security
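# Illustration of the Basic scheme (RFC 7617): user "joe" with password "secret" yields
#   Authorization: Basic am9lOnNlY3JldA==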
class _DigestAuthenticationInfo(object):
__nc = "0000000" # in hexa without leading 0x
def __init__(self, **kwArgs):
self.__dict__.update(kwArgs)
assert self.qop is not None, "Digest without qop is not implemented."
assert self.qop is not "auth-int", "Digest with qop-int is not implemented."
def update(self, **kwArgs):
""" Update input data between requests"""
self.__dict__.update(kwArgs)
def makeDigest(self):
from uuid import uuid4
# increment nonce count
self.incrementNC()
# username, realm, password, uri, method, qop are required
if (self.uri != None and self.uri != "/"):
self.uri = _urlEncode(self.uri)
A1 = "%s:%s:%s" % (self.user, self.realm, self.password)
HA1 = md5.new(A1).hexdigest()
#qop == auth
A2 = "%s:%s" % (self.method, self.uri)
HA2 = md5.new(A2).hexdigest()
cnonce = str(uuid4())
responseData = "%s:%s:%s:%s:%s:%s" % (HA1, self.nonce, _DigestAuthenticationInfo.__nc, cnonce, self.qop, HA2)
digestResponse = md5.new(responseData).hexdigest()
authorization = "Digest username=\"%s\", realm=\"%s\", nonce=\"%s\", uri=\"%s\", algorithm=MD5, response=\"%s\", qop=auth, nc=%s, cnonce=\"%s\"" % (self.user, self.realm, self.nonce, self.uri, digestResponse, _DigestAuthenticationInfo.__nc, cnonce)
return authorization
authorization = property(makeDigest)
def incrementNC(self):
_DigestAuthenticationInfo.__nc = self.dec2nc(self.nc2dec() + 1)
def nc2dec(self):
return int(_DigestAuthenticationInfo.__nc, 16)
def dec2nc(self, decimal):
return hex(decimal)[2:].zfill(8)
class WebdavError(IOError):
def __init__(self, reason, code=0):
IOError.__init__(self, code)
self.code = code
self.reason = reason
def __str__(self):
return self.reason
class AuthorizationError(WebdavError):
def __init__(self, reason, code, authHeader):
WebdavError.__init__(self, reason, code)
self.authType = authHeader.split(" ")[0]
self.authInfo = authHeader
def _toUtf8(body):
if not body is None:
if type(body) == types.UnicodeType:
body = body.encode('utf-8')
return body
def _urlEncode(url):
if type(url) == types.UnicodeType:
url = url.encode('utf-8')
return urllib.quote(url)
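# Illustration: _urlEncode(u'/dav/\xfcbung.txt') -> '/dav/%C3%BCbung.txt'
# (non-ASCII characters are UTF-8 encoded first and then percent-quoted; '/' is kept).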
|