Dataset schema (one row per source file):

| Column | Type | Range |
|---|---|---|
| max_stars_repo_path | string | length 4–286 |
| max_stars_repo_name | string | length 5–119 |
| max_stars_count | int64 | 0–191k |
| id | string | length 1–7 |
| content | string | length 6–1.03M |
| content_cleaned | string | length 6–1.03M |
| language | string | 111 classes |
| language_score | float64 | 0.03–1 |
| comments | string | length 0–556k |
| edu_score | float64 | 0.32–5.03 |
| edu_int_score | int64 | 0–5 |
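A minimal sketch of loading and filtering rows like these with the Hugging Face `datasets` library. The dataset identifier below is a placeholder, not the real one; the column names come from the schema above (`language` is the natural language of the extracted comments, `en` or `none`).

```python
from datasets import load_dataset

# Hypothetical dataset identifier -- substitute the real one.
ds = load_dataset("example-org/code-with-edu-scores", split="train")

# Keep files with English comments and a high educational score.
python_edu = ds.filter(
    lambda row: row["language"] == "en"   # natural language of the comments
    and row["edu_int_score"] >= 3         # integer educational score, 0-5
)
print(python_edu[0]["max_stars_repo_path"])
```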
Simple_Use_Of_Random/Simple_Use_Of_Random.py | GracjanBuczek/Python | 0 | 6633251

import random
#part 1 - Pseudorandom number generator
for _ in range(10):
    print(random.randint(1, 100))
input("\nPress Enter to continue...\n")
#part 2 - Comparing x&y, counter
number1 = random.randint(1,100)
number2 = random.randint(1,100)
counter = 1
while number2 != number1:
    print('Value of number1 is:', number1, ', value of number2 is:', number2, '. Counter:', counter)
    counter = counter + 1
    number2 = random.randint(1, 100)
else:
    print('At last! After', counter, 'tries it succeeded!')
language: en | language_score: 0.217005 | edu_score: 3.823096 | edu_int_score: 4
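The `while`/`else` above relies on a Python detail: the `else` suite runs only when the loop exits because its condition became false, never after a `break`. A tiny standalone illustration of the same pattern:

```python
import random

attempts = 1
while random.randint(1, 6) != 6:   # roll until a six comes up
    attempts += 1
else:
    # reached only because the condition turned false (a six was rolled);
    # a `break` inside the loop would have skipped this block
    print(f"Rolled a six after {attempts} attempt(s)")
```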
matchups/tests/test.py | MattHJensen/Matchups | 0 | 6633252 | <reponame>MattHJensen/Matchups<gh_stars>0
from matchups import matchups
def test_get_inputs():
assert matchups.get_inputs()
def test_parse_inputs():
adj = {"matchup": {"batter": ["<NAME>"]}}
ew = {"matchup": {"errors": {}, "warnings": {}}}
r = matchups.parse_inputs(adj, "", ew, True)
assert r
def test_get_matchup():
use_2018 = True
user_mods = {
"matchup": {
"start_date": "2018-05-01",
"pitcher": "<NAME>",
"batter": ["<NAME>", "<NAME>"]
}
}
matchups.get_matchup(use_2018, user_mods)
language: none | language_score: 1 | edu_score: 2.798751 | edu_int_score: 3
pkg/win32/mod_tools/exported/validate.py | victorpopkov/ds-mod-tools | 1 | 6633253
import glob
import sys
import xml.etree.ElementTree as ET
import zipfile
from collections import defaultdict
from clint.textui import progress
anim_map = defaultdict(list)
for zipfilename in progress.bar(glob.glob("*.zip")):
try:
with zipfile.ZipFile(zipfilename, "r") as zf:
root = ET.fromstring(zf.read("animation.xml"))
for anim in root.findall("anim"):
animname = anim.attrib['name']
rootname = anim.attrib['root']
key = (animname, rootname)
anim_map[key].append(zipfilename)
    except Exception:
        # skip archives that are unreadable or lack animation.xml
        pass
invalid = False
for key, datalist in anim_map.items():
    if len(datalist) > 1:
        print(key)
        print(datalist)
        print()
        invalid = True
if invalid:
sys.exit(255)
language: none | language_score: 1 | edu_score: 2.633278 | edu_int_score: 3
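The script's core pattern — group items under a composite key with `defaultdict(list)`, then flag keys that collected more than one item — is reusable on its own. A minimal sketch, independent of the zip/XML specifics above:

```python
from collections import defaultdict

# Map each (anim, root) key to the files that declare it.
seen = defaultdict(list)
records = [("idle", "bank_a", "a.zip"), ("run", "bank_a", "b.zip"),
           ("idle", "bank_a", "c.zip")]

for anim, root, filename in records:
    seen[(anim, root)].append(filename)

# Keys claimed by more than one file are conflicts.
duplicates = {key: files for key, files in seen.items() if len(files) > 1}
print(duplicates)   # {('idle', 'bank_a'): ['a.zip', 'c.zip']}
```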
parse_corpus.py | crscardellino/sbwce | 30 | 6633254
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
import fnmatch
import os
import re
import sys
import spacy
from tqdm import tqdm
spacy.prefer_gpu()
nlp = spacy.load("es", disable=['parser', 'ner'])
def traverse_directory(path, file_pattern='*'):
for root, _, filenames in os.walk(path):
for filename in fnmatch.filter(filenames, file_pattern):
yield os.path.join(root, filename)
os.makedirs("./parsed_corpora", exist_ok=True)
for dirname in tqdm(sorted(os.listdir("./corpora"))):
with open("./parsed_corpora/%s.tsv" % dirname, "w") as fho,\
open("./corpora/%s.txt" % (dirname, dirname), "r") as fhi:
for line in fhi:
for token in nlp(line.strip()):
print(token.i, token.text, token.lemma_, token.pos_,
'WSP' if re.match(r'\s+', token.whitespace_) else 'BLK',
sep="\t", file=fho)
print(file=fho)
language: en | language_score: 0.352855 | edu_score: 2.625909 | edu_int_score: 3
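Each output file is a token-per-line TSV (index, text, lemma, POS tag, and a WSP/BLK whitespace flag), with a blank line between sentences. A small sketch of reading one of these files back into sentence lists, assuming only the format produced above:

```python
def read_parsed_tsv(path):
    """Yield sentences as lists of (index, text, lemma, pos, ws_flag) tuples."""
    sentence = []
    with open(path, encoding="utf-8") as fh:
        for line in fh:
            line = line.rstrip("\n")
            if not line:              # blank line terminates a sentence
                if sentence:
                    yield sentence
                    sentence = []
                continue
            i, text, lemma, pos, ws = line.split("\t")
            sentence.append((int(i), text, lemma, pos, ws))
    if sentence:                      # file may not end with a blank line
        yield sentence
```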
Lib/site-packages/deriva/transfer/restore/deriva_restore.py | fochoao/cpython | 3 | 6633255

import io
import os
import sys
import copy
import json
import time
import logging
import datetime
import platform
from collections import OrderedDict
from bdbag import bdbag_api as bdb
from deriva.core import get_credential, format_credential, read_config, urlquote, format_exception, \
    DEFAULT_SESSION_CONFIG, __version__ as VERSION
from deriva.core.utils.version_utils import get_installed_version
from deriva.core.ermrest_model import Model
from deriva.core.deriva_server import DerivaServer
from deriva.core.ermrest_catalog import ErmrestCatalog, _clone_state_url as CLONE_STATE_URL
from deriva.core.hatrac_store import HatracStore
from deriva.transfer import DerivaUpload, DerivaUploadError, DerivaUploadConfigurationError, GenericUploader
from deriva.transfer.restore import DerivaRestoreError, DerivaRestoreConfigurationError, \
DerivaRestoreAuthenticationError, DerivaRestoreAuthorizationError
class DerivaRestore:
"""
Restore a DERIVA catalog from a bag archive or directory.
Core restore logic re-purposed from ErmrestCatalog.clone_catalog().
"""
RESTORE_STATE_URL = "tag:isrd.isi.edu,2019:restore-status"
BASE_DATA_INPUT_PATH = os.path.join("records", "{}", "{}.json")
BASE_ASSETS_INPUT_PATH = "assets"
def __init__(self, *args, **kwargs):
self.server_args = args[0]
self.hostname = None
self.dst_catalog = None
self.cancelled = False
self.input_path = kwargs.get("input_path")
self.exclude_schemas = kwargs.get("exclude_schemas", list())
self.restore_data = not kwargs.get("no_data", False)
self.data_chunk_size = kwargs.get("data_chunk_size", 10000)
self.restore_annotations = not kwargs.get("no_annotations", False)
self.restore_policy = not kwargs.get("no_policy", False)
self.restore_assets = not kwargs.get("no_assets", False)
self.strict_bag_validation = not kwargs.get("weak_bag_validation", True)
self.no_bag_materialize = kwargs.get("no_bag_materialize", False)
self.upload_config = kwargs.get("asset_config")
self.truncate_after = True
self.envars = kwargs.get("envars", dict())
self.config = kwargs.get("config")
self.credentials = kwargs.get("credentials", dict())
config_file = kwargs.get("config_file")
credential_file = kwargs.get("credential_file")
info = "%s v%s [Python %s, %s]" % (
self.__class__.__name__, get_installed_version(VERSION),
platform.python_version(), platform.platform(aliased=True))
logging.info("Initializing: %s" % info)
if not self.server_args:
raise DerivaRestoreConfigurationError("Target server not specified!")
# server variable initialization
self.hostname = self.server_args.get('host', '')
if not self.hostname:
raise DerivaRestoreConfigurationError("Host not specified!")
protocol = self.server_args.get('protocol', 'https')
self.server_url = protocol + "://" + self.hostname
self.catalog_id = self.server_args.get("catalog_id",)
self.session_config = self.server_args.get('session', DEFAULT_SESSION_CONFIG.copy())
self.session_config["allow_retry_on_all_methods"] = True
# credential initialization
token = kwargs.get("token")
oauth2_token = kwargs.get("oauth2_token")
username = kwargs.get("username")
password = kwargs.get("password")
if token or oauth2_token or (username and password):
self.credentials = format_credential(token=token,
oauth2_token=oauth2_token,
username=username,
password=password)
else:
self.credentials = get_credential(self.hostname, credential_file)
# destination catalog initialization
self.server = DerivaServer(protocol,
self.hostname,
self.credentials,
caching=True,
session_config=self.session_config)
self.server.dcctx["cid"] = kwargs.get("dcctx_cid", "api/" + self.__class__.__name__)
# process config file
if config_file:
try:
self.config = read_config(config_file)
except Exception as e:
raise DerivaRestoreConfigurationError(e)
def set_config(self, config):
self.config = config
def set_credentials(self, credentials):
self.catalog.set_credentials(credentials, self.hostname)
self.store.set_credentials(credentials, self.hostname)
self.credentials = credentials
def prune_parts(self, dest):
if not self.restore_annotations and 'annotations' in dest:
del dest['annotations']
if not self.restore_policy:
if 'acls' in dest:
del dest['acls']
if 'acl_bindings' in dest:
del dest['acl_bindings']
return dest
def copy_sdef(self, schema):
"""Copy schema definition structure with conditional parts for cloning."""
dest = self.prune_parts(schema.prejson())
if 'tables' in dest:
del dest['tables']
return dest
def copy_tdef_core(self, table):
"""Copy table definition structure with conditional parts excluding fkeys."""
dest = self.prune_parts(table.prejson())
dest['column_definitions'] = [self.prune_parts(column) for column in dest['column_definitions']]
dest['keys'] = [self.prune_parts(column) for column in dest.get('keys', [])]
if 'foreign_keys' in dest:
del dest['foreign_keys']
if 'annotations' not in dest:
dest['annotations'] = {}
dest['annotations'][self.RESTORE_STATE_URL] = 1 if self.restore_data else None
return dest
def copy_tdef_fkeys(self, table):
"""Copy table fkeys structure."""
def check(fkdef):
for fkc in fkdef['referenced_columns']:
if fkc['schema_name'] == 'public' \
and fkc['table_name'] in {'ERMrest_Client', 'ERMrest_Group'} \
and fkc['column_name'] == 'RID':
raise DerivaRestoreError(
"Cannot restore catalog with foreign key reference to "
"%(schema_name)s:%(table_name)s:%(column_name)s" % fkc)
return fkdef
return [self.prune_parts(check(dest)) for dest in table.prejson().get('foreign_keys', [])]
def copy_cdef(self, column):
"""Copy column definition with conditional parts."""
return column.table.schema.name, column.table.name, self.prune_parts(column.prejson())
@staticmethod
def check_column_compatibility(src, dst):
"""Check compatibility of source and destination column definitions."""
def error(fieldname, sv, dv):
return DerivaRestoreError("Source/dest column %s mismatch %s != %s for %s:%s:%s" % (
fieldname,
sv, dv,
src.table.schema.name, src.table.name, src.name
))
if src.type.typename != dst.type.typename:
raise error("type", src.type.typename, dst.type.typename)
if src.nullok != dst.nullok:
raise error("nullok", src.nullok, dst.nullok)
if src.default != dst.default:
raise error("default", src.default, dst.default)
def copy_kdef(self, key):
return key.table.schema.name, key.table.name, self.prune_parts(key.prejson())
def get_table_path(self, sname, tname, is_bag):
return os.path.abspath(
os.path.join(self.input_path, "data" if is_bag else "", self.BASE_DATA_INPUT_PATH.format(sname, tname)))
def load_json_file(self, file_path):
with io.open(file_path, 'r', encoding='UTF-8') as file_data:
return json.load(file_data, object_pairs_hook=OrderedDict)
def open_json_stream_file(self, table_path):
"""
Open a JSON-Stream file for reading, caller is responsible for closing.
"""
table_data = io.open(table_path, 'r', encoding='UTF-8')
line = table_data.readline().strip()
table_data.seek(0)
if line.startswith('{') and line.endswith('}'):
return table_data
else:
table_data.close()
raise DerivaRestoreError(
"Input file %s does not appear to be in the required json-stream format." % table_path)
def get_json_recordset(self, data, chunk_size, after=None, after_column='RID'):
chunk = list()
found = False
for line in data:
if isinstance(line, dict):
row = line
else:
row = json.loads(line, object_pairs_hook=OrderedDict)
if after and not found:
if after == row[after_column]:
found = True
continue
chunk.append(row)
if len(chunk) == chunk_size:
yield chunk
chunk = list()
if chunk:
yield chunk
def restore(self, **kwargs):
"""
Perform the catalog restore operation. The restore process is broken up into six phases:
1. Pre-process the input path.
- If the input path is a file, it is assumed that it is a compressed archive file that can be extracted
into an input directory via a supported codec: `tar`,`tgz`,`bz2`, or `zip`.
- If the input directory is a valid _bag_ directory structure, the bag will be materialized.
2. The catalog schema will be restored first. The schema is restored from an ERMrest JSON schema document file.
The schema document file must be named `catalog-schema.json` and must appear at the root of the input
directory. The restore process can be configured to exclude the restoration of an enumerated set of both
schemas and tables.
3. The catalog table data will be restored, if present. The table data restoration process is resilient to
interruption and may be restarted. However, if the catalog schema or data is mutated outside of the scope of
the restore function in-between such restarts, the restored catalog's consistency cannot be guaranteed.
The restore process can be configured to exclude the restoration of table data for a set of tables.
4. The catalog foreign keys will be restored.
5. The catalog assets will be restored, if present.
6. On success, the restore state marker annotations will be deleted and the catalog history will be truncated.
:param kwargs:
:return:
"""
success = True
start = datetime.datetime.now()
# pre-process input
logging.info("Processing input path: %s" % self.input_path)
is_file, is_dir, is_uri = bdb.inspect_path(self.input_path)
if not (is_file or is_dir or is_uri):
raise DerivaRestoreError("Invalid input path [%s]. If the specified input path refers to a locally mounted "
"file or directory, it does not exist or cannot be accessed. If the specified "
"path is a URI, the scheme component of the URI could not be determined." %
self.input_path)
if is_file or is_dir:
self.input_path = os.path.abspath(self.input_path)
if is_file:
logging.info("The input path [%s] is a file. Assuming input file is a directory archive and extracting..." %
self.input_path)
self.input_path = bdb.extract_bag(self.input_path)
try:
if not self.no_bag_materialize:
self.input_path = bdb.materialize(self.input_path)
except bdb.bdbagit.BagValidationError as e:
if self.strict_bag_validation:
raise DerivaRestoreError(format_exception(e))
else:
logging.warning("Input bag validation failed and strict validation mode is disabled. %s" %
format_exception(e))
is_bag = bdb.is_bag(self.input_path)
src_schema_file = os.path.abspath(
os.path.join(self.input_path, "data" if is_bag else "", "catalog-schema.json"))
# the src_catalog_stub created below will never be "connected" in any kind of network sense,
# but we need an instance of ErmrestCatalog in order to get a working Model from the schema file.
src_catalog_stub = ErmrestCatalog("file", src_schema_file, "1")
src_model = Model.fromfile(src_catalog_stub, src_schema_file)
# initialize/connect to destination catalog
if not self.catalog_id:
self.catalog_id = self.server.create_ermrest_catalog().catalog_id
self.server_args["catalog_id"] = self.catalog_id
logging.info("Created new target catalog with ID: %s" % self.catalog_id)
self.dst_catalog = self.server.connect_ermrest(self.catalog_id)
# init dcctx cid to a default
self.dst_catalog.dcctx['cid'] = self.__class__.__name__
# build up the model content we will copy to destination
dst_model = self.dst_catalog.getCatalogModel()
logging.info("Restoring %s to catalog: %s" % (self.input_path, self.dst_catalog.get_server_uri()))
# set top-level config right away and find fatal usage errors...
if self.restore_policy:
logging.info("Restoring top-level catalog ACLs...")
if not src_model.acls:
logging.info("Source schema does not contain any ACLs.")
else:
src_model.acls.owner.extend(dst_model.acls.owner)
self.dst_catalog.put('/acl', json=src_model.acls)
if self.restore_annotations:
logging.info("Restoring top-level catalog annotations...")
self.dst_catalog.put('/annotation', json=src_model.annotations)
# build up the model content we will copy to destination
dst_model = self.dst_catalog.getCatalogModel()
new_model = []
new_columns = [] # ERMrest does not currently allow bulk column creation
new_keys = [] # ERMrest does not currently allow bulk key creation
restore_states = {}
fkeys_deferred = {}
exclude_schemas = [] if self.exclude_schemas is None else self.exclude_schemas
try:
for sname, schema in src_model.schemas.items():
if sname in exclude_schemas:
continue
if sname not in dst_model.schemas:
new_model.append(self.copy_sdef(schema))
for tname, table in schema.tables.items():
if table.kind != 'table':
logging.warning('Skipping restore of %s %s:%s' % (table.kind, sname, tname))
continue
if 'RID' not in table.column_definitions.elements:
raise DerivaRestoreError(
"Source table %s.%s lacks system-columns and cannot be restored." % (sname, tname))
# make sure the source table is pruned of any existing restore state markers
if table.annotations.get(CLONE_STATE_URL) is not None:
del table.annotations[CLONE_STATE_URL]
if table.annotations.get(self.RESTORE_STATE_URL) is not None:
del table.annotations[self.RESTORE_STATE_URL]
if sname not in dst_model.schemas or tname not in dst_model.schemas[sname].tables:
new_model.append(self.copy_tdef_core(table))
restore_states[(sname, tname)] = 1 if self.restore_data else None
fkeys_deferred[(sname, tname)] = self.copy_tdef_fkeys(table)
else:
src_columns = {c.name: c for c in table.column_definitions}
dst_columns = {c.name: c for c in dst_model.schemas[sname].tables[tname].column_definitions}
for cname in src_columns:
if cname not in dst_columns:
new_columns.append(self.copy_cdef(src_columns[cname]))
else:
self.check_column_compatibility(src_columns[cname], dst_columns[cname])
for cname in dst_columns:
if cname not in src_columns:
raise DerivaRestoreError(
"Destination column %s.%s.%s does not exist in source catalog." %
(sname, tname, cname))
src_keys = {tuple(sorted(c.name for c in key.unique_columns)): key for key in table.keys}
dst_keys = {tuple(sorted(c.name for c in key.unique_columns)): key for key in
dst_model.schemas[sname].tables[tname].keys}
for utuple in src_keys:
if utuple not in dst_keys:
new_keys.append(self.copy_kdef(src_keys[utuple]))
for utuple in dst_keys:
if utuple not in src_keys:
raise DerivaRestoreError("Destination key %s.%s(%s) does not exist in source catalog."
% (sname, tname, ', '.join(utuple)))
restore_states[(sname, tname)] = \
dst_model.schemas[sname].tables[tname].annotations.get(self.RESTORE_STATE_URL)
if dst_model.schemas[sname].tables[tname].foreign_keys:
# assume that presence of any destination foreign keys means we already completed
if self.restore_assets:
self.upload_assets()
return
else:
fkeys_deferred[(sname, tname)] = self.copy_tdef_fkeys(table)
# apply the stage 1 model to the destination in bulk
logging.info("Restoring catalog schema...")
if new_model:
self.dst_catalog.post("/schema", json=new_model).raise_for_status()
for sname, tname, cdef in new_columns:
self.dst_catalog.post("/schema/%s/table/%s/column" % (urlquote(sname), urlquote(tname)),
json=cdef).raise_for_status()
for sname, tname, kdef in new_keys:
self.dst_catalog.post("/schema/%s/table/%s/key" % (urlquote(sname), urlquote(tname)),
json=kdef).raise_for_status()
# copy data in stage 2
if self.restore_data:
logging.info("Restoring catalog data...")
for sname, tname in restore_states.keys():
tname_uri = "%s:%s" % (urlquote(sname), urlquote(tname))
if restore_states[(sname, tname)] == 1:
# determine current position in (partial?) copy
row = self.dst_catalog.get("/entity/%s@sort(RID::desc::)?limit=1" % tname_uri).json()
if row:
last = row[0]['RID']
logging.info("Existing data detected in table [%s] -- will attempt partial restore of "
"remaining records following last known RID: %s" % (tname_uri, last))
else:
last = None
table_path = self.get_table_path(sname, tname, is_bag)
if not os.path.isfile(table_path):
logging.warning("Restoration of table data [%s] incomplete. File not found: %s" %
(("%s:%s" % (sname, tname)), table_path))
continue
table = self.get_json_recordset(self.open_json_stream_file(table_path),
self.data_chunk_size, after=last)
total = 0
table_success = True
try:
for chunk in table:
if chunk:
self.dst_catalog.post("/entity/%s?nondefaults=RID,RCT,RCB" % tname_uri, json=chunk)
total += len(chunk)
else:
break
except:
table_success = False
finally:
table.close()
if table_success:
logging.info("Restoration of table data [%s] successful. %s rows restored." %
(tname_uri, total))
else:
logging.warning("Restoration of table data [%s] failed. %s rows restored." %
(tname_uri, total))
# record our progress on catalog in case we fail part way through
self.dst_catalog.put(
"/schema/%s/table/%s/annotation/%s" % (
urlquote(sname),
urlquote(tname),
urlquote(self.RESTORE_STATE_URL),
),
json=2
)
elif restore_states[(sname, tname)] is None and (sname, tname) in {
('public', 'ERMrest_Client'),
('public', 'ERMrest_Group'),
}:
# special sync behavior for magic ermrest tables
# HACK: these are assumed small enough to join via local merge of arrays
want = sorted(self.load_json_file(self.get_table_path(sname, tname, is_bag)),
key=lambda r: r['ID'])
have = sorted(self.dst_catalog.get("/entity/%s?limit=none" % tname_uri).json(),
key=lambda r: r['ID'])
create = []
update = []
pos_want = 0
pos_have = 0
while pos_want < len(want):
while pos_have < len(have) and have[pos_have]['ID'] < want[pos_want]['ID']:
# dst-only rows will be retained as is
pos_have += 1
if pos_have >= len(have) or have[pos_have]['ID'] > want[pos_want]['ID']:
# src-only rows will be inserted
create.append(want[pos_want])
pos_want += 1
else:
# overlapping rows will be updated
update.append(want[pos_want])
pos_want += 1
self.dst_catalog.post("/entity/%s?nondefaults=RCT,RCB" % tname_uri, json=create)
self.dst_catalog.put(
"/attributegroup/%s/ID;%s" % (
tname_uri,
",".join([
urlquote(c.name)
for c in src_model.schemas[sname].tables[tname].column_definitions
if c.name not in {'RID', 'RMT', 'RMB', 'ID'}
])
),
json=update
)
# record our progress on catalog in case we fail part way through
self.dst_catalog.put(
"/schema/%s/table/%s/annotation/%s" % (
urlquote(sname),
urlquote(tname),
urlquote(self.RESTORE_STATE_URL),
),
json=2
)
# apply stage 2 model in bulk only... we won't get here unless preceding succeeded
logging.info("Restoring foreign keys...")
new_fkeys = []
for fkeys in fkeys_deferred.values():
new_fkeys.extend(fkeys)
# restore fkeys
if new_fkeys:
self.dst_catalog.post("/schema", json=new_fkeys)
# restore assets
if self.restore_assets:
self.upload_assets()
# cleanup
self.cleanup_restored_catalog()
except:
success = False
raise
finally:
elapsed_time = datetime.datetime.now() - start
total_secs = elapsed_time.total_seconds()
elapsed = time.strftime('%H:%M:%S', time.gmtime(total_secs))
logging.info("Restore of catalog %s %s. %s" % (self.dst_catalog.get_server_uri(),
"completed successfully" if success else "failed",
("Elapsed time: %s" % elapsed) if (total_secs > 0) else ""))
def cleanup_restored_catalog(self):
# cleanup restore state markers
logging.info("Cleaning up restore state...")
dst_model = self.dst_catalog.getCatalogModel()
for sname, schema in dst_model.schemas.items():
for tname, table in schema.tables.items():
annotation_uri = "/schema/%s/table/%s/annotation/%s" % (
urlquote(sname),
urlquote(tname),
urlquote(self.RESTORE_STATE_URL)
)
try:
self.dst_catalog.delete(annotation_uri)
except Exception as e:
logging.warning("Unable to cleanup restore state marker annotation %s: %s" %
(annotation_uri, format_exception(e)))
continue
# truncate restore history
if self.truncate_after:
logging.info("Truncating restore history...")
snaptime = self.dst_catalog.get("/").json()["snaptime"]
self.dst_catalog.delete("/history/,%s" % urlquote(snaptime))
def upload_assets(self):
asset_dir = os.path.join(self.input_path, self.BASE_ASSETS_INPUT_PATH)
if not os.path.isdir(asset_dir):
logging.debug("No asset directory found. Will not attempt to upload file assets.")
return
logging.info("Restoring file assets...")
uploader = GenericUploader(config_file=self.upload_config, server=self.server_args)
uploader.setCredentials(self.credentials)
uploader.setConfig(self.upload_config)
uploader.scanDirectory(asset_dir, abort_on_invalid_input=False, purge_state=False)
uploader.uploadFiles(file_callback=uploader.defaultFileCallback)
uploader.cleanup()
language: en | language_score: 0.822026 | edu_score: 1.780758 | edu_int_score: 2
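A minimal sketch of driving the class above through its six-phase `restore()`, assuming only what `__init__` reads from its arguments (`host`, `protocol`, and `catalog_id` in the server-args dict; keyword options such as `input_path` and `no_assets`). The host name and paths are placeholders; credentials fall back to locally stored ones via `get_credential`.

```python
# Hypothetical host, catalog id, and bag path -- substitute real values.
server_args = {
    "host": "deriva.example.org",
    "protocol": "https",
    "catalog_id": "1",       # omit to have restore() create a new catalog
}
restorer = DerivaRestore(
    server_args,
    input_path="/tmp/my-catalog-backup.zip",  # archive or bag directory
    no_assets=True,                           # skip phase 5 (file assets)
)
restorer.restore()
```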
tests/conftest.py | graingert/django-data-browser | 0 | 6633256
import dj_database_url
import django
import pytest
from django.conf import settings
DATABASE_CONFIG = dj_database_url.config(
conn_max_age=600, default="sqlite:///db.sqlite3"
)
POSTGRES = "postgresql" in DATABASE_CONFIG["ENGINE"]
SQLITE = "sqlite" in DATABASE_CONFIG["ENGINE"]
if POSTGRES:
JSON_FIELD_SUPPORT = django.VERSION >= (2, 1)
ARRAY_FIELD_SUPPORT = True
else:
JSON_FIELD_SUPPORT = django.VERSION >= (3, 1)
ARRAY_FIELD_SUPPORT = False
INSTALLED_APPS = [
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.staticfiles",
"django.contrib.admin",
"tests.core",
"data_browser",
]
if JSON_FIELD_SUPPORT: # pragma: no branch
INSTALLED_APPS.append("tests.json")
if ARRAY_FIELD_SUPPORT:
INSTALLED_APPS.append("tests.array")
settings.configure(
INSTALLED_APPS=INSTALLED_APPS,
DATABASES={"default": DATABASE_CONFIG},
ROOT_URLCONF="tests.urls",
MIDDLEWARE=[
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
],
TEMPLATES=[
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.template.context_processors.static",
"django.contrib.auth.context_processors.auth",
],
"loaders": ["django.template.loaders.app_directories.Loader"],
},
}
],
STATIC_URL="/static/",
MEDIA_URL="/media/",
DATA_BROWSER_ALLOW_PUBLIC=True,
USE_I18N=True,
USE_TZ=True,
TIME_ZONE="UTC",
SECRET_KEY="secret",
)
@pytest.fixture
def ddb_request(rf):
from data_browser.views import _add_request_info
request = rf.get("/")
_add_request_info(request)
return request
@pytest.fixture
def admin_ddb_request(ddb_request, admin_user):
ddb_request.user = admin_user
return ddb_request
@pytest.fixture
def mock_admin_get_queryset(mocker):
from data_browser.orm_admin import admin_get_queryset
return mocker.patch(
"data_browser.orm_admin.admin_get_queryset", wraps=admin_get_queryset
)
language: en | language_score: 0.381751 | edu_score: 1.973679 | edu_int_score: 2
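A sketch of a test consuming the fixtures defined above. The fixture names and request attributes come straight from the conftest (`ddb_request` wraps `rf.get("/")`, `admin_ddb_request` attaches the admin user); the test module name is hypothetical and pytest-django is assumed, since `admin_user` touches the database.

```python
# tests/test_example.py -- hypothetical test module using the fixtures above.
import pytest


@pytest.mark.django_db
def test_request_carries_admin_user(admin_ddb_request, admin_user):
    # admin_ddb_request builds on ddb_request and attaches the admin user
    assert admin_ddb_request.user == admin_user
    assert admin_ddb_request.path == "/"
```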
pyladies_cz.py | jacekszymanski/pyladies.cz | 0 | 6633257

#!/usr/bin/env python3
"""Create or serve the pyladies.cz website
"""
import sys
if sys.version_info < (3, 0):
raise RuntimeError('You need Python 3.')
import os
import fnmatch
import datetime
import collections
from urllib.parse import urlencode
from flask import Flask, abort, render_template, url_for, send_from_directory
from flask_frozen import Freezer
import yaml
import jinja2
import markdown
from elsa import cli
app = Flask('pyladies_cz')
app.config['TEMPLATES_AUTO_RELOAD'] = True
orig_path = os.path.join(app.root_path, 'original/')
v1_path = os.path.join(orig_path, 'v1/')
MISSING = object()
def redirect(url):
"""Return a response with a Meta redirect"""
# With static pages, we can't use HTTP redirects.
# Return a page with a <meta refresh> instead.
#
# When Frozen-Flask gets support for redirects
# (https://github.com/Frozen-Flask/Frozen-Flask/issues/81),
# this should be revisited.
return render_template('meta_redirect.html', url=url)
########
## Views
@app.route('/')
def index():
current_meetups = collections.OrderedDict(
(city, read_meetups_yaml('meetups/{}.yml'.format(city)))
for city in ('praha', 'brno', 'ostrava','plzen', 'ostatni'))
news = read_news_yaml('news.yml')
return render_template('index.html',
cities=read_yaml('cities.yml'),
current_meetups=current_meetups,
news=news)
@app.route('/<city_slug>/')
def city(city_slug):
cities = read_yaml('cities.yml')
city = cities.get(city_slug)
if city is None:
abort(404)
meetups = read_meetups_yaml('meetups/' + city_slug + '.yml')
current_meetups = [m for m in meetups if m['current']]
past_meetups = [m for m in meetups if not m['current']]
registration_meetups = [
m for m in current_meetups if m.get('registration_status')=='running']
return render_template(
'city.html',
city_slug=city_slug,
city_title=city['title'],
team_name=city.get('team-name'),
current_meetups=current_meetups,
past_meetups=past_meetups,
registration_meetups=registration_meetups,
contacts=city.get('contacts'),
team=read_yaml('teams/' + city_slug + '.yml', default=()),
)
@app.route('/<city>_course/')
def course_redirect(city):
return redirect(url_for('city', city_slug=city, _anchor='meetups'))
@app.route('/<city>_info/')
def info_redirect(city):
return redirect(url_for('city', city_slug=city, _anchor='city-info'))
@app.route('/praha-cznic/')
def praha_cznic():
return redirect('https://naucse.python.cz/2018/pyladies-praha-jaro-cznic/')
@app.route('/praha-ntk/')
def praha_ntk():
return redirect('https://naucse.python.cz/2018/pyladies-praha-jaro-ntk/')
@app.route('/stan_se/')
def stan_se():
return render_template('stan_se.html')
@app.route('/faq/')
def faq():
return render_template('faq.html')
@app.route('/v1/<path:path>')
def v1(path):
if path in REDIRECTS:
return redirect(REDIRECTS[path])
return send_from_directory(v1_path, path)
@app.route('/index.html')
def index_html():
return redirect(url_for('index'))
@app.route('/course.html')
def course_html():
return send_from_directory(orig_path, 'course.html')
@app.route('/googlecc704f0f191eda8f.html')
def google_verification():
# Verification page for GMail on our domain
return send_from_directory(app.root_path, 'google-verification.html')
##########
## Template variables
@app.context_processor
def inject_cities():
return dict(cities=read_yaml('cities.yml'))
##########
## Helpers
md = markdown.Markdown(extensions=['meta', 'markdown.extensions.toc'])
@app.template_filter('markdown')
def convert_markdown(text, inline=False):
result = jinja2.Markup(md.convert(text))
if inline and result[:3] == '<p>' and result[-4:] == '</p>':
result = result[3:-4]
return result
@app.template_filter('date_range')
def date_range(dates, sep='–'):
start, end = dates
pieces = []
if start != end:
if start.year != end.year:
pieces.append('{d.day}. {d.month}. {d.year}'.format(d=start))
elif start.month != end.month:
pieces.append('{d.day}. {d.month}.'.format(d=start))
else:
pieces.append('{d.day}.'.format(d=start))
        pieces.append(sep)
pieces.append('{d.day}. {d.month}. {d.year}'.format(d=end))
return ' '.join(pieces)
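# Worked examples of the filter's output (dates are illustrative):
#   date_range((date(2018, 3, 1), date(2018, 3, 1)))  -> '1. 3. 2018'
#   date_range((date(2018, 3, 1), date(2018, 3, 4)))  -> '1. – 4. 3. 2018'
#   date_range((date(2018, 3, 30), date(2018, 4, 2))) -> '30. 3. – 2. 4. 2018'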
def read_yaml(filename, default=MISSING):
try:
file = open(filename, encoding='utf-8')
except FileNotFoundError:
if default is MISSING:
raise
return default
with file:
data = yaml.safe_load(file)
return data
def read_lessons_yaml(filename):
data = read_yaml(filename)
# workaround for http://stackoverflow.com/q/36157569/99057
# Convert datetime objects to strings
for lesson in data:
if 'date' in lesson:
lesson['dates'] = [lesson['date']]
if 'description' in lesson:
lesson['description'] = convert_markdown(lesson['description'],
inline=True)
for mat in lesson.get('materials', ()):
mat['name'] = convert_markdown(mat['name'], inline=True)
# If lesson has no `done` key, add them according to lesson dates
# All lesson's dates must be in past to mark it as done
done = lesson.get('done', None)
if done is None and 'dates' in lesson:
all_done = []
for date in lesson['dates']:
all_done.append(datetime.date.today() > date)
lesson['done'] = all(all_done)
return data
def read_meetups_yaml(filename):
data = read_yaml(filename)
today = datetime.date.today()
for meetup in data:
# 'date' means both start and end
if 'date' in meetup:
meetup['start'] = meetup['date']
meetup['end'] = meetup['date']
# Derive a URL for places that don't have one from the location
if 'place' in meetup:
if ('url' not in meetup['place']
and {'latitude', 'longitude'} <= meetup['place'].keys()):
place = meetup['place']
place['url'] = 'http://mapy.cz/zakladni?' + urlencode({
'y': place['latitude'],
'x': place['longitude'],
'z': '16',
'id': place['longitude'] + ',' + place['latitude'],
'source': 'coor',
'q': place['name'],
})
# Figure out the status of registration
if 'registration' in meetup:
if 'end' in meetup['registration']:
if meetup['start'] <= today:
meetup['registration_status'] = 'meetup_started'
elif meetup['registration']['end'] >= today:
meetup['registration_status'] = 'running'
else:
meetup['registration_status'] = 'closed'
else:
meetup['registration_status'] = 'running'
meetup['current'] = ('end' not in meetup) or (meetup['end'] >= today)
return list(reversed(data))
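# A meetup entry this reader understands might look like the sketch below
# (illustrative values; the coordinates are kept as strings because they are
# concatenated directly into the mapy.cz URL):
#   - name: Beginner course
#     start: 2018-03-01
#     end: 2018-05-31
#     place:
#       name: NTK
#       latitude: '50.1040'
#       longitude: '14.3904'
#     registration:
#       end: 2018-02-20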
def read_news_yaml(filename):
data = read_yaml(filename)
today = datetime.date.today()
news = []
for new in data:
if new['expires'] >= today:
news.append(new)
return news
def pathto(name, static=False):
if static:
prefix = '_static/'
if name.startswith(prefix):
return url_for('static', filename=name[len(prefix):])
prefix = 'v1/'
if name.startswith(prefix):
return url_for('v1', path=name[len(prefix):])
return name
return url_for(name)
@app.context_processor
def inject_context():
return {
'pathto': pathto,
'today': datetime.date.today(),
}
############
## Redirects
REDIRECTS_DATA = read_yaml('redirects.yml')
REDIRECTS = {}
for directory, pages in REDIRECTS_DATA['naucse-lessons'].items():
for html_filename, lesson in pages.items():
new_url = 'http://naucse.python.cz/lessons/{}/'.format(lesson)
REDIRECTS['{}/{}'.format(directory, html_filename)] = new_url
##########
## Freezer
freezer = Freezer(app)
@freezer.register_generator
def v1():
IGNORE = ['*.aux', '*.out', '*.log', '*.scss', '.travis.yml', '.gitignore']
for name, dirs, files in os.walk(v1_path):
if '.git' in dirs:
dirs.remove('.git')
for file in files:
if file == '.git':
continue
if not any(fnmatch.fnmatch(file, ig) for ig in IGNORE):
path = os.path.relpath(os.path.join(name, file), v1_path)
yield {'path': path}
for path in REDIRECTS:
yield url_for('v1', path=path)
OLD_CITIES = 'praha', 'brno', 'ostrava'
@freezer.register_generator
def course_redirect():
for city in OLD_CITIES:
yield {'city': city}
@freezer.register_generator
def info_redirect():
for city in OLD_CITIES:
yield {'city': city}
if __name__ == '__main__':
    cli(app, freezer=freezer, base_url='http://pyladies.cz')
captcha22/lib/server/captcha22.py | Hirza-Tango/captcha22 | 0 | 6633258
#!/usr/bin/python3
import numpy
import os
import time
import glob
import cv2
import ast
import argparse
class captcha:
def __init__(self, path):
self.path = path
self.hasTrained = False
self.busyTraining = False
self.hasModel = False
self.modelActive = False
self.modelPorts = -1
self.currentTrainingLevel = -1
self.image_width = 0
        self.image_height = 0
self.last_step = 0
self.loss = 0
self.perplexity = 0
self.checkpoint = 0
self.modelName = "null"
self.modelPath = "null"
self.modelOn = False
try:
f = open(self.path + 'model.txt')
lines = f.readlines()
self.hasTrained = ast.literal_eval(lines[0].replace("\n", ""))
self.busyTraining = ast.literal_eval(lines[1].replace("\n", ""))
self.hasModel = ast.literal_eval(lines[2].replace("\n", ""))
self.modelActive = ast.literal_eval(lines[3].replace("\n", ""))
self.modelPorts = ast.literal_eval(lines[4].replace("\n", ""))
self.currentTrainingLevel = ast.literal_eval(
lines[5].replace("\n", ""))
self.image_width = ast.literal_eval(lines[6].replace("\n", ""))
self.image_height = ast.literal_eval(lines[7].replace("\n", ""))
self.last_step = ast.literal_eval(lines[8].replace("\n", ""))
self.loss = ast.literal_eval(lines[9].replace("\n", ""))
self.perplexity = ast.literal_eval(lines[10].replace("\n", ""))
self.checkpoint = ast.literal_eval(lines[11].replace("\n", ""))
self.modelName = lines[12].replace("\n", "")
self.modelPath = lines[13].replace("\n", "")
self.modelOn = ast.literal_eval(lines[14].replace("\n", ""))
except:
self.get_image_size()
self.update_file()
pass
def get_image_size(self):
images = glob.glob(self.path + "data/*.png")
img = cv2.imread(images[0])
self.image_width = img.shape[1]
self.image_height = img.shape[0]
def update_from_file(self):
f = open(self.path + 'model.txt')
lines = f.readlines()
self.hasTrained = ast.literal_eval(lines[0].replace("\n", ""))
self.busyTraining = ast.literal_eval(lines[1].replace("\n", ""))
self.hasModel = ast.literal_eval(lines[2].replace("\n", ""))
self.modelActive = ast.literal_eval(lines[3].replace("\n", ""))
self.modelPorts = ast.literal_eval(lines[4].replace("\n", ""))
self.currentTrainingLevel = ast.literal_eval(
lines[5].replace("\n", ""))
self.image_width = ast.literal_eval(lines[6].replace("\n", ""))
self.image_height = ast.literal_eval(lines[7].replace("\n", ""))
self.last_step = ast.literal_eval(lines[8].replace("\n", ""))
self.loss = ast.literal_eval(lines[9].replace("\n", ""))
self.perplexity = ast.literal_eval(lines[10].replace("\n", ""))
self.checkpoint = ast.literal_eval(lines[11].replace("\n", ""))
self.modelName = lines[12].replace("\n", "")
self.modelPath = lines[13].replace("\n", "")
self.modelOn = ast.literal_eval(lines[14].replace("\n", ""))
def update_file(self):
f = open(self.path + 'model.txt', 'w')
f.write(str(self.hasTrained) + "\n")
f.write(str(self.busyTraining) + "\n")
f.write(str(self.hasModel) + "\n")
f.write(str(self.modelActive) + "\n")
f.write(str(self.modelPorts) + "\n")
f.write(str(self.currentTrainingLevel) + "\n")
f.write(str(self.image_width) + "\n")
f.write(str(self.image_height) + "\n")
f.write(str(self.last_step) + "\n")
f.write(str(self.loss) + "\n")
f.write(str(self.perplexity) + "\n")
f.write(str(self.checkpoint) + "\n")
f.write(str(self.modelName) + "\n")
f.write(str(self.modelPath) + "\n")
f.write(str(self.modelOn) + "\n")
def export_model(self):
print("Going to extract the model")
os.system("(cd " + self.path + " && aocr export --max-height " + str(
self.image_height) + " --max-width " + str(self.image_width) + " exported-model)")
time.sleep(5)
def run_model(self):
print("Starting serving model")
print("nohup tensorflow_model_server --port=" + str(self.modelPorts) + " --rest_api_port=" + str(self.modelPorts + 1) +
" --model_name=" + self.modelName + " --model_base_path=" + os.getcwd() + "/" + self.modelPath + " 2&> /dev/null &")
os.system("nohup tensorflow_model_server --port=" + str(self.modelPorts) + " --rest_api_port=" + str(self.modelPorts + 1) +
" --model_name=" + self.modelName + " --model_base_path=" + os.getcwd() + "/" + self.modelPath + " 2&> /dev/null &")
def stop_model(self):
print("Stoping serving model")
os.system("kill $(ps aux | grep 'tensorflow_model_server --port=" +
str(self.modelPorts) + "' | awk '{print $2}')")
def model_trained(self):
return self.hasTrained
def busy_training(self):
return self.busyTraining
def test_training_level(self):
print("Testing training level")
# Go read the aocr log
f = open(self.path + "aocr.log")
lines = f.readlines()
lastUpdate = ""
for line in lines:
if line.find("Step") != -1:
lastUpdate = line
values = lastUpdate.split(',')
step = ast.literal_eval(values[1].split('Step ')[1].split(':')[0])
# We need to combine two values, the current step and the last saved step. This gives us the total step.
current_checkpoint = 0
try:
f = open(self.path + "/checkpoints/checkpoint")
lines = f.readlines()
current_checkpoint = ast.literal_eval(
lines[0].split('ckpt-')[1].split("\"")[0])
except:
print("No current checkpoint")
pass
while (step > 100):
step -= 100
self.last_step = current_checkpoint + step
self.loss = ast.literal_eval(values[2].split('loss: ')[1])
self.perplexity = ast.literal_eval(values[3].split('perplexity: ')[1].split(
'.')[0] + "." + values[3].split('perplexity: ')[1].split('.')[1])
self.checkpoint = current_checkpoint
print("Values are: ")
print("Step: ", self.last_step)
print("Loss: ", self.loss)
print("Perplexity: ", self.perplexity)
print("Checkpoint: ", self.checkpoint)
self.update_file()
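    # A typical aocr log line this parser expects looks roughly like the
    # following (illustrative, not captured from a real run):
    #   2019-01-01 12:00:00,000 root INFO Step 42: 0.10s, loss: 0.000123, perplexity: 1.000123.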
def determine_endpoint(self, steps, loss, perplex):
if self.checkpoint >= steps:
# Time to end
return True
if self.loss < loss and self.perplexity < perplex:
return True
return False
def stop_training(self):
        # Sometimes the kill is not respected. Do this three times to ensure it is killed.
print("Going to stop training")
os.system("kill $(ps aux | grep 'aocr' | awk '{print $2}')")
print("training stopped, waiting")
time.sleep(5)
os.system("kill $(ps aux | grep 'aocr' | awk '{print $2}')")
print("training stopped, waiting")
time.sleep(5)
os.system("kill $(ps aux | grep 'aocr' | awk '{print $2}')")
print("training stopped, waiting")
time.sleep(5)
self.busyTraining = False
self.hasTrained = True
self.update_file()
def test_training(self):
print("Testing")
print("(cd " + self.path + " && aocr test --max-height " + str(self.image_height) +
" --max-width " + str(self.image_width) + " labels/testing.tfrecords 2>&1 | tee test.txt)")
os.system("(cd " + self.path + " && aocr test --max-height " + str(self.image_height) +
" --max-width " + str(self.image_width) + " labels/testing.tfrecords 2>&1 | tee test.txt)")
time.sleep(30)
def start_training(self):
print("Starting training")
self.busyTraining = True
self.update_file()
os.system("(cd " + self.path + " && nohup aocr train --max-height " + str(self.image_height) +
" --max-width " + str(self.image_width) + " labels/training.tfrecords &>/dev/null &)")
class Captcha22:
def __init__(self, max_steps=2000, loss_threshold=0.0002, perplexity_threshold=1.00018, split_percentage=90.0, starting_port=9000, input_folder="./Unsorted", work_folder="./Busy", model_folder="./Model"):
print("Class start")
self.busyTraining = False
self.training_steps_max = int(max_steps)
self.training_loss_min = float(loss_threshold)
self.training_perplexity_min = float(perplexity_threshold)
self.currentPort = int(starting_port)
self.unsorted_URL = input_folder
self.busy_URL = work_folder
self.model_URL = model_folder
try:
os.mkdir(self.unsorted_URL)
except FileExistsError:
pass
try:
os.mkdir(self.busy_URL)
except FileExistsError:
pass
try:
os.mkdir(self.model_URL)
except FileExistsError:
pass
self.data_split = float(split_percentage)
self.new_models = []
self.existing_models = []
def copy_files(self, file):
print("Starting the copy of files")
names = file.split(".")[0].split("/")[-1].split("_")
# Creating folder structure data
os.system('mkdir ' + self.busy_URL + "/" + names[0])
os.system('mkdir ' + self.busy_URL + "/" + names[0] + "/" + names[1])
os.system('mkdir ' + self.busy_URL + "/" +
names[0] + "/" + names[1] + "/" + names[2])
os.system('mkdir ' + self.busy_URL + "/" +
names[0] + "/" + names[1] + "/" + names[2] + "/" + "labels")
# Creating folder structure for model
os.system('mkdir ' + self.model_URL + "/" + names[0])
os.system('mkdir ' + self.model_URL + "/" + names[0] + "/" + names[1])
os.system('mkdir ' + self.model_URL + "/" +
names[0] + "/" + names[1] + "/" + names[2])
os.system('mkdir ' + self.model_URL + "/" +
names[0] + "/" + names[1] + "/" + names[2] + "/exported-model")
os.system('mkdir ' + self.model_URL + "/" +
names[0] + "/" + names[1] + "/" + names[2] + "/exported-model/1")
# Copy the file to the directory
os.system("cp " + file.replace("\n", "") + " " + self.busy_URL +
"/" + names[0] + "/" + names[1] + "/" + names[2])
os.system("rm " + file.replace("\n", ""))
# Unzip the file
os.system("unzip " + self.busy_URL + "/" + names[0] + "/" + names[1] + "/" + names[2] + "/" + file.split(
"/")[-1] + " -d " + self.busy_URL + "/" + names[0] + "/" + names[1] + "/" + names[2] + "/")
os.system("rm " + self.busy_URL + "/" +
names[0] + "/" + names[1] + "/" + names[2] + "/" + file.split("/")[-1])
def export_model(self, model):
paths = model.path.split("/")
shortPath = paths[-4] + "/" + paths[-3] + "/" + paths[-2]
# Ask model to create the model
model.export_model()
# Copy the model to the correct path for safekeeping
os.system("cp -r " + model.path + "exported-model/* " + self.model_URL + "/" + shortPath + "/exported-model/1/")
print("Model copied")
def run_model(self, model):
# Single command to start the model
print("Start model")
model.run_model()
def stop_model(self, model):
print("Stop model")
model.stop_model()
def label_captchas(self, file):
# Function used to label the captchas
names = file.split(".")[0].split("/")[-1].split("_")
read_dir = self.busy_URL + "/" + \
names[0] + "/" + names[1] + "/" + names[2] + "/data/"
write_dir = self.busy_URL + "/" + \
names[0] + "/" + names[1] + "/" + names[2] + "/labels/"
print("Directories is:")
print(read_dir)
print(write_dir)
onlyfiles = glob.glob(read_dir + "*.png")
count = len(onlyfiles)
train_count = int(count * (self.data_split / 100.0))
test_count = count - train_count
# Create train labels
count = 0
labels = open(write_dir + "training_labels.txt", "w")
while (count < train_count):
file = onlyfiles[count]
answer = file.replace('.png', '').split('/')[-1]
labels.write(self.busy_URL + "/" + names[0] + "/" + names[1] + "/" +
names[2] + "/data/" + file.split('/')[-1] + ' ' + answer + '\n')
count += 1
labels.close()
# Create test labels
count = 0
labels = open(write_dir + "testing_labels.txt", "w")
while (count < test_count):
file = onlyfiles[train_count + count]
answer = file.replace('.png', '').split('/')[-1]
labels.write(self.busy_URL + "/" + names[0] + "/" + names[1] + "/" +
names[2] + "/data/" + file.split('/')[-1] + ' ' + answer + '\n')
count += 1
labels.close()
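        # Each label line pairs an image path with its answer, derived from the
        # filename, e.g. (illustrative):
        #   ./Busy/user/client/model/data/XK7Q2.png XK7Q2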
def generate_aocr_records(self, file):
names = file.split(".")[0].split("/")[-1].split("_")
# Creating folder structure data
os.system('aocr dataset ' + self.busy_URL + "/" + names[0] + "/" + names[1] + "/" + names[2] + "/labels/training_labels.txt " +
self.busy_URL + "/" + names[0] + "/" + names[1] + "/" + names[2] + "/labels/training.tfrecords")
time.sleep(1)
os.system('aocr dataset ' + self.busy_URL + "/" + names[0] + "/" + names[1] + "/" + names[2] + "/labels/testing_labels.txt " +
self.busy_URL + "/" + names[0] + "/" + names[1] + "/" + names[2] + "/labels/testing.tfrecords")
time.sleep(5)
def create_model(self, file):
print(file)
names = file.split(".")[0].split("/")[-1].split("_")
path = self.busy_URL + "/" + names[0] + \
"/" + names[1] + "/" + names[2] + "/"
model = captcha(path)
if model.model_trained():
self.existing_models.append(model)
else:
self.new_models.append(model)
def reload_models(self, path):
model = captcha(path)
if model.model_trained():
self.existing_models.append(model)
else:
if model.busy_training():
model.start_training()
self.new_models.append(model)
def check_files(self):
print("Checking if there is any new files")
files = glob.glob(self.unsorted_URL + "/*.zip")
print(files)
print("Start running")
for file in files:
print("Copy files")
self.copy_files(file)
print("Create labels")
self.label_captchas(file)
print("Generate aocr")
self.generate_aocr_records(file)
print("Create model")
self.create_model(file)
print("Updating file")
self.update_file()
print("Done")
def update_file(self):
f = open('models.txt', 'w')
for model in self.existing_models:
f.write(model.path + "\n")
for model in self.new_models:
f.write(model.path + "\n")
f.close()
def continue_training(self):
if len(self.new_models) == 0:
self.busyTraining = False
return
        # If there are models, we need to check the first one.
self.busyTraining = True
model = self.new_models[0]
# Check if this model is busy training
if model.busy_training():
# Request an update and kill if needed
print("Model update")
model.test_training_level()
if model.determine_endpoint(self.training_steps_max, self.training_loss_min, self.training_perplexity_min):
# We need to stop training
model.stop_training()
# Do other things such as moving the model
# Test the training of the model
model.test_training()
# Export the model
self.export_model(model)
model.hasModel = True
paths = model.path.split("/")
shortPath = paths[1] + "/" + paths[2] + "/" + paths[3]
model.modelName = paths[1] + "_" + paths[2]
model.modelPath = self.model_URL + "/" + shortPath + "/exported-model/"
model.modelPorts = self.currentPort
                self.currentPort += 2  # reserve the gRPC port and the REST port (port + 1) used by this model
model.update_file()
# Create the server for the model
# Run the server
self.existing_models.append(model)
# Delete model
del self.new_models[0]
self.update_file()
else:
print("Going to start the model training procedure")
# Model not training, start training
model.start_training()
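    # Once a model is being served, a prediction call could look roughly like
    # this (sketch only; the payload shape depends on the exported aocr
    # signature, the model name is illustrative, and 9001 assumes the default
    # starting port of 9000 plus one for the REST endpoint):
    #   import base64, requests
    #   img = base64.b64encode(open('captcha.png', 'rb').read()).decode()
    #   r = requests.post('http://localhost:9001/v1/models/user_client:predict',
    #                     json={'inputs': {'input': {'b64': img}}})
    #   print(r.json())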
def start_model_server(self):
print("Checking the models")
print(len(self.existing_models))
for model in self.existing_models:
model.update_from_file()
# Check if the start var has been set and active not, then start
if model.modelOn and not model.modelActive:
# The model needs to be started
print("Starting model")
model.modelActive = True
self.run_model(model)
if not model.modelOn and model.modelActive:
# The model is on but needs to be killed
print("Killing model")
model.modelActive = False
self.stop_model(model)
model.update_file()
def run_server(self):
while (True):
if (not self.busyTraining):
self.check_files()
self.continue_training()
if (not self.busyTraining):
self.start_model_server()
print("Starting wait cycle")
time.sleep(30)
def first_start(self):
# Load all models
#New loading method
first_layer = glob.glob(self.busy_URL + "/*")
all_layers = []
for user in first_layer:
second_layer = glob.glob(user + "/*")
for client in second_layer:
third_layer = glob.glob(client + "/*")
for layer in third_layer:
all_layers.append(layer)
for layer in all_layers:
self.reload_models(layer + "/")
self.update_file()
def main(self):
self.first_start()
self.run_server()
if __name__ == "__main__":
server = Captcha22()
    server.main()
app.py | AzisK/Desktop | 0 | 6633259 | import os
import fnmatch
import PySimpleGUI as sg
PATH = f"{os.environ['HOME']}/Desktop"
# DOCUMENTS_FOLDER = f"{PATH}/Documents"
# SCREENSHOTS_FOLDER = f"{PATH}/Screenshots"
# PICTURES_FOLDER = f"{PATH}/Pictures"
DOCUMENT = {
"name": "Documents",
"path": f"{PATH}/Documents",
"description": "document",
"rules": ["*.docx", "*.pdf", "*.txt"],
}
SCREENSHOT = {
"name": "Screenshots",
"path": f"{PATH}/Screenshots",
"description": "screen shot",
"rules": ["Screen Shot*.png"],
}
PICTURE = {
"name": "Pictures",
"path": f"{PATH}/Pictures",
"description": "picture",
"rules": ["*.png", "*.jpg"],
}
def main():
button, values = gui()
if button == "Cancel":
return
adjust_options(values)
organize_desktop()
def organize_desktop():
for file in os.listdir(PATH):
if os.path.isfile(f"{PATH}/{file}"):
print(f"Desktop has {file}")
for file_type in [DOCUMENT, SCREENSHOT, PICTURE]:
if fnmatch_any(file, *file_type['rules']):
move_to_folder(file_type, file)
break
def gui():
layout = [
[sg.Text("Organize your desktop by moving files to proper folders", font=('Helvetica', 20), justification='center')],
get_folder_gui(DOCUMENT),
get_folder_gui(SCREENSHOT),
get_folder_gui(PICTURE),
[sg.Submit(font=('Helvetica', 20)), sg.Cancel(font=('Helvetica', 20))]
]
window = sg.Window('Desktorganizer').Layout(layout)
button, values = window.Read()
return button, values
def adjust_options(values):
if values['Browse']:
DOCUMENT['path'] = values['Browse']
if values['Browse0']:
SCREENSHOT['path'] = values['Browse0']
if values['Browse1']:
PICTURE['path'] = values['Browse1']
def get_folder_gui(folder):
return [
sg.Text(f"{folder['name']} Folder",
size=(20, 1),
font=('Helvetica', 20),
auto_size_text=False,
justification='left'),
sg.InputText(f"{folder['path']}", font=('Helvetica', 20)),
sg.FolderBrowse(font=('Helvetica', 20)),
]
def fnmatch_any(file, *args):
for arg in args:
if fnmatch.fnmatch(file, arg):
return True
return False
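# Example: fnmatch_any('Screen Shot 2020-01-01.png', *SCREENSHOT['rules'])
# returns True, so organize_desktop() routes the file to the Screenshots
# folder before the broader Pictures patterns get a chance to match.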
def move_to_folder(folder, file: str):
print(f"{file} is a {folder['description']}!")
is_directory(folder['path'])
move(file, folder['path'])
def move(file: str, folder: str):
current_path = f"{PATH}/{file}"
new_path = f"{folder}/{file}"
os.rename(current_path, new_path)
print(f"File '{file}' has been moved from '{current_path}' to '{new_path}'")
def is_directory(directory: str):
if not os.path.exists(directory):
os.makedirs(directory)
if __name__ == "__main__":
    main()
aidants_connect_web/migrations/0047_cartetotp.py | betagouv/Aidants_Connect | 16 | 6633260 | # Generated by Django 3.1.1 on 2021-02-15 15:19
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("aidants_connect_web", "0046_organisation_zipcode"),
]
operations = [
migrations.CreateModel(
name="CarteTOTP",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("serial_number", models.CharField(max_length=100)),
("seed", models.CharField(max_length=40)),
("created_at", models.DateTimeField(auto_now_add=True)),
],
),
    ]
dingtalk/python/alibabacloud_dingtalk/alitrip_1_0/models.py | aliyun/dingtalk-sdk | 15 | 6633261 | # -*- coding: utf-8 -*-
# This file is auto-generated, don't edit it. Thanks.
from Tea.model import TeaModel
from typing import Dict, List
class ApproveCityCarApplyHeaders(TeaModel):
def __init__(
self,
common_headers: Dict[str, str] = None,
x_acs_dingtalk_access_token: str = None,
):
self.common_headers = common_headers
self.x_acs_dingtalk_access_token = x_acs_dingtalk_access_token
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.common_headers is not None:
result['commonHeaders'] = self.common_headers
if self.x_acs_dingtalk_access_token is not None:
result['x-acs-dingtalk-access-token'] = self.x_acs_dingtalk_access_token
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('commonHeaders') is not None:
self.common_headers = m.get('commonHeaders')
if m.get('x-acs-dingtalk-access-token') is not None:
self.x_acs_dingtalk_access_token = m.get('x-acs-dingtalk-access-token')
return self
class ApproveCityCarApplyRequest(TeaModel):
def __init__(
self,
corp_id: str = None,
operate_time: str = None,
remark: str = None,
status: int = None,
third_part_apply_id: str = None,
user_id: str = None,
ding_suite_key: str = None,
ding_corp_id: str = None,
ding_token_grant_type: int = None,
):
        # Third-party corp ID
        self.corp_id = corp_id
        # Approval time
        self.operate_time = operate_time
        # Approval remark
        self.remark = remark
        # Approval result: 1 - approved, 2 - rejected
        self.status = status
        # Third-party approval form ID
        self.third_part_apply_id = third_part_apply_id
        # Third-party user ID of the approver
        self.user_id = user_id
        # suiteKey
        self.ding_suite_key = ding_suite_key
        # account
        self.ding_corp_id = ding_corp_id
        # tokenGrantType
        self.ding_token_grant_type = ding_token_grant_type
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.corp_id is not None:
result['corpId'] = self.corp_id
if self.operate_time is not None:
result['operateTime'] = self.operate_time
if self.remark is not None:
result['remark'] = self.remark
if self.status is not None:
result['status'] = self.status
if self.third_part_apply_id is not None:
result['thirdPartApplyId'] = self.third_part_apply_id
if self.user_id is not None:
result['userId'] = self.user_id
if self.ding_suite_key is not None:
result['dingSuiteKey'] = self.ding_suite_key
if self.ding_corp_id is not None:
result['dingCorpId'] = self.ding_corp_id
if self.ding_token_grant_type is not None:
result['dingTokenGrantType'] = self.ding_token_grant_type
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('corpId') is not None:
self.corp_id = m.get('corpId')
if m.get('operateTime') is not None:
self.operate_time = m.get('operateTime')
if m.get('remark') is not None:
self.remark = m.get('remark')
if m.get('status') is not None:
self.status = m.get('status')
if m.get('thirdPartApplyId') is not None:
self.third_part_apply_id = m.get('thirdPartApplyId')
if m.get('userId') is not None:
self.user_id = m.get('userId')
if m.get('dingSuiteKey') is not None:
self.ding_suite_key = m.get('dingSuiteKey')
if m.get('dingCorpId') is not None:
self.ding_corp_id = m.get('dingCorpId')
if m.get('dingTokenGrantType') is not None:
self.ding_token_grant_type = m.get('dingTokenGrantType')
return self
class ApproveCityCarApplyResponseBody(TeaModel):
def __init__(
self,
approve_result: bool = None,
):
        # Approval result
self.approve_result = approve_result
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.approve_result is not None:
result['approveResult'] = self.approve_result
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('approveResult') is not None:
self.approve_result = m.get('approveResult')
return self
class ApproveCityCarApplyResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: ApproveCityCarApplyResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = ApproveCityCarApplyResponseBody()
self.body = temp_model.from_map(m['body'])
return self
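# A minimal construction sketch (all field values below are illustrative):
#   req = ApproveCityCarApplyRequest(
#       corp_id='ding1234', third_part_apply_id='apply-001',
#       user_id='manager-42', status=1, operate_time='2021-03-01 10:00:00')
#   assert req.to_map()['thirdPartApplyId'] == 'apply-001'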
class BillSettementHotelHeaders(TeaModel):
def __init__(
self,
common_headers: Dict[str, str] = None,
x_acs_dingtalk_access_token: str = None,
):
self.common_headers = common_headers
self.x_acs_dingtalk_access_token = x_acs_dingtalk_access_token
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.common_headers is not None:
result['commonHeaders'] = self.common_headers
if self.x_acs_dingtalk_access_token is not None:
result['x-acs-dingtalk-access-token'] = self.x_acs_dingtalk_access_token
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('commonHeaders') is not None:
self.common_headers = m.get('commonHeaders')
if m.get('x-acs-dingtalk-access-token') is not None:
self.x_acs_dingtalk_access_token = m.get('x-acs-dingtalk-access-token')
return self
class BillSettementHotelRequest(TeaModel):
def __init__(
self,
corp_id: str = None,
category: int = None,
page_size: int = None,
period_start: str = None,
page_number: int = None,
period_end: str = None,
):
        # Third-party corp ID
        self.corp_id = corp_id
        # Category: 1 - flight; 2 - hotel; 4 - car; 6 - business-travel train ticket
        self.category = category
        # Page size; default 100, max 500
        self.page_size = page_size
        # Start date of the accounting-update window
        self.period_start = period_start
        # Page number, starting from 1
        self.page_number = page_number
        # End date of the accounting-update window
        self.period_end = period_end
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.corp_id is not None:
result['corpId'] = self.corp_id
if self.category is not None:
result['category'] = self.category
if self.page_size is not None:
result['pageSize'] = self.page_size
if self.period_start is not None:
result['periodStart'] = self.period_start
if self.page_number is not None:
result['pageNumber'] = self.page_number
if self.period_end is not None:
result['periodEnd'] = self.period_end
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('corpId') is not None:
self.corp_id = m.get('corpId')
if m.get('category') is not None:
self.category = m.get('category')
if m.get('pageSize') is not None:
self.page_size = m.get('pageSize')
if m.get('periodStart') is not None:
self.period_start = m.get('periodStart')
if m.get('pageNumber') is not None:
self.page_number = m.get('pageNumber')
if m.get('periodEnd') is not None:
self.period_end = m.get('periodEnd')
return self
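# Illustrative sketch (hypothetical IDs and dates): a hotel settlement query
# is paginated; per the field comments above, category 2 selects hotel bills,
# pageSize defaults to 100 (max 500), and pageNumber starts at 1.
def _example_build_hotel_settlement_request():
    req = BillSettementHotelRequest(
        corp_id='corpXXXX',
        category=2,
        page_size=100,
        page_number=1,
        period_start='2021-03-01 00:00:00',
        period_end='2021-03-31 23:59:59',
    )
    return req.to_map()  # dict with the camelCase keys the API expects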
class BillSettementHotelResponseBodyModuleDataList(TeaModel):
def __init__(
self,
alipay_trade_no: str = None,
apply_id: str = None,
book_time: str = None,
booker_id: str = None,
booker_name: str = None,
capital_direction: str = None,
cascade_department: str = None,
check_in_date: str = None,
checkout_date: str = None,
city: str = None,
city_code: str = None,
corp_refund_fee: float = None,
corp_total_fee: float = None,
cost_center: str = None,
cost_center_number: str = None,
department: str = None,
department_id: str = None,
fee_type: str = None,
fees: float = None,
fu_point_fee: float = None,
hotel_name: str = None,
index: str = None,
invoice_title: str = None,
is_negotiation: bool = None,
is_share_str: str = None,
nights: int = None,
order_id: str = None,
order_price: float = None,
order_type: str = None,
over_apply_id: str = None,
person_refund_fee: float = None,
person_settle_price: float = None,
primary_id: int = None,
project_code: str = None,
project_name: str = None,
promotion_fee: float = None,
room_number: int = None,
room_price: float = None,
room_type: str = None,
service_fee: float = None,
settlement_fee: int = None,
settlement_time: str = None,
settlement_type: str = None,
status: int = None,
total_nights: int = None,
traveler_id: str = None,
traveler_name: str = None,
booker_job_no: str = None,
traveler_job_no: str = None,
):
        # Payment transaction serial number
        self.alipay_trade_no = alipay_trade_no
        # Approval form ID
        self.apply_id = apply_id
        # Booking time
        self.book_time = book_time
        # Booker user ID
        self.booker_id = booker_id
        # Booker name
        self.booker_name = booker_name
        # Capital flow direction
        self.capital_direction = capital_direction
        # Cascaded department
        self.cascade_department = cascade_department
        # Check-in date
        self.check_in_date = check_in_date
        # Check-out date
        self.checkout_date = checkout_date
        # City of stay
        self.city = city
        # City code
        self.city_code = city_code
        # Corporate refund amount
        self.corp_refund_fee = corp_refund_fee
        # Corporate payment amount
        self.corp_total_fee = corp_total_fee
        # Cost center name
        self.cost_center = cost_center
        # Cost center code
        self.cost_center_number = cost_center_number
        # Leaf-level department
        self.department = department
        # Department ID
        self.department_id = department_id
        # Fee type
        self.fee_type = fee_type
        # Incidental fees
        self.fees = fees
        # Fu-point (福豆) payment
        self.fu_point_fee = fu_point_fee
        # Hotel name
        self.hotel_name = hotel_name
        # Sequence number
        self.index = index
        # Invoice title
        self.invoice_title = invoice_title
        # Whether a negotiated (contract) rate
        self.is_negotiation = is_negotiation
        # Whether the room was shared
        self.is_share_str = is_share_str
        # Number of nights stayed
        self.nights = nights
        # Order ID
        self.order_id = order_id
        # Order amount
        self.order_price = order_price
        # Order type
        self.order_type = order_type
        # Over-standard approval form ID
        self.over_apply_id = over_apply_id
        # Personal refund amount
        self.person_refund_fee = person_refund_fee
        # Personal payment amount
        self.person_settle_price = person_settle_price
        # Primary key ID
        self.primary_id = primary_id
        # Project code
        self.project_code = project_code
        # Project name
        self.project_name = project_name
        # Coupon amount
        self.promotion_fee = promotion_fee
        # Number of rooms
        self.room_number = room_number
        # Room rate
        self.room_price = room_price
        # Room type
        self.room_type = room_type
        # Service fee; only shown for feeType 20111 and 20112
        self.service_fee = service_fee
        # Settlement amount
        self.settlement_fee = settlement_fee
        # Settlement time
        self.settlement_time = settlement_time
        # Settlement type
        self.settlement_type = settlement_type
        # Posting (accounting entry) status
        self.status = status
        # Total room-nights
        self.total_nights = total_nights
        # Traveler user ID
        self.traveler_id = traveler_id
        # Traveler name
        self.traveler_name = traveler_name
        # Booker employee number
        self.booker_job_no = booker_job_no
        # Traveler employee number
        self.traveler_job_no = traveler_job_no
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.alipay_trade_no is not None:
result['alipayTradeNo'] = self.alipay_trade_no
if self.apply_id is not None:
result['applyId'] = self.apply_id
if self.book_time is not None:
result['bookTime'] = self.book_time
if self.booker_id is not None:
result['bookerId'] = self.booker_id
if self.booker_name is not None:
result['bookerName'] = self.booker_name
if self.capital_direction is not None:
result['capitalDirection'] = self.capital_direction
if self.cascade_department is not None:
result['cascadeDepartment'] = self.cascade_department
if self.check_in_date is not None:
result['checkInDate'] = self.check_in_date
if self.checkout_date is not None:
result['checkoutDate'] = self.checkout_date
if self.city is not None:
result['city'] = self.city
if self.city_code is not None:
result['cityCode'] = self.city_code
if self.corp_refund_fee is not None:
result['corpRefundFee'] = self.corp_refund_fee
if self.corp_total_fee is not None:
result['corpTotalFee'] = self.corp_total_fee
if self.cost_center is not None:
result['costCenter'] = self.cost_center
if self.cost_center_number is not None:
result['costCenterNumber'] = self.cost_center_number
if self.department is not None:
result['department'] = self.department
if self.department_id is not None:
result['departmentId'] = self.department_id
if self.fee_type is not None:
result['feeType'] = self.fee_type
if self.fees is not None:
result['fees'] = self.fees
if self.fu_point_fee is not None:
result['fuPointFee'] = self.fu_point_fee
if self.hotel_name is not None:
result['hotelName'] = self.hotel_name
if self.index is not None:
result['index'] = self.index
if self.invoice_title is not None:
result['invoiceTitle'] = self.invoice_title
if self.is_negotiation is not None:
result['isNegotiation'] = self.is_negotiation
if self.is_share_str is not None:
result['isShareStr'] = self.is_share_str
if self.nights is not None:
result['nights'] = self.nights
if self.order_id is not None:
result['orderId'] = self.order_id
if self.order_price is not None:
result['orderPrice'] = self.order_price
if self.order_type is not None:
result['orderType'] = self.order_type
if self.over_apply_id is not None:
result['overApplyId'] = self.over_apply_id
if self.person_refund_fee is not None:
result['personRefundFee'] = self.person_refund_fee
if self.person_settle_price is not None:
result['personSettlePrice'] = self.person_settle_price
if self.primary_id is not None:
result['primaryId'] = self.primary_id
if self.project_code is not None:
result['projectCode'] = self.project_code
if self.project_name is not None:
result['projectName'] = self.project_name
if self.promotion_fee is not None:
result['promotionFee'] = self.promotion_fee
if self.room_number is not None:
result['roomNumber'] = self.room_number
if self.room_price is not None:
result['roomPrice'] = self.room_price
if self.room_type is not None:
result['roomType'] = self.room_type
if self.service_fee is not None:
result['serviceFee'] = self.service_fee
if self.settlement_fee is not None:
result['settlementFee'] = self.settlement_fee
if self.settlement_time is not None:
result['settlementTime'] = self.settlement_time
if self.settlement_type is not None:
result['settlementType'] = self.settlement_type
if self.status is not None:
result['status'] = self.status
if self.total_nights is not None:
result['totalNights'] = self.total_nights
if self.traveler_id is not None:
result['travelerId'] = self.traveler_id
if self.traveler_name is not None:
result['travelerName'] = self.traveler_name
if self.booker_job_no is not None:
result['bookerJobNo'] = self.booker_job_no
if self.traveler_job_no is not None:
result['travelerJobNo'] = self.traveler_job_no
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('alipayTradeNo') is not None:
self.alipay_trade_no = m.get('alipayTradeNo')
if m.get('applyId') is not None:
self.apply_id = m.get('applyId')
if m.get('bookTime') is not None:
self.book_time = m.get('bookTime')
if m.get('bookerId') is not None:
self.booker_id = m.get('bookerId')
if m.get('bookerName') is not None:
self.booker_name = m.get('bookerName')
if m.get('capitalDirection') is not None:
self.capital_direction = m.get('capitalDirection')
if m.get('cascadeDepartment') is not None:
self.cascade_department = m.get('cascadeDepartment')
if m.get('checkInDate') is not None:
self.check_in_date = m.get('checkInDate')
if m.get('checkoutDate') is not None:
self.checkout_date = m.get('checkoutDate')
if m.get('city') is not None:
self.city = m.get('city')
if m.get('cityCode') is not None:
self.city_code = m.get('cityCode')
if m.get('corpRefundFee') is not None:
self.corp_refund_fee = m.get('corpRefundFee')
if m.get('corpTotalFee') is not None:
self.corp_total_fee = m.get('corpTotalFee')
if m.get('costCenter') is not None:
self.cost_center = m.get('costCenter')
if m.get('costCenterNumber') is not None:
self.cost_center_number = m.get('costCenterNumber')
if m.get('department') is not None:
self.department = m.get('department')
if m.get('departmentId') is not None:
self.department_id = m.get('departmentId')
if m.get('feeType') is not None:
self.fee_type = m.get('feeType')
if m.get('fees') is not None:
self.fees = m.get('fees')
if m.get('fuPointFee') is not None:
self.fu_point_fee = m.get('fuPointFee')
if m.get('hotelName') is not None:
self.hotel_name = m.get('hotelName')
if m.get('index') is not None:
self.index = m.get('index')
if m.get('invoiceTitle') is not None:
self.invoice_title = m.get('invoiceTitle')
if m.get('isNegotiation') is not None:
self.is_negotiation = m.get('isNegotiation')
if m.get('isShareStr') is not None:
self.is_share_str = m.get('isShareStr')
if m.get('nights') is not None:
self.nights = m.get('nights')
if m.get('orderId') is not None:
self.order_id = m.get('orderId')
if m.get('orderPrice') is not None:
self.order_price = m.get('orderPrice')
if m.get('orderType') is not None:
self.order_type = m.get('orderType')
if m.get('overApplyId') is not None:
self.over_apply_id = m.get('overApplyId')
if m.get('personRefundFee') is not None:
self.person_refund_fee = m.get('personRefundFee')
if m.get('personSettlePrice') is not None:
self.person_settle_price = m.get('personSettlePrice')
if m.get('primaryId') is not None:
self.primary_id = m.get('primaryId')
if m.get('projectCode') is not None:
self.project_code = m.get('projectCode')
if m.get('projectName') is not None:
self.project_name = m.get('projectName')
if m.get('promotionFee') is not None:
self.promotion_fee = m.get('promotionFee')
if m.get('roomNumber') is not None:
self.room_number = m.get('roomNumber')
if m.get('roomPrice') is not None:
self.room_price = m.get('roomPrice')
if m.get('roomType') is not None:
self.room_type = m.get('roomType')
if m.get('serviceFee') is not None:
self.service_fee = m.get('serviceFee')
if m.get('settlementFee') is not None:
self.settlement_fee = m.get('settlementFee')
if m.get('settlementTime') is not None:
self.settlement_time = m.get('settlementTime')
if m.get('settlementType') is not None:
self.settlement_type = m.get('settlementType')
if m.get('status') is not None:
self.status = m.get('status')
if m.get('totalNights') is not None:
self.total_nights = m.get('totalNights')
if m.get('travelerId') is not None:
self.traveler_id = m.get('travelerId')
if m.get('travelerName') is not None:
self.traveler_name = m.get('travelerName')
if m.get('bookerJobNo') is not None:
self.booker_job_no = m.get('bookerJobNo')
if m.get('travelerJobNo') is not None:
self.traveler_job_no = m.get('travelerJobNo')
return self
class BillSettementHotelResponseBodyModule(TeaModel):
def __init__(
self,
category: int = None,
corp_id: str = None,
data_list: List[BillSettementHotelResponseBodyModuleDataList] = None,
period_end: str = None,
period_start: str = None,
total_num: int = None,
):
        # Category
        self.category = category
        # Corp ID
        self.corp_id = corp_id
        # Data list
        self.data_list = data_list
        # End date of the accounting-update period
        self.period_end = period_end
        # Start date of the accounting-update period
        self.period_start = period_start
        # Total number of records
        self.total_num = total_num
def validate(self):
if self.data_list:
for k in self.data_list:
if k:
k.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.category is not None:
result['category'] = self.category
if self.corp_id is not None:
result['corpId'] = self.corp_id
result['dataList'] = []
if self.data_list is not None:
for k in self.data_list:
result['dataList'].append(k.to_map() if k else None)
if self.period_end is not None:
result['periodEnd'] = self.period_end
if self.period_start is not None:
result['periodStart'] = self.period_start
if self.total_num is not None:
result['totalNum'] = self.total_num
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('category') is not None:
self.category = m.get('category')
if m.get('corpId') is not None:
self.corp_id = m.get('corpId')
self.data_list = []
if m.get('dataList') is not None:
for k in m.get('dataList'):
temp_model = BillSettementHotelResponseBodyModuleDataList()
self.data_list.append(temp_model.from_map(k))
if m.get('periodEnd') is not None:
self.period_end = m.get('periodEnd')
if m.get('periodStart') is not None:
self.period_start = m.get('periodStart')
if m.get('totalNum') is not None:
self.total_num = m.get('totalNum')
return self
class BillSettementHotelResponseBody(TeaModel):
def __init__(
self,
result_msg: str = None,
module: BillSettementHotelResponseBodyModule = None,
success: bool = None,
result_code: int = None,
):
        # Result message
        self.result_msg = result_msg
        # Response payload (module)
        self.module = module
        # Whether the call succeeded
        self.success = success
        # Result code
        self.result_code = result_code
def validate(self):
if self.module:
self.module.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.result_msg is not None:
result['resultMsg'] = self.result_msg
if self.module is not None:
result['module'] = self.module.to_map()
if self.success is not None:
result['success'] = self.success
if self.result_code is not None:
result['resultCode'] = self.result_code
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('resultMsg') is not None:
self.result_msg = m.get('resultMsg')
if m.get('module') is not None:
temp_model = BillSettementHotelResponseBodyModule()
self.module = temp_model.from_map(m['module'])
if m.get('success') is not None:
self.success = m.get('success')
if m.get('resultCode') is not None:
self.result_code = m.get('resultCode')
return self
class BillSettementHotelResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: BillSettementHotelResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = BillSettementHotelResponseBody()
self.body = temp_model.from_map(m['body'])
return self
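# Illustrative sketch (payload is a hypothetical already-decoded response
# dict): from_map builds the nested module/dataList models, so iterating the
# per-order rows is straightforward. Only a few of the ~50 fields are shown.
def _example_parse_hotel_settlement(payload: dict):
    body = BillSettementHotelResponseBody().from_map(payload)
    if not body.success:
        raise RuntimeError(f'query failed: {body.result_code} {body.result_msg}')
    for order in body.module.data_list or []:
        print(order.order_id, order.hotel_name, order.settlement_fee)
    return body.module.total_num  # total row count, used to drive paging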
class GetFlightExceedApplyHeaders(TeaModel):
def __init__(
self,
common_headers: Dict[str, str] = None,
x_acs_dingtalk_access_token: str = None,
):
self.common_headers = common_headers
self.x_acs_dingtalk_access_token = x_acs_dingtalk_access_token
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.common_headers is not None:
result['commonHeaders'] = self.common_headers
if self.x_acs_dingtalk_access_token is not None:
result['x-acs-dingtalk-access-token'] = self.x_acs_dingtalk_access_token
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('commonHeaders') is not None:
self.common_headers = m.get('commonHeaders')
if m.get('x-acs-dingtalk-access-token') is not None:
self.x_acs_dingtalk_access_token = m.get('x-acs-dingtalk-access-token')
return self
class GetFlightExceedApplyRequest(TeaModel):
def __init__(
self,
corp_id: str = None,
apply_id: str = None,
):
        # Third-party corp ID
        self.corp_id = corp_id
        # Business-travel over-standard approval form ID
        self.apply_id = apply_id
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.corp_id is not None:
result['corpId'] = self.corp_id
if self.apply_id is not None:
result['applyId'] = self.apply_id
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('corpId') is not None:
self.corp_id = m.get('corpId')
if m.get('applyId') is not None:
self.apply_id = m.get('applyId')
return self
class GetFlightExceedApplyResponseBodyApplyIntentionInfoDO(TeaModel):
def __init__(
self,
arr_city: str = None,
arr_city_name: str = None,
arr_time: str = None,
cabin: str = None,
cabin_class: int = None,
cabin_class_str: str = None,
dep_city: str = None,
dep_city_name: str = None,
dep_time: str = None,
discount: float = None,
flight_no: str = None,
price: int = None,
type: int = None,
):
        # Arrival city three-letter code
        self.arr_city = arr_city
        # Arrival city name
        self.arr_city_name = arr_city_name
        # Arrival time
        self.arr_time = arr_time
        # Over-standard cabin: F = first, C = business, Y = economy, P = premium economy
        self.cabin = cabin
        # Requested over-standard cabin class: 0 = first, 1 = business, 2 = economy, 3 = premium economy
        self.cabin_class = cabin_class
        # Cabin class description: first, business, economy, or premium economy
        self.cabin_class_str = cabin_class_str
        # Departure city three-letter code
        self.dep_city = dep_city
        # Departure city name
        self.dep_city_name = dep_city_name
        # Departure time
        self.dep_time = dep_time
        # Discount
        self.discount = discount
        # Flight number
        self.flight_no = flight_no
        # Intended flight price (CNY)
        self.price = price
        # Over-standard type: 1 = discount; 2, 8, 10 = time; 3, 9, 11 = discount and time
        self.type = type
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.arr_city is not None:
result['arrCity'] = self.arr_city
if self.arr_city_name is not None:
result['arrCityName'] = self.arr_city_name
if self.arr_time is not None:
result['arrTime'] = self.arr_time
if self.cabin is not None:
result['cabin'] = self.cabin
if self.cabin_class is not None:
result['cabinClass'] = self.cabin_class
if self.cabin_class_str is not None:
result['cabinClassStr'] = self.cabin_class_str
if self.dep_city is not None:
result['depCity'] = self.dep_city
if self.dep_city_name is not None:
result['depCityName'] = self.dep_city_name
if self.dep_time is not None:
result['depTime'] = self.dep_time
if self.discount is not None:
result['discount'] = self.discount
if self.flight_no is not None:
result['flightNo'] = self.flight_no
if self.price is not None:
result['price'] = self.price
if self.type is not None:
result['type'] = self.type
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('arrCity') is not None:
self.arr_city = m.get('arrCity')
if m.get('arrCityName') is not None:
self.arr_city_name = m.get('arrCityName')
if m.get('arrTime') is not None:
self.arr_time = m.get('arrTime')
if m.get('cabin') is not None:
self.cabin = m.get('cabin')
if m.get('cabinClass') is not None:
self.cabin_class = m.get('cabinClass')
if m.get('cabinClassStr') is not None:
self.cabin_class_str = m.get('cabinClassStr')
if m.get('depCity') is not None:
self.dep_city = m.get('depCity')
if m.get('depCityName') is not None:
self.dep_city_name = m.get('depCityName')
if m.get('depTime') is not None:
self.dep_time = m.get('depTime')
if m.get('discount') is not None:
self.discount = m.get('discount')
if m.get('flightNo') is not None:
self.flight_no = m.get('flightNo')
if m.get('price') is not None:
self.price = m.get('price')
if m.get('type') is not None:
self.type = m.get('type')
return self
class GetFlightExceedApplyResponseBody(TeaModel):
def __init__(
self,
corp_id: str = None,
apply_id: int = None,
status: int = None,
btrip_cause: str = None,
exceed_type: int = None,
exceed_reason: str = None,
origin_standard: str = None,
submit_time: str = None,
user_id: str = None,
apply_intention_info_do: GetFlightExceedApplyResponseBodyApplyIntentionInfoDO = None,
thirdpart_apply_id: str = None,
):
        # Third-party corp ID
        self.corp_id = corp_id
        # Business-travel over-standard approval form ID
        self.apply_id = apply_id
        # Approval status: 0 = pending, 1 = approved, 2 = rejected
        self.status = status
        # Business trip reason
        self.btrip_cause = btrip_cause
        # Over-standard type: 1 = discount; 2, 8, 10 = time; 3, 9, 11 = discount and time
        self.exceed_type = exceed_type
        # Over-standard reason
        self.exceed_reason = exceed_reason
        # Original travel standard
        self.origin_standard = origin_standard
        # Approval form submission time
        self.submit_time = submit_time
        # Third-party user ID
        self.user_id = user_id
        # Intended travel information
        self.apply_intention_info_do = apply_intention_info_do
        # Third-party business-trip approval form ID
        self.thirdpart_apply_id = thirdpart_apply_id
def validate(self):
if self.apply_intention_info_do:
self.apply_intention_info_do.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.corp_id is not None:
result['corpId'] = self.corp_id
if self.apply_id is not None:
result['applyId'] = self.apply_id
if self.status is not None:
result['status'] = self.status
if self.btrip_cause is not None:
result['btripCause'] = self.btrip_cause
if self.exceed_type is not None:
result['exceedType'] = self.exceed_type
if self.exceed_reason is not None:
result['exceedReason'] = self.exceed_reason
if self.origin_standard is not None:
result['originStandard'] = self.origin_standard
if self.submit_time is not None:
result['submitTime'] = self.submit_time
if self.user_id is not None:
result['userId'] = self.user_id
if self.apply_intention_info_do is not None:
result['applyIntentionInfoDO'] = self.apply_intention_info_do.to_map()
if self.thirdpart_apply_id is not None:
result['thirdpartApplyId'] = self.thirdpart_apply_id
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('corpId') is not None:
self.corp_id = m.get('corpId')
if m.get('applyId') is not None:
self.apply_id = m.get('applyId')
if m.get('status') is not None:
self.status = m.get('status')
if m.get('btripCause') is not None:
self.btrip_cause = m.get('btripCause')
if m.get('exceedType') is not None:
self.exceed_type = m.get('exceedType')
if m.get('exceedReason') is not None:
self.exceed_reason = m.get('exceedReason')
if m.get('originStandard') is not None:
self.origin_standard = m.get('originStandard')
if m.get('submitTime') is not None:
self.submit_time = m.get('submitTime')
if m.get('userId') is not None:
self.user_id = m.get('userId')
if m.get('applyIntentionInfoDO') is not None:
temp_model = GetFlightExceedApplyResponseBodyApplyIntentionInfoDO()
self.apply_intention_info_do = temp_model.from_map(m['applyIntentionInfoDO'])
if m.get('thirdpartApplyId') is not None:
self.thirdpart_apply_id = m.get('thirdpartApplyId')
return self
class GetFlightExceedApplyResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: GetFlightExceedApplyResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = GetFlightExceedApplyResponseBody()
self.body = temp_model.from_map(m['body'])
return self
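# Illustrative sketch (hypothetical payload): the flight exceed-apply response
# nests the intended itinerary under applyIntentionInfoDO, and from_map
# instantiates the nested model automatically.
def _example_read_flight_exceed_apply(payload: dict):
    body = GetFlightExceedApplyResponseBody().from_map(payload)
    intention = body.apply_intention_info_do
    if intention is not None:
        print(intention.dep_city, intention.arr_city,
              intention.flight_no, intention.cabin)
    return body.status  # 0 = pending, 1 = approved, 2 = rejected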
class BillSettementCarHeaders(TeaModel):
def __init__(
self,
common_headers: Dict[str, str] = None,
x_acs_dingtalk_access_token: str = None,
):
self.common_headers = common_headers
self.x_acs_dingtalk_access_token = x_acs_dingtalk_access_token
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.common_headers is not None:
result['commonHeaders'] = self.common_headers
if self.x_acs_dingtalk_access_token is not None:
result['x-acs-dingtalk-access-token'] = self.x_acs_dingtalk_access_token
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('commonHeaders') is not None:
self.common_headers = m.get('commonHeaders')
if m.get('x-acs-dingtalk-access-token') is not None:
self.x_acs_dingtalk_access_token = m.get('x-acs-dingtalk-access-token')
return self
class BillSettementCarRequest(TeaModel):
def __init__(
self,
corp_id: str = None,
category: int = None,
page_size: int = None,
period_start: str = None,
period_end: str = None,
page_number: int = None,
):
        # Third-party corp ID
        self.corp_id = corp_id
        # Category: 1 = flight; 2 = hotel; 4 = car; 6 = business-travel train ticket
        self.category = category
        # Page size; defaults to 100, capped at 500
        self.page_size = page_size
        # Start date of the accounting-update period
        self.period_start = period_start
        # End date of the accounting-update period
        self.period_end = period_end
        # Page number, starting from 1
        self.page_number = page_number
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.corp_id is not None:
result['corpId'] = self.corp_id
if self.category is not None:
result['category'] = self.category
if self.page_size is not None:
result['pageSize'] = self.page_size
if self.period_start is not None:
result['periodStart'] = self.period_start
if self.period_end is not None:
result['periodEnd'] = self.period_end
if self.page_number is not None:
result['pageNumber'] = self.page_number
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('corpId') is not None:
self.corp_id = m.get('corpId')
if m.get('category') is not None:
self.category = m.get('category')
if m.get('pageSize') is not None:
self.page_size = m.get('pageSize')
if m.get('periodStart') is not None:
self.period_start = m.get('periodStart')
if m.get('periodEnd') is not None:
self.period_end = m.get('periodEnd')
if m.get('pageNumber') is not None:
self.page_number = m.get('pageNumber')
return self
class BillSettementCarResponseBodyModuleDataList(TeaModel):
def __init__(
self,
alipay_trade_no: str = None,
apply_id: str = None,
arr_city: str = None,
arr_date: str = None,
arr_location: str = None,
arr_time: str = None,
book_time: str = None,
booker_id: str = None,
booker_name: str = None,
business_category: str = None,
capital_direction: str = None,
car_level: str = None,
cascade_department: str = None,
cost_center: str = None,
cost_center_number: str = None,
coupon: float = None,
coupon_price: float = None,
department: str = None,
department_id: str = None,
dept_city: str = None,
dept_date: str = None,
dept_location: str = None,
dept_time: str = None,
estimate_drive_distance: str = None,
estimate_price: float = None,
fee_type: str = None,
index: str = None,
invoice_title: str = None,
memo: str = None,
order_id: str = None,
order_price: float = None,
over_apply_id: str = None,
person_settle_fee: float = None,
primary_id: str = None,
project_code: str = None,
project_name: str = None,
provider_name: str = None,
real_drive_distance: str = None,
real_from_addr: str = None,
real_to_addr: str = None,
service_fee: str = None,
settlement_fee: float = None,
settlement_time: str = None,
settlement_type: str = None,
special_order: str = None,
special_reason: str = None,
status: int = None,
traveler_id: str = None,
traveler_name: str = None,
user_confirm_desc: str = None,
booker_job_no: str = None,
traveler_job_no: str = None,
):
        # Payment transaction serial number
        self.alipay_trade_no = alipay_trade_no
        # Approval form ID
        self.apply_id = apply_id
        # Arrival city
        self.arr_city = arr_city
        # Arrival date
        self.arr_date = arr_date
        # Arrival location
        self.arr_location = arr_location
        # Arrival time
        self.arr_time = arr_time
        # Booking time
        self.book_time = book_time
        # Booker user ID
        self.booker_id = booker_id
        # Booker name
        self.booker_name = booker_name
        # Reason for car use
        self.business_category = business_category
        # Capital flow direction
        self.capital_direction = capital_direction
        # Car class
        self.car_level = car_level
        # Cascaded department
        self.cascade_department = cascade_department
        # Cost center name
        self.cost_center = cost_center
        # Cost center code
        self.cost_center_number = cost_center_number
        # Coupon
        self.coupon = coupon
        # Coupon (discount) amount
        self.coupon_price = coupon_price
        # Leaf-level department
        self.department = department
        # Department ID
        self.department_id = department_id
        # Departure city
        self.dept_city = dept_city
        # Departure date
        self.dept_date = dept_date
        # Departure location
        self.dept_location = dept_location
        # Departure time
        self.dept_time = dept_time
        # Estimated driving distance
        self.estimate_drive_distance = estimate_drive_distance
        # Estimated price
        self.estimate_price = estimate_price
        # Fee type
        self.fee_type = fee_type
        # Sequence number
        self.index = index
        # Invoice title
        self.invoice_title = invoice_title
        # Memo (reason for car use)
        self.memo = memo
        # Order ID
        self.order_id = order_id
        # Order amount
        self.order_price = order_price
        # Over-standard approval form ID
        self.over_apply_id = over_apply_id
        # Personal payment amount
        self.person_settle_fee = person_settle_fee
        # Primary key ID
        self.primary_id = primary_id
        # Project code
        self.project_code = project_code
        # Project name
        self.project_name = project_name
        # Provider (supplier) name
        self.provider_name = provider_name
        # Actual driving distance
        self.real_drive_distance = real_drive_distance
        # Actual pickup location
        self.real_from_addr = real_from_addr
        # Actual drop-off location
        self.real_to_addr = real_to_addr
        # Service fee; only shown for feeType 40111
        self.service_fee = service_fee
        # Settlement amount
        self.settlement_fee = settlement_fee
        # Settlement time
        self.settlement_time = settlement_time
        # Settlement type
        self.settlement_type = settlement_type
        # Specially flagged order
        self.special_order = special_order
        # Reason for special flagging
        self.special_reason = special_reason
        # Posting (accounting entry) status
        self.status = status
        # Traveler user ID
        self.traveler_id = traveler_id
        # Traveler name
        self.traveler_name = traveler_name
        # Whether the employee confirmed the trip
        self.user_confirm_desc = user_confirm_desc
        # Booker employee number
        self.booker_job_no = booker_job_no
        # Traveler employee number
        self.traveler_job_no = traveler_job_no
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.alipay_trade_no is not None:
result['alipayTradeNo'] = self.alipay_trade_no
if self.apply_id is not None:
result['applyId'] = self.apply_id
if self.arr_city is not None:
result['arrCity'] = self.arr_city
if self.arr_date is not None:
result['arrDate'] = self.arr_date
if self.arr_location is not None:
result['arrLocation'] = self.arr_location
if self.arr_time is not None:
result['arrTime'] = self.arr_time
if self.book_time is not None:
result['bookTime'] = self.book_time
if self.booker_id is not None:
result['bookerId'] = self.booker_id
if self.booker_name is not None:
result['bookerName'] = self.booker_name
if self.business_category is not None:
result['businessCategory'] = self.business_category
if self.capital_direction is not None:
result['capitalDirection'] = self.capital_direction
if self.car_level is not None:
result['carLevel'] = self.car_level
if self.cascade_department is not None:
result['cascadeDepartment'] = self.cascade_department
if self.cost_center is not None:
result['costCenter'] = self.cost_center
if self.cost_center_number is not None:
result['costCenterNumber'] = self.cost_center_number
if self.coupon is not None:
result['coupon'] = self.coupon
if self.coupon_price is not None:
result['couponPrice'] = self.coupon_price
if self.department is not None:
result['department'] = self.department
if self.department_id is not None:
result['departmentId'] = self.department_id
if self.dept_city is not None:
result['deptCity'] = self.dept_city
if self.dept_date is not None:
result['deptDate'] = self.dept_date
if self.dept_location is not None:
result['deptLocation'] = self.dept_location
if self.dept_time is not None:
result['deptTime'] = self.dept_time
if self.estimate_drive_distance is not None:
result['estimateDriveDistance'] = self.estimate_drive_distance
if self.estimate_price is not None:
result['estimatePrice'] = self.estimate_price
if self.fee_type is not None:
result['feeType'] = self.fee_type
if self.index is not None:
result['index'] = self.index
if self.invoice_title is not None:
result['invoiceTitle'] = self.invoice_title
if self.memo is not None:
result['memo'] = self.memo
if self.order_id is not None:
result['orderId'] = self.order_id
if self.order_price is not None:
result['orderPrice'] = self.order_price
if self.over_apply_id is not None:
result['overApplyId'] = self.over_apply_id
if self.person_settle_fee is not None:
result['personSettleFee'] = self.person_settle_fee
if self.primary_id is not None:
result['primaryId'] = self.primary_id
if self.project_code is not None:
result['projectCode'] = self.project_code
if self.project_name is not None:
result['projectName'] = self.project_name
if self.provider_name is not None:
result['providerName'] = self.provider_name
if self.real_drive_distance is not None:
result['realDriveDistance'] = self.real_drive_distance
if self.real_from_addr is not None:
result['realFromAddr'] = self.real_from_addr
if self.real_to_addr is not None:
result['realToAddr'] = self.real_to_addr
if self.service_fee is not None:
result['serviceFee'] = self.service_fee
if self.settlement_fee is not None:
result['settlementFee'] = self.settlement_fee
if self.settlement_time is not None:
result['settlementTime'] = self.settlement_time
if self.settlement_type is not None:
result['settlementType'] = self.settlement_type
if self.special_order is not None:
result['specialOrder'] = self.special_order
if self.special_reason is not None:
result['specialReason'] = self.special_reason
if self.status is not None:
result['status'] = self.status
if self.traveler_id is not None:
result['travelerId'] = self.traveler_id
if self.traveler_name is not None:
result['travelerName'] = self.traveler_name
if self.user_confirm_desc is not None:
result['userConfirmDesc'] = self.user_confirm_desc
if self.booker_job_no is not None:
result['bookerJobNo'] = self.booker_job_no
if self.traveler_job_no is not None:
result['travelerJobNo'] = self.traveler_job_no
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('alipayTradeNo') is not None:
self.alipay_trade_no = m.get('alipayTradeNo')
if m.get('applyId') is not None:
self.apply_id = m.get('applyId')
if m.get('arrCity') is not None:
self.arr_city = m.get('arrCity')
if m.get('arrDate') is not None:
self.arr_date = m.get('arrDate')
if m.get('arrLocation') is not None:
self.arr_location = m.get('arrLocation')
if m.get('arrTime') is not None:
self.arr_time = m.get('arrTime')
if m.get('bookTime') is not None:
self.book_time = m.get('bookTime')
if m.get('bookerId') is not None:
self.booker_id = m.get('bookerId')
if m.get('bookerName') is not None:
self.booker_name = m.get('bookerName')
if m.get('businessCategory') is not None:
self.business_category = m.get('businessCategory')
if m.get('capitalDirection') is not None:
self.capital_direction = m.get('capitalDirection')
if m.get('carLevel') is not None:
self.car_level = m.get('carLevel')
if m.get('cascadeDepartment') is not None:
self.cascade_department = m.get('cascadeDepartment')
if m.get('costCenter') is not None:
self.cost_center = m.get('costCenter')
if m.get('costCenterNumber') is not None:
self.cost_center_number = m.get('costCenterNumber')
if m.get('coupon') is not None:
self.coupon = m.get('coupon')
if m.get('couponPrice') is not None:
self.coupon_price = m.get('couponPrice')
if m.get('department') is not None:
self.department = m.get('department')
if m.get('departmentId') is not None:
self.department_id = m.get('departmentId')
if m.get('deptCity') is not None:
self.dept_city = m.get('deptCity')
if m.get('deptDate') is not None:
self.dept_date = m.get('deptDate')
if m.get('deptLocation') is not None:
self.dept_location = m.get('deptLocation')
if m.get('deptTime') is not None:
self.dept_time = m.get('deptTime')
if m.get('estimateDriveDistance') is not None:
self.estimate_drive_distance = m.get('estimateDriveDistance')
if m.get('estimatePrice') is not None:
self.estimate_price = m.get('estimatePrice')
if m.get('feeType') is not None:
self.fee_type = m.get('feeType')
if m.get('index') is not None:
self.index = m.get('index')
if m.get('invoiceTitle') is not None:
self.invoice_title = m.get('invoiceTitle')
if m.get('memo') is not None:
self.memo = m.get('memo')
if m.get('orderId') is not None:
self.order_id = m.get('orderId')
if m.get('orderPrice') is not None:
self.order_price = m.get('orderPrice')
if m.get('overApplyId') is not None:
self.over_apply_id = m.get('overApplyId')
if m.get('personSettleFee') is not None:
self.person_settle_fee = m.get('personSettleFee')
if m.get('primaryId') is not None:
self.primary_id = m.get('primaryId')
if m.get('projectCode') is not None:
self.project_code = m.get('projectCode')
if m.get('projectName') is not None:
self.project_name = m.get('projectName')
if m.get('providerName') is not None:
self.provider_name = m.get('providerName')
if m.get('realDriveDistance') is not None:
self.real_drive_distance = m.get('realDriveDistance')
if m.get('realFromAddr') is not None:
self.real_from_addr = m.get('realFromAddr')
if m.get('realToAddr') is not None:
self.real_to_addr = m.get('realToAddr')
if m.get('serviceFee') is not None:
self.service_fee = m.get('serviceFee')
if m.get('settlementFee') is not None:
self.settlement_fee = m.get('settlementFee')
if m.get('settlementTime') is not None:
self.settlement_time = m.get('settlementTime')
if m.get('settlementType') is not None:
self.settlement_type = m.get('settlementType')
if m.get('specialOrder') is not None:
self.special_order = m.get('specialOrder')
if m.get('specialReason') is not None:
self.special_reason = m.get('specialReason')
if m.get('status') is not None:
self.status = m.get('status')
if m.get('travelerId') is not None:
self.traveler_id = m.get('travelerId')
if m.get('travelerName') is not None:
self.traveler_name = m.get('travelerName')
if m.get('userConfirmDesc') is not None:
self.user_confirm_desc = m.get('userConfirmDesc')
if m.get('bookerJobNo') is not None:
self.booker_job_no = m.get('bookerJobNo')
if m.get('travelerJobNo') is not None:
self.traveler_job_no = m.get('travelerJobNo')
return self
class BillSettementCarResponseBodyModule(TeaModel):
def __init__(
self,
category: int = None,
corp_id: str = None,
data_list: List[BillSettementCarResponseBodyModuleDataList] = None,
period_end: str = None,
period_start: str = None,
total_num: int = None,
):
        # Category
        self.category = category
        # Corp ID
        self.corp_id = corp_id
        # Data list
        self.data_list = data_list
        # End date of the accounting-update period
        self.period_end = period_end
        # Start date of the accounting-update period
        self.period_start = period_start
        # Total number of records
        self.total_num = total_num
def validate(self):
if self.data_list:
for k in self.data_list:
if k:
k.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.category is not None:
result['category'] = self.category
if self.corp_id is not None:
result['corpId'] = self.corp_id
result['dataList'] = []
if self.data_list is not None:
for k in self.data_list:
result['dataList'].append(k.to_map() if k else None)
if self.period_end is not None:
result['periodEnd'] = self.period_end
if self.period_start is not None:
result['periodStart'] = self.period_start
if self.total_num is not None:
result['totalNum'] = self.total_num
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('category') is not None:
self.category = m.get('category')
if m.get('corpId') is not None:
self.corp_id = m.get('corpId')
self.data_list = []
if m.get('dataList') is not None:
for k in m.get('dataList'):
temp_model = BillSettementCarResponseBodyModuleDataList()
self.data_list.append(temp_model.from_map(k))
if m.get('periodEnd') is not None:
self.period_end = m.get('periodEnd')
if m.get('periodStart') is not None:
self.period_start = m.get('periodStart')
if m.get('totalNum') is not None:
self.total_num = m.get('totalNum')
return self
class BillSettementCarResponseBody(TeaModel):
def __init__(
self,
result_msg: str = None,
module: BillSettementCarResponseBodyModule = None,
success: bool = None,
result_code: int = None,
):
        # Result message
        self.result_msg = result_msg
        # Response payload (module)
        self.module = module
        # Whether the call succeeded
        self.success = success
        # Result code
        self.result_code = result_code
def validate(self):
if self.module:
self.module.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.result_msg is not None:
result['resultMsg'] = self.result_msg
if self.module is not None:
result['module'] = self.module.to_map()
if self.success is not None:
result['success'] = self.success
if self.result_code is not None:
result['resultCode'] = self.result_code
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('resultMsg') is not None:
self.result_msg = m.get('resultMsg')
if m.get('module') is not None:
temp_model = BillSettementCarResponseBodyModule()
self.module = temp_model.from_map(m['module'])
if m.get('success') is not None:
self.success = m.get('success')
if m.get('resultCode') is not None:
self.result_code = m.get('resultCode')
return self
class BillSettementCarResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: BillSettementCarResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = BillSettementCarResponseBody()
self.body = temp_model.from_map(m['body'])
return self
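# Illustrative sketch: validate() on a parsed response recurses through the
# nested module and every dataList entry, so a single call walks the whole
# parsed tree before the rows are consumed. The payload is hypothetical.
def _example_validate_car_settlement(payload: dict):
    body = BillSettementCarResponseBody().from_map(payload)
    body.validate()
    return [(o.order_id, o.real_from_addr, o.real_to_addr)
            for o in (body.module.data_list or [])]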
class BillSettementBtripTrainHeaders(TeaModel):
def __init__(
self,
common_headers: Dict[str, str] = None,
x_acs_dingtalk_access_token: str = None,
):
self.common_headers = common_headers
self.x_acs_dingtalk_access_token = x_acs_dingtalk_access_token
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.common_headers is not None:
result['commonHeaders'] = self.common_headers
if self.x_acs_dingtalk_access_token is not None:
result['x-acs-dingtalk-access-token'] = self.x_acs_dingtalk_access_token
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('commonHeaders') is not None:
self.common_headers = m.get('commonHeaders')
if m.get('x-acs-dingtalk-access-token') is not None:
self.x_acs_dingtalk_access_token = m.get('x-acs-dingtalk-access-token')
return self
class BillSettementBtripTrainRequest(TeaModel):
def __init__(
self,
corp_id: str = None,
category: int = None,
page_size: int = None,
period_start: str = None,
page_number: int = None,
period_end: str = None,
):
        # Third-party corp ID
        self.corp_id = corp_id
        # Category: 1 = flight; 2 = hotel; 4 = car; 6 = business-travel train ticket
        self.category = category
        # Page size; defaults to 100, capped at 500
        self.page_size = page_size
        # Start date of the accounting-update period
        self.period_start = period_start
        # Page number, starting from 1
        self.page_number = page_number
        # End date of the accounting-update period
        self.period_end = period_end
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.corp_id is not None:
result['corpId'] = self.corp_id
if self.category is not None:
result['category'] = self.category
if self.page_size is not None:
result['pageSize'] = self.page_size
if self.period_start is not None:
result['periodStart'] = self.period_start
if self.page_number is not None:
result['pageNumber'] = self.page_number
if self.period_end is not None:
result['periodEnd'] = self.period_end
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('corpId') is not None:
self.corp_id = m.get('corpId')
if m.get('category') is not None:
self.category = m.get('category')
if m.get('pageSize') is not None:
self.page_size = m.get('pageSize')
if m.get('periodStart') is not None:
self.period_start = m.get('periodStart')
if m.get('pageNumber') is not None:
self.page_number = m.get('pageNumber')
if m.get('periodEnd') is not None:
self.period_end = m.get('periodEnd')
return self
class BillSettementBtripTrainResponseBodyModuleDataList(TeaModel):
def __init__(
self,
alipay_trade_no: str = None,
apply_id: str = None,
arr_date: str = None,
arr_station: str = None,
arr_time: str = None,
book_time: str = None,
booker_id: str = None,
booker_name: str = None,
capital_direction: str = None,
cascade_department: str = None,
change_fee: float = None,
cost_center: str = None,
cost_center_number: str = None,
coupon: float = None,
department: str = None,
department_id: str = None,
dept_date: str = None,
dept_station: str = None,
dept_time: str = None,
fee_type: str = None,
index: str = None,
invoice_title: str = None,
order_id: str = None,
order_price: float = None,
over_apply_id: str = None,
primary_id: int = None,
project_code: str = None,
project_name: str = None,
refund_fee: float = None,
run_time: str = None,
seat_no: str = None,
seat_type: str = None,
service_fee: float = None,
settlement_fee: float = None,
settlement_time: str = None,
settlement_type: str = None,
status: int = None,
ticket_no: str = None,
ticket_price: float = None,
train_no: str = None,
train_type: str = None,
traveler_id: str = None,
traveler_name: str = None,
booker_job_no: str = None,
traveler_job_no: str = None,
voucher_type: int = None,
):
        # Payment transaction serial number
        self.alipay_trade_no = alipay_trade_no
        # Approval form ID
        self.apply_id = apply_id
        # Arrival date
        self.arr_date = arr_date
        # Arrival station
        self.arr_station = arr_station
        # Arrival time
        self.arr_time = arr_time
        # Booking time
        self.book_time = book_time
        # Booker user ID
        self.booker_id = booker_id
        # Booker name
        self.booker_name = booker_name
        # Capital flow direction
        self.capital_direction = capital_direction
        # Cascaded department
        self.cascade_department = cascade_department
        # Rebooking (change) fee
        self.change_fee = change_fee
        # Cost center name
        self.cost_center = cost_center
        # Cost center code
        self.cost_center_number = cost_center_number
        # Discount rate
        self.coupon = coupon
        # Leaf-level department
        self.department = department
        # Department ID
        self.department_id = department_id
        # Departure date
        self.dept_date = dept_date
        # Departure station
        self.dept_station = dept_station
        # Departure time
        self.dept_time = dept_time
        # Fee type
        self.fee_type = fee_type
        # Sequence number
        self.index = index
        # Invoice title
        self.invoice_title = invoice_title
        # Order ID
        self.order_id = order_id
        # Order amount
        self.order_price = order_price
        # Over-standard approval form ID
        self.over_apply_id = over_apply_id
        # Primary key ID
        self.primary_id = primary_id
        # Project code
        self.project_code = project_code
        # Project name
        self.project_name = project_name
        # Refund fee
        self.refund_fee = refund_fee
        # Travel (running) time
        self.run_time = run_time
        # Seat number
        self.seat_no = seat_no
        # Seat class
        self.seat_type = seat_type
        # Service fee; only shown for feeType 6007 and 6008
        self.service_fee = service_fee
        # Settlement amount
        self.settlement_fee = settlement_fee
        # Settlement time
        self.settlement_time = settlement_time
        # Settlement type
        self.settlement_type = settlement_type
        # Posting (accounting entry) status
        self.status = status
        # Ticket number (as printed on the ticket)
        self.ticket_no = ticket_no
        # Ticket price
        self.ticket_price = ticket_price
        # Train number
        self.train_no = train_no
        # Train type
        self.train_type = train_type
        # Traveler user ID
        self.traveler_id = traveler_id
        # Traveler name
        self.traveler_name = traveler_name
        # Booker employee number
        self.booker_job_no = booker_job_no
        # Traveler employee number
        self.traveler_job_no = traveler_job_no
        # Voucher (invoice) type
        self.voucher_type = voucher_type
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.alipay_trade_no is not None:
result['alipayTradeNo'] = self.alipay_trade_no
if self.apply_id is not None:
result['applyId'] = self.apply_id
if self.arr_date is not None:
result['arrDate'] = self.arr_date
if self.arr_station is not None:
result['arrStation'] = self.arr_station
if self.arr_time is not None:
result['arrTime'] = self.arr_time
if self.book_time is not None:
result['bookTime'] = self.book_time
if self.booker_id is not None:
result['bookerId'] = self.booker_id
if self.booker_name is not None:
result['bookerName'] = self.booker_name
if self.capital_direction is not None:
result['capitalDirection'] = self.capital_direction
if self.cascade_department is not None:
result['cascadeDepartment'] = self.cascade_department
if self.change_fee is not None:
result['changeFee'] = self.change_fee
if self.cost_center is not None:
result['costCenter'] = self.cost_center
if self.cost_center_number is not None:
result['costCenterNumber'] = self.cost_center_number
if self.coupon is not None:
result['coupon'] = self.coupon
if self.department is not None:
result['department'] = self.department
if self.department_id is not None:
result['departmentId'] = self.department_id
if self.dept_date is not None:
result['deptDate'] = self.dept_date
if self.dept_station is not None:
result['deptStation'] = self.dept_station
if self.dept_time is not None:
result['deptTime'] = self.dept_time
if self.fee_type is not None:
result['feeType'] = self.fee_type
if self.index is not None:
result['index'] = self.index
if self.invoice_title is not None:
result['invoiceTitle'] = self.invoice_title
if self.order_id is not None:
result['orderId'] = self.order_id
if self.order_price is not None:
result['orderPrice'] = self.order_price
if self.over_apply_id is not None:
result['overApplyId'] = self.over_apply_id
if self.primary_id is not None:
result['primaryId'] = self.primary_id
if self.project_code is not None:
result['projectCode'] = self.project_code
if self.project_name is not None:
result['projectName'] = self.project_name
if self.refund_fee is not None:
result['refundFee'] = self.refund_fee
if self.run_time is not None:
result['runTime'] = self.run_time
if self.seat_no is not None:
result['seatNo'] = self.seat_no
if self.seat_type is not None:
result['seatType'] = self.seat_type
if self.service_fee is not None:
result['serviceFee'] = self.service_fee
if self.settlement_fee is not None:
result['settlementFee'] = self.settlement_fee
if self.settlement_time is not None:
result['settlementTime'] = self.settlement_time
if self.settlement_type is not None:
result['settlementType'] = self.settlement_type
if self.status is not None:
result['status'] = self.status
if self.ticket_no is not None:
result['ticketNo'] = self.ticket_no
if self.ticket_price is not None:
result['ticketPrice'] = self.ticket_price
if self.train_no is not None:
result['trainNo'] = self.train_no
if self.train_type is not None:
result['trainType'] = self.train_type
if self.traveler_id is not None:
result['travelerId'] = self.traveler_id
if self.traveler_name is not None:
result['travelerName'] = self.traveler_name
if self.booker_job_no is not None:
result['bookerJobNo'] = self.booker_job_no
if self.traveler_job_no is not None:
result['travelerJobNo'] = self.traveler_job_no
if self.voucher_type is not None:
result['voucherType'] = self.voucher_type
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('alipayTradeNo') is not None:
self.alipay_trade_no = m.get('alipayTradeNo')
if m.get('applyId') is not None:
self.apply_id = m.get('applyId')
if m.get('arrDate') is not None:
self.arr_date = m.get('arrDate')
if m.get('arrStation') is not None:
self.arr_station = m.get('arrStation')
if m.get('arrTime') is not None:
self.arr_time = m.get('arrTime')
if m.get('bookTime') is not None:
self.book_time = m.get('bookTime')
if m.get('bookerId') is not None:
self.booker_id = m.get('bookerId')
if m.get('bookerName') is not None:
self.booker_name = m.get('bookerName')
if m.get('capitalDirection') is not None:
self.capital_direction = m.get('capitalDirection')
if m.get('cascadeDepartment') is not None:
self.cascade_department = m.get('cascadeDepartment')
if m.get('changeFee') is not None:
self.change_fee = m.get('changeFee')
if m.get('costCenter') is not None:
self.cost_center = m.get('costCenter')
if m.get('costCenterNumber') is not None:
self.cost_center_number = m.get('costCenterNumber')
if m.get('coupon') is not None:
self.coupon = m.get('coupon')
if m.get('department') is not None:
self.department = m.get('department')
if m.get('departmentId') is not None:
self.department_id = m.get('departmentId')
if m.get('deptDate') is not None:
self.dept_date = m.get('deptDate')
if m.get('deptStation') is not None:
self.dept_station = m.get('deptStation')
if m.get('deptTime') is not None:
self.dept_time = m.get('deptTime')
if m.get('feeType') is not None:
self.fee_type = m.get('feeType')
if m.get('index') is not None:
self.index = m.get('index')
if m.get('invoiceTitle') is not None:
self.invoice_title = m.get('invoiceTitle')
if m.get('orderId') is not None:
self.order_id = m.get('orderId')
if m.get('orderPrice') is not None:
self.order_price = m.get('orderPrice')
if m.get('overApplyId') is not None:
self.over_apply_id = m.get('overApplyId')
if m.get('primaryId') is not None:
self.primary_id = m.get('primaryId')
if m.get('projectCode') is not None:
self.project_code = m.get('projectCode')
if m.get('projectName') is not None:
self.project_name = m.get('projectName')
if m.get('refundFee') is not None:
self.refund_fee = m.get('refundFee')
if m.get('runTime') is not None:
self.run_time = m.get('runTime')
if m.get('seatNo') is not None:
self.seat_no = m.get('seatNo')
if m.get('seatType') is not None:
self.seat_type = m.get('seatType')
if m.get('serviceFee') is not None:
self.service_fee = m.get('serviceFee')
if m.get('settlementFee') is not None:
self.settlement_fee = m.get('settlementFee')
if m.get('settlementTime') is not None:
self.settlement_time = m.get('settlementTime')
if m.get('settlementType') is not None:
self.settlement_type = m.get('settlementType')
if m.get('status') is not None:
self.status = m.get('status')
if m.get('ticketNo') is not None:
self.ticket_no = m.get('ticketNo')
if m.get('ticketPrice') is not None:
self.ticket_price = m.get('ticketPrice')
if m.get('trainNo') is not None:
self.train_no = m.get('trainNo')
if m.get('trainType') is not None:
self.train_type = m.get('trainType')
if m.get('travelerId') is not None:
self.traveler_id = m.get('travelerId')
if m.get('travelerName') is not None:
self.traveler_name = m.get('travelerName')
if m.get('bookerJobNo') is not None:
self.booker_job_no = m.get('bookerJobNo')
if m.get('travelerJobNo') is not None:
self.traveler_job_no = m.get('travelerJobNo')
if m.get('voucherType') is not None:
self.voucher_type = m.get('voucherType')
return self
class BillSettementBtripTrainResponseBodyModule(TeaModel):
def __init__(
self,
category: int = None,
corp_id: str = None,
data_list: List[BillSettementBtripTrainResponseBodyModuleDataList] = None,
period_end: str = None,
period_start: str = None,
total_num: int = None,
):
        # Category
        self.category = category
        # Corp ID
        self.corp_id = corp_id
        # Data list
        self.data_list = data_list
        # End of the accounting-update period
        self.period_end = period_end
        # Start of the accounting-update period
        self.period_start = period_start
        # Total number of records
        self.total_num = total_num
def validate(self):
if self.data_list:
for k in self.data_list:
if k:
k.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.category is not None:
result['category'] = self.category
if self.corp_id is not None:
result['corpId'] = self.corp_id
result['dataList'] = []
if self.data_list is not None:
for k in self.data_list:
result['dataList'].append(k.to_map() if k else None)
if self.period_end is not None:
result['periodEnd'] = self.period_end
if self.period_start is not None:
result['periodStart'] = self.period_start
if self.total_num is not None:
result['totalNum'] = self.total_num
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('category') is not None:
self.category = m.get('category')
if m.get('corpId') is not None:
self.corp_id = m.get('corpId')
self.data_list = []
if m.get('dataList') is not None:
for k in m.get('dataList'):
temp_model = BillSettementBtripTrainResponseBodyModuleDataList()
self.data_list.append(temp_model.from_map(k))
if m.get('periodEnd') is not None:
self.period_end = m.get('periodEnd')
if m.get('periodStart') is not None:
self.period_start = m.get('periodStart')
if m.get('totalNum') is not None:
self.total_num = m.get('totalNum')
return self
class BillSettementBtripTrainResponseBody(TeaModel):
def __init__(
self,
result_msg: str = None,
module: BillSettementBtripTrainResponseBodyModule = None,
success: bool = None,
result_code: int = None,
):
        # Result message
        self.result_msg = result_msg
        # Response payload (module)
        self.module = module
        # Whether the call succeeded
        self.success = success
        # Result code
        self.result_code = result_code
def validate(self):
if self.module:
self.module.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.result_msg is not None:
result['resultMsg'] = self.result_msg
if self.module is not None:
result['module'] = self.module.to_map()
if self.success is not None:
result['success'] = self.success
if self.result_code is not None:
result['resultCode'] = self.result_code
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('resultMsg') is not None:
self.result_msg = m.get('resultMsg')
if m.get('module') is not None:
temp_model = BillSettementBtripTrainResponseBodyModule()
self.module = temp_model.from_map(m['module'])
if m.get('success') is not None:
self.success = m.get('success')
if m.get('resultCode') is not None:
self.result_code = m.get('resultCode')
return self
class BillSettementBtripTrainResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: BillSettementBtripTrainResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = BillSettementBtripTrainResponseBody()
self.body = temp_model.from_map(m['body'])
return self
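
# --- Illustrative usage sketch (not part of the generated SDK models) ---
# Shows how a raw BillSettementBtripTrain response dict could be turned into
# typed models via from_map. The payload values below are invented sample
# data, not real API output.
def _example_parse_btrip_train_settlement():
    raw = {
        'resultMsg': 'ok',
        'success': True,
        'resultCode': 0,
        'module': {
            'category': 6,  # 6 = BTrip train ticket
            'corpId': 'corp123',
            'totalNum': 1,
            'dataList': [{'travelerName': 'Zhang San', 'voucherType': 1}],
        },
    }
    body = BillSettementBtripTrainResponseBody().from_map(raw)
    # Entries in data_list are typed DataList models, not raw dicts.
    for item in body.module.data_list:
        print(item.traveler_name, item.voucher_type)
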
class SyncExceedApplyHeaders(TeaModel):
def __init__(
self,
common_headers: Dict[str, str] = None,
x_acs_dingtalk_access_token: str = None,
):
self.common_headers = common_headers
self.x_acs_dingtalk_access_token = x_acs_dingtalk_access_token
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.common_headers is not None:
result['commonHeaders'] = self.common_headers
if self.x_acs_dingtalk_access_token is not None:
result['x-acs-dingtalk-access-token'] = self.x_acs_dingtalk_access_token
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('commonHeaders') is not None:
self.common_headers = m.get('commonHeaders')
if m.get('x-acs-dingtalk-access-token') is not None:
self.x_acs_dingtalk_access_token = m.get('x-acs-dingtalk-access-token')
return self
class SyncExceedApplyRequest(TeaModel):
def __init__(
self,
remark: str = None,
apply_id: str = None,
corp_id: str = None,
thirdparty_flow_id: str = None,
user_id: str = None,
status: int = None,
):
        # Approval comment
        self.remark = remark
        # BTrip exceed-standard approval form ID
        self.apply_id = apply_id
        # Corp ID
        self.corp_id = corp_id
        # Third-party process instance ID
        self.thirdparty_flow_id = thirdparty_flow_id
        # User ID
        self.user_id = user_id
        # Approval form status: 1 = approved, 2 = rejected
        self.status = status
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.remark is not None:
result['remark'] = self.remark
if self.apply_id is not None:
result['applyId'] = self.apply_id
if self.corp_id is not None:
result['corpId'] = self.corp_id
if self.thirdparty_flow_id is not None:
result['thirdpartyFlowId'] = self.thirdparty_flow_id
if self.user_id is not None:
result['userId'] = self.user_id
if self.status is not None:
result['status'] = self.status
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('remark') is not None:
self.remark = m.get('remark')
if m.get('applyId') is not None:
self.apply_id = m.get('applyId')
if m.get('corpId') is not None:
self.corp_id = m.get('corpId')
if m.get('thirdpartyFlowId') is not None:
self.thirdparty_flow_id = m.get('thirdpartyFlowId')
if m.get('userId') is not None:
self.user_id = m.get('userId')
if m.get('status') is not None:
self.status = m.get('status')
return self
class SyncExceedApplyResponseBody(TeaModel):
def __init__(
self,
module: bool = None,
):
        # Whether the sync succeeded
        self.module = module
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.module is not None:
result['module'] = self.module
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('module') is not None:
self.module = m.get('module')
return self
class SyncExceedApplyResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: SyncExceedApplyResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = SyncExceedApplyResponseBody()
self.body = temp_model.from_map(m['body'])
return self
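
# --- Illustrative usage sketch (not part of the generated SDK models) ---
# Shows how a SyncExceedApplyRequest could be built and serialized with
# to_map before sending. All field values here are invented sample data.
def _example_build_sync_exceed_apply_request():
    req = SyncExceedApplyRequest(
        corp_id='corp123',
        apply_id='apply-001',
        thirdparty_flow_id='flow-001',
        user_id='user-001',
        status=1,  # 1 = approved, 2 = rejected
        remark='Approved by department manager',
    )
    # to_map produces the camelCase wire format, e.g. {'corpId': 'corp123', ...}
    return req.to_map()
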
class AddCityCarApplyHeaders(TeaModel):
def __init__(
self,
common_headers: Dict[str, str] = None,
x_acs_dingtalk_access_token: str = None,
):
self.common_headers = common_headers
self.x_acs_dingtalk_access_token = x_acs_dingtalk_access_token
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.common_headers is not None:
result['commonHeaders'] = self.common_headers
if self.x_acs_dingtalk_access_token is not None:
result['x-acs-dingtalk-access-token'] = self.x_acs_dingtalk_access_token
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('commonHeaders') is not None:
self.common_headers = m.get('commonHeaders')
if m.get('x-acs-dingtalk-access-token') is not None:
self.x_acs_dingtalk_access_token = m.get('x-acs-dingtalk-access-token')
return self
class AddCityCarApplyRequest(TeaModel):
def __init__(
self,
cause: str = None,
city: str = None,
corp_id: str = None,
date: str = None,
project_code: str = None,
project_name: str = None,
status: int = None,
third_part_apply_id: str = None,
third_part_cost_center_id: str = None,
third_part_invoice_id: str = None,
times_total: int = None,
times_type: int = None,
times_used: int = None,
title: str = None,
user_id: str = None,
ding_suite_key: str = None,
ding_corp_id: str = None,
ding_token_grant_type: int = None,
finished_date: str = None,
):
        # Business trip reason
        self.cause = cause
        # City where the car is used
        self.city = city
        # Third-party corp ID
        self.corp_id = corp_id
        # Car-use date, controlled at day granularity; e.g. 2021-03-18 20:26:56
        # means the car can be used on 2021-03-18. For multi-day use, combine
        # with the finishedDate parameter.
        self.date = date
        # Project code associated with the approval form
        self.project_code = project_code
        # Project name associated with the approval form
        self.project_name = project_name
        # Approval form status: 0 = applied, 1 = approved, 2 = rejected
        self.status = status
        # Third-party approval form ID
        self.third_part_apply_id = third_part_apply_id
        # Third-party cost center ID associated with the approval form
        self.third_part_cost_center_id = third_part_cost_center_id
        # Third-party invoice title ID associated with the approval form
        self.third_part_invoice_id = third_part_invoice_id
        # Total number of times the approval form can be used
        self.times_total = times_total
        # Usage count type: 1 = unlimited, 2 = user-specified count,
        # 3 = admin-limited count. If the company does not limit usage counts,
        # pass 1 here and 0 for both timesTotal and timesUsed.
        self.times_type = times_type
        # Number of times the approval form has been used
        self.times_used = times_used
        # Approval form title
        self.title = title
        # Third-party employee ID who initiated the approval
        self.user_id = user_id
        # suiteKey
        self.ding_suite_key = ding_suite_key
        # account
        self.ding_corp_id = ding_corp_id
        # tokenGrantType
        self.ding_token_grant_type = ding_token_grant_type
        # Car-use end date, controlled at day granularity; e.g. date =
        # 2021-03-18 20:26:56 and finished_date = 2021-03-30 20:26:56 means the
        # car can be used from 2021-03-18 (inclusive) to 2021-03-30 (inclusive).
        # If omitted, date is used as the end date.
        self.finished_date = finished_date
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.cause is not None:
result['cause'] = self.cause
if self.city is not None:
result['city'] = self.city
if self.corp_id is not None:
result['corpId'] = self.corp_id
if self.date is not None:
result['date'] = self.date
if self.project_code is not None:
result['projectCode'] = self.project_code
if self.project_name is not None:
result['projectName'] = self.project_name
if self.status is not None:
result['status'] = self.status
if self.third_part_apply_id is not None:
result['thirdPartApplyId'] = self.third_part_apply_id
if self.third_part_cost_center_id is not None:
result['thirdPartCostCenterId'] = self.third_part_cost_center_id
if self.third_part_invoice_id is not None:
result['thirdPartInvoiceId'] = self.third_part_invoice_id
if self.times_total is not None:
result['timesTotal'] = self.times_total
if self.times_type is not None:
result['timesType'] = self.times_type
if self.times_used is not None:
result['timesUsed'] = self.times_used
if self.title is not None:
result['title'] = self.title
if self.user_id is not None:
result['userId'] = self.user_id
if self.ding_suite_key is not None:
result['dingSuiteKey'] = self.ding_suite_key
if self.ding_corp_id is not None:
result['dingCorpId'] = self.ding_corp_id
if self.ding_token_grant_type is not None:
result['dingTokenGrantType'] = self.ding_token_grant_type
if self.finished_date is not None:
result['finishedDate'] = self.finished_date
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('cause') is not None:
self.cause = m.get('cause')
if m.get('city') is not None:
self.city = m.get('city')
if m.get('corpId') is not None:
self.corp_id = m.get('corpId')
if m.get('date') is not None:
self.date = m.get('date')
if m.get('projectCode') is not None:
self.project_code = m.get('projectCode')
if m.get('projectName') is not None:
self.project_name = m.get('projectName')
if m.get('status') is not None:
self.status = m.get('status')
if m.get('thirdPartApplyId') is not None:
self.third_part_apply_id = m.get('thirdPartApplyId')
if m.get('thirdPartCostCenterId') is not None:
self.third_part_cost_center_id = m.get('thirdPartCostCenterId')
if m.get('thirdPartInvoiceId') is not None:
self.third_part_invoice_id = m.get('thirdPartInvoiceId')
if m.get('timesTotal') is not None:
self.times_total = m.get('timesTotal')
if m.get('timesType') is not None:
self.times_type = m.get('timesType')
if m.get('timesUsed') is not None:
self.times_used = m.get('timesUsed')
if m.get('title') is not None:
self.title = m.get('title')
if m.get('userId') is not None:
self.user_id = m.get('userId')
if m.get('dingSuiteKey') is not None:
self.ding_suite_key = m.get('dingSuiteKey')
if m.get('dingCorpId') is not None:
self.ding_corp_id = m.get('dingCorpId')
if m.get('dingTokenGrantType') is not None:
self.ding_token_grant_type = m.get('dingTokenGrantType')
if m.get('finishedDate') is not None:
self.finished_date = m.get('finishedDate')
return self
class AddCityCarApplyResponseBody(TeaModel):
def __init__(
self,
apply_id: int = None,
):
        # BTrip internal approval form ID
        self.apply_id = apply_id
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.apply_id is not None:
result['applyId'] = self.apply_id
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('applyId') is not None:
self.apply_id = m.get('applyId')
return self
class AddCityCarApplyResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: AddCityCarApplyResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = AddCityCarApplyResponseBody()
self.body = temp_model.from_map(m['body'])
return self
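
# --- Illustrative usage sketch (not part of the generated SDK models) ---
# Shows one plausible way to build an AddCityCarApplyRequest for an approval
# that does not limit usage counts (timesType = 1 with both counters at 0,
# per the field docs above). All values are invented sample data.
def _example_build_add_city_car_apply_request():
    req = AddCityCarApplyRequest(
        corp_id='corp123',
        user_id='user-001',
        third_part_apply_id='oa-apply-001',
        title='City car for client visit',
        cause='Client visit',
        city='Hangzhou',
        date='2021-03-18 00:00:00',
        finished_date='2021-03-30 00:00:00',  # car usable 03-18 through 03-30
        status=1,       # 1 = approved
        times_type=1,   # 1 = unlimited usage count
        times_total=0,
        times_used=0,
    )
    return req.to_map()
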
class BillSettementFlightHeaders(TeaModel):
def __init__(
self,
common_headers: Dict[str, str] = None,
x_acs_dingtalk_access_token: str = None,
):
self.common_headers = common_headers
self.x_acs_dingtalk_access_token = x_acs_dingtalk_access_token
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.common_headers is not None:
result['commonHeaders'] = self.common_headers
if self.x_acs_dingtalk_access_token is not None:
result['x-acs-dingtalk-access-token'] = self.x_acs_dingtalk_access_token
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('commonHeaders') is not None:
self.common_headers = m.get('commonHeaders')
if m.get('x-acs-dingtalk-access-token') is not None:
self.x_acs_dingtalk_access_token = m.get('x-acs-dingtalk-access-token')
return self
class BillSettementFlightRequest(TeaModel):
def __init__(
self,
corp_id: str = None,
category: int = None,
page_size: int = None,
period_start: str = None,
page_number: int = None,
period_end: str = None,
):
        # Third-party corp ID
        self.corp_id = corp_id
        # Category: 1 = flight; 2 = hotel; 4 = car; 6 = BTrip train ticket
        self.category = category
        # Page size; default 100, maximum 500
        self.page_size = page_size
        # Accounting update period start date
        self.period_start = period_start
        # Page number, starting from 1
        self.page_number = page_number
        # Accounting update period end date
        self.period_end = period_end
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.corp_id is not None:
result['corpId'] = self.corp_id
if self.category is not None:
result['category'] = self.category
if self.page_size is not None:
result['pageSize'] = self.page_size
if self.period_start is not None:
result['periodStart'] = self.period_start
if self.page_number is not None:
result['pageNumber'] = self.page_number
if self.period_end is not None:
result['periodEnd'] = self.period_end
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('corpId') is not None:
self.corp_id = m.get('corpId')
if m.get('category') is not None:
self.category = m.get('category')
if m.get('pageSize') is not None:
self.page_size = m.get('pageSize')
if m.get('periodStart') is not None:
self.period_start = m.get('periodStart')
if m.get('pageNumber') is not None:
self.page_number = m.get('pageNumber')
if m.get('periodEnd') is not None:
self.period_end = m.get('periodEnd')
return self
class BillSettementFlightResponseBodyModuleDataList(TeaModel):
def __init__(
self,
advance_day: int = None,
airline_corp_code: str = None,
airline_corp_name: str = None,
alipay_trade_no: str = None,
apply_id: str = None,
arr_airport_code: str = None,
arr_city: str = None,
arr_date: str = None,
arr_station: str = None,
arr_time: str = None,
book_time: str = None,
booker_id: str = None,
booker_name: str = None,
btrip_coupon_fee: float = None,
build_fee: float = None,
cabin: str = None,
cabin_class: str = None,
capital_direction: str = None,
cascade_department: str = None,
change_fee: float = None,
corp_pay_order_fee: float = None,
cost_center: str = None,
cost_center_number: str = None,
coupon: float = None,
dep_airport_code: str = None,
department: str = None,
department_id: str = None,
dept_city: str = None,
dept_date: str = None,
dept_station: str = None,
dept_time: str = None,
discount: str = None,
fee_type: str = None,
flight_no: str = None,
index: str = None,
insurance_fee: float = None,
invoice_title: str = None,
itinerary_num: str = None,
itinerary_price: float = None,
most_difference_dept_time: str = None,
most_difference_discount: float = None,
most_difference_flight_no: str = None,
most_difference_price: float = None,
most_difference_reason: str = None,
most_price: float = None,
negotiation_coupon_fee: float = None,
oil_fee: float = None,
order_id: str = None,
over_apply_id: str = None,
primary_id: int = None,
project_code: str = None,
project_name: str = None,
refund_fee: float = None,
refund_upgrade_cost: float = None,
repeat_refund: str = None,
seal_price: float = None,
service_fee: float = None,
settlement_fee: float = None,
settlement_time: str = None,
settlement_type: str = None,
status: int = None,
ticket_id: str = None,
traveler_id: str = None,
traveler_name: str = None,
upgrade_cost: float = None,
booker_job_no: str = None,
traveler_job_no: str = None,
):
        # Days booked in advance
        self.advance_day = advance_day
        # Airline three-letter code
        self.airline_corp_code = airline_corp_code
        # Airline name
        self.airline_corp_name = airline_corp_name
        # Transaction serial number
        self.alipay_trade_no = alipay_trade_no
        # Approval form number
        self.apply_id = apply_id
        # Arrival airport two-letter code
        self.arr_airport_code = arr_airport_code
        # Arrival city
        self.arr_city = arr_city
        # Arrival date
        self.arr_date = arr_date
        # Arrival airport
        self.arr_station = arr_station
        # Arrival time
        self.arr_time = arr_time
        # Booking time
        self.book_time = book_time
        # Booker user ID
        self.booker_id = booker_id
        # Booker name
        self.booker_name = booker_name
        # BTrip discount amount
        self.btrip_coupon_fee = btrip_coupon_fee
        # Airport construction fee
        self.build_fee = build_fee
        # Cabin
        self.cabin = cabin
        # Cabin class code
        self.cabin_class = cabin_class
        # Capital flow direction
        self.capital_direction = capital_direction
        # Cascaded department
        self.cascade_department = cascade_department
        # Rebooking fee
        self.change_fee = change_fee
        # Order amount
        self.corp_pay_order_fee = corp_pay_order_fee
        # Cost center name
        self.cost_center = cost_center
        # Cost center number
        self.cost_center_number = cost_center_number
        # Coupon
        self.coupon = coupon
        # Departure airport two-letter code
        self.dep_airport_code = dep_airport_code
        # Leaf-level department
        self.department = department
        # Department ID
        self.department_id = department_id
        # Departure city
        self.dept_city = dept_city
        # Departure date
        self.dept_date = dept_date
        # Departure airport
        self.dept_station = dept_station
        # Departure time
        self.dept_time = dept_time
        # Discount rate
        self.discount = discount
        # Fee type
        self.fee_type = fee_type
        # Flight number
        self.flight_no = flight_no
        # Sequence number
        self.index = index
        # Insurance fee
        self.insurance_fee = insurance_fee
        # Invoice title
        self.invoice_title = invoice_title
        # Itinerary receipt print sequence number
        self.itinerary_num = itinerary_num
        # Itinerary receipt amount
        self.itinerary_price = itinerary_price
        # Lowest-price reminder (departure time)
        self.most_difference_dept_time = most_difference_dept_time
        # Lowest-price reminder (discount)
        self.most_difference_discount = most_difference_discount
        # Lowest-price reminder (flight number)
        self.most_difference_flight_no = most_difference_flight_no
        # Lowest-price reminder (difference from the lowest price)
        self.most_difference_price = most_difference_price
        # Reason for not choosing the lowest price
        self.most_difference_reason = most_difference_reason
        # Lowest flight price
        self.most_price = most_price
        # Negotiated-price discount amount
        self.negotiation_coupon_fee = negotiation_coupon_fee
        # Fuel surcharge
        self.oil_fee = oil_fee
        # Order number
        self.order_id = order_id
        # Exceed-standard approval form number
        self.over_apply_id = over_apply_id
        # Primary key ID
        self.primary_id = primary_id
        # Project code
        self.project_code = project_code
        # Project name
        self.project_name = project_name
        # Refund service fee
        self.refund_fee = refund_fee
        # Rebooking/refund service fee
        self.refund_upgrade_cost = refund_upgrade_cost
        # Whether refunded repeatedly
        self.repeat_refund = repeat_refund
        # Sale price
        self.seal_price = seal_price
        # Service fee; only shown for feeType 11001 and 11002
        self.service_fee = service_fee
        # Settlement amount
        self.settlement_fee = settlement_fee
        # Settlement time
        self.settlement_time = settlement_time
        # Settlement type
        self.settlement_type = settlement_type
        # Posting status
        self.status = status
        # Itinerary receipt number
        self.ticket_id = ticket_id
        # Traveler user ID
        self.traveler_id = traveler_id
        # Traveler name
        self.traveler_name = traveler_name
        # Rebooking price difference
        self.upgrade_cost = upgrade_cost
        # Booker job number
        self.booker_job_no = booker_job_no
        # Traveler job number
        self.traveler_job_no = traveler_job_no
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.advance_day is not None:
result['advanceDay'] = self.advance_day
if self.airline_corp_code is not None:
result['airlineCorpCode'] = self.airline_corp_code
if self.airline_corp_name is not None:
result['airlineCorpName'] = self.airline_corp_name
if self.alipay_trade_no is not None:
result['alipayTradeNo'] = self.alipay_trade_no
if self.apply_id is not None:
result['applyId'] = self.apply_id
if self.arr_airport_code is not None:
result['arrAirportCode'] = self.arr_airport_code
if self.arr_city is not None:
result['arrCity'] = self.arr_city
if self.arr_date is not None:
result['arrDate'] = self.arr_date
if self.arr_station is not None:
result['arrStation'] = self.arr_station
if self.arr_time is not None:
result['arrTime'] = self.arr_time
if self.book_time is not None:
result['bookTime'] = self.book_time
if self.booker_id is not None:
result['bookerId'] = self.booker_id
if self.booker_name is not None:
result['bookerName'] = self.booker_name
if self.btrip_coupon_fee is not None:
result['btripCouponFee'] = self.btrip_coupon_fee
if self.build_fee is not None:
result['buildFee'] = self.build_fee
if self.cabin is not None:
result['cabin'] = self.cabin
if self.cabin_class is not None:
result['cabinClass'] = self.cabin_class
if self.capital_direction is not None:
result['capitalDirection'] = self.capital_direction
if self.cascade_department is not None:
result['cascadeDepartment'] = self.cascade_department
if self.change_fee is not None:
result['changeFee'] = self.change_fee
if self.corp_pay_order_fee is not None:
result['corpPayOrderFee'] = self.corp_pay_order_fee
if self.cost_center is not None:
result['costCenter'] = self.cost_center
if self.cost_center_number is not None:
result['costCenterNumber'] = self.cost_center_number
if self.coupon is not None:
result['coupon'] = self.coupon
if self.dep_airport_code is not None:
result['depAirportCode'] = self.dep_airport_code
if self.department is not None:
result['department'] = self.department
if self.department_id is not None:
result['departmentId'] = self.department_id
if self.dept_city is not None:
result['deptCity'] = self.dept_city
if self.dept_date is not None:
result['deptDate'] = self.dept_date
if self.dept_station is not None:
result['deptStation'] = self.dept_station
if self.dept_time is not None:
result['deptTime'] = self.dept_time
if self.discount is not None:
result['discount'] = self.discount
if self.fee_type is not None:
result['feeType'] = self.fee_type
if self.flight_no is not None:
result['flightNo'] = self.flight_no
if self.index is not None:
result['index'] = self.index
if self.insurance_fee is not None:
result['insuranceFee'] = self.insurance_fee
if self.invoice_title is not None:
result['invoiceTitle'] = self.invoice_title
if self.itinerary_num is not None:
result['itineraryNum'] = self.itinerary_num
if self.itinerary_price is not None:
result['itineraryPrice'] = self.itinerary_price
if self.most_difference_dept_time is not None:
result['mostDifferenceDeptTime'] = self.most_difference_dept_time
if self.most_difference_discount is not None:
result['mostDifferenceDiscount'] = self.most_difference_discount
if self.most_difference_flight_no is not None:
result['mostDifferenceFlightNo'] = self.most_difference_flight_no
if self.most_difference_price is not None:
result['mostDifferencePrice'] = self.most_difference_price
if self.most_difference_reason is not None:
result['mostDifferenceReason'] = self.most_difference_reason
if self.most_price is not None:
result['mostPrice'] = self.most_price
if self.negotiation_coupon_fee is not None:
result['negotiationCouponFee'] = self.negotiation_coupon_fee
if self.oil_fee is not None:
result['oilFee'] = self.oil_fee
if self.order_id is not None:
result['orderId'] = self.order_id
if self.over_apply_id is not None:
result['overApplyId'] = self.over_apply_id
if self.primary_id is not None:
result['primaryId'] = self.primary_id
if self.project_code is not None:
result['projectCode'] = self.project_code
if self.project_name is not None:
result['projectName'] = self.project_name
if self.refund_fee is not None:
result['refundFee'] = self.refund_fee
if self.refund_upgrade_cost is not None:
result['refundUpgradeCost'] = self.refund_upgrade_cost
if self.repeat_refund is not None:
result['repeatRefund'] = self.repeat_refund
if self.seal_price is not None:
result['sealPrice'] = self.seal_price
if self.service_fee is not None:
result['serviceFee'] = self.service_fee
if self.settlement_fee is not None:
result['settlementFee'] = self.settlement_fee
if self.settlement_time is not None:
result['settlementTime'] = self.settlement_time
if self.settlement_type is not None:
result['settlementType'] = self.settlement_type
if self.status is not None:
result['status'] = self.status
if self.ticket_id is not None:
result['ticketId'] = self.ticket_id
if self.traveler_id is not None:
result['travelerId'] = self.traveler_id
if self.traveler_name is not None:
result['travelerName'] = self.traveler_name
if self.upgrade_cost is not None:
result['upgradeCost'] = self.upgrade_cost
if self.booker_job_no is not None:
result['bookerJobNo'] = self.booker_job_no
if self.traveler_job_no is not None:
result['travelerJobNo'] = self.traveler_job_no
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('advanceDay') is not None:
self.advance_day = m.get('advanceDay')
if m.get('airlineCorpCode') is not None:
self.airline_corp_code = m.get('airlineCorpCode')
if m.get('airlineCorpName') is not None:
self.airline_corp_name = m.get('airlineCorpName')
if m.get('alipayTradeNo') is not None:
self.alipay_trade_no = m.get('alipayTradeNo')
if m.get('applyId') is not None:
self.apply_id = m.get('applyId')
if m.get('arrAirportCode') is not None:
self.arr_airport_code = m.get('arrAirportCode')
if m.get('arrCity') is not None:
self.arr_city = m.get('arrCity')
if m.get('arrDate') is not None:
self.arr_date = m.get('arrDate')
if m.get('arrStation') is not None:
self.arr_station = m.get('arrStation')
if m.get('arrTime') is not None:
self.arr_time = m.get('arrTime')
if m.get('bookTime') is not None:
self.book_time = m.get('bookTime')
if m.get('bookerId') is not None:
self.booker_id = m.get('bookerId')
if m.get('bookerName') is not None:
self.booker_name = m.get('bookerName')
if m.get('btripCouponFee') is not None:
self.btrip_coupon_fee = m.get('btripCouponFee')
if m.get('buildFee') is not None:
self.build_fee = m.get('buildFee')
if m.get('cabin') is not None:
self.cabin = m.get('cabin')
if m.get('cabinClass') is not None:
self.cabin_class = m.get('cabinClass')
if m.get('capitalDirection') is not None:
self.capital_direction = m.get('capitalDirection')
if m.get('cascadeDepartment') is not None:
self.cascade_department = m.get('cascadeDepartment')
if m.get('changeFee') is not None:
self.change_fee = m.get('changeFee')
if m.get('corpPayOrderFee') is not None:
self.corp_pay_order_fee = m.get('corpPayOrderFee')
if m.get('costCenter') is not None:
self.cost_center = m.get('costCenter')
if m.get('costCenterNumber') is not None:
self.cost_center_number = m.get('costCenterNumber')
if m.get('coupon') is not None:
self.coupon = m.get('coupon')
if m.get('depAirportCode') is not None:
self.dep_airport_code = m.get('depAirportCode')
if m.get('department') is not None:
self.department = m.get('department')
if m.get('departmentId') is not None:
self.department_id = m.get('departmentId')
if m.get('deptCity') is not None:
self.dept_city = m.get('deptCity')
if m.get('deptDate') is not None:
self.dept_date = m.get('deptDate')
if m.get('deptStation') is not None:
self.dept_station = m.get('deptStation')
if m.get('deptTime') is not None:
self.dept_time = m.get('deptTime')
if m.get('discount') is not None:
self.discount = m.get('discount')
if m.get('feeType') is not None:
self.fee_type = m.get('feeType')
if m.get('flightNo') is not None:
self.flight_no = m.get('flightNo')
if m.get('index') is not None:
self.index = m.get('index')
if m.get('insuranceFee') is not None:
self.insurance_fee = m.get('insuranceFee')
if m.get('invoiceTitle') is not None:
self.invoice_title = m.get('invoiceTitle')
if m.get('itineraryNum') is not None:
self.itinerary_num = m.get('itineraryNum')
if m.get('itineraryPrice') is not None:
self.itinerary_price = m.get('itineraryPrice')
if m.get('mostDifferenceDeptTime') is not None:
self.most_difference_dept_time = m.get('mostDifferenceDeptTime')
if m.get('mostDifferenceDiscount') is not None:
self.most_difference_discount = m.get('mostDifferenceDiscount')
if m.get('mostDifferenceFlightNo') is not None:
self.most_difference_flight_no = m.get('mostDifferenceFlightNo')
if m.get('mostDifferencePrice') is not None:
self.most_difference_price = m.get('mostDifferencePrice')
if m.get('mostDifferenceReason') is not None:
self.most_difference_reason = m.get('mostDifferenceReason')
if m.get('mostPrice') is not None:
self.most_price = m.get('mostPrice')
if m.get('negotiationCouponFee') is not None:
self.negotiation_coupon_fee = m.get('negotiationCouponFee')
if m.get('oilFee') is not None:
self.oil_fee = m.get('oilFee')
if m.get('orderId') is not None:
self.order_id = m.get('orderId')
if m.get('overApplyId') is not None:
self.over_apply_id = m.get('overApplyId')
if m.get('primaryId') is not None:
self.primary_id = m.get('primaryId')
if m.get('projectCode') is not None:
self.project_code = m.get('projectCode')
if m.get('projectName') is not None:
self.project_name = m.get('projectName')
if m.get('refundFee') is not None:
self.refund_fee = m.get('refundFee')
if m.get('refundUpgradeCost') is not None:
self.refund_upgrade_cost = m.get('refundUpgradeCost')
if m.get('repeatRefund') is not None:
self.repeat_refund = m.get('repeatRefund')
if m.get('sealPrice') is not None:
self.seal_price = m.get('sealPrice')
if m.get('serviceFee') is not None:
self.service_fee = m.get('serviceFee')
if m.get('settlementFee') is not None:
self.settlement_fee = m.get('settlementFee')
if m.get('settlementTime') is not None:
self.settlement_time = m.get('settlementTime')
if m.get('settlementType') is not None:
self.settlement_type = m.get('settlementType')
if m.get('status') is not None:
self.status = m.get('status')
if m.get('ticketId') is not None:
self.ticket_id = m.get('ticketId')
if m.get('travelerId') is not None:
self.traveler_id = m.get('travelerId')
if m.get('travelerName') is not None:
self.traveler_name = m.get('travelerName')
if m.get('upgradeCost') is not None:
self.upgrade_cost = m.get('upgradeCost')
if m.get('bookerJobNo') is not None:
self.booker_job_no = m.get('bookerJobNo')
if m.get('travelerJobNo') is not None:
self.traveler_job_no = m.get('travelerJobNo')
return self
class BillSettementFlightResponseBodyModule(TeaModel):
def __init__(
self,
category: int = None,
corp_id: str = None,
data_list: List[BillSettementFlightResponseBodyModuleDataList] = None,
period_end: str = None,
period_start: str = None,
total_num: int = None,
):
        # Category
        self.category = category
        # Corp ID
        self.corp_id = corp_id
        # Data list
        self.data_list = data_list
        # Accounting update period end date
        self.period_end = period_end
        # Accounting update period start date
        self.period_start = period_start
        # Total number of records
        self.total_num = total_num
def validate(self):
if self.data_list:
for k in self.data_list:
if k:
k.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.category is not None:
result['category'] = self.category
if self.corp_id is not None:
result['corpId'] = self.corp_id
result['dataList'] = []
if self.data_list is not None:
for k in self.data_list:
result['dataList'].append(k.to_map() if k else None)
if self.period_end is not None:
result['periodEnd'] = self.period_end
if self.period_start is not None:
result['periodStart'] = self.period_start
if self.total_num is not None:
result['totalNum'] = self.total_num
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('category') is not None:
self.category = m.get('category')
if m.get('corpId') is not None:
self.corp_id = m.get('corpId')
self.data_list = []
if m.get('dataList') is not None:
for k in m.get('dataList'):
temp_model = BillSettementFlightResponseBodyModuleDataList()
self.data_list.append(temp_model.from_map(k))
if m.get('periodEnd') is not None:
self.period_end = m.get('periodEnd')
if m.get('periodStart') is not None:
self.period_start = m.get('periodStart')
if m.get('totalNum') is not None:
self.total_num = m.get('totalNum')
return self
class BillSettementFlightResponseBody(TeaModel):
def __init__(
self,
result_msg: str = None,
module: BillSettementFlightResponseBodyModule = None,
success: bool = None,
result_code: int = None,
):
        # Result message
        self.result_msg = result_msg
        # Response module
        self.module = module
        # Whether the call succeeded
        self.success = success
        # Result code
        self.result_code = result_code
def validate(self):
if self.module:
self.module.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.result_msg is not None:
result['resultMsg'] = self.result_msg
if self.module is not None:
result['module'] = self.module.to_map()
if self.success is not None:
result['success'] = self.success
if self.result_code is not None:
result['resultCode'] = self.result_code
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('resultMsg') is not None:
self.result_msg = m.get('resultMsg')
if m.get('module') is not None:
temp_model = BillSettementFlightResponseBodyModule()
self.module = temp_model.from_map(m['module'])
if m.get('success') is not None:
self.success = m.get('success')
if m.get('resultCode') is not None:
self.result_code = m.get('resultCode')
return self
class BillSettementFlightResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: BillSettementFlightResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = BillSettementFlightResponseBody()
self.body = temp_model.from_map(m['body'])
return self
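
# --- Illustrative usage sketch (not part of the generated SDK models) ---
# Shows how flight settlement requests could be generated page by page; the
# field docs above cap pageSize at 500 and start pageNumber at 1. All values
# are invented sample data.
def _example_page_flight_settlement_requests(total_pages: int = 3):
    for page in range(1, total_pages + 1):
        req = BillSettementFlightRequest(
            corp_id='corp123',
            category=1,  # 1 = flight
            page_size=100,
            page_number=page,
            period_start='2021-03-01 00:00:00',
            period_end='2021-03-31 23:59:59',
        )
        yield req.to_map()
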
class GetHotelExceedApplyHeaders(TeaModel):
def __init__(
self,
common_headers: Dict[str, str] = None,
x_acs_dingtalk_access_token: str = None,
):
self.common_headers = common_headers
self.x_acs_dingtalk_access_token = x_acs_dingtalk_access_token
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.common_headers is not None:
result['commonHeaders'] = self.common_headers
if self.x_acs_dingtalk_access_token is not None:
result['x-acs-dingtalk-access-token'] = self.x_acs_dingtalk_access_token
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('commonHeaders') is not None:
self.common_headers = m.get('commonHeaders')
if m.get('x-acs-dingtalk-access-token') is not None:
self.x_acs_dingtalk_access_token = m.get('x-acs-dingtalk-access-token')
return self
class GetHotelExceedApplyRequest(TeaModel):
def __init__(
self,
corp_id: str = None,
apply_id: str = None,
):
        # Third-party corp ID
        self.corp_id = corp_id
        # BTrip exceed-standard approval form ID
        self.apply_id = apply_id
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.corp_id is not None:
result['corpId'] = self.corp_id
if self.apply_id is not None:
result['applyId'] = self.apply_id
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('corpId') is not None:
self.corp_id = m.get('corpId')
if m.get('applyId') is not None:
self.apply_id = m.get('applyId')
return self
class GetHotelExceedApplyResponseBodyApplyIntentionInfoDO(TeaModel):
def __init__(
self,
check_in: str = None,
check_out: str = None,
city_code: str = None,
city_name: str = None,
price: int = None,
together: bool = None,
type: int = None,
):
        # Check-in date
        self.check_in = check_in
        # Check-out date
        self.check_out = check_out
        # Three-letter code of the check-in city
        self.city_code = city_code
        # Check-in city name
        self.city_name = city_name
        # Intended hotel amount (in cents)
        self.price = price
        # Whether sharing a room
        self.together = together
        # Exceed-standard type; 32 = amount exceeds the standard
        self.type = type
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.check_in is not None:
result['checkIn'] = self.check_in
if self.check_out is not None:
result['checkOut'] = self.check_out
if self.city_code is not None:
result['cityCode'] = self.city_code
if self.city_name is not None:
result['cityName'] = self.city_name
if self.price is not None:
result['price'] = self.price
if self.together is not None:
result['together'] = self.together
if self.type is not None:
result['type'] = self.type
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('checkIn') is not None:
self.check_in = m.get('checkIn')
if m.get('checkOut') is not None:
self.check_out = m.get('checkOut')
if m.get('cityCode') is not None:
self.city_code = m.get('cityCode')
if m.get('cityName') is not None:
self.city_name = m.get('cityName')
if m.get('price') is not None:
self.price = m.get('price')
if m.get('together') is not None:
self.together = m.get('together')
if m.get('type') is not None:
self.type = m.get('type')
return self
class GetHotelExceedApplyResponseBody(TeaModel):
def __init__(
self,
corp_id: str = None,
apply_id: int = None,
status: int = None,
btrip_cause: str = None,
exceed_type: int = None,
exceed_reason: str = None,
origin_standard: str = None,
submit_time: str = None,
user_id: str = None,
apply_intention_info_do: GetHotelExceedApplyResponseBodyApplyIntentionInfoDO = None,
thirdpart_apply_id: str = None,
):
        # Third-party corp ID
        self.corp_id = corp_id
        # BTrip exceed-standard approval form ID
        self.apply_id = apply_id
        # Approval form status: 0 = pending, 1 = approved, 2 = rejected
        self.status = status
        # Business trip reason
        self.btrip_cause = btrip_cause
        # Exceed-standard type; 32 = amount exceeds the standard
        self.exceed_type = exceed_type
        # Exceed-standard reason
        self.exceed_reason = exceed_reason
        # Original travel standard
        self.origin_standard = origin_standard
        # Approval form submission time
        self.submit_time = submit_time
        # Third-party user ID
        self.user_id = user_id
        # Intended travel information
        self.apply_intention_info_do = apply_intention_info_do
        # Third-party business trip approval form number
        self.thirdpart_apply_id = thirdpart_apply_id
def validate(self):
if self.apply_intention_info_do:
self.apply_intention_info_do.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.corp_id is not None:
result['corpId'] = self.corp_id
if self.apply_id is not None:
result['applyId'] = self.apply_id
if self.status is not None:
result['status'] = self.status
if self.btrip_cause is not None:
result['btripCause'] = self.btrip_cause
if self.exceed_type is not None:
result['exceedType'] = self.exceed_type
if self.exceed_reason is not None:
result['exceedReason'] = self.exceed_reason
if self.origin_standard is not None:
result['originStandard'] = self.origin_standard
if self.submit_time is not None:
result['submitTime'] = self.submit_time
if self.user_id is not None:
result['userId'] = self.user_id
if self.apply_intention_info_do is not None:
result['applyIntentionInfoDO'] = self.apply_intention_info_do.to_map()
if self.thirdpart_apply_id is not None:
result['thirdpartApplyId'] = self.thirdpart_apply_id
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('corpId') is not None:
self.corp_id = m.get('corpId')
if m.get('applyId') is not None:
self.apply_id = m.get('applyId')
if m.get('status') is not None:
self.status = m.get('status')
if m.get('btripCause') is not None:
self.btrip_cause = m.get('btripCause')
if m.get('exceedType') is not None:
self.exceed_type = m.get('exceedType')
if m.get('exceedReason') is not None:
self.exceed_reason = m.get('exceedReason')
if m.get('originStandard') is not None:
self.origin_standard = m.get('originStandard')
if m.get('submitTime') is not None:
self.submit_time = m.get('submitTime')
if m.get('userId') is not None:
self.user_id = m.get('userId')
if m.get('applyIntentionInfoDO') is not None:
temp_model = GetHotelExceedApplyResponseBodyApplyIntentionInfoDO()
self.apply_intention_info_do = temp_model.from_map(m['applyIntentionInfoDO'])
if m.get('thirdpartApplyId') is not None:
self.thirdpart_apply_id = m.get('thirdpartApplyId')
return self
class GetHotelExceedApplyResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: GetHotelExceedApplyResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = GetHotelExceedApplyResponseBody()
self.body = temp_model.from_map(m['body'])
return self
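
# --- Illustrative usage sketch (not part of the generated SDK models) ---
# Shows how a raw GetHotelExceedApply response dict could be deserialized,
# including the nested applyIntentionInfoDO. All values are invented
# sample data.
def _example_parse_hotel_exceed_apply():
    raw = {
        'corpId': 'corp123',
        'applyId': 42,
        'status': 1,       # 1 = approved
        'exceedType': 32,  # 32 = amount exceeds the standard
        'applyIntentionInfoDO': {
            'checkIn': '2021-04-01',
            'checkOut': '2021-04-03',
            'cityName': 'Hangzhou',
            'price': 68800,  # intended hotel amount in cents
        },
    }
    body = GetHotelExceedApplyResponseBody().from_map(raw)
    intention = body.apply_intention_info_do
    return intention.city_name, intention.price
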
class QueryUnionOrderHeaders(TeaModel):
def __init__(
self,
common_headers: Dict[str, str] = None,
x_acs_dingtalk_access_token: str = None,
):
self.common_headers = common_headers
self.x_acs_dingtalk_access_token = x_acs_dingtalk_access_token
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.common_headers is not None:
result['commonHeaders'] = self.common_headers
if self.x_acs_dingtalk_access_token is not None:
result['x-acs-dingtalk-access-token'] = self.x_acs_dingtalk_access_token
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('commonHeaders') is not None:
self.common_headers = m.get('commonHeaders')
if m.get('x-acs-dingtalk-access-token') is not None:
self.x_acs_dingtalk_access_token = m.get('x-acs-dingtalk-access-token')
return self
class QueryUnionOrderRequest(TeaModel):
def __init__(
self,
corp_id: str = None,
third_part_apply_id: str = None,
union_no: str = None,
):
        # Third-party corp ID
        self.corp_id = corp_id
        # Third-party application form ID
        self.third_part_apply_id = third_part_apply_id
        # Union (associated) order number
        self.union_no = union_no
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.corp_id is not None:
result['corpId'] = self.corp_id
if self.third_part_apply_id is not None:
result['thirdPartApplyId'] = self.third_part_apply_id
if self.union_no is not None:
result['unionNo'] = self.union_no
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('corpId') is not None:
self.corp_id = m.get('corpId')
if m.get('thirdPartApplyId') is not None:
self.third_part_apply_id = m.get('thirdPartApplyId')
if m.get('unionNo') is not None:
self.union_no = m.get('unionNo')
return self
class QueryUnionOrderResponseBodyFlightList(TeaModel):
def __init__(
self,
flight_order_id: int = None,
flight_order_status: int = None,
):
        # Order ID
        self.flight_order_id = flight_order_id
        # Order status: 0 = pending payment, 1 = ticketing, 2 = closed,
        # 3 = has rebooking order, 4 = has refund order, 5 = ticketed
        # successfully, 6 = refund requested, 7 = rebooking requested
        self.flight_order_status = flight_order_status
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.flight_order_id is not None:
result['flightOrderId'] = self.flight_order_id
if self.flight_order_status is not None:
result['flightOrderStatus'] = self.flight_order_status
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('flightOrderId') is not None:
self.flight_order_id = m.get('flightOrderId')
if m.get('flightOrderStatus') is not None:
self.flight_order_status = m.get('flightOrderStatus')
return self
class QueryUnionOrderResponseBodyTrainList(TeaModel):
def __init__(
self,
train_order_id: int = None,
train_orderstatus: int = None,
):
        # Train order number
        self.train_order_id = train_order_id
        # Order status: 0 = pending payment, 1 = ticketing, 2 = closed,
        # 3 = rebooked successfully, 4 = refunded successfully, 5 = ticketing
        # completed, 6 = refund requested, 7 = rebooking requested,
        # 8 = ticketed and delivered, 9 = ticketing failed, 10 = rebooking
        # failed, 11 = refund failed
        self.train_orderstatus = train_orderstatus
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.train_order_id is not None:
result['trainOrderId'] = self.train_order_id
if self.train_orderstatus is not None:
result['trainOrderstatus'] = self.train_orderstatus
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('trainOrderId') is not None:
self.train_order_id = m.get('trainOrderId')
if m.get('trainOrderstatus') is not None:
self.train_orderstatus = m.get('trainOrderstatus')
return self
class QueryUnionOrderResponseBodyHotelList(TeaModel):
def __init__(
self,
hotel_order_id: int = None,
hotel_order_status: int = None,
):
        # Hotel order number
        self.hotel_order_id = hotel_order_id
        # Order status: 1 = awaiting confirmation, 2 = awaiting payment,
        # 3 = booked successfully, 4 = refund requested, 5 = refunded
        # successfully, 6 = closed, 7 = checked out successfully,
        # 8 = paid successfully
        self.hotel_order_status = hotel_order_status
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.hotel_order_id is not None:
result['hotelOrderId'] = self.hotel_order_id
if self.hotel_order_status is not None:
result['hotelOrderStatus'] = self.hotel_order_status
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('hotelOrderId') is not None:
self.hotel_order_id = m.get('hotelOrderId')
if m.get('hotelOrderStatus') is not None:
self.hotel_order_status = m.get('hotelOrderStatus')
return self
class QueryUnionOrderResponseBodyVehicleList(TeaModel):
def __init__(
self,
vehicle_order_id: int = None,
vehicle_order_status: int = None,
):
        # Car order number
        self.vehicle_order_id = vehicle_order_id
        # Order status: 0 = initial, 1 = timed out, 2 = dispatched
        # successfully, 3 = dispatch failed, 4 = refunded, 5 = paid,
        # 6 = canceled
        self.vehicle_order_status = vehicle_order_status
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.vehicle_order_id is not None:
result['vehicleOrderId'] = self.vehicle_order_id
if self.vehicle_order_status is not None:
result['vehicleOrderStatus'] = self.vehicle_order_status
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('vehicleOrderId') is not None:
self.vehicle_order_id = m.get('vehicleOrderId')
if m.get('vehicleOrderStatus') is not None:
self.vehicle_order_status = m.get('vehicleOrderStatus')
return self
class QueryUnionOrderResponseBody(TeaModel):
def __init__(
self,
flight_list: List[QueryUnionOrderResponseBodyFlightList] = None,
corp_id: str = None,
train_list: List[QueryUnionOrderResponseBodyTrainList] = None,
hotel_list: List[QueryUnionOrderResponseBodyHotelList] = None,
vehicle_list: List[QueryUnionOrderResponseBodyVehicleList] = None,
):
        # Flight order information
        self.flight_list = flight_list
        # Corp ID
        self.corp_id = corp_id
        # Train order information
        self.train_list = train_list
        # Hotel order information
        self.hotel_list = hotel_list
        # Car order information
        self.vehicle_list = vehicle_list
def validate(self):
if self.flight_list:
for k in self.flight_list:
if k:
k.validate()
if self.train_list:
for k in self.train_list:
if k:
k.validate()
if self.hotel_list:
for k in self.hotel_list:
if k:
k.validate()
if self.vehicle_list:
for k in self.vehicle_list:
if k:
k.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
result['flightList'] = []
if self.flight_list is not None:
for k in self.flight_list:
result['flightList'].append(k.to_map() if k else None)
if self.corp_id is not None:
result['corpId'] = self.corp_id
result['trainList'] = []
if self.train_list is not None:
for k in self.train_list:
result['trainList'].append(k.to_map() if k else None)
result['hotelList'] = []
if self.hotel_list is not None:
for k in self.hotel_list:
result['hotelList'].append(k.to_map() if k else None)
result['vehicleList'] = []
if self.vehicle_list is not None:
for k in self.vehicle_list:
result['vehicleList'].append(k.to_map() if k else None)
return result
def from_map(self, m: dict = None):
m = m or dict()
self.flight_list = []
if m.get('flightList') is not None:
for k in m.get('flightList'):
temp_model = QueryUnionOrderResponseBodyFlightList()
self.flight_list.append(temp_model.from_map(k))
if m.get('corpId') is not None:
self.corp_id = m.get('corpId')
self.train_list = []
if m.get('trainList') is not None:
for k in m.get('trainList'):
temp_model = QueryUnionOrderResponseBodyTrainList()
self.train_list.append(temp_model.from_map(k))
self.hotel_list = []
if m.get('hotelList') is not None:
for k in m.get('hotelList'):
temp_model = QueryUnionOrderResponseBodyHotelList()
self.hotel_list.append(temp_model.from_map(k))
self.vehicle_list = []
if m.get('vehicleList') is not None:
for k in m.get('vehicleList'):
temp_model = QueryUnionOrderResponseBodyVehicleList()
self.vehicle_list.append(temp_model.from_map(k))
return self
class QueryUnionOrderResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: QueryUnionOrderResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = QueryUnionOrderResponseBody()
self.body = temp_model.from_map(m['body'])
return self
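
# --- Illustrative usage sketch (not part of the generated SDK models) ---
# Shows how a raw QueryUnionOrder response dict could be deserialized; each
# nested list entry becomes a typed model rather than a raw dict. All values
# are invented sample data.
def _example_parse_union_order():
    raw = {
        'corpId': 'corp123',
        'flightList': [{'flightOrderId': 1001, 'flightOrderStatus': 5}],
        'trainList': [],
        'hotelList': [{'hotelOrderId': 2002, 'hotelOrderStatus': 3}],
        'vehicleList': [],
    }
    body = QueryUnionOrderResponseBody().from_map(raw)
    flight = body.flight_list[0]
    return flight.flight_order_id, flight.flight_order_status  # (1001, 5)
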
class QueryCityCarApplyHeaders(TeaModel):
def __init__(
self,
common_headers: Dict[str, str] = None,
x_acs_dingtalk_access_token: str = None,
):
self.common_headers = common_headers
self.x_acs_dingtalk_access_token = x_acs_dingtalk_access_token
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.common_headers is not None:
result['commonHeaders'] = self.common_headers
if self.x_acs_dingtalk_access_token is not None:
result['x-acs-dingtalk-access-token'] = self.x_acs_dingtalk_access_token
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('commonHeaders') is not None:
self.common_headers = m.get('commonHeaders')
if m.get('x-acs-dingtalk-access-token') is not None:
self.x_acs_dingtalk_access_token = m.get('x-acs-dingtalk-access-token')
return self
class QueryCityCarApplyRequest(TeaModel):
def __init__(
self,
corp_id: str = None,
created_end_at: str = None,
created_start_at: str = None,
page_number: int = None,
page_size: int = None,
third_part_apply_id: str = None,
user_id: str = None,
):
        # Third-party corp ID
        self.corp_id = corp_id
        # Approval forms created before this time (exclusive upper bound)
        self.created_end_at = created_end_at
        # Approval forms created at or after this time (inclusive lower bound)
        self.created_start_at = created_start_at
        # Page number, must be >= 1; default 1
        self.page_number = page_number
        # Page size, must be >= 1; default 20
        self.page_size = page_size
        # Third-party approval form ID
        self.third_part_apply_id = third_part_apply_id
        # Third-party employee ID
        self.user_id = user_id
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.corp_id is not None:
result['corpId'] = self.corp_id
if self.created_end_at is not None:
result['createdEndAt'] = self.created_end_at
if self.created_start_at is not None:
result['createdStartAt'] = self.created_start_at
if self.page_number is not None:
result['pageNumber'] = self.page_number
if self.page_size is not None:
result['pageSize'] = self.page_size
if self.third_part_apply_id is not None:
result['thirdPartApplyId'] = self.third_part_apply_id
if self.user_id is not None:
result['userId'] = self.user_id
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('corpId') is not None:
self.corp_id = m.get('corpId')
if m.get('createdEndAt') is not None:
self.created_end_at = m.get('createdEndAt')
if m.get('createdStartAt') is not None:
self.created_start_at = m.get('createdStartAt')
if m.get('pageNumber') is not None:
self.page_number = m.get('pageNumber')
if m.get('pageSize') is not None:
self.page_size = m.get('pageSize')
if m.get('thirdPartApplyId') is not None:
self.third_part_apply_id = m.get('thirdPartApplyId')
if m.get('userId') is not None:
self.user_id = m.get('userId')
return self
class QueryCityCarApplyResponseBodyApplyListApproverList(TeaModel):
def __init__(
self,
note: str = None,
operate_time: str = None,
order: int = None,
status: int = None,
status_desc: str = None,
user_id: str = None,
user_name: str = None,
):
        # Approval note
        self.note = note
        # Approval time
        self.operate_time = operate_time
        # Approver sort order
        self.order = order
        # Approval status: 0 = pending, 1 = approved, 2 = rejected
        self.status = status
        # Approval status description
        self.status_desc = status_desc
        # Approver employee ID
        self.user_id = user_id
        # Approver employee name
        self.user_name = user_name
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.note is not None:
result['note'] = self.note
if self.operate_time is not None:
result['operateTime'] = self.operate_time
if self.order is not None:
result['order'] = self.order
if self.status is not None:
result['status'] = self.status
if self.status_desc is not None:
result['statusDesc'] = self.status_desc
if self.user_id is not None:
result['userId'] = self.user_id
if self.user_name is not None:
result['userName'] = self.user_name
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('note') is not None:
self.note = m.get('note')
if m.get('operateTime') is not None:
self.operate_time = m.get('operateTime')
if m.get('order') is not None:
self.order = m.get('order')
if m.get('status') is not None:
self.status = m.get('status')
if m.get('statusDesc') is not None:
self.status_desc = m.get('statusDesc')
if m.get('userId') is not None:
self.user_id = m.get('userId')
if m.get('userName') is not None:
self.user_name = m.get('userName')
return self
class QueryCityCarApplyResponseBodyApplyListItineraryList(TeaModel):
def __init__(
self,
arr_city: str = None,
arr_city_code: str = None,
arr_date: str = None,
cost_center_id: int = None,
cost_center_name: str = None,
dep_city: str = None,
dep_city_code: str = None,
dep_date: str = None,
invoice_id: int = None,
invoice_name: str = None,
itinerary_id: str = None,
project_code: str = None,
project_title: str = None,
traffic_type: int = None,
):
        # Destination city
        self.arr_city = arr_city
        # Destination city three-letter code
        self.arr_city_code = arr_city_code
        # Arrival time at the destination city
        self.arr_date = arr_date
        # BTrip internal cost center ID
        self.cost_center_id = cost_center_id
        # Cost center name
        self.cost_center_name = cost_center_name
        # Departure city
        self.dep_city = dep_city
        # Departure city three-letter code
        self.dep_city_code = dep_city_code
        # Departure time
        self.dep_date = dep_date
        # BTrip internal invoice title ID
        self.invoice_id = invoice_id
        # Invoice title name
        self.invoice_name = invoice_name
        # BTrip internal itinerary ID
        self.itinerary_id = itinerary_id
        # Project code
        self.project_code = project_code
        # Project name
        self.project_title = project_title
        # Traffic type: 4 = intra-city transportation
        self.traffic_type = traffic_type
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.arr_city is not None:
result['arrCity'] = self.arr_city
if self.arr_city_code is not None:
result['arrCityCode'] = self.arr_city_code
if self.arr_date is not None:
result['arrDate'] = self.arr_date
if self.cost_center_id is not None:
result['costCenterId'] = self.cost_center_id
if self.cost_center_name is not None:
result['costCenterName'] = self.cost_center_name
if self.dep_city is not None:
result['depCity'] = self.dep_city
if self.dep_city_code is not None:
result['depCityCode'] = self.dep_city_code
if self.dep_date is not None:
result['depDate'] = self.dep_date
if self.invoice_id is not None:
result['invoiceId'] = self.invoice_id
if self.invoice_name is not None:
result['invoiceName'] = self.invoice_name
if self.itinerary_id is not None:
result['itineraryId'] = self.itinerary_id
if self.project_code is not None:
result['projectCode'] = self.project_code
if self.project_title is not None:
result['projectTitle'] = self.project_title
if self.traffic_type is not None:
result['trafficType'] = self.traffic_type
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('arrCity') is not None:
self.arr_city = m.get('arrCity')
if m.get('arrCityCode') is not None:
self.arr_city_code = m.get('arrCityCode')
if m.get('arrDate') is not None:
self.arr_date = m.get('arrDate')
if m.get('costCenterId') is not None:
self.cost_center_id = m.get('costCenterId')
if m.get('costCenterName') is not None:
self.cost_center_name = m.get('costCenterName')
if m.get('depCity') is not None:
self.dep_city = m.get('depCity')
if m.get('depCityCode') is not None:
self.dep_city_code = m.get('depCityCode')
if m.get('depDate') is not None:
self.dep_date = m.get('depDate')
if m.get('invoiceId') is not None:
self.invoice_id = m.get('invoiceId')
if m.get('invoiceName') is not None:
self.invoice_name = m.get('invoiceName')
if m.get('itineraryId') is not None:
self.itinerary_id = m.get('itineraryId')
if m.get('projectCode') is not None:
self.project_code = m.get('projectCode')
if m.get('projectTitle') is not None:
self.project_title = m.get('projectTitle')
if m.get('trafficType') is not None:
self.traffic_type = m.get('trafficType')
return self
class QueryCityCarApplyResponseBodyApplyList(TeaModel):
def __init__(
self,
approver_list: List[QueryCityCarApplyResponseBodyApplyListApproverList] = None,
depart_id: str = None,
depart_name: str = None,
gmt_create: str = None,
gmt_modified: str = None,
itinerary_list: List[QueryCityCarApplyResponseBodyApplyListItineraryList] = None,
status: int = None,
status_desc: str = None,
third_part_apply_id: str = None,
trip_cause: str = None,
trip_title: str = None,
user_id: str = None,
user_name: str = None,
):
        # Approver list
        self.approver_list = approver_list
        # Employee's department ID
        self.depart_id = depart_id
        # Employee's department name
        self.depart_name = depart_name
        # Creation time
        self.gmt_create = gmt_create
        # Last modified time
        self.gmt_modified = gmt_modified
        # Itineraries associated with the approval form
        self.itinerary_list = itinerary_list
        # Approval form status: 0 = applied, 1 = approved, 2 = rejected
        self.status = status
        # Approval form status description
        self.status_desc = status_desc
        # Third-party approval form ID
        self.third_part_apply_id = third_part_apply_id
        # Application reason
        self.trip_cause = trip_cause
        # Approval form title
        self.trip_title = trip_title
        # ID of the employee who initiated the approval
        self.user_id = user_id
        # Name of the employee who initiated the approval
        self.user_name = user_name
def validate(self):
if self.approver_list:
for k in self.approver_list:
if k:
k.validate()
if self.itinerary_list:
for k in self.itinerary_list:
if k:
k.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
result['approverList'] = []
if self.approver_list is not None:
for k in self.approver_list:
result['approverList'].append(k.to_map() if k else None)
if self.depart_id is not None:
result['departId'] = self.depart_id
if self.depart_name is not None:
result['departName'] = self.depart_name
if self.gmt_create is not None:
result['gmtCreate'] = self.gmt_create
if self.gmt_modified is not None:
result['gmtModified'] = self.gmt_modified
result['itineraryList'] = []
if self.itinerary_list is not None:
for k in self.itinerary_list:
result['itineraryList'].append(k.to_map() if k else None)
if self.status is not None:
result['status'] = self.status
if self.status_desc is not None:
result['statusDesc'] = self.status_desc
if self.third_part_apply_id is not None:
result['thirdPartApplyId'] = self.third_part_apply_id
if self.trip_cause is not None:
result['tripCause'] = self.trip_cause
if self.trip_title is not None:
result['tripTitle'] = self.trip_title
if self.user_id is not None:
result['userId'] = self.user_id
if self.user_name is not None:
result['userName'] = self.user_name
return result
def from_map(self, m: dict = None):
m = m or dict()
self.approver_list = []
if m.get('approverList') is not None:
for k in m.get('approverList'):
temp_model = QueryCityCarApplyResponseBodyApplyListApproverList()
self.approver_list.append(temp_model.from_map(k))
if m.get('departId') is not None:
self.depart_id = m.get('departId')
if m.get('departName') is not None:
self.depart_name = m.get('departName')
if m.get('gmtCreate') is not None:
self.gmt_create = m.get('gmtCreate')
if m.get('gmtModified') is not None:
self.gmt_modified = m.get('gmtModified')
self.itinerary_list = []
if m.get('itineraryList') is not None:
for k in m.get('itineraryList'):
temp_model = QueryCityCarApplyResponseBodyApplyListItineraryList()
self.itinerary_list.append(temp_model.from_map(k))
if m.get('status') is not None:
self.status = m.get('status')
if m.get('statusDesc') is not None:
self.status_desc = m.get('statusDesc')
if m.get('thirdPartApplyId') is not None:
self.third_part_apply_id = m.get('thirdPartApplyId')
if m.get('tripCause') is not None:
self.trip_cause = m.get('tripCause')
if m.get('tripTitle') is not None:
self.trip_title = m.get('tripTitle')
if m.get('userId') is not None:
self.user_id = m.get('userId')
if m.get('userName') is not None:
self.user_name = m.get('userName')
return self
class QueryCityCarApplyResponseBody(TeaModel):
def __init__(
self,
apply_list: List[QueryCityCarApplyResponseBodyApplyList] = None,
total: int = None,
):
        # Approval form list
        self.apply_list = apply_list
        # Total count
        self.total = total
def validate(self):
if self.apply_list:
for k in self.apply_list:
if k:
k.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
result['applyList'] = []
if self.apply_list is not None:
for k in self.apply_list:
result['applyList'].append(k.to_map() if k else None)
if self.total is not None:
result['total'] = self.total
return result
def from_map(self, m: dict = None):
m = m or dict()
self.apply_list = []
if m.get('applyList') is not None:
for k in m.get('applyList'):
temp_model = QueryCityCarApplyResponseBodyApplyList()
self.apply_list.append(temp_model.from_map(k))
if m.get('total') is not None:
self.total = m.get('total')
return self
class QueryCityCarApplyResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: QueryCityCarApplyResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = QueryCityCarApplyResponseBody()
self.body = temp_model.from_map(m['body'])
return self
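# Editor's note: the helper below is a usage sketch added for illustration and is
# not part of the generated SDK. The sample payload and its values are hypothetical.
def _demo_parse_query_city_car_apply_body():
    # Parse a camelCase wire-format dict into the typed model and back.
    wire = {
        'total': 1,
        'applyList': [{
            'thirdPartApplyId': 'apply-001',
            'status': 1,
            'statusDesc': 'approved',
            'userId': 'user-001',
            'userName': 'Alice',
        }],
    }
    body = QueryCityCarApplyResponseBody().from_map(wire)
    assert body.total == 1
    assert body.apply_list[0].status == 1
    # to_map() reproduces the camelCase wire format.
    return body.to_map()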
class GetTrainExceedApplyHeaders(TeaModel):
def __init__(
self,
common_headers: Dict[str, str] = None,
x_acs_dingtalk_access_token: str = None,
):
self.common_headers = common_headers
self.x_acs_dingtalk_access_token = x_acs_dingtalk_access_token
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.common_headers is not None:
result['commonHeaders'] = self.common_headers
if self.x_acs_dingtalk_access_token is not None:
result['x-acs-dingtalk-access-token'] = self.x_acs_dingtalk_access_token
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('commonHeaders') is not None:
self.common_headers = m.get('commonHeaders')
if m.get('x-acs-dingtalk-access-token') is not None:
self.x_acs_dingtalk_access_token = m.get('x-acs-dingtalk-access-token')
return self
class GetTrainExceedApplyRequest(TeaModel):
def __init__(
self,
corp_id: str = None,
apply_id: str = None,
):
        # Third-party corp ID
        self.corp_id = corp_id
        # Business-travel over-standard approval form ID
        self.apply_id = apply_id
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.corp_id is not None:
result['corpId'] = self.corp_id
if self.apply_id is not None:
result['applyId'] = self.apply_id
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('corpId') is not None:
self.corp_id = m.get('corpId')
if m.get('applyId') is not None:
self.apply_id = m.get('applyId')
return self
class GetTrainExceedApplyResponseBodyApplyIntentionInfoDO(TeaModel):
def __init__(
self,
price: int = None,
dep_city_name: str = None,
arr_city_name: str = None,
dep_city: str = None,
arr_city: str = None,
dep_time: str = None,
arr_time: str = None,
arr_station: str = None,
dep_station: str = None,
train_no: str = None,
train_type_desc: str = None,
seat_name: str = None,
):
        # Intended seat price (in cents)
        self.price = price
        # Departure city name
        self.dep_city_name = dep_city_name
        # Arrival city name
        self.arr_city_name = arr_city_name
        # Departure city three-letter code
        self.dep_city = dep_city
        # Arrival city three-letter code
        self.arr_city = arr_city
        # Departure time
        self.dep_time = dep_time
        # Arrival time
        self.arr_time = arr_time
        # Arrival station name
        self.arr_station = arr_station
        # Departure station name
        self.dep_station = dep_station
        # Intended train number
        self.train_no = train_no
        # Intended train type
        self.train_type_desc = train_type_desc
        # Intended seat name
        self.seat_name = seat_name
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.price is not None:
result['price'] = self.price
if self.dep_city_name is not None:
result['depCityName'] = self.dep_city_name
if self.arr_city_name is not None:
result['arrCityName'] = self.arr_city_name
if self.dep_city is not None:
result['depCity'] = self.dep_city
if self.arr_city is not None:
result['arrCity'] = self.arr_city
if self.dep_time is not None:
result['depTime'] = self.dep_time
if self.arr_time is not None:
result['arrTime'] = self.arr_time
if self.arr_station is not None:
result['arrStation'] = self.arr_station
if self.dep_station is not None:
result['depStation'] = self.dep_station
if self.train_no is not None:
result['trainNo'] = self.train_no
if self.train_type_desc is not None:
result['trainTypeDesc'] = self.train_type_desc
if self.seat_name is not None:
result['seatName'] = self.seat_name
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('price') is not None:
self.price = m.get('price')
if m.get('depCityName') is not None:
self.dep_city_name = m.get('depCityName')
if m.get('arrCityName') is not None:
self.arr_city_name = m.get('arrCityName')
if m.get('depCity') is not None:
self.dep_city = m.get('depCity')
if m.get('arrCity') is not None:
self.arr_city = m.get('arrCity')
if m.get('depTime') is not None:
self.dep_time = m.get('depTime')
if m.get('arrTime') is not None:
self.arr_time = m.get('arrTime')
if m.get('arrStation') is not None:
self.arr_station = m.get('arrStation')
if m.get('depStation') is not None:
self.dep_station = m.get('depStation')
if m.get('trainNo') is not None:
self.train_no = m.get('trainNo')
if m.get('trainTypeDesc') is not None:
self.train_type_desc = m.get('trainTypeDesc')
if m.get('seatName') is not None:
self.seat_name = m.get('seatName')
return self
class GetTrainExceedApplyResponseBody(TeaModel):
def __init__(
self,
corp_id: str = None,
apply_id: int = None,
status: int = None,
btrip_cause: str = None,
exceed_type: int = None,
exceed_reason: str = None,
origin_standard: str = None,
submit_time: str = None,
user_id: str = None,
apply_intention_info_do: GetTrainExceedApplyResponseBodyApplyIntentionInfoDO = None,
thirdpart_apply_id: str = None,
):
        # Third-party corp ID
        self.corp_id = corp_id
        # Business-travel over-standard approval form ID
        self.apply_id = apply_id
        # Approval form status: 0 - pending, 1 - approved, 2 - rejected
        self.status = status
        # Business trip reason
        self.btrip_cause = btrip_cause
        # Over-standard type; 32: seat over standard
        self.exceed_type = exceed_type
        # Over-standard reason
        self.exceed_reason = exceed_reason
        # Original travel standard
        self.origin_standard = origin_standard
        # Approval form submission time
        self.submit_time = submit_time
        # Third-party user ID
        self.user_id = user_id
        # Intended travel information
        self.apply_intention_info_do = apply_intention_info_do
        # Third-party business trip approval form number
        self.thirdpart_apply_id = thirdpart_apply_id
def validate(self):
if self.apply_intention_info_do:
self.apply_intention_info_do.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.corp_id is not None:
result['corpId'] = self.corp_id
if self.apply_id is not None:
result['applyId'] = self.apply_id
if self.status is not None:
result['status'] = self.status
if self.btrip_cause is not None:
result['btripCause'] = self.btrip_cause
if self.exceed_type is not None:
result['exceedType'] = self.exceed_type
if self.exceed_reason is not None:
result['exceedReason'] = self.exceed_reason
if self.origin_standard is not None:
result['originStandard'] = self.origin_standard
if self.submit_time is not None:
result['submitTime'] = self.submit_time
if self.user_id is not None:
result['userId'] = self.user_id
if self.apply_intention_info_do is not None:
result['applyIntentionInfoDO'] = self.apply_intention_info_do.to_map()
if self.thirdpart_apply_id is not None:
result['thirdpartApplyId'] = self.thirdpart_apply_id
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('corpId') is not None:
self.corp_id = m.get('corpId')
if m.get('applyId') is not None:
self.apply_id = m.get('applyId')
if m.get('status') is not None:
self.status = m.get('status')
if m.get('btripCause') is not None:
self.btrip_cause = m.get('btripCause')
if m.get('exceedType') is not None:
self.exceed_type = m.get('exceedType')
if m.get('exceedReason') is not None:
self.exceed_reason = m.get('exceedReason')
if m.get('originStandard') is not None:
self.origin_standard = m.get('originStandard')
if m.get('submitTime') is not None:
self.submit_time = m.get('submitTime')
if m.get('userId') is not None:
self.user_id = m.get('userId')
if m.get('applyIntentionInfoDO') is not None:
temp_model = GetTrainExceedApplyResponseBodyApplyIntentionInfoDO()
self.apply_intention_info_do = temp_model.from_map(m['applyIntentionInfoDO'])
if m.get('thirdpartApplyId') is not None:
self.thirdpart_apply_id = m.get('thirdpartApplyId')
return self
class GetTrainExceedApplyResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: GetTrainExceedApplyResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = GetTrainExceedApplyResponseBody()
self.body = temp_model.from_map(m['body'])
return self
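# Editor's note: usage sketch added for illustration; not part of the generated
# SDK. The corp and apply IDs below are made up.
def _demo_train_exceed_apply_request():
    req = GetTrainExceedApplyRequest(corp_id='corp123', apply_id='20001')
    wire = req.to_map()                      # {'corpId': 'corp123', 'applyId': '20001'}
    parsed = GetTrainExceedApplyRequest().from_map(wire)
    assert parsed.corp_id == req.corp_id
    return parsed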
# -*- coding: utf-8 -*-
# This file is auto-generated, don't edit it. Thanks.
from Tea.model import TeaModel
from typing import Dict, List
class ApproveCityCarApplyHeaders(TeaModel):
def __init__(
self,
common_headers: Dict[str, str] = None,
x_acs_dingtalk_access_token: str = None,
):
self.common_headers = common_headers
self.x_acs_dingtalk_access_token = x_acs_dingtalk_access_token
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.common_headers is not None:
result['commonHeaders'] = self.common_headers
if self.x_acs_dingtalk_access_token is not None:
result['x-acs-dingtalk-access-token'] = self.x_acs_dingtalk_access_token
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('commonHeaders') is not None:
self.common_headers = m.get('commonHeaders')
if m.get('x-acs-dingtalk-access-token') is not None:
self.x_acs_dingtalk_access_token = m.get('x-acs-dingtalk-access-token')
return self
class ApproveCityCarApplyRequest(TeaModel):
def __init__(
self,
corp_id: str = None,
operate_time: str = None,
remark: str = None,
status: int = None,
third_part_apply_id: str = None,
user_id: str = None,
ding_suite_key: str = None,
ding_corp_id: str = None,
ding_token_grant_type: int = None,
):
        # Third-party corp ID
        self.corp_id = corp_id
        # Approval time
        self.operate_time = operate_time
        # Approval remark
        self.remark = remark
        # Approval result: 1 - approved, 2 - rejected
        self.status = status
        # Third-party approval form ID
        self.third_part_apply_id = third_part_apply_id
        # Third-party employee ID of the approver
        self.user_id = user_id
        # suiteKey
        self.ding_suite_key = ding_suite_key
        # account
        self.ding_corp_id = ding_corp_id
        # tokenGrantType
        self.ding_token_grant_type = ding_token_grant_type
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.corp_id is not None:
result['corpId'] = self.corp_id
if self.operate_time is not None:
result['operateTime'] = self.operate_time
if self.remark is not None:
result['remark'] = self.remark
if self.status is not None:
result['status'] = self.status
if self.third_part_apply_id is not None:
result['thirdPartApplyId'] = self.third_part_apply_id
if self.user_id is not None:
result['userId'] = self.user_id
if self.ding_suite_key is not None:
result['dingSuiteKey'] = self.ding_suite_key
if self.ding_corp_id is not None:
result['dingCorpId'] = self.ding_corp_id
if self.ding_token_grant_type is not None:
result['dingTokenGrantType'] = self.ding_token_grant_type
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('corpId') is not None:
self.corp_id = m.get('corpId')
if m.get('operateTime') is not None:
self.operate_time = m.get('operateTime')
if m.get('remark') is not None:
self.remark = m.get('remark')
if m.get('status') is not None:
self.status = m.get('status')
if m.get('thirdPartApplyId') is not None:
self.third_part_apply_id = m.get('thirdPartApplyId')
if m.get('userId') is not None:
self.user_id = m.get('userId')
if m.get('dingSuiteKey') is not None:
self.ding_suite_key = m.get('dingSuiteKey')
if m.get('dingCorpId') is not None:
self.ding_corp_id = m.get('dingCorpId')
if m.get('dingTokenGrantType') is not None:
self.ding_token_grant_type = m.get('dingTokenGrantType')
return self
class ApproveCityCarApplyResponseBody(TeaModel):
def __init__(
self,
approve_result: bool = None,
):
        # Approval result
        self.approve_result = approve_result
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.approve_result is not None:
result['approveResult'] = self.approve_result
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('approveResult') is not None:
self.approve_result = m.get('approveResult')
return self
class ApproveCityCarApplyResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: ApproveCityCarApplyResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = ApproveCityCarApplyResponseBody()
self.body = temp_model.from_map(m['body'])
return self
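# Editor's note: usage sketch added for illustration; not part of the generated
# SDK. All field values below are hypothetical.
def _demo_approve_city_car_apply():
    req = ApproveCityCarApplyRequest(
        corp_id='corp123',
        operate_time='2021-03-18 20:26:56',
        remark='approved by manager',
        status=1,                      # 1 - approved, 2 - rejected
        third_part_apply_id='apply-001',
        user_id='user-001',
    )
    # Serialize to the camelCase wire format expected by the API.
    return req.to_map()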
class BillSettementHotelHeaders(TeaModel):
def __init__(
self,
common_headers: Dict[str, str] = None,
x_acs_dingtalk_access_token: str = None,
):
self.common_headers = common_headers
self.x_acs_dingtalk_access_token = x_acs_dingtalk_access_token
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.common_headers is not None:
result['commonHeaders'] = self.common_headers
if self.x_acs_dingtalk_access_token is not None:
result['x-acs-dingtalk-access-token'] = self.x_acs_dingtalk_access_token
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('commonHeaders') is not None:
self.common_headers = m.get('commonHeaders')
if m.get('x-acs-dingtalk-access-token') is not None:
self.x_acs_dingtalk_access_token = m.get('x-acs-dingtalk-access-token')
return self
class BillSettementHotelRequest(TeaModel):
def __init__(
self,
corp_id: str = None,
category: int = None,
page_size: int = None,
period_start: str = None,
page_number: int = None,
period_end: str = None,
):
        # Third-party corp ID
        self.corp_id = corp_id
        # Category: 1 - flight; 2 - hotel; 4 - car; 6 - business-travel train ticket
        self.category = category
        # Page size; default 100, maximum 500
        self.page_size = page_size
        # Accounting update start date
        self.period_start = period_start
        # Page number, starting from 1
        self.page_number = page_number
        # Accounting update end date
        self.period_end = period_end
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.corp_id is not None:
result['corpId'] = self.corp_id
if self.category is not None:
result['category'] = self.category
if self.page_size is not None:
result['pageSize'] = self.page_size
if self.period_start is not None:
result['periodStart'] = self.period_start
if self.page_number is not None:
result['pageNumber'] = self.page_number
if self.period_end is not None:
result['periodEnd'] = self.period_end
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('corpId') is not None:
self.corp_id = m.get('corpId')
if m.get('category') is not None:
self.category = m.get('category')
if m.get('pageSize') is not None:
self.page_size = m.get('pageSize')
if m.get('periodStart') is not None:
self.period_start = m.get('periodStart')
if m.get('pageNumber') is not None:
self.page_number = m.get('pageNumber')
if m.get('periodEnd') is not None:
self.period_end = m.get('periodEnd')
return self
class BillSettementHotelResponseBodyModuleDataList(TeaModel):
def __init__(
self,
alipay_trade_no: str = None,
apply_id: str = None,
book_time: str = None,
booker_id: str = None,
booker_name: str = None,
capital_direction: str = None,
cascade_department: str = None,
check_in_date: str = None,
checkout_date: str = None,
city: str = None,
city_code: str = None,
corp_refund_fee: float = None,
corp_total_fee: float = None,
cost_center: str = None,
cost_center_number: str = None,
department: str = None,
department_id: str = None,
fee_type: str = None,
fees: float = None,
fu_point_fee: float = None,
hotel_name: str = None,
index: str = None,
invoice_title: str = None,
is_negotiation: bool = None,
is_share_str: str = None,
nights: int = None,
order_id: str = None,
order_price: float = None,
order_type: str = None,
over_apply_id: str = None,
person_refund_fee: float = None,
person_settle_price: float = None,
primary_id: int = None,
project_code: str = None,
project_name: str = None,
promotion_fee: float = None,
room_number: int = None,
room_price: float = None,
room_type: str = None,
service_fee: float = None,
settlement_fee: int = None,
settlement_time: str = None,
settlement_type: str = None,
status: int = None,
total_nights: int = None,
traveler_id: str = None,
traveler_name: str = None,
booker_job_no: str = None,
traveler_job_no: str = None,
):
        # Transaction serial number
        self.alipay_trade_no = alipay_trade_no
        # Approval form number
        self.apply_id = apply_id
        # Booking time
        self.book_time = book_time
        # Booker user ID
        self.booker_id = booker_id
        # Booker name
        self.booker_name = booker_name
        # Capital direction
        self.capital_direction = capital_direction
        # Cascaded department path
        self.cascade_department = cascade_department
        # Check-in date
        self.check_in_date = check_in_date
        # Check-out date
        self.checkout_date = checkout_date
        # Stay city
        self.city = city
        # City code
        self.city_code = city_code
        # Corporate refund amount
        self.corp_refund_fee = corp_refund_fee
        # Corporate payment amount
        self.corp_total_fee = corp_total_fee
        # Cost center name
        self.cost_center = cost_center
        # Cost center code
        self.cost_center_number = cost_center_number
        # Leaf-level department
        self.department = department
        # Department ID
        self.department_id = department_id
        # Fee type
        self.fee_type = fee_type
        # Miscellaneous fees
        self.fees = fees
        # Amount paid with Fu points (fudou)
        self.fu_point_fee = fu_point_fee
        # Hotel name
        self.hotel_name = hotel_name
        # Index number
        self.index = index
        # Invoice title
        self.invoice_title = invoice_title
        # Whether a negotiated (contract) price was used
        self.is_negotiation = is_negotiation
        # Whether the room was shared
        self.is_share_str = is_share_str
        # Number of nights stayed
        self.nights = nights
        # Order number
        self.order_id = order_id
        # Order amount
        self.order_price = order_price
        # Order type
        self.order_type = order_type
        # Over-standard approval form number
        self.over_apply_id = over_apply_id
        # Personal refund amount
        self.person_refund_fee = person_refund_fee
        # Personal payment amount
        self.person_settle_price = person_settle_price
        # Primary key ID
        self.primary_id = primary_id
        # Project code
        self.project_code = project_code
        # Project name
        self.project_name = project_name
        # Coupon (promotion) amount
        self.promotion_fee = promotion_fee
        # Number of rooms
        self.room_number = room_number
        # Room price
        self.room_price = room_price
        # Room type
        self.room_type = room_type
        # Service fee; only shown for feeType 20111 and 20112
        self.service_fee = service_fee
        # Settlement amount
        self.settlement_fee = settlement_fee
        # Settlement time
        self.settlement_time = settlement_time
        # Settlement type
        self.settlement_type = settlement_type
        # Posting (accounting entry) status
        self.status = status
        # Total room nights
        self.total_nights = total_nights
        # Traveler user ID
        self.traveler_id = traveler_id
        # Traveler name
        self.traveler_name = traveler_name
        # Booker job number
        self.booker_job_no = booker_job_no
        # Traveler job number
        self.traveler_job_no = traveler_job_no
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.alipay_trade_no is not None:
result['alipayTradeNo'] = self.alipay_trade_no
if self.apply_id is not None:
result['applyId'] = self.apply_id
if self.book_time is not None:
result['bookTime'] = self.book_time
if self.booker_id is not None:
result['bookerId'] = self.booker_id
if self.booker_name is not None:
result['bookerName'] = self.booker_name
if self.capital_direction is not None:
result['capitalDirection'] = self.capital_direction
if self.cascade_department is not None:
result['cascadeDepartment'] = self.cascade_department
if self.check_in_date is not None:
result['checkInDate'] = self.check_in_date
if self.checkout_date is not None:
result['checkoutDate'] = self.checkout_date
if self.city is not None:
result['city'] = self.city
if self.city_code is not None:
result['cityCode'] = self.city_code
if self.corp_refund_fee is not None:
result['corpRefundFee'] = self.corp_refund_fee
if self.corp_total_fee is not None:
result['corpTotalFee'] = self.corp_total_fee
if self.cost_center is not None:
result['costCenter'] = self.cost_center
if self.cost_center_number is not None:
result['costCenterNumber'] = self.cost_center_number
if self.department is not None:
result['department'] = self.department
if self.department_id is not None:
result['departmentId'] = self.department_id
if self.fee_type is not None:
result['feeType'] = self.fee_type
if self.fees is not None:
result['fees'] = self.fees
if self.fu_point_fee is not None:
result['fuPointFee'] = self.fu_point_fee
if self.hotel_name is not None:
result['hotelName'] = self.hotel_name
if self.index is not None:
result['index'] = self.index
if self.invoice_title is not None:
result['invoiceTitle'] = self.invoice_title
if self.is_negotiation is not None:
result['isNegotiation'] = self.is_negotiation
if self.is_share_str is not None:
result['isShareStr'] = self.is_share_str
if self.nights is not None:
result['nights'] = self.nights
if self.order_id is not None:
result['orderId'] = self.order_id
if self.order_price is not None:
result['orderPrice'] = self.order_price
if self.order_type is not None:
result['orderType'] = self.order_type
if self.over_apply_id is not None:
result['overApplyId'] = self.over_apply_id
if self.person_refund_fee is not None:
result['personRefundFee'] = self.person_refund_fee
if self.person_settle_price is not None:
result['personSettlePrice'] = self.person_settle_price
if self.primary_id is not None:
result['primaryId'] = self.primary_id
if self.project_code is not None:
result['projectCode'] = self.project_code
if self.project_name is not None:
result['projectName'] = self.project_name
if self.promotion_fee is not None:
result['promotionFee'] = self.promotion_fee
if self.room_number is not None:
result['roomNumber'] = self.room_number
if self.room_price is not None:
result['roomPrice'] = self.room_price
if self.room_type is not None:
result['roomType'] = self.room_type
if self.service_fee is not None:
result['serviceFee'] = self.service_fee
if self.settlement_fee is not None:
result['settlementFee'] = self.settlement_fee
if self.settlement_time is not None:
result['settlementTime'] = self.settlement_time
if self.settlement_type is not None:
result['settlementType'] = self.settlement_type
if self.status is not None:
result['status'] = self.status
if self.total_nights is not None:
result['totalNights'] = self.total_nights
if self.traveler_id is not None:
result['travelerId'] = self.traveler_id
if self.traveler_name is not None:
result['travelerName'] = self.traveler_name
if self.booker_job_no is not None:
result['bookerJobNo'] = self.booker_job_no
if self.traveler_job_no is not None:
result['travelerJobNo'] = self.traveler_job_no
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('alipayTradeNo') is not None:
self.alipay_trade_no = m.get('alipayTradeNo')
if m.get('applyId') is not None:
self.apply_id = m.get('applyId')
if m.get('bookTime') is not None:
self.book_time = m.get('bookTime')
if m.get('bookerId') is not None:
self.booker_id = m.get('bookerId')
if m.get('bookerName') is not None:
self.booker_name = m.get('bookerName')
if m.get('capitalDirection') is not None:
self.capital_direction = m.get('capitalDirection')
if m.get('cascadeDepartment') is not None:
self.cascade_department = m.get('cascadeDepartment')
if m.get('checkInDate') is not None:
self.check_in_date = m.get('checkInDate')
if m.get('checkoutDate') is not None:
self.checkout_date = m.get('checkoutDate')
if m.get('city') is not None:
self.city = m.get('city')
if m.get('cityCode') is not None:
self.city_code = m.get('cityCode')
if m.get('corpRefundFee') is not None:
self.corp_refund_fee = m.get('corpRefundFee')
if m.get('corpTotalFee') is not None:
self.corp_total_fee = m.get('corpTotalFee')
if m.get('costCenter') is not None:
self.cost_center = m.get('costCenter')
if m.get('costCenterNumber') is not None:
self.cost_center_number = m.get('costCenterNumber')
if m.get('department') is not None:
self.department = m.get('department')
if m.get('departmentId') is not None:
self.department_id = m.get('departmentId')
if m.get('feeType') is not None:
self.fee_type = m.get('feeType')
if m.get('fees') is not None:
self.fees = m.get('fees')
if m.get('fuPointFee') is not None:
self.fu_point_fee = m.get('fuPointFee')
if m.get('hotelName') is not None:
self.hotel_name = m.get('hotelName')
if m.get('index') is not None:
self.index = m.get('index')
if m.get('invoiceTitle') is not None:
self.invoice_title = m.get('invoiceTitle')
if m.get('isNegotiation') is not None:
self.is_negotiation = m.get('isNegotiation')
if m.get('isShareStr') is not None:
self.is_share_str = m.get('isShareStr')
if m.get('nights') is not None:
self.nights = m.get('nights')
if m.get('orderId') is not None:
self.order_id = m.get('orderId')
if m.get('orderPrice') is not None:
self.order_price = m.get('orderPrice')
if m.get('orderType') is not None:
self.order_type = m.get('orderType')
if m.get('overApplyId') is not None:
self.over_apply_id = m.get('overApplyId')
if m.get('personRefundFee') is not None:
self.person_refund_fee = m.get('personRefundFee')
if m.get('personSettlePrice') is not None:
self.person_settle_price = m.get('personSettlePrice')
if m.get('primaryId') is not None:
self.primary_id = m.get('primaryId')
if m.get('projectCode') is not None:
self.project_code = m.get('projectCode')
if m.get('projectName') is not None:
self.project_name = m.get('projectName')
if m.get('promotionFee') is not None:
self.promotion_fee = m.get('promotionFee')
if m.get('roomNumber') is not None:
self.room_number = m.get('roomNumber')
if m.get('roomPrice') is not None:
self.room_price = m.get('roomPrice')
if m.get('roomType') is not None:
self.room_type = m.get('roomType')
if m.get('serviceFee') is not None:
self.service_fee = m.get('serviceFee')
if m.get('settlementFee') is not None:
self.settlement_fee = m.get('settlementFee')
if m.get('settlementTime') is not None:
self.settlement_time = m.get('settlementTime')
if m.get('settlementType') is not None:
self.settlement_type = m.get('settlementType')
if m.get('status') is not None:
self.status = m.get('status')
if m.get('totalNights') is not None:
self.total_nights = m.get('totalNights')
if m.get('travelerId') is not None:
self.traveler_id = m.get('travelerId')
if m.get('travelerName') is not None:
self.traveler_name = m.get('travelerName')
if m.get('bookerJobNo') is not None:
self.booker_job_no = m.get('bookerJobNo')
if m.get('travelerJobNo') is not None:
self.traveler_job_no = m.get('travelerJobNo')
return self
class BillSettementHotelResponseBodyModule(TeaModel):
def __init__(
self,
category: int = None,
corp_id: str = None,
data_list: List[BillSettementHotelResponseBodyModuleDataList] = None,
period_end: str = None,
period_start: str = None,
total_num: int = None,
):
        # Category
        self.category = category
        # Corp ID
        self.corp_id = corp_id
        # Data list
        self.data_list = data_list
        # Accounting update end date
        self.period_end = period_end
        # Accounting update start date
        self.period_start = period_start
        # Total number of records
        self.total_num = total_num
def validate(self):
if self.data_list:
for k in self.data_list:
if k:
k.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.category is not None:
result['category'] = self.category
if self.corp_id is not None:
result['corpId'] = self.corp_id
result['dataList'] = []
if self.data_list is not None:
for k in self.data_list:
result['dataList'].append(k.to_map() if k else None)
if self.period_end is not None:
result['periodEnd'] = self.period_end
if self.period_start is not None:
result['periodStart'] = self.period_start
if self.total_num is not None:
result['totalNum'] = self.total_num
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('category') is not None:
self.category = m.get('category')
if m.get('corpId') is not None:
self.corp_id = m.get('corpId')
self.data_list = []
if m.get('dataList') is not None:
for k in m.get('dataList'):
temp_model = BillSettementHotelResponseBodyModuleDataList()
self.data_list.append(temp_model.from_map(k))
if m.get('periodEnd') is not None:
self.period_end = m.get('periodEnd')
if m.get('periodStart') is not None:
self.period_start = m.get('periodStart')
if m.get('totalNum') is not None:
self.total_num = m.get('totalNum')
return self
class BillSettementHotelResponseBody(TeaModel):
def __init__(
self,
result_msg: str = None,
module: BillSettementHotelResponseBodyModule = None,
success: bool = None,
result_code: int = None,
):
        # Result message
        self.result_msg = result_msg
        # Response payload module
        self.module = module
        # Whether the call succeeded
        self.success = success
        # Result code
        self.result_code = result_code
def validate(self):
if self.module:
self.module.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.result_msg is not None:
result['resultMsg'] = self.result_msg
if self.module is not None:
result['module'] = self.module.to_map()
if self.success is not None:
result['success'] = self.success
if self.result_code is not None:
result['resultCode'] = self.result_code
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('resultMsg') is not None:
self.result_msg = m.get('resultMsg')
if m.get('module') is not None:
temp_model = BillSettementHotelResponseBodyModule()
self.module = temp_model.from_map(m['module'])
if m.get('success') is not None:
self.success = m.get('success')
if m.get('resultCode') is not None:
self.result_code = m.get('resultCode')
return self
class BillSettementHotelResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: BillSettementHotelResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = BillSettementHotelResponseBody()
self.body = temp_model.from_map(m['body'])
return self
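# Editor's note: usage sketch added for illustration; not part of the generated
# SDK. The corp ID and billing period below are hypothetical.
def _demo_hotel_bill_page_request(page_number: int = 1):
    # Build one page of a hotel settlement bill query; category 2 = hotel.
    return BillSettementHotelRequest(
        corp_id='corp123',
        category=2,
        page_size=100,
        page_number=page_number,
        period_start='2021-03-01 00:00:00',
        period_end='2021-03-31 23:59:59',
    ).to_map()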
class GetFlightExceedApplyHeaders(TeaModel):
def __init__(
self,
common_headers: Dict[str, str] = None,
x_acs_dingtalk_access_token: str = None,
):
self.common_headers = common_headers
self.x_acs_dingtalk_access_token = x_acs_dingtalk_access_token
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.common_headers is not None:
result['commonHeaders'] = self.common_headers
if self.x_acs_dingtalk_access_token is not None:
result['x-acs-dingtalk-access-token'] = self.x_acs_dingtalk_access_token
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('commonHeaders') is not None:
self.common_headers = m.get('commonHeaders')
if m.get('x-acs-dingtalk-access-token') is not None:
self.x_acs_dingtalk_access_token = m.get('x-acs-dingtalk-access-token')
return self
class GetFlightExceedApplyRequest(TeaModel):
def __init__(
self,
corp_id: str = None,
apply_id: str = None,
):
        # Third-party corp ID
        self.corp_id = corp_id
        # Business-travel over-standard approval form ID
        self.apply_id = apply_id
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.corp_id is not None:
result['corpId'] = self.corp_id
if self.apply_id is not None:
result['applyId'] = self.apply_id
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('corpId') is not None:
self.corp_id = m.get('corpId')
if m.get('applyId') is not None:
self.apply_id = m.get('applyId')
return self
class GetFlightExceedApplyResponseBodyApplyIntentionInfoDO(TeaModel):
def __init__(
self,
arr_city: str = None,
arr_city_name: str = None,
arr_time: str = None,
cabin: str = None,
cabin_class: int = None,
cabin_class_str: str = None,
dep_city: str = None,
dep_city_name: str = None,
dep_time: str = None,
discount: float = None,
flight_no: str = None,
price: int = None,
type: int = None,
):
        # Arrival city three-letter code
        self.arr_city = arr_city
        # Arrival city name
        self.arr_city_name = arr_city_name
        # Arrival time
        self.arr_time = arr_time
        # Over-standard cabin: F - first, C - business, Y - economy, P - premium economy
        self.cabin = cabin
        # Requested over-standard cabin class: 0 - first, 1 - business, 2 - economy, 3 - premium economy
        self.cabin_class = cabin_class
        # Cabin class description: first, business, economy, premium economy
        self.cabin_class_str = cabin_class_str
        # Departure city three-letter code
        self.dep_city = dep_city
        # Departure city name
        self.dep_city_name = dep_city_name
        # Departure time
        self.dep_time = dep_time
        # Discount
        self.discount = discount
        # Flight number
        self.flight_no = flight_no
        # Intended flight price (in yuan)
        self.price = price
        # Over-standard type: 1 - discount; 2, 8, 10 - time; 3, 9, 11 - discount and time
        self.type = type
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.arr_city is not None:
result['arrCity'] = self.arr_city
if self.arr_city_name is not None:
result['arrCityName'] = self.arr_city_name
if self.arr_time is not None:
result['arrTime'] = self.arr_time
if self.cabin is not None:
result['cabin'] = self.cabin
if self.cabin_class is not None:
result['cabinClass'] = self.cabin_class
if self.cabin_class_str is not None:
result['cabinClassStr'] = self.cabin_class_str
if self.dep_city is not None:
result['depCity'] = self.dep_city
if self.dep_city_name is not None:
result['depCityName'] = self.dep_city_name
if self.dep_time is not None:
result['depTime'] = self.dep_time
if self.discount is not None:
result['discount'] = self.discount
if self.flight_no is not None:
result['flightNo'] = self.flight_no
if self.price is not None:
result['price'] = self.price
if self.type is not None:
result['type'] = self.type
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('arrCity') is not None:
self.arr_city = m.get('arrCity')
if m.get('arrCityName') is not None:
self.arr_city_name = m.get('arrCityName')
if m.get('arrTime') is not None:
self.arr_time = m.get('arrTime')
if m.get('cabin') is not None:
self.cabin = m.get('cabin')
if m.get('cabinClass') is not None:
self.cabin_class = m.get('cabinClass')
if m.get('cabinClassStr') is not None:
self.cabin_class_str = m.get('cabinClassStr')
if m.get('depCity') is not None:
self.dep_city = m.get('depCity')
if m.get('depCityName') is not None:
self.dep_city_name = m.get('depCityName')
if m.get('depTime') is not None:
self.dep_time = m.get('depTime')
if m.get('discount') is not None:
self.discount = m.get('discount')
if m.get('flightNo') is not None:
self.flight_no = m.get('flightNo')
if m.get('price') is not None:
self.price = m.get('price')
if m.get('type') is not None:
self.type = m.get('type')
return self
class GetFlightExceedApplyResponseBody(TeaModel):
def __init__(
self,
corp_id: str = None,
apply_id: int = None,
status: int = None,
btrip_cause: str = None,
exceed_type: int = None,
exceed_reason: str = None,
origin_standard: str = None,
submit_time: str = None,
user_id: str = None,
apply_intention_info_do: GetFlightExceedApplyResponseBodyApplyIntentionInfoDO = None,
thirdpart_apply_id: str = None,
):
        # Third-party corp ID
        self.corp_id = corp_id
        # Business-travel over-standard approval form ID
        self.apply_id = apply_id
        # Approval form status: 0 - pending, 1 - approved, 2 - rejected
        self.status = status
        # Business trip reason
        self.btrip_cause = btrip_cause
        # Over-standard type: 1 - discount; 2, 8, 10 - time; 3, 9, 11 - discount and time
        self.exceed_type = exceed_type
        # Over-standard reason
        self.exceed_reason = exceed_reason
        # Original travel standard
        self.origin_standard = origin_standard
        # Approval form submission time
        self.submit_time = submit_time
        # Third-party user ID
        self.user_id = user_id
        # Intended travel information
        self.apply_intention_info_do = apply_intention_info_do
        # Third-party business trip approval form number
        self.thirdpart_apply_id = thirdpart_apply_id
def validate(self):
if self.apply_intention_info_do:
self.apply_intention_info_do.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.corp_id is not None:
result['corpId'] = self.corp_id
if self.apply_id is not None:
result['applyId'] = self.apply_id
if self.status is not None:
result['status'] = self.status
if self.btrip_cause is not None:
result['btripCause'] = self.btrip_cause
if self.exceed_type is not None:
result['exceedType'] = self.exceed_type
if self.exceed_reason is not None:
result['exceedReason'] = self.exceed_reason
if self.origin_standard is not None:
result['originStandard'] = self.origin_standard
if self.submit_time is not None:
result['submitTime'] = self.submit_time
if self.user_id is not None:
result['userId'] = self.user_id
if self.apply_intention_info_do is not None:
result['applyIntentionInfoDO'] = self.apply_intention_info_do.to_map()
if self.thirdpart_apply_id is not None:
result['thirdpartApplyId'] = self.thirdpart_apply_id
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('corpId') is not None:
self.corp_id = m.get('corpId')
if m.get('applyId') is not None:
self.apply_id = m.get('applyId')
if m.get('status') is not None:
self.status = m.get('status')
if m.get('btripCause') is not None:
self.btrip_cause = m.get('btripCause')
if m.get('exceedType') is not None:
self.exceed_type = m.get('exceedType')
if m.get('exceedReason') is not None:
self.exceed_reason = m.get('exceedReason')
if m.get('originStandard') is not None:
self.origin_standard = m.get('originStandard')
if m.get('submitTime') is not None:
self.submit_time = m.get('submitTime')
if m.get('userId') is not None:
self.user_id = m.get('userId')
if m.get('applyIntentionInfoDO') is not None:
temp_model = GetFlightExceedApplyResponseBodyApplyIntentionInfoDO()
self.apply_intention_info_do = temp_model.from_map(m['applyIntentionInfoDO'])
if m.get('thirdpartApplyId') is not None:
self.thirdpart_apply_id = m.get('thirdpartApplyId')
return self
class GetFlightExceedApplyResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: GetFlightExceedApplyResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = GetFlightExceedApplyResponseBody()
self.body = temp_model.from_map(m['body'])
return self
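# Editor's note: usage sketch added for illustration; not part of the generated
# SDK. The wire payload below is hypothetical.
def _demo_parse_flight_exceed_body():
    wire = {
        'corpId': 'corp123',
        'applyId': 1001,
        'status': 0,                   # 0 - pending
        'exceedType': 1,               # 1 - discount over standard
        'applyIntentionInfoDO': {'flightNo': 'MU1234', 'cabin': 'Y', 'price': 1200},
    }
    body = GetFlightExceedApplyResponseBody().from_map(wire)
    # Nested objects are hydrated into their typed models.
    assert body.apply_intention_info_do.flight_no == 'MU1234'
    return body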
class BillSettementCarHeaders(TeaModel):
def __init__(
self,
common_headers: Dict[str, str] = None,
x_acs_dingtalk_access_token: str = None,
):
self.common_headers = common_headers
self.x_acs_dingtalk_access_token = x_acs_dingtalk_access_token
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.common_headers is not None:
result['commonHeaders'] = self.common_headers
if self.x_acs_dingtalk_access_token is not None:
result['x-acs-dingtalk-access-token'] = self.x_acs_dingtalk_access_token
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('commonHeaders') is not None:
self.common_headers = m.get('commonHeaders')
if m.get('x-acs-dingtalk-access-token') is not None:
self.x_acs_dingtalk_access_token = m.get('x-acs-dingtalk-access-token')
return self
class BillSettementCarRequest(TeaModel):
def __init__(
self,
corp_id: str = None,
category: int = None,
page_size: int = None,
period_start: str = None,
period_end: str = None,
page_number: int = None,
):
        # Third-party corp ID
        self.corp_id = corp_id
        # Category: 1 - flight; 2 - hotel; 4 - car; 6 - business-travel train ticket
        self.category = category
        # Page size; default 100, maximum 500
        self.page_size = page_size
        # Accounting update start date
        self.period_start = period_start
        # Accounting update end date
        self.period_end = period_end
        # Page number, starting from 1
        self.page_number = page_number
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.corp_id is not None:
result['corpId'] = self.corp_id
if self.category is not None:
result['category'] = self.category
if self.page_size is not None:
result['pageSize'] = self.page_size
if self.period_start is not None:
result['periodStart'] = self.period_start
if self.period_end is not None:
result['periodEnd'] = self.period_end
if self.page_number is not None:
result['pageNumber'] = self.page_number
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('corpId') is not None:
self.corp_id = m.get('corpId')
if m.get('category') is not None:
self.category = m.get('category')
if m.get('pageSize') is not None:
self.page_size = m.get('pageSize')
if m.get('periodStart') is not None:
self.period_start = m.get('periodStart')
if m.get('periodEnd') is not None:
self.period_end = m.get('periodEnd')
if m.get('pageNumber') is not None:
self.page_number = m.get('pageNumber')
return self
class BillSettementCarResponseBodyModuleDataList(TeaModel):
def __init__(
self,
alipay_trade_no: str = None,
apply_id: str = None,
arr_city: str = None,
arr_date: str = None,
arr_location: str = None,
arr_time: str = None,
book_time: str = None,
booker_id: str = None,
booker_name: str = None,
business_category: str = None,
capital_direction: str = None,
car_level: str = None,
cascade_department: str = None,
cost_center: str = None,
cost_center_number: str = None,
coupon: float = None,
coupon_price: float = None,
department: str = None,
department_id: str = None,
dept_city: str = None,
dept_date: str = None,
dept_location: str = None,
dept_time: str = None,
estimate_drive_distance: str = None,
estimate_price: float = None,
fee_type: str = None,
index: str = None,
invoice_title: str = None,
memo: str = None,
order_id: str = None,
order_price: float = None,
over_apply_id: str = None,
person_settle_fee: float = None,
primary_id: str = None,
project_code: str = None,
project_name: str = None,
provider_name: str = None,
real_drive_distance: str = None,
real_from_addr: str = None,
real_to_addr: str = None,
service_fee: str = None,
settlement_fee: float = None,
settlement_time: str = None,
settlement_type: str = None,
special_order: str = None,
special_reason: str = None,
status: int = None,
traveler_id: str = None,
traveler_name: str = None,
user_confirm_desc: str = None,
booker_job_no: str = None,
traveler_job_no: str = None,
):
        # Payment transaction serial number
        self.alipay_trade_no = alipay_trade_no
        # Approval form number
        self.apply_id = apply_id
        # Arrival city
        self.arr_city = arr_city
        # Arrival date
        self.arr_date = arr_date
        # Arrival location
        self.arr_location = arr_location
        # Arrival time
        self.arr_time = arr_time
        # Booking time
        self.book_time = book_time
        # Booker user ID
        self.booker_id = booker_id
        # Booker name
        self.booker_name = booker_name
        # Business category (car use reason)
        self.business_category = business_category
        # Capital direction
        self.capital_direction = capital_direction
        # Car class
        self.car_level = car_level
        # Cascaded department path
        self.cascade_department = cascade_department
        # Cost center name
        self.cost_center = cost_center
        # Cost center number
        self.cost_center_number = cost_center_number
        # Coupon
        self.coupon = coupon
        # Coupon (discount) amount
        self.coupon_price = coupon_price
        # Leaf-level department
        self.department = department
        # Department ID
        self.department_id = department_id
        # Departure city
        self.dept_city = dept_city
        # Departure date
        self.dept_date = dept_date
        # Departure location
        self.dept_location = dept_location
        # Departure time
        self.dept_time = dept_time
        # Estimated driving distance
        self.estimate_drive_distance = estimate_drive_distance
        # Estimated price
        self.estimate_price = estimate_price
        # Fee type
        self.fee_type = fee_type
        # Index number
        self.index = index
        # Invoice title
        self.invoice_title = invoice_title
        # Memo (car use reason)
        self.memo = memo
        # Order ID
        self.order_id = order_id
        # Order amount
        self.order_price = order_price
        # Over-standard approval form number
        self.over_apply_id = over_apply_id
        # Personal payment amount
        self.person_settle_fee = person_settle_fee
        # Primary key ID
        self.primary_id = primary_id
        # Project code
        self.project_code = project_code
        # Project name
        self.project_name = project_name
        # Provider (supplier) name
        self.provider_name = provider_name
        # Actual driving distance
        self.real_drive_distance = real_drive_distance
        # Actual pick-up point
        self.real_from_addr = real_from_addr
        # Actual drop-off point
        self.real_to_addr = real_to_addr
        # Service fee; only shown for feeType 40111
        self.service_fee = service_fee
        # Settlement amount
        self.settlement_fee = settlement_fee
        # Settlement time
        self.settlement_time = settlement_time
        # Settlement type
        self.settlement_type = settlement_type
        # Specially flagged order
        self.special_order = special_order
        # Reason for the special flag
        self.special_reason = special_reason
        # Posting (accounting entry) status
        self.status = status
        # Traveler user ID
        self.traveler_id = traveler_id
        # Traveler name
        self.traveler_name = traveler_name
        # Whether the employee confirmed the order
        self.user_confirm_desc = user_confirm_desc
        # Booker job number
        self.booker_job_no = booker_job_no
        # Traveler job number
        self.traveler_job_no = traveler_job_no
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.alipay_trade_no is not None:
result['alipayTradeNo'] = self.alipay_trade_no
if self.apply_id is not None:
result['applyId'] = self.apply_id
if self.arr_city is not None:
result['arrCity'] = self.arr_city
if self.arr_date is not None:
result['arrDate'] = self.arr_date
if self.arr_location is not None:
result['arrLocation'] = self.arr_location
if self.arr_time is not None:
result['arrTime'] = self.arr_time
if self.book_time is not None:
result['bookTime'] = self.book_time
if self.booker_id is not None:
result['bookerId'] = self.booker_id
if self.booker_name is not None:
result['bookerName'] = self.booker_name
if self.business_category is not None:
result['businessCategory'] = self.business_category
if self.capital_direction is not None:
result['capitalDirection'] = self.capital_direction
if self.car_level is not None:
result['carLevel'] = self.car_level
if self.cascade_department is not None:
result['cascadeDepartment'] = self.cascade_department
if self.cost_center is not None:
result['costCenter'] = self.cost_center
if self.cost_center_number is not None:
result['costCenterNumber'] = self.cost_center_number
if self.coupon is not None:
result['coupon'] = self.coupon
if self.coupon_price is not None:
result['couponPrice'] = self.coupon_price
if self.department is not None:
result['department'] = self.department
if self.department_id is not None:
result['departmentId'] = self.department_id
if self.dept_city is not None:
result['deptCity'] = self.dept_city
if self.dept_date is not None:
result['deptDate'] = self.dept_date
if self.dept_location is not None:
result['deptLocation'] = self.dept_location
if self.dept_time is not None:
result['deptTime'] = self.dept_time
if self.estimate_drive_distance is not None:
result['estimateDriveDistance'] = self.estimate_drive_distance
if self.estimate_price is not None:
result['estimatePrice'] = self.estimate_price
if self.fee_type is not None:
result['feeType'] = self.fee_type
if self.index is not None:
result['index'] = self.index
if self.invoice_title is not None:
result['invoiceTitle'] = self.invoice_title
if self.memo is not None:
result['memo'] = self.memo
if self.order_id is not None:
result['orderId'] = self.order_id
if self.order_price is not None:
result['orderPrice'] = self.order_price
if self.over_apply_id is not None:
result['overApplyId'] = self.over_apply_id
if self.person_settle_fee is not None:
result['personSettleFee'] = self.person_settle_fee
if self.primary_id is not None:
result['primaryId'] = self.primary_id
if self.project_code is not None:
result['projectCode'] = self.project_code
if self.project_name is not None:
result['projectName'] = self.project_name
if self.provider_name is not None:
result['providerName'] = self.provider_name
if self.real_drive_distance is not None:
result['realDriveDistance'] = self.real_drive_distance
if self.real_from_addr is not None:
result['realFromAddr'] = self.real_from_addr
if self.real_to_addr is not None:
result['realToAddr'] = self.real_to_addr
if self.service_fee is not None:
result['serviceFee'] = self.service_fee
if self.settlement_fee is not None:
result['settlementFee'] = self.settlement_fee
if self.settlement_time is not None:
result['settlementTime'] = self.settlement_time
if self.settlement_type is not None:
result['settlementType'] = self.settlement_type
if self.special_order is not None:
result['specialOrder'] = self.special_order
if self.special_reason is not None:
result['specialReason'] = self.special_reason
if self.status is not None:
result['status'] = self.status
if self.traveler_id is not None:
result['travelerId'] = self.traveler_id
if self.traveler_name is not None:
result['travelerName'] = self.traveler_name
if self.user_confirm_desc is not None:
result['userConfirmDesc'] = self.user_confirm_desc
if self.booker_job_no is not None:
result['bookerJobNo'] = self.booker_job_no
if self.traveler_job_no is not None:
result['travelerJobNo'] = self.traveler_job_no
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('alipayTradeNo') is not None:
self.alipay_trade_no = m.get('alipayTradeNo')
if m.get('applyId') is not None:
self.apply_id = m.get('applyId')
if m.get('arrCity') is not None:
self.arr_city = m.get('arrCity')
if m.get('arrDate') is not None:
self.arr_date = m.get('arrDate')
if m.get('arrLocation') is not None:
self.arr_location = m.get('arrLocation')
if m.get('arrTime') is not None:
self.arr_time = m.get('arrTime')
if m.get('bookTime') is not None:
self.book_time = m.get('bookTime')
if m.get('bookerId') is not None:
self.booker_id = m.get('bookerId')
if m.get('bookerName') is not None:
self.booker_name = m.get('bookerName')
if m.get('businessCategory') is not None:
self.business_category = m.get('businessCategory')
if m.get('capitalDirection') is not None:
self.capital_direction = m.get('capitalDirection')
if m.get('carLevel') is not None:
self.car_level = m.get('carLevel')
if m.get('cascadeDepartment') is not None:
self.cascade_department = m.get('cascadeDepartment')
if m.get('costCenter') is not None:
self.cost_center = m.get('costCenter')
if m.get('costCenterNumber') is not None:
self.cost_center_number = m.get('costCenterNumber')
if m.get('coupon') is not None:
self.coupon = m.get('coupon')
if m.get('couponPrice') is not None:
self.coupon_price = m.get('couponPrice')
if m.get('department') is not None:
self.department = m.get('department')
if m.get('departmentId') is not None:
self.department_id = m.get('departmentId')
if m.get('deptCity') is not None:
self.dept_city = m.get('deptCity')
if m.get('deptDate') is not None:
self.dept_date = m.get('deptDate')
if m.get('deptLocation') is not None:
self.dept_location = m.get('deptLocation')
if m.get('deptTime') is not None:
self.dept_time = m.get('deptTime')
if m.get('estimateDriveDistance') is not None:
self.estimate_drive_distance = m.get('estimateDriveDistance')
if m.get('estimatePrice') is not None:
self.estimate_price = m.get('estimatePrice')
if m.get('feeType') is not None:
self.fee_type = m.get('feeType')
if m.get('index') is not None:
self.index = m.get('index')
if m.get('invoiceTitle') is not None:
self.invoice_title = m.get('invoiceTitle')
if m.get('memo') is not None:
self.memo = m.get('memo')
if m.get('orderId') is not None:
self.order_id = m.get('orderId')
if m.get('orderPrice') is not None:
self.order_price = m.get('orderPrice')
if m.get('overApplyId') is not None:
self.over_apply_id = m.get('overApplyId')
if m.get('personSettleFee') is not None:
self.person_settle_fee = m.get('personSettleFee')
if m.get('primaryId') is not None:
self.primary_id = m.get('primaryId')
if m.get('projectCode') is not None:
self.project_code = m.get('projectCode')
if m.get('projectName') is not None:
self.project_name = m.get('projectName')
if m.get('providerName') is not None:
self.provider_name = m.get('providerName')
if m.get('realDriveDistance') is not None:
self.real_drive_distance = m.get('realDriveDistance')
if m.get('realFromAddr') is not None:
self.real_from_addr = m.get('realFromAddr')
if m.get('realToAddr') is not None:
self.real_to_addr = m.get('realToAddr')
if m.get('serviceFee') is not None:
self.service_fee = m.get('serviceFee')
if m.get('settlementFee') is not None:
self.settlement_fee = m.get('settlementFee')
if m.get('settlementTime') is not None:
self.settlement_time = m.get('settlementTime')
if m.get('settlementType') is not None:
self.settlement_type = m.get('settlementType')
if m.get('specialOrder') is not None:
self.special_order = m.get('specialOrder')
if m.get('specialReason') is not None:
self.special_reason = m.get('specialReason')
if m.get('status') is not None:
self.status = m.get('status')
if m.get('travelerId') is not None:
self.traveler_id = m.get('travelerId')
if m.get('travelerName') is not None:
self.traveler_name = m.get('travelerName')
if m.get('userConfirmDesc') is not None:
self.user_confirm_desc = m.get('userConfirmDesc')
if m.get('bookerJobNo') is not None:
self.booker_job_no = m.get('bookerJobNo')
if m.get('travelerJobNo') is not None:
self.traveler_job_no = m.get('travelerJobNo')
return self
class BillSettementCarResponseBodyModule(TeaModel):
def __init__(
self,
category: int = None,
corp_id: str = None,
data_list: List[BillSettementCarResponseBodyModuleDataList] = None,
period_end: str = None,
period_start: str = None,
total_num: int = None,
):
        # Category
        self.category = category
        # Corp ID
        self.corp_id = corp_id
        # Data list
        self.data_list = data_list
        # Accounting update end date
        self.period_end = period_end
        # Accounting update start date
        self.period_start = period_start
        # Total count
        self.total_num = total_num
def validate(self):
if self.data_list:
for k in self.data_list:
if k:
k.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.category is not None:
result['category'] = self.category
if self.corp_id is not None:
result['corpId'] = self.corp_id
result['dataList'] = []
if self.data_list is not None:
for k in self.data_list:
result['dataList'].append(k.to_map() if k else None)
if self.period_end is not None:
result['periodEnd'] = self.period_end
if self.period_start is not None:
result['periodStart'] = self.period_start
if self.total_num is not None:
result['totalNum'] = self.total_num
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('category') is not None:
self.category = m.get('category')
if m.get('corpId') is not None:
self.corp_id = m.get('corpId')
self.data_list = []
if m.get('dataList') is not None:
for k in m.get('dataList'):
temp_model = BillSettementCarResponseBodyModuleDataList()
self.data_list.append(temp_model.from_map(k))
if m.get('periodEnd') is not None:
self.period_end = m.get('periodEnd')
if m.get('periodStart') is not None:
self.period_start = m.get('periodStart')
if m.get('totalNum') is not None:
self.total_num = m.get('totalNum')
return self
class BillSettementCarResponseBody(TeaModel):
def __init__(
self,
result_msg: str = None,
module: BillSettementCarResponseBodyModule = None,
success: bool = None,
result_code: int = None,
):
        # Result message
        self.result_msg = result_msg
        # Module payload
        self.module = module
        # Whether the call succeeded
        self.success = success
        # Result code
        self.result_code = result_code
def validate(self):
if self.module:
self.module.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.result_msg is not None:
result['resultMsg'] = self.result_msg
if self.module is not None:
result['module'] = self.module.to_map()
if self.success is not None:
result['success'] = self.success
if self.result_code is not None:
result['resultCode'] = self.result_code
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('resultMsg') is not None:
self.result_msg = m.get('resultMsg')
if m.get('module') is not None:
temp_model = BillSettementCarResponseBodyModule()
self.module = temp_model.from_map(m['module'])
if m.get('success') is not None:
self.success = m.get('success')
if m.get('resultCode') is not None:
self.result_code = m.get('resultCode')
return self
class BillSettementCarResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: BillSettementCarResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = BillSettementCarResponseBody()
self.body = temp_model.from_map(m['body'])
return self
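# --- Illustrative usage sketch (not part of the generated SDK) ---
# Shows how a raw response dict is rebuilt into models via from_map(); the
# payload keys mirror to_map() above, and all sample values are hypothetical.
def _example_parse_car_settlement():
    raw = {
        'resultMsg': 'ok',
        'success': True,
        'resultCode': 0,
        'module': {
            'category': 4,
            'corpId': 'corp-demo',
            'totalNum': 1,
            'dataList': [{'orderId': '123', 'settlementFee': 58.0}],
        },
    }
    body = BillSettementCarResponseBody().from_map(raw)
    for item in body.module.data_list:
        print(item.order_id, item.settlement_fee)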
class BillSettementBtripTrainHeaders(TeaModel):
def __init__(
self,
common_headers: Dict[str, str] = None,
x_acs_dingtalk_access_token: str = None,
):
self.common_headers = common_headers
self.x_acs_dingtalk_access_token = x_acs_dingtalk_access_token
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.common_headers is not None:
result['commonHeaders'] = self.common_headers
if self.x_acs_dingtalk_access_token is not None:
result['x-acs-dingtalk-access-token'] = self.x_acs_dingtalk_access_token
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('commonHeaders') is not None:
self.common_headers = m.get('commonHeaders')
if m.get('x-acs-dingtalk-access-token') is not None:
self.x_acs_dingtalk_access_token = m.get('x-acs-dingtalk-access-token')
return self
class BillSettementBtripTrainRequest(TeaModel):
def __init__(
self,
corp_id: str = None,
category: int = None,
page_size: int = None,
period_start: str = None,
page_number: int = None,
period_end: str = None,
):
self.corp_id = corp_id
self.category = category
self.page_size = page_size
self.period_start = period_start
self.page_number = page_number
self.period_end = period_end
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.corp_id is not None:
result['corpId'] = self.corp_id
if self.category is not None:
result['category'] = self.category
if self.page_size is not None:
result['pageSize'] = self.page_size
if self.period_start is not None:
result['periodStart'] = self.period_start
if self.page_number is not None:
result['pageNumber'] = self.page_number
if self.period_end is not None:
result['periodEnd'] = self.period_end
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('corpId') is not None:
self.corp_id = m.get('corpId')
if m.get('category') is not None:
self.category = m.get('category')
if m.get('pageSize') is not None:
self.page_size = m.get('pageSize')
if m.get('periodStart') is not None:
self.period_start = m.get('periodStart')
if m.get('pageNumber') is not None:
self.page_number = m.get('pageNumber')
if m.get('periodEnd') is not None:
self.period_end = m.get('periodEnd')
return self
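# --- Illustrative usage sketch (not part of the generated SDK) ---
# A minimal train settlement query: category 6 is the business-travel train
# category (see the category legend on BillSettementFlightRequest below),
# paging starts at 1, and the corp id is hypothetical.
def _example_train_settlement_request() -> dict:
    req = BillSettementBtripTrainRequest(
        corp_id='corp-demo',
        category=6,
        page_size=100,
        page_number=1,
        period_start='2021-03-01 00:00:00',
        period_end='2021-03-31 23:59:59',
    )
    return req.to_map()  # camelCase dict, ready to send as query parameters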
class BillSettementBtripTrainResponseBodyModuleDataList(TeaModel):
def __init__(
self,
alipay_trade_no: str = None,
apply_id: str = None,
arr_date: str = None,
arr_station: str = None,
arr_time: str = None,
book_time: str = None,
booker_id: str = None,
booker_name: str = None,
capital_direction: str = None,
cascade_department: str = None,
change_fee: float = None,
cost_center: str = None,
cost_center_number: str = None,
coupon: float = None,
department: str = None,
department_id: str = None,
dept_date: str = None,
dept_station: str = None,
dept_time: str = None,
fee_type: str = None,
index: str = None,
invoice_title: str = None,
order_id: str = None,
order_price: float = None,
over_apply_id: str = None,
primary_id: int = None,
project_code: str = None,
project_name: str = None,
refund_fee: float = None,
run_time: str = None,
seat_no: str = None,
seat_type: str = None,
service_fee: float = None,
settlement_fee: float = None,
settlement_time: str = None,
settlement_type: str = None,
status: int = None,
ticket_no: str = None,
ticket_price: float = None,
train_no: str = None,
train_type: str = None,
traveler_id: str = None,
traveler_name: str = None,
booker_job_no: str = None,
traveler_job_no: str = None,
voucher_type: int = None,
):
        # Alipay transaction serial number
        self.alipay_trade_no = alipay_trade_no
        # Approval ID
        self.apply_id = apply_id
        # Arrival date
        self.arr_date = arr_date
        # Arrival station
        self.arr_station = arr_station
        # Arrival time
        self.arr_time = arr_time
        # Booking time
        self.book_time = book_time
        # Booker user ID
        self.booker_id = booker_id
        # Booker name
        self.booker_name = booker_name
        # Capital direction
        self.capital_direction = capital_direction
        # Cascaded department
        self.cascade_department = cascade_department
        # Rebooking fee
        self.change_fee = change_fee
        # Cost center name
        self.cost_center = cost_center
        # Cost center number
        self.cost_center_number = cost_center_number
        # Discount rate
        self.coupon = coupon
        # Leaf-level department
        self.department = department
        # Department ID
        self.department_id = department_id
        # Departure date
        self.dept_date = dept_date
        # Departure station
        self.dept_station = dept_station
        # Departure time
        self.dept_time = dept_time
        # Fee type
        self.fee_type = fee_type
        # Index
        self.index = index
        # Invoice title
        self.invoice_title = invoice_title
        # Order ID
        self.order_id = order_id
        # Order amount
        self.order_price = order_price
        # Over-standard approval ID
        self.over_apply_id = over_apply_id
        # Primary key ID
        self.primary_id = primary_id
        # Project code
        self.project_code = project_code
        # Project name
        self.project_name = project_name
        # Refund fee
        self.refund_fee = refund_fee
        # Travel duration
        self.run_time = run_time
        # Seat number
        self.seat_no = seat_no
        # Seat class
        self.seat_type = seat_type
        # Service fee; only present for feeType 6007 and 6008
        self.service_fee = service_fee
        # Settlement amount
        self.settlement_fee = settlement_fee
        # Settlement time
        self.settlement_time = settlement_time
        # Settlement type
        self.settlement_type = settlement_type
        # Posting status
        self.status = status
        # Ticket number (as printed)
        self.ticket_no = ticket_no
        # Ticket price
        self.ticket_price = ticket_price
        # Train number
        self.train_no = train_no
        # Train type
        self.train_type = train_type
        # Traveler user ID
        self.traveler_id = traveler_id
        # Traveler name
        self.traveler_name = traveler_name
        # Booker job number
        self.booker_job_no = booker_job_no
        # Traveler job number
        self.traveler_job_no = traveler_job_no
        # Voucher (invoice) type
        self.voucher_type = voucher_type
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.alipay_trade_no is not None:
result['alipayTradeNo'] = self.alipay_trade_no
if self.apply_id is not None:
result['applyId'] = self.apply_id
if self.arr_date is not None:
result['arrDate'] = self.arr_date
if self.arr_station is not None:
result['arrStation'] = self.arr_station
if self.arr_time is not None:
result['arrTime'] = self.arr_time
if self.book_time is not None:
result['bookTime'] = self.book_time
if self.booker_id is not None:
result['bookerId'] = self.booker_id
if self.booker_name is not None:
result['bookerName'] = self.booker_name
if self.capital_direction is not None:
result['capitalDirection'] = self.capital_direction
if self.cascade_department is not None:
result['cascadeDepartment'] = self.cascade_department
if self.change_fee is not None:
result['changeFee'] = self.change_fee
if self.cost_center is not None:
result['costCenter'] = self.cost_center
if self.cost_center_number is not None:
result['costCenterNumber'] = self.cost_center_number
if self.coupon is not None:
result['coupon'] = self.coupon
if self.department is not None:
result['department'] = self.department
if self.department_id is not None:
result['departmentId'] = self.department_id
if self.dept_date is not None:
result['deptDate'] = self.dept_date
if self.dept_station is not None:
result['deptStation'] = self.dept_station
if self.dept_time is not None:
result['deptTime'] = self.dept_time
if self.fee_type is not None:
result['feeType'] = self.fee_type
if self.index is not None:
result['index'] = self.index
if self.invoice_title is not None:
result['invoiceTitle'] = self.invoice_title
if self.order_id is not None:
result['orderId'] = self.order_id
if self.order_price is not None:
result['orderPrice'] = self.order_price
if self.over_apply_id is not None:
result['overApplyId'] = self.over_apply_id
if self.primary_id is not None:
result['primaryId'] = self.primary_id
if self.project_code is not None:
result['projectCode'] = self.project_code
if self.project_name is not None:
result['projectName'] = self.project_name
if self.refund_fee is not None:
result['refundFee'] = self.refund_fee
if self.run_time is not None:
result['runTime'] = self.run_time
if self.seat_no is not None:
result['seatNo'] = self.seat_no
if self.seat_type is not None:
result['seatType'] = self.seat_type
if self.service_fee is not None:
result['serviceFee'] = self.service_fee
if self.settlement_fee is not None:
result['settlementFee'] = self.settlement_fee
if self.settlement_time is not None:
result['settlementTime'] = self.settlement_time
if self.settlement_type is not None:
result['settlementType'] = self.settlement_type
if self.status is not None:
result['status'] = self.status
if self.ticket_no is not None:
result['ticketNo'] = self.ticket_no
if self.ticket_price is not None:
result['ticketPrice'] = self.ticket_price
if self.train_no is not None:
result['trainNo'] = self.train_no
if self.train_type is not None:
result['trainType'] = self.train_type
if self.traveler_id is not None:
result['travelerId'] = self.traveler_id
if self.traveler_name is not None:
result['travelerName'] = self.traveler_name
if self.booker_job_no is not None:
result['bookerJobNo'] = self.booker_job_no
if self.traveler_job_no is not None:
result['travelerJobNo'] = self.traveler_job_no
if self.voucher_type is not None:
result['voucherType'] = self.voucher_type
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('alipayTradeNo') is not None:
self.alipay_trade_no = m.get('alipayTradeNo')
if m.get('applyId') is not None:
self.apply_id = m.get('applyId')
if m.get('arrDate') is not None:
self.arr_date = m.get('arrDate')
if m.get('arrStation') is not None:
self.arr_station = m.get('arrStation')
if m.get('arrTime') is not None:
self.arr_time = m.get('arrTime')
if m.get('bookTime') is not None:
self.book_time = m.get('bookTime')
if m.get('bookerId') is not None:
self.booker_id = m.get('bookerId')
if m.get('bookerName') is not None:
self.booker_name = m.get('bookerName')
if m.get('capitalDirection') is not None:
self.capital_direction = m.get('capitalDirection')
if m.get('cascadeDepartment') is not None:
self.cascade_department = m.get('cascadeDepartment')
if m.get('changeFee') is not None:
self.change_fee = m.get('changeFee')
if m.get('costCenter') is not None:
self.cost_center = m.get('costCenter')
if m.get('costCenterNumber') is not None:
self.cost_center_number = m.get('costCenterNumber')
if m.get('coupon') is not None:
self.coupon = m.get('coupon')
if m.get('department') is not None:
self.department = m.get('department')
if m.get('departmentId') is not None:
self.department_id = m.get('departmentId')
if m.get('deptDate') is not None:
self.dept_date = m.get('deptDate')
if m.get('deptStation') is not None:
self.dept_station = m.get('deptStation')
if m.get('deptTime') is not None:
self.dept_time = m.get('deptTime')
if m.get('feeType') is not None:
self.fee_type = m.get('feeType')
if m.get('index') is not None:
self.index = m.get('index')
if m.get('invoiceTitle') is not None:
self.invoice_title = m.get('invoiceTitle')
if m.get('orderId') is not None:
self.order_id = m.get('orderId')
if m.get('orderPrice') is not None:
self.order_price = m.get('orderPrice')
if m.get('overApplyId') is not None:
self.over_apply_id = m.get('overApplyId')
if m.get('primaryId') is not None:
self.primary_id = m.get('primaryId')
if m.get('projectCode') is not None:
self.project_code = m.get('projectCode')
if m.get('projectName') is not None:
self.project_name = m.get('projectName')
if m.get('refundFee') is not None:
self.refund_fee = m.get('refundFee')
if m.get('runTime') is not None:
self.run_time = m.get('runTime')
if m.get('seatNo') is not None:
self.seat_no = m.get('seatNo')
if m.get('seatType') is not None:
self.seat_type = m.get('seatType')
if m.get('serviceFee') is not None:
self.service_fee = m.get('serviceFee')
if m.get('settlementFee') is not None:
self.settlement_fee = m.get('settlementFee')
if m.get('settlementTime') is not None:
self.settlement_time = m.get('settlementTime')
if m.get('settlementType') is not None:
self.settlement_type = m.get('settlementType')
if m.get('status') is not None:
self.status = m.get('status')
if m.get('ticketNo') is not None:
self.ticket_no = m.get('ticketNo')
if m.get('ticketPrice') is not None:
self.ticket_price = m.get('ticketPrice')
if m.get('trainNo') is not None:
self.train_no = m.get('trainNo')
if m.get('trainType') is not None:
self.train_type = m.get('trainType')
if m.get('travelerId') is not None:
self.traveler_id = m.get('travelerId')
if m.get('travelerName') is not None:
self.traveler_name = m.get('travelerName')
if m.get('bookerJobNo') is not None:
self.booker_job_no = m.get('bookerJobNo')
if m.get('travelerJobNo') is not None:
self.traveler_job_no = m.get('travelerJobNo')
if m.get('voucherType') is not None:
self.voucher_type = m.get('voucherType')
return self
class BillSettementBtripTrainResponseBodyModule(TeaModel):
def __init__(
self,
category: int = None,
corp_id: str = None,
data_list: List[BillSettementBtripTrainResponseBodyModuleDataList] = None,
period_end: str = None,
period_start: str = None,
total_num: int = None,
):
        # Category
        self.category = category
        # Corp ID
        self.corp_id = corp_id
        # Data list
        self.data_list = data_list
        # End time of the accounting update period
        self.period_end = period_end
        # Start time of the accounting update period
        self.period_start = period_start
        # Total number of records
        self.total_num = total_num
def validate(self):
if self.data_list:
for k in self.data_list:
if k:
k.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.category is not None:
result['category'] = self.category
if self.corp_id is not None:
result['corpId'] = self.corp_id
result['dataList'] = []
if self.data_list is not None:
for k in self.data_list:
result['dataList'].append(k.to_map() if k else None)
if self.period_end is not None:
result['periodEnd'] = self.period_end
if self.period_start is not None:
result['periodStart'] = self.period_start
if self.total_num is not None:
result['totalNum'] = self.total_num
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('category') is not None:
self.category = m.get('category')
if m.get('corpId') is not None:
self.corp_id = m.get('corpId')
self.data_list = []
if m.get('dataList') is not None:
for k in m.get('dataList'):
temp_model = BillSettementBtripTrainResponseBodyModuleDataList()
self.data_list.append(temp_model.from_map(k))
if m.get('periodEnd') is not None:
self.period_end = m.get('periodEnd')
if m.get('periodStart') is not None:
self.period_start = m.get('periodStart')
if m.get('totalNum') is not None:
self.total_num = m.get('totalNum')
return self
class BillSettementBtripTrainResponseBody(TeaModel):
def __init__(
self,
result_msg: str = None,
module: BillSettementBtripTrainResponseBodyModule = None,
success: bool = None,
result_code: int = None,
):
        # Result message
        self.result_msg = result_msg
        # Module payload
        self.module = module
        # Whether the call succeeded
        self.success = success
        # Result code
        self.result_code = result_code
def validate(self):
if self.module:
self.module.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.result_msg is not None:
result['resultMsg'] = self.result_msg
if self.module is not None:
result['module'] = self.module.to_map()
if self.success is not None:
result['success'] = self.success
if self.result_code is not None:
result['resultCode'] = self.result_code
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('resultMsg') is not None:
self.result_msg = m.get('resultMsg')
if m.get('module') is not None:
temp_model = BillSettementBtripTrainResponseBodyModule()
self.module = temp_model.from_map(m['module'])
if m.get('success') is not None:
self.success = m.get('success')
if m.get('resultCode') is not None:
self.result_code = m.get('resultCode')
return self
class BillSettementBtripTrainResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: BillSettementBtripTrainResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = BillSettementBtripTrainResponseBody()
self.body = temp_model.from_map(m['body'])
return self
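# --- Illustrative round-trip sketch (not part of the generated SDK) ---
# to_map() and from_map() are symmetric for map-derived models: serializing
# and re-parsing should yield a model with the same map form. Shown on the
# train settlement response as an example.
def _example_train_roundtrip(body: BillSettementBtripTrainResponseBody) -> bool:
    rebuilt = BillSettementBtripTrainResponseBody().from_map(body.to_map())
    return rebuilt.to_map() == body.to_map()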
class SyncExceedApplyHeaders(TeaModel):
def __init__(
self,
common_headers: Dict[str, str] = None,
x_acs_dingtalk_access_token: str = None,
):
self.common_headers = common_headers
self.x_acs_dingtalk_access_token = x_acs_dingtalk_access_token
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.common_headers is not None:
result['commonHeaders'] = self.common_headers
if self.x_acs_dingtalk_access_token is not None:
result['x-acs-dingtalk-access-token'] = self.x_acs_dingtalk_access_token
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('commonHeaders') is not None:
self.common_headers = m.get('commonHeaders')
if m.get('x-acs-dingtalk-access-token') is not None:
self.x_acs_dingtalk_access_token = m.get('x-acs-dingtalk-access-token')
return self
class SyncExceedApplyRequest(TeaModel):
def __init__(
self,
remark: str = None,
apply_id: str = None,
corp_id: str = None,
thirdparty_flow_id: str = None,
user_id: str = None,
status: int = None,
):
        # Approval comment
        self.remark = remark
        # Business-travel over-standard approval ID
        self.apply_id = apply_id
        # Corp ID
        self.corp_id = corp_id
        # Third-party process instance ID
        self.thirdparty_flow_id = thirdparty_flow_id
        # User ID
        self.user_id = user_id
        # Approval status: 1 = approved, 2 = rejected
        self.status = status
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.remark is not None:
result['remark'] = self.remark
if self.apply_id is not None:
result['applyId'] = self.apply_id
if self.corp_id is not None:
result['corpId'] = self.corp_id
if self.thirdparty_flow_id is not None:
result['thirdpartyFlowId'] = self.thirdparty_flow_id
if self.user_id is not None:
result['userId'] = self.user_id
if self.status is not None:
result['status'] = self.status
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('remark') is not None:
self.remark = m.get('remark')
if m.get('applyId') is not None:
self.apply_id = m.get('applyId')
if m.get('corpId') is not None:
self.corp_id = m.get('corpId')
if m.get('thirdpartyFlowId') is not None:
self.thirdparty_flow_id = m.get('thirdpartyFlowId')
if m.get('userId') is not None:
self.user_id = m.get('userId')
if m.get('status') is not None:
self.status = m.get('status')
return self
class SyncExceedApplyResponseBody(TeaModel):
def __init__(
self,
module: bool = None,
):
        # Whether the sync succeeded
        self.module = module
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.module is not None:
result['module'] = self.module
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('module') is not None:
self.module = m.get('module')
return self
class SyncExceedApplyResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: SyncExceedApplyResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = SyncExceedApplyResponseBody()
self.body = temp_model.from_map(m['body'])
return self
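# --- Illustrative usage sketch (not part of the generated SDK) ---
# Pushing an approval result back to the business-travel service: status 1
# approves the over-standard apply, status 2 rejects it. All ids below are
# hypothetical placeholders.
def _example_sync_exceed_apply() -> dict:
    req = SyncExceedApplyRequest(
        corp_id='corp-demo',
        apply_id='12345',
        thirdparty_flow_id='flow-001',
        user_id='emp-001',
        status=1,  # 1 = approved, 2 = rejected
        remark='Approved by manager',
    )
    return req.to_map()  # camelCase dict, ready to send as the request body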
class AddCityCarApplyHeaders(TeaModel):
def __init__(
self,
common_headers: Dict[str, str] = None,
x_acs_dingtalk_access_token: str = None,
):
self.common_headers = common_headers
self.x_acs_dingtalk_access_token = x_acs_dingtalk_access_token
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.common_headers is not None:
result['commonHeaders'] = self.common_headers
if self.x_acs_dingtalk_access_token is not None:
result['x-acs-dingtalk-access-token'] = self.x_acs_dingtalk_access_token
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('commonHeaders') is not None:
self.common_headers = m.get('commonHeaders')
if m.get('x-acs-dingtalk-access-token') is not None:
self.x_acs_dingtalk_access_token = m.get('x-acs-dingtalk-access-token')
return self
class AddCityCarApplyRequest(TeaModel):
def __init__(
self,
cause: str = None,
city: str = None,
corp_id: str = None,
date: str = None,
project_code: str = None,
project_name: str = None,
status: int = None,
third_part_apply_id: str = None,
third_part_cost_center_id: str = None,
third_part_invoice_id: str = None,
times_total: int = None,
times_type: int = None,
times_used: int = None,
title: str = None,
user_id: str = None,
ding_suite_key: str = None,
ding_corp_id: str = None,
ding_token_grant_type: int = None,
finished_date: str = None,
):
        # Business trip reason
        self.cause = cause
        # City where the car is used
        self.city = city
        # Third-party corp ID
        self.corp_id = corp_id
        # Car-use date, controlled at day granularity; e.g. 2021-03-18 20:26:56 means the car may be used on 2021-03-18. For multi-day use, combine with the finishedDate parameter.
        self.date = date
        # Project code associated with the approval
        self.project_code = project_code
        # Project name associated with the approval
        self.project_name = project_name
        # Approval status: 0 = applied, 1 = approved, 2 = rejected
        self.status = status
        # Third-party approval ID
        self.third_part_apply_id = third_part_apply_id
        # Third-party cost center ID associated with the approval
        self.third_part_cost_center_id = third_part_cost_center_id
        # Third-party invoice title ID associated with the approval
        self.third_part_invoice_id = third_part_invoice_id
        # Total number of times the approval can be used
        self.times_total = times_total
        # Usage-count limit type: 1 = unlimited, 2 = user-specified count, 3 = admin-limited count. If the company does not need to limit usage, pass 1 (unlimited) and set both times_total and times_used to 0.
        self.times_type = times_type
        # Number of times the approval has already been used
        self.times_used = times_used
        # Approval title
        self.title = title
        # Third-party employee ID who initiated the approval
        self.user_id = user_id
        # suiteKey
        self.ding_suite_key = ding_suite_key
        # account
        self.ding_corp_id = ding_corp_id
        # tokenGrantType
        self.ding_token_grant_type = ding_token_grant_type
        # Car-use end date, controlled at day granularity; e.g. date = 2021-03-18 20:26:56 with finished_date = 2021-03-30 20:26:56 means the car may be used from 2021-03-18 (inclusive) to 2021-03-30 (inclusive). If omitted, date is used as the end date.
        self.finished_date = finished_date
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.cause is not None:
result['cause'] = self.cause
if self.city is not None:
result['city'] = self.city
if self.corp_id is not None:
result['corpId'] = self.corp_id
if self.date is not None:
result['date'] = self.date
if self.project_code is not None:
result['projectCode'] = self.project_code
if self.project_name is not None:
result['projectName'] = self.project_name
if self.status is not None:
result['status'] = self.status
if self.third_part_apply_id is not None:
result['thirdPartApplyId'] = self.third_part_apply_id
if self.third_part_cost_center_id is not None:
result['thirdPartCostCenterId'] = self.third_part_cost_center_id
if self.third_part_invoice_id is not None:
result['thirdPartInvoiceId'] = self.third_part_invoice_id
if self.times_total is not None:
result['timesTotal'] = self.times_total
if self.times_type is not None:
result['timesType'] = self.times_type
if self.times_used is not None:
result['timesUsed'] = self.times_used
if self.title is not None:
result['title'] = self.title
if self.user_id is not None:
result['userId'] = self.user_id
if self.ding_suite_key is not None:
result['dingSuiteKey'] = self.ding_suite_key
if self.ding_corp_id is not None:
result['dingCorpId'] = self.ding_corp_id
if self.ding_token_grant_type is not None:
result['dingTokenGrantType'] = self.ding_token_grant_type
if self.finished_date is not None:
result['finishedDate'] = self.finished_date
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('cause') is not None:
self.cause = m.get('cause')
if m.get('city') is not None:
self.city = m.get('city')
if m.get('corpId') is not None:
self.corp_id = m.get('corpId')
if m.get('date') is not None:
self.date = m.get('date')
if m.get('projectCode') is not None:
self.project_code = m.get('projectCode')
if m.get('projectName') is not None:
self.project_name = m.get('projectName')
if m.get('status') is not None:
self.status = m.get('status')
if m.get('thirdPartApplyId') is not None:
self.third_part_apply_id = m.get('thirdPartApplyId')
if m.get('thirdPartCostCenterId') is not None:
self.third_part_cost_center_id = m.get('thirdPartCostCenterId')
if m.get('thirdPartInvoiceId') is not None:
self.third_part_invoice_id = m.get('thirdPartInvoiceId')
if m.get('timesTotal') is not None:
self.times_total = m.get('timesTotal')
if m.get('timesType') is not None:
self.times_type = m.get('timesType')
if m.get('timesUsed') is not None:
self.times_used = m.get('timesUsed')
if m.get('title') is not None:
self.title = m.get('title')
if m.get('userId') is not None:
self.user_id = m.get('userId')
if m.get('dingSuiteKey') is not None:
self.ding_suite_key = m.get('dingSuiteKey')
if m.get('dingCorpId') is not None:
self.ding_corp_id = m.get('dingCorpId')
if m.get('dingTokenGrantType') is not None:
self.ding_token_grant_type = m.get('dingTokenGrantType')
if m.get('finishedDate') is not None:
self.finished_date = m.get('finishedDate')
return self
class AddCityCarApplyResponseBody(TeaModel):
def __init__(
self,
apply_id: int = None,
):
        # Internal business-travel approval ID
        self.apply_id = apply_id
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.apply_id is not None:
result['applyId'] = self.apply_id
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('applyId') is not None:
self.apply_id = m.get('applyId')
return self
class AddCityCarApplyResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: AddCityCarApplyResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = AddCityCarApplyResponseBody()
self.body = temp_model.from_map(m['body'])
return self
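# --- Illustrative usage sketch (not part of the generated SDK) ---
# Building an AddCityCarApplyRequest for a company that does not limit how
# often an approval can be used: per the field notes above, pass timesType=1
# and zero for both timesTotal and timesUsed. All ids are hypothetical.
def _example_add_city_car_apply() -> dict:
    req = AddCityCarApplyRequest(
        corp_id='corp-demo',
        user_id='emp-001',
        third_part_apply_id='apply-001',
        cause='Client visit',
        city='Hangzhou',
        date='2021-03-18 20:26:56',
        finished_date='2021-03-30 20:26:56',  # car usable 03-18 through 03-30
        status=1,
        times_type=1,
        times_total=0,
        times_used=0,
        title='City car for client visit',
    )
    return req.to_map()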
class BillSettementFlightHeaders(TeaModel):
def __init__(
self,
common_headers: Dict[str, str] = None,
x_acs_dingtalk_access_token: str = None,
):
self.common_headers = common_headers
self.x_acs_dingtalk_access_token = x_acs_dingtalk_access_token
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.common_headers is not None:
result['commonHeaders'] = self.common_headers
if self.x_acs_dingtalk_access_token is not None:
result['x-acs-dingtalk-access-token'] = self.x_acs_dingtalk_access_token
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('commonHeaders') is not None:
self.common_headers = m.get('commonHeaders')
if m.get('x-acs-dingtalk-access-token') is not None:
self.x_acs_dingtalk_access_token = m.get('x-acs-dingtalk-access-token')
return self
class BillSettementFlightRequest(TeaModel):
def __init__(
self,
corp_id: str = None,
category: int = None,
page_size: int = None,
period_start: str = None,
page_number: int = None,
period_end: str = None,
):
        # Third-party corp ID
        self.corp_id = corp_id
        # Category: 1 = flight, 2 = hotel, 4 = car, 6 = business-travel train ticket
        self.category = category
        # Page size; default 100, maximum 500
        self.page_size = page_size
        # Start date of the accounting update period
        self.period_start = period_start
        # Page number, starting from 1
        self.page_number = page_number
        # End date of the accounting update period
        self.period_end = period_end
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.corp_id is not None:
result['corpId'] = self.corp_id
if self.category is not None:
result['category'] = self.category
if self.page_size is not None:
result['pageSize'] = self.page_size
if self.period_start is not None:
result['periodStart'] = self.period_start
if self.page_number is not None:
result['pageNumber'] = self.page_number
if self.period_end is not None:
result['periodEnd'] = self.period_end
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('corpId') is not None:
self.corp_id = m.get('corpId')
if m.get('category') is not None:
self.category = m.get('category')
if m.get('pageSize') is not None:
self.page_size = m.get('pageSize')
if m.get('periodStart') is not None:
self.period_start = m.get('periodStart')
if m.get('pageNumber') is not None:
self.page_number = m.get('pageNumber')
if m.get('periodEnd') is not None:
self.period_end = m.get('periodEnd')
return self
class BillSettementFlightResponseBodyModuleDataList(TeaModel):
def __init__(
self,
advance_day: int = None,
airline_corp_code: str = None,
airline_corp_name: str = None,
alipay_trade_no: str = None,
apply_id: str = None,
arr_airport_code: str = None,
arr_city: str = None,
arr_date: str = None,
arr_station: str = None,
arr_time: str = None,
book_time: str = None,
booker_id: str = None,
booker_name: str = None,
btrip_coupon_fee: float = None,
build_fee: float = None,
cabin: str = None,
cabin_class: str = None,
capital_direction: str = None,
cascade_department: str = None,
change_fee: float = None,
corp_pay_order_fee: float = None,
cost_center: str = None,
cost_center_number: str = None,
coupon: float = None,
dep_airport_code: str = None,
department: str = None,
department_id: str = None,
dept_city: str = None,
dept_date: str = None,
dept_station: str = None,
dept_time: str = None,
discount: str = None,
fee_type: str = None,
flight_no: str = None,
index: str = None,
insurance_fee: float = None,
invoice_title: str = None,
itinerary_num: str = None,
itinerary_price: float = None,
most_difference_dept_time: str = None,
most_difference_discount: float = None,
most_difference_flight_no: str = None,
most_difference_price: float = None,
most_difference_reason: str = None,
most_price: float = None,
negotiation_coupon_fee: float = None,
oil_fee: float = None,
order_id: str = None,
over_apply_id: str = None,
primary_id: int = None,
project_code: str = None,
project_name: str = None,
refund_fee: float = None,
refund_upgrade_cost: float = None,
repeat_refund: str = None,
seal_price: float = None,
service_fee: float = None,
settlement_fee: float = None,
settlement_time: str = None,
settlement_type: str = None,
status: int = None,
ticket_id: str = None,
traveler_id: str = None,
traveler_name: str = None,
upgrade_cost: float = None,
booker_job_no: str = None,
traveler_job_no: str = None,
):
        # Days booked in advance
        self.advance_day = advance_day
        # Airline code
        self.airline_corp_code = airline_corp_code
        # Airline name
        self.airline_corp_name = airline_corp_name
        # Alipay transaction serial number
        self.alipay_trade_no = alipay_trade_no
        # Approval ID
        self.apply_id = apply_id
        # Arrival airport code
        self.arr_airport_code = arr_airport_code
        # Arrival city
        self.arr_city = arr_city
        # Arrival date
        self.arr_date = arr_date
        # Arrival airport
        self.arr_station = arr_station
        # Arrival time
        self.arr_time = arr_time
        # Booking time
        self.book_time = book_time
        # Booker user ID
        self.booker_id = booker_id
        # Booker name
        self.booker_name = booker_name
        # Business-travel coupon amount
        self.btrip_coupon_fee = btrip_coupon_fee
        # Airport construction fee
        self.build_fee = build_fee
        # Cabin
        self.cabin = cabin
        # Cabin class code
        self.cabin_class = cabin_class
        # Capital direction
        self.capital_direction = capital_direction
        # Cascaded department
        self.cascade_department = cascade_department
        # Rebooking fee
        self.change_fee = change_fee
        # Order amount (corporate pay)
        self.corp_pay_order_fee = corp_pay_order_fee
        # Cost center name
        self.cost_center = cost_center
        # Cost center number
        self.cost_center_number = cost_center_number
        # Coupon
        self.coupon = coupon
        # Departure airport code
        self.dep_airport_code = dep_airport_code
        # Leaf-level department
        self.department = department
        # Department ID
        self.department_id = department_id
        # Departure city
        self.dept_city = dept_city
        # Departure date
        self.dept_date = dept_date
        # Departure airport
        self.dept_station = dept_station
        # Departure time
        self.dept_time = dept_time
        # Discount rate
        self.discount = discount
        # Fee type
        self.fee_type = fee_type
        # Flight number
        self.flight_no = flight_no
        # Index
        self.index = index
        # Insurance fee
        self.insurance_fee = insurance_fee
        # Invoice title
        self.invoice_title = invoice_title
        # Itinerary print number
        self.itinerary_num = itinerary_num
        # Itinerary amount
        self.itinerary_price = itinerary_price
        # Lowest-price reminder (departure time)
        self.most_difference_dept_time = most_difference_dept_time
        # Lowest-price reminder (discount)
        self.most_difference_discount = most_difference_discount
        # Lowest-price reminder (flight number)
        self.most_difference_flight_no = most_difference_flight_no
        # Lowest-price reminder (difference from the lowest price)
        self.most_difference_price = most_difference_price
        # Reason for not choosing the lowest price
        self.most_difference_reason = most_difference_reason
        # Lowest flight price
        self.most_price = most_price
        # Negotiated-price discount amount
        self.negotiation_coupon_fee = negotiation_coupon_fee
        # Fuel surcharge
        self.oil_fee = oil_fee
        # Order ID
        self.order_id = order_id
        # Over-standard approval ID
        self.over_apply_id = over_apply_id
        # Primary key ID
        self.primary_id = primary_id
        # Project code
        self.project_code = project_code
        # Project name
        self.project_name = project_name
        # Refund fee
        self.refund_fee = refund_fee
        # Refund fee for a rebooked ticket
        self.refund_upgrade_cost = refund_upgrade_cost
        # Whether this is a duplicate refund
        self.repeat_refund = repeat_refund
        # Sale price
        self.seal_price = seal_price
        # Service fee; only present for feeType 11001 and 11002
        self.service_fee = service_fee
        # Settlement amount
        self.settlement_fee = settlement_fee
        # Settlement time
        self.settlement_time = settlement_time
        # Settlement type
        self.settlement_type = settlement_type
        # Posting status
        self.status = status
        # Itinerary number
        self.ticket_id = ticket_id
        # Traveler user ID
        self.traveler_id = traveler_id
        # Traveler name
        self.traveler_name = traveler_name
        # Rebooking price difference
        self.upgrade_cost = upgrade_cost
        # Booker job number
        self.booker_job_no = booker_job_no
        # Traveler job number
        self.traveler_job_no = traveler_job_no
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.advance_day is not None:
result['advanceDay'] = self.advance_day
if self.airline_corp_code is not None:
result['airlineCorpCode'] = self.airline_corp_code
if self.airline_corp_name is not None:
result['airlineCorpName'] = self.airline_corp_name
if self.alipay_trade_no is not None:
result['alipayTradeNo'] = self.alipay_trade_no
if self.apply_id is not None:
result['applyId'] = self.apply_id
if self.arr_airport_code is not None:
result['arrAirportCode'] = self.arr_airport_code
if self.arr_city is not None:
result['arrCity'] = self.arr_city
if self.arr_date is not None:
result['arrDate'] = self.arr_date
if self.arr_station is not None:
result['arrStation'] = self.arr_station
if self.arr_time is not None:
result['arrTime'] = self.arr_time
if self.book_time is not None:
result['bookTime'] = self.book_time
if self.booker_id is not None:
result['bookerId'] = self.booker_id
if self.booker_name is not None:
result['bookerName'] = self.booker_name
if self.btrip_coupon_fee is not None:
result['btripCouponFee'] = self.btrip_coupon_fee
if self.build_fee is not None:
result['buildFee'] = self.build_fee
if self.cabin is not None:
result['cabin'] = self.cabin
if self.cabin_class is not None:
result['cabinClass'] = self.cabin_class
if self.capital_direction is not None:
result['capitalDirection'] = self.capital_direction
if self.cascade_department is not None:
result['cascadeDepartment'] = self.cascade_department
if self.change_fee is not None:
result['changeFee'] = self.change_fee
if self.corp_pay_order_fee is not None:
result['corpPayOrderFee'] = self.corp_pay_order_fee
if self.cost_center is not None:
result['costCenter'] = self.cost_center
if self.cost_center_number is not None:
result['costCenterNumber'] = self.cost_center_number
if self.coupon is not None:
result['coupon'] = self.coupon
if self.dep_airport_code is not None:
result['depAirportCode'] = self.dep_airport_code
if self.department is not None:
result['department'] = self.department
if self.department_id is not None:
result['departmentId'] = self.department_id
if self.dept_city is not None:
result['deptCity'] = self.dept_city
if self.dept_date is not None:
result['deptDate'] = self.dept_date
if self.dept_station is not None:
result['deptStation'] = self.dept_station
if self.dept_time is not None:
result['deptTime'] = self.dept_time
if self.discount is not None:
result['discount'] = self.discount
if self.fee_type is not None:
result['feeType'] = self.fee_type
if self.flight_no is not None:
result['flightNo'] = self.flight_no
if self.index is not None:
result['index'] = self.index
if self.insurance_fee is not None:
result['insuranceFee'] = self.insurance_fee
if self.invoice_title is not None:
result['invoiceTitle'] = self.invoice_title
if self.itinerary_num is not None:
result['itineraryNum'] = self.itinerary_num
if self.itinerary_price is not None:
result['itineraryPrice'] = self.itinerary_price
if self.most_difference_dept_time is not None:
result['mostDifferenceDeptTime'] = self.most_difference_dept_time
if self.most_difference_discount is not None:
result['mostDifferenceDiscount'] = self.most_difference_discount
if self.most_difference_flight_no is not None:
result['mostDifferenceFlightNo'] = self.most_difference_flight_no
if self.most_difference_price is not None:
result['mostDifferencePrice'] = self.most_difference_price
if self.most_difference_reason is not None:
result['mostDifferenceReason'] = self.most_difference_reason
if self.most_price is not None:
result['mostPrice'] = self.most_price
if self.negotiation_coupon_fee is not None:
result['negotiationCouponFee'] = self.negotiation_coupon_fee
if self.oil_fee is not None:
result['oilFee'] = self.oil_fee
if self.order_id is not None:
result['orderId'] = self.order_id
if self.over_apply_id is not None:
result['overApplyId'] = self.over_apply_id
if self.primary_id is not None:
result['primaryId'] = self.primary_id
if self.project_code is not None:
result['projectCode'] = self.project_code
if self.project_name is not None:
result['projectName'] = self.project_name
if self.refund_fee is not None:
result['refundFee'] = self.refund_fee
if self.refund_upgrade_cost is not None:
result['refundUpgradeCost'] = self.refund_upgrade_cost
if self.repeat_refund is not None:
result['repeatRefund'] = self.repeat_refund
if self.seal_price is not None:
result['sealPrice'] = self.seal_price
if self.service_fee is not None:
result['serviceFee'] = self.service_fee
if self.settlement_fee is not None:
result['settlementFee'] = self.settlement_fee
if self.settlement_time is not None:
result['settlementTime'] = self.settlement_time
if self.settlement_type is not None:
result['settlementType'] = self.settlement_type
if self.status is not None:
result['status'] = self.status
if self.ticket_id is not None:
result['ticketId'] = self.ticket_id
if self.traveler_id is not None:
result['travelerId'] = self.traveler_id
if self.traveler_name is not None:
result['travelerName'] = self.traveler_name
if self.upgrade_cost is not None:
result['upgradeCost'] = self.upgrade_cost
if self.booker_job_no is not None:
result['bookerJobNo'] = self.booker_job_no
if self.traveler_job_no is not None:
result['travelerJobNo'] = self.traveler_job_no
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('advanceDay') is not None:
self.advance_day = m.get('advanceDay')
if m.get('airlineCorpCode') is not None:
self.airline_corp_code = m.get('airlineCorpCode')
if m.get('airlineCorpName') is not None:
self.airline_corp_name = m.get('airlineCorpName')
if m.get('alipayTradeNo') is not None:
self.alipay_trade_no = m.get('alipayTradeNo')
if m.get('applyId') is not None:
self.apply_id = m.get('applyId')
if m.get('arrAirportCode') is not None:
self.arr_airport_code = m.get('arrAirportCode')
if m.get('arrCity') is not None:
self.arr_city = m.get('arrCity')
if m.get('arrDate') is not None:
self.arr_date = m.get('arrDate')
if m.get('arrStation') is not None:
self.arr_station = m.get('arrStation')
if m.get('arrTime') is not None:
self.arr_time = m.get('arrTime')
if m.get('bookTime') is not None:
self.book_time = m.get('bookTime')
if m.get('bookerId') is not None:
self.booker_id = m.get('bookerId')
if m.get('bookerName') is not None:
self.booker_name = m.get('bookerName')
if m.get('btripCouponFee') is not None:
self.btrip_coupon_fee = m.get('btripCouponFee')
if m.get('buildFee') is not None:
self.build_fee = m.get('buildFee')
if m.get('cabin') is not None:
self.cabin = m.get('cabin')
if m.get('cabinClass') is not None:
self.cabin_class = m.get('cabinClass')
if m.get('capitalDirection') is not None:
self.capital_direction = m.get('capitalDirection')
if m.get('cascadeDepartment') is not None:
self.cascade_department = m.get('cascadeDepartment')
if m.get('changeFee') is not None:
self.change_fee = m.get('changeFee')
if m.get('corpPayOrderFee') is not None:
self.corp_pay_order_fee = m.get('corpPayOrderFee')
if m.get('costCenter') is not None:
self.cost_center = m.get('costCenter')
if m.get('costCenterNumber') is not None:
self.cost_center_number = m.get('costCenterNumber')
if m.get('coupon') is not None:
self.coupon = m.get('coupon')
if m.get('depAirportCode') is not None:
self.dep_airport_code = m.get('depAirportCode')
if m.get('department') is not None:
self.department = m.get('department')
if m.get('departmentId') is not None:
self.department_id = m.get('departmentId')
if m.get('deptCity') is not None:
self.dept_city = m.get('deptCity')
if m.get('deptDate') is not None:
self.dept_date = m.get('deptDate')
if m.get('deptStation') is not None:
self.dept_station = m.get('deptStation')
if m.get('deptTime') is not None:
self.dept_time = m.get('deptTime')
if m.get('discount') is not None:
self.discount = m.get('discount')
if m.get('feeType') is not None:
self.fee_type = m.get('feeType')
if m.get('flightNo') is not None:
self.flight_no = m.get('flightNo')
if m.get('index') is not None:
self.index = m.get('index')
if m.get('insuranceFee') is not None:
self.insurance_fee = m.get('insuranceFee')
if m.get('invoiceTitle') is not None:
self.invoice_title = m.get('invoiceTitle')
if m.get('itineraryNum') is not None:
self.itinerary_num = m.get('itineraryNum')
if m.get('itineraryPrice') is not None:
self.itinerary_price = m.get('itineraryPrice')
if m.get('mostDifferenceDeptTime') is not None:
self.most_difference_dept_time = m.get('mostDifferenceDeptTime')
if m.get('mostDifferenceDiscount') is not None:
self.most_difference_discount = m.get('mostDifferenceDiscount')
if m.get('mostDifferenceFlightNo') is not None:
self.most_difference_flight_no = m.get('mostDifferenceFlightNo')
if m.get('mostDifferencePrice') is not None:
self.most_difference_price = m.get('mostDifferencePrice')
if m.get('mostDifferenceReason') is not None:
self.most_difference_reason = m.get('mostDifferenceReason')
if m.get('mostPrice') is not None:
self.most_price = m.get('mostPrice')
if m.get('negotiationCouponFee') is not None:
self.negotiation_coupon_fee = m.get('negotiationCouponFee')
if m.get('oilFee') is not None:
self.oil_fee = m.get('oilFee')
if m.get('orderId') is not None:
self.order_id = m.get('orderId')
if m.get('overApplyId') is not None:
self.over_apply_id = m.get('overApplyId')
if m.get('primaryId') is not None:
self.primary_id = m.get('primaryId')
if m.get('projectCode') is not None:
self.project_code = m.get('projectCode')
if m.get('projectName') is not None:
self.project_name = m.get('projectName')
if m.get('refundFee') is not None:
self.refund_fee = m.get('refundFee')
if m.get('refundUpgradeCost') is not None:
self.refund_upgrade_cost = m.get('refundUpgradeCost')
if m.get('repeatRefund') is not None:
self.repeat_refund = m.get('repeatRefund')
if m.get('sealPrice') is not None:
self.seal_price = m.get('sealPrice')
if m.get('serviceFee') is not None:
self.service_fee = m.get('serviceFee')
if m.get('settlementFee') is not None:
self.settlement_fee = m.get('settlementFee')
if m.get('settlementTime') is not None:
self.settlement_time = m.get('settlementTime')
if m.get('settlementType') is not None:
self.settlement_type = m.get('settlementType')
if m.get('status') is not None:
self.status = m.get('status')
if m.get('ticketId') is not None:
self.ticket_id = m.get('ticketId')
if m.get('travelerId') is not None:
self.traveler_id = m.get('travelerId')
if m.get('travelerName') is not None:
self.traveler_name = m.get('travelerName')
if m.get('upgradeCost') is not None:
self.upgrade_cost = m.get('upgradeCost')
if m.get('bookerJobNo') is not None:
self.booker_job_no = m.get('bookerJobNo')
if m.get('travelerJobNo') is not None:
self.traveler_job_no = m.get('travelerJobNo')
return self
class BillSettementFlightResponseBodyModule(TeaModel):
def __init__(
self,
category: int = None,
corp_id: str = None,
data_list: List[BillSettementFlightResponseBodyModuleDataList] = None,
period_end: str = None,
period_start: str = None,
total_num: int = None,
):
        # Category
        self.category = category
        # Corp ID
        self.corp_id = corp_id
        # Data list
        self.data_list = data_list
        # End date of the accounting update period
        self.period_end = period_end
        # Start date of the accounting update period
        self.period_start = period_start
        # Total number of records
        self.total_num = total_num
def validate(self):
if self.data_list:
for k in self.data_list:
if k:
k.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.category is not None:
result['category'] = self.category
if self.corp_id is not None:
result['corpId'] = self.corp_id
result['dataList'] = []
if self.data_list is not None:
for k in self.data_list:
result['dataList'].append(k.to_map() if k else None)
if self.period_end is not None:
result['periodEnd'] = self.period_end
if self.period_start is not None:
result['periodStart'] = self.period_start
if self.total_num is not None:
result['totalNum'] = self.total_num
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('category') is not None:
self.category = m.get('category')
if m.get('corpId') is not None:
self.corp_id = m.get('corpId')
self.data_list = []
if m.get('dataList') is not None:
for k in m.get('dataList'):
temp_model = BillSettementFlightResponseBodyModuleDataList()
self.data_list.append(temp_model.from_map(k))
if m.get('periodEnd') is not None:
self.period_end = m.get('periodEnd')
if m.get('periodStart') is not None:
self.period_start = m.get('periodStart')
if m.get('totalNum') is not None:
self.total_num = m.get('totalNum')
return self
class BillSettementFlightResponseBody(TeaModel):
def __init__(
self,
result_msg: str = None,
module: BillSettementFlightResponseBodyModule = None,
success: bool = None,
result_code: int = None,
):
        # Result message
        self.result_msg = result_msg
        # Module payload
        self.module = module
        # Whether the call succeeded
        self.success = success
        # Result code
        self.result_code = result_code
def validate(self):
if self.module:
self.module.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.result_msg is not None:
result['resultMsg'] = self.result_msg
if self.module is not None:
result['module'] = self.module.to_map()
if self.success is not None:
result['success'] = self.success
if self.result_code is not None:
result['resultCode'] = self.result_code
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('resultMsg') is not None:
self.result_msg = m.get('resultMsg')
if m.get('module') is not None:
temp_model = BillSettementFlightResponseBodyModule()
self.module = temp_model.from_map(m['module'])
if m.get('success') is not None:
self.success = m.get('success')
if m.get('resultCode') is not None:
self.result_code = m.get('resultCode')
return self
class BillSettementFlightResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: BillSettementFlightResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = BillSettementFlightResponseBody()
self.body = temp_model.from_map(m['body'])
return self
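# --- Illustrative pagination sketch (not part of the generated SDK) ---
# Flight settlement data is paged (pageSize up to 500, pageNumber from 1).
# `fetch_page` is a hypothetical callable standing in for whatever client
# method returns a BillSettementFlightResponseBody for a given page number.
def _example_iterate_flight_settlements(fetch_page):
    page_number = 1
    seen = 0
    while True:
        body = fetch_page(page_number)  # -> BillSettementFlightResponseBody
        module = body.module
        if module is None or not module.data_list:
            break
        for row in module.data_list:
            seen += 1
            yield row
        if module.total_num is not None and seen >= module.total_num:
            break  # all records consumed
        page_number += 1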
class GetHotelExceedApplyHeaders(TeaModel):
def __init__(
self,
common_headers: Dict[str, str] = None,
x_acs_dingtalk_access_token: str = None,
):
self.common_headers = common_headers
self.x_acs_dingtalk_access_token = x_acs_dingtalk_access_token
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.common_headers is not None:
result['commonHeaders'] = self.common_headers
if self.x_acs_dingtalk_access_token is not None:
result['x-acs-dingtalk-access-token'] = self.x_acs_dingtalk_access_token
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('commonHeaders') is not None:
self.common_headers = m.get('commonHeaders')
if m.get('x-acs-dingtalk-access-token') is not None:
self.x_acs_dingtalk_access_token = m.get('x-acs-dingtalk-access-token')
return self
class GetHotelExceedApplyRequest(TeaModel):
def __init__(
self,
corp_id: str = None,
apply_id: str = None,
):
        # Third-party corp ID
        self.corp_id = corp_id
        # Business-travel over-standard approval ID
        self.apply_id = apply_id
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.corp_id is not None:
result['corpId'] = self.corp_id
if self.apply_id is not None:
result['applyId'] = self.apply_id
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('corpId') is not None:
self.corp_id = m.get('corpId')
if m.get('applyId') is not None:
self.apply_id = m.get('applyId')
return self
class GetHotelExceedApplyResponseBodyApplyIntentionInfoDO(TeaModel):
def __init__(
self,
check_in: str = None,
check_out: str = None,
city_code: str = None,
city_name: str = None,
price: int = None,
together: bool = None,
type: int = None,
):
        # Check-in date
        self.check_in = check_in
        # Check-out date
        self.check_out = check_out
        # Check-in city code (3-letter)
        self.city_code = city_code
        # Check-in city name
        self.city_name = city_name
        # Intended hotel amount (in fen, i.e. cents of CNY)
        self.price = price
        # Whether sharing a room
        self.together = together
        # Exceed type; 32 = amount exceeds the standard
        self.type = type
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.check_in is not None:
result['checkIn'] = self.check_in
if self.check_out is not None:
result['checkOut'] = self.check_out
if self.city_code is not None:
result['cityCode'] = self.city_code
if self.city_name is not None:
result['cityName'] = self.city_name
if self.price is not None:
result['price'] = self.price
if self.together is not None:
result['together'] = self.together
if self.type is not None:
result['type'] = self.type
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('checkIn') is not None:
self.check_in = m.get('checkIn')
if m.get('checkOut') is not None:
self.check_out = m.get('checkOut')
if m.get('cityCode') is not None:
self.city_code = m.get('cityCode')
if m.get('cityName') is not None:
self.city_name = m.get('cityName')
if m.get('price') is not None:
self.price = m.get('price')
if m.get('together') is not None:
self.together = m.get('together')
if m.get('type') is not None:
self.type = m.get('type')
return self
class GetHotelExceedApplyResponseBody(TeaModel):
def __init__(
self,
corp_id: str = None,
apply_id: int = None,
status: int = None,
btrip_cause: str = None,
exceed_type: int = None,
exceed_reason: str = None,
origin_standard: str = None,
submit_time: str = None,
user_id: str = None,
apply_intention_info_do: GetHotelExceedApplyResponseBodyApplyIntentionInfoDO = None,
thirdpart_apply_id: str = None,
):
        # Third-party corp ID
        self.corp_id = corp_id
        # Business-travel over-standard approval ID
        self.apply_id = apply_id
        # Approval status: 0 = pending, 1 = approved, 2 = rejected
        self.status = status
        # Business trip reason
        self.btrip_cause = btrip_cause
        # Exceed type; 32 = amount exceeds the standard
        self.exceed_type = exceed_type
        # Exceed reason
        self.exceed_reason = exceed_reason
        # Original travel standard
        self.origin_standard = origin_standard
        # Approval submission time
        self.submit_time = submit_time
        # Third-party user ID
        self.user_id = user_id
        # Intended travel information
        self.apply_intention_info_do = apply_intention_info_do
        # Third-party business trip approval ID
        self.thirdpart_apply_id = thirdpart_apply_id
def validate(self):
if self.apply_intention_info_do:
self.apply_intention_info_do.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.corp_id is not None:
result['corpId'] = self.corp_id
if self.apply_id is not None:
result['applyId'] = self.apply_id
if self.status is not None:
result['status'] = self.status
if self.btrip_cause is not None:
result['btripCause'] = self.btrip_cause
if self.exceed_type is not None:
result['exceedType'] = self.exceed_type
if self.exceed_reason is not None:
result['exceedReason'] = self.exceed_reason
if self.origin_standard is not None:
result['originStandard'] = self.origin_standard
if self.submit_time is not None:
result['submitTime'] = self.submit_time
if self.user_id is not None:
result['userId'] = self.user_id
if self.apply_intention_info_do is not None:
result['applyIntentionInfoDO'] = self.apply_intention_info_do.to_map()
if self.thirdpart_apply_id is not None:
result['thirdpartApplyId'] = self.thirdpart_apply_id
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('corpId') is not None:
self.corp_id = m.get('corpId')
if m.get('applyId') is not None:
self.apply_id = m.get('applyId')
if m.get('status') is not None:
self.status = m.get('status')
if m.get('btripCause') is not None:
self.btrip_cause = m.get('btripCause')
if m.get('exceedType') is not None:
self.exceed_type = m.get('exceedType')
if m.get('exceedReason') is not None:
self.exceed_reason = m.get('exceedReason')
if m.get('originStandard') is not None:
self.origin_standard = m.get('originStandard')
if m.get('submitTime') is not None:
self.submit_time = m.get('submitTime')
if m.get('userId') is not None:
self.user_id = m.get('userId')
if m.get('applyIntentionInfoDO') is not None:
temp_model = GetHotelExceedApplyResponseBodyApplyIntentionInfoDO()
self.apply_intention_info_do = temp_model.from_map(m['applyIntentionInfoDO'])
if m.get('thirdpartApplyId') is not None:
self.thirdpart_apply_id = m.get('thirdpartApplyId')
return self
class GetHotelExceedApplyResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: GetHotelExceedApplyResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = GetHotelExceedApplyResponseBody()
self.body = temp_model.from_map(m['body'])
return self
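# --- Illustrative usage sketch (not part of the generated SDK) ---
# Reading the nested intention info out of a hotel exceed-apply payload; the
# input dict is assumed to follow the to_map() layout above, and price is
# expressed in fen (cents of CNY).
def _example_read_hotel_exceed(raw: dict):
    body = GetHotelExceedApplyResponseBody().from_map(raw)
    intent = body.apply_intention_info_do
    if intent is not None:
        print(intent.city_name, intent.check_in, intent.check_out,
              (intent.price or 0) / 100.0)  # convert fen to yuan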
class QueryUnionOrderHeaders(TeaModel):
def __init__(
self,
common_headers: Dict[str, str] = None,
x_acs_dingtalk_access_token: str = None,
):
self.common_headers = common_headers
self.x_acs_dingtalk_access_token = x_acs_dingtalk_access_token
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.common_headers is not None:
result['commonHeaders'] = self.common_headers
if self.x_acs_dingtalk_access_token is not None:
result['x-acs-dingtalk-access-token'] = self.x_acs_dingtalk_access_token
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('commonHeaders') is not None:
self.common_headers = m.get('commonHeaders')
if m.get('x-acs-dingtalk-access-token') is not None:
self.x_acs_dingtalk_access_token = m.get('x-acs-dingtalk-access-token')
return self
class QueryUnionOrderRequest(TeaModel):
def __init__(
self,
corp_id: str = None,
third_part_apply_id: str = None,
union_no: str = None,
):
# 第三方企业id
self.corp_id = corp_id
# 第三方申请单id
self.third_part_apply_id = third_part_apply_id
# 关联单号
self.union_no = union_no
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.corp_id is not None:
result['corpId'] = self.corp_id
if self.third_part_apply_id is not None:
result['thirdPartApplyId'] = self.third_part_apply_id
if self.union_no is not None:
result['unionNo'] = self.union_no
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('corpId') is not None:
self.corp_id = m.get('corpId')
if m.get('thirdPartApplyId') is not None:
self.third_part_apply_id = m.get('thirdPartApplyId')
if m.get('unionNo') is not None:
self.union_no = m.get('unionNo')
return self
class QueryUnionOrderResponseBodyFlightList(TeaModel):
def __init__(
self,
flight_order_id: int = None,
flight_order_status: int = None,
):
# 订单id
self.flight_order_id = flight_order_id
# 订单状态:0待支付,1出票中,2已关闭,3有改签单,4有退票单,5出票成功,6退票申请中,7改签申请中
self.flight_order_status = flight_order_status
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.flight_order_id is not None:
result['flightOrderId'] = self.flight_order_id
if self.flight_order_status is not None:
result['flightOrderStatus'] = self.flight_order_status
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('flightOrderId') is not None:
self.flight_order_id = m.get('flightOrderId')
if m.get('flightOrderStatus') is not None:
self.flight_order_status = m.get('flightOrderStatus')
return self
class QueryUnionOrderResponseBodyTrainList(TeaModel):
def __init__(
self,
train_order_id: int = None,
train_orderstatus: int = None,
):
# 火车订单号
self.train_order_id = train_order_id
# 订单状态:0待支付,1出票中,2已关闭,3,改签成功,4退票成功,5出票完成,6退票申请中,7改签申请中,8已出票,已发货,9出票失败,10改签失败,11退票失败
self.train_orderstatus = train_orderstatus
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.train_order_id is not None:
result['trainOrderId'] = self.train_order_id
if self.train_orderstatus is not None:
result['trainOrderstatus'] = self.train_orderstatus
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('trainOrderId') is not None:
self.train_order_id = m.get('trainOrderId')
if m.get('trainOrderstatus') is not None:
self.train_orderstatus = m.get('trainOrderstatus')
return self
class QueryUnionOrderResponseBodyHotelList(TeaModel):
def __init__(
self,
hotel_order_id: int = None,
hotel_order_status: int = None,
):
# 酒店订单号
self.hotel_order_id = hotel_order_id
# 订单状态1:等待确认,2:等待付款,3:预订成功,4:申请退款,5:退款成功,6:已关闭,7:结账成功,8:支付成功
self.hotel_order_status = hotel_order_status
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.hotel_order_id is not None:
result['hotelOrderId'] = self.hotel_order_id
if self.hotel_order_status is not None:
result['hotelOrderStatus'] = self.hotel_order_status
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('hotelOrderId') is not None:
self.hotel_order_id = m.get('hotelOrderId')
if m.get('hotelOrderStatus') is not None:
self.hotel_order_status = m.get('hotelOrderStatus')
return self
class QueryUnionOrderResponseBodyVehicleList(TeaModel):
def __init__(
self,
vehicle_order_id: int = None,
vehicle_order_status: int = None,
):
# 用车订单号
self.vehicle_order_id = vehicle_order_id
# 订单状态:0:初始状态,1:已超时,2:派单成功,3:派单失败,4:已退款,5:已支付,6:已取消
self.vehicle_order_status = vehicle_order_status
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.vehicle_order_id is not None:
result['vehicleOrderId'] = self.vehicle_order_id
if self.vehicle_order_status is not None:
result['vehicleOrderStatus'] = self.vehicle_order_status
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('vehicleOrderId') is not None:
self.vehicle_order_id = m.get('vehicleOrderId')
if m.get('vehicleOrderStatus') is not None:
self.vehicle_order_status = m.get('vehicleOrderStatus')
return self
class QueryUnionOrderResponseBody(TeaModel):
def __init__(
self,
flight_list: List[QueryUnionOrderResponseBodyFlightList] = None,
corp_id: str = None,
train_list: List[QueryUnionOrderResponseBodyTrainList] = None,
hotel_list: List[QueryUnionOrderResponseBodyHotelList] = None,
vehicle_list: List[QueryUnionOrderResponseBodyVehicleList] = None,
):
# 飞机订单信息
self.flight_list = flight_list
# 企业id
self.corp_id = corp_id
# 火车订单信息
self.train_list = train_list
# 酒店订单信息
self.hotel_list = hotel_list
# 用车订单信息
self.vehicle_list = vehicle_list
def validate(self):
if self.flight_list:
for k in self.flight_list:
if k:
k.validate()
if self.train_list:
for k in self.train_list:
if k:
k.validate()
if self.hotel_list:
for k in self.hotel_list:
if k:
k.validate()
if self.vehicle_list:
for k in self.vehicle_list:
if k:
k.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
result['flightList'] = []
if self.flight_list is not None:
for k in self.flight_list:
result['flightList'].append(k.to_map() if k else None)
if self.corp_id is not None:
result['corpId'] = self.corp_id
result['trainList'] = []
if self.train_list is not None:
for k in self.train_list:
result['trainList'].append(k.to_map() if k else None)
result['hotelList'] = []
if self.hotel_list is not None:
for k in self.hotel_list:
result['hotelList'].append(k.to_map() if k else None)
result['vehicleList'] = []
if self.vehicle_list is not None:
for k in self.vehicle_list:
result['vehicleList'].append(k.to_map() if k else None)
return result
def from_map(self, m: dict = None):
m = m or dict()
self.flight_list = []
if m.get('flightList') is not None:
for k in m.get('flightList'):
temp_model = QueryUnionOrderResponseBodyFlightList()
self.flight_list.append(temp_model.from_map(k))
if m.get('corpId') is not None:
self.corp_id = m.get('corpId')
self.train_list = []
if m.get('trainList') is not None:
for k in m.get('trainList'):
temp_model = QueryUnionOrderResponseBodyTrainList()
self.train_list.append(temp_model.from_map(k))
self.hotel_list = []
if m.get('hotelList') is not None:
for k in m.get('hotelList'):
temp_model = QueryUnionOrderResponseBodyHotelList()
self.hotel_list.append(temp_model.from_map(k))
self.vehicle_list = []
if m.get('vehicleList') is not None:
for k in m.get('vehicleList'):
temp_model = QueryUnionOrderResponseBodyVehicleList()
self.vehicle_list.append(temp_model.from_map(k))
return self
class QueryUnionOrderResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: QueryUnionOrderResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = QueryUnionOrderResponseBody()
self.body = temp_model.from_map(m['body'])
return self
class QueryCityCarApplyHeaders(TeaModel):
def __init__(
self,
common_headers: Dict[str, str] = None,
x_acs_dingtalk_access_token: str = None,
):
self.common_headers = common_headers
self.x_acs_dingtalk_access_token = x_acs_dingtalk_access_token
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.common_headers is not None:
result['commonHeaders'] = self.common_headers
if self.x_acs_dingtalk_access_token is not None:
result['x-acs-dingtalk-access-token'] = self.x_acs_dingtalk_access_token
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('commonHeaders') is not None:
self.common_headers = m.get('commonHeaders')
if m.get('x-acs-dingtalk-access-token') is not None:
self.x_acs_dingtalk_access_token = m.get('x-acs-dingtalk-access-token')
return self
class QueryCityCarApplyRequest(TeaModel):
def __init__(
self,
corp_id: str = None,
created_end_at: str = None,
created_start_at: str = None,
page_number: int = None,
page_size: int = None,
third_part_apply_id: str = None,
user_id: str = None,
):
# 第三方企业ID
self.corp_id = corp_id
# 审批单创建时间小于值
self.created_end_at = created_end_at
# 审批单创建时间大于等于值
self.created_start_at = created_start_at
# 页码,要求大于等于1,默认1
self.page_number = page_number
# 每页数据量,要求大于等于1,默认20
self.page_size = page_size
# 三方审批单ID
self.third_part_apply_id = third_part_apply_id
# 第三方员工ID
self.user_id = user_id
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.corp_id is not None:
result['corpId'] = self.corp_id
if self.created_end_at is not None:
result['createdEndAt'] = self.created_end_at
if self.created_start_at is not None:
result['createdStartAt'] = self.created_start_at
if self.page_number is not None:
result['pageNumber'] = self.page_number
if self.page_size is not None:
result['pageSize'] = self.page_size
if self.third_part_apply_id is not None:
result['thirdPartApplyId'] = self.third_part_apply_id
if self.user_id is not None:
result['userId'] = self.user_id
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('corpId') is not None:
self.corp_id = m.get('corpId')
if m.get('createdEndAt') is not None:
self.created_end_at = m.get('createdEndAt')
if m.get('createdStartAt') is not None:
self.created_start_at = m.get('createdStartAt')
if m.get('pageNumber') is not None:
self.page_number = m.get('pageNumber')
if m.get('pageSize') is not None:
self.page_size = m.get('pageSize')
if m.get('thirdPartApplyId') is not None:
self.third_part_apply_id = m.get('thirdPartApplyId')
if m.get('userId') is not None:
self.user_id = m.get('userId')
return self
class QueryCityCarApplyResponseBodyApplyListApproverList(TeaModel):
def __init__(
self,
note: str = None,
operate_time: str = None,
order: int = None,
status: int = None,
status_desc: str = None,
user_id: str = None,
user_name: str = None,
):
# 审批备注
self.note = note
# 审批时间
self.operate_time = operate_time
# 审批人排序值
self.order = order
# 审批状态枚举:审批状态:0-审批中,1-已同意,2-已拒绝
self.status = status
# 审批状态描述
self.status_desc = status_desc
# 审批员工ID
self.user_id = user_id
# 审批员工名
self.user_name = user_name
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.note is not None:
result['note'] = self.note
if self.operate_time is not None:
result['operateTime'] = self.operate_time
if self.order is not None:
result['order'] = self.order
if self.status is not None:
result['status'] = self.status
if self.status_desc is not None:
result['statusDesc'] = self.status_desc
if self.user_id is not None:
result['userId'] = self.user_id
if self.user_name is not None:
result['userName'] = self.user_name
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('note') is not None:
self.note = m.get('note')
if m.get('operateTime') is not None:
self.operate_time = m.get('operateTime')
if m.get('order') is not None:
self.order = m.get('order')
if m.get('status') is not None:
self.status = m.get('status')
if m.get('statusDesc') is not None:
self.status_desc = m.get('statusDesc')
if m.get('userId') is not None:
self.user_id = m.get('userId')
if m.get('userName') is not None:
self.user_name = m.get('userName')
return self
class QueryCityCarApplyResponseBodyApplyListItineraryList(TeaModel):
def __init__(
self,
arr_city: str = None,
arr_city_code: str = None,
arr_date: str = None,
cost_center_id: int = None,
cost_center_name: str = None,
dep_city: str = None,
dep_city_code: str = None,
dep_date: str = None,
invoice_id: int = None,
invoice_name: str = None,
itinerary_id: str = None,
project_code: str = None,
project_title: str = None,
traffic_type: int = None,
):
# 目的地城市
self.arr_city = arr_city
# 目的地城市三字码
self.arr_city_code = arr_city_code
# 到达目的地城市时间
self.arr_date = arr_date
# 商旅内部成本中心ID
self.cost_center_id = cost_center_id
# 成本中心名称
self.cost_center_name = cost_center_name
# 出发城市
self.dep_city = dep_city
# 出发城市三字码
self.dep_city_code = dep_city_code
# 出发时间
self.dep_date = dep_date
# 商旅内部发票抬头ID
self.invoice_id = invoice_id
# 发票抬头名称
self.invoice_name = invoice_name
# 商旅内部行程单ID
self.itinerary_id = itinerary_id
# 项目code
self.project_code = project_code
# 项目名称
self.project_title = project_title
# 交通方式:4-市内交通
self.traffic_type = traffic_type
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.arr_city is not None:
result['arrCity'] = self.arr_city
if self.arr_city_code is not None:
result['arrCityCode'] = self.arr_city_code
if self.arr_date is not None:
result['arrDate'] = self.arr_date
if self.cost_center_id is not None:
result['costCenterId'] = self.cost_center_id
if self.cost_center_name is not None:
result['costCenterName'] = self.cost_center_name
if self.dep_city is not None:
result['depCity'] = self.dep_city
if self.dep_city_code is not None:
result['depCityCode'] = self.dep_city_code
if self.dep_date is not None:
result['depDate'] = self.dep_date
if self.invoice_id is not None:
result['invoiceId'] = self.invoice_id
if self.invoice_name is not None:
result['invoiceName'] = self.invoice_name
if self.itinerary_id is not None:
result['itineraryId'] = self.itinerary_id
if self.project_code is not None:
result['projectCode'] = self.project_code
if self.project_title is not None:
result['projectTitle'] = self.project_title
if self.traffic_type is not None:
result['trafficType'] = self.traffic_type
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('arrCity') is not None:
self.arr_city = m.get('arrCity')
if m.get('arrCityCode') is not None:
self.arr_city_code = m.get('arrCityCode')
if m.get('arrDate') is not None:
self.arr_date = m.get('arrDate')
if m.get('costCenterId') is not None:
self.cost_center_id = m.get('costCenterId')
if m.get('costCenterName') is not None:
self.cost_center_name = m.get('costCenterName')
if m.get('depCity') is not None:
self.dep_city = m.get('depCity')
if m.get('depCityCode') is not None:
self.dep_city_code = m.get('depCityCode')
if m.get('depDate') is not None:
self.dep_date = m.get('depDate')
if m.get('invoiceId') is not None:
self.invoice_id = m.get('invoiceId')
if m.get('invoiceName') is not None:
self.invoice_name = m.get('invoiceName')
if m.get('itineraryId') is not None:
self.itinerary_id = m.get('itineraryId')
if m.get('projectCode') is not None:
self.project_code = m.get('projectCode')
if m.get('projectTitle') is not None:
self.project_title = m.get('projectTitle')
if m.get('trafficType') is not None:
self.traffic_type = m.get('trafficType')
return self
class QueryCityCarApplyResponseBodyApplyList(TeaModel):
def __init__(
self,
approver_list: List[QueryCityCarApplyResponseBodyApplyListApproverList] = None,
depart_id: str = None,
depart_name: str = None,
gmt_create: str = None,
gmt_modified: str = None,
itinerary_list: List[QueryCityCarApplyResponseBodyApplyListItineraryList] = None,
status: int = None,
status_desc: str = None,
third_part_apply_id: str = None,
trip_cause: str = None,
trip_title: str = None,
user_id: str = None,
user_name: str = None,
):
# 审批单列表
self.approver_list = approver_list
# 员工所在部门ID
self.depart_id = depart_id
# 员工所在部门名
self.depart_name = depart_name
# 创建时间
self.gmt_create = gmt_create
# 最近修改时间
self.gmt_modified = gmt_modified
# 审批单关联的行程
self.itinerary_list = itinerary_list
# 审批单状态:0-申请,1-同意,2-拒绝
self.status = status
# 审批单状态:0-申请,1-同意,2-拒绝
self.status_desc = status_desc
# 三方审批单ID
self.third_part_apply_id = third_part_apply_id
# 申请事由
self.trip_cause = trip_cause
# 审批单标题
self.trip_title = trip_title
# 发起审批员工ID
self.user_id = user_id
# 发起审批员工名
self.user_name = user_name
def validate(self):
if self.approver_list:
for k in self.approver_list:
if k:
k.validate()
if self.itinerary_list:
for k in self.itinerary_list:
if k:
k.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
result['approverList'] = []
if self.approver_list is not None:
for k in self.approver_list:
result['approverList'].append(k.to_map() if k else None)
if self.depart_id is not None:
result['departId'] = self.depart_id
if self.depart_name is not None:
result['departName'] = self.depart_name
if self.gmt_create is not None:
result['gmtCreate'] = self.gmt_create
if self.gmt_modified is not None:
result['gmtModified'] = self.gmt_modified
result['itineraryList'] = []
if self.itinerary_list is not None:
for k in self.itinerary_list:
result['itineraryList'].append(k.to_map() if k else None)
if self.status is not None:
result['status'] = self.status
if self.status_desc is not None:
result['statusDesc'] = self.status_desc
if self.third_part_apply_id is not None:
result['thirdPartApplyId'] = self.third_part_apply_id
if self.trip_cause is not None:
result['tripCause'] = self.trip_cause
if self.trip_title is not None:
result['tripTitle'] = self.trip_title
if self.user_id is not None:
result['userId'] = self.user_id
if self.user_name is not None:
result['userName'] = self.user_name
return result
def from_map(self, m: dict = None):
m = m or dict()
self.approver_list = []
if m.get('approverList') is not None:
for k in m.get('approverList'):
temp_model = QueryCityCarApplyResponseBodyApplyListApproverList()
self.approver_list.append(temp_model.from_map(k))
if m.get('departId') is not None:
self.depart_id = m.get('departId')
if m.get('departName') is not None:
self.depart_name = m.get('departName')
if m.get('gmtCreate') is not None:
self.gmt_create = m.get('gmtCreate')
if m.get('gmtModified') is not None:
self.gmt_modified = m.get('gmtModified')
self.itinerary_list = []
if m.get('itineraryList') is not None:
for k in m.get('itineraryList'):
temp_model = QueryCityCarApplyResponseBodyApplyListItineraryList()
self.itinerary_list.append(temp_model.from_map(k))
if m.get('status') is not None:
self.status = m.get('status')
if m.get('statusDesc') is not None:
self.status_desc = m.get('statusDesc')
if m.get('thirdPartApplyId') is not None:
self.third_part_apply_id = m.get('thirdPartApplyId')
if m.get('tripCause') is not None:
self.trip_cause = m.get('tripCause')
if m.get('tripTitle') is not None:
self.trip_title = m.get('tripTitle')
if m.get('userId') is not None:
self.user_id = m.get('userId')
if m.get('userName') is not None:
self.user_name = m.get('userName')
return self
class QueryCityCarApplyResponseBody(TeaModel):
def __init__(
self,
apply_list: List[QueryCityCarApplyResponseBodyApplyList] = None,
total: int = None,
):
# 审批单列表
self.apply_list = apply_list
# 总数
self.total = total
def validate(self):
if self.apply_list:
for k in self.apply_list:
if k:
k.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
result['applyList'] = []
if self.apply_list is not None:
for k in self.apply_list:
result['applyList'].append(k.to_map() if k else None)
if self.total is not None:
result['total'] = self.total
return result
def from_map(self, m: dict = None):
m = m or dict()
self.apply_list = []
if m.get('applyList') is not None:
for k in m.get('applyList'):
temp_model = QueryCityCarApplyResponseBodyApplyList()
self.apply_list.append(temp_model.from_map(k))
if m.get('total') is not None:
self.total = m.get('total')
return self
class QueryCityCarApplyResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: QueryCityCarApplyResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = QueryCityCarApplyResponseBody()
self.body = temp_model.from_map(m['body'])
return self
class GetTrainExceedApplyHeaders(TeaModel):
def __init__(
self,
common_headers: Dict[str, str] = None,
x_acs_dingtalk_access_token: str = None,
):
self.common_headers = common_headers
self.x_acs_dingtalk_access_token = x_acs_dingtalk_access_token
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.common_headers is not None:
result['commonHeaders'] = self.common_headers
if self.x_acs_dingtalk_access_token is not None:
result['x-acs-dingtalk-access-token'] = self.x_acs_dingtalk_access_token
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('commonHeaders') is not None:
self.common_headers = m.get('commonHeaders')
if m.get('x-acs-dingtalk-access-token') is not None:
self.x_acs_dingtalk_access_token = m.get('x-acs-dingtalk-access-token')
return self
class GetTrainExceedApplyRequest(TeaModel):
def __init__(
self,
corp_id: str = None,
apply_id: str = None,
):
# 第三方企业id
self.corp_id = corp_id
# 商旅超标审批单id
self.apply_id = apply_id
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.corp_id is not None:
result['corpId'] = self.corp_id
if self.apply_id is not None:
result['applyId'] = self.apply_id
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('corpId') is not None:
self.corp_id = m.get('corpId')
if m.get('applyId') is not None:
self.apply_id = m.get('applyId')
return self
class GetTrainExceedApplyResponseBodyApplyIntentionInfoDO(TeaModel):
def __init__(
self,
price: int = None,
dep_city_name: str = None,
arr_city_name: str = None,
dep_city: str = None,
arr_city: str = None,
dep_time: str = None,
arr_time: str = None,
arr_station: str = None,
dep_station: str = None,
train_no: str = None,
train_type_desc: str = None,
seat_name: str = None,
):
# 意向坐席价格(分)
self.price = price
# 出发城市名
self.dep_city_name = dep_city_name
# 到达城市名
self.arr_city_name = arr_city_name
# 出发城市三字码
self.dep_city = dep_city
# 到达城市三字码
self.arr_city = arr_city
# 出发时间
self.dep_time = dep_time
# 到达时间
self.arr_time = arr_time
# 到达站点名称
self.arr_station = arr_station
# 出发站点名称
self.dep_station = dep_station
# 意向车次号
self.train_no = train_no
# 意向车次类型
self.train_type_desc = train_type_desc
# 意向坐席名称
self.seat_name = seat_name
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.price is not None:
result['price'] = self.price
if self.dep_city_name is not None:
result['depCityName'] = self.dep_city_name
if self.arr_city_name is not None:
result['arrCityName'] = self.arr_city_name
if self.dep_city is not None:
result['depCity'] = self.dep_city
if self.arr_city is not None:
result['arrCity'] = self.arr_city
if self.dep_time is not None:
result['depTime'] = self.dep_time
if self.arr_time is not None:
result['arrTime'] = self.arr_time
if self.arr_station is not None:
result['arrStation'] = self.arr_station
if self.dep_station is not None:
result['depStation'] = self.dep_station
if self.train_no is not None:
result['trainNo'] = self.train_no
if self.train_type_desc is not None:
result['trainTypeDesc'] = self.train_type_desc
if self.seat_name is not None:
result['seatName'] = self.seat_name
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('price') is not None:
self.price = m.get('price')
if m.get('depCityName') is not None:
self.dep_city_name = m.get('depCityName')
if m.get('arrCityName') is not None:
self.arr_city_name = m.get('arrCityName')
if m.get('depCity') is not None:
self.dep_city = m.get('depCity')
if m.get('arrCity') is not None:
self.arr_city = m.get('arrCity')
if m.get('depTime') is not None:
self.dep_time = m.get('depTime')
if m.get('arrTime') is not None:
self.arr_time = m.get('arrTime')
if m.get('arrStation') is not None:
self.arr_station = m.get('arrStation')
if m.get('depStation') is not None:
self.dep_station = m.get('depStation')
if m.get('trainNo') is not None:
self.train_no = m.get('trainNo')
if m.get('trainTypeDesc') is not None:
self.train_type_desc = m.get('trainTypeDesc')
if m.get('seatName') is not None:
self.seat_name = m.get('seatName')
return self
class GetTrainExceedApplyResponseBody(TeaModel):
def __init__(
self,
corp_id: str = None,
apply_id: int = None,
status: int = None,
btrip_cause: str = None,
exceed_type: int = None,
exceed_reason: str = None,
origin_standard: str = None,
submit_time: str = None,
user_id: str = None,
apply_intention_info_do: GetTrainExceedApplyResponseBodyApplyIntentionInfoDO = None,
thirdpart_apply_id: str = None,
):
# 第三方企业id
self.corp_id = corp_id
# 商旅超标审批单id
self.apply_id = apply_id
# 审批单状态 0:审批中 1:已同意 2:已拒绝
self.status = status
# 出差原因
self.btrip_cause = btrip_cause
# 超标类型,32:坐席超标
self.exceed_type = exceed_type
# 超标原因
self.exceed_reason = exceed_reason
# 原差旅标准
self.origin_standard = origin_standard
# 审批单提交时间
self.submit_time = submit_time
# 第三方用户id
self.user_id = user_id
# 意向出行信息
self.apply_intention_info_do = apply_intention_info_do
# 第三方出差审批单号
self.thirdpart_apply_id = thirdpart_apply_id
def validate(self):
if self.apply_intention_info_do:
self.apply_intention_info_do.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.corp_id is not None:
result['corpId'] = self.corp_id
if self.apply_id is not None:
result['applyId'] = self.apply_id
if self.status is not None:
result['status'] = self.status
if self.btrip_cause is not None:
result['btripCause'] = self.btrip_cause
if self.exceed_type is not None:
result['exceedType'] = self.exceed_type
if self.exceed_reason is not None:
result['exceedReason'] = self.exceed_reason
if self.origin_standard is not None:
result['originStandard'] = self.origin_standard
if self.submit_time is not None:
result['submitTime'] = self.submit_time
if self.user_id is not None:
result['userId'] = self.user_id
if self.apply_intention_info_do is not None:
result['applyIntentionInfoDO'] = self.apply_intention_info_do.to_map()
if self.thirdpart_apply_id is not None:
result['thirdpartApplyId'] = self.thirdpart_apply_id
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('corpId') is not None:
self.corp_id = m.get('corpId')
if m.get('applyId') is not None:
self.apply_id = m.get('applyId')
if m.get('status') is not None:
self.status = m.get('status')
if m.get('btripCause') is not None:
self.btrip_cause = m.get('btripCause')
if m.get('exceedType') is not None:
self.exceed_type = m.get('exceedType')
if m.get('exceedReason') is not None:
self.exceed_reason = m.get('exceedReason')
if m.get('originStandard') is not None:
self.origin_standard = m.get('originStandard')
if m.get('submitTime') is not None:
self.submit_time = m.get('submitTime')
if m.get('userId') is not None:
self.user_id = m.get('userId')
if m.get('applyIntentionInfoDO') is not None:
temp_model = GetTrainExceedApplyResponseBodyApplyIntentionInfoDO()
self.apply_intention_info_do = temp_model.from_map(m['applyIntentionInfoDO'])
if m.get('thirdpartApplyId') is not None:
self.thirdpart_apply_id = m.get('thirdpartApplyId')
return self
class GetTrainExceedApplyResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: GetTrainExceedApplyResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = GetTrainExceedApplyResponseBody()
self.body = temp_model.from_map(m['body'])
return self
| zh | 0.97566 | # -*- coding: utf-8 -*- # This file is auto-generated, don't edit it. Thanks. # 第三方企业ID # 审批时间 # 审批备注 # 审批结果:1-同意,2-拒绝 # 第三方审批单ID # 审批的第三方员工ID # suiteKey # account # tokenGrantType # 审批结果 # 第三方企业 # 类目:机酒火车 1:机票; 2:酒店; 4:用车 6:商旅火车票 # 每页数据量,默认100,最高500 # 记账更新开始日期 # 页数,从1开始 # 记账更新结束日期 # 交易流水号 # 审批单号 # 预定时间 # 预定人use id # 预订人名称 # 资金方向 # 级联部门 # 入住时间 # 离店时间 # 入住城市 # 城市编码 # 企业退款金额 # 企业支付金额 # 成本中心名称 # 成本中心编码 # 末级部门 # 部门id # 费用类型 # 杂费 # 福豆支付 # 酒店名称 # 序号 # 发票抬头 # 是否协议价 # 是否合住 # 入住天数 # 订单号 # 订单金额 # 订单类型 # 超标审批单号 # 个人退款金额 # 个人支付金额 # 主键id # 项目编码 # 项目名称 # 优惠券 # 房间数 # 房价 # 房间类型 # 服务费,仅在 feeType 20111、20112中展示 # 结算金额 # 结算时间 # 结算类型 # 入账状态 # 总间夜数 # 出行人use id # 出行人名称 # 预订人工号 # 出行人工号 # 类目 # 企业id # 数据集合 # 记账更新结束日期 # 记账更新开始日期 # 总数据量 # 结果msg # module # 是否成功 # 结果code # 第三方企业id # 商旅超标审批单id # 到达城市三字码 # 到达城市名称 # 到达时间 # 超标的舱位,F:头等舱 C:商务舱 Y:经济舱 P:超值经济舱 # 申请超标的舱等 0:头等舱 1:商务舱 2:经济舱 3:超值经济舱 # 舱等描述,头等舱,商务舱,经济舱,超值经济舱 # 出发城市三字码 # 出发城市名称 # 出发时间 # 折扣 # 航班号 # 意向航班价格(元) # 超标类型,1:折扣 2,8,10:时间 3,9,11:折扣和时间 # 第三方企业id # 商旅超标审批单id # 审批单状态 0:审批中 1:已同意 2:已拒绝 # 出差原因 # 超标类型,1:折扣 2,8,10:时间 3,9,11:折扣和时间 # 超标原因 # 原差旅标准 # 审批单提交时间 # 第三方用户id # 意向出行信息 # 第三方出差审批单号 # 支付交易流水号 # 审批单号 # 到达城市 # 到达日期 # 到达地 # 到达时间 # 预定时间 # 预定人use id # 预订人名称 # 用车事由 # 资金方向 # 车型 # 级联部门 # 成本中心名称 # 成本中心编号 # 优惠券 # 优惠金额 # 末级部门 # 部门id # 出发城市 # 出发日期 # 出发地 # 出发时间 # 预估行驶距离 # 预估金额 # 费用类型 # 序号 # 发票抬头 # 用车事由 # 订单id # 订单金额 # 超标审批单号 # 个人支付金额 # 项目编码 # 项目名称 # 供应商 # 实际行驶距离 # 实际上车点 # 实际下车点 # 服务费,仅在feeType 40111 中展示 # 结算金额 # 结算时间 # 结算类型 # 特别关注订单 # 特别关注原因 # 入账状态 # 出行人use id # 出行人名称 # 员工是否认可 # 预订人工号 # 出行人工号 # 类目 # 企业id # 数据集合 # 记账更新开始日期 # 记账更新结束日期 # 总数量 # 结果msg # module # 是否成功 # 结果code # 交易流水号 # 审批单号 # 到达日期 # 到达站点 # 到达时间 # 预定时间 # 预定人use id # 预订人名称 # 资金方向 # 级联部门 # 改签手续费 # 成本中心名称 # 成本中心编码 # 折扣率 # 末级部门 # 部门id # 出发日期 # 出发站 # 出发时间 # 费用类型 # 序号 # 发票抬头 # 订单号 # 订单金额 # 超标审批单号 # 主键id # 项目编号 # 项目名称 # 退款手续费 # 运行时长 # 座位号 # 坐席 # 服务费,仅在feeType 6007、6008中展示 # 结算金额 # 结算时间 # 结算类型 # 入账状态 # 票面票号 # 票价 # 车次号 # 车次类型 # 出行人useId # 出行人名称 # 预订人工号 # 出行人工号 # 发票类型 # 类目 # 企业id # 数据集合 # 记账更新开始时间 # 记账更新结束时间 # 总数据量 # 结果msg # module # 是否成功 # 结果code # 审批意见 # 商旅超标审批单id # 企业id # 第三方流程实例id # 用户id # 审批单状态 1同意2拒绝 # 是否同步成功 # 出差事由 # 用车城市 # 第三方企业ID # 用车时间,按天管控,比如传值2021-03-18 20:26:56表示2021-03-18当天可用车,跨天情况配合finishedDate参数使用 # 审批单关联的项目code # 审批单关联的项目名 # 审批单状态:0-申请,1-同意,2-拒绝 # 三方审批单ID # 审批单关联的三方成本中心ID # 审批单关联的三方发票抬头ID # 审批单可用总次数 # 审批单可用次数类型:1-次数不限制,2-用户可指定次数,3-管理员限制次数;如果企业没有限制审批单使用次数的需求,这个参数传1(次数不限制),同时times_total和times_used都传0即可 # 审批单已用次数 # 审批单标题 # 发起审批的第三方员工ID # suiteKey # account # tokenGrantType # 用车截止时间,按天管控,比如date传值2021-03-18 20:26:56、finished_date传值2021-03-30 20:26:56表示2021-03-18(含)到2021-03-30(含)之间可用车,该参数不传值情况使用date作为用车截止时间; # 商旅内部审批单ID # 第三方企业ID # 类目:机酒火车 1:机票; 2:酒店; 4:用车 6:商旅火车票 # 每页数据量,默认100,最高500 # 记账更新开始日期 # 页数,从1开始 # 记账更新结束日期 # 提前预定天数 # 航司三字码 # 航司名称 # 交易流水号 # 审批单号 # 到达机场二字码 # 到达城市 # 到达日期 # 到达机场 # 到达时间 # 预定时间 # 预订人use id # 预订人名称 # 商旅优惠金额 # 基建费 # 舱位 # 舱位码 # 资金方向 # 级联部门 # 改签费用 # 订单金额 # 成本中心名称 # 成本中心编号 # 优惠券 # 起飞机场二字码 # 末级部门 # 部门id # 起飞城市 # 起飞日期 # 起飞机场 # 起飞时间 # 折扣率 # 费用类型 # 航班号 # 序号 # 保险费 # 发票抬头 # 行程单打印序号 # 行程单金额 # 低价提醒(起飞时间) # 低价提醒(折扣) # 低价提醒(航班号) # 低价提醒(与最低价差额) # 不选低价原因 # 低价航班价格 # 协议价优惠金额 # 燃油费 # 订单号 # 超标审批单号 # 主键id # 项目代码 # 项目名称 # 退款手续费 # 改签退票手续费 # 是否重复退 # 销售价 # 服务费,仅在feeType 11001、11002中展示 # 结算金额 # 结算时间 # 结算类型 # 入账状态 # 行程单号 # 出行人use id # 出行人名称 # 改签差价 # 预订人工号 # 出行人工号 # 类目 # 企业id # 数据集合 # 记账更新开始日期 # 记账更新结束日期 # 总数据量 # 结果msg # module # 是否成功 # 结果code # 第三方企业id # 商旅超标审批单id # 入住日期 # 离店日期 # 入住城市三字码 # 入住城市名称 # 意向酒店金额(分) # 是否合住 # 超标类型,32:金额超标 # 第三方企业id # 商旅超标审批单id # 审批单状态 0:审批中 1:已同意 2:已拒绝 # 出差原因 # 超标类型,32:金额超标 # 超标原因 
# 原差旅标准 # 审批单提交时间 # 第三方用户id # 意向出行信息 # 第三方出差审批单号 # 第三方企业id # 第三方申请单id # 关联单号 # 订单id # 订单状态:0待支付,1出票中,2已关闭,3有改签单,4有退票单,5出票成功,6退票申请中,7改签申请中 # 火车订单号 # 订单状态:0待支付,1出票中,2已关闭,3,改签成功,4退票成功,5出票完成,6退票申请中,7改签申请中,8已出票,已发货,9出票失败,10改签失败,11退票失败 # 酒店订单号 # 订单状态1:等待确认,2:等待付款,3:预订成功,4:申请退款,5:退款成功,6:已关闭,7:结账成功,8:支付成功 # 用车订单号 # 订单状态:0:初始状态,1:已超时,2:派单成功,3:派单失败,4:已退款,5:已支付,6:已取消 # 飞机订单信息 # 企业id # 火车订单信息 # 酒店订单信息 # 用车订单信息 # 第三方企业ID # 审批单创建时间小于值 # 审批单创建时间大于等于值 # 页码,要求大于等于1,默认1 # 每页数据量,要求大于等于1,默认20 # 三方审批单ID # 第三方员工ID # 审批备注 # 审批时间 # 审批人排序值 # 审批状态枚举:审批状态:0-审批中,1-已同意,2-已拒绝 # 审批状态描述 # 审批员工ID # 审批员工名 # 目的地城市 # 目的地城市三字码 # 到达目的地城市时间 # 商旅内部成本中心ID # 成本中心名称 # 出发城市 # 出发城市三字码 # 出发时间 # 商旅内部发票抬头ID # 发票抬头名称 # 商旅内部行程单ID # 项目code # 项目名称 # 交通方式:4-市内交通 # 审批单列表 # 员工所在部门ID # 员工所在部门名 # 创建时间 # 最近修改时间 # 审批单关联的行程 # 审批单状态:0-申请,1-同意,2-拒绝 # 审批单状态:0-申请,1-同意,2-拒绝 # 三方审批单ID # 申请事由 # 审批单标题 # 发起审批员工ID # 发起审批员工名 # 审批单列表 # 总数 # 第三方企业id # 商旅超标审批单id # 意向坐席价格(分) # 出发城市名 # 到达城市名 # 出发城市三字码 # 到达城市三字码 # 出发时间 # 到达时间 # 到达站点名称 # 出发站点名称 # 意向车次号 # 意向车次类型 # 意向坐席名称 # 第三方企业id # 商旅超标审批单id # 审批单状态 0:审批中 1:已同意 2:已拒绝 # 出差原因 # 超标类型,32:坐席超标 # 超标原因 # 原差旅标准 # 审批单提交时间 # 第三方用户id # 意向出行信息 # 第三方出差审批单号 | 2.130276 | 2 |
discovery-provider/src/solana/solana_client_manager.py | Tenderize/audius-protocol | 0 | 6633262 | <filename>discovery-provider/src/solana/solana_client_manager.py
import logging
import random
import signal
import time
from contextlib import contextmanager
from typing import Optional, Union
from solana.account import Account
from solana.publickey import PublicKey
from solana.rpc.api import Client
from src.solana.solana_transaction_types import ConfirmedSignatureForAddressResponse
logger = logging.getLogger(__name__)
# maximum number of times to retry get_confirmed_transaction call
DEFAULT_MAX_RETRIES = 5
# number of seconds to wait between calls to get_confirmed_transaction
DELAY_SECONDS = 0.2
class SolanaClientManager:
def __init__(self, solana_endpoints) -> None:
self.endpoints = solana_endpoints.split(",")
self.clients = [Client(endpoint) for endpoint in self.endpoints]
def get_client(self, randomize=False) -> Client:
if not self.clients:
raise Exception(
"solana_client_manager.py | get_client | There are no solana clients"
)
if not randomize:
return self.clients[0]
index = random.randrange(0, len(self.clients))
return self.clients[index]
def get_sol_tx_info(self, tx_sig: str, retries=DEFAULT_MAX_RETRIES):
"""Fetches a solana transaction by signature with retries and a delay."""
def handle_get_sol_tx_info(client, index):
endpoint = self.endpoints[index]
num_retries = retries
while num_retries > 0:
try:
logger.info(
f"solana_client_manager.py | get_sol_tx_info | Fetching tx {tx_sig} {endpoint}"
)
tx_info = client.get_confirmed_transaction(tx_sig)
logger.info(
f"solana_client_manager.py | get_sol_tx_info | Finished fetching tx {tx_sig} {endpoint}"
)
if tx_info["result"] is not None:
return tx_info
except Exception as e:
logger.error(
f"solana_client_manager.py | get_sol_tx_info | \
Error fetching tx {tx_sig} from endpoint {endpoint}, {e}",
exc_info=True,
)
num_retries -= 1
time.sleep(DELAY_SECONDS)
logger.error(
f"solana_client_manager.py | get_sol_tx_info | Retrying tx fetch: {tx_sig} with endpoint {endpoint}"
)
raise Exception(
f"solana_client_manager.py | get_sol_tx_info | Failed to fetch {tx_sig} with endpoint {endpoint}"
)
return _try_all(
self.clients,
handle_get_sol_tx_info,
f"solana_client_manager.py | get_sol_tx_info | All requests failed to fetch {tx_sig}",
)
def get_signatures_for_address(
self,
account: Union[str, Account, PublicKey],
before: Optional[str] = None,
limit: Optional[int] = None,
retries: Optional[int] = DEFAULT_MAX_RETRIES,
):
"""Fetches confirmed signatures for transactions given an address."""
def handle_get_signatures_for_address(client, index):
endpoint = self.endpoints[index]
num_retries = retries
while num_retries > 0:
try:
logger.info(
f"solana_client_manager.py | handle_get_signatures_for_address | Fetching {before} {endpoint}"
)
transactions: ConfirmedSignatureForAddressResponse = (
client.get_signatures_for_address(account, before, limit)
)
logger.info(
f"solana_client_manager.py | handle_get_signatures_for_address | Finished fetching {before} {endpoint}"
)
return transactions
except Exception as e:
logger.error(
f"solana_client_manager.py | handle_get_signatures_for_address | \
Error fetching account {account} from endpoint {endpoint}, {e}",
exc_info=True,
)
num_retries -= 1
time.sleep(DELAY_SECONDS)
logger.error(
f"solana_client_manager.py | handle_get_signatures_for_address | Retrying account fetch: {account} with endpoint {endpoint}"
)
raise Exception(
f"solana_client_manager.py | handle_get_signatures_for_address | Failed to fetch account {account} with endpoint {endpoint}"
)
return _try_all_with_timeout(
self.clients,
handle_get_signatures_for_address,
"solana_client_manager.py | get_signatures_for_address | All requests failed",
)
@contextmanager
def timeout(time):
# Register a function to raise a TimeoutError on the signal.
signal.signal(signal.SIGALRM, raise_timeout)
# Schedule the signal to be sent after ``time``.
signal.alarm(time)
try:
yield
except TimeoutError: # pylint: disable=W0706
raise
finally:
# Unregister the signal so it won't be triggered
# if the timeout is not reached.
signal.signal(signal.SIGALRM, signal.SIG_IGN)
def raise_timeout(signum, frame):
raise TimeoutError
def _try_all(iterable, func, message, randomize=False):
"""Executes a function with retries across the iterable.
If all executions fail, raise an exception."""
items = list(enumerate(iterable))
items = items if not randomize else random.sample(items, k=len(items))
for index, value in items:
try:
return func(value, index)
except Exception:
logger.error(
f"solana_client_manager.py | _try_all | Failed attempt at index {index} for function {func}"
)
if index < len(items) - 1:
logger.info("solana_client_manager.py | _try_all | Retrying")
continue
raise Exception(message)
def _try_all_with_timeout(iterable, func, message, randomize=False):
"""Do not use this function with ThreadPoolExecutor,
doesn't play well with futures
Executes a function with retries across the iterable.
If all executions fail, raise an exception."""
items = list(enumerate(iterable))
items = items if not randomize else random.sample(items, k=len(items))
for index, value in items:
try:
with timeout(30):
return func(value, index)
except Exception:
logger.error(
f"solana_client_manager.py | _try_all | Failed attempt at index {index} for function {func}"
)
if index < len(items) - 1:
logger.info("solana_client_manager.py | _try_all | Retrying")
continue
raise Exception(message)
| <filename>discovery-provider/src/solana/solana_client_manager.py
import logging
import random
import signal
import time
from contextlib import contextmanager
from typing import Optional, Union
from solana.account import Account
from solana.publickey import PublicKey
from solana.rpc.api import Client
from src.solana.solana_transaction_types import ConfirmedSignatureForAddressResponse
logger = logging.getLogger(__name__)
# maximum number of times to retry get_confirmed_transaction call
DEFAULT_MAX_RETRIES = 5
# number of seconds to wait between calls to get_confirmed_transaction
DELAY_SECONDS = 0.2
class SolanaClientManager:
def __init__(self, solana_endpoints) -> None:
self.endpoints = solana_endpoints.split(",")
self.clients = [Client(endpoint) for endpoint in self.endpoints]
def get_client(self, randomize=False) -> Client:
if not self.clients:
raise Exception(
"solana_client_manager.py | get_client | There are no solana clients"
)
if not randomize:
return self.clients[0]
index = random.randrange(0, len(self.clients))
return self.clients[index]
def get_sol_tx_info(self, tx_sig: str, retries=DEFAULT_MAX_RETRIES):
"""Fetches a solana transaction by signature with retries and a delay."""
def handle_get_sol_tx_info(client, index):
endpoint = self.endpoints[index]
num_retries = retries
while num_retries > 0:
try:
logger.info(
f"solana_client_manager.py | get_sol_tx_info | Fetching tx {tx_sig} {endpoint}"
)
tx_info = client.get_confirmed_transaction(tx_sig)
logger.info(
f"solana_client_manager.py | get_sol_tx_info | Finished fetching tx {tx_sig} {endpoint}"
)
if tx_info["result"] is not None:
return tx_info
except Exception as e:
logger.error(
f"solana_client_manager.py | get_sol_tx_info | \
Error fetching tx {tx_sig} from endpoint {endpoint}, {e}",
exc_info=True,
)
num_retries -= 1
time.sleep(DELAY_SECONDS)
logger.error(
f"solana_client_manager.py | get_sol_tx_info | Retrying tx fetch: {tx_sig} with endpoint {endpoint}"
)
raise Exception(
f"solana_client_manager.py | get_sol_tx_info | Failed to fetch {tx_sig} with endpoint {endpoint}"
)
return _try_all(
self.clients,
handle_get_sol_tx_info,
f"solana_client_manager.py | get_sol_tx_info | All requests failed to fetch {tx_sig}",
)
def get_signatures_for_address(
self,
account: Union[str, Account, PublicKey],
before: Optional[str] = None,
limit: Optional[int] = None,
retries: Optional[int] = DEFAULT_MAX_RETRIES,
):
"""Fetches confirmed signatures for transactions given an address."""
def handle_get_signatures_for_address(client, index):
endpoint = self.endpoints[index]
num_retries = retries
while num_retries > 0:
try:
logger.info(
f"solana_client_manager.py | handle_get_signatures_for_address | Fetching {before} {endpoint}"
)
transactions: ConfirmedSignatureForAddressResponse = (
client.get_signatures_for_address(account, before, limit)
)
logger.info(
f"solana_client_manager.py | handle_get_signatures_for_address | Finished fetching {before} {endpoint}"
)
return transactions
except Exception as e:
logger.error(
f"solana_client_manager.py | handle_get_signatures_for_address | \
Error fetching account {account} from endpoint {endpoint}, {e}",
exc_info=True,
)
num_retries -= 1
time.sleep(DELAY_SECONDS)
logger.error(
f"solana_client_manager.py | handle_get_signatures_for_address | Retrying account fetch: {account} with endpoint {endpoint}"
)
raise Exception(
f"solana_client_manager.py | handle_get_signatures_for_address | Failed to fetch account {account} with endpoint {endpoint}"
)
return _try_all_with_timeout(
self.clients,
handle_get_signatures_for_address,
"solana_client_manager.py | get_signatures_for_address | All requests failed",
)
@contextmanager
def timeout(time):
# Register a function to raise a TimeoutError on the signal.
signal.signal(signal.SIGALRM, raise_timeout)
# Schedule the signal to be sent after ``time``.
signal.alarm(time)
try:
yield
except TimeoutError: # pylint: disable=W0706
raise
finally:
# Unregister the signal so it won't be triggered
# if the timeout is not reached.
signal.signal(signal.SIGALRM, signal.SIG_IGN)
def raise_timeout(signum, frame):
raise TimeoutError
def _try_all(iterable, func, message, randomize=False):
"""Executes a function with retries across the iterable.
If all executions fail, raise an exception."""
items = list(enumerate(iterable))
items = items if not randomize else random.sample(items, k=len(items))
for index, value in items:
try:
return func(value, index)
except Exception:
logger.error(
f"solana_client_manager.py | _try_all | Failed attempt at index {index} for function {func}"
)
if index < len(items) - 1:
logger.info("solana_client_manager.py | _try_all | Retrying")
continue
raise Exception(message)
def _try_all_with_timeout(iterable, func, message, randomize=False):
"""Do not use this function with ThreadPoolExecutor,
doesn't play well with futures
Executes a function with retries across the iterable.
If all executions fail, raise an exception."""
items = list(enumerate(iterable))
items = items if not randomize else random.sample(items, k=len(items))
for index, value in items:
try:
with timeout(30):
return func(value, index)
except Exception:
logger.error(
f"solana_client_manager.py | _try_all | Failed attempt at index {index} for function {func}"
)
if index < len(items) - 1:
logger.info("solana_client_manager.py | _try_all | Retrying")
continue
raise Exception(message)
| en | 0.8351 | # maximum number of times to retry get_confirmed_transaction call # number of seconds to wait between calls to get_confirmed_transaction Fetches a solana transaction by signature with retries and a delay. Fetches confirmed signatures for transactions given an address. # Register a function to raise a TimeoutError on the signal. # Schedule the signal to be sent after ``time``. # pylint: disable=W0706 # Unregister the signal so it won't be triggered # if the timeout is not reached. Executes a function with retries across the iterable. If all executions fail, raise an exception. Do not use this function with ThreadPoolExecutor, doesn't play well with futures Executes a function with retries across the iterable. If all executions fail, raise an exception. | 2.344198 | 2 |
pysces/__init__.py | bgoli/pysces | 0 | 6633263 | <reponame>bgoli/pysces
"""
PySCeS - Python Simulator for Cellular Systems (http://pysces.sourceforge.net)
Copyright (C) 2004-2020 <NAME>, <NAME>, <NAME> all rights reserved,
<NAME> (<EMAIL>)
Triple-J Group for Molecular Cell Physiology
Stellenbosch University, South Africa.
Permission to use, modify, and distribute this software is given under the
terms of the PySceS (BSD style) license. See LICENSE.txt that came with
this distribution for specifics.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
<NAME>
"""
from __future__ import division, print_function
from __future__ import absolute_import
from __future__ import unicode_literals
from .version import __version__
__doc__ = """
PySCeS: the Python Simulator for Cellular Systems
-------------------------------------------------
PySCeS is developed by the Triple-J Group for Molecular Cell Physiology
in order to try model and understand the complex processes and systems
which make up the living cell. PySCeS features, amongst other things:
- A text based Model Description Language.
- A structural analysis module.
- Integrators for time simulation
- Non-linear solvers for steady-state analysis
- A module for performing Metabolic Control Analysis
- A bifurcation module for systems which exhibit multiple steady states
- A variety of extra utilites for parameter scans, data output and plotting.
- A dynamic module loading framework.
- SBML import and export capability.
"""
import os, time
from pkg_resources import get_build_platform
from . import PyscesConfig
from . import PyscesParse
from . import PyscesLink as link
from . import codeutil
from . import PyscesSED as SED
from .PyscesUtils import str2bool
from .PyscesModelMap import ModelMap
# TODO get rid unused imports
from .PyscesWeb import PyscesHTML
html = PyscesHTML()
DEBUG = False
inipath = None
lpath = None
install_dir = None
output_dir = None
model_dir = None
pitcon_switch = False
nleq2_switch = False
__USE_MATPLOTLIB__ = True
__MATPLOTLIB_BACKEND__ = 'TkAgg'
__USE_GNUPLOT__ = False
__SILENT_START__ = False
extra_dll_dir = os.path.join(os.path.dirname(__file__), '.libs')
if os.sys.platform == 'win32' and os.path.isdir(extra_dll_dir):
os.environ["PATH"] += os.pathsep + extra_dll_dir
if hasattr(os, 'add_dll_directory'):
os.add_dll_directory(extra_dll_dir)
if os.sys.platform == 'win32':
__PyscesConfigDefault = PyscesConfig.__DefaultWin
else:
__PyscesConfigDefault = PyscesConfig.__DefaultPosix
if DEBUG:
print(time.strftime('1-%H:%M:%S'))
eggdir = 'pysces-%s-py%s.%s-%s.egg' % (
__version__,
os.sys.version_info[0],
os.sys.version_info[1],
get_build_platform(),
)
for path in os.sys.path:
chkPath = path.split(os.path.sep)[-1]
if chkPath == 'pysces' and path != os.getcwd():
if os.path.isdir(os.path.join(path, 'pysces')):
# for in-place development with setup.py develop
install_dir = os.path.join(path, 'pysces')
else:
install_dir = path
inipath = os.path.join(install_dir, 'pyscfg.ini')
break
elif chkPath == eggdir:
install_dir = os.path.join(path, 'pysces')
inipath = os.path.join(install_dir, 'pyscfg.ini')
break
if inipath == None:
for k in os.sys.path_importer_cache:
if k.split(os.path.sep)[-1] == 'pysces':
install_dir = k
inipath = os.path.join(install_dir, 'pyscfg.ini')
break
elif k.split(os.path.sep)[-1] == eggdir:
install_dir = os.path.join(k, 'pysces')
inipath = os.path.join(install_dir, 'pyscfg.ini')
break
del eggdir
if DEBUG:
print(time.strftime('2-%H:%M:%S'))
try:
__config_dict = PyscesConfig.ReadConfig(inipath, config=__PyscesConfigDefault)
except Exception as ex:
print(ex)
print('Cwd', os.getcwd())
print('\nWARNING: Cannot read pyscfg.ini using default values\n')
__config_dict = __PyscesConfigDefault
# Read config
for key in __config_dict:
if key == 'pitcon':
pitcon_switch = str2bool(__config_dict[key])
elif key == 'nleq2':
nleq2_switch = str2bool(__config_dict[key])
elif key == 'matplotlib':
__USE_MATPLOTLIB__ = str2bool(__config_dict[key])
elif key == 'matplotlib_backend':
__MATPLOTLIB_BACKEND__ = __config_dict[key]
elif key == 'gnuplot':
__USE_GNUPLOT__ = str2bool(__config_dict[key])
elif key == 'gnuplot_dir':
GNUPLOT_DIR = __config_dict[key]
if GNUPLOT_DIR == 'None':
GNUPLOT_DIR = None
elif key == 'silentstart':
__SILENT_START__ = str2bool(__config_dict[key])
elif key == 'change_dir_on_start':
__CHGDIR_ON_START__ = str2bool(__config_dict[key])
assert inipath != None, '\nNo configuration file found'
if DEBUG:
print(time.strftime('3-%H:%M:%S'))
__userdict = None
if os.sys.platform != 'win32':
if os.path.exists(
os.path.join(os.path.expanduser('~'), 'Pysces', '.pys_usercfg.ini')
):
__userdict = PyscesConfig.ReadConfig(
os.path.join(os.path.expanduser('~'), 'Pysces', '.pys_usercfg.ini'),
PyscesConfig.__DefaultPosixUsr,
)
else:
if not os.path.exists(os.path.join(os.path.expanduser('~'), 'Pysces')):
os.makedirs(os.path.join(os.path.expanduser('~'), 'Pysces'))
PyscesConfig.WriteConfig(
os.path.join(os.path.expanduser('~'), 'Pysces', '.pys_usercfg.ini'),
config=PyscesConfig.__DefaultPosixUsr,
section='Pysces',
)
__userdict = PyscesConfig.ReadConfig(
os.path.join(os.path.expanduser('~'), 'Pysces', '.pys_usercfg.ini'),
PyscesConfig.__DefaultPosixUsr,
)
else:
if os.path.exists(
os.path.join(os.getenv('HOMEDRIVE') + os.path.sep, 'Pysces', '.pys_usercfg.ini')
):
__userdict = PyscesConfig.ReadConfig(
os.path.join(
os.getenv('HOMEDRIVE') + os.path.sep, 'Pysces', '.pys_usercfg.ini'
),
PyscesConfig.__DefaultWinUsr,
)
elif os.path.exists(
os.path.join(os.getenv('USERPROFILE'), 'Pysces', '.pys_usercfg.ini')
):
__userdict = PyscesConfig.ReadConfig(
os.path.join(os.getenv('USERPROFILE'), 'Pysces', '.pys_usercfg.ini'),
PyscesConfig.__DefaultWinUsr,
)
else:
if not os.path.exists(os.path.join(os.getenv('USERPROFILE'), 'Pysces')):
os.makedirs(os.path.join(os.getenv('USERPROFILE'), 'Pysces'))
PyscesConfig.WriteConfig(
os.path.join(os.getenv('USERPROFILE'), 'Pysces', '.pys_usercfg.ini'),
config=PyscesConfig.__DefaultWinUsr,
section='Pysces',
)
__userdict = PyscesConfig.ReadConfig(
os.path.join(os.getenv('USERPROFILE'), 'Pysces', '.pys_usercfg.ini'),
PyscesConfig.__DefaultWinUsr,
)
for key in __userdict:
if key == 'output_dir':
output_dir = __userdict[key]
if not os.path.exists(__userdict[key]):
os.makedirs(__userdict[key])
elif key == 'model_dir':
model_dir = __userdict[key]
if not os.path.exists(__userdict[key]):
os.makedirs(__userdict[key])
elif key == 'matplotlib':
__USE_MATPLOTLIB__ = str2bool(__userdict[key])
elif key == 'matplotlib_backend':
__MATPLOTLIB_BACKEND__ = __userdict[key]
elif key == 'gnuplot':
__USE_GNUPLOT__ = str2bool(__userdict[key])
elif key == 'gnuplot_dir':
GNUPLOT_DIR = __userdict[key]
if GNUPLOT_DIR == 'None':
GNUPLOT_DIR = None
elif key == 'silentstart':
__SILENT_START__ = str2bool(__userdict[key])
elif key == 'change_dir_on_start':
__CHGDIR_ON_START__ = str2bool(__userdict[key])
assert output_dir != None, '\nNo output directory defined'
assert model_dir != None, '\nNo model directory defined'
# following is to get the full path when .pys_usercfg.ini specifies CWD as follows:
# output_dir = ./
backup_dir = os.getcwd()
os.chdir(output_dir)
output_dir = os.getcwd()
os.chdir(backup_dir)
os.chdir(model_dir)
model_dir = os.getcwd()
os.chdir(backup_dir)
del PyscesConfig
if DEBUG:
print(time.strftime('4-%H:%M:%S'))
# initialise pysces.interface.*
try:
from . import PyscesInterfaces
interface = PyscesInterfaces.Core2interfaces()
except Exception as ex:
print('INFO: pysces.interface.* not available')
print(ex)
interface = None
# initialise pysces.plt.*
from . import PyscesPlot2
gplt = None
mplt = None
if __USE_MATPLOTLIB__:
try:
mplt = PyscesPlot2.MatplotlibUPI(
work_dir=output_dir, backend=__MATPLOTLIB_BACKEND__
)
if not __SILENT_START__:
print('Matplotlib interface loaded (pysces.plt.m)')
except Exception as ex:
print('Matplotlib interface not available')
if DEBUG:
print(ex)
__USE_MATPLOTLIB__ = False
if __USE_GNUPLOT__:
if GNUPLOT_DIR == None or not os.path.exists(GNUPLOT_DIR):
print(
'''GnuPlot has been enabled but the path to the executable has
not been defined (or does not exist). Please set the "gnuplot_dir" key
in your pyscfg.ini file.
'''
)
else:
try:
if DEBUG:
print(GNUPLOT_DIR)
gplt = PyscesPlot2.GnuPlotUPI(work_dir=output_dir, gnuplot_dir=GNUPLOT_DIR)
if not __SILENT_START__:
print('GnuPlot interface loaded (pysces.plt.g)')
except Exception as ex:
print('GnuPlot interface not available')
if DEBUG:
print(ex)
__USE_GNUPLOT__ = False
plt = None
if __USE_MATPLOTLIB__ or __USE_GNUPLOT__:
plt = PyscesPlot2.PyscesUPI()
if __USE_MATPLOTLIB__ and not __USE_GNUPLOT__:
plt.p_setInterface('matplotlib', mplt)
elif __USE_GNUPLOT__ and not __USE_MATPLOTLIB__:
plt.p_setInterface('gnuplot', gplt)
elif __USE_GNUPLOT__ and __USE_MATPLOTLIB__:
plt.p_setInterface('matplotlib', mplt)
plt.p_setInterface('gnuplot', gplt)
plt.p_deactivateInterface('gnuplot')
if DEBUG:
print(time.strftime('5-%H:%M:%S'))
alt_import = False
alt_import_pitcon = False
alt_import_nleq2 = False
if os.sys.platform == 'win32':
if pitcon_switch:
os.sys.path.append(os.path.join(install_dir, 'pitcon'))
try:
from .pitcon import pitcon as pitcon
if not __SILENT_START__:
print('Continuation routines available')
except Exception as ex:
try:
os.environ['path'] = '%s;%s' % (
os.path.join(install_dir, 'win32'),
os.environ['path'],
)
from .pitcon import pitcon as pitcon
if not __SILENT_START__:
print('Continuation routines available')
except Exception as ex:
# print 'Attempting alternate pitcon import ...'
# alt_import = True
# alt_import_pitcon = True
print(ex)
print('INFO: Pitcon import failed: continuation not available')
if nleq2_switch:
os.sys.path.append(os.path.join(install_dir, 'nleq2'))
try:
from .nleq2 import nleq2 as nleq2
if not __SILENT_START__:
print('NLEQ2 routines available')
except Exception as ex:
try:
os.environ['path'] = '%s;%s' % (
os.path.join(install_dir, 'win32'),
os.environ['path'],
)
from .nleq2 import nleq2 as nleq2
if not __SILENT_START__:
print('NLEQ2 routines available')
except Exception as ex:
# print 'Attempting alternate nleq2 import ...'
# alt_import = True
# alt_import_nleq2 = True
print(ex)
print('INFO: NLEQ2 import failed: option not available')
else:
if pitcon_switch:
os.sys.path.append(os.path.join(install_dir, 'pitcon'))
try:
from .pitcon import pitcon as pitcon
if not __SILENT_START__:
print('Pitcon routines available')
except Exception as ex:
# print ex
alt_import = True
alt_import_pitcon = True
print('Attempting alternate pitcon import ...')
if nleq2_switch:
os.sys.path.append(os.path.join(install_dir, 'nleq2'))
try:
from .nleq2 import nleq2 as nleq2
if not __SILENT_START__:
print('NLEQ2 routines available')
except Exception as ex:
# print ex
alt_import = True
alt_import_nleq2 = True
print('Attempting alternate nleq2 import ...')
if DEBUG:
print(time.strftime('6-%H:%M:%S'))
if alt_import:
savedir = os.getcwd()
for tpath in os.sys.path:
if alt_import_pitcon:
try:
if (
os.path.exists(os.path.join(tpath, 'pysces', 'pitcon'))
and tpath != ''
):
os.chdir(os.path.join(tpath, 'pysces', 'pitcon'))
from . import pitcon
if not __SILENT_START__:
print('Continuation routines available (A)')
except Exception as ex:
print(ex)
print(
'INFO: Alternate pitcon import failed: continuation not available'
)
if alt_import_nleq2:
try:
if (
os.path.exists(os.path.join(tpath, 'pysces', 'nleq2'))
and tpath != ''
):
os.chdir(os.path.join(tpath, 'pysces', 'nleq2'))
from . import nleq2
if not __SILENT_START__:
print('NLEQ2 routines available (A)')
except Exception as ex:
print(ex)
nleq2_switch = False
print('INFO: Alternate NLEQ2 import failed: option not available')
os.chdir(savedir)
if DEBUG:
print(time.strftime('7-%H:%M:%S'))
# check for libsbml
try:
import libsbml as SBML
if not __SILENT_START__:
print("SBML support available")
except ImportError as ex:
SBML = None
print(ex)
print("INFO: No SBML library found, SBML support not available")
# This has to come at the end
from .PyscesModel import PysMod as model
from .PyscesModel import ScanDataObj as ScanDataObj
PyscesModel.interface = interface
from .PyscesTest import PyscesTest as test
write = None
try:
from .PyscesUtils import WriteOutput
write = WriteOutput()
del WriteOutput
except ImportError as ex:
pass
from .PyscesScan import PITCONScanUtils, Scanner
try:
from .RateChar import RateChar
if not __SILENT_START__:
print("RateChar is available")
except Exception as ex:
RateChar = None
# print "RateChar not available"
# ParScanner import
try:
from .PyscesParScan import ParScanner
if not __SILENT_START__:
print("Parallel scanner is available")
except ImportError as ex:
ParScanner = None
print(ex)
print("INFO: Parallel scanner not available")
if DEBUG:
print(time.strftime('9-%H:%M:%S'))
if not __SILENT_START__:
print('\nPySCeS environment\n******************')
print('pysces.model_dir = ' + model_dir)
print('pysces.output_dir = ' + output_dir)
print('\n\n***********************************************************************')
print(
'* Welcome to PySCeS ('
+ __version__
+ ') - Python Simulator for Cellular Systems *'
)
print('* http://pysces.sourceforge.net *')
## print '* Somewhere In Time *'
print('* Copyright(C) <NAME>, <NAME>, <NAME>, 2004-2020 *')
print('* Triple-J Group for Molecular Cell Physiology *')
print('* Stellenbosch University, ZA and VU University Amsterdam, NL *')
print('* PySCeS is distributed under the PySCeS (BSD style) licence, see *')
print('* LICENCE.txt (supplied with this release) for details *')
## print '* ** Read about PySCeS ** *'
print('* Please cite PySCeS with: doi:10.1093/bioinformatics/bti046 *')
print('***********************************************************************')
try:
del os, key, gplt, mplt
except Exception as ex:
print(ex)
print('\n\nOops I did it again error ...\n\n')
if DEBUG:
print(time.strftime('10-%H:%M:%S'))
| """
PySCeS - Python Simulator for Cellular Systems (http://pysces.sourceforge.net)
Copyright (C) 2004-2020 <NAME>, <NAME>, <NAME> all rights reserved,
<NAME> (<EMAIL>)
Triple-J Group for Molecular Cell Physiology
Stellenbosch University, South Africa.
Permission to use, modify, and distribute this software is given under the
terms of the PySceS (BSD style) license. See LICENSE.txt that came with
this distribution for specifics.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
<NAME>
"""
from __future__ import division, print_function
from __future__ import absolute_import
from __future__ import unicode_literals
from .version import __version__
__doc__ = """
PySCeS: the Python Simulator for Cellular Systems
-------------------------------------------------
PySCeS is developed by the Triple-J Group for Molecular Cell Physiology
in order to try model and understand the complex processes and systems
which make up the living cell. PySCeS features, amongst other things:
- A text based Model Description Language.
- A structural analysis module.
- Integrators for time simulation
- Non-linear solvers for steady-state analysis
- A module for performing Metabolic Control Analysis
- A bifurcation module for systems which exhibit multiple steady states
- A variety of extra utilites for parameter scans, data output and plotting.
- A dynamic module loading framework.
- SBML import and export capability.
"""
import os, time
from pkg_resources import get_build_platform
from . import PyscesConfig
from . import PyscesParse
from . import PyscesLink as link
from . import codeutil
from . import PyscesSED as SED
from .PyscesUtils import str2bool
from .PyscesModelMap import ModelMap
# TODO get rid unused imports
from .PyscesWeb import PyscesHTML
html = PyscesHTML()
DEBUG = False
inipath = None
lpath = None
install_dir = None
output_dir = None
model_dir = None
pitcon_switch = False
nleq2_switch = False
__USE_MATPLOTLIB__ = True
__MATPLOTLIB_BACKEND__ = 'TkAgg'
__USE_GNUPLOT__ = False
__SILENT_START__ = False
extra_dll_dir = os.path.join(os.path.dirname(__file__), '.libs')
if os.sys.platform == 'win32' and os.path.isdir(extra_dll_dir):
os.environ["PATH"] += os.pathsep + extra_dll_dir
if hasattr(os, 'add_dll_directory'):
os.add_dll_directory(extra_dll_dir)
if os.sys.platform == 'win32':
__PyscesConfigDefault = PyscesConfig.__DefaultWin
else:
__PyscesConfigDefault = PyscesConfig.__DefaultPosix
if DEBUG:
print(time.strftime('1-%H:%M:%S'))
eggdir = 'pysces-%s-py%s.%s-%s.egg' % (
__version__,
os.sys.version_info[0],
os.sys.version_info[1],
get_build_platform(),
)
for path in os.sys.path:
chkPath = path.split(os.path.sep)[-1]
if chkPath == 'pysces' and path != os.getcwd():
if os.path.isdir(os.path.join(path, 'pysces')):
# for in-place development with setup.py develop
install_dir = os.path.join(path, 'pysces')
else:
install_dir = path
inipath = os.path.join(install_dir, 'pyscfg.ini')
break
elif chkPath == eggdir:
install_dir = os.path.join(path, 'pysces')
inipath = os.path.join(install_dir, 'pyscfg.ini')
break
if inipath == None:
for k in os.sys.path_importer_cache:
if k.split(os.path.sep)[-1] == 'pysces':
install_dir = k
inipath = os.path.join(install_dir, 'pyscfg.ini')
break
elif k.split(os.path.sep)[-1] == eggdir:
install_dir = os.path.join(k, 'pysces')
inipath = os.path.join(install_dir, 'pyscfg.ini')
break
del eggdir
if DEBUG:
print(time.strftime('2-%H:%M:%S'))
try:
__config_dict = PyscesConfig.ReadConfig(inipath, config=__PyscesConfigDefault)
except Exception as ex:
print(ex)
print('Cwd', os.getcwd())
print('\nWARNING: Cannot read pyscfg.ini using default values\n')
__config_dict = __PyscesConfigDefault
# Read config
for key in __config_dict:
if key == 'pitcon':
pitcon_switch = str2bool(__config_dict[key])
elif key == 'nleq2':
nleq2_switch = str2bool(__config_dict[key])
elif key == 'matplotlib':
__USE_MATPLOTLIB__ = str2bool(__config_dict[key])
elif key == 'matplotlib_backend':
__MATPLOTLIB_BACKEND__ = __config_dict[key]
elif key == 'gnuplot':
__USE_GNUPLOT__ = str2bool(__config_dict[key])
elif key == 'gnuplot_dir':
GNUPLOT_DIR = __config_dict[key]
if GNUPLOT_DIR == 'None':
GNUPLOT_DIR = None
elif key == 'silentstart':
__SILENT_START__ = str2bool(__config_dict[key])
elif key == 'change_dir_on_start':
__CHGDIR_ON_START__ = str2bool(__config_dict[key])
assert inipath != None, '\nNo configuration file found'
if DEBUG:
print(time.strftime('3-%H:%M:%S'))
__userdict = None
if os.sys.platform != 'win32':
if os.path.exists(
os.path.join(os.path.expanduser('~'), 'Pysces', '.pys_usercfg.ini')
):
__userdict = PyscesConfig.ReadConfig(
os.path.join(os.path.expanduser('~'), 'Pysces', '.pys_usercfg.ini'),
PyscesConfig.__DefaultPosixUsr,
)
else:
if not os.path.exists(os.path.join(os.path.expanduser('~'), 'Pysces')):
os.makedirs(os.path.join(os.path.expanduser('~'), 'Pysces'))
PyscesConfig.WriteConfig(
os.path.join(os.path.expanduser('~'), 'Pysces', '.pys_usercfg.ini'),
config=PyscesConfig.__DefaultPosixUsr,
section='Pysces',
)
__userdict = PyscesConfig.ReadConfig(
os.path.join(os.path.expanduser('~'), 'Pysces', '.pys_usercfg.ini'),
PyscesConfig.__DefaultPosixUsr,
)
else:
if os.path.exists(
os.path.join(os.getenv('HOMEDRIVE') + os.path.sep, 'Pysces', '.pys_usercfg.ini')
):
__userdict = PyscesConfig.ReadConfig(
os.path.join(
os.getenv('HOMEDRIVE') + os.path.sep, 'Pysces', '.pys_usercfg.ini'
),
PyscesConfig.__DefaultWinUsr,
)
elif os.path.exists(
os.path.join(os.getenv('USERPROFILE'), 'Pysces', '.pys_usercfg.ini')
):
__userdict = PyscesConfig.ReadConfig(
os.path.join(os.getenv('USERPROFILE'), 'Pysces', '.pys_usercfg.ini'),
PyscesConfig.__DefaultWinUsr,
)
else:
if not os.path.exists(os.path.join(os.getenv('USERPROFILE'), 'Pysces')):
os.makedirs(os.path.join(os.getenv('USERPROFILE'), 'Pysces'))
PyscesConfig.WriteConfig(
os.path.join(os.getenv('USERPROFILE'), 'Pysces', '.pys_usercfg.ini'),
config=PyscesConfig.__DefaultWinUsr,
section='Pysces',
)
__userdict = PyscesConfig.ReadConfig(
os.path.join(os.getenv('USERPROFILE'), 'Pysces', '.pys_usercfg.ini'),
PyscesConfig.__DefaultWinUsr,
)
for key in __userdict:
if key == 'output_dir':
output_dir = __userdict[key]
if not os.path.exists(__userdict[key]):
os.makedirs(__userdict[key])
elif key == 'model_dir':
model_dir = __userdict[key]
if not os.path.exists(__userdict[key]):
os.makedirs(__userdict[key])
elif key == 'matplotlib':
__USE_MATPLOTLIB__ = str2bool(__userdict[key])
elif key == 'matplotlib_backend':
__MATPLOTLIB_BACKEND__ = __userdict[key]
elif key == 'gnuplot':
__USE_GNUPLOT__ = str2bool(__userdict[key])
elif key == 'gnuplot_dir':
GNUPLOT_DIR = __userdict[key]
if GNUPLOT_DIR == 'None':
GNUPLOT_DIR = None
elif key == 'silentstart':
__SILENT_START__ = str2bool(__userdict[key])
elif key == 'change_dir_on_start':
__CHGDIR_ON_START__ = str2bool(__userdict[key])
assert output_dir != None, '\nNo output directory defined'
assert model_dir != None, '\nNo model directory defined'
# following is to get the full path when .pys_usercfg.ini specifies CWD as follows:
# output_dir = ./
backup_dir = os.getcwd()
os.chdir(output_dir)
output_dir = os.getcwd()
os.chdir(backup_dir)
os.chdir(model_dir)
model_dir = os.getcwd()
os.chdir(backup_dir)
del PyscesConfig
if DEBUG:
print(time.strftime('4-%H:%M:%S'))
# initialise pysces.interface.*
try:
from . import PyscesInterfaces
interface = PyscesInterfaces.Core2interfaces()
except Exception as ex:
print('INFO: pysces.interface.* not available')
print(ex)
interface = None
# initialise pysces.plt.*
from . import PyscesPlot2
gplt = None
mplt = None
if __USE_MATPLOTLIB__:
try:
mplt = PyscesPlot2.MatplotlibUPI(
work_dir=output_dir, backend=__MATPLOTLIB_BACKEND__
)
if not __SILENT_START__:
print('Matplotlib interface loaded (pysces.plt.m)')
except Exception as ex:
print('Matplotlib interface not available')
if DEBUG:
print(ex)
__USE_MATPLOTLIB__ = False
if __USE_GNUPLOT__:
if GNUPLOT_DIR == None or not os.path.exists(GNUPLOT_DIR):
print(
'''GnuPlot has been enabled but the path to the executable has
not been defined (or does not exist). Please set the "gnuplot_dir" key
in your pyscfg.ini file.
'''
)
else:
try:
if DEBUG:
print(GNUPLOT_DIR)
gplt = PyscesPlot2.GnuPlotUPI(work_dir=output_dir, gnuplot_dir=GNUPLOT_DIR)
if not __SILENT_START__:
print('GnuPlot interface loaded (pysces.plt.g)')
except Exception as ex:
print('GnuPlot interface not available')
if DEBUG:
print(ex)
__USE_GNUPLOT__ = False
plt = None
if __USE_MATPLOTLIB__ or __USE_GNUPLOT__:
plt = PyscesPlot2.PyscesUPI()
if __USE_MATPLOTLIB__ and not __USE_GNUPLOT__:
plt.p_setInterface('matplotlib', mplt)
elif __USE_GNUPLOT__ and not __USE_MATPLOTLIB__:
plt.p_setInterface('gnuplot', gplt)
elif __USE_GNUPLOT__ and __USE_MATPLOTLIB__:
plt.p_setInterface('matplotlib', mplt)
plt.p_setInterface('gnuplot', gplt)
plt.p_deactivateInterface('gnuplot')
if DEBUG:
print(time.strftime('5-%H:%M:%S'))
alt_import = False
alt_import_pitcon = False
alt_import_nleq2 = False
if os.sys.platform == 'win32':
if pitcon_switch:
os.sys.path.append(os.path.join(install_dir, 'pitcon'))
try:
from .pitcon import pitcon as pitcon
if not __SILENT_START__:
print('Continuation routines available')
except Exception as ex:
try:
os.environ['path'] = '%s;%s' % (
os.path.join(install_dir, 'win32'),
os.environ['path'],
)
from .pitcon import pitcon as pitcon
if not __SILENT_START__:
print('Continuation routines available')
except Exception as ex:
# print 'Attempting alternate pitcon import ...'
# alt_import = True
# alt_import_pitcon = True
print(ex)
print('INFO: Pitcon import failed: continuation not available')
if nleq2_switch:
os.sys.path.append(os.path.join(install_dir, 'nleq2'))
try:
from .nleq2 import nleq2 as nleq2
if not __SILENT_START__:
print('NLEQ2 routines available')
except Exception as ex:
try:
os.environ['path'] = '%s;%s' % (
os.path.join(install_dir, 'win32'),
os.environ['path'],
)
from .nleq2 import nleq2 as nleq2
if not __SILENT_START__:
print('NLEQ2 routines available')
except Exception as ex:
# print 'Attempting alternate nleq2 import ...'
# alt_import = True
# alt_import_nleq2 = True
print(ex)
print('INFO: NLEQ2 import failed: option not available')
else:
if pitcon_switch:
os.sys.path.append(os.path.join(install_dir, 'pitcon'))
try:
from .pitcon import pitcon as pitcon
if not __SILENT_START__:
print('Pitcon routines available')
except Exception as ex:
# print ex
alt_import = True
alt_import_pitcon = True
print('Attempting alternate pitcon import ...')
if nleq2_switch:
os.sys.path.append(os.path.join(install_dir, 'nleq2'))
try:
from .nleq2 import nleq2 as nleq2
if not __SILENT_START__:
print('NLEQ2 routines available')
except Exception as ex:
# print ex
alt_import = True
alt_import_nleq2 = True
print('Attempting alternate nleq2 import ...')
if DEBUG:
print(time.strftime('6-%H:%M:%S'))
if alt_import:
savedir = os.getcwd()
for tpath in os.sys.path:
if alt_import_pitcon:
try:
if (
os.path.exists(os.path.join(tpath, 'pysces', 'pitcon'))
and tpath != ''
):
os.chdir(os.path.join(tpath, 'pysces', 'pitcon'))
from . import pitcon
if not __SILENT_START__:
print('Continuation routines available (A)')
except Exception as ex:
print(ex)
print(
'INFO: Alternate pitcon import failed: continuation not available'
)
if alt_import_nleq2:
try:
if (
os.path.exists(os.path.join(tpath, 'pysces', 'nleq2'))
and tpath != ''
):
os.chdir(os.path.join(tpath, 'pysces', 'nleq2'))
from . import nleq2
if not __SILENT_START__:
print('NLEQ2 routines available (A)')
except Exception as ex:
print(ex)
nleq2_switch = False
print('INFO: Alternate NLEQ2 import failed: option not available')
os.chdir(savedir)
if DEBUG:
print(time.strftime('7-%H:%M:%S'))
# check for libsbml
try:
import libsbml as SBML
if not __SILENT_START__:
print("SBML support available")
except ImportError as ex:
SBML = None
print(ex)
print("INFO: No SBML library found, SBML support not available")
# This has to come at the end
from .PyscesModel import PysMod as model
from .PyscesModel import ScanDataObj as ScanDataObj
PyscesModel.interface = interface
from .PyscesTest import PyscesTest as test
write = None
try:
from .PyscesUtils import WriteOutput
write = WriteOutput()
del WriteOutput
except ImportError as ex:
pass
from .PyscesScan import PITCONScanUtils, Scanner
try:
from .RateChar import RateChar
if not __SILENT_START__:
print("RateChar is available")
except Exception as ex:
RateChar = None
# print "RateChar not available"
# ParScanner import
try:
from .PyscesParScan import ParScanner
if not __SILENT_START__:
print("Parallel scanner is available")
except ImportError as ex:
ParScanner = None
print(ex)
print("INFO: Parallel scanner not available")
if DEBUG:
print(time.strftime('9-%H:%M:%S'))
if not __SILENT_START__:
print('\nPySCeS environment\n******************')
print('pysces.model_dir = ' + model_dir)
print('pysces.output_dir = ' + output_dir)
print('\n\n***********************************************************************')
print(
'* Welcome to PySCeS ('
+ __version__
+ ') - Python Simulator for Cellular Systems *'
)
print('* http://pysces.sourceforge.net *')
## print '* Somewhere In Time *'
print('* Copyright(C) <NAME>, <NAME>, <NAME>, 2004-2020 *')
print('* Triple-J Group for Molecular Cell Physiology *')
print('* Stellenbosch University, ZA and VU University Amsterdam, NL *')
print('* PySCeS is distributed under the PySCeS (BSD style) licence, see *')
print('* LICENCE.txt (supplied with this release) for details *')
## print '* ** Read about PySCeS ** *'
print('* Please cite PySCeS with: doi:10.1093/bioinformatics/bti046 *')
print('***********************************************************************')
try:
del os, key, gplt, mplt
except Exception as ex:
print(ex)
print('\n\nOops I did it again error ...\n\n')
if DEBUG:
print(time.strftime('10-%H:%M:%S')) | en | 0.74236 | PySCeS - Python Simulator for Cellular Systems (http://pysces.sourceforge.net) Copyright (C) 2004-2020 <NAME>, <NAME>, <NAME> all rights reserved, <NAME> (<EMAIL>) Triple-J Group for Molecular Cell Physiology Stellenbosch University, South Africa. Permission to use, modify, and distribute this software is given under the terms of the PySceS (BSD style) license. See LICENSE.txt that came with this distribution for specifics. NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. <NAME> PySCeS: the Python Simulator for Cellular Systems ------------------------------------------------- PySCeS is developed by the Triple-J Group for Molecular Cell Physiology in order to try model and understand the complex processes and systems which make up the living cell. PySCeS features, amongst other things: - A text based Model Description Language. - A structural analysis module. - Integrators for time simulation - Non-linear solvers for steady-state analysis - A module for performing Metabolic Control Analysis - A bifurcation module for systems which exhibit multiple steady states - A variety of extra utilites for parameter scans, data output and plotting. - A dynamic module loading framework. - SBML import and export capability. # TODO get rid unused imports # for in-place development with setup.py develop # Read config # following is to get the full path when .pys_usercfg.ini specifies CWD as follows: # output_dir = ./ # initialise pysces.interface.* # initialise pysces.plt.* GnuPlot has been enabled but the path to the executable has not been defined (or does not exist). Please set the "gnuplot_dir" key in your pyscfg.ini file. # print 'Attempting alternate pitcon import ...' # alt_import = True # alt_import_pitcon = True # print 'Attempting alternate nleq2 import ...' # alt_import = True # alt_import_nleq2 = True # print ex # print ex # check for libsbml # This has to come at the end # print "RateChar not available" # ParScanner import ## print '* Somewhere In Time *' ## print '* ** Read about PySCeS ** *' | 1.888915 | 2 |
redash/handlers/alerts.py | jrbenny35/redash | 0 | 6633264 | import time
from flask import request
from funcy import project
from redash import models
from redash.handlers.base import (BaseResource, get_object_or_404,
require_fields)
from redash.permissions import (require_access, require_admin_or_owner,
require_permission, view_only)
class AlertResource(BaseResource):
def get(self, alert_id):
alert = get_object_or_404(models.Alert.get_by_id_and_org, alert_id, self.current_org)
require_access(alert.groups, self.current_user, view_only)
self.record_event({
'action': 'view',
'timestamp': int(time.time()),
'object_id': alert.id,
'object_type': 'alert'
})
return alert.to_dict()
def post(self, alert_id):
req = request.get_json(True)
params = project(req, ('options', 'name', 'query_id', 'rearm'))
alert = get_object_or_404(models.Alert.get_by_id_and_org, alert_id, self.current_org)
require_admin_or_owner(alert.user.id)
self.update_model(alert, params)
models.db.session.commit()
self.record_event({
'action': 'edit',
'timestamp': int(time.time()),
'object_id': alert.id,
'object_type': 'alert'
})
return alert.to_dict()
def delete(self, alert_id):
alert = get_object_or_404(models.Alert.get_by_id_and_org, alert_id, self.current_org)
require_admin_or_owner(alert.user_id)
models.db.session.delete(alert)
models.db.session.commit()
class AlertListResource(BaseResource):
def post(self):
req = request.get_json(True)
require_fields(req, ('options', 'name', 'query_id'))
query = models.Query.get_by_id_and_org(req['query_id'],
self.current_org)
require_access(query.groups, self.current_user, view_only)
alert = models.Alert(
name=req['name'],
query_rel=query,
user=self.current_user,
rearm=req.get('rearm'),
options=req['options']
)
models.db.session.add(alert)
models.db.session.flush()
models.db.session.commit()
self.record_event({
'action': 'create',
'timestamp': int(time.time()),
'object_id': alert.id,
'object_type': 'alert'
})
return alert.to_dict()
@require_permission('list_alerts')
def get(self):
self.record_event({
'action': 'view',
'timestamp': int(time.time()),
'object_id': 'alerts',
'object_type': 'api_call'
})
return [alert.to_dict() for alert in models.Alert.all(group_ids=self.current_user.group_ids)]
class AlertSubscriptionListResource(BaseResource):
def post(self, alert_id):
req = request.get_json(True)
alert = models.Alert.get_by_id_and_org(alert_id, self.current_org)
require_access(alert.groups, self.current_user, view_only)
kwargs = {'alert': alert, 'user': self.current_user}
if 'destination_id' in req:
destination = models.NotificationDestination.get_by_id_and_org(req['destination_id'], self.current_org)
kwargs['destination'] = destination
subscription = models.AlertSubscription(**kwargs)
models.db.session.add(subscription)
models.db.session.commit()
self.record_event({
'action': 'subscribe',
'timestamp': int(time.time()),
'object_id': alert_id,
'object_type': 'alert',
'destination': req.get('destination_id')
})
d = subscription.to_dict()
return d
def get(self, alert_id):
alert_id = int(alert_id)
alert = models.Alert.get_by_id_and_org(alert_id, self.current_org)
require_access(alert.groups, self.current_user, view_only)
subscriptions = models.AlertSubscription.all(alert_id)
return [s.to_dict() for s in subscriptions]
class AlertSubscriptionResource(BaseResource):
def delete(self, alert_id, subscriber_id):
subscription = models.AlertSubscription.query.get_or_404(subscriber_id)
require_admin_or_owner(subscription.user.id)
models.db.session.delete(subscription)
models.db.session.commit()
self.record_event({
'action': 'unsubscribe',
'timestamp': int(time.time()),
'object_id': alert_id,
'object_type': 'alert'
})
| import time
from flask import request
from funcy import project
from redash import models
from redash.handlers.base import (BaseResource, get_object_or_404,
require_fields)
from redash.permissions import (require_access, require_admin_or_owner,
require_permission, view_only)
class AlertResource(BaseResource):
def get(self, alert_id):
alert = get_object_or_404(models.Alert.get_by_id_and_org, alert_id, self.current_org)
require_access(alert.groups, self.current_user, view_only)
self.record_event({
'action': 'view',
'timestamp': int(time.time()),
'object_id': alert.id,
'object_type': 'alert'
})
return alert.to_dict()
def post(self, alert_id):
req = request.get_json(True)
params = project(req, ('options', 'name', 'query_id', 'rearm'))
alert = get_object_or_404(models.Alert.get_by_id_and_org, alert_id, self.current_org)
require_admin_or_owner(alert.user.id)
self.update_model(alert, params)
models.db.session.commit()
self.record_event({
'action': 'edit',
'timestamp': int(time.time()),
'object_id': alert.id,
'object_type': 'alert'
})
return alert.to_dict()
def delete(self, alert_id):
alert = get_object_or_404(models.Alert.get_by_id_and_org, alert_id, self.current_org)
require_admin_or_owner(alert.user_id)
models.db.session.delete(alert)
models.db.session.commit()
class AlertListResource(BaseResource):
def post(self):
req = request.get_json(True)
require_fields(req, ('options', 'name', 'query_id'))
query = models.Query.get_by_id_and_org(req['query_id'],
self.current_org)
require_access(query.groups, self.current_user, view_only)
alert = models.Alert(
name=req['name'],
query_rel=query,
user=self.current_user,
rearm=req.get('rearm'),
options=req['options']
)
models.db.session.add(alert)
models.db.session.flush()
models.db.session.commit()
self.record_event({
'action': 'create',
'timestamp': int(time.time()),
'object_id': alert.id,
'object_type': 'alert'
})
return alert.to_dict()
@require_permission('list_alerts')
def get(self):
self.record_event({
'action': 'view',
'timestamp': int(time.time()),
'object_id': 'alerts',
'object_type': 'api_call'
})
return [alert.to_dict() for alert in models.Alert.all(group_ids=self.current_user.group_ids)]
class AlertSubscriptionListResource(BaseResource):
def post(self, alert_id):
req = request.get_json(True)
alert = models.Alert.get_by_id_and_org(alert_id, self.current_org)
require_access(alert.groups, self.current_user, view_only)
kwargs = {'alert': alert, 'user': self.current_user}
if 'destination_id' in req:
destination = models.NotificationDestination.get_by_id_and_org(req['destination_id'], self.current_org)
kwargs['destination'] = destination
subscription = models.AlertSubscription(**kwargs)
models.db.session.add(subscription)
models.db.session.commit()
self.record_event({
'action': 'subscribe',
'timestamp': int(time.time()),
'object_id': alert_id,
'object_type': 'alert',
'destination': req.get('destination_id')
})
d = subscription.to_dict()
return d
def get(self, alert_id):
alert_id = int(alert_id)
alert = models.Alert.get_by_id_and_org(alert_id, self.current_org)
require_access(alert.groups, self.current_user, view_only)
subscriptions = models.AlertSubscription.all(alert_id)
return [s.to_dict() for s in subscriptions]
class AlertSubscriptionResource(BaseResource):
def delete(self, alert_id, subscriber_id):
subscription = models.AlertSubscription.query.get_or_404(subscriber_id)
require_admin_or_owner(subscription.user.id)
models.db.session.delete(subscription)
models.db.session.commit()
self.record_event({
'action': 'unsubscribe',
'timestamp': int(time.time()),
'object_id': alert_id,
'object_type': 'alert'
})
| none | 1 | 2.030812 | 2 |
|
flare/kernels/mc_mb_sepcut.py | aaronchen0316/flare | 144 | 6633265 | """
Implementation of three-body kernels using different cutoffs.
The kernels are slightly slower.
"""
import numpy as np
import os
import sys
from numba import njit
from math import exp
import flare.kernels.cutoffs as cf
from flare.env import AtomicEnvironment
from flare.kernels.kernels import (
coordination_number,
q_value,
q_value_mc,
mb_grad_helper_ls_,
mb_grad_helper_ls_,
k_sq_exp_double_dev,
k_sq_exp_dev,
)
from typing import Callable
@njit
def many_body_mc_sepcut_jit(
q_array_1,
q_array_2,
q_neigh_array_1,
q_neigh_array_2,
q_neigh_grads_1,
q_neigh_grads_2,
c1,
c2,
etypes1,
etypes2,
species1,
species2,
d1,
d2,
sig,
ls,
nspec,
spec_mask,
mb_mask,
):
"""many-body multi-element kernel between two force components accelerated
with Numba.
Args:
c1 (int): atomic species of the central atom in env 1
c2 (int): atomic species of the central atom in env 2
etypes1 (np.ndarray): atomic species of atoms in env 1
etypes2 (np.ndarray): atomic species of atoms in env 2
species1 (np.ndarray): all the atomic species present in trajectory 1
species2 (np.ndarray): all the atomic species present in trajectory 2
d1 (int): Force component of the first environment.
d2 (int): Force component of the second environment.
sig (float): many-body signal variance hyperparameter.
ls (float): many-body length scale hyperparameter.
Return:
float: Value of the many-body kernel.
"""
kern = 0
useful_species = np.array(
list(set(species1).intersection(set(species2))), dtype=np.int8
)
bc1 = spec_mask[c1]
bc1n = bc1 * nspec
bc2 = spec_mask[c2]
bc2n = bc2 * nspec
# loop over all possible species
for s in useful_species:
bs = spec_mask[s]
bsn = bs * nspec
mbtype1 = mb_mask[bc1n + bs]
mbtype2 = mb_mask[bc2n + bs]
# Calculate many-body descriptor values for central atoms 1 and 2
s1 = np.where(species1 == s)[0][0]
s2 = np.where(species2 == s)[0][0]
q1 = q_array_1[s1]
q2 = q_array_2[s2]
# compute kernel only if central atoms are of the same species
if c1 == c2:
k12 = k_sq_exp_double_dev(q1, q2, sig[mbtype1], ls[mbtype1])
else:
k12 = 0
# initialise arrays of many body descriptors and gradients for the neighbour atoms in
# the two configurations
# Loop over neighbours i of 1st configuration
for i in range(q_neigh_array_1.shape[0]):
qis = q1i_grads = qi1_grads = ki2s = 0
if etypes1[i] == s:
q1i_grads = q_neigh_grads_1[i, d1 - 1]
if c1 == s:
qi1_grads = q_neigh_grads_1[i, d1 - 1]
# Calculate many-body descriptor value for i
qis = q_neigh_array_1[i, s1]
if c2 == etypes1[i]:
ki2s = k_sq_exp_double_dev(qis, q2, sig[mbtype2], ls[mbtype2])
# Loop over neighbours j of 2
for j in range(q_neigh_array_2.shape[0]):
qjs = qj2_grads = q2j_grads = k1js = 0
if etypes2[j] == s:
q2j_grads = q_neigh_grads_2[j, d2 - 1]
if c2 == s:
qj2_grads = q_neigh_grads_2[j, d2 - 1]
# Calculate many-body descriptor value for j
qjs = q_neigh_array_2[j, s2]
if c1 == etypes2[j]:
k1js = k_sq_exp_double_dev(q1, qjs, sig[mbtype1], ls[mbtype1])
be = spec_mask[etypes1[i]]
mbtype = mb_mask[be + bsn]
if etypes1[i] == etypes2[j]:
kij = k_sq_exp_double_dev(qis, qjs, sig[mbtype], ls[mbtype])
else:
kij = 0
kern += q1i_grads * q2j_grads * k12
kern += qi1_grads * q2j_grads * ki2s
kern += q1i_grads * qj2_grads * k1js
kern += qi1_grads * qj2_grads * kij
return kern
@njit
def many_body_mc_grad_sepcut_jit(
q_array_1,
q_array_2,
q_neigh_array_1,
q_neigh_array_2,
q_neigh_grads_1,
q_neigh_grads_2,
c1,
c2,
etypes1,
etypes2,
species1,
species2,
d1,
d2,
sig,
ls,
nspec,
spec_mask,
nmb,
mb_mask,
):
"""gradient of many-body multi-element kernel between two force components
w.r.t. the hyperparameters, accelerated with Numba.
Args:
c1 (int): atomic species of the central atom in env 1
c2 (int): atomic species of the central atom in env 2
etypes1 (np.ndarray): atomic species of atoms in env 1
etypes2 (np.ndarray): atomic species of atoms in env 2
species1 (np.ndarray): all the atomic species present in trajectory 1
species2 (np.ndarray): all the atomic species present in trajectory 2
d1 (int): Force component of the first environment.
d2 (int): Force component of the second environment.
sig (float): many-body signal variance hyperparameter.
ls (float): many-body length scale hyperparameter.
Return:
array: Value of the many-body kernel and its gradient w.r.t. sig and ls
"""
kern = 0
sig_derv = np.zeros(nmb, dtype=np.float64)
ls_derv = np.zeros(nmb, dtype=np.float64)
useful_species = np.array(
list(set(species1).intersection(set(species2))), dtype=np.int8
)
bc1 = spec_mask[c1]
bc1n = bc1 * nspec
bc2 = spec_mask[c2]
bc2n = bc2 * nspec
for s in useful_species:
bs = spec_mask[s]
bsn = bs * nspec
mbtype1 = mb_mask[bc1n + bs]
mbtype2 = mb_mask[bc2n + bs]
# Calculate many-body descriptor values for central atoms 1 and 2
s1 = np.where(species1 == s)[0][0]
s2 = np.where(species2 == s)[0][0]
q1 = q_array_1[s1]
q2 = q_array_2[s2]
# compute kernel only if central atoms are of the same species
if c1 == c2:
k12 = k_sq_exp_double_dev(q1, q2, sig[mbtype1], ls[mbtype1])
q12diffsq = (q1 - q2) ** 2 # * (q1 - q2)
dk12 = mb_grad_helper_ls_(q12diffsq, sig[mbtype1], ls[mbtype1])
else:
k12 = 0
dk12 = 0
# Compute ki2s, qi1_grads, and qis
for i in range(q_neigh_array_1.shape[0]):
qis = q1i_grads = qi1_grads = ki2s = dki2s = 0
if etypes1[i] == s:
q1i_grads = q_neigh_grads_1[i, d1 - 1]
if c1 == s:
qi1_grads = q_neigh_grads_1[i, d1 - 1]
# Calculate many-body descriptor value for i
qis = q_neigh_array_1[i, s1]
if c2 == etypes1[i]:
ki2s = k_sq_exp_double_dev(qis, q2, sig[mbtype2], ls[mbtype2])
qi2diffsq = (qis - q2) * (qis - q2)
dki2s = mb_grad_helper_ls_(qi2diffsq, sig[mbtype2], ls[mbtype2])
# Compute k1js, qj2_grads and qjs
for j in range(q_neigh_array_2.shape[0]):
qjs = qj2_grads = q2j_grads = k1js = dk1js = 0
if etypes2[j] == s:
q2j_grads = q_neigh_grads_2[j, d2 - 1]
if c2 == s:
qj2_grads = q_neigh_grads_2[j, d2 - 1]
# Calculate many-body descriptor value for j
qjs = q_neigh_array_2[j, s2]
if c1 == etypes2[j]:
k1js = k_sq_exp_double_dev(q1, qjs, sig[mbtype1], ls[mbtype1])
q1jdiffsq = (q1 - qjs) * (q1 - qjs)
dk1js = mb_grad_helper_ls_(q1jdiffsq, sig[mbtype1], ls[mbtype1])
be = spec_mask[etypes2[j]]
mbtype = mb_mask[bsn + be]
if etypes1[i] == etypes2[j]:
kij = k_sq_exp_double_dev(qis, qjs, sig[mbtype], ls[mbtype])
qijdiffsq = (qis - qjs) * (qis - qjs)
dkij = mb_grad_helper_ls_(qijdiffsq, sig[mbtype], ls[mbtype])
else:
kij = 0
dkij = 0
# c1 s and c2 s and if c1==c2 --> c1 s
if k12 != 0:
kern_term_c1s = q1i_grads * q2j_grads * k12
if sig[mbtype1] != 0:
sig_derv[mbtype1] += kern_term_c1s * 2.0 / sig[mbtype1]
kern += kern_term_c1s
ls_derv[mbtype1] += q1i_grads * q2j_grads * dk12
# s e1 and c2 s and c2==e1 --> c2 s
if ki2s != 0:
kern_term_c2s = qi1_grads * q2j_grads * ki2s
if sig[mbtype2] != 0:
sig_derv[mbtype2] += kern_term_c2s * 2.0 / sig[mbtype2]
kern += kern_term_c2s
ls_derv[mbtype2] += qi1_grads * q2j_grads * dki2s
# c1 s and s e2 and c1==e2 --> c1 s
if k1js != 0:
kern_term_c1s = q1i_grads * qj2_grads * k1js
if sig[mbtype1] != 0:
sig_derv[mbtype1] += kern_term_c1s * 2.0 / sig[mbtype1]
kern += kern_term_c1s
ls_derv[mbtype1] += q1i_grads * qj2_grads * dk1js
# s e1 and s e2 and e1 == e2 -> s e
if kij != 0:
kern_term_se = qi1_grads * qj2_grads * kij
if sig[mbtype] != 0:
sig_derv[mbtype] += kern_term_se * 2.0 / sig[mbtype]
kern += kern_term_se
ls_derv[mbtype] += qi1_grads * qj2_grads * dkij
grad = np.zeros(nmb * 2, dtype=np.float64)
grad[:nmb] = sig_derv
grad[nmb:] = ls_derv
return kern, grad
@njit
def many_body_mc_force_en_sepcut_jit(
q_array_1,
q_array_2,
q_neigh_array_1,
q_neigh_grads_1,
c1,
c2,
etypes1,
species1,
species2,
d1,
sig,
ls,
nspec,
spec_mask,
mb_mask,
):
"""many-body many-element kernel between force and energy components accelerated
with Numba.
Args:
To be complete
c1 (int): atomic species of the central atom in env 1
c2 (int): atomic species of the central atom in env 2
etypes1 (np.ndarray): atomic species of atoms in env 1
species1 (np.ndarray): all the atomic species present in trajectory 1
species2 (np.ndarray): all the atomic species present in trajectory 2
d1 (int): Force component of the first environment.
sig (float): many-body signal variance hyperparameter.
ls (float): many-body length scale hyperparameter.
Return:
float: Value of the many-body kernel.
"""
kern = 0
useful_species = np.array(
list(set(species1).intersection(set(species2))), dtype=np.int8
)
bc1 = spec_mask[c1]
bc1n = bc1 * nspec
bc2 = spec_mask[c2]
bc2n = bc2 * nspec
for s in useful_species:
bs = spec_mask[s]
bsn = bs * nspec
mbtype1 = mb_mask[bc1n + bs]
mbtype2 = mb_mask[bc2n + bs]
s1 = np.where(species1 == s)[0][0]
s2 = np.where(species2 == s)[0][0]
q1 = q_array_1[s1]
q2 = q_array_2[s2]
if c1 == c2:
k12 = k_sq_exp_dev(q1, q2, sig[mbtype1], ls[mbtype1])
else:
k12 = 0
# Loop over neighbours i of 1
for i in range(q_neigh_array_1.shape[0]):
qi1_grads = q1i_grads = 0
ki2s = 0
if etypes1[i] == s:
q1i_grads = q_neigh_grads_1[i, d1 - 1]
if (c1 == s) and (c2 == etypes1[i]):
qi1_grads = q_neigh_grads_1[i, d1 - 1]
qis = q_neigh_array_1[i, s1]
ki2s = k_sq_exp_dev(qis, q2, sig[mbtype2], ls[mbtype2])
kern -= q1i_grads * k12 + qi1_grads * ki2s
return kern
@njit
def many_body_mc_en_sepcut_jit(
q_array_1, q_array_2, c1, c2, species1, species2, sig, ls, nspec, spec_mask, mb_mask
):
"""many-body many-element kernel between energy components accelerated
with Numba.
Args:
bond_array_1 (np.ndarray): many-body bond array of the first local
environment.
bond_array_2 (np.ndarray): many-body bond array of the second local
environment.
c1 (int): atomic species of the central atom in env 1
c2 (int): atomic species of the central atom in env 2
etypes1 (np.ndarray): atomic species of atoms in env 1
etypes2 (np.ndarray): atomic species of atoms in env 2
species1 (np.ndarray): all the atomic species present in trajectory 1
species2 (np.ndarray): all the atomic species present in trajectory 2
sig (float): many-body signal variance hyperparameter.
ls (float): many-body length scale hyperparameter.
r_cut (float): many-body cutoff radius.
cutoff_func (Callable): Cutoff function.
Return:
float: Value of the many-body kernel.
"""
useful_species = np.array(
list(set(species1).intersection(set(species2))), dtype=np.int8
)
kern = 0
if c1 == c2:
ls2 = ls * ls
sig2 = sig * sig
bc1 = spec_mask[c1]
bc1n = bc1 * nspec
for s in useful_species:
bs = spec_mask[s]
mbtype = mb_mask[bc1n + bs]
tls2 = ls2[mbtype]
tsig2 = sig2[mbtype]
q1 = q_array_1[np.where(species1 == s)[0][0]]
q2 = q_array_2[np.where(species2 == s)[0][0]]
q1q2diff = q1 - q2
kern += tsig2 * exp(-q1q2diff * q1q2diff / (2 * tls2))
return kern
| """
Implementation of three-body kernels using different cutoffs.
The kernels are slightly slower.
"""
import numpy as np
import os
import sys
from numba import njit
from math import exp
import flare.kernels.cutoffs as cf
from flare.env import AtomicEnvironment
from flare.kernels.kernels import (
coordination_number,
q_value,
q_value_mc,
mb_grad_helper_ls_,
mb_grad_helper_ls_,
k_sq_exp_double_dev,
k_sq_exp_dev,
)
from typing import Callable
@njit
def many_body_mc_sepcut_jit(
q_array_1,
q_array_2,
q_neigh_array_1,
q_neigh_array_2,
q_neigh_grads_1,
q_neigh_grads_2,
c1,
c2,
etypes1,
etypes2,
species1,
species2,
d1,
d2,
sig,
ls,
nspec,
spec_mask,
mb_mask,
):
"""many-body multi-element kernel between two force components accelerated
with Numba.
Args:
c1 (int): atomic species of the central atom in env 1
c2 (int): atomic species of the central atom in env 2
etypes1 (np.ndarray): atomic species of atoms in env 1
etypes2 (np.ndarray): atomic species of atoms in env 2
species1 (np.ndarray): all the atomic species present in trajectory 1
species2 (np.ndarray): all the atomic species present in trajectory 2
d1 (int): Force component of the first environment.
d2 (int): Force component of the second environment.
sig (float): many-body signal variance hyperparameter.
ls (float): many-body length scale hyperparameter.
Return:
float: Value of the many-body kernel.
"""
kern = 0
useful_species = np.array(
list(set(species1).intersection(set(species2))), dtype=np.int8
)
bc1 = spec_mask[c1]
bc1n = bc1 * nspec
bc2 = spec_mask[c2]
bc2n = bc2 * nspec
# loop over all possible species
for s in useful_species:
bs = spec_mask[s]
bsn = bs * nspec
mbtype1 = mb_mask[bc1n + bs]
mbtype2 = mb_mask[bc2n + bs]
# Calculate many-body descriptor values for central atoms 1 and 2
s1 = np.where(species1 == s)[0][0]
s2 = np.where(species2 == s)[0][0]
q1 = q_array_1[s1]
q2 = q_array_2[s2]
# compute kernel only if central atoms are of the same species
if c1 == c2:
k12 = k_sq_exp_double_dev(q1, q2, sig[mbtype1], ls[mbtype1])
else:
k12 = 0
# initialise arrays of many body descriptors and gradients for the neighbour atoms in
# the two configurations
# Loop over neighbours i of 1st configuration
for i in range(q_neigh_array_1.shape[0]):
qis = q1i_grads = qi1_grads = ki2s = 0
if etypes1[i] == s:
q1i_grads = q_neigh_grads_1[i, d1 - 1]
if c1 == s:
qi1_grads = q_neigh_grads_1[i, d1 - 1]
# Calculate many-body descriptor value for i
qis = q_neigh_array_1[i, s1]
if c2 == etypes1[i]:
ki2s = k_sq_exp_double_dev(qis, q2, sig[mbtype2], ls[mbtype2])
# Loop over neighbours j of 2
for j in range(q_neigh_array_2.shape[0]):
qjs = qj2_grads = q2j_grads = k1js = 0
if etypes2[j] == s:
q2j_grads = q_neigh_grads_2[j, d2 - 1]
if c2 == s:
qj2_grads = q_neigh_grads_2[j, d2 - 1]
# Calculate many-body descriptor value for j
qjs = q_neigh_array_2[j, s2]
if c1 == etypes2[j]:
k1js = k_sq_exp_double_dev(q1, qjs, sig[mbtype1], ls[mbtype1])
be = spec_mask[etypes1[i]]
mbtype = mb_mask[be + bsn]
if etypes1[i] == etypes2[j]:
kij = k_sq_exp_double_dev(qis, qjs, sig[mbtype], ls[mbtype])
else:
kij = 0
kern += q1i_grads * q2j_grads * k12
kern += qi1_grads * q2j_grads * ki2s
kern += q1i_grads * qj2_grads * k1js
kern += qi1_grads * qj2_grads * kij
return kern
@njit
def many_body_mc_grad_sepcut_jit(
q_array_1,
q_array_2,
q_neigh_array_1,
q_neigh_array_2,
q_neigh_grads_1,
q_neigh_grads_2,
c1,
c2,
etypes1,
etypes2,
species1,
species2,
d1,
d2,
sig,
ls,
nspec,
spec_mask,
nmb,
mb_mask,
):
"""gradient of many-body multi-element kernel between two force components
w.r.t. the hyperparameters, accelerated with Numba.
Args:
c1 (int): atomic species of the central atom in env 1
c2 (int): atomic species of the central atom in env 2
etypes1 (np.ndarray): atomic species of atoms in env 1
etypes2 (np.ndarray): atomic species of atoms in env 2
species1 (np.ndarray): all the atomic species present in trajectory 1
species2 (np.ndarray): all the atomic species present in trajectory 2
d1 (int): Force component of the first environment.
d2 (int): Force component of the second environment.
sig (float): many-body signal variance hyperparameter.
ls (float): many-body length scale hyperparameter.
Return:
array: Value of the many-body kernel and its gradient w.r.t. sig and ls
"""
kern = 0
sig_derv = np.zeros(nmb, dtype=np.float64)
ls_derv = np.zeros(nmb, dtype=np.float64)
useful_species = np.array(
list(set(species1).intersection(set(species2))), dtype=np.int8
)
bc1 = spec_mask[c1]
bc1n = bc1 * nspec
bc2 = spec_mask[c2]
bc2n = bc2 * nspec
for s in useful_species:
bs = spec_mask[s]
bsn = bs * nspec
mbtype1 = mb_mask[bc1n + bs]
mbtype2 = mb_mask[bc2n + bs]
# Calculate many-body descriptor values for central atoms 1 and 2
s1 = np.where(species1 == s)[0][0]
s2 = np.where(species2 == s)[0][0]
q1 = q_array_1[s1]
q2 = q_array_2[s2]
# compute kernel only if central atoms are of the same species
if c1 == c2:
k12 = k_sq_exp_double_dev(q1, q2, sig[mbtype1], ls[mbtype1])
q12diffsq = (q1 - q2) ** 2 # * (q1 - q2)
dk12 = mb_grad_helper_ls_(q12diffsq, sig[mbtype1], ls[mbtype1])
else:
k12 = 0
dk12 = 0
# Compute ki2s, qi1_grads, and qis
for i in range(q_neigh_array_1.shape[0]):
qis = q1i_grads = qi1_grads = ki2s = dki2s = 0
if etypes1[i] == s:
q1i_grads = q_neigh_grads_1[i, d1 - 1]
if c1 == s:
qi1_grads = q_neigh_grads_1[i, d1 - 1]
# Calculate many-body descriptor value for i
qis = q_neigh_array_1[i, s1]
if c2 == etypes1[i]:
ki2s = k_sq_exp_double_dev(qis, q2, sig[mbtype2], ls[mbtype2])
qi2diffsq = (qis - q2) * (qis - q2)
dki2s = mb_grad_helper_ls_(qi2diffsq, sig[mbtype2], ls[mbtype2])
# Compute k1js, qj2_grads and qjs
for j in range(q_neigh_array_2.shape[0]):
qjs = qj2_grads = q2j_grads = k1js = dk1js = 0
if etypes2[j] == s:
q2j_grads = q_neigh_grads_2[j, d2 - 1]
if c2 == s:
qj2_grads = q_neigh_grads_2[j, d2 - 1]
# Calculate many-body descriptor value for j
qjs = q_neigh_array_2[j, s2]
if c1 == etypes2[j]:
k1js = k_sq_exp_double_dev(q1, qjs, sig[mbtype1], ls[mbtype1])
q1jdiffsq = (q1 - qjs) * (q1 - qjs)
dk1js = mb_grad_helper_ls_(q1jdiffsq, sig[mbtype1], ls[mbtype1])
be = spec_mask[etypes2[j]]
mbtype = mb_mask[bsn + be]
if etypes1[i] == etypes2[j]:
kij = k_sq_exp_double_dev(qis, qjs, sig[mbtype], ls[mbtype])
qijdiffsq = (qis - qjs) * (qis - qjs)
dkij = mb_grad_helper_ls_(qijdiffsq, sig[mbtype], ls[mbtype])
else:
kij = 0
dkij = 0
# c1 s and c2 s and if c1==c2 --> c1 s
if k12 != 0:
kern_term_c1s = q1i_grads * q2j_grads * k12
if sig[mbtype1] != 0:
sig_derv[mbtype1] += kern_term_c1s * 2.0 / sig[mbtype1]
kern += kern_term_c1s
ls_derv[mbtype1] += q1i_grads * q2j_grads * dk12
# s e1 and c2 s and c2==e1 --> c2 s
if ki2s != 0:
kern_term_c2s = qi1_grads * q2j_grads * ki2s
if sig[mbtype2] != 0:
sig_derv[mbtype2] += kern_term_c2s * 2.0 / sig[mbtype2]
kern += kern_term_c2s
ls_derv[mbtype2] += qi1_grads * q2j_grads * dki2s
# c1 s and s e2 and c1==e2 --> c1 s
if k1js != 0:
kern_term_c1s = q1i_grads * qj2_grads * k1js
if sig[mbtype1] != 0:
sig_derv[mbtype1] += kern_term_c1s * 2.0 / sig[mbtype1]
kern += kern_term_c1s
ls_derv[mbtype1] += q1i_grads * qj2_grads * dk1js
# s e1 and s e2 and e1 == e2 -> s e
if kij != 0:
kern_term_se = qi1_grads * qj2_grads * kij
if sig[mbtype] != 0:
sig_derv[mbtype] += kern_term_se * 2.0 / sig[mbtype]
kern += kern_term_se
ls_derv[mbtype] += qi1_grads * qj2_grads * dkij
grad = np.zeros(nmb * 2, dtype=np.float64)
grad[:nmb] = sig_derv
grad[nmb:] = ls_derv
return kern, grad
@njit
def many_body_mc_force_en_sepcut_jit(
q_array_1,
q_array_2,
q_neigh_array_1,
q_neigh_grads_1,
c1,
c2,
etypes1,
species1,
species2,
d1,
sig,
ls,
nspec,
spec_mask,
mb_mask,
):
"""many-body many-element kernel between force and energy components accelerated
with Numba.
Args:
To be complete
c1 (int): atomic species of the central atom in env 1
c2 (int): atomic species of the central atom in env 2
etypes1 (np.ndarray): atomic species of atoms in env 1
species1 (np.ndarray): all the atomic species present in trajectory 1
species2 (np.ndarray): all the atomic species present in trajectory 2
d1 (int): Force component of the first environment.
sig (float): many-body signal variance hyperparameter.
ls (float): many-body length scale hyperparameter.
Return:
float: Value of the many-body kernel.
"""
kern = 0
useful_species = np.array(
list(set(species1).intersection(set(species2))), dtype=np.int8
)
bc1 = spec_mask[c1]
bc1n = bc1 * nspec
bc2 = spec_mask[c2]
bc2n = bc2 * nspec
for s in useful_species:
bs = spec_mask[s]
bsn = bs * nspec
mbtype1 = mb_mask[bc1n + bs]
mbtype2 = mb_mask[bc2n + bs]
s1 = np.where(species1 == s)[0][0]
s2 = np.where(species2 == s)[0][0]
q1 = q_array_1[s1]
q2 = q_array_2[s2]
if c1 == c2:
k12 = k_sq_exp_dev(q1, q2, sig[mbtype1], ls[mbtype1])
else:
k12 = 0
# Loop over neighbours i of 1
for i in range(q_neigh_array_1.shape[0]):
qi1_grads = q1i_grads = 0
ki2s = 0
if etypes1[i] == s:
q1i_grads = q_neigh_grads_1[i, d1 - 1]
if (c1 == s) and (c2 == etypes1[i]):
qi1_grads = q_neigh_grads_1[i, d1 - 1]
qis = q_neigh_array_1[i, s1]
ki2s = k_sq_exp_dev(qis, q2, sig[mbtype2], ls[mbtype2])
kern -= q1i_grads * k12 + qi1_grads * ki2s
return kern
@njit
def many_body_mc_en_sepcut_jit(
q_array_1, q_array_2, c1, c2, species1, species2, sig, ls, nspec, spec_mask, mb_mask
):
"""many-body many-element kernel between energy components accelerated
with Numba.
Args:
bond_array_1 (np.ndarray): many-body bond array of the first local
environment.
bond_array_2 (np.ndarray): many-body bond array of the second local
environment.
c1 (int): atomic species of the central atom in env 1
c2 (int): atomic species of the central atom in env 2
etypes1 (np.ndarray): atomic species of atoms in env 1
etypes2 (np.ndarray): atomic species of atoms in env 2
species1 (np.ndarray): all the atomic species present in trajectory 1
species2 (np.ndarray): all the atomic species present in trajectory 2
sig (float): many-body signal variance hyperparameter.
ls (float): many-body length scale hyperparameter.
r_cut (float): many-body cutoff radius.
cutoff_func (Callable): Cutoff function.
Return:
float: Value of the many-body kernel.
"""
useful_species = np.array(
list(set(species1).intersection(set(species2))), dtype=np.int8
)
kern = 0
if c1 == c2:
ls2 = ls * ls
sig2 = sig * sig
bc1 = spec_mask[c1]
bc1n = bc1 * nspec
for s in useful_species:
bs = spec_mask[s]
mbtype = mb_mask[bc1n + bs]
tls2 = ls2[mbtype]
tsig2 = sig2[mbtype]
q1 = q_array_1[np.where(species1 == s)[0][0]]
q2 = q_array_2[np.where(species2 == s)[0][0]]
q1q2diff = q1 - q2
kern += tsig2 * exp(-q1q2diff * q1q2diff / (2 * tls2))
return kern
| en | 0.649289 | Implementation of three-body kernels using different cutoffs. The kernels are slightly slower. many-body multi-element kernel between two force components accelerated with Numba. Args: c1 (int): atomic species of the central atom in env 1 c2 (int): atomic species of the central atom in env 2 etypes1 (np.ndarray): atomic species of atoms in env 1 etypes2 (np.ndarray): atomic species of atoms in env 2 species1 (np.ndarray): all the atomic species present in trajectory 1 species2 (np.ndarray): all the atomic species present in trajectory 2 d1 (int): Force component of the first environment. d2 (int): Force component of the second environment. sig (float): many-body signal variance hyperparameter. ls (float): many-body length scale hyperparameter. Return: float: Value of the many-body kernel. # loop over all possible species # Calculate many-body descriptor values for central atoms 1 and 2 # compute kernel only if central atoms are of the same species # initialise arrays of many body descriptors and gradients for the neighbour atoms in # the two configurations # Loop over neighbours i of 1st configuration # Calculate many-body descriptor value for i # Loop over neighbours j of 2 # Calculate many-body descriptor value for j gradient of many-body multi-element kernel between two force components w.r.t. the hyperparameters, accelerated with Numba. Args: c1 (int): atomic species of the central atom in env 1 c2 (int): atomic species of the central atom in env 2 etypes1 (np.ndarray): atomic species of atoms in env 1 etypes2 (np.ndarray): atomic species of atoms in env 2 species1 (np.ndarray): all the atomic species present in trajectory 1 species2 (np.ndarray): all the atomic species present in trajectory 2 d1 (int): Force component of the first environment. d2 (int): Force component of the second environment. sig (float): many-body signal variance hyperparameter. ls (float): many-body length scale hyperparameter. Return: array: Value of the many-body kernel and its gradient w.r.t. sig and ls # Calculate many-body descriptor values for central atoms 1 and 2 # compute kernel only if central atoms are of the same species # * (q1 - q2) # Compute ki2s, qi1_grads, and qis # Calculate many-body descriptor value for i # Compute k1js, qj2_grads and qjs # Calculate many-body descriptor value for j # c1 s and c2 s and if c1==c2 --> c1 s # s e1 and c2 s and c2==e1 --> c2 s # c1 s and s e2 and c1==e2 --> c1 s # s e1 and s e2 and e1 == e2 -> s e many-body many-element kernel between force and energy components accelerated with Numba. Args: To be complete c1 (int): atomic species of the central atom in env 1 c2 (int): atomic species of the central atom in env 2 etypes1 (np.ndarray): atomic species of atoms in env 1 species1 (np.ndarray): all the atomic species present in trajectory 1 species2 (np.ndarray): all the atomic species present in trajectory 2 d1 (int): Force component of the first environment. sig (float): many-body signal variance hyperparameter. ls (float): many-body length scale hyperparameter. Return: float: Value of the many-body kernel. # Loop over neighbours i of 1 many-body many-element kernel between energy components accelerated with Numba. Args: bond_array_1 (np.ndarray): many-body bond array of the first local environment. bond_array_2 (np.ndarray): many-body bond array of the second local environment. 
c1 (int): atomic species of the central atom in env 1 c2 (int): atomic species of the central atom in env 2 etypes1 (np.ndarray): atomic species of atoms in env 1 etypes2 (np.ndarray): atomic species of atoms in env 2 species1 (np.ndarray): all the atomic species present in trajectory 1 species2 (np.ndarray): all the atomic species present in trajectory 2 sig (float): many-body signal variance hyperparameter. ls (float): many-body length scale hyperparameter. r_cut (float): many-body cutoff radius. cutoff_func (Callable): Cutoff function. Return: float: Value of the many-body kernel. | 2.341058 | 2 |
bai07.py | trietto/python | 0 | 6633266 | <reponame>trietto/python
print (abs.__doc__)
print (int.__doc__)
print (input.__doc__)
# Code by <EMAIL>
def square(num):
'''Trả lại giá trị bình phương của số được nhập vào.
Số nhập vào phải là số nguyên.
'''
return num ** 2
print (square.__doc__) | print (abs.__doc__)
print (int.__doc__)
print (input.__doc__)
# Code by <EMAIL>
def square(num):
'''Trả lại giá trị bình phương của số được nhập vào.
Số nhập vào phải là số nguyên.
'''
return num ** 2
print (square.__doc__) | vi | 1.000069 | # Code by <EMAIL> Trả lại giá trị bình phương của số được nhập vào. Số nhập vào phải là số nguyên. | 2.945173 | 3 |
Python/pythonProject/exercise/ex051.py | JoaoMoreira2002/Linguagens-de-programacao | 0 | 6633267 | <filename>Python/pythonProject/exercise/ex051.py
print('Digite o primeiro termo, e a razão de uma PA')
x = int(input('Primeiro termo'))
y = int(input('Razão'))
for n in range(0, 11):
print('{}'.format(x + ((n - 0) * y)))
print('Esses são os 10 primeiros termos') | <filename>Python/pythonProject/exercise/ex051.py
print('Digite o primeiro termo, e a razão de uma PA')
x = int(input('Primeiro termo'))
y = int(input('Razão'))
for n in range(0, 11):
print('{}'.format(x + ((n - 0) * y)))
print('Esses são os 10 primeiros termos') | none | 1 | 3.981554 | 4 |
|
animations/fur.py | TheSkorm/dc26-fur-scripts | 1 | 6633268 | <filename>animations/fur.py
"""Furry Face Simulator
Use: http://liquidthex.com/dcfursBadgePixelator/
Features:
* Will randomly blink every 10 - 20 seconds if no large-scale movements happen (winks)
* Will track eyes with small nudges in up / down, left / right tilts
* Will wink left/right if tilted too far
* Will close eyes if held upside down
* Will close eyes when shaken
"""
import random
import badge
import dcfurs
class fur:
interval = 45
ticks_per_sec = int(1000 / interval)
dimmer = 16
winkRightFace = [
[1, 13],
[1, 14],
[1, 15],
[2, 12],
[2, 13, dimmer],
[2, 14, dimmer],
[2, 15],
[2, 16],
[3, 2],
[3, 3],
[3, 4],
[3, 11],
[3, 14, dimmer],
[3, 15],
[3, 16],
[4, 1],
[4, 5],
[4, 11],
[4, 12],
[4, 14, dimmer],
[4, 15],
[4, 16],
[5, 12],
[5, 13],
[5, 14],
[5, 15],
]
winkLeftFace = [
[1, 2],
[1, 3],
[1, 4],
[2, 1],
[2, 2],
[2, 3, dimmer],
[2, 4, dimmer],
[2, 5],
[3, 1],
[3, 2],
[3, 3, dimmer],
[3, 5],
[3, 6],
[3, 13],
[3, 14],
[3, 15],
[4, 1],
[4, 2],
[4, 3, dimmer],
[4, 6],
[4, 12],
[4, 16],
[5, 2],
[5, 3],
[5, 4],
[5, 5],
]
winkUpperRightFace = [
[1, 13],
[1, 15],
[2, 12],
[2, 13, dimmer],
[2, 16],
[3, 2],
[3, 3],
[3, 4],
[3, 11],
[3, 12],
[3, 13, dimmer],
[3, 14, dimmer],
[3, 15, dimmer],
[3, 16],
[4, 1],
[4, 5],
[4, 11],
[4, 12],
[4, 13],
[4, 14],
[4, 15],
[4, 16],
[5, 12],
[5, 13],
[5, 14],
[5, 15],
]
winkUpperLeftFace = [
[1, 2],
[1, 3],
[1, 4],
[2, 1],
[2, 4, dimmer],
[2, 5],
[3, 1],
[3, 3],
[3, 4, dimmer],
[3, 5],
[3, 6],
[3, 13],
[3, 14],
[3, 15],
[4, 1],
[4, 2],
[4, 3],
[4, 4],
[4, 5],
[4, 6],
[4, 12],
[4, 16],
[5, 2],
[5, 3],
[5, 4],
[5, 5],
]
winkLowerLeftFace = [
[1, 2],
[1, 3],
[1, 4],
[2, 1],
[2, 2],
[2, 3],
[2, 4],
[2, 5],
[3, 1],
[3, 2, dimmer],
[3, 3, dimmer],
[3, 4, dimmer],
[3, 5],
[3, 6],
[3, 13],
[3, 14],
[3, 15],
[4, 1],
[4, 4, dimmer],
[4, 5],
[4, 6],
[4, 12],
[4, 16],
[5, 3],
[5, 4],
[5, 5],
]
winkLowerRightFace = [
[1, 13],
[1, 14],
[1, 15],
[2, 12],
[2, 13],
[2, 14],
[2, 15],
[2, 16],
[3, 2],
[3, 3],
[3, 4],
[3, 11],
[3, 12],
[3, 13, dimmer],
[3, 14, dimmer],
[3, 15, dimmer],
[3, 16],
[4, 1],
[4, 5],
[4, 11],
[4, 12],
[4, 13, dimmer],
[4, 16],
[5, 12],
[5, 13],
[5, 15],
]
standardFace = [
[1, 2],
[1, 3],
[1, 4],
[1, 13],
[1, 14],
[1, 15],
[2, 1],
[2, 2, dimmer],
[2, 3, dimmer],
[2, 4, dimmer],
[2, 5],
[2, 12],
[2, 13, dimmer],
[2, 14, dimmer],
[2, 15, dimmer],
[2, 16],
[3, 1],
[3, 2, dimmer],
[3, 5],
[3, 6],
[3, 11],
[3, 12],
[3, 15, dimmer],
[3, 16],
[4, 1],
[4, 2, dimmer],
[4, 4],
[4, 5],
[4, 6],
[4, 11],
[4, 12],
[4, 13],
[4, 15, dimmer],
[4, 16],
[5, 2],
[5, 3],
[5, 4],
[5, 5],
[5, 12],
[5, 13],
[5, 14],
[5, 15],
]
upperRightFace = [
[1, 2],
[1, 3],
[1, 4],
[1, 13],
[1, 14],
[1, 15],
[2, 1],
[2, 2],
[2, 3, dimmer],
[2, 5],
[2, 12],
[2, 13, dimmer],
[2, 15],
[2, 16],
[3, 1],
[3, 2],
[3, 3, dimmer],
[3, 6],
[3, 11],
[3, 12],
[3, 13, dimmer],
[3, 16],
[4, 1],
[4, 2],
[4, 3, dimmer],
[4, 4, dimmer],
[4, 5, dimmer],
[4, 6],
[4, 11],
[4, 12],
[4, 13, dimmer],
[4, 14, dimmer],
[4, 15, dimmer],
[4, 16],
[5, 2],
[5, 3],
[5, 4],
[5, 5],
[5, 12],
[5, 13],
[5, 14],
[5, 15],
]
upperLeftFace = [
[1, 2],
[1, 3],
[1, 4],
[1, 13],
[1, 14],
[1, 15],
[2, 1],
[2, 2],
[2, 4, dimmer],
[2, 5],
[2, 12],
[2, 13],
[2, 15, dimmer],
[2, 16],
[3, 1],
[3, 4, dimmer],
[3, 5],
[3, 6],
[3, 11],
[3, 12],
[3, 15, dimmer],
[3, 16],
[4, 1],
[4, 2, dimmer],
[4, 3, dimmer],
[4, 4, dimmer],
[4, 5],
[4, 6],
[4, 11],
[4, 12],
[4, 13, dimmer],
[4, 14, dimmer],
[4, 15, dimmer],
[4, 16],
[5, 2],
[5, 3],
[5, 4],
[5, 5],
[5, 12],
[5, 13],
[5, 14],
[5, 15],
]
rightFace = [
[1, 2],
[1, 3],
[1, 4],
[1, 13],
[1, 14],
[1, 15],
[2, 1],
[2, 2],
[2, 3, dimmer],
[2, 4, dimmer],
[2, 5],
[2, 12],
[2, 13, dimmer],
[2, 14, dimmer],
[2, 15],
[2, 16],
[3, 1],
[3, 2],
[3, 3, dimmer],
[3, 5],
[3, 6],
[3, 11],
[3, 12],
[3, 13, dimmer],
[3, 15],
[3, 16],
[4, 1],
[4, 2],
[4, 3, dimmer],
[4, 6],
[4, 11],
[4, 12],
[4, 13, dimmer],
[4, 16],
[5, 2],
[5, 3],
[5, 4],
[5, 5],
[5, 12],
[5, 13],
[5, 14],
[5, 15],
]
leftFace = [
[1, 2],
[1, 3],
[1, 4],
[1, 13],
[1, 14],
[1, 15],
[2, 1],
[2, 2],
[2, 3, dimmer],
[2, 4, dimmer],
[2, 5],
[2, 12],
[2, 13, dimmer],
[2, 14, dimmer],
[2, 15],
[2, 16],
[3, 1],
[3, 4, dimmer],
[3, 5],
[3, 6],
[3, 11],
[3, 14, dimmer],
[3, 15],
[3, 16],
[4, 1],
[4, 2],
[4, 4, dimmer],
[4, 5],
[4, 6],
[4, 11],
[4, 12],
[4, 14, dimmer],
[4, 15],
[4, 16],
[5, 2],
[5, 3],
[5, 4],
[5, 5],
[5, 12],
[5, 13],
[5, 14],
[5, 15],
]
lowerRightFace = [
[1, 2],
[1, 3],
[1, 4],
[1, 13],
[1, 14],
[1, 15],
[2, 1],
[2, 2],
[2, 3],
[2, 4],
[2, 5],
[2, 12],
[2, 13],
[2, 14],
[2, 15],
[2, 16],
[3, 1],
[3, 2],
[3, 3, dimmer],
[3, 4, dimmer],
[3, 5, dimmer],
[3, 6],
[3, 11],
[3, 12],
[3, 13, dimmer],
[3, 14, dimmer],
[3, 15, dimmer],
[3, 16],
[4, 1],
[4, 2],
[4, 3, dimmer],
[4, 6],
[4, 11],
[4, 12],
[4, 13, dimmer],
[4, 16],
[5, 2],
[5, 3],
[5, 5],
[5, 12],
[5, 13],
[5, 15],
]
lowerLeftFace = [
[1, 2],
[1, 3],
[1, 4],
[1, 13],
[1, 14],
[1, 15],
[2, 1],
[2, 2],
[2, 3],
[2, 4],
[2, 5],
[2, 12],
[2, 13],
[2, 14],
[2, 15],
[2, 16],
[3, 1],
[3, 2, dimmer],
[3, 3, dimmer],
[3, 4, dimmer],
[3, 5],
[3, 6],
[3, 11],
[3, 12, dimmer],
[3, 13, dimmer],
[3, 14, dimmer],
[3, 15],
[3, 16],
[4, 1],
[4, 4, dimmer],
[4, 5],
[4, 6],
[4, 11],
[4, 14, dimmer],
[4, 15],
[4, 16],
[5, 2],
[5, 4, dimmer],
[5, 5],
[5, 12],
[5, 14, dimmer],
[5, 15],
]
upperFace = [
[1, 2],
[1, 3],
[1, 14],
[1, 15],
[2, 1],
[2, 2, dimmer],
[2, 5],
[2, 12],
[2, 15, dimmer],
[2, 16],
[3, 1],
[3, 2, dimmer],
[3, 3, dimmer],
[3, 4, dimmer],
[3, 5],
[3, 6],
[3, 11],
[3, 12],
[3, 13, dimmer],
[3, 14, dimmer],
[3, 15, dimmer],
[3, 16],
[4, 1],
[4, 2],
[4, 3],
[4, 4],
[4, 5],
[4, 6],
[4, 11],
[4, 12],
[4, 13],
[4, 14],
[4, 15],
[4, 16],
[5, 2],
[5, 3],
[5, 4],
[5, 5],
[5, 12],
[5, 13],
[5, 14],
[5, 15],
]
lowerFace = [
[1, 2],
[1, 3],
[1, 4],
[1, 13],
[1, 14],
[1, 15],
[2, 1],
[2, 2],
[2, 3],
[2, 4],
[2, 5],
[2, 12],
[2, 13],
[2, 14],
[2, 15],
[2, 16],
[3, 1],
[3, 2, dimmer],
[3, 3, dimmer],
[3, 4, dimmer],
[3, 5, dimmer],
[3, 6],
[3, 11],
[3, 12, dimmer],
[3, 13, dimmer],
[3, 14, dimmer],
[3, 15, dimmer],
[3, 16],
[4, 1],
[4, 2, dimmer],
[4, 5, dimmer],
[4, 6],
[4, 11],
[4, 12, dimmer],
[4, 15, dimmer],
[4, 16],
[5, 2],
[5, 4],
[5, 5],
[5, 12],
[5, 13],
[5, 15],
]
blinkFace = [
[3, 2],
[3, 3],
[3, 4],
[3, 13],
[3, 14],
[3, 15],
[4, 1],
[4, 5],
[4, 12],
[4, 16],
]
last_blink = 0
next_blink = 0
stop_blink = 0
def __init__(self):
self.reset_fbuf()
self.counter = 0
self.next_blink = random.randint(
self.ticks_per_sec * 10, self.ticks_per_sec * 20
)
def reset_fbuf(self):
self.fbuf = [
bytearray(18),
bytearray(18),
bytearray(18),
bytearray(18),
bytearray(18),
bytearray(18),
bytearray(18),
]
def face(self):
faceBuf = self.standardFace
self.reset_fbuf()
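        # NOTE: move_x/move_y are hard-coded to 0 below, so only the standard
        # and blink faces are ever shown; the tilt tracking described in the
        # module docstring (via the imported badge module) is not wired up yet,
        # which also leaves the directional and wink face tables unreachable.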
move_y = 0
move_x = 0
if self.stop_blink > 0:
self.stop_blink -= 1
faceBuf = self.blinkFace
elif self.last_blink > self.next_blink:
faceBuf = self.blinkFace
self.last_blink = 0
self.stop_blink = random.randint(
int(self.ticks_per_sec * 0.2), int(self.ticks_per_sec * 0.45)
)
self.next_blink = random.randint(
int(self.ticks_per_sec * 10), int(self.ticks_per_sec * 20)
)
elif move_x == -1 and move_y == -1:
faceBuf = self.lowerRightFace
elif move_x == -1 and move_y == 0:
faceBuf = self.rightFace
elif move_x == -1 and move_y == 1:
faceBuf = self.upperRightFace
elif move_x == 0 and move_y == -1:
faceBuf = self.lowerFace
elif move_x == 0 and move_y == 0:
faceBuf = self.standardFace
elif move_x == 0 and move_y == 1:
faceBuf = self.upperFace
elif move_x == 1 and move_y == -1:
faceBuf = self.lowerLeftFace
elif move_x == 1 and move_y == 0:
faceBuf = self.leftFace
elif move_x == 1 and move_y == 1:
faceBuf = self.upperLeftFace
self.last_blink += 1
        for xy in faceBuf:
            self.onPixel(xy)
def onPixel(self, xy):
if len(xy) == 3:
self.fbuf[xy[0]][xy[1]] = xy[2]
else:
self.fbuf[xy[0]][xy[1]] = 255
def draw(self):
self.face()
dcfurs.set_frame(self.fbuf)
| <filename>animations/fur.py
"""Furry Face Simulator
Use: http://liquidthex.com/dcfursBadgePixelator/
Features:
* Will randomly blink every 10 - 20 seconds if no large-scale movements happen (winks)
* Will track eyes with small nudges in up / down, left / right tilts
* Will wink left/right if tilted too far
* Will close eyes if held upside down
* Will close eyes when shaken
"""
import random
import badge
import dcfurs
class fur:
interval = 45
ticks_per_sec = int(1000 / interval)
dimmer = 16
winkRightFace = [
[1, 13],
[1, 14],
[1, 15],
[2, 12],
[2, 13, dimmer],
[2, 14, dimmer],
[2, 15],
[2, 16],
[3, 2],
[3, 3],
[3, 4],
[3, 11],
[3, 14, dimmer],
[3, 15],
[3, 16],
[4, 1],
[4, 5],
[4, 11],
[4, 12],
[4, 14, dimmer],
[4, 15],
[4, 16],
[5, 12],
[5, 13],
[5, 14],
[5, 15],
]
winkLeftFace = [
[1, 2],
[1, 3],
[1, 4],
[2, 1],
[2, 2],
[2, 3, dimmer],
[2, 4, dimmer],
[2, 5],
[3, 1],
[3, 2],
[3, 3, dimmer],
[3, 5],
[3, 6],
[3, 13],
[3, 14],
[3, 15],
[4, 1],
[4, 2],
[4, 3, dimmer],
[4, 6],
[4, 12],
[4, 16],
[5, 2],
[5, 3],
[5, 4],
[5, 5],
]
winkUpperRightFace = [
[1, 13],
[1, 15],
[2, 12],
[2, 13, dimmer],
[2, 16],
[3, 2],
[3, 3],
[3, 4],
[3, 11],
[3, 12],
[3, 13, dimmer],
[3, 14, dimmer],
[3, 15, dimmer],
[3, 16],
[4, 1],
[4, 5],
[4, 11],
[4, 12],
[4, 13],
[4, 14],
[4, 15],
[4, 16],
[5, 12],
[5, 13],
[5, 14],
[5, 15],
]
winkUpperLeftFace = [
[1, 2],
[1, 3],
[1, 4],
[2, 1],
[2, 4, dimmer],
[2, 5],
[3, 1],
[3, 3],
[3, 4, dimmer],
[3, 5],
[3, 6],
[3, 13],
[3, 14],
[3, 15],
[4, 1],
[4, 2],
[4, 3],
[4, 4],
[4, 5],
[4, 6],
[4, 12],
[4, 16],
[5, 2],
[5, 3],
[5, 4],
[5, 5],
]
winkLowerLeftFace = [
[1, 2],
[1, 3],
[1, 4],
[2, 1],
[2, 2],
[2, 3],
[2, 4],
[2, 5],
[3, 1],
[3, 2, dimmer],
[3, 3, dimmer],
[3, 4, dimmer],
[3, 5],
[3, 6],
[3, 13],
[3, 14],
[3, 15],
[4, 1],
[4, 4, dimmer],
[4, 5],
[4, 6],
[4, 12],
[4, 16],
[5, 3],
[5, 4],
[5, 5],
]
winkLowerRightFace = [
[1, 13],
[1, 14],
[1, 15],
[2, 12],
[2, 13],
[2, 14],
[2, 15],
[2, 16],
[3, 2],
[3, 3],
[3, 4],
[3, 11],
[3, 12],
[3, 13, dimmer],
[3, 14, dimmer],
[3, 15, dimmer],
[3, 16],
[4, 1],
[4, 5],
[4, 11],
[4, 12],
[4, 13, dimmer],
[4, 16],
[5, 12],
[5, 13],
[5, 15],
]
standardFace = [
[1, 2],
[1, 3],
[1, 4],
[1, 13],
[1, 14],
[1, 15],
[2, 1],
[2, 2, dimmer],
[2, 3, dimmer],
[2, 4, dimmer],
[2, 5],
[2, 12],
[2, 13, dimmer],
[2, 14, dimmer],
[2, 15, dimmer],
[2, 16],
[3, 1],
[3, 2, dimmer],
[3, 5],
[3, 6],
[3, 11],
[3, 12],
[3, 15, dimmer],
[3, 16],
[4, 1],
[4, 2, dimmer],
[4, 4],
[4, 5],
[4, 6],
[4, 11],
[4, 12],
[4, 13],
[4, 15, dimmer],
[4, 16],
[5, 2],
[5, 3],
[5, 4],
[5, 5],
[5, 12],
[5, 13],
[5, 14],
[5, 15],
]
upperRightFace = [
[1, 2],
[1, 3],
[1, 4],
[1, 13],
[1, 14],
[1, 15],
[2, 1],
[2, 2],
[2, 3, dimmer],
[2, 5],
[2, 12],
[2, 13, dimmer],
[2, 15],
[2, 16],
[3, 1],
[3, 2],
[3, 3, dimmer],
[3, 6],
[3, 11],
[3, 12],
[3, 13, dimmer],
[3, 16],
[4, 1],
[4, 2],
[4, 3, dimmer],
[4, 4, dimmer],
[4, 5, dimmer],
[4, 6],
[4, 11],
[4, 12],
[4, 13, dimmer],
[4, 14, dimmer],
[4, 15, dimmer],
[4, 16],
[5, 2],
[5, 3],
[5, 4],
[5, 5],
[5, 12],
[5, 13],
[5, 14],
[5, 15],
]
upperLeftFace = [
[1, 2],
[1, 3],
[1, 4],
[1, 13],
[1, 14],
[1, 15],
[2, 1],
[2, 2],
[2, 4, dimmer],
[2, 5],
[2, 12],
[2, 13],
[2, 15, dimmer],
[2, 16],
[3, 1],
[3, 4, dimmer],
[3, 5],
[3, 6],
[3, 11],
[3, 12],
[3, 15, dimmer],
[3, 16],
[4, 1],
[4, 2, dimmer],
[4, 3, dimmer],
[4, 4, dimmer],
[4, 5],
[4, 6],
[4, 11],
[4, 12],
[4, 13, dimmer],
[4, 14, dimmer],
[4, 15, dimmer],
[4, 16],
[5, 2],
[5, 3],
[5, 4],
[5, 5],
[5, 12],
[5, 13],
[5, 14],
[5, 15],
]
rightFace = [
[1, 2],
[1, 3],
[1, 4],
[1, 13],
[1, 14],
[1, 15],
[2, 1],
[2, 2],
[2, 3, dimmer],
[2, 4, dimmer],
[2, 5],
[2, 12],
[2, 13, dimmer],
[2, 14, dimmer],
[2, 15],
[2, 16],
[3, 1],
[3, 2],
[3, 3, dimmer],
[3, 5],
[3, 6],
[3, 11],
[3, 12],
[3, 13, dimmer],
[3, 15],
[3, 16],
[4, 1],
[4, 2],
[4, 3, dimmer],
[4, 6],
[4, 11],
[4, 12],
[4, 13, dimmer],
[4, 16],
[5, 2],
[5, 3],
[5, 4],
[5, 5],
[5, 12],
[5, 13],
[5, 14],
[5, 15],
]
leftFace = [
[1, 2],
[1, 3],
[1, 4],
[1, 13],
[1, 14],
[1, 15],
[2, 1],
[2, 2],
[2, 3, dimmer],
[2, 4, dimmer],
[2, 5],
[2, 12],
[2, 13, dimmer],
[2, 14, dimmer],
[2, 15],
[2, 16],
[3, 1],
[3, 4, dimmer],
[3, 5],
[3, 6],
[3, 11],
[3, 14, dimmer],
[3, 15],
[3, 16],
[4, 1],
[4, 2],
[4, 4, dimmer],
[4, 5],
[4, 6],
[4, 11],
[4, 12],
[4, 14, dimmer],
[4, 15],
[4, 16],
[5, 2],
[5, 3],
[5, 4],
[5, 5],
[5, 12],
[5, 13],
[5, 14],
[5, 15],
]
lowerRightFace = [
[1, 2],
[1, 3],
[1, 4],
[1, 13],
[1, 14],
[1, 15],
[2, 1],
[2, 2],
[2, 3],
[2, 4],
[2, 5],
[2, 12],
[2, 13],
[2, 14],
[2, 15],
[2, 16],
[3, 1],
[3, 2],
[3, 3, dimmer],
[3, 4, dimmer],
[3, 5, dimmer],
[3, 6],
[3, 11],
[3, 12],
[3, 13, dimmer],
[3, 14, dimmer],
[3, 15, dimmer],
[3, 16],
[4, 1],
[4, 2],
[4, 3, dimmer],
[4, 6],
[4, 11],
[4, 12],
[4, 13, dimmer],
[4, 16],
[5, 2],
[5, 3],
[5, 5],
[5, 12],
[5, 13],
[5, 15],
]
lowerLeftFace = [
[1, 2],
[1, 3],
[1, 4],
[1, 13],
[1, 14],
[1, 15],
[2, 1],
[2, 2],
[2, 3],
[2, 4],
[2, 5],
[2, 12],
[2, 13],
[2, 14],
[2, 15],
[2, 16],
[3, 1],
[3, 2, dimmer],
[3, 3, dimmer],
[3, 4, dimmer],
[3, 5],
[3, 6],
[3, 11],
[3, 12, dimmer],
[3, 13, dimmer],
[3, 14, dimmer],
[3, 15],
[3, 16],
[4, 1],
[4, 4, dimmer],
[4, 5],
[4, 6],
[4, 11],
[4, 14, dimmer],
[4, 15],
[4, 16],
[5, 2],
[5, 4, dimmer],
[5, 5],
[5, 12],
[5, 14, dimmer],
[5, 15],
]
upperFace = [
[1, 2],
[1, 3],
[1, 14],
[1, 15],
[2, 1],
[2, 2, dimmer],
[2, 5],
[2, 12],
[2, 15, dimmer],
[2, 16],
[3, 1],
[3, 2, dimmer],
[3, 3, dimmer],
[3, 4, dimmer],
[3, 5],
[3, 6],
[3, 11],
[3, 12],
[3, 13, dimmer],
[3, 14, dimmer],
[3, 15, dimmer],
[3, 16],
[4, 1],
[4, 2],
[4, 3],
[4, 4],
[4, 5],
[4, 6],
[4, 11],
[4, 12],
[4, 13],
[4, 14],
[4, 15],
[4, 16],
[5, 2],
[5, 3],
[5, 4],
[5, 5],
[5, 12],
[5, 13],
[5, 14],
[5, 15],
]
lowerFace = [
[1, 2],
[1, 3],
[1, 4],
[1, 13],
[1, 14],
[1, 15],
[2, 1],
[2, 2],
[2, 3],
[2, 4],
[2, 5],
[2, 12],
[2, 13],
[2, 14],
[2, 15],
[2, 16],
[3, 1],
[3, 2, dimmer],
[3, 3, dimmer],
[3, 4, dimmer],
[3, 5, dimmer],
[3, 6],
[3, 11],
[3, 12, dimmer],
[3, 13, dimmer],
[3, 14, dimmer],
[3, 15, dimmer],
[3, 16],
[4, 1],
[4, 2, dimmer],
[4, 5, dimmer],
[4, 6],
[4, 11],
[4, 12, dimmer],
[4, 15, dimmer],
[4, 16],
[5, 2],
[5, 4],
[5, 5],
[5, 12],
[5, 13],
[5, 15],
]
blinkFace = [
[3, 2],
[3, 3],
[3, 4],
[3, 13],
[3, 14],
[3, 15],
[4, 1],
[4, 5],
[4, 12],
[4, 16],
]
last_blink = 0
next_blink = 0
stop_blink = 0
def __init__(self):
self.reset_fbuf()
self.counter = 0
self.next_blink = random.randint(
self.ticks_per_sec * 10, self.ticks_per_sec * 20
)
def reset_fbuf(self):
self.fbuf = [
bytearray(18),
bytearray(18),
bytearray(18),
bytearray(18),
bytearray(18),
bytearray(18),
bytearray(18),
]
def face(self):
faceBuf = self.standardFace
self.reset_fbuf()
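        # NOTE: move_x/move_y are hard-coded to 0 below, so only the standard
        # and blink faces are ever shown; the tilt tracking described in the
        # module docstring (via the imported badge module) is not wired up yet,
        # which also leaves the directional and wink face tables unreachable.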
move_y = 0
move_x = 0
if self.stop_blink > 0:
self.stop_blink -= 1
faceBuf = self.blinkFace
elif self.last_blink > self.next_blink:
faceBuf = self.blinkFace
self.last_blink = 0
self.stop_blink = random.randint(
int(self.ticks_per_sec * 0.2), int(self.ticks_per_sec * 0.45)
)
self.next_blink = random.randint(
int(self.ticks_per_sec * 10), int(self.ticks_per_sec * 20)
)
elif move_x == -1 and move_y == -1:
faceBuf = self.lowerRightFace
elif move_x == -1 and move_y == 0:
faceBuf = self.rightFace
elif move_x == -1 and move_y == 1:
faceBuf = self.upperRightFace
elif move_x == 0 and move_y == -1:
faceBuf = self.lowerFace
elif move_x == 0 and move_y == 0:
faceBuf = self.standardFace
elif move_x == 0 and move_y == 1:
faceBuf = self.upperFace
elif move_x == 1 and move_y == -1:
faceBuf = self.lowerLeftFace
elif move_x == 1 and move_y == 0:
faceBuf = self.leftFace
elif move_x == 1 and move_y == 1:
faceBuf = self.upperLeftFace
self.last_blink += 1
        for xy in faceBuf:
            self.onPixel(xy)
def onPixel(self, xy):
if len(xy) == 3:
self.fbuf[xy[0]][xy[1]] = xy[2]
else:
self.fbuf[xy[0]][xy[1]] = 255
def draw(self):
self.face()
dcfurs.set_frame(self.fbuf)
| en | 0.628668 | Furry Face Simulator Use: http://liquidthex.com/dcfursBadgePixelator/ Features: * Will randomly blink every 10 - 20 seconds if no large-scale movements happen (winks) * Will track eyes with small nudges in up / down, left / right tilts * Will wink left/right if tilted too far * Will close eyes if held upside down * Will close eyes when shaken | 2.705143 | 3 |
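The blink scheduling inside fur.face() is easiest to follow in isolation. Below is a minimal, runnable sketch of the same state machine with the badge-specific modules (badge, dcfurs) stripped out; the tick constants mirror the class above, and everything else is illustrative:

import random

INTERVAL_MS = 45                        # draw() is called roughly every 45 ms
TICKS_PER_SEC = 1000 // INTERVAL_MS     # ~22 ticks per second

def blink_stepper():
    """Yield True on ticks where the eyes should be closed."""
    last = 0
    nxt = random.randint(TICKS_PER_SEC * 10, TICKS_PER_SEC * 20)
    stop = 0
    while True:
        if stop > 0:                    # mid-blink: hold the eyes closed
            stop -= 1
            blinking = True
        elif last > nxt:                # time to start a new blink
            blinking = True
            last = 0
            stop = random.randint(int(TICKS_PER_SEC * 0.2),
                                  int(TICKS_PER_SEC * 0.45))
            nxt = random.randint(TICKS_PER_SEC * 10, TICKS_PER_SEC * 20)
        else:
            blinking = False
        last += 1
        yield blinking

# Roughly one simulated minute of ticks: expect 3 to 6 short blinks.
steps = blink_stepper()
closed = sum(next(steps) for _ in range(TICKS_PER_SEC * 60))
print("eyes-closed ticks in 60s:", closed)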
ppo/ppo/buffer.py | Gregory-Eales/ml-reimplementations | 1 | 6633269 | <filename>ppo/ppo/buffer.py<gh_stars>1-10
import torch
import numpy as np
class Buffer(object):
def __init__(self):
# store actions
self.action_buffer = []
# store old actions
self.old_action_buffer = []
# store state
self.observation_buffer = []
# store reward
self.reward_buffer = []
# store advantage
self.advantage_buffer = []
self.old_policy = None
def store_observation(self, obs):
self.observation_buffer.append(obs)
def store_reward(self, rwrd):
self.reward_buffer.append(rwrd)
def store_action(self, act):
self.action_buffer.append(act)
def store_old_action(self, old_act):
self.old_action_buffer.append(old_act)
def store_advantage(self, adv):
self.advantage_buffer.append(adv)
def clear_buffer(self):
# store actions
self.action_buffer = []
# store old actions
self.old_action_buffer = []
# store state
self.observation_buffer = []
# store reward
self.reward_buffer = []
# store advantage
self.advantage_buffer = []
def get_tensors(self):
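        # Note: the first stored observation is dropped, presumably so the
        # observation tensor lines up one-to-one with the actions/rewards
        # recorded after it.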
observations = torch.Tensor(self.observation_buffer[1:])
actions = torch.cat(self.action_buffer)
old_actions = torch.cat(self.old_action_buffer)
rewards = torch.Tensor(self.reward_buffer).reshape(-1, 1)
advantages = torch.Tensor(self.advantage_buffer)
return observations, actions, old_actions, rewards, advantages
| <filename>ppo/ppo/buffer.py<gh_stars>1-10
import torch
import numpy as np
class Buffer(object):
def __init__(self):
# store actions
self.action_buffer = []
# store old actions
self.old_action_buffer = []
# store state
self.observation_buffer = []
# store reward
self.reward_buffer = []
# store advantage
self.advantage_buffer = []
self.old_policy = None
def store_observation(self, obs):
self.observation_buffer.append(obs)
def store_reward(self, rwrd):
self.reward_buffer.append(rwrd)
def store_action(self, act):
self.action_buffer.append(act)
def store_old_action(self, old_act):
self.old_action_buffer.append(old_act)
def store_advantage(self, adv):
self.advantage_buffer.append(adv)
def clear_buffer(self):
# store actions
self.action_buffer = []
# store old actions
self.old_action_buffer = []
# store state
self.observation_buffer = []
# store reward
self.reward_buffer = []
# store advantage
self.advantage_buffer = []
def get_tensors(self):
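        # Note: the first stored observation is dropped, presumably so the
        # observation tensor lines up one-to-one with the actions/rewards
        # recorded after it.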
observations = torch.Tensor(self.observation_buffer[1:])
actions = torch.cat(self.action_buffer)
old_actions = torch.cat(self.old_action_buffer)
rewards = torch.Tensor(self.reward_buffer).reshape(-1, 1)
advantages = torch.Tensor(self.advantage_buffer)
return observations, actions, old_actions, rewards, advantages
| en | 0.825711 | # store actions # store old actions # store state # store reward # store advantage # store actions # store old actions # store state # store reward # store advantage | 2.304491 | 2 |
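A usage sketch for the Buffer class above, assuming it is in scope. The rollout values (observations, actions, advantages) are stand-ins invented for illustration, not outputs of a real policy or environment:

import torch

buf = Buffer()

buf.store_observation([0.0, 0.0, 0.0, 0.0])      # initial state
for t in range(5):                               # tiny hypothetical rollout
    action = torch.rand(1, 2)                    # would come from the current policy
    buf.store_action(action)
    buf.store_old_action(action.detach())        # would come from the old policy
    buf.store_observation(torch.rand(4).tolist())  # next state
    buf.store_reward(1.0)
    buf.store_advantage(0.5)                     # would come from GAE or similar

obs, acts, old_acts, rewards, advs = buf.get_tensors()
print(obs.shape, acts.shape, rewards.shape)      # (5, 4) (5, 2) (5, 1)
buf.clear_buffer()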
test_lock.py | bellahOchola/password-locker | 0 | 6633270 | import unittest
from password import *
class TestUser(unittest.TestCase):
'''
This is a test class that defines test cases
for user login and signup details.
Args:
unittest.TestCase: TestCase class aids in the formation of test cases
'''
def setUp(self):
'''
method to run before each test case.
'''
self.new_user = User('ocholaB','<PASSWORD>')
def tearDown(self):
'''
method that is run after each test case
'''
User.users_list = []
def test_init(self):
'''
tests if the object is initialized in the right way
'''
self.assertEqual(self.new_user.user_name,'ocholaB')
self.assertEqual(self.new_user.password,'<PASSWORD>')
def test_create_account(self):
'''
        tests that a user can sign up to the application
'''
self.new_user.create_account()
self.assertEqual(len(User.users_list),1)
def test_save_multiple_accounts(self):
'''
checks if we can store multiple accounts
'''
self.new_user.create_account()
user1 = User('kamau', 'kamau123')
user1.create_account()
self.assertEqual(len(User.users_list),2)
    def test_user_existence(self):
'''
        checks if the user's details already exist, using their username and password
'''
self.new_user.create_account()
user1 = User('kamau', 'kamau123')
user1.create_account()
find_user = User.login('kamau','kamau123')
self.assertTrue(find_user)
class TestCredentials(unittest.TestCase):
'''
    class that enables a user to view their credentials, delete them, and create new ones
'''
def setUp(self):
'''
method to run before each test case.
'''
self.new_credentials = Credentials('twitter','bellah','<PASSWORD>')
def tearDown(self):
'''
method to run after each test case.
'''
Credentials.credential_list= []
def test_init(self):
'''
tests if the object is initialized in the right way
'''
self.assertEqual(self.new_credentials.account_name,'twitter')
self.assertEqual(self.new_credentials.user_name,'bellah')
self.assertEqual(self.new_credentials.password,'<PASSWORD>')
def test_create_credentials(self):
'''
        this test enables a user to create new account credentials
'''
self.new_credentials.save_credential()
self.assertEqual(len(Credentials.credential_list),1)
def test_save_multiple_credentials(self):
'''
        test that enables a user to store multiple account credentials
'''
self.new_credentials.save_credential()
user1 = Credentials('facebook','john', '<PASSWORD>')
user1.save_credential()
self.assertEqual(len(Credentials.credential_list),2)
def test_find_credential(self):
'''
        test that enables a user to find the password of a specific account
'''
self.new_credentials.save_credential()
user1 = Credentials('facebook', 'john','<PASSWORD>')
user1.save_credential()
find_credential = Credentials.find_by_accountname('facebook')
self.assertEqual(find_credential.password,user1.password)
def test_display_credential(self):
'''
        this test enables a user to view their account credentials
'''
self.assertEqual(Credentials.display_credentials(),Credentials.credential_list)
def test_delete_credential(self):
'''
        test that allows a user to delete a given account's credentials
'''
self.new_credentials.save_credential()
self.new_credentials.delete_credential()
self.assertEqual(len(Credentials.credential_list),0)
if __name__ == '__main__':
unittest.main()
| import unittest
from password import *
class TestUser(unittest.TestCase):
'''
This is a test class that defines test cases
for user login and signup details.
Args:
unittest.TestCase: TestCase class aids in the formation of test cases
'''
def setUp(self):
'''
method to run before each test case.
'''
self.new_user = User('ocholaB','<PASSWORD>')
def tearDown(self):
'''
method that is run after each test case
'''
User.users_list = []
def test_init(self):
'''
tests if the object is initialized in the right way
'''
self.assertEqual(self.new_user.user_name,'ocholaB')
self.assertEqual(self.new_user.password,'<PASSWORD>')
def test_create_account(self):
'''
        tests that a user can sign up to the application
'''
self.new_user.create_account()
self.assertEqual(len(User.users_list),1)
def test_save_multiple_accounts(self):
'''
checks if we can store multiple accounts
'''
self.new_user.create_account()
user1 = User('kamau', 'kamau123')
user1.create_account()
self.assertEqual(len(User.users_list),2)
    def test_user_existence(self):
'''
        checks if the user's details already exist, using their username and password
'''
self.new_user.create_account()
user1 = User('kamau', 'kamau123')
user1.create_account()
find_user = User.login('kamau','kamau123')
self.assertTrue(find_user)
class TestCredentials(unittest.TestCase):
'''
    class that enables a user to view their credentials, delete them, and create new ones
'''
def setUp(self):
'''
method to run before each test case.
'''
self.new_credentials = Credentials('twitter','bellah','<PASSWORD>')
def tearDown(self):
'''
method to run after each test case.
'''
Credentials.credential_list= []
def test_init(self):
'''
tests if the object is initialized in the right way
'''
self.assertEqual(self.new_credentials.account_name,'twitter')
self.assertEqual(self.new_credentials.user_name,'bellah')
self.assertEqual(self.new_credentials.password,'<PASSWORD>')
def test_create_credentials(self):
'''
        this test enables a user to create new account credentials
'''
self.new_credentials.save_credential()
self.assertEqual(len(Credentials.credential_list),1)
def test_save_multiple_credentials(self):
'''
        test that enables a user to store multiple account credentials
'''
self.new_credentials.save_credential()
user1 = Credentials('facebook','john', '<PASSWORD>')
user1.save_credential()
self.assertEqual(len(Credentials.credential_list),2)
def test_find_credential(self):
'''
        test that enables a user to find the password of a specific account
'''
self.new_credentials.save_credential()
user1 = Credentials('facebook', 'john','<PASSWORD>')
user1.save_credential()
find_credential = Credentials.find_by_accountname('facebook')
self.assertEqual(find_credential.password,user1.password)
def test_display_credential(self):
'''
        this test enables a user to view their account credentials
'''
self.assertEqual(Credentials.display_credentials(),Credentials.credential_list)
def test_delete_credential(self):
'''
        test that allows a user to delete a given account's credentials
'''
self.new_credentials.save_credential()
self.new_credentials.delete_credential()
self.assertEqual(len(Credentials.credential_list),0)
if __name__ == '__main__':
unittest.main()
| en | 0.864584 | This is a test class that defines test cases for user login and signup details. Args: unittest.TestCase: TestCase class aids in the formation of test cases method to run before each test case. method that is run after each test case tests if the object is initialized in the right way tests that enables user sign up to the application checks if we can store multiple accounts checks if the user details already exist by using their username and passwords class that enables user view their credentials, delete and even create new ones method to run before each test case. method to run after each test case. tests if the object is initialized in the right way this test enables user to create new account credentials test that enables user store multiple account credentials test enables user find the password of a specific account this test enables user view their account credentials test allows user delete a given accounts credentials | 4.111716 | 4 |
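The tests above do "from password import *", but the password module itself is not part of this record. The following is a minimal sketch of the User/Credentials API the assertions imply; it is a reconstruction for illustration, not the project's actual implementation:

class User:
    users_list = []

    def __init__(self, user_name, password):
        self.user_name = user_name
        self.password = password

    def create_account(self):
        User.users_list.append(self)

    @classmethod
    def login(cls, user_name, password):
        # True when a stored account matches both fields
        return any(u.user_name == user_name and u.password == password
                   for u in cls.users_list)


class Credentials:
    credential_list = []

    def __init__(self, account_name, user_name, password):
        self.account_name = account_name
        self.user_name = user_name
        self.password = password

    def save_credential(self):
        Credentials.credential_list.append(self)

    def delete_credential(self):
        Credentials.credential_list.remove(self)

    @classmethod
    def find_by_accountname(cls, account_name):
        for cred in cls.credential_list:
            if cred.account_name == account_name:
                return cred

    @classmethod
    def display_credentials(cls):
        return cls.credential_list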
test/cluster_test/test.py | viyadb/viyadb | 109 | 6633271 | <gh_stars>100-1000
#!/usr/bin/env python3
import csv
import json
import kafka
import requests
import time
nodes = ['viyadb_1', 'viyadb_2']
columns = [
'app_id', 'user_id', 'event_time', 'country', 'city', 'device_type',
'device_vendor', 'ad_network', 'campaign', 'site_id', 'event_type',
'event_name', 'organic', 'days_from_install', 'revenue', 'users', 'count'
]
def read_cluster_config(host):
"""Return configuration written by ViyaDB node controller"""
try:
r = requests.get('http://{}:5555/config'.format(host))
if r.status_code == 200:
return r.json()
except requests.exceptions.ConnectionError:
pass
return None
def wait_for_viyadb_cluster():
"""Check whether all the nodes are started by reading their controller config"""
while True:
time.sleep(3)
for node in nodes:
if not read_cluster_config(node):
print(
'Not all ViyaDB nodes have started yet ... will retry in 3s'
)
break
else:
break
print('ViyaDB cluster is ready!')
def wait_for_kafka():
while True:
try:
return kafka.KafkaProducer(
bootstrap_servers='kafka:9092',
value_serializer=lambda v: json.dumps(v).encode('utf-8'))
except kafka.errors.NoBrokersAvailable:
print('Kafka is not available ... will retry in 3s')
time.sleep(3)
def send_new_notifications():
"""Send new micro-batches info to Kafka for triggering real-time ingestion"""
producer = wait_for_kafka()
new_notifications = [
{'id':1565439480000,'tables':{'events':{'paths':['/tmp/viyadb/deepstore/realtime/events/dt=1565439480000/mb=1565439480000'],'columns':columns,'recordCount':156}},'offsets':[{'topic':'events','partition':0,'offset':1536}]}, \
{'id':1565439500000,'tables':{'events':{'paths':['/tmp/viyadb/deepstore/realtime/events/dt=1565439480000/mb=1565439500000'],'columns':columns,'recordCount':173}},'offsets':[{'topic':'events','partition':0,'offset':1739}]}, \
{'id':1565439520000,'tables':{'events':{'paths':['/tmp/viyadb/deepstore/realtime/events/dt=1565439480000/mb=1565439520000'],'columns':columns,'recordCount':174}},'offsets':[{'topic':'events','partition':0,'offset':1931}]}, \
{'id':1565439540000,'tables':{'events':{'paths':['/tmp/viyadb/deepstore/realtime/events/dt=1565439540000/mb=1565439540000'],'columns':columns,'recordCount':161}},'offsets':[{'topic':'events','partition':0,'offset':2133}]}, \
{'id':1565439560000,'tables':{'events':{'paths':['/tmp/viyadb/deepstore/realtime/events/dt=1565439540000/mb=1565439560000'],'columns':columns,'recordCount':170}},'offsets':[{'topic':'events','partition':0,'offset':2338}]}, \
{'id':1565439580000,'tables':{'events':{'paths':['/tmp/viyadb/deepstore/realtime/events/dt=1565439540000/mb=1565439580000'],'columns':columns,'recordCount':173}},'offsets':[{'topic':'events','partition':0,'offset':2530}]}, \
{'id':1565439600000,'tables':{'events':{'paths':['/tmp/viyadb/deepstore/realtime/events/dt=1565439600000/mb=1565439600000'],'columns':columns,'recordCount':174}},'offsets':[{'topic':'events','partition':0,'offset':2734}]}, \
{'id':1565439620000,'tables':{'events':{'paths':['/tmp/viyadb/deepstore/realtime/events/dt=1565439600000/mb=1565439620000'],'columns':columns,'recordCount':170}},'offsets':[{'topic':'events','partition':0,'offset':2928}]}, \
]
for e in new_notifications:
producer.send('rt-notifications', e).get()
def send_sql_query(query, host, port, wait_for_batch=None):
while True:
try:
r = requests.post(
'http://{}:{}/sql'.format(host, port), data=query)
except requests.exceptions.ConnectionError:
print('Host {}:{} is not available ... will retry in 3s'.format(
host, port))
time.sleep(3)
continue
r.raise_for_status()
if wait_for_batch:
last_batch = r.headers.get("X-Last-Batch-ID")
if last_batch and int(last_batch) < wait_for_batch:
time.sleep(3)
continue
break
return csv.reader(
r.text.splitlines(), delimiter='\t', quoting=csv.QUOTE_NONE)
def compare_results(expected, actual):
if expected != actual:
raise Exception('Expected: {}\nActual: {}'.format(expected, actual))
def check_node_bootstrapped(query, host):
compare_results({
'com.skype.raider': '44'
}, dict(send_sql_query(query, host, 5000, 1565439460000)))
compare_results({
'com.dropbox.android': '68'
}, dict(send_sql_query(query, host, 5001, 1565439460000)))
compare_results({
'com.dropbox.android': '68',
'com.skype.raider': '44'
}, dict(send_sql_query(query, host, 5555)))
def check_node_uptodate(query, host):
compare_results({
'com.skype.raider': '76'
}, dict(send_sql_query(query, host, 5000, 1565439620000)))
compare_results({
'com.dropbox.android': '164'
}, dict(send_sql_query(query, host, 5001, 1565439620000)))
compare_results({
'com.dropbox.android': '164',
'com.skype.raider': '76'
}, dict(send_sql_query(query, host, 5555)))
def get_json(host, port, cmd):
r = requests.get('http://{}:{}/{}'.format(host, port, cmd))
r.raise_for_status()
return r.json()
def send_control_cmd(host, cmd):
return get_json(host, 8080, cmd)
def validate_statsd_metrics(host):
if len([
m for m in send_control_cmd(host, 'statsd_metrics')
if m.startswith('viyadb.{}'.format(host))
]) == 0:
raise Exception(
'No StatsD metrics received from host: {}'.format(host))
def validate_metadata(host):
for port in [5000, 5001]:
db_meta = get_json(host, port, 'database/meta')
if db_meta['tables'][0]['name'] != 'events':
raise Exception(
'Wrong database metadata received from host: {}'.format(host))
table_meta = get_json(host, port, 'tables/events/meta')
        if 'cardinality' not in [
d for d in table_meta['dimensions'] if d['name'] == 'app_id'
][0]:
raise Exception(
'Wrong table metadata received from host: {}'.format(host))
if __name__ == '__main__':
wait_for_viyadb_cluster()
query = 'SELECT app_id,count FROM events WHERE app_id IN (\'com.dropbox.android\', \'com.skype.raider\')'
for host in nodes:
check_node_bootstrapped(query, host)
send_new_notifications()
time.sleep(3)
for host in nodes:
check_node_uptodate(query, host)
send_control_cmd(nodes[0], 'kill_worker')
check_node_uptodate(query, nodes[0])
for host in nodes:
validate_statsd_metrics(host)
validate_metadata(host)
search_query = 'SELECT SEARCH(site_id, \'booking.com\') FROM events'
compare_results([['http://booking.com/page53']],
list(send_sql_query(search_query, host, 5000)))
| #!/usr/bin/env python3
import csv
import json
import kafka
import requests
import time
nodes = ['viyadb_1', 'viyadb_2']
columns = [
'app_id', 'user_id', 'event_time', 'country', 'city', 'device_type',
'device_vendor', 'ad_network', 'campaign', 'site_id', 'event_type',
'event_name', 'organic', 'days_from_install', 'revenue', 'users', 'count'
]
def read_cluster_config(host):
"""Return configuration written by ViyaDB node controller"""
try:
r = requests.get('http://{}:5555/config'.format(host))
if r.status_code == 200:
return r.json()
except requests.exceptions.ConnectionError:
pass
return None
def wait_for_viyadb_cluster():
"""Check whether all the nodes are started by reading their controller config"""
while True:
time.sleep(3)
for node in nodes:
if not read_cluster_config(node):
print(
'Not all ViyaDB nodes have started yet ... will retry in 3s'
)
break
else:
break
print('ViyaDB cluster is ready!')
def wait_for_kafka():
while True:
try:
return kafka.KafkaProducer(
bootstrap_servers='kafka:9092',
value_serializer=lambda v: json.dumps(v).encode('utf-8'))
except kafka.errors.NoBrokersAvailable:
print('Kafka is not available ... will retry in 3s')
time.sleep(3)
def send_new_notifications():
"""Send new micro-batches info to Kafka for triggering real-time ingestion"""
producer = wait_for_kafka()
new_notifications = [
{'id':1565439480000,'tables':{'events':{'paths':['/tmp/viyadb/deepstore/realtime/events/dt=1565439480000/mb=1565439480000'],'columns':columns,'recordCount':156}},'offsets':[{'topic':'events','partition':0,'offset':1536}]}, \
{'id':1565439500000,'tables':{'events':{'paths':['/tmp/viyadb/deepstore/realtime/events/dt=1565439480000/mb=1565439500000'],'columns':columns,'recordCount':173}},'offsets':[{'topic':'events','partition':0,'offset':1739}]}, \
{'id':1565439520000,'tables':{'events':{'paths':['/tmp/viyadb/deepstore/realtime/events/dt=1565439480000/mb=1565439520000'],'columns':columns,'recordCount':174}},'offsets':[{'topic':'events','partition':0,'offset':1931}]}, \
{'id':1565439540000,'tables':{'events':{'paths':['/tmp/viyadb/deepstore/realtime/events/dt=1565439540000/mb=1565439540000'],'columns':columns,'recordCount':161}},'offsets':[{'topic':'events','partition':0,'offset':2133}]}, \
{'id':1565439560000,'tables':{'events':{'paths':['/tmp/viyadb/deepstore/realtime/events/dt=1565439540000/mb=1565439560000'],'columns':columns,'recordCount':170}},'offsets':[{'topic':'events','partition':0,'offset':2338}]}, \
{'id':1565439580000,'tables':{'events':{'paths':['/tmp/viyadb/deepstore/realtime/events/dt=1565439540000/mb=1565439580000'],'columns':columns,'recordCount':173}},'offsets':[{'topic':'events','partition':0,'offset':2530}]}, \
{'id':1565439600000,'tables':{'events':{'paths':['/tmp/viyadb/deepstore/realtime/events/dt=1565439600000/mb=1565439600000'],'columns':columns,'recordCount':174}},'offsets':[{'topic':'events','partition':0,'offset':2734}]}, \
{'id':1565439620000,'tables':{'events':{'paths':['/tmp/viyadb/deepstore/realtime/events/dt=1565439600000/mb=1565439620000'],'columns':columns,'recordCount':170}},'offsets':[{'topic':'events','partition':0,'offset':2928}]}, \
]
for e in new_notifications:
producer.send('rt-notifications', e).get()
def send_sql_query(query, host, port, wait_for_batch=None):
while True:
try:
r = requests.post(
'http://{}:{}/sql'.format(host, port), data=query)
except requests.exceptions.ConnectionError:
print('Host {}:{} is not available ... will retry in 3s'.format(
host, port))
time.sleep(3)
continue
r.raise_for_status()
if wait_for_batch:
last_batch = r.headers.get("X-Last-Batch-ID")
if last_batch and int(last_batch) < wait_for_batch:
time.sleep(3)
continue
break
return csv.reader(
r.text.splitlines(), delimiter='\t', quoting=csv.QUOTE_NONE)
def compare_results(expected, actual):
if expected != actual:
raise Exception('Expected: {}\nActual: {}'.format(expected, actual))
def check_node_bootstrapped(query, host):
compare_results({
'com.skype.raider': '44'
}, dict(send_sql_query(query, host, 5000, 1565439460000)))
compare_results({
'com.dropbox.android': '68'
}, dict(send_sql_query(query, host, 5001, 1565439460000)))
compare_results({
'com.dropbox.android': '68',
'com.skype.raider': '44'
}, dict(send_sql_query(query, host, 5555)))
def check_node_uptodate(query, host):
compare_results({
'com.skype.raider': '76'
}, dict(send_sql_query(query, host, 5000, 1565439620000)))
compare_results({
'com.dropbox.android': '164'
}, dict(send_sql_query(query, host, 5001, 1565439620000)))
compare_results({
'com.dropbox.android': '164',
'com.skype.raider': '76'
}, dict(send_sql_query(query, host, 5555)))
def get_json(host, port, cmd):
r = requests.get('http://{}:{}/{}'.format(host, port, cmd))
r.raise_for_status()
return r.json()
def send_control_cmd(host, cmd):
return get_json(host, 8080, cmd)
def validate_statsd_metrics(host):
if len([
m for m in send_control_cmd(host, 'statsd_metrics')
if m.startswith('viyadb.{}'.format(host))
]) == 0:
raise Exception(
'No StatsD metrics received from host: {}'.format(host))
def validate_metadata(host):
for port in [5000, 5001]:
db_meta = get_json(host, port, 'database/meta')
if db_meta['tables'][0]['name'] != 'events':
raise Exception(
'Wrong database metadata received from host: {}'.format(host))
table_meta = get_json(host, port, 'tables/events/meta')
        if 'cardinality' not in [
d for d in table_meta['dimensions'] if d['name'] == 'app_id'
][0]:
raise Exception(
'Wrong table metadata received from host: {}'.format(host))
if __name__ == '__main__':
wait_for_viyadb_cluster()
query = 'SELECT app_id,count FROM events WHERE app_id IN (\'com.dropbox.android\', \'com.skype.raider\')'
for host in nodes:
check_node_bootstrapped(query, host)
send_new_notifications()
time.sleep(3)
for host in nodes:
check_node_uptodate(query, host)
send_control_cmd(nodes[0], 'kill_worker')
check_node_uptodate(query, nodes[0])
for host in nodes:
validate_statsd_metrics(host)
validate_metadata(host)
search_query = 'SELECT SEARCH(site_id, \'booking.com\') FROM events'
compare_results([['http://booking.com/page53']],
list(send_sql_query(search_query, host, 5000))) | en | 0.81958 | #!/usr/bin/env python3 Return configuration written by ViyaDB node controller Check whether all the nodes are started by reading their controller config Send new micro-batches info to Kafka for triggering real-time ingestion | 2.511549 | 3 |
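Both wait_for_viyadb_cluster and send_sql_query above hand-roll the same poll-and-retry loop. The pattern generalizes to a small helper; the timeout below is an invented bound (the test itself retries indefinitely):

import time

def wait_until(predicate, interval=3, timeout=120):
    """Poll predicate() until it returns a truthy value or the timeout elapses."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        result = predicate()
        if result:
            return result
        time.sleep(interval)
    raise TimeoutError("condition not met within %ss" % timeout)

# e.g. wait_until(lambda: read_cluster_config("viyadb_1"))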
topictracking/RuleTopicTrackers.py | Dagu9/Reinforcement-learning-SGD | 0 | 6633272 | <reponame>Dagu9/Reinforcement-learning-SGD
###############################################################################
# PyDial: Multi-domain Statistical Spoken Dialogue System Software
###############################################################################
#
# Copyright 2015 - 2019
# Cambridge University Engineering Department Dialogue Systems Group
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###############################################################################
'''
RuleTopicTrackers.py - Rule based topic trackers
==========================================================================
Copyright CUED Dialogue Systems Group 2015 - 2017
.. seealso:: CUED Imports/Dependencies:
import :mod:`utils.Settings` |.|
import :mod:`utils.ContextLogger` |.|
import :mod:`ontology.OntologyUtils` |.|
************************
'''
__author__ = "cued_dialogue_systems_group"
'''
Modifications History
===============================
Date Author Description
===============================
Jul 20 2016 lmr46 Inferring only the domains configured in the config-file
Note that keywords for domains are set in the dictionary here (handcoded)
            TODO: What happens when the same keyword applies to different domains?
'''
from utils import Settings, ContextLogger
from ontology import OntologyUtils
logger = ContextLogger.getLogger('')
class TopicTrackerInterface(object):
"""Template for any Topic Tracker for the cued-python system
.. Note:
To dynamically load a class, the __init__() must take one argument: domainString.
"""
def infer_domain(self,userActHyps=None):
pass # Define in actual class. Must set and also return self.current_tracking_result
def restart(self):
pass # Define in actual class. May be some notion of state etc to be reset in more advanced topic trackers
class TextBasedSwitchTopicTracker(TopicTrackerInterface):
"""When using texthub, you can enter: switch("CamRestaurants") which will change domains to CamRestaurants for example.
-- if switch("XX") not entered, assumes you want to stay in domain of previous turn
"""
def __init__(self):
self.restart()
def restart(self):
self.current_tracking_result = None
self.FOUND_DOMAIN = False
def infer_domain(self, userActHyps=None):
"""userActHyps : [(text, prob)]
"""
if 'switch("' in userActHyps[0][0]:
candidateDomain = userActHyps[0][0].split('"')[1] # a little fragile -
if candidateDomain in OntologyUtils.available_domains:
self.current_tracking_result = candidateDomain
self.FOUND_DOMAIN = True
else:
logger.warning("Not a valid domain tag in your switch('X') command - remain with previous domain")
elif not self.FOUND_DOMAIN:
msg = '\nSWITCH TOPIC TRACKER USAGE: When using the texthub switch topic tracker '
msg += '-You should start by saying which domain to switch to.\n'
msg += 'Enter exactly (where DOMAINTAG is CamRestaurants,Laptops6 etc): switch("DOMAINTAG")\n'
msg += 'You can continue on directly by entering for example: switch("DOMAINTAG")i want a cheap one\n'
msg += 'Alternatively, use a different topic tracker.'
exit(msg)
else:
logger.info('Switch("DOMAINTAG") not detected - staying with previous domain')
return self.current_tracking_result
class KeywordSpottingTopicTracker(TopicTrackerInterface):
""" Just a hacky topic tracker to develop voicehub with.
:: Assumptions/Notes
    -- To resolve restaurants and hotels, we will also have to spot the location
-- Assume we will stick with last domain unless we detect one of our keywords
"""
def __init__(self):
self.current_tracking_result = None
self.keywords = dict.fromkeys(OntologyUtils.available_domains, None)
#lmr46: added some keywords or lexical units ('food')
#consider to have a Lexicon that groups words per concepts, there are available lexica for English
#lmr46: Adapting only the domains available in the config file
domains = Settings.config.get("GENERAL",'domains') # a Hub has checked this exists
possible_domains = domains.split(',')
for dom in possible_domains:
kwds=[]
if dom=="CamRestaurants":
kwds=["cambridge","restaurant",'food','eat']
elif dom=="CamHotels":
kwds=["cambridge","hotel", "guest house", "guesthouse"]
elif dom=="SFRestaurants":
kwds=["san francisco","restaurant", "food","place to eat"]
elif dom=="SFHotels":
kwds=["san francisco","hotel", "guest house", "guesthouse", "hostel", "motel", "place to stay"]
elif dom=="wikipedia":
kwds=["wiki"]
self.keywords[dom]=kwds
# self.keywords["CamRestaurants"] = ["cambridge","restaurant",'food']
# self.keywords["CamHotels"] = ["cambridge","hotel", "guest house", "guesthouse"]
# self.keywords["SFRestaurants"] = ["san francisco","restaurant", "food","book"] # ASR cant recognise much at present -- will develop
# # system using CamRestaurants and CamHotels
# self.keywords["SFHotels"] = ["san francisco","hotel", "guest house", "guesthouse", "hostel", "motel", "book"]
# self.keywords["wikipedia"] = ["wiki"] # this could be used like "OK Google" or "Alexa"
def restart(self):
self.current_tracking_result = None
def infer_domain(self,userActHyps=None):
"""
-- Assumptions: Only working with the top hypothesis from ASR
-- Stick to last domain if nothing spotted in this turn
-- ORDER IS IMPORTANT -- ie it will hand off to FIRST domain a keyword is spotted in
"""
# TODO - could require all keywords to be present - e.g to disambiguate cam hotels from SFHotels
#lmr46: allowing overlapping keywords between domains
#su259: making current_tracking_result a local variable. method returns none if no new domain has been identified.
current_tracking_result = None
        overlapping_domains = []
for dstring in self.keywords.keys():
if self._is_a_keyword_in_sentence(self.keywords[dstring],userActHyps[0][0]):
logger.info(dstring + " keyword found in: " + userActHyps[0][0])
if "i(=" in userActHyps[0][0] or "inform(query=" in userActHyps[0][0]:
current_tracking_result = "wikipedia" # this is just a hack so i can wiki things like hotels!
else:
                    overlapping_domains.append(dstring)
current_tracking_result = dstring
#break #TODO: Not handling overlapping of keywords between domains - it has to disambiguate!!!
        if len(overlapping_domains) > 1:
current_tracking_result = None
return current_tracking_result
def _is_a_keyword_in_sentence(self,keywords, sentence):
"""Note keywords just use the first spotted one ... this needs to be a little more sophisticated to resolve
SF hotel versus Cambridge hotel
"""
#TODO - will need changing if/when ASR is good enough to decode LOCATIONS - so that a match will require e.g
# "CAMBRIDGE" + "RESTAURANT" to count for TT domain
if keywords is not None:
for keyword in keywords:
if keyword in sentence.lower():
return True
return False
#END OF FILE
| ###############################################################################
# PyDial: Multi-domain Statistical Spoken Dialogue System Software
###############################################################################
#
# Copyright 2015 - 2019
# Cambridge University Engineering Department Dialogue Systems Group
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###############################################################################
'''
RuleTopicTrackers.py - Rule based topic trackers
==========================================================================
Copyright CUED Dialogue Systems Group 2015 - 2017
.. seealso:: CUED Imports/Dependencies:
import :mod:`utils.Settings` |.|
import :mod:`utils.ContextLogger` |.|
import :mod:`ontology.OntologyUtils` |.|
************************
'''
__author__ = "cued_dialogue_systems_group"
'''
Modifications History
===============================
Date Author Description
===============================
Jul 20 2016 lmr46 Inferring only the domains configured in the config-file
Note that keywords for domains are set in the dictionary here (handcoded)
            TODO: What happens when the same keyword applies to different domains?
'''
from utils import Settings, ContextLogger
from ontology import OntologyUtils
logger = ContextLogger.getLogger('')
class TopicTrackerInterface(object):
"""Template for any Topic Tracker for the cued-python system
.. Note:
To dynamically load a class, the __init__() must take one argument: domainString.
"""
def infer_domain(self,userActHyps=None):
pass # Define in actual class. Must set and also return self.current_tracking_result
def restart(self):
pass # Define in actual class. May be some notion of state etc to be reset in more advanced topic trackers
class TextBasedSwitchTopicTracker(TopicTrackerInterface):
"""When using texthub, you can enter: switch("CamRestaurants") which will change domains to CamRestaurants for example.
-- if switch("XX") not entered, assumes you want to stay in domain of previous turn
"""
def __init__(self):
self.restart()
def restart(self):
self.current_tracking_result = None
self.FOUND_DOMAIN = False
def infer_domain(self, userActHyps=None):
"""userActHyps : [(text, prob)]
"""
if 'switch("' in userActHyps[0][0]:
candidateDomain = userActHyps[0][0].split('"')[1] # a little fragile -
if candidateDomain in OntologyUtils.available_domains:
self.current_tracking_result = candidateDomain
self.FOUND_DOMAIN = True
else:
logger.warning("Not a valid domain tag in your switch('X') command - remain with previous domain")
elif not self.FOUND_DOMAIN:
msg = '\nSWITCH TOPIC TRACKER USAGE: When using the texthub switch topic tracker '
msg += '-You should start by saying which domain to switch to.\n'
msg += 'Enter exactly (where DOMAINTAG is CamRestaurants,Laptops6 etc): switch("DOMAINTAG")\n'
msg += 'You can continue on directly by entering for example: switch("DOMAINTAG")i want a cheap one\n'
msg += 'Alternatively, use a different topic tracker.'
exit(msg)
else:
logger.info('Switch("DOMAINTAG") not detected - staying with previous domain')
return self.current_tracking_result
class KeywordSpottingTopicTracker(TopicTrackerInterface):
""" Just a hacky topic tracker to develop voicehub with.
:: Assumptions/Notes
    -- To resolve restaurants and hotels, we will also have to spot the location
-- Assume we will stick with last domain unless we detect one of our keywords
"""
def __init__(self):
self.current_tracking_result = None
self.keywords = dict.fromkeys(OntologyUtils.available_domains, None)
#lmr46: added some keywords or lexical units ('food')
#consider to have a Lexicon that groups words per concepts, there are available lexica for English
#lmr46: Adapting only the domains available in the config file
domains = Settings.config.get("GENERAL",'domains') # a Hub has checked this exists
possible_domains = domains.split(',')
for dom in possible_domains:
kwds=[]
if dom=="CamRestaurants":
kwds=["cambridge","restaurant",'food','eat']
elif dom=="CamHotels":
kwds=["cambridge","hotel", "guest house", "guesthouse"]
elif dom=="SFRestaurants":
kwds=["san francisco","restaurant", "food","place to eat"]
elif dom=="SFHotels":
kwds=["san francisco","hotel", "guest house", "guesthouse", "hostel", "motel", "place to stay"]
elif dom=="wikipedia":
kwds=["wiki"]
self.keywords[dom]=kwds
# self.keywords["CamRestaurants"] = ["cambridge","restaurant",'food']
# self.keywords["CamHotels"] = ["cambridge","hotel", "guest house", "guesthouse"]
# self.keywords["SFRestaurants"] = ["san francisco","restaurant", "food","book"] # ASR cant recognise much at present -- will develop
# # system using CamRestaurants and CamHotels
# self.keywords["SFHotels"] = ["san francisco","hotel", "guest house", "guesthouse", "hostel", "motel", "book"]
# self.keywords["wikipedia"] = ["wiki"] # this could be used like "OK Google" or "Alexa"
def restart(self):
self.current_tracking_result = None
def infer_domain(self,userActHyps=None):
"""
-- Assumptions: Only working with the top hypothesis from ASR
-- Stick to last domain if nothing spotted in this turn
-- ORDER IS IMPORTANT -- ie it will hand off to FIRST domain a keyword is spotted in
"""
# TODO - could require all keywords to be present - e.g to disambiguate cam hotels from SFHotels
#lmr46: allowing overlapping keywords between domains
#su259: making current_tracking_result a local variable. method returns none if no new domain has been identified.
current_tracking_result = None
        overlapping_domains = []
for dstring in self.keywords.keys():
if self._is_a_keyword_in_sentence(self.keywords[dstring],userActHyps[0][0]):
logger.info(dstring + " keyword found in: " + userActHyps[0][0])
if "i(=" in userActHyps[0][0] or "inform(query=" in userActHyps[0][0]:
current_tracking_result = "wikipedia" # this is just a hack so i can wiki things like hotels!
else:
                    overlapping_domains.append(dstring)
current_tracking_result = dstring
#break #TODO: Not handling overlapping of keywords between domains - it has to disambiguate!!!
        if len(overlapping_domains) > 1:
current_tracking_result = None
return current_tracking_result
def _is_a_keyword_in_sentence(self,keywords, sentence):
"""Note keywords just use the first spotted one ... this needs to be a little more sophisticated to resolve
SF hotel versus Cambridge hotel
"""
#TODO - will need changing if/when ASR is good enough to decode LOCATIONS - so that a match will require e.g
# "CAMBRIDGE" + "RESTAURANT" to count for TT domain
if keywords is not None:
for keyword in keywords:
if keyword in sentence.lower():
return True
return False
#END OF FILE | en | 0.717595 | ############################################################################### # PyDial: Multi-domain Statistical Spoken Dialogue System Software ############################################################################### # # Copyright 2015 - 2019 # Cambridge University Engineering Department Dialogue Systems Group # # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ############################################################################### RuleTopicTrackers.py - Rule based topic trackers ========================================================================== Copyright CUED Dialogue Systems Group 2015 - 2017 .. seealso:: CUED Imports/Dependencies: import :mod:`utils.Settings` |.| import :mod:`utils.ContextLogger` |.| import :mod:`ontology.OntologyUtils` |.| ************************ Modifications History =============================== Date Author Description =============================== Jul 20 2016 lmr46 Inferring only the domains configured in the config-file Note that keywords for domains are set in the dictionary here (handcoded) TODO: What happen when the same keyword apply for different domains? Template for any Topic Tracker for the cued-python system .. Note: To dynamically load a class, the __init__() must take one argument: domainString. # Define in actual class. Must set and also return self.current_tracking_result # Define in actual class. May be some notion of state etc to be reset in more advanced topic trackers When using texthub, you can enter: switch("CamRestaurants") which will change domains to CamRestaurants for example. -- if switch("XX") not entered, assumes you want to stay in domain of previous turn userActHyps : [(text, prob)] # a little fragile - Just a hacky topic tracker to develop voicehub with. 
:: Assumptions/Notes -- To resolve resturants and hotels will also have to spot location -- Assume we will stick with last domain unless we detect one of our keywords #lmr46: added some keywords or lexical units ('food') #consider to have a Lexicon that groups words per concepts, there are available lexica for English #lmr46: Adapting only the domains available in the config file # a Hub has checked this exists # self.keywords["CamRestaurants"] = ["cambridge","restaurant",'food'] # self.keywords["CamHotels"] = ["cambridge","hotel", "guest house", "guesthouse"] # self.keywords["SFRestaurants"] = ["san francisco","restaurant", "food","book"] # ASR cant recognise much at present -- will develop # # system using CamRestaurants and CamHotels # self.keywords["SFHotels"] = ["san francisco","hotel", "guest house", "guesthouse", "hostel", "motel", "book"] # self.keywords["wikipedia"] = ["wiki"] # this could be used like "OK Google" or "Alexa" -- Assumptions: Only working with the top hypothesis from ASR -- Stick to last domain if nothing spotted in this turn -- ORDER IS IMPORTANT -- ie it will hand off to FIRST domain a keyword is spotted in # TODO - could require all keywords to be present - e.g to disambiguate cam hotels from SFHotels #lmr46: allowing overlapping keywords between domains #su259: making current_tracking_result a local variable. method returns none if no new domain has been identified. # this is just a hack so i can wiki things like hotels! #break #TODO: Not handling overlapping of keywords between domains - it has to disambiguate!!! Note keywords just use the first spotted one ... this needs to be a little more sophisticated to resolve SF hotel versus Cambridge hotel #TODO - will need changing if/when ASR is good enough to decode LOCATIONS - so that a match will require e.g # "CAMBRIDGE" + "RESTAURANT" to count for TT domain #END OF FILE | 1.496917 | 1 |
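The overlap rule in KeywordSpottingTopicTracker.infer_domain, which commits to a domain only when exactly one domain's keywords fire, can be distilled to a few lines. A standalone illustration with a hard-coded keyword dict standing in for the Settings/OntologyUtils plumbing:

KEYWORDS = {
    "CamRestaurants": ["cambridge", "restaurant", "food", "eat"],
    "CamHotels": ["cambridge", "hotel", "guest house", "guesthouse"],
}

def infer_domain(top_hyp, keywords=KEYWORDS):
    """Return the single matching domain, or None if zero or several match."""
    hits = [dom for dom, kwds in keywords.items()
            if any(k in top_hyp.lower() for k in kwds)]
    return hits[0] if len(hits) == 1 else None

print(infer_domain("book a restaurant please"))     # CamRestaurants
print(infer_domain("a cambridge hotel with food"))  # None (ambiguous)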
gram/migrations/0001_initial.py | nderituliz/Instagram-App | 0 | 6633273 | # Generated by Django 2.2.17 on 2021-01-26 19:49
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0011_update_proxy_permissions'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Profile',
fields=[
('photo', models.ImageField(blank=True, null=True, upload_to='images')),
('Bio', models.TextField(max_length=1500)),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Picture',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('vote_score', models.IntegerField(db_index=True, default=0)),
('num_vote_up', models.PositiveIntegerField(db_index=True, default=0)),
('num_vote_down', models.PositiveIntegerField(db_index=True, default=0)),
('image', models.ImageField(null=True, upload_to='images/')),
('name', models.CharField(max_length=100)),
('caption', models.CharField(max_length=100)),
('like_add', models.IntegerField(default=0)),
('date_uploaded', models.DateTimeField(auto_now_add=True)),
('profile', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ['-date_uploaded'],
},
),
migrations.CreateModel(
name='Likes',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('image', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='gram.Picture')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Comments',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('comment', models.CharField(blank=True, max_length=50)),
('posted', models.DateTimeField(auto_now_add=True)),
('image', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='gram.Picture')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| # Generated by Django 2.2.17 on 2021-01-26 19:49
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0011_update_proxy_permissions'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Profile',
fields=[
('photo', models.ImageField(blank=True, null=True, upload_to='images')),
('Bio', models.TextField(max_length=1500)),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Picture',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('vote_score', models.IntegerField(db_index=True, default=0)),
('num_vote_up', models.PositiveIntegerField(db_index=True, default=0)),
('num_vote_down', models.PositiveIntegerField(db_index=True, default=0)),
('image', models.ImageField(null=True, upload_to='images/')),
('name', models.CharField(max_length=100)),
('caption', models.CharField(max_length=100)),
('like_add', models.IntegerField(default=0)),
('date_uploaded', models.DateTimeField(auto_now_add=True)),
('profile', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ['-date_uploaded'],
},
),
migrations.CreateModel(
name='Likes',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('image', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='gram.Picture')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Comments',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('comment', models.CharField(blank=True, max_length=50)),
('posted', models.DateTimeField(auto_now_add=True)),
('image', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='gram.Picture')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| en | 0.823309 | # Generated by Django 2.2.17 on 2021-01-26 19:49 | 1.756914 | 2 |
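The migration above is auto-generated from a models.py along these lines. This is a reconstruction from the migration's field list for illustration, not the app's actual source; the vote_score/num_vote_up/num_vote_down columns suggest a third-party VoteModel mixin on Picture, which is omitted here:

from django.contrib.auth.models import User
from django.db import models

class Profile(models.Model):
    user = models.OneToOneField(User, on_delete=models.CASCADE, primary_key=True)
    photo = models.ImageField(upload_to='images', blank=True, null=True)
    Bio = models.TextField(max_length=1500)

class Picture(models.Model):
    profile = models.ForeignKey(User, on_delete=models.CASCADE)
    image = models.ImageField(upload_to='images/', null=True)
    name = models.CharField(max_length=100)
    caption = models.CharField(max_length=100)
    like_add = models.IntegerField(default=0)
    date_uploaded = models.DateTimeField(auto_now_add=True)

    class Meta:
        ordering = ['-date_uploaded']

class Likes(models.Model):
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    image = models.ForeignKey(Picture, on_delete=models.CASCADE)

class Comments(models.Model):
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    image = models.ForeignKey(Picture, on_delete=models.CASCADE)
    comment = models.CharField(max_length=50, blank=True)
    posted = models.DateTimeField(auto_now_add=True)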
NaturalLanguageProcessingWithPython/02/practice23_b.py | t2y/learnnlp | 4 | 6633274 | <filename>NaturalLanguageProcessingWithPython/02/practice23_b.py<gh_stars>1-10
# -*- coding: utf-8 -*-
import random
import pylab
from practice23_a import get_frequency_distribution
CHAR_NUM_LIMIT = 15
ORIGIN_WORDS = 'abcdefg'
def generate_words(num):
for _ in xrange(num):
yield ''.join(i for _ in xrange(random.randint(1, CHAR_NUM_LIMIT))
for i in random.choice(ORIGIN_WORDS))
def main():
fd1 = get_frequency_distribution(generate_words(10000))
pylab.plot(fd1, color='blue')
fd2 = get_frequency_distribution(generate_words(1000000))
pylab.plot(fd2, color='green')
pylab.xscale('log')
pylab.yscale('log')
pylab.show()
if __name__ == '__main__':
main()
| <filename>NaturalLanguageProcessingWithPython/02/practice23_b.py<gh_stars>1-10
# -*- coding: utf-8 -*-
import random
import pylab
from practice23_a import get_frequency_distribution
CHAR_NUM_LIMIT = 15
ORIGIN_WORDS = 'abcdefg'
def generate_words(num):
for _ in xrange(num):
yield ''.join(i for _ in xrange(random.randint(1, CHAR_NUM_LIMIT))
for i in random.choice(ORIGIN_WORDS))
def main():
fd1 = get_frequency_distribution(generate_words(10000))
pylab.plot(fd1, color='blue')
fd2 = get_frequency_distribution(generate_words(1000000))
pylab.plot(fd2, color='green')
pylab.xscale('log')
pylab.yscale('log')
pylab.show()
if __name__ == '__main__':
main()
| en | 0.769321 | # -*- coding: utf-8 -*- | 3.087717 | 3 |
2english-init.py | MalikKeio/valkyrie-anatomia-script | 0 | 6633275 | <filename>2english-init.py
#!/usr/bin/env python3
import os
from names import CHAPTERS, CHARACTERS
en_chapter = "en/"
if not os.path.exists(en_chapter):
os.makedirs(en_chapter)
for root, dirs, files in os.walk('jp/'):
for name in files:
path = os.path.join(root, name)
target_path = path.replace('jp/', 'en/')
if os.path.exists(target_path):
print("[WARN] %s exists!" % target_path)
else:
with open(path) as f:
target_dir_path = "/".join(target_path.split('/')[0:-1])
if not os.path.exists(target_dir_path):
os.makedirs(target_dir_path)
target = open(target_path, "w")
for line in f:
speaker = line[:-1]
if speaker in CHARACTERS:
target.write("%s\n" % CHARACTERS[speaker])
else:
target.write(line)
| <filename>2english-init.py
#!/usr/bin/env python3
import os
from names import CHAPTERS, CHARACTERS
en_chapter = "en/"
if not os.path.exists(en_chapter):
os.makedirs(en_chapter)
for root, dirs, files in os.walk('jp/'):
for name in files:
path = os.path.join(root, name)
target_path = path.replace('jp/', 'en/')
if os.path.exists(target_path):
print("[WARN] %s exists!" % target_path)
else:
with open(path) as f:
target_dir_path = "/".join(target_path.split('/')[0:-1])
if not os.path.exists(target_dir_path):
os.makedirs(target_dir_path)
target = open(target_path, "w")
for line in f:
speaker = line[:-1]
if speaker in CHARACTERS:
target.write("%s\n" % CHARACTERS[speaker])
else:
target.write(line)
| fr | 0.221828 | #!/usr/bin/env python3 | 3.024192 | 3 |
src/others/utils.py | wangdongmeizju/BertSum | 1,301 | 6633276 | <gh_stars>1000+
import os
import re
import shutil
import time
from others import pyrouge
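# Thin wrappers around pyrouge: candidate and reference summaries are written
# into a throw-away temp directory, scored with ROUGE-1.5.5, then cleaned up.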
REMAP = {"-lrb-": "(", "-rrb-": ")", "-lcb-": "{", "-rcb-": "}",
"-lsb-": "[", "-rsb-": "]", "``": '"', "''": '"'}
def clean(x):
return re.sub(
r"-lrb-|-rrb-|-lcb-|-rcb-|-lsb-|-rsb-|``|''",
lambda m: REMAP.get(m.group()), x)
def process(params):
temp_dir, data = params
candidates, references, pool_id = data
cnt = len(candidates)
current_time = time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime())
tmp_dir = os.path.join(temp_dir, "rouge-tmp-{}-{}".format(current_time, pool_id))
if not os.path.isdir(tmp_dir):
os.mkdir(tmp_dir)
os.mkdir(tmp_dir + "/candidate")
os.mkdir(tmp_dir + "/reference")
try:
for i in range(cnt):
if len(references[i]) < 1:
continue
with open(tmp_dir + "/candidate/cand.{}.txt".format(i), "w",
encoding="utf-8") as f:
f.write(candidates[i])
with open(tmp_dir + "/reference/ref.{}.txt".format(i), "w",
encoding="utf-8") as f:
f.write(references[i])
r = pyrouge.Rouge155(temp_dir=temp_dir)
r.model_dir = tmp_dir + "/reference/"
r.system_dir = tmp_dir + "/candidate/"
r.model_filename_pattern = 'ref.#ID#.txt'
r.system_filename_pattern = r'cand.(\d+).txt'
rouge_results = r.convert_and_evaluate()
print(rouge_results)
results_dict = r.output_to_dict(rouge_results)
finally:
pass
if os.path.isdir(tmp_dir):
shutil.rmtree(tmp_dir)
return results_dict
def test_rouge(temp_dir, cand, ref):
candidates = [line.strip() for line in open(cand, encoding='utf-8')]
references = [line.strip() for line in open(ref, encoding='utf-8')]
print(len(candidates))
print(len(references))
assert len(candidates) == len(references)
cnt = len(candidates)
current_time = time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime())
tmp_dir = os.path.join(temp_dir, "rouge-tmp-{}".format(current_time))
if not os.path.isdir(tmp_dir):
os.mkdir(tmp_dir)
os.mkdir(tmp_dir + "/candidate")
os.mkdir(tmp_dir + "/reference")
try:
for i in range(cnt):
if len(references[i]) < 1:
continue
with open(tmp_dir + "/candidate/cand.{}.txt".format(i), "w",
encoding="utf-8") as f:
f.write(candidates[i])
with open(tmp_dir + "/reference/ref.{}.txt".format(i), "w",
encoding="utf-8") as f:
f.write(references[i])
r = pyrouge.Rouge155(temp_dir=temp_dir)
r.model_dir = tmp_dir + "/reference/"
r.system_dir = tmp_dir + "/candidate/"
r.model_filename_pattern = 'ref.#ID#.txt'
r.system_filename_pattern = r'cand.(\d+).txt'
rouge_results = r.convert_and_evaluate()
print(rouge_results)
results_dict = r.output_to_dict(rouge_results)
finally:
pass
if os.path.isdir(tmp_dir):
shutil.rmtree(tmp_dir)
return results_dict
def rouge_results_to_str(results_dict):
return ">> ROUGE-F(1/2/3/l): {:.2f}/{:.2f}/{:.2f}\nROUGE-R(1/2/3/l): {:.2f}/{:.2f}/{:.2f}\n".format(
results_dict["rouge_1_f_score"] * 100,
results_dict["rouge_2_f_score"] * 100,
results_dict["rouge_l_f_score"] * 100,
results_dict["rouge_1_recall"] * 100,
results_dict["rouge_2_recall"] * 100,
results_dict["rouge_l_recall"] * 100
)
test-common/integrationtest/testcase/test_ns_client_ha.py | lotabout/OpenMLDB | 2,659 | 6633277 | <gh_stars>1000+
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2021 4Paradigm
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -*- coding: utf-8 -*-
from testcasebase import TestCaseBase
import time
import os
from libs.test_loader import load
import libs.utils as utils
from libs.logger import infoLogger
from libs.deco import multi_dimension
import libs.ddt as ddt
from libs.clients.ns_cluster import NsCluster
from libs.clients.tb_cluster import TbCluster
import libs.conf as conf
@ddt.ddt
@multi_dimension(False)
class TestNameserverHa(TestCaseBase):
def createtable_put(self):
self.tname = 'tname{}'.format(time.time())
metadata_path = '{}/metadata.txt'.format(self.testpath)
m = utils.gen_table_metadata(
'"{}"'.format(self.tname), '"kAbsoluteTime"', 144000, 8,
('table_partition', '"{}"'.format(self.leader), '"0-3"', 'true'),
('table_partition', '"{}"'.format(self.slave1), '"0-3"', 'false'),
('table_partition', '"{}"'.format(self.slave2), '"2-3"', 'false'),
('column_desc', '"k1"', '"string"', 'true'),
('column_desc', '"k2"', '"string"', 'false'),
('column_desc', '"k3"', '"string"', 'false'))
utils.gen_table_metadata_file(m, metadata_path)
rs = self.ns_create(self.ns_leader, metadata_path)
self.assertTrue('Create table ok', rs)
self.multidimension_vk = {'k1': ('string:index', 'testvalue0'),
'k2': ('string', 'testvalue1'),
'k3': ('string', 1.1)}
self.multidimension_scan_vk = {'k1': 'testvalue0'}
table_info = self.showtable(self.ns_leader, self.tname)
self.tid = int(table_info.keys()[0][1])
self.pid = 3
for _ in range(10):
self.put(self.leader, self.tid, self.pid, 'testkey0', self.now() + 90000, 'testvalue0')
def get_latest_op(self):
rs = self.showopstatus(self.ns_leader)
        latest_key = max(rs.keys())
        return latest_key, rs[latest_key][0]
def put_data(self, endpoint, n=1):
for _ in range(n):
self.put(endpoint, self.tid, self.pid, "testkey0", self.now() + 1000, "testvalue0")
@staticmethod
def get_steps_dict():
return {
-1: 'time.sleep(3)',
0: 'time.sleep(10)',
1: 'self.createtable_put()',
2: 'self.stop_client(self.ns_leader)',
3: 'self.disconnectzk(self.ns_leader, "ns_client")',
4: 'self.put_large_datas(500, 7)',
5: 'self.put_data(self.leader)',
6: 'self.makesnapshot(self.ns_leader, self.tname, self.pid, \'ns_client\', 0)',
7: 'self.start_client(self.ns_leader, "nameserver")',
8: 'self.connectzk(self.ns_leader, "ns_client")',
9: 'self.get_new_ns_leader()',
10: 'None',
12: 'self.assertEqual(self.get_manifest(self.leaderpath, self.tid, self.pid)["offset"], "3510")',
13: 'self.assertEqual("15", self.get_table_status(self.slave1, self.tid, self.pid)[0])',
14: 'self.assertIn("drop ok", self.ns_drop(self.ns_leader, self.tname))',
15: 'self.assertFalse(self.showtable(self.ns_leader) is {})',
16: 'self.confset(self.ns_leader, "auto_failover", "true")',
17: 'self.confset(self.ns_leader, "auto_failover", "false")',
20: 'self.stop_client(self.ns_slaver)',
21: 'self.start_client(self.ns_slaver, "nameserver")',
}
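    # Each @ddt.data tuple below encodes one failure-injection scenario as a
    # sequence of step numbers looked up in get_steps_dict() and eval'd in order.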
@ddt.data(
        (16,9,1,3,8,5,5,5,5,5,-1,2,7,0,9,13,14,17), # ns_leader disconnected from ZK: puts and replication keep working
        (16,9,1,2,7,5,5,5,5,5,0,9,13,14,17), # ns_leader killed: puts and replication keep working
        (16,9,1,4,6,3,0,8,12,2,7,0,9,17), # ns_leader disconnected: makesnapshot still succeeds
        (16,9,1,4,6,2,0,7,12,9,17), # ns_leader killed: makesnapshot still succeeds
        (16,9,1,2,0,7,9,14,15,-1,17), # ns_leader killed: tables can still be dropped
        (16,9,1,3,0,8,2,7,0,9,14,15,-1,17), # ns_leader disconnected: tables can still be dropped
        (16,9,1,2,0,7,9,1,15,-1,17), # ns_leader killed: create and put still work
        (16,9,1,3,0,8,2,7,0,9,1,15,-1,17), # ns_leader disconnected: create and put still work
)
@ddt.unpack
def test_ns_ha(self, *steps):
"""
        Nameserver (ns) failover test.
:param steps:
:return:
"""
steps_dict = self.get_steps_dict()
for i in steps:
infoLogger.info('*' * 10 + ' Executing step {}: {}'.format(i, steps_dict[i]))
eval(steps_dict[i])
infoLogger.info(self.ns_slaver, self.tname)
rs = self.showtable(self.ns_slaver)
for repeat in range(10):
if rs == 'failed to showtable:':
time.sleep(2)
rs = self.showtable(self.ns_slaver, self.tname)
continue
break
self.assertIn('nameserver is not leader', rs)
self.ns_drop(self.ns_leader, self.tname)
@ddt.data(
        (16,9,20,-1,3,8,0,9,17), # after the only ns_leader briefly loses its ZK session, node states are judged correctly # RTIDB-246
        (16,9,20,-1,2,7,0,9,17), # after the only ns_leader restarts, node states are judged correctly
)
@ddt.unpack
def test_ns_unique_leader(self, *steps):
"""
        Failure recovery when there is only a single nameserver node.
:param steps:
:return:
"""
steps_dict = self.get_steps_dict()
for i in steps:
infoLogger.info('*' * 10 + ' Executing step {}: {}'.format(i, steps_dict[i]))
eval(steps_dict[i])
self.stop_client(self.leader)
time.sleep(10)
rs = self.showtablet(self.ns_leader)
self.start_client(self.leader)
self.start_client(self.ns_slaver, "nameserver")
time.sleep(5)
self.get_new_ns_leader()
self.assertEqual(rs[self.leader][0], 'kTabletOffline')
@ddt.data(
        (16,9,3,8,0,9,17), # after the ns_leader disconnects and restarts, the new ns_leader judges node states correctly
        (16,9,2,7,0,9,17), # after the ns_leader restarts, the new ns_leader judges node states correctly
        (16,9,3,8,0,9,2,7,0,9,17), # after a disconnect, a new-leader restart and a switch back to the original leader, node states are judged correctly
)
@ddt.unpack
def test_ns_after_failover(self, *steps):
"""
        After nameserver failover, the new leader can judge node states.
:param steps:
:return:
"""
self.createtable_put()
rs1 = self.showtable(self.ns_leader, self.tname)
steps_dict = self.get_steps_dict()
for i in steps:
infoLogger.info('*' * 10 + ' Executing step {}: {}'.format(i, steps_dict[i]))
eval(steps_dict[i])
rs2 = self.showtable(self.ns_leader, self.tname)
self.stop_client(self.leader)
self.updatetablealive(self.ns_leader, self.tname, '*', self.leader, 'no')
time.sleep(10)
rs3 = self.showtablet(self.ns_leader)
rs4 = self.showtable(self.ns_leader, self.tname)
self.start_client(self.leader)
self.stop_client(self.ns_leader)
self.start_client(self.ns_leader, 'nameserver')
time.sleep(10)
self.get_new_ns_leader()
self.assertEqual(rs1, rs2)
self.assertEqual(rs3[self.leader][0], 'kTabletOffline')
self.assertEqual([v[-2] for k, v in rs4.items() if k[-1] == self.leader], ['no'] * 4)
self.ns_drop(self.ns_leader, self.tname)
@ddt.data(
        (17,9,1,16,2,0,7,0,9), # ns_leader dies right after confset; the new ns_leader returns the new conf on confget # RTIDB-197
)
@ddt.unpack
def test_ns_slaver_conf_sync(self, *steps):
"""
        ns_leader dies after confset; the new ns_leader returns the new conf on confget.
:param steps:
:return:
"""
steps_dict = self.get_steps_dict()
for i in steps:
infoLogger.info('*' * 10 + ' Executing step {}: {}'.format(i, steps_dict[i]))
eval(steps_dict[i])
rs = self.showtable(self.ns_slaver, self.tname)
rs1 = self.confget(self.ns_leader, "auto_failover")
nsc = NsCluster(conf.zk_endpoint, *(i for i in conf.ns_endpoints))
nsc.kill(*nsc.endpoints)
nsc.start(False, *nsc.endpoints)
# time.sleep(5)
self.get_new_ns_leader()
self.confset(self.ns_leader, 'auto_failover', 'false')
self.assertIn('nameserver is not leader', rs)
self.assertIn('true', rs1)
self.ns_drop(self.ns_leader, self.tname)
<EMAIL>('FIXME')
@ddt.data(
        (9,3,8,0,9), # nameserver flash disconnect, RTIDB-223
)
@ddt.unpack
def test_ns_flashbreak(self, *steps):
"""
        Nameserver flash disconnect (brief ZK session loss).
:param steps:
:return:
"""
steps_dict = self.get_steps_dict()
for i in steps:
infoLogger.info('*' * 10 + ' Executing step {}: {}'.format(i, steps_dict[i]))
eval(steps_dict[i])
rs = self.showtable(self.ns_slaver)
nsc = NsCluster(conf.zk_endpoint, *(i for i in conf.ns_endpoints))
nsc.kill(*nsc.endpoints)
nsc.start(False, *nsc.endpoints)
time.sleep(3)
nsc.get_ns_leader()
self.assertIn('nameserver is not leader', rs)
def test_ha_cluster(self):
"""
        ZooKeeper stays up while the whole cluster machine room goes down; table info is reloaded correctly after restart.
:return:
"""
self.createtable_put()
rs1 = self.showtable(self.ns_leader, self.tname)
nsc = NsCluster(conf.zk_endpoint, *(i for i in conf.ns_endpoints))
tbc = TbCluster(conf.zk_endpoint, conf.tb_endpoints)
nsc.kill(*nsc.endpoints)
tbc.kill(*tbc.endpoints)
nsc.start(False, *nsc.endpoints)
tbc.start(tbc.endpoints)
time.sleep(3)
self.get_new_ns_leader()
rs2 = self.showtable(self.ns_leader, self.tname)
self.assertEqual(rs1.keys(), rs2.keys())
self.ns_drop(self.ns_leader, self.tname)
if __name__ == "__main__":
load(TestNameserverHa)
async_service/trio.py | cburgdorf/async-service | 0 | 6633278 | <reponame>cburgdorf/async-service<gh_stars>0
import functools
import sys
from typing import Any, AsyncIterator, Awaitable, Callable, cast
from async_generator import asynccontextmanager
import trio
import trio_typing
from .abc import ManagerAPI, ServiceAPI
from .base import BaseManager
from .exceptions import DaemonTaskExit, LifecycleError
from .typing import EXC_INFO
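# Trio backend for async-service: TrioManager drives a ServiceAPI instance
# inside nested nurseries and mirrors its lifecycle through trio.Event flags.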
class TrioManager(BaseManager):
# A nursery for sub tasks and services. This nursery is cancelled if the
# service is cancelled but allowed to exit normally if the service exits.
_task_nursery: trio_typing.Nursery
def __init__(self, service: ServiceAPI) -> None:
super().__init__(service)
# events
self._started = trio.Event()
self._cancelled = trio.Event()
self._stopping = trio.Event()
self._finished = trio.Event()
# locks
self._run_lock = trio.Lock()
#
# System Tasks
#
async def _handle_cancelled(self, task_nursery: trio_typing.Nursery) -> None:
"""
Handle cancellation of the task nursery.
"""
self.logger.debug("%s: _handle_cancelled waiting for cancellation", self)
await self.wait_cancelled()
self.logger.debug(
"%s: _handle_cancelled triggering task nursery cancellation", self
)
task_nursery.cancel_scope.cancel()
async def _handle_stopping(self, system_nursery: trio_typing.Nursery) -> None:
"""
Handle cancellation of the system nursery.
"""
self.logger.debug("%s: _handle_stopping waiting for stopping", self)
await self.wait_stopping()
self.logger.debug(
"%s: _handle_stopping triggering system nursery cancellation", self
)
system_nursery.cancel_scope.cancel()
async def _handle_run(self) -> None:
"""
Run and monitor the actual :meth:`ServiceAPI.run` method.
In the event that it throws an exception the service will be cancelled.
        Upon a clean exit it simply returns, leaving the service running until
        cancellation is triggered elsewhere (see the note below about daemon
        tasks).
"""
try:
await self._service.run()
except Exception as err:
self.logger.debug(
"%s: _handle_run got error, storing exception and setting cancelled: %s",
self,
err,
)
self._errors.append(cast(EXC_INFO, sys.exc_info()))
self.cancel()
else:
# NOTE: Any service which uses daemon tasks will need to trigger
# cancellation in order for the service to exit since this code
# path does not trigger task cancellation. It might make sense to
# trigger cancellation if all of the running tasks are daemon
# tasks.
self.logger.debug(
"%s: _handle_run exited cleanly, waiting for full stop...", self
)
@classmethod
async def run_service(cls, service: ServiceAPI) -> None:
manager = cls(service)
await manager.run()
async def run(self) -> None:
if self._run_lock.locked():
raise LifecycleError(
"Cannot run a service with the run lock already engaged. Already started?"
)
elif self.is_started:
raise LifecycleError("Cannot run a service which is already started.")
try:
async with self._run_lock:
async with trio.open_nursery() as system_nursery:
try:
async with trio.open_nursery() as task_nursery:
self._task_nursery = task_nursery
system_nursery.start_soon(
self._handle_cancelled, task_nursery
)
system_nursery.start_soon(
self._handle_stopping, system_nursery
)
task_nursery.start_soon(self._handle_run)
self._started.set()
# ***BLOCKING HERE***
# The code flow will block here until the background tasks have
# completed or cancellation occurs.
finally:
# signal that the service is stopping
self._stopping.set()
self.logger.debug("%s stopping", self)
finally:
self._finished.set()
self.logger.debug("%s finished", self)
        # If an error occurred, re-raise it here
if self.did_error:
raise trio.MultiError(
tuple(
exc_value.with_traceback(exc_tb)
for _, exc_value, exc_tb in self._errors
)
)
#
# Event API mirror
#
@property
def is_started(self) -> bool:
return self._started.is_set()
@property
def is_cancelled(self) -> bool:
return self._cancelled.is_set()
@property
def is_stopping(self) -> bool:
return self._stopping.is_set() and not self.is_finished
@property
def is_finished(self) -> bool:
return self._finished.is_set()
#
# Control API
#
def cancel(self) -> None:
if not self.is_started:
raise LifecycleError("Cannot cancel as service which was never started.")
self._cancelled.set()
#
# Wait API
#
async def wait_started(self) -> None:
await self._started.wait()
async def wait_cancelled(self) -> None:
await self._cancelled.wait()
async def wait_stopping(self) -> None:
await self._stopping.wait()
async def wait_finished(self) -> None:
await self._finished.wait()
async def _run_and_manage_task(
self,
async_fn: Callable[..., Awaitable[Any]],
*args: Any,
daemon: bool,
name: str,
) -> None:
try:
await async_fn(*args)
except Exception as err:
self.logger.debug(
"task '%s[daemon=%s]' exited with error: %s",
name,
daemon,
err,
exc_info=True,
)
self._errors.append(cast(EXC_INFO, sys.exc_info()))
self.cancel()
else:
self.logger.debug("task '%s[daemon=%s]' finished.", name, daemon)
if daemon:
self.logger.debug(
"daemon task '%s' exited unexpectedly. Cancelling service: %s",
name,
self,
)
self.cancel()
raise DaemonTaskExit(f"Daemon task {name} exited")
def run_task(
self,
async_fn: Callable[..., Awaitable[Any]],
*args: Any,
daemon: bool = False,
name: str = None,
) -> None:
self._task_nursery.start_soon(
functools.partial(
self._run_and_manage_task, daemon=daemon, name=name or repr(async_fn)
),
async_fn,
*args,
name=name,
)
def run_child_service(
self, service: ServiceAPI, daemon: bool = False, name: str = None
) -> ManagerAPI:
child_manager = type(self)(service)
self.run_task(child_manager.run, daemon=daemon, name=name or repr(service))
return child_manager
@asynccontextmanager
async def background_trio_service(service: ServiceAPI) -> AsyncIterator[ManagerAPI]:
"""
Run a service in the background.
The service is running within the context
block and will be properly cleaned up upon exiting the context block.
"""
async with trio.open_nursery() as nursery:
manager = TrioManager(service)
nursery.start_soon(manager.run)
await manager.wait_started()
try:
yield manager
finally:
await manager.stop()
browsepy/tests/test_main.py | galacticpolymath/browsepy | 164 | 6633279 | import unittest
import os
import os.path
import tempfile
import shutil
import browsepy.__main__
class TestMain(unittest.TestCase):
module = browsepy.__main__
def setUp(self):
self.app = browsepy.app
self.parser = self.module.ArgParse(sep=os.sep)
self.base = tempfile.mkdtemp()
self.exclude_file = os.path.join(self.base, '.ignore')
with open(self.exclude_file, 'w') as f:
f.write('.ignore\n')
def tearDown(self):
shutil.rmtree(self.base)
def test_defaults(self):
result = self.parser.parse_args([])
self.assertEqual(result.host, '127.0.0.1')
self.assertEqual(result.port, 8080)
self.assertEqual(result.directory, os.getcwd())
self.assertEqual(result.initial, None)
self.assertEqual(result.removable, None)
self.assertEqual(result.upload, None)
self.assertListEqual(result.exclude, [])
self.assertListEqual(result.exclude_from, [])
self.assertEqual(result.plugin, [])
def test_params(self):
plugins = ['plugin_1', 'plugin_2', 'namespace.plugin_3']
result = self.parser.parse_args([
'127.1.1.1',
'5000',
'--directory=%s' % self.base,
'--initial=%s' % self.base,
'--removable=%s' % self.base,
'--upload=%s' % self.base,
'--exclude=a',
'--exclude-from=%s' % self.exclude_file,
] + [
'--plugin=%s' % plugin
for plugin in plugins
])
self.assertEqual(result.host, '127.1.1.1')
self.assertEqual(result.port, 5000)
self.assertEqual(result.directory, self.base)
self.assertEqual(result.initial, self.base)
self.assertEqual(result.removable, self.base)
self.assertEqual(result.upload, self.base)
self.assertListEqual(result.exclude, ['a'])
self.assertListEqual(result.exclude_from, [self.exclude_file])
self.assertEqual(result.plugin, plugins)
result = self.parser.parse_args([
'--directory', self.base,
'--plugin', ','.join(plugins),
'--exclude', '/.*'
])
self.assertEqual(result.directory, self.base)
self.assertEqual(result.plugin, plugins)
self.assertListEqual(result.exclude, ['/.*'])
result = self.parser.parse_args([
'--directory=%s' % self.base,
'--initial='
])
self.assertEqual(result.host, '127.0.0.1')
self.assertEqual(result.port, 8080)
self.assertEqual(result.directory, self.base)
self.assertIsNone(result.initial)
self.assertIsNone(result.removable)
self.assertIsNone(result.upload)
self.assertListEqual(result.exclude, [])
self.assertListEqual(result.exclude_from, [])
self.assertListEqual(result.plugin, [])
self.assertRaises(
SystemExit,
self.parser.parse_args,
['--directory=%s' % __file__]
)
self.assertRaises(
SystemExit,
self.parser.parse_args,
['--exclude-from=non-existing']
)
def test_exclude(self):
result = self.parser.parse_args([
'--exclude', '/.*',
'--exclude-from', self.exclude_file,
])
extra = self.module.collect_exclude_patterns(result.exclude_from)
self.assertListEqual(extra, ['.ignore'])
match = self.module.create_exclude_fnc(
result.exclude + extra, '/b', sep='/')
self.assertTrue(match('/b/.a'))
self.assertTrue(match('/b/.a/b'))
self.assertFalse(match('/b/a/.a'))
self.assertTrue(match('/b/a/.ignore'))
match = self.module.create_exclude_fnc(
result.exclude + extra, 'C:\\b', sep='\\')
self.assertTrue(match('C:\\b\\.a'))
self.assertTrue(match('C:\\b\\.a\\b'))
self.assertFalse(match('C:\\b\\a\\.a'))
self.assertTrue(match('C:\\b\\a\\.ignore'))
def test_main(self):
params = {}
self.module.main(
argv=[],
run_fnc=lambda app, **kwargs: params.update(kwargs)
)
defaults = {
'host': '127.0.0.1',
'port': 8080,
'debug': False,
'threaded': True
}
params_subset = {k: v for k, v in params.items() if k in defaults}
self.assertEqual(defaults, params_subset)
def test_filter_union(self):
fu = self.module.filter_union
self.assertIsNone(fu())
self.assertIsNone(fu(None))
self.assertIsNone(fu(None, None))
def fnc1(path):
return False
self.assertEqual(fu(fnc1), fnc1)
def fnc2(path):
return True
self.assertTrue(fu(fnc1, fnc2)('a'))
Chapter 07/Implementing a simple majority vote classifier/program3.py | fagaiera/python-machine-learning-book-3rd-edition-examples | 0 | 6633280 | <gh_stars>0
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import LabelEncoder
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.pipeline import Pipeline
from algorithm import MajorityVoteClassifier
import numpy as np
import matplotlib.pyplot as plt
from itertools import product
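# Fit logistic regression, a decision stump and 1-NN on two iris features,
# then compare their decision regions against the majority-vote ensemble.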
iris = datasets.load_iris()
X, y = iris.data[50:, [1, 2]], iris.target[50:]
le = LabelEncoder()
y = le.fit_transform(y)
X_train, X_test, y_train, y_test = train_test_split(X, y,
test_size=0.5,
random_state=1,
stratify=y)
clf1 = LogisticRegression(penalty='l2',
C=0.001,
solver='lbfgs',
random_state=1)
clf2 = DecisionTreeClassifier(max_depth=1,
criterion='entropy',
random_state=0)
clf3 = KNeighborsClassifier(n_neighbors=1,
p=2,
metric='minkowski')
pipe1 = Pipeline([['sc', StandardScaler()],
['clf', clf1]])
pipe3 = Pipeline([['sc', StandardScaler()],
['clf', clf3]])
mv_clf = MajorityVoteClassifier(classifiers=[pipe1, clf2, pipe3])
all_clf = [pipe1, clf2, pipe3, mv_clf]
clf_labels = ['Logistic regression', 'Decision tree', 'KNN', 'Majority voting']
sc = StandardScaler()
X_train_std = sc.fit_transform(X_train)
x_min = X_train_std[:, 0].min() - 1
x_max = X_train_std[:, 0].max() + 1
y_min = X_train_std[:, 1].min() - 1
y_max = X_train_std[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, 0.1),
np.arange(y_min, y_max, 0.1))
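# Evaluate every classifier over a grid spanning the standardized feature
# space, so the filled contours show each model's decision regions.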
f, axarr = plt.subplots(nrows=2, ncols=2,
sharex='col',
sharey='row',
figsize=(7, 5))
for idx, clf, tt in zip(product([0, 1], [0, 1]),
all_clf, clf_labels):
clf.fit(X_train_std, y_train)
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
axarr[idx[0], idx[1]].contourf(xx, yy, Z, alpha=0.3)
axarr[idx[0], idx[1]].scatter(X_train_std[y_train==0, 0],
X_train_std[y_train==0, 1],
c='blue',
marker='^',
s=50)
axarr[idx[0], idx[1]].scatter(X_train_std[y_train==1, 0],
X_train_std[y_train==1, 1],
c='green',
marker='o',
s=50)
axarr[idx[0], idx[1]].set_title(tt)
plt.text(-3.5, -5.,
s='Sepal width [standardized]',
ha='center', va='center', fontsize=12)
plt.text(-12.5, 4.5,
s='Petal length [standardized]',
ha='center', va='center',
fontsize=12, rotation=90)
plt.show()
print(mv_clf.get_params()) | from sklearn import datasets
scraper/dzoneScraper.py | AnshG714/sweeger | 0 | 6633281 | <gh_stars>0
from common import Article
import requests
from bs4 import BeautifulSoup
from selenium import webdriver
import time
import os
def getDzoneURL(topic, pageNumber):
assert type(pageNumber) == int and pageNumber >= 1
# f-string formatting will only work on Python 3.6+
return f'https://dzone.com/{topic}/list?page={pageNumber}'
def loadPage(topic, pageNumber):
# This function is needed because DZone is written in Angular all components
# are dynamically JS-rendered.
# Configure Chrome options
options = webdriver.ChromeOptions()
options.add_argument('--ignore-certificate-errors')
# So we don't need to actually open a new Window
options.add_argument('--headless')
# Instantiate Chromium driver
driver = webdriver.Chrome(
os.path.join(os.path.dirname(os.path.abspath(__file__)),
'..', 'scraper', 'chromedriver'),
chrome_options=options)
driver.get(getDzoneURL(topic, pageNumber))
time.sleep(1)
return driver.page_source
def scrapeSource(page_source, keywords):
# Instantiate BS object
soup = BeautifulSoup(page_source, 'lxml')
articleContainers = soup.findAll(
class_="article-content-right article-toggle")
articles = []
for container in articleContainers:
blurb = ""
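        # blurb is never populated from the page, so the keyword filter below
        # effectively matches against article titles only.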
titleAnchor = container.find(class_="article-title article-toggle").a
title = titleAnchor.string
link = 'https://dzone.com' + titleAnchor['href']
date = container.find(
class_="article-source-date article-toggle").string
author = container.find(
class_="author-name ng-binding ng-isolate-scope").string
        article = Article(title, blurb, link, author, date)
        if keywords:
            for keyword in keywords:
                if keyword in blurb or keyword in title:
                    articles.append(article)
                    break
        else:
            articles.append(article)
return articles
def scrape(topic, numPages=2, keywords=None):
res = []
for i in range(1, numPages+1):
ps = loadPage(topic, i)
res.append(scrapeSource(ps, keywords))
time.sleep(1)
# flatten list
return [item for items in res for item in items]
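# Example usage (hypothetical): articles = scrape('java', numPages=1, keywords=['spring'])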
mapentity/serializers/helpers.py | GeotrekCE/Geotrek-admin | 50 | 6633282 | from decimal import Decimal
from functools import partial
import html
import json
from django.core.serializers import serialize
from django.core.serializers.json import DjangoJSONEncoder
from django.db.models.query import QuerySet
from django.utils.encoding import force_str
from django.utils.encoding import smart_str
from django.utils.formats import number_format
from django.utils.functional import Promise
from django.utils.html import strip_tags
from django.utils.translation import gettext_lazy as _
def field_as_string(obj, field, ascii=False):
value = getattr(obj, field + '_csv_display', None)
if value is None:
value = getattr(obj, field + '_display', None)
if value is None:
value = getattr(obj, field)
if isinstance(value, bool):
value = (_('no'), _('yes'))[value]
if isinstance(value, float) or isinstance(value, int) or isinstance(value, Decimal):
value = number_format(value)
if isinstance(value, list) or isinstance(value, QuerySet):
value = ", ".join([str(val) for val in value])
return smart_plain_text(value, ascii)
def plain_text(html_content):
return html.unescape(strip_tags(html_content))
def smart_plain_text(s, ascii=False):
if s is None:
return ''
try:
# Converts to unicode, remove HTML tags, convert HTML entities
us = plain_text(str(s))
if ascii:
return smart_str(us)
return us
except UnicodeDecodeError:
return smart_str(s)
class DjangoJSONEncoder(DjangoJSONEncoder):
"""
Taken (slightly modified) from:
http://stackoverflow.com/questions/2249792/json-serializing-django-models-with-simplejson
"""
def default(self, obj):
# https://docs.djangoproject.com/en/dev/topics/serialization/#id2
if isinstance(obj, Promise):
return force_str(obj)
if isinstance(obj, QuerySet):
# `default` must return a python serializable
# structure, the easiest way is to load the JSON
# string produced by `serialize` and return it
return json.loads(serialize('json', obj))
return force_str(obj)
# partial function, we can now use dumps(my_dict) instead
# of dumps(my_dict, cls=DjangoJSONEncoder)
json_django_dumps = partial(json.dumps, cls=DjangoJSONEncoder)
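# Minimal usage sketch (assumed values, not part of the original module):
#   json_django_dumps({"label": _("yes"), "count": 3})
# lazy strings and querysets pass through DjangoJSONEncoder.default above.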
| from decimal import Decimal
from functools import partial
import html
import json
from django.core.serializers import serialize
from django.core.serializers.json import DjangoJSONEncoder
from django.db.models.query import QuerySet
from django.utils.encoding import force_str
from django.utils.encoding import smart_str
from django.utils.formats import number_format
from django.utils.functional import Promise
from django.utils.html import strip_tags
from django.utils.translation import gettext_lazy as _
def field_as_string(obj, field, ascii=False):
value = getattr(obj, field + '_csv_display', None)
if value is None:
value = getattr(obj, field + '_display', None)
if value is None:
value = getattr(obj, field)
if isinstance(value, bool):
value = (_('no'), _('yes'))[value]
if isinstance(value, float) or isinstance(value, int) or isinstance(value, Decimal):
value = number_format(value)
if isinstance(value, list) or isinstance(value, QuerySet):
value = ", ".join([str(val) for val in value])
return smart_plain_text(value, ascii)
def plain_text(html_content):
return html.unescape(strip_tags(html_content))
def smart_plain_text(s, ascii=False):
if s is None:
return ''
try:
# Converts to unicode, remove HTML tags, convert HTML entities
us = plain_text(str(s))
if ascii:
return smart_str(us)
return us
except UnicodeDecodeError:
return smart_str(s)
class DjangoJSONEncoder(DjangoJSONEncoder):
"""
Taken (slightly modified) from:
http://stackoverflow.com/questions/2249792/json-serializing-django-models-with-simplejson
"""
def default(self, obj):
# https://docs.djangoproject.com/en/dev/topics/serialization/#id2
if isinstance(obj, Promise):
return force_str(obj)
if isinstance(obj, QuerySet):
# `default` must return a python serializable
# structure, the easiest way is to load the JSON
# string produced by `serialize` and return it
return json.loads(serialize('json', obj))
return force_str(obj)
# partial function, we can now use dumps(my_dict) instead
# of dumps(my_dict, cls=DjangoJSONEncoder)
json_django_dumps = partial(json.dumps, cls=DjangoJSONEncoder)
| en | 0.547052 | # Converts to unicode, remove HTML tags, convert HTML entities Taken (slightly modified) from: http://stackoverflow.com/questions/2249792/json-serializing-django-models-with-simplejson # https://docs.djangoproject.com/en/dev/topics/serialization/#id2 # `default` must return a python serializable # structure, the easiest way is to load the JSON # string produced by `serialize` and return it # partial function, we can now use dumps(my_dict) instead # of dumps(my_dict, cls=DjangoJSONEncoder) | 2.194548 | 2 |
quantecon/ecdf.py | gosccm/Learning | 1 | 6633283 | """
Filename: ecdf.py
Authors: <NAME>, <NAME>
Implements the empirical cumulative distribution function given an array
of observations.
"""
import numpy as np
class ECDF:
"""
One-dimensional empirical distribution function given a vector of
observations.
Parameters
----------
observations : array_like
An array of observations
Attributes
----------
observations : see Parameters
"""
def __init__(self, observations):
self.observations = np.asarray(observations)
def __repr__(self):
return self.__str__()
def __str__(self):
m = "Empirical CDF:\n - number of observations: {n}"
return m.format(n=self.observations.size)
def __call__(self, x):
"""
Evaluates the ecdf at x
Parameters
----------
x : scalar(float)
The x at which the ecdf is evaluated
Returns
-------
scalar(float)
Fraction of the sample less than x
"""
return np.mean(self.observations <= x)
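# Minimal usage sketch (assumed sample values):
#   F = ECDF([1.0, 2.0, 3.0, 4.0])
#   F(2.5)  # -> 0.5, the fraction of observations <= 2.5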
| """
Filename: ecdf.py
Authors: <NAME>, <NAME>
Implements the empirical cumulative distribution function given an array
of observations.
"""
import numpy as np
class ECDF:
"""
One-dimensional empirical distribution function given a vector of
observations.
Parameters
----------
observations : array_like
An array of observations
Attributes
----------
observations : see Parameters
"""
def __init__(self, observations):
self.observations = np.asarray(observations)
def __repr__(self):
return self.__str__()
def __str__(self):
m = "Empirical CDF:\n - number of observations: {n}"
return m.format(n=self.observations.size)
def __call__(self, x):
"""
Evaluates the ecdf at x
Parameters
----------
x : scalar(float)
The x at which the ecdf is evaluated
Returns
-------
scalar(float)
Fraction of the sample less than x
"""
return np.mean(self.observations <= x)
| en | 0.592185 | Filename: ecdf.py Authors: <NAME>, <NAME> Implements the empirical cumulative distribution function given an array of observations. One-dimensional empirical distribution function given a vector of observations. Parameters ---------- observations : array_like An array of observations Attributes ---------- observations : see Parameters Evaluates the ecdf at x Parameters ---------- x : scalar(float) The x at which the ecdf is evaluated Returns ------- scalar(float) Fraction of the sample less than x | 3.082415 | 3 |
utils/custom_methods.py | alexanderwendt/sklearn_ml_toolbox | 1 | 6633284 | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import datetime
#import data_visualization_functions as vis
def load_source(source_path):
'''
Load stock charts as source
'''
source = pd.read_csv(source_path, sep=';')
source.index.name = "id"
source.columns = ['Date', 'Open', 'High', 'Low', 'Close']
source['Date'] = pd.to_datetime(source['Date'])
source['Date'].apply(mdates.date2num)
return source | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import datetime
#import data_visualization_functions as vis
def load_source(source_path):
'''
Load stock charts as source
'''
source = pd.read_csv(source_path, sep=';')
source.index.name = "id"
source.columns = ['Date', 'Open', 'High', 'Low', 'Close']
source['Date'] = pd.to_datetime(source['Date'])
source['Date'].apply(mdates.date2num)
return source | en | 0.81788 | #import data_visualization_functions as vis Load stock charts as source | 2.869581 | 3 |
lib/pyramid.py | rpautrat/d2_net | 0 | 6633285 | import torch
import torch.nn as nn
import torch.nn.functional as F
from .exceptions import EmptyTensorError
from .utils import interpolate_dense_features, upscale_positions
def process_multiscale(image, model, scales=[.5, 1, 2]):
b, _, h_init, w_init = image.size()
device = image.device
assert(b == 1)
all_keypoints = torch.zeros([3, 0])
all_descriptors = torch.zeros([
model.dense_feature_extraction.num_channels, 0
])
all_scores = torch.zeros(0)
previous_dense_features = None
banned = None
for idx, scale in enumerate(scales):
current_image = F.interpolate(
image, scale_factor=scale,
mode='bilinear', align_corners=True
)
_, _, h_level, w_level = current_image.size()
dense_features = model.dense_feature_extraction(current_image)
del current_image
_, _, h, w = dense_features.size()
# Sum the feature maps.
if previous_dense_features is not None:
dense_features += F.interpolate(
previous_dense_features, size=[h, w],
mode='bilinear', align_corners=True
)
del previous_dense_features
# Recover detections.
detections = model.detection(dense_features)
if banned is not None:
banned = F.interpolate(banned.float(), size=[h, w]).bool()
detections = torch.min(detections, ~banned)
banned = torch.max(
torch.max(detections, dim=1)[0].unsqueeze(1), banned
)
else:
banned = torch.max(detections, dim=1)[0].unsqueeze(1)
fmap_pos = torch.nonzero(detections[0].cpu()).t()
del detections
# Recover displacements.
displacements = model.localization(dense_features)[0].cpu()
displacements_i = displacements[
0, fmap_pos[0, :], fmap_pos[1, :], fmap_pos[2, :]
]
displacements_j = displacements[
1, fmap_pos[0, :], fmap_pos[1, :], fmap_pos[2, :]
]
del displacements
mask = torch.min(
torch.abs(displacements_i) < 0.5,
torch.abs(displacements_j) < 0.5
)
fmap_pos = fmap_pos[:, mask]
valid_displacements = torch.stack([
displacements_i[mask],
displacements_j[mask]
], dim=0)
del mask, displacements_i, displacements_j
        fmap_keypoints = fmap_pos[1:, :].float() + valid_displacements
del valid_displacements
try:
raw_descriptors, _, ids = interpolate_dense_features(
fmap_keypoints.to(device),
dense_features[0]
)
except EmptyTensorError:
continue
fmap_pos = fmap_pos[:, ids]
fmap_keypoints = fmap_keypoints[:, ids]
del ids
keypoints = upscale_positions(fmap_keypoints, scaling_steps=2)
del fmap_keypoints
descriptors = F.normalize(raw_descriptors, dim=0).cpu()
del raw_descriptors
keypoints[0, :] *= h_init / h_level
keypoints[1, :] *= w_init / w_level
fmap_pos = fmap_pos.cpu()
keypoints = keypoints.cpu()
keypoints = torch.cat([
keypoints,
torch.ones([1, keypoints.size(1)]) * 1 / scale,
], dim=0)
scores = dense_features[
0, fmap_pos[0, :], fmap_pos[1, :], fmap_pos[2, :]
].cpu() / (idx + 1)
del fmap_pos
all_keypoints = torch.cat([all_keypoints, keypoints], dim=1)
all_descriptors = torch.cat([all_descriptors, descriptors], dim=1)
all_scores = torch.cat([all_scores, scores], dim=0)
del keypoints, descriptors
previous_dense_features = dense_features
del dense_features
del previous_dense_features, banned
keypoints = all_keypoints.t().numpy()
del all_keypoints
scores = all_scores.numpy()
del all_scores
descriptors = all_descriptors.t().numpy()
del all_descriptors
return keypoints, scores, descriptors
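# Minimal call sketch (assumed model and input tensor, not from this file):
#   image = torch.rand(1, 3, 480, 640)  # batch size must be 1 (see assert)
#   keypoints, scores, descriptors = process_multiscale(image, model)
# keypoints come back as (N, 3) rows of (i, j, 1/scale).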
| import torch
import torch.nn as nn
import torch.nn.functional as F
from .exceptions import EmptyTensorError
from .utils import interpolate_dense_features, upscale_positions
def process_multiscale(image, model, scales=[.5, 1, 2]):
b, _, h_init, w_init = image.size()
device = image.device
assert(b == 1)
all_keypoints = torch.zeros([3, 0])
all_descriptors = torch.zeros([
model.dense_feature_extraction.num_channels, 0
])
all_scores = torch.zeros(0)
previous_dense_features = None
banned = None
for idx, scale in enumerate(scales):
current_image = F.interpolate(
image, scale_factor=scale,
mode='bilinear', align_corners=True
)
_, _, h_level, w_level = current_image.size()
dense_features = model.dense_feature_extraction(current_image)
del current_image
_, _, h, w = dense_features.size()
# Sum the feature maps.
if previous_dense_features is not None:
dense_features += F.interpolate(
previous_dense_features, size=[h, w],
mode='bilinear', align_corners=True
)
del previous_dense_features
# Recover detections.
detections = model.detection(dense_features)
if banned is not None:
banned = F.interpolate(banned.float(), size=[h, w]).bool()
detections = torch.min(detections, ~banned)
banned = torch.max(
torch.max(detections, dim=1)[0].unsqueeze(1), banned
)
else:
banned = torch.max(detections, dim=1)[0].unsqueeze(1)
fmap_pos = torch.nonzero(detections[0].cpu()).t()
del detections
# Recover displacements.
displacements = model.localization(dense_features)[0].cpu()
displacements_i = displacements[
0, fmap_pos[0, :], fmap_pos[1, :], fmap_pos[2, :]
]
displacements_j = displacements[
1, fmap_pos[0, :], fmap_pos[1, :], fmap_pos[2, :]
]
del displacements
mask = torch.min(
torch.abs(displacements_i) < 0.5,
torch.abs(displacements_j) < 0.5
)
fmap_pos = fmap_pos[:, mask]
valid_displacements = torch.stack([
displacements_i[mask],
displacements_j[mask]
], dim=0)
del mask, displacements_i, displacements_j
        fmap_keypoints = fmap_pos[1:, :].float() + valid_displacements
del valid_displacements
try:
raw_descriptors, _, ids = interpolate_dense_features(
fmap_keypoints.to(device),
dense_features[0]
)
except EmptyTensorError:
continue
fmap_pos = fmap_pos[:, ids]
fmap_keypoints = fmap_keypoints[:, ids]
del ids
keypoints = upscale_positions(fmap_keypoints, scaling_steps=2)
del fmap_keypoints
descriptors = F.normalize(raw_descriptors, dim=0).cpu()
del raw_descriptors
keypoints[0, :] *= h_init / h_level
keypoints[1, :] *= w_init / w_level
fmap_pos = fmap_pos.cpu()
keypoints = keypoints.cpu()
keypoints = torch.cat([
keypoints,
torch.ones([1, keypoints.size(1)]) * 1 / scale,
], dim=0)
scores = dense_features[
0, fmap_pos[0, :], fmap_pos[1, :], fmap_pos[2, :]
].cpu() / (idx + 1)
del fmap_pos
all_keypoints = torch.cat([all_keypoints, keypoints], dim=1)
all_descriptors = torch.cat([all_descriptors, descriptors], dim=1)
all_scores = torch.cat([all_scores, scores], dim=0)
del keypoints, descriptors
previous_dense_features = dense_features
del dense_features
del previous_dense_features, banned
keypoints = all_keypoints.t().numpy()
del all_keypoints
scores = all_scores.numpy()
del all_scores
descriptors = all_descriptors.t().numpy()
del all_descriptors
return keypoints, scores, descriptors
| en | 0.651714 | # Sum the feature maps. # Recover detections. # Recover displacements. | 1.883443 | 2 |
analysis/permeability_profiles/abf_pmf_processor.py | vtlim/permeability | 1 | 6633286 | <filename>analysis/permeability_profiles/abf_pmf_processor.py
import numpy as np
import numpy_indexed as npi
from scipy import integrate
# TODO: consider making the plotting lines in the main function more modular
# TODO: check that file exists in __init__
# TODO: add diagram from group meeting to Github
class Profile:
def __init__(self, infile, xdata, ydata):
# if xdata and ydata are NOT passed, initiate object from file
if all(i is None for i in [xdata, ydata]):
# only unpack x and y data (ignoring error column if present)
xdata, ydata = np.genfromtxt(infile, usecols = (0, 1), unpack=True)
self._infile = infile
self._xdata = xdata
self._ydata = ydata
@property
def infile(self):
"""Getter for infile."""
return self._infile
@property
def xdata(self):
"""Getter for xdata."""
return self._xdata
@property
def ydata(self):
"""Getter for ydata."""
return self._ydata
@infile.setter
def infile(self, value):
"""Setter for infile."""
self._infile = value
@xdata.setter
def xdata(self, value):
"""Setter for xdata."""
self._xdata = value
@ydata.setter
def ydata(self, value):
"""Setter for ydata."""
self._ydata = value
    @staticmethod
    def _decompose_list(list_of_objs):
"""Combine all xdata and ydata from multiple Grad or Pmf objects."""
# extract data from objects
whole_i = []
whole_x = []
whole_y = []
for obj in list_of_objs:
whole_i.append(obj.infile)
whole_x.append(obj.xdata)
whole_y.append(obj.ydata)
# concatenate full list to numpy array
x = np.concatenate(whole_x)
grad = np.concatenate(whole_y)
# concatenate file names into single string
infilestring = " ".join(whole_i)
return x, grad, infilestring
def _sort_by_x(self):
"""Make sure data is sorted by x in ascending order.
To have descending, use [::-1] after arr1inds."""
unsorted_x = self.xdata
unsorted_y = self.ydata
arr1inds = unsorted_x.argsort()
sorted_x = unsorted_x[arr1inds]
sorted_y = unsorted_y[arr1inds]
self.xdata = sorted_x
self.ydata = sorted_y
@staticmethod
def _get_kt(T):
"""Compute thermal energy."""
# Boltzmann constant in kcal/(mol K)
kb = 0.0019872041
kt = kb*T
return kt
def write_data(self, outfile, errbar=False):
header = "Input data: {}".format(self.infile)
if errbar:
np.savetxt(
outfile, np.c_[self.xdata, self.ydata, self.errbar],
header=header, fmt=['%.2f', '%.6f', '%.6f'])
else:
np.savetxt(
outfile, np.c_[self.xdata, self.ydata],
header=header, fmt=['%.2f', '%.6f'])
class Grad(Profile):
def __init__(self, infile=None, xdata=None, ydata=None):
super().__init__(infile, xdata, ydata)
def integrate(self):
# integrate ydata
y_pmf = integrate.cumtrapz(self.ydata, self.xdata)
# take midpoint of all adjacent data points in half_cvs due to integration
# https://tinyurl.com/ycahltpp
x_pmf = (self.xdata[1:] + self.xdata[:-1]) / 2
x_pmf = x_pmf.flatten()
# create new pmf object from integrated data
new_pmf = Pmf(self.infile, x_pmf, y_pmf)
return new_pmf
@staticmethod
def join_windows(list_grads):
"""Join windows by averaging overlapping regions of .czar.grad files.
https://stackoverflow.com/questions/41821539/calculate-average-of-y-values-with-different-x-values
Parameters
----------
list_grads : list
list of Grad objects to be combined
Returns
-------
new_grad : Grad
new Grad object with xdata and ydata of combined grads
"""
# combine all xdata and all ydata
x, grad, allfiles = Profile._decompose_list(list_grads)
# average the values having same x gridpoint
x_unique, grad_mean = npi.group_by(x).mean(grad)
# create new grad instance for joined data
new_grad = Grad(allfiles, x_unique.flatten(), grad_mean.flatten())
# reorder data for ascending x, then return object
new_grad._sort_by_x()
return new_grad
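    # Sketch of the overlap averaging above (assumed toy numbers): windows
    # sampled at x = [0, 1] and x = [1, 2] with gradients [2, 4] and [6, 8]
    # merge to x = [0, 1, 2], grad = [2, 5, 8] -- the shared x = 1 is averaged.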
class Pmf(Profile):
def __init__(self, infile=None, xdata=None, ydata=None):
super().__init__(infile, xdata, ydata)
def shift_bulk_zero(self, x0, x1):
"""Compute average from x0 to x1, and shift the average to zero.
Parameters
----------
x0 : float
x1 : float
"""
# get indices of x0 and x1 values
try:
x0_index = np.where(np.isclose(self.xdata, x0))[0][0]
x1_index = np.where(np.isclose(self.xdata, x1))[0][0]
except IndexError as e:
raise Exception("ERROR: at least one x-value not found or was " +
"found more than one time (IndexError)") from e
# calculate the mean of the region
orig_mean = np.mean(
self.ydata[ min(x0_index, x1_index):max(x0_index, x1_index)+1 ])
print("Unshifted mean from {:6.2f} to {:6.2f} == {:10.4f} kcal/mol".format(x0, x1, orig_mean))
# shift the y-data
shifted_ydata = self.ydata - orig_mean
self.ydata = shifted_ydata
def symmetrize(self):
# average the values having same abs(x) gridpoint
rhs_x, rhs_y = npi.group_by(np.abs(self.xdata)).mean(self.ydata)
# regenerate -x data from averaged values (stored in +x side)
full_x = np.concatenate((np.flip(-rhs_x), rhs_x))
full_y = np.concatenate((np.flip( rhs_y), rhs_y))
# remove the -0.0 entry if it exists
first_neg_idx = len(rhs_x)-1
if (rhs_x[0] == 0.0) and (full_y[first_neg_idx] == full_y[len(rhs_x)]):
full_x = np.delete(full_x, first_neg_idx)
full_y = np.delete(full_y, first_neg_idx)
# compute difference before and after symmetrization
if not np.array_equal(self.xdata, full_x):
print(" error in subtracting pmfs before/after symmetrization" +
"\n the x-range differs here:\n " +
np.setdiff1d(self.xdata, full_x))
else:
subtracted = np.abs(self.ydata - full_y)
self.errbar = subtracted
# set data in object
self.xdata = full_x
self.ydata = full_y
@staticmethod
def join_leaflets(list_pmfs, T):
"""Join PMFs by eq. 5 of the following work.
https://pubs.acs.org/doi/10.1021/jp7114912
Parameters
----------
list_pmfs : list
list of the two Pmf objects to be combined
T : float
temperature of the system
Returns
-------
new_pmf : Pmf
new Pmf object with xdata and ydata of combined pmfs
"""
kt = Profile._get_kt(T)
# combine all xdata and all ydata
if len(list_pmfs) != 2:
print("ERROR: More than 2 PMFs passed into join_leaflets function")
return
x, pmf_raw, allfiles = Profile._decompose_list(list_pmfs)
# take boltzmann weight of free energies
pmf_boltz = np.exp(-1*pmf_raw/kt)
# sum overlapping regions
# https://stackoverflow.com/questions/41821539/calculate-average-of-y-values-with-different-x-values
x_unique, pmf_boltz_sum = npi.group_by(x).sum(pmf_boltz)
# calculate free energies from boltzmann sum
pmf_final = -1*kt*np.log(pmf_boltz_sum)
# create new pmf instance for joined data
new_pmf = Pmf(allfiles, x_unique, pmf_final)
# reorder data for ascending x, then return object
new_pmf._sort_by_x()
return new_pmf
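    # The Boltzmann join above, written out (assumed numbers): at T = 295 K,
    # kT ~ 0.586 kcal/mol, two overlapping values w0, w1 at one gridpoint give
    #   W = -kT * ln(exp(-w0/kT) + exp(-w1/kT))
    # so w0 = w1 = 0 yields W = -kT*ln(2) ~ -0.41 kcal/mol.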
def subsample_errors(self, every_nth):
"Only keep every Nth value of error values."""
size = len(self.errbar)
zeroes = np.zeros(size)
# get indices which should be kept
keep_idx = range(0, size, every_nth)
# transfer the to-keep values into the array of zeros
zeroes[keep_idx] = self.errbar[keep_idx]
self.errbar = zeroes
@staticmethod
def calc_pka_shift(list_pmfs, T):
"""Compute pKa shift profile by eq. 18 of the following work.
https://pubs.acs.org/doi/10.1021/jp7114912
Parameters
----------
list_pmfs : list
list of the two Pmf objects, FIRST neutral and SECOND charged
T : float
temperature of the system
Returns
-------
new_pka : Pka
new Pka object with xdata and ydata of pKa shift profile
"""
# extract constants and data
kt = Profile._get_kt(T)
x0 = list_pmfs[0].xdata
x1 = list_pmfs[1].xdata
y0 = list_pmfs[0].ydata
y1 = list_pmfs[1].ydata
# concatenate file names into single string
allfiles = " ".join([list_pmfs[0].infile, list_pmfs[1].infile])
# make sure xdata are equal for both
if len(list_pmfs) != 2:
print("ERROR: More than 2 PMFs passed into join_leaflets function")
return
if not np.array_equal(x0, x1):
print(" error in matching x-range for computing pka shift " +
"\n the x-range differs here:\n " +
np.setdiff1d(x0, x1))
# subtract pmf_neutral minus pmf_charged
dy = y0 - y1
# divide by 2.3*kt
dy = dy/(2.3*kt)
# create new pmf instance for joined data
new_pka = Pka(allfiles, x0, dy)
return new_pka
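    # Worked form of eq. 18 above (assumed numbers): at T = 295 K,
    #   dpKa(z) = (W_neutral(z) - W_charged(z)) / (2.3 * kT)
    # so a 1.35 kcal/mol difference shifts the pKa by about 1 unit.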
class Pka(Profile):
def __init__(self, infile=None, xdata=None, ydata=None):
super().__init__(infile, xdata, ydata)
def open_join_grads(list_files):
"""Open a list of files with .grad data and join the windows.
Should this be a static function? Maybe it doesn't make sense to call
Grad.open_join_grads(...) so maybe better off as module-level function.
"""
list_grads = []
for f in list_files:
g = Grad(f)
list_grads.append(g)
joined_grad = Grad.join_windows(list_grads)
pmf = joined_grad.integrate()
return pmf
def grads_to_pmf(
side0_files, side1_files,
bulk_range0, bulk_range1,
T,
out_file='pmf.dat'):
"""Main function to generate symmetrized PMF given input gradient files.
Parameters
----------
side0_files : list
list of strings of filenames for gradient files of one side leaflet
side1_files : list
list of strings of filenames for gradient files of one side leaflet
bulk_range0 : list
list of floats for x values that define bulk region for side0 PMF
bulk_range1 : list
list of floats for x values that define bulk region for side1 PMF
T : float
temperature of the system
out_file : string
filename of the output pmf data
Returns
-------
pmf_0 : Pmf
new Pmf object of side0
pmf_1 : Pmf
new Pmf object of side1
joined_pmf : Pmf
new Pmf object with xdata and ydata of joined PMF
"""
# combine windows of each leaflet
pmf_0 = open_join_grads(side0_files)
pmf_1 = open_join_grads(side1_files)
# shift bulk water region to have average pmf of zero
pmf_0.shift_bulk_zero(*bulk_range0)
pmf_1.shift_bulk_zero(*bulk_range1)
print("Value of pre-shifted bulk water region may be an artifact of where "
"(x-value) integration begins, where y-value is defined 0.\n")
pmf_0.write_data('pmf0.dat')
pmf_1.write_data('pmf1.dat')
# combine upper and lower leaflets
joined_pmf = Pmf.join_leaflets([pmf_0, pmf_1], T)
joined_pmf.write_data('pmf_unsym.dat')
# symmetrize pmf
joined_pmf.symmetrize()
#joined_pmf.errbar = np.zeros(len(joined_pmf.ydata))
# write out pmf
joined_pmf.write_data('pmf.dat', errbar=True)
return pmf_0, pmf_1, joined_pmf
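# Minimal call sketch (gradient filenames are assumed placeholders):
#   pmf_0, pmf_1, pmf = grads_to_pmf(
#       ['win1.czar.grad', 'win2.czar.grad'], ['win3.czar.grad'],
#       bulk_range0=[35, 39.9], bulk_range1=[-35, -39.9], T=295)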
def pmfs_to_pka(pmf0_file, pmf1_file, T, out_file='pka_shift.dat'):
"""Main function to calculate pKa shift profile given 2 files of PMFs.
Parameters
----------
pmf0_file : string
filename of the neutral PMF
pmf1_file : string
filename of the charged PMF
T : float
temperature of the system
out_file : string
filename of the output pKa shift profile data
Returns
-------
pka_shift : Pka
new Pka object with xdata and ydata of pKa shift profile
"""
pmf_neu = Pmf(pmf0_file)
pmf_chg = Pmf(pmf1_file)
pka_shift = Pmf.calc_pka_shift([pmf_neu, pmf_chg], T)
pka_shift.write_data(out_file)
return pka_shift
if __name__ == "__main__":
import argparse
import matplotlib.pyplot as plt
parser = argparse.ArgumentParser()
parser.add_argument("-0", "--side0", required=True, nargs='+',
help="One or more files to be processed for one leaflet.")
parser.add_argument("-1", "--side1", required=True, nargs='+',
help="One or more files to be processed for other leaflet.")
parser.add_argument("-p", "--pka", action="store_true", default=False,
help="Compute pKa shift profile from neutral PMF in -0"
" flag and charged PMF in -1 flag")
args = parser.parse_args()
# compute pka shift profile
if args.pka and len(args.side0)==1 and len(args.side1)==1:
pka_shift = pmfs_to_pka(args.side0[0], args.side1[0], T = 295)
# plot final data
plt.plot(pka_shift.xdata, pka_shift.ydata)
plt.grid()
plt.savefig('plot_pka.png', bbox_inches='tight')
plt.show()
# generate pmf from gradient files
else:
pmf_0, pmf_1, joined_pmf = grads_to_pmf(
args.side0, args.side1,
bulk_range0 = [35, 39.9], bulk_range1 = [-35, -39.9],
T = 295)
# for plotting: only keep every Nth error bar else hard to interpret
joined_pmf.subsample_errors(every_nth = 20)
# plot final data
plt.errorbar(joined_pmf.xdata, joined_pmf.ydata, yerr=joined_pmf.errbar)
plt.plot(pmf_0.xdata, pmf_0.ydata)
plt.plot(pmf_1.xdata, pmf_1.ydata)
        plt.xlabel(r'permeant position ($\mathrm{\AA}$)', fontsize=16)
plt.ylabel('pmf (kcal/mol)', fontsize=16)
#plt.tick_params(axis='both', which='major', labelsize=14)
plt.yticks(fontsize=14)
plt.ylim(-10, 20)
plt.grid()
plt.savefig('plot_pmf.png', bbox_inches='tight')
plt.show()
| <filename>analysis/permeability_profiles/abf_pmf_processor.py
import numpy as np
import numpy_indexed as npi
from scipy import integrate
# TODO: consider making the plotting lines in the main function more modular
# TODO: check that file exists in __init__
# TODO: add diagram from group meeting to Github
class Profile:
def __init__(self, infile, xdata, ydata):
# if xdata and ydata are NOT passed, initiate object from file
if all(i is None for i in [xdata, ydata]):
# only unpack x and y data (ignoring error column if present)
xdata, ydata = np.genfromtxt(infile, usecols = (0, 1), unpack=True)
self._infile = infile
self._xdata = xdata
self._ydata = ydata
@property
def infile(self):
"""Getter for infile."""
return self._infile
@property
def xdata(self):
"""Getter for xdata."""
return self._xdata
@property
def ydata(self):
"""Getter for ydata."""
return self._ydata
@infile.setter
def infile(self, value):
"""Setter for infile."""
self._infile = value
@xdata.setter
def xdata(self, value):
"""Setter for xdata."""
self._xdata = value
@ydata.setter
def ydata(self, value):
"""Setter for ydata."""
self._ydata = value
    @staticmethod
    def _decompose_list(list_of_objs):
"""Combine all xdata and ydata from multiple Grad or Pmf objects."""
# extract data from objects
whole_i = []
whole_x = []
whole_y = []
for obj in list_of_objs:
whole_i.append(obj.infile)
whole_x.append(obj.xdata)
whole_y.append(obj.ydata)
# concatenate full list to numpy array
x = np.concatenate(whole_x)
grad = np.concatenate(whole_y)
# concatenate file names into single string
infilestring = " ".join(whole_i)
return x, grad, infilestring
def _sort_by_x(self):
"""Make sure data is sorted by x in ascending order.
To have descending, use [::-1] after arr1inds."""
unsorted_x = self.xdata
unsorted_y = self.ydata
arr1inds = unsorted_x.argsort()
sorted_x = unsorted_x[arr1inds]
sorted_y = unsorted_y[arr1inds]
self.xdata = sorted_x
self.ydata = sorted_y
@staticmethod
def _get_kt(T):
"""Compute thermal energy."""
# Boltzmann constant in kcal/(mol K)
kb = 0.0019872041
kt = kb*T
return kt
def write_data(self, outfile, errbar=False):
header = "Input data: {}".format(self.infile)
if errbar:
np.savetxt(
outfile, np.c_[self.xdata, self.ydata, self.errbar],
header=header, fmt=['%.2f', '%.6f', '%.6f'])
else:
np.savetxt(
outfile, np.c_[self.xdata, self.ydata],
header=header, fmt=['%.2f', '%.6f'])
class Grad(Profile):
def __init__(self, infile=None, xdata=None, ydata=None):
super().__init__(infile, xdata, ydata)
def integrate(self):
# integrate ydata
y_pmf = integrate.cumtrapz(self.ydata, self.xdata)
# take midpoint of all adjacent data points in half_cvs due to integration
# https://tinyurl.com/ycahltpp
x_pmf = (self.xdata[1:] + self.xdata[:-1]) / 2
x_pmf = x_pmf.flatten()
# create new pmf object from integrated data
new_pmf = Pmf(self.infile, x_pmf, y_pmf)
return new_pmf
@staticmethod
def join_windows(list_grads):
"""Join windows by averaging overlapping regions of .czar.grad files.
https://stackoverflow.com/questions/41821539/calculate-average-of-y-values-with-different-x-values
Parameters
----------
list_grads : list
list of Grad objects to be combined
Returns
-------
new_grad : Grad
new Grad object with xdata and ydata of combined grads
"""
# combine all xdata and all ydata
x, grad, allfiles = Profile._decompose_list(list_grads)
# average the values having same x gridpoint
x_unique, grad_mean = npi.group_by(x).mean(grad)
# create new grad instance for joined data
new_grad = Grad(allfiles, x_unique.flatten(), grad_mean.flatten())
# reorder data for ascending x, then return object
new_grad._sort_by_x()
return new_grad
class Pmf(Profile):
def __init__(self, infile=None, xdata=None, ydata=None):
super().__init__(infile, xdata, ydata)
def shift_bulk_zero(self, x0, x1):
"""Compute average from x0 to x1, and shift the average to zero.
Parameters
----------
x0 : float
x1 : float
"""
# get indices of x0 and x1 values
try:
x0_index = np.where(np.isclose(self.xdata, x0))[0][0]
x1_index = np.where(np.isclose(self.xdata, x1))[0][0]
except IndexError as e:
raise Exception("ERROR: at least one x-value not found or was " +
"found more than one time (IndexError)") from e
# calculate the mean of the region
orig_mean = np.mean(
self.ydata[ min(x0_index, x1_index):max(x0_index, x1_index)+1 ])
print("Unshifted mean from {:6.2f} to {:6.2f} == {:10.4f} kcal/mol".format(x0, x1, orig_mean))
# shift the y-data
shifted_ydata = self.ydata - orig_mean
self.ydata = shifted_ydata
def symmetrize(self):
# average the values having same abs(x) gridpoint
rhs_x, rhs_y = npi.group_by(np.abs(self.xdata)).mean(self.ydata)
# regenerate -x data from averaged values (stored in +x side)
full_x = np.concatenate((np.flip(-rhs_x), rhs_x))
full_y = np.concatenate((np.flip( rhs_y), rhs_y))
# remove the -0.0 entry if it exists
first_neg_idx = len(rhs_x)-1
if (rhs_x[0] == 0.0) and (full_y[first_neg_idx] == full_y[len(rhs_x)]):
full_x = np.delete(full_x, first_neg_idx)
full_y = np.delete(full_y, first_neg_idx)
# compute difference before and after symmetrization
if not np.array_equal(self.xdata, full_x):
print(" error in subtracting pmfs before/after symmetrization" +
"\n the x-range differs here:\n " +
np.setdiff1d(self.xdata, full_x))
else:
subtracted = np.abs(self.ydata - full_y)
self.errbar = subtracted
# set data in object
self.xdata = full_x
self.ydata = full_y
@staticmethod
def join_leaflets(list_pmfs, T):
"""Join PMFs by eq. 5 of the following work.
https://pubs.acs.org/doi/10.1021/jp7114912
Parameters
----------
list_pmfs : list
list of the two Pmf objects to be combined
T : float
temperature of the system
Returns
-------
new_pmf : Pmf
new Pmf object with xdata and ydata of combined pmfs
"""
kt = Profile._get_kt(T)
# combine all xdata and all ydata
if len(list_pmfs) != 2:
print("ERROR: More than 2 PMFs passed into join_leaflets function")
return
x, pmf_raw, allfiles = Profile._decompose_list(list_pmfs)
# take boltzmann weight of free energies
pmf_boltz = np.exp(-1*pmf_raw/kt)
# sum overlapping regions
# https://stackoverflow.com/questions/41821539/calculate-average-of-y-values-with-different-x-values
x_unique, pmf_boltz_sum = npi.group_by(x).sum(pmf_boltz)
# calculate free energies from boltzmann sum
pmf_final = -1*kt*np.log(pmf_boltz_sum)
# create new pmf instance for joined data
new_pmf = Pmf(allfiles, x_unique, pmf_final)
# reorder data for ascending x, then return object
new_pmf._sort_by_x()
return new_pmf
def subsample_errors(self, every_nth):
"Only keep every Nth value of error values."""
size = len(self.errbar)
zeroes = np.zeros(size)
# get indices which should be kept
keep_idx = range(0, size, every_nth)
# transfer the to-keep values into the array of zeros
zeroes[keep_idx] = self.errbar[keep_idx]
self.errbar = zeroes
@staticmethod
def calc_pka_shift(list_pmfs, T):
"""Compute pKa shift profile by eq. 18 of the following work.
https://pubs.acs.org/doi/10.1021/jp7114912
Parameters
----------
list_pmfs : list
list of the two Pmf objects, FIRST neutral and SECOND charged
T : float
temperature of the system
Returns
-------
new_pka : Pka
new Pka object with xdata and ydata of pKa shift profile
"""
# extract constants and data
kt = Profile._get_kt(T)
x0 = list_pmfs[0].xdata
x1 = list_pmfs[1].xdata
y0 = list_pmfs[0].ydata
y1 = list_pmfs[1].ydata
# concatenate file names into single string
allfiles = " ".join([list_pmfs[0].infile, list_pmfs[1].infile])
# make sure xdata are equal for both
if len(list_pmfs) != 2:
print("ERROR: More than 2 PMFs passed into join_leaflets function")
return
if not np.array_equal(x0, x1):
print(" error in matching x-range for computing pka shift " +
"\n the x-range differs here:\n " +
np.setdiff1d(x0, x1))
# subtract pmf_neutral minus pmf_charged
dy = y0 - y1
# divide by 2.3*kt
dy = dy/(2.3*kt)
# create new pmf instance for joined data
new_pka = Pka(allfiles, x0, dy)
return new_pka
class Pka(Profile):
def __init__(self, infile=None, xdata=None, ydata=None):
super().__init__(infile, xdata, ydata)
def open_join_grads(list_files):
"""Open a list of files with .grad data and join the windows.
Should this be a static function? Maybe it doesn't make sense to call
Grad.open_join_grads(...) so maybe better off as module-level function.
"""
list_grads = []
for f in list_files:
g = Grad(f)
list_grads.append(g)
joined_grad = Grad.join_windows(list_grads)
pmf = joined_grad.integrate()
return pmf
def grads_to_pmf(
side0_files, side1_files,
bulk_range0, bulk_range1,
T,
out_file='pmf.dat'):
"""Main function to generate symmetrized PMF given input gradient files.
Parameters
----------
side0_files : list
list of strings of filenames for gradient files of one side leaflet
side1_files : list
list of strings of filenames for gradient files of one side leaflet
bulk_range0 : list
list of floats for x values that define bulk region for side0 PMF
bulk_range1 : list
list of floats for x values that define bulk region for side1 PMF
T : float
temperature of the system
out_file : string
filename of the output pmf data
Returns
-------
pmf_0 : Pmf
new Pmf object of side0
pmf_1 : Pmf
new Pmf object of side1
joined_pmf : Pmf
new Pmf object with xdata and ydata of joined PMF
"""
# combine windows of each leaflet
pmf_0 = open_join_grads(side0_files)
pmf_1 = open_join_grads(side1_files)
# shift bulk water region to have average pmf of zero
pmf_0.shift_bulk_zero(*bulk_range0)
pmf_1.shift_bulk_zero(*bulk_range1)
print("Value of pre-shifted bulk water region may be an artifact of where "
"(x-value) integration begins, where y-value is defined 0.\n")
pmf_0.write_data('pmf0.dat')
pmf_1.write_data('pmf1.dat')
# combine upper and lower leaflets
joined_pmf = Pmf.join_leaflets([pmf_0, pmf_1], T)
joined_pmf.write_data('pmf_unsym.dat')
# symmetrize pmf
joined_pmf.symmetrize()
#joined_pmf.errbar = np.zeros(len(joined_pmf.ydata))
# write out pmf
joined_pmf.write_data('pmf.dat', errbar=True)
return pmf_0, pmf_1, joined_pmf
def pmfs_to_pka(pmf0_file, pmf1_file, T, out_file='pka_shift.dat'):
"""Main function to calculate pKa shift profile given 2 files of PMFs.
Parameters
----------
pmf0_file : string
filename of the neutral PMF
pmf1_file : string
filename of the charged PMF
T : float
temperature of the system
out_file : string
filename of the output pKa shift profile data
Returns
-------
pka_shift : Pka
new Pka object with xdata and ydata of pKa shift profile
"""
pmf_neu = Pmf(pmf0_file)
pmf_chg = Pmf(pmf1_file)
pka_shift = Pmf.calc_pka_shift([pmf_neu, pmf_chg], T)
pka_shift.write_data(out_file)
return pka_shift
if __name__ == "__main__":
import argparse
import matplotlib.pyplot as plt
parser = argparse.ArgumentParser()
parser.add_argument("-0", "--side0", required=True, nargs='+',
help="One or more files to be processed for one leaflet.")
parser.add_argument("-1", "--side1", required=True, nargs='+',
help="One or more files to be processed for other leaflet.")
parser.add_argument("-p", "--pka", action="store_true", default=False,
help="Compute pKa shift profile from neutral PMF in -0"
" flag and charged PMF in -1 flag")
args = parser.parse_args()
# compute pka shift profile
if args.pka and len(args.side0)==1 and len(args.side1)==1:
pka_shift = pmfs_to_pka(args.side0[0], args.side1[0], T = 295)
# plot final data
plt.plot(pka_shift.xdata, pka_shift.ydata)
plt.grid()
plt.savefig('plot_pka.png', bbox_inches='tight')
plt.show()
# generate pmf from gradient files
else:
pmf_0, pmf_1, joined_pmf = grads_to_pmf(
args.side0, args.side1,
bulk_range0 = [35, 39.9], bulk_range1 = [-35, -39.9],
T = 295)
# for plotting: only keep every Nth error bar else hard to interpret
joined_pmf.subsample_errors(every_nth = 20)
# plot final data
plt.errorbar(joined_pmf.xdata, joined_pmf.ydata, yerr=joined_pmf.errbar)
plt.plot(pmf_0.xdata, pmf_0.ydata)
plt.plot(pmf_1.xdata, pmf_1.ydata)
        plt.xlabel(r'permeant position ($\mathrm{\AA}$)', fontsize=16)
plt.ylabel('pmf (kcal/mol)', fontsize=16)
#plt.tick_params(axis='both', which='major', labelsize=14)
plt.yticks(fontsize=14)
plt.ylim(-10, 20)
plt.grid()
plt.savefig('plot_pmf.png', bbox_inches='tight')
plt.show()
| en | 0.643892 | # TODO: consider making the plotting lines in the main function more modular # TODO: check that file exists in __init__ # TODO: add diagram from group meeting to Github # if xdata and ydata are NOT passed, initiate object from file # only unpack x and y data (ignoring error column if present) Getter for infile. Getter for xdata. Getter for ydata. Setter for infile. Setter for xdata. Setter for ydata. Combine all xdata and ydata from multiple Grad or Pmf objects. # extract data from objects # concatenate full list to numpy array # concatenate file names into single string Make sure data is sorted by x in ascending order. To have descending, use [::-1] after arr1inds. Compute thermal energy. # Boltzmann constant in kcal/(mol K) # integrate ydata # take midpoint of all adjacent data points in half_cvs due to integration # https://tinyurl.com/ycahltpp # create new pmf object from integrated data Join windows by averaging overlapping regions of .czar.grad files. https://stackoverflow.com/questions/41821539/calculate-average-of-y-values-with-different-x-values Parameters ---------- list_grads : list list of Grad objects to be combined Returns ------- new_grad : Grad new Grad object with xdata and ydata of combined grads # combine all xdata and all ydata # average the values having same x gridpoint # create new grad instance for joined data # reorder data for ascending x, then return object Compute average from x0 to x1, and shift the average to zero. Parameters ---------- x0 : float x1 : float # get indices of x0 and x1 values # calculate the mean of the region # shift the y-data # average the values having same abs(x) gridpoint # regenerate -x data from averaged values (stored in +x side) # remove the -0.0 entry if it exists # compute difference before and after symmetrization # set data in object Join PMFs by eq. 5 of the following work. 
https://pubs.acs.org/doi/10.1021/jp7114912 Parameters ---------- list_pmfs : list list of the two Pmf objects to be combined T : float temperature of the system Returns ------- new_pmf : Pmf new Pmf object with xdata and ydata of combined pmfs # combine all xdata and all ydata # take boltzmann weight of free energies # sum overlapping regions # https://stackoverflow.com/questions/41821539/calculate-average-of-y-values-with-different-x-values # calculate free energies from boltzmann sum # create new pmf instance for joined data # reorder data for ascending x, then return object size = len(self.errbar) zeroes = np.zeros(size) # get indices which should be kept keep_idx = range(0, size, every_nth) # transfer the to-keep values into the array of zeros zeroes[keep_idx] = self.errbar[keep_idx] self.errbar = zeroes @staticmethod def calc_pka_shift(list_pmfs, T): # extract constants and data kt = Profile._get_kt(T) x0 = list_pmfs[0].xdata x1 = list_pmfs[1].xdata y0 = list_pmfs[0].ydata y1 = list_pmfs[1].ydata # concatenate file names into single string allfiles = " ".join([list_pmfs[0].infile, list_pmfs[1].infile]) # make sure xdata are equal for both if len(list_pmfs) != 2: print("ERROR: More than 2 PMFs passed into join_leaflets function") return if not np.array_equal(x0, x1): print(" error in matching x-range for computing pka shift " + "\n the x-range differs here:\n " + np.setdiff1d(x0, x1)) # subtract pmf_neutral minus pmf_charged dy = y0 - y1 # divide by 2.3*kt dy = dy/(2.3*kt) # create new pmf instance for joined data new_pka = Pka(allfiles, x0, dy) return new_pka class Pka(Profile): def __init__(self, infile=None, xdata=None, ydata=None): super().__init__(infile, xdata, ydata) def open_join_grads(list_files): list_grads = [] for f in list_files: g = Grad(f) list_grads.append(g) joined_grad = Grad.join_windows(list_grads) pmf = joined_grad.integrate() return pmf def grads_to_pmf( side0_files, side1_files, bulk_range0, bulk_range1, T, out_file='pmf.dat'): # combine windows of each leaflet pmf_0 = open_join_grads(side0_files) pmf_1 = open_join_grads(side1_files) # shift bulk water region to have average pmf of zero pmf_0.shift_bulk_zero(*bulk_range0) pmf_1.shift_bulk_zero(*bulk_range1) print("Value of pre-shifted bulk water region may be an artifact of where " "(x-value) integration begins, where y-value is defined 0.\n") pmf_0.write_data('pmf0.dat') pmf_1.write_data('pmf1.dat') # combine upper and lower leaflets joined_pmf = Pmf.join_leaflets([pmf_0, pmf_1], T) joined_pmf.write_data('pmf_unsym.dat') # symmetrize pmf joined_pmf.symmetrize() #joined_pmf.errbar = np.zeros(len(joined_pmf.ydata)) # write out pmf joined_pmf.write_data('pmf.dat', errbar=True) return pmf_0, pmf_1, joined_pmf def pmfs_to_pka(pmf0_file, pmf1_file, T, out_file='pka_shift.dat'): # compute pka shift profile # plot final data # generate pmf from gradient files # for plotting: only keep every Nth error bar else hard to interpret # plot final data #plt.tick_params(axis='both', which='major', labelsize=14) | 2.470529 | 2 |
edge/python/modify-host-origin-request-header/.aws-sam/build/ModifyRequestHeaderFunction/app.py | Joe-Wu-88/my-test-repository | 0 | 6633287 | import os
def lambda_handler(event, context):
request = event['Records'][0]['cf']['request']
originDomain = os.environ['originDomain']
    request['headers']['host'][0]['value'] = originDomain
return request
| import os
def lambda_handler(event, context):
request = event['Records'][0]['cf']['request']
originDomain = os.environ['originDomain']
request['headers']['host'][0]['value'] = originDomain;
return request
| none | 1 | 2.104616 | 2 |
|
exams/Messages Manager.py | nrgxtra/fundamentals | 0 | 6633288 | users_base = dict()
capacity = int(input())
while True:
command = input()
if command == 'Statistics':
break
tokens = command.split('=')
com = tokens[0]
if com == 'Add':
username = tokens[1]
sent = int(tokens[2])
received = int(tokens[3])
if username not in users_base:
users_base[username] = [sent, received]
elif com == 'Message':
sender = tokens[1]
receiver = tokens[2]
        if sender in users_base and receiver in users_base:
x = users_base[sender].pop(0)
users_base[sender].insert(0, x+1)
if users_base[sender][0]+users_base[sender][1] >= capacity:
users_base.pop(sender)
print(f"{sender} reached the capacity!")
y = users_base[receiver].pop(1)
users_base[receiver].insert(1, y+1)
if users_base[receiver][1] + users_base[receiver][0] >= capacity:
users_base.pop(receiver)
print(f"{receiver} reached the capacity!")
elif com == 'Empty':
username = tokens[1]
if username == 'All':
users_base.clear()
if username in users_base:
users_base.pop(username)
print(f'Users count: {len(users_base)}')
sort_base = dict(sorted(users_base.items(), key=lambda x: (-x[1][1], x[0])))
for user, msg in sort_base.items():
print(f'{user} - {msg[0]+msg[1]}')
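# Input sketch (assumed console session): commands arrive as
#   Add={user}={sent}={received}, Message={sender}={receiver},
#   Empty={user} (or Empty=All), and Statistics to stop;
# output lists users sorted by received count (desc), then name (asc).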
| users_base = dict()
capacity = int(input())
while True:
command = input()
if command == 'Statistics':
break
tokens = command.split('=')
com = tokens[0]
if com == 'Add':
username = tokens[1]
sent = int(tokens[2])
received = int(tokens[3])
if username not in users_base:
users_base[username] = [sent, received]
elif com == 'Message':
sender = tokens[1]
receiver = tokens[2]
        if sender in users_base and receiver in users_base:
x = users_base[sender].pop(0)
users_base[sender].insert(0, x+1)
if users_base[sender][0]+users_base[sender][1] >= capacity:
users_base.pop(sender)
print(f"{sender} reached the capacity!")
y = users_base[receiver].pop(1)
users_base[receiver].insert(1, y+1)
if users_base[receiver][1] + users_base[receiver][0] >= capacity:
users_base.pop(receiver)
print(f"{receiver} reached the capacity!")
elif com == 'Empty':
username = tokens[1]
if username == 'All':
users_base.clear()
if username in users_base:
users_base.pop(username)
print(f'Users count: {len(users_base)}')
sort_base = dict(sorted(users_base.items(), key=lambda x: (-x[1][1], x[0])))
for user, msg in sort_base.items():
print(f'{user} - {msg[0]+msg[1]}')
| none | 1 | 3.363749 | 3 |
|
vectorhub/encoders/image/tfhub/inception_resnet.py | NanaAkwasiAbayieBoateng/vectorhub | 1 | 6633289 | from ..base import BaseImage2Vec
from ....base import catch_vector_errors
from ....doc_utils import ModelDefinition
from ....import_utils import *
if is_all_dependency_installed('encoders-image-tfhub'):
import tensorflow as tf
import tensorflow_hub as hub
import traceback
from datetime import date
InceptionResnetModelDefinition = ModelDefinition(
model_id = "image/inception-resnet",
model_name="Inception Resnet",
vector_length=1536,
description="""
Very deep convolutional networks have been central to the largest advances in image recognition performance in
recent years. One example is the Inception architecture that has been shown to achieve very good performance at
relatively low computational cost. Recently, the introduction of residual connections in conjunction with a more traditional
architecture has yielded state-of-the-art performance in the 2015 ILSVRC challenge; its performance was similar to the latest
generation Inception-v3 network. This raises the question of whether there are any benefit in combining the Inception architecture
with residual connections. Here we give clear empirical evidence that training with residual connections accelerates the training
of Inception networks significantly. There is also some evidence of residual Inception networks outperforming similarly expensive
Inception networks without residual connections by a thin margin. We also present several new streamlined architectures for both
residual and non-residual Inception networks. These variations improve the single-frame recognition performance on the ILSVRC 2012
classification task significantly. We further demonstrate how proper activation scaling stabilizes the training of very wide residual
Inception networks. With an ensemble of three residual and one Inception-v4, we achieve 3.08 percent top-5 error on the test set of the
ImageNet classification (CLS) challenge.""",
paper="https://arxiv.org/abs/1602.07261",
repo="https://tfhub.dev/google/imagenet/inception_resnet_v2/feature_vector/4",
installation="pip install vectorhub[encoders-image-tfhub]",
release_date=date(2016,2,23),
example="""
#pip install vectorhub[encoders-image-tfhub]
from vectorhub.encoders.image.tfhub import InceptionResnet2Vec
model = InceptionResnet2Vec()
sample = model.read('https://getvectorai.com/assets/hub-logo-with-text.png')
model.encode(sample)
"""
)
__doc__ = InceptionResnetModelDefinition.create_docs()
class InceptionResnet2Vec(BaseImage2Vec):
definition = InceptionResnetModelDefinition
def __init__(self, model_url="https://tfhub.dev/google/imagenet/inception_resnet_v2/feature_vector/4"):
self.model_url = model_url
self.model_name = self.model_url.replace(
'https://tfhub.dev/google/', '').replace('/', '_')
self.model = hub.load(self.model_url)
self.vector_length = 1536
@catch_vector_errors
def encode(self, image):
"""
Encode an image using InceptionResnet.
Example:
>>> from vectorhub.image.encoder.tfhub import inception_resnet
>>> model = InceptionResnet2Vec(username, api_key)
>>> model.encode("Hey!")
"""
return self.model([image]).numpy().tolist()[0]
@catch_vector_errors
def bulk_encode(self, images, threads=10, chunks=10):
return [i for c in self.chunk(images, chunks) for i in self.model(c).numpy().tolist()]
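    # Minimal bulk call sketch (image URLs assumed; read() and chunk() come
    # from the base class, as shown in the ModelDefinition example above):
    #   model = InceptionResnet2Vec()
    #   vectors = model.bulk_encode([model.read(u) for u in urls])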
| from ..base import BaseImage2Vec
from ....base import catch_vector_errors
from ....doc_utils import ModelDefinition
from ....import_utils import *
if is_all_dependency_installed('encoders-image-tfhub'):
import tensorflow as tf
import tensorflow_hub as hub
import traceback
from datetime import date
InceptionResnetModelDefinition = ModelDefinition(
model_id = "image/inception-resnet",
model_name="Inception Resnet",
vector_length=1536,
description="""
Very deep convolutional networks have been central to the largest advances in image recognition performance in
recent years. One example is the Inception architecture that has been shown to achieve very good performance at
relatively low computational cost. Recently, the introduction of residual connections in conjunction with a more traditional
architecture has yielded state-of-the-art performance in the 2015 ILSVRC challenge; its performance was similar to the latest
generation Inception-v3 network. This raises the question of whether there are any benefit in combining the Inception architecture
with residual connections. Here we give clear empirical evidence that training with residual connections accelerates the training
of Inception networks significantly. There is also some evidence of residual Inception networks outperforming similarly expensive
Inception networks without residual connections by a thin margin. We also present several new streamlined architectures for both
residual and non-residual Inception networks. These variations improve the single-frame recognition performance on the ILSVRC 2012
classification task significantly. We further demonstrate how proper activation scaling stabilizes the training of very wide residual
Inception networks. With an ensemble of three residual and one Inception-v4, we achieve 3.08 percent top-5 error on the test set of the
ImageNet classification (CLS) challenge.""",
paper="https://arxiv.org/abs/1602.07261",
repo="https://tfhub.dev/google/imagenet/inception_resnet_v2/feature_vector/4",
installation="pip install vectorhub[encoders-image-tfhub]",
release_date=date(2016,2,23),
example="""
#pip install vectorhub[encoders-image-tfhub]
from vectorhub.encoders.image.tfhub import InceptionResnet2Vec
model = InceptionResnet2Vec()
sample = model.read('https://getvectorai.com/assets/hub-logo-with-text.png')
model.encode(sample)
"""
)
__doc__ = InceptionResnetModelDefinition.create_docs()
class InceptionResnet2Vec(BaseImage2Vec):
definition = InceptionResnetModelDefinition
def __init__(self, model_url="https://tfhub.dev/google/imagenet/inception_resnet_v2/feature_vector/4"):
self.model_url = model_url
self.model_name = self.model_url.replace(
'https://tfhub.dev/google/', '').replace('/', '_')
self.model = hub.load(self.model_url)
self.vector_length = 1536
@catch_vector_errors
def encode(self, image):
"""
Encode an image using InceptionResnet.
Example:
>>> from vectorhub.image.encoder.tfhub import inception_resnet
>>> model = InceptionResnet2Vec(username, api_key)
>>> model.encode("Hey!")
"""
return self.model([image]).numpy().tolist()[0]
@catch_vector_errors
def bulk_encode(self, images, threads=10, chunks=10):
return [i for c in self.chunk(images, chunks) for i in self.model(c).numpy().tolist()]
| en | 0.875281 | Very deep convolutional networks have been central to the largest advances in image recognition performance in recent years. One example is the Inception architecture that has been shown to achieve very good performance at relatively low computational cost. Recently, the introduction of residual connections in conjunction with a more traditional architecture has yielded state-of-the-art performance in the 2015 ILSVRC challenge; its performance was similar to the latest generation Inception-v3 network. This raises the question of whether there are any benefit in combining the Inception architecture with residual connections. Here we give clear empirical evidence that training with residual connections accelerates the training of Inception networks significantly. There is also some evidence of residual Inception networks outperforming similarly expensive Inception networks without residual connections by a thin margin. We also present several new streamlined architectures for both residual and non-residual Inception networks. These variations improve the single-frame recognition performance on the ILSVRC 2012 classification task significantly. We further demonstrate how proper activation scaling stabilizes the training of very wide residual Inception networks. With an ensemble of three residual and one Inception-v4, we achieve 3.08 percent top-5 error on the test set of the ImageNet classification (CLS) challenge. #pip install vectorhub[encoders-image-tfhub] from vectorhub.encoders.image.tfhub import InceptionResnet2Vec model = InceptionResnet2Vec() sample = model.read('https://getvectorai.com/assets/hub-logo-with-text.png') model.encode(sample) Encode an image using InceptionResnet. Example: >>> from vectorhub.image.encoder.tfhub import inception_resnet >>> model = InceptionResnet2Vec(username, api_key) >>> model.encode("Hey!") | 2.148487 | 2 |
fuji/settingsclass.py | startechsheffield/startech | 0 | 6633290 | from os.path import exists
from fuji.generalclass import checkToken
from fuji.listclass import align
def __checkstr__(val):
if not val.find("\n") < 0:
return(False)
badChars = "{|}"
for c in range(len(val)):
if not badChars.find(val[c]) < 0:
return(False)
return(True)
def __convert__(val):
val = val.replace("\n","")
if val.lower() == "false":
return(False)
elif val.lower() == "true":
return(True)
elif val.endswith("}") == True:
val = val.replace("{","")
val = val.replace("}","")
return(val.split("|"))
try:
return(int(val))
except:
return(val)
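# Conversion sketch for the registry value format handled above:
#   "true\n" -> True    "{a|b}\n" -> ["a", "b"]    "3\n" -> 3    "x\n" -> "x"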
def getList(tkn):
if checkToken(tkn,True) == False:
return([])
try:
f = open("/usr/share/fuji/api/reg_"+tkn+".txt","r")
lines = f.readlines()
f.close()
except:
return([])
olist = []
for l in range(len(lines)):
if not lines[l].find("#") == 0:
olist.append(lines[l].split(" : ")[0])
return(olist)
def get(tkn,ttl,sf=False):
if checkToken(tkn,True) == False or checkToken(ttl,True) == False:
return([])
if not type(sf) == bool:
sf = False
try:
f = open("/usr/share/fuji/api/reg_"+tkn+".txt","r")
lines = f.readlines()
f.close()
except:
return([])
for l in range(len(lines)):
if not lines[l].find("#") == 0 and lines[l].split(" : ")[0] == ttl:
r = lines[l].split(" : ")[1]
if sf == False or r.endswith("}\n"):
r = __convert__(r)
if not type(r) == list or sf == True:
return(r)
for v in range(len(r)):
                r[v] = __convert__(r[v])
return(r)
return("")
def set(tkn,ttl,val):
if checkToken(tkn,True) == False or checkToken(ttl,True) == False:
return(False)
if type(val) == list:
ostr = "{"
for e in range(len(val)):
            if not (type(val[e]) == int or type(val[e]) == bool) and (type(val[e]) != str or __checkstr__(val[e]) == False):
return(False)
if not e == 0:
ostr = ostr + "|"
ostr = ostr + str(val[e])
ostr = ostr + "}"
val = ostr
elif not (type(val) == str or type(val) == int or type(val) == bool):
return(False)
print(val)
if exists("/usr/share/fuji/api/reg_"+tkn+".txt") == False:
try:
f = open("/usr/share/fuji/api/reg_"+tkn+".txt","w")
f.write("# Please try to avoid editing this file manually if possible.\n")
f.write(ttl+" : "+str(val)+"\n")
f.close()
return(True)
except:
return(False)
try:
f = open("/usr/share/fuji/api/reg_"+tkn+".txt","r")
lines = f.readlines()
f.close()
except:
return(False)
found = False
for l in range(len(lines)):
if not lines[l].find("#") == 0 and lines[l].split(" : ")[0] == ttl:
found = True
lines[l] = ttl+" : "+str(val)+"\n"
break
if found == False:
lines.append(ttl+" : "+str(val)+"\n")
try:
f = open("/usr/share/fuji/api/reg_"+tkn+".txt","w")
f.writelines(lines)
f.close()
except:
return(False)
return(True)
def unset(tkn,ttl):
if checkToken(tkn,True) == False or checkToken(ttl,True) == False:
return(False)
try:
f = open("/usr/share/fuji/api/reg_"+tkn+".txt","r")
lines = f.readlines()
f.close()
except:
return(False)
olist = []
for l in range(len(lines)):
if lines[l].find(ttl+" : ") < 0:
olist.append(lines[l])
break
try:
f = open("/usr/share/fuji/api/reg_"+tkn+".txt","w")
f.writelines(olist)
f.close()
return(True)
except:
return(False)
def unsetAll(tkn):
if checkToken(tkn,True) == False:
return(False)
lst = getList(tkn)
if lst == []:
return(False)
for s in range(len(lst)):
if unset(tkn,lst[s]) == False:
return(False)
return(True)
| from os.path import exists
from fuji.generalclass import checkToken
from fuji.listclass import align
def __checkstr__(val):
if not val.find("\n") < 0:
return(False)
badChars = "{|}"
for c in range(len(val)):
if not badChars.find(val[c]) < 0:
return(False)
return(True)
def __convert__(val):
val = val.replace("\n","")
if val.lower() == "false":
return(False)
elif val.lower() == "true":
return(True)
elif val.endswith("}") == True:
val = val.replace("{","")
val = val.replace("}","")
return(val.split("|"))
try:
return(int(val))
except:
return(val)
def getList(tkn):
if checkToken(tkn,True) == False:
return([])
try:
f = open("/usr/share/fuji/api/reg_"+tkn+".txt","r")
lines = f.readlines()
f.close()
except:
return([])
olist = []
for l in range(len(lines)):
if not lines[l].find("#") == 0:
olist.append(lines[l].split(" : ")[0])
return(olist)
def get(tkn,ttl,sf=False):
if checkToken(tkn,True) == False or checkToken(ttl,True) == False:
return([])
if not type(sf) == bool:
sf = False
try:
f = open("/usr/share/fuji/api/reg_"+tkn+".txt","r")
lines = f.readlines()
f.close()
except:
return([])
for l in range(len(lines)):
if not lines[l].find("#") == 0 and lines[l].split(" : ")[0] == ttl:
r = lines[l].split(" : ")[1]
if sf == False or r.endswith("}\n"):
r = __convert__(r)
if not type(r) == list or sf == True:
return(r)
for v in range(len(r)):
r[v] == __convert__(r[v])
return(r)
return("")
def set(tkn,ttl,val):
if checkToken(tkn,True) == False or checkToken(ttl,True) == False:
return(False)
if type(val) == list:
ostr = "{"
for e in range(len(val)):
if (type(val[e]) == str and __checkstr__(val[e]) == False) and not (type(val) == int or type(val) == bool):
return(False)
if not e == 0:
ostr = ostr + "|"
ostr = ostr + str(val[e])
ostr = ostr + "}"
val = ostr
elif not (type(val) == str or type(val) == int or type(val) == bool):
return(False)
print(val)
if exists("/usr/share/fuji/api/reg_"+tkn+".txt") == False:
try:
f = open("/usr/share/fuji/api/reg_"+tkn+".txt","w")
f.write("# Please try to avoid editing this file manually if possible.\n")
f.write(ttl+" : "+str(val)+"\n")
f.close()
return(True)
except:
return(False)
try:
f = open("/usr/share/fuji/api/reg_"+tkn+".txt","r")
lines = f.readlines()
f.close()
except:
return(False)
found = False
for l in range(len(lines)):
if not lines[l].find("#") == 0 and lines[l].split(" : ")[0] == ttl:
found = True
lines[l] = ttl+" : "+str(val)+"\n"
break
if found == False:
lines.append(ttl+" : "+str(val)+"\n")
try:
f = open("/usr/share/fuji/api/reg_"+tkn+".txt","w")
f.writelines(lines)
f.close()
except:
return(False)
return(True)
def unset(tkn,ttl):
if checkToken(tkn,True) == False or checkToken(ttl,True) == False:
return(False)
try:
f = open("/usr/share/fuji/api/reg_"+tkn+".txt","r")
lines = f.readlines()
f.close()
except:
return(False)
olist = []
for l in range(len(lines)):
if lines[l].find(ttl+" : ") < 0:
olist.append(lines[l])
break
try:
f = open("/usr/share/fuji/api/reg_"+tkn+".txt","w")
f.writelines(olist)
f.close()
return(True)
except:
return(False)
def unsetAll(tkn):
if checkToken(tkn,True) == False:
return(False)
lst = getList(tkn)
if lst == []:
return(False)
for s in range(len(lst)):
if unset(tkn,lst[s]) == False:
return(False)
return(True)
| none | 1 | 2.547824 | 3 |
|
toSort/inMoovHandRobot.py | sola1993/inmoov | 1 | 6633291 | <filename>toSort/inMoovHandRobot.py
from java.lang import String
from org.myrobotlab.service import Speech
from org.myrobotlab.service import Sphinx
from org.myrobotlab.service import Runtime
# This demo is a basic speech recognition script.
#
# A set of commands needs to be defined before the recognizer starts
# Internet connectivity is needed initially to download the audio files
# of the Speech service (its default behavior interfaces with Google)
# once the phrases are spoken once, the files are used from that point on
# and internet connectivity is no longer used. These cached files
# can be found ./audioFile/google/<language code>/audrey/phrase.mpe
#
# A message route is created to NOT recognize speech when the speech service is talking.
# Otherwise, initially amusing scenarios can occur such as infinite loops of
# the robot recognizing "hello", then saying "hello", then recognizing "hello"...
#
# The recognized phrase can easily be hooked to additional function such as
# changing the mode of the robot, or moving it. Speech recognition is not
# the best interface to do finely controlled actuation. But, it is very
# convenient to express high-level (e.g. go to center of the room) commands
#
# FYI - The memory requirements for Sphinx are a bit hefty and depending on the
# platform additional JVM arguments might be necessary e.g. -Xmx256m
# create an ear
ear = Runtime.create("ear","Sphinx")
# create the grammar you would like recognized
# this must be done before the service is started
ear.createGrammar("all open | hand close | pinch mode | open pinch | hand open | hand rest | hand open two ")
ear.startService()
# start the mouth
mouth = Runtime.createAndStart("mouth","Speech")
# set up a message route from the ear --to--> python method "heard"
ear.addListener("recognized", python.name, "heard", String().getClass());
# this method is invoked when something is
# recognized by the ear - in this case we
# have the mouth "talk back" the word it recognized
# prevent infinite loop - this will suppress the
# recognition when speaking - default behavior
# when attaching an ear to a mouth :)
ear.attach("mouth")
#create an Arduino & name arduino & index
runtime.createAndStart("arduino","Arduino")
runtime.createAndStart("thumb","Servo")
runtime.createAndStart("index","Servo")
runtime.createAndStart("majeure","Servo")
runtime.createAndStart("ringfinger","Servo")
runtime.createAndStart("pinky","Servo")
runtime.createAndStart("wrist","Servo")
runtime.createAndStart("biceps","Servo")
runtime.createAndStart("rotate","Servo")
runtime.createAndStart("shoulder","Servo")
# runtime.createAndStart("omoplat","Servo")
runtime.createAndStart("neck","Servo")
runtime.createAndStart("rothead","Servo")
# configuration for the arduino & quick test
arduino.setBoard("atmega1280") # atmega328 | atmega168 | mega2560 | atmega1280 etc
arduino.connect("COM7",57600,8,1,0)
sleep(2)
arduino.pinMode(17,0)
arduino.analogReadPollingStart(17)
sleep(1)
arduino.pinMode(17,0)
arduino.analogReadPollingStop(17)
# attach servos
arduino.servoAttach("thumb",2)
arduino.servoAttach("index",3)
arduino.servoAttach("majeure",4)
arduino.servoAttach("ringfinger",5)
arduino.servoAttach("pinky",6)
arduino.servoAttach("wrist",7)
arduino.servoAttach("biceps",8)
arduino.servoAttach("rotate",9)
arduino.servoAttach("shoulder",10)
#arduino.servoAttach("omoplat",11)
arduino.servoAttach("neck",12)
arduino.servoAttach("rothead",13)
# refresh the gui
arduino.publishState()
thumb.publishState()
index.publishState()
majeure.publishState()
ringfinger.publishState()
pinky.publishState()
wrist.publishState()
biceps.publishState()
rotate.publishState()
shoulder.publishState()
#omoplat.publishState()
neck.publishState()
rothead.publishState()
def allopen():
thumb.moveTo(0)
index.moveTo(0)
majeure.moveTo(0)
ringfinger.moveTo(0)
pinky.moveTo(0)
wrist.moveTo(0)
biceps.moveTo(0)
rotate.moveTo(90)
shoulder.moveTo(0)
#omoplat.moveTo(0)
neck.moveTo(90)
rothead.moveTo(90)
def handclose():
thumb.moveTo(130)
index.moveTo(180)
majeure.moveTo(180)
ringfinger.moveTo(180)
pinky.moveTo(180)
wrist.moveTo(180)
biceps.moveTo(90)
rotate.moveTo(50)
shoulder.moveTo(20)
#omoplat.moveTo(0)
neck.moveTo(130)
rothead.moveTo(110)
def pinchmode():
thumb.moveTo(130)
index.moveTo(140)
majeure.moveTo(180)
ringfinger.moveTo(180)
pinky.moveTo(180)
wrist.moveTo(180)
biceps.moveTo(90)
rotate.moveTo(80)
shoulder.moveTo(20)
#omoplat.moveTo(0)
neck.moveTo(140)
rothead.moveTo(110)
def openpinch():
thumb.moveTo(0)
index.moveTo(0)
majeure.moveTo(180)
ringfinger.moveTo(180)
pinky.moveTo(180)
wrist.moveTo(180)
biceps.moveTo(90)
rotate.moveTo(80)
shoulder.moveTo(25)
#omoplat.moveTo(0)
neck.moveTo(145)
rothead.moveTo(125)
def handopen():
thumb.moveTo(0)
index.moveTo(0)
majeure.moveTo(0)
ringfinger.moveTo(0)
pinky.moveTo(0)
wrist.moveTo(180)
biceps.moveTo(80)
rotate.moveTo(85)
shoulder.moveTo(25)
#omoplat.moveTo(0)
neck.moveTo(140)
rothead.moveTo(130)
def handrest():
thumb.moveTo(60)
index.moveTo(40)
majeure.moveTo(30)
ringfinger.moveTo(40)
pinky.moveTo(40)
wrist.moveTo(50)
biceps.moveTo(0)
rotate.moveTo(90)
shoulder.moveTo(0)
#omoplat.moveTo(0)
neck.moveTo(160)
rothead.moveTo(80)
def handopen2():
thumb.moveTo(0)
index.moveTo(0)
majeure.moveTo(0)
ringfinger.moveTo(0)
pinky.moveTo(0)
wrist.moveTo(0)
biceps.moveTo(0)
rotate.moveTo(60)
shoulder.moveTo(0)
#for x in range (0, 180):
# allopen()
# sleep(2.5)
# handclose()
# sleep(2.5)
# pinchmode()
# sleep(1.5)
# openpinch()
# sleep(0.5)
# handopen()
# sleep(1.5)
# handrest()
# sleep(2.0)
# all open | hand close | pinch mode | open pinch | hand open | hand rest | hand open two
def heard():
data = msg_ear_recognized.data[0]
# mouth.speak("you said " + data)
print "heard ", data
ear.stopListening()
if (data == "all open"):
allopen()
elif (data == "hand close"):
handclose()
elif (data == "pinch mode"):
pinchmode()
elif (data == "open pinch"):
openpinch()
elif (data == "hand open"):
handopen()
elif (data == "hand rest"):
handrest()
elif (data == "hand open two"):
handopen2()
ear.startListening()
# ... etc
| <filename>toSort/inMoovHandRobot.py
from java.lang import String
from org.myrobotlab.service import Speech
from org.myrobotlab.service import Sphinx
from org.myrobotlab.service import Runtime
# This demo is a basic speech recognition script.
#
# A set of commands needs to be defined before the recognizer starts
# Internet connectivity is needed initially to download the audio files
# of the Speech service (its default behavior interfaces with Google)
# once the phrases are spoken once, the files are used from that point on
# and internet connectivity is no longer used. These cached files
# can be found ./audioFile/google/<language code>/audrey/phrase.mpe
#
# A message route is created to NOT recognize speech when the speech service is talking.
# Otherwise, initially amusing scenarios can occur such as infinite loops of
# the robot recognizing "hello", then saying "hello", then recognizing "hello"...
#
# The recognized phrase can easily be hooked to additional function such as
# changing the mode of the robot, or moving it. Speech recognition is not
# the best interface to do finely controlled actuation. But, it is very
# convenient to express high-level (e.g. go to center of the room) commands
#
# FYI - The memory requirements for Sphinx are a bit hefty and depending on the
# platform additional JVM arguments might be necessary e.g. -Xmx256m
# create an ear
ear = Runtime.create("ear","Sphinx")
# create the grammar you would like recognized
# this must be done before the service is started
ear.createGrammar("all open | hand close | pinch mode | open pinch | hand open | hand rest | hand open two ")
ear.startService()
# start the mouth
mouth = Runtime.createAndStart("mouth","Speech")
# set up a message route from the ear --to--> python method "heard"
ear.addListener("recognized", python.name, "heard", String().getClass());
# this method is invoked when something is
# recognized by the ear - in this case we
# have the mouth "talk back" the word it recognized
# prevent infinite loop - this will suppress the
# recognition when speaking - default behavior
# when attaching an ear to a mouth :)
ear.attach("mouth")
#create an Arduino & name arduino & index
runtime.createAndStart("arduino","Arduino")
runtime.createAndStart("thumb","Servo")
runtime.createAndStart("index","Servo")
runtime.createAndStart("majeure","Servo")
runtime.createAndStart("ringfinger","Servo")
runtime.createAndStart("pinky","Servo")
runtime.createAndStart("wrist","Servo")
runtime.createAndStart("biceps","Servo")
runtime.createAndStart("rotate","Servo")
runtime.createAndStart("shoulder","Servo")
# runtime.createAndStart("omoplat","Servo")
runtime.createAndStart("neck","Servo")
runtime.createAndStart("rothead","Servo")
# configuration for the arduino & quick test
arduino.setBoard("atmega1280") # atmega328 | atmega168 | mega2560 | atmega1280 etc
arduino.connect("COM7",57600,8,1,0)
sleep(2)
arduino.pinMode(17,0)
arduino.analogReadPollingStart(17)
sleep(1)
arduino.pinMode(17,0)
arduino.analogReadPollingStop(17)
# attach servos
arduino.servoAttach("thumb",2)
arduino.servoAttach("index",3)
arduino.servoAttach("majeure",4)
arduino.servoAttach("ringfinger",5)
arduino.servoAttach("pinky",6)
arduino.servoAttach("wrist",7)
arduino.servoAttach("biceps",8)
arduino.servoAttach("rotate",9)
arduino.servoAttach("shoulder",10)
#arduino.servoAttach("omoplat",11)
arduino.servoAttach("neck",12)
arduino.servoAttach("rothead",13)
# refresh the gui
arduino.publishState()
thumb.publishState()
index.publishState()
majeure.publishState()
ringfinger.publishState()
pinky.publishState()
wrist.publishState()
biceps.publishState()
rotate.publishState()
shoulder.publishState()
#omoplat.publishState()
neck.publishState()
rothead.publishState()
def allopen():
thumb.moveTo(0)
index.moveTo(0)
majeure.moveTo(0)
ringfinger.moveTo(0)
pinky.moveTo(0)
wrist.moveTo(0)
biceps.moveTo(0)
rotate.moveTo(90)
shoulder.moveTo(0)
#omoplat.moveTo(0)
neck.moveTo(90)
rothead.moveTo(90)
def handclose():
thumb.moveTo(130)
index.moveTo(180)
majeure.moveTo(180)
ringfinger.moveTo(180)
pinky.moveTo(180)
wrist.moveTo(180)
biceps.moveTo(90)
rotate.moveTo(50)
shoulder.moveTo(20)
#omoplat.moveTo(0)
neck.moveTo(130)
rothead.moveTo(110)
def pinchmode():
thumb.moveTo(130)
index.moveTo(140)
majeure.moveTo(180)
ringfinger.moveTo(180)
pinky.moveTo(180)
wrist.moveTo(180)
biceps.moveTo(90)
rotate.moveTo(80)
shoulder.moveTo(20)
#omoplat.moveTo(0)
neck.moveTo(140)
rothead.moveTo(110)
def openpinch():
thumb.moveTo(0)
index.moveTo(0)
majeure.moveTo(180)
ringfinger.moveTo(180)
pinky.moveTo(180)
wrist.moveTo(180)
biceps.moveTo(90)
rotate.moveTo(80)
shoulder.moveTo(25)
#omoplat.moveTo(0)
neck.moveTo(145)
rothead.moveTo(125)
def handopen():
thumb.moveTo(0)
index.moveTo(0)
majeure.moveTo(0)
ringfinger.moveTo(0)
pinky.moveTo(0)
wrist.moveTo(180)
biceps.moveTo(80)
rotate.moveTo(85)
shoulder.moveTo(25)
#omoplat.moveTo(0)
neck.moveTo(140)
rothead.moveTo(130)
def handrest():
thumb.moveTo(60)
index.moveTo(40)
majeure.moveTo(30)
ringfinger.moveTo(40)
pinky.moveTo(40)
wrist.moveTo(50)
biceps.moveTo(0)
rotate.moveTo(90)
shoulder.moveTo(0)
#omoplat.moveTo(0)
neck.moveTo(160)
rothead.moveTo(80)
def handopen2():
thumb.moveTo(0)
index.moveTo(0)
majeure.moveTo(0)
ringfinger.moveTo(0)
pinky.moveTo(0)
wrist.moveTo(0)
biceps.moveTo(0)
rotate.moveTo(60)
shoulder.moveTo(0)
#for x in range (0, 180):
# allopen()
# sleep(2.5)
# handclose()
# sleep(2.5)
# pinchmode()
# sleep(1.5)
# openpinch()
# sleep(0.5)
# handopen()
# sleep(1.5)
# handrest()
# sleep(2.0)
# all open | hand close | pinch mode | open pinch | hand open | hand rest | hand open two
def heard():
data = msg_ear_recognized.data[0]
# mouth.speak("you said " + data)
print "heard ", data
ear.stopListening()
if (data == "all open"):
allopen()
elif (data == "hand close"):
handclose()
elif (data == "pinch mode"):
pinchmode()
elif (data == "open pinch"):
openpinch()
elif (data == "hand open"):
handopen()
elif (data == "hand rest"):
handrest()
elif (data == "hand open two"):
handopen2()
ear.startListening()
# ... etc
| en | 0.776147 | # This demo is a basic speech recognition script. # # A set of commands needs to be defined before the recognizer starts # Internet connectivity is needed initially to download the audio files # of the Speech service (its default behavior interfaces with Google) # once the phrases are spoken once, the files are used from that point on # and internet connectivity is no longer used. These cached files # can be found ./audioFile/google/<language code>/audrey/phrase.mpe # # A message route is created to NOT recognize speech when the speech service is talking. # Otherwise, initially amusing scenarios can occur such as infinite loops of # the robot recognizing "hello", then saying "hello", then recognizing "hello"... # # The recognized phrase can easily be hooked to additional function such as # changing the mode of the robot, or moving it. Speech recognition is not # the best interface to do finely controlled actuation. But, it is very # convenient to express high-level (e.g. go to center of the room) commands # # FYI - The memory requirements for Sphinx are a bit hefty and depending on the # platform additional JVM arguments might be necessary e.g. -Xmx256m # create an ear # create the grammar you would like recognized # this must be done before the service is started # start the mouth # set up a message route from the ear --to--> python method "heard" # this method is invoked when something is # recognized by the ear - in this case we # have the mouth "talk back" the word it recognized # prevent infinite loop - this will suppress the # recognition when speaking - default behavior # when attaching an ear to a mouth :) #create an Arduino & name arduino & index # runtime.createAndStart("omoplat","Servo") # configuration for the arduino & quick test # atmega328 | atmega168 | mega2560 | atmega1280 etc # attach servos #arduino.servoAttach("omoplat",11) # refresh the gui #omoplat.publishState() #omoplat.moveTo(0) #omoplat.moveTo(0) #omoplat.moveTo(0) #omoplat.moveTo(0) #omoplat.moveTo(0) #omoplat.moveTo(0) #for x in range (0, 180): # allopen() # sleep(2.5) # handclose() # sleep(2.5) # pinchmode() # sleep(1.5) # openpinch() # sleep(0.5) # handopen() # sleep(1.5) # handrest() # sleep(2.0) # all open | hand close | pinch mode | open pinch | hand open | hand rest | hand open two # mouth.speak("you said " + data) # ... etc | 3.276354 | 3 |
runner/deps/pg8000/dbapi.py | Tanzu-Solutions-Engineering/benchmark | 0 | 6633292 | <reponame>Tanzu-Solutions-Engineering/benchmark
# vim: sw=4:expandtab:foldmethod=marker
#
# Copyright (c) 2007-2009, <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
__author__ = "<NAME>"
import datetime
import time
import interface
import types
import threading
from errors import *
from warnings import warn
##
# The DBAPI level supported. Currently 2.0. This property is part of the
# DBAPI 2.0 specification.
apilevel = "2.0"
##
# Integer constant stating the level of thread safety the DBAPI interface
# supports. This DBAPI interface supports sharing of the module, connections,
# and cursors. This property is part of the DBAPI 2.0 specification.
threadsafety = 3
##
# String property stating the type of parameter marker formatting expected by
# the interface. This value defaults to "format". This property is part of
# the DBAPI 2.0 specification.
# <p>
# Unlike the DBAPI specification, this value is not constant. It can be
# changed to any standard paramstyle value (ie. qmark, numeric, named, format,
# and pyformat).
paramstyle = 'format' # paramstyle can be changed to any DB-API paramstyle
def convert_paramstyle(src_style, query, args):
# I don't see any way to avoid scanning the query string char by char,
# so we might as well take that careful approach and create a
# state-based scanner. We'll use int variables for the state.
# 0 -- outside quoted string
# 1 -- inside single-quote string '...'
# 2 -- inside quoted identifier "..."
# 3 -- inside escaped single-quote string, E'...'
state = 0
output_query = ""
output_args = []
if src_style == "numeric":
output_args = args
elif src_style in ("pyformat", "named"):
mapping_to_idx = {}
i = 0
while 1:
if i == len(query):
break
c = query[i]
# print "begin loop", repr(i), repr(c), repr(state)
if state == 0:
if c == "'":
i += 1
output_query += c
state = 1
elif c == '"':
i += 1
output_query += c
state = 2
elif c == 'E':
# check for escaped single-quote string
i += 1
if i < len(query) and i > 1 and query[i] == "'":
i += 1
output_query += "E'"
state = 3
else:
output_query += c
elif src_style == "qmark" and c == "?":
i += 1
param_idx = len(output_args)
if param_idx == len(args):
raise QueryParameterIndexError("too many parameter fields, not enough parameters")
output_args.append(args[param_idx])
output_query += "$" + str(param_idx + 1)
elif src_style == "numeric" and c == ":":
i += 1
if i < len(query) and i > 1 and query[i].isdigit():
output_query += "$" + query[i]
i += 1
else:
raise QueryParameterParseError("numeric parameter : does not have numeric arg")
elif src_style == "named" and c == ":":
name = ""
while 1:
i += 1
if i == len(query):
break
c = query[i]
if c.isalnum() or c == '_':
name += c
else:
break
if name == "":
raise QueryParameterParseError("empty name of named parameter")
idx = mapping_to_idx.get(name)
if idx == None:
idx = len(output_args)
output_args.append(args[name])
idx += 1
mapping_to_idx[name] = idx
output_query += "$" + str(idx)
elif src_style == "format" and c == "%":
i += 1
if i < len(query) and i > 1:
if query[i] == "s":
param_idx = len(output_args)
if param_idx == len(args):
raise QueryParameterIndexError("too many parameter fields, not enough parameters")
output_args.append(args[param_idx])
output_query += "$" + str(param_idx + 1)
elif query[i] == "%":
output_query += "%"
else:
raise QueryParameterParseError("Only %s and %% are supported")
i += 1
else:
raise QueryParameterParseError("format parameter % does not have format code")
elif src_style == "pyformat" and c == "%":
i += 1
if i < len(query) and i > 1:
if query[i] == "(":
i += 1
# begin mapping name
end_idx = query.find(')', i)
if end_idx == -1:
raise QueryParameterParseError("began pyformat dict read, but couldn't find end of name")
else:
name = query[i:end_idx]
i = end_idx + 1
if i < len(query) and query[i] == "s":
i += 1
idx = mapping_to_idx.get(name)
if idx == None:
idx = len(output_args)
output_args.append(args[name])
idx += 1
mapping_to_idx[name] = idx
output_query += "$" + str(idx)
else:
raise QueryParameterParseError("format not specified or not supported (only %(...)s supported)")
elif query[i] == "%":
output_query += "%"
elif query[i] == "s":
# we have a %s in a pyformat query string. Assume
# support for format instead.
i -= 1
src_style = "format"
else:
raise QueryParameterParseError("Only %(name)s, %s and %% are supported")
else:
i += 1
output_query += c
elif state == 1:
output_query += c
i += 1
if c == "'":
# Could be a double ''
if i < len(query) and query[i] == "'":
# is a double quote.
output_query += query[i]
i += 1
else:
state = 0
elif src_style in ("pyformat","format") and c == "%":
# hm... we're only going to support an escaped percent sign
if i < len(query):
if query[i] == "%":
# good. We already output the first percent sign.
i += 1
else:
raise QueryParameterParseError("'%" + query[i] + "' not supported in quoted string")
elif state == 2:
output_query += c
i += 1
if c == '"':
state = 0
elif src_style in ("pyformat","format") and c == "%":
# hm... we're only going to support an escaped percent sign
if i < len(query):
if query[i] == "%":
# good. We already output the first percent sign.
i += 1
else:
raise QueryParameterParseError("'%" + query[i] + "' not supported in quoted string")
elif state == 3:
output_query += c
i += 1
if c == "\\":
# check for escaped single-quote
if i < len(query) and query[i] == "'":
output_query += "'"
i += 1
elif c == "'":
state = 0
elif src_style in ("pyformat","format") and c == "%":
# hm... we're only going to support an escaped percent sign
if i < len(query):
if query[i] == "%":
# good. We already output the first percent sign.
i += 1
else:
raise QueryParameterParseError("'%" + query[i] + "' not supported in quoted string")
return output_query, tuple(output_args)
def require_open_cursor(fn):
def _fn(self, *args, **kwargs):
if self.cursor == None:
raise CursorClosedError()
return fn(self, *args, **kwargs)
return _fn
##
# The class of object returned by the {@link #ConnectionWrapper.cursor cursor method}.
class CursorWrapper(object):
def __init__(self, conn, connection):
self.cursor = interface.Cursor(conn)
self.arraysize = 1
self._connection = connection
self._override_rowcount = None
##
# This read-only attribute returns a reference to the connection object on
# which the cursor was created.
# <p>
# Stability: Part of a DBAPI 2.0 extension. A warning "DB-API extension
# cursor.connection used" will be fired.
connection = property(lambda self: self._getConnection())
def _getConnection(self):
warn("DB-API extension cursor.connection used", stacklevel=3)
return self._connection
##
# This read-only attribute specifies the number of rows that the last
# .execute*() produced (for DQL statements like 'select') or affected (for
# DML statements like 'update' or 'insert').
# <p>
# The attribute is -1 in case no .execute*() has been performed on the
# cursor or the rowcount of the last operation is cannot be determined by
# the interface.
# <p>
# Stability: Part of the DBAPI 2.0 specification.
rowcount = property(lambda self: self._getRowCount())
@require_open_cursor
def _getRowCount(self):
if self._override_rowcount != None:
return self._override_rowcount
return self.cursor.row_count
##
# This read-only attribute is a sequence of 7-item sequences. Each value
# contains information describing one result column. The 7 items returned
# for each column are (name, type_code, display_size, internal_size,
# precision, scale, null_ok). Only the first two values are provided by
# this interface implementation.
# <p>
# Stability: Part of the DBAPI 2.0 specification.
description = property(lambda self: self._getDescription())
@require_open_cursor
def _getDescription(self):
if self.cursor.row_description == None:
return None
columns = []
for col in self.cursor.row_description:
columns.append((col["name"], col["type_oid"], None, None, None, None, None))
return columns
##
# Executes a database operation. Parameters may be provided as a sequence
# or mapping and will be bound to variables in the operation.
# <p>
# Stability: Part of the DBAPI 2.0 specification.
@require_open_cursor
def execute(self, operation, args=()):
if not self._connection.in_transaction:
self._connection.begin()
self._override_rowcount = None
self._execute(operation, args)
def _execute(self, operation, args=()):
new_query, new_args = convert_paramstyle(paramstyle, operation, args)
try:
self.cursor.execute(new_query, *new_args)
except ConnectionClosedError:
# can't rollback in this case
raise
except:
# any error will rollback the transaction to-date
self._connection.rollback()
raise
def copy_from(self, fileobj, table=None, sep='\t', null=None, query=None):
if query == None:
if table == None:
raise CopyQueryOrTableRequiredError()
query = "COPY %s FROM stdout DELIMITER '%s'" % (table, sep)
if null is not None:
query += " NULL '%s'" % (null,)
self.copy_execute(fileobj, query)
def copy_to(self, fileobj, table=None, sep='\t', null=None, query=None):
if query == None:
if table == None:
raise CopyQueryOrTableRequiredError()
query = "COPY %s TO stdout DELIMITER '%s'" % (table, sep)
if null is not None:
query += " NULL '%s'" % (null,)
self.copy_execute(fileobj, query)
@require_open_cursor
def copy_execute(self, fileobj, query):
try:
self.cursor.execute(query, stream=fileobj)
except ConnectionClosedError:
# can't rollback in this case
raise
except:
# any error will rollback the transaction to-date
import traceback; traceback.print_exc()
self._connection.rollback()
raise
##
# Prepare a database operation and then execute it against all parameter
# sequences or mappings provided.
# <p>
# Stability: Part of the DBAPI 2.0 specification.
@require_open_cursor
def executemany(self, operation, parameter_sets):
if not self._connection.in_transaction:
self._connection.begin()
self._override_rowcount = 0
for parameters in parameter_sets:
self._execute(operation, parameters)
if self.cursor.row_count == -1 or self._override_rowcount == -1:
self._override_rowcount = -1
else:
self._override_rowcount += self.cursor.row_count
##
# Fetch the next row of a query result set, returning a single sequence, or
# None when no more data is available.
# <p>
# Stability: Part of the DBAPI 2.0 specification.
@require_open_cursor
def fetchone(self):
return self.cursor.read_tuple()
##
# Fetch the next set of rows of a query result, returning a sequence of
# sequences. An empty sequence is returned when no more rows are
# available.
# <p>
# Stability: Part of the DBAPI 2.0 specification.
# @param size The number of rows to fetch when called. If not provided,
# the arraysize property value is used instead.
def fetchmany(self, size=None):
if size == None:
size = self.arraysize
rows = []
for i in range(size):
value = self.fetchone()
if value == None:
break
rows.append(value)
return rows
##
# Fetch all remaining rows of a query result, returning them as a sequence
# of sequences.
# <p>
# Stability: Part of the DBAPI 2.0 specification.
@require_open_cursor
def fetchall(self):
return tuple(self.cursor.iterate_tuple())
##
# Close the cursor.
# <p>
# Stability: Part of the DBAPI 2.0 specification.
@require_open_cursor
def close(self):
self.cursor.close()
self.cursor = None
self._override_rowcount = None
def next(self):
warn("DB-API extension cursor.next() used", stacklevel=2)
retval = self.fetchone()
if retval == None:
raise StopIteration()
return retval
def __iter__(self):
warn("DB-API extension cursor.__iter__() used", stacklevel=2)
return self
def setinputsizes(self, sizes):
pass
def setoutputsize(self, size, column=None):
pass
@require_open_cursor
def fileno(self):
return self.cursor.fileno()
@require_open_cursor
def isready(self):
return self.cursor.isready()
def require_open_connection(fn):
def _fn(self, *args, **kwargs):
if self.conn == None:
raise ConnectionClosedError()
return fn(self, *args, **kwargs)
return _fn
##
# The class of object returned by the {@link #connect connect method}.
class ConnectionWrapper(object):
# DBAPI Extension: supply exceptions as attributes on the connection
Warning = property(lambda self: self._getError(Warning))
Error = property(lambda self: self._getError(Error))
InterfaceError = property(lambda self: self._getError(InterfaceError))
DatabaseError = property(lambda self: self._getError(DatabaseError))
OperationalError = property(lambda self: self._getError(OperationalError))
IntegrityError = property(lambda self: self._getError(IntegrityError))
InternalError = property(lambda self: self._getError(InternalError))
ProgrammingError = property(lambda self: self._getError(ProgrammingError))
NotSupportedError = property(lambda self: self._getError(NotSupportedError))
def _getError(self, error):
warn("DB-API extension connection.%s used" % error.__name__, stacklevel=3)
return error
@property
def in_transaction(self):
if self.conn:
return self.conn.in_transaction
return False
def __init__(self, **kwargs):
self.conn = interface.Connection(**kwargs)
self.notifies = []
self.notifies_lock = threading.Lock()
self.conn.NotificationReceived += self._notificationReceived
@require_open_connection
def begin(self):
self.conn.begin()
def _notificationReceived(self, notice):
try:
# psycopg2 compatible notification interface
self.notifies_lock.acquire()
self.notifies.append((notice.backend_pid, notice.condition))
finally:
self.notifies_lock.release()
##
# Creates a {@link #CursorWrapper CursorWrapper} object bound to this
# connection.
# <p>
# Stability: Part of the DBAPI 2.0 specification.
@require_open_connection
def cursor(self):
return CursorWrapper(self.conn, self)
##
# Commits the current database transaction.
# <p>
# Stability: Part of the DBAPI 2.0 specification.
@require_open_connection
def commit(self):
# There's a threading bug here. If a query is sent after the
# commit, but before the begin, it will be executed immediately
# without a surrounding transaction. Like all threading bugs -- it
# sounds unlikely, until it happens every time in one
# application... however, to fix this, we need to lock the
# database connection entirely, so that no cursors can execute
# statements on other threads. Support for that type of lock will
# be done later.
self.conn.commit()
##
# Rolls back the current database transaction.
# <p>
# Stability: Part of the DBAPI 2.0 specification.
@require_open_connection
def rollback(self):
# see bug description in commit.
self.conn.rollback()
##
# Closes the database connection.
# <p>
# Stability: Part of the DBAPI 2.0 specification.
@require_open_connection
def close(self):
self.conn.close()
self.conn = None
##
# Creates a DBAPI 2.0 compatible interface to a PostgreSQL database.
# <p>
# Stability: Part of the DBAPI 2.0 specification.
#
# @param user The username to connect to the PostgreSQL server with. This
# parameter is required.
#
# @keyparam host The hostname of the PostgreSQL server to connect with.
# Providing this parameter is necessary for TCP/IP connections. One of either
# host, or unix_sock, must be provided.
#
# @keyparam unix_sock The path to the UNIX socket to access the database
# through, for example, '/tmp/.s.PGSQL.5432'. One of either unix_sock or host
# must be provided. The port parameter will have no affect if unix_sock is
# provided.
#
# @keyparam port The TCP/IP port of the PostgreSQL server instance. This
# parameter defaults to 5432, the registered and common port of PostgreSQL
# TCP/IP servers.
#
# @keyparam database The name of the database instance to connect with. This
# parameter is optional, if omitted the PostgreSQL server will assume the
# database name is the same as the username.
#
# @keyparam password The user password to connect to the server with. This
# parameter is optional. If omitted, and the database server requests password
# based authentication, the connection will fail. On the other hand, if this
# parameter is provided and the database does not request password
# authentication, then the password will not be used.
#
# @keyparam socket_timeout Socket connect timeout measured in seconds.
# Defaults to 60 seconds.
#
# @keyparam ssl Use SSL encryption for TCP/IP socket. Defaults to False.
#
# @return An instance of {@link #ConnectionWrapper ConnectionWrapper}.
def connect(user, host=None, unix_sock=None, port=5432, database=None, password=<PASSWORD>, socket_timeout=60, ssl=False):
return ConnectionWrapper(user=user, host=host,
unix_sock=unix_sock, port=port, database=database,
password=password, socket_timeout=socket_timeout, ssl=ssl)
def Date(year, month, day):
return datetime.date(year, month, day)
def Time(hour, minute, second):
return datetime.time(hour, minute, second)
def Timestamp(year, month, day, hour, minute, second):
return datetime.datetime(year, month, day, hour, minute, second)
def DateFromTicks(ticks):
return Date(*time.localtime(ticks)[:3])
def TimeFromTicks(ticks):
return Time(*time.localtime(ticks)[3:6])
def TimestampFromTicks(ticks):
return Timestamp(*time.localtime(ticks)[:6])
##
# Construct an object holding binary data.
def Binary(value):
return types.Bytea(value)
# I have no idea what this would be used for by a client app. Should it be
# TEXT, VARCHAR, CHAR? It will only compare against row_description's
# type_code if it is this one type. It is the varchar type oid for now, this
# appears to match expectations in the DB API 2.0 compliance test suite.
STRING = 1043
# bytea type_oid
BINARY = 17
# numeric type_oid
NUMBER = 1700
# timestamp type_oid
DATETIME = 1114
# oid type_oid
ROWID = 26
| # vim: sw=4:expandtab:foldmethod=marker
#
# Copyright (c) 2007-2009, <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
__author__ = "<NAME>"
import datetime
import time
import interface
import types
import threading
from errors import *
from warnings import warn
##
# The DBAPI level supported. Currently 2.0. This property is part of the
# DBAPI 2.0 specification.
apilevel = "2.0"
##
# Integer constant stating the level of thread safety the DBAPI interface
# supports. This DBAPI interface supports sharing of the module, connections,
# and cursors. This property is part of the DBAPI 2.0 specification.
threadsafety = 3
##
# String property stating the type of parameter marker formatting expected by
# the interface. This value defaults to "format". This property is part of
# the DBAPI 2.0 specification.
# <p>
# Unlike the DBAPI specification, this value is not constant. It can be
# changed to any standard paramstyle value (ie. qmark, numeric, named, format,
# and pyformat).
paramstyle = 'format' # paramstyle can be changed to any DB-API paramstyle
def convert_paramstyle(src_style, query, args):
# I don't see any way to avoid scanning the query string char by char,
# so we might as well take that careful approach and create a
# state-based scanner. We'll use int variables for the state.
# 0 -- outside quoted string
# 1 -- inside single-quote string '...'
# 2 -- inside quoted identifier "..."
# 3 -- inside escaped single-quote string, E'...'
state = 0
output_query = ""
output_args = []
if src_style == "numeric":
output_args = args
elif src_style in ("pyformat", "named"):
mapping_to_idx = {}
i = 0
while 1:
if i == len(query):
break
c = query[i]
# print "begin loop", repr(i), repr(c), repr(state)
if state == 0:
if c == "'":
i += 1
output_query += c
state = 1
elif c == '"':
i += 1
output_query += c
state = 2
elif c == 'E':
# check for escaped single-quote string
i += 1
if i < len(query) and i > 1 and query[i] == "'":
i += 1
output_query += "E'"
state = 3
else:
output_query += c
elif src_style == "qmark" and c == "?":
i += 1
param_idx = len(output_args)
if param_idx == len(args):
raise QueryParameterIndexError("too many parameter fields, not enough parameters")
output_args.append(args[param_idx])
output_query += "$" + str(param_idx + 1)
elif src_style == "numeric" and c == ":":
i += 1
if i < len(query) and i > 1 and query[i].isdigit():
output_query += "$" + query[i]
i += 1
else:
raise QueryParameterParseError("numeric parameter : does not have numeric arg")
elif src_style == "named" and c == ":":
name = ""
while 1:
i += 1
if i == len(query):
break
c = query[i]
if c.isalnum() or c == '_':
name += c
else:
break
if name == "":
raise QueryParameterParseError("empty name of named parameter")
idx = mapping_to_idx.get(name)
if idx == None:
idx = len(output_args)
output_args.append(args[name])
idx += 1
mapping_to_idx[name] = idx
output_query += "$" + str(idx)
elif src_style == "format" and c == "%":
i += 1
if i < len(query) and i > 1:
if query[i] == "s":
param_idx = len(output_args)
if param_idx == len(args):
raise QueryParameterIndexError("too many parameter fields, not enough parameters")
output_args.append(args[param_idx])
output_query += "$" + str(param_idx + 1)
elif query[i] == "%":
output_query += "%"
else:
raise QueryParameterParseError("Only %s and %% are supported")
i += 1
else:
raise QueryParameterParseError("format parameter % does not have format code")
elif src_style == "pyformat" and c == "%":
i += 1
if i < len(query) and i > 1:
if query[i] == "(":
i += 1
# begin mapping name
end_idx = query.find(')', i)
if end_idx == -1:
raise QueryParameterParseError("began pyformat dict read, but couldn't find end of name")
else:
name = query[i:end_idx]
i = end_idx + 1
if i < len(query) and query[i] == "s":
i += 1
idx = mapping_to_idx.get(name)
if idx == None:
idx = len(output_args)
output_args.append(args[name])
idx += 1
mapping_to_idx[name] = idx
output_query += "$" + str(idx)
else:
raise QueryParameterParseError("format not specified or not supported (only %(...)s supported)")
elif query[i] == "%":
output_query += "%"
elif query[i] == "s":
# we have a %s in a pyformat query string. Assume
# support for format instead.
i -= 1
src_style = "format"
else:
raise QueryParameterParseError("Only %(name)s, %s and %% are supported")
else:
i += 1
output_query += c
elif state == 1:
output_query += c
i += 1
if c == "'":
# Could be a double ''
if i < len(query) and query[i] == "'":
# is a double quote.
output_query += query[i]
i += 1
else:
state = 0
elif src_style in ("pyformat","format") and c == "%":
# hm... we're only going to support an escaped percent sign
if i < len(query):
if query[i] == "%":
# good. We already output the first percent sign.
i += 1
else:
raise QueryParameterParseError("'%" + query[i] + "' not supported in quoted string")
elif state == 2:
output_query += c
i += 1
if c == '"':
state = 0
elif src_style in ("pyformat","format") and c == "%":
# hm... we're only going to support an escaped percent sign
if i < len(query):
if query[i] == "%":
# good. We already output the first percent sign.
i += 1
else:
raise QueryParameterParseError("'%" + query[i] + "' not supported in quoted string")
elif state == 3:
output_query += c
i += 1
if c == "\\":
# check for escaped single-quote
if i < len(query) and query[i] == "'":
output_query += "'"
i += 1
elif c == "'":
state = 0
elif src_style in ("pyformat","format") and c == "%":
# hm... we're only going to support an escaped percent sign
if i < len(query):
if query[i] == "%":
# good. We already output the first percent sign.
i += 1
else:
raise QueryParameterParseError("'%" + query[i] + "' not supported in quoted string")
return output_query, tuple(output_args)
def require_open_cursor(fn):
def _fn(self, *args, **kwargs):
if self.cursor == None:
raise CursorClosedError()
return fn(self, *args, **kwargs)
return _fn
##
# The class of object returned by the {@link #ConnectionWrapper.cursor cursor method}.
class CursorWrapper(object):
def __init__(self, conn, connection):
self.cursor = interface.Cursor(conn)
self.arraysize = 1
self._connection = connection
self._override_rowcount = None
##
# This read-only attribute returns a reference to the connection object on
# which the cursor was created.
# <p>
# Stability: Part of a DBAPI 2.0 extension. A warning "DB-API extension
# cursor.connection used" will be fired.
connection = property(lambda self: self._getConnection())
def _getConnection(self):
warn("DB-API extension cursor.connection used", stacklevel=3)
return self._connection
##
# This read-only attribute specifies the number of rows that the last
# .execute*() produced (for DQL statements like 'select') or affected (for
# DML statements like 'update' or 'insert').
# <p>
# The attribute is -1 in case no .execute*() has been performed on the
# cursor or the rowcount of the last operation is cannot be determined by
# the interface.
# <p>
# Stability: Part of the DBAPI 2.0 specification.
rowcount = property(lambda self: self._getRowCount())
@require_open_cursor
def _getRowCount(self):
if self._override_rowcount != None:
return self._override_rowcount
return self.cursor.row_count
##
# This read-only attribute is a sequence of 7-item sequences. Each value
# contains information describing one result column. The 7 items returned
# for each column are (name, type_code, display_size, internal_size,
# precision, scale, null_ok). Only the first two values are provided by
# this interface implementation.
# <p>
# Stability: Part of the DBAPI 2.0 specification.
description = property(lambda self: self._getDescription())
@require_open_cursor
def _getDescription(self):
if self.cursor.row_description == None:
return None
columns = []
for col in self.cursor.row_description:
columns.append((col["name"], col["type_oid"], None, None, None, None, None))
return columns
##
# Executes a database operation. Parameters may be provided as a sequence
# or mapping and will be bound to variables in the operation.
# <p>
# Stability: Part of the DBAPI 2.0 specification.
@require_open_cursor
def execute(self, operation, args=()):
if not self._connection.in_transaction:
self._connection.begin()
self._override_rowcount = None
self._execute(operation, args)
def _execute(self, operation, args=()):
new_query, new_args = convert_paramstyle(paramstyle, operation, args)
try:
self.cursor.execute(new_query, *new_args)
except ConnectionClosedError:
# can't rollback in this case
raise
except:
# any error will rollback the transaction to-date
self._connection.rollback()
raise
def copy_from(self, fileobj, table=None, sep='\t', null=None, query=None):
if query == None:
if table == None:
raise CopyQueryOrTableRequiredError()
query = "COPY %s FROM stdout DELIMITER '%s'" % (table, sep)
if null is not None:
query += " NULL '%s'" % (null,)
self.copy_execute(fileobj, query)
def copy_to(self, fileobj, table=None, sep='\t', null=None, query=None):
if query == None:
if table == None:
raise CopyQueryOrTableRequiredError()
query = "COPY %s TO stdout DELIMITER '%s'" % (table, sep)
if null is not None:
query += " NULL '%s'" % (null,)
self.copy_execute(fileobj, query)
@require_open_cursor
def copy_execute(self, fileobj, query):
try:
self.cursor.execute(query, stream=fileobj)
except ConnectionClosedError:
# can't rollback in this case
raise
except:
# any error will rollback the transaction to-date
import traceback; traceback.print_exc()
self._connection.rollback()
raise
##
# Prepare a database operation and then execute it against all parameter
# sequences or mappings provided.
# <p>
# Stability: Part of the DBAPI 2.0 specification.
@require_open_cursor
def executemany(self, operation, parameter_sets):
if not self._connection.in_transaction:
self._connection.begin()
self._override_rowcount = 0
for parameters in parameter_sets:
self._execute(operation, parameters)
if self.cursor.row_count == -1 or self._override_rowcount == -1:
self._override_rowcount = -1
else:
self._override_rowcount += self.cursor.row_count
##
# Fetch the next row of a query result set, returning a single sequence, or
# None when no more data is available.
# <p>
# Stability: Part of the DBAPI 2.0 specification.
@require_open_cursor
def fetchone(self):
return self.cursor.read_tuple()
##
# Fetch the next set of rows of a query result, returning a sequence of
# sequences. An empty sequence is returned when no more rows are
# available.
# <p>
# Stability: Part of the DBAPI 2.0 specification.
# @param size The number of rows to fetch when called. If not provided,
# the arraysize property value is used instead.
def fetchmany(self, size=None):
if size == None:
size = self.arraysize
rows = []
for i in range(size):
value = self.fetchone()
if value == None:
break
rows.append(value)
return rows
##
# Fetch all remaining rows of a query result, returning them as a sequence
# of sequences.
# <p>
# Stability: Part of the DBAPI 2.0 specification.
@require_open_cursor
def fetchall(self):
return tuple(self.cursor.iterate_tuple())
##
# Close the cursor.
# <p>
# Stability: Part of the DBAPI 2.0 specification.
@require_open_cursor
def close(self):
self.cursor.close()
self.cursor = None
self._override_rowcount = None
def next(self):
warn("DB-API extension cursor.next() used", stacklevel=2)
retval = self.fetchone()
if retval == None:
raise StopIteration()
return retval
def __iter__(self):
warn("DB-API extension cursor.__iter__() used", stacklevel=2)
return self
def setinputsizes(self, sizes):
pass
def setoutputsize(self, size, column=None):
pass
@require_open_cursor
def fileno(self):
return self.cursor.fileno()
@require_open_cursor
def isready(self):
return self.cursor.isready()
def require_open_connection(fn):
def _fn(self, *args, **kwargs):
if self.conn == None:
raise ConnectionClosedError()
return fn(self, *args, **kwargs)
return _fn
##
# The class of object returned by the {@link #connect connect method}.
class ConnectionWrapper(object):
# DBAPI Extension: supply exceptions as attributes on the connection
Warning = property(lambda self: self._getError(Warning))
Error = property(lambda self: self._getError(Error))
InterfaceError = property(lambda self: self._getError(InterfaceError))
DatabaseError = property(lambda self: self._getError(DatabaseError))
OperationalError = property(lambda self: self._getError(OperationalError))
IntegrityError = property(lambda self: self._getError(IntegrityError))
InternalError = property(lambda self: self._getError(InternalError))
ProgrammingError = property(lambda self: self._getError(ProgrammingError))
NotSupportedError = property(lambda self: self._getError(NotSupportedError))
def _getError(self, error):
warn("DB-API extension connection.%s used" % error.__name__, stacklevel=3)
return error
@property
def in_transaction(self):
if self.conn:
return self.conn.in_transaction
return False
def __init__(self, **kwargs):
self.conn = interface.Connection(**kwargs)
self.notifies = []
self.notifies_lock = threading.Lock()
self.conn.NotificationReceived += self._notificationReceived
@require_open_connection
def begin(self):
self.conn.begin()
def _notificationReceived(self, notice):
try:
# psycopg2 compatible notification interface
self.notifies_lock.acquire()
self.notifies.append((notice.backend_pid, notice.condition))
finally:
self.notifies_lock.release()
##
# Creates a {@link #CursorWrapper CursorWrapper} object bound to this
# connection.
# <p>
# Stability: Part of the DBAPI 2.0 specification.
@require_open_connection
def cursor(self):
return CursorWrapper(self.conn, self)
##
# Commits the current database transaction.
# <p>
# Stability: Part of the DBAPI 2.0 specification.
@require_open_connection
def commit(self):
# There's a threading bug here. If a query is sent after the
# commit, but before the begin, it will be executed immediately
# without a surrounding transaction. Like all threading bugs -- it
# sounds unlikely, until it happens every time in one
# application... however, to fix this, we need to lock the
# database connection entirely, so that no cursors can execute
# statements on other threads. Support for that type of lock will
# be done later.
self.conn.commit()
##
# Rolls back the current database transaction.
# <p>
# Stability: Part of the DBAPI 2.0 specification.
@require_open_connection
def rollback(self):
# see bug description in commit.
self.conn.rollback()
##
# Closes the database connection.
# <p>
# Stability: Part of the DBAPI 2.0 specification.
@require_open_connection
def close(self):
self.conn.close()
self.conn = None
##
# Creates a DBAPI 2.0 compatible interface to a PostgreSQL database.
# <p>
# Stability: Part of the DBAPI 2.0 specification.
#
# @param user The username to connect to the PostgreSQL server with. This
# parameter is required.
#
# @keyparam host The hostname of the PostgreSQL server to connect with.
# Providing this parameter is necessary for TCP/IP connections. One of either
# host, or unix_sock, must be provided.
#
# @keyparam unix_sock The path to the UNIX socket to access the database
# through, for example, '/tmp/.s.PGSQL.5432'. One of either unix_sock or host
# must be provided. The port parameter will have no affect if unix_sock is
# provided.
#
# @keyparam port The TCP/IP port of the PostgreSQL server instance. This
# parameter defaults to 5432, the registered and common port of PostgreSQL
# TCP/IP servers.
#
# @keyparam database The name of the database instance to connect with. This
# parameter is optional, if omitted the PostgreSQL server will assume the
# database name is the same as the username.
#
# @keyparam password The user password to connect to the server with. This
# parameter is optional. If omitted, and the database server requests password
# based authentication, the connection will fail. On the other hand, if this
# parameter is provided and the database does not request password
# authentication, then the password will not be used.
#
# @keyparam socket_timeout Socket connect timeout measured in seconds.
# Defaults to 60 seconds.
#
# @keyparam ssl Use SSL encryption for TCP/IP socket. Defaults to False.
#
# @return An instance of {@link #ConnectionWrapper ConnectionWrapper}.
def connect(user, host=None, unix_sock=None, port=5432, database=None, password=<PASSWORD>, socket_timeout=60, ssl=False):
return ConnectionWrapper(user=user, host=host,
unix_sock=unix_sock, port=port, database=database,
password=password, socket_timeout=socket_timeout, ssl=ssl)
def Date(year, month, day):
return datetime.date(year, month, day)
def Time(hour, minute, second):
return datetime.time(hour, minute, second)
def Timestamp(year, month, day, hour, minute, second):
return datetime.datetime(year, month, day, hour, minute, second)
def DateFromTicks(ticks):
return Date(*time.localtime(ticks)[:3])
def TimeFromTicks(ticks):
return Time(*time.localtime(ticks)[3:6])
def TimestampFromTicks(ticks):
return Timestamp(*time.localtime(ticks)[:6])
##
# Construct an object holding binary data.
def Binary(value):
return types.Bytea(value)
# I have no idea what this would be used for by a client app. Should it be
# TEXT, VARCHAR, CHAR? It will only compare against row_description's
# type_code if it is this one type. It is the varchar type oid for now, this
# appears to match expectations in the DB API 2.0 compliance test suite.
STRING = 1043
# bytea type_oid
BINARY = 17
# numeric type_oid
NUMBER = 1700
# timestamp type_oid
DATETIME = 1114
# oid type_oid
ROWID = 26 | en | 0.77719 | # vim: sw=4:expandtab:foldmethod=marker # # Copyright (c) 2007-2009, <NAME> # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # * The name of the author may not be used to endorse or promote products # derived from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. ## # The DBAPI level supported. Currently 2.0. This property is part of the # DBAPI 2.0 specification. ## # Integer constant stating the level of thread safety the DBAPI interface # supports. This DBAPI interface supports sharing of the module, connections, # and cursors. This property is part of the DBAPI 2.0 specification. ## # String property stating the type of parameter marker formatting expected by # the interface. This value defaults to "format". This property is part of # the DBAPI 2.0 specification. # <p> # Unlike the DBAPI specification, this value is not constant. It can be # changed to any standard paramstyle value (ie. qmark, numeric, named, format, # and pyformat). # paramstyle can be changed to any DB-API paramstyle # I don't see any way to avoid scanning the query string char by char, # so we might as well take that careful approach and create a # state-based scanner. We'll use int variables for the state. # 0 -- outside quoted string # 1 -- inside single-quote string '...' # 2 -- inside quoted identifier "..." # 3 -- inside escaped single-quote string, E'...' # print "begin loop", repr(i), repr(c), repr(state) # check for escaped single-quote string # begin mapping name # we have a %s in a pyformat query string. Assume # support for format instead. # Could be a double '' # is a double quote. # hm... we're only going to support an escaped percent sign # good. We already output the first percent sign. # hm... we're only going to support an escaped percent sign # good. We already output the first percent sign. # check for escaped single-quote # hm... we're only going to support an escaped percent sign # good. We already output the first percent sign. ## # The class of object returned by the {@link #ConnectionWrapper.cursor cursor method}. ## # This read-only attribute returns a reference to the connection object on # which the cursor was created. # <p> # Stability: Part of a DBAPI 2.0 extension. 
A warning "DB-API extension # cursor.connection used" will be fired. ## # This read-only attribute specifies the number of rows that the last # .execute*() produced (for DQL statements like 'select') or affected (for # DML statements like 'update' or 'insert'). # <p> # The attribute is -1 in case no .execute*() has been performed on the # cursor or the rowcount of the last operation is cannot be determined by # the interface. # <p> # Stability: Part of the DBAPI 2.0 specification. ## # This read-only attribute is a sequence of 7-item sequences. Each value # contains information describing one result column. The 7 items returned # for each column are (name, type_code, display_size, internal_size, # precision, scale, null_ok). Only the first two values are provided by # this interface implementation. # <p> # Stability: Part of the DBAPI 2.0 specification. ## # Executes a database operation. Parameters may be provided as a sequence # or mapping and will be bound to variables in the operation. # <p> # Stability: Part of the DBAPI 2.0 specification. # can't rollback in this case # any error will rollback the transaction to-date # can't rollback in this case # any error will rollback the transaction to-date ## # Prepare a database operation and then execute it against all parameter # sequences or mappings provided. # <p> # Stability: Part of the DBAPI 2.0 specification. ## # Fetch the next row of a query result set, returning a single sequence, or # None when no more data is available. # <p> # Stability: Part of the DBAPI 2.0 specification. ## # Fetch the next set of rows of a query result, returning a sequence of # sequences. An empty sequence is returned when no more rows are # available. # <p> # Stability: Part of the DBAPI 2.0 specification. # @param size The number of rows to fetch when called. If not provided, # the arraysize property value is used instead. ## # Fetch all remaining rows of a query result, returning them as a sequence # of sequences. # <p> # Stability: Part of the DBAPI 2.0 specification. ## # Close the cursor. # <p> # Stability: Part of the DBAPI 2.0 specification. ## # The class of object returned by the {@link #connect connect method}. # DBAPI Extension: supply exceptions as attributes on the connection # psycopg2 compatible notification interface ## # Creates a {@link #CursorWrapper CursorWrapper} object bound to this # connection. # <p> # Stability: Part of the DBAPI 2.0 specification. ## # Commits the current database transaction. # <p> # Stability: Part of the DBAPI 2.0 specification. # There's a threading bug here. If a query is sent after the # commit, but before the begin, it will be executed immediately # without a surrounding transaction. Like all threading bugs -- it # sounds unlikely, until it happens every time in one # application... however, to fix this, we need to lock the # database connection entirely, so that no cursors can execute # statements on other threads. Support for that type of lock will # be done later. ## # Rolls back the current database transaction. # <p> # Stability: Part of the DBAPI 2.0 specification. # see bug description in commit. ## # Closes the database connection. # <p> # Stability: Part of the DBAPI 2.0 specification. ## # Creates a DBAPI 2.0 compatible interface to a PostgreSQL database. # <p> # Stability: Part of the DBAPI 2.0 specification. # # @param user The username to connect to the PostgreSQL server with. This # parameter is required. # # @keyparam host The hostname of the PostgreSQL server to connect with. 
# Providing this parameter is necessary for TCP/IP connections. One of either # host, or unix_sock, must be provided. # # @keyparam unix_sock The path to the UNIX socket to access the database # through, for example, '/tmp/.s.PGSQL.5432'. One of either unix_sock or host # must be provided. The port parameter will have no affect if unix_sock is # provided. # # @keyparam port The TCP/IP port of the PostgreSQL server instance. This # parameter defaults to 5432, the registered and common port of PostgreSQL # TCP/IP servers. # # @keyparam database The name of the database instance to connect with. This # parameter is optional, if omitted the PostgreSQL server will assume the # database name is the same as the username. # # @keyparam password The user password to connect to the server with. This # parameter is optional. If omitted, and the database server requests password # based authentication, the connection will fail. On the other hand, if this # parameter is provided and the database does not request password # authentication, then the password will not be used. # # @keyparam socket_timeout Socket connect timeout measured in seconds. # Defaults to 60 seconds. # # @keyparam ssl Use SSL encryption for TCP/IP socket. Defaults to False. # # @return An instance of {@link #ConnectionWrapper ConnectionWrapper}. ## # Construct an object holding binary data. # I have no idea what this would be used for by a client app. Should it be # TEXT, VARCHAR, CHAR? It will only compare against row_description's # type_code if it is this one type. It is the varchar type oid for now, this # appears to match expectations in the DB API 2.0 compliance test suite. # bytea type_oid # numeric type_oid # timestamp type_oid # oid type_oid | 1.371917 | 1 |
docs/loadcensus/configureCensus.py | azavea/district-builder-dtl-pa | 5 | 6633293 | <reponame>azavea/district-builder-dtl-pa
#!/usr/bin/env python
# Framework for loading census data
# Inputs: FIPS state code, list of variables to include as additional subjects
# Requirements:
# - external software: DistrictBuilder, R, gdal, wget, unzip
# TODO -- check for VTD's
import re # regular expressions
import sys # arg lists etc
import glob # globbing
import commands # system commands
import os # os commands
import stat
import subprocess # for external commands
import zipfile # unzipping
import rpy2.robjects as robjects
import shutil
import psycopg2 as dbapi2
import optparse
import string
import time
###
### Globals
###
PUBLICMAPPINGPASS="<PASSWORD>"
# TODO : build in vote geographies, numbers of districts per state
#VOTEGEOGRAPHIES={"county":"COUNTYFP10","tract":"TRACTCE10","block":"BLOCKCE10"}
### clear_publicmapping_db
###
### Truncate database
def clear_publicmapping_db():
    db = dbapi2.connect (database="publicmapping", user="publicmapping", password=PUBLICMAPPINGPASS)
cur = db.cursor()
    redtable=["redistricting_characteristic","redistricting_computedcharacteristic",
              "redistricting_computeddistrictscore","redistricting_computedplanscore",
              "redistricting_contiguityoverride","redistricting_district",
              "redistricting_geolevel","redistricting_geounit",
              "redistricting_legislativebody","redistricting_legislativedefault",
              "redistricting_legislativelevel","redistricting_plan",
              "redistricting_profile","redistricting_scoreargument",
              "redistricting_scoredisplay","redistricting_scorefunction",
              "redistricting_scorepanel","redistricting_scorepanel_displays",
              "redistricting_scorepanel_score_functions","redistricting_subject",
              "redistricting_target","redistricting_validationcriteria"]
for i in redtable:
cur.execute("truncate table %s CASCADE" % i)
db.commit()
db.close()
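# Example (illustrative): calling clear_publicmapping_db() empties every
# redistricting_* table listed above via TRUNCATE ... CASCADE, which also
# removes dependent rows in related tables; the schema itself is left intact.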
### Drop DB
def drop_db():
olddir = os.getcwd()
os.chdir("/tmp")
subprocess.check_call(["service","tomcat6","stop"])
subprocess.check_call(["service","celeryd","stop"])
subprocess.check_call(["service","apache2","stop"])
subprocess.check_call(["service","apache2","restart"])
subprocess.check_call(["service","postgresql","restart"])
subprocess.check_call(['su postgres -c "dropdb publicmapping"'],shell=True)
subprocess.check_call(['cat /projects/PublicMapping/DistrictBuilder/sql/publicmapping_db.sql | su postgres -c "psql -f - postgres"'],shell=True)
subprocess.check_call(["service","apache2","start"])
subprocess.check_call(["service","tomcat6","start"])
os.chdir(olddir)
### Install dependencies
###
### This attempts to install dependencies using apt-get
###
def install_dependencies():
if (os.path.exists("/usr/bin/ogrinfo")==False) :
cmdarg = 'gdal-bin'
subprocess.check_call(["apt-get","install",cmdarg])
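# Illustrative shell equivalent of the check above (assumes an Ubuntu/apt host):
#   which ogrinfo || apt-get install gdal-bin
# i.e. /usr/bin/ogrinfo is used as a proxy for "GDAL is already installed".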
###
### Retrieve data files
###
### This retrieves the census files, unzips them, and reprojects (using ogr2ogr)
def get_census_data(stateFips):
    # zero-pad single-digit state FIPS codes to match census file naming
    if (stateFips<10) :
        stateFips = "0%s" % stateFips
print 'Retrieving census shapefiles...'
# put all data in publicmapping data directory
olddir = os.getcwd()
os.chdir("/projects/PublicMapping/data/")
# obtain state boundary files from census
cenBlockFilePrefix = 'tl_2010_%s_tabblock10' % stateFips
cenTractFilePrefix = 'tl_2010_%s_tract10' % stateFips
cenCountyFilePrefix= 'tl_2010_%s_county10' % stateFips
cmdarg = 'ftp://ftp2.census.gov/geo/tiger/TIGER2010/TABBLOCK/2010/%s.zip' % cenBlockFilePrefix
subprocess.check_call(["wget","-nc",cmdarg])
cmdarg = 'ftp://ftp2.census.gov/geo/tiger/TIGER2010/TRACT/2010/%s.zip' % cenTractFilePrefix
subprocess.check_call(["wget","-N",cmdarg])
cmdarg = 'ftp://ftp2.census.gov/geo/tiger/TIGER2010/COUNTY/2010/%s.zip' % cenCountyFilePrefix
subprocess.check_call(["wget","-N",cmdarg])
# get additional data from our S3 bucket
print 'Retrieving additional data...'
cmdarg = 'https://s3.amazonaws.com/redistricting_supplement_data/redist/%s_redist_data.zip' % stateFips
subprocess.check_call(["wget","-N",cmdarg])
    cmdarg = 'https://s3.amazonaws.com/redistricting_supplement_data/redist/%s_contiguity_overrides.csv' % stateFips
    # plain call (not check_call): the overrides file does not exist for every
    # state, so a failed download should not abort the whole setup
    subprocess.call(["wget","-N",cmdarg])
print 'Unzipping files ...'
# unzip data files
for i in [ cenBlockFilePrefix, cenTractFilePrefix, cenCountyFilePrefix ] :
zfile = '%s.zip' % i
print ('Unzipping %s' %zfile)
myzip = zipfile.ZipFile(zfile, 'r')
myzip.extractall()
myzip = zipfile.ZipFile('%s_redist_data.zip' % stateFips, 'r')
    myzip.extractall()
    # Reproject block data
print 'Reprojecting block shapefile...'
if (os.path.exists("census_blocks.shp")) :
os.remove('census_blocks.shp')
if (os.path.exists("census_tracts.shp")) :
os.remove('census_tracts.shp')
if (os.path.exists("census_counties.shp")) :
os.remove('census_counties.shp')
subprocess.check_call(["ogr2ogr",'-overwrite','-t_srs','EPSG:3785','census_blocks.shp','%s.shp' % cenBlockFilePrefix ])
subprocess.check_call(["ogr2ogr",'-overwrite','-t_srs','EPSG:3785','census_tracts.shp','%s.shp' % cenTractFilePrefix])
subprocess.check_call(["ogr2ogr",'-overwrite','-t_srs','EPSG:3785','census_counties.shp','%s.shp' % cenCountyFilePrefix])
# standardize file names
print 'Copying data files...'
shutil.copy('%s_redist_data.csv' %stateFips , 'redist_data.csv' )
if (os.path.exists("redist_overrides.csv")) :
os.remove('redist_overrides.csv')
if (os.path.exists("%s_contiguity_overrides.csv" % stateFips)) :
shutil.copy("%s_contiguity_overrides.csv" % stateFips,'redist_overrides.csv')
os.chdir(olddir)
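# Illustrative usage sketch (the FIPS code is an assumption for the example;
# 42 is Pennsylvania): get_census_data(42) downloads the 2010 block/tract/
# county shapefiles plus the supplemental redistricting data, unzips them, and
# reprojects the shapefiles to EPSG:3785 under /projects/PublicMapping/data/.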
###
### TEMPLATING - SLD's
###
# general template classes
class DictionaryTemplate:
    def __init__(self, dict=None, **keywords):
        # guard against the shared-mutable-default pitfall: each instance
        # gets its own dictionary unless one is explicitly supplied
        self.dict = dict if dict is not None else {}
        self.dict.update(keywords)
def __str__(self):
return self._template % self
def __getitem__(self, key):
return self._process(key.split("|"))
def _process(self, l):
arg = l[0]
if len(l) == 1:
if arg in self.dict:
return self.dict[arg]
elif hasattr(self, arg) and callable(getattr(self, arg)):
return getattr(self, arg)()
else:
raise KeyError(arg)
else:
func_name = l[1]
if func_name in self.dict:
func = self.dict[func_name]
else:
func = getattr(self, func_name)
return func(self._process([arg]))
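# A minimal sketch of how DictionaryTemplate composes (the demo class below
# is hypothetical and unused by the rest of this script). It illustrates the
# "key|func" pipe syntax resolved by _process(): the part before the pipe is
# looked up in self.dict (or called as a method), and the part after it names
# a function/class used to transform that value. This is how %(lst|li)s in
# the SLD skeletons below renders a list of dicts through a ListTemplate
# subclass such as Sld_Poly_Template.
class _DemoTemplate(DictionaryTemplate):
    _template = "Hello %(name)s: %(items|fmt_items)s"
    def fmt_items(self, items):
        return ", ".join(items)
# str(_DemoTemplate(name="mapper", items=["a", "b"])) -> "Hello mapper: a, b"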
class ListTemplate:
    def __init__(self, input_list=None):
        # avoid a shared mutable default list across instances
        self.input_list = input_list if input_list is not None else []
def __str__(self):
return "\n".join([self._template % x for x in self.input_list])
class Empty_Template(ListTemplate):
_template = """
"""
###
### SLD Skeleton Classes
###
class SldList_Template(DictionaryTemplate):
_template = """<?xml version="1.0" encoding="ISO-8859-1"?>
<StyledLayerDescriptor version="1.0.0" xmlns="http://www.opengis.net/sld" xmlns:ogc="http://www.opengis.net/ogc" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.opengis.net/sld http://schemas.opengis.net/sld/1.0.0/StyledLayerDescriptor.xsd">
<NamedLayer>
<Name>%(layername)s</Name>
<UserStyle>
<Title>%(layertitle)s</Title>
<Abstract>%(layerabs)s</Abstract>
<FeatureTypeStyle>
%(slst|sli)s
%(lst|li)s
%(elst|eli)s
</FeatureTypeStyle>
</UserStyle>
</NamedLayer>
</StyledLayerDescriptor>
"""
class Sld_Poly_Template(ListTemplate):
_template = """
<Rule>
<Title>%(title)s</Title>
<PolygonSymbolizer>
<Fill>
<CssParameter name="fill">%(fill)s</CssParameter>
<CssParameter name="fill-opacity">%(fillopacity)s</CssParameter>
</Fill>
</PolygonSymbolizer>
</Rule>
"""
class Sld_PolyB_Template(ListTemplate):
_template = """
<Rule>
<Title>%(title)s</Title>
<PolygonSymbolizer>
<Fill>
<CssParameter name="fill">%(fill)s</CssParameter>
<CssParameter name="fill-opacity">%(fillopacity)s</CssParameter>
</Fill>
<Stroke>
<CssParameter name="stroke">%(stroke)s</CssParameter>
<CssParameter name="stroke-width">%(strokewidth)s</CssParameter>
<CssParameter name="stroke-opacity">%(strokeopacity)s</CssParameter>
</Stroke>
</PolygonSymbolizer>
</Rule>
"""
# plain fill template
class Sld_Line_Template(ListTemplate):
_template = """
<Rule>
<Title>%(title)s</Title>
<LineSymbolizer>
<Stroke>
<CssParameter name="stroke">%(stroke)s</CssParameter>
<CssParameter name="stroke-width">%(strokewidth)s</CssParameter>
<CssParameter name="stroke-opacity">%(strokeopacity)s</CssParameter>
</Stroke>
</LineSymbolizer>
</Rule>
"""
# min-max range template
class Sld_Range_Template(ListTemplate):
_template = """
<Rule>
<Title>%(bottom)s-%(top)s</Title>
<ogc:Filter>
<ogc:And>
<ogc:PropertyIsLessThan>
<ogc:PropertyName>%(unit)s</ogc:PropertyName>
<ogc:Literal>%(top)s</ogc:Literal>
</ogc:PropertyIsLessThan>
<ogc:PropertyIsGreaterThanOrEqualTo>
<ogc:PropertyName>%(unit)s</ogc:PropertyName>
<ogc:Literal>%(bottom)s</ogc:Literal>
</ogc:PropertyIsGreaterThanOrEqualTo>
</ogc:And>
</ogc:Filter>
<PolygonSymbolizer>
<Fill>
<CssParameter name="fill">%(fill)s</CssParameter>
<CssParameter name="fill-opacity">%(fillopacity)s</CssParameter>
</Fill>
</PolygonSymbolizer>
</Rule>
"""
class Sld_URange_Template(ListTemplate):
_template = """
<Rule>
<Title>%(bottom)s-%(top)s</Title>
<ogc:Filter>
<ogc:PropertyIsGreaterThanOrEqualTo>
<ogc:PropertyName>%(unit)s</ogc:PropertyName>
<ogc:Literal>%(bottom)s</ogc:Literal>
</ogc:PropertyIsGreaterThanOrEqualTo>
</ogc:Filter>
<PolygonSymbolizer>
<Fill>
<CssParameter name="fill">%(fill)s</CssParameter>
<CssParameter name="fill-opacity">%(fillopacity)s</CssParameter>
</Fill>
</PolygonSymbolizer>
</Rule>
"""
def gensld_none(geoname):
target_file = '/projects/PublicMapping/DistrictBuilder/sld/pmp:%s_none.sld' % (geoname)
f = open(target_file,'w')
f.write ( str(SldList_Template(layername="%s No fill" % (geoname),layertitle="%s No Fill" % (geoname) ,layerabs="A style showing the boundaries of a geounit with a transparent fill", slst=[],sli=Empty_Template, lst=[{"title":"Fill","fill":"#FFFFFF","fillopacity":"1.0"}],li=Sld_Poly_Template,elst=[{"title":"Boundary","stroke":"#555555","strokewidth":"3.00","strokeopacity":"1.0"}],eli=Sld_Line_Template)) )
f.write("\n")
f.close()
os.chmod(target_file,stat.S_IRUSR|stat.S_IRGRP|stat.S_IROTH)
def gensld_boundaries(geoname):
target_file = '/projects/PublicMapping/DistrictBuilder/sld/pmp:%s_boundaries.sld' % (geoname)
f = open(target_file,'w')
f.write ( str(SldList_Template(layername="%s Boundaries" % (geoname) ,layertitle="%s Boundaries Only" %(geoname),layerabs="A style showing the boundaries of a geounit", slst=[] ,sli=Empty_Template, lst=[],li=Empty_Template,elst=[{"title":"County Boundaries","fill":"#000000","fillopacity":"0.0","stroke":"#2255FF","strokewidth":"2","strokeopacity":"0.35"}],eli=Sld_PolyB_Template)))
f.write("\n")
f.close()
os.chmod(target_file,stat.S_IRUSR|stat.S_IRGRP|stat.S_IROTH)
#TODO: generalize to any number of choropleths
def gensld_choro(geoname,varname,vartitle,quantiles):
gensld_choro_internal(geoname,varname,vartitle,quantiles,unit="number")
def gensld_choro_internal(geoname,varname,vartitle,quantiles,unit="number"):
# WARNING: sld files need to be lower case to be compatible with postgres views
lvarname = string.lower(varname)
target_file = '/projects/PublicMapping/DistrictBuilder/sld/pmp:%s_%s.sld' % (geoname,lvarname)
varabs="Grayscale choropleth based on quantiles of %s" % (varname)
valuelist= [
{"top": str(quantiles[4]),
"bottom": str(quantiles[3]),
"fill": "#444444",
"fillopacity":"1.0",
"unit":unit},
{"top": str(quantiles[3]),
"bottom": str(quantiles[2]),
"fill": "#777777",
"fillopacity":"1.0",
"unit":unit},
{"top": str(quantiles[2]),
"bottom": str(quantiles[1]),
"fill": "#AAAAAA",
"fillopacity":"1.0",
"unit":unit},
{"top": str(quantiles[1]),
"bottom": str(quantiles[0]),
"fill": "#EEEEEE",
"fillopacity":"1.0",
"unit":unit}]
svaluelist = [{"top": str(quantiles[5]),
"bottom": str(quantiles[4]),
"fill": "#000000",
"fillopacity":"1.0",
"unit":unit}]
f = open(target_file,'w')
f.write(str( SldList_Template(layername=lvarname,layertitle=vartitle,layerabs=varabs,slst=svaluelist,sli=Sld_URange_Template, lst=valuelist,li=Sld_Range_Template,elst=[{"title":"Boundary","stroke":"#555555","strokewidth":"0.25","strokeopacity":"1.0"}],eli=Sld_Line_Template) ))
f.write("\n")
f.close()
os.chmod(target_file,stat.S_IRUSR|stat.S_IRGRP|stat.S_IROTH)
def gensld_choro_denquint(geoname,varname,vartitle,dummy):
quantiles=[0,0.2,0.4,0.6,0.8,1]
gensld_choro_internal(geoname,varname,vartitle,quantiles,unit="percentage")
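# Illustrative usage sketch (the quantile breakpoints below are invented for
# the example; the real ones are expected to come from quantiles of the
# loaded census data elsewhere in this script):
#   gensld_none('census_blocks')
#   gensld_boundaries('census_blocks')
#   gensld_choro('census_blocks', 'VAP_B', 'Black VAP', [0, 10, 50, 200, 1000, 10000])
# Each call writes a pmp:<geoname>_<suffix>.sld file under
# /projects/PublicMapping/DistrictBuilder/sld/ and makes it world-readable.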
### Config file generation
### TODO: has_vtds==1 has not been fully implemented
### parameterize thresholds?
class Config_Template(DictionaryTemplate):
_template = """<!-- Define Internal Entities to avoid Repeated Entering of Values -->
<!DOCTYPE DistrictBuilder [
<!ENTITY num_districts_congress "%(num_districts_congress)s">
<!ENTITY num_districts_house "%(num_districts_house)s">
<!ENTITY num_districts_senate "%(num_districts_senate)s">
<!ENTITY pop_congress "%(pop_congress)s">
<!ENTITY pop_house "%(pop_house)s">
<!ENTITY pop_senate "%(pop_senate)s">
<!ENTITY pop_congress_min "%(pop_congress_min)s">
<!ENTITY pop_house_min "%(pop_house_min)s">
<!ENTITY pop_senate_min "%(pop_senate_min)s">
<!ENTITY pop_congress_max "%(pop_congress_max)s">
<!ENTITY pop_house_max "%(pop_house_max)s">
<!ENTITY pop_senate_max "%(pop_senate_max)s">
<!ENTITY target_hisp_congress "%(target_hisp_congress)s">
<!ENTITY target_hisp_senate "%(target_hisp_senate)s">
<!ENTITY target_hisp_house "%(target_hisp_house)s">
<!ENTITY target_bl_congress "%(target_bl_congress)s">
<!ENTITY target_bl_senate "%(target_bl_senate)s">
<!ENTITY target_bl_house "%(target_bl_house)s">
<!ENTITY target_na_senate "%(target_na_senate)s">
<!ENTITY target_na_house "%(target_na_house)s">
<!ENTITY target_na_congress "%(target_na_congress)s">
]>
<DistrictBuilder>
<!-- Define legislative bodies referenced in the system. -->
<LegislativeBodies>
<!-- A Legislative body has an ID (for referencing in GeoLevel
definitions later), a name, and a label for plan items
("District" for Congressional, etc) -->
<LegislativeBody id="congress" name="Congressional" member="District %%s" maxdistricts="&num_districts_congress;"/>
<LegislativeBody id="house" name="State House" member="District %%s" maxdistricts="&num_districts_house;" />
<LegislativeBody id="senate" name="State Senate" member="District %%s" maxdistricts="&num_districts_senate;" />
</LegislativeBodies>
<!-- A list of subjects referenced in the system. -->
<Subjects>
<!-- A Subject is a measurement type, such as "Total Population".
The subject is mapped to an attribute during the import phase,
and contains a long and short display name. Subjects have IDs
for referencing in GeoLevel definitions later. -->
<Subject id="vap_b" field="VAP_B" name="African-American Voting Age Population" short_name="Black VAP " displayed="true" sortkey="1" percentage_denominator="vap" />
<Subject id="vap_h" field="VAP_H" name="Hispanic or Latino voting age population" short_name="Hispanic VAP" displayed="true" sortkey="2" percentage_denominator="vap" />
<Subject id="vap_na" field="VAP_NA" name="Native American Voting Age Population" short_name="Nat Amer VAP" displayed="true" sortkey="4" percentage_denominator="vap" />
%(start_elec)s
          <Subject id="vote_dem" field="VOTE_DEM" name="number of likely Democratic voters" short_name="Democratic voters" displayed="true" sortkey="3" percentage_denominator="vote_tot" />
          <Subject id="vote_rep" field="VOTE_REP" name="number of likely Republican voters" short_name="Republican voters" displayed="true" sortkey="5" percentage_denominator="vote_tot" />
          <Subject id="vote_tot" field="VOTE_TOT" name="number of likely Republican/Democratic voters" short_name="Rep + Dem vote" displayed="false" sortkey="6" />
          <Subject id="vote_dem_norm" field="VOTE_DEM_N" name="number of likely Democratic voters normalized to 50/50 state baseline" short_name="Normal Dem vote" displayed="true" sortkey="18" percentage_denominator="vote_tot_norm" />
          <Subject id="vote_rep_norm" field="VOTE_REP_N" name="number of likely Republican voters normalized to 50/50 state baseline" short_name="Normal Rep vote" displayed="true" sortkey="19" percentage_denominator="vote_tot_norm" />
<Subject id="vote_tot_norm" field="VOTE_TOT_N" name="number of likely Republican and Democratic voters normalized to 50/50 state baseline" short_name="Normal 2-party vote" displayed="false" sortkey="20" />
%(end_elec)s
<Subject id="vap" field="VAP" name="Voting Age Population" short_name="vap" displayed="true" sortkey="7" />
<Subject id="totpop_b" field="TOTPOP_B" name="African-American" short_name="Black" displayed="false" sortkey="8" percentage_denominator="totpop"/>
<Subject id="totpop_h" field="TOTPOP_H" name="Hispanic or Latino" short_name="Hispanic" displayed="false" sortkey="9" percentage_denominator="totpop"/>
<Subject id="totpop_na" field="TOTPOP_NA" name="Native American" short_name="Nat Amer" displayed="false" sortkey="10" percentage_denominator="totpop"/>
<Subject id="totpop_a" field="TOTPOP_A" name="Asian Population" short_name="Asian" displayed="false" sortkey="11" percentage_denominator="totpop"/>
<Subject id="totpop_pi" field="TOTPOP_PI" name="Pacific Islander" short_name="Pac Isl" displayed="false" sortkey="12" percentage_denominator="totpop"/>
<Subject id="totpop_wnh" field="TOTPOP_WNH" name="White Non-Hispanic" short_name="White" displayed="false" sortkey="13" percentage_denominator="totpop"/>
<Subject id="totpop" field="TOTPOP" name="Total Population" short_name="Total Pop." displayed="true" sortkey="14"/>
<Subject id="vap_a" field="VAP_A" name="Asian Voting Age Population" short_name="Asian VAP" displayed="true" sortkey="15" percentage_denominator="vap" />
<Subject id="vap_pi" field="VAP_PI" name="Pacific Islander Voting Age Population" short_name="Pacific VAP" displayed="true" sortkey="16" percentage_denominator="vap"/>
<Subject id="vap_wnh" field="VAP_WNH" name="White Non-Hispanic Voting Age Population" short_name="White VAP" displayed="true" sortkey="17" percentage_denominator="vap"/>
</Subjects>
<Scoring>
<ScoreFunctions>
<!-- A district score that returns a literal value -->
<ScoreFunction id="district_poptot" type="district"
calculator="publicmapping.redistricting.calculators.Sum"
label="Total Pop" user_selectable="true">
<SubjectArgument name="value1" ref="totpop" />
</ScoreFunction>
<ScoreFunction id="district_totpop_b" type="district"
calculator="publicmapping.redistricting.calculators.Sum"
          label="Black Pop." user_selectable="true">
<SubjectArgument name="value1" ref="totpop_b" />
</ScoreFunction>
<ScoreFunction id="district_totpop_h" type="district"
calculator="publicmapping.redistricting.calculators.Sum"
          label="Hispanic Pop." user_selectable="true">
<SubjectArgument name="value1" ref="totpop_h" />
</ScoreFunction>
<ScoreFunction id="district_totpop_a" type="district"
calculator="publicmapping.redistricting.calculators.Sum"
          label="Asian Pop." user_selectable="true">
<SubjectArgument name="value1" ref="totpop_a" />
</ScoreFunction>
<ScoreFunction id="district_totpop_na" type="district"
calculator="publicmapping.redistricting.calculators.Sum"
          label="Native American Pop." user_selectable="true">
<SubjectArgument name="value1" ref="totpop_na" />
</ScoreFunction>
<ScoreFunction id="district_totpop_pi" type="district"
calculator="publicmapping.redistricting.calculators.Sum"
          label="Pacific Islander Pop." user_selectable="true">
<SubjectArgument name="value1" ref="totpop_pi" />
</ScoreFunction>
<ScoreFunction id="district_totpop_wnh" type="district"
calculator="publicmapping.redistricting.calculators.Sum"
          label="White Non-Hispanic Pop." user_selectable="true">
<SubjectArgument name="value1" ref="totpop_wnh" />
</ScoreFunction>
<ScoreFunction id="district_vap" type="district"
calculator="publicmapping.redistricting.calculators.Sum"
label="VAP" user_selectable="true">
<SubjectArgument name="value1" ref="vap" />
</ScoreFunction>
<ScoreFunction id="district_vap_b" type="district"
calculator="publicmapping.redistricting.calculators.Sum"
label="Black VAP" user_selectable="true">
<SubjectArgument name="value1" ref="vap_b" />
</ScoreFunction>
<ScoreFunction id="district_vap_h" type="district"
calculator="publicmapping.redistricting.calculators.Sum"
label="Hispanic VAP" user_selectable="true">
<SubjectArgument name="value1" ref="vap_h" />
</ScoreFunction>
<ScoreFunction id="district_vap_a" type="district"
calculator="publicmapping.redistricting.calculators.Sum"
label="Asian VAP" user_selectable="true">
<SubjectArgument name="value1" ref="vap_a" />
</ScoreFunction>
<ScoreFunction id="district_vap_na" type="district"
calculator="publicmapping.redistricting.calculators.Sum"
label="Native American VAP" user_selectable="true">
<SubjectArgument name="value1" ref="vap_na" />
</ScoreFunction>
<ScoreFunction id="district_vap_pi" type="district"
calculator="publicmapping.redistricting.calculators.Sum"
label="Pacific Islander VAP" user_selectable="true">
<SubjectArgument name="value1" ref="vap_pi" />
</ScoreFunction>
<ScoreFunction id="district_vap_wnh" type="district"
calculator="publicmapping.redistricting.calculators.Sum"
          label="White VAP" user_selectable="true">
<SubjectArgument name="value1" ref="vap_wnh" />
</ScoreFunction>
<!-- A district score that returns a percentage -->
<ScoreFunction id="district_blkvap_percent" type="district"
calculator="publicmapping.redistricting.calculators.Percent"
label="Black VAP %%" user_selectable="true">
<SubjectArgument name="numerator" ref="vap_b" />
<SubjectArgument name="denominator" ref="vap" />
</ScoreFunction>
<ScoreFunction id="district_blkvap_thresh" type="district"
calculator="publicmapping.redistricting.calculators.Threshold"
label="Black VAP Threshold">
<ScoreArgument name="value" ref="district_blkvap_percent" />
<Argument name="threshold" value="0.5" />
</ScoreFunction>
<ScoreFunction id="district_hispvap_percent" type="district"
calculator="publicmapping.redistricting.calculators.Percent"
label="Hisp. VAP %%" user_selectable="true">
<SubjectArgument name="numerator" ref="vap_h" />
<SubjectArgument name="denominator" ref="vap" />
</ScoreFunction>
<ScoreFunction id="district_hispvap_thresh" type="district"
calculator="publicmapping.redistricting.calculators.Threshold"
label="Hisp. VAP Threshold">
<ScoreArgument name="value" ref="district_hispvap_percent" />
<Argument name="threshold" value="0.5" />
</ScoreFunction>
<ScoreFunction id="district_navap_percent" type="district"
calculator="publicmapping.redistricting.calculators.Percent"
label="Native American VAP %%" user_selectable="true">
<SubjectArgument name="numerator" ref="vap_na" />
<SubjectArgument name="denominator" ref="vap" />
</ScoreFunction>
<ScoreFunction id="district_navap_thresh" type="district"
calculator="publicmapping.redistricting.calculators.Threshold"
label="Native American VAP Threshold">
<ScoreArgument name="value" ref="district_navap_percent" />
<Argument name="threshold" value="0.5" />
</ScoreFunction>
<ScoreFunction id="district_avap_percent" type="district"
calculator="publicmapping.redistricting.calculators.Percent"
label="Asian VAP %%" user_selectable="true">
<SubjectArgument name="numerator" ref="vap_a" />
<SubjectArgument name="denominator" ref="vap" />
</ScoreFunction>
<ScoreFunction id="district_avap_thresh" type="district"
calculator="publicmapping.redistricting.calculators.Threshold"
label="Asian VAP Threshold">
<ScoreArgument name="value" ref="district_avap_percent" />
<Argument name="threshold" value="0.5" />
</ScoreFunction>
<ScoreFunction id="district_pivap_percent" type="district"
calculator="publicmapping.redistricting.calculators.Percent"
label="Pacific Islander VAP %%" user_selectable="true">
<SubjectArgument name="numerator" ref="vap_pi" />
<SubjectArgument name="denominator" ref="vap" />
</ScoreFunction>
<ScoreFunction id="district_pivap_thresh" type="district"
calculator="publicmapping.redistricting.calculators.Threshold"
label="Pacific Islander VAP Threshold">
<ScoreArgument name="value" ref="district_pivap_percent" />
<Argument name="threshold" value="0.5" />
</ScoreFunction>
<ScoreFunction id="district_wnhvap_percent" type="district"
calculator="publicmapping.redistricting.calculators.Percent"
label="White VAP %%" user_selectable="true">
<SubjectArgument name="numerator" ref="vap_wnh" />
<SubjectArgument name="denominator" ref="vap" />
</ScoreFunction>
<ScoreFunction id="district_wnhvap_thresh" type="district"
calculator="publicmapping.redistricting.calculators.Threshold"
label="White VAP Threshold">
<ScoreArgument name="value" ref="district_wnhvap_percent" />
<Argument name="threshold" value="0.5" />
</ScoreFunction>
%(start_elec)s
<ScoreFunction id="district_vote" type="district"
calculator="publicmapping.redistricting.calculators.Sum"
label="Estimated votes" user_selectable="true">
<SubjectArgument name="value1" ref="vote_tot" />
</ScoreFunction>
<ScoreFunction id="district_vote_dem" type="district"
calculator="publicmapping.redistricting.calculators.Sum"
label="Estimated Democratic votes" user_selectable="true">
<SubjectArgument name="value1" ref="vote_dem" />
</ScoreFunction>
<ScoreFunction id="district_vote_rep" type="district"
calculator="publicmapping.redistricting.calculators.Sum"
          label="Estimated Republican votes" user_selectable="true">
<SubjectArgument name="value1" ref="vote_rep" />
</ScoreFunction>
<ScoreFunction id="district_vote_dem_percent" type="district"
calculator="publicmapping.redistricting.calculators.Percent"
label="Democratic Predicted Vote %%" user_selectable="true">
<SubjectArgument name="numerator" ref="vote_dem" />
<SubjectArgument name="denominator" ref="vote_tot" />
</ScoreFunction>
<ScoreFunction id="district_vote_dem_thresh" type="district"
calculator="publicmapping.redistricting.calculators.Threshold"
label="Democratic Predicted Vote Threshold">
<ScoreArgument name="value" ref="district_vote_dem_percent" />
<Argument name="threshold" value="0.5" />
</ScoreFunction>
<ScoreFunction id="district_vote_rep_percent" type="district"
calculator="publicmapping.redistricting.calculators.Percent"
label="Republican Predicted Vote %%" user_selectable="true">
<SubjectArgument name="numerator" ref="vote_rep" />
<SubjectArgument name="denominator" ref="vote_tot" />
</ScoreFunction>
<ScoreFunction id="district_vote_rep_thresh" type="district"
calculator="publicmapping.redistricting.calculators.Threshold"
label="Republican Predicted Vote Threshold">
<ScoreArgument name="value" ref="district_vote_rep_percent" />
<Argument name="threshold" value="0.5" />
</ScoreFunction>
%(end_elec)s
<!-- A district score that generates classes based on a couple
ranges around a mean value. -->
<ScoreFunction id="district_poptot_uitarget_congress" type="district"
calculator="publicmapping.redistricting.calculators.Target">
<SubjectArgument name="value" ref="totpop" />
<Argument name="target" value="&pop_congress;" />
<Argument name="range1" value="0.005"/>
<Argument name="range2" value="0.010"/>
</ScoreFunction>
<ScoreFunction id="district_poptot_uitarget_house" type="district"
calculator="publicmapping.redistricting.calculators.Target">
<SubjectArgument name="value" ref="totpop" />
<Argument name="target" value="%(pop_house)s" />
<Argument name="range1" value="0.05" />
<Argument name="range2" value="0.10" />
</ScoreFunction>
<ScoreFunction id="district_poptot_uitarget_senate" type="district"
calculator="publicmapping.redistricting.calculators.Target">
<SubjectArgument name="value" ref="totpop" />
<Argument name="target" value="%(pop_senate)s" />
<Argument name="range1" value="0.05" />
<Argument name="range2" value="0.10" />
</ScoreFunction>
<!-- A district score that returns 1(T) if the subject value
is between the ranges, otherwise returns 0(F). -->
<ScoreFunction id="district_poptot_range" type="district"
calculator="publicmapping.redistricting.calculators.Range"
label="Tot Pop Range">
<SubjectArgument name="value" ref="totpop" />
<Argument name="min" value="&pop_congress_min;" />
<Argument name="max" value="&pop_congress_max;" />
</ScoreFunction>
<!-- A district score that is threshold dependent, and returns
T/F; this example uses 2 score functions: 1 to combine a
set of subjects, and 2 to divide the sum over another
subject. -->
<ScoreFunction id="district_mintot" type="district"
calculator="publicmapping.redistricting.calculators.Sum">
<SubjectArgument name="value1" ref="totpop_b" />
<SubjectArgument name="value2" ref="totpop_h" />
<SubjectArgument name="value3" ref="totpop_na" />
</ScoreFunction>
<ScoreFunction id="district_majmin" type="district"
calculator="publicmapping.redistricting.calculators.DivideAndThreshold" >
<ScoreArgument name="numerator" ref="district_mintot" />
<SubjectArgument name="denominator" ref="totpop" />
<Argument name="threshold" value="0.5" />
</ScoreFunction>
<!-- A custom calculator to calculate compactness, and return
the raw compactness score. -->
<ScoreFunction id="district_schwartzberg" type="district"
calculator="publicmapping.redistricting.calculators.Schwartzberg"
label="Compactness" user_selectable="true">
</ScoreFunction>
<!-- A custom calculator to do contiguity, and is boolean. -->
<ScoreFunction id="district_contiguous" type="district"
calculator="publicmapping.redistricting.calculators.Contiguity"
label="Contiguous" user_selectable="true">
</ScoreFunction>
<!-- A plan score that aggregates all literal values -->
<ScoreFunction id="plan_sum_equipop" type="plan"
calculator="publicmapping.redistricting.calculators.Sum"
label="Equal Population">
<ScoreArgument name="value1" ref="district_poptot_range" />
</ScoreFunction>
<ScoreFunction id="plan_all_equipop" type="plan"
calculator="publicmapping.redistricting.calculators.Threshold" >
<ScoreArgument name="value" ref="plan_sum_equipop" />
<Argument name="threshold" value="0" />
</ScoreFunction>
<!-- A plan score that aggregates all districts over a threshold -->
<ScoreFunction id="plan_count_majmin" type="plan"
calculator="publicmapping.redistricting.calculators.Sum">
<ScoreArgument name="value1" ref="district_majmin" />
</ScoreFunction>
<ScoreFunction id="plan_blkvap_thresh" type="plan"
calculator="publicmapping.redistricting.calculators.Sum"
label="Majority Black Districts" user_selectable="true">
<ScoreArgument name="value1" ref="district_blkvap_thresh" />
</ScoreFunction>
<ScoreFunction id="plan_hispvap_thresh" type="plan"
calculator="publicmapping.redistricting.calculators.Sum"
label="Majority Hispanic Districts" user_selectable="true">
<ScoreArgument name="value1" ref="district_hispvap_thresh" />
</ScoreFunction>
<ScoreFunction id="plan_navap_thresh" type="plan"
calculator="publicmapping.redistricting.calculators.Sum"
          label="Majority Native American Districts" user_selectable="true">
<ScoreArgument name="value1" ref="district_navap_thresh" />
</ScoreFunction>
<ScoreFunction id="plan_avap_thresh" type="plan"
calculator="publicmapping.redistricting.calculators.Sum"
label="Majority Asian Districts" user_selectable="true">
<ScoreArgument name="value1" ref="district_avap_thresh" />
</ScoreFunction>
<ScoreFunction id="plan_pivap_thresh" type="plan"
calculator="publicmapping.redistricting.calculators.Sum">
<ScoreArgument name="value1" ref="district_pivap_thresh" />
</ScoreFunction>
<!-- A plan score that evaluates a threshold, and returns T/F.
This plan score checks that all districts are within the
population limits. -->
<ScoreFunction id="plan_poptot_inrange" type="plan"
calculator="publicmapping.redistricting.calculators.Threshold">
<ScoreArgument name="value" ref="district_poptot_range" />
<Argument name="threshold" value="0" />
</ScoreFunction>
<!-- A plan score that evaluates all districts, and returns
1(T) if there is more than 0 districts that have a minority
majority. -->
<ScoreFunction id="plan_major_minor" type="plan"
calculator="publicmapping.redistricting.calculators.Threshold"
label="Majority-Minority">
<ScoreArgument name="value" ref="district_majmin" />
<Argument name="threshold" value="0" />
</ScoreFunction>
<ScoreFunction id="plan_contiguous" type="plan"
calculator="publicmapping.redistricting.calculators.Sum"
label="Contiguous">
<ScoreArgument name="value1" ref="district_contiguous"/>
</ScoreFunction>
<ScoreFunction id="b_plan_congress_noncontiguous" type="plan"
calculator="publicmapping.redistricting.calculators.Contiguity"
label="Contiguous">
<Argument name="target" value="&num_districts_congress;" />
</ScoreFunction>
<ScoreFunction id="b_plan_house_noncontiguous" type="plan"
calculator="publicmapping.redistricting.calculators.Contiguity"
label="Contiguous">
<Argument name="target" value="&num_districts_house;" />
</ScoreFunction>
<ScoreFunction id="b_plan_senate_noncontiguous" type="plan"
calculator="publicmapping.redistricting.calculators.Contiguity"
label="Contiguous">
<Argument name="target" value="&num_districts_senate;" />
</ScoreFunction>
<!-- interval score function for population -->
<ScoreFunction id="a_congressional_population" type="district"
label="Tot Pop Range (Congress)" user_selectable="true"
description="Population interval calculator for congressional."
calculator="publicmapping.redistricting.calculators.Interval">
<SubjectArgument name="subject" ref="totpop" />
<Argument name="target" value="&pop_congress;" />
<Argument name="bound1" value=".005" />
<Argument name="bound2" value=".01" />
</ScoreFunction>
<ScoreFunction id="a_house_population" type="district"
label="Tot Pop Range (House)" user_selectable="true"
description="Population interval calculator for house."
calculator="publicmapping.redistricting.calculators.Interval">
<SubjectArgument name="subject" ref="totpop" />
<Argument name="target" value="%(pop_house)s" />
<Argument name="bound1" value=".005" />
<Argument name="bound2" value=".01" />
</ScoreFunction>
<ScoreFunction id="a_senate_population" type="district"
label="Tot Pop Range (Senate)" user_selectable="true"
description="Population interval calculator for senate."
calculator="publicmapping.redistricting.calculators.Interval">
<SubjectArgument name="subject" ref="totpop" />
<Argument name="target" value="%(pop_senate)s" />
<Argument name="bound1" value=".005" />
<Argument name="bound2" value=".01" />
</ScoreFunction>
<!-- leaderboard functions -->
<ScoreFunction id="a_congress_plan_count_districts" type="plan"
calculator="publicmapping.redistricting.calculators.CountDistricts"
label="Count Districts"
description="The number of districts in a Congressional redistricting plan must be &num_districts_congress;.">
<Argument name="target" value="&num_districts_congress;" />
</ScoreFunction>
<ScoreFunction id="a_house_plan_count_districts" type="plan"
calculator="publicmapping.redistricting.calculators.CountDistricts"
label="Count Districts"
description="The number of districts in a House of Delegates redistricting plan must be &num_districts_house;.">
<Argument name="target" value="&num_districts_house;" />
</ScoreFunction>
<ScoreFunction id="a_senate_plan_count_districts" type="plan"
calculator="publicmapping.redistricting.calculators.CountDistricts"
label="Count Districts"
description="The number of districts in a State Senate redistricting plan must be &num_districts_senate;.">
<Argument name="target" value="&num_districts_senate;" />
</ScoreFunction>
<ScoreFunction id="a_congress_plan_equipopulation_validation" type="plan"
calculator="publicmapping.redistricting.calculators.Equipopulation"
label="Target Pop. &pop_congress;"
description="The population of each Congressional district must be &pop_congress_min;-&pop_congress_max;">
<Argument name="min" value="&pop_congress_min;"/>
<Argument name="max" value="&pop_congress_max;"/>
<SubjectArgument name="value" ref="totpop"/>
<Argument name="validation" value="1"/>
</ScoreFunction>
<ScoreFunction id="a_congress_plan_equipopulation_summary" type="plan"
calculator="publicmapping.redistricting.calculators.Equipopulation"
label="Target Pop. &pop_congress;"
description="The population of each Congressional district must be &pop_congress_min;-&pop_congress_max;">
<Argument name="min" value="&pop_congress_min;"/>
<Argument name="max" value="&pop_congress_max;"/>
<SubjectArgument name="value" ref="totpop"/>
<Argument name="target" value="&num_districts_congress;"/>
</ScoreFunction>
<ScoreFunction id="a_senate_plan_equipopulation_validation" type="plan"
calculator="publicmapping.redistricting.calculators.Equipopulation"
label="Target Pop. %(pop_senate)s"
description="The population of each Senate district must be &pop_senate_min;-&pop_senate_max;">
<Argument name="min" value="&pop_senate_min;"/>
<Argument name="max" value="&pop_senate_max;"/>
<SubjectArgument name="value" ref="totpop"/>
<Argument name="validation" value="1"/>
</ScoreFunction>
<ScoreFunction id="a_senate_plan_equipopulation_summary" type="plan"
calculator="publicmapping.redistricting.calculators.Equipopulation"
label="Target Pop. %(pop_senate)s"
description="The population of each Senate district must be &pop_senate_min;-&pop_senate_max;">
<Argument name="min" value="&pop_senate_min;"/>
<Argument name="max" value="&pop_senate_max;"/>
<SubjectArgument name="value" ref="totpop"/>
<Argument name="target" value="&num_districts_senate;"/>
</ScoreFunction>
<ScoreFunction id="a_house_plan_equipopulation_validation" type="plan"
calculator="publicmapping.redistricting.calculators.Equipopulation"
label="Target Pop. %(pop_house)s"
description="The population of each House district must be &pop_house_min;-&pop_house_max;">
<Argument name="min" value="&pop_house_min;"/>
<Argument name="max" value="&pop_house_max;"/>
<SubjectArgument name="value" ref="totpop"/>
<Argument name="validation" value="1"/>
</ScoreFunction>
<ScoreFunction id="a_house_plan_equipopulation_summary" type="plan"
calculator="publicmapping.redistricting.calculators.Equipopulation"
label="Target Pop. %(pop_house)s"
description="The population of each House district must be &pop_house_min;-&pop_house_max;">
<Argument name="min" value="&pop_house_min;"/>
<Argument name="max" value="&pop_house_max;"/>
<SubjectArgument name="value" ref="totpop"/>
<Argument name="target" value="&num_districts_house;"/>
</ScoreFunction>
<ScoreFunction id="plan_all_blocks_assigned" type="plan"
calculator="publicmapping.redistricting.calculators.AllBlocksAssigned"
label="All Blocks Assigned"
description="All blocks in the plan must be assigned.">
</ScoreFunction>
<ScoreFunction id="plan_all_contiguous" type="plan"
calculator="publicmapping.redistricting.calculators.AllContiguous"
label="All Contiguous"
description="Contiguity means that every part of a district must be reachable from every other part without crossing the district's borders. All districts within a plan must be contiguous. Water contiguity is permitted. 'Point contiguity' or 'touch-point contiguity' where two sections of a district are connected at a single point is not permitted.">
</ScoreFunction>
%(start_elec)s
<ScoreFunction id="plan_competitiveness" type="plan"
calculator="publicmapping.redistricting.calculators.Competitiveness"
label="Competitiveness"
          description="Each plan's overall political competitiveness is determined by averaging each district's 'partisan differential'. The partisan differential of each district is calculated by subtracting the Democratic 'partisan index' from the Republican 'partisan index'.<br/><br/>'Heavily' competitive districts are districts with partisan differentials of less than or equal to 5%%. 'Generally' competitive districts are districts with partisan differentials of greater than 5%% but less than 10%%.">
<SubjectArgument name="democratic" ref="vote_dem" />
<SubjectArgument name="republican" ref="vote_rep" />
</ScoreFunction>
%(end_elec)s
<ScoreFunction id="plan_equivalence" type="plan"
calculator="publicmapping.redistricting.calculators.Equivalence"
label="Equal Population"
description="The Equipopulation score is the difference between the district with the highest population and the district with the lowest population.">
<SubjectArgument name="value" ref="totpop" />
</ScoreFunction>
<ScoreFunction id="plan_majority_minority_blk_congress" type="plan"
calculator="publicmapping.redistricting.calculators.MajorityMinority"
label="Black VAP Majority (> 50%%)"
description="Compliance with the Voting Rights Act will be assumed if maps include a minority-majority district in any area where a minority group is (as described in Thornburg V. Gingles, 478 U.S. 30, 49 (1986)) 'sufficiently large and geographically compact to constitute a majority in a single-member district'.">
<SubjectArgument name="population" ref="vap" />
<SubjectArgument name="minority1" ref="vap_b" />
<Argument name="target" value="&target_bl_congress;" />
</ScoreFunction>
<ScoreFunction id="plan_majority_minority_blk_house" type="plan"
calculator="publicmapping.redistricting.calculators.MajorityMinority"
label="Black VAP Majority (> 50%%)"
description="Compliance with the Voting Rights Act will be assumed if maps include a minority-majority district in any area where a minority group is (as described in Thornburg V. Gingles, 478 U.S. 30, 49 (1986)) 'sufficiently large and geographically compact to constitute a majority in a single-member district'.">
<SubjectArgument name="population" ref="vap" />
<SubjectArgument name="minority1" ref="vap_b" />
<Argument name="target" value="&target_bl_house;" />
</ScoreFunction>
<ScoreFunction id="plan_majority_minority_blk_senate" type="plan"
calculator="publicmapping.redistricting.calculators.MajorityMinority"
label="Black VAP Majority (> 50%%)"
description="Compliance with the Voting Rights Act will be assumed if maps include a minority-majority district in any area where a minority group is (as described in Thornburg V. Gingles, 478 U.S. 30, 49 (1986)) 'sufficiently large and geographically compact to constitute a majority in a single-member district'.">
<SubjectArgument name="population" ref="vap" />
<SubjectArgument name="minority1" ref="vap_b" />
<Argument name="target" value="&target_bl_senate;" />
</ScoreFunction>
<ScoreFunction id="plan_majority_minority_hisp_congress" type="plan"
calculator="publicmapping.redistricting.calculators.MajorityMinority"
label="Hisp. VAP Majority (> 50%%)"
description="Compliance with the Voting Rights Act will be assumed if maps include a minority-majority district in any area where a minority group is (as described in Thornburg V. Gingles, 478 U.S. 30, 49 (1986)) 'sufficiently large and geographically compact to constitute a majority in a single-member district'.">
<SubjectArgument name="population" ref="vap" />
<SubjectArgument name="minority1" ref="vap_h" />
<Argument name="target" value="&target_hisp_congress;" />
</ScoreFunction>
<ScoreFunction id="plan_majority_minority_hisp_house" type="plan"
calculator="publicmapping.redistricting.calculators.MajorityMinority"
label="Hisp. VAP Majority (> 50%%)"
description="Compliance with the Voting Rights Act will be assumed if maps include a minority-majority district in any area where a minority group is (as described in Thornburg V. Gingles, 478 U.S. 30, 49 (1986)) 'sufficiently large and geographically compact to constitute a majority in a single-member district'.">
<SubjectArgument name="population" ref="vap" />
<SubjectArgument name="minority1" ref="vap_h" />
<Argument name="target" value="&target_hisp_house;" />
</ScoreFunction>
<ScoreFunction id="plan_majority_minority_hisp_senate" type="plan"
calculator="publicmapping.redistricting.calculators.MajorityMinority"
label="Hisp. VAP Majority (> 50%%)"
description="Compliance with the Voting Rights Act will be assumed if maps include a minority-majority district in any area where a minority group is (as described in Thornburg V. Gingles, 478 U.S. 30, 49 (1986)) 'sufficiently large and geographically compact to constitute a majority in a single-member district'.">
<SubjectArgument name="population" ref="vap" />
<SubjectArgument name="minority1" ref="vap_h" />
<Argument name="target" value="&target_hisp_senate;" />
</ScoreFunction>
<ScoreFunction id="plan_majority_minority_na_congress" type="plan"
calculator="publicmapping.redistricting.calculators.MajorityMinority"
label="Native American Majority (> 50%%)"
description="Compliance with the Voting Rights Act will be assumed if maps include a minority-majority district in any area where a minority group is (as described in Thornburg V. Gingles, 478 U.S. 30, 49 (1986)) 'sufficiently large and geographically compact to constitute a majority in a single-member district'.">
<SubjectArgument name="population" ref="vap" />
<SubjectArgument name="minority1" ref="vap_na" />
<Argument name="target" value="&target_na_congress;" />
</ScoreFunction>
<ScoreFunction id="plan_majority_minority_na_house" type="plan"
calculator="publicmapping.redistricting.calculators.MajorityMinority"
label="Native American Majority (> 50%%)"
description="Compliance with the Voting Rights Act will be assumed if maps include a minority-majority district in any area where a minority group is (as described in Thornburg V. Gingles, 478 U.S. 30, 49 (1986)) 'sufficiently large and geographically compact to constitute a majority in a single-member district'.">
<SubjectArgument name="population" ref="vap" />
<SubjectArgument name="minority1" ref="vap_na" />
<Argument name="target" value="&target_na_house;" />
</ScoreFunction>
<ScoreFunction id="plan_majority_minority_na_senate" type="plan"
calculator="publicmapping.redistricting.calculators.MajorityMinority"
label="Native American Majority (> 50%%)"
description="Compliance with the Voting Rights Act will be assumed if maps include a minority-majority district in any area where a minority group is (as described in Thornburg V. Gingles, 478 U.S. 30, 49 (1986)) 'sufficiently large and geographically compact to constitute a majority in a single-member district'.">
<SubjectArgument name="population" ref="vap" />
<SubjectArgument name="minority1" ref="vap_na" />
<Argument name="target" value="&target_na_senate;" />
</ScoreFunction>
<ScoreFunction id="plan_majority_minority" type="plan"
calculator="publicmapping.redistricting.calculators.MajorityMinority"
label="Majority Minority District"
description="Compliance with the Voting Rights Act will be assumed if maps include a minority-majority district in any area where a minority group is (as described in Thornburg V. Gingles, 478 U.S. 30, 49 (1986)) 'sufficiently large and geographically compact to constitute a majority in a single-member district'.">
<SubjectArgument name="population" ref="vap" />
<SubjectArgument name="minority1" ref="vap_b" />
<SubjectArgument name="minority2" ref="vap_h" />
<SubjectArgument name="minority3" ref="vap_na" />
<Argument name="validation" value="1" />
</ScoreFunction>
%(start_elec)s
<ScoreFunction id="plan_repfairness" type="plan"
calculator="publicmapping.redistricting.calculators.RepresentationalFairness"
label="Representational Fairness"
          description="Representational fairness is increased when the percentage of districts a party would likely win (based upon the 'partisan index' used to determine Competitiveness) closely mirrors that party's percentage of the statewide vote." >
<Argument name="range" value="0.05" />
<SubjectArgument name="normalized democratic" ref="vote_dem_norm" />
<SubjectArgument name="normalized republican" ref="vote_rep_norm" />
</ScoreFunction>
%(end_elec)s
<ScoreFunction id="plan_schwartzberg" type="plan"
calculator="publicmapping.redistricting.calculators.Schwartzberg"
label="Average Compactness"
description="The competition is using the 'Schwartzberg' compactness measure. This measure is a ratio of the perimeter of the district to the circumference of the circle whose area is equal to the area of the district." >
</ScoreFunction>
</ScoreFunctions>
<ScorePanels>
<ScorePanel id="panel_equipop_all" type="plan" position="1"
title="Equipopulation" template="leaderboard_panel_all.html">
<Score ref="plan_equivalence" />
</ScorePanel>
<ScorePanel id="panel_equipop_mine" type="plan" position="1"
title="Equipopulation" template="leaderboard_panel_mine.html">
<Score ref="plan_equivalence" />
</ScorePanel>
<ScorePanel id="panel_compact_all" type="plan" position="2"
title="Schwartzberg" template="leaderboard_panel_all.html">
<Score ref="plan_schwartzberg" />
</ScorePanel>
<ScorePanel id="panel_compact_mine" type="plan" position="2"
title="Schwartzberg" template="leaderboard_panel_mine.html">
<Score ref="plan_schwartzberg" />
</ScorePanel>
%(start_elec)s
<ScorePanel id="panel_competitive_all" type="plan" position="3"
title="Competitiveness" template="leaderboard_panel_all.html">
<Score ref="plan_competitiveness" />
</ScorePanel>
<ScorePanel id="panel_competitive_mine" type="plan" position="3"
title="Competitiveness" template="leaderboard_panel_mine.html">
<Score ref="plan_competitiveness" />
</ScorePanel>
<ScorePanel id="panel_rf_all" type="plan" position="4"
title="Representational Fairness" template="leaderboard_panel_all.html">
<Score ref="plan_repfairness" />
</ScorePanel>
<ScorePanel id="panel_rf_mine" type="plan" position="4"
title="Representational Fairness" template="leaderboard_panel_mine.html">
<Score ref="plan_repfairness" />
</ScorePanel>
%(end_elec)s
<!-- Summary above all sidebar panels -->
<ScorePanel id="congressional_panel_summary" type="plan_summary" position="1"
title="Plan Summary" cssclass="plan_summary congressional" template="plan_summary.html">
<Score ref="a_congress_plan_equipopulation_summary"/>
<Score ref="b_plan_congress_noncontiguous"/>
<Score ref="plan_majority_minority_blk_congress" />
<Score ref="plan_majority_minority_hisp_congress" />
%(start_na)s
<Score ref="plan_majority_minority_na_congress" />
%(end_na)s
</ScorePanel>
<ScorePanel id="house_panel_summary" type="plan_summary" position="1"
title="Plan Summary" cssclass="plan_summary house" template="plan_summary.html">
<Score ref="a_house_plan_equipopulation_summary"/>
<Score ref="b_plan_house_noncontiguous"/>
<Score ref="plan_majority_minority_blk_house" />
<Score ref="plan_majority_minority_hisp_house" />
%(start_na)s
<Score ref="plan_majority_minority_na_house" />
%(end_na)s
</ScorePanel>
<ScorePanel id="senate_panel_summary" type="plan_summary" position="1"
title="Plan Summary" cssclass="plan_summary senate" template="plan_summary.html">
<Score ref="a_senate_plan_equipopulation_summary"/>
<Score ref="b_plan_senate_noncontiguous"/>
<Score ref="a_senate_plan_count_districts" />
<Score ref="plan_majority_minority_blk_senate" />
<Score ref="plan_majority_minority_hisp_senate" />
%(start_na)s
<Score ref="plan_majority_minority_na_senate" />
%(end_na)s
</ScorePanel>
<!-- Basic Information -->
<ScorePanel id="congresional_panel_info" type="district" position="2"
title="Basic Information" cssclass="district_basic_info congressional"
template="basic_information.html">
<Score ref="a_congressional_population" />
<Score ref="district_contiguous" />
<Score ref="district_schwartzberg" />
</ScorePanel>
<ScorePanel id="house_panel_info" type="district" position="2"
title="Basic Information" cssclass="district_basic_info house"
template="basic_information.html">
<Score ref="a_house_population" />
<Score ref="district_contiguous" />
<Score ref="district_schwartzberg" />
</ScorePanel>
<ScorePanel id="senate_panel_info" type="district" position="2"
title="Basic Information" cssclass="district_basic_info senate"
template="basic_information.html">
<Score ref="a_senate_population" />
<Score ref="district_contiguous" />
<Score ref="district_schwartzberg" />
</ScorePanel>
<!-- Demographics -->
<ScorePanel id="congressional_panel_demo" type="district" position="2"
title="Demographics" cssclass="district_demographics congressional"
template="demographics.html">
%(start_elec)s
<Score ref="district_vote_dem_percent" />
%(end_elec)s
<Score ref="district_blkvap_percent" />
<Score ref="district_hispvap_percent" />
</ScorePanel>
<ScorePanel id="house_panel_demo" type="district" position="2"
title="Demographics" cssclass="district_demographics house"
template="demographics.html">
%(start_elec)s
<Score ref="district_vote_dem_percent" />
%(end_elec)s
<Score ref="district_blkvap_percent" />
<Score ref="district_hispvap_percent" />
</ScorePanel>
<ScorePanel id="senate_panel_demo" type="district" position="2"
title="Demographics" cssclass="district_demographics senate"
template="demographics.html">
%(start_elec)s
<Score ref="district_vote_dem_percent" />
%(end_elec)s
<Score ref="district_blkvap_percent" />
<Score ref="district_hispvap_percent" />
</ScorePanel>
<!-- Needed due to issue https://sourceforge.net/apps/trac/publicmapping/ticket/340 Delete after setup -->
<ScorePanel id="stats_picker" type="district" position="1" title="Stats Picker" cssclass="hidden" template="demographics.html">
<Score ref="district_poptot"/>
<Score ref="district_totpop_b"/>
<Score ref="district_totpop_h"/>
<Score ref="district_totpop_a"/>
<Score ref="district_totpop_na"/>
<Score ref="district_totpop_pi"/>
<Score ref="district_totpop_wnh"/>
<Score ref="district_vap"/>
<Score ref="district_vap_b"/>
<Score ref="district_vap_h"/>
<Score ref="district_vap_a"/>
<Score ref="district_vap_na"/>
<Score ref="district_vap_pi"/>
<Score ref="district_vap_wnh"/>
<Score ref="district_blkvap_percent"/>
<Score ref="district_hispvap_percent"/>
<Score ref="district_avap_percent"/>
<Score ref="district_navap_percent"/>
<Score ref="district_pivap_percent"/>
<Score ref="district_wnhvap_percent"/>
%(start_elec)s
<Score ref="district_vote"/>
<Score ref="district_vote_dem"/>
<Score ref="district_vote_rep"/>
<Score ref="district_vote_dem_percent"/>
<Score ref="district_vote_rep_percent"/>
%(end_elec)s
</ScorePanel>
</ScorePanels>
<ScoreDisplays>
<ScoreDisplay legislativebodyref="congress" type="leaderboard"
title="Congressional Leaderboard - All" cssclass="leaderboard congress">
<ScorePanel ref="panel_equipop_all" />
<ScorePanel ref="panel_compact_all" />
%(start_elec)s
<ScorePanel ref="panel_competitive_all" />
<ScorePanel ref="panel_rf_all" />
%(end_elec)s
</ScoreDisplay>
<ScoreDisplay legislativebodyref="congress" type="leaderboard"
title="Congressional Leaderboard - Mine" cssclass="leaderboard congress">
<ScorePanel ref="panel_equipop_mine" />
<ScorePanel ref="panel_compact_mine" />
%(start_elec)s
<ScorePanel ref="panel_competitive_all" />
<ScorePanel ref="panel_rf_mine" />
%(end_elec)s
</ScoreDisplay>
<ScoreDisplay legislativebodyref="house" type="leaderboard"
title="State House Leaderboard - All" cssclass="leaderboard house">
<ScorePanel ref="panel_equipop_all" />
<ScorePanel ref="panel_compact_all" />
%(start_elec)s
<ScorePanel ref="panel_competitive_all" />
<ScorePanel ref="panel_rf_all" />
%(end_elec)s
</ScoreDisplay>
<ScoreDisplay legislativebodyref="house" type="leaderboard"
title="State House Leaderboard - Mine" cssclass="leaderboard house">
<ScorePanel ref="panel_equipop_mine" />
<ScorePanel ref="panel_compact_mine" />
%(start_elec)s
<ScorePanel ref="panel_competitive_mine" />
<ScorePanel ref="panel_rf_mine" />
%(end_elec)s
</ScoreDisplay>
<ScoreDisplay legislativebodyref="senate" type="leaderboard"
title="State Senate Leaderboard - All" cssclass="leaderboard senate">
<ScorePanel ref="panel_equipop_all" />
<ScorePanel ref="panel_compact_all" />
%(start_elec)s
<ScorePanel ref="panel_competitive_all" />
<ScorePanel ref="panel_rf_all" />
%(end_elec)s
</ScoreDisplay>
<ScoreDisplay legislativebodyref="senate" type="leaderboard"
title="State Senate Leaderboard - Mine" cssclass="leaderboard senate">
<ScorePanel ref="panel_equipop_mine" />
<ScorePanel ref="panel_compact_mine" />
%(start_elec)s
<ScorePanel ref="panel_competitive_mine" />
<ScorePanel ref="panel_rf_mine" />
%(end_elec)s
</ScoreDisplay>
<!-- Sidebar configuration -->
<ScoreDisplay legislativebodyref="congress" type="sidebar" title="Basic Information" cssclass="basic_information">
<ScorePanel ref="congressional_panel_summary" />
<ScorePanel ref="congresional_panel_info" />
</ScoreDisplay>
<ScoreDisplay legislativebodyref="congress" type="sidebar" title="Demographics" cssclass="demographics">
<ScorePanel ref="congressional_panel_summary" />
<ScorePanel ref="congressional_panel_demo" />
</ScoreDisplay>
<ScoreDisplay legislativebodyref="house" type="sidebar" title="Basic Information" cssclass="basic_information">
<ScorePanel ref="house_panel_summary" />
<ScorePanel ref="house_panel_info" />
</ScoreDisplay>
<ScoreDisplay legislativebodyref="house" type="sidebar" title="Demographics" cssclass="demographics">
<ScorePanel ref="house_panel_summary" />
<ScorePanel ref="house_panel_demo" />
</ScoreDisplay>
<ScoreDisplay legislativebodyref="senate" type="sidebar" title="Basic Information" cssclass="basic_information">
<ScorePanel ref="senate_panel_summary" />
<ScorePanel ref="senate_panel_info" />
</ScoreDisplay>
<ScoreDisplay legislativebodyref="senate" type="sidebar" title="Demographics" cssclass="demographics">
<ScorePanel ref="senate_panel_summary" />
<ScorePanel ref="senate_panel_demo" />
</ScoreDisplay>
<!-- Needed due to issue https://sourceforge.net/apps/trac/publicmapping/ticket/340 Delete after setup -->
<ScoreDisplay legislativebodyref="congress" type="sidebar" title="All Stats" cssclass="hidden"><ScorePanel ref="stats_picker"/></ScoreDisplay>
</ScoreDisplays>
</Scoring>
<Validation>
<Criteria legislativebodyref="congress">
<Criterion name="Equipopulation - Congress" description="<p>Your plan does not meet the competition criteria for Equipopulation:</p><p> The population of each Congressional district must be &pop_congress_max;-&pop_congress_min;">
<Score ref="a_congress_plan_equipopulation_validation" />
</Criterion>
<Criterion name="AllContiguous - Congress"
description="<p>Your plan does not meet the competition criteria for Contiguity</p><p>Every part of a district must be reachable from every other part without crossing the district's borders. All districts within a plan must be contiguous. </p>">
<Score ref="plan_all_contiguous" />
</Criterion>
<Criterion name="MajorityMinority - Congress" description="">
<Score ref="plan_majority_minority" />
</Criterion>
<Criterion name="CountDistricts - Congress" description="">
<Score ref="a_congress_plan_count_districts" />
</Criterion>
<Criterion name="AllBlocksAssigned - Congress" description="">
<Score ref="plan_all_blocks_assigned" />
</Criterion>
</Criteria>
<Criteria legislativebodyref="house">
<Criterion name="Equipopulation - House" description="<p>Your plan does not meet the competition criteria for Equipopulation:</p><p>The population of each House of Delegates district must be &pop_house_min; - &pop_house_max;">
<Score ref="a_house_plan_equipopulation_validation" />
</Criterion>
<Criterion name="AllContiguous - House"
description="<p>Your plan does not meet the competition criteria for Contiguity</p><p>Every part of a district must be reachable from every other part without crossing the district's borders. All districts within a plan must be contiguous. </p>">
<Score ref="plan_all_contiguous" />
</Criterion>
<Criterion name="MajorityMinority - House" description="">
<Score ref="plan_majority_minority" />
</Criterion>
<Criterion name="CountDistricts - House" description="">
<Score ref="a_house_plan_count_districts" />
</Criterion>
<Criterion name="AllBlocksAssigned - House" description="">
<Score ref="plan_all_blocks_assigned" />
</Criterion>
</Criteria>
<Criteria legislativebodyref="senate">
<Criterion name="Equipopulation - Senate" description="<p>Your plan does not meet the competition criteria for Equipopulation:</p><p>The population of each State Senate district must be &pop_house_min;-&pop_house_max;">
<Score ref="a_senate_plan_equipopulation_validation" />
</Criterion>
<Criterion name="AllContiguous - Senate"
description="<p>Your plan does not meet the competition criteria for Contiguity</p><p>Every part of a district must be reachable from every other part without crossing the district's borders. All districts within a plan must be contiguous. </p>">
<Score ref="plan_all_contiguous" />
</Criterion>
<Criterion name="MajorityMinority - Senate" description="">
<Score ref="plan_majority_minority" />
</Criterion>
<Criterion name="CountDistricts - Senate" description="">
<Score ref="a_senate_plan_count_districts" />
</Criterion>
<Criterion name="AllBlocksAssigned - Senate" description="">
<Score ref="plan_all_blocks_assigned" />
</Criterion>
</Criteria>
</Validation>
<!--
Optional configuration for geounits that require special contiguity rules.
'id' is the portable id of the geounit in which to configure an override.
'connect_to' is the portable id of the geounit in which the geounit is
to be considered contiguous with. Tests for contiguity will apply these overrides
in order to account for contiguity when physical contiguity is not possible.
For example, an island may need to be marked contiguous with one or more geounits
on an adjacent coast (possibly containing harbors).
<ContiguityOverrides>
<ContiguityOverride id="510030112012077" connect_to="510030102011065" />
<ContiguityOverride id="510030112012077" connect_to="510030103003037" />
</ContiguityOverrides>
-->
<!-- Contiguity Overrides, if Any -->
%(contiguityOverrideString)s
<GeoLevels>
<GeoLevel id="block" name="block" min_zoom="6" sort_key="3" tolerance="2.5">
<Shapefile path="/projects/PublicMapping/data/census_blocks.shp">
<Fields>
<Field name="NAME10" type="name"/>
<Field name="GEOID10" type="portable"/>
<Field name="STATEFP10" type="tree" pos="0" width="2"/>
<Field name="COUNTYFP10" type="tree" pos="1" width="3"/>
<Field name="TRACTCE10" type="tree" pos="2" width="6"/>
<Field name="BLOCKCE10" type="tree" pos="3" width="4"/>
</Fields>
</Shapefile>
<GeoLevelCharacteristics>
<GeoLevelCharacteristic ref="totpop" />
<GeoLevelCharacteristic ref="vap" />
<GeoLevelCharacteristic ref="vap_b" />
<GeoLevelCharacteristic ref="vap_h" />
<GeoLevelCharacteristic ref="vap_na" />
<GeoLevelCharacteristic ref="vap_wnh" />
<GeoLevelCharacteristic ref="vap_pi" />
<GeoLevelCharacteristic ref="vap_a" />
<GeoLevelCharacteristic ref="totpop_wnh" />
<GeoLevelCharacteristic ref="totpop_pi" />
<GeoLevelCharacteristic ref="totpop_a" />
<GeoLevelCharacteristic ref="totpop_b" />
<GeoLevelCharacteristic ref="totpop_h" />
<GeoLevelCharacteristic ref="totpop_na" />
%(start_elec)s
<GeoLevelCharacteristic ref="vote_dem" />
<GeoLevelCharacteristic ref="vote_rep" />
<GeoLevelCharacteristic ref="vote_tot" />
<GeoLevelCharacteristic ref="vote_dem_norm" />
<GeoLevelCharacteristic ref="vote_rep_norm" />
<GeoLevelCharacteristic ref="vote_tot_norm" />
%(end_elec)s
</GeoLevelCharacteristics>
<LegislativeBodies>
<LegislativeBody ref="congress">
<LegislativeTargets>
<LegislativeTarget ref="congress_target" default="true" />
</LegislativeTargets>
</LegislativeBody>
<LegislativeBody ref="house">
<LegislativeTargets>
<LegislativeTarget ref="house_target" default="true" />
</LegislativeTargets>
</LegislativeBody>
<LegislativeBody ref="senate">
<LegislativeTargets>
<LegislativeTarget ref="senate_target" default="true" />
</LegislativeTargets>
</LegislativeBody>
</LegislativeBodies>
</GeoLevel>
<GeoLevel id="tract" name="tract" min_zoom="3" sort_key="2" tolerance="25">
<Files>
<Geography path="/projects/PublicMapping/data/census_tracts.shp">
<Fields>
<Field name="NAME10" type="name" />
<Field name="GEOID10" type="portable" />
<Field name="STATEFP10" type="tree" pos="0" width="2"/>
<Field name="COUNTYFP10" type="tree" pos="1" width="3"/>
<Field name="TRACTCE10" type="tree" pos="2" width="6"/>
</Fields>
</Geography>
</Files>
<GeoLevelCharacteristics>
<GeoLevelCharacteristic ref="totpop" />
<GeoLevelCharacteristic ref="vap" />
<GeoLevelCharacteristic ref="vap_b" />
<GeoLevelCharacteristic ref="vap_h" />
<GeoLevelCharacteristic ref="vap_na" />
<GeoLevelCharacteristic ref="vap_wnh" />
<GeoLevelCharacteristic ref="vap_pi" />
<GeoLevelCharacteristic ref="vap_a" />
<GeoLevelCharacteristic ref="totpop_wnh" />
<GeoLevelCharacteristic ref="totpop_pi" />
<GeoLevelCharacteristic ref="totpop_a" />
<GeoLevelCharacteristic ref="totpop_b" />
<GeoLevelCharacteristic ref="totpop_h" />
<GeoLevelCharacteristic ref="totpop_na" />
%(start_elec)s
<GeoLevelCharacteristic ref="vote_dem" />
<GeoLevelCharacteristic ref="vote_rep" />
<GeoLevelCharacteristic ref="vote_tot" />
<GeoLevelCharacteristic ref="vote_dem_norm" />
<GeoLevelCharacteristic ref="vote_rep_norm" />
<GeoLevelCharacteristic ref="vote_tot_norm" />
%(end_elec)s
</GeoLevelCharacteristics>
<LegislativeBodies>
<LegislativeBody ref="congress">
<Parent ref="block" />
<LegislativeTargets>
<LegislativeTarget ref="congress_target" default="true" />
</LegislativeTargets>
</LegislativeBody>
<LegislativeBody ref="house">
<Parent ref="block" />
<LegislativeTargets>
<LegislativeTarget ref="house_target" default="true" />
</LegislativeTargets>
</LegislativeBody>
<LegislativeBody ref="senate">
<Parent ref="block" />
<LegislativeTargets>
<LegislativeTarget ref="senate_target" default="true" />
</LegislativeTargets>
</LegislativeBody>
</LegislativeBodies>
</GeoLevel>
<GeoLevel id="county" name="county" min_zoom="0" sort_key="1" tolerance="250">
<Files>
<Geography path="/projects/PublicMapping/data/census_counties.shp">
<Fields>
<Field name="NAME10" type="name"/>
<Field name="GEOID10" type="portable"/>
<Field name="STATEFP10" type="tree" pos="0" width="2"/>
<Field name="COUNTYFP10" type="tree" pos="1" width="3"/>
</Fields>
</Geography>
</Files>
<GeoLevelCharacteristics>
<GeoLevelCharacteristic ref="totpop" />
<GeoLevelCharacteristic ref="vap" />
<GeoLevelCharacteristic ref="vap_b" />
<GeoLevelCharacteristic ref="vap_h" />
<GeoLevelCharacteristic ref="vap_na" />
<GeoLevelCharacteristic ref="vap_wnh" />
<GeoLevelCharacteristic ref="vap_pi" />
<GeoLevelCharacteristic ref="vap_a" />
<GeoLevelCharacteristic ref="totpop_wnh" />
<GeoLevelCharacteristic ref="totpop_pi" />
<GeoLevelCharacteristic ref="totpop_a" />
<GeoLevelCharacteristic ref="totpop_b" />
<GeoLevelCharacteristic ref="totpop_h" />
<GeoLevelCharacteristic ref="totpop_na" />
%(start_elec)s
<GeoLevelCharacteristic ref="vote_dem" />
<GeoLevelCharacteristic ref="vote_rep" />
<GeoLevelCharacteristic ref="vote_tot" />
<GeoLevelCharacteristic ref="vote_dem_norm" />
<GeoLevelCharacteristic ref="vote_rep_norm" />
<GeoLevelCharacteristic ref="vote_tot_norm" />
%(end_elec)s
</GeoLevelCharacteristics>
<LegislativeBodies>
<LegislativeBody ref="congress">
<Parent ref="tract" />
<LegislativeTargets>
<LegislativeTarget ref="congress_target" default="true" />
</LegislativeTargets>
</LegislativeBody>
<LegislativeBody ref="house">
<Parent ref="tract" />
<LegislativeTargets>
<LegislativeTarget ref="house_target" default="true" />
</LegislativeTargets>
</LegislativeBody>
<LegislativeBody ref="senate">
<Parent ref="tract" />
<LegislativeTargets>
<LegislativeTarget ref="senate_target" default="true" />
</LegislativeTargets>
</LegislativeBody>
</LegislativeBodies>
</GeoLevel>
</GeoLevels>
<Templates>
<Template name="Congressional">
<LegislativeBody ref="congress"/>
<Blockfile path="/projects/PublicMapping/data/congress_generated_index.csv" />
</Template>
<Template name="State House">
<LegislativeBody ref="house"/>
<Blockfile path="/projects/PublicMapping/data/house_generated_index.csv" />
</Template>
<Template name="State Senate">
<LegislativeBody ref="senate"/>
<Blockfile path="/projects/PublicMapping/data/senate_generated_index.csv" />
</Template>
</Templates>
<Project root="/projects/PublicMapping/DistrictBuilder" sessionquota="5"
sessiontimeout="15">
<!-- Database connection information. -->
<Database name="publicmapping" user="publicmapping" password="<PASSWORD>"/>
<!--
Administrative user information. This should match the admin
user created when the django project is created.
-->
<Admin user="admin" email="<EMAIL>" password="<PASSWORD>"/>
<!-- Configuration items specific to the 'redistricting' app. -->
<Redistricting>
<MapServer hostname="" ns="pmp" nshref="http://publicmapping.sourceforge.net/"
adminuser="admin" adminpass="<PASSWORD>" maxfeatures="100"
styles="/projects/PublicMapping/DistrictBuilder/sld" />
<!--
Use a GoogleAnalytics account to track the usage of the
application. This requires an account and domain.
<GoogleAnalytics account="" domain=""/>
-->
<!-- Upload file size restrictions. This is in KB -->
<Upload maxsize="2500"/>
<!-- Undo restrictions -->
<MaxUndos duringedit="50" afteredit="10" />
<!-- Leaderboard configuration -->
<Leaderboard maxranked="10" />
</Redistricting>
<Reporting>
<BardConfigs>
<BardConfig
id="blocks"
shape="/projects/PublicMapping/data/census_configured.Rdata"
temp="/projects/PublicMapping/local/reports"
transform="/projects/PublicMapping/DistrictBuilder/docs/bard_template.xslt">
<PopVars>
<PopVar subjectref="totpop" threshold=".01" default="true" />
<PopVar subjectref="vap" threshold=".1" />
</PopVars>
<RatioVars>
<!--
Set up RatioVars for both ethnicity and political
party.
-->
<RatioVar id="racialComp" label="Majority Minority Districts" threshold=".5">
<Numerators>
<Numerator subjectref="totpop_b" />
<Numerator subjectref="totpop_h" />
<Numerator subjectref="totpop_na" />
<Numerator subjectref="totpop_a" />
<Numerator subjectref="totpop_pi" />
<Numerator subjectref="totpop_wnh" />
</Numerators>
<Denominator subjectref="totpop" />
</RatioVar>
<RatioVar id="racialCompVap" label="Majority Minority Districts" threshold=".5">
<Numerators>
<Numerator subjectref="vap_b" />
<Numerator subjectref="vap_h" />
<Numerator subjectref="vap_na" />
<Numerator subjectref="vap_a" />
<Numerator subjectref="vap_pi" />
<Numerator subjectref="vap_wnh" />
</Numerators>
<Denominator subjectref="vap" />
</RatioVar>
%(start_elec)s
<RatioVar id="partyControl" label="Party-Controlled Districts" threshold=".5">
<Numerators>
<Numerator subjectref="vote_dem" />
<Numerator subjectref="vote_rep" />
</Numerators>
<Denominator subjectref="vote_tot" />
</RatioVar>
%(end_elec)s
</RatioVars>
<SplitVars>
<!--
See whether a given district splits a geography.
This can be any higher level geography: a county,
VTd, or tract.
-->
<SplitVar field="COUNTYFP10" label="County" />
<SplitVar field="TRACTCE10" label="Tract" />
</SplitVars>
</BardConfig>
</BardConfigs>
<BardBodyConfigs>
<!--
For each legislative body, map the configuration to the
geography used to generate reports.
-->
<BardBodyConfig
id="congress_blocks"
legislativebodyref="congress"
bardconfigref="blocks" />
<BardBodyConfig
id="house_blocks"
legislativebodyref="house"
bardconfigref="blocks" />
<BardBodyConfig
id="senate_blocks"
legislativebodyref="senate"
bardconfigref="blocks" />
</BardBodyConfigs>
</Reporting>
<!-- Information about the mailer configuration. -->
<Mailer server="localhost" port="25" username="" password=""/>
</Project>
</DistrictBuilder>
"""
def gen_config(num_districts_congress,num_districts_senate,num_districts_house,sum_TOTPOP,has_election_data=0,has_vtds=0, conf_na=False ,
target_na_congress=0, target_hisp_congress = 0 , target_bl_congress = 0,
target_na_house=0, target_hisp_house = 0 , target_bl_house = 0,
target_na_senate =0, target_hisp_senate = 0 , target_bl_senate = 0, contiguityOverrideString = ""):
start_na="<!--"
start_elec="<!--"
end_elec="-->"
end_na="-->"
midlevel="tract"
if (conf_na==True):
start_na=""
end_na=""
midlevel_width="6"
midlevel_var="TRACTCE10"
if (has_election_data==1):
start_elec=""
end_elec=""
if (has_vtds==1) :
midlevel="vtds"
midlevel_width="4"
midlevel_var="VTDST10"
pop_congress = int(round((sum_TOTPOP/float(num_districts_congress))))
pop_congress_max = int(round((sum_TOTPOP/float(num_districts_congress)) * 1.005))
pop_congress_min = int(round((sum_TOTPOP/float(num_districts_congress)) * 0.995))
pop_house = int(round((sum_TOTPOP/float(num_districts_house))))
pop_house_max = int(round((sum_TOTPOP/float(num_districts_house)) * 1.1))
pop_house_min = int(round((sum_TOTPOP/float(num_districts_house)) * 0.9))
pop_senate = int(round((sum_TOTPOP/float(num_districts_senate))))
pop_senate_max = int(round((sum_TOTPOP/float(num_districts_senate)) * 1.1))
pop_senate_min = int(round((sum_TOTPOP/float(num_districts_senate)) * 0.9))
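    # Worked example of the tolerances above (hypothetical numbers): with
    # sum_TOTPOP = 1,000,000 and num_districts_congress = 10, pop_congress is
    # 100000, pop_congress_max 100500 and pop_congress_min 99500 (+/- 0.5%);
    # the house and senate bands use the looser +/- 10% factors.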
target_file = '/projects/PublicMapping/DistrictBuilder/docs/config_census_generated.xml'
f = open(target_file,'w')
f.write(str( Config_Template(start_elec=start_elec,end_elec=end_elec,num_districts_congress=num_districts_congress,num_districts_house=num_districts_house,num_districts_senate=num_districts_senate,pop_congress_max=pop_congress_max,pop_congress_min=pop_congress_min,pop_senate_max=pop_senate_max, pop_senate_min=pop_senate_min,pop_house_max=pop_house_max,pop_house_min=pop_house_min,pop_congress=pop_congress,pop_senate=pop_senate,pop_house=pop_house,start_na=start_na, end_na=end_na, target_na_congress=target_na_congress, target_hisp_congress=target_hisp_congress, target_bl_congress=target_bl_congress, target_na_house=target_na_house, target_hisp_house=target_hisp_house, target_bl_house=target_bl_house, target_na_senate=target_na_senate, target_hisp_senate=target_hisp_senate, target_bl_senate=target_bl_senate,contiguityOverrideString=contiguityOverrideString)))
f.write("\n")
f.close()
os.chmod(target_file,stat.S_IRUSR|stat.S_IRGRP|stat.S_IROTH)
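# Example call (hypothetical values), mirroring what the MAIN section below does;
# it writes /projects/PublicMapping/DistrictBuilder/docs/config_census_generated.xml:
#   gen_config(num_districts_congress=4, num_districts_senate=40,
#              num_districts_house=80, sum_TOTPOP=5700000,
#              has_election_data=1, has_vtds=0, conf_na=False)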
###
### MAIN
###
#
# Get Arguments
#
parser=optparse.OptionParser(usage="%prog -F[fips_code] -C[num_congressional_districts] -S[num_senate_districts] -H[num_house_districts]", version="%prog 0.1")
# required arguments
parser.add_option('-F','--fips', dest='stateFips',help="State two digit FIPS code", type=int, default=0)
parser.add_option('-C','--congdist', dest='congDis',help="number of congressional districts", type=int, default=0)
parser.add_option('-H', '--housedist',dest='houseDis',help="number of house districts", type=int, default=0)
parser.add_option('-S', '--sendist', dest='senDis',help="number of senate districts", type=int,default=0)
# operations to perform
parser.add_option('-i', '--install', dest="do_install", help="Install dependencies.", default=False, action='store_true')
parser.add_option('-g', '--getdata', dest="do_getdata", help="Get data.", default=False, action='store_true')
parser.add_option('-s', '--gensld', dest="do_gensld", help="Generate slds", default=False, action='store_true')
parser.add_option('-c', '--genconf', dest="do_genconf", help="Generate config file", default=False, action='store_true')
parser.add_option('-d', '--dropdb', dest="do_dropdb", help="Drop database", default=False, action='store_true')
parser.add_option('-r', '--run', dest="do_run", help="run setup.py", default=False, action='store_true')
# configuration options
parser.add_option('--na_inc', dest="conf_na", help="Include Native Americans in stats.", default=False, action='store_true')
parser.add_option('--na_targ_c', dest='target_na_congress',help="Number of Native American Congressional Districts for target", type=int, default=0)
parser.add_option('--na_targ_h', dest='target_na_house',help="Number of Native American House Districts for target", type=int, default=0)
parser.add_option('--na_targ_s', dest='target_na_senate',help="Number of Native American Senate Districts for target", type=int, default=0)
parser.add_option('--hisp_targ_c', dest='target_hisp_congress',help="Number of Hispanic Congressional Districts for target", type=int, default=0)
parser.add_option('--hisp_targ_h', dest='target_hisp_house',help="Number of Hispanic House Districts for target", type=int, default=0)
parser.add_option('--hisp_targ_s', dest='target_hisp_senate',help="Number of Hispanic Senate Districts for target", type=int, default=0)
parser.add_option('--bl_targ_c', dest='target_bl_congress',help="Number of Black Congressional districts for target", type=int, default=0)
parser.add_option('--bl_targ_h', dest='target_bl_house',help="Number of Black House districts for target", type=int, default=0)
parser.add_option('--bl_targ_s', dest='target_bl_senate',help="Number of Black Senate districts for target", type=int, default=0)
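# Example invocation (the script name and values here are hypothetical):
#   python loadcensus.py -F26 -C14 -S38 -H110 --na_inc
# With no individual operation flag (-i/-g/-s/-c/-d/-r) given, all operations
# are enabled below via the 'allops' default.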
(parseResults,numargs)=parser.parse_args()
# include na if there is a positive target, even if not otherwise specified
if ((parseResults.target_na_congress+parseResults.target_na_senate+parseResults.target_na_house)>0) :
parseResults.conf_na = True
allops = not (parseResults.do_install or parseResults.do_getdata or parseResults.do_gensld or parseResults.do_genconf or parseResults.do_dropdb or parseResults.do_run)
if (allops):
parseResults.do_install=True
parseResults.do_getdata=True
parseResults.do_gensld=True
parseResults.do_genconf=True
parseResults.do_dropdb=True
parseResults.do_run=True
if len(numargs) != 0:
parser.error("additional arguments ignored ")
stateFips = parseResults.stateFips
houseDis = parseResults.houseDis
senDis= parseResults.senDis
congDis= parseResults.congDis
if (stateFips==0 or houseDis==0 or senDis==0 or congDis==0):
print "Must supply all district arguments"
raise ValueError
# install dependencies
if (parseResults.do_install):
print "installing dependencies..."
install_dependencies()
# Clear out DB
if (parseResults.do_dropdb):
print 'clearing database ...'
drop_db()
# generate generic sld files
if (parseResults.do_gensld):
print 'generating generic sld files ...'
gensld_none("county")
gensld_none("tract")
gensld_none("block")
gensld_boundaries("county")
gensld_boundaries("tract")
gensld_boundaries("block")
# Retrieve data files
if (parseResults.do_getdata):
print 'retrieving census data ...'
get_census_data(stateFips)
# merge standard variables
# TODO: Refactor entirely in rpy
print 'merging data...'
robjects.r.source("/projects/PublicMapping/DistrictBuilder/docs/loadcensus/mergeCensus.R")
if ( (parseResults.do_genconf) or (parseResults.do_gensld)) :
print 'calculating statistics for configs and slds...'
robjects.r.source("/projects/PublicMapping/DistrictBuilder/docs/loadcensus/calcStats.R")
sum_TOTPOP= robjects.r.sum_TOTPOP[0]
# TODO: Refactor entirely in rpy
# NOTE: robject is returning 6-level quantiles, has_election_data, has_vtd, sum_TOTPOP
has_election_data = robjects.r.has_election_data[0]
if ( parseResults.do_genconf) :
robjects.r.source("/projects/PublicMapping/DistrictBuilder/docs/loadcensus/contiguityOverride.R")
# TODO: should work but has embedded string forwarding
#contiguityOverrideString = robjects.r.contiguityOverrideString
f = open('/projects/PublicMapping/DistrictBuilder/docs/generated_overrides.xml', 'r')
contiguityOverrideString = f.read()
f.close()
# TODO: refactor as matrix of varnames and geographies
if ( parseResults.do_gensld) :
print 'generating choropleth slds ...'
gensld_choro("block","TOTPOP","Total Population",robjects.r.q_block_TOTPOP)
gensld_choro_denquint("block","TOTPOP_H","Percent Hispanic Population",robjects.r.q_block_TOTPOP_H)
gensld_choro_denquint("block","TOTPOP_B","Percent Black Population",robjects.r.q_block_TOTPOP_B)
gensld_choro_denquint("block","TOTPOP_NA","Percent Native American Population",robjects.r.q_block_TOTPOP_NA)
gensld_choro("block","VAP","Voting Age Population",robjects.r.q_block_VAP)
gensld_choro_denquint("block","VAP_H","Percent Voting Age Hispanic Population",robjects.r.q_block_VAP_H)
gensld_choro_denquint("block","VAP_B","Percent Voting Age Black Population",robjects.r.q_block_VAP_B)
gensld_choro_denquint("block","VAP_NA","Percent Voting Age Native American Population",robjects.r.q_block_VAP_NA)
gensld_choro("tract","TOTPOP","Total Population",robjects.r.q_tract_TOTPOP)
gensld_choro_denquint("tract","TOTPOP_H","Percent Total Hispanic Population",robjects.r.q_tract_TOTPOP_H)
gensld_choro_denquint("tract","TOTPOP_B","Percent Black Population",robjects.r.q_tract_TOTPOP_B)
gensld_choro_denquint("tract","TOTPOP_NA","Percent Native American Population",robjects.r.q_tract_TOTPOP_NA)
gensld_choro("tract","VAP","Voting Age Population",robjects.r.q_tract_VAP)
gensld_choro_denquint("tract","VAP_H","Percent Voting Age Hispanic Population",robjects.r.q_tract_VAP_H)
gensld_choro_denquint("tract","VAP_B","Percent Voting Age Black Population",robjects.r.q_tract_VAP_B)
gensld_choro_denquint("tract","VAP_NA","Percent Voting Age Native American Population",robjects.r.q_tract_VAP_NA)
gensld_choro("county","TOTPOP","Total Population",robjects.r.q_county_TOTPOP)
gensld_choro_denquint("county","TOTPOP_H","Percent Hispanic Population",robjects.r.q_county_TOTPOP_H)
gensld_choro_denquint("county","TOTPOP_B","Percent Black Population",robjects.r.q_county_TOTPOP_B)
gensld_choro_denquint("county","TOTPOP_NA","Percent Native American Population",robjects.r.q_county_TOTPOP_NA)
gensld_choro("county","VAP","Voting Age Population",robjects.r.q_county_VAP)
gensld_choro_denquint("county","VAP_H","Percent Voting Age Hispanic Population",robjects.r.q_county_VAP_H)
gensld_choro_denquint("county","VAP_B","Percent Voting Age Black Population",robjects.r.q_county_VAP_B)
gensld_choro_denquint("county","VAP_NA","Percent Voting Age Native American Population",robjects.r.q_county_VAP_NA)
if (has_election_data==1) :
gensld_choro_denquint("block","VOTE_DEM","Percent Predicted Democratic Vote ",robjects.r.q_block_VOTE_DEM)
gensld_choro_denquint("block","VOTE_REP","Percent Predicted Republican Vote ",robjects.r.q_block_VOTE_REP)
gensld_choro("block","VOTE_TOT","Predicted Vote ",robjects.r.q_block_VOTE_TOT)
gensld_choro_denquint("tract","VOTE_DEM","Percent Predicted Democratic Vote ",robjects.r.q_tract_VOTE_DEM)
gensld_choro_denquint("tract","VOTE_REP","Percent Predicted Republican Vote ",robjects.r.q_tract_VOTE_REP)
gensld_choro("tract","VOTE_TOT","Predicted Vote ",robjects.r.q_tract_VOTE_TOT)
gensld_choro_denquint("county","VOTE_DEM","Perecent Predicted Democratic Vote ",robjects.r.q_county_VOTE_DEM)
gensld_choro_denquint("county","VOTE_REP","Percent Predicted Republican Vote ",robjects.r.q_county_VOTE_REP)
gensld_choro("county","VOTE_TOT","Predicted Vote ",robjects.r.q_county_VOTE_TOT)
gensld_choro_denquint("block","VOTE_DEM_N","Percent Predicted Democratic Vote ",robjects.r.q_block_VOTE_DEM_N)
gensld_choro_denquint("block","VOTE_REP_N","Percent Predicted Republican Vote ",robjects.r.q_block_VOTE_REP_N)
gensld_choro("block","VOTE_TOT_N","Predicted Vote ",robjects.r.q_block_VOTE_TOT_N)
gensld_choro_denquint("tract","VOTE_DEM_N","Percent Predicted Democratic Vote ",robjects.r.q_tract_VOTE_DEM_N)
gensld_choro_denquint("tract","VOTE_REP_N","Percent Predicted Republican Vote ",robjects.r.q_tract_VOTE_REP_N)
gensld_choro("tract","VOTE_TOT_N","Predicted Vote ",robjects.r.q_tract_VOTE_TOT_N)
gensld_choro_denquint("county","VOTE_DEM_N","Percent Predicted Democratic Vote ",robjects.r.q_county_VOTE_DEM_N)
gensld_choro_denquint("county","VOTE_REP_N","Percent Predicted Republican Vote ",robjects.r.q_county_VOTE_REP_N)
gensld_choro("county","VOTE_TOT_N","Predicted Vote ",robjects.r.q_county_VOTE_TOT_N)
# generate config file
if (parseResults.do_genconf):
print 'generating config file ... '
gen_config(num_districts_congress=congDis,num_districts_senate=senDis,num_districts_house=houseDis,sum_TOTPOP=sum_TOTPOP,has_election_data=has_election_data,has_vtds=0,conf_na=parseResults.conf_na,
target_na_congress=parseResults.target_na_congress, target_hisp_congress = parseResults.target_hisp_congress, target_bl_congress = parseResults.target_bl_congress,
target_na_house=parseResults.target_na_house, target_hisp_house = parseResults.target_hisp_house, target_bl_house = parseResults.target_bl_house,
target_na_senate=parseResults.target_na_senate, target_hisp_senate = parseResults.target_hisp_senate, target_bl_senate = parseResults.target_bl_senate, contiguityOverrideString=contiguityOverrideString)
if (parseResults.do_run):
    print 'running setup.py ... '
olddir = os.getcwd()
os.chdir("/projects/PublicMapping/DistrictBuilder/django/publicmapping/")
subprocess.check_call(["ls"])
#subprocess.check_call(["setup.py","-v2","/projects/PublicMapping/DistrictBuilder/docs/config.xsd"," /projects/PublicMapping/DistrictBuilder/docs/config_census_generated.xml"])
subprocess.check_call(["./setup.py -v2 /projects/PublicMapping/DistrictBuilder/docs/config.xsd /projects/PublicMapping/DistrictBuilder/docs/config_census_generated.xml"],shell=True)
os.chdir(olddir)
else:
print '\n\n*** Now run: ***\n\n'
print '(cd /projects/PublicMapping/DistrictBuilder/django/publicmapping/; python setup.py -v2 /projects/PublicMapping/DistrictBuilder/docs/config.xsd /projects/PublicMapping/DistrictBuilder/docs/config_census_generated.xml)'
# workaround celeryd first-time startup problem
print 'Starting celeryd ...'
subprocess.check_call(["service","celeryd","start"])
#!/usr/bin/env python
# Framework for loading census data
# Inputs: FIPS state code, list of variables to include as additional subjects
# Requirements:
# - external software: DistrictBuilder, R, gdal, wget, unzip
# TODO -- check for VTD's
import re # regular expressions
import sys # arg lists etc
import glob # globbing
import commands # system commands
import os # os commands
import stat
import subprocess # for external commands
import zipfile # unzipping
import rpy2.robjects as robjects
import shutil
import psycopg2 as dbapi2
import optparse
import string
import time
###
### Globals
###
PUBLICMAPPINGPASS="<PASSWORD>"
# TODO : build in vote geographies, numbers of districts per state
#VOTEGEOGRAPHIES={"county":"COUNTYFP10","tract":"TRACTCE10","block":"BLOCKCE10"}
### clear_publicmapping_db
###
### Truncate database
def clear_publicmapping_db():
    db = dbapi2.connect(database="publicmapping", user="publicmapping", password=PUBLICMAPPINGPASS)
cur = db.cursor()
redtable=["redistricting_characteristic","redistricting_computedcharacteristic","redistricting_computeddistrictscore","redistricting_computedplanscore","redistricting_contiguityoverride","redistricting_district","redistricting_geolevel","redistricting_geounit","redistricting_legislativebody","redistricting_legislativedefault","redistricting_legislativelevel","redistricting_plan","redistricting_profile","redistricting_scoreargument","redistricting_scoredisplay","redistricting_scorefunction","redistricting_scorepanel","redistricting_scorepanel_displays","redistricting_scorepanel_score_functions","redistricting_subject","redistricting_target","redistricting_validationcriteria"]
for i in redtable:
cur.execute("truncate table %s CASCADE" % i)
db.commit()
db.close()
### Drop DB
def drop_db():
olddir = os.getcwd()
os.chdir("/tmp")
subprocess.check_call(["service","tomcat6","stop"])
subprocess.check_call(["service","celeryd","stop"])
subprocess.check_call(["service","apache2","stop"])
subprocess.check_call(["service","apache2","restart"])
subprocess.check_call(["service","postgresql","restart"])
subprocess.check_call(['su postgres -c "dropdb publicmapping"'],shell=True)
subprocess.check_call(['cat /projects/PublicMapping/DistrictBuilder/sql/publicmapping_db.sql | su postgres -c "psql -f - postgres"'],shell=True)
subprocess.check_call(["service","apache2","start"])
subprocess.check_call(["service","tomcat6","start"])
os.chdir(olddir)
### Install dependencies
###
### This attempts to install dependencies using apt-get
###
def install_dependencies():
if (os.path.exists("/usr/bin/ogrinfo")==False) :
cmdarg = 'gdal-bin'
subprocess.check_call(["apt-get","install",cmdarg])
###
### Retrieve data files
###
### This retrieves the census files, unzips, and reprojects them (using ogr2ogr)
def get_census_data(stateFips):
if (stateFips<10) :
stateFips = "0%s" % stateFips
print 'Retrieving census shapefiles...'
# put all data in publicmapping data directory
olddir = os.getcwd()
os.chdir("/projects/PublicMapping/data/")
# obtain state boundary files from census
cenBlockFilePrefix = 'tl_2010_%s_tabblock10' % stateFips
cenTractFilePrefix = 'tl_2010_%s_tract10' % stateFips
cenCountyFilePrefix= 'tl_2010_%s_county10' % stateFips
cmdarg = 'ftp://ftp2.census.gov/geo/tiger/TIGER2010/TABBLOCK/2010/%s.zip' % cenBlockFilePrefix
subprocess.check_call(["wget","-nc",cmdarg])
cmdarg = 'ftp://ftp2.census.gov/geo/tiger/TIGER2010/TRACT/2010/%s.zip' % cenTractFilePrefix
subprocess.check_call(["wget","-N",cmdarg])
cmdarg = 'ftp://ftp2.census.gov/geo/tiger/TIGER2010/COUNTY/2010/%s.zip' % cenCountyFilePrefix
subprocess.check_call(["wget","-N",cmdarg])
# get additional data from our S3 bucket
print 'Retrieving additional data...'
cmdarg = 'https://s3.amazonaws.com/redistricting_supplement_data/redist/%s_redist_data.zip' % stateFips
subprocess.check_call(["wget","-N",cmdarg])
cmdarg = 'https://s3.amazonaws.com/redistricting_supplement_data/redist/%s_contiguity_overrides.csv' % stateFips
subprocess.call(["wget","-N",cmdarg])
print 'Unzipping files ...'
# unzip data files
for i in [ cenBlockFilePrefix, cenTractFilePrefix, cenCountyFilePrefix ] :
zfile = '%s.zip' % i
print ('Unzipping %s' %zfile)
myzip = zipfile.ZipFile(zfile, 'r')
myzip.extractall()
    myzip = zipfile.ZipFile('%s_redist_data.zip' % stateFips, 'r')
    myzip.extractall()
    # Reproject block data
print 'Reprojecting block shapefile...'
if (os.path.exists("census_blocks.shp")) :
os.remove('census_blocks.shp')
if (os.path.exists("census_tracts.shp")) :
os.remove('census_tracts.shp')
if (os.path.exists("census_counties.shp")) :
os.remove('census_counties.shp')
subprocess.check_call(["ogr2ogr",'-overwrite','-t_srs','EPSG:3785','census_blocks.shp','%s.shp' % cenBlockFilePrefix ])
subprocess.check_call(["ogr2ogr",'-overwrite','-t_srs','EPSG:3785','census_tracts.shp','%s.shp' % cenTractFilePrefix])
subprocess.check_call(["ogr2ogr",'-overwrite','-t_srs','EPSG:3785','census_counties.shp','%s.shp' % cenCountyFilePrefix])
# standardize file names
print 'Copying data files...'
shutil.copy('%s_redist_data.csv' %stateFips , 'redist_data.csv' )
if (os.path.exists("redist_overrides.csv")) :
os.remove('redist_overrides.csv')
if (os.path.exists("%s_contiguity_overrides.csv" % stateFips)) :
shutil.copy("%s_contiguity_overrides.csv" % stateFips,'redist_overrides.csv')
os.chdir(olddir)
###
### TEMPLATING - SLD's
###
# general template classes
class DictionaryTemplate:
    def __init__(self, dict=None, **keywords):
        # Use a fresh dict per instance to avoid the shared mutable-default pitfall.
        self.dict = dict if dict is not None else {}
        self.dict.update(keywords)
def __str__(self):
return self._template % self
def __getitem__(self, key):
return self._process(key.split("|"))
def _process(self, l):
arg = l[0]
if len(l) == 1:
if arg in self.dict:
return self.dict[arg]
elif hasattr(self, arg) and callable(getattr(self, arg)):
return getattr(self, arg)()
else:
raise KeyError(arg)
else:
func_name = l[1]
if func_name in self.dict:
func = self.dict[func_name]
else:
func = getattr(self, func_name)
return func(self._process([arg]))
class ListTemplate:
def __init__(self, input_list=[]):
self.input_list = input_list
def __str__(self):
return "\n".join([self._template % x for x in self.input_list])
class Empty_Template(ListTemplate):
_template = """
"""
###
### SLD Skeleton Classes
###
class SldList_Template(DictionaryTemplate):
_template = """<?xml version="1.0" encoding="ISO-8859-1"?>
<StyledLayerDescriptor version="1.0.0" xmlns="http://www.opengis.net/sld" xmlns:ogc="http://www.opengis.net/ogc" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.opengis.net/sld http://schemas.opengis.net/sld/1.0.0/StyledLayerDescriptor.xsd">
<NamedLayer>
<Name>%(layername)s</Name>
<UserStyle>
<Title>%(layertitle)s</Title>
<Abstract>%(layerabs)s</Abstract>
<FeatureTypeStyle>
%(slst|sli)s
%(lst|li)s
%(elst|eli)s
</FeatureTypeStyle>
</UserStyle>
</NamedLayer>
</StyledLayerDescriptor>
"""
class Sld_Poly_Template(ListTemplate):
_template = """
<Rule>
<Title>%(title)s</Title>
<PolygonSymbolizer>
<Fill>
<CssParameter name="fill">%(fill)s</CssParameter>
<CssParameter name="fill-opacity">%(fillopacity)s</CssParameter>
</Fill>
</PolygonSymbolizer>
</Rule>
"""
class Sld_PolyB_Template(ListTemplate):
_template = """
<Rule>
<Title>%(title)s</Title>
<PolygonSymbolizer>
<Fill>
<CssParameter name="fill">%(fill)s</CssParameter>
<CssParameter name="fill-opacity">%(fillopacity)s</CssParameter>
</Fill>
<Stroke>
<CssParameter name="stroke">%(stroke)s</CssParameter>
<CssParameter name="stroke-width">%(strokewidth)s</CssParameter>
<CssParameter name="stroke-opacity">%(strokeopacity)s</CssParameter>
</Stroke>
</PolygonSymbolizer>
</Rule>
"""
# plain fill template
class Sld_Line_Template(ListTemplate):
_template = """
<Rule>
<Title>%(title)s</Title>
<LineSymbolizer>
<Stroke>
<CssParameter name="stroke">%(stroke)s</CssParameter>
<CssParameter name="stroke-width">%(strokewidth)s</CssParameter>
<CssParameter name="stroke-opacity">%(strokeopacity)s</CssParameter>
</Stroke>
</LineSymbolizer>
</Rule>
"""
# min-max range template
class Sld_Range_Template(ListTemplate):
_template = """
<Rule>
<Title>%(bottom)s-%(top)s</Title>
<ogc:Filter>
<ogc:And>
<ogc:PropertyIsLessThan>
<ogc:PropertyName>%(unit)s</ogc:PropertyName>
<ogc:Literal>%(top)s</ogc:Literal>
</ogc:PropertyIsLessThan>
<ogc:PropertyIsGreaterThanOrEqualTo>
<ogc:PropertyName>%(unit)s</ogc:PropertyName>
<ogc:Literal>%(bottom)s</ogc:Literal>
</ogc:PropertyIsGreaterThanOrEqualTo>
</ogc:And>
</ogc:Filter>
<PolygonSymbolizer>
<Fill>
<CssParameter name="fill">%(fill)s</CssParameter>
<CssParameter name="fill-opacity">%(fillopacity)s</CssParameter>
</Fill>
</PolygonSymbolizer>
</Rule>
"""
class Sld_URange_Template(ListTemplate):
_template = """
<Rule>
<Title>%(bottom)s-%(top)s</Title>
<ogc:Filter>
<ogc:PropertyIsGreaterThanOrEqualTo>
<ogc:PropertyName>%(unit)s</ogc:PropertyName>
<ogc:Literal>%(bottom)s</ogc:Literal>
</ogc:PropertyIsGreaterThanOrEqualTo>
</ogc:Filter>
<PolygonSymbolizer>
<Fill>
<CssParameter name="fill">%(fill)s</CssParameter>
<CssParameter name="fill-opacity">%(fillopacity)s</CssParameter>
</Fill>
</PolygonSymbolizer>
</Rule>
"""
def gensld_none(geoname):
target_file = '/projects/PublicMapping/DistrictBuilder/sld/pmp:%s_none.sld' % (geoname)
f = open(target_file,'w')
f.write ( str(SldList_Template(layername="%s No fill" % (geoname),layertitle="%s No Fill" % (geoname) ,layerabs="A style showing the boundaries of a geounit with a transparent fill", slst=[],sli=Empty_Template, lst=[{"title":"Fill","fill":"#FFFFFF","fillopacity":"1.0"}],li=Sld_Poly_Template,elst=[{"title":"Boundary","stroke":"#555555","strokewidth":"3.00","strokeopacity":"1.0"}],eli=Sld_Line_Template)) )
f.write("\n")
f.close()
os.chmod(target_file,stat.S_IRUSR|stat.S_IRGRP|stat.S_IROTH)
def gensld_boundaries(geoname):
target_file = '/projects/PublicMapping/DistrictBuilder/sld/pmp:%s_boundaries.sld' % (geoname)
f = open(target_file,'w')
f.write ( str(SldList_Template(layername="%s Boundaries" % (geoname) ,layertitle="%s Boundaries Only" %(geoname),layerabs="A style showing the boundaries of a geounit", slst=[] ,sli=Empty_Template, lst=[],li=Empty_Template,elst=[{"title":"County Boundaries","fill":"#000000","fillopacity":"0.0","stroke":"#2255FF","strokewidth":"2","strokeopacity":"0.35"}],eli=Sld_PolyB_Template)))
f.write("\n")
f.close()
os.chmod(target_file,stat.S_IRUSR|stat.S_IRGRP|stat.S_IROTH)
#TODO: generalize to any number of choropleths
def gensld_choro(geoname,varname,vartitle,quantiles):
gensld_choro_internal(geoname,varname,vartitle,quantiles,unit="number")
def gensld_choro_internal(geoname,varname,vartitle,quantiles,unit="number"):
# WARNING: sld files need to be lower case to be compatible with postgres views
lvarname = string.lower(varname)
target_file = '/projects/PublicMapping/DistrictBuilder/sld/pmp:%s_%s.sld' % (geoname,lvarname)
varabs="Grayscale choropleth based on quantiles of %s" % (varname)
valuelist= [
{"top": str(quantiles[4]),
"bottom": str(quantiles[3]),
"fill": "#444444",
"fillopacity":"1.0",
"unit":unit},
{"top": str(quantiles[3]),
"bottom": str(quantiles[2]),
"fill": "#777777",
"fillopacity":"1.0",
"unit":unit},
{"top": str(quantiles[2]),
"bottom": str(quantiles[1]),
"fill": "#AAAAAA",
"fillopacity":"1.0",
"unit":unit},
{"top": str(quantiles[1]),
"bottom": str(quantiles[0]),
"fill": "#EEEEEE",
"fillopacity":"1.0",
"unit":unit}]
svaluelist = [{"top": str(quantiles[5]),
"bottom": str(quantiles[4]),
"fill": "#000000",
"fillopacity":"1.0",
"unit":unit}]
f = open(target_file,'w')
f.write(str( SldList_Template(layername=lvarname,layertitle=vartitle,layerabs=varabs,slst=svaluelist,sli=Sld_URange_Template, lst=valuelist,li=Sld_Range_Template,elst=[{"title":"Boundary","stroke":"#555555","strokewidth":"0.25","strokeopacity":"1.0"}],eli=Sld_Line_Template) ))
f.write("\n")
f.close()
os.chmod(target_file,stat.S_IRUSR|stat.S_IRGRP|stat.S_IROTH)
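# For percentage-based subjects the R-computed quantiles are ignored (the last
# argument is accepted only for call-site symmetry, hence "dummy") and fixed
# 20%-wide bands over 0-1 are used instead.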
def gensld_choro_denquint(geoname,varname,vartitle,dummy):
quantiles=[0,0.2,0.4,0.6,0.8,1]
gensld_choro_internal(geoname,varname,vartitle,quantiles,unit="percentage")
### Config file generation
### TODO: has_vtds==1 has not been fully implemented
### TODO: parameterize thresholds?
class Config_Template(DictionaryTemplate):
_template = """<!-- Define Internal Entities to avoid Repeated Entering of Values -->
<!DOCTYPE DistrictBuilder [
<!ENTITY num_districts_congress "%(num_districts_congress)s">
<!ENTITY num_districts_house "%(num_districts_house)s">
<!ENTITY num_districts_senate "%(num_districts_senate)s">
<!ENTITY pop_congress "%(pop_congress)s">
<!ENTITY pop_house "%(pop_house)s">
<!ENTITY pop_senate "%(pop_senate)s">
<!ENTITY pop_congress_min "%(pop_congress_min)s">
<!ENTITY pop_house_min "%(pop_house_min)s">
<!ENTITY pop_senate_min "%(pop_senate_min)s">
<!ENTITY pop_congress_max "%(pop_congress_max)s">
<!ENTITY pop_house_max "%(pop_house_max)s">
<!ENTITY pop_senate_max "%(pop_senate_max)s">
<!ENTITY target_hisp_congress "%(target_hisp_congress)s">
<!ENTITY target_hisp_senate "%(target_hisp_senate)s">
<!ENTITY target_hisp_house "%(target_hisp_house)s">
<!ENTITY target_bl_congress "%(target_bl_congress)s">
<!ENTITY target_bl_senate "%(target_bl_senate)s">
<!ENTITY target_bl_house "%(target_bl_house)s">
<!ENTITY target_na_senate "%(target_na_senate)s">
<!ENTITY target_na_house "%(target_na_house)s">
<!ENTITY target_na_congress "%(target_na_congress)s">
]>
<DistrictBuilder>
<!-- Define legislative bodies referenced in the system. -->
<LegislativeBodies>
<!-- A Legislative body has an ID (for referencing in GeoLevel
definitions later), a name, and a label for plan items
("District" for Congressional, etc) -->
<LegislativeBody id="congress" name="Congressional" member="District %%s" maxdistricts="&num_districts_congress;"/>
<LegislativeBody id="house" name="State House" member="District %%s" maxdistricts="&num_districts_house;" />
<LegislativeBody id="senate" name="State Senate" member="District %%s" maxdistricts="&num_districts_senate;" />
</LegislativeBodies>
<!-- A list of subjects referenced in the system. -->
<Subjects>
<!-- A Subject is a measurement type, such as "Total Population".
The subject is mapped to an attribute during the import phase,
and contains a long and short display name. Subjects have IDs
for referencing in GeoLevel definitions later. -->
<Subject id="vap_b" field="VAP_B" name="African-American Voting Age Population" short_name="Black VAP " displayed="true" sortkey="1" percentage_denominator="vap" />
<Subject id="vap_h" field="VAP_H" name="Hispanic or Latino voting age population" short_name="Hispanic VAP" displayed="true" sortkey="2" percentage_denominator="vap" />
<Subject id="vap_na" field="VAP_NA" name="Native American Voting Age Population" short_name="Nat Amer VAP" displayed="true" sortkey="4" percentage_denominator="vap" />
%(start_elec)s
<Subject id="vote_dem" field="VOTE_DEM" name="num likely Democratic voters" short_name="Democratic voters" displayed="true" sortkey="3" percentage_denominator="vote_tot" />
<Subject id="vote_rep" field="VOTE_REP" name="num likely Republican voters" short_name="Republican voters" displayed="true" sortkey="5" percentage_denominator="vote_tot" />
<Subject id="vote_tot" field="VOTE_TOT" name="num likely Rep/Dem voters" short_name="Rep+ Dem vote" displayed="false" sortkey="6" />
<Subject id="vote_dem_norm" field="VOTE_DEM_N" name="num of likely Democratic voters normalized to 50/50 state baseline" short_name="Normal Dem vote" displayed="true" sortkey="18" percentage_denominator="vote_tot_norm" />
<Subject id="vote_rep_norm" field="VOTE_REP_N" name="num of likely Republican voters normalized to 50/50 state baseline" short_name="Normal Rep vote" displayed="true" sortkey="19" percentage_denominator="vote_tot_norm" />
<Subject id="vote_tot_norm" field="VOTE_TOT_N" name="number of likely Republican and Democratic voters normalized to 50/50 state baseline" short_name="Normal 2-party vote" displayed="false" sortkey="20" />
%(end_elec)s
<Subject id="vap" field="VAP" name="Voting Age Population" short_name="vap" displayed="true" sortkey="7" />
<Subject id="totpop_b" field="TOTPOP_B" name="African-American" short_name="Black" displayed="false" sortkey="8" percentage_denominator="totpop"/>
<Subject id="totpop_h" field="TOTPOP_H" name="Hispanic or Latino" short_name="Hispanic" displayed="false" sortkey="9" percentage_denominator="totpop"/>
<Subject id="totpop_na" field="TOTPOP_NA" name="Native American" short_name="Nat Amer" displayed="false" sortkey="10" percentage_denominator="totpop"/>
<Subject id="totpop_a" field="TOTPOP_A" name="Asian Population" short_name="Asian" displayed="false" sortkey="11" percentage_denominator="totpop"/>
<Subject id="totpop_pi" field="TOTPOP_PI" name="Pacific Islander" short_name="Pac Isl" displayed="false" sortkey="12" percentage_denominator="totpop"/>
<Subject id="totpop_wnh" field="TOTPOP_WNH" name="White Non-Hispanic" short_name="White" displayed="false" sortkey="13" percentage_denominator="totpop"/>
<Subject id="totpop" field="TOTPOP" name="Total Population" short_name="Total Pop." displayed="true" sortkey="14"/>
<Subject id="vap_a" field="VAP_A" name="Asian Voting Age Population" short_name="Asian VAP" displayed="true" sortkey="15" percentage_denominator="vap" />
<Subject id="vap_pi" field="VAP_PI" name="Pacific Islander Voting Age Population" short_name="Pacific VAP" displayed="true" sortkey="16" percentage_denominator="vap"/>
<Subject id="vap_wnh" field="VAP_WNH" name="White Non-Hispanic Voting Age Population" short_name="White VAP" displayed="true" sortkey="17" percentage_denominator="vap"/>
</Subjects>
<Scoring>
<ScoreFunctions>
<!-- A district score that returns a literal value -->
<ScoreFunction id="district_poptot" type="district"
calculator="publicmapping.redistricting.calculators.Sum"
label="Total Pop" user_selectable="true">
<SubjectArgument name="value1" ref="totpop" />
</ScoreFunction>
<ScoreFunction id="district_totpop_b" type="district"
calculator="publicmapping.redistricting.calculators.Sum"
label="Black VAP" user_selectable="true">
<SubjectArgument name="value1" ref="totpop_b" />
</ScoreFunction>
<ScoreFunction id="district_totpop_h" type="district"
calculator="publicmapping.redistricting.calculators.Sum"
label="Hispanic VAP" user_selectable="true">
<SubjectArgument name="value1" ref="totpop_h" />
</ScoreFunction>
<ScoreFunction id="district_totpop_a" type="district"
calculator="publicmapping.redistricting.calculators.Sum"
label="Asian VAP" user_selectable="true">
<SubjectArgument name="value1" ref="totpop_a" />
</ScoreFunction>
<ScoreFunction id="district_totpop_na" type="district"
calculator="publicmapping.redistricting.calculators.Sum"
label="Native American VAP" user_selectable="true">
<SubjectArgument name="value1" ref="totpop_na" />
</ScoreFunction>
<ScoreFunction id="district_totpop_pi" type="district"
calculator="publicmapping.redistricting.calculators.Sum"
label="Pacific Islander VAP" user_selectable="true">
<SubjectArgument name="value1" ref="totpop_pi" />
</ScoreFunction>
<ScoreFunction id="district_totpop_wnh" type="district"
calculator="publicmapping.redistricting.calculators.Sum"
label="Pacific Islander VAP" user_selectable="true">
<SubjectArgument name="value1" ref="totpop_wnh" />
</ScoreFunction>
<ScoreFunction id="district_vap" type="district"
calculator="publicmapping.redistricting.calculators.Sum"
label="VAP" user_selectable="true">
<SubjectArgument name="value1" ref="vap" />
</ScoreFunction>
<ScoreFunction id="district_vap_b" type="district"
calculator="publicmapping.redistricting.calculators.Sum"
label="Black VAP" user_selectable="true">
<SubjectArgument name="value1" ref="vap_b" />
</ScoreFunction>
<ScoreFunction id="district_vap_h" type="district"
calculator="publicmapping.redistricting.calculators.Sum"
label="Hispanic VAP" user_selectable="true">
<SubjectArgument name="value1" ref="vap_h" />
</ScoreFunction>
<ScoreFunction id="district_vap_a" type="district"
calculator="publicmapping.redistricting.calculators.Sum"
label="Asian VAP" user_selectable="true">
<SubjectArgument name="value1" ref="vap_a" />
</ScoreFunction>
<ScoreFunction id="district_vap_na" type="district"
calculator="publicmapping.redistricting.calculators.Sum"
label="Native American VAP" user_selectable="true">
<SubjectArgument name="value1" ref="vap_na" />
</ScoreFunction>
<ScoreFunction id="district_vap_pi" type="district"
calculator="publicmapping.redistricting.calculators.Sum"
label="Pacific Islander VAP" user_selectable="true">
<SubjectArgument name="value1" ref="vap_pi" />
</ScoreFunction>
<ScoreFunction id="district_vap_wnh" type="district"
calculator="publicmapping.redistricting.calculators.Sum"
label="Pacific Islander VAP" user_selectable="true">
<SubjectArgument name="value1" ref="vap_wnh" />
</ScoreFunction>
<!-- A district score that returns a percentage -->
<ScoreFunction id="district_blkvap_percent" type="district"
calculator="publicmapping.redistricting.calculators.Percent"
label="Black VAP %%" user_selectable="true">
<SubjectArgument name="numerator" ref="vap_b" />
<SubjectArgument name="denominator" ref="vap" />
</ScoreFunction>
<ScoreFunction id="district_blkvap_thresh" type="district"
calculator="publicmapping.redistricting.calculators.Threshold"
label="Black VAP Threshold">
<ScoreArgument name="value" ref="district_blkvap_percent" />
<Argument name="threshold" value="0.5" />
</ScoreFunction>
<ScoreFunction id="district_hispvap_percent" type="district"
calculator="publicmapping.redistricting.calculators.Percent"
label="Hisp. VAP %%" user_selectable="true">
<SubjectArgument name="numerator" ref="vap_h" />
<SubjectArgument name="denominator" ref="vap" />
</ScoreFunction>
<ScoreFunction id="district_hispvap_thresh" type="district"
calculator="publicmapping.redistricting.calculators.Threshold"
label="Hisp. VAP Threshold">
<ScoreArgument name="value" ref="district_hispvap_percent" />
<Argument name="threshold" value="0.5" />
</ScoreFunction>
<ScoreFunction id="district_navap_percent" type="district"
calculator="publicmapping.redistricting.calculators.Percent"
label="Native American VAP %%" user_selectable="true">
<SubjectArgument name="numerator" ref="vap_na" />
<SubjectArgument name="denominator" ref="vap" />
</ScoreFunction>
<ScoreFunction id="district_navap_thresh" type="district"
calculator="publicmapping.redistricting.calculators.Threshold"
label="Native American VAP Threshold">
<ScoreArgument name="value" ref="district_navap_percent" />
<Argument name="threshold" value="0.5" />
</ScoreFunction>
<ScoreFunction id="district_avap_percent" type="district"
calculator="publicmapping.redistricting.calculators.Percent"
label="Asian VAP %%" user_selectable="true">
<SubjectArgument name="numerator" ref="vap_a" />
<SubjectArgument name="denominator" ref="vap" />
</ScoreFunction>
<ScoreFunction id="district_avap_thresh" type="district"
calculator="publicmapping.redistricting.calculators.Threshold"
label="Asian VAP Threshold">
<ScoreArgument name="value" ref="district_avap_percent" />
<Argument name="threshold" value="0.5" />
</ScoreFunction>
<ScoreFunction id="district_pivap_percent" type="district"
calculator="publicmapping.redistricting.calculators.Percent"
label="Pacific Islander VAP %%" user_selectable="true">
<SubjectArgument name="numerator" ref="vap_pi" />
<SubjectArgument name="denominator" ref="vap" />
</ScoreFunction>
<ScoreFunction id="district_pivap_thresh" type="district"
calculator="publicmapping.redistricting.calculators.Threshold"
label="Pacific Islander VAP Threshold">
<ScoreArgument name="value" ref="district_pivap_percent" />
<Argument name="threshold" value="0.5" />
</ScoreFunction>
<ScoreFunction id="district_wnhvap_percent" type="district"
calculator="publicmapping.redistricting.calculators.Percent"
label="White VAP %%" user_selectable="true">
<SubjectArgument name="numerator" ref="vap_wnh" />
<SubjectArgument name="denominator" ref="vap" />
</ScoreFunction>
<ScoreFunction id="district_wnhvap_thresh" type="district"
calculator="publicmapping.redistricting.calculators.Threshold"
label="White VAP Threshold">
<ScoreArgument name="value" ref="district_wnhvap_percent" />
<Argument name="threshold" value="0.5" />
</ScoreFunction>
%(start_elec)s
<ScoreFunction id="district_vote" type="district"
calculator="publicmapping.redistricting.calculators.Sum"
label="Estimated votes" user_selectable="true">
<SubjectArgument name="value1" ref="vote_tot" />
</ScoreFunction>
<ScoreFunction id="district_vote_dem" type="district"
calculator="publicmapping.redistricting.calculators.Sum"
label="Estimated Democratic votes" user_selectable="true">
<SubjectArgument name="value1" ref="vote_dem" />
</ScoreFunction>
<ScoreFunction id="district_vote_rep" type="district"
calculator="publicmapping.redistricting.calculators.Sum"
        label="Estimated Republican votes" user_selectable="true">
<SubjectArgument name="value1" ref="vote_rep" />
</ScoreFunction>
<ScoreFunction id="district_vote_dem_percent" type="district"
calculator="publicmapping.redistricting.calculators.Percent"
label="Democratic Predicted Vote %%" user_selectable="true">
<SubjectArgument name="numerator" ref="vote_dem" />
<SubjectArgument name="denominator" ref="vote_tot" />
</ScoreFunction>
<ScoreFunction id="district_vote_dem_thresh" type="district"
calculator="publicmapping.redistricting.calculators.Threshold"
label="Democratic Predicted Vote Threshold">
<ScoreArgument name="value" ref="district_vote_dem_percent" />
<Argument name="threshold" value="0.5" />
</ScoreFunction>
<ScoreFunction id="district_vote_rep_percent" type="district"
calculator="publicmapping.redistricting.calculators.Percent"
label="Republican Predicted Vote %%" user_selectable="true">
<SubjectArgument name="numerator" ref="vote_rep" />
<SubjectArgument name="denominator" ref="vote_tot" />
</ScoreFunction>
<ScoreFunction id="district_vote_rep_thresh" type="district"
calculator="publicmapping.redistricting.calculators.Threshold"
label="Republican Predicted Vote Threshold">
<ScoreArgument name="value" ref="district_vote_rep_percent" />
<Argument name="threshold" value="0.5" />
</ScoreFunction>
%(end_elec)s
<!-- A district score that generates classes based on a couple
ranges around a mean value. -->
<ScoreFunction id="district_poptot_uitarget_congress" type="district"
calculator="publicmapping.redistricting.calculators.Target">
<SubjectArgument name="value" ref="totpop" />
<Argument name="target" value="&pop_congress;" />
<Argument name="range1" value="0.005"/>
<Argument name="range2" value="0.010"/>
</ScoreFunction>
<ScoreFunction id="district_poptot_uitarget_house" type="district"
calculator="publicmapping.redistricting.calculators.Target">
<SubjectArgument name="value" ref="totpop" />
<Argument name="target" value="%(pop_house)s" />
<Argument name="range1" value="0.05" />
<Argument name="range2" value="0.10" />
</ScoreFunction>
<ScoreFunction id="district_poptot_uitarget_senate" type="district"
calculator="publicmapping.redistricting.calculators.Target">
<SubjectArgument name="value" ref="totpop" />
<Argument name="target" value="%(pop_senate)s" />
<Argument name="range1" value="0.05" />
<Argument name="range2" value="0.10" />
</ScoreFunction>
<!-- A district score that returns 1(T) if the subject value
is between the ranges, otherwise returns 0(F). -->
<ScoreFunction id="district_poptot_range" type="district"
calculator="publicmapping.redistricting.calculators.Range"
label="Tot Pop Range">
<SubjectArgument name="value" ref="totpop" />
<Argument name="min" value="&pop_congress_min;" />
<Argument name="max" value="&pop_congress_max;" />
</ScoreFunction>
<!-- A district score that is threshold dependent, and returns
T/F; this example uses 2 score functions: 1 to combine a
set of subjects, and 2 to divide the sum over another
subject. -->
<ScoreFunction id="district_mintot" type="district"
calculator="publicmapping.redistricting.calculators.Sum">
<SubjectArgument name="value1" ref="totpop_b" />
<SubjectArgument name="value2" ref="totpop_h" />
<SubjectArgument name="value3" ref="totpop_na" />
</ScoreFunction>
<ScoreFunction id="district_majmin" type="district"
calculator="publicmapping.redistricting.calculators.DivideAndThreshold" >
<ScoreArgument name="numerator" ref="district_mintot" />
<SubjectArgument name="denominator" ref="totpop" />
<Argument name="threshold" value="0.5" />
</ScoreFunction>
<!-- A custom calculator to calculate compactness, and return
the raw compactness score. -->
<ScoreFunction id="district_schwartzberg" type="district"
calculator="publicmapping.redistricting.calculators.Schwartzberg"
label="Compactness" user_selectable="true">
</ScoreFunction>
<!-- A custom calculator to do contiguity, and is boolean. -->
<ScoreFunction id="district_contiguous" type="district"
calculator="publicmapping.redistricting.calculators.Contiguity"
label="Contiguous" user_selectable="true">
</ScoreFunction>
<!-- A plan score that aggregates all literal values -->
<ScoreFunction id="plan_sum_equipop" type="plan"
calculator="publicmapping.redistricting.calculators.Sum"
label="Equal Population">
<ScoreArgument name="value1" ref="district_poptot_range" />
</ScoreFunction>
<ScoreFunction id="plan_all_equipop" type="plan"
calculator="publicmapping.redistricting.calculators.Threshold" >
<ScoreArgument name="value" ref="plan_sum_equipop" />
<Argument name="threshold" value="0" />
</ScoreFunction>
<!-- A plan score that aggregates all districts over a threshold -->
<ScoreFunction id="plan_count_majmin" type="plan"
calculator="publicmapping.redistricting.calculators.Sum">
<ScoreArgument name="value1" ref="district_majmin" />
</ScoreFunction>
<ScoreFunction id="plan_blkvap_thresh" type="plan"
calculator="publicmapping.redistricting.calculators.Sum"
label="Majority Black Districts" user_selectable="true">
<ScoreArgument name="value1" ref="district_blkvap_thresh" />
</ScoreFunction>
<ScoreFunction id="plan_hispvap_thresh" type="plan"
calculator="publicmapping.redistricting.calculators.Sum"
label="Majority Hispanic Districts" user_selectable="true">
<ScoreArgument name="value1" ref="district_hispvap_thresh" />
</ScoreFunction>
<ScoreFunction id="plan_navap_thresh" type="plan"
calculator="publicmapping.redistricting.calculators.Sum"
        label="Majority Native American Districts" user_selectable="true">
<ScoreArgument name="value1" ref="district_navap_thresh" />
</ScoreFunction>
<ScoreFunction id="plan_avap_thresh" type="plan"
calculator="publicmapping.redistricting.calculators.Sum"
label="Majority Asian Districts" user_selectable="true">
<ScoreArgument name="value1" ref="district_avap_thresh" />
</ScoreFunction>
<ScoreFunction id="plan_pivap_thresh" type="plan"
calculator="publicmapping.redistricting.calculators.Sum">
<ScoreArgument name="value1" ref="district_pivap_thresh" />
</ScoreFunction>
<!-- A plan score that evaluates a threshold, and returns T/F.
This plan score checks that all districts are within the
population limits. -->
<ScoreFunction id="plan_poptot_inrange" type="plan"
calculator="publicmapping.redistricting.calculators.Threshold">
<ScoreArgument name="value" ref="district_poptot_range" />
<Argument name="threshold" value="0" />
</ScoreFunction>
<!-- A plan score that evaluates all districts, and returns
1(T) if there is more than 0 districts that have a minority
majority. -->
<ScoreFunction id="plan_major_minor" type="plan"
calculator="publicmapping.redistricting.calculators.Threshold"
label="Majority-Minority">
<ScoreArgument name="value" ref="district_majmin" />
<Argument name="threshold" value="0" />
</ScoreFunction>
<ScoreFunction id="plan_contiguous" type="plan"
calculator="publicmapping.redistricting.calculators.Sum"
label="Contiguous">
<ScoreArgument name="value1" ref="district_contiguous"/>
</ScoreFunction>
<ScoreFunction id="b_plan_congress_noncontiguous" type="plan"
calculator="publicmapping.redistricting.calculators.Contiguity"
label="Contiguous">
<Argument name="target" value="&num_districts_congress;" />
</ScoreFunction>
<ScoreFunction id="b_plan_house_noncontiguous" type="plan"
calculator="publicmapping.redistricting.calculators.Contiguity"
label="Contiguous">
<Argument name="target" value="&num_districts_house;" />
</ScoreFunction>
<ScoreFunction id="b_plan_senate_noncontiguous" type="plan"
calculator="publicmapping.redistricting.calculators.Contiguity"
label="Contiguous">
<Argument name="target" value="&num_districts_senate;" />
</ScoreFunction>
<!-- interval score function for population -->
<ScoreFunction id="a_congressional_population" type="district"
label="Tot Pop Range (Congress)" user_selectable="true"
description="Population interval calculator for congressional."
calculator="publicmapping.redistricting.calculators.Interval">
<SubjectArgument name="subject" ref="totpop" />
<Argument name="target" value="&pop_congress;" />
<Argument name="bound1" value=".005" />
<Argument name="bound2" value=".01" />
</ScoreFunction>
<ScoreFunction id="a_house_population" type="district"
label="Tot Pop Range (House)" user_selectable="true"
description="Population interval calculator for house."
calculator="publicmapping.redistricting.calculators.Interval">
<SubjectArgument name="subject" ref="totpop" />
<Argument name="target" value="%(pop_house)s" />
<Argument name="bound1" value=".005" />
<Argument name="bound2" value=".01" />
</ScoreFunction>
<ScoreFunction id="a_senate_population" type="district"
label="Tot Pop Range (Senate)" user_selectable="true"
description="Population interval calculator for senate."
calculator="publicmapping.redistricting.calculators.Interval">
<SubjectArgument name="subject" ref="totpop" />
<Argument name="target" value="%(pop_senate)s" />
<Argument name="bound1" value=".005" />
<Argument name="bound2" value=".01" />
</ScoreFunction>
<!-- leaderboard functions -->
<ScoreFunction id="a_congress_plan_count_districts" type="plan"
calculator="publicmapping.redistricting.calculators.CountDistricts"
label="Count Districts"
description="The number of districts in a Congressional redistricting plan must be &num_districts_congress;.">
<Argument name="target" value="&num_districts_congress;" />
</ScoreFunction>
<ScoreFunction id="a_house_plan_count_districts" type="plan"
calculator="publicmapping.redistricting.calculators.CountDistricts"
label="Count Districts"
description="The number of districts in a House of Delegates redistricting plan must be &num_districts_house;.">
<Argument name="target" value="&num_districts_house;" />
</ScoreFunction>
<ScoreFunction id="a_senate_plan_count_districts" type="plan"
calculator="publicmapping.redistricting.calculators.CountDistricts"
label="Count Districts"
description="The number of districts in a State Senate redistricting plan must be &num_districts_senate;.">
<Argument name="target" value="&num_districts_senate;" />
</ScoreFunction>
<ScoreFunction id="a_congress_plan_equipopulation_validation" type="plan"
calculator="publicmapping.redistricting.calculators.Equipopulation"
label="Target Pop. &pop_congress;"
description="The population of each Congressional district must be &pop_congress_min;-&pop_congress_max;">
<Argument name="min" value="&pop_congress_min;"/>
<Argument name="max" value="&pop_congress_max;"/>
<SubjectArgument name="value" ref="totpop"/>
<Argument name="validation" value="1"/>
</ScoreFunction>
<ScoreFunction id="a_congress_plan_equipopulation_summary" type="plan"
calculator="publicmapping.redistricting.calculators.Equipopulation"
label="Target Pop. &pop_congress;"
description="The population of each Congressional district must be &pop_congress_min;-&pop_congress_max;">
<Argument name="min" value="&pop_congress_min;"/>
<Argument name="max" value="&pop_congress_max;"/>
<SubjectArgument name="value" ref="totpop"/>
<Argument name="target" value="&num_districts_congress;"/>
</ScoreFunction>
<ScoreFunction id="a_senate_plan_equipopulation_validation" type="plan"
calculator="publicmapping.redistricting.calculators.Equipopulation"
label="Target Pop. %(pop_senate)s"
description="The population of each Senate district must be &pop_senate_min;-&pop_senate_max;">
<Argument name="min" value="&pop_senate_min;"/>
<Argument name="max" value="&pop_senate_max;"/>
<SubjectArgument name="value" ref="totpop"/>
<Argument name="validation" value="1"/>
</ScoreFunction>
<ScoreFunction id="a_senate_plan_equipopulation_summary" type="plan"
calculator="publicmapping.redistricting.calculators.Equipopulation"
label="Target Pop. %(pop_senate)s"
description="The population of each Senate district must be &pop_senate_min;-&pop_senate_max;">
<Argument name="min" value="&pop_senate_min;"/>
<Argument name="max" value="&pop_senate_max;"/>
<SubjectArgument name="value" ref="totpop"/>
<Argument name="target" value="&num_districts_senate;"/>
</ScoreFunction>
<ScoreFunction id="a_house_plan_equipopulation_validation" type="plan"
calculator="publicmapping.redistricting.calculators.Equipopulation"
label="Target Pop. %(pop_house)s"
description="The population of each House district must be &pop_house_min;-&pop_house_max;">
<Argument name="min" value="&pop_house_min;"/>
<Argument name="max" value="&pop_house_max;"/>
<SubjectArgument name="value" ref="totpop"/>
<Argument name="validation" value="1"/>
</ScoreFunction>
<ScoreFunction id="a_house_plan_equipopulation_summary" type="plan"
calculator="publicmapping.redistricting.calculators.Equipopulation"
label="Target Pop. %(pop_house)s"
description="The population of each House district must be &pop_house_min;-&pop_house_max;">
<Argument name="min" value="&pop_house_min;"/>
<Argument name="max" value="&pop_house_max;"/>
<SubjectArgument name="value" ref="totpop"/>
<Argument name="target" value="&num_districts_house;"/>
</ScoreFunction>
<ScoreFunction id="plan_all_blocks_assigned" type="plan"
calculator="publicmapping.redistricting.calculators.AllBlocksAssigned"
label="All Blocks Assigned"
description="All blocks in the plan must be assigned.">
</ScoreFunction>
<ScoreFunction id="plan_all_contiguous" type="plan"
calculator="publicmapping.redistricting.calculators.AllContiguous"
label="All Contiguous"
description="Contiguity means that every part of a district must be reachable from every other part without crossing the district's borders. All districts within a plan must be contiguous. Water contiguity is permitted. 'Point contiguity' or 'touch-point contiguity' where two sections of a district are connected at a single point is not permitted.">
</ScoreFunction>
%(start_elec)s
<ScoreFunction id="plan_competitiveness" type="plan"
calculator="publicmapping.redistricting.calculators.Competitiveness"
label="Competitiveness"
        description="Each plan's overall political competitiveness is determined by averaging each district's 'partisan differential'. The partisan differential of each district is calculated by subtracting the Democratic 'partisan index' from the Republican 'partisan index'.<br/><br/>'Heavily' competitive districts are districts with partisan differentials of less than or equal to 5%%. 'Generally' competitive districts are districts with partisan differentials of greater than 5%% but less than 10%%.">
<SubjectArgument name="democratic" ref="vote_dem" />
<SubjectArgument name="republican" ref="vote_rep" />
</ScoreFunction>
%(end_elec)s
<ScoreFunction id="plan_equivalence" type="plan"
calculator="publicmapping.redistricting.calculators.Equivalence"
label="Equal Population"
description="The Equipopulation score is the difference between the district with the highest population and the district with the lowest population.">
<SubjectArgument name="value" ref="totpop" />
</ScoreFunction>
<ScoreFunction id="plan_majority_minority_blk_congress" type="plan"
calculator="publicmapping.redistricting.calculators.MajorityMinority"
label="Black VAP Majority (> 50%%)"
description="Compliance with the Voting Rights Act will be assumed if maps include a minority-majority district in any area where a minority group is (as described in Thornburg V. Gingles, 478 U.S. 30, 49 (1986)) 'sufficiently large and geographically compact to constitute a majority in a single-member district'.">
<SubjectArgument name="population" ref="vap" />
<SubjectArgument name="minority1" ref="vap_b" />
<Argument name="target" value="&target_bl_congress;" />
</ScoreFunction>
<ScoreFunction id="plan_majority_minority_blk_house" type="plan"
calculator="publicmapping.redistricting.calculators.MajorityMinority"
label="Black VAP Majority (> 50%%)"
description="Compliance with the Voting Rights Act will be assumed if maps include a minority-majority district in any area where a minority group is (as described in Thornburg V. Gingles, 478 U.S. 30, 49 (1986)) 'sufficiently large and geographically compact to constitute a majority in a single-member district'.">
<SubjectArgument name="population" ref="vap" />
<SubjectArgument name="minority1" ref="vap_b" />
<Argument name="target" value="&target_bl_house;" />
</ScoreFunction>
<ScoreFunction id="plan_majority_minority_blk_senate" type="plan"
calculator="publicmapping.redistricting.calculators.MajorityMinority"
label="Black VAP Majority (> 50%%)"
description="Compliance with the Voting Rights Act will be assumed if maps include a minority-majority district in any area where a minority group is (as described in Thornburg V. Gingles, 478 U.S. 30, 49 (1986)) 'sufficiently large and geographically compact to constitute a majority in a single-member district'.">
<SubjectArgument name="population" ref="vap" />
<SubjectArgument name="minority1" ref="vap_b" />
<Argument name="target" value="&target_bl_senate;" />
</ScoreFunction>
<ScoreFunction id="plan_majority_minority_hisp_congress" type="plan"
calculator="publicmapping.redistricting.calculators.MajorityMinority"
label="Hisp. VAP Majority (> 50%%)"
description="Compliance with the Voting Rights Act will be assumed if maps include a minority-majority district in any area where a minority group is (as described in Thornburg V. Gingles, 478 U.S. 30, 49 (1986)) 'sufficiently large and geographically compact to constitute a majority in a single-member district'.">
<SubjectArgument name="population" ref="vap" />
<SubjectArgument name="minority1" ref="vap_h" />
<Argument name="target" value="&target_hisp_congress;" />
</ScoreFunction>
<ScoreFunction id="plan_majority_minority_hisp_house" type="plan"
calculator="publicmapping.redistricting.calculators.MajorityMinority"
label="Hisp. VAP Majority (> 50%%)"
description="Compliance with the Voting Rights Act will be assumed if maps include a minority-majority district in any area where a minority group is (as described in Thornburg V. Gingles, 478 U.S. 30, 49 (1986)) 'sufficiently large and geographically compact to constitute a majority in a single-member district'.">
<SubjectArgument name="population" ref="vap" />
<SubjectArgument name="minority1" ref="vap_h" />
<Argument name="target" value="&target_hisp_house;" />
</ScoreFunction>
<ScoreFunction id="plan_majority_minority_hisp_senate" type="plan"
calculator="publicmapping.redistricting.calculators.MajorityMinority"
label="Hisp. VAP Majority (> 50%%)"
description="Compliance with the Voting Rights Act will be assumed if maps include a minority-majority district in any area where a minority group is (as described in Thornburg V. Gingles, 478 U.S. 30, 49 (1986)) 'sufficiently large and geographically compact to constitute a majority in a single-member district'.">
<SubjectArgument name="population" ref="vap" />
<SubjectArgument name="minority1" ref="vap_h" />
<Argument name="target" value="&target_hisp_senate;" />
</ScoreFunction>
<ScoreFunction id="plan_majority_minority_na_congress" type="plan"
calculator="publicmapping.redistricting.calculators.MajorityMinority"
label="Native American Majority (> 50%%)"
description="Compliance with the Voting Rights Act will be assumed if maps include a minority-majority district in any area where a minority group is (as described in Thornburg V. Gingles, 478 U.S. 30, 49 (1986)) 'sufficiently large and geographically compact to constitute a majority in a single-member district'.">
<SubjectArgument name="population" ref="vap" />
<SubjectArgument name="minority1" ref="vap_na" />
<Argument name="target" value="&target_na_congress;" />
</ScoreFunction>
<ScoreFunction id="plan_majority_minority_na_house" type="plan"
calculator="publicmapping.redistricting.calculators.MajorityMinority"
label="Native American Majority (> 50%%)"
description="Compliance with the Voting Rights Act will be assumed if maps include a minority-majority district in any area where a minority group is (as described in Thornburg V. Gingles, 478 U.S. 30, 49 (1986)) 'sufficiently large and geographically compact to constitute a majority in a single-member district'.">
<SubjectArgument name="population" ref="vap" />
<SubjectArgument name="minority1" ref="vap_na" />
<Argument name="target" value="&target_na_house;" />
</ScoreFunction>
<ScoreFunction id="plan_majority_minority_na_senate" type="plan"
calculator="publicmapping.redistricting.calculators.MajorityMinority"
label="Native American Majority (> 50%%)"
description="Compliance with the Voting Rights Act will be assumed if maps include a minority-majority district in any area where a minority group is (as described in Thornburg V. Gingles, 478 U.S. 30, 49 (1986)) 'sufficiently large and geographically compact to constitute a majority in a single-member district'.">
<SubjectArgument name="population" ref="vap" />
<SubjectArgument name="minority1" ref="vap_na" />
<Argument name="target" value="&target_na_senate;" />
</ScoreFunction>
<ScoreFunction id="plan_majority_minority" type="plan"
calculator="publicmapping.redistricting.calculators.MajorityMinority"
label="Majority Minority District"
description="Compliance with the Voting Rights Act will be assumed if maps include a minority-majority district in any area where a minority group is (as described in Thornburg V. Gingles, 478 U.S. 30, 49 (1986)) 'sufficiently large and geographically compact to constitute a majority in a single-member district'.">
<SubjectArgument name="population" ref="vap" />
<SubjectArgument name="minority1" ref="vap_b" />
<SubjectArgument name="minority2" ref="vap_h" />
<SubjectArgument name="minority3" ref="vap_na" />
<Argument name="validation" value="1" />
</ScoreFunction>
%(start_elec)s
<ScoreFunction id="plan_repfairness" type="plan"
calculator="publicmapping.redistricting.calculators.RepresentationalFairness"
label="Representational Fairness"
        description="Representational fairness is increased when the percentage of districts a party would likely win (based upon the 'partisan index' used to determine Competitiveness) closely mirrors that party's percentage of the statewide vote." >
<Argument name="range" value="0.05" />
<SubjectArgument name="normalized democratic" ref="vote_dem_norm" />
<SubjectArgument name="normalized republican" ref="vote_rep_norm" />
</ScoreFunction>
%(end_elec)s
<ScoreFunction id="plan_schwartzberg" type="plan"
calculator="publicmapping.redistricting.calculators.Schwartzberg"
label="Average Compactness"
description="The competition is using the 'Schwartzberg' compactness measure. This measure is a ratio of the perimeter of the district to the circumference of the circle whose area is equal to the area of the district." >
</ScoreFunction>
</ScoreFunctions>
<ScorePanels>
<ScorePanel id="panel_equipop_all" type="plan" position="1"
title="Equipopulation" template="leaderboard_panel_all.html">
<Score ref="plan_equivalence" />
</ScorePanel>
<ScorePanel id="panel_equipop_mine" type="plan" position="1"
title="Equipopulation" template="leaderboard_panel_mine.html">
<Score ref="plan_equivalence" />
</ScorePanel>
<ScorePanel id="panel_compact_all" type="plan" position="2"
title="Schwartzberg" template="leaderboard_panel_all.html">
<Score ref="plan_schwartzberg" />
</ScorePanel>
<ScorePanel id="panel_compact_mine" type="plan" position="2"
title="Schwartzberg" template="leaderboard_panel_mine.html">
<Score ref="plan_schwartzberg" />
</ScorePanel>
%(start_elec)s
<ScorePanel id="panel_competitive_all" type="plan" position="3"
title="Competitiveness" template="leaderboard_panel_all.html">
<Score ref="plan_competitiveness" />
</ScorePanel>
<ScorePanel id="panel_competitive_mine" type="plan" position="3"
title="Competitiveness" template="leaderboard_panel_mine.html">
<Score ref="plan_competitiveness" />
</ScorePanel>
<ScorePanel id="panel_rf_all" type="plan" position="4"
title="Representational Fairness" template="leaderboard_panel_all.html">
<Score ref="plan_repfairness" />
</ScorePanel>
<ScorePanel id="panel_rf_mine" type="plan" position="4"
title="Representational Fairness" template="leaderboard_panel_mine.html">
<Score ref="plan_repfairness" />
</ScorePanel>
%(end_elec)s
<!-- Summary above all sidebar panels -->
<ScorePanel id="congressional_panel_summary" type="plan_summary" position="1"
title="Plan Summary" cssclass="plan_summary congressional" template="plan_summary.html">
<Score ref="a_congress_plan_equipopulation_summary"/>
<Score ref="b_plan_congress_noncontiguous"/>
<Score ref="plan_majority_minority_blk_congress" />
<Score ref="plan_majority_minority_hisp_congress" />
%(start_na)s
<Score ref="plan_majority_minority_na_congress" />
%(end_na)s
</ScorePanel>
<ScorePanel id="house_panel_summary" type="plan_summary" position="1"
title="Plan Summary" cssclass="plan_summary house" template="plan_summary.html">
<Score ref="a_house_plan_equipopulation_summary"/>
<Score ref="b_plan_house_noncontiguous"/>
<Score ref="plan_majority_minority_blk_house" />
<Score ref="plan_majority_minority_hisp_house" />
%(start_na)s
<Score ref="plan_majority_minority_na_house" />
%(end_na)s
</ScorePanel>
<ScorePanel id="senate_panel_summary" type="plan_summary" position="1"
title="Plan Summary" cssclass="plan_summary senate" template="plan_summary.html">
<Score ref="a_senate_plan_equipopulation_summary"/>
<Score ref="b_plan_senate_noncontiguous"/>
<Score ref="a_senate_plan_count_districts" />
<Score ref="plan_majority_minority_blk_senate" />
<Score ref="plan_majority_minority_hisp_senate" />
%(start_na)s
<Score ref="plan_majority_minority_na_senate" />
%(end_na)s
</ScorePanel>
<!-- Basic Information -->
        <ScorePanel id="congressional_panel_info" type="district" position="2"
title="Basic Information" cssclass="district_basic_info congressional"
template="basic_information.html">
<Score ref="a_congressional_population" />
<Score ref="district_contiguous" />
<Score ref="district_schwartzberg" />
</ScorePanel>
<ScorePanel id="house_panel_info" type="district" position="2"
title="Basic Information" cssclass="district_basic_info house"
template="basic_information.html">
<Score ref="a_house_population" />
<Score ref="district_contiguous" />
<Score ref="district_schwartzberg" />
</ScorePanel>
<ScorePanel id="senate_panel_info" type="district" position="2"
title="Basic Information" cssclass="district_basic_info senate"
template="basic_information.html">
<Score ref="a_senate_population" />
<Score ref="district_contiguous" />
<Score ref="district_schwartzberg" />
</ScorePanel>
<!-- Demographics -->
<ScorePanel id="congressional_panel_demo" type="district" position="2"
title="Demographics" cssclass="district_demographics congressional"
template="demographics.html">
%(start_elec)s
<Score ref="district_vote_dem_percent" />
%(end_elec)s
<Score ref="district_blkvap_percent" />
<Score ref="district_hispvap_percent" />
</ScorePanel>
<ScorePanel id="house_panel_demo" type="district" position="2"
title="Demographics" cssclass="district_demographics house"
template="demographics.html">
%(start_elec)s
<Score ref="district_vote_dem_percent" />
%(end_elec)s
<Score ref="district_blkvap_percent" />
<Score ref="district_hispvap_percent" />
</ScorePanel>
<ScorePanel id="senate_panel_demo" type="district" position="2"
title="Demographics" cssclass="district_demographics senate"
template="demographics.html">
%(start_elec)s
<Score ref="district_vote_dem_percent" />
%(end_elec)s
<Score ref="district_blkvap_percent" />
<Score ref="district_hispvap_percent" />
</ScorePanel>
<!-- Needed due to issue https://sourceforge.net/apps/trac/publicmapping/ticket/340 Delete after setup -->
<ScorePanel id="stats_picker" type="district" position="1" title="Stats Picker" cssclass="hidden" template="demographics.html">
<Score ref="district_poptot"/>
<Score ref="district_totpop_b"/>
<Score ref="district_totpop_h"/>
<Score ref="district_totpop_a"/>
<Score ref="district_totpop_na"/>
<Score ref="district_totpop_pi"/>
<Score ref="district_totpop_wnh"/>
<Score ref="district_vap"/>
<Score ref="district_vap_b"/>
<Score ref="district_vap_h"/>
<Score ref="district_vap_a"/>
<Score ref="district_vap_na"/>
<Score ref="district_vap_pi"/>
<Score ref="district_vap_wnh"/>
<Score ref="district_blkvap_percent"/>
<Score ref="district_hispvap_percent"/>
<Score ref="district_avap_percent"/>
<Score ref="district_navap_percent"/>
<Score ref="district_pivap_percent"/>
<Score ref="district_wnhvap_percent"/>
%(start_elec)s
<Score ref="district_vote"/>
<Score ref="district_vote_dem"/>
<Score ref="district_vote_rep"/>
<Score ref="district_vote_dem_percent"/>
<Score ref="district_vote_rep_percent"/>
%(end_elec)s
</ScorePanel>
</ScorePanels>
<ScoreDisplays>
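        <!-- Structure note: leaderboard displays come in paired "All" and "Mine"
             variants for each legislative body; sidebar displays pair that body's
             plan summary panel with a detail panel (basic info or demographics). -->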
<ScoreDisplay legislativebodyref="congress" type="leaderboard"
title="Congressional Leaderboard - All" cssclass="leaderboard congress">
<ScorePanel ref="panel_equipop_all" />
<ScorePanel ref="panel_compact_all" />
%(start_elec)s
<ScorePanel ref="panel_competitive_all" />
<ScorePanel ref="panel_rf_all" />
%(end_elec)s
</ScoreDisplay>
<ScoreDisplay legislativebodyref="congress" type="leaderboard"
title="Congressional Leaderboard - Mine" cssclass="leaderboard congress">
<ScorePanel ref="panel_equipop_mine" />
<ScorePanel ref="panel_compact_mine" />
%(start_elec)s
            <ScorePanel ref="panel_competitive_mine" />
<ScorePanel ref="panel_rf_mine" />
%(end_elec)s
</ScoreDisplay>
<ScoreDisplay legislativebodyref="house" type="leaderboard"
title="State House Leaderboard - All" cssclass="leaderboard house">
<ScorePanel ref="panel_equipop_all" />
<ScorePanel ref="panel_compact_all" />
%(start_elec)s
<ScorePanel ref="panel_competitive_all" />
<ScorePanel ref="panel_rf_all" />
%(end_elec)s
</ScoreDisplay>
<ScoreDisplay legislativebodyref="house" type="leaderboard"
title="State House Leaderboard - Mine" cssclass="leaderboard house">
<ScorePanel ref="panel_equipop_mine" />
<ScorePanel ref="panel_compact_mine" />
%(start_elec)s
<ScorePanel ref="panel_competitive_mine" />
<ScorePanel ref="panel_rf_mine" />
%(end_elec)s
</ScoreDisplay>
<ScoreDisplay legislativebodyref="senate" type="leaderboard"
title="State Senate Leaderboard - All" cssclass="leaderboard senate">
<ScorePanel ref="panel_equipop_all" />
<ScorePanel ref="panel_compact_all" />
%(start_elec)s
<ScorePanel ref="panel_competitive_all" />
<ScorePanel ref="panel_rf_all" />
%(end_elec)s
</ScoreDisplay>
<ScoreDisplay legislativebodyref="senate" type="leaderboard"
title="State Senate Leaderboard - Mine" cssclass="leaderboard senate">
<ScorePanel ref="panel_equipop_mine" />
<ScorePanel ref="panel_compact_mine" />
%(start_elec)s
<ScorePanel ref="panel_competitive_mine" />
<ScorePanel ref="panel_rf_mine" />
%(end_elec)s
</ScoreDisplay>
<!-- Sidebar configuration -->
<ScoreDisplay legislativebodyref="congress" type="sidebar" title="Basic Information" cssclass="basic_information">
<ScorePanel ref="congressional_panel_summary" />
            <ScorePanel ref="congressional_panel_info" />
</ScoreDisplay>
<ScoreDisplay legislativebodyref="congress" type="sidebar" title="Demographics" cssclass="demographics">
<ScorePanel ref="congressional_panel_summary" />
<ScorePanel ref="congressional_panel_demo" />
</ScoreDisplay>
<ScoreDisplay legislativebodyref="house" type="sidebar" title="Basic Information" cssclass="basic_information">
<ScorePanel ref="house_panel_summary" />
<ScorePanel ref="house_panel_info" />
</ScoreDisplay>
<ScoreDisplay legislativebodyref="house" type="sidebar" title="Demographics" cssclass="demographics">
<ScorePanel ref="house_panel_summary" />
<ScorePanel ref="house_panel_demo" />
</ScoreDisplay>
<ScoreDisplay legislativebodyref="senate" type="sidebar" title="Basic Information" cssclass="basic_information">
<ScorePanel ref="senate_panel_summary" />
<ScorePanel ref="senate_panel_info" />
</ScoreDisplay>
<ScoreDisplay legislativebodyref="senate" type="sidebar" title="Demographics" cssclass="demographics">
<ScorePanel ref="senate_panel_summary" />
<ScorePanel ref="senate_panel_demo" />
</ScoreDisplay>
<!-- Needed due to issue https://sourceforge.net/apps/trac/publicmapping/ticket/340 Delete after setup -->
<ScoreDisplay legislativebodyref="congress" type="sidebar" title="All Stats" cssclass="hidden"><ScorePanel ref="stats_picker"/></ScoreDisplay>
</ScoreDisplays>
</Scoring>
<Validation>
<Criteria legislativebodyref="congress">
            <Criterion name="Equipopulation - Congress" description="<p>Your plan does not meet the competition criteria for Equipopulation:</p><p>The population of each Congressional district must be &pop_congress_min;-&pop_congress_max;</p>">
<Score ref="a_congress_plan_equipopulation_validation" />
</Criterion>
<Criterion name="AllContiguous - Congress"
description="<p>Your plan does not meet the competition criteria for Contiguity</p><p>Every part of a district must be reachable from every other part without crossing the district's borders. All districts within a plan must be contiguous. </p>">
<Score ref="plan_all_contiguous" />
</Criterion>
<Criterion name="MajorityMinority - Congress" description="">
<Score ref="plan_majority_minority" />
</Criterion>
<Criterion name="CountDistricts - Congress" description="">
<Score ref="a_congress_plan_count_districts" />
</Criterion>
<Criterion name="AllBlocksAssigned - Congress" description="">
<Score ref="plan_all_blocks_assigned" />
</Criterion>
</Criteria>
<Criteria legislativebodyref="house">
            <Criterion name="Equipopulation - House" description="<p>Your plan does not meet the competition criteria for Equipopulation:</p><p>The population of each House of Delegates district must be &pop_house_min;-&pop_house_max;</p>">
<Score ref="a_house_plan_equipopulation_validation" />
</Criterion>
<Criterion name="AllContiguous - House"
description="<p>Your plan does not meet the competition criteria for Contiguity</p><p>Every part of a district must be reachable from every other part without crossing the district's borders. All districts within a plan must be contiguous. </p>">
<Score ref="plan_all_contiguous" />
</Criterion>
<Criterion name="MajorityMinority - House" description="">
<Score ref="plan_majority_minority" />
</Criterion>
<Criterion name="CountDistricts - House" description="">
<Score ref="a_house_plan_count_districts" />
</Criterion>
<Criterion name="AllBlocksAssigned - House" description="">
<Score ref="plan_all_blocks_assigned" />
</Criterion>
</Criteria>
<Criteria legislativebodyref="senate">
            <Criterion name="Equipopulation - Senate" description="<p>Your plan does not meet the competition criteria for Equipopulation:</p><p>The population of each State Senate district must be &pop_senate_min;-&pop_senate_max;</p>">
<Score ref="a_senate_plan_equipopulation_validation" />
</Criterion>
<Criterion name="AllContiguous - Senate"
description="<p>Your plan does not meet the competition criteria for Contiguity</p><p>Every part of a district must be reachable from every other part without crossing the district's borders. All districts within a plan must be contiguous. </p>">
<Score ref="plan_all_contiguous" />
</Criterion>
<Criterion name="MajorityMinority - Senate" description="">
<Score ref="plan_majority_minority" />
</Criterion>
<Criterion name="CountDistricts - Senate" description="">
<Score ref="a_senate_plan_count_districts" />
</Criterion>
<Criterion name="AllBlocksAssigned - Senate" description="">
<Score ref="plan_all_blocks_assigned" />
</Criterion>
</Criteria>
</Validation>
<!--
Optional configuration for geounits that require special contiguity rules.
'id' is the portable id of the geounit in which to configure an override.
'connect_to' is the portable id of the geounit in which the geounit is
to be considered contiguous with. Tests for contiguity will apply these overrides
in order to account for contiguity when physical contiguity is not possible.
For example, an island may need to be marked contiguous with one or more geounits
on an adjacent coast (possibly containing harbors).
<ContiguityOverrides>
<ContiguityOverride id="510030112012077" connect_to="510030102011065" />
<ContiguityOverride id="510030112012077" connect_to="510030103003037" />
</ContiguityOverrides>
-->
<!-- Contiguity Overrides, if Any -->
%(contiguityOverrideString)s
<GeoLevels>
<GeoLevel id="block" name="block" min_zoom="6" sort_key="3" tolerance="2.5">
<Shapefile path="/projects/PublicMapping/data/census_blocks.shp">
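                <!-- The 'tree' fields concatenate to the 15-digit census block
                     GEOID: state (2) + county (3) + tract (6) + block (4). -->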
<Fields>
<Field name="NAME10" type="name"/>
<Field name="GEOID10" type="portable"/>
<Field name="STATEFP10" type="tree" pos="0" width="2"/>
<Field name="COUNTYFP10" type="tree" pos="1" width="3"/>
<Field name="TRACTCE10" type="tree" pos="2" width="6"/>
<Field name="BLOCKCE10" type="tree" pos="3" width="4"/>
</Fields>
</Shapefile>
<GeoLevelCharacteristics>
<GeoLevelCharacteristic ref="totpop" />
<GeoLevelCharacteristic ref="vap" />
<GeoLevelCharacteristic ref="vap_b" />
<GeoLevelCharacteristic ref="vap_h" />
<GeoLevelCharacteristic ref="vap_na" />
<GeoLevelCharacteristic ref="vap_wnh" />
<GeoLevelCharacteristic ref="vap_pi" />
<GeoLevelCharacteristic ref="vap_a" />
<GeoLevelCharacteristic ref="totpop_wnh" />
<GeoLevelCharacteristic ref="totpop_pi" />
<GeoLevelCharacteristic ref="totpop_a" />
<GeoLevelCharacteristic ref="totpop_b" />
<GeoLevelCharacteristic ref="totpop_h" />
<GeoLevelCharacteristic ref="totpop_na" />
%(start_elec)s
<GeoLevelCharacteristic ref="vote_dem" />
<GeoLevelCharacteristic ref="vote_rep" />
<GeoLevelCharacteristic ref="vote_tot" />
<GeoLevelCharacteristic ref="vote_dem_norm" />
<GeoLevelCharacteristic ref="vote_rep_norm" />
<GeoLevelCharacteristic ref="vote_tot_norm" />
%(end_elec)s
</GeoLevelCharacteristics>
<LegislativeBodies>
<LegislativeBody ref="congress">
<LegislativeTargets>
<LegislativeTarget ref="congress_target" default="true" />
</LegislativeTargets>
</LegislativeBody>
<LegislativeBody ref="house">
<LegislativeTargets>
<LegislativeTarget ref="house_target" default="true" />
</LegislativeTargets>
</LegislativeBody>
<LegislativeBody ref="senate">
<LegislativeTargets>
<LegislativeTarget ref="senate_target" default="true" />
</LegislativeTargets>
</LegislativeBody>
</LegislativeBodies>
</GeoLevel>
<GeoLevel id="tract" name="tract" min_zoom="3" sort_key="2" tolerance="25">
<Files>
<Geography path="/projects/PublicMapping/data/census_tracts.shp">
<Fields>
<Field name="NAME10" type="name" />
<Field name="GEOID10" type="portable" />
<Field name="STATEFP10" type="tree" pos="0" width="2"/>
<Field name="COUNTYFP10" type="tree" pos="1" width="3"/>
<Field name="TRACTCE10" type="tree" pos="2" width="6"/>
</Fields>
</Geography>
</Files>
<GeoLevelCharacteristics>
<GeoLevelCharacteristic ref="totpop" />
<GeoLevelCharacteristic ref="vap" />
<GeoLevelCharacteristic ref="vap_b" />
<GeoLevelCharacteristic ref="vap_h" />
<GeoLevelCharacteristic ref="vap_na" />
<GeoLevelCharacteristic ref="vap_wnh" />
<GeoLevelCharacteristic ref="vap_pi" />
<GeoLevelCharacteristic ref="vap_a" />
<GeoLevelCharacteristic ref="totpop_wnh" />
<GeoLevelCharacteristic ref="totpop_pi" />
<GeoLevelCharacteristic ref="totpop_a" />
<GeoLevelCharacteristic ref="totpop_b" />
<GeoLevelCharacteristic ref="totpop_h" />
<GeoLevelCharacteristic ref="totpop_na" />
%(start_elec)s
<GeoLevelCharacteristic ref="vote_dem" />
<GeoLevelCharacteristic ref="vote_rep" />
<GeoLevelCharacteristic ref="vote_tot" />
<GeoLevelCharacteristic ref="vote_dem_norm" />
<GeoLevelCharacteristic ref="vote_rep_norm" />
<GeoLevelCharacteristic ref="vote_tot_norm" />
%(end_elec)s
</GeoLevelCharacteristics>
<LegislativeBodies>
<LegislativeBody ref="congress">
<Parent ref="block" />
<LegislativeTargets>
<LegislativeTarget ref="congress_target" default="true" />
</LegislativeTargets>
</LegislativeBody>
<LegislativeBody ref="house">
<Parent ref="block" />
<LegislativeTargets>
<LegislativeTarget ref="house_target" default="true" />
</LegislativeTargets>
</LegislativeBody>
<LegislativeBody ref="senate">
<Parent ref="block" />
<LegislativeTargets>
<LegislativeTarget ref="senate_target" default="true" />
</LegislativeTargets>
</LegislativeBody>
</LegislativeBodies>
</GeoLevel>
<GeoLevel id="county" name="county" min_zoom="0" sort_key="1" tolerance="250">
<Files>
<Geography path="/projects/PublicMapping/data/census_counties.shp">
<Fields>
<Field name="NAME10" type="name"/>
<Field name="GEOID10" type="portable"/>
<Field name="STATEFP10" type="tree" pos="0" width="2"/>
<Field name="COUNTYFP10" type="tree" pos="1" width="3"/>
</Fields>
</Geography>
</Files>
<GeoLevelCharacteristics>
<GeoLevelCharacteristic ref="totpop" />
<GeoLevelCharacteristic ref="vap" />
<GeoLevelCharacteristic ref="vap_b" />
<GeoLevelCharacteristic ref="vap_h" />
<GeoLevelCharacteristic ref="vap_na" />
<GeoLevelCharacteristic ref="vap_wnh" />
<GeoLevelCharacteristic ref="vap_pi" />
<GeoLevelCharacteristic ref="vap_a" />
<GeoLevelCharacteristic ref="totpop_wnh" />
<GeoLevelCharacteristic ref="totpop_pi" />
<GeoLevelCharacteristic ref="totpop_a" />
<GeoLevelCharacteristic ref="totpop_b" />
<GeoLevelCharacteristic ref="totpop_h" />
<GeoLevelCharacteristic ref="totpop_na" />
%(start_elec)s
<GeoLevelCharacteristic ref="vote_dem" />
<GeoLevelCharacteristic ref="vote_rep" />
<GeoLevelCharacteristic ref="vote_tot" />
<GeoLevelCharacteristic ref="vote_dem_norm" />
<GeoLevelCharacteristic ref="vote_rep_norm" />
<GeoLevelCharacteristic ref="vote_tot_norm" />
%(end_elec)s
</GeoLevelCharacteristics>
<LegislativeBodies>
<LegislativeBody ref="congress">
<Parent ref="tract" />
<LegislativeTargets>
<LegislativeTarget ref="congress_target" default="true" />
</LegislativeTargets>
</LegislativeBody>
<LegislativeBody ref="house">
<Parent ref="tract" />
<LegislativeTargets>
<LegislativeTarget ref="house_target" default="true" />
</LegislativeTargets>
</LegislativeBody>
<LegislativeBody ref="senate">
<Parent ref="tract" />
<LegislativeTargets>
<LegislativeTarget ref="senate_target" default="true" />
</LegislativeTargets>
</LegislativeBody>
</LegislativeBodies>
</GeoLevel>
</GeoLevels>
<Templates>
<Template name="Congressional">
<LegislativeBody ref="congress"/>
<Blockfile path="/projects/PublicMapping/data/congress_generated_index.csv" />
</Template>
<Template name="State House">
<LegislativeBody ref="house"/>
<Blockfile path="/projects/PublicMapping/data/house_generated_index.csv" />
</Template>
<Template name="State Senate">
<LegislativeBody ref="senate"/>
<Blockfile path="/projects/PublicMapping/data/senate_generated_index.csv" />
</Template>
</Templates>
<Project root="/projects/PublicMapping/DistrictBuilder" sessionquota="5"
sessiontimeout="15">
<!-- Database connection information. -->
<Database name="publicmapping" user="publicmapping" password="<PASSWORD>"/>
<!--
            Administrative user information. This should match the admin
            user created when the Django project is created.
-->
<Admin user="admin" email="<EMAIL>" password="<PASSWORD>"/>
<!-- Configuration items specific to the 'redistricting' app. -->
<Redistricting>
<MapServer hostname="" ns="pmp" nshref="http://publicmapping.sourceforge.net/"
adminuser="admin" adminpass="<PASSWORD>" maxfeatures="100"
styles="/projects/PublicMapping/DistrictBuilder/sld" />
<!--
            Use a Google Analytics account to track the usage of the
application. This requires an account and domain.
<GoogleAnalytics account="" domain=""/>
-->
<!-- Upload file size restrictions. This is in KB -->
<Upload maxsize="2500"/>
<!-- Undo restrictions -->
<MaxUndos duringedit="50" afteredit="10" />
<!-- Leaderboard configuration -->
<Leaderboard maxranked="10" />
</Redistricting>
<Reporting>
<BardConfigs>
<BardConfig
id="blocks"
shape="/projects/PublicMapping/data/census_configured.Rdata"
temp="/projects/PublicMapping/local/reports"
transform="/projects/PublicMapping/DistrictBuilder/docs/bard_template.xslt">
<PopVars>
<PopVar subjectref="totpop" threshold=".01" default="true" />
<PopVar subjectref="vap" threshold=".1" />
</PopVars>
<RatioVars>
<!--
Set up RatioVars for both ethnicity and political
party.
-->
<RatioVar id="racialComp" label="Majority Minority Districts" threshold=".5">
<Numerators>
<Numerator subjectref="totpop_b" />
<Numerator subjectref="totpop_h" />
<Numerator subjectref="totpop_na" />
<Numerator subjectref="totpop_a" />
<Numerator subjectref="totpop_pi" />
<Numerator subjectref="totpop_wnh" />
</Numerators>
<Denominator subjectref="totpop" />
</RatioVar>
<RatioVar id="racialCompVap" label="Majority Minority Districts" threshold=".5">
<Numerators>
<Numerator subjectref="vap_b" />
<Numerator subjectref="vap_h" />
<Numerator subjectref="vap_na" />
<Numerator subjectref="vap_a" />
<Numerator subjectref="vap_pi" />
<Numerator subjectref="vap_wnh" />
</Numerators>
<Denominator subjectref="vap" />
</RatioVar>
%(start_elec)s
<RatioVar id="partyControl" label="Party-Controlled Districts" threshold=".5">
<Numerators>
<Numerator subjectref="vote_dem" />
<Numerator subjectref="vote_rep" />
</Numerators>
<Denominator subjectref="vote_tot" />
</RatioVar>
%(end_elec)s
</RatioVars>
<SplitVars>
<!--
See whether a given district splits a geography.
This can be any higher level geography: a county,
                        VTD, or tract.
-->
<SplitVar field="COUNTYFP10" label="County" />
<SplitVar field="TRACTCE10" label="Tract" />
</SplitVars>
</BardConfig>
</BardConfigs>
<BardBodyConfigs>
<!--
For each legislative body, map the configuration to the
geography used to generate reports.
-->
<BardBodyConfig
id="congress_blocks"
legislativebodyref="congress"
bardconfigref="blocks" />
<BardBodyConfig
id="house_blocks"
legislativebodyref="house"
bardconfigref="blocks" />
<BardBodyConfig
id="senate_blocks"
legislativebodyref="senate"
bardconfigref="blocks" />
</BardBodyConfigs>
</Reporting>
<!-- Information about the mailer configuration. -->
<Mailer server="localhost" port="25" username="" password=""/>
</Project>
</DistrictBuilder>
"""
def gen_config(num_districts_congress, num_districts_senate, num_districts_house, sum_TOTPOP,
               has_election_data=0, has_vtds=0, conf_na=False,
               target_na_congress=0, target_hisp_congress=0, target_bl_congress=0,
               target_na_house=0, target_hisp_house=0, target_bl_house=0,
               target_na_senate=0, target_hisp_senate=0, target_bl_senate=0,
               contiguityOverrideString=""):
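    """
    Render Config_Template with computed population targets and write
    /projects/PublicMapping/DistrictBuilder/docs/config_census_generated.xml.

    Congressional districts are held to a +/-0.5%% deviation from the ideal
    population (sum_TOTPOP / num_districts); house and senate districts
    allow +/-10%%. The start_*/end_* variables wrap optional XML sections
    in comment markers when election or Native American data is absent.
    """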
    start_na="<!--"
    start_elec="<!--"
    end_elec="-->"
    end_na="-->"
    midlevel="tract"
    if (conf_na==True):
        start_na=""
        end_na=""
    midlevel_width="6"
    midlevel_var="TRACTCE10"
    if (has_election_data==1):
        start_elec=""
        end_elec=""
    if (has_vtds==1):
        midlevel="vtds"
        midlevel_width="4"
        midlevel_var="VTDST10"
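    # Ideal district population = sum_TOTPOP / number of districts.
    # Worked example (hypothetical figures): sum_TOTPOP=8,000,000 with 8
    # congressional seats gives pop_congress=1,000,000, pop_congress_min=995,000
    # and pop_congress_max=1,005,000 -- the +/-0.5% bounds computed below.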
    pop_congress = int(round((sum_TOTPOP/float(num_districts_congress))))
    pop_congress_max = int(round((sum_TOTPOP/float(num_districts_congress)) * 1.005))
    pop_congress_min = int(round((sum_TOTPOP/float(num_districts_congress)) * 0.995))
    pop_house = int(round((sum_TOTPOP/float(num_districts_house))))
    pop_house_max = int(round((sum_TOTPOP/float(num_districts_house)) * 1.1))
    pop_house_min = int(round((sum_TOTPOP/float(num_districts_house)) * 0.9))
    pop_senate = int(round((sum_TOTPOP/float(num_districts_senate))))
    pop_senate_max = int(round((sum_TOTPOP/float(num_districts_senate)) * 1.1))
    pop_senate_min = int(round((sum_TOTPOP/float(num_districts_senate)) * 0.9))
    target_file = '/projects/PublicMapping/DistrictBuilder/docs/config_census_generated.xml'
    f = open(target_file,'w')
    f.write(str( Config_Template(start_elec=start_elec,end_elec=end_elec,num_districts_congress=num_districts_congress,num_districts_house=num_districts_house,num_districts_senate=num_districts_senate,pop_congress_max=pop_congress_max,pop_congress_min=pop_congress_min,pop_senate_max=pop_senate_max, pop_senate_min=pop_senate_min,pop_house_max=pop_house_max,pop_house_min=pop_house_min,pop_congress=pop_congress,pop_senate=pop_senate,pop_house=pop_house,start_na=start_na, end_na=end_na, target_na_congress=target_na_congress, target_hisp_congress=target_hisp_congress, target_bl_congress=target_bl_congress, target_na_house=target_na_house, target_hisp_house=target_hisp_house, target_bl_house=target_bl_house, target_na_senate=target_na_senate, target_hisp_senate=target_hisp_senate, target_bl_senate=target_bl_senate,contiguityOverrideString=contiguityOverrideString)))
    f.write("\n")
    f.close()
    os.chmod(target_file,stat.S_IRUSR|stat.S_IRGRP|stat.S_IROTH)
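# A minimal usage sketch (district counts and population total are illustrative):
#   gen_config(num_districts_congress=11, num_districts_senate=40,
#              num_districts_house=100, sum_TOTPOP=8001024)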
###
### MAIN
###
#
# Get Arguments
#
parser=optparse.OptionParser(usage="%prog -F[fips_code] -C[num_congressional_districts] -S[num_senate_districts] -H[num_house_districts]", version="%prog 0.1")
# required arguments
parser.add_option('-F','--fips', dest='stateFips',help="State two digit FIPS code", type=int, default=0)
parser.add_option('-C','--congdist', dest='congDis',help="number of congressional districts", type=int, default=0)
parser.add_option('-H', '--housedist',dest='houseDis',help="number of house districts", type=int, default=0)
parser.add_option('-S', '--sendist', dest='senDis',help="number of senate districts", type=int,default=0)
# operations to perform
parser.add_option('-i', '--install', dest="do_install", help="Install dependencies.", default=False, action='store_true')
parser.add_option('-g', '--getdata', dest="do_getdata", help="Get data.", default=False, action='store_true')
parser.add_option('-s', '--gensld', dest="do_gensld", help="Generate slds", default=False, action='store_true')
parser.add_option('-c', '--genconf', dest="do_genconf", help="Generate config file", default=False, action='store_true')
parser.add_option('-d', '--dropdb', dest="do_dropdb", help="Drop database", default=False, action='store_true')
parser.add_option('-r', '--run', dest="do_run", help="run setup.py", default=False, action='store_true')
# configuration options
parser.add_option('--na_inc', dest="conf_na", help="Include Native Americans in stats.", default=False, action='store_true')
parser.add_option('--na_targ_c', dest='target_na_congress',help="Number of Native American Congressional Districts for target", type=int, default=0)
parser.add_option('--na_targ_h', dest='target_na_house',help="Number of Native American House Districts for target", type=int, default=0)
parser.add_option('--na_targ_s', dest='target_na_senate',help="Number of Native American Senate Districts for target", type=int, default=0)
parser.add_option('--hisp_targ_c', dest='target_hisp_congress',help="Number of Hispanic Congressional Districts for target", type=int, default=0)
parser.add_option('--hisp_targ_h', dest='target_hisp_house',help="Number of Hispanic House Districts for target", type=int, default=0)
parser.add_option('--hisp_targ_s', dest='target_hisp_senate',help="Number of Hispanic Senate Districts for target", type=int, default=0)
parser.add_option('--bl_targ_c', dest='target_bl_congress',help="Number of Black Congressional districts for target", type=int, default=0)
parser.add_option('--bl_targ_h', dest='target_bl_house',help="Number of Black House districts for target", type=int, default=0)
parser.add_option('--bl_targ_s', dest='target_bl_senate',help="Number of Black Senate districts for target", type=int, default=0)
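# Example invocation (script name, FIPS code, and district counts are illustrative):
#   python configureCensus.py -F51 -C11 -S40 -H100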
(parseResults,numargs)=parser.parse_args()
# include na if there is a positive target, even if not otherwise specified
if ((parseResults.target_na_congress+parseResults.target_na_senate+parseResults.target_na_house)>0):
    parseResults.conf_na = True
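# When no individual operation flags are supplied, perform every step.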
allops = not any([parseResults.do_install, parseResults.do_getdata, parseResults.do_gensld,
                  parseResults.do_genconf, parseResults.do_dropdb, parseResults.do_run])
if (allops):
    parseResults.do_install=True
    parseResults.do_getdata=True
    parseResults.do_gensld=True
    parseResults.do_genconf=True
    parseResults.do_dropdb=True
    parseResults.do_run=True
if len(numargs) != 0:
    parser.error("unexpected additional arguments")
stateFips = parseResults.stateFips
houseDis = parseResults.houseDis
senDis = parseResults.senDis
congDis = parseResults.congDis
if (stateFips==0 or houseDis==0 or senDis==0 or congDis==0):
    print "Must supply all district arguments"
    raise ValueError
# install dependencies
if (parseResults.do_install):
    print "installing dependencies..."
    install_dependencies()
# Clear out DB
if (parseResults.do_dropdb):
    print 'clearing database ...'
    drop_db()
# generate generic sld files
if (parseResults.do_gensld):
    print 'generating generic sld files ...'
    gensld_none("county")
    gensld_none("tract")
    gensld_none("block")
    gensld_boundaries("county")
    gensld_boundaries("tract")
    gensld_boundaries("block")
# Retrieve data files
if (parseResults.do_getdata):
    print 'retrieving census data ...'
    get_census_data(stateFips)
    # merge standard variables
    # TODO: Refactor entirely in rpy
    print 'merging data...'
    robjects.r.source("/projects/PublicMapping/DistrictBuilder/docs/loadcensus/mergeCensus.R")
if ((parseResults.do_genconf) or (parseResults.do_gensld)):
    print 'calculating statistics for configs and slds...'
    robjects.r.source("/projects/PublicMapping/DistrictBuilder/docs/loadcensus/calcStats.R")
    sum_TOTPOP = robjects.r.sum_TOTPOP[0]
    # TODO: Refactor entirely in rpy
    # NOTE: robjects is returning 6-level quantiles, has_election_data, has_vtd, sum_TOTPOP
    has_election_data = robjects.r.has_election_data[0]
if (parseResults.do_genconf):
    robjects.r.source("/projects/PublicMapping/DistrictBuilder/docs/loadcensus/contiguityOverride.R")
    # TODO: should work but has embedded string forwarding
    #contiguityOverrideString = robjects.r.contiguityOverrideString
    f = open('/projects/PublicMapping/DistrictBuilder/docs/generated_overrides.xml', 'r')
    contiguityOverrideString = f.read()
    f.close()
# TODO: refactor as matrix of varnames and geographies
if (parseResults.do_gensld):
    print 'generating choropleth slds ...'
    gensld_choro("block","TOTPOP","Total Population",robjects.r.q_block_TOTPOP)
    gensld_choro_denquint("block","TOTPOP_H","Percent Hispanic Population",robjects.r.q_block_TOTPOP_H)
    gensld_choro_denquint("block","TOTPOP_B","Percent Black Population",robjects.r.q_block_TOTPOP_B)
    gensld_choro_denquint("block","TOTPOP_NA","Percent Native American Population",robjects.r.q_block_TOTPOP_NA)
    gensld_choro("block","VAP","Voting Age Population",robjects.r.q_block_VAP)
    gensld_choro_denquint("block","VAP_H","Percent Voting Age Hispanic Population",robjects.r.q_block_VAP_H)
    gensld_choro_denquint("block","VAP_B","Percent Voting Age Black Population",robjects.r.q_block_VAP_B)
    gensld_choro_denquint("block","VAP_NA","Percent Voting Age Native American Population",robjects.r.q_block_VAP_NA)
    gensld_choro("tract","TOTPOP","Total Population",robjects.r.q_tract_TOTPOP)
    gensld_choro_denquint("tract","TOTPOP_H","Percent Hispanic Population",robjects.r.q_tract_TOTPOP_H)
    gensld_choro_denquint("tract","TOTPOP_B","Percent Black Population",robjects.r.q_tract_TOTPOP_B)
    gensld_choro_denquint("tract","TOTPOP_NA","Percent Native American Population",robjects.r.q_tract_TOTPOP_NA)
    gensld_choro("tract","VAP","Voting Age Population",robjects.r.q_tract_VAP)
    gensld_choro_denquint("tract","VAP_H","Percent Voting Age Hispanic Population",robjects.r.q_tract_VAP_H)
    gensld_choro_denquint("tract","VAP_B","Percent Voting Age Black Population",robjects.r.q_tract_VAP_B)
    gensld_choro_denquint("tract","VAP_NA","Percent Voting Age Native American Population",robjects.r.q_tract_VAP_NA)
    gensld_choro("county","TOTPOP","Total Population",robjects.r.q_county_TOTPOP)
    gensld_choro_denquint("county","TOTPOP_H","Percent Hispanic Population",robjects.r.q_county_TOTPOP_H)
    gensld_choro_denquint("county","TOTPOP_B","Percent Black Population",robjects.r.q_county_TOTPOP_B)
    gensld_choro_denquint("county","TOTPOP_NA","Percent Native American Population",robjects.r.q_county_TOTPOP_NA)
    gensld_choro("county","VAP","Voting Age Population",robjects.r.q_county_VAP)
    gensld_choro_denquint("county","VAP_H","Percent Voting Age Hispanic Population",robjects.r.q_county_VAP_H)
    gensld_choro_denquint("county","VAP_B","Percent Voting Age Black Population",robjects.r.q_county_VAP_B)
    gensld_choro_denquint("county","VAP_NA","Percent Voting Age Native American Population",robjects.r.q_county_VAP_NA)
    if (has_election_data==1):
        gensld_choro_denquint("block","VOTE_DEM","Percent Predicted Democratic Vote ",robjects.r.q_block_VOTE_DEM)
        gensld_choro_denquint("block","VOTE_REP","Percent Predicted Republican Vote ",robjects.r.q_block_VOTE_REP)
        gensld_choro("block","VOTE_TOT","Predicted Vote ",robjects.r.q_block_VOTE_TOT)
        gensld_choro_denquint("tract","VOTE_DEM","Percent Predicted Democratic Vote ",robjects.r.q_tract_VOTE_DEM)
        gensld_choro_denquint("tract","VOTE_REP","Percent Predicted Republican Vote ",robjects.r.q_tract_VOTE_REP)
        gensld_choro("tract","VOTE_TOT","Predicted Vote ",robjects.r.q_tract_VOTE_TOT)
        gensld_choro_denquint("county","VOTE_DEM","Percent Predicted Democratic Vote ",robjects.r.q_county_VOTE_DEM)
        gensld_choro_denquint("county","VOTE_REP","Percent Predicted Republican Vote ",robjects.r.q_county_VOTE_REP)
        gensld_choro("county","VOTE_TOT","Predicted Vote ",robjects.r.q_county_VOTE_TOT)
        gensld_choro_denquint("block","VOTE_DEM_N","Percent Predicted Democratic Vote ",robjects.r.q_block_VOTE_DEM_N)
        gensld_choro_denquint("block","VOTE_REP_N","Percent Predicted Republican Vote ",robjects.r.q_block_VOTE_REP_N)
        gensld_choro("block","VOTE_TOT_N","Predicted Vote ",robjects.r.q_block_VOTE_TOT_N)
        gensld_choro_denquint("tract","VOTE_DEM_N","Percent Predicted Democratic Vote ",robjects.r.q_tract_VOTE_DEM_N)
        gensld_choro_denquint("tract","VOTE_REP_N","Percent Predicted Republican Vote ",robjects.r.q_tract_VOTE_REP_N)
        gensld_choro("tract","VOTE_TOT_N","Predicted Vote ",robjects.r.q_tract_VOTE_TOT_N)
        gensld_choro_denquint("county","VOTE_DEM_N","Percent Predicted Democratic Vote ",robjects.r.q_county_VOTE_DEM_N)
        gensld_choro_denquint("county","VOTE_REP_N","Percent Predicted Republican Vote ",robjects.r.q_county_VOTE_REP_N)
        gensld_choro("county","VOTE_TOT_N","Predicted Vote ",robjects.r.q_county_VOTE_TOT_N)
# generate config file
if (parseResults.do_genconf):
print 'generating config file ... '
gen_config(num_districts_congress=congDis, num_districts_senate=senDis, num_districts_house=houseDis,
           sum_TOTPOP=sum_TOTPOP, has_election_data=has_election_data, has_vtds=0, conf_na=parseResults.conf_na,
           target_na_congress=parseResults.target_na_congress, target_hisp_congress=parseResults.target_hisp_congress, target_bl_congress=parseResults.target_bl_congress,
           target_na_house=parseResults.target_na_house, target_hisp_house=parseResults.target_hisp_house, target_bl_house=parseResults.target_bl_house,
           target_na_senate=parseResults.target_na_senate, target_hisp_senate=parseResults.target_hisp_senate, target_bl_senate=parseResults.target_bl_senate,
           contiguityOverrideString=contiguityOverrideString)
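# optionally run DistrictBuilder's setup.py against the generated config, which is
# validated against config.xsd; otherwise print the equivalent manual command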
if (parseResults.do_run):
print 'running setup.py ... '
olddir = os.getcwd()
os.chdir("/projects/PublicMapping/DistrictBuilder/django/publicmapping/")
subprocess.check_call(["ls"])
#subprocess.check_call(["setup.py","-v2","/projects/PublicMapping/DistrictBuilder/docs/config.xsd"," /projects/PublicMapping/DistrictBuilder/docs/config_census_generated.xml"])
subprocess.check_call(["./setup.py -v2 /projects/PublicMapping/DistrictBuilder/docs/config.xsd /projects/PublicMapping/DistrictBuilder/docs/config_census_generated.xml"],shell=True)
os.chdir(olddir)
else:
print '\n\n*** Now run: ***\n\n'
print '(cd /projects/PublicMapping/DistrictBuilder/django/publicmapping/; python setup.py -v2 /projects/PublicMapping/DistrictBuilder/docs/config.xsd /projects/PublicMapping/DistrictBuilder/docs/config_census_generated.xml)'
# workaround celeryd first-time startup problem
print 'Starting celeryd ...'
subprocess.check_call(["service","celeryd","start"]) | en | 0.453241 | #!/usr/bin/env python # Framework for loading census data # Inputs: FIPS state code, list of variables to include as additional subjects # Requirements: # - external software: DistrictBuilder, R, gdal, wget, unzip # TODO -- check for VTD's # regular expressions # arg lists etc # globbing # system commands # os commands # for external commands # unzipping ### ### Globals ### # TODO : build in vote geographies, numbers of districts per state #VOTEGEOGRAPHIES={"county":"COUNTYFP10","tract":"TRACTCE10","block":"BLOCKCE10"} ### clear_publicmapping_db ### ### Truncate database ### Drop DB ### Install dependencies ### ### This attempts to install dependencies using apt-get ### ### ### Retrieve data files ### ### This retrieves the census files,unzips and reprojects (using ogr2ogr) # put all data in publicmapping data directory # obtain state boundary files from census # get additional data from our S3 bucket # unzip data files # Reproject block data # standardize file names ### ### TEMPLATING - SLD's ### # general template classes ### ### SLD Skeleton Classes ### <?xml version="1.0" encoding="ISO-8859-1"?> <StyledLayerDescriptor version="1.0.0" xmlns="http://www.opengis.net/sld" xmlns:ogc="http://www.opengis.net/ogc" xmlns:xlink="http://www.w3.org/1999/xlink" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://www.opengis.net/sld http://schemas.opengis.net/sld/1.0.0/StyledLayerDescriptor.xsd"> <NamedLayer> <Name>%(layername)s</Name> <UserStyle> <Title>%(layertitle)s</Title> <Abstract>%(layerabs)s</Abstract> <FeatureTypeStyle> %(slst|sli)s %(lst|li)s %(elst|eli)s </FeatureTypeStyle> </UserStyle> </NamedLayer> </StyledLayerDescriptor> <Rule> <Title>%(title)s</Title> <PolygonSymbolizer> <Fill> <CssParameter name="fill">%(fill)s</CssParameter> <CssParameter name="fill-opacity">%(fillopacity)s</CssParameter> </Fill> </PolygonSymbolizer> </Rule> <Rule> <Title>%(title)s</Title> <PolygonSymbolizer> <Fill> <CssParameter name="fill">%(fill)s</CssParameter> <CssParameter name="fill-opacity">%(fillopacity)s</CssParameter> </Fill> <Stroke> <CssParameter name="stroke">%(stroke)s</CssParameter> <CssParameter name="stroke-width">%(strokewidth)s</CssParameter> <CssParameter name="stroke-opacity">%(strokeopacity)s</CssParameter> </Stroke> </PolygonSymbolizer> </Rule> # plain fill template <Rule> <Title>%(title)s</Title> <LineSymbolizer> <Stroke> <CssParameter name="stroke">%(stroke)s</CssParameter> <CssParameter name="stroke-width">%(strokewidth)s</CssParameter> <CssParameter name="stroke-opacity">%(strokeopacity)s</CssParameter> </Stroke> </LineSymbolizer> </Rule> # min-max range template <Rule> <Title>%(bottom)s-%(top)s</Title> <ogc:Filter> <ogc:And> <ogc:PropertyIsLessThan> <ogc:PropertyName>%(unit)s</ogc:PropertyName> <ogc:Literal>%(top)s</ogc:Literal> </ogc:PropertyIsLessThan> <ogc:PropertyIsGreaterThanOrEqualTo> <ogc:PropertyName>%(unit)s</ogc:PropertyName> <ogc:Literal>%(bottom)s</ogc:Literal> </ogc:PropertyIsGreaterThanOrEqualTo> </ogc:And> </ogc:Filter> <PolygonSymbolizer> <Fill> <CssParameter name="fill">%(fill)s</CssParameter> <CssParameter name="fill-opacity">%(fillopacity)s</CssParameter> </Fill> </PolygonSymbolizer> </Rule> <Rule> <Title>%(bottom)s-%(top)s</Title> <ogc:Filter> <ogc:PropertyIsGreaterThanOrEqualTo> <ogc:PropertyName>%(unit)s</ogc:PropertyName> <ogc:Literal>%(bottom)s</ogc:Literal> </ogc:PropertyIsGreaterThanOrEqualTo> </ogc:Filter> <PolygonSymbolizer> <Fill> <CssParameter 
name="fill">%(fill)s</CssParameter> <CssParameter name="fill-opacity">%(fillopacity)s</CssParameter> </Fill> </PolygonSymbolizer> </Rule> #TODO: generalize to any number of choropleths # WARNING: sld files need to be lower case to be compatible with postgres views ### Config file generation ### TODO: has_vtds==1 has not fully implemented ### paramaterize thresholds? <!-- Define Internal Entities to avoid Repeated Entering of Values --> <!DOCTYPE DistrictBuilder [ <!ENTITY num_districts_congress "%(num_districts_congress)s"> <!ENTITY num_districts_house "%(num_districts_house)s"> <!ENTITY num_districts_senate "%(num_districts_senate)s"> <!ENTITY pop_congress "%(pop_congress)s"> <!ENTITY pop_house "%(pop_house)s"> <!ENTITY pop_senate "%(pop_senate)s"> <!ENTITY pop_congress_min "%(pop_congress_min)s"> <!ENTITY pop_house_min "%(pop_house_min)s"> <!ENTITY pop_senate_min "%(pop_senate_min)s"> <!ENTITY pop_congress_max "%(pop_congress_max)s"> <!ENTITY pop_house_max "%(pop_house_max)s"> <!ENTITY pop_senate_max "%(pop_senate_max)s"> <!ENTITY target_hisp_congress "%(target_hisp_congress)s"> <!ENTITY target_hisp_senate "%(target_hisp_senate)s"> <!ENTITY target_hisp_house "%(target_hisp_house)s"> <!ENTITY target_bl_congress "%(target_bl_congress)s"> <!ENTITY target_bl_senate "%(target_bl_senate)s"> <!ENTITY target_bl_house "%(target_bl_house)s"> <!ENTITY target_na_senate "%(target_na_senate)s"> <!ENTITY target_na_house "%(target_na_house)s"> <!ENTITY target_na_congress "%(target_na_congress)s"> ]> <DistrictBuilder> <!-- Define legislative bodies referenced in the system. --> <LegislativeBodies> <!-- A Legislative body has an ID (for referencing in GeoLevel definitions later), a name, and a label for plan items ("District" for Congressional, etc) --> <LegislativeBody id="congress" name="Congressional" member="District %%s" maxdistricts="&num_districts_congress;"/> <LegislativeBody id="house" name="State House" member="District %%s" maxdistricts="&num_districts_house;" /> <LegislativeBody id="senate" name="State Senate" member="District %%s" maxdistricts="&num_districts_senate;" /> </LegislativeBodies> <!-- A list of subjects referenced in the system. --> <Subjects> <!-- A Subject is a measurement type, such as "Total Population". The subject is mapped to an attribute during the import phase, and contains a long and short display name. Subjects have IDs for referencing in GeoLevel definitions later. 
--> <Subject id="vap_b" field="VAP_B" name="African-American Voting Age Population" short_name="Black VAP " displayed="true" sortkey="1" percentage_denominator="vap" /> <Subject id="vap_h" field="VAP_H" name="Hispanic or Latino voting age population" short_name="Hispanic VAP" displayed="true" sortkey="2" percentage_denominator="vap" /> <Subject id="vap_na" field="VAP_NA" name="Native American Voting Age Population" short_name="Nat Amer VAP" displayed="true" sortkey="4" percentage_denominator="vap" /> %(start_elec)s <Subject id="vote_dem" field="VOTE_DEM" name="num likely Democratic voters" short_name="Democratic voters" displayed="true" sortkey="3" percentage_denominator="vote_tot" /> <Subject id="vote_rep" field="VOTE_REP" name="num likely Republican voters" short_name="Republican voters" displayed="true" sortkey="5" percentage_denominator="vote_tot" /> <Subject id="vote_tot" field="VOTE_TOT" name="num likely Rep/Dem voters" short_name="Rep+ Dem vote" displayed="false" sortkey="6" /> <Subject id="vote_dem_norm" field="VOTE_DEM_N" name="num of likely Democratic voters normalized to 50/50 state baseline" short_name="Normal Dem vote" displayed="true" sortkey="18" percentage_denominator="vote_tot_norm" /> <Subject id="vote_rep_norm" field="VOTE_REP_N" name="num of likely Republican voters normalized to 50/50 state baseline" short_name="Normal Rep vote" displayed="true" sortkey="19" percentage_denominator="vote_tot_norm" /> <Subject id="vote_tot_norm" field="VOTE_TOT_N" name="number of likely Republican and Democratic voters normalized to 50/50 state baseline" short_name="Normal 2-party vote" displayed="false" sortkey="20" /> %(end_elec)s <Subject id="vap" field="VAP" name="Voting Age Population" short_name="vap" displayed="true" sortkey="7" /> <Subject id="totpop_b" field="TOTPOP_B" name="African-American" short_name="Black" displayed="false" sortkey="8" percentage_denominator="totpop"/> <Subject id="totpop_h" field="TOTPOP_H" name="Hispanic or Latino" short_name="Hispanic" displayed="false" sortkey="9" percentage_denominator="totpop"/> <Subject id="totpop_na" field="TOTPOP_NA" name="Native American" short_name="Nat Amer" displayed="false" sortkey="10" percentage_denominator="totpop"/> <Subject id="totpop_a" field="TOTPOP_A" name="Asian Population" short_name="Asian" displayed="false" sortkey="11" percentage_denominator="totpop"/> <Subject id="totpop_pi" field="TOTPOP_PI" name="Pacific Islander" short_name="Pac Isl" displayed="false" sortkey="12" percentage_denominator="totpop"/> <Subject id="totpop_wnh" field="TOTPOP_WNH" name="White Non-Hispanic" short_name="White" displayed="false" sortkey="13" percentage_denominator="totpop"/> <Subject id="totpop" field="TOTPOP" name="Total Population" short_name="Total Pop." 
displayed="true" sortkey="14"/> <Subject id="vap_a" field="VAP_A" name="Asian Voting Age Population" short_name="Asian VAP" displayed="true" sortkey="15" percentage_denominator="vap" /> <Subject id="vap_pi" field="VAP_PI" name="Pacific Islander Voting Age Population" short_name="Pacific VAP" displayed="true" sortkey="16" percentage_denominator="vap"/> <Subject id="vap_wnh" field="VAP_WNH" name="White Non-Hispanic Voting Age Population" short_name="White VAP" displayed="true" sortkey="17" percentage_denominator="vap"/> </Subjects> <Scoring> <ScoreFunctions> <!-- A district score that returns a literal value --> <ScoreFunction id="district_poptot" type="district" calculator="publicmapping.redistricting.calculators.Sum" label="Total Pop" user_selectable="true"> <SubjectArgument name="value1" ref="totpop" /> </ScoreFunction> <ScoreFunction id="district_totpop_b" type="district" calculator="publicmapping.redistricting.calculators.Sum" label="Black VAP" user_selectable="true"> <SubjectArgument name="value1" ref="totpop_b" /> </ScoreFunction> <ScoreFunction id="district_totpop_h" type="district" calculator="publicmapping.redistricting.calculators.Sum" label="Hispanic VAP" user_selectable="true"> <SubjectArgument name="value1" ref="totpop_h" /> </ScoreFunction> <ScoreFunction id="district_totpop_a" type="district" calculator="publicmapping.redistricting.calculators.Sum" label="Asian VAP" user_selectable="true"> <SubjectArgument name="value1" ref="totpop_a" /> </ScoreFunction> <ScoreFunction id="district_totpop_na" type="district" calculator="publicmapping.redistricting.calculators.Sum" label="Native American VAP" user_selectable="true"> <SubjectArgument name="value1" ref="totpop_na" /> </ScoreFunction> <ScoreFunction id="district_totpop_pi" type="district" calculator="publicmapping.redistricting.calculators.Sum" label="Pacific Islander VAP" user_selectable="true"> <SubjectArgument name="value1" ref="totpop_pi" /> </ScoreFunction> <ScoreFunction id="district_totpop_wnh" type="district" calculator="publicmapping.redistricting.calculators.Sum" label="Pacific Islander VAP" user_selectable="true"> <SubjectArgument name="value1" ref="totpop_wnh" /> </ScoreFunction> <ScoreFunction id="district_vap" type="district" calculator="publicmapping.redistricting.calculators.Sum" label="VAP" user_selectable="true"> <SubjectArgument name="value1" ref="vap" /> </ScoreFunction> <ScoreFunction id="district_vap_b" type="district" calculator="publicmapping.redistricting.calculators.Sum" label="Black VAP" user_selectable="true"> <SubjectArgument name="value1" ref="vap_b" /> </ScoreFunction> <ScoreFunction id="district_vap_h" type="district" calculator="publicmapping.redistricting.calculators.Sum" label="Hispanic VAP" user_selectable="true"> <SubjectArgument name="value1" ref="vap_h" /> </ScoreFunction> <ScoreFunction id="district_vap_a" type="district" calculator="publicmapping.redistricting.calculators.Sum" label="Asian VAP" user_selectable="true"> <SubjectArgument name="value1" ref="vap_a" /> </ScoreFunction> <ScoreFunction id="district_vap_na" type="district" calculator="publicmapping.redistricting.calculators.Sum" label="Native American VAP" user_selectable="true"> <SubjectArgument name="value1" ref="vap_na" /> </ScoreFunction> <ScoreFunction id="district_vap_pi" type="district" calculator="publicmapping.redistricting.calculators.Sum" label="Pacific Islander VAP" user_selectable="true"> <SubjectArgument name="value1" ref="vap_pi" /> </ScoreFunction> <ScoreFunction id="district_vap_wnh" type="district" 
calculator="publicmapping.redistricting.calculators.Sum" label="Pacific Islander VAP" user_selectable="true"> <SubjectArgument name="value1" ref="vap_wnh" /> </ScoreFunction> <!-- A district score that returns a percentage --> <ScoreFunction id="district_blkvap_percent" type="district" calculator="publicmapping.redistricting.calculators.Percent" label="Black VAP %%" user_selectable="true"> <SubjectArgument name="numerator" ref="vap_b" /> <SubjectArgument name="denominator" ref="vap" /> </ScoreFunction> <ScoreFunction id="district_blkvap_thresh" type="district" calculator="publicmapping.redistricting.calculators.Threshold" label="Black VAP Threshold"> <ScoreArgument name="value" ref="district_blkvap_percent" /> <Argument name="threshold" value="0.5" /> </ScoreFunction> <ScoreFunction id="district_hispvap_percent" type="district" calculator="publicmapping.redistricting.calculators.Percent" label="Hisp. VAP %%" user_selectable="true"> <SubjectArgument name="numerator" ref="vap_h" /> <SubjectArgument name="denominator" ref="vap" /> </ScoreFunction> <ScoreFunction id="district_hispvap_thresh" type="district" calculator="publicmapping.redistricting.calculators.Threshold" label="Hisp. VAP Threshold"> <ScoreArgument name="value" ref="district_hispvap_percent" /> <Argument name="threshold" value="0.5" /> </ScoreFunction> <ScoreFunction id="district_navap_percent" type="district" calculator="publicmapping.redistricting.calculators.Percent" label="Native American VAP %%" user_selectable="true"> <SubjectArgument name="numerator" ref="vap_na" /> <SubjectArgument name="denominator" ref="vap" /> </ScoreFunction> <ScoreFunction id="district_navap_thresh" type="district" calculator="publicmapping.redistricting.calculators.Threshold" label="Native American VAP Threshold"> <ScoreArgument name="value" ref="district_navap_percent" /> <Argument name="threshold" value="0.5" /> </ScoreFunction> <ScoreFunction id="district_avap_percent" type="district" calculator="publicmapping.redistricting.calculators.Percent" label="Asian VAP %%" user_selectable="true"> <SubjectArgument name="numerator" ref="vap_a" /> <SubjectArgument name="denominator" ref="vap" /> </ScoreFunction> <ScoreFunction id="district_avap_thresh" type="district" calculator="publicmapping.redistricting.calculators.Threshold" label="Asian VAP Threshold"> <ScoreArgument name="value" ref="district_avap_percent" /> <Argument name="threshold" value="0.5" /> </ScoreFunction> <ScoreFunction id="district_pivap_percent" type="district" calculator="publicmapping.redistricting.calculators.Percent" label="Pacific Islander VAP %%" user_selectable="true"> <SubjectArgument name="numerator" ref="vap_pi" /> <SubjectArgument name="denominator" ref="vap" /> </ScoreFunction> <ScoreFunction id="district_pivap_thresh" type="district" calculator="publicmapping.redistricting.calculators.Threshold" label="Pacific Islander VAP Threshold"> <ScoreArgument name="value" ref="district_pivap_percent" /> <Argument name="threshold" value="0.5" /> </ScoreFunction> <ScoreFunction id="district_wnhvap_percent" type="district" calculator="publicmapping.redistricting.calculators.Percent" label="White VAP %%" user_selectable="true"> <SubjectArgument name="numerator" ref="vap_wnh" /> <SubjectArgument name="denominator" ref="vap" /> </ScoreFunction> <ScoreFunction id="district_wnhvap_thresh" type="district" calculator="publicmapping.redistricting.calculators.Threshold" label="White VAP Threshold"> <ScoreArgument name="value" ref="district_wnhvap_percent" /> <Argument name="threshold" 
value="0.5" /> </ScoreFunction> %(start_elec)s <ScoreFunction id="district_vote" type="district" calculator="publicmapping.redistricting.calculators.Sum" label="Estimated votes" user_selectable="true"> <SubjectArgument name="value1" ref="vote_tot" /> </ScoreFunction> <ScoreFunction id="district_vote_dem" type="district" calculator="publicmapping.redistricting.calculators.Sum" label="Estimated Democratic votes" user_selectable="true"> <SubjectArgument name="value1" ref="vote_dem" /> </ScoreFunction> <ScoreFunction id="district_vote_rep" type="district" calculator="publicmapping.redistricting.calculators.Sum" label="Estimated votes" user_selectable="true"> <SubjectArgument name="value1" ref="vote_rep" /> </ScoreFunction> <ScoreFunction id="district_vote_dem_percent" type="district" calculator="publicmapping.redistricting.calculators.Percent" label="Democratic Predicted Vote %%" user_selectable="true"> <SubjectArgument name="numerator" ref="vote_dem" /> <SubjectArgument name="denominator" ref="vote_tot" /> </ScoreFunction> <ScoreFunction id="district_vote_dem_thresh" type="district" calculator="publicmapping.redistricting.calculators.Threshold" label="Democratic Predicted Vote Threshold"> <ScoreArgument name="value" ref="district_vote_dem_percent" /> <Argument name="threshold" value="0.5" /> </ScoreFunction> <ScoreFunction id="district_vote_rep_percent" type="district" calculator="publicmapping.redistricting.calculators.Percent" label="Republican Predicted Vote %%" user_selectable="true"> <SubjectArgument name="numerator" ref="vote_rep" /> <SubjectArgument name="denominator" ref="vote_tot" /> </ScoreFunction> <ScoreFunction id="district_vote_rep_thresh" type="district" calculator="publicmapping.redistricting.calculators.Threshold" label="Republican Predicted Vote Threshold"> <ScoreArgument name="value" ref="district_vote_rep_percent" /> <Argument name="threshold" value="0.5" /> </ScoreFunction> %(end_elec)s <!-- A district score that generates classes based on a couple ranges around a mean value. --> <ScoreFunction id="district_poptot_uitarget_congress" type="district" calculator="publicmapping.redistricting.calculators.Target"> <SubjectArgument name="value" ref="totpop" /> <Argument name="target" value="&pop_congress;" /> <Argument name="range1" value="0.005"/> <Argument name="range2" value="0.010"/> </ScoreFunction> <ScoreFunction id="district_poptot_uitarget_house" type="district" calculator="publicmapping.redistricting.calculators.Target"> <SubjectArgument name="value" ref="totpop" /> <Argument name="target" value="%(pop_house)s" /> <Argument name="range1" value="0.05" /> <Argument name="range2" value="0.10" /> </ScoreFunction> <ScoreFunction id="district_poptot_uitarget_senate" type="district" calculator="publicmapping.redistricting.calculators.Target"> <SubjectArgument name="value" ref="totpop" /> <Argument name="target" value="%(pop_senate)s" /> <Argument name="range1" value="0.05" /> <Argument name="range2" value="0.10" /> </ScoreFunction> <!-- A district score that returns 1(T) if the subject value is between the ranges, otherwise returns 0(F). 
--> <ScoreFunction id="district_poptot_range" type="district" calculator="publicmapping.redistricting.calculators.Range" label="Tot Pop Range"> <SubjectArgument name="value" ref="totpop" /> <Argument name="min" value="&pop_congress_min;" /> <Argument name="max" value="&pop_congress_max;" /> </ScoreFunction> <!-- A district score that is threshold dependent, and returns T/F; this example uses 2 score functions: 1 to combine a set of subjects, and 2 to divide the sum over another subject. --> <ScoreFunction id="district_mintot" type="district" calculator="publicmapping.redistricting.calculators.Sum"> <SubjectArgument name="value1" ref="totpop_b" /> <SubjectArgument name="value2" ref="totpop_h" /> <SubjectArgument name="value3" ref="totpop_na" /> </ScoreFunction> <ScoreFunction id="district_majmin" type="district" calculator="publicmapping.redistricting.calculators.DivideAndThreshold" > <ScoreArgument name="numerator" ref="district_mintot" /> <SubjectArgument name="denominator" ref="totpop" /> <Argument name="threshold" value="0.5" /> </ScoreFunction> <!-- A custom calculator to calculate compactness, and return the raw compactness score. --> <ScoreFunction id="district_schwartzberg" type="district" calculator="publicmapping.redistricting.calculators.Schwartzberg" label="Compactness" user_selectable="true"> </ScoreFunction> <!-- A custom calculator to do contiguity, and is boolean. --> <ScoreFunction id="district_contiguous" type="district" calculator="publicmapping.redistricting.calculators.Contiguity" label="Contiguous" user_selectable="true"> </ScoreFunction> <!-- A plan score that aggregates all literal values --> <ScoreFunction id="plan_sum_equipop" type="plan" calculator="publicmapping.redistricting.calculators.Sum" label="Equal Population"> <ScoreArgument name="value1" ref="district_poptot_range" /> </ScoreFunction> <ScoreFunction id="plan_all_equipop" type="plan" calculator="publicmapping.redistricting.calculators.Threshold" > <ScoreArgument name="value" ref="plan_sum_equipop" /> <Argument name="threshold" value="0" /> </ScoreFunction> <!-- A plan score that aggregates all districts over a threshold --> <ScoreFunction id="plan_count_majmin" type="plan" calculator="publicmapping.redistricting.calculators.Sum"> <ScoreArgument name="value1" ref="district_majmin" /> </ScoreFunction> <ScoreFunction id="plan_blkvap_thresh" type="plan" calculator="publicmapping.redistricting.calculators.Sum" label="Majority Black Districts" user_selectable="true"> <ScoreArgument name="value1" ref="district_blkvap_thresh" /> </ScoreFunction> <ScoreFunction id="plan_hispvap_thresh" type="plan" calculator="publicmapping.redistricting.calculators.Sum" label="Majority Hispanic Districts" user_selectable="true"> <ScoreArgument name="value1" ref="district_hispvap_thresh" /> </ScoreFunction> <ScoreFunction id="plan_navap_thresh" type="plan" calculator="publicmapping.redistricting.calculators.Sum" label="Majority Asian Districts" user_selectable="true"> <ScoreArgument name="value1" ref="district_navap_thresh" /> </ScoreFunction> <ScoreFunction id="plan_avap_thresh" type="plan" calculator="publicmapping.redistricting.calculators.Sum" label="Majority Asian Districts" user_selectable="true"> <ScoreArgument name="value1" ref="district_avap_thresh" /> </ScoreFunction> <ScoreFunction id="plan_pivap_thresh" type="plan" calculator="publicmapping.redistricting.calculators.Sum"> <ScoreArgument name="value1" ref="district_pivap_thresh" /> </ScoreFunction> <!-- A plan score that evaluates a threshold, and returns T/F. 
This plan score checks that all districts are within the population limits. --> <ScoreFunction id="plan_poptot_inrange" type="plan" calculator="publicmapping.redistricting.calculators.Threshold"> <ScoreArgument name="value" ref="district_poptot_range" /> <Argument name="threshold" value="0" /> </ScoreFunction> <!-- A plan score that evaluates all districts, and returns 1(T) if there is more than 0 districts that have a minority majority. --> <ScoreFunction id="plan_major_minor" type="plan" calculator="publicmapping.redistricting.calculators.Threshold" label="Majority-Minority"> <ScoreArgument name="value" ref="district_majmin" /> <Argument name="threshold" value="0" /> </ScoreFunction> <ScoreFunction id="plan_contiguous" type="plan" calculator="publicmapping.redistricting.calculators.Sum" label="Contiguous"> <ScoreArgument name="value1" ref="district_contiguous"/> </ScoreFunction> <ScoreFunction id="b_plan_congress_noncontiguous" type="plan" calculator="publicmapping.redistricting.calculators.Contiguity" label="Contiguous"> <Argument name="target" value="&num_districts_congress;" /> </ScoreFunction> <ScoreFunction id="b_plan_house_noncontiguous" type="plan" calculator="publicmapping.redistricting.calculators.Contiguity" label="Contiguous"> <Argument name="target" value="&num_districts_house;" /> </ScoreFunction> <ScoreFunction id="b_plan_senate_noncontiguous" type="plan" calculator="publicmapping.redistricting.calculators.Contiguity" label="Contiguous"> <Argument name="target" value="&num_districts_senate;" /> </ScoreFunction> <!-- interval score function for population --> <ScoreFunction id="a_congressional_population" type="district" label="Tot Pop Range (Congress)" user_selectable="true" description="Population interval calculator for congressional." calculator="publicmapping.redistricting.calculators.Interval"> <SubjectArgument name="subject" ref="totpop" /> <Argument name="target" value="&pop_congress;" /> <Argument name="bound1" value=".005" /> <Argument name="bound2" value=".01" /> </ScoreFunction> <ScoreFunction id="a_house_population" type="district" label="Tot Pop Range (House)" user_selectable="true" description="Population interval calculator for house." calculator="publicmapping.redistricting.calculators.Interval"> <SubjectArgument name="subject" ref="totpop" /> <Argument name="target" value="%(pop_house)s" /> <Argument name="bound1" value=".005" /> <Argument name="bound2" value=".01" /> </ScoreFunction> <ScoreFunction id="a_senate_population" type="district" label="Tot Pop Range (Senate)" user_selectable="true" description="Population interval calculator for senate." 
calculator="publicmapping.redistricting.calculators.Interval"> <SubjectArgument name="subject" ref="totpop" /> <Argument name="target" value="%(pop_senate)s" /> <Argument name="bound1" value=".005" /> <Argument name="bound2" value=".01" /> </ScoreFunction> <!-- leaderboard functions --> <ScoreFunction id="a_congress_plan_count_districts" type="plan" calculator="publicmapping.redistricting.calculators.CountDistricts" label="Count Districts" description="The number of districts in a Congressional redistricting plan must be &num_districts_congress;."> <Argument name="target" value="&num_districts_congress;" /> </ScoreFunction> <ScoreFunction id="a_house_plan_count_districts" type="plan" calculator="publicmapping.redistricting.calculators.CountDistricts" label="Count Districts" description="The number of districts in a House of Delegates redistricting plan must be &num_districts_house;."> <Argument name="target" value="&num_districts_house;" /> </ScoreFunction> <ScoreFunction id="a_senate_plan_count_districts" type="plan" calculator="publicmapping.redistricting.calculators.CountDistricts" label="Count Districts" description="The number of districts in a State Senate redistricting plan must be &num_districts_senate;."> <Argument name="target" value="&num_districts_senate;" /> </ScoreFunction> <ScoreFunction id="a_congress_plan_equipopulation_validation" type="plan" calculator="publicmapping.redistricting.calculators.Equipopulation" label="Target Pop. &pop_congress;" description="The population of each Congressional district must be &pop_congress_min;-&pop_congress_max;"> <Argument name="min" value="&pop_congress_min;"/> <Argument name="max" value="&pop_congress_max;"/> <SubjectArgument name="value" ref="totpop"/> <Argument name="validation" value="1"/> </ScoreFunction> <ScoreFunction id="a_congress_plan_equipopulation_summary" type="plan" calculator="publicmapping.redistricting.calculators.Equipopulation" label="Target Pop. &pop_congress;" description="The population of each Congressional district must be &pop_congress_min;-&pop_congress_max;"> <Argument name="min" value="&pop_congress_min;"/> <Argument name="max" value="&pop_congress_max;"/> <SubjectArgument name="value" ref="totpop"/> <Argument name="target" value="&num_districts_congress;"/> </ScoreFunction> <ScoreFunction id="a_senate_plan_equipopulation_validation" type="plan" calculator="publicmapping.redistricting.calculators.Equipopulation" label="Target Pop. %(pop_senate)s" description="The population of each Senate district must be &pop_senate_min;-&pop_senate_max;"> <Argument name="min" value="&pop_senate_min;"/> <Argument name="max" value="&pop_senate_max;"/> <SubjectArgument name="value" ref="totpop"/> <Argument name="validation" value="1"/> </ScoreFunction> <ScoreFunction id="a_senate_plan_equipopulation_summary" type="plan" calculator="publicmapping.redistricting.calculators.Equipopulation" label="Target Pop. %(pop_senate)s" description="The population of each Senate district must be &pop_senate_min;-&pop_senate_max;"> <Argument name="min" value="&pop_senate_min;"/> <Argument name="max" value="&pop_senate_max;"/> <SubjectArgument name="value" ref="totpop"/> <Argument name="target" value="&num_districts_senate;"/> </ScoreFunction> <ScoreFunction id="a_house_plan_equipopulation_validation" type="plan" calculator="publicmapping.redistricting.calculators.Equipopulation" label="Target Pop. 
%(pop_house)s" description="The population of each House district must be &pop_house_min;-&pop_house_max;"> <Argument name="min" value="&pop_house_min;"/> <Argument name="max" value="&pop_house_max;"/> <SubjectArgument name="value" ref="totpop"/> <Argument name="validation" value="1"/> </ScoreFunction> <ScoreFunction id="a_house_plan_equipopulation_summary" type="plan" calculator="publicmapping.redistricting.calculators.Equipopulation" label="Target Pop. %(pop_house)s" description="The population of each House district must be &pop_house_min;-&pop_house_max;"> <Argument name="min" value="&pop_house_min;"/> <Argument name="max" value="&pop_house_max;"/> <SubjectArgument name="value" ref="totpop"/> <Argument name="target" value="&num_districts_house;"/> </ScoreFunction> <ScoreFunction id="plan_all_blocks_assigned" type="plan" calculator="publicmapping.redistricting.calculators.AllBlocksAssigned" label="All Blocks Assigned" description="All blocks in the plan must be assigned."> </ScoreFunction> <ScoreFunction id="plan_all_contiguous" type="plan" calculator="publicmapping.redistricting.calculators.AllContiguous" label="All Contiguous" description="Contiguity means that every part of a district must be reachable from every other part without crossing the district's borders. All districts within a plan must be contiguous. Water contiguity is permitted. 'Point contiguity' or 'touch-point contiguity' where two sections of a district are connected at a single point is not permitted."> </ScoreFunction> %(start_elec)s <ScoreFunction id="plan_competitiveness" type="plan" calculator="publicmapping.redistricting.calculators.Competitiveness" label="Competitiveness" description="Each plan's overall political competitiveness is determined by averaging each district.s 'partisan differential'. The partisan differential of each district is calculated by subtracting the Democratic 'partisan index' from the Republican 'partisan index'.<br/><br/>'Heavily' competitive districts are districts with partisan differentials of less than or equal to 5%%. 'Generally' competitive districts are districts with partisan differentials of greater than 5%% but less than 10%%."> <SubjectArgument name="democratic" ref="vote_dem" /> <SubjectArgument name="republican" ref="vote_rep" /> </ScoreFunction> %(end_elec)s <ScoreFunction id="plan_equivalence" type="plan" calculator="publicmapping.redistricting.calculators.Equivalence" label="Equal Population" description="The Equipopulation score is the difference between the district with the highest population and the district with the lowest population."> <SubjectArgument name="value" ref="totpop" /> </ScoreFunction> <ScoreFunction id="plan_majority_minority_blk_congress" type="plan" calculator="publicmapping.redistricting.calculators.MajorityMinority" label="Black VAP Majority (> 50%%)" description="Compliance with the Voting Rights Act will be assumed if maps include a minority-majority district in any area where a minority group is (as described in Thornburg V. Gingles, 478 U.S. 
30, 49 (1986)) 'sufficiently large and geographically compact to constitute a majority in a single-member district'."> <SubjectArgument name="population" ref="vap" /> <SubjectArgument name="minority1" ref="vap_b" /> <Argument name="target" value="&target_bl_congress;" /> </ScoreFunction> <ScoreFunction id="plan_majority_minority_blk_house" type="plan" calculator="publicmapping.redistricting.calculators.MajorityMinority" label="Black VAP Majority (> 50%%)" description="Compliance with the Voting Rights Act will be assumed if maps include a minority-majority district in any area where a minority group is (as described in Thornburg V. Gingles, 478 U.S. 30, 49 (1986)) 'sufficiently large and geographically compact to constitute a majority in a single-member district'."> <SubjectArgument name="population" ref="vap" /> <SubjectArgument name="minority1" ref="vap_b" /> <Argument name="target" value="&target_bl_house;" /> </ScoreFunction> <ScoreFunction id="plan_majority_minority_blk_senate" type="plan" calculator="publicmapping.redistricting.calculators.MajorityMinority" label="Black VAP Majority (> 50%%)" description="Compliance with the Voting Rights Act will be assumed if maps include a minority-majority district in any area where a minority group is (as described in Thornburg V. Gingles, 478 U.S. 30, 49 (1986)) 'sufficiently large and geographically compact to constitute a majority in a single-member district'."> <SubjectArgument name="population" ref="vap" /> <SubjectArgument name="minority1" ref="vap_b" /> <Argument name="target" value="&target_bl_senate;" /> </ScoreFunction> <ScoreFunction id="plan_majority_minority_hisp_congress" type="plan" calculator="publicmapping.redistricting.calculators.MajorityMinority" label="Hisp. VAP Majority (> 50%%)" description="Compliance with the Voting Rights Act will be assumed if maps include a minority-majority district in any area where a minority group is (as described in Thornburg V. Gingles, 478 U.S. 30, 49 (1986)) 'sufficiently large and geographically compact to constitute a majority in a single-member district'."> <SubjectArgument name="population" ref="vap" /> <SubjectArgument name="minority1" ref="vap_h" /> <Argument name="target" value="&target_hisp_congress;" /> </ScoreFunction> <ScoreFunction id="plan_majority_minority_hisp_house" type="plan" calculator="publicmapping.redistricting.calculators.MajorityMinority" label="Hisp. VAP Majority (> 50%%)" description="Compliance with the Voting Rights Act will be assumed if maps include a minority-majority district in any area where a minority group is (as described in Thornburg V. Gingles, 478 U.S. 30, 49 (1986)) 'sufficiently large and geographically compact to constitute a majority in a single-member district'."> <SubjectArgument name="population" ref="vap" /> <SubjectArgument name="minority1" ref="vap_h" /> <Argument name="target" value="&target_hisp_house;" /> </ScoreFunction> <ScoreFunction id="plan_majority_minority_hisp_senate" type="plan" calculator="publicmapping.redistricting.calculators.MajorityMinority" label="Hisp. VAP Majority (> 50%%)" description="Compliance with the Voting Rights Act will be assumed if maps include a minority-majority district in any area where a minority group is (as described in Thornburg V. Gingles, 478 U.S. 
30, 49 (1986)) 'sufficiently large and geographically compact to constitute a majority in a single-member district'."> <SubjectArgument name="population" ref="vap" /> <SubjectArgument name="minority1" ref="vap_h" /> <Argument name="target" value="&target_hisp_senate;" /> </ScoreFunction> <ScoreFunction id="plan_majority_minority_na_congress" type="plan" calculator="publicmapping.redistricting.calculators.MajorityMinority" label="Native American Majority (> 50%%)" description="Compliance with the Voting Rights Act will be assumed if maps include a minority-majority district in any area where a minority group is (as described in Thornburg V. Gingles, 478 U.S. 30, 49 (1986)) 'sufficiently large and geographically compact to constitute a majority in a single-member district'."> <SubjectArgument name="population" ref="vap" /> <SubjectArgument name="minority1" ref="vap_na" /> <Argument name="target" value="&target_na_congress;" /> </ScoreFunction> <ScoreFunction id="plan_majority_minority_na_house" type="plan" calculator="publicmapping.redistricting.calculators.MajorityMinority" label="Native American Majority (> 50%%)" description="Compliance with the Voting Rights Act will be assumed if maps include a minority-majority district in any area where a minority group is (as described in Thornburg V. Gingles, 478 U.S. 30, 49 (1986)) 'sufficiently large and geographically compact to constitute a majority in a single-member district'."> <SubjectArgument name="population" ref="vap" /> <SubjectArgument name="minority1" ref="vap_na" /> <Argument name="target" value="&target_na_house;" /> </ScoreFunction> <ScoreFunction id="plan_majority_minority_na_senate" type="plan" calculator="publicmapping.redistricting.calculators.MajorityMinority" label="Native American Majority (> 50%%)" description="Compliance with the Voting Rights Act will be assumed if maps include a minority-majority district in any area where a minority group is (as described in Thornburg V. Gingles, 478 U.S. 30, 49 (1986)) 'sufficiently large and geographically compact to constitute a majority in a single-member district'."> <SubjectArgument name="population" ref="vap" /> <SubjectArgument name="minority1" ref="vap_na" /> <Argument name="target" value="&target_na_senate;" /> </ScoreFunction> <ScoreFunction id="plan_majority_minority" type="plan" calculator="publicmapping.redistricting.calculators.MajorityMinority" label="Majority Minority District" description="Compliance with the Voting Rights Act will be assumed if maps include a minority-majority district in any area where a minority group is (as described in Thornburg V. Gingles, 478 U.S. 30, 49 (1986)) 'sufficiently large and geographically compact to constitute a majority in a single-member district'."> <SubjectArgument name="population" ref="vap" /> <SubjectArgument name="minority1" ref="vap_b" /> <SubjectArgument name="minority2" ref="vap_h" /> <SubjectArgument name="minority3" ref="vap_na" /> <Argument name="validation" value="1" /> </ScoreFunction> %(start_elec)s <ScoreFunction id="plan_repfairness" type="plan" calculator="publicmapping.redistricting.calculators.RepresentationalFairness" label="Representational Fairness" description="Representational fairness is increased when the percentage of districts a party would likely win (based upon the 'partisan index' used to determine Competitiveness) closely mirrors that party.s percentage of the statewide vote." 
> <Argument name="range" value="0.05" /> <SubjectArgument name="normalized democratic" ref="vote_dem_norm" /> <SubjectArgument name="normalized republican" ref="vote_rep_norm" /> </ScoreFunction> %(end_elec)s <ScoreFunction id="plan_schwartzberg" type="plan" calculator="publicmapping.redistricting.calculators.Schwartzberg" label="Average Compactness" description="The competition is using the 'Schwartzberg' compactness measure. This measure is a ratio of the perimeter of the district to the circumference of the circle whose area is equal to the area of the district." > </ScoreFunction> </ScoreFunctions> <ScorePanels> <ScorePanel id="panel_equipop_all" type="plan" position="1" title="Equipopulation" template="leaderboard_panel_all.html"> <Score ref="plan_equivalence" /> </ScorePanel> <ScorePanel id="panel_equipop_mine" type="plan" position="1" title="Equipopulation" template="leaderboard_panel_mine.html"> <Score ref="plan_equivalence" /> </ScorePanel> <ScorePanel id="panel_compact_all" type="plan" position="2" title="Schwartzberg" template="leaderboard_panel_all.html"> <Score ref="plan_schwartzberg" /> </ScorePanel> <ScorePanel id="panel_compact_mine" type="plan" position="2" title="Schwartzberg" template="leaderboard_panel_mine.html"> <Score ref="plan_schwartzberg" /> </ScorePanel> %(start_elec)s <ScorePanel id="panel_competitive_all" type="plan" position="3" title="Competitiveness" template="leaderboard_panel_all.html"> <Score ref="plan_competitiveness" /> </ScorePanel> <ScorePanel id="panel_competitive_mine" type="plan" position="3" title="Competitiveness" template="leaderboard_panel_mine.html"> <Score ref="plan_competitiveness" /> </ScorePanel> <ScorePanel id="panel_rf_all" type="plan" position="4" title="Representational Fairness" template="leaderboard_panel_all.html"> <Score ref="plan_repfairness" /> </ScorePanel> <ScorePanel id="panel_rf_mine" type="plan" position="4" title="Representational Fairness" template="leaderboard_panel_mine.html"> <Score ref="plan_repfairness" /> </ScorePanel> %(end_elec)s <!-- Summary above all sidebar panels --> <ScorePanel id="congressional_panel_summary" type="plan_summary" position="1" title="Plan Summary" cssclass="plan_summary congressional" template="plan_summary.html"> <Score ref="a_congress_plan_equipopulation_summary"/> <Score ref="b_plan_congress_noncontiguous"/> <Score ref="plan_majority_minority_blk_congress" /> <Score ref="plan_majority_minority_hisp_congress" /> %(start_na)s <Score ref="plan_majority_minority_na_congress" /> %(end_na)s </ScorePanel> <ScorePanel id="house_panel_summary" type="plan_summary" position="1" title="Plan Summary" cssclass="plan_summary house" template="plan_summary.html"> <Score ref="a_house_plan_equipopulation_summary"/> <Score ref="b_plan_house_noncontiguous"/> <Score ref="plan_majority_minority_blk_house" /> <Score ref="plan_majority_minority_hisp_house" /> %(start_na)s <Score ref="plan_majority_minority_na_house" /> %(end_na)s </ScorePanel> <ScorePanel id="senate_panel_summary" type="plan_summary" position="1" title="Plan Summary" cssclass="plan_summary senate" template="plan_summary.html"> <Score ref="a_senate_plan_equipopulation_summary"/> <Score ref="b_plan_senate_noncontiguous"/> <Score ref="a_senate_plan_count_districts" /> <Score ref="plan_majority_minority_blk_senate" /> <Score ref="plan_majority_minority_hisp_senate" /> %(start_na)s <Score ref="plan_majority_minority_na_senate" /> %(end_na)s </ScorePanel> <!-- Basic Information --> <ScorePanel id="congresional_panel_info" type="district" position="2" 
title="Basic Information" cssclass="district_basic_info congressional" template="basic_information.html"> <Score ref="a_congressional_population" /> <Score ref="district_contiguous" /> <Score ref="district_schwartzberg" /> </ScorePanel> <ScorePanel id="house_panel_info" type="district" position="2" title="Basic Information" cssclass="district_basic_info house" template="basic_information.html"> <Score ref="a_house_population" /> <Score ref="district_contiguous" /> <Score ref="district_schwartzberg" /> </ScorePanel> <ScorePanel id="senate_panel_info" type="district" position="2" title="Basic Information" cssclass="district_basic_info senate" template="basic_information.html"> <Score ref="a_senate_population" /> <Score ref="district_contiguous" /> <Score ref="district_schwartzberg" /> </ScorePanel> <!-- Demographics --> <ScorePanel id="congressional_panel_demo" type="district" position="2" title="Demographics" cssclass="district_demographics congressional" template="demographics.html"> %(start_elec)s <Score ref="district_vote_dem_percent" /> %(end_elec)s <Score ref="district_blkvap_percent" /> <Score ref="district_hispvap_percent" /> </ScorePanel> <ScorePanel id="house_panel_demo" type="district" position="2" title="Demographics" cssclass="district_demographics house" template="demographics.html"> %(start_elec)s <Score ref="district_vote_dem_percent" /> %(end_elec)s <Score ref="district_blkvap_percent" /> <Score ref="district_hispvap_percent" /> </ScorePanel> <ScorePanel id="senate_panel_demo" type="district" position="2" title="Demographics" cssclass="district_demographics senate" template="demographics.html"> %(start_elec)s <Score ref="district_vote_dem_percent" /> %(end_elec)s <Score ref="district_blkvap_percent" /> <Score ref="district_hispvap_percent" /> </ScorePanel> <!-- Needed due to issue https://sourceforge.net/apps/trac/publicmapping/ticket/340 Delete after setup --> <ScorePanel id="stats_picker" type="district" position="1" title="Stats Picker" cssclass="hidden" template="demographics.html"> <Score ref="district_poptot"/> <Score ref="district_totpop_b"/> <Score ref="district_totpop_h"/> <Score ref="district_totpop_a"/> <Score ref="district_totpop_na"/> <Score ref="district_totpop_pi"/> <Score ref="district_totpop_wnh"/> <Score ref="district_vap"/> <Score ref="district_vap_b"/> <Score ref="district_vap_h"/> <Score ref="district_vap_a"/> <Score ref="district_vap_na"/> <Score ref="district_vap_pi"/> <Score ref="district_vap_wnh"/> <Score ref="district_blkvap_percent"/> <Score ref="district_hispvap_percent"/> <Score ref="district_avap_percent"/> <Score ref="district_navap_percent"/> <Score ref="district_pivap_percent"/> <Score ref="district_wnhvap_percent"/> %(start_elec)s <Score ref="district_vote"/> <Score ref="district_vote_dem"/> <Score ref="district_vote_rep"/> <Score ref="district_vote_dem_percent"/> <Score ref="district_vote_rep_percent"/> %(end_elec)s </ScorePanel> </ScorePanels> <ScoreDisplays> <ScoreDisplay legislativebodyref="congress" type="leaderboard" title="Congressional Leaderboard - All" cssclass="leaderboard congress"> <ScorePanel ref="panel_equipop_all" /> <ScorePanel ref="panel_compact_all" /> %(start_elec)s <ScorePanel ref="panel_competitive_all" /> <ScorePanel ref="panel_rf_all" /> %(end_elec)s </ScoreDisplay> <ScoreDisplay legislativebodyref="congress" type="leaderboard" title="Congressional Leaderboard - Mine" cssclass="leaderboard congress"> <ScorePanel ref="panel_equipop_mine" /> <ScorePanel ref="panel_compact_mine" /> %(start_elec)s <ScorePanel 
ref="panel_competitive_all" /> <ScorePanel ref="panel_rf_mine" /> %(end_elec)s </ScoreDisplay> <ScoreDisplay legislativebodyref="house" type="leaderboard" title="State House Leaderboard - All" cssclass="leaderboard house"> <ScorePanel ref="panel_equipop_all" /> <ScorePanel ref="panel_compact_all" /> %(start_elec)s <ScorePanel ref="panel_competitive_all" /> <ScorePanel ref="panel_rf_all" /> %(end_elec)s </ScoreDisplay> <ScoreDisplay legislativebodyref="house" type="leaderboard" title="State House Leaderboard - Mine" cssclass="leaderboard house"> <ScorePanel ref="panel_equipop_mine" /> <ScorePanel ref="panel_compact_mine" /> %(start_elec)s <ScorePanel ref="panel_competitive_mine" /> <ScorePanel ref="panel_rf_mine" /> %(end_elec)s </ScoreDisplay> <ScoreDisplay legislativebodyref="senate" type="leaderboard" title="State Senate Leaderboard - All" cssclass="leaderboard senate"> <ScorePanel ref="panel_equipop_all" /> <ScorePanel ref="panel_compact_all" /> %(start_elec)s <ScorePanel ref="panel_competitive_all" /> <ScorePanel ref="panel_rf_all" /> %(end_elec)s </ScoreDisplay> <ScoreDisplay legislativebodyref="senate" type="leaderboard" title="State Senate Leaderboard - Mine" cssclass="leaderboard senate"> <ScorePanel ref="panel_equipop_mine" /> <ScorePanel ref="panel_compact_mine" /> %(start_elec)s <ScorePanel ref="panel_competitive_mine" /> <ScorePanel ref="panel_rf_mine" /> %(end_elec)s </ScoreDisplay> <!-- Sidebar configuration --> <ScoreDisplay legislativebodyref="congress" type="sidebar" title="Basic Information" cssclass="basic_information"> <ScorePanel ref="congressional_panel_summary" /> <ScorePanel ref="congresional_panel_info" /> </ScoreDisplay> <ScoreDisplay legislativebodyref="congress" type="sidebar" title="Demographics" cssclass="demographics"> <ScorePanel ref="congressional_panel_summary" /> <ScorePanel ref="congressional_panel_demo" /> </ScoreDisplay> <ScoreDisplay legislativebodyref="house" type="sidebar" title="Basic Information" cssclass="basic_information"> <ScorePanel ref="house_panel_summary" /> <ScorePanel ref="house_panel_info" /> </ScoreDisplay> <ScoreDisplay legislativebodyref="house" type="sidebar" title="Demographics" cssclass="demographics"> <ScorePanel ref="house_panel_summary" /> <ScorePanel ref="house_panel_demo" /> </ScoreDisplay> <ScoreDisplay legislativebodyref="senate" type="sidebar" title="Basic Information" cssclass="basic_information"> <ScorePanel ref="senate_panel_summary" /> <ScorePanel ref="senate_panel_info" /> </ScoreDisplay> <ScoreDisplay legislativebodyref="senate" type="sidebar" title="Demographics" cssclass="demographics"> <ScorePanel ref="senate_panel_summary" /> <ScorePanel ref="senate_panel_demo" /> </ScoreDisplay> <!-- Needed due to issue https://sourceforge.net/apps/trac/publicmapping/ticket/340 Delete after setup --> <ScoreDisplay legislativebodyref="congress" type="sidebar" title="All Stats" cssclass="hidden"><ScorePanel ref="stats_picker"/></ScoreDisplay> </ScoreDisplays> </Scoring> <Validation> <Criteria legislativebodyref="congress"> <Criterion name="Equipopulation - Congress" description="<p>Your plan does not meet the competition criteria for Equipopulation:</p><p> The population of each Congressional district must be &pop_congress_max;-&pop_congress_min;"> <Score ref="a_congress_plan_equipopulation_validation" /> </Criterion> <Criterion name="AllContiguous - Congress" description="<p>Your plan does not meet the competition criteria for Contiguity</p><p>Every part of a district must be reachable from every other part without crossing the 
district's borders. All districts within a plan must be contiguous. </p>"> <Score ref="plan_all_contiguous" /> </Criterion> <Criterion name="MajorityMinority - Congress" description=""> <Score ref="plan_majority_minority" /> </Criterion> <Criterion name="CountDistricts - Congress" description=""> <Score ref="a_congress_plan_count_districts" /> </Criterion> <Criterion name="AllBlocksAssigned - Congress" description=""> <Score ref="plan_all_blocks_assigned" /> </Criterion> </Criteria> <Criteria legislativebodyref="house"> <Criterion name="Equipopulation - House" description="<p>Your plan does not meet the competition criteria for Equipopulation:</p><p>The population of each House of Delegates district must be &pop_house_min; - &pop_house_max;"> <Score ref="a_house_plan_equipopulation_validation" /> </Criterion> <Criterion name="AllContiguous - House" description="<p>Your plan does not meet the competition criteria for Contiguity</p><p>Every part of a district must be reachable from every other part without crossing the district's borders. All districts within a plan must be contiguous. </p>"> <Score ref="plan_all_contiguous" /> </Criterion> <Criterion name="MajorityMinority - House" description=""> <Score ref="plan_majority_minority" /> </Criterion> <Criterion name="CountDistricts - House" description=""> <Score ref="a_house_plan_count_districts" /> </Criterion> <Criterion name="AllBlocksAssigned - House" description=""> <Score ref="plan_all_blocks_assigned" /> </Criterion> </Criteria> <Criteria legislativebodyref="senate"> <Criterion name="Equipopulation - Senate" description="<p>Your plan does not meet the competition criteria for Equipopulation:</p><p>The population of each State Senate district must be &pop_house_min;-&pop_house_max;"> <Score ref="a_senate_plan_equipopulation_validation" /> </Criterion> <Criterion name="AllContiguous - Senate" description="<p>Your plan does not meet the competition criteria for Contiguity</p><p>Every part of a district must be reachable from every other part without crossing the district's borders. All districts within a plan must be contiguous. </p>"> <Score ref="plan_all_contiguous" /> </Criterion> <Criterion name="MajorityMinority - Senate" description=""> <Score ref="plan_majority_minority" /> </Criterion> <Criterion name="CountDistricts - Senate" description=""> <Score ref="a_senate_plan_count_districts" /> </Criterion> <Criterion name="AllBlocksAssigned - Senate" description=""> <Score ref="plan_all_blocks_assigned" /> </Criterion> </Criteria> </Validation> <!-- Optional configuration for geounits that require special contiguity rules. 'id' is the portable id of the geounit in which to configure an override. 'connect_to' is the portable id of the geounit in which the geounit is to be considered contiguous with. Tests for contiguity will apply these overrides in order to account for contiguity when physical contiguity is not possible. For example, an island may need to be marked contiguous with one or more geounits on an adjacent coast (possibly containing harbors). 
<ContiguityOverrides> <ContiguityOverride id="510030112012077" connect_to="510030102011065" /> <ContiguityOverride id="510030112012077" connect_to="510030103003037" /> </ContiguityOverrides> --> <!-- Contiguity Overrides, if Any --> %(contiguityOverrideString)s <GeoLevels> <GeoLevel id="block" name="block" min_zoom="6" sort_key="3" tolerance="2.5"> <Shapefile path="/projects/PublicMapping/data/census_blocks.shp"> <Fields> <Field name="NAME10" type="name"/> <Field name="GEOID10" type="portable"/> <Field name="STATEFP10" type="tree" pos="0" width="2"/> <Field name="COUNTYFP10" type="tree" pos="1" width="3"/> <Field name="TRACTCE10" type="tree" pos="2" width="6"/> <Field name="BLOCKCE10" type="tree" pos="3" width="4"/> </Fields> </Shapefile> <GeoLevelCharacteristics> <GeoLevelCharacteristic ref="totpop" /> <GeoLevelCharacteristic ref="vap" /> <GeoLevelCharacteristic ref="vap_b" /> <GeoLevelCharacteristic ref="vap_h" /> <GeoLevelCharacteristic ref="vap_na" /> <GeoLevelCharacteristic ref="vap_wnh" /> <GeoLevelCharacteristic ref="vap_pi" /> <GeoLevelCharacteristic ref="vap_a" /> <GeoLevelCharacteristic ref="totpop_wnh" /> <GeoLevelCharacteristic ref="totpop_pi" /> <GeoLevelCharacteristic ref="totpop_a" /> <GeoLevelCharacteristic ref="totpop_b" /> <GeoLevelCharacteristic ref="totpop_h" /> <GeoLevelCharacteristic ref="totpop_na" /> %(start_elec)s <GeoLevelCharacteristic ref="vote_dem" /> <GeoLevelCharacteristic ref="vote_rep" /> <GeoLevelCharacteristic ref="vote_tot" /> <GeoLevelCharacteristic ref="vote_dem_norm" /> <GeoLevelCharacteristic ref="vote_rep_norm" /> <GeoLevelCharacteristic ref="vote_tot_norm" /> %(end_elec)s </GeoLevelCharacteristics> <LegislativeBodies> <LegislativeBody ref="congress"> <LegislativeTargets> <LegislativeTarget ref="congress_target" default="true" /> </LegislativeTargets> </LegislativeBody> <LegislativeBody ref="house"> <LegislativeTargets> <LegislativeTarget ref="house_target" default="true" /> </LegislativeTargets> </LegislativeBody> <LegislativeBody ref="senate"> <LegislativeTargets> <LegislativeTarget ref="senate_target" default="true" /> </LegislativeTargets> </LegislativeBody> </LegislativeBodies> </GeoLevel> <GeoLevel id="tract" name="tract" min_zoom="3" sort_key="2" tolerance="25"> <Files> <Geography path="/projects/PublicMapping/data/census_tracts.shp"> <Fields> <Field name="NAME10" type="name" /> <Field name="GEOID10" type="portable" /> <Field name="STATEFP10" type="tree" pos="0" width="2"/> <Field name="COUNTYFP10" type="tree" pos="1" width="3"/> <Field name="TRACTCE10" type="tree" pos="2" width="6"/> </Fields> </Geography> </Files> <GeoLevelCharacteristics> <GeoLevelCharacteristic ref="totpop" /> <GeoLevelCharacteristic ref="vap" /> <GeoLevelCharacteristic ref="vap_b" /> <GeoLevelCharacteristic ref="vap_h" /> <GeoLevelCharacteristic ref="vap_na" /> <GeoLevelCharacteristic ref="vap_wnh" /> <GeoLevelCharacteristic ref="vap_pi" /> <GeoLevelCharacteristic ref="vap_a" /> <GeoLevelCharacteristic ref="totpop_wnh" /> <GeoLevelCharacteristic ref="totpop_pi" /> <GeoLevelCharacteristic ref="totpop_a" /> <GeoLevelCharacteristic ref="totpop_b" /> <GeoLevelCharacteristic ref="totpop_h" /> <GeoLevelCharacteristic ref="totpop_na" /> %(start_elec)s <GeoLevelCharacteristic ref="vote_dem" /> <GeoLevelCharacteristic ref="vote_rep" /> <GeoLevelCharacteristic ref="vote_tot" /> <GeoLevelCharacteristic ref="vote_dem_norm" /> <GeoLevelCharacteristic ref="vote_rep_norm" /> <GeoLevelCharacteristic ref="vote_tot_norm" /> %(end_elec)s </GeoLevelCharacteristics> <LegislativeBodies> 
<LegislativeBody ref="congress"> <Parent ref="block" /> <LegislativeTargets> <LegislativeTarget ref="congress_target" default="true" /> </LegislativeTargets> </LegislativeBody> <LegislativeBody ref="house"> <Parent ref="block" /> <LegislativeTargets> <LegislativeTarget ref="house_target" default="true" /> </LegislativeTargets> </LegislativeBody> <LegislativeBody ref="senate"> <Parent ref="block" /> <LegislativeTargets> <LegislativeTarget ref="senate_target" default="true" /> </LegislativeTargets> </LegislativeBody> </LegislativeBodies> </GeoLevel> <GeoLevel id="county" name="county" min_zoom="0" sort_key="1" tolerance="250"> <Files> <Geography path="/projects/PublicMapping/data/census_counties.shp"> <Fields> <Field name="NAME10" type="name"/> <Field name="GEOID10" type="portable"/> <Field name="STATEFP10" type="tree" pos="0" width="2"/> <Field name="COUNTYFP10" type="tree" pos="1" width="3"/> </Fields> </Geography> </Files> <GeoLevelCharacteristics> <GeoLevelCharacteristic ref="totpop" /> <GeoLevelCharacteristic ref="vap" /> <GeoLevelCharacteristic ref="vap_b" /> <GeoLevelCharacteristic ref="vap_h" /> <GeoLevelCharacteristic ref="vap_na" /> <GeoLevelCharacteristic ref="vap_wnh" /> <GeoLevelCharacteristic ref="vap_pi" /> <GeoLevelCharacteristic ref="vap_a" /> <GeoLevelCharacteristic ref="totpop_wnh" /> <GeoLevelCharacteristic ref="totpop_pi" /> <GeoLevelCharacteristic ref="totpop_a" /> <GeoLevelCharacteristic ref="totpop_b" /> <GeoLevelCharacteristic ref="totpop_h" /> <GeoLevelCharacteristic ref="totpop_na" /> %(start_elec)s <GeoLevelCharacteristic ref="vote_dem" /> <GeoLevelCharacteristic ref="vote_rep" /> <GeoLevelCharacteristic ref="vote_tot" /> <GeoLevelCharacteristic ref="vote_dem_norm" /> <GeoLevelCharacteristic ref="vote_rep_norm" /> <GeoLevelCharacteristic ref="vote_tot_norm" /> %(end_elec)s </GeoLevelCharacteristics> <LegislativeBodies> <LegislativeBody ref="congress"> <Parent ref="tract" /> <LegislativeTargets> <LegislativeTarget ref="congress_target" default="true" /> </LegislativeTargets> </LegislativeBody> <LegislativeBody ref="house"> <Parent ref="tract" /> <LegislativeTargets> <LegislativeTarget ref="house_target" default="true" /> </LegislativeTargets> </LegislativeBody> <LegislativeBody ref="senate"> <Parent ref="tract" /> <LegislativeTargets> <LegislativeTarget ref="senate_target" default="true" /> </LegislativeTargets> </LegislativeBody> </LegislativeBodies> </GeoLevel> </GeoLevels> <Templates> <Template name="Congressional"> <LegislativeBody ref="congress"/> <Blockfile path="/projects/PublicMapping/data/congress_generated_index.csv" /> </Template> <Template name="State House"> <LegislativeBody ref="house"/> <Blockfile path="/projects/PublicMapping/data/house_generated_index.csv" /> </Template> <Template name="State Senate"> <LegislativeBody ref="senate"/> <Blockfile path="/projects/PublicMapping/data/senate_generated_index.csv" /> </Template> </Templates> <Project root="/projects/PublicMapping/DistrictBuilder" sessionquota="5" sessiontimeout="15"> <!-- Database connection information. --> <Database name="publicmapping" user="publicmapping" password="<PASSWORD>"/> <!-- Administrative user information. This should match the admin user created when the django project is created. --> <Admin user="admin" email="<EMAIL>" password="<PASSWORD>"/> <!-- Configuration items specific to the 'redistricting' app. 
--> <Redistricting> <MapServer hostname="" ns="pmp" nshref="http://publicmapping.sourceforge.net/" adminuser="admin" adminpass="<PASSWORD>" maxfeatures="100" styles="/projects/PublicMapping/DistrictBuilder/sld" /> <!-- Use a GoogleAnalytics account to tract the usage of the application. This requires an account and domain. <GoogleAnalytics account="" domain=""/> --> <!-- Upload file size restrictions. This is in KB --> <Upload maxsize="2500"/> <!-- Undo restrictions --> <MaxUndos duringedit="50" afteredit="10" /> <!-- Leaderboard configuration --> <Leaderboard maxranked="10" /> </Redistricting> <Reporting> <BardConfigs> <BardConfig id="blocks" shape="/projects/PublicMapping/data/census_configured.Rdata" temp="/projects/PublicMapping/local/reports" transform="/projects/PublicMapping/DistrictBuilder/docs/bard_template.xslt"> <PopVars> <PopVar subjectref="totpop" threshold=".01" default="true" /> <PopVar subjectref="vap" threshold=".1" /> </PopVars> <RatioVars> <!-- Set up RatioVars for both ethnicity and political party. --> <RatioVar id="racialComp" label="Majority Minority Districts" threshold=".5"> <Numerators> <Numerator subjectref="totpop_b" /> <Numerator subjectref="totpop_h" /> <Numerator subjectref="totpop_na" /> <Numerator subjectref="totpop_a" /> <Numerator subjectref="totpop_pi" /> <Numerator subjectref="totpop_wnh" /> </Numerators> <Denominator subjectref="totpop" /> </RatioVar> <RatioVar id="racialCompVap" label="Majority Minority Districts" threshold=".5"> <Numerators> <Numerator subjectref="vap_b" /> <Numerator subjectref="vap_h" /> <Numerator subjectref="vap_na" /> <Numerator subjectref="vap_a" /> <Numerator subjectref="vap_pi" /> <Numerator subjectref="vap_wnh" /> </Numerators> <Denominator subjectref="vap" /> </RatioVar> %(start_elec)s <RatioVar id="partyControl" label="Party-Controlled Districts" threshold=".5"> <Numerators> <Numerator subjectref="vote_dem" /> <Numerator subjectref="vote_rep" /> </Numerators> <Denominator subjectref="vote_tot" /> </RatioVar> %(end_elec)s </RatioVars> <SplitVars> <!-- See whether a given district splits a geography. This can be any higher level geography: a county, VTd, or tract. --> <SplitVar field="COUNTYFP10" label="County" /> <SplitVar field="TRACTCE10" label="Tract" /> </SplitVars> </BardConfig> </BardConfigs> <BardBodyConfigs> <!-- For each legislative body, map the configuration to the geography used to generate reports. --> <BardBodyConfig id="congress_blocks" legislativebodyref="congress" bardconfigref="blocks" /> <BardBodyConfig id="house_blocks" legislativebodyref="house" bardconfigref="blocks" /> <BardBodyConfig id="senate_blocks" legislativebodyref="senate" bardconfigref="blocks" /> </BardBodyConfigs> </Reporting> <!-- Information about the mailer configuration. 
--> <Mailer server="localhost" port="25" username="" password=""/> </Project> </DistrictBuilder> ### ### MAIN ### # # Get Arguments # # required arguments # operations to perform # configuration options # include na if there is a positive target, even if not otherwise specified # install dependencies # Clear out DB # generate generic sld files # Retrieve data files # merge standard variables # TODO: Refactor entirely in rpy # TODO: Refactor entirely in rpy # NOTE: robject is returning 6-level quantiles, has_election_data, has_vtd, sum_TOTPOP # TODO: should work but has embedded string forwarding #contiguityOverrideString = robjects.r.contiguityOverrideString # TODO: refactor as matrix of varnames and geographies # generate config file #subprocess.check_call(["setup.py","-v2","/projects/PublicMapping/DistrictBuilder/docs/config.xsd"," /projects/PublicMapping/DistrictBuilder/docs/config_census_generated.xml"]) # workaround celeryd first-time startup problem | 2.31706 | 2 |
georinex/nav2.py | mrsnhl/georinex | 1 | 6633294 | <filename>georinex/nav2.py
#!/usr/bin/env python
from pathlib import Path
from datetime import datetime
from typing import Dict, Union, Any, Sequence
from typing.io import TextIO
import xarray
import numpy as np
import logging
from .rio import opener, rinexinfo
from .common import rinex_string_to_float
#
STARTCOL2 = 3 # column where numerical data starts for RINEX 2
Nl = {'G': 7, 'R': 3, 'E': 7} # number of additional SV lines
def rinexnav2(fn: Union[TextIO, str, Path],
tlim: Sequence[datetime] = None) -> xarray.Dataset:
"""
Reads RINEX 2.x NAV files
<NAME>, Ph.D.
SciVision, Inc.
http://gage14.upc.es/gLAB/HTML/GPS_Navigation_Rinex_v2.11.html
ftp://igs.org/pub/data/format/rinex211.txt
"""
if isinstance(fn, (str, Path)):
fn = Path(fn).expanduser()
Lf = 19 # string length per field
svs = []
times = []
raws = []
with opener(fn) as f:
header = navheader2(f)
if header['filetype'] == 'N':
svtype = 'G'
fields = ['SVclockBias', 'SVclockDrift', 'SVclockDriftRate',
'IODE', 'Crs', 'DeltaN', 'M0',
'Cuc', 'Eccentricity', 'Cus', 'sqrtA',
'Toe', 'Cic', 'Omega0', 'Cis',
'Io', 'Crc', 'omega', 'OmegaDot',
'IDOT', 'CodesL2', 'GPSWeek', 'L2Pflag',
'SVacc', 'health', 'TGD', 'IODC',
'TransTime', 'FitIntvl']
elif header['filetype'] == 'G':
svtype = 'R' # GLONASS
fields = ['SVclockBias', 'SVrelFreqBias', 'MessageFrameTime',
'X', 'dX', 'dX2', 'health',
'Y', 'dY', 'dY2', 'FreqNum',
'Z', 'dZ', 'dZ2', 'AgeOpInfo']
elif header['filetype'] == 'E':
svtype = 'E' # Galileo
fields = ['SVclockBias', 'SVclockDrift', 'SVclockDriftRate',
'IODnav', 'Crs', 'DeltaN', 'M0',
'Cuc', 'Eccentricity', 'Cus', 'sqrtA',
'Toe', 'Cic', 'Omega0', 'Cis',
'Io', 'Crc', 'omega', 'OmegaDot',
'IDOT', 'DataSrc', 'GALWeek',
'SISA', 'health', 'BGDe5a', 'BGDe5b',
'TransTime']
else:
raise NotImplementedError(f'I do not yet handle Rinex 2 NAV {header["sys"]} {fn}')
# %% read data
for ln in f:
try:
time = _timenav(ln)
except ValueError:
continue
if tlim is not None:
if time < tlim[0]:
_skip(f, Nl[header['systems']])
continue
elif time > tlim[1]:
break
# %% format I2 http://gage.upc.edu/sites/default/files/gLAB/HTML/GPS_Navigation_Rinex_v2.11.html
svs.append(f'{svtype}{ln[:2]}')
times.append(time)
"""
now get the data as one big long string per SV
"""
raw = ln[22:79] # NOTE: MUST be 79, not 80 due to some files that put \n a character early!
for _ in range(Nl[header['systems']]):
raw += f.readline()[STARTCOL2:79]
# one line per SV
# NOTE: Sebastijan added .replace(' ', ' ').replace(' -', '-')
# here, I would like to see a file that needs this first, to be sure
# I'm not needlessly slowing down reading or creating new problems.
raws.append(raw.replace('D', 'E').replace('\n', ''))
# %% parse
svs = [s.replace(' ', '0') for s in svs]
svu = sorted(set(svs))
atimes = np.asarray(times)
timesu = np.unique(atimes)
data = np.empty((len(fields), timesu.size, len(svu)))
data.fill(np.nan)
for j, sv in enumerate(svu): # for each SV, across all values and times...
svi = [i for i, s in enumerate(svs) if s == sv] # these rows are for this SV
tu = np.unique(atimes[svi]) # this SV was seen at these times
if tu.size != atimes[svi].size:
logging.warning(f'duplicate times detected, skipping SV {sv}')
continue
for i in svi:
it = np.nonzero(timesu == times[i])[0][0] # int by defn
"""
some files sometimes drop the last measurement, this fixes that.
It assumes the blank is always in the last measurement for now.
"""
dvec = [float(raws[i][k*Lf:(k+1)*Lf]) for k in range(min(len(fields), len(raws[i])//Lf))]
data[:len(dvec), it, j] = dvec
# %% assemble output
# NOTE: time must be datetime64[ns] or .to_netcdf will fail
nav = xarray.Dataset(coords={'time': timesu.astype('datetime64[ns]'), 'sv': svu})
for i, k in enumerate(fields):
if k is None:
continue
nav[k] = (('time', 'sv'), data[i, :, :])
# GLONASS uses kilometers to report its ephemeris.
# Convert to meters here to be consistent with NAV3 implementation.
if svtype == 'R':
for name in ['X', 'Y', 'Z', 'dX', 'dY', 'dZ', 'dX2', 'dY2', 'dZ2']:
nav[name] *= 1e3
# %% other attributes
nav.attrs['version'] = header['version']
nav.attrs['svtype'] = [svtype] # Use list for consistency with NAV3.
nav.attrs['rinextype'] = 'nav'
if isinstance(fn, Path):
nav.attrs['filename'] = fn.name
if 'ION ALPHA' in header and 'ION BETA' in header:
alpha = header['ION ALPHA']
alpha = [rinex_string_to_float(alpha[2 + i*12:2 + (i+1)*12])
for i in range(4)]
beta = header['ION BETA']
beta = [rinex_string_to_float(beta[2 + i*12:2 + (i+1)*12])
for i in range(4)]
nav.attrs['ionospheric_corr_GPS'] = np.hstack((alpha, beta))
return nav
def navheader2(f: TextIO) -> Dict[str, Any]:
"""
For RINEX NAV version 2 only. End users should use rinexheader()
"""
if isinstance(f, (str, Path)):
with opener(f, header=True) as h:
return navheader2(h)
hdr = rinexinfo(f)
for ln in f:
if 'END OF HEADER' in ln:
break
kind, content = ln[60:].strip(), ln[:60]
hdr[kind] = content
return hdr
def _timenav(ln: str) -> datetime:
year = int(ln[3:5])
if 80 <= year <= 99:
year += 1900
elif year < 80: # because we might pass in four-digit year
year += 2000
else:
raise ValueError(f'unknown year format {year}')
return datetime(year=year,
month=int(ln[6:8]),
day=int(ln[9:11]),
hour=int(ln[12:14]),
minute=int(ln[15:17]),
second=int(float(ln[17:20])),
microsecond=int(float(ln[17:22]) % 1 * 1000000)
)
def _skip(f: TextIO, Nl: int):
for _, _ in zip(range(Nl), f):
pass
def navtime2(fn: Union[TextIO, Path]) -> np.ndarray:
"""
read all times in RINEX 2 NAV file
"""
times = []
with opener(fn) as f:
hdr = navheader2(f)
while True:
ln = f.readline()
if not ln:
break
try:
time = _timenav(ln)
except ValueError:
continue
times.append(time)
_skip(f, Nl[hdr['systems']])
return np.unique(times)
| <filename>georinex/nav2.py
#!/usr/bin/env python
from pathlib import Path
from datetime import datetime
from typing import Dict, Union, Any, Sequence
from typing.io import TextIO
import xarray
import numpy as np
import logging
from .rio import opener, rinexinfo
from .common import rinex_string_to_float
#
STARTCOL2 = 3 # column where numerical data starts for RINEX 2
Nl = {'G': 7, 'R': 3, 'E': 7} # number of additional SV lines
def rinexnav2(fn: Union[TextIO, str, Path],
tlim: Sequence[datetime] = None) -> xarray.Dataset:
"""
Reads RINEX 2.x NAV files
<NAME>, Ph.D.
SciVision, Inc.
http://gage14.upc.es/gLAB/HTML/GPS_Navigation_Rinex_v2.11.html
ftp://igs.org/pub/data/format/rinex211.txt
"""
if isinstance(fn, (str, Path)):
fn = Path(fn).expanduser()
Lf = 19 # string length per field
svs = []
times = []
raws = []
with opener(fn) as f:
header = navheader2(f)
if header['filetype'] == 'N':
svtype = 'G'
fields = ['SVclockBias', 'SVclockDrift', 'SVclockDriftRate',
'IODE', 'Crs', 'DeltaN', 'M0',
'Cuc', 'Eccentricity', 'Cus', 'sqrtA',
'Toe', 'Cic', 'Omega0', 'Cis',
'Io', 'Crc', 'omega', 'OmegaDot',
'IDOT', 'CodesL2', 'GPSWeek', 'L2Pflag',
'SVacc', 'health', 'TGD', 'IODC',
'TransTime', 'FitIntvl']
elif header['filetype'] == 'G':
svtype = 'R' # GLONASS
fields = ['SVclockBias', 'SVrelFreqBias', 'MessageFrameTime',
'X', 'dX', 'dX2', 'health',
'Y', 'dY', 'dY2', 'FreqNum',
'Z', 'dZ', 'dZ2', 'AgeOpInfo']
elif header['filetype'] == 'E':
svtype = 'E' # Galileo
fields = ['SVclockBias', 'SVclockDrift', 'SVclockDriftRate',
'IODnav', 'Crs', 'DeltaN', 'M0',
'Cuc', 'Eccentricity', 'Cus', 'sqrtA',
'Toe', 'Cic', 'Omega0', 'Cis',
'Io', 'Crc', 'omega', 'OmegaDot',
'IDOT', 'DataSrc', 'GALWeek',
'SISA', 'health', 'BGDe5a', 'BGDe5b',
'TransTime']
else:
raise NotImplementedError(f'I do not yet handle Rinex 2 NAV {header["sys"]} {fn}')
# %% read data
for ln in f:
try:
time = _timenav(ln)
except ValueError:
continue
if tlim is not None:
if time < tlim[0]:
_skip(f, Nl[header['systems']])
continue
elif time > tlim[1]:
break
# %% format I2 http://gage.upc.edu/sites/default/files/gLAB/HTML/GPS_Navigation_Rinex_v2.11.html
svs.append(f'{svtype}{ln[:2]}')
times.append(time)
"""
now get the data as one big long string per SV
"""
raw = ln[22:79] # NOTE: MUST be 79, not 80 due to some files that put \n a character early!
for _ in range(Nl[header['systems']]):
raw += f.readline()[STARTCOL2:79]
# one line per SV
# NOTE: Sebastijan added .replace(' ', ' ').replace(' -', '-')
# here, I would like to see a file that needs this first, to be sure
# I'm not needlessly slowing down reading or creating new problems.
raws.append(raw.replace('D', 'E').replace('\n', ''))
# %% parse
svs = [s.replace(' ', '0') for s in svs]
svu = sorted(set(svs))
atimes = np.asarray(times)
timesu = np.unique(atimes)
data = np.empty((len(fields), timesu.size, len(svu)))
data.fill(np.nan)
for j, sv in enumerate(svu): # for each SV, across all values and times...
svi = [i for i, s in enumerate(svs) if s == sv] # these rows are for this SV
tu = np.unique(atimes[svi]) # this SV was seen at these times
if tu.size != atimes[svi].size:
logging.warning(f'duplicate times detected, skipping SV {sv}')
continue
for i in svi:
it = np.nonzero(timesu == times[i])[0][0] # int by defn
"""
some files sometimes drop the last measurement, this fixes that.
It assumes the blank is always in the last measurement for now.
"""
dvec = [float(raws[i][k*Lf:(k+1)*Lf]) for k in range(min(len(fields), len(raws[i])//Lf))]
data[:len(dvec), it, j] = dvec
# %% assemble output
# NOTE: time must be datetime64[ns] or .to_netcdf will fail
nav = xarray.Dataset(coords={'time': timesu.astype('datetime64[ns]'), 'sv': svu})
for i, k in enumerate(fields):
if k is None:
continue
nav[k] = (('time', 'sv'), data[i, :, :])
# GLONASS uses kilometers to report its ephemeris.
# Convert to meters here to be consistent with NAV3 implementation.
if svtype == 'R':
for name in ['X', 'Y', 'Z', 'dX', 'dY', 'dZ', 'dX2', 'dY2', 'dZ2']:
nav[name] *= 1e3
# %% other attributes
nav.attrs['version'] = header['version']
nav.attrs['svtype'] = [svtype] # Use list for consistency with NAV3.
nav.attrs['rinextype'] = 'nav'
if isinstance(fn, Path):
nav.attrs['filename'] = fn.name
if 'ION ALPHA' in header and 'ION BETA' in header:
alpha = header['ION ALPHA']
alpha = [rinex_string_to_float(alpha[2 + i*12:2 + (i+1)*12])
for i in range(4)]
beta = header['ION BETA']
beta = [rinex_string_to_float(beta[2 + i*12:2 + (i+1)*12])
for i in range(4)]
nav.attrs['ionospheric_corr_GPS'] = np.hstack((alpha, beta))
return nav
def navheader2(f: TextIO) -> Dict[str, Any]:
"""
For RINEX NAV version 2 only. End users should use rinexheader()
"""
if isinstance(f, (str, Path)):
with opener(f, header=True) as h:
return navheader2(h)
hdr = rinexinfo(f)
for ln in f:
if 'END OF HEADER' in ln:
break
kind, content = ln[60:].strip(), ln[:60]
hdr[kind] = content
return hdr
def _timenav(ln: str) -> datetime:
year = int(ln[3:5])
if 80 <= year <= 99:
year += 1900
elif year < 80: # because we might pass in four-digit year
year += 2000
else:
raise ValueError(f'unknown year format {year}')
return datetime(year=year,
month=int(ln[6:8]),
day=int(ln[9:11]),
hour=int(ln[12:14]),
minute=int(ln[15:17]),
second=int(float(ln[17:20])),
microsecond=int(float(ln[17:22]) % 1 * 1000000)
)
def _skip(f: TextIO, Nl: int):
for _, _ in zip(range(Nl), f):
pass
def navtime2(fn: Union[TextIO, Path]) -> np.ndarray:
"""
read all times in RINEX 2 NAV file
"""
times = []
with opener(fn) as f:
hdr = navheader2(f)
while True:
ln = f.readline()
if not ln:
break
try:
time = _timenav(ln)
except ValueError:
continue
times.append(time)
_skip(f, Nl[hdr['systems']])
return np.unique(times)
| en | 0.793765 | #!/usr/bin/env python # # column where numerical data starts for RINEX 2 # number of additional SV lines Reads RINEX 2.x NAV files <NAME>, Ph.D. SciVision, Inc. http://gage14.upc.es/gLAB/HTML/GPS_Navigation_Rinex_v2.11.html ftp://igs.org/pub/data/format/rinex211.txt # string length per field # GLONASS # Galileo # %% read data # %% format I2 http://gage.upc.edu/sites/default/files/gLAB/HTML/GPS_Navigation_Rinex_v2.11.html now get the data as one big long string per SV # NOTE: MUST be 79, not 80 due to some files that put \n a character early! # one line per SV # NOTE: Sebastijan added .replace(' ', ' ').replace(' -', '-') # here, I would like to see a file that needs this first, to be sure # I'm not needlessly slowing down reading or creating new problems. # %% parse # for each SV, across all values and times... # these rows are for this SV # this SV was seen at these times # int by defn some files sometimes drop the last measurement, this fixes that. It assumes the blank is always in the last measurement for now. # %% assemble output # NOTE: time must be datetime64[ns] or .to_netcdf will fail # GLONASS uses kilometers to report its ephemeris. # Convert to meters here to be consistent with NAV3 implementation. # %% other attributes # Use list for consistency with NAV3. For RINEX NAV version 2 only. End users should use rinexheader() # because we might pass in four-digit year read all times in RINEX 2 NAV file | 2.584472 | 3 |
Cogs/settings.py | bobo3769/VirusTotalDiscordBot | 5 | 6633295 | <reponame>bobo3769/VirusTotalDiscordBot
import random
from discord import Colour
"""
These are some presets configs, that are predefined
and normally dont need any changes (Thats why they are not in the config file
"""
bottest = True # decides if the bot checks other bots messages
ignorfiles = ['image/gif', 'image/jpeg'] # Content types to ignor. Check out https://en.wikipedia.org/wiki/Media_type
checkorange = 1 # if more or equal than that checks are positive the embed will be orange
checkred = 3 # if more or equal than that checks are positive the embed will be red
helpembedcolour = Colour(random.randint(0, 16777215))
| import random
from discord import Colour
"""
These are some presets configs, that are predefined
and normally dont need any changes (Thats why they are not in the config file
"""
bottest = True # decides if the bot checks other bots messages
ignorfiles = ['image/gif', 'image/jpeg'] # Content types to ignor. Check out https://en.wikipedia.org/wiki/Media_type
checkorange = 1 # if more or equal than that checks are positive the embed will be orange
checkred = 3 # if more or equal than that checks are positive the embed will be red
helpembedcolour = Colour(random.randint(0, 16777215)) | en | 0.841309 | These are some presets configs, that are predefined and normally dont need any changes (Thats why they are not in the config file # decides if the bot checks other bots messages # Content types to ignor. Check out https://en.wikipedia.org/wiki/Media_type # if more or equal than that checks are positive the embed will be orange # if more or equal than that checks are positive the embed will be red | 2.507378 | 3 |
tests/test_games/test_core/test_deck.py | joedaws/card-player | 0 | 6633296 | import pytest
from cartomancy.games.core.deck import Deck
@pytest.fixture
def deck():
return Deck()
def test_deck(deck):
assert hasattr(deck, 'draw')
assert hasattr(deck, 'shuffle')
assert hasattr(deck, '__len__')
assert hasattr(deck, '__str__')
| import pytest
from cartomancy.games.core.deck import Deck
@pytest.fixture
def deck():
return Deck()
def test_deck(deck):
assert hasattr(deck, 'draw')
assert hasattr(deck, 'shuffle')
assert hasattr(deck, '__len__')
assert hasattr(deck, '__str__')
| none | 1 | 2.655797 | 3 |
|
cv/apps.py | mikebader/django-cv | 3 | 6633297 | <reponame>mikebader/django-cv<gh_stars>1-10
from django.apps import AppConfig
import cv
class CvConfig(AppConfig):
name = 'cv'
verbose_name = 'CV'
def ready(self):
import cv.signals
| from django.apps import AppConfig
import cv
class CvConfig(AppConfig):
name = 'cv'
verbose_name = 'CV'
def ready(self):
import cv.signals | none | 1 | 1.472975 | 1 |
|
BNB/tests/test_BNB0.py | celioggr/erc20-pbt | 5 | 6633298 | import pytest
from erc20_pbt import StateMachine
@pytest.fixture()
def contract2test(BNB):
yield BNB
class BNB(StateMachine):
def __init__(self, accounts, contract2test):
contract = contract2test.deploy(
1000, "BNB", 18, "BNB", {"from": accounts[0]}
)
StateMachine.__init__(self, accounts, contract, 1000)
"""Overwrite state machine brownie.reverts() to search for a revert comment string"""
def rule_transfer(self, st_sender, st_receiver, st_amount):
if self.DEBUG:
print(
"transfer({}, {}, {})".format(st_sender, st_receiver, st_amount)
)
if st_amount <= self.balances[st_sender]:
tx = self.contract.transfer(
st_receiver, st_amount, {"from": st_sender}
)
self.verifyTransfer(st_sender, st_receiver, st_amount)
self.verifyEvent(
tx,
"Transfer",
{"from": st_sender, "to": st_receiver, "value": st_amount},
)
else:
with brownie.reverts("revert"):
self.contract.transfer(
st_receiver, st_amount, {"from": st_sender}
)
"""Overwrite state machine brownie.reverts() to search for a revert comment string"""
def rule_transferFrom(self, st_spender, st_owner, st_receiver, st_amount):
if self.DEBUG:
print(
"transferFrom({}, {}, {}, [from: {}])".format(
st_owner, st_receiver, st_amount, st_spender
)
)
if st_amount == 0 or (
(st_owner, st_spender) in self.allowances.keys()
and self.balances[st_owner] >= st_amount
and self.allowances[(st_owner, st_spender)] >= st_amount
):
tx = self.contract.transferFrom(
st_owner, st_receiver, st_amount, {"from": st_spender}
)
self.verifyTransfer(st_owner, st_receiver, st_amount)
self.verifyAllowance(st_owner, st_spender, -st_amount)
self.verifyEvent(
tx,
"Transfer",
{"from": st_owner, "to": st_receiver, "value": st_amount},
)
else:
with brownie.reverts("revert"):
tx = self.contract.transferFrom(
st_owner, st_receiver, st_amount, {"from": st_spender}
)
def test_stateful(contract2test, accounts, state_machine):
state_machine(BNB, accounts, contract2test)
| import pytest
from erc20_pbt import StateMachine
@pytest.fixture()
def contract2test(BNB):
yield BNB
class BNB(StateMachine):
def __init__(self, accounts, contract2test):
contract = contract2test.deploy(
1000, "BNB", 18, "BNB", {"from": accounts[0]}
)
StateMachine.__init__(self, accounts, contract, 1000)
"""Overwrite state machine brownie.reverts() to search for a revert comment string"""
def rule_transfer(self, st_sender, st_receiver, st_amount):
if self.DEBUG:
print(
"transfer({}, {}, {})".format(st_sender, st_receiver, st_amount)
)
if st_amount <= self.balances[st_sender]:
tx = self.contract.transfer(
st_receiver, st_amount, {"from": st_sender}
)
self.verifyTransfer(st_sender, st_receiver, st_amount)
self.verifyEvent(
tx,
"Transfer",
{"from": st_sender, "to": st_receiver, "value": st_amount},
)
else:
with brownie.reverts("revert"):
self.contract.transfer(
st_receiver, st_amount, {"from": st_sender}
)
"""Overwrite state machine brownie.reverts() to search for a revert comment string"""
def rule_transferFrom(self, st_spender, st_owner, st_receiver, st_amount):
if self.DEBUG:
print(
"transferFrom({}, {}, {}, [from: {}])".format(
st_owner, st_receiver, st_amount, st_spender
)
)
if st_amount == 0 or (
(st_owner, st_spender) in self.allowances.keys()
and self.balances[st_owner] >= st_amount
and self.allowances[(st_owner, st_spender)] >= st_amount
):
tx = self.contract.transferFrom(
st_owner, st_receiver, st_amount, {"from": st_spender}
)
self.verifyTransfer(st_owner, st_receiver, st_amount)
self.verifyAllowance(st_owner, st_spender, -st_amount)
self.verifyEvent(
tx,
"Transfer",
{"from": st_owner, "to": st_receiver, "value": st_amount},
)
else:
with brownie.reverts("revert"):
tx = self.contract.transferFrom(
st_owner, st_receiver, st_amount, {"from": st_spender}
)
def test_stateful(contract2test, accounts, state_machine):
state_machine(BNB, accounts, contract2test)
| en | 0.385355 | Overwrite state machine brownie.reverts() to search for a revert comment string Overwrite state machine brownie.reverts() to search for a revert comment string | 2.238061 | 2 |
apex/transformer/utils.py | neon-wild/apex | 0 | 6633299 | """Utility functions used by both `pipeline_parallel` and `tensor_parallel`"""
import torch
from apex.transformer import parallel_state
def ensure_divisibility(numerator, denominator):
"""Ensure that numerator is divisible by the denominator."""
assert numerator % denominator == 0, "{} is not divisible by {}".format(numerator, denominator)
def divide(numerator, denominator):
"""Ensure that numerator is divisible by the denominator and return
the division value."""
ensure_divisibility(numerator, denominator)
return numerator // denominator
def split_tensor_into_1d_equal_chunks(tensor):
"""Break a tensor into equal 1D chunks."""
data = tensor.view(-1)
partition_size = torch.numel(data) // parallel_state.get_tensor_model_parallel_world_size()
start_index = partition_size * parallel_state.get_tensor_model_parallel_rank()
end_index = start_index + partition_size
return data[start_index:end_index]
def gather_split_1d_tensor(tensor):
"""Opposite of above function, gather values from model parallel ranks."""
world_size = parallel_state.get_tensor_model_parallel_world_size()
numel = torch.numel(tensor)
numel_gathered = world_size * numel
gathered = torch.empty(numel_gathered, dtype=tensor.dtype, device=torch.cuda.current_device(), requires_grad=False)
chunks = [gathered[i * numel:(i + 1) * numel] for i in range(world_size)]
torch.distributed.all_gather(chunks, tensor, group=parallel_state.get_tensor_model_parallel_group())
return gathered
# TODO(mkozuki): Rewrite this using `logging`.
def rank_print(msg):
"""Print the given msg with rank information"""
print(
f"tensor rank: {parallel_state.get_tensor_model_parallel_rank()}"
f"pipeline rank: {parallel_state.get_pipeline_model_parallel_rank()}, "
f"virtual pipeline rank: {parallel_state.get_virtual_pipeline_model_parallel_rank()}, "
f"data rank: {parallel_state.get_data_parallel_rank()} | {msg}"
)
| """Utility functions used by both `pipeline_parallel` and `tensor_parallel`"""
import torch
from apex.transformer import parallel_state
def ensure_divisibility(numerator, denominator):
"""Ensure that numerator is divisible by the denominator."""
assert numerator % denominator == 0, "{} is not divisible by {}".format(numerator, denominator)
def divide(numerator, denominator):
"""Ensure that numerator is divisible by the denominator and return
the division value."""
ensure_divisibility(numerator, denominator)
return numerator // denominator
def split_tensor_into_1d_equal_chunks(tensor):
"""Break a tensor into equal 1D chunks."""
data = tensor.view(-1)
partition_size = torch.numel(data) // parallel_state.get_tensor_model_parallel_world_size()
start_index = partition_size * parallel_state.get_tensor_model_parallel_rank()
end_index = start_index + partition_size
return data[start_index:end_index]
def gather_split_1d_tensor(tensor):
"""Opposite of above function, gather values from model parallel ranks."""
world_size = parallel_state.get_tensor_model_parallel_world_size()
numel = torch.numel(tensor)
numel_gathered = world_size * numel
gathered = torch.empty(numel_gathered, dtype=tensor.dtype, device=torch.cuda.current_device(), requires_grad=False)
chunks = [gathered[i * numel:(i + 1) * numel] for i in range(world_size)]
torch.distributed.all_gather(chunks, tensor, group=parallel_state.get_tensor_model_parallel_group())
return gathered
# TODO(mkozuki): Rewrite this using `logging`.
def rank_print(msg):
"""Print the given msg with rank information"""
print(
f"tensor rank: {parallel_state.get_tensor_model_parallel_rank()}"
f"pipeline rank: {parallel_state.get_pipeline_model_parallel_rank()}, "
f"virtual pipeline rank: {parallel_state.get_virtual_pipeline_model_parallel_rank()}, "
f"data rank: {parallel_state.get_data_parallel_rank()} | {msg}"
)
| en | 0.759719 | Utility functions used by both `pipeline_parallel` and `tensor_parallel` Ensure that numerator is divisible by the denominator. Ensure that numerator is divisible by the denominator and return the division value. Break a tensor into equal 1D chunks. Opposite of above function, gather values from model parallel ranks. # TODO(mkozuki): Rewrite this using `logging`. Print the given msg with rank information | 2.685267 | 3 |
Test Code/speak_reg/one_user_rec.py | joexu01/speak_auth | 0 | 6633300 | <gh_stars>0
from pyAudioAnalysis import audioBasicIO
from pyAudioAnalysis import audioFeatureExtraction
import matplotlib.pyplot as plt
import numpy as np
from sklearn import mixture
from sklearn import svm
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_val_score
from sklearn.mixture import GaussianMixture
import os
RATE = float(0.75)
# 提取特征
my_file = ['1.wav', '2.wav', '3.wav', '4.wav', '5.wav', '6.wav', '7.wav', '8.wav']
person = '12'
data_matrix = []
label_matrix = []
for file in my_file:
[Fs, x] = audioBasicIO.readAudioFile("D:/ML/speak_reg/spk_rec_data/train/" + person + '/' + file)
F = audioFeatureExtraction.stFeatureExtraction_modified(x, Fs, 0.050 * Fs, 0.025 * Fs)
f = F[8:21, ]
f = f.T
data_matrix.append(f)
label = np.empty(f.shape[0], dtype=int)
label = np.full(label.shape, int(person))
label_matrix.append(label)
data_matrix = np.concatenate(data_matrix, 0)
label_matrix = np.concatenate(label_matrix, 0)
print(data_matrix.shape)
print(label_matrix.shape)
# clf_svm = svm.SVC(gamma='scale', decision_function_shape='ovo')
# clf_svm.fit(data_matrix, label_matrix)
gmm = GaussianMixture(n_components=1, covariance_type='full')
gmm.fit(data_matrix, label_matrix)
def max_list(lt):
temp = 0
for i in lt:
if lt.count(i) > temp:
max_str = i
temp = lt.count(i)
return str(max_str)
# lt->list, lb->label
def calculate_rate(lt, total, lb):
counter = 0
for item in lt:
if item == lb:
counter += 1
return float(counter / total)
# 预测
pre_dir = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19',
'20', '21', '22']
pre_file = ['1.wav', '2.wav']
# result_str = ''
for p_dir in pre_dir:
print(p_dir + ': '),
for p_file in pre_file:
[Fs, x] = audioBasicIO.readAudioFile("D:/ML/speak_reg/spk_rec_data/test/" + p_dir + '/' + p_file)
F = audioFeatureExtraction.stFeatureExtraction(x, Fs, 0.050 * Fs, 0.025 * Fs)
f = F[8:21, ].T
result = gmm.predict(f)
# if calculate_rate(result.tolist(), float(result.shape[0]), p_dir) >= RATE:
# print('Yes '),
# else:
# print('No'),
print(result) | from pyAudioAnalysis import audioBasicIO
from pyAudioAnalysis import audioFeatureExtraction
import matplotlib.pyplot as plt
import numpy as np
from sklearn import mixture
from sklearn import svm
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_val_score
from sklearn.mixture import GaussianMixture
import os
RATE = float(0.75)
# 提取特征
my_file = ['1.wav', '2.wav', '3.wav', '4.wav', '5.wav', '6.wav', '7.wav', '8.wav']
person = '12'
data_matrix = []
label_matrix = []
for file in my_file:
[Fs, x] = audioBasicIO.readAudioFile("D:/ML/speak_reg/spk_rec_data/train/" + person + '/' + file)
F = audioFeatureExtraction.stFeatureExtraction_modified(x, Fs, 0.050 * Fs, 0.025 * Fs)
f = F[8:21, ]
f = f.T
data_matrix.append(f)
label = np.empty(f.shape[0], dtype=int)
label = np.full(label.shape, int(person))
label_matrix.append(label)
data_matrix = np.concatenate(data_matrix, 0)
label_matrix = np.concatenate(label_matrix, 0)
print(data_matrix.shape)
print(label_matrix.shape)
# clf_svm = svm.SVC(gamma='scale', decision_function_shape='ovo')
# clf_svm.fit(data_matrix, label_matrix)
gmm = GaussianMixture(n_components=1, covariance_type='full')
gmm.fit(data_matrix, label_matrix)
def max_list(lt):
temp = 0
for i in lt:
if lt.count(i) > temp:
max_str = i
temp = lt.count(i)
return str(max_str)
# lt->list, lb->label
def calculate_rate(lt, total, lb):
counter = 0
for item in lt:
if item == lb:
counter += 1
return float(counter / total)
# 预测
pre_dir = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19',
'20', '21', '22']
pre_file = ['1.wav', '2.wav']
# result_str = ''
for p_dir in pre_dir:
print(p_dir + ': '),
for p_file in pre_file:
[Fs, x] = audioBasicIO.readAudioFile("D:/ML/speak_reg/spk_rec_data/test/" + p_dir + '/' + p_file)
F = audioFeatureExtraction.stFeatureExtraction(x, Fs, 0.050 * Fs, 0.025 * Fs)
f = F[8:21, ].T
result = gmm.predict(f)
# if calculate_rate(result.tolist(), float(result.shape[0]), p_dir) >= RATE:
# print('Yes '),
# else:
# print('No'),
print(result) | en | 0.124609 | # 提取特征 # clf_svm = svm.SVC(gamma='scale', decision_function_shape='ovo') # clf_svm.fit(data_matrix, label_matrix) # lt->list, lb->label # 预测 # result_str = '' # if calculate_rate(result.tolist(), float(result.shape[0]), p_dir) >= RATE: # print('Yes '), # else: # print('No'), | 2.711553 | 3 |
simfempy/meshes/__init__.py | anairabeze/simfempy | 0 | 6633301 |
from . import simplexmesh, plotmesh, testmeshes
|
from . import simplexmesh, plotmesh, testmeshes
| none | 1 | 0.936074 | 1 |
|
source/texture.py | matheusmmoliveira/BrickBreaker | 0 | 6633302 | <gh_stars>0
import pygame
from settings import *
import os
class BlockTextureMapper:
def __init__(self):
blk_dir = os.path.join(BASE_DIR, 'assets', 'imgs', 'blocks')
self.block_textures = {file.replace('.png', ''): pygame.image.load(os.path.join(blk_dir, file)).convert_alpha()
for file in os.listdir(blk_dir)}
def get_block_texture(self, color, size):
return pygame.transform.scale(self.block_textures[color], size)
| import pygame
from settings import *
import os
class BlockTextureMapper:
def __init__(self):
blk_dir = os.path.join(BASE_DIR, 'assets', 'imgs', 'blocks')
self.block_textures = {file.replace('.png', ''): pygame.image.load(os.path.join(blk_dir, file)).convert_alpha()
for file in os.listdir(blk_dir)}
def get_block_texture(self, color, size):
return pygame.transform.scale(self.block_textures[color], size) | none | 1 | 2.79528 | 3 |
|
brood_backend/__main__.py | jonadaly/brood-backend | 0 | 6633303 | import os
import waitress
from brood_backend.app import create_app
PORT = os.getenv("PORT", 8080)
if __name__ == "__main__":
app = create_app()
waitress.serve(app, host="0.0.0.0", port=PORT)
| import os
import waitress
from brood_backend.app import create_app
PORT = os.getenv("PORT", 8080)
if __name__ == "__main__":
app = create_app()
waitress.serve(app, host="0.0.0.0", port=PORT)
| none | 1 | 1.639164 | 2 |
|
aiida_quantumespresso/calculations/pw2wannier90.py | ltalirz/aiida-quantumespresso | 0 | 6633304 | <filename>aiida_quantumespresso/calculations/pw2wannier90.py
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from aiida.orm import RemoteData, FolderData, SinglefileData, Dict
from aiida_quantumespresso.calculations.namelists import NamelistsCalculation
class Pw2wannier90Calculation(NamelistsCalculation):
"""
pw2wannier90.x code of the Quantum ESPRESSO distribution, handles the
calculation of the Amn, Mmn, ... files to be used to compute
Wannier functions with the Wannier90 code.
For more information, refer to http://www.quantum-espresso.org/
and http://www.wannier.org/
"""
_default_namelists = ['INPUTPP']
_SEEDNAME = 'aiida'
_blocked_keywords = [
('INPUTPP', 'outdir', NamelistsCalculation._OUTPUT_SUBFOLDER),
('INPUTPP', 'prefix', NamelistsCalculation._PREFIX),
('INPUTPP', 'seedname', _SEEDNAME)
]
# By default we do not download anything else than aiida.out. One can add the files
# _SEEDNAME.amn/.nnm/.eig to inputs.settings['ADDITIONAL_RETRIEVE_LIST'] to retrieve them.
_internal_retrieve_list = []
_default_parser = 'quantumespresso.pw2wannier90'
@classmethod
def define(cls, spec):
super(Pw2wannier90Calculation, cls).define(spec)
spec.input('nnkp_file', valid_type=SinglefileData,
help='A SinglefileData containing the .nnkp file generated by wannier90.x -pp')
spec.input('parent_folder', valid_type=(RemoteData, FolderData),
help='The output folder of a pw.x calculation')
spec.output('output_parameters', valid_type=Dict)
spec.default_output_node = 'output_parameters'
spec.exit_code(
100, 'ERROR_NO_RETRIEVED_FOLDER', message='The retrieved folder data node could not be accessed.')
spec.exit_code(
110, 'ERROR_READING_OUTPUT_FILE', message='The output file could not be read from the retrieved folder.')
spec.exit_code(
130, 'ERROR_JOB_NOT_DONE', message='The computation did not finish properly (\'JOB DONE\' not found).')
spec.exit_code(
140, 'ERROR_GENERIC_QE_ERROR', message='QE printed an error message')
spec.exit_code(
150, 'ERROR_GENERIC_PARSING_FAILURE', message='An error happened while parsing the output file')
def prepare_for_submission(self, folder):
"""
Prepare the inputs of the calculation and the calcinfo data.
:param folder: an `aiida.common.folders.Folder` to temporarily write files on disk
:return: `aiida.common.datastructures.CalcInfo` instance
"""
# Run the global namelist logic
calcinfo = super(Pw2wannier90Calculation, self).prepare_for_submission(folder)
# Put the nnkp in the folder, with the correct filename
nnkp_file = self.inputs.nnkp_file
calcinfo.local_copy_list.append(
(nnkp_file.uuid, nnkp_file.filename, '{}.nnkp'.format(self._SEEDNAME))
)
return calcinfo
| <filename>aiida_quantumespresso/calculations/pw2wannier90.py
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from aiida.orm import RemoteData, FolderData, SinglefileData, Dict
from aiida_quantumespresso.calculations.namelists import NamelistsCalculation
class Pw2wannier90Calculation(NamelistsCalculation):
"""
pw2wannier90.x code of the Quantum ESPRESSO distribution, handles the
calculation of the Amn, Mmn, ... files to be used to compute
Wannier functions with the Wannier90 code.
For more information, refer to http://www.quantum-espresso.org/
and http://www.wannier.org/
"""
_default_namelists = ['INPUTPP']
_SEEDNAME = 'aiida'
_blocked_keywords = [
('INPUTPP', 'outdir', NamelistsCalculation._OUTPUT_SUBFOLDER),
('INPUTPP', 'prefix', NamelistsCalculation._PREFIX),
('INPUTPP', 'seedname', _SEEDNAME)
]
# By default we do not download anything else than aiida.out. One can add the files
# _SEEDNAME.amn/.nnm/.eig to inputs.settings['ADDITIONAL_RETRIEVE_LIST'] to retrieve them.
_internal_retrieve_list = []
_default_parser = 'quantumespresso.pw2wannier90'
@classmethod
def define(cls, spec):
super(Pw2wannier90Calculation, cls).define(spec)
spec.input('nnkp_file', valid_type=SinglefileData,
help='A SinglefileData containing the .nnkp file generated by wannier90.x -pp')
spec.input('parent_folder', valid_type=(RemoteData, FolderData),
help='The output folder of a pw.x calculation')
spec.output('output_parameters', valid_type=Dict)
spec.default_output_node = 'output_parameters'
spec.exit_code(
100, 'ERROR_NO_RETRIEVED_FOLDER', message='The retrieved folder data node could not be accessed.')
spec.exit_code(
110, 'ERROR_READING_OUTPUT_FILE', message='The output file could not be read from the retrieved folder.')
spec.exit_code(
130, 'ERROR_JOB_NOT_DONE', message='The computation did not finish properly (\'JOB DONE\' not found).')
spec.exit_code(
140, 'ERROR_GENERIC_QE_ERROR', message='QE printed an error message')
spec.exit_code(
150, 'ERROR_GENERIC_PARSING_FAILURE', message='An error happened while parsing the output file')
def prepare_for_submission(self, folder):
"""
Prepare the inputs of the calculation and the calcinfo data.
:param folder: an `aiida.common.folders.Folder` to temporarily write files on disk
:return: `aiida.common.datastructures.CalcInfo` instance
"""
# Run the global namelist logic
calcinfo = super(Pw2wannier90Calculation, self).prepare_for_submission(folder)
# Put the nnkp in the folder, with the correct filename
nnkp_file = self.inputs.nnkp_file
calcinfo.local_copy_list.append(
(nnkp_file.uuid, nnkp_file.filename, '{}.nnkp'.format(self._SEEDNAME))
)
return calcinfo
| en | 0.666956 | # -*- coding: utf-8 -*- pw2wannier90.x code of the Quantum ESPRESSO distribution, handles the calculation of the Amn, Mmn, ... files to be used to compute Wannier functions with the Wannier90 code. For more information, refer to http://www.quantum-espresso.org/ and http://www.wannier.org/ # By default we do not download anything else than aiida.out. One can add the files # _SEEDNAME.amn/.nnm/.eig to inputs.settings['ADDITIONAL_RETRIEVE_LIST'] to retrieve them. Prepare the inputs of the calculation and the calcinfo data. :param folder: an `aiida.common.folders.Folder` to temporarily write files on disk :return: `aiida.common.datastructures.CalcInfo` instance # Run the global namelist logic # Put the nnkp in the folder, with the correct filename | 2.210279 | 2 |
43. Multiply Strings.py | fossabot/leetcode-2 | 2 | 6633305 | <reponame>fossabot/leetcode-2
class Solution:
def multiply(self, num1, num2):
a=int(num1)
b=int(num2)
return str(a*b) | class Solution:
def multiply(self, num1, num2):
a=int(num1)
b=int(num2)
return str(a*b) | none | 1 | 3.344738 | 3 |
|
spiceup_labels/patch_calendar_tasks.py | nens/spiceup-labels | 0 | 6633306 | # -*- coding: utf-8 -*-
"""Configure labeltype model for the crop calendar tasks of the SpiceUp mobile app.
Used to calculate farm specific tasks from parcel location, plant age, local measurements and raster data.
Calendar tasks are generated with a Lizard labeltype. This labeltype generates crop calendar tasks per plot.
We save farm plots as parcels, which have a location and several initial parameters.
"""
import argparse
import logging
import numpy as np
import json
from copy import deepcopy
from dask_geomodeling.raster import *
from dask_geomodeling.geometry import *
from dask_geomodeling.geometry.base import SetSeriesBlock
from localsecret import username, password
from calendar_tasks_config import (
labeltype_uuid, # the uuid of the model
calendar_tasks, # crop calendar tasks as specified in online spreadsheet "Items & Properties on the App Ui/x"
lizard_rasters, # Raster data: season onset, fertilizer advice. dict with names and uuids of the rasters used
parcels, # Load parcels locally. Mimic Lizard data
parcels_labeled, # parcels with labels locally
labeled_parcels, # parcels with labels in lizard
lp_seriesblocks, # seriesblocks of labelparams per parcel
fertilizer_ids_dict, # Fertilizer conditions 1-12, based on age, variety and (live) support
)
from spiceup_labels.config_lizard import (
mimic_rasters,
raster_seriesblocks,
get_labeltype_source,
patch_labeltype,
)
logger = logging.getLogger(__name__)
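# Rough, illustrative call order for the building blocks below (the actual
# wiring into the labeltype source happens elsewhere in this module):
#   labels_df, next_df = get_calendar_tasks_labels(calendar_tasks)
#   months_ideal, days_months, days_months_1521, plant_months = months_n_days(calendar_tasks)
#   actual_plant_age(...) -> season_conditions(...) / season_state(...)
#   -> ideal_season_state(...) -> get_task_ids(...) -> task_contents(...)
#   -> next_task_contents(...) -> fertilizer advice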
# ----------------------------------------------------------
# preprocess calendar-based tasks (filtering and grouping)
def get_calendar_tasks_labels(calendar_tasks):
"""convert calendar tasks df (group by by month, season and fertilizer) to
calendar_tasks_labels df and next_calendar_tasks df"""
calendar_tasks_one = calendar_tasks[
calendar_tasks["id_season"] == 0
] # filter tasks to make the tasks independent of local conditions
calendar_tasks_one = calendar_tasks_one[
(calendar_tasks_one["fertilizer_data_id"] % 4 == 1)
| (calendar_tasks_one["fertilizer_data_id"] == 0)
]
calendar_tasks_next = (
calendar_tasks_one.groupby(
["month"]
) # group by month to list tasks by plant age
.agg(
{
"task": " | ".join, # if multiple tasks at the same time, use pipe separator ' | '
"task_IND": " | ".join,
"month": "first", # select first (or last) match with string (comes with pandas)
"id_days_start": "first",
}
)
.shift(-1)
.reset_index(drop=True)
) # assign next task(s) to current row
calendar_tasks_join = calendar_tasks.join(
calendar_tasks_next, on="month", rsuffix="_next"
)
calendar_tasks_labels = calendar_tasks_join.iloc[:, np.r_[0:19, 24, -4:0]]
calendar_tasks_labels = calendar_tasks_labels.sort_values(
by=["task_id", "month"]
) # sort by task_id, month
calendar_tasks_next.drop(calendar_tasks_next.tail(1).index, inplace=True)
return calendar_tasks_labels, calendar_tasks_next
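# Minimal sketch (not used by the pipeline) of the shift(-1) trick above: with
# one aggregated row per month, shifting the frame up by one row pairs every
# month with the *next* month's task(s). The toy frame below simplifies the
# real aggregation.
def _sketch_next_tasks_shift():
    """Return a frame where row i holds month i+1's task (last row is NaN)."""
    import pandas as pd

    df = pd.DataFrame({"month": [0, 1, 2], "task": ["plant", "water", "prune"]})
    nxt = df.groupby("month", as_index=False).agg({"task": " | ".join})
    return nxt.shift(-1).reset_index(drop=True)  # task -> ["water", "prune", NaN]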
def months_n_days(calendar_tasks):
"""List months and convert to days"""
calendar_tasks_monthly = calendar_tasks.drop_duplicates(subset=["month"])
months = list(calendar_tasks_monthly.month)
months.append(months[-1] + 1)
days_months = [round(month * 365.25 / 12 + 0.01) for month in months]
days_months_1521 = deepcopy(days_months)
days_months_1521.append(days_months[-1] + 30)
# List ages that are ideal in (normal, early, late) rainy or dry season.
ideal_conditions = sorted(
list(set([c.split(",")[0] for c in list(calendar_tasks_monthly["ideal_note"])]))
)
months_ideal = {}
for c in ideal_conditions:
c_months = calendar_tasks_monthly.id_month.where(
calendar_tasks_monthly.ideal_note.str.endswith(c, False),
other=calendar_tasks_monthly.id_month + 1000,
).to_list()
c_label = f"months_ideal_{c.lower().replace(' ', '_')}"
months_ideal[c_label] = c_months
# add 7 to all months so they become positive
calendar_tasks_plant_months = [month + 7 for month in months]
return months_ideal, days_months, days_months_1521, calendar_tasks_plant_months
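# The month-to-day conversion above is plain arithmetic; as a spot check,
# round(6 * 365.25 / 12 + 0.01) == 183 and round(12 * 365.25 / 12 + 0.01) == 365.
# The + 0.01 nudges values off the .5 boundary so halves always round up
# (Python's round() otherwise rounds halves to even).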
# ----------------------------------------------------------
def actual_plant_age(
days_since_epoch_raster_sb,
days_plant_age_sb,
days_months,
days_months_1521,
calendar_tasks_plant_months,
):
"""calculate actual plant age per plot from epoch raster and plant age labelparameter"""
doy_now_sb = Round(Modulo(days_since_epoch_raster_sb, 365.25))
days_until_jan_1_sb = doy_now_sb * -1 + 365.25
    # days_since_epoch_sb is a label-parameter seriesblock resolved from module
    # scope at call time (see lp_seriesblocks in calendar_tasks_config)
    days_since_epoch_plant_sb = days_since_epoch_sb
days_since_planting_sb = days_since_epoch_raster_sb - days_since_epoch_plant_sb
plant_age_start_sb = days_plant_age_sb
plant_age_sb = days_since_planting_sb + plant_age_start_sb
# plant_month_sb = plant_age_sb / 30.4375
calendar_tasks_plant_month_sb = Classify(
plant_age_sb, days_months_1521, calendar_tasks_plant_months, False
)
calendar_tasks_plant_year__01_1__123_2__345_3_sb = Classify(
plant_age_sb, [365, 1095], [1, 2, 3], False
)
age_01 = calendar_tasks_plant_year__01_1__123_2__345_3_sb == 1
age_13 = calendar_tasks_plant_year__01_1__123_2__345_3_sb == 2
age_3p = calendar_tasks_plant_year__01_1__123_2__345_3_sb == 3
# calendar_tasks_plant_day_min_sb = Classify(
# calendar_tasks_plant_month_sb,
# calendar_tasks_plant_months[1:],
# days_months,
# False,
# )
calendar_tasks_plant_day_min_sb = Classify(
calendar_tasks_plant_month_sb,
calendar_tasks_plant_months,
days_months[:-1],
False,
)
shift_days = round((365.25 / 12) * 6 + 1) # 184
id_plant_age = plant_age_sb + shift_days
id_calendar_tasks_plant_day_min_sb = calendar_tasks_plant_day_min_sb + shift_days
calendar_tasks_plant_day_next_sb = Classify(
calendar_tasks_plant_month_sb,
calendar_tasks_plant_months,
days_months_1521,
False,
)
# id_calendar_tasks_plant_day_next_sb = calendar_tasks_plant_day_next_sb + shift_days
days_x_1000 = id_calendar_tasks_plant_day_min_sb * 1000
days_until_next_task = calendar_tasks_plant_day_next_sb - plant_age_sb
return (
days_until_jan_1_sb,
plant_age_sb,
calendar_tasks_plant_month_sb,
id_plant_age,
days_until_next_task,
days_x_1000,
        age_01,
        age_13,
        age_3p,
)
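# Note on shift_days above: plant age runs from about -6 months (pre-planting)
# onward, so adding round((365.25 / 12) * 6 + 1) = 184 days keeps id_plant_age
# non-negative before it is scaled into the composite task ids.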
# ----------------------------------------------------------
def season_conditions(
days_until_jan_1_sb,
doy_start_dry_season_raster_sb,
doy_start_rainy_season_raster_sb,
):
"""Clasify season conditions for (early / late) rainy / dry seasons"""
# dry season
doy_start_dry_season_sb = Round(doy_start_dry_season_raster_sb)
pos_days_until_dry_season_sb = days_until_jan_1_sb + doy_start_dry_season_sb
days_until_dry_season_sb = Modulo(pos_days_until_dry_season_sb, 365.25)
days_since_start_dry_season_sb = Modulo(
(pos_days_until_dry_season_sb * -1 + 365.25), 365.25
)
# rainy season
doy_start_rainy_season_sb = Round(doy_start_rainy_season_raster_sb)
pos_days_until_rainy_season_sb = days_until_jan_1_sb + doy_start_rainy_season_sb
days_until_rainy_season_sb = Modulo(pos_days_until_rainy_season_sb, 365.25)
days_since_start_rainy_season_sb = Modulo(
(pos_days_until_rainy_season_sb * -1 + 365.25), 365.25
)
# plant condition given the current plant age and season progress
# dry season
dry_condition = Classify(
days_since_start_dry_season_sb, # below_0_ideal_2_above_4_sb
[0, 120, 242, 366],
[2, 4, 0],
False,
)
dry_early_condition = Classify(
days_since_start_dry_season_sb, # below_01_ideal_2_above_4_sb
[0, 14, 120, 176, 366],
[1, 2, 4, 0],
False,
)
dry_late_condition = Classify(
days_since_start_dry_season_sb, # below_02_ideal_3_above_4_sb
[0, 106, 120, 295, 366],
[2, 3, 4, 0],
False,
)
# rainy season
rainy_condition = Classify(
days_since_start_rainy_season_sb, # below_4_ideal_6_above_7_sb
[0, 120, 242, 366],
[6, 7, 4],
False,
)
rainy_early_condition = Classify(
days_since_start_rainy_season_sb, # below_4_ideal_5_above_6_sb
[0, 14, 190, 366],
[5, 6, 4],
False,
)
rainy_late_condition = Classify(
days_since_start_rainy_season_sb, # below_04_ideal_7_above_8_sb
[0, 106, 120, 295, 366],
[4, 7, 8, 0],
False,
)
return (
dry_condition,
dry_early_condition,
dry_late_condition,
rainy_condition,
rainy_early_condition,
rainy_late_condition,
)
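# Plain-number analogue (sketch, not used by the pipeline) of the wrap-around
# arithmetic above: days until a season that starts at day-of-year doy_start,
# seen from day-of-year doy_now.
def _sketch_days_until_season(doy_now, doy_start):
    """E.g. _sketch_days_until_season(300, 120) == 185.25."""
    days_until_jan_1 = -doy_now + 365.25
    return (days_until_jan_1 + doy_start) % 365.25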
def season_state(
calendar_tasks_plant_month_sb, calendar_tasks_plant_months, months_ideal
):
"""Classify season states # (prefer dry rainy over early/late states)"""
season_states = {}
for c_label, c_months in months_ideal.items():
season_c = c_label.replace("months_ideal_", "").replace("_season", "")
bool_str = f"{season_c}_bool"
season_states[bool_str] = (
Mask(
calendar_tasks_plant_month_sb,
Classify(
calendar_tasks_plant_month_sb,
calendar_tasks_plant_months,
c_months,
False,
)
< 100,
1,
)
< 1000
) * 1
return season_states
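# Intent of the Mask/Classify combination above (sketch of the logic): plant
# months that are ideal for a season kept their small id (< 100) in
# months_n_days, while non-ideal months were offset by +1000, so the final
# "< 1000" test yields a per-plot 0/1 flag such as season_states["dry_bool"].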
def ideal_season_state(season_states, conditions_season):
"""Classify ideal season states # (prefer dry rainy over early/late states)"""
(
dry_condition,
dry_early_condition,
dry_late_condition,
rainy_condition,
rainy_early_condition,
rainy_late_condition,
) = conditions_season
ideal_state_season = (
dry_bool * 2
+ dry_early_bool * 1
+ dry_late_bool * 3
+ rainy_bool * 6
+ rainy_early_bool * 5
+ rainy_late_bool * 7
)
state_season = (
dry_bool * dry_condition
+ dry_early_bool * dry_early_condition
+ dry_late_bool * dry_late_condition
+ rainy_bool * rainy_condition
+ rainy_early_bool * rainy_early_condition
+ rainy_late_bool * rainy_late_condition
)
# string representation
season_actual_classes = [0.5, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5]
season_actual_strings = [
"Between rainy and dry ",
"Dry early",
"Dry season",
"Dry late",
"Between dry and rainy",
"Rainy early",
"Rainy season",
"Rainy late",
"Between rainy and dry",
]
str_season_actual = Classify(
state_season, season_actual_classes, season_actual_strings, False
)
# Compare season state with ideal state
state_equals_ideal_state_1_0 = (state_season == ideal_state_season) * 1
state_greater_ideal_state_2_0 = (state_season > ideal_state_season) * 2
state_season_below_0_ideal_1_above_2 = (
state_equals_ideal_state_1_0 + state_greater_ideal_state_2_0
)
str_season_state = Classify(
state_season_below_0_ideal_1_above_2,
[0, 1, 2, 3],
["Below ideal", "Ideal", "Above ideal"],
False,
)
season_below_0_ideal_100_above_200 = state_season_below_0_ideal_1_above_2 * 100
return season_below_0_ideal_100_above_200
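# The 0/100/200 encoding reserves the hundreds digit of the composite task id
# for the season state: 0 = below ideal, 100 = ideal, 200 = above ideal.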
# ----------------------------------------------------------
def get_task_ids(task_id_parts):
"""task id from plant age, season rasters and plot conditions"""
(
live_support_sb,
pepper_variety_sb,
season_below_0_ideal_100_above_200,
days_x_1000,
) = task_id_parts
live_support_1_2 = live_support_sb * 1
    pepper_variety_10_20 = Classify(
        pepper_variety_sb, [6], [10, 20], False
    )
identified_task = (
live_support_1_2
+ pepper_variety_10_20
+ season_below_0_ideal_100_above_200
+ days_x_1000
)
identified_task_1 = identified_task + 10000000
identified_task_2 = identified_task + 20000000
identified_task_3 = identified_task + 30000000
# identified_task_4 = identified_task + 40000000
return identified_task_1, identified_task_2, identified_task_3
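# The composite id built above packs all plot conditions into one integer:
# slot * 10_000_000 + id_days_start * 1000 + season (0/100/200)
# + variety (10/20) + live support (1/2). A decoding sketch (illustrative,
# not used by the pipeline):
def _sketch_decode_task_id(task_id):
    """E.g. _sketch_decode_task_id(10184111) ==
    {"slot": 1, "id_days_start": 184, "season": 100, "variety": 10, "support": 1}
    """
    slot, rest = divmod(task_id, 10_000_000)
    id_days_start, rest = divmod(rest, 1000)
    season = rest // 100 * 100
    variety = (rest - season) // 10 * 10
    support = rest - season - variety
    return {
        "slot": slot,
        "id_days_start": id_days_start,
        "season": season,
        "variety": variety,
        "support": support,
    }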
def tasks_t1_t2_t3(calendar_tasks_labels):
"""create separate dataframes for tasks that occur on the same date.
Each dataframe has a maximum of 1 task per date"""
t1 = calendar_tasks_labels[
calendar_tasks_labels.task_id < 2 * 10 ** 7
] # 2*10**7 == 20000000
t1 = t1.add_prefix("task_1_")
t1.rename(columns={"task_1_task_id": "task_id"}, inplace=True)
t2 = calendar_tasks_labels[
(calendar_tasks_labels.task_id > 2 * 10 ** 7)
& (calendar_tasks_labels.task_id < 3 * 10 ** 7)
]
t2 = t2.add_prefix("task_2_")
t2.rename(columns={"task_2_task_id": "task_id"}, inplace=True)
t3 = calendar_tasks_labels[
(calendar_tasks_labels.task_id > 3 * 10 ** 7)
& (calendar_tasks_labels.task_id < 4 * 10 ** 7)
]
t3 = t3.add_prefix("task_3_")
t3.rename(columns={"task_3_task_id": "task_id"}, inplace=True)
# t4 = calendar_tasks_labels[(calendar_tasks_labels.task_id > 4*10**7) & (calendar_tasks_labels.task_id < 5*10**7)]
# t4 = t4.add_prefix('task_4_')
# t4.rename(columns={"task_4_task_id": "task_id"}, inplace=True)
return t1, t2, t3
def task_contents(task_dfs, t_identifiers):
"""Reclassify task IDs to task contents, loop through task dataframes &
Match possible tasks with identified task from farm conditions
"""
tasks_data = {}
for n, (df, t_identifier) in enumerate(zip(task_dfs, t_identifiers), 1):
t_ids = df.task_id.to_list()
t_valid = deepcopy(t_ids)
t_ids.append(t_ids[-1] + 100)
t_id_classified = Classify(t_identifier, t_ids, t_valid, False)
t_diff = Subtract(t_identifier, t_id_classified)
t_id_match = t_diff < 200
t_identifier_validated = t_id_classified * t_id_match
tasks_data[f"t{n}_id_classified"] = t_id_classified
tasks_data[f"t{n}_diff"] = t_diff
tasks_data[f"t{n}_id_validated"] = t_identifier_validated
for col in list(df.columns)[1:-9]:
df[col] = df["task_id"].astype(str) + "_" + df[col].astype(str)
t_col_list = df[col].to_list()
t_col_classify = Classify(t_identifier, t_ids, t_col_list, False)
t_col = Where(t_col_classify, t_id_match, None)
tasks_data[col] = t_col
return tasks_data
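# Matching logic above (sketch of the intent): Classify snaps the plot's
# identified id down to the nearest listed task id; within one day block
# (id_days_start * 1000) the condition digits differ by less than 200, so a
# gap < 200 means the listed task applies to this plot.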
def next_task_contents(tasks_data, calendar_tasks_next, id_plant_age):
"""add next task once (it is already concatenated)"""
calendar_tasks_next.id_days_start = calendar_tasks_next.id_days_start.astype(
"int32"
)
bins_start_ids_next_task = calendar_tasks_next.id_days_start.to_list()
start_ids_next_task = deepcopy(bins_start_ids_next_task)
bins_start_ids_next_task.insert(0, 0)
start_id_next_task_classified = Classify(
id_plant_age, bins_start_ids_next_task, start_ids_next_task
)
next_task_match = (id_plant_age - start_id_next_task_classified) < 1
start_id_next_task_validated = start_id_next_task_classified * next_task_match
tasks_data["next_id"] = start_id_next_task_validated
for col in list(calendar_tasks_next.columns)[:2]:
calendar_tasks_next[col] = (
calendar_tasks_next["id_days_start"].astype(str)
+ "_"
+ calendar_tasks_next[col].astype(str)
)
col_list = calendar_tasks_next[col].to_list()
col_classify = Classify(id_plant_age, bins_start_ids_next_task, col_list, False)
calendar_tasks_next_col = Where(col_classify, next_task_match, None)
tasks_data[f"next_{col}"] = calendar_tasks_next_col
return tasks_data
# ----------------------------------------------------------
def fertilizer_condition(
fertilizer_ids, calendar_tasks_labels, identified_task_1
):
"""Fertilizer conditions binned. Check per NPK advice if it is valid (task fertilizer class Equal class).
and sum the advices (if not valid, they become 0 and will be omitted)
classes are 1-12, based on age, variety and (live) support"""
fertilizer_df = calendar_tasks_labels[["task_id", "fertilizer_data_id"]]
fertilizer_df = fertilizer_df.sort_values(by=["task_id"]) # sort by task_id
fertilizer_tasks = fertilizer_df.values.tolist()
f_bins, f_class_values = [0], []
n = 0
just_binned = False
prev_task_id = 1
# Defaults to True (the right side of the bin is closed so a value
# is assigned to the bin on the left if it is exactly on a bin edge).
for task_id, fertilizer_id in fertilizer_tasks:
n += 0.0001
if fertilizer_id > 0:
if not just_binned:
f_bins.append(prev_task_id)
f_class_values.append(n)
just_binned = True
f_bins.append(task_id)
f_class_values.append(fertilizer_id + n)
else:
f_bins.append(task_id)
f_class_values.append(fertilizer_id + n)
just_binned = True
else:
if just_binned:
f_bins.append(task_id)
f_class_values.append(n)
just_binned = False
prev_task_id = task_id
# Calculate N P K advice
fertilizer_task_id = Round(
Classify(identified_task_1, f_bins, f_class_values, True)
)
n_advice = 0
for c, npk in fertilizer_ids.items():
fertilizer_task_valid = fertilizer_task_id == c
n, p, k = npk
n_advice = eval(n) * fertilizer_task_valid + n_advice
return (n_advice > 0) * 1
def fertilizer_conditions_always(
fertilizer_ids_dict, live_support_sb, pepper_variety_sb, age_01, age_13, age_3p
):
"""Fertilizer conditions binned. Check per NPK advice if it is valid (task fertilizer class Equal class).
and sum the advices (if not valid, they become 0 and will be omitted)
classes are 1-12, based on age, variety and (live) support"""
live_support_1 = live_support_sb == 1
live_support_2 = live_support_sb == 2
pepper_variety_1 = pepper_variety_sb == 1
pepper_variety_2 = pepper_variety_sb == 2
f1 = age_01 * live_support_1 * pepper_variety_1 * 1
f2 = age_13 * live_support_1 * pepper_variety_1 * 2
f3 = age_3p * live_support_1 * pepper_variety_1 * 3
f4 = age_01 * live_support_2 * pepper_variety_1 * 4
f5 = age_13 * live_support_2 * pepper_variety_1 * 5
f6 = age_3p * live_support_2 * pepper_variety_1 * 6
f7 = age_01 * live_support_1 * pepper_variety_2 * 7
f8 = age_13 * live_support_1 * pepper_variety_2 * 8
f9 = age_3p * live_support_1 * pepper_variety_2 * 9
f10 = age_01 * live_support_2 * pepper_variety_2 * 10
f11 = age_13 * live_support_2 * pepper_variety_2 * 11
f12 = age_3p * live_support_2 * pepper_variety_2 * 12
f_number = f1 + f2 + f3 + f4 + f5 + f6 + f7 + f8 + f9 + f10 + f11 + f12
n_advice = 0
p_advice = 0
k_advice = 0
for c, npk in fertilizer_ids_dict.items():
fertilizer_task_valid = f_number == c
n, p, k = npk
n_advice = eval(n) * fertilizer_task_valid + n_advice
p_advice = eval(p) * fertilizer_task_valid + p_advice
k_advice = eval(k) * fertilizer_task_valid + k_advice
n_advice = Round(n_advice * 0.25 * 0.2) * 5 # Give quarterly instead of yearly advice (0.25)
p_advice = Round(p_advice * 0.25 * 0.2) * 5 # & round by 5 grams as advised by IPB (0.2 & 5)
k_advice = Round(k_advice * 0.25 * 0.2) * 5
return n_advice, p_advice, k_advice
def get_parser():
"""Return argument parser."""
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
"-v",
"--verbose",
action="store_true",
dest="verbose",
default=False,
help="Verbose output",
)
return parser
def main(): # pragma: no cover
"""Call main command with args from parser.
This method is called when you run 'bin/run-spiceup-labels',
this is configured in 'setup.py'. Adjust when needed. You can have multiple
main scripts.
"""
options = get_parser().parse_args()
if options.verbose:
log_level = logging.DEBUG
else:
log_level = logging.INFO
logging.basicConfig(level=log_level, format="%(levelname)s: %(message)s")
logging.info("load and reclass calendar tasks")
calendar_tasks_labels, calendar_tasks_next = get_calendar_tasks_labels(
calendar_tasks
)
(
months_ideal,
days_months,
days_months_1521,
calendar_tasks_plant_months,
) = months_n_days(calendar_tasks)
globals().update(months_ideal)
logging.info(
"load local raster, parcel and labelparameters data (mimics lizard data)"
)
dg_rasters, graph_rasters = mimic_rasters(lizard_rasters)
globals().update(dg_rasters)
sb_objects = raster_seriesblocks(dg_rasters, parcels)
globals().update(sb_objects)
globals().update(lp_seriesblocks)
logging.info("determine actual, local plant and season conditions")
actual_ages = actual_plant_age(
days_since_epoch_raster_sb,
days_plant_age_sb,
days_months,
days_months_1521,
calendar_tasks_plant_months,
)
(
days_until_jan_1_sb,
plant_age_sb,
calendar_tasks_plant_month_sb,
id_plant_age,
days_until_next_task,
days_x_1000,
age_01, age_13, age_3p
) = actual_ages
conditions_season = season_conditions(
days_until_jan_1_sb,
doy_start_dry_season_raster_sb,
doy_start_rainy_season_raster_sb,
)
season_states = season_state(
calendar_tasks_plant_month_sb, calendar_tasks_plant_months, months_ideal
)
globals().update(season_states)
season_below_0_ideal_100_above_200 = ideal_season_state(
season_states, conditions_season
)
logging.info(
"calculate task ids based on actual, local plant and season conditions"
)
task_id_parts = [
live_support_sb,
pepper_variety_sb,
season_below_0_ideal_100_above_200,
days_x_1000,
]
t_identifiers = get_task_ids(task_id_parts)
logging.info("get task content from calendar tasks df, aka tabel suci")
task_dfs = tasks_t1_t2_t3(calendar_tasks_labels)
tasks_data_tasks = task_contents(task_dfs, t_identifiers)
# logging.info(tasks_data_tasks)
logging.info("calculate next tasks content too")
tasks_data = next_task_contents(tasks_data_tasks, calendar_tasks_next, id_plant_age)
globals().update(tasks_data)
logging.info("calculate nutrient advices in the form of n, p and k grams per tree")
tx_input_0_or_1 = fertilizer_condition(
fertilizer_ids_dict, calendar_tasks_labels, t1_id_validated
)
n_advice, p_advice, k_advice = fertilizer_conditions_always(fertilizer_ids_dict, live_support_sb,
pepper_variety_sb, age_01, age_13, age_3p)
logging.info("Set result table with parcels, labelparameters and additional labels")
result_seriesblock = SetSeriesBlock(
parcels_labeled,
"label_value",
"label",
"----task_details_ID task (description_of_index)-----",
"task_nr_1_id_days_start_2345_pepper_variety_6_live_support_7_id_season_state_8",
"t1_task_id",
t1_id_validated,
"t1_task",
task_1_task,
"t1_task_IND",
task_1_task_IND,
"t1_recommendation",
task_1_recommendation,
"t1_recommendation_IND",
task_1_recommendation_IND,
"t1_GAP_info",
task_1_GAP_info,
"t1_GAP_info_IND",
task_1_GAP_info_IND,
"t1_GAP_chapter",
task_1_GAP_chapter,
"t1_image",
task_1_image,
"t1_image_url",
task_1_image_url,
"t1_input",
tx_input_0_or_1 * 1, # fertilizer advice yes 1 or no 0
"t2_task_id",
t2_id_validated,
"t2_task",
task_2_task,
"t2_task_IND",
task_2_task_IND,
"t2_recommendation",
task_2_recommendation,
"t2_recommendation_IND",
task_2_recommendation_IND,
"t2_GAP_info",
task_2_GAP_info,
"t2_GAP_info_IND",
task_2_GAP_info_IND,
"t2_GAP_chapter",
task_2_GAP_chapter,
"t2_image",
task_2_image,
"t2_image_url",
task_2_image_url,
"t2_input",
tx_input_0_or_1 * 2, # TODO insert logic for manure input
"t3_task_id",
t3_id_validated,
"t3_task",
task_3_task,
"t3_task_IND",
task_3_task_IND,
"t3_recommendation",
task_3_recommendation,
"t3_recommendation_IND",
task_3_recommendation_IND,
"t3_GAP_info",
task_3_GAP_info,
"t3_GAP_info_IND",
task_3_GAP_info_IND,
"t3_GAP_chapter",
task_3_GAP_chapter,
"t3_image",
task_3_image,
"t3_image_url",
task_3_image_url,
"t3_input",
0, # optional TODO, insert logic for other input
"_XN_",
n_advice,
"_XP_",
p_advice,
"_XK_",
k_advice,
"next_task_id",
next_id,
"next_task",
next_task,
"next_task_IND",
next_task_IND,
"days_until_next_task",
days_until_next_task,
)
logging.info("serialize model and replace local data with lizard data")
dg_source = get_labeltype_source(result_seriesblock, graph_rasters, labeled_parcels)
with open('calender_tasks.json', 'w+') as f:
json.dump(dg_source, f)
logging.info("Send to Lizard")
response = patch_labeltype(dg_source, username, password, labeltype_uuid)
logger.info("Labeltype update complete. Find response below")
logger.info(response.status_code)
logger.info(response.json())
return response.status_code
| # -*- coding: utf-8 -*-
"""Configure labeltype model for the crop calendar tasks of the SpiceUp mobile app.
Used to calculate farm specific tasks from parcel location, plant age, local measurements and raster data.
Calendar tasks are generated with a Lizard labeltype. This labeltype generates crop calendar tasks per plot.
We save farm plots as parcels, which have a location and several initial parameters.
"""
import argparse
import logging
import numpy as np
import json
from copy import deepcopy
from dask_geomodeling.raster import *
from dask_geomodeling.geometry import *
from dask_geomodeling.geometry.base import SetSeriesBlock
from localsecret import username, password
from calendar_tasks_config import (
labeltype_uuid, # the uuid of the model
calendar_tasks, # crop calendar tasks as specified in online spreadsheet "Items & Properties on the App Ui/x"
lizard_rasters, # Raster data: season onset, fertilizer advice. dict with names and uuids of the rasters used
parcels, # Load parcels locally. Mimic Lizard data
parcels_labeled, # parcels with labels locally
labeled_parcels, # parcels with labels in lizard
lp_seriesblocks, # seriesblocks of labelparams per parcel
fertilizer_ids_dict, # Fertilizer conditions 1-12, based on age, variety and (live) support
)
from spiceup_labels.config_lizard import (
mimic_rasters,
raster_seriesblocks,
get_labeltype_source,
patch_labeltype,
)
logger = logging.getLogger(__name__)
# ----------------------------------------------------------
# preprocess calendar based tasks (filtering)
def get_calendar_tasks_labels(calendar_tasks):
"""convert calendar tasks df (group by by month, season and fertilizer) to
calendar_tasks_labels df and next_calendar_tasks df"""
calendar_tasks_one = calendar_tasks[
calendar_tasks["id_season"] == 0
] # filter tasks to make the tasks independent of local conditions
calendar_tasks_one = calendar_tasks_one[
(calendar_tasks_one["fertilizer_data_id"] % 4 == 1)
| (calendar_tasks_one["fertilizer_data_id"] == 0)
]
calendar_tasks_next = (
calendar_tasks_one.groupby(
["month"]
) # group by month to list tasks by plant age
.agg(
{
"task": " | ".join, # if multiple tasks at the same time, use pipe separator ' | '
"task_IND": " | ".join,
"month": "first", # select first (or last) match with string (comes with pandas)
"id_days_start": "first",
}
)
.shift(-1)
.reset_index(drop=True)
) # assign next task(s) to current row
calendar_tasks_join = calendar_tasks.join(
calendar_tasks_next, on="month", rsuffix="_next"
)
calendar_tasks_labels = calendar_tasks_join.iloc[:, np.r_[0:19, 24, -4:0]]
calendar_tasks_labels = calendar_tasks_labels.sort_values(
by=["task_id", "month"]
) # sort by task_id, month
calendar_tasks_next.drop(calendar_tasks_next.tail(1).index, inplace=True)
return calendar_tasks_labels, calendar_tasks_next
def months_n_days(calendar_tasks):
"""List months and convert to days"""
calendar_tasks_monthly = calendar_tasks.drop_duplicates(subset=["month"])
months = list(calendar_tasks_monthly.month)
months.append(months[-1] + 1)
days_months = [round(month * 365.25 / 12 + 0.01) for month in months]
days_months_1521 = deepcopy(days_months)
days_months_1521.append(days_months[-1] + 30)
# List ages that are ideal in (normal, early, late) rainy or dry season.
ideal_conditions = sorted(
list(set([c.split(",")[0] for c in list(calendar_tasks_monthly["ideal_note"])]))
)
months_ideal = {}
for c in ideal_conditions:
c_months = calendar_tasks_monthly.id_month.where(
calendar_tasks_monthly.ideal_note.str.endswith(c, False),
other=calendar_tasks_monthly.id_month + 1000,
).to_list()
c_label = f"months_ideal_{c.lower().replace(' ', '_')}"
months_ideal[c_label] = c_months
# add 7 to all months so they become positive
calendar_tasks_plant_months = [month + 7 for month in months]
return months_ideal, days_months, days_months_1521, calendar_tasks_plant_months
# ----------------------------------------------------------
def actual_plant_age(
days_since_epoch_raster_sb,
days_plant_age_sb,
days_months,
days_months_1521,
calendar_tasks_plant_months,
):
"""calculate actual plant age per plot from epoch raster and plant age labelparameter"""
doy_now_sb = Round(Modulo(days_since_epoch_raster_sb, 365.25))
days_until_jan_1_sb = doy_now_sb * -1 + 365.25
days_since_epoch_plant_sb = days_since_epoch_sb
days_since_planting_sb = days_since_epoch_raster_sb - days_since_epoch_plant_sb
plant_age_start_sb = days_plant_age_sb
plant_age_sb = days_since_planting_sb + plant_age_start_sb
# plant_month_sb = plant_age_sb / 30.4375
calendar_tasks_plant_month_sb = Classify(
plant_age_sb, days_months_1521, calendar_tasks_plant_months, False
)
calendar_tasks_plant_year__01_1__123_2__345_3_sb = Classify(
plant_age_sb, [365, 1095], [1, 2, 3], False
)
age_01 = calendar_tasks_plant_year__01_1__123_2__345_3_sb == 1
age_13 = calendar_tasks_plant_year__01_1__123_2__345_3_sb == 2
age_3p = calendar_tasks_plant_year__01_1__123_2__345_3_sb == 3
# calendar_tasks_plant_day_min_sb = Classify(
# calendar_tasks_plant_month_sb,
# calendar_tasks_plant_months[1:],
# days_months,
# False,
# )
calendar_tasks_plant_day_min_sb = Classify(
calendar_tasks_plant_month_sb,
calendar_tasks_plant_months,
days_months[:-1],
False,
)
shift_days = round((365.25 / 12) * 6 + 1) # 184
id_plant_age = plant_age_sb + shift_days
id_calendar_tasks_plant_day_min_sb = calendar_tasks_plant_day_min_sb + shift_days
calendar_tasks_plant_day_next_sb = Classify(
calendar_tasks_plant_month_sb,
calendar_tasks_plant_months,
days_months_1521,
False,
)
# id_calendar_tasks_plant_day_next_sb = calendar_tasks_plant_day_next_sb + shift_days
days_x_1000 = id_calendar_tasks_plant_day_min_sb * 1000
days_until_next_task = calendar_tasks_plant_day_next_sb - plant_age_sb
return (
days_until_jan_1_sb,
plant_age_sb,
calendar_tasks_plant_month_sb,
id_plant_age,
days_until_next_task,
days_x_1000,
age_01, age_13, age_3p
)
# ----------------------------------------------------------
def season_conditions(
days_until_jan_1_sb,
doy_start_dry_season_raster_sb,
doy_start_rainy_season_raster_sb,
):
"""Clasify season conditions for (early / late) rainy / dry seasons"""
# dry season
doy_start_dry_season_sb = Round(doy_start_dry_season_raster_sb)
pos_days_until_dry_season_sb = days_until_jan_1_sb + doy_start_dry_season_sb
days_until_dry_season_sb = Modulo(pos_days_until_dry_season_sb, 365.25)
days_since_start_dry_season_sb = Modulo(
(pos_days_until_dry_season_sb * -1 + 365.25), 365.25
)
# rainy season
doy_start_rainy_season_sb = Round(doy_start_rainy_season_raster_sb)
pos_days_until_rainy_season_sb = days_until_jan_1_sb + doy_start_rainy_season_sb
days_until_rainy_season_sb = Modulo(pos_days_until_rainy_season_sb, 365.25)
days_since_start_rainy_season_sb = Modulo(
(pos_days_until_rainy_season_sb * -1 + 365.25), 365.25
)
# plant condition given the current plant age and season progress
# dry season
dry_condition = Classify(
days_since_start_dry_season_sb, # below_0_ideal_2_above_4_sb
[0, 120, 242, 366],
[2, 4, 0],
False,
)
dry_early_condition = Classify(
days_since_start_dry_season_sb, # below_01_ideal_2_above_4_sb
[0, 14, 120, 176, 366],
[1, 2, 4, 0],
False,
)
dry_late_condition = Classify(
days_since_start_dry_season_sb, # below_02_ideal_3_above_4_sb
[0, 106, 120, 295, 366],
[2, 3, 4, 0],
False,
)
# rainy season
rainy_condition = Classify(
days_since_start_rainy_season_sb, # below_4_ideal_6_above_7_sb
[0, 120, 242, 366],
[6, 7, 4],
False,
)
rainy_early_condition = Classify(
days_since_start_rainy_season_sb, # below_4_ideal_5_above_6_sb
[0, 14, 190, 366],
[5, 6, 4],
False,
)
rainy_late_condition = Classify(
days_since_start_rainy_season_sb, # below_04_ideal_7_above_8_sb
[0, 106, 120, 295, 366],
[4, 7, 8, 0],
False,
)
return (
dry_condition,
dry_early_condition,
dry_late_condition,
rainy_condition,
rainy_early_condition,
rainy_late_condition,
)
def season_state(
calendar_tasks_plant_month_sb, calendar_tasks_plant_months, months_ideal
):
"""Classify season states # (prefer dry rainy over early/late states)"""
season_states = {}
for c_label, c_months in months_ideal.items():
season_c = c_label.replace("months_ideal_", "").replace("_season", "")
bool_str = f"{season_c}_bool"
season_states[bool_str] = (
Mask(
calendar_tasks_plant_month_sb,
Classify(
calendar_tasks_plant_month_sb,
calendar_tasks_plant_months,
c_months,
False,
)
< 100,
1,
)
< 1000
) * 1
return season_states
def ideal_season_state(season_states, conditions_season):
"""Classify ideal season states # (prefer dry rainy over early/late states)"""
(
dry_condition,
dry_early_condition,
dry_late_condition,
rainy_condition,
rainy_early_condition,
rainy_late_condition,
) = conditions_season
ideal_state_season = (
dry_bool * 2
+ dry_early_bool * 1
+ dry_late_bool * 3
+ rainy_bool * 6
+ rainy_early_bool * 5
+ rainy_late_bool * 7
)
state_season = (
dry_bool * dry_condition
+ dry_early_bool * dry_early_condition
+ dry_late_bool * dry_late_condition
+ rainy_bool * rainy_condition
+ rainy_early_bool * rainy_early_condition
+ rainy_late_bool * rainy_late_condition
)
# string representation
season_actual_classes = [0.5, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5]
season_actual_strings = [
"Between rainy and dry ",
"Dry early",
"Dry season",
"Dry late",
"Between dry and rainy",
"Rainy early",
"Rainy season",
"Rainy late",
"Between rainy and dry",
]
str_season_actual = Classify(
state_season, season_actual_classes, season_actual_strings, False
)
# Compare season state with ideal state
state_equals_ideal_state_1_0 = (state_season == ideal_state_season) * 1
state_greater_ideal_state_2_0 = (state_season > ideal_state_season) * 2
state_season_below_0_ideal_1_above_2 = (
state_equals_ideal_state_1_0 + state_greater_ideal_state_2_0
)
str_season_state = Classify(
state_season_below_0_ideal_1_above_2,
[0, 1, 2, 3],
["Below ideal", "Ideal", "Above ideal"],
False,
)
season_below_0_ideal_100_above_200 = state_season_below_0_ideal_1_above_2 * 100
return season_below_0_ideal_100_above_200
# ----------------------------------------------------------
def get_task_ids(task_id_parts):
"""task id from plant age, season rasters and plot conditions"""
(
live_support_sb,
pepper_variety_sb,
season_below_0_ideal_100_above_200,
days_x_1000,
) = task_id_parts
live_support_1_2 = live_support_sb * 1
pepper_variety_10_20 = field_operations.Classify(
pepper_variety_sb, [6], [10, 20], False
)
identified_task = (
live_support_1_2
+ pepper_variety_10_20
+ season_below_0_ideal_100_above_200
+ days_x_1000
)
identified_task_1 = identified_task + 10000000
identified_task_2 = identified_task + 20000000
identified_task_3 = identified_task + 30000000
# identified_task_4 = identified_task + 40000000
return identified_task_1, identified_task_2, identified_task_3
def tasks_t1_t2_t3(calendar_tasks_labels):
"""create separate dataframes for tasks that occur on the same date.
Each dataframe has a maximum of 1 task per date"""
t1 = calendar_tasks_labels[
calendar_tasks_labels.task_id < 2 * 10 ** 7
] # 2*10**7 == 20000000
t1 = t1.add_prefix("task_1_")
t1.rename(columns={"task_1_task_id": "task_id"}, inplace=True)
t2 = calendar_tasks_labels[
(calendar_tasks_labels.task_id > 2 * 10 ** 7)
& (calendar_tasks_labels.task_id < 3 * 10 ** 7)
]
t2 = t2.add_prefix("task_2_")
t2.rename(columns={"task_2_task_id": "task_id"}, inplace=True)
t3 = calendar_tasks_labels[
(calendar_tasks_labels.task_id > 3 * 10 ** 7)
& (calendar_tasks_labels.task_id < 4 * 10 ** 7)
]
t3 = t3.add_prefix("task_3_")
t3.rename(columns={"task_3_task_id": "task_id"}, inplace=True)
# t4 = calendar_tasks_labels[(calendar_tasks_labels.task_id > 4*10**7) & (calendar_tasks_labels.task_id < 5*10**7)]
# t4 = t4.add_prefix('task_4_')
# t4.rename(columns={"task_4_task_id": "task_id"}, inplace=True)
return t1, t2, t3
def task_contents(task_dfs, t_identifiers):
"""Reclassify task IDs to task contents, loop through task dataframes &
Match possible tasks with identified task from farm conditions
"""
tasks_data = {}
for n, (df, t_identifier) in enumerate(zip(task_dfs, t_identifiers), 1):
t_ids = df.task_id.to_list()
t_valid = deepcopy(t_ids)
t_ids.append(t_ids[-1] + 100)
t_id_classified = Classify(t_identifier, t_ids, t_valid, False)
t_diff = Subtract(t_identifier, t_id_classified)
t_id_match = t_diff < 200
t_identifier_validated = t_id_classified * t_id_match
tasks_data[f"t{n}_id_classified"] = t_id_classified
tasks_data[f"t{n}_diff"] = t_diff
tasks_data[f"t{n}_id_validated"] = t_identifier_validated
for col in list(df.columns)[1:-9]:
df[col] = df["task_id"].astype(str) + "_" + df[col].astype(str)
t_col_list = df[col].to_list()
t_col_classify = Classify(t_identifier, t_ids, t_col_list, False)
t_col = Where(t_col_classify, t_id_match, None)
tasks_data[col] = t_col
return tasks_data
def next_task_contents(tasks_data, calendar_tasks_next, id_plant_age):
"""add next task once (it is already concatenated)"""
calendar_tasks_next.id_days_start = calendar_tasks_next.id_days_start.astype(
"int32"
)
bins_start_ids_next_task = calendar_tasks_next.id_days_start.to_list()
start_ids_next_task = deepcopy(bins_start_ids_next_task)
bins_start_ids_next_task.insert(0, 0)
start_id_next_task_classified = Classify(
id_plant_age, bins_start_ids_next_task, start_ids_next_task
)
next_task_match = (id_plant_age - start_id_next_task_classified) < 1
start_id_next_task_validated = start_id_next_task_classified * next_task_match
tasks_data["next_id"] = start_id_next_task_validated
for col in list(calendar_tasks_next.columns)[:2]:
calendar_tasks_next[col] = (
calendar_tasks_next["id_days_start"].astype(str)
+ "_"
+ calendar_tasks_next[col].astype(str)
)
col_list = calendar_tasks_next[col].to_list()
col_classify = Classify(id_plant_age, bins_start_ids_next_task, col_list, False)
calendar_tasks_next_col = Where(col_classify, next_task_match, None)
tasks_data[f"next_{col}"] = calendar_tasks_next_col
return tasks_data
# ----------------------------------------------------------
def fertilizer_condition(
fertilizer_ids, calendar_tasks_labels, identified_task_1
):
"""Fertilizer conditions binned. Check per NPK advice if it is valid (task fertilizer class Equal class).
and sum the advices (if not valid, they become 0 and will be omitted)
classes are 1-12, based on age, variety and (live) support"""
fertilizer_df = calendar_tasks_labels[["task_id", "fertilizer_data_id"]]
fertilizer_df = fertilizer_df.sort_values(by=["task_id"]) # sort by task_id
fertilizer_tasks = fertilizer_df.values.tolist()
f_bins, f_class_values = [0], []
n = 0
just_binned = False
prev_task_id = 1
# Defaults to True (the right side of the bin is closed so a value
# is assigned to the bin on the left if it is exactly on a bin edge).
for task_id, fertilizer_id in fertilizer_tasks:
n += 0.0001
if fertilizer_id > 0:
if not just_binned:
f_bins.append(prev_task_id)
f_class_values.append(n)
just_binned = True
f_bins.append(task_id)
f_class_values.append(fertilizer_id + n)
else:
f_bins.append(task_id)
f_class_values.append(fertilizer_id + n)
just_binned = True
else:
if just_binned:
f_bins.append(task_id)
f_class_values.append(n)
just_binned = False
prev_task_id = task_id
# Calculate N P K advice
fertilizer_task_id = Round(
Classify(identified_task_1, f_bins, f_class_values, True)
)
n_advice = 0
for c, npk in fertilizer_ids.items():
fertilizer_task_valid = fertilizer_task_id == c
n, p, k = npk
n_advice = eval(n) * fertilizer_task_valid + n_advice
return (n_advice > 0) * 1
def fertilizer_conditions_always(
fertilizer_ids_dict, live_support_sb, pepper_variety_sb, age_01, age_13, age_3p
):
"""Fertilizer conditions binned. Check per NPK advice if it is valid (task fertilizer class Equal class).
and sum the advices (if not valid, they become 0 and will be omitted)
classes are 1-12, based on age, variety and (live) support"""
live_support_1 = live_support_sb == 1
live_support_2 = live_support_sb == 2
pepper_variety_1 = pepper_variety_sb == 1
pepper_variety_2 = pepper_variety_sb == 2
f1 = age_01 * live_support_1 * pepper_variety_1 * 1
f2 = age_13 * live_support_1 * pepper_variety_1 * 2
f3 = age_3p * live_support_1 * pepper_variety_1 * 3
f4 = age_01 * live_support_2 * pepper_variety_1 * 4
f5 = age_13 * live_support_2 * pepper_variety_1 * 5
f6 = age_3p * live_support_2 * pepper_variety_1 * 6
f7 = age_01 * live_support_1 * pepper_variety_2 * 7
f8 = age_13 * live_support_1 * pepper_variety_2 * 8
f9 = age_3p * live_support_1 * pepper_variety_2 * 9
f10 = age_01 * live_support_2 * pepper_variety_2 * 10
f11 = age_13 * live_support_2 * pepper_variety_2 * 11
f12 = age_3p * live_support_2 * pepper_variety_2 * 12
f_number = f1 + f2 + f3 + f4 + f5 + f6 + f7 + f8 + f9 + f10 + f11 + f12
n_advice = 0
p_advice = 0
k_advice = 0
for c, npk in fertilizer_ids_dict.items():
fertilizer_task_valid = f_number == c
n, p, k = npk
n_advice = eval(n) * fertilizer_task_valid + n_advice
p_advice = eval(p) * fertilizer_task_valid + p_advice
k_advice = eval(k) * fertilizer_task_valid + k_advice
n_advice = Round(n_advice * 0.25 * 0.2) * 5 # Give quarterly instead of yearly advice (0.25)
p_advice = Round(p_advice * 0.25 * 0.2) * 5 # & round by 5 grams as advised by IPB (0.2 & 5)
k_advice = Round(k_advice * 0.25 * 0.2) * 5
return n_advice, p_advice, k_advice
def get_parser():
"""Return argument parser."""
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
"-v",
"--verbose",
action="store_true",
dest="verbose",
default=False,
help="Verbose output",
)
return parser
def main(): # pragma: no cover
"""Call main command with args from parser.
This method is called when you run 'bin/run-spiceup-labels',
this is configured in 'setup.py'. Adjust when needed. You can have multiple
main scripts.
"""
options = get_parser().parse_args()
if options.verbose:
log_level = logging.DEBUG
else:
log_level = logging.INFO
logging.basicConfig(level=log_level, format="%(levelname)s: %(message)s")
logging.info("load and reclass calendar tasks")
calendar_tasks_labels, calendar_tasks_next = get_calendar_tasks_labels(
calendar_tasks
)
(
months_ideal,
days_months,
days_months_1521,
calendar_tasks_plant_months,
) = months_n_days(calendar_tasks)
globals().update(months_ideal)
logging.info(
"load local raster, parcel and labelparameters data (mimics lizard data)"
)
dg_rasters, graph_rasters = mimic_rasters(lizard_rasters)
globals().update(dg_rasters)
sb_objects = raster_seriesblocks(dg_rasters, parcels)
globals().update(sb_objects)
globals().update(lp_seriesblocks)
logging.info("determine actual, local plant and season conditions")
actual_ages = actual_plant_age(
days_since_epoch_raster_sb,
days_plant_age_sb,
days_months,
days_months_1521,
calendar_tasks_plant_months,
)
(
days_until_jan_1_sb,
plant_age_sb,
calendar_tasks_plant_month_sb,
id_plant_age,
days_until_next_task,
days_x_1000,
age_01, age_13, age_3p
) = actual_ages
conditions_season = season_conditions(
days_until_jan_1_sb,
doy_start_dry_season_raster_sb,
doy_start_rainy_season_raster_sb,
)
season_states = season_state(
calendar_tasks_plant_month_sb, calendar_tasks_plant_months, months_ideal
)
globals().update(season_states)
season_below_0_ideal_100_above_200 = ideal_season_state(
season_states, conditions_season
)
logging.info(
"calculate task ids based on actual, local plant and season conditions"
)
task_id_parts = [
live_support_sb,
pepper_variety_sb,
season_below_0_ideal_100_above_200,
days_x_1000,
]
t_identifiers = get_task_ids(task_id_parts)
logging.info("get task content from calendar tasks df, aka tabel suci")
task_dfs = tasks_t1_t2_t3(calendar_tasks_labels)
tasks_data_tasks = task_contents(task_dfs, t_identifiers)
# logging.info(tasks_data_tasks)
logging.info("calculate next tasks content too")
tasks_data = next_task_contents(tasks_data_tasks, calendar_tasks_next, id_plant_age)
globals().update(tasks_data)
logging.info("calculate nutrient advices in the form of n, p and k grams per tree")
tx_input_0_or_1 = fertilizer_condition(
fertilizer_ids_dict, calendar_tasks_labels, t1_id_validated
)
n_advice, p_advice, k_advice = fertilizer_conditions_always(fertilizer_ids_dict, live_support_sb,
pepper_variety_sb, age_01, age_13, age_3p)
logging.info("Set result table with parcels, labelparameters and additional labels")
result_seriesblock = SetSeriesBlock(
parcels_labeled,
"label_value",
"label",
"----task_details_ID task (description_of_index)-----",
"task_nr_1_id_days_start_2345_pepper_variety_6_live_support_7_id_season_state_8",
"t1_task_id",
t1_id_validated,
"t1_task",
task_1_task,
"t1_task_IND",
task_1_task_IND,
"t1_recommendation",
task_1_recommendation,
"t1_recommendation_IND",
task_1_recommendation_IND,
"t1_GAP_info",
task_1_GAP_info,
"t1_GAP_info_IND",
task_1_GAP_info_IND,
"t1_GAP_chapter",
task_1_GAP_chapter,
"t1_image",
task_1_image,
"t1_image_url",
task_1_image_url,
"t1_input",
tx_input_0_or_1 * 1, # fertilizer advice yes 1 or no 0
"t2_task_id",
t2_id_validated,
"t2_task",
task_2_task,
"t2_task_IND",
task_2_task_IND,
"t2_recommendation",
task_2_recommendation,
"t2_recommendation_IND",
task_2_recommendation_IND,
"t2_GAP_info",
task_2_GAP_info,
"t2_GAP_info_IND",
task_2_GAP_info_IND,
"t2_GAP_chapter",
task_2_GAP_chapter,
"t2_image",
task_2_image,
"t2_image_url",
task_2_image_url,
"t2_input",
tx_input_0_or_1 * 2, # TODO insert logic for manure input
"t3_task_id",
t3_id_validated,
"t3_task",
task_3_task,
"t3_task_IND",
task_3_task_IND,
"t3_recommendation",
task_3_recommendation,
"t3_recommendation_IND",
task_3_recommendation_IND,
"t3_GAP_info",
task_3_GAP_info,
"t3_GAP_info_IND",
task_3_GAP_info_IND,
"t3_GAP_chapter",
task_3_GAP_chapter,
"t3_image",
task_3_image,
"t3_image_url",
task_3_image_url,
"t3_input",
0, # optional TODO, insert logic for other input
"_XN_",
n_advice,
"_XP_",
p_advice,
"_XK_",
k_advice,
"next_task_id",
next_id,
"next_task",
next_task,
"next_task_IND",
next_task_IND,
"days_until_next_task",
days_until_next_task,
)
logging.info("serialize model and replace local data with lizard data")
dg_source = get_labeltype_source(result_seriesblock, graph_rasters, labeled_parcels)
with open('calender_tasks.json', 'w+') as f:
json.dump(dg_source, f)
logging.info("Send to Lizard")
response = patch_labeltype(dg_source, username, password, labeltype_uuid)
logger.info("Labeltype update complete. Find response below")
logger.info(response.status_code)
logger.info(response.json())
return response.status_code
| en | 0.77254 | # -*- coding: utf-8 -*- Configure labeltype model for the crop calendar tasks of the SpiceUp mobile app. Used to calculate farm specific tasks from parcel location, plant age, local measurements and raster data. Calendar tasks are generated with a Lizard labeltype. This labeltype generates crop calendar tasks per plot. We save farm plots as parcels, which have a location and several initial parameters. # the uuid of the model # crop calendar tasks as specified in online spreadsheet "Items & Properties on the App Ui/x" # Raster data: season onset, fertilizer advice. dict with names and uuids of the rasters used # Load parcels locally. Mimic Lizard data # parcels with labels locally # parcels with labels in lizard # seriesblocks of labelparams per parcel # Fertilizer conditions 1-12, based on age, variety and (live) support # ---------------------------------------------------------- # preprocess calendar based tasks (filtering) convert calendar tasks df (group by by month, season and fertilizer) to calendar_tasks_labels df and next_calendar_tasks df # filter tasks to make the tasks independent of local conditions # group by month to list tasks by plant age # if multiple tasks at the same time, use pipe separator ' | ' # select first (or last) match with string (comes with pandas) # assign next task(s) to current row # sort by task_id, month List months and convert to days # List ages that are ideal in (normal, early, late) rainy or dry season. # add 7 to all months so they become positive # ---------------------------------------------------------- calculate actual plant age per plot from epoch raster and plant age labelparameter # plant_month_sb = plant_age_sb / 30.4375 # calendar_tasks_plant_day_min_sb = Classify( # calendar_tasks_plant_month_sb, # calendar_tasks_plant_months[1:], # days_months, # False, # ) # 184 # id_calendar_tasks_plant_day_next_sb = calendar_tasks_plant_day_next_sb + shift_days # ---------------------------------------------------------- Clasify season conditions for (early / late) rainy / dry seasons # dry season # rainy season # plant condition given the current plant age and season progress # dry season # below_0_ideal_2_above_4_sb # below_01_ideal_2_above_4_sb # below_02_ideal_3_above_4_sb # rainy season # below_4_ideal_6_above_7_sb # below_4_ideal_5_above_6_sb # below_04_ideal_7_above_8_sb Classify season states # (prefer dry rainy over early/late states) Classify ideal season states # (prefer dry rainy over early/late states) # string representation # Compare season state with ideal state # ---------------------------------------------------------- task id from plant age, season rasters and plot conditions # identified_task_4 = identified_task + 40000000 create separate dataframes for tasks that occur on the same date. Each dataframe has a maximum of 1 task per date # 2*10**7 == 20000000 # t4 = calendar_tasks_labels[(calendar_tasks_labels.task_id > 4*10**7) & (calendar_tasks_labels.task_id < 5*10**7)] # t4 = t4.add_prefix('task_4_') # t4.rename(columns={"task_4_task_id": "task_id"}, inplace=True) Reclassify task IDs to task contents, loop through task dataframes & Match possible tasks with identified task from farm conditions add next task once (it is already concatenated) # ---------------------------------------------------------- Fertilizer conditions binned. Check per NPK advice if it is valid (task fertilizer class Equal class). 
and sum the advices (if not valid, they become 0 and will be omitted) classes are 1-12, based on age, variety and (live) support # sort by task_id # Defaults to True (the right side of the bin is closed so a value # is assigned to the bin on the left if it is exactly on a bin edge). # Calculate N P K advice Fertilizer conditions binned. Check per NPK advice if it is valid (task fertilizer class Equal class). and sum the advices (if not valid, they become 0 and will be omitted) classes are 1-12, based on age, variety and (live) support # Give quarterly instead of yearly advice (0.25) # & round by 5 grams as advised by IPB (0.2 & 5) Return argument parser. # pragma: no cover Call main command with args from parser. This method is called when you run 'bin/run-spiceup-labels', this is configured in 'setup.py'. Adjust when needed. You can have multiple main scripts. # logging.info(tasks_data_tasks) # fertilizer advice yes 1 or no 0 # TODO insert logic for manure input # optional TODO, insert logic for other input | 2.350521 | 2 |
tests/other/test_aiohttp.py | true2blue/firestone-engine | 0 | 6633307 | import tushare as ts
import asyncio
from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.triggers.cron import CronTrigger
import time
from datetime import datetime
async def get_data(l):
df = await ts.get_realtime_quotes(l)
print(df)
def run():
print(f'start = {datetime.now()}')
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
tasks = [asyncio.async(get_data('002639')), asyncio.async(get_data('000793'))]
loop.run_until_complete(asyncio.wait(tasks))
loop.close()
print(f'end = {datetime.now()}')
if __name__ == "__main__":
bgs = BackgroundScheduler()
trigger = CronTrigger(second='*/10')
bgs.add_job(run,trigger=trigger)
bgs.start()
while True:
time.sleep(3)
| import tushare as ts
import asyncio
from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.triggers.cron import CronTrigger
import time
from datetime import datetime
async def get_data(l):
df = await ts.get_realtime_quotes(l)
print(df)
def run():
print(f'start = {datetime.now()}')
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
tasks = [asyncio.async(get_data('002639')), asyncio.async(get_data('000793'))]
loop.run_until_complete(asyncio.wait(tasks))
loop.close()
print(f'end = {datetime.now()}')
if __name__ == "__main__":
bgs = BackgroundScheduler()
trigger = CronTrigger(second='*/10')
bgs.add_job(run,trigger=trigger)
bgs.start()
while True:
time.sleep(3)
| none | 1 | 2.620311 | 3 |
|
calamari_ocr/ocr/dataset/params.py | jacektl/calamari | 922 | 6633308 | from dataclasses import dataclass, field
from typing import Optional
from paiargparse import pai_dataclass, pai_meta
from tfaip import DataBaseParams
from calamari_ocr.ocr.dataset.codec import Codec
from calamari_ocr.ocr.dataset.datareader.abbyy.reader import Abbyy
from calamari_ocr.ocr.dataset.datareader.file import FileDataParams
from calamari_ocr.ocr.dataset.datareader.hdf5.reader import Hdf5
from calamari_ocr.ocr.dataset.datareader.pagexml.reader import PageXML
DATA_GENERATOR_CHOICES = [FileDataParams, PageXML, Abbyy, Hdf5]
@pai_dataclass
@dataclass
class DataParams(DataBaseParams):
skip_invalid_gt: bool = True
input_channels: int = 1
downscale_factor: int = field(default=-1, metadata=pai_meta(mode="ignore")) # Set based on model
line_height: int = field(default=48, metadata=pai_meta(help="The line height"))
ensemble: int = field(default=0, metadata=pai_meta(mode="ignore")) # Set based on model
codec: Optional[Codec] = field(default=None, metadata=pai_meta(mode="ignore"))
@staticmethod
def cls():
from calamari_ocr.ocr.dataset.data import Data
return Data
def __post_init__(self):
from calamari_ocr.ocr.dataset.imageprocessors.center_normalizer import (
CenterNormalizerProcessorParams,
)
from calamari_ocr.ocr.dataset.imageprocessors.scale_to_height_processor import (
ScaleToHeightProcessorParams,
)
for p in self.post_proc.processors + self.pre_proc.processors:
if isinstance(p, ScaleToHeightProcessorParams):
p.height = self.line_height
elif isinstance(p, CenterNormalizerProcessorParams):
p.line_height = self.line_height
| from dataclasses import dataclass, field
from typing import Optional
from paiargparse import pai_dataclass, pai_meta
from tfaip import DataBaseParams
from calamari_ocr.ocr.dataset.codec import Codec
from calamari_ocr.ocr.dataset.datareader.abbyy.reader import Abbyy
from calamari_ocr.ocr.dataset.datareader.file import FileDataParams
from calamari_ocr.ocr.dataset.datareader.hdf5.reader import Hdf5
from calamari_ocr.ocr.dataset.datareader.pagexml.reader import PageXML
DATA_GENERATOR_CHOICES = [FileDataParams, PageXML, Abbyy, Hdf5]
@pai_dataclass
@dataclass
class DataParams(DataBaseParams):
skip_invalid_gt: bool = True
input_channels: int = 1
downscale_factor: int = field(default=-1, metadata=pai_meta(mode="ignore")) # Set based on model
line_height: int = field(default=48, metadata=pai_meta(help="The line height"))
ensemble: int = field(default=0, metadata=pai_meta(mode="ignore")) # Set based on model
codec: Optional[Codec] = field(default=None, metadata=pai_meta(mode="ignore"))
@staticmethod
def cls():
from calamari_ocr.ocr.dataset.data import Data
return Data
def __post_init__(self):
from calamari_ocr.ocr.dataset.imageprocessors.center_normalizer import (
CenterNormalizerProcessorParams,
)
from calamari_ocr.ocr.dataset.imageprocessors.scale_to_height_processor import (
ScaleToHeightProcessorParams,
)
for p in self.post_proc.processors + self.pre_proc.processors:
if isinstance(p, ScaleToHeightProcessorParams):
p.height = self.line_height
elif isinstance(p, CenterNormalizerProcessorParams):
p.line_height = self.line_height
| en | 0.9934 | # Set based on model # Set based on model | 1.972796 | 2 |
facedetection.py | MachineLearning-Nerd/FaceDetection | 0 | 6633309 | <gh_stars>0
import dlib
import cv2
from drawmarks import renderFace
if __name__ == "__main__":
# Convert the rectangle in to the tuple of the dlib.rectangle output
def point_to_rectangle(rectangle):
# Take the input from the frontal face detector
# e.g facedetector = dlib.get_frontal_face_detector()
new_rect = dlib.rectangle(int(rectangle.left()),int(rectangle.top()),
int(rectangle.right()),int(rectangle.bottom()))
return new_rect
# This is the function for the writing the data into the files
def writelandmarkfile(dlandmarks,landmarks_filesname):
with open(landmarkfilename,'w') as f :
for p in dlandmarks.parts():
f.write("%s %s\n" %(int(p.x),int(p.y)))
f.close
# Put the predictor path here which is the pretrained path
predictor_path = "shape_predictor_68_face_landmarks.dat"
# Call the face detector
# This is the face detector. We will first detect the face.
facedetector = dlib.get_frontal_face_detector()
# Landmark detector is implemented in the shape predictor class
# So we will call it first and then we will go ahead
landmarkdetector = dlib.shape_predictor(predictor_path)
# Read the image from thse camera
cam = cv2.VideoCapture(0)
# If we just want to input as the image then
# imagename = "dinesh.jpg"
while(True):
# Capture the video frame by frame
ret , frame = cam.read()
# for image as the input
# im = cv2.imread(imagename)
# Landmarks will be stored in the results folder
landmarkbase = "results/faces"
# Process of the detection
# Detect the face in the image
facerectangle = facedetector(frame,0)
# Number of faces detected in the image
print("Number of the faces detected:", len(facerectangle))
# Detect all the landmarks in the image and stores
landmarkall = []
if (len(facerectangle)==0):
# show the image
cv2.imshow("Facial Landmark detector",frame)
cv2.waitKey(1)
continue
# Loop over the all the face those are detected in the frontal face detector
for i in range(0,len(facerectangle)):
# Get the all the point of the rectangle
new_rect = point_to_rectangle(facerectangle[i])
# For every face rectangle run the face landmark detection
landmarks = landmarkdetector(frame,new_rect)
# Number of the landmarks that are detected
if i==0:
print("Number of landmarks:",len(landmarks.parts()))
# Stores the all the landmarks
landmarkall.append(landmarks)
# Draw all the land marks
renderFace(frame, landmarks)
landmarkfilename = landmarkbase + "_" + str(i) + ".txt"
# Write the all the landmarks in the files
writelandmarkfile(landmarks,landmarkfilename)
# show the image
cv2.imshow("Facial Landmark detector",frame)
key = cv2.waitKey(1)
if key == 101 :
cv2.destroyAllWindows()
break
cv2.waitKey(1)
cv2.destroyAllWindows() | import dlib
import cv2
from drawmarks import renderFace
if __name__ == "__main__":
# Convert the rectangle in to the tuple of the dlib.rectangle output
def point_to_rectangle(rectangle):
# Take the input from the frontal face detector
# e.g facedetector = dlib.get_frontal_face_detector()
new_rect = dlib.rectangle(int(rectangle.left()),int(rectangle.top()),
int(rectangle.right()),int(rectangle.bottom()))
return new_rect
# This is the function for the writing the data into the files
def writelandmarkfile(dlandmarks,landmarks_filesname):
with open(landmarkfilename,'w') as f :
for p in dlandmarks.parts():
f.write("%s %s\n" %(int(p.x),int(p.y)))
f.close
# Put the predictor path here which is the pretrained path
predictor_path = "shape_predictor_68_face_landmarks.dat"
# Call the face detector
# This is the face detector. We will first detect the face.
facedetector = dlib.get_frontal_face_detector()
# Landmark detector is implemented in the shape predictor class
# So we will call it first and then we will go ahead
landmarkdetector = dlib.shape_predictor(predictor_path)
# Read the image from thse camera
cam = cv2.VideoCapture(0)
# If we just want to input as the image then
# imagename = "dinesh.jpg"
while(True):
# Capture the video frame by frame
ret , frame = cam.read()
# for image as the input
# im = cv2.imread(imagename)
# Landmarks will be stored in the results folder
landmarkbase = "results/faces"
# Process of the detection
# Detect the face in the image
facerectangle = facedetector(frame,0)
# Number of faces detected in the image
print("Number of the faces detected:", len(facerectangle))
# Detect all the landmarks in the image and stores
landmarkall = []
if (len(facerectangle)==0):
# show the image
cv2.imshow("Facial Landmark detector",frame)
cv2.waitKey(1)
continue
# Loop over the all the face those are detected in the frontal face detector
for i in range(0,len(facerectangle)):
# Get the all the point of the rectangle
new_rect = point_to_rectangle(facerectangle[i])
# For every face rectangle run the face landmark detection
landmarks = landmarkdetector(frame,new_rect)
# Number of the landmarks that are detected
if i==0:
print("Number of landmarks:",len(landmarks.parts()))
# Stores the all the landmarks
landmarkall.append(landmarks)
# Draw all the land marks
renderFace(frame, landmarks)
landmarkfilename = landmarkbase + "_" + str(i) + ".txt"
# Write the all the landmarks in the files
writelandmarkfile(landmarks,landmarkfilename)
# show the image
cv2.imshow("Facial Landmark detector",frame)
key = cv2.waitKey(1)
if key == 101 :
cv2.destroyAllWindows()
break
cv2.waitKey(1)
cv2.destroyAllWindows() | en | 0.80764 | # Convert the rectangle in to the tuple of the dlib.rectangle output # Take the input from the frontal face detector # e.g facedetector = dlib.get_frontal_face_detector() # This is the function for the writing the data into the files # Put the predictor path here which is the pretrained path # Call the face detector # This is the face detector. We will first detect the face. # Landmark detector is implemented in the shape predictor class # So we will call it first and then we will go ahead # Read the image from thse camera # If we just want to input as the image then # imagename = "dinesh.jpg" # Capture the video frame by frame # for image as the input # im = cv2.imread(imagename) # Landmarks will be stored in the results folder # Process of the detection # Detect the face in the image # Number of faces detected in the image # Detect all the landmarks in the image and stores # show the image # Loop over the all the face those are detected in the frontal face detector # Get the all the point of the rectangle # For every face rectangle run the face landmark detection # Number of the landmarks that are detected # Stores the all the landmarks # Draw all the land marks # Write the all the landmarks in the files # show the image | 3.069812 | 3 |
sample/how_ocr_works.py | uzstudio/findit | 91 | 6633310 | """
OCR engine binding to tesseract engine.
tesseract engine: https://github.com/tesseract-ocr/tesseract
tesseract language data: https://github.com/tesseract-ocr/tesseract/wiki/Data-Files#data-files-for-version-400-november-29-2016
tesserocr (python wrapper of tesseract): https://github.com/sirfz/tesserocr
"""
import tesserocr
from PIL import Image
image = Image.open('./pics/screen.png')
print(tesserocr.image_to_text(image))
print(tesserocr.get_languages())
# or ...
from tesserocr import PyTessBaseAPI
images = ['./pics/screen.png']
# you can set language here, but you need to install specify language data firstly.
with PyTessBaseAPI(lang='eng') as api:
for img in images:
api.SetImageFile(img)
print(api.GetUTF8Text())
| """
OCR engine binding to tesseract engine.
tesseract engine: https://github.com/tesseract-ocr/tesseract
tesseract language data: https://github.com/tesseract-ocr/tesseract/wiki/Data-Files#data-files-for-version-400-november-29-2016
tesserocr (python wrapper of tesseract): https://github.com/sirfz/tesserocr
"""
import tesserocr
from PIL import Image
image = Image.open('./pics/screen.png')
print(tesserocr.image_to_text(image))
print(tesserocr.get_languages())
# or ...
from tesserocr import PyTessBaseAPI
images = ['./pics/screen.png']
# you can set language here, but you need to install specify language data firstly.
with PyTessBaseAPI(lang='eng') as api:
for img in images:
api.SetImageFile(img)
print(api.GetUTF8Text())
| en | 0.571591 | OCR engine binding to tesseract engine. tesseract engine: https://github.com/tesseract-ocr/tesseract tesseract language data: https://github.com/tesseract-ocr/tesseract/wiki/Data-Files#data-files-for-version-400-november-29-2016 tesserocr (python wrapper of tesseract): https://github.com/sirfz/tesserocr # or ... # you can set language here, but you need to install specify language data firstly. | 2.666722 | 3 |
tests/test_gfapy_line_containment.py | ujjwalsh/gfapy | 44 | 6633311 | import unittest
import gfapy
class TestLineContainment(unittest.TestCase):
def test_from_string(self):
fields = ["C","1","+","2","-","12","12M","MQ:i:1232","NM:i:3","ab:Z:abcd"]
string="\t".join(fields)
gfapy.Line(string)
self.assertIsInstance(gfapy.Line(string), gfapy.line.edge.Containment)
self.assertEqual(fields[0], gfapy.Line(string).record_type)
self.assertEqual(fields[1], gfapy.Line(string).from_segment)
self.assertEqual(fields[2], gfapy.Line(string).from_orient)
self.assertEqual(fields[3], gfapy.Line(string).to_segment)
self.assertEqual(fields[4], gfapy.Line(string).to_orient)
self.assertEqual(12, gfapy.Line(string).pos)
self.assertEqual([gfapy.alignment.cigar.CIGAR.Operation(12, "M")],
gfapy.Line(string).overlap)
self.assertEqual(1232, gfapy.Line(string).MQ)
self.assertEqual(3, gfapy.Line(string).NM)
self.assertEqual("abcd", gfapy.Line(string).ab)
with self.assertRaises(gfapy.FormatError):
gfapy.Line(string+"\tH1")
with self.assertRaises(gfapy.FormatError):
gfapy.Line(string+"\tH1")
with self.assertRaises(gfapy.FormatError):
gfapy.Line("C\tH")
with self.assertRaises(gfapy.FormatError):
f=fields[:]
f[2]="x"
gfapy.Line("\t".join(f), vlevel = 2)
with self.assertRaises(gfapy.FormatError):
f=fields[:]
f[4]="x"
gfapy.Line("\t".join(f), vlevel = 2)
with self.assertRaises(gfapy.FormatError):
f=fields[:]
f[5]="x"
gfapy.Line("\t".join(f), vlevel = 2)
with self.assertRaises(gfapy.FormatError):
f=fields[:]
f[6]="x"
gfapy.Line("\t".join(f), vlevel = 2)
with self.assertRaises(gfapy.TypeError):
f=fields[:]
f[7]="MQ:Z:1232"
gfapy.Line("\t".join(f), vlevel = 2)
with self.assertRaises(gfapy.TypeError):
f=fields[:]
f[8]="NM:Z:1232"
gfapy.Line("\t".join(f), vlevel = 2)
| import unittest
import gfapy
class TestLineContainment(unittest.TestCase):
def test_from_string(self):
fields = ["C","1","+","2","-","12","12M","MQ:i:1232","NM:i:3","ab:Z:abcd"]
string="\t".join(fields)
gfapy.Line(string)
self.assertIsInstance(gfapy.Line(string), gfapy.line.edge.Containment)
self.assertEqual(fields[0], gfapy.Line(string).record_type)
self.assertEqual(fields[1], gfapy.Line(string).from_segment)
self.assertEqual(fields[2], gfapy.Line(string).from_orient)
self.assertEqual(fields[3], gfapy.Line(string).to_segment)
self.assertEqual(fields[4], gfapy.Line(string).to_orient)
self.assertEqual(12, gfapy.Line(string).pos)
self.assertEqual([gfapy.alignment.cigar.CIGAR.Operation(12, "M")],
gfapy.Line(string).overlap)
self.assertEqual(1232, gfapy.Line(string).MQ)
self.assertEqual(3, gfapy.Line(string).NM)
self.assertEqual("abcd", gfapy.Line(string).ab)
with self.assertRaises(gfapy.FormatError):
gfapy.Line(string+"\tH1")
with self.assertRaises(gfapy.FormatError):
gfapy.Line(string+"\tH1")
with self.assertRaises(gfapy.FormatError):
gfapy.Line("C\tH")
with self.assertRaises(gfapy.FormatError):
f=fields[:]
f[2]="x"
gfapy.Line("\t".join(f), vlevel = 2)
with self.assertRaises(gfapy.FormatError):
f=fields[:]
f[4]="x"
gfapy.Line("\t".join(f), vlevel = 2)
with self.assertRaises(gfapy.FormatError):
f=fields[:]
f[5]="x"
gfapy.Line("\t".join(f), vlevel = 2)
with self.assertRaises(gfapy.FormatError):
f=fields[:]
f[6]="x"
gfapy.Line("\t".join(f), vlevel = 2)
with self.assertRaises(gfapy.TypeError):
f=fields[:]
f[7]="MQ:Z:1232"
gfapy.Line("\t".join(f), vlevel = 2)
with self.assertRaises(gfapy.TypeError):
f=fields[:]
f[8]="NM:Z:1232"
gfapy.Line("\t".join(f), vlevel = 2)
| none | 1 | 3.023729 | 3 |
|
pysongman/views/library/audio.py | devdave/pysongman | 1 | 6633312 | from pysongman.lib.qtd import QtWidgets, Qt
from pysongman.lib.qtd import QHBoxLayout
from pysongman.lib.qtd import QVBoxLayout
from pysongman.lib.qtd import QLabel
from pysongman.lib.qtd import QLineEdit
from pysongman.lib.qtd import QPushButton
from pysongman.lib.qtd import QFrame
class AudioWindow(QtWidgets.QWidget):
search_label: QLabel
search_input: QLineEdit
clear_button: QPushButton
artist_table: QtWidgets.QTableView
album_table: QtWidgets.QTableView
songs_table: QtWidgets.QTableView
def __init__(self):
super(AudioWindow, self).__init__()
self.top = None
self.body = None
self.frame = None
self.setup_ui()
def setup_ui(self):
"""
frame
VBOX
HBOX # Search label # Line edit # clear button
##############################################
HBOX # Artist Table | SPLITTER | Album Table
|SPLITTER |
# Song table #
"""
self.frame = QFrame()
self.body = QVBoxLayout()
search_line = QHBoxLayout()
search_label = QLabel("Search: ")
self.search_input = QLineEdit()
self.clear_button = QPushButton("Clear Search")
search_line.addWidget(search_label)
search_line.addWidget(self.search_input)
search_line.addWidget(self.clear_button)
self.body.addLayout(search_line)
self.artist_table = QtWidgets.QTableView()
self.album_table = QtWidgets.QTableView()
self.songs_table = QtWidgets.QTableView()
# get rid of the vertical headers
for table in [self.artist_table, self.album_table, self.songs_table]:
table.verticalHeader().hide()
table.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectRows)
table.setSelectionMode(QtWidgets.QAbstractItemView.SingleSelection)
table.setSortingEnabled(True)
table.horizontalHeader().setSectionResizeMode(QtWidgets.QHeaderView.Stretch)
artist_album_split = QtWidgets.QSplitter(Qt.Horizontal)
artist_album_split.addWidget(self.artist_table)
artist_album_split.addWidget(self.album_table)
a_a_songs_split = QtWidgets.QSplitter(Qt.Vertical)
a_a_songs_split.addWidget(artist_album_split)
a_a_songs_split.addWidget(self.songs_table)
self.body.addWidget(a_a_songs_split)
self.setLayout(self.body)
def show(self) -> None:
super(AudioWindow, self).show()
self.album_table.model().beginResetModel()
self.artist_table.model().beginResetModel()
| from pysongman.lib.qtd import QtWidgets, Qt
from pysongman.lib.qtd import QHBoxLayout
from pysongman.lib.qtd import QVBoxLayout
from pysongman.lib.qtd import QLabel
from pysongman.lib.qtd import QLineEdit
from pysongman.lib.qtd import QPushButton
from pysongman.lib.qtd import QFrame
class AudioWindow(QtWidgets.QWidget):
search_label: QLabel
search_input: QLineEdit
clear_button: QPushButton
artist_table: QtWidgets.QTableView
album_table: QtWidgets.QTableView
songs_table: QtWidgets.QTableView
def __init__(self):
super(AudioWindow, self).__init__()
self.top = None
self.body = None
self.frame = None
self.setup_ui()
def setup_ui(self):
"""
frame
VBOX
HBOX # Search label # Line edit # clear button
##############################################
HBOX # Artist Table | SPLITTER | Album Table
|SPLITTER |
# Song table #
"""
self.frame = QFrame()
self.body = QVBoxLayout()
search_line = QHBoxLayout()
search_label = QLabel("Search: ")
self.search_input = QLineEdit()
self.clear_button = QPushButton("Clear Search")
search_line.addWidget(search_label)
search_line.addWidget(self.search_input)
search_line.addWidget(self.clear_button)
self.body.addLayout(search_line)
self.artist_table = QtWidgets.QTableView()
self.album_table = QtWidgets.QTableView()
self.songs_table = QtWidgets.QTableView()
# get rid of the vertical headers
for table in [self.artist_table, self.album_table, self.songs_table]:
table.verticalHeader().hide()
table.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectRows)
table.setSelectionMode(QtWidgets.QAbstractItemView.SingleSelection)
table.setSortingEnabled(True)
table.horizontalHeader().setSectionResizeMode(QtWidgets.QHeaderView.Stretch)
artist_album_split = QtWidgets.QSplitter(Qt.Horizontal)
artist_album_split.addWidget(self.artist_table)
artist_album_split.addWidget(self.album_table)
a_a_songs_split = QtWidgets.QSplitter(Qt.Vertical)
a_a_songs_split.addWidget(artist_album_split)
a_a_songs_split.addWidget(self.songs_table)
self.body.addWidget(a_a_songs_split)
self.setLayout(self.body)
def show(self) -> None:
super(AudioWindow, self).show()
self.album_table.model().beginResetModel()
self.artist_table.model().beginResetModel()
| en | 0.25047 | frame VBOX HBOX # Search label # Line edit # clear button ############################################## HBOX # Artist Table | SPLITTER | Album Table |SPLITTER | # Song table # # get rid of the vertical headers | 2.600228 | 3 |
src/NumberEngine.py | VaclavSevcik/MathKing | 0 | 6633313 | <filename>src/NumberEngine.py
from src.exceptions import ParseConfigException
import random
# TODO constrain number of example
# TODO - constrain - more then EXAMPLE_TO_PAGE example overflow one page - need count page and write to previous page in method __writeExamplesToPDF
EXAMPLE_TO_PAGE = 132
class NumberEngine:
'''
The class NumberEngine provides generate examples according to configuration from application window.
'''
lowBoundNumber = 0
highBoundNumber = 0
addition = False
subtraction = False
multiplication = False
division = False
def __prepare_data(self, information_from_GUI):
''' The method sets all inner data from argument and check if their is regular.
:param dict information_from_GUI: The dictionary with information passed from GUI.
:return None:
'''
self.addition = information_from_GUI['plus']
self.subtraction = information_from_GUI['minus']
self.multiplication = information_from_GUI['multiplication']
self.division = information_from_GUI['division']
self.allowedOperation = []
if self.addition:
self.allowedOperation.append('addition')
if self.subtraction:
self.allowedOperation.append('subtraction')
if self.multiplication:
self.allowedOperation.append('multiplication')
if self.division:
self.allowedOperation.append('division')
# check is lower range is number
try:
int(information_from_GUI['range_from'])
except:
raise ParseConfigException('parseExceptionFromIsInteger')
# check is higher range is number
try:
int(information_from_GUI['range_to'])
except:
raise ParseConfigException('parseExceptionFromIsInteger')
# check low boundary is lower than high boundary
if int(information_from_GUI['range_from']) > int(information_from_GUI['range_to']):
raise ParseConfigException('parseExceptionFromGreaterThenTo')
# set boundary values
self.lowBoundNumber = int(information_from_GUI['range_from'])
self.highBoundNumber = int(information_from_GUI['range_to'])
# check the amount of examples is number
try:
int(information_from_GUI['amount_of_examples'])
except:
raise ParseConfigException('parseExceptionAmountOfExampleIsInteger')
# check the amount of examples is greater than zero
if int(information_from_GUI['amount_of_examples']) < 0:
raise ParseConfigException('parseExceptionAmountOfExampleIsGreaterThanZero')
# Number of example to generate
self.numberOfExample = int(information_from_GUI['amount_of_examples'])
# set number of example to max number example which fill one page.
if self.numberOfExample > EXAMPLE_TO_PAGE:
self.numberOfExample = EXAMPLE_TO_PAGE
self.listOfExamples = []
def __create_example(self):
''' The method creates one example with expression (exp sign exp)
:return str: The expression in string.
'''
# generate two random number from low bound to high bound
first_number = self.__getRandomNumber(self.lowBoundNumber, self.highBoundNumber)
second_number = self.__getRandomNumber(self.lowBoundNumber, self.highBoundNumber)
# draw enable operator
if not self.allowedOperation:
return ""
operation = random.choice(self.allowedOperation)
# The string contains example
example = ""
# Addition case
if operation == "addition":
example = str(first_number) + " + " + str(second_number)
# Multiplication case
elif operation == "multiplication":
example = str(first_number) + " * " + str(second_number)
# Subtraction case
elif operation == "subtraction":
while second_number > first_number:
first_number = self.__getRandomNumber(self.lowBoundNumber, self.highBoundNumber)
second_number = self.__getRandomNumber(self.lowBoundNumber, self.highBoundNumber)
example = str(first_number) + " - " + str(second_number)
# Division case
elif operation == "division":
# generates new value when one of number is negative or division has reminder
while (first_number < 0 and second_number <= 0) or first_number % second_number != 0:
first_number = self.__getRandomNumber(self.lowBoundNumber, self.highBoundNumber)
second_number = self.__getRandomNumber(self.lowBoundNumber, self.highBoundNumber)
example = str(first_number) + " / " + str(second_number)
return example
def __getRandomNumber(self, start_int, stop_int):
''' The method generate one number from interval.
The method uses library random which provides Mersenne Twister pseudorandom generator.
:param int start_int: The lower limit.
:param int stop_int: The higher limit.
:return int: The pseudorandom number from range of limits in argument.
'''
return random.randint(start_int, stop_int)
def generateExamples(self, information_from_GUI):
''' The method generate amount examples according to user input.
:param dict information_from_GUI: The dictionary with information passed from GUI.
:return [str]: The list with examples.
'''
# initialize number engine
self.__prepare_data(information_from_GUI)
# generate amount of example
for i in range(self.numberOfExample):
self.listOfExamples.append(self.__create_example())
return self.listOfExamples | <filename>src/NumberEngine.py
from src.exceptions import ParseConfigException
import random
# TODO constrain number of example
# TODO - constrain - more then EXAMPLE_TO_PAGE example overflow one page - need count page and write to previous page in method __writeExamplesToPDF
EXAMPLE_TO_PAGE = 132
class NumberEngine:
'''
The class NumberEngine provides generate examples according to configuration from application window.
'''
lowBoundNumber = 0
highBoundNumber = 0
addition = False
subtraction = False
multiplication = False
division = False
def __prepare_data(self, information_from_GUI):
''' The method sets all inner data from argument and check if their is regular.
:param dict information_from_GUI: The dictionary with information passed from GUI.
:return None:
'''
self.addition = information_from_GUI['plus']
self.subtraction = information_from_GUI['minus']
self.multiplication = information_from_GUI['multiplication']
self.division = information_from_GUI['division']
self.allowedOperation = []
if self.addition:
self.allowedOperation.append('addition')
if self.subtraction:
self.allowedOperation.append('subtraction')
if self.multiplication:
self.allowedOperation.append('multiplication')
if self.division:
self.allowedOperation.append('division')
# check is lower range is number
try:
int(information_from_GUI['range_from'])
except:
raise ParseConfigException('parseExceptionFromIsInteger')
# check is higher range is number
try:
int(information_from_GUI['range_to'])
except:
raise ParseConfigException('parseExceptionFromIsInteger')
# check low boundary is lower than high boundary
if int(information_from_GUI['range_from']) > int(information_from_GUI['range_to']):
raise ParseConfigException('parseExceptionFromGreaterThenTo')
# set boundary values
self.lowBoundNumber = int(information_from_GUI['range_from'])
self.highBoundNumber = int(information_from_GUI['range_to'])
# check the amount of examples is number
try:
int(information_from_GUI['amount_of_examples'])
except:
raise ParseConfigException('parseExceptionAmountOfExampleIsInteger')
# check the amount of examples is greater than zero
if int(information_from_GUI['amount_of_examples']) < 0:
raise ParseConfigException('parseExceptionAmountOfExampleIsGreaterThanZero')
# Number of example to generate
self.numberOfExample = int(information_from_GUI['amount_of_examples'])
# set number of example to max number example which fill one page.
if self.numberOfExample > EXAMPLE_TO_PAGE:
self.numberOfExample = EXAMPLE_TO_PAGE
self.listOfExamples = []
def __create_example(self):
''' The method creates one example with expression (exp sign exp)
:return str: The expression in string.
'''
# generate two random number from low bound to high bound
first_number = self.__getRandomNumber(self.lowBoundNumber, self.highBoundNumber)
second_number = self.__getRandomNumber(self.lowBoundNumber, self.highBoundNumber)
# draw enable operator
if not self.allowedOperation:
return ""
operation = random.choice(self.allowedOperation)
# The string contains example
example = ""
# Addition case
if operation == "addition":
example = str(first_number) + " + " + str(second_number)
# Multiplication case
elif operation == "multiplication":
example = str(first_number) + " * " + str(second_number)
# Subtraction case
elif operation == "subtraction":
while second_number > first_number:
first_number = self.__getRandomNumber(self.lowBoundNumber, self.highBoundNumber)
second_number = self.__getRandomNumber(self.lowBoundNumber, self.highBoundNumber)
example = str(first_number) + " - " + str(second_number)
# Division case
elif operation == "division":
# generates new value when one of number is negative or division has reminder
while (first_number < 0 and second_number <= 0) or first_number % second_number != 0:
first_number = self.__getRandomNumber(self.lowBoundNumber, self.highBoundNumber)
second_number = self.__getRandomNumber(self.lowBoundNumber, self.highBoundNumber)
example = str(first_number) + " / " + str(second_number)
return example
def __getRandomNumber(self, start_int, stop_int):
''' The method generate one number from interval.
The method uses library random which provides Mersenne Twister pseudorandom generator.
:param int start_int: The lower limit.
:param int stop_int: The higher limit.
:return int: The pseudorandom number from range of limits in argument.
'''
return random.randint(start_int, stop_int)
def generateExamples(self, information_from_GUI):
''' The method generate amount examples according to user input.
:param dict information_from_GUI: The dictionary with information passed from GUI.
:return [str]: The list with examples.
'''
# initialize number engine
self.__prepare_data(information_from_GUI)
# generate amount of example
for i in range(self.numberOfExample):
self.listOfExamples.append(self.__create_example())
return self.listOfExamples | en | 0.737899 | # TODO constrain number of example # TODO - constrain - more then EXAMPLE_TO_PAGE example overflow one page - need count page and write to previous page in method __writeExamplesToPDF The class NumberEngine provides generate examples according to configuration from application window. The method sets all inner data from argument and check if their is regular. :param dict information_from_GUI: The dictionary with information passed from GUI. :return None: # check is lower range is number # check is higher range is number # check low boundary is lower than high boundary # set boundary values # check the amount of examples is number # check the amount of examples is greater than zero # Number of example to generate # set number of example to max number example which fill one page. The method creates one example with expression (exp sign exp) :return str: The expression in string. # generate two random number from low bound to high bound # draw enable operator # The string contains example # Addition case # Multiplication case # Subtraction case # Division case # generates new value when one of number is negative or division has reminder The method generate one number from interval. The method uses library random which provides Mersenne Twister pseudorandom generator. :param int start_int: The lower limit. :param int stop_int: The higher limit. :return int: The pseudorandom number from range of limits in argument. The method generate amount examples according to user input. :param dict information_from_GUI: The dictionary with information passed from GUI. :return [str]: The list with examples. # initialize number engine # generate amount of example | 3.308687 | 3 |
alteia/core/resources/sharetokens.py | alteia-ai/alteia-python-sdk | 11 | 6633314 | <gh_stars>10-100
from typing import List, NamedTuple
from alteia.core.utils.typing import ShareToken
ShareTokensWithTotal = NamedTuple(
'ShareTokensWithTotal',
[('total', int), ('results', List[ShareToken])]
)
| from typing import List, NamedTuple
from alteia.core.utils.typing import ShareToken
ShareTokensWithTotal = NamedTuple(
'ShareTokensWithTotal',
[('total', int), ('results', List[ShareToken])]
) | none | 1 | 2.298903 | 2 |
|
setup.py | mao2009/Python_Counter | 0 | 6633315 | from setuptools import setup
from os import path
with open('README.md') as f:
long_description = f.read()
setup(
name='itrcnt',
module='itrcnt.py',
version='0.1.2',
license='BSD',
author='mao2009',
url='https://github.com/mao2009/Python_Counter',
description='Alternative for Range and Enumerator',
long_description=long_description,
long_description_content_type='text/markdown',
keywords='range enumrator'
) | from setuptools import setup
from os import path
with open('README.md') as f:
long_description = f.read()
setup(
name='itrcnt',
module='itrcnt.py',
version='0.1.2',
license='BSD',
author='mao2009',
url='https://github.com/mao2009/Python_Counter',
description='Alternative for Range and Enumerator',
long_description=long_description,
long_description_content_type='text/markdown',
keywords='range enumrator'
) | none | 1 | 1.509625 | 2 |
|
scripts/remote_cam_view.py | KingTaTo/CS321-Racer-1 | 0 | 6633316 | """
Scripts to drive a donkey car remotely
Usage:
remote_cam_view.py --name=<robot_name> --broker="localhost" [--record=<path>]
Options:
-h --help Show this screen.
"""
import os
import time
import math
from docopt import docopt
import donkeycar as dk
import cv2
from donkeycar.parts.cv import CvImageView, ImgBGR2RGB, ImgRGB2BGR, ImageScale, ImgWriter, ArrowKeyboardControls
from donkeycar.parts.salient import SalientVis
from donkeycar.parts.network import MQTTValuePub, MQTTValueSub
from donkeycar.parts.transform import Lambda
from donkeycar.parts.image import JpgToImgArr
V = dk.vehicle.Vehicle()
args = docopt(__doc__)
print(args)
V.add(MQTTValueSub(name="donkey/%s/camera" % args["--name"], broker=args["--broker"]), outputs=["jpg"])
V.add(JpgToImgArr(), inputs=["jpg"], outputs=["img_arr"])
V.add(ImgBGR2RGB(), inputs=["img_arr"], outputs=["rgb"])
V.add(ImageScale(4.0), inputs=["rgb"], outputs=["lg_img"])
V.add(CvImageView(), inputs=["lg_img"])
V.add(ArrowKeyboardControls(), outputs=["control"])
V.add(MQTTValuePub(name="donkey/%s/controls" % args["--name"]), inputs=["control"])
record_path = args["--record"]
if record_path is not None:
class ImageSaver:
def __init__(self, path):
self.index = 0
self.path = path
def run(self, img_arr):
if img_arr is None:
return
dest_path = os.path.join(self.path, "img_%d.jpg" % self.index)
self.index += 1
cv2.imwrite(dest_path, img_arr)
V.add(ImageSaver(record_path), inputs=["rgb"])
V.start(rate_hz=20)
| """
Scripts to drive a donkey car remotely
Usage:
remote_cam_view.py --name=<robot_name> --broker="localhost" [--record=<path>]
Options:
-h --help Show this screen.
"""
import os
import time
import math
from docopt import docopt
import donkeycar as dk
import cv2
from donkeycar.parts.cv import CvImageView, ImgBGR2RGB, ImgRGB2BGR, ImageScale, ImgWriter, ArrowKeyboardControls
from donkeycar.parts.salient import SalientVis
from donkeycar.parts.network import MQTTValuePub, MQTTValueSub
from donkeycar.parts.transform import Lambda
from donkeycar.parts.image import JpgToImgArr
V = dk.vehicle.Vehicle()
args = docopt(__doc__)
print(args)
V.add(MQTTValueSub(name="donkey/%s/camera" % args["--name"], broker=args["--broker"]), outputs=["jpg"])
V.add(JpgToImgArr(), inputs=["jpg"], outputs=["img_arr"])
V.add(ImgBGR2RGB(), inputs=["img_arr"], outputs=["rgb"])
V.add(ImageScale(4.0), inputs=["rgb"], outputs=["lg_img"])
V.add(CvImageView(), inputs=["lg_img"])
V.add(ArrowKeyboardControls(), outputs=["control"])
V.add(MQTTValuePub(name="donkey/%s/controls" % args["--name"]), inputs=["control"])
record_path = args["--record"]
if record_path is not None:
class ImageSaver:
def __init__(self, path):
self.index = 0
self.path = path
def run(self, img_arr):
if img_arr is None:
return
dest_path = os.path.join(self.path, "img_%d.jpg" % self.index)
self.index += 1
cv2.imwrite(dest_path, img_arr)
V.add(ImageSaver(record_path), inputs=["rgb"])
V.start(rate_hz=20)
| en | 0.472584 | Scripts to drive a donkey car remotely
Usage:
remote_cam_view.py --name=<robot_name> --broker="localhost" [--record=<path>]
Options:
-h --help Show this screen. | 2.651703 | 3 |
tests/test_logzioSender.py | hilsenrat/logzio-python-handler | 31 | 6633317 | <gh_stars>10-100
import fnmatch
import logging.config
import os
import time
from unittest import TestCase
from .mockLogzioListener import listener
def _find(pattern, path):
result = []
for root, dirs, files in os.walk(path):
for name in files:
if fnmatch.fnmatch(name, pattern):
result.append(os.path.join(root, name))
break # Not descending recursively
return result
class TestLogzioSender(TestCase):
def setUp(self):
self.logzio_listener = listener.MockLogzioListener()
self.logzio_listener.clear_logs_buffer()
self.logzio_listener.clear_server_error()
self.logs_drain_timeout = 1
logging_configuration = {
"version": 1,
"formatters": {
"logzio": {
"format": '{"key": "value"}',
"validate": False
}
},
"handlers": {
"LogzioHandler": {
"class": "logzio.handler.LogzioHandler",
"formatter": "logzio",
"level": "DEBUG",
"token": "token",
'logzio_type': "type",
'logs_drain_timeout': self.logs_drain_timeout,
'url': "http://" + self.logzio_listener.get_host() + ":" + str(self.logzio_listener.get_port()),
'debug': True
}
},
"loggers": {
"test": {
"handlers": ["LogzioHandler"],
"level": "DEBUG"
}
}
}
logging.config.dictConfig(logging_configuration)
self.logger = logging.getLogger('test')
for curr_file in _find("logzio-failures-*.txt", "."):
os.remove(curr_file)
def test_simple_log_drain(self):
log_message = "Test simple log drain"
self.logger.info(log_message)
time.sleep(self.logs_drain_timeout * 2)
self.assertTrue(self.logzio_listener.find_log(log_message))
def test_multiple_lines_drain(self):
logs_num = 50
for counter in range(0, logs_num):
self.logger.info("Test " + str(counter))
time.sleep(self.logs_drain_timeout * 2)
for counter in range(0, logs_num):
self.logger.info("Test " + str(counter))
time.sleep(self.logs_drain_timeout * 2)
self.assertEqual(self.logzio_listener.get_number_of_logs(), logs_num * 2)
def test_server_failure(self):
log_message = "Failing log message"
self.logzio_listener.set_server_error()
self.logger.info(log_message)
time.sleep(self.logs_drain_timeout * 2)
self.assertFalse(self.logzio_listener.find_log(log_message))
self.logzio_listener.clear_server_error()
time.sleep(self.logs_drain_timeout * 2 * 4) # Longer, because of the retry
self.assertTrue(self.logzio_listener.find_log(log_message))
def test_local_file_backup(self):
log_message = "Backup to local filesystem"
self.logzio_listener.set_server_error()
self.logger.info(log_message)
# Make sure no file is present
self.assertEqual(len(_find("logzio-failures-*.txt", ".")), 0)
time.sleep(2 * 2 * 2 * 2 * 2) # All of the retries
failure_files = _find("logzio-failures-*.txt", ".")
self.assertEqual(len(failure_files), 1)
with open(failure_files[0], "r") as f:
line = f.readline()
self.assertTrue(log_message in line)
def test_local_file_backup_disabled(self):
log_message = "Backup to local filesystem"
self.logzio_listener.set_server_error()
self.logger.handlers[0].logzio_sender.backup_logs = False
self.logger.info(log_message)
# Make sure no file is present
self.assertEqual(len(_find("logzio-failures-*.txt", ".")), 0)
time.sleep(2 * 2 * 2 * 2 * 2) # All of the retries
# Make sure no file was created
self.assertEqual(len(_find("logzio-failures-*.txt", ".")), 0)
def test_can_send_after_fork(self):
childpid = os.fork()
child_log_message = 'logged from child process'
parent_log_message = 'logged from parent process'
if childpid == 0:
# Log from the child process
self.logger.info(child_log_message)
time.sleep(self.logs_drain_timeout * 2)
os._exit(0)
# Wait for the child process to finish
os.waitpid(childpid, 0)
# log from the parent process
self.logger.info(parent_log_message)
time.sleep(self.logs_drain_timeout * 2)
# Ensure listener receive all log messages
self.assertTrue(self.logzio_listener.find_log(child_log_message))
self.assertTrue(self.logzio_listener.find_log(parent_log_message))
| import fnmatch
import logging.config
import os
import time
from unittest import TestCase
from .mockLogzioListener import listener
def _find(pattern, path):
result = []
for root, dirs, files in os.walk(path):
for name in files:
if fnmatch.fnmatch(name, pattern):
result.append(os.path.join(root, name))
break # Not descending recursively
return result
class TestLogzioSender(TestCase):
def setUp(self):
self.logzio_listener = listener.MockLogzioListener()
self.logzio_listener.clear_logs_buffer()
self.logzio_listener.clear_server_error()
self.logs_drain_timeout = 1
logging_configuration = {
"version": 1,
"formatters": {
"logzio": {
"format": '{"key": "value"}',
"validate": False
}
},
"handlers": {
"LogzioHandler": {
"class": "logzio.handler.LogzioHandler",
"formatter": "logzio",
"level": "DEBUG",
"token": "token",
'logzio_type': "type",
'logs_drain_timeout': self.logs_drain_timeout,
'url': "http://" + self.logzio_listener.get_host() + ":" + str(self.logzio_listener.get_port()),
'debug': True
}
},
"loggers": {
"test": {
"handlers": ["LogzioHandler"],
"level": "DEBUG"
}
}
}
logging.config.dictConfig(logging_configuration)
self.logger = logging.getLogger('test')
for curr_file in _find("logzio-failures-*.txt", "."):
os.remove(curr_file)
def test_simple_log_drain(self):
log_message = "Test simple log drain"
self.logger.info(log_message)
time.sleep(self.logs_drain_timeout * 2)
self.assertTrue(self.logzio_listener.find_log(log_message))
def test_multiple_lines_drain(self):
logs_num = 50
for counter in range(0, logs_num):
self.logger.info("Test " + str(counter))
time.sleep(self.logs_drain_timeout * 2)
for counter in range(0, logs_num):
self.logger.info("Test " + str(counter))
time.sleep(self.logs_drain_timeout * 2)
self.assertEqual(self.logzio_listener.get_number_of_logs(), logs_num * 2)
def test_server_failure(self):
log_message = "Failing log message"
self.logzio_listener.set_server_error()
self.logger.info(log_message)
time.sleep(self.logs_drain_timeout * 2)
self.assertFalse(self.logzio_listener.find_log(log_message))
self.logzio_listener.clear_server_error()
time.sleep(self.logs_drain_timeout * 2 * 4) # Longer, because of the retry
self.assertTrue(self.logzio_listener.find_log(log_message))
def test_local_file_backup(self):
log_message = "Backup to local filesystem"
self.logzio_listener.set_server_error()
self.logger.info(log_message)
# Make sure no file is present
self.assertEqual(len(_find("logzio-failures-*.txt", ".")), 0)
time.sleep(2 * 2 * 2 * 2 * 2) # All of the retries
failure_files = _find("logzio-failures-*.txt", ".")
self.assertEqual(len(failure_files), 1)
with open(failure_files[0], "r") as f:
line = f.readline()
self.assertTrue(log_message in line)
def test_local_file_backup_disabled(self):
log_message = "Backup to local filesystem"
self.logzio_listener.set_server_error()
self.logger.handlers[0].logzio_sender.backup_logs = False
self.logger.info(log_message)
# Make sure no file is present
self.assertEqual(len(_find("logzio-failures-*.txt", ".")), 0)
time.sleep(2 * 2 * 2 * 2 * 2) # All of the retries
# Make sure no file was created
self.assertEqual(len(_find("logzio-failures-*.txt", ".")), 0)
def test_can_send_after_fork(self):
childpid = os.fork()
child_log_message = 'logged from child process'
parent_log_message = 'logged from parent process'
if childpid == 0:
# Log from the child process
self.logger.info(child_log_message)
time.sleep(self.logs_drain_timeout * 2)
os._exit(0)
# Wait for the child process to finish
os.waitpid(childpid, 0)
# log from the parent process
self.logger.info(parent_log_message)
time.sleep(self.logs_drain_timeout * 2)
# Ensure listener receive all log messages
self.assertTrue(self.logzio_listener.find_log(child_log_message))
self.assertTrue(self.logzio_listener.find_log(parent_log_message)) | en | 0.947567 | # Not descending recursively # Longer, because of the retry # Make sure no file is present # All of the retries # Make sure no file is present # All of the retries # Make sure no file was created # Log from the child process # Wait for the child process to finish # log from the parent process # Ensure listener receive all log messages | 2.468078 | 2 |
middleSchool.py | BrianCarela/google-python-exercises | 0 | 6633318 | <filename>middleSchool.py
import sys
def main():
u_gay = raw_input("Are you gay? (yes/no)")
if(u_gay == "no"):
trick = raw_input("Does your mom know you're gay? (yes/no)")
if(trick == "no"):
print("LOL ur gay bro")
else:
print("... Bet")
elif(u_gay == "yes"):
print("Facts, I respect it")
else: main()
if __name__ == '__main__':
main()
| <filename>middleSchool.py
import sys
def main():
u_gay = raw_input("Are you gay? (yes/no)")
if(u_gay == "no"):
trick = raw_input("Does your mom know you're gay? (yes/no)")
if(trick == "no"):
print("LOL ur gay bro")
else:
print("... Bet")
elif(u_gay == "yes"):
print("Facts, I respect it")
else: main()
if __name__ == '__main__':
main()
| none | 1 | 3.646694 | 4 |
|
engine/objects/primitives/icollider.py | vityaman/Pygame2DGameEngine | 0 | 6633319 | <reponame>vityaman/Pygame2DGameEngine<filename>engine/objects/primitives/icollider.py
from abc import ABC, abstractmethod
from engine.objects.primitives.vector2d import Vector2D
class ICollider(ABC):
@abstractmethod
def collides_with(self, other: 'ICollider') -> bool:
raise NotImplementedError()
@property
@abstractmethod
def position(self) -> Vector2D:
raise NotImplementedError()
@position.setter
@abstractmethod
def position(self, position: Vector2D):
raise NotImplementedError()
@abstractmethod
def shift_to_collide_with(self, other: 'ICollider') -> Vector2D:
raise NotImplementedError()
| from abc import ABC, abstractmethod
from engine.objects.primitives.vector2d import Vector2D
class ICollider(ABC):
@abstractmethod
def collides_with(self, other: 'ICollider') -> bool:
raise NotImplementedError()
@property
@abstractmethod
def position(self) -> Vector2D:
raise NotImplementedError()
@position.setter
@abstractmethod
def position(self, position: Vector2D):
raise NotImplementedError()
@abstractmethod
def shift_to_collide_with(self, other: 'ICollider') -> Vector2D:
raise NotImplementedError() | none | 1 | 3.397919 | 3 |
|
demo.py | B-C-WANG/MachineLearningTools | 3 | 6633320 | from soapml.Dataset import Dataset
from MLT.VAE.VAE1D import VAE1D
import numpy as np
from MLT.Regression.LinearNNFeatureExtraction import LinearNN
from MLT.Regression.GBR_FeatureImportanceEstimater import GBRFIE
def gbr_feature():
model = GBRFIE(X, y, test_split_ratio=0.3)
model.fit(n_estimators=40)
error = model.show_pred_train_test(plot_fig=True, point_size=100)
print(error)
model.print_out_importance()
def linear_NN():
X = np.load("X.npy")
y = np.load('y.npy')
model = LinearNN(X, y)
model.build_model()
model.train(epochs=4000)
model.show_results()
model.plot_weights()
def vae1d_demo():
dataset = Dataset.load("cnecdaDataset_test.smld")
datax = dataset.datasetx
datay = dataset.datasety
batch_size = 100
# make it can be // by batch size
more_sample = datax.shape[0] % batch_size
sample_num = datax.shape[0] - more_sample
datax = datax[:sample_num, :]
datay = datay[:sample_num]
original_dim = datax.shape[1]
intermediate_dim = 256
# normalize data
datax /= np.max(datax)
model = VAE1D(original_dim, intermediate_dim, batch_size)
model.fit(x_train=datax, epochs=30)
encode_result = model.encode(datax)
model.plot_encoded_result_as_dimension_reduction(encode_result, y=datay)
| from soapml.Dataset import Dataset
from MLT.VAE.VAE1D import VAE1D
import numpy as np
from MLT.Regression.LinearNNFeatureExtraction import LinearNN
from MLT.Regression.GBR_FeatureImportanceEstimater import GBRFIE
def gbr_feature():
model = GBRFIE(X, y, test_split_ratio=0.3)
model.fit(n_estimators=40)
error = model.show_pred_train_test(plot_fig=True, point_size=100)
print(error)
model.print_out_importance()
def linear_NN():
X = np.load("X.npy")
y = np.load('y.npy')
model = LinearNN(X, y)
model.build_model()
model.train(epochs=4000)
model.show_results()
model.plot_weights()
def vae1d_demo():
dataset = Dataset.load("cnecdaDataset_test.smld")
datax = dataset.datasetx
datay = dataset.datasety
batch_size = 100
# make it can be // by batch size
more_sample = datax.shape[0] % batch_size
sample_num = datax.shape[0] - more_sample
datax = datax[:sample_num, :]
datay = datay[:sample_num]
original_dim = datax.shape[1]
intermediate_dim = 256
# normalize data
datax /= np.max(datax)
model = VAE1D(original_dim, intermediate_dim, batch_size)
model.fit(x_train=datax, epochs=30)
encode_result = model.encode(datax)
model.plot_encoded_result_as_dimension_reduction(encode_result, y=datay)
| en | 0.699271 | # make it can be // by batch size # normalize data | 2.673519 | 3 |
challenges/2022-03-08-hidden-digits/solutions/python/xanderyzwich/xanderyzwich.py | aureliefomum/CodingDojo-1 | 0 | 6633321 | <filename>challenges/2022-03-08-hidden-digits/solutions/python/xanderyzwich/xanderyzwich.py
from unittest import TestCase
def hidden_digits(time_str):
result = ''
# Hours
first_hidden = '?' == time_str[0]
second_hidden = '?' == time_str[1]
if first_hidden and second_hidden:
result += '23'
else:
if first_hidden:
result += '2' if int(time_str[1]) < 5 else '1'
else:
result += time_str[0]
if second_hidden:
result += '3' if time_str[0] > '1' else '9'
else:
result += time_str[1]
# Separator
result += ':'
# Minutes
if '??' == time_str[3:]:
result += '59'
else:
result += '5' if '?' == time_str[3] else time_str[3]
result += '9' if '?' == time_str[4] else time_str[4]
return result
class TestHiddenDigits(TestCase):
def test_this(self):
assert hidden_digits("2?:?0") == "23:50"
assert hidden_digits("0?:3?") == "09:39"
assert hidden_digits("?7:?1") == "17:51"
assert hidden_digits("1?:22") == "19:22"
assert hidden_digits("00:00") == "00:00"
assert hidden_digits("??:??") == "23:59"
| <filename>challenges/2022-03-08-hidden-digits/solutions/python/xanderyzwich/xanderyzwich.py
from unittest import TestCase
def hidden_digits(time_str):
result = ''
# Hours
first_hidden = '?' == time_str[0]
second_hidden = '?' == time_str[1]
if first_hidden and second_hidden:
result += '23'
else:
if first_hidden:
result += '2' if int(time_str[1]) < 5 else '1'
else:
result += time_str[0]
if second_hidden:
result += '3' if time_str[0] > '1' else '9'
else:
result += time_str[1]
# Separator
result += ':'
# Minutes
if '??' == time_str[3:]:
result += '59'
else:
result += '5' if '?' == time_str[3] else time_str[3]
result += '9' if '?' == time_str[4] else time_str[4]
return result
class TestHiddenDigits(TestCase):
def test_this(self):
assert hidden_digits("2?:?0") == "23:50"
assert hidden_digits("0?:3?") == "09:39"
assert hidden_digits("?7:?1") == "17:51"
assert hidden_digits("1?:22") == "19:22"
assert hidden_digits("00:00") == "00:00"
assert hidden_digits("??:??") == "23:59"
| en | 0.3368 | # Hours # Separator # Minutes | 3.634702 | 4 |
tests/functional/test_model_completeness.py | doc-E-brown/botocore | 2 | 6633322 | # Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from botocore.session import Session
from botocore.loaders import Loader
from botocore.exceptions import DataNotFoundError
def _test_model_is_not_lost(service_name, type_name,
previous_version, latest_version):
# Make sure if a paginator and/or waiter exists in previous version,
# there will be a successor existing in latest version.
loader = Loader()
try:
previous = loader.load_service_model(
service_name, type_name, previous_version)
except DataNotFoundError:
pass
else:
try:
latest = loader.load_service_model(
service_name, type_name, latest_version)
except DataNotFoundError as e:
raise AssertionError(
"%s must exist for %s: %s" % (type_name, service_name, e))
def test_paginators_and_waiters_are_not_lost_in_new_version():
for service_name in Session().get_available_services():
versions = Loader().list_api_versions(service_name, 'service-2')
if len(versions) > 1:
for type_name in ['paginators-1', 'waiters-2']:
yield (_test_model_is_not_lost, service_name,
type_name, versions[-2], versions[-1])
| # Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from botocore.session import Session
from botocore.loaders import Loader
from botocore.exceptions import DataNotFoundError
def _test_model_is_not_lost(service_name, type_name,
previous_version, latest_version):
# Make sure if a paginator and/or waiter exists in previous version,
# there will be a successor existing in latest version.
loader = Loader()
try:
previous = loader.load_service_model(
service_name, type_name, previous_version)
except DataNotFoundError:
pass
else:
try:
latest = loader.load_service_model(
service_name, type_name, latest_version)
except DataNotFoundError as e:
raise AssertionError(
"%s must exist for %s: %s" % (type_name, service_name, e))
def test_paginators_and_waiters_are_not_lost_in_new_version():
for service_name in Session().get_available_services():
versions = Loader().list_api_versions(service_name, 'service-2')
if len(versions) > 1:
for type_name in ['paginators-1', 'waiters-2']:
yield (_test_model_is_not_lost, service_name,
type_name, versions[-2], versions[-1])
| en | 0.872452 | # Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. # Make sure if a paginator and/or waiter exists in previous version, # there will be a successor existing in latest version. | 2.060454 | 2 |
libtf/logparsers/tf_http_log.py | ThreshingFloor/libtf | 0 | 6633323 | from .tf_generic_log import TFGenericLog
class TFHttpLog(TFGenericLog):
def __init__(self, line_iterator, api_key, ports=None, base_uri=None):
# Default to commonly used HTTP ports if not specified
if not ports:
ports = ["80:tcp", "8080:tcp", "443:tcp"]
super(TFHttpLog, self).__init__(line_iterator, api_key, ports, base_uri)
| from .tf_generic_log import TFGenericLog
class TFHttpLog(TFGenericLog):
def __init__(self, line_iterator, api_key, ports=None, base_uri=None):
# Default to commonly used HTTP ports if not specified
if not ports:
ports = ["80:tcp", "8080:tcp", "443:tcp"]
super(TFHttpLog, self).__init__(line_iterator, api_key, ports, base_uri)
| en | 0.400536 | # Default to commonly used HTTP ports if not specified | 2.094649 | 2 |
benchmarks/producer.py | alauda/aster | 1 | 6633324 | <filename>benchmarks/producer.py
# -*- coding: utf-8 -*-
from kafka import KafkaProducer
producer = KafkaProducer(bootstrap_servers='localhost:9092')
if __name__ == '__main__':
s='0123456789abcdef'
i=0
while i<13:
i+=1
s = s +s
message_count = 30000
for _ in range(message_count):
producer.send('test-6', s)
producer.flush()
| <filename>benchmarks/producer.py
# -*- coding: utf-8 -*-
from kafka import KafkaProducer
producer = KafkaProducer(bootstrap_servers='localhost:9092')
if __name__ == '__main__':
s='0123456789abcdef'
i=0
while i<13:
i+=1
s = s +s
message_count = 30000
for _ in range(message_count):
producer.send('test-6', s)
producer.flush()
| en | 0.769321 | # -*- coding: utf-8 -*- | 2.091256 | 2 |
src/wheezy/captcha/comp.py | akornatskyy/wheezy.captcha | 4 | 6633325 | <gh_stars>1-10
""" ``comp`` module.
"""
try: # pragma: nocover
from PIL import Image, ImageFilter
from PIL.ImageColor import getrgb
from PIL.ImageDraw import Draw
from PIL.ImageFont import truetype
except ImportError: # pragma: nocover
import Image # noqa
import ImageFilter # noqa
from ImageColor import getrgb # noqa
from ImageDraw import Draw # noqa
from ImageFont import truetype # noqa
| """ ``comp`` module.
"""
try: # pragma: nocover
from PIL import Image, ImageFilter
from PIL.ImageColor import getrgb
from PIL.ImageDraw import Draw
from PIL.ImageFont import truetype
except ImportError: # pragma: nocover
import Image # noqa
import ImageFilter # noqa
from ImageColor import getrgb # noqa
from ImageDraw import Draw # noqa
from ImageFont import truetype # noqa | uz | 0.261562 | ``comp`` module. # pragma: nocover # pragma: nocover # noqa # noqa # noqa # noqa # noqa | 1.127334 | 1 |
tools/demo.py | yellowstarhx/person_search | 768 | 6633326 | <filename>tools/demo.py<gh_stars>100-1000
import _init_paths
import argparse
import time
import os
import sys
import os.path as osp
from glob import glob
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import caffe
from mpi4py import MPI
from fast_rcnn.test_probe import demo_exfeat
from fast_rcnn.test_gallery import demo_detect
from fast_rcnn.config import cfg, cfg_from_file, cfg_from_list
def main(args):
if args.cfg_file is not None:
cfg_from_file(args.cfg_file)
if args.set_cfgs is not None:
cfg_from_list(args.set_cfgs)
# Setup caffe
if args.gpu >= 0:
caffe.mpi_init()
caffe.set_mode_gpu()
caffe.set_device(cfg.GPU_ID)
else:
caffe.mpi_init()
caffe.set_mode_cpu()
# Get query image and roi
query_img = 'demo/query.jpg'
query_roi = [0, 0, 466, 943] # [x1, y1, x2, y2]
# Extract feature of the query person
net = caffe.Net(args.probe_def, args.caffemodel, caffe.TEST)
query_feat = demo_exfeat(net, query_img, query_roi)
del net # Necessary to release cuDNN conv static workspace
# Get gallery images
gallery_imgs = sorted(glob('demo/gallery*.jpg'))
# Detect and extract feature of persons in each gallery image
net = caffe.Net(args.gallery_def, args.caffemodel, caffe.TEST)
# Necessary to warm-up the net, otherwise the first image results are wrong
# Don't know why. Possibly a bug in caffe's memory optimization.
# Nevertheless, the results are correct after this warm-up.
demo_detect(net, query_img)
for gallery_img in gallery_imgs:
print gallery_img, '...'
boxes, features = demo_detect(net, gallery_img,
threshold=args.det_thresh)
if boxes is None:
print gallery_img, 'no detections'
continue
# Compute pairwise cosine similarities,
# equals to inner-products, as features are already L2-normed
similarities = features.dot(query_feat)
# Visualize the results
fig, ax = plt.subplots(figsize=(16, 9))
ax.imshow(plt.imread(gallery_img))
plt.axis('off')
for box, sim in zip(boxes, similarities):
x1, y1, x2, y2, _ = box
ax.add_patch(
plt.Rectangle((x1, y1), x2 - x1, y2 - y1,
fill=False, edgecolor='#4CAF50', linewidth=3.5))
ax.add_patch(
plt.Rectangle((x1, y1), x2 - x1, y2 - y1,
fill=False, edgecolor='white', linewidth=1))
ax.text(x1 + 5, y1 - 18, '{:.2f}'.format(sim),
bbox=dict(facecolor='#4CAF50', linewidth=0),
fontsize=20, color='white')
plt.tight_layout()
fig.savefig(gallery_img.replace('gallery', 'result'))
plt.show()
plt.close(fig)
del net
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Person Search Demo')
parser.add_argument('--gpu',
help='GPU id to be used, -1 for CPU. Default: 0',
type=int, default=0)
parser.add_argument('--gallery_def',
help='prototxt file defining the gallery network',
default='models/psdb/resnet50/eval_gallery.prototxt')
parser.add_argument('--probe_def',
help='prototxt file defining the probe network',
default='models/psdb/resnet50/eval_probe.prototxt')
parser.add_argument('--net', dest='caffemodel',
help='path to trained caffemodel',
default='output/psdb_train/resnet50/resnet50_iter_50000.caffemodel')
parser.add_argument('--det_thresh',
help="detection score threshold to be evaluated",
type=float, default=0.75)
parser.add_argument('--cfg', dest='cfg_file',
help='optional config file',
default='experiments/cfgs/resnet50.yml')
parser.add_argument('--set', dest='set_cfgs',
help='set config keys', default=None,
nargs=argparse.REMAINDER)
args = parser.parse_args()
main(args)
| <filename>tools/demo.py<gh_stars>100-1000
import _init_paths
import argparse
import time
import os
import sys
import os.path as osp
from glob import glob
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import caffe
from mpi4py import MPI
from fast_rcnn.test_probe import demo_exfeat
from fast_rcnn.test_gallery import demo_detect
from fast_rcnn.config import cfg, cfg_from_file, cfg_from_list
def main(args):
if args.cfg_file is not None:
cfg_from_file(args.cfg_file)
if args.set_cfgs is not None:
cfg_from_list(args.set_cfgs)
# Setup caffe
if args.gpu >= 0:
caffe.mpi_init()
caffe.set_mode_gpu()
caffe.set_device(cfg.GPU_ID)
else:
caffe.mpi_init()
caffe.set_mode_cpu()
# Get query image and roi
query_img = 'demo/query.jpg'
query_roi = [0, 0, 466, 943] # [x1, y1, x2, y2]
# Extract feature of the query person
net = caffe.Net(args.probe_def, args.caffemodel, caffe.TEST)
query_feat = demo_exfeat(net, query_img, query_roi)
del net # Necessary to release cuDNN conv static workspace
# Get gallery images
gallery_imgs = sorted(glob('demo/gallery*.jpg'))
# Detect and extract feature of persons in each gallery image
net = caffe.Net(args.gallery_def, args.caffemodel, caffe.TEST)
# Necessary to warm-up the net, otherwise the first image results are wrong
# Don't know why. Possibly a bug in caffe's memory optimization.
# Nevertheless, the results are correct after this warm-up.
demo_detect(net, query_img)
for gallery_img in gallery_imgs:
print gallery_img, '...'
boxes, features = demo_detect(net, gallery_img,
threshold=args.det_thresh)
if boxes is None:
print gallery_img, 'no detections'
continue
# Compute pairwise cosine similarities,
# equals to inner-products, as features are already L2-normed
similarities = features.dot(query_feat)
# Visualize the results
fig, ax = plt.subplots(figsize=(16, 9))
ax.imshow(plt.imread(gallery_img))
plt.axis('off')
for box, sim in zip(boxes, similarities):
x1, y1, x2, y2, _ = box
ax.add_patch(
plt.Rectangle((x1, y1), x2 - x1, y2 - y1,
fill=False, edgecolor='#4CAF50', linewidth=3.5))
ax.add_patch(
plt.Rectangle((x1, y1), x2 - x1, y2 - y1,
fill=False, edgecolor='white', linewidth=1))
ax.text(x1 + 5, y1 - 18, '{:.2f}'.format(sim),
bbox=dict(facecolor='#4CAF50', linewidth=0),
fontsize=20, color='white')
plt.tight_layout()
fig.savefig(gallery_img.replace('gallery', 'result'))
plt.show()
plt.close(fig)
del net
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Person Search Demo')
parser.add_argument('--gpu',
help='GPU id to be used, -1 for CPU. Default: 0',
type=int, default=0)
parser.add_argument('--gallery_def',
help='prototxt file defining the gallery network',
default='models/psdb/resnet50/eval_gallery.prototxt')
parser.add_argument('--probe_def',
help='prototxt file defining the probe network',
default='models/psdb/resnet50/eval_probe.prototxt')
parser.add_argument('--net', dest='caffemodel',
help='path to trained caffemodel',
default='output/psdb_train/resnet50/resnet50_iter_50000.caffemodel')
parser.add_argument('--det_thresh',
help="detection score threshold to be evaluated",
type=float, default=0.75)
parser.add_argument('--cfg', dest='cfg_file',
help='optional config file',
default='experiments/cfgs/resnet50.yml')
parser.add_argument('--set', dest='set_cfgs',
help='set config keys', default=None,
nargs=argparse.REMAINDER)
args = parser.parse_args()
main(args)
| en | 0.882159 | # Setup caffe # Get query image and roi # [x1, y1, x2, y2] # Extract feature of the query person # Necessary to release cuDNN conv static workspace # Get gallery images # Detect and extract feature of persons in each gallery image # Necessary to warm-up the net, otherwise the first image results are wrong # Don't know why. Possibly a bug in caffe's memory optimization. # Nevertheless, the results are correct after this warm-up. # Compute pairwise cosine similarities, # equals to inner-products, as features are already L2-normed # Visualize the results | 2.058521 | 2 |
sapextractor/utils/objclass_to_tables/get.py | aarkue/sap-meta-explorer | 2 | 6633327 | def apply(con, mandt="800"):
df = con.execute_read_sql("SELECT OBJECTCLAS, Count(*) FROM "+con.table_prefix+"CDHDR WHERE MANDANT = '"+mandt+"' GROUP BY OBJECTCLAS ORDER BY Count(*) DESC", ["OBJECTCLAS", "COUNT"])
df = df.to_dict("r")
df = {str(x["OBJECTCLAS"]): int(x["COUNT"]) for x in df}
return df
| def apply(con, mandt="800"):
df = con.execute_read_sql("SELECT OBJECTCLAS, Count(*) FROM "+con.table_prefix+"CDHDR WHERE MANDANT = '"+mandt+"' GROUP BY OBJECTCLAS ORDER BY Count(*) DESC", ["OBJECTCLAS", "COUNT"])
df = df.to_dict("r")
df = {str(x["OBJECTCLAS"]): int(x["COUNT"]) for x in df}
return df
| none | 1 | 2.872023 | 3 |
|
pybind/slxos/v17r_2_00/tm_state/tmcpustatsslot/__init__.py | extremenetworks/pybind | 0 | 6633328 | <reponame>extremenetworks/pybind
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
class tmcpustatsslot(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-sysdiag-operational - based on the path /tm-state/tmcpustatsslot. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: TM voq stats for CPU port per slot
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__slot_id','__cpugroup_id','__priority','__enquepkt','__enqueubytes','__discardpkt','__discardbytes','__currdepth','__maxdepth',)
_yang_name = 'tmcpustatsslot'
_rest_name = 'tmcpustatsslot'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__discardpkt = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="discardpkt", rest_name="discardpkt", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-sysdiag-operational', defining_module='brocade-sysdiag-operational', yang_type='uint64', is_config=False)
self.__enqueubytes = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="enqueubytes", rest_name="enqueubytes", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-sysdiag-operational', defining_module='brocade-sysdiag-operational', yang_type='uint64', is_config=False)
self.__discardbytes = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="discardbytes", rest_name="discardbytes", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-sysdiag-operational', defining_module='brocade-sysdiag-operational', yang_type='uint64', is_config=False)
self.__priority = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="priority", rest_name="priority", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-sysdiag-operational', defining_module='brocade-sysdiag-operational', yang_type='uint16', is_config=False)
self.__cpugroup_id = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="cpugroup-id", rest_name="cpugroup-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-sysdiag-operational', defining_module='brocade-sysdiag-operational', yang_type='uint16', is_config=False)
self.__slot_id = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="slot-id", rest_name="slot-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-sysdiag-operational', defining_module='brocade-sysdiag-operational', yang_type='uint16', is_config=False)
self.__maxdepth = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="maxdepth", rest_name="maxdepth", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-sysdiag-operational', defining_module='brocade-sysdiag-operational', yang_type='uint64', is_config=False)
self.__enquepkt = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="enquepkt", rest_name="enquepkt", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-sysdiag-operational', defining_module='brocade-sysdiag-operational', yang_type='uint64', is_config=False)
self.__currdepth = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="currdepth", rest_name="currdepth", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-sysdiag-operational', defining_module='brocade-sysdiag-operational', yang_type='uint64', is_config=False)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'tm-state', u'tmcpustatsslot']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'tm-state', u'tmcpustatsslot']
def _get_slot_id(self):
"""
Getter method for slot_id, mapped from YANG variable /tm_state/tmcpustatsslot/slot_id (uint16)
YANG Description: slot_id
"""
return self.__slot_id
def _set_slot_id(self, v, load=False):
"""
Setter method for slot_id, mapped from YANG variable /tm_state/tmcpustatsslot/slot_id (uint16)
If this variable is read-only (config: false) in the
source YANG file, then _set_slot_id is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_slot_id() directly.
YANG Description: slot_id
"""
parent = getattr(self, "_parent", None)
if parent is not None and load is False:
raise AttributeError("Cannot set keys directly when" +
" within an instantiated list")
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="slot-id", rest_name="slot-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-sysdiag-operational', defining_module='brocade-sysdiag-operational', yang_type='uint16', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """slot_id must be of a type compatible with uint16""",
'defined-type': "uint16",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="slot-id", rest_name="slot-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-sysdiag-operational', defining_module='brocade-sysdiag-operational', yang_type='uint16', is_config=False)""",
})
self.__slot_id = t
if hasattr(self, '_set'):
self._set()
def _unset_slot_id(self):
self.__slot_id = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="slot-id", rest_name="slot-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-sysdiag-operational', defining_module='brocade-sysdiag-operational', yang_type='uint16', is_config=False)
def _get_cpugroup_id(self):
"""
Getter method for cpugroup_id, mapped from YANG variable /tm_state/tmcpustatsslot/cpugroup_id (uint16)
YANG Description: cpugroup_id
"""
return self.__cpugroup_id
def _set_cpugroup_id(self, v, load=False):
"""
Setter method for cpugroup_id, mapped from YANG variable /tm_state/tmcpustatsslot/cpugroup_id (uint16)
If this variable is read-only (config: false) in the
source YANG file, then _set_cpugroup_id is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_cpugroup_id() directly.
YANG Description: cpugroup_id
"""
parent = getattr(self, "_parent", None)
if parent is not None and load is False:
raise AttributeError("Cannot set keys directly when" +
" within an instantiated list")
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="cpugroup-id", rest_name="cpugroup-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-sysdiag-operational', defining_module='brocade-sysdiag-operational', yang_type='uint16', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """cpugroup_id must be of a type compatible with uint16""",
'defined-type': "uint16",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="cpugroup-id", rest_name="cpugroup-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-sysdiag-operational', defining_module='brocade-sysdiag-operational', yang_type='uint16', is_config=False)""",
})
self.__cpugroup_id = t
if hasattr(self, '_set'):
self._set()
def _unset_cpugroup_id(self):
self.__cpugroup_id = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="cpugroup-id", rest_name="cpugroup-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-sysdiag-operational', defining_module='brocade-sysdiag-operational', yang_type='uint16', is_config=False)
def _get_priority(self):
"""
Getter method for priority, mapped from YANG variable /tm_state/tmcpustatsslot/priority (uint16)
YANG Description: Traffic class priority for TM VOQ statistics
"""
return self.__priority
def _set_priority(self, v, load=False):
"""
Setter method for priority, mapped from YANG variable /tm_state/tmcpustatsslot/priority (uint16)
If this variable is read-only (config: false) in the
source YANG file, then _set_priority is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_priority() directly.
YANG Description: Traffic class priority for TM VOQ statistics
"""
parent = getattr(self, "_parent", None)
if parent is not None and load is False:
raise AttributeError("Cannot set keys directly when" +
" within an instantiated list")
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="priority", rest_name="priority", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-sysdiag-operational', defining_module='brocade-sysdiag-operational', yang_type='uint16', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """priority must be of a type compatible with uint16""",
'defined-type': "uint16",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="priority", rest_name="priority", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-sysdiag-operational', defining_module='brocade-sysdiag-operational', yang_type='uint16', is_config=False)""",
})
self.__priority = t
if hasattr(self, '_set'):
self._set()
def _unset_priority(self):
self.__priority = YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="priority", rest_name="priority", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-sysdiag-operational', defining_module='brocade-sysdiag-operational', yang_type='uint16', is_config=False)
def _get_enquepkt(self):
"""
Getter method for enquepkt, mapped from YANG variable /tm_state/tmcpustatsslot/enquepkt (uint64)
YANG Description: enque_pkts
"""
return self.__enquepkt
def _set_enquepkt(self, v, load=False):
"""
Setter method for enquepkt, mapped from YANG variable /tm_state/tmcpustatsslot/enquepkt (uint64)
If this variable is read-only (config: false) in the
source YANG file, then _set_enquepkt is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_enquepkt() directly.
YANG Description: enque_pkts
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="enquepkt", rest_name="enquepkt", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-sysdiag-operational', defining_module='brocade-sysdiag-operational', yang_type='uint64', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """enquepkt must be of a type compatible with uint64""",
'defined-type': "uint64",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="enquepkt", rest_name="enquepkt", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-sysdiag-operational', defining_module='brocade-sysdiag-operational', yang_type='uint64', is_config=False)""",
})
self.__enquepkt = t
if hasattr(self, '_set'):
self._set()
def _unset_enquepkt(self):
self.__enquepkt = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="enquepkt", rest_name="enquepkt", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-sysdiag-operational', defining_module='brocade-sysdiag-operational', yang_type='uint64', is_config=False)
def _get_enqueubytes(self):
"""
Getter method for enqueubytes, mapped from YANG variable /tm_state/tmcpustatsslot/enqueubytes (uint64)
YANG Description: enque_bytes
"""
return self.__enqueubytes
def _set_enqueubytes(self, v, load=False):
"""
Setter method for enqueubytes, mapped from YANG variable /tm_state/tmcpustatsslot/enqueubytes (uint64)
If this variable is read-only (config: false) in the
source YANG file, then _set_enqueubytes is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_enqueubytes() directly.
YANG Description: enque_bytes
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="enqueubytes", rest_name="enqueubytes", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-sysdiag-operational', defining_module='brocade-sysdiag-operational', yang_type='uint64', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """enqueubytes must be of a type compatible with uint64""",
'defined-type': "uint64",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="enqueubytes", rest_name="enqueubytes", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-sysdiag-operational', defining_module='brocade-sysdiag-operational', yang_type='uint64', is_config=False)""",
})
self.__enqueubytes = t
if hasattr(self, '_set'):
self._set()
def _unset_enqueubytes(self):
self.__enqueubytes = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="enqueubytes", rest_name="enqueubytes", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-sysdiag-operational', defining_module='brocade-sysdiag-operational', yang_type='uint64', is_config=False)
def _get_discardpkt(self):
"""
Getter method for discardpkt, mapped from YANG variable /tm_state/tmcpustatsslot/discardpkt (uint64)
YANG Description: discard_pkts
"""
return self.__discardpkt
def _set_discardpkt(self, v, load=False):
"""
Setter method for discardpkt, mapped from YANG variable /tm_state/tmcpustatsslot/discardpkt (uint64)
If this variable is read-only (config: false) in the
source YANG file, then _set_discardpkt is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_discardpkt() directly.
YANG Description: discard_pkts
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="discardpkt", rest_name="discardpkt", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-sysdiag-operational', defining_module='brocade-sysdiag-operational', yang_type='uint64', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """discardpkt must be of a type compatible with uint64""",
'defined-type': "uint64",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="discardpkt", rest_name="discardpkt", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-sysdiag-operational', defining_module='brocade-sysdiag-operational', yang_type='uint64', is_config=False)""",
})
self.__discardpkt = t
if hasattr(self, '_set'):
self._set()
def _unset_discardpkt(self):
self.__discardpkt = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="discardpkt", rest_name="discardpkt", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-sysdiag-operational', defining_module='brocade-sysdiag-operational', yang_type='uint64', is_config=False)
def _get_discardbytes(self):
"""
Getter method for discardbytes, mapped from YANG variable /tm_state/tmcpustatsslot/discardbytes (uint64)
YANG Description: discard_bytes
"""
return self.__discardbytes
def _set_discardbytes(self, v, load=False):
"""
Setter method for discardbytes, mapped from YANG variable /tm_state/tmcpustatsslot/discardbytes (uint64)
If this variable is read-only (config: false) in the
source YANG file, then _set_discardbytes is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_discardbytes() directly.
YANG Description: discard_bytes
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="discardbytes", rest_name="discardbytes", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-sysdiag-operational', defining_module='brocade-sysdiag-operational', yang_type='uint64', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """discardbytes must be of a type compatible with uint64""",
'defined-type': "uint64",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="discardbytes", rest_name="discardbytes", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-sysdiag-operational', defining_module='brocade-sysdiag-operational', yang_type='uint64', is_config=False)""",
})
self.__discardbytes = t
if hasattr(self, '_set'):
self._set()
def _unset_discardbytes(self):
self.__discardbytes = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="discardbytes", rest_name="discardbytes", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-sysdiag-operational', defining_module='brocade-sysdiag-operational', yang_type='uint64', is_config=False)
def _get_currdepth(self):
"""
Getter method for currdepth, mapped from YANG variable /tm_state/tmcpustatsslot/currdepth (uint64)
YANG Description: current_queue_depth
"""
return self.__currdepth
def _set_currdepth(self, v, load=False):
"""
Setter method for currdepth, mapped from YANG variable /tm_state/tmcpustatsslot/currdepth (uint64)
If this variable is read-only (config: false) in the
source YANG file, then _set_currdepth is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_currdepth() directly.
YANG Description: current_queue_depth
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="currdepth", rest_name="currdepth", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-sysdiag-operational', defining_module='brocade-sysdiag-operational', yang_type='uint64', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """currdepth must be of a type compatible with uint64""",
'defined-type': "uint64",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="currdepth", rest_name="currdepth", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-sysdiag-operational', defining_module='brocade-sysdiag-operational', yang_type='uint64', is_config=False)""",
})
self.__currdepth = t
if hasattr(self, '_set'):
self._set()
def _unset_currdepth(self):
self.__currdepth = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="currdepth", rest_name="currdepth", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-sysdiag-operational', defining_module='brocade-sysdiag-operational', yang_type='uint64', is_config=False)
def _get_maxdepth(self):
"""
Getter method for maxdepth, mapped from YANG variable /tm_state/tmcpustatsslot/maxdepth (uint64)
YANG Description: max_queue_depth
"""
return self.__maxdepth
def _set_maxdepth(self, v, load=False):
"""
Setter method for maxdepth, mapped from YANG variable /tm_state/tmcpustatsslot/maxdepth (uint64)
If this variable is read-only (config: false) in the
source YANG file, then _set_maxdepth is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_maxdepth() directly.
YANG Description: max_queue_depth
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="maxdepth", rest_name="maxdepth", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-sysdiag-operational', defining_module='brocade-sysdiag-operational', yang_type='uint64', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """maxdepth must be of a type compatible with uint64""",
'defined-type': "uint64",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="maxdepth", rest_name="maxdepth", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-sysdiag-operational', defining_module='brocade-sysdiag-operational', yang_type='uint64', is_config=False)""",
})
self.__maxdepth = t
if hasattr(self, '_set'):
self._set()
def _unset_maxdepth(self):
self.__maxdepth = YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..18446744073709551615']}, int_size=64), is_leaf=True, yang_name="maxdepth", rest_name="maxdepth", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-sysdiag-operational', defining_module='brocade-sysdiag-operational', yang_type='uint64', is_config=False)
slot_id = __builtin__.property(_get_slot_id)
cpugroup_id = __builtin__.property(_get_cpugroup_id)
priority = __builtin__.property(_get_priority)
enquepkt = __builtin__.property(_get_enquepkt)
enqueubytes = __builtin__.property(_get_enqueubytes)
discardpkt = __builtin__.property(_get_discardpkt)
discardbytes = __builtin__.property(_get_discardbytes)
currdepth = __builtin__.property(_get_currdepth)
maxdepth = __builtin__.property(_get_maxdepth)
_pyangbind_elements = {'slot_id': slot_id, 'cpugroup_id': cpugroup_id, 'priority': priority, 'enquepkt': enquepkt, 'enqueubytes': enqueubytes, 'discardpkt': discardpkt, 'discardbytes': discardbytes, 'currdepth': currdepth, 'maxdepth': maxdepth, }
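# ---------------------------------------------------------------------------
# Usage sketch (added for illustration; not part of the generated bindings).
# A minimal, hedged example of how a backend might populate this operational
# container. All leaves are config:false, so the public attributes are
# read-only properties and values go in through the private _set_* methods,
# exactly as the docstrings above instruct. The sample values are
# hypothetical.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    stats = tmcpustatsslot()
    # List keys (slot-id, cpugroup-id, priority) can be set directly on a
    # standalone instance; inside an instantiated list they are set by the
    # parent, and calling _set_* there raises AttributeError.
    stats._set_slot_id(1)
    stats._set_cpugroup_id(0)
    stats._set_priority(7)
    # uint64 counters:
    stats._set_enquepkt(123456)
    stats._set_discardpkt(42)
    print('slot %d priority %d: enqueued=%d discarded=%d' % (
        stats.slot_id, stats.priority, stats.enquepkt, stats.discardpkt))
    # The restricted types reject out-of-range values:
    try:
        stats._set_priority(70000)  # > 65535 for a uint16 leaf
    except ValueError as err:
        print('rejected: %s' % err.args[0]['error-string'])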
cisco-ios-xr/ydk/models/cisco_ios_xr/Cisco_IOS_XR_sysadmin_fpd_infra_cli_fpdserv_ctrace.py | bopopescu/ACI | 0 | 6633329 |
""" Cisco_IOS_XR_sysadmin_fpd_infra_cli_fpdserv_ctrace
"""
from collections import OrderedDict
from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64
from ydk.filters import YFilter
from ydk.errors import YError, YModelError
from ydk.errors.error_handler import handle_type_error as _handle_type_error
class Fpdserv(Entity):
"""
.. attribute:: trace
show traceable processes
**type**\: list of :py:class:`Trace <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_fpd_infra_cli_fpdserv_ctrace.Fpdserv.Trace>`
"""
_prefix = 'fpdserv'
_revision = '2017-05-01'
def __init__(self):
super(Fpdserv, self).__init__()
self._top_entity = None
self.yang_name = "fpdserv"
self.yang_parent_name = "Cisco-IOS-XR-sysadmin-fpd-infra-cli-fpdserv-ctrace"
self.is_top_level_class = True
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([("trace", ("trace", Fpdserv.Trace))])
self._leafs = OrderedDict()
self.trace = YList(self)
self._segment_path = lambda: "Cisco-IOS-XR-sysadmin-fpd-infra-cli-fpdserv-ctrace:fpdserv"
def __setattr__(self, name, value):
self._perform_setattr(Fpdserv, [], name, value)
class Trace(Entity):
"""
show traceable processes
.. attribute:: buffer (key)
**type**\: str
.. attribute:: location
**type**\: list of :py:class:`Location <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_fpd_infra_cli_fpdserv_ctrace.Fpdserv.Trace.Location>`
"""
_prefix = 'fpdserv'
_revision = '2017-05-01'
def __init__(self):
super(Fpdserv.Trace, self).__init__()
self.yang_name = "trace"
self.yang_parent_name = "fpdserv"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = ['buffer']
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([("location", ("location", Fpdserv.Trace.Location))])
self._leafs = OrderedDict([
('buffer', YLeaf(YType.str, 'buffer')),
])
self.buffer = None
self.location = YList(self)
self._segment_path = lambda: "trace" + "[buffer='" + str(self.buffer) + "']"
self._absolute_path = lambda: "Cisco-IOS-XR-sysadmin-fpd-infra-cli-fpdserv-ctrace:fpdserv/%s" % self._segment_path()
def __setattr__(self, name, value):
self._perform_setattr(Fpdserv.Trace, ['buffer'], name, value)
class Location(Entity):
"""
.. attribute:: location_name (key)
**type**\: str
.. attribute:: all_options
**type**\: list of :py:class:`AllOptions <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_fpd_infra_cli_fpdserv_ctrace.Fpdserv.Trace.Location.AllOptions>`
"""
_prefix = 'fpdserv'
_revision = '2017-05-01'
def __init__(self):
super(Fpdserv.Trace.Location, self).__init__()
self.yang_name = "location"
self.yang_parent_name = "trace"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = ['location_name']
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([("all-options", ("all_options", Fpdserv.Trace.Location.AllOptions))])
self._leafs = OrderedDict([
('location_name', YLeaf(YType.str, 'location_name')),
])
self.location_name = None
self.all_options = YList(self)
self._segment_path = lambda: "location" + "[location_name='" + str(self.location_name) + "']"
def __setattr__(self, name, value):
self._perform_setattr(Fpdserv.Trace.Location, ['location_name'], name, value)
class AllOptions(Entity):
"""
.. attribute:: option (key)
**type**\: str
.. attribute:: trace_blocks
**type**\: list of :py:class:`TraceBlocks <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_fpd_infra_cli_fpdserv_ctrace.Fpdserv.Trace.Location.AllOptions.TraceBlocks>`
"""
_prefix = 'fpdserv'
_revision = '2017-05-01'
def __init__(self):
super(Fpdserv.Trace.Location.AllOptions, self).__init__()
self.yang_name = "all-options"
self.yang_parent_name = "location"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = ['option']
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([("trace-blocks", ("trace_blocks", Fpdserv.Trace.Location.AllOptions.TraceBlocks))])
self._leafs = OrderedDict([
('option', YLeaf(YType.str, 'option')),
])
self.option = None
self.trace_blocks = YList(self)
self._segment_path = lambda: "all-options" + "[option='" + str(self.option) + "']"
def __setattr__(self, name, value):
self._perform_setattr(Fpdserv.Trace.Location.AllOptions, ['option'], name, value)
class TraceBlocks(Entity):
"""
.. attribute:: data
Trace output block
**type**\: str
"""
_prefix = 'fpdserv'
_revision = '2017-05-01'
def __init__(self):
super(Fpdserv.Trace.Location.AllOptions.TraceBlocks, self).__init__()
self.yang_name = "trace-blocks"
self.yang_parent_name = "all-options"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([])
self._leafs = OrderedDict([
('data', YLeaf(YType.str, 'data')),
])
self.data = None
self._segment_path = lambda: "trace-blocks"
def __setattr__(self, name, value):
self._perform_setattr(Fpdserv.Trace.Location.AllOptions.TraceBlocks, ['data'], name, value)
def clone_ptr(self):
self._top_entity = Fpdserv()
return self._top_entity
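# Illustrative usage sketch (not part of the generated bindings; values are
# placeholders): the classes mirror the YANG list hierarchy, so populating a
# trace request means appending keyed entries level by level.
#
#     fpdserv = Fpdserv()
#     trace = Fpdserv.Trace()
#     trace.buffer = 'fpd'                 # list key
#     fpdserv.trace.append(trace)
#     loc = Fpdserv.Trace.Location()
#     loc.location_name = '0/RP0'          # hypothetical location key
#     trace.location.append(loc)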
| """ Cisco_IOS_XR_sysadmin_fpd_infra_cli_fpdserv_ctrace
"""
from collections import OrderedDict
from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64
from ydk.filters import YFilter
from ydk.errors import YError, YModelError
from ydk.errors.error_handler import handle_type_error as _handle_type_error
class Fpdserv(Entity):
"""
.. attribute:: trace
show traceable processes
**type**\: list of :py:class:`Trace <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_fpd_infra_cli_fpdserv_ctrace.Fpdserv.Trace>`
"""
_prefix = 'fpdserv'
_revision = '2017-05-01'
def __init__(self):
super(Fpdserv, self).__init__()
self._top_entity = None
self.yang_name = "fpdserv"
self.yang_parent_name = "Cisco-IOS-XR-sysadmin-fpd-infra-cli-fpdserv-ctrace"
self.is_top_level_class = True
self.has_list_ancestor = False
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([("trace", ("trace", Fpdserv.Trace))])
self._leafs = OrderedDict()
self.trace = YList(self)
self._segment_path = lambda: "Cisco-IOS-XR-sysadmin-fpd-infra-cli-fpdserv-ctrace:fpdserv"
def __setattr__(self, name, value):
self._perform_setattr(Fpdserv, [], name, value)
class Trace(Entity):
"""
show traceable processes
.. attribute:: buffer (key)
**type**\: str
.. attribute:: location
**type**\: list of :py:class:`Location <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_fpd_infra_cli_fpdserv_ctrace.Fpdserv.Trace.Location>`
"""
_prefix = 'fpdserv'
_revision = '2017-05-01'
def __init__(self):
super(Fpdserv.Trace, self).__init__()
self.yang_name = "trace"
self.yang_parent_name = "fpdserv"
self.is_top_level_class = False
self.has_list_ancestor = False
self.ylist_key_names = ['buffer']
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([("location", ("location", Fpdserv.Trace.Location))])
self._leafs = OrderedDict([
('buffer', YLeaf(YType.str, 'buffer')),
])
self.buffer = None
self.location = YList(self)
self._segment_path = lambda: "trace" + "[buffer='" + str(self.buffer) + "']"
self._absolute_path = lambda: "Cisco-IOS-XR-sysadmin-fpd-infra-cli-fpdserv-ctrace:fpdserv/%s" % self._segment_path()
def __setattr__(self, name, value):
self._perform_setattr(Fpdserv.Trace, ['buffer'], name, value)
class Location(Entity):
"""
.. attribute:: location_name (key)
**type**\: str
.. attribute:: all_options
**type**\: list of :py:class:`AllOptions <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_fpd_infra_cli_fpdserv_ctrace.Fpdserv.Trace.Location.AllOptions>`
"""
_prefix = 'fpdserv'
_revision = '2017-05-01'
def __init__(self):
super(Fpdserv.Trace.Location, self).__init__()
self.yang_name = "location"
self.yang_parent_name = "trace"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = ['location_name']
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([("all-options", ("all_options", Fpdserv.Trace.Location.AllOptions))])
self._leafs = OrderedDict([
('location_name', YLeaf(YType.str, 'location_name')),
])
self.location_name = None
self.all_options = YList(self)
self._segment_path = lambda: "location" + "[location_name='" + str(self.location_name) + "']"
def __setattr__(self, name, value):
self._perform_setattr(Fpdserv.Trace.Location, ['location_name'], name, value)
class AllOptions(Entity):
"""
.. attribute:: option (key)
**type**\: str
.. attribute:: trace_blocks
**type**\: list of :py:class:`TraceBlocks <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_fpd_infra_cli_fpdserv_ctrace.Fpdserv.Trace.Location.AllOptions.TraceBlocks>`
"""
_prefix = 'fpdserv'
_revision = '2017-05-01'
def __init__(self):
super(Fpdserv.Trace.Location.AllOptions, self).__init__()
self.yang_name = "all-options"
self.yang_parent_name = "location"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = ['option']
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([("trace-blocks", ("trace_blocks", Fpdserv.Trace.Location.AllOptions.TraceBlocks))])
self._leafs = OrderedDict([
('option', YLeaf(YType.str, 'option')),
])
self.option = None
self.trace_blocks = YList(self)
self._segment_path = lambda: "all-options" + "[option='" + str(self.option) + "']"
def __setattr__(self, name, value):
self._perform_setattr(Fpdserv.Trace.Location.AllOptions, ['option'], name, value)
class TraceBlocks(Entity):
"""
.. attribute:: data
Trace output block
**type**\: str
"""
_prefix = 'fpdserv'
_revision = '2017-05-01'
def __init__(self):
super(Fpdserv.Trace.Location.AllOptions.TraceBlocks, self).__init__()
self.yang_name = "trace-blocks"
self.yang_parent_name = "all-options"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_container_classes = OrderedDict([])
self._child_list_classes = OrderedDict([])
self._leafs = OrderedDict([
('data', YLeaf(YType.str, 'data')),
])
self.data = None
self._segment_path = lambda: "trace-blocks"
def __setattr__(self, name, value):
self._perform_setattr(Fpdserv.Trace.Location.AllOptions.TraceBlocks, ['data'], name, value)
def clone_ptr(self):
self._top_entity = Fpdserv()
return self._top_entity | en | 0.299807 | Cisco_IOS_XR_sysadmin_fpd_infra_cli_fpdserv_ctrace .. attribute:: trace show traceable processes **type**\: list of :py:class:`Trace <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_fpd_infra_cli_fpdserv_ctrace.Fpdserv.Trace>` show traceable processes .. attribute:: buffer (key) **type**\: str .. attribute:: location **type**\: list of :py:class:`Location <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_fpd_infra_cli_fpdserv_ctrace.Fpdserv.Trace.Location>` .. attribute:: location_name (key) **type**\: str .. attribute:: all_options **type**\: list of :py:class:`AllOptions <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_fpd_infra_cli_fpdserv_ctrace.Fpdserv.Trace.Location.AllOptions>` .. attribute:: option (key) **type**\: str .. attribute:: trace_blocks **type**\: list of :py:class:`TraceBlocks <ydk.models.cisco_ios_xr.Cisco_IOS_XR_sysadmin_fpd_infra_cli_fpdserv_ctrace.Fpdserv.Trace.Location.AllOptions.TraceBlocks>` .. attribute:: data Trace output block **type**\: str | 1.900437 | 2 |
core/core.py | helloqiu/silly_spider | 2 | 6633330 | <reponame>helloqiu/silly_spider<gh_stars>1-10
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import re
from .bag import *
from .dog import *
class Core:
origin_url = None
url_set = set([])
bag = Bag()
def __init__(self, url):
self.origin_url = url
self.bag.put(url)
def put(self, links):
for link in links:
if (re.findall(self.origin_url, link[0]) != []) and (not (link[0] in self.url_set)):
self.url_set.add(link[0])
self.bag.put(link[0])
print('Get new url: %s' % link[0])
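# Minimal usage sketch (URLs are placeholders): `put` expects an iterable of
# tuples whose first element is the URL -- e.g. the output of re.findall with
# one capture group -- and only links matching origin_url are queued, with
# duplicates skipped via url_set.
#
#     core = Core('http://example.com')
#     core.put([('http://example.com/a', ''), ('http://other.org/b', '')])
#     # queues only 'http://example.com/a'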
| # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import re
from .bag import *
from .dog import *
class Core:
origin_url = None
url_set = set([])
bag = Bag()
def __init__(self, url):
self.origin_url = url
self.bag.put(url)
def put(self, links):
for link in links:
if (re.findall(self.origin_url, link[0]) != []) and (not (link[0] in self.url_set)):
self.url_set.add(link[0])
self.bag.put(link[0])
print('Get new url: %s' % link[0]) | en | 0.769321 | # -*- coding: utf-8 -*- | 2.710583 | 3
src/sage/combinat/crystals/spins.py | bopopescu/sage | 3 | 6633331 | <gh_stars>1-10
r"""
Spin Crystals
These are the crystals associated with the three spin
representations: the spin representations of odd orthogonal groups
(or rather their double covers); and the `+` and `-` spin
representations of the even orthogonal groups.
We follow Kashiwara and Nakashima (Journal of Algebra 165, 1994) in
representing the elements of the spin crystal by sequences of signs
`\pm`.
"""
#TODO: Do we want the following two representations?
#
#Two other representations are available as attributes
#:meth:`Spin.internal_repn` and :meth:`Spin.signature` of the crystal element.
#
#- A numerical internal representation, an integer `n` such that if `n-1`
# is written in binary and the `1`'s are replaced by ``-``, the `0`'s by
# ``+``
#
#- The signature, which is a list in which ``+`` is replaced by `+1` and
# ``-`` by `-1`.
#*****************************************************************************
# Copyright (C) 2007 <NAME> <anne at math.ucdavis.edu>
# <NAME> <nthiery at users.sf.net>
# <NAME> <bump at match.stanford.edu>
#
# Distributed under the terms of the GNU General Public License (GPL)
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# The full text of the GPL is available at:
#
# http://www.gnu.org/licenses/
#****************************************************************************
from __future__ import print_function
from sage.structure.unique_representation import UniqueRepresentation
from sage.structure.parent import Parent
from sage.categories.classical_crystals import ClassicalCrystals
from sage.combinat.crystals.letters import LetterTuple
from sage.combinat.root_system.cartan_type import CartanType
from sage.combinat.tableau import Tableau
#########################
# Type B spin
#########################
def CrystalOfSpins(ct):
r"""
Return the spin crystal of the given type `B`.
This is a combinatorial model for the crystal with highest weight
`\Lambda_n` (the `n`-th fundamental weight). It has
`2^n` elements, here called Spins. See also
:func:`~sage.combinat.crystals.letters.CrystalOfLetters`,
:func:`~sage.combinat.crystals.spins.CrystalOfSpinsPlus`,
and :func:`~sage.combinat.crystals.spins.CrystalOfSpinsMinus`.
INPUT:
- ``['B', n]`` - A Cartan type `B_n`.
EXAMPLES::
sage: C = crystals.Spins(['B',3])
sage: C.list()
[+++, ++-, +-+, -++, +--, -+-, --+, ---]
sage: C.cartan_type()
['B', 3]
::
sage: [x.signature() for x in C]
['+++', '++-', '+-+', '-++', '+--', '-+-', '--+', '---']
TESTS::
sage: crystals.TensorProduct(C,C,generators=[[C.list()[0],C.list()[0]]]).cardinality()
35
"""
ct = CartanType(ct)
if ct[0] == 'B':
return GenericCrystalOfSpins(ct, Spin_crystal_type_B_element, "spins")
else:
raise NotImplementedError
#########################
# Type D spins
#########################
def CrystalOfSpinsPlus(ct):
r"""
Return the plus spin crystal of the given type D.
This is the crystal with highest weight `Lambda_n` (the
`n`-th fundamental weight).
INPUT:
- ``['D', n]`` - A Cartan type `D_n`.
EXAMPLES::
sage: D = crystals.SpinsPlus(['D',4])
sage: D.list()
[++++, ++--, +-+-, -++-, +--+, -+-+, --++, ----]
::
sage: [x.signature() for x in D]
['++++', '++--', '+-+-', '-++-', '+--+', '-+-+', '--++', '----']
TESTS::
sage: TestSuite(D).run()
"""
ct = CartanType(ct)
if ct[0] == 'D':
return GenericCrystalOfSpins(ct, Spin_crystal_type_D_element, "plus")
else:
raise NotImplementedError
def CrystalOfSpinsMinus(ct):
r"""
Return the minus spin crystal of the given type D.
This is the crystal with highest weight `Lambda_{n-1}`
(the `(n-1)`-st fundamental weight).
INPUT:
- ``['D', n]`` - A Cartan type `D_n`.
EXAMPLES::
sage: E = crystals.SpinsMinus(['D',4])
sage: E.list()
[+++-, ++-+, +-++, -+++, +---, -+--, --+-, ---+]
sage: [x.signature() for x in E]
['+++-', '++-+', '+-++', '-+++', '+---', '-+--', '--+-', '---+']
TESTS::
sage: len(crystals.TensorProduct(E,E,generators=[[E[0],E[0]]]).list())
35
sage: D = crystals.SpinsPlus(['D',4])
sage: len(crystals.TensorProduct(D,E,generators=[[D.list()[0],E.list()[0]]]).list())
56
"""
ct = CartanType(ct)
if ct[0] == 'D':
return GenericCrystalOfSpins(ct, Spin_crystal_type_D_element, "minus")
else:
raise NotImplementedError
class GenericCrystalOfSpins(UniqueRepresentation, Parent):
"""
A generic crystal of spins.
"""
def __init__(self, ct, element_class, case):
"""
EXAMPLES::
sage: E = crystals.SpinsMinus(['D',4])
sage: TestSuite(E).run()
"""
self._cartan_type = CartanType(ct)
if case == "spins":
self.rename("The crystal of spins for type %s"%ct)
elif case == "plus":
self.rename("The plus crystal of spins for type %s"%ct)
else:
self.rename("The minus crystal of spins for type %s"%ct)
self.Element = element_class
Parent.__init__(self, category = ClassicalCrystals())
if case == "minus":
generator = [1]*(ct[1]-1)
generator.append(-1)
else:
generator = [1]*ct[1]
self.module_generators = (self.element_class(self, tuple(generator)),)
def _element_constructor_(self, value):
"""
Construct an element of ``self`` from ``value``.
EXAMPLES::
sage: C = crystals.Spins(['B',3])
sage: x = C((1,1,1)); x
+++
sage: y = C([1,1,1]); y
+++
sage: x == y
True
"""
return self.element_class(self, tuple(value))
def digraph(self):
"""
Return the directed graph associated to ``self``.
EXAMPLES::
sage: crystals.Spins(['B',3]).digraph()
Digraph on 8 vertices
"""
try:
return self._digraph
except AttributeError:
pass
self._digraph = super(GenericCrystalOfSpins, self).digraph()
self._digraph.copy(immutable=True)
return self._digraph
def lt_elements(self, x,y):
r"""
Return ``True`` if and only if there is a path from ``x`` to ``y``
in the crystal graph.
Because the crystal graph is classical, it is a directed acyclic
graph which can be interpreted as a poset. This function implements
the comparison function of this poset.
EXAMPLES::
sage: C = crystals.Spins(['B',3])
sage: x = C([1,1,1])
sage: y = C([-1,-1,-1])
sage: C.lt_elements(x,y)
True
sage: C.lt_elements(y,x)
False
sage: C.lt_elements(x,x)
False
"""
if x.parent() is not self or y.parent() is not self:
raise ValueError("both elements must be in this crystal")
try:
GC = self._digraph_closure
except AttributeError:
GC = self.digraph().transitive_closure()
self._digraph_closure = GC
if GC.has_edge(x,y):
return True
return False
class Spin(LetterTuple):
"""
A spin letter in the crystal of spins.
EXAMPLES::
sage: C = crystals.Spins(['B',3])
sage: c = C([1,1,1])
sage: TestSuite(c).run()
sage: C([1,1,1]).parent()
The crystal of spins for type ['B', 3]
sage: c = C([1,1,1])
sage: c._repr_()
'+++'
sage: D = crystals.Spins(['B',4])
sage: a = C([1,1,1])
sage: b = C([-1,-1,-1])
sage: c = D([1,1,1,1])
sage: a == a
True
sage: a == b
False
sage: b == c
False
"""
def signature(self):
"""
Return the signature of ``self``.
EXAMPLES::
sage: C = crystals.Spins(['B',3])
sage: C([1,1,1]).signature()
'+++'
sage: C([1,1,-1]).signature()
'++-'
"""
sword = ""
for x in range(self.parent().cartan_type().n):
sword += "+" if self.value[x] == 1 else "-"
return sword
def _repr_(self):
"""
Represents the spin elements in terms of its signature.
EXAMPLES::
sage: C = crystals.Spins(['B',3])
sage: b = C([1,1,-1])
sage: b
++-
sage: b._repr_()
'++-'
"""
return self.signature()
def _repr_diagram(self):
"""
Return a representation of ``self`` as a diagram.
EXAMPLES::
sage: C = crystals.Spins(['B',3])
sage: b = C([1,1,-1])
sage: print(b._repr_diagram())
+
+
-
"""
return '\n'.join(self.signature())
def pp(self):
"""
Pretty print ``self`` as a column.
EXAMPLES::
sage: C = crystals.Spins(['B',3])
sage: b = C([1,1,-1])
sage: b.pp()
+
+
-
"""
print(self._repr_diagram())
def _latex_(self):
r"""
Gives the latex output of a spin column.
EXAMPLES::
sage: C = crystals.Spins(['B',3])
sage: b = C([1,1,-1])
sage: print(b._latex_())
{\def\lr#1{\multicolumn{1}{|@{\hspace{.6ex}}c@{\hspace{.6ex}}|}{\raisebox{-.3ex}{$#1$}}}
\raisebox{-.6ex}{$\begin{array}[b]{*{1}c}\cline{1-1}
\lr{-}\\\cline{1-1}
\lr{+}\\\cline{1-1}
\lr{+}\\\cline{1-1}
\end{array}$}
}
"""
return Tableau([[i] for i in reversed(self.signature())])._latex_()
def epsilon(self, i):
r"""
Return `\varepsilon_i` of ``self``.
EXAMPLES::
sage: C = crystals.Spins(['B',3])
sage: [[C[m].epsilon(i) for i in range(1,4)] for m in range(8)]
[[0, 0, 0], [0, 0, 1], [0, 1, 0], [1, 0, 0],
[0, 0, 1], [1, 0, 1], [0, 1, 0], [0, 0, 1]]
"""
if self.e(i) is None:
return 0
return 1
def phi(self, i):
r"""
Return `\varphi_i` of ``self``.
EXAMPLES::
sage: C = crystals.Spins(['B',3])
sage: [[C[m].phi(i) for i in range(1,4)] for m in range(8)]
[[0, 0, 1], [0, 1, 0], [1, 0, 1], [0, 0, 1],
[1, 0, 0], [0, 1, 0], [0, 0, 1], [0, 0, 0]]
"""
if self.f(i) is None:
return 0
return 1
class Spin_crystal_type_B_element(Spin):
r"""
Type B spin representation crystal element
"""
def e(self, i):
r"""
Returns the action of `e_i` on self.
EXAMPLES::
sage: C = crystals.Spins(['B',3])
sage: [[C[m].e(i) for i in range(1,4)] for m in range(8)]
[[None, None, None], [None, None, +++], [None, ++-, None], [+-+, None, None],
[None, None, +-+], [+--, None, -++], [None, -+-, None], [None, None, --+]]
"""
assert i in self.index_set()
rank = self.parent().cartan_type().n
if i < rank:
if self.value[i-1] == -1 and self.value[i] == 1:
ret = [self.value[x] for x in range(rank)]
ret[i-1] = 1
ret[i] = -1
return self.__class__(self.parent(), tuple(ret))
elif i == rank:
if self.value[i-1] == -1:
ret = [self.value[x] for x in range(rank)]
ret[i-1] = 1
return self.__class__(self.parent(), tuple(ret))
return None
def f(self, i):
r"""
Returns the action of `f_i` on self.
EXAMPLES::
sage: C = crystals.Spins(['B',3])
sage: [[C[m].f(i) for i in range(1,4)] for m in range(8)]
[[None, None, ++-], [None, +-+, None], [-++, None, +--], [None, None, -+-],
[-+-, None, None], [None, --+, None], [None, None, ---], [None, None, None]]
"""
assert i in self.index_set()
rank = self.parent().cartan_type().n
if i < rank:
if self.value[i-1] == 1 and self.value[i] == -1:
ret = [self.value[x] for x in range(rank)]
ret[i-1] = -1
ret[i] = 1
return self.__class__(self.parent(), tuple(ret))
elif i == rank:
if self.value[i-1] == 1:
ret = [self.value[x] for x in range(rank)]
ret[i-1] = -1
return self.__class__(self.parent(), tuple(ret))
return None
class Spin_crystal_type_D_element(Spin):
r"""
Type D spin representation crystal element
"""
def e(self, i):
r"""
Returns the action of `e_i` on self.
EXAMPLES::
sage: D = crystals.SpinsPlus(['D',4])
sage: [[D.list()[m].e(i) for i in range(1,4)] for m in range(8)]
[[None, None, None], [None, None, None], [None, ++--, None], [+-+-, None, None],
[None, None, +-+-], [+--+, None, -++-], [None, -+-+, None], [None, None, None]]
::
sage: E = crystals.SpinsMinus(['D',4])
sage: [[E[m].e(i) for i in range(1,4)] for m in range(8)]
[[None, None, None], [None, None, +++-], [None, ++-+, None], [+-++, None, None],
[None, None, None], [+---, None, None], [None, -+--, None], [None, None, --+-]]
"""
assert i in self.index_set()
rank = self.parent().cartan_type().n
if i < rank:
if self.value[i-1] == -1 and self.value[i] == 1:
ret = [self.value[x] for x in range(rank)]
ret[i-1] = 1
ret[i] = -1
return self.__class__(self.parent(), tuple(ret))
elif i == rank:
if self.value[i-2] == -1 and self.value[i-1] == -1:
ret = [self.value[x] for x in range(rank)]
ret[i-2] = 1
ret[i-1] = 1
return self.__class__(self.parent(), tuple(ret))
return None
def f(self, i):
r"""
Returns the action of `f_i` on self.
EXAMPLES::
sage: D = crystals.SpinsPlus(['D',4])
sage: [[D.list()[m].f(i) for i in range(1,4)] for m in range(8)]
[[None, None, None], [None, +-+-, None], [-++-, None, +--+], [None, None, -+-+],
[-+-+, None, None], [None, --++, None], [None, None, None], [None, None, None]]
::
sage: E = crystals.SpinsMinus(['D',4])
sage: [[E[m].f(i) for i in range(1,4)] for m in range(8)]
[[None, None, ++-+], [None, +-++, None], [-+++, None, None], [None, None, None],
[-+--, None, None], [None, --+-, None], [None, None, ---+], [None, None, None]]
"""
assert i in self.index_set()
rank = self.parent().cartan_type().n
if i < rank:
if self.value[i-1] == 1 and self.value[i] == -1:
ret = [self.value[x] for x in range(rank)]
ret[i-1] = -1
ret[i] = 1
return self.__class__(self.parent(), tuple(ret))
elif i == rank:
if self.value[i-2] == 1 and self.value[i-1] == 1:
ret = [self.value[x] for x in range(rank)]
ret[i-2] = -1
ret[i-1] = -1
return self.__class__(self.parent(), tuple(ret))
return None
| r"""
Spin Crystals
These are the crystals associated with the three spin
representations: the spin representations of odd orthogonal groups
(or rather their double covers); and the `+` and `-` spin
representations of the even orthogonal groups.
We follow Kashiwara and Nakashima (Journal of Algebra 165, 1994) in
representing the elements of the spin crystal by sequences of signs
`\pm`.
"""
#TODO: Do we want the following two representations?
#
#Two other representations are available as attributes
#:meth:`Spin.internal_repn` and :meth:`Spin.signature` of the crystal element.
#
#- A numerical internal representation, an integer `n` such that if `n-1`
# is written in binary and the `1`'s are replaced by ``-``, the `0`'s by
# ``+``
#
#- The signature, which is a list in which ``+`` is replaced by `+1` and
# ``-`` by `-1`.
#*****************************************************************************
# Copyright (C) 2007 <NAME> <anne at math.ucdavis.edu>
# <NAME> <nthiery at users.sf.net>
# <NAME> <bump at match.stanford.edu>
#
# Distributed under the terms of the GNU General Public License (GPL)
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# The full text of the GPL is available at:
#
# http://www.gnu.org/licenses/
#****************************************************************************
from __future__ import print_function
from sage.structure.unique_representation import UniqueRepresentation
from sage.structure.parent import Parent
from sage.categories.classical_crystals import ClassicalCrystals
from sage.combinat.crystals.letters import LetterTuple
from sage.combinat.root_system.cartan_type import CartanType
from sage.combinat.tableau import Tableau
#########################
# Type B spin
#########################
def CrystalOfSpins(ct):
r"""
Return the spin crystal of the given type `B`.
This is a combinatorial model for the crystal with highest weight
`\Lambda_n` (the `n`-th fundamental weight). It has
`2^n` elements, here called Spins. See also
:func:`~sage.combinat.crystals.letters.CrystalOfLetters`,
:func:`~sage.combinat.crystals.spins.CrystalOfSpinsPlus`,
and :func:`~sage.combinat.crystals.spins.CrystalOfSpinsMinus`.
INPUT:
- ``['B', n]`` - A Cartan type `B_n`.
EXAMPLES::
sage: C = crystals.Spins(['B',3])
sage: C.list()
[+++, ++-, +-+, -++, +--, -+-, --+, ---]
sage: C.cartan_type()
['B', 3]
::
sage: [x.signature() for x in C]
['+++', '++-', '+-+', '-++', '+--', '-+-', '--+', '---']
TESTS::
sage: crystals.TensorProduct(C,C,generators=[[C.list()[0],C.list()[0]]]).cardinality()
35
"""
ct = CartanType(ct)
if ct[0] == 'B':
return GenericCrystalOfSpins(ct, Spin_crystal_type_B_element, "spins")
else:
raise NotImplementedError
#########################
# Type D spins
#########################
def CrystalOfSpinsPlus(ct):
r"""
Return the plus spin crystal of the given type D.
This is the crystal with highest weight `\Lambda_n` (the
`n`-th fundamental weight).
INPUT:
- ``['D', n]`` - A Cartan type `D_n`.
EXAMPLES::
sage: D = crystals.SpinsPlus(['D',4])
sage: D.list()
[++++, ++--, +-+-, -++-, +--+, -+-+, --++, ----]
::
sage: [x.signature() for x in D]
['++++', '++--', '+-+-', '-++-', '+--+', '-+-+', '--++', '----']
TESTS::
sage: TestSuite(D).run()
"""
ct = CartanType(ct)
if ct[0] == 'D':
return GenericCrystalOfSpins(ct, Spin_crystal_type_D_element, "plus")
else:
raise NotImplementedError
def CrystalOfSpinsMinus(ct):
r"""
Return the minus spin crystal of the given type D.
This is the crystal with highest weight `\Lambda_{n-1}`
(the `(n-1)`-st fundamental weight).
INPUT:
- ``['D', n]`` - A Cartan type `D_n`.
EXAMPLES::
sage: E = crystals.SpinsMinus(['D',4])
sage: E.list()
[+++-, ++-+, +-++, -+++, +---, -+--, --+-, ---+]
sage: [x.signature() for x in E]
['+++-', '++-+', '+-++', '-+++', '+---', '-+--', '--+-', '---+']
TESTS::
sage: len(crystals.TensorProduct(E,E,generators=[[E[0],E[0]]]).list())
35
sage: D = crystals.SpinsPlus(['D',4])
sage: len(crystals.TensorProduct(D,E,generators=[[D.list()[0],E.list()[0]]]).list())
56
"""
ct = CartanType(ct)
if ct[0] == 'D':
return GenericCrystalOfSpins(ct, Spin_crystal_type_D_element, "minus")
else:
raise NotImplementedError
class GenericCrystalOfSpins(UniqueRepresentation, Parent):
"""
A generic crystal of spins.
"""
def __init__(self, ct, element_class, case):
"""
EXAMPLES::
sage: E = crystals.SpinsMinus(['D',4])
sage: TestSuite(E).run()
"""
self._cartan_type = CartanType(ct)
if case == "spins":
self.rename("The crystal of spins for type %s"%ct)
elif case == "plus":
self.rename("The plus crystal of spins for type %s"%ct)
else:
self.rename("The minus crystal of spins for type %s"%ct)
self.Element = element_class
Parent.__init__(self, category = ClassicalCrystals())
if case == "minus":
generator = [1]*(ct[1]-1)
generator.append(-1)
else:
generator = [1]*ct[1]
self.module_generators = (self.element_class(self, tuple(generator)),)
def _element_constructor_(self, value):
"""
Construct an element of ``self`` from ``value``.
EXAMPLES::
sage: C = crystals.Spins(['B',3])
sage: x = C((1,1,1)); x
+++
sage: y = C([1,1,1]); y
+++
sage: x == y
True
"""
return self.element_class(self, tuple(value))
def digraph(self):
"""
Return the directed graph associated to ``self``.
EXAMPLES::
sage: crystals.Spins(['B',3]).digraph()
Digraph on 8 vertices
"""
try:
return self._digraph
except AttributeError:
pass
self._digraph = super(GenericCrystalOfSpins, self).digraph()
self._digraph.copy(immutable=True)
return self._digraph
def lt_elements(self, x,y):
r"""
Return ``True`` if and only if there is a path from ``x`` to ``y``
in the crystal graph.
Because the crystal graph is classical, it is a directed acyclic
graph which can be interpreted as a poset. This function implements
the comparison function of this poset.
EXAMPLES::
sage: C = crystals.Spins(['B',3])
sage: x = C([1,1,1])
sage: y = C([-1,-1,-1])
sage: C.lt_elements(x,y)
True
sage: C.lt_elements(y,x)
False
sage: C.lt_elements(x,x)
False
"""
if x.parent() is not self or y.parent() is not self:
raise ValueError("both elements must be in this crystal")
try:
GC = self._digraph_closure
except AttributeError:
GC = self.digraph().transitive_closure()
self._digraph_closure = GC
if GC.has_edge(x,y):
return True
return False
class Spin(LetterTuple):
"""
A spin letter in the crystal of spins.
EXAMPLES::
sage: C = crystals.Spins(['B',3])
sage: c = C([1,1,1])
sage: TestSuite(c).run()
sage: C([1,1,1]).parent()
The crystal of spins for type ['B', 3]
sage: c = C([1,1,1])
sage: c._repr_()
'+++'
sage: D = crystals.Spins(['B',4])
sage: a = C([1,1,1])
sage: b = C([-1,-1,-1])
sage: c = D([1,1,1,1])
sage: a == a
True
sage: a == b
False
sage: b == c
False
"""
def signature(self):
"""
Return the signature of ``self``.
EXAMPLES::
sage: C = crystals.Spins(['B',3])
sage: C([1,1,1]).signature()
'+++'
sage: C([1,1,-1]).signature()
'++-'
"""
sword = ""
for x in range(self.parent().cartan_type().n):
sword += "+" if self.value[x] == 1 else "-"
return sword
def _repr_(self):
"""
Represents the spin elements in terms of its signature.
EXAMPLES::
sage: C = crystals.Spins(['B',3])
sage: b = C([1,1,-1])
sage: b
++-
sage: b._repr_()
'++-'
"""
return self.signature()
def _repr_diagram(self):
"""
Return a representation of ``self`` as a diagram.
EXAMPLES::
sage: C = crystals.Spins(['B',3])
sage: b = C([1,1,-1])
sage: print(b._repr_diagram())
+
+
-
"""
return '\n'.join(self.signature())
def pp(self):
"""
Pretty print ``self`` as a column.
EXAMPLES::
sage: C = crystals.Spins(['B',3])
sage: b = C([1,1,-1])
sage: b.pp()
+
+
-
"""
print(self._repr_diagram())
def _latex_(self):
r"""
Gives the latex output of a spin column.
EXAMPLES::
sage: C = crystals.Spins(['B',3])
sage: b = C([1,1,-1])
sage: print(b._latex_())
{\def\lr#1{\multicolumn{1}{|@{\hspace{.6ex}}c@{\hspace{.6ex}}|}{\raisebox{-.3ex}{$#1$}}}
\raisebox{-.6ex}{$\begin{array}[b]{*{1}c}\cline{1-1}
\lr{-}\\\cline{1-1}
\lr{+}\\\cline{1-1}
\lr{+}\\\cline{1-1}
\end{array}$}
}
"""
return Tableau([[i] for i in reversed(self.signature())])._latex_()
def epsilon(self, i):
r"""
Return `\varepsilon_i` of ``self``.
EXAMPLES::
sage: C = crystals.Spins(['B',3])
sage: [[C[m].epsilon(i) for i in range(1,4)] for m in range(8)]
[[0, 0, 0], [0, 0, 1], [0, 1, 0], [1, 0, 0],
[0, 0, 1], [1, 0, 1], [0, 1, 0], [0, 0, 1]]
"""
if self.e(i) is None:
return 0
return 1
def phi(self, i):
r"""
Return `\varphi_i` of ``self``.
EXAMPLES::
sage: C = crystals.Spins(['B',3])
sage: [[C[m].phi(i) for i in range(1,4)] for m in range(8)]
[[0, 0, 1], [0, 1, 0], [1, 0, 1], [0, 0, 1],
[1, 0, 0], [0, 1, 0], [0, 0, 1], [0, 0, 0]]
"""
if self.f(i) is None:
return 0
return 1
class Spin_crystal_type_B_element(Spin):
r"""
Type B spin representation crystal element
"""
def e(self, i):
r"""
Returns the action of `e_i` on self.
EXAMPLES::
sage: C = crystals.Spins(['B',3])
sage: [[C[m].e(i) for i in range(1,4)] for m in range(8)]
[[None, None, None], [None, None, +++], [None, ++-, None], [+-+, None, None],
[None, None, +-+], [+--, None, -++], [None, -+-, None], [None, None, --+]]
"""
assert i in self.index_set()
rank = self.parent().cartan_type().n
if i < rank:
if self.value[i-1] == -1 and self.value[i] == 1:
ret = [self.value[x] for x in range(rank)]
ret[i-1] = 1
ret[i] = -1
return self.__class__(self.parent(), tuple(ret))
elif i == rank:
if self.value[i-1] == -1:
ret = [self.value[x] for x in range(rank)]
ret[i-1] = 1
return self.__class__(self.parent(), tuple(ret))
return None
def f(self, i):
r"""
Returns the action of `f_i` on self.
EXAMPLES::
sage: C = crystals.Spins(['B',3])
sage: [[C[m].f(i) for i in range(1,4)] for m in range(8)]
[[None, None, ++-], [None, +-+, None], [-++, None, +--], [None, None, -+-],
[-+-, None, None], [None, --+, None], [None, None, ---], [None, None, None]]
"""
assert i in self.index_set()
rank = self.parent().cartan_type().n
if i < rank:
if self.value[i-1] == 1 and self.value[i] == -1:
ret = [self.value[x] for x in range(rank)]
ret[i-1] = -1
ret[i] = 1
return self.__class__(self.parent(), tuple(ret))
elif i == rank:
if self.value[i-1] == 1:
ret = [self.value[x] for x in range(rank)]
ret[i-1] = -1
return self.__class__(self.parent(), tuple(ret))
return None
class Spin_crystal_type_D_element(Spin):
r"""
Type D spin representation crystal element
"""
def e(self, i):
r"""
Returns the action of `e_i` on self.
EXAMPLES::
sage: D = crystals.SpinsPlus(['D',4])
sage: [[D.list()[m].e(i) for i in range(1,4)] for m in range(8)]
[[None, None, None], [None, None, None], [None, ++--, None], [+-+-, None, None],
[None, None, +-+-], [+--+, None, -++-], [None, -+-+, None], [None, None, None]]
::
sage: E = crystals.SpinsMinus(['D',4])
sage: [[E[m].e(i) for i in range(1,4)] for m in range(8)]
[[None, None, None], [None, None, +++-], [None, ++-+, None], [+-++, None, None],
[None, None, None], [+---, None, None], [None, -+--, None], [None, None, --+-]]
"""
assert i in self.index_set()
rank = self.parent().cartan_type().n
if i < rank:
if self.value[i-1] == -1 and self.value[i] == 1:
ret = [self.value[x] for x in range(rank)]
ret[i-1] = 1
ret[i] = -1
return self.__class__(self.parent(), tuple(ret))
elif i == rank:
if self.value[i-2] == -1 and self.value[i-1] == -1:
ret = [self.value[x] for x in range(rank)]
ret[i-2] = 1
ret[i-1] = 1
return self.__class__(self.parent(), tuple(ret))
return None
def f(self, i):
r"""
Returns the action of `f_i` on self.
EXAMPLES::
sage: D = crystals.SpinsPlus(['D',4])
sage: [[D.list()[m].f(i) for i in range(1,4)] for m in range(8)]
[[None, None, None], [None, +-+-, None], [-++-, None, +--+], [None, None, -+-+],
[-+-+, None, None], [None, --++, None], [None, None, None], [None, None, None]]
::
sage: E = crystals.SpinsMinus(['D',4])
sage: [[E[m].f(i) for i in range(1,4)] for m in range(8)]
[[None, None, ++-+], [None, +-++, None], [-+++, None, None], [None, None, None],
[-+--, None, None], [None, --+-, None], [None, None, ---+], [None, None, None]]
"""
assert i in self.index_set()
rank = self.parent().cartan_type().n
if i < rank:
if self.value[i-1] == 1 and self.value[i] == -1:
ret = [self.value[x] for x in range(rank)]
ret[i-1] = -1
ret[i] = 1
return self.__class__(self.parent(), tuple(ret))
elif i == rank:
if self.value[i-2] == 1 and self.value[i-1] == 1:
ret = [self.value[x] for x in range(rank)]
ret[i-2] = -1
ret[i-1] = -1
return self.__class__(self.parent(), tuple(ret))
return None | en | 0.583714 | Spin Crystals These are the crystals associated with the three spin representations: the spin representations of odd orthogonal groups (or rather their double covers); and the `+` and `-` spin representations of the even orthogonal groups. We follow Kashiwara and Nakashima (Journal of Algebra 165, 1994) in representing the elements of the spin crystal by sequences of signs `\pm`. #TODO: Do we want the following two representations? # #Two other representations are available as attributes #:meth:`Spin.internal_repn` and :meth:`Spin.signature` of the crystal element. # #- A numerical internal representation, an integer `n` such that if `n-1` # is written in binary and the `1`'s are replaced by ``-``, the `0`'s by # ``+`` # #- The signature, which is a list in which ``+`` is replaced by `+1` and # ``-`` by `-1`. #***************************************************************************** # Copyright (C) 2007 <NAME> <anne at math.ucdavis.edu> # <NAME> <nthiery at users.sf.net> # <NAME> <bump at match.stanford.edu> # # Distributed under the terms of the GNU General Public License (GPL) # # This code is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # The full text of the GPL is available at: # # http://www.gnu.org/licenses/ #**************************************************************************** ######################### # Type B spin ######################### Return the spin crystal of the given type `B`. This is a combinatorial model for the crystal with highest weight `Lambda_n` (the `n`-th fundamental weight). It has `2^n` elements, here called Spins. See also :func:`~sage.combinat.crystals.letters.CrystalOfLetters`, :func:`~sage.combinat.crystals.spins.CrystalOfSpinsPlus`, and :func:`~sage.combinat.crystals.spins.CrystalOfSpinsMinus`. INPUT: - ``['B', n]`` - A Cartan type `B_n`. EXAMPLES:: sage: C = crystals.Spins(['B',3]) sage: C.list() [+++, ++-, +-+, -++, +--, -+-, --+, ---] sage: C.cartan_type() ['B', 3] :: sage: [x.signature() for x in C] ['+++', '++-', '+-+', '-++', '+--', '-+-', '--+', '---'] TESTS:: sage: crystals.TensorProduct(C,C,generators=[[C.list()[0],C.list()[0]]]).cardinality() 35 ######################### # Type D spins ######################### Return the plus spin crystal of the given type D. This is the crystal with highest weight `Lambda_n` (the `n`-th fundamental weight). INPUT: - ``['D', n]`` - A Cartan type `D_n`. EXAMPLES:: sage: D = crystals.SpinsPlus(['D',4]) sage: D.list() [++++, ++--, +-+-, -++-, +--+, -+-+, --++, ----] :: sage: [x.signature() for x in D] ['++++', '++--', '+-+-', '-++-', '+--+', '-+-+', '--++', '----'] TESTS:: sage: TestSuite(D).run() Return the minus spin crystal of the given type D. This is the crystal with highest weight `Lambda_{n-1}` (the `(n-1)`-st fundamental weight). INPUT: - ``['D', n]`` - A Cartan type `D_n`. EXAMPLES:: sage: E = crystals.SpinsMinus(['D',4]) sage: E.list() [+++-, ++-+, +-++, -+++, +---, -+--, --+-, ---+] sage: [x.signature() for x in E] ['+++-', '++-+', '+-++', '-+++', '+---', '-+--', '--+-', '---+'] TESTS:: sage: len(crystals.TensorProduct(E,E,generators=[[E[0],E[0]]]).list()) 35 sage: D = crystals.SpinsPlus(['D',4]) sage: len(crystals.TensorProduct(D,E,generators=[[D.list()[0],E.list()[0]]]).list()) 56 A generic crystal of spins. 
EXAMPLES:: sage: E = crystals.SpinsMinus(['D',4]) sage: TestSuite(E).run() Construct an element of ``self`` from ``value``. EXAMPLES:: sage: C = crystals.Spins(['B',3]) sage: x = C((1,1,1)); x +++ sage: y = C([1,1,1]); y +++ sage: x == y True Return the directed graph associated to ``self``. EXAMPLES:: sage: crystals.Spins(['B',3]).digraph() Digraph on 8 vertices Return ``True`` if and only if there is a path from ``x`` to ``y`` in the crystal graph. Because the crystal graph is classical, it is a directed acyclic graph which can be interpreted as a poset. This function implements the comparison function of this poset. EXAMPLES:: sage: C = crystals.Spins(['B',3]) sage: x = C([1,1,1]) sage: y = C([-1,-1,-1]) sage: C.lt_elements(x,y) True sage: C.lt_elements(y,x) False sage: C.lt_elements(x,x) False A spin letter in the crystal of spins. EXAMPLES:: sage: C = crystals.Spins(['B',3]) sage: c = C([1,1,1]) sage: TestSuite(c).run() sage: C([1,1,1]).parent() The crystal of spins for type ['B', 3] sage: c = C([1,1,1]) sage: c._repr_() '+++' sage: D = crystals.Spins(['B',4]) sage: a = C([1,1,1]) sage: b = C([-1,-1,-1]) sage: c = D([1,1,1,1]) sage: a == a True sage: a == b False sage: b == c False Return the signature of ``self``. EXAMPLES:: sage: C = crystals.Spins(['B',3]) sage: C([1,1,1]).signature() '+++' sage: C([1,1,-1]).signature() '++-' Represents the spin elements in terms of its signature. EXAMPLES:: sage: C = crystals.Spins(['B',3]) sage: b = C([1,1,-1]) sage: b ++- sage: b._repr_() '++-' Return a representation of ``self`` as a diagram. EXAMPLES:: sage: C = crystals.Spins(['B',3]) sage: b = C([1,1,-1]) sage: print(b._repr_diagram()) + + - Pretty print ``self`` as a column. EXAMPLES:: sage: C = crystals.Spins(['B',3]) sage: b = C([1,1,-1]) sage: b.pp() + + - Gives the latex output of a spin column. EXAMPLES:: sage: C = crystals.Spins(['B',3]) sage: b = C([1,1,-1]) sage: print(b._latex_()) {\def\lr#1{\multicolumn{1}{|@{\hspace{.6ex}}c@{\hspace{.6ex}}|}{\raisebox{-.3ex}{$#1$}}} \raisebox{-.6ex}{$\begin{array}[b]{*{1}c}\cline{1-1} \lr{-}\\\cline{1-1} \lr{+}\\\cline{1-1} \lr{+}\\\cline{1-1} \end{array}$} } Return `\varepsilon_i` of ``self``. EXAMPLES:: sage: C = crystals.Spins(['B',3]) sage: [[C[m].epsilon(i) for i in range(1,4)] for m in range(8)] [[0, 0, 0], [0, 0, 1], [0, 1, 0], [1, 0, 0], [0, 0, 1], [1, 0, 1], [0, 1, 0], [0, 0, 1]] Return `\varphi_i` of ``self``. EXAMPLES:: sage: C = crystals.Spins(['B',3]) sage: [[C[m].phi(i) for i in range(1,4)] for m in range(8)] [[0, 0, 1], [0, 1, 0], [1, 0, 1], [0, 0, 1], [1, 0, 0], [0, 1, 0], [0, 0, 1], [0, 0, 0]] Type B spin representation crystal element Returns the action of `e_i` on self. EXAMPLES:: sage: C = crystals.Spins(['B',3]) sage: [[C[m].e(i) for i in range(1,4)] for m in range(8)] [[None, None, None], [None, None, +++], [None, ++-, None], [+-+, None, None], [None, None, +-+], [+--, None, -++], [None, -+-, None], [None, None, --+]] Returns the action of `f_i` on self. EXAMPLES:: sage: C = crystals.Spins(['B',3]) sage: [[C[m].f(i) for i in range(1,4)] for m in range(8)] [[None, None, ++-], [None, +-+, None], [-++, None, +--], [None, None, -+-], [-+-, None, None], [None, --+, None], [None, None, ---], [None, None, None]] Type D spin representation crystal element Returns the action of `e_i` on self. 
EXAMPLES:: sage: D = crystals.SpinsPlus(['D',4]) sage: [[D.list()[m].e(i) for i in range(1,4)] for m in range(8)] [[None, None, None], [None, None, None], [None, ++--, None], [+-+-, None, None], [None, None, +-+-], [+--+, None, -++-], [None, -+-+, None], [None, None, None]] :: sage: E = crystals.SpinsMinus(['D',4]) sage: [[E[m].e(i) for i in range(1,4)] for m in range(8)] [[None, None, None], [None, None, +++-], [None, ++-+, None], [+-++, None, None], [None, None, None], [+---, None, None], [None, -+--, None], [None, None, --+-]] Returns the action of `f_i` on self. EXAMPLES:: sage: D = crystals.SpinsPlus(['D',4]) sage: [[D.list()[m].f(i) for i in range(1,4)] for m in range(8)] [[None, None, None], [None, +-+-, None], [-++-, None, +--+], [None, None, -+-+], [-+-+, None, None], [None, --++, None], [None, None, None], [None, None, None]] :: sage: E = crystals.SpinsMinus(['D',4]) sage: [[E[m].f(i) for i in range(1,4)] for m in range(8)] [[None, None, ++-+], [None, +-++, None], [-+++, None, None], [None, None, None], [-+--, None, None], [None, --+-, None], [None, None, ---+], [None, None, None]] | 2.527903 | 3 |
python/fate_flow/components/components.py | MiKKiYang/FATE-Flow | 0 | 6633332 | <filename>python/fate_flow/components/components.py
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import importlib
import inspect
import typing
from pathlib import Path
from fate_flow.utils.log_utils import getLogger
from fate_flow.components._base import ComponentMeta
LOGGER = getLogger()
def _get_module_name_by_path(path, base):
return '.'.join(path.resolve().relative_to(base.resolve()).with_suffix('').parts)
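# Illustrative example (hypothetical paths): with base=Path('/pkg') and
# path=Path('/pkg/provider/components/reader.py') this returns
# 'provider.components.reader', i.e. the dotted module name of `path`
# relative to `base`.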
def _search_components(path, base):
try:
module_name = _get_module_name_by_path(path, base)
module = importlib.import_module(module_name)
except ImportError as e:
# or skip ?
raise e
_obj_pairs = inspect.getmembers(module, lambda obj: isinstance(obj, ComponentMeta))
return _obj_pairs, module_name
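# Returns the (attribute_name, ComponentMeta) pairs defined in the imported
# module together with its dotted module name; get_names() below relies on
# each pair's ComponentMeta carrying the registered component name.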
class Components:
provider_version = None
provider_name = None
provider_path = None
@classmethod
def _module_base(cls):
return Path(cls.provider_path).resolve().parent
@classmethod
def _components_base(cls):
return Path(cls.provider_path, 'components').resolve()
@classmethod
def get_names(cls) -> typing.Dict[str, dict]:
names = {}
for p in cls._components_base().glob("**/*.py"):
obj_pairs, module_name = _search_components(p.resolve(), cls._module_base())
for name, obj in obj_pairs:
names[obj.name] = {"module": module_name}
LOGGER.info(f"component register {obj.name} with cache info {module_name}")
return names
@classmethod
def get(cls, name: str, cache) -> ComponentMeta:
if cache:
importlib.import_module(cache[name]["module"])
else:
for p in cls._components_base().glob("**/*.py"):
module_name = _get_module_name_by_path(p, cls._module_base())
importlib.import_module(module_name)
cpn = ComponentMeta.get_meta(name)
return cpn
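# Usage sketch (assumes a concrete provider subclass; names are illustrative):
#
#     class MyProvider(Components):
#         provider_name = 'fate'
#         provider_version = '1.0'
#         provider_path = '/path/to/provider'   # must contain a components/ dir
#
#     names = MyProvider.get_names()             # {component_name: {'module': ...}}
#     meta = MyProvider.get('reader', names)     # ComponentMeta for 'reader'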
| <filename>python/fate_flow/components/components.py
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import importlib
import inspect
import typing
from pathlib import Path
from fate_flow.utils.log_utils import getLogger
from fate_flow.components._base import ComponentMeta
LOGGER = getLogger()
def _get_module_name_by_path(path, base):
return '.'.join(path.resolve().relative_to(base.resolve()).with_suffix('').parts)
def _search_components(path, base):
try:
module_name = _get_module_name_by_path(path, base)
module = importlib.import_module(module_name)
except ImportError as e:
# or skip ?
raise e
_obj_pairs = inspect.getmembers(module, lambda obj: isinstance(obj, ComponentMeta))
return _obj_pairs, module_name
class Components:
provider_version = None
provider_name = None
provider_path = None
@classmethod
def _module_base(cls):
return Path(cls.provider_path).resolve().parent
@classmethod
def _components_base(cls):
return Path(cls.provider_path, 'components').resolve()
@classmethod
def get_names(cls) -> typing.Dict[str, dict]:
names = {}
for p in cls._components_base().glob("**/*.py"):
obj_pairs, module_name = _search_components(p.resolve(), cls._module_base())
for name, obj in obj_pairs:
names[obj.name] = {"module": module_name}
LOGGER.info(f"component register {obj.name} with cache info {module_name}")
return names
@classmethod
def get(cls, name: str, cache) -> ComponentMeta:
if cache:
importlib.import_module(cache[name]["module"])
else:
for p in cls._components_base().glob("**/*.py"):
module_name = _get_module_name_by_path(p, cls._module_base())
importlib.import_module(module_name)
cpn = ComponentMeta.get_meta(name)
return cpn
| en | 0.848233 | # # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # or skip ? | 1.905975 | 2 |
gcn/gcn_graph/train_search.py | kevin840307/sgas | 161 | 6633333 | <reponame>kevin840307/sgas<gh_stars>100-1000
import __init__
import os
import sys
import time
import glob
import math
import numpy as np
import torch
from gcn import utils
import logging
import argparse
import torch.utils
import torch.nn as nn
import torch.nn.functional as F
import torch_geometric.datasets as GeoData
from torch_geometric.data import DataLoader
import torch.backends.cudnn as cudnn
import torch.distributions.categorical as cate
import torchvision.utils as vutils
from model_search import Network
from architect import Architect
from tensorboardX import SummaryWriter
# torch_geometric.set_debug(True)
parser = argparse.ArgumentParser("ppi")
parser.add_argument('--data', type=str, default='../../data', help='location of the data corpus')
parser.add_argument('--batch_size', type=int, default=6, help='batch size')
parser.add_argument('--batch_increase', default=1, type=int, help='how much the batch size increases after each decision')
parser.add_argument('--learning_rate', type=float, default=0.005, help='init learning rate')
parser.add_argument('--learning_rate_min', type=float, default=0.0001, help='min learning rate')
parser.add_argument('--momentum', type=float, default=0.9, help='momentum')
parser.add_argument('--weight_decay', type=float, default=3e-4, help='weight decay')
parser.add_argument('--report_freq', type=float, default=50, help='report frequency')
parser.add_argument('--gpu', type=int, default=0, help='gpu device id')
parser.add_argument('--init_channels', type=int, default=32, help='num of init channels')
parser.add_argument('--num_cells', type=int, default=1, help='total number of cells')
parser.add_argument('--n_steps', type=int, default=3, help='total number of layers in one cell')
parser.add_argument('--model_path', type=str, default='saved_models', help='path to save the model')
parser.add_argument('--cutout', action='store_true', default=False, help='use cutout')
parser.add_argument('--cutout_length', type=int, default=16, help='cutout length')
parser.add_argument('--drop_path_prob', type=float, default=0.3, help='drop path probability')
parser.add_argument('--save', type=str, default='PPI', help='experiment name')
parser.add_argument('--seed', type=int, default=2, help='random seed')
parser.add_argument('--random_seed', action='store_true', help='use seed randomly')
parser.add_argument('--grad_clip', type=float, default=5, help='gradient clipping')
parser.add_argument('--train_portion', type=float, default=0.5, help='portion of training data')
parser.add_argument('--unrolled', action='store_true', default=False, help='use one-step unrolled validation loss')
parser.add_argument('--arch_learning_rate', type=float, default=3e-4, help='learning rate for arch encoding')
parser.add_argument('--arch_weight_decay', type=float, default=1e-3, help='weight decay for arch encoding')
parser.add_argument('--warmup_dec_epoch', type=int, default=9, help='warmup decision epoch')
parser.add_argument('--decision_freq', type=int, default=7, help='decision freq epoch')
parser.add_argument('--history_size', type=int, default=4, help='number of stored epoch scores')
parser.add_argument('--use_history', action='store_true', help='use history for decision')
parser.add_argument('--in_channels', default=50, type=int, help='number of input node feature channels')
parser.add_argument('--post_val', action='store_true', default=False, help='validate after each decision')
args = parser.parse_args()
DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
args.save = 'log/search-{}-{}'.format(args.save, time.strftime("%Y%m%d-%H%M%S"))
utils.create_exp_dir(args.save, scripts_to_save=glob.glob('*.py'))
log_format = '%(asctime)s %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
format=log_format, datefmt='%m/%d %I:%M:%S %p')
fh = logging.FileHandler(os.path.join(args.save, 'log.txt'))
fh.setFormatter(logging.Formatter(log_format))
logging.getLogger().addHandler(fh)
writer = SummaryWriter(log_dir=args.save, max_queue=50)
def histogram_average(history, probs):
histogram_inter = torch.zeros(probs.shape[0], dtype=torch.float).cuda()
if not history:
return histogram_inter
for hist in history:
histogram_inter += utils.histogram_intersection(hist, probs)
histogram_inter /= len(history)
return histogram_inter
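# Worked toy example (sketch; assumes utils.histogram_intersection(h, p)
# returns the per-edge sum of elementwise minima, which is how the result is
# consumed below): for one edge with current probs p = [0.7, 0.3] and a stored
# hist h = [0.6, 0.4], the intersection is min-sum 0.6 + 0.3 = 0.9; averaging
# over the stored history gives a stability score in [0, 1] -- 1.0 when the op
# distribution has stopped moving between epochs.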
def score_image(type, score, epoch):
score_img = vutils.make_grid(
torch.unsqueeze(torch.unsqueeze(torch.unsqueeze(score, 1), 2), 3),
nrow=7,
normalize=True,
pad_value=0.5)
writer.add_image(type + '_score', score_img, epoch)
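# (The grid above packs each edge's scalar score into a 1x1 "image" tile,
# nrow=7 tiles per row, so TensorBoard shows a normalized heat strip of the
# current per-edge decision scores.)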
def edge_decision(type, alphas, selected_idxs, candidate_flags, probs_history, epoch, model, args):
mat = F.softmax(torch.stack(alphas, dim=0), dim=-1).detach()
print(mat)
importance = torch.sum(mat[:, 1:], dim=-1)
# logging.info(type + " importance {}".format(importance))
probs = mat[:, 1:] / importance[:, None]
# print(type + " probs", probs)
entropy = cate.Categorical(probs=probs).entropy() / math.log(probs.size()[1])
# logging.info(type + " entropy {}".format(entropy))
if args.use_history: # SGAS Cri.2
# logging.info(type + " probs history {}".format(probs_history))
histogram_inter = histogram_average(probs_history, probs)
# logging.info(type + " histogram intersection average {}".format(histogram_inter))
probs_history.append(probs)
if (len(probs_history) > args.history_size):
probs_history.pop(0)
score = utils.normalize(importance) * utils.normalize(
1 - entropy) * utils.normalize(histogram_inter)
# logging.info(type + " score {}".format(score))
else: # SGAS Cri.1
score = utils.normalize(importance) * utils.normalize(1 - entropy)
# logging.info(type + " score {}".format(score))
if torch.sum(candidate_flags.int()) > 0 and \
epoch >= args.warmup_dec_epoch and \
(epoch - args.warmup_dec_epoch) % args.decision_freq == 0:
masked_score = torch.min(score,
(2 * candidate_flags.float() - 1) * np.inf)  # decided edges -> -inf; candidates keep their score
selected_edge_idx = torch.argmax(masked_score)
selected_op_idx = torch.argmax(probs[selected_edge_idx]) + 1 # add 1 since none op
selected_idxs[selected_edge_idx] = selected_op_idx
candidate_flags[selected_edge_idx] = False
alphas[selected_edge_idx].requires_grad = False
if type == 'normal':
reduction = False
elif type == 'reduce':
reduction = True
else:
raise Exception('Unknown Cell Type')
candidate_flags, selected_idxs = model.check_edges(candidate_flags,
selected_idxs)
logging.info("#" * 30 + " Decision Epoch " + "#" * 30)
logging.info("epoch {}, {}_selected_idxs {}, added edge {} with op idx {}".format(epoch,
type,
selected_idxs,
selected_edge_idx,
selected_op_idx))
print(type + "_candidate_flags {}".format(candidate_flags))
score_image(type, score, epoch)
return True, selected_idxs, candidate_flags
else:
logging.info("#" * 30 + " Not a Decision Epoch " + "#" * 30)
logging.info("epoch {}, {}_selected_idxs {}".format(epoch,
type,
selected_idxs))
print(type + "_candidate_flags {}".format(candidate_flags))
score_image(type, score, epoch)
return False, selected_idxs, candidate_flags
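# Toy example of the selection score (sketch). Suppose one edge has softmaxed
# alphas [0.1, 0.6, 0.2, 0.1] over [none, op1, op2, op3]:
#     importance = 0.6 + 0.2 + 0.1 = 0.9            (weight away from `none`)
#     probs      = [0.667, 0.222, 0.111]            (renormalized over real ops)
#     entropy    = H(probs) / log(3) ~ 0.77         (normalized uncertainty)
# SGAS Cri.1 scores the edge with normalize(importance) * normalize(1 - entropy);
# with --use_history (Cri.2) the histogram-intersection stability term is
# multiplied in. Every `decision_freq` epochs after `warmup_dec_epoch`, the
# highest-scoring candidate edge is frozen to argmax(probs) + 1 (the +1 skips
# the `none` op, as above).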
def main():
if not torch.cuda.is_available():
logging.info('no gpu device available')
sys.exit(1)
if args.random_seed:
args.seed = int(np.random.randint(0, 1000))  # plain int so np.random.seed/torch.manual_seed accept it
np.random.seed(args.seed)
torch.cuda.set_device(args.gpu)
cudnn.benchmark = True
torch.manual_seed(args.seed)
cudnn.enabled = True
torch.cuda.manual_seed(args.seed)
logging.info('gpu device = %d' % args.gpu)
logging.info("args = %s", args)
# dataset ppi
train_dataset = GeoData.PPI(os.path.join(args.data, 'ppi'), split='train')
train_queue = DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True)
valid_dataset = GeoData.PPI(os.path.join(args.data, 'ppi'), split='val')
valid_queue = DataLoader(valid_dataset, batch_size=args.batch_size, shuffle=False)
n_classes = train_queue.dataset.num_classes
criterion = torch.nn.BCEWithLogitsLoss().cuda()
model = Network(args.init_channels, n_classes, args.num_cells, criterion,
args.n_steps, in_channels=args.in_channels).cuda()
logging.info("param size = %fMB", utils.count_parameters_in_MB(model))
num_edges = model._steps * 2
post_train = 5
args.epochs = args.warmup_dec_epoch + args.decision_freq * (num_edges - 1) + post_train + 1
logging.info("total epochs: %d", args.epochs)
optimizer = torch.optim.SGD(
model.parameters(),
args.learning_rate,
momentum=args.momentum,
weight_decay=args.weight_decay)
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
optimizer, float(args.epochs), eta_min=args.learning_rate_min)
architect = Architect(model, args)
normal_selected_idxs = torch.tensor(len(model.alphas_normal) * [-1], requires_grad=False, dtype=torch.int).cuda()
normal_candidate_flags = torch.tensor(len(model.alphas_normal) * [True], requires_grad=False, dtype=torch.bool).cuda()
logging.info('normal_selected_idxs: {}'.format(normal_selected_idxs))
logging.info('normal_candidate_flags: {}'.format(normal_candidate_flags))
model.normal_selected_idxs = normal_selected_idxs
model.normal_candidate_flags = normal_candidate_flags
print(F.softmax(torch.stack(model.alphas_normal, dim=0), dim=-1).detach())
count = 0
normal_probs_history = []
train_losses, valid_losses = utils.AverageMeter(), utils.AverageMeter()
for epoch in range(args.epochs):
lr = scheduler.get_lr()[0]
logging.info('epoch %d lr %e', epoch, lr)
# training
train_acc, train_losses = train(train_queue, valid_queue, model, architect, criterion, optimizer, lr, train_losses)
valid_acc, valid_losses = infer(valid_queue, model, criterion, valid_losses)
logging.info('train_acc %f\tvalid_acc %f', train_acc, valid_acc)
# make edge decisions
saved_memory_normal, model.normal_selected_idxs, \
model.normal_candidate_flags = edge_decision('normal',
model.alphas_normal,
model.normal_selected_idxs,
model.normal_candidate_flags,
normal_probs_history,
epoch,
model,
args)
if saved_memory_normal:
del train_queue, valid_queue
torch.cuda.empty_cache()
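            # each decided edge prunes its candidate ops and frees memory, so the
            # batch size can grow by `batch_increase` per decision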
count += 1
new_batch_size = args.batch_size + args.batch_increase * count
logging.info("new_batch_size = {}".format(new_batch_size))
train_queue = DataLoader(train_dataset, batch_size=new_batch_size, shuffle=True)
valid_queue = DataLoader(valid_dataset, batch_size=new_batch_size, shuffle=False)
if args.post_val:
                valid_acc, valid_obj = infer(valid_queue, model, criterion, valid_losses)  # infer() takes the loss meter too
logging.info('post valid_acc %f', valid_acc)
writer.add_scalar('stats/train_acc', train_acc, epoch)
writer.add_scalar('stats/valid_acc', valid_acc, epoch)
utils.save(model, os.path.join(args.save, 'weights.pt'))
scheduler.step()
logging.info("#" * 30 + " Done " + "#" * 30)
logging.info('genotype = %s', model.get_genotype())
def train(train_queue, valid_queue, model, architect, criterion, optimizer, lr, train_losses):
micro_f1 = 0.
count = 0.
train_losses.reset()
for step, input in enumerate(train_queue):
model.train()
input = input.to(DEVICE)
target = input.y
n = input.x.size(0)
input_search = next(iter(valid_queue))
input_search = input_search.to(DEVICE)
target_search = input_search.y
architect.step(input, target, input_search, target_search, lr, optimizer, unrolled=args.unrolled)
optimizer.zero_grad()
logits = model(input)
loss = criterion(logits, target)
loss.backward()
nn.utils.clip_grad_norm_(model.parameters(), args.grad_clip)
optimizer.step()
micro_f1 += utils.mF1(logits, target) * n
count += n
train_losses.update(loss.item(), n)
micro_f1 = float(micro_f1) / count
return micro_f1, train_losses
def infer(valid_queue, model, criterion, valid_losses):
model.eval()
count = 0.
micro_f1 = 0.
valid_losses.reset()
with torch.no_grad():
for step, input in enumerate(valid_queue):
input = input.to(DEVICE)
target = input.y
logits = model(input)
loss = criterion(logits, target)
n = target.size(0)
micro_f1 += utils.mF1(logits, target) * n
count += n
valid_losses.update(loss.item(), n)
micro_f1 = float(micro_f1) / count
return micro_f1, valid_losses
if __name__ == '__main__':
main()
| en | 0.459158 | 2.06735 | 2 |
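For reference, the decision schedule the script encodes can be checked with a short standalone sketch. It is illustrative only, not part of the file above; the values mirror its argparse defaults.
# Sketch of the SGAS edge-decision schedule, assuming the defaults above.
warmup_dec_epoch, decision_freq, n_steps, post_train = 9, 7, 3, 5
num_edges = n_steps * 2  # model._steps * 2 in the script
decision_epochs = [warmup_dec_epoch + decision_freq * i for i in range(num_edges)]
print(decision_epochs)  # [9, 16, 23, 30, 37, 44]
print(warmup_dec_epoch + decision_freq * (num_edges - 1) + post_train + 1)  # 50 total epochs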
sushy/resources/system/storage/volume.py | sapcc/sushy | 0 | 6633334 |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# This is referred from Redfish standard schema.
# http://redfish.dmtf.org/schemas/v1/Volume.v1_0_3.json
import logging
from sushy import exceptions
from sushy.resources import base
from sushy.resources import common
from sushy.resources.system.storage import mappings as store_maps
from sushy import utils
LOG = logging.getLogger(__name__)
class ActionsField(base.CompositeField):
initialize = common.InitializeActionField('#Volume.Initialize')
class Volume(base.ResourceBase):
"""This class adds the Storage Volume resource"""
identity = base.Field('Id', required=True)
"""The Volume identity string"""
name = base.Field('Name')
"""The name of the resource"""
capacity_bytes = base.Field('CapacityBytes', adapter=utils.int_or_none)
"""The size in bytes of this Volume."""
volume_type = base.MappedField('VolumeType',
store_maps.VOLUME_TYPE_TYPE_MAP)
"""The type of this volume."""
raid_type = base.MappedField('RAIDType', store_maps.RAID_TYPE_TYPE_MAP)
"""The RAID type of this volume."""
encrypted = base.Field('Encrypted', adapter=bool)
"""Is this Volume encrypted."""
identifiers = common.IdentifiersListField('Identifiers', default=[])
"""The Durable names for the volume."""
block_size_bytes = base.Field('BlockSizeBytes', adapter=int)
"""The size of the smallest addressable unit of this volume in bytes."""
operation_apply_time_support = common.OperationApplyTimeSupportField()
"""Indicates if a client is allowed to request for a specific apply
time of a create, delete, or action operation of a given resource"""
_actions = ActionsField('Actions')
def _get_initialize_action_element(self):
initialize_action = self._actions.initialize
if not initialize_action:
raise exceptions.MissingActionError(action='#Volume.Initialize',
resource=self._path)
return initialize_action
def get_allowed_initialize_volume_values(self):
"""Get the allowed values for initializing the volume.
:returns: A set with the allowed values.
"""
action = self._get_initialize_action_element()
if not action.allowed_values:
LOG.warning('Could not figure out the allowed values for the '
'initialize volume action for Volume %s',
self.identity)
return set(store_maps.VOLUME_INIT_TYPE_MAP_REV)
return set([store_maps.VOLUME_INIT_TYPE_MAP[v] for v in
set(store_maps.VOLUME_INIT_TYPE_MAP).
intersection(action.allowed_values)])
def initialize_volume(self, value):
"""Initialize the volume.
:param value: The InitializeType value.
:raises: InvalidParameterValueError, if the target value is not
allowed.
"""
valid_values = self.get_allowed_initialize_volume_values()
if value not in valid_values:
raise exceptions.InvalidParameterValueError(
parameter='value', value=value, valid_values=valid_values)
value = store_maps.VOLUME_INIT_TYPE_MAP_REV[value]
target_uri = self._get_initialize_action_element().target_uri
self._conn.post(target_uri, data={'InitializeType': value},
blocking=True)
def delete_volume(self, payload=None):
"""Delete the volume.
:param payload: May contain @Redfish.OperationApplyTime property
:raises: ConnectionError
:raises: HTTPError
"""
self._conn.delete(self._path, data=payload, blocking=True)
class VolumeCollection(base.ResourceCollectionBase):
"""This class represents the Storage Volume collection"""
@property
def _resource_type(self):
return Volume
@property
@utils.cache_it
def volumes_sizes_bytes(self):
"""Sizes of all Volumes in bytes in VolumeCollection resource.
Returns the list of cached values until it (or its parent resource)
is refreshed.
"""
return sorted(vol.capacity_bytes for vol in self.get_members())
@property
def max_volume_size_bytes(self):
"""Max size available (in bytes) among all Volume resources.
Returns the cached value until it (or its parent resource) is
refreshed.
"""
return utils.max_safe(self.volumes_sizes_bytes)
# NOTE(etingof): for backward compatibility
max_size_bytes = max_volume_size_bytes
operation_apply_time_support = common.OperationApplyTimeSupportField()
"""Indicates if a client is allowed to request for a specific apply
time of a create, delete, or action operation of a given resource"""
def create_volume(self, payload):
"""Create a volume.
:param payload: The payload representing the new volume to create.
:raises: ConnectionError
:raises: HTTPError
:returns: Newly created Volume resource or None if no Location header
"""
r = self._conn.post(self._path, data=payload, blocking=True)
location = r.headers.get('Location')
if r.status_code == 201:
if location:
self.refresh()
return self.get_member(location)
| en | 0.782738 | 2.039316 | 2 |
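For orientation, a short hypothetical usage sketch of the Volume API above. `storage` stands for an already-fetched sushy Storage resource exposing this VolumeCollection (an assumption, not defined in this module), and the payload fields follow the Redfish Volume schema.
# Hypothetical walkthrough; `storage` is assumed to be a fetched Storage resource.
volumes = storage.volumes                      # a VolumeCollection
print(volumes.max_volume_size_bytes)           # cached until refresh()
vol = volumes.create_volume(
    {'Name': 'scratch', 'CapacityBytes': 10 * 1024 ** 3})  # Redfish-style payload
if vol is not None:                            # None when no Location header came back
    allowed = vol.get_allowed_initialize_volume_values()
    vol.initialize_volume(next(iter(allowed)))  # any supported InitializeType
    vol.delete_volume()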
chart_web.py | caux/japonicus | 0 | 6633335 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import glob
import datetime
import numpy as np
import pandas as pd
import json
import os
import quantmod as qm
import flask
import dash
from dash.dependencies import Input, Output
import dash_core_components as dcc
import dash_html_components as html
from flask_caching import Cache
# from plotInfo import plotEvolutionSummary
import gekkoWrapper
import Settings
import coreFunctions
import evolution_bayes
gsettings = Settings.getSettings()['global']
settings = Settings.getSettings()['bayesian']
MA_SMA, MA_EMA, MA_WMA, MA_DEMA, MA_TEMA, MA_TRIMA, MA_KAMA, MA_MAMA, MA_T3 = range(9)
rename = {
"DEMA": {"long": "timeperiod"},
"MACD": {"short": "fastperiod", "long": "slowperiod", "signal": "signalperiod"},
"PPO": {"short": "fastperiod", "long": "slowperiod", "signal": "signalperiod"},
"RSI": {"interval": "timeperiod"},
"StochRSI": {"interval": "timeperiod"},
"CCI": {"interval": "timeperiod"},
}
indicators = rename.keys()
def talib_dict(params):
    # dict key rename: map the Gekko-style parameter names onto TA-Lib keyword
    # arguments, keyed by upper-cased indicator name to match the quantmod
    # add_* helpers. (The old STOCHRSI special-case dead-ended in a KeyError,
    # since `rename` is keyed by "StochRSI".)
    newparams = {}
    for k in rename.keys():
        newparams[k.upper()] = {}
        for old, new in rename[k].items():
            newparams[k.upper()][new] = params[k].pop(old)
    # add matype
    newparams["PPO"]["matype"] = MA_EMA
    # newparams["STOCHRSI"]["matype"] = MA_EMA
    return newparams
def run_server():
# Setup the app
server = flask.Flask(__name__)
# server.secret_key = os.environ.get('secret_key', 'secret')
app = dash.Dash(__name__, server=server, csrf_protect=False)
app.scripts.config.serve_locally = False
dcc._js_dist[0]['external_url'] = 'https://cdn.plot.ly/plotly-finance-1.28.0.min.js'
# Setup config
responses, configs = get_json()
def setup_config(filename=None):
if filename != None and filename in responses:
config_filename = filename.replace("response", "config")
res = load_json(filename)
gekko_config = load_json(config_filename)
else:
res = load_json(responses[-1])
gekko_config = load_json(configs[-1])
filename = gsettings['configFilename']
configjs = Settings.get_configjs(filename)
config = {k: v for k, v in configjs.items() if k in indicators}
config2 = {
k: v for k, v in gekko_config["gekkoConfig"].items() if k in indicators
}
config.update(config2.copy())
strategy = gekko_config["gekkoConfig"]["tradingAdvisor"]["method"]
return strategy, config, res
# Setup chart
def setup_chart(res):
candles = pd.DataFrame.from_dict(res['candles'])
candles["start"] = pd.to_datetime(candles["start"])
candles.index = candles["start"]
trades = pd.DataFrame.from_dict(res['trades'])
trades["start"] = pd.to_datetime(trades["date"])
trades["color"] = 'rgba(0, 0, 0, 0.)'
trades["symbol"] = 'triangle-down'
trades.loc[trades.action.str.match("buy"), "color"] = 'rgba(255, 182, 193, .5)'
trades.loc[trades.action.str.match("sell"), "color"] = 'rgba(182, 193, 255, .5)'
trades.loc[trades.action.str.match("buy"), "symbol"] = 'triangle-up'
trade_scatter = dict(
x=trades["start"],
y=trades["price"],
name=trades["action"],
mode="markers",
marker=dict(
symbol=trades["symbol"], size=15, color=trades["color"], showscale=True
),
)
return candles, trade_scatter
strategy, config, res = setup_config()
candles, trade_scatter = setup_chart(res)
# Add caching
cache = Cache(app.server, config={'CACHE_TYPE': 'simple'})
timeout = 60 * 60 # 1 hour
# Controls
src = dict(
index='start',
op='open',
hi='high',
lo='low',
cl='close',
aop=None,
ahi=None,
alo=None,
acl=None,
vo='volume',
di=None,
)
logs = responses
logs = [dict(label=str(log), value=str(log)) for log in logs]
# Dynamic binding
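    # NOTE: fragile slice; assumes the add_* indicator helpers occupy positions
    # 9:-4 of dir(qm.ta). The [4:] below strips the "add_" prefix for the label.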
functions = dir(qm.ta)[9:-4]
functions = [
dict(label=str(function[4:]), value=str(function)) for function in functions
]
# Layout
app.layout = html.Div(
[
html.Div(
[
html.H2(
'gekkoJaponicus Charts',
style={'padding-top': '20', 'text-align': 'center'},
),
html.Div(
[
html.Label('Select log:'),
dcc.Dropdown(
id='dropdown', options=logs, value=str(responses[0])
),
],
style={
'width': '510',
'display': 'inline-block',
'padding-left': '40',
'margin-bottom': '20',
},
),
html.Div(
[
html.Label('Select technical indicators:'),
dcc.Dropdown(
id='multi',
options=functions,
multi=True,
value=["add_" + strategy.upper()],
),
],
style={
'width': '510',
'display': 'inline-block',
'padding-right': '40',
'margin-bottom': '20',
},
),
]
),
html.Div(
[
html.Label('Specify parameters of technical indicators:'),
dcc.Input(
id='arglist',
style={'height': '32', 'width': '1020'},
value=json.dumps(config),
),
],
id='arg-controls',
style={'display': 'none'},
),
dcc.Graph(id='output'),
],
style={
'width': '1100',
'margin-left': 'auto',
'margin-right': 'auto',
'font-family': 'overpass',
'background-color': '#F3F3F3',
},
)
@app.callback(Output('arg-controls', 'style'), [Input('multi', 'value')])
def display_control(multi):
if not multi:
return {'display': 'none'}
else:
return {'margin-bottom': '20', 'padding-left': '40'}
@cache.memoize(timeout=timeout)
@app.callback(
Output('output', 'figure'),
[
Input('dropdown', 'value'),
Input('multi', 'value'),
Input('arglist', 'value'),
],
)
def update_graph_from_dropdown(dropdown, multi, arglist):
# Get Quantmod Chart
print('Loading')
strategy, config, res = setup_config(dropdown)
candles, trade_scatter = setup_chart(res)
ch = qm.Chart(candles, src=src)
# Get functions and arglist for technical indicators
if arglist:
for function in multi:
try:
config = talib_dict(json.loads(arglist))
indicator = function.split("_")[1]
newargs = config[indicator]
# Dynamic calling
fn = getattr(qm, function)
fn(ch, **newargs)
except Exception as e:
print(e)
getattr(qm, function)(ch)
pass
else:
for function in multi:
# Dynamic calling
getattr(qm, function)(ch)
fig = ch.to_figure(width=1100)
# hack figure
index = 0
for i in range(len(fig["layout"].keys())):
axis = "yaxis" + str(i)
if axis in fig["layout"]:
index = i + 1
yrange = [candles["low"].min(), candles["high"].max()]
fig["layout"]["yaxis"]["range"] = yrange
fig["layout"]["yaxis" + str(index)] = fig["layout"]["yaxis2"]
fig["layout"]["plot_bgcolor"] = 'rgba(0, 0, 0, 0.00)'
trade_scatter["yaxis"] = "y1"
fig["data"].append(trade_scatter)
return fig
# External css
external_css = [
"https://fonts.googleapis.com/css?family=Overpass:400,400i,700,700i",
"https://cdn.rawgit.com/plotly/dash-app-stylesheets/c6a126a684eaaa94a708d41d6ceb32b28ac78583/dash-technical-charting.css",
]
for css in external_css:
app.css.append_css({"external_url": css})
# Run the Dash app
if __name__ == '__main__':
app.server.run(debug=True)
# app.server.run()
def get_json():
files1 = os.path.join(gsettings["save_dir"], '*_response.json')
files2 = os.path.join(gsettings["save_dir"], '*_config.json')
response_files = list(filter(os.path.isfile, glob.glob(files1)))
    response_files.sort(key=lambda x: -os.path.getmtime(x))  # newest first
    config_file = list(filter(os.path.isfile, glob.glob(files2)))
    config_file.sort(key=lambda x: -os.path.getmtime(x))
return response_files, config_file
def load_json(filename):
    with open(filename, "r") as f:  # context manager; the original called `f.close` without ()
        return json.loads(f.read())
def create_first_chart():
print("log file not found: try to fetch")
strategy = settings["Strategy"]
deltaDays = settings['deltaDays']
filename = gsettings['configFilename']
configjs = Settings.get_configjs(filename)
watch = settings["watch"]
dateset = gekkoWrapper.getAvailableDataset(watch)
daterange = coreFunctions.getRandomDateRange(dateset, deltaDays=deltaDays)
config = evolution_bayes.compressing_flatten_dict(configjs[strategy], strategy)
config["watch"] = watch
gekko_config = gekkoWrapper.createConfig(config, daterange)
res = evolution_bayes.EvaluateRaw(watch, daterange, configjs[strategy], strategy)
score = res['report']['relativeProfit']
filename = "_".join(
[
watch["exchange"],
watch["currency"],
watch["asset"],
strategy,
datetime.datetime.now().strftime('%Y%m%d_%H%M%S'),
str(score),
]
)
save_dir = gsettings["save_dir"]
json_filename = os.path.join(save_dir, filename) + "_config.json"
json2_filename = os.path.join(save_dir, filename) + "_response.json"
if not os.path.exists(save_dir):
os.mkdir(save_dir)
f = open(json_filename, "w")
f.write(json.dumps(gekko_config, indent=2))
f.close()
print("Saved: " + json_filename)
f = open(json2_filename, "w")
f.write(json.dumps(res, indent=2))
f.close()
print("Saved: " + json2_filename)
if __name__ == '__main__':
res, config = get_json()
if len(res) > 0 and len(config) > 0:
run_server()
else:
create_first_chart()
run_server()
| en | 0.388671 | 1.840639 | 2 |
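To make the renaming in talib_dict concrete, a small illustrative call (the indicator periods are made-up values; note the function pops keys from its argument in place):
gekko_style = {
    "DEMA": {"long": 21},
    "MACD": {"short": 12, "long": 26, "signal": 9},
    "PPO": {"short": 12, "long": 26, "signal": 9},
    "RSI": {"interval": 14},
    "StochRSI": {"interval": 14},
    "CCI": {"interval": 20},
}
print(talib_dict(gekko_style))
# {'DEMA': {'timeperiod': 21},
#  'MACD': {'fastperiod': 12, 'slowperiod': 26, 'signalperiod': 9},
#  'PPO': {'fastperiod': 12, 'slowperiod': 26, 'signalperiod': 9, 'matype': 1},
#  'RSI': {'timeperiod': 14}, 'STOCHRSI': {'timeperiod': 14},
#  'CCI': {'timeperiod': 20}}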
image_labelling_tool/models.py | uea-computer-vision/django-labeller | 4 | 6633336 | import json, datetime
from django.db import models
from django.conf import settings
from django.utils import timezone
from django.contrib.auth import get_user_model
from . import managers
class LabelsLockedError (Exception):
pass
class LabellingTask (models.Model):
enabled = models.BooleanField(default=True)
name = models.CharField(max_length=256)
human_name = models.CharField(max_length=256)
order_key = models.IntegerField(default=0)
def to_json(self):
return dict(name=self.name, human_name=self.human_name)
def __str__(self):
return 'Task {} (identifier {})'.format(self.human_name, self.name)
class Labels (models.Model):
# Label data
labels_json_str = models.TextField(default='[]')
# Task completion
completed_tasks = models.ManyToManyField(LabellingTask)
# Creation date
creation_date = models.DateField()
# Time elapsed during editing, in seconds
edit_time_elapsed = models.FloatField(default=0.0, blank=True)
# Last modification user and datetime
last_modified_by = models.ForeignKey(
settings.AUTH_USER_MODEL, models.SET_NULL, related_name='modified_labels', null=True, default=None)
last_modified_datetime = models.DateTimeField(default=datetime.datetime.now)
# Locked by user and expiry datetime
locked_by = models.ForeignKey(
settings.AUTH_USER_MODEL, models.SET_NULL, related_name='locked_labels', null=True, default=None)
lock_expiry_datetime = models.DateTimeField(default=datetime.datetime.now)
# Manager
objects = managers.LabelsManager()
@property
def labels_json(self):
return json.loads(self.labels_json_str)
@labels_json.setter
def labels_json(self, label_js):
self.labels_json_str = json.dumps(label_js)
@property
def metadata(self):
"""
Access metadata (completed tasks, creation date, last modified by, last modified date time) as a dict
:return:
"""
return dict(
completed_tasks=list(self.completed_tasks.all()),
creation_date=self.creation_date,
last_modified_by=self.last_modified_by,
last_modified_datetime=self.last_modified_datetime
)
@metadata.setter
def metadata(self, meta):
if 'completed_tasks' in meta:
self.completed_tasks.set(meta['completed_tasks'])
if 'creation_date' in meta:
self.creation_date = meta['creation_date']
if 'last_modified_by' in meta:
self.last_modified_by = meta['last_modified_by']
if 'last_modified_datetime' in meta:
self.last_modified_datetime = meta['last_modified_datetime']
@property
def metadata_json(self):
"""
Access metadata (completed tasks, creation date, last modified by, last modified date time) as a
JSON dict. The 'last modified by' user is stored as user name and/or user ID. The completed tasks
are stored by name. Dates and datetimes are stored in string form.
:return:
"""
return self.metadata_dict_to_json(self.metadata)
@metadata_json.setter
def metadata_json(self, meta_js):
self.metadata = self.metadata_json_to_dict(meta_js)
@staticmethod
def metadata_dict_to_json(metadata):
"""
Convert metadata in dictionary form to JSON form.
Dates and date times are converted to string form for storage as JSON.
The last_modified_by User object is stored in JSON as a username and user ID.
The completed tasks are converted to a list of task names
:param metadata: metadata in a dictionary with the following optional keys: 'creation_date', 'last_modified_by',
'last_modified_datetime' and 'completed_tasks'
:return: metadata in JSON form
"""
meta_json = {}
if 'creation_date' in metadata:
meta_json['creation_date'] = metadata['creation_date'].strftime('%Y-%m-%d')
if 'last_modified_by' in metadata:
last_modified_by = metadata['last_modified_by']
if last_modified_by is not None:
meta_json['last_modified_by__username'] = last_modified_by.username
meta_json['last_modified_by__id'] = last_modified_by.id
if 'last_modified_datetime' in metadata:
meta_json['last_modified_datetime'] = metadata['last_modified_datetime'].strftime('%Y-%m-%d %H:%M:%S')
if 'completed_tasks' in metadata:
meta_json['completed_tasks'] = [task.name for task in metadata['completed_tasks']]
return meta_json
@staticmethod
def metadata_json_to_dict(metadata_json):
"""
Convert metadata as a JSON dictionary to dictionary form.
:param metadata_json: metadata as a JSON dictionary
:return: metadata in dict form
"""
meta = {}
if 'creation_date' in metadata_json:
meta['creation_date'] = datetime.datetime.strptime(metadata_json['creation_date'], '%Y-%m-%d').date()
last_modified_by = None
if 'last_modified_by__username' in metadata_json:
username = metadata_json['last_modified_by__username']
last_modified_by = get_user_model().objects.get(username=username)
if last_modified_by is None and 'last_modified_by__id' in metadata_json:
user_id = metadata_json['last_modified_by__id']
last_modified_by = get_user_model().objects.get(id=user_id)
if last_modified_by is not None:
meta['last_modified_by'] = last_modified_by
if 'last_modified_datetime' in metadata_json:
meta['last_modified_datetime'] = datetime.datetime.strptime(metadata_json['last_modified_datetime'],
'%Y-%m-%d %H:%M:%S')
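        # legacy format: a bare 'complete' flag maps onto the 'finished' task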
if 'complete' in metadata_json:
completed_task_names = ['finished']
elif 'completed_tasks' in metadata_json:
completed_task_names = metadata_json['completed_tasks']
else:
completed_task_names = None
if completed_task_names is not None:
meta['completed_tasks'] = list(LabellingTask.objects.filter(name__in=completed_task_names).distinct())
return meta
@staticmethod
def from_labels_json_str_and_metadata_dict(labels_json_str, metadata):
keys = ['creation_date', 'completed_tasks', 'last_modified_by', 'last_modified_datetime']
kwargs = {key: metadata[key] for key in keys}
return Labels(labels_json_str=labels_json_str, **kwargs)
@staticmethod
def from_labels_json_str_and_metadata_json(labels_json_str, metadata_json):
return Labels.from_labels_json_str_and_metadata_dict(
labels_json_str, Labels.metadata_json_to_dict(metadata_json))
@property
def is_empty(self):
return self.labels_json_str == '[]'
@property
def label_classes(self):
label_classes = [x['label_class'] for x in self.labels_json]
return set(label_classes)
@property
def label_class_histogram(self):
if self.is_empty:
return {}
else:
histogram = {}
for x in self.labels_json:
cls = x['label_class']
histogram[cls] = histogram.get(cls, 0) + 1
return histogram
def update_labels(self, labels_json, completed_tasks, time_elapsed, user, save=False, check_lock=False):
"""
Update labels, normally called by Django views that are responding to user input received from the client
:param labels_json: labels in JSON form
:param completed_tasks: sequence of LabellingTask instances
:param time_elapsed: labelling time elapsed
:param user: user account being used to edit the labels
:param save: if `True`, invoke `self.save()` afterwards
:param check_lock: if `True`, raise `LabelsLockedError` if this labels instance is locked by another user
"""
# Verify time elapsed is within the bounds of possibility
current_time = timezone.now()
dt_since_last_mod = (current_time - self.last_modified_datetime).total_seconds()
# Allow to either double the time since last modification or time since last modification plus 1 minute
# to account for potential latency in delivery of last edit
permitted_dt = max(dt_since_last_mod * 2.0, dt_since_last_mod + 60.0)
permitted_time = self.edit_time_elapsed + permitted_dt
if time_elapsed > permitted_time:
print('WARNING: rejecting time_elapsed: '
'self.edit_time_elapsed={}, time_elapsed={}, permitted_time={}'.format(
self.edit_time_elapsed, time_elapsed, permitted_time
))
elif time_elapsed >= self.edit_time_elapsed:
self.edit_time_elapsed = time_elapsed
if check_lock:
if self.is_locked_to(user):
raise LabelsLockedError
self.labels_json = labels_json
self.completed_tasks.set(completed_tasks)
if user.is_authenticated:
self.last_modified_by = user
else:
self.last_modified_by = None
self.last_modified_datetime = timezone.now()
if save:
self.save()
def is_lock_active(self):
return timezone.now() < self.lock_expiry_datetime and self.locked_by is not None
def is_locked_to(self, user=None):
lock_active = self.is_lock_active()
if user is not None and not user.is_authenticated:
user = None
if user is None:
return lock_active
else:
return lock_active and user != self.locked_by
def lock(self, to_user, expire_after, save=False):
if self.is_locked_to(to_user):
raise ValueError('Cannot lock Labels(id={}) to user {}; is already locked'.format(
self.id, to_user.username
))
self.locked_by = to_user
expiry = timezone.now() + expire_after
self.lock_expiry_datetime = expiry
if save:
self.save()
def refresh_lock(self, to_user, expire_after, save=False):
if self.is_lock_active():
if self.locked_by != to_user:
raise ValueError('Cannot refresh lock Labels(id={}) for user {}; is already locked by {}'.format(
self.id, to_user.username, self.locked_by.username
))
expiry = timezone.now() + expire_after
self.lock_expiry_datetime = expiry
if save:
self.save()
def unlock(self, from_user, save=False):
if self.is_lock_active():
if from_user != self.locked_by:
raise ValueError('Cannot unlock Labels(id={}) from user {}, it is locked by {}'.format(
self.id, from_user.username, self.locked_by.username
))
self.locked_by = None
self.lock_expiry_datetime = timezone.now()
if save:
self.save()
def __str__(self):
if self.last_modified_by is not None:
return 'Labels {} (last modified by {} at {})'.format(
self.id, self.last_modified_by.username, self.last_modified_datetime)
else:
return 'Labels {}'.format(self.id)
# Allow to either double the time since last modification or time since last modification plus 1 minute
# to account for potential latency in delivery of last edit
permitted_dt = max(dt_since_last_mod * 2.0, dt_since_last_mod + 60.0)
permitted_time = self.edit_time_elapsed + permitted_dt
if time_elapsed > permitted_time:
print('WARNING: rejecting time_elapsed: '
'self.edit_time_elapsed={}, time_elapsed={}, permitted_time={}'.format(
self.edit_time_elapsed, time_elapsed, permitted_time
))
elif time_elapsed >= self.edit_time_elapsed:
self.edit_time_elapsed = time_elapsed
if check_lock:
if self.is_locked_to(user):
raise LabelsLockedError
self.labels_json = labels_json
self.completed_tasks.set(completed_tasks)
if user.is_authenticated:
self.last_modified_by = user
else:
self.last_modified_by = None
self.last_modified_datetime = timezone.now()
if save:
self.save()
def is_lock_active(self):
return timezone.now() < self.lock_expiry_datetime and self.locked_by is not None
def is_locked_to(self, user=None):
lock_active = self.is_lock_active()
if user is not None and not user.is_authenticated:
user = None
if user is None:
return lock_active
else:
return lock_active and user != self.locked_by
def lock(self, to_user, expire_after, save=False):
if self.is_locked_to(to_user):
raise ValueError('Cannot lock Labels(id={}) to user {}; is already locked'.format(
self.id, to_user.username
))
self.locked_by = to_user
expiry = timezone.now() + expire_after
self.lock_expiry_datetime = expiry
if save:
self.save()
def refresh_lock(self, to_user, expire_after, save=False):
if self.is_lock_active():
if self.locked_by != to_user:
raise ValueError('Cannot refresh lock Labels(id={}) for user {}; is already locked by {}'.format(
self.id, to_user.username, self.locked_by.username
))
expiry = timezone.now() + expire_after
self.lock_expiry_datetime = expiry
if save:
self.save()
def unlock(self, from_user, save=False):
if self.is_lock_active():
if from_user != self.locked_by:
raise ValueError('Cannot unlock Labels(id={}) from user {}, it is locked by {}'.format(
self.id, from_user.username, self.locked_by.username
))
self.locked_by = None
self.lock_expiry_datetime = timezone.now()
if save:
self.save()
def __str__(self):
if self.last_modified_by is not None:
return 'Labels {} (last modified by {} at {})'.format(
self.id, self.last_modified_by.username, self.last_modified_datetime)
else:
return 'Labels {}'.format(self.id)
| en | 0.841532 | # Label data # Task completion # Creation date # Time elapsed during editing, in seconds # Last modification user and datetime # Locked by user and expiry datetime # Manager Access metadata (completed tasks, creation date, last modified by, last modified date time) as a dict :return: Access metadata (completed tasks, creation date, last modified by, last modified date time) as a JSON dict. The 'last modified by' user is stored as user name and/or user ID. The completed tasks are stored by name. Dates and datetimes are stored in string form. :return: Convert metadata in dictionary form to JSON form. Dates and date times are converted to string form for storage as JSON. The last_modified_by User object is stored in JSON as a username and user ID. The completed tasks are converted to a list of task names :param metadata: metadata in a dictionary with the following optional keys: 'creation_date', 'last_modified_by', 'last_modified_datetime' and 'completed_tasks' :return: metadata in JSON form Convert metadata as a JSON dictionary to dictionary form. :param metadata_json: metadata as a JSON dictionary :return: metadata in dict form Update labels, normally called by Django views that are responding to user input received from the client :param labels_json: labels in JSON form :param completed_tasks: sequence of LabellingTask instances :param time_elapsed: labelling time elapsed :param user: user account being used to edit the labels :param save: if `True`, invoke `self.save()` afterwards :param check_lock: if `True`, raise `LabelsLockedError` if this labels instance is locked by another user # Verify time elapsed is within the bounds of possibility # Allow to either double the time since last modification or time since last modification plus 1 minute # to account for potential latency in delivery of last edit | 2.238005 | 2 |
examples/example_vanillaFrog.py | omelchert/optfrog | 8 | 6633337 | <filename>examples/example_vanillaFrog.py
"""Script filename: example_vanillaFrog.py
Exemplary calculation of a vanillaFrog trace for data obtained from
the numerical propagation of a short and intense few-cycle optical
pulse in presence of the refractive index profile of an endlessly single
mode photonic crystal fiber.
"""
import sys
import numpy as np
import numpy.fft as nfft
from optfrog import vanillaFrog
from figure import spectrogramFigure
def main():
tMin= -500.0
tMax= 5800.0
wMin= 0.75
wMax= 3.25
fName = './data/exampleData_pulsePropagation.npz'
def fetchData(fileLoc):
data = np.load(fileLoc)
return data['t'], data['Et']
def windowFuncGauss(s0):
return lambda t: np.exp(-t**2/2/s0/s0)/np.sqrt(2.*np.pi)/s0
t,Et = fetchData(fName)
for s0 in [10.0,140.0]:
oName="./FIGS/fig_vanillaFrog_ESM_sigma%4.3lf.png"%(s0)
res = vanillaFrog(t,Et,windowFuncGauss(s0),tLim=(tMin,tMax,10), wLim=(wMin,wMax,3))
spectrogramFigure((t,Et),res,oName=oName)
main()
# EOF: example_vanillaFrog.py
| <filename>examples/example_vanillaFrog.py
"""Script filename: example_vanillaFrog.py
Exemplary calculation of a vanillaFrog trace for data obtained from
the numerical propagation of a short and intense few-cycle optical
pulse in presence of the refractive index profile of an endlessly single
mode photonic crystal fiber.
"""
import sys
import numpy as np
import numpy.fft as nfft
from optfrog import vanillaFrog
from figure import spectrogramFigure
def main():
tMin= -500.0
tMax= 5800.0
wMin= 0.75
wMax= 3.25
fName = './data/exampleData_pulsePropagation.npz'
def fetchData(fileLoc):
data = np.load(fileLoc)
return data['t'], data['Et']
def windowFuncGauss(s0):
return lambda t: np.exp(-t**2/2/s0/s0)/np.sqrt(2.*np.pi)/s0
t,Et = fetchData(fName)
for s0 in [10.0,140.0]:
oName="./FIGS/fig_vanillaFrog_ESM_sigma%4.3lf.png"%(s0)
res = vanillaFrog(t,Et,windowFuncGauss(s0),tLim=(tMin,tMax,10), wLim=(wMin,wMax,3))
spectrogramFigure((t,Et),res,oName=oName)
main()
# EOF: example_vanillaFrog.py
| en | 0.685457 | Script filename: example_vanillaFrog.py Exemplary calculation of a vanillaFrog trace for data obtained from the numerical propagation of a short and intense few-cycle optical pulse in presence of the refractive index profile of an endlessly single mode photonic crystal fiber. # EOF: example_vanillaFrog.py | 2.490702 | 2 |
sets-master/sets-master/sets/core/__init__.py | FedericoMolinaChavez/tesis-research | 0 | 6633338 | <gh_stars>0
from .dataset import Dataset
from .step import Step
from .embedding import Embedding
| from .dataset import Dataset
from .step import Step
from .embedding import Embedding | none | 1 | 1.128216 | 1 |
|
plots/figure_7.py | Wookai/online-collaborative-prediction | 3 | 6633339 | import argparse
import plot_utils as pu
def main(args):
n_runs = 10
models = {
'BIAS': [':', 'national_bias_nruns=%d.mat' % (n_runs,)],
'LIN(v)': ['-.', 'national_lin_v_lambda=32_nruns=%d.mat' % (n_runs,)],
'MF + GP(r)': ['--', 'national_mf_gp_r_seard_L=25_nruns=%d.mat' % (n_runs,)],
'MF + GP(r) + LIN(v)': ['-', 'national_mf_gp_r_lin_v_seard_L=25_lambda=200_nruns=%d.mat' % (n_runs,)],
}
order = ['BIAS', 'LIN(v)', 'MF + GP(r)', 'MF + GP(r) + LIN(v)']
pu.plot_models_results(models, order, pu.get_accuracy, args.save, national=True,
accuracy=True)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-s', '--save')
args = parser.parse_args()
main(args)
| import argparse
import plot_utils as pu
def main(args):
n_runs = 10
models = {
'BIAS': [':', 'national_bias_nruns=%d.mat' % (n_runs,)],
'LIN(v)': ['-.', 'national_lin_v_lambda=32_nruns=%d.mat' % (n_runs,)],
'MF + GP(r)': ['--', 'national_mf_gp_r_seard_L=25_nruns=%d.mat' % (n_runs,)],
'MF + GP(r) + LIN(v)': ['-', 'national_mf_gp_r_lin_v_seard_L=25_lambda=200_nruns=%d.mat' % (n_runs,)],
}
order = ['BIAS', 'LIN(v)', 'MF + GP(r)', 'MF + GP(r) + LIN(v)']
pu.plot_models_results(models, order, pu.get_accuracy, args.save, national=True,
accuracy=True)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-s', '--save')
args = parser.parse_args()
main(args)
| none | 1 | 2.332414 | 2 |
|
dask_yarn/cli.py | thomasjpfan/dask-yarn | 0 | 6633340 | <reponame>thomasjpfan/dask-yarn<gh_stars>0
import argparse
import os
import shutil
import subprocess
import sys
import tempfile
from contextlib import contextmanager
from urllib.parse import urlparse
import skein
from skein.utils import format_table, humanize_timedelta
from tornado.ioloop import IOLoop, TimeoutError
from distributed import Scheduler, Nanny
from distributed.cli.utils import install_signal_handlers
from distributed.proctitle import (
enable_proctitle_on_children,
enable_proctitle_on_current,
)
from . import __version__
from .core import _make_submit_specification, YarnCluster, _get_skein_client
class _Formatter(argparse.HelpFormatter):
"""Format with a fixed argument width, due to bug in argparse measuring
argument widths"""
@property
def _action_max_length(self):
return 16
@_action_max_length.setter
def _action_max_length(self, value):
pass
def _format_args(self, action, default_metavar):
"""Format remainder arguments nicer"""
get_metavar = self._metavar_formatter(action, default_metavar)
if action.nargs == argparse.REMAINDER:
return "[%s...]" % get_metavar(1)
return super(_Formatter, self)._format_args(action, default_metavar)
class _VersionAction(argparse.Action):
def __init__(
self,
option_strings,
version=None,
dest=argparse.SUPPRESS,
default=argparse.SUPPRESS,
help="Show version then exit",
):
super(_VersionAction, self).__init__(
option_strings=option_strings,
dest=dest,
default=default,
nargs=0,
help=help,
)
self.version = version
def __call__(self, parser, namespace, values, option_string=None):
print(self.version % {"prog": parser.prog})
sys.exit(0)
def fail(msg):
print(msg, file=sys.stderr)
sys.exit(1)
def add_help(parser):
parser.add_argument(
"--help", "-h", action="help", help="Show this help message then exit"
)
def arg(*args, **kwargs):
return (args, kwargs)
def subcommand(subparsers, name, help, *args):
def _(func):
parser = subparsers.add_parser(
name,
help=help,
formatter_class=_Formatter,
description=help,
add_help=False,
)
parser.set_defaults(func=func)
for arg in args:
parser.add_argument(*arg[0], **arg[1])
add_help(parser)
func.parser = parser
return func
return _
def node(subs, name, help):
@subcommand(subs, name, help)
def f():
fail(f.parser.format_usage())
f.subs = f.parser.add_subparsers(metavar="command", dest="command")
f.subs.required = True
return f
entry = argparse.ArgumentParser(
prog="dask-yarn",
description="Deploy Dask on Apache YARN",
formatter_class=_Formatter,
add_help=False,
)
add_help(entry)
entry.add_argument(
"--version",
action=_VersionAction,
version="%(prog)s " + __version__,
help="Show version then exit",
)
entry.set_defaults(func=lambda: fail(entry.format_usage()))
entry_subs = entry.add_subparsers(metavar="command", dest="command")
entry_subs.required = True
def _parse_env(service, env):
out = {}
if env is None:
return out
for item in env:
elements = item.split("=")
if len(elements) != 2:
raise ValueError("Invalid parameter to --%s-env: %r" % (service, env))
key, val = elements
out[key.strip()] = val.strip()
return out
# Exposed for testing
def _parse_submit_kwargs(**kwargs):
if kwargs.get("worker_env") is not None:
kwargs["worker_env"] = _parse_env("worker", kwargs["worker_env"])
if kwargs.get("client_env") is not None:
kwargs["client_env"] = _parse_env("client", kwargs["client_env"])
if kwargs.get("tags") is not None:
kwargs["tags"] = set(map(str.strip, kwargs["tags"].split(",")))
if kwargs.get("worker_count") is not None:
kwargs["n_workers"] = kwargs.pop("worker_count")
return kwargs
@subcommand(
entry_subs,
"submit",
"Submit a Dask application to a YARN cluster",
arg("script", help="Path to a python script to run on the client"),
arg(
"args",
nargs=argparse.REMAINDER,
help="Any additional arguments to forward to `script`",
),
arg("--name", help="The application name"),
arg("--queue", help="The queue to deploy to"),
arg(
"--user",
help=(
"The user to submit the application on behalf of. Default "
"is the current user - submitting as a different user "
"requires proxy-user permissions."
),
),
arg(
"--tags",
help=(
"A comma-separated list of strings to use as " "tags for this application."
),
),
arg(
"--environment",
help=(
"Path to the Python environment to use. See the docs "
"for more information"
),
),
arg(
"--deploy-mode",
help=(
"Either 'remote' (default) or 'local'. If 'remote', the "
"scheduler and client will be deployed in a YARN "
"container. If 'local', they will be run locally."
),
),
arg("--worker-count", type=int, help="The number of workers to initially start."),
arg(
"--worker-vcores",
type=int,
help="The number of virtual cores to allocate per worker.",
),
arg(
"--worker-memory",
type=str,
help=(
"The amount of memory to allocate per worker. Accepts a "
"unit suffix (e.g. '2 GiB' or '4096 MiB'). Will be "
"rounded up to the nearest MiB."
),
),
arg(
"--worker-restarts",
type=int,
help=(
"The maximum number of worker restarts to allow before "
"failing the application. Default is unlimited."
),
),
arg(
"--worker-env",
type=str,
action="append",
help=(
"Environment variables to set on the workers. Pass a "
"key-value pair like ``--worker-env key=val``. May "
"be used more than once."
),
),
arg(
"--client-vcores",
type=int,
help="The number of virtual cores to allocate for the client.",
),
arg(
"--client-memory",
type=str,
help=(
"The amount of memory to allocate for the client. "
"Accepts a unit suffix (e.g. '2 GiB' or '4096 MiB'). "
"Will be rounded up to the nearest MiB."
),
),
arg(
"--client-env",
type=str,
action="append",
help=(
"Environment variables to set on the client. Pass a "
"key-value pair like ``--client-env key=val``. May "
"be used more than once."
),
),
arg(
"--scheduler-vcores",
type=int,
help="The number of virtual cores to allocate for the scheduler.",
),
arg(
"--scheduler-memory",
type=str,
help=(
"The amount of memory to allocate for the scheduler. "
"Accepts a unit suffix (e.g. '2 GiB' or '4096 MiB'). "
"Will be rounded up to the nearest MiB."
),
),
arg(
"--temporary-security-credentials",
action="store_true",
help=(
"Instead of using a consistent set of TLS credentials "
"for all clusters, create a fresh set just for this "
"application."
),
),
)
def submit(script, args=None, temporary_security_credentials=False, **kwargs):
kwargs = _parse_submit_kwargs(**kwargs)
args = args or []
spec = _make_submit_specification(script, args=args, **kwargs)
if temporary_security_credentials:
security = skein.Security.new_credentials()
else:
security = None
skein_client = _get_skein_client(security=security)
if "dask.scheduler" in spec.services:
# deploy_mode == 'remote'
app_id = skein_client.submit(spec)
print(app_id)
else:
# deploy_mode == 'local'
if not os.path.exists(script):
raise ValueError("%r doesn't exist locally" % script)
with maybe_tempdir(
temporary_security_credentials
) as security_dir, YarnCluster.from_specification(
spec, skein_client=skein_client
) as cluster:
env = dict(os.environ)
env.update(
{
"DASK_APPLICATION_ID": cluster.app_id,
"DASK_APPMASTER_ADDRESS": cluster.application_client.address,
}
)
if temporary_security_credentials:
security.to_directory(security_dir)
env["DASK_SECURITY_CREDENTIALS"] = security_dir
retcode = subprocess.call([sys.executable, script] + args, env=env)
if retcode == 0:
cluster.shutdown("SUCCEEDED")
else:
cluster.shutdown(
"FAILED",
"Exception in submitted dask application, "
"see logs for more details",
)
sys.exit(retcode)
@contextmanager
def maybe_tempdir(create=False):
"""Contextmanager for consistent syntax for maybe creating a tempdir"""
if create:
try:
path = tempfile.mkdtemp()
yield path
finally:
shutil.rmtree(path)
else:
yield None
app_id = arg("app_id", help="The application id", metavar="APP_ID")
@subcommand(
entry_subs, "status", "Check the status of a submitted Dask application", app_id
)
def status(app_id):
report = _get_skein_client().application_report(app_id)
header = [
"application_id",
"name",
"state",
"status",
"containers",
"vcores",
"memory",
"runtime",
]
data = [
(
report.id,
report.name,
report.state,
report.final_status,
report.usage.num_used_containers,
report.usage.used_resources.vcores,
report.usage.used_resources.memory,
humanize_timedelta(report.runtime),
)
]
print(format_table(header, data))
@subcommand(entry_subs, "kill", "Kill a Dask application", app_id)
def kill(app_id):
_get_skein_client().kill_application(app_id)
services = node(entry_subs, "services", "Manage Dask services")
@subcommand(services.subs, "scheduler", "Start a Dask scheduler process")
def scheduler(): # pragma: nocover
app_client = skein.ApplicationClient.from_current()
enable_proctitle_on_current()
enable_proctitle_on_children()
if sys.platform.startswith("linux"):
import resource # module fails importing on Windows
soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
limit = max(soft, hard // 2)
resource.setrlimit(resource.RLIMIT_NOFILE, (limit, hard))
loop = IOLoop.current()
scheduler = Scheduler(loop=loop, dashboard_address=("", 0))
install_signal_handlers(loop)
def post_addresses():
# Set dask.dashboard before dask.scheduler since the YarnCluster object
# waits on dask.scheduler only
if "dashboard" in scheduler.services:
bokeh_port = scheduler.services["dashboard"].port
bokeh_host = urlparse(scheduler.address).hostname
bokeh_address = "http://%s:%d" % (bokeh_host, bokeh_port)
app_client.kv["dask.dashboard"] = bokeh_address.encode()
app_client.kv["dask.scheduler"] = scheduler.address.encode()
async def run():
await scheduler
await loop.run_in_executor(None, post_addresses)
await scheduler.finished()
try:
loop.run_sync(run)
except (KeyboardInterrupt, TimeoutError):
pass
finally:
scheduler.stop()
@subcommand(
services.subs,
"worker",
"Start a Dask worker process",
arg(
"--nthreads",
type=int,
help=("Number of threads. Defaults to number of vcores in " "container"),
),
arg(
"--memory_limit",
type=str,
help=(
"Maximum memory available to the worker. This can be an "
"integer (in bytes), a string (like '5 GiB' or '500 "
"MiB'), or 0 (no memory management). Defaults to the "
"container memory limit."
),
),
)
def worker(nthreads=None, memory_limit=None): # pragma: nocover
enable_proctitle_on_current()
enable_proctitle_on_children()
if memory_limit is None:
memory_limit = int(skein.properties.container_resources.memory * 2 ** 20)
if nthreads is None:
nthreads = skein.properties.container_resources.vcores
app_client = skein.ApplicationClient.from_current()
scheduler = app_client.kv.wait("dask.scheduler").decode()
loop = IOLoop.current()
worker = Nanny(
scheduler,
loop=loop,
memory_limit=memory_limit,
worker_port=0,
nthreads=nthreads,
name=skein.properties.container_id,
)
async def cleanup():
await worker.close(timeout=2)
install_signal_handlers(loop, cleanup=cleanup)
async def run():
await worker
await worker.finished()
try:
loop.run_sync(run)
except (KeyboardInterrupt, TimeoutError):
pass
@subcommand(
services.subs,
"client",
"Start a Dask client process",
arg("script", help="Path to a Python script to run."),
arg(
"args",
nargs=argparse.REMAINDER,
help="Any additional arguments to forward to `script`",
),
)
def client(script, args=None): # pragma: nocover
app = skein.ApplicationClient.from_current()
args = args or []
if not os.path.exists(script):
raise ValueError("%r doesn't exist" % script)
retcode = subprocess.call([sys.executable, script] + args)
if retcode == 0:
app.shutdown("SUCCEEDED")
else:
print(
"User submitted application %s failed with returncode "
"%d, shutting down." % (script, retcode)
)
app.shutdown(
"FAILED",
"Exception in submitted dask application, " "see logs for more details",
)
def main(args=None):
kwargs = vars(entry.parse_args(args=args))
kwargs.pop("command", None) # Drop unnecessary `command` arg
func = kwargs.pop("func")
func(**kwargs)
sys.exit(0)
if __name__ == "__main__": # pragma: nocover
main()
| import argparse
import os
import shutil
import subprocess
import sys
import tempfile
from contextlib import contextmanager
from urllib.parse import urlparse
import skein
from skein.utils import format_table, humanize_timedelta
from tornado.ioloop import IOLoop, TimeoutError
from distributed import Scheduler, Nanny
from distributed.cli.utils import install_signal_handlers
from distributed.proctitle import (
enable_proctitle_on_children,
enable_proctitle_on_current,
)
from . import __version__
from .core import _make_submit_specification, YarnCluster, _get_skein_client
class _Formatter(argparse.HelpFormatter):
"""Format with a fixed argument width, due to bug in argparse measuring
argument widths"""
@property
def _action_max_length(self):
return 16
@_action_max_length.setter
def _action_max_length(self, value):
pass
def _format_args(self, action, default_metavar):
"""Format remainder arguments nicer"""
get_metavar = self._metavar_formatter(action, default_metavar)
if action.nargs == argparse.REMAINDER:
return "[%s...]" % get_metavar(1)
return super(_Formatter, self)._format_args(action, default_metavar)
class _VersionAction(argparse.Action):
def __init__(
self,
option_strings,
version=None,
dest=argparse.SUPPRESS,
default=argparse.SUPPRESS,
help="Show version then exit",
):
super(_VersionAction, self).__init__(
option_strings=option_strings,
dest=dest,
default=default,
nargs=0,
help=help,
)
self.version = version
def __call__(self, parser, namespace, values, option_string=None):
print(self.version % {"prog": parser.prog})
sys.exit(0)
def fail(msg):
print(msg, file=sys.stderr)
sys.exit(1)
def add_help(parser):
parser.add_argument(
"--help", "-h", action="help", help="Show this help message then exit"
)
def arg(*args, **kwargs):
return (args, kwargs)
def subcommand(subparsers, name, help, *args):
def _(func):
parser = subparsers.add_parser(
name,
help=help,
formatter_class=_Formatter,
description=help,
add_help=False,
)
parser.set_defaults(func=func)
for arg in args:
parser.add_argument(*arg[0], **arg[1])
add_help(parser)
func.parser = parser
return func
return _
def node(subs, name, help):
@subcommand(subs, name, help)
def f():
fail(f.parser.format_usage())
f.subs = f.parser.add_subparsers(metavar="command", dest="command")
f.subs.required = True
return f
entry = argparse.ArgumentParser(
prog="dask-yarn",
description="Deploy Dask on Apache YARN",
formatter_class=_Formatter,
add_help=False,
)
add_help(entry)
entry.add_argument(
"--version",
action=_VersionAction,
version="%(prog)s " + __version__,
help="Show version then exit",
)
entry.set_defaults(func=lambda: fail(entry.format_usage()))
entry_subs = entry.add_subparsers(metavar="command", dest="command")
entry_subs.required = True
def _parse_env(service, env):
out = {}
if env is None:
return out
for item in env:
elements = item.split("=")
if len(elements) != 2:
raise ValueError("Invalid parameter to --%s-env: %r" % (service, env))
key, val = elements
out[key.strip()] = val.strip()
return out
# Exposed for testing
def _parse_submit_kwargs(**kwargs):
if kwargs.get("worker_env") is not None:
kwargs["worker_env"] = _parse_env("worker", kwargs["worker_env"])
if kwargs.get("client_env") is not None:
kwargs["client_env"] = _parse_env("client", kwargs["client_env"])
if kwargs.get("tags") is not None:
kwargs["tags"] = set(map(str.strip, kwargs["tags"].split(",")))
if kwargs.get("worker_count") is not None:
kwargs["n_workers"] = kwargs.pop("worker_count")
return kwargs
@subcommand(
entry_subs,
"submit",
"Submit a Dask application to a YARN cluster",
arg("script", help="Path to a python script to run on the client"),
arg(
"args",
nargs=argparse.REMAINDER,
help="Any additional arguments to forward to `script`",
),
arg("--name", help="The application name"),
arg("--queue", help="The queue to deploy to"),
arg(
"--user",
help=(
"The user to submit the application on behalf of. Default "
"is the current user - submitting as a different user "
"requires proxy-user permissions."
),
),
arg(
"--tags",
help=(
"A comma-separated list of strings to use as " "tags for this application."
),
),
arg(
"--environment",
help=(
"Path to the Python environment to use. See the docs "
"for more information"
),
),
arg(
"--deploy-mode",
help=(
"Either 'remote' (default) or 'local'. If 'remote', the "
"scheduler and client will be deployed in a YARN "
"container. If 'local', they will be run locally."
),
),
arg("--worker-count", type=int, help="The number of workers to initially start."),
arg(
"--worker-vcores",
type=int,
help="The number of virtual cores to allocate per worker.",
),
arg(
"--worker-memory",
type=str,
help=(
"The amount of memory to allocate per worker. Accepts a "
"unit suffix (e.g. '2 GiB' or '4096 MiB'). Will be "
"rounded up to the nearest MiB."
),
),
arg(
"--worker-restarts",
type=int,
help=(
"The maximum number of worker restarts to allow before "
"failing the application. Default is unlimited."
),
),
arg(
"--worker-env",
type=str,
action="append",
help=(
"Environment variables to set on the workers. Pass a "
"key-value pair like ``--worker-env key=val``. May "
"be used more than once."
),
),
arg(
"--client-vcores",
type=int,
help="The number of virtual cores to allocate for the client.",
),
arg(
"--client-memory",
type=str,
help=(
"The amount of memory to allocate for the client. "
"Accepts a unit suffix (e.g. '2 GiB' or '4096 MiB'). "
"Will be rounded up to the nearest MiB."
),
),
arg(
"--client-env",
type=str,
action="append",
help=(
"Environment variables to set on the client. Pass a "
"key-value pair like ``--client-env key=val``. May "
"be used more than once."
),
),
arg(
"--scheduler-vcores",
type=int,
help="The number of virtual cores to allocate for the scheduler.",
),
arg(
"--scheduler-memory",
type=str,
help=(
"The amount of memory to allocate for the scheduler. "
"Accepts a unit suffix (e.g. '2 GiB' or '4096 MiB'). "
"Will be rounded up to the nearest MiB."
),
),
arg(
"--temporary-security-credentials",
action="store_true",
help=(
"Instead of using a consistent set of TLS credentials "
"for all clusters, create a fresh set just for this "
"application."
),
),
)
def submit(script, args=None, temporary_security_credentials=False, **kwargs):
kwargs = _parse_submit_kwargs(**kwargs)
args = args or []
spec = _make_submit_specification(script, args=args, **kwargs)
if temporary_security_credentials:
security = skein.Security.new_credentials()
else:
security = None
skein_client = _get_skein_client(security=security)
if "dask.scheduler" in spec.services:
# deploy_mode == 'remote'
app_id = skein_client.submit(spec)
print(app_id)
else:
# deploy_mode == 'local'
if not os.path.exists(script):
raise ValueError("%r doesn't exist locally" % script)
with maybe_tempdir(
temporary_security_credentials
) as security_dir, YarnCluster.from_specification(
spec, skein_client=skein_client
) as cluster:
env = dict(os.environ)
env.update(
{
"DASK_APPLICATION_ID": cluster.app_id,
"DASK_APPMASTER_ADDRESS": cluster.application_client.address,
}
)
if temporary_security_credentials:
security.to_directory(security_dir)
env["DASK_SECURITY_CREDENTIALS"] = security_dir
retcode = subprocess.call([sys.executable, script] + args, env=env)
if retcode == 0:
cluster.shutdown("SUCCEEDED")
else:
cluster.shutdown(
"FAILED",
"Exception in submitted dask application, "
"see logs for more details",
)
sys.exit(retcode)
@contextmanager
def maybe_tempdir(create=False):
"""Contextmanager for consistent syntax for maybe creating a tempdir"""
if create:
try:
path = tempfile.mkdtemp()
yield path
finally:
shutil.rmtree(path)
else:
yield None
app_id = arg("app_id", help="The application id", metavar="APP_ID")
@subcommand(
entry_subs, "status", "Check the status of a submitted Dask application", app_id
)
def status(app_id):
report = _get_skein_client().application_report(app_id)
header = [
"application_id",
"name",
"state",
"status",
"containers",
"vcores",
"memory",
"runtime",
]
data = [
(
report.id,
report.name,
report.state,
report.final_status,
report.usage.num_used_containers,
report.usage.used_resources.vcores,
report.usage.used_resources.memory,
humanize_timedelta(report.runtime),
)
]
print(format_table(header, data))
@subcommand(entry_subs, "kill", "Kill a Dask application", app_id)
def kill(app_id):
_get_skein_client().kill_application(app_id)
services = node(entry_subs, "services", "Manage Dask services")
@subcommand(services.subs, "scheduler", "Start a Dask scheduler process")
def scheduler(): # pragma: nocover
app_client = skein.ApplicationClient.from_current()
enable_proctitle_on_current()
enable_proctitle_on_children()
if sys.platform.startswith("linux"):
import resource # module fails importing on Windows
soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
limit = max(soft, hard // 2)
resource.setrlimit(resource.RLIMIT_NOFILE, (limit, hard))
loop = IOLoop.current()
scheduler = Scheduler(loop=loop, dashboard_address=("", 0))
install_signal_handlers(loop)
def post_addresses():
# Set dask.dashboard before dask.scheduler since the YarnCluster object
# waits on dask.scheduler only
if "dashboard" in scheduler.services:
bokeh_port = scheduler.services["dashboard"].port
bokeh_host = urlparse(scheduler.address).hostname
bokeh_address = "http://%s:%d" % (bokeh_host, bokeh_port)
app_client.kv["dask.dashboard"] = bokeh_address.encode()
app_client.kv["dask.scheduler"] = scheduler.address.encode()
async def run():
await scheduler
await loop.run_in_executor(None, post_addresses)
await scheduler.finished()
try:
loop.run_sync(run)
except (KeyboardInterrupt, TimeoutError):
pass
finally:
scheduler.stop()
@subcommand(
services.subs,
"worker",
"Start a Dask worker process",
arg(
"--nthreads",
type=int,
help=("Number of threads. Defaults to number of vcores in " "container"),
),
arg(
"--memory_limit",
type=str,
help=(
"Maximum memory available to the worker. This can be an "
"integer (in bytes), a string (like '5 GiB' or '500 "
"MiB'), or 0 (no memory management). Defaults to the "
"container memory limit."
),
),
)
def worker(nthreads=None, memory_limit=None): # pragma: nocover
enable_proctitle_on_current()
enable_proctitle_on_children()
if memory_limit is None:
memory_limit = int(skein.properties.container_resources.memory * 2 ** 20)
if nthreads is None:
nthreads = skein.properties.container_resources.vcores
app_client = skein.ApplicationClient.from_current()
scheduler = app_client.kv.wait("dask.scheduler").decode()
loop = IOLoop.current()
worker = Nanny(
scheduler,
loop=loop,
memory_limit=memory_limit,
worker_port=0,
nthreads=nthreads,
name=skein.properties.container_id,
)
async def cleanup():
await worker.close(timeout=2)
install_signal_handlers(loop, cleanup=cleanup)
async def run():
await worker
await worker.finished()
try:
loop.run_sync(run)
except (KeyboardInterrupt, TimeoutError):
pass
@subcommand(
services.subs,
"client",
"Start a Dask client process",
arg("script", help="Path to a Python script to run."),
arg(
"args",
nargs=argparse.REMAINDER,
help="Any additional arguments to forward to `script`",
),
)
def client(script, args=None): # pragma: nocover
app = skein.ApplicationClient.from_current()
args = args or []
if not os.path.exists(script):
raise ValueError("%r doesn't exist" % script)
retcode = subprocess.call([sys.executable, script] + args)
if retcode == 0:
app.shutdown("SUCCEEDED")
else:
print(
"User submitted application %s failed with returncode "
"%d, shutting down." % (script, retcode)
)
app.shutdown(
"FAILED",
"Exception in submitted dask application, " "see logs for more details",
)
def main(args=None):
kwargs = vars(entry.parse_args(args=args))
kwargs.pop("command", None) # Drop unnecessary `command` arg
func = kwargs.pop("func")
func(**kwargs)
sys.exit(0)
if __name__ == "__main__": # pragma: nocover
main() | en | 0.561431 | Format with a fixed argument width, due to bug in argparse measuring argument widths Format remainder arguments nicer # Exposed for testing # deploy_mode == 'remote' # deploy_mode == 'local' Contextmanager for consistent syntax for maybe creating a tempdir # pragma: nocover # module fails importing on Windows # Set dask.dashboard before dask.scheduler since the YarnCluster object # waits on dask.scheduler only # pragma: nocover # pragma: nocover # Drop unnecessary `command` arg # pragma: nocover | 1.992444 | 2 |
lib/fasta.py | viadanna/rosalind-python | 0 | 6633341 | <gh_stars>0
from .sequences import DNA
def parse_fasta(lines, _type):
name = None
sequence = ''
for line in lines:
if line.startswith('>'):
if name:
yield DNA(sequence, name)
sequence = ''
name = line[1:]
else:
sequence += line
yield _type(sequence, name)
def read_fasta(string, _type=DNA):
return parse_fasta(string.split('\n'), _type)
| from .sequences import DNA
def parse_fasta(lines, _type):
name = None
sequence = ''
for line in lines:
if line.startswith('>'):
if name:
yield DNA(sequence, name)
sequence = ''
name = line[1:]
else:
sequence += line
yield _type(sequence, name)
def read_fasta(string, _type=DNA):
return parse_fasta(string.split('\n'), _type) | none | 1 | 3.520746 | 4 |
|
pysper/parser/captures.py | arvy/sperf | 0 | 6633342 | # Copyright 2020 DataStax, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" capture rules """
from pysper.parser.rules import capture
#pylint: disable=line-too-long
system_capture_rule = capture(
r' *(?P<level>[A-Z]*) *\[(?P<thread_name>[^\]]*?)[:_-]?(?P<thread_id>[0-9]*)\] (?P<date>.{10} .{12}) *(?P<source_file>[^:]*):(?P<source_line>[0-9]*) - (?P<message>.*)',
r' *(?P<level>[A-Z]*) \[(?P<thread_name>[^\]]*?)[:_-]?(?P<thread_id>[0-9]*)\] (?P<date>.{10} .{12}) (?P<source_file>[^ ]*) \(line (?P<source_line>[0-9]*)\) (?P<message>.*)',
#db-2552 5.x format
#ReadStage 0 0 4248543 0 0
r'^(?P<pool_name>[A-Za-z_-]+) +((?P<active>[0-9]+)|n\/a) +(?P<pending>[0-9]+)(\/(?P<pending_responses>[0-9]+))?( +(?P<completed>[0-9]+) +(?P<blocked>[0-9]+) +(?P<all_time_blocked>[0-9]+))$',
#db-2552 5.x header format
#Pool Name Active Pending Completed Blocked All Time Blocked
r'^(?P<header>.*) Name +Active +Pending +Completed +Blocked +All Time Blocked$',
#db-2552 statuslogger format
r'^(?P<pool_name>[A-Za-z0-9_/#]+) +((?P<active>[0-9]+)|n/a) +(?P<pending>[0-9]+) +\(((?P<backpressure>[0-9]+)|N/A)\) +((?P<delayed>[0-9]+)|N/A) +(?P<completed>[0-9]+) +((?P<blocked>[0-9]+)|N/A) +(?P<all_time_blocked>[0-9]+)$',
#db-2552 statuslogger header format
r'^(?P<header>.*) Name +Active +Pending \(w/Backpressure\) +(?P<delayed_header>.*) +Completed +Blocked +All Time Blocked$',
#db-2552 table header format ColumnFamily matches column_family_header
r'^(?P<column_family_header>.*) +Memtable ops,data',
#db-2552 table format
r'^(?P<keyspace>[^.]*)\.(?P<table>[^ ]*) +(?P<ops>[0-9]*),(?P<data>[0-9]*)$',
#db-2552 cache header format
r'^(?P<cache_header>.*) Type +Size +Capacity +KeysToSave\(Provider\)?$',
#db-2552 cache format
r'^(?P<cache_type>[A-Za-z]*Cache(?! Type)) *(?P<size>[0-9]*) *(?P<capacity>[0-9]*) *(?P<keys_to_save>[^ ]*) *(?P<provider>[A-Za-z_.$]*)$'
)
output_capture_rule = capture(
#INFO [main] 2019-06-21 02:59:14,304 DatabaseDescriptor.java:418 - DiskAccessMode is standard, indexAccessMode is standard, commitlogAccessMode is standard
r' *(?P<level>[A-Z]*) *\[(?P<thread_name>[^\]]*?)[:_-]?(?P<thread_id>[0-9]*)\] (?P<date>.{10} .{12}) *(?P<source_file>[^:]*):(?P<source_line>[0-9]*) - (?P<message>.*)',
r' *(?P<level>[A-Z]*) \[(?P<thread_name>[^\]]*?)[:_-]?(?P<thread_id>[0-9]*)\] (?P<date>.{10} .{12}) (?P<source_file>[^ ]*) \(line (?P<source_line>[0-9]*)\) (?P<message>.*)',
#format with no thread
r' *(?P<level>[A-Z]*) *(?P<date>.{12}) *(?P<source_file>[^:]*):(?P<source_line>[0-9]*) - (?P<message>.*)',
#short format
r'(?P<level>[A-Z]*) *\s(?P<date>.{12}) *\s(?P<message>.*)'
)
| # Copyright 2020 DataStax, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" capture rules """
from pysper.parser.rules import capture
#pylint: disable=line-too-long
system_capture_rule = capture(
r' *(?P<level>[A-Z]*) *\[(?P<thread_name>[^\]]*?)[:_-]?(?P<thread_id>[0-9]*)\] (?P<date>.{10} .{12}) *(?P<source_file>[^:]*):(?P<source_line>[0-9]*) - (?P<message>.*)',
r' *(?P<level>[A-Z]*) \[(?P<thread_name>[^\]]*?)[:_-]?(?P<thread_id>[0-9]*)\] (?P<date>.{10} .{12}) (?P<source_file>[^ ]*) \(line (?P<source_line>[0-9]*)\) (?P<message>.*)',
#db-2552 5.x format
#ReadStage 0 0 4248543 0 0
r'^(?P<pool_name>[A-Za-z_-]+) +((?P<active>[0-9]+)|n\/a) +(?P<pending>[0-9]+)(\/(?P<pending_responses>[0-9]+))?( +(?P<completed>[0-9]+) +(?P<blocked>[0-9]+) +(?P<all_time_blocked>[0-9]+))$',
#db-2552 5.x header format
#Pool Name Active Pending Completed Blocked All Time Blocked
r'^(?P<header>.*) Name +Active +Pending +Completed +Blocked +All Time Blocked$',
#db-2552 statuslogger format
r'^(?P<pool_name>[A-Za-z0-9_/#]+) +((?P<active>[0-9]+)|n/a) +(?P<pending>[0-9]+) +\(((?P<backpressure>[0-9]+)|N/A)\) +((?P<delayed>[0-9]+)|N/A) +(?P<completed>[0-9]+) +((?P<blocked>[0-9]+)|N/A) +(?P<all_time_blocked>[0-9]+)$',
#db-2552 statuslogger header format
r'^(?P<header>.*) Name +Active +Pending \(w/Backpressure\) +(?P<delayed_header>.*) +Completed +Blocked +All Time Blocked$',
#db-2552 table header format ColumnFamily matches column_family_header
r'^(?P<column_family_header>.*) +Memtable ops,data',
#db-2552 table format
r'^(?P<keyspace>[^.]*)\.(?P<table>[^ ]*) +(?P<ops>[0-9]*),(?P<data>[0-9]*)$',
#db-2552 cache header format
r'^(?P<cache_header>.*) Type +Size +Capacity +KeysToSave\(Provider\)?$',
#db-2552 cache format
r'^(?P<cache_type>[A-Za-z]*Cache(?! Type)) *(?P<size>[0-9]*) *(?P<capacity>[0-9]*) *(?P<keys_to_save>[^ ]*) *(?P<provider>[A-Za-z_.$]*)$'
)
output_capture_rule = capture(
#INFO [main] 2019-06-21 02:59:14,304 DatabaseDescriptor.java:418 - DiskAccessMode is standard, indexAccessMode is standard, commitlogAccessMode is standard
r' *(?P<level>[A-Z]*) *\[(?P<thread_name>[^\]]*?)[:_-]?(?P<thread_id>[0-9]*)\] (?P<date>.{10} .{12}) *(?P<source_file>[^:]*):(?P<source_line>[0-9]*) - (?P<message>.*)',
r' *(?P<level>[A-Z]*) \[(?P<thread_name>[^\]]*?)[:_-]?(?P<thread_id>[0-9]*)\] (?P<date>.{10} .{12}) (?P<source_file>[^ ]*) \(line (?P<source_line>[0-9]*)\) (?P<message>.*)',
#format with no thread
r' *(?P<level>[A-Z]*) *(?P<date>.{12}) *(?P<source_file>[^:]*):(?P<source_line>[0-9]*) - (?P<message>.*)',
#short format
r'(?P<level>[A-Z]*) *\s(?P<date>.{12}) *\s(?P<message>.*)'
)
| en | 0.626612 | # Copyright 2020 DataStax, Inc # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. capture rules #pylint: disable=line-too-long #db-2552 5.x format #ReadStage 0 0 4248543 0 0 #db-2552 5.x header format #Pool Name Active Pending Completed Blocked All Time Blocked #db-2552 statuslogger format #]+) +((?P<active>[0-9]+)|n/a) +(?P<pending>[0-9]+) +\(((?P<backpressure>[0-9]+)|N/A)\) +((?P<delayed>[0-9]+)|N/A) +(?P<completed>[0-9]+) +((?P<blocked>[0-9]+)|N/A) +(?P<all_time_blocked>[0-9]+)$', #db-2552 statuslogger header format #db-2552 table header format ColumnFamily matches column_family_header #db-2552 table format #db-2552 cache header format #db-2552 cache format #INFO [main] 2019-06-21 02:59:14,304 DatabaseDescriptor.java:418 - DiskAccessMode is standard, indexAccessMode is standard, commitlogAccessMode is standard #format with no thread #short format | 1.678249 | 2 |
app/api/v2/__init__.py | queenfiona/SendITc3 | 0 | 6633343 | <reponame>queenfiona/SendITc3
"""Docstring for v2's __init__.py."""
from flask import Blueprint
from flask_restful import Api
from .views.user_views import UserRegistration, UserLogin
from .views.parcel_views import (
ParcelOrderView, UserOrderView, AllOrdersView, StatusView, DestinationView,
PresentLocView, CancelOrderView)
version_2 = Blueprint('apiv2', __name__)
api = Api(version_2, prefix="/api/v2")
api.add_resource(UserRegistration, "/auth/signup")
api.add_resource(UserLogin, "/auth/login")
api.add_resource(ParcelOrderView, "/parcels")
api.add_resource(UserOrderView, "/parcels/<string:username>")
api.add_resource(AllOrdersView, "/parcels")
api.add_resource(StatusView, "/parcels/<int:parcel_id>/status")
api.add_resource(CancelOrderView, "/parcels/<int:parcel_id>/cancel")
api.add_resource(DestinationView, "/parcels/<int:parcel_id>/destination")
api.add_resource(PresentLocView,
"/parcels/<int:parcel_id>/presentLocation")
| """Docstring for v2's __init__.py."""
from flask import Blueprint
from flask_restful import Api
from .views.user_views import UserRegistration, UserLogin
from .views.parcel_views import (
ParcelOrderView, UserOrderView, AllOrdersView, StatusView, DestinationView,
PresentLocView, CancelOrderView)
version_2 = Blueprint('apiv2', __name__)
api = Api(version_2, prefix="/api/v2")
api.add_resource(UserRegistration, "/auth/signup")
api.add_resource(UserLogin, "/auth/login")
api.add_resource(ParcelOrderView, "/parcels")
api.add_resource(UserOrderView, "/parcels/<string:username>")
api.add_resource(AllOrdersView, "/parcels")
api.add_resource(StatusView, "/parcels/<int:parcel_id>/status")
api.add_resource(CancelOrderView, "/parcels/<int:parcel_id>/cancel")
api.add_resource(DestinationView, "/parcels/<int:parcel_id>/destination")
api.add_resource(PresentLocView,
"/parcels/<int:parcel_id>/presentLocation") | en | 0.480067 | Docstring for v2's __init__.py. | 2.282457 | 2 |
Models/transfer_learning_models.py | isse-augsburg/PermeabilityNets | 1 | 6633344 | <reponame>isse-augsburg/PermeabilityNets
import torch
import torch.nn as nn
class ModelWrapper(nn.Module):
"""
Wrapper for pretrained torchvision models. Changes the last layer.
"""
def __init__(self, model, out_features=1):
super(ModelWrapper, self).__init__()
self.model = model
'''self.model.conv1 = torch.nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3,
bias=False)'''
num_ftrs = self.model.fc.in_features
self.model.fc = torch.nn.Linear(num_ftrs, out_features)
def forward(self, x):
out = self.model(x)
out = torch.sigmoid(out)
return out
| import torch
import torch.nn as nn
class ModelWrapper(nn.Module):
"""
Wrapper for pretrained torchvision models. Changes the last layer.
"""
def __init__(self, model, out_features=1):
super(ModelWrapper, self).__init__()
self.model = model
'''self.model.conv1 = torch.nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3,
bias=False)'''
num_ftrs = self.model.fc.in_features
self.model.fc = torch.nn.Linear(num_ftrs, out_features)
def forward(self, x):
out = self.model(x)
out = torch.sigmoid(out)
return out | en | 0.451073 | Wrapper for pretrained torchvision models. Changes the last layer. self.model.conv1 = torch.nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3, bias=False) | 3.246609 | 3 |
bootcamp/feeds/models.py | Fadykhallaf/Signet | 0 | 6633345 | from __future__ import unicode_literals
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.html import escape
from django.utils.translation import ugettext_lazy as _
import hashlib
import os
import urllib
from django.conf import settings
from bootcamp import settings
import bleach
from bootcamp.activities.models import Activity
@python_2_unicode_compatible
class Feed(models.Model):
user = models.ForeignKey(settings.AUTH_USER_MODEL)
date = models.DateTimeField(auto_now_add=True)
post = models.TextField(max_length=255)
parent = models.ForeignKey('Feed', null=True, blank=True)
likes = models.IntegerField(default=0)
comments = models.IntegerField(default=0)
class Meta:
verbose_name = _('Feed')
verbose_name_plural = _('Feeds')
ordering = ('-date',)
def __str__(self):
return self.post
@staticmethod
def get_feeds(from_feed=None):
if from_feed is not None:
feeds = Feed.objects.filter(parent=None, id__lte=from_feed)
else:
feeds = Feed.objects.filter(parent=None)
return feeds
@staticmethod
def get_feeds_after(feed):
feeds = Feed.objects.filter(parent=None, id__gt=feed)
return feeds
def get_comments(self):
return Feed.objects.filter(parent=self).order_by('date')
def calculate_likes(self):
likes = Activity.objects.filter(activity_type=Activity.LIKE,
feed=self.pk).count()
self.likes = likes
self.save()
return self.likes
def get_likes(self):
likes = Activity.objects.filter(activity_type=Activity.LIKE,
feed=self.pk)
return likes
def get_likers(self):
likes = self.get_likes()
likers = []
for like in likes:
likers.append(like.user)
return likers
def calculate_comments(self):
self.comments = Feed.objects.filter(parent=self).count()
self.save()
return self.comments
def comment(self, user, post):
feed_comment = Feed(user=user, post=post, parent=self)
feed_comment.save()
self.comments = Feed.objects.filter(parent=self).count()
self.save()
return feed_comment
def linkfy_post(self):
return bleach.linkify(escape(self.post))
def get_picture(self):
no_picture = 'http://trybootcamp.vitorfs.com/static/img/user.png'
try:
filename = settings.MEDIA_ROOT + '/profile_pictures/' + \
self.user.username + '.jpg'
picture_url = settings.MEDIA_URL + 'profile_pictures/' + \
self.user.username + '.jpg'
if os.path.isfile(filename):
return picture_url
else:
gravatar_url = 'http://www.gravatar.com/avatar/{0}?{1}'.format(
hashlib.md5(self.user.email.lower()).hexdigest(),
urllib.urlencode({'d': no_picture, 's': '256'})
)
return gravatar_url
except Exception:
return no_picture
| from __future__ import unicode_literals
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.html import escape
from django.utils.translation import ugettext_lazy as _
import hashlib
import os
import urllib
from django.conf import settings
from bootcamp import settings
import bleach
from bootcamp.activities.models import Activity
@python_2_unicode_compatible
class Feed(models.Model):
user = models.ForeignKey(settings.AUTH_USER_MODEL)
date = models.DateTimeField(auto_now_add=True)
post = models.TextField(max_length=255)
parent = models.ForeignKey('Feed', null=True, blank=True)
likes = models.IntegerField(default=0)
comments = models.IntegerField(default=0)
class Meta:
verbose_name = _('Feed')
verbose_name_plural = _('Feeds')
ordering = ('-date',)
def __str__(self):
return self.post
@staticmethod
def get_feeds(from_feed=None):
if from_feed is not None:
feeds = Feed.objects.filter(parent=None, id__lte=from_feed)
else:
feeds = Feed.objects.filter(parent=None)
return feeds
@staticmethod
def get_feeds_after(feed):
feeds = Feed.objects.filter(parent=None, id__gt=feed)
return feeds
def get_comments(self):
return Feed.objects.filter(parent=self).order_by('date')
def calculate_likes(self):
likes = Activity.objects.filter(activity_type=Activity.LIKE,
feed=self.pk).count()
self.likes = likes
self.save()
return self.likes
def get_likes(self):
likes = Activity.objects.filter(activity_type=Activity.LIKE,
feed=self.pk)
return likes
def get_likers(self):
likes = self.get_likes()
likers = []
for like in likes:
likers.append(like.user)
return likers
def calculate_comments(self):
self.comments = Feed.objects.filter(parent=self).count()
self.save()
return self.comments
def comment(self, user, post):
feed_comment = Feed(user=user, post=post, parent=self)
feed_comment.save()
self.comments = Feed.objects.filter(parent=self).count()
self.save()
return feed_comment
def linkfy_post(self):
return bleach.linkify(escape(self.post))
def get_picture(self):
no_picture = 'http://trybootcamp.vitorfs.com/static/img/user.png'
try:
filename = settings.MEDIA_ROOT + '/profile_pictures/' + \
self.user.username + '.jpg'
picture_url = settings.MEDIA_URL + 'profile_pictures/' + \
self.user.username + '.jpg'
if os.path.isfile(filename):
return picture_url
else:
gravatar_url = 'http://www.gravatar.com/avatar/{0}?{1}'.format(
hashlib.md5(self.user.email.lower()).hexdigest(),
urllib.urlencode({'d': no_picture, 's': '256'})
)
return gravatar_url
except Exception:
return no_picture
| none | 1 | 2.081829 | 2 |
|
yoti_python_sdk/doc_scan/session/retrieve/id_document_resource_response.py | getyoti/python | 9 | 6633346 | <reponame>getyoti/python<filename>yoti_python_sdk/doc_scan/session/retrieve/id_document_resource_response.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from yoti_python_sdk.doc_scan.session.retrieve.document_fields_response import (
DocumentFieldsResponse,
)
from yoti_python_sdk.doc_scan.session.retrieve.document_id_photo_response import (
DocumentIdPhotoResponse,
)
from yoti_python_sdk.doc_scan.session.retrieve.page_response import PageResponse
from yoti_python_sdk.doc_scan.session.retrieve.resource_response import ResourceResponse
from yoti_python_sdk.doc_scan.session.retrieve.task_response import (
TextExtractionTaskResponse,
)
class IdDocumentResourceResponse(ResourceResponse):
"""
Represents an Identity Document resource for a given session
"""
def __init__(self, data=None):
"""
:param data: the data to parse
:type data: dict or None
"""
if data is None:
data = dict()
ResourceResponse.__init__(self, data)
self.__document_type = data.get("document_type", None)
self.__issuing_country = data.get("issuing_country", None)
self.__pages = [PageResponse(page) for page in data.get("pages", [])]
self.__document_fields = (
DocumentFieldsResponse(data["document_fields"])
if "document_fields" in data.keys()
else None
)
self.__document_id_photo = (
DocumentIdPhotoResponse(data["document_id_photo"])
if "document_id_photo" in data.keys()
else None
)
@property
def document_type(self):
"""
Returns the identity document type, e.g. "PASSPORT"
:return: the document type
:rtype: str or None
"""
return self.__document_type
@property
def issuing_country(self):
"""
Returns the issuing country of the identity document
:return: the issuing country
:rtype: str or None
"""
return self.__issuing_country
@property
def pages(self):
"""
Returns the individual pages of the identity document
:return: the pages
:rtype: list[PageResponse]
"""
return self.__pages
@property
def document_fields(self):
"""
Returns the associated document fields
:return: the document fields
:rtype: DocumentFieldsResponse
"""
return self.__document_fields
@property
def document_id_photo(self):
"""
Returns the associated document ID photo
:return: the document ID photo
:rtype: DocumentIdPhotoResponse
"""
return self.__document_id_photo
@property
def text_extraction_tasks(self):
"""
Returns a list of text extraction tasks associated
with the identity document
:return: list of text extraction tasks
:rtype: list[TextExtractionTaskResponse]
"""
return [
task for task in self.tasks if isinstance(task, TextExtractionTaskResponse)
]
scripts/twice.py | naganoyusuke/file2 | 0 | 6633347
#!/usr/bin/env python3
import rospy
from std_msgs.msg import Int32


def cb(message):
    # Re-publish the received count doubled.
    pub.publish(message.data * 2)


if __name__ == '__main__':
    # init_node must run exactly once; calling it a second time raises an
    # exception, so the node is initialised here only.
    rospy.init_node('twice')
    pub = rospy.Publisher('twice_up', Int32, queue_size=1)
    sub = rospy.Subscriber('count_up', Int32, cb)
    rospy.spin()
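
# --- Companion sketch (an assumption, not part of this repo): a minimal
# --- publisher feeding the 'count_up' topic that the node above doubles.
import rospy
from std_msgs.msg import Int32

if __name__ == '__main__':
    rospy.init_node('count')
    pub = rospy.Publisher('count_up', Int32, queue_size=1)
    rate = rospy.Rate(10)  # publish at 10 Hz
    n = 0
    while not rospy.is_shutdown():
        n += 1
        pub.publish(n)
        rate.sleep()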
| fr | 0.221828 | #!/usr/bin/env python3 | 2.336646 | 2 |
src/magnet/utils/config.py | PrincetonUniversity/PMagnet | 20 | 6633348
import os
import configparser
class ConfigSection(object):
"""
    A thin wrapper over a ConfigParser SectionProxy object that infers the
    types of values and makes them available as attributes.
    int, float, bool, str, None (missing values) and lists are supported.
"""
def __init__(self, config, section_proxy):
self.config = config
self.name = section_proxy.name
self.d = {} # key value dict where the value is typecast to int/float/str
for k, v in section_proxy.items():
self.d[k] = self.parse(v)
def __setattr__(self, key, value):
if key in ('config', 'name', 'd'):
return super(ConfigSection, self).__setattr__(key, value)
else:
self.d[key] = value
def __getattr__(self, item):
if item not in ('config', 'name', 'd'):
# If an environment variable exists with name <CONFIG_NAME>_<SECTION>_<ITEM>, use it
env_varname = '_'.join([str(x).upper() for x in [self.config.name, self.name, item]])
env_var = os.getenv(env_varname)
return env_var or self.d[item]
    def parse(self, s):
        s = s.strip()
        if s in ('True', 'False'):
            return s == 'True'
        try:
            return int(s)
        except ValueError:
            pass
        try:
            return float(s)
        except ValueError:
            pass
        # A quoted value keeps its inner text, a missing value becomes None,
        # and a bracketed value becomes a list; any other text stays a string.
        if s.startswith('"') and s.endswith('"'):
            return s[1:-1]
        if s == '':
            return None
        if s.startswith('[') and s.endswith(']'):
            return [self.parse(t) for t in s[1:-1].split(',')]
        return s
def items(self):
return self.d.items()
class Config(object):
def __init__(self, name, filenames):
self.name = name
self.config = configparser.ConfigParser(inline_comment_prefixes='#')
self.init_from_files(filenames)
def init_from_files(self, filenames):
self.config.read(filenames)
self._read_sections()
def read(self, filename):
self.config.read(filename)
self._read_sections()
def _read_sections(self):
for section in self.config.sections():
setattr(self, section, ConfigSection(self, self.config[section]))
def sections(self):
return self.config.sections()
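
# --- Usage sketch (assumed file name and keys, not from the repo): write a
# --- tiny ini file, then read typed values back as attributes.
import tempfile

_ini = tempfile.NamedTemporaryFile('w', suffix='.ini', delete=False)
_ini.write('[training]\nepochs = 100\nlr = 0.001\nlayers = [64, 32]\n')
_ini.close()

cfg = Config('magnet', [_ini.name])
assert cfg.training.epochs == 100       # parsed as int
assert cfg.training.lr == 0.001         # parsed as float
assert cfg.training.layers == [64, 32]  # parsed as a list of ints
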
| en | 0.846234 | A thin wrapper over a ConfigParser's SectionProxy object, that tries to infer the types of values, and makes them available as attributes Currently int/float/str are supported. # key value dict where the value is typecast to int/float/str # If an environment variable exists with name <CONFIG_NAME>_<SECTION>_<ITEM>, use it # We interpret a missing value as None, and a "" as the empty string | 2.984348 | 3 |
tests/test_updater.py | tgamauf/git-submodule-autoupdate | 1 | 6633349
import copy
from http import HTTPStatus
import json
import os
import requests
import requests_mock
import unittest
from unittest.mock import MagicMock, patch
import gitsup.update as update
class TestUpdate(unittest.TestCase):
@staticmethod
def _clear_environment():
for k in filter(lambda x: x.startswith("GITSUP"), os.environ):
del os.environ[k]
@classmethod
def _set_environment(cls, config):
cls._clear_environment()
if "token" in config:
os.environ["GITSUP_TOKEN"] = config["token"]
if "owner" in config:
os.environ["GITSUP_OWNER"] = config["owner"]
if "repository" in config:
os.environ["GITSUP_REPOSITORY"] = config["repository"]
if "branch" in config:
os.environ["GITSUP_BRANCH"] = config["branch"]
if "submodules" in config:
submodules_config = config["submodules"]
os.environ["GITSUP_SUBMODULES"] = ", ".join(submodules_config)
for n, c in submodules_config.items():
if "owner" in c:
os.environ[f"GITSUP_SUBMODULE_{n}_OWNER"] = c["owner"]
if "branch" in c:
os.environ[f"GITSUP_SUBMODULE_{n}_BRANCH"] = c["branch"]
if "path" in c:
os.environ[f"GITSUP_SUBMODULE_{n}_PATH"] = c["path"]
@classmethod
def setUpClass(cls):
cls.token = "token"
cls.owner = "owner"
cls.repository = "repository"
cls.branch = "branch"
cls.submodule_1 = "sub-1-repo"
cls.submodule_1_owner = "sub-1-owner"
cls.submodule_1_branch = "sub-1-branch"
cls.submodule_1_path = "sub-1-path"
cls.submodule_2 = "sub-2-repo"
cls.submodule_2_owner = cls.owner
cls.submodule_2_branch = "master"
cls.submodule_2_path = cls.submodule_2
cls.submodule_3 = "sub-3-repo"
cls.submodule_3_owner = cls.owner
cls.submodule_3_branch = "master"
cls.submodule_3_path = cls.submodule_3
cls.config = {
"token": cls.token,
"owner": cls.owner,
"repository": cls.repository,
"branch": cls.branch,
"submodules": {
cls.submodule_1: {
"owner": cls.submodule_1_owner,
"branch": cls.submodule_1_branch,
"path": cls.submodule_1_path,
},
# These are empty as we use default values here
cls.submodule_2: {},
cls.submodule_3: {},
},
}
cls.parent_url_template = update.URL_TEMPLATE.format(
owner=cls.owner, repository=cls.repository, url="{url}"
)
cls.submodule_1_url_template = update.URL_TEMPLATE.format(
owner=cls.submodule_1_owner, repository=cls.submodule_1, url="{url}"
)
cls.submodule_2_url_template = update.URL_TEMPLATE.format(
owner=cls.submodule_2_owner, repository=cls.submodule_2, url="{url}"
)
def _assert_header_valid(self, headers):
# Github API v3 accept header
self.assertIn("Accept", headers.keys())
self.assertEqual(headers["Accept"], "application/vnd.github.v3+json")
# Auth header
self.assertIn("Authorization", headers.keys())
self.assertEqual(headers["Authorization"], f"token {self.token}")
def test_update_changed_parameter(self):
# Test if the config file is handed over to config.
# We interrupt the test when the get_config mock is called, as
# we aren't interested in running the rest
with patch("gitsup.update.get_config") as mock_config:
            mock_config.side_effect = RuntimeError("interrupted")
with self.assertRaises(RuntimeError):
update.update_git_submodules(config_file_path="config-file-path")
mock_config.assert_called_once_with(
config_file_path="config-file-path", token=None
)
# Test if the token is handed over to config.
# We interrupt the test when the get_config mock is called, as
# we aren't interested in running the rest
with patch("gitsup.update.get_config") as mock_config:
            mock_config.side_effect = RuntimeError("interrupted")
with self.assertRaises(RuntimeError):
update.update_git_submodules(token="token")
mock_config.assert_called_once_with(config_file_path=None, token="token")
@requests_mock.mock()
def test_update_changed_success(self, mock_requests):
# Test:
# - submodules 1 & 2 are configured and exist
# - submodule 1 has been changed, submodule 2 is the same
# - submodule 3 is configured, but doesn't exist
# - submodule 4 isn't configured, but exists
# -> the test must pass
current_parent_oid = "current-parent_oid"
current_submodule_1_oid = "current-sub-1-oid"
current_submodule_2_oid = "current-sub-2-oid"
new_submodule_1_oid = "new-sub-1-oid"
tree_oid = "tree-oid"
tree_commit = "tree-commit-oid"
# Prepare mocks
# _get_oid for parent
mock_requests.get(
self.parent_url_template.format(url=f"branches/{self.branch}"),
json={"commit": {"sha": current_parent_oid}},
)
# _get_oid for submodule 1
mock_requests.get(
self.submodule_1_url_template.format(
url=f"branches/{self.submodule_1_branch}"
),
json={"commit": {"sha": new_submodule_1_oid}},
)
# _get_oid for submodule 2
mock_requests.get(
self.submodule_2_url_template.format(
url=f"branches/{self.submodule_2_branch}"
),
json={"commit": {"sha": current_submodule_2_oid}},
)
# _get_current_submodule_oids
mock_requests.get(
self.parent_url_template.format(url=f"git/trees/{current_parent_oid}"),
json={
"tree": [
{"path": "README.md", "type": "blob", "sha": "readme-oid"},
{
"path": self.submodule_1_path,
"type": "commit",
"sha": current_submodule_1_oid,
},
{
"path": self.submodule_2_path,
"type": "commit",
"sha": current_submodule_2_oid,
},
{"path": "sub-4-path", "type": "commit", "sha": "sub-4-oid"},
]
},
)
# _create_updated_tree
mock_requests.post(
self.parent_url_template.format(url=f"git/trees"), json={"sha": tree_oid}
)
# _commit_tree
mock_requests.post(
self.parent_url_template.format(url=f"git/commits"),
json={"sha": tree_commit},
)
# _commit_oid
mock_requests.patch(
self.parent_url_template.format(url=f"git/refs/heads/{self.branch}"),
json={"text": "something"},
)
self._set_environment(self.config)
update.update_git_submodules()
self.assertEqual(mock_requests.call_count, 7)
self._assert_header_valid(mock_requests.last_request._request.headers)
history = mock_requests.request_history
# _create_updated_tree
self.assertDictEqual(
history[4].json(),
{
"base_tree": current_parent_oid,
"tree": [
{
"path": self.submodule_1_path,
"mode": "160000",
"type": "commit",
"sha": new_submodule_1_oid,
}
],
},
)
# _commit_tree
self.assertDictEqual(
history[5].json(),
{
"message": (
"Update submodules in 'branch' to latest commits\n\n"
"* Update submodule 'sub-1-repo' to HEAD of branch "
"'sub-1-branch':\n\tnew-sub-1-oid"
),
"tree": "tree-oid",
"parents": ["current-parent_oid"],
},
)
# _commit_oid
self.assertDictEqual(history[6].json(), {"sha": "tree-commit-oid"})
@requests_mock.mock()
def test_unchanged_success(self, mock_requests):
# Test:
# - submodules 1 & 2 are configured and exist
# - none of the submodules changed
# -> the test must pass
current_parent_oid = "current-parent_oid"
current_submodule_1_oid = "current-sub-1-oid"
current_submodule_2_oid = "current-sub-2-oid"
# Prepare mocks
# _get_oid for parent
mock_requests.get(
self.parent_url_template.format(url=f"branches/{self.branch}"),
json={"commit": {"sha": current_parent_oid}},
)
# _get_oid for submodule 1
mock_requests.get(
self.submodule_1_url_template.format(
url=f"branches/{self.submodule_1_branch}"
),
json={"commit": {"sha": current_submodule_1_oid}},
)
# _get_oid for submodule 2
mock_requests.get(
self.submodule_2_url_template.format(
url=f"branches/{self.submodule_2_branch}"
),
json={"commit": {"sha": current_submodule_2_oid}},
)
# _get_current_submodule_oids
mock_requests.get(
self.parent_url_template.format(url=f"git/trees/{current_parent_oid}"),
json={
"tree": [
{"path": "README.md", "type": "blob", "sha": "readme-oid"},
{
"path": self.submodule_1_path,
"type": "commit",
"sha": current_submodule_1_oid,
},
{
"path": self.submodule_2_path,
"type": "commit",
"sha": current_submodule_2_oid,
},
]
},
)
        # Deep-copy so removing submodule 3 does not mutate the shared
        # class-level config used by the other tests.
        config_dict = copy.deepcopy(self.config)
        del config_dict["submodules"][self.submodule_3]
        self._set_environment(config_dict)
update.update_git_submodules()
self.assertEqual(mock_requests.call_count, 4)
def test_request(self):
        # Test a connection failure (requests.ConnectionError)
mock_request_fn = MagicMock()
mock_request_fn.side_effect = requests.ConnectionError("something happened")
with self.assertRaises(ConnectionError) as cm:
update._request(f"test", mock_request_fn, test="test")
self.assertEqual(
cm.exception.args[0], f"test: failed to connect to API - something happened"
)
mock_request_fn.assert_called_once_with(test="test")
# Test invalid response
mock_json = MagicMock()
mock_json.side_effect = json.JSONDecodeError("JSON failed", "doc", 0)
mock_response = MagicMock(autospec=requests.Response)
mock_response.json = mock_json
mock_response.text = "some text"
mock_response.status_code = HTTPStatus.NOT_FOUND
mock_request_fn = MagicMock()
mock_request_fn.return_value = mock_response
with self.assertRaises(RuntimeError) as cm:
update._request(f"test", mock_request_fn, test="test")
self.assertEqual(
cm.exception.args[0],
f"test: could not decode API response - some text "
f"({HTTPStatus.NOT_FOUND})",
)
mock_request_fn.assert_called_once_with(test="test")
# Invalid token
with requests_mock.mock() as mock_request:
kwargs = {"url": "mock://some.url", "json": {"test": "value"}}
mock_request.get(
kwargs["url"],
status_code=HTTPStatus.UNAUTHORIZED,
json={"message": "Bad credentials"},
)
with self.assertRaises(PermissionError) as cm:
update._request(error=f"test", fn=requests.get, **kwargs)
self.assertEqual(
cm.exception.args[0],
"test: invalid Github personal access token provided",
)
# Test invalid key permissions, repository doesn't exist
with requests_mock.mock() as mock_request:
kwargs = {"url": "mock://some.url", "json": {"test": "value"}}
mock_request.get(
kwargs["url"],
status_code=HTTPStatus.NOT_FOUND,
json={"message": "Not found"},
)
with self.assertRaises(RuntimeError) as cm:
update._request(error=f"test", fn=requests.get, **kwargs)
self.assertEqual(
cm.exception.args[0],
"test: couldn't access repository. Please check if the "
"owner and repository exist and the Github personal "
"access token has permissions 'repo' assigned",
)
# Test branch doesn't exist
with requests_mock.mock() as mock_request:
kwargs = {"url": "mock://some.url", "json": {"test": "value"}}
mock_request.get(
kwargs["url"],
status_code=HTTPStatus.NOT_FOUND,
json={"message": "Branch not found"},
)
with self.assertRaises(RuntimeError) as cm:
update._request(error=f"test", fn=requests.get, **kwargs)
self.assertEqual(cm.exception.args[0], "test: invalid branch")
# Test unknown 404
with requests_mock.mock() as mock_request:
kwargs = {"url": "mock://some.url", "json": {"test": "value"}}
mock_request.get(
kwargs["url"],
status_code=HTTPStatus.NOT_FOUND,
json={"message": "unknown"},
)
with self.assertRaises(RuntimeError) as cm:
update._request(error=f"test", fn=requests.get, **kwargs)
self.assertEqual(
cm.exception.args[0], f"test: unknown ({HTTPStatus.NOT_FOUND})"
)
# Test other http error code
with requests_mock.mock() as mock_request:
kwargs = {"url": "mock://some.url", "json": {"test": "value"}}
mock_request.get(
kwargs["url"],
status_code=HTTPStatus.INTERNAL_SERVER_ERROR,
text="error",
)
with self.assertRaises(RuntimeError) as cm:
update._request(error=f"test", fn=requests.get, **kwargs)
self.assertEqual(
cm.exception.args[0],
f"test: error ({HTTPStatus.INTERNAL_SERVER_ERROR})",
)
if __name__ == "__main__":
unittest.main()
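
# --- Note (assumption about the project layout): the suite runs with either
# --- of the standard runners from the repository root.
#
#   python -m unittest tests.test_updater
#   python -m pytest tests/test_updater.py
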
| en | 0.726197 | # These are empty as we use default values here # Github API v3 accept header # Auth header # Test if the config file is handed over to config. # We interrupt the test when the get_config mock is called, as # we aren't interested in running the rest # Test if the token is handed over to config. # We interrupt the test when the get_config mock is called, as # we aren't interested in running the rest # Test: # - submodules 1 & 2 are configured and exist # - submodule 1 has been changed, submodule 2 is the same # - submodule 3 is configured, but doesn't exist # - submodule 4 isn't configured, but exists # -> the test must pass # Prepare mocks # _get_oid for parent # _get_oid for submodule 1 # _get_oid for submodule 2 # _get_current_submodule_oids # _create_updated_tree # _commit_tree # _commit_oid # _create_updated_tree # _commit_tree # _commit_oid # Test: # - submodules 1 & 2 are configured and exist # - none of the submodules changed # -> the test must pass # Prepare mocks # _get_oid for parent # _get_oid for submodule 1 # _get_oid for submodule 2 # _get_current_submodule_oids # Test connection timeout # Test invalid response # Invalid token # Test invalid key permissions, repository doesn't exist # Test branch doesn't exist # Test unknown 404 # Test other http error code | 2.41855 | 2 |
src/pilot/PilotModes.py | cornzz/robolab-tud-spring18 | 0 | 6633350
from enum import unique, IntEnum
@unique
class PilotModes(IntEnum):
# low-level modes
FOLLOW_LINE = 0
CHECK_ISC = 1
CHOOSE_PATH = 2
FOLLOW_LINE_ODO = 5
BLOCKED = 6
# top-level modes
EXPLORE = 3
TARGET = 4
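
# --- Usage sketch (assumed, not from the repo): IntEnum members compare and
# --- serialize as plain ints, which keeps mode codes compact on the wire.
mode = PilotModes.FOLLOW_LINE
assert mode == 0                                # IntEnum equals its int value
assert int(PilotModes.TARGET) == 4
assert PilotModes(2) is PilotModes.CHOOSE_PATH  # round-trip from a raw code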