from datetime import datetime, timedelta
from critiquebrainz.data.testing import DataTestCase
import critiquebrainz.db.oauth_token as db_oauth_token
import critiquebrainz.db.oauth_client as db_oauth_client
import critiquebrainz.db.users as db_users
import critiquebrainz.db.exceptions as db_exceptions
from critiquebrainz.db.user import User
class OAuthTokenTestCase(DataTestCase):
def setUp(self):
super(OAuthTokenTestCase, self).setUp()
self.user = User(db_users.get_or_create('tester_1', new_user_data={
"display_name": "test",
}))
db_oauth_client.create(
user_id=self.user.id,
name="Test App",
desc="Application for testing",
website="https://example.com",
redirect_uri="https://example.com/oauth",
)
self.oauth_client = db_users.clients(self.user.id)[0]
def test_create(self):
self.assertEqual(len(db_oauth_token.list_tokens()), 0)
db_oauth_token.create(
client_id=self.oauth_client["client_id"],
access_token="Test Access Token",
refresh_token="<PASSWORD> Token",
expires=datetime.now() + timedelta(seconds=200),
user_id=self.user.id,
scopes=None,
)
self.assertEqual(len(db_oauth_token.list_tokens()), 1)
def test_list(self):
db_oauth_token.create(
client_id=self.oauth_client["client_id"],
access_token="<PASSWORD> Token",
refresh_token="<PASSWORD>",
expires=datetime.now() + timedelta(seconds=200),
user_id=self.user.id,
scopes=None,
)
self.assertEqual(len(db_oauth_token.list_tokens(client_id=self.oauth_client["client_id"])), 1)
self.assertEqual(len(db_oauth_token.list_tokens(refresh_token="Test Refresh Token")), 1)
def test_delete(self):
db_oauth_token.create(
client_id=self.oauth_client["client_id"],
access_token="<PASSWORD> Token",
refresh_token="<PASSWORD>",
expires=datetime.now() + timedelta(seconds=200),
user_id=self.user.id,
scopes=None,
)
self.assertEqual(len(db_oauth_token.list_tokens(client_id=self.oauth_client["client_id"])), 1)
        db_oauth_token.delete(client_id=self.oauth_client["client_id"], refresh_token="Test Refresh Token")
self.assertEqual(len(db_oauth_token.list_tokens(client_id=self.oauth_client["client_id"])), 0)
def test_get_scopes(self):
# Test fetching scopes of a valid token
oauth_token = db_oauth_token.create(
client_id=self.oauth_client["client_id"],
access_token="<PASSWORD> Token",
refresh_token="<PASSWORD>",
expires=datetime.now() + timedelta(seconds=200),
user_id=self.user.id,
scopes="Test Scopes",
)
self.assertIn("Test", db_oauth_token.get_scopes(oauth_token["id"]))
# Test fetching scopes of a token that does not exist
db_oauth_token.delete(client_id=self.oauth_client["client_id"], refresh_token="Test Refresh Token")
with self.assertRaises(db_exceptions.NoDataFoundException):
db_oauth_token.get_scopes(oauth_token["id"])
# Test fetching scopes of token with no scopes
oauth_token = db_oauth_token.create(
client_id=self.oauth_client["client_id"],
access_token="Test <PASSWORD> Token",
refresh_token="<PASSWORD>",
expires=datetime.now() + timedelta(seconds=200),
user_id=self.user.id,
scopes=None,
)
self.assertEqual([], db_oauth_token.get_scopes(oauth_token["id"]))
|
#!/usr/bin/env python
"""Provide a command line tool to validate and transform tabular samplesheets."""
import os
import sys
import errno
import argparse
import pandas as pd
def parse_args(args=None):
Description = "Reformat nf-core/metapep samplesheet file and check its contents."
Epilog = "Example usage: python check_samplesheet.py <FILE_IN>"
parser = argparse.ArgumentParser(description=Description, epilog=Epilog)
parser.add_argument('-i', "--input", required=True, metavar='FILE', type=argparse.FileType('r'), help="Input samplesheet file containing: condition, type, microbiome_path, alleles, weights_path.")
parser.add_argument('-m', "--microbiomes", required=True, metavar='FILE', type=argparse.FileType('w'), help="Output file containing: microbiome_id, microbiome_path, microbiome_type, weights_path.")
parser.add_argument('-c', "--conditions", required=True, metavar='FILE', type=argparse.FileType('w'), help="Output file containing: condition_id, condition_name, microbiome_id.")
parser.add_argument('-a', "--alleles", required=True, metavar='FILE', type=argparse.FileType('w'), help="Output file containing: allele_id, allele_name.")
parser.add_argument('-ca', "--conditions_alleles", required=True, metavar='FILE', type=argparse.FileType('w'), help="Output file containing: condition_id, allele_id.")
return parser.parse_args(args)
def print_error(error, context="Line", context_str=""):
error_str = "ERROR: Please check samplesheet -> {}".format(error)
if context != "" and context_str != "":
error_str = "ERROR: Please check samplesheet -> {}\n{}: '{}'".format(
error, context.strip(), context_str.strip()
)
print(error_str)
sys.exit(1)
# TODO for 'proteins' type
# - allow weight input for type 'proteins' as well! (for now use equal weight ?)
def check_samplesheet(args):
"""
Check that the tabular samplesheet has the structure expected by nf-core pipelines.
condition,type,microbiome_path,alleles,weights_path
For an example see:
https://github.com/nf-core/metapep/raw/dev/assets/samplesheet.csv
"""
input_table = pd.read_csv(args.input)
input_table_cp = input_table.copy()
# check if microbiome_path file extensions are valid
for type, fname in zip(input_table["type"], input_table["microbiome_path"]):
if type == "proteins":
print_error("Invalid type '" + type + "' specified in " + args.input.name + ". Type 'proteins' is not yet supported! Valid types are 'taxa', 'bins' and 'assembly'.")
if type not in ["taxa", "assembly", "bins"]:
print_error("Invalid type '" + type + "' specified in " + args.input.name + ". Valid types are 'taxa', 'bins' and 'assembly'.")
if type == "taxa" and not fname.lower().endswith(('.txt', '.tsv')):
print_error("In " + args.input.name + " specified file " + fname + " of type 'taxa' has invalid file extension. Valid extensions are '.txt' and '.tsv'.")
if type == "proteins" and not fname.lower().endswith(('.fa', '.fa.gz', '.fasta', '.fasta.gz')):
print_error("In " + args.input.name + " specified file " + fname + " of type 'proteins' has invalid file extension. Valid extensions are '.fa', '.fa.gz', '.fasta' and '.fasta.gz'.")
if type == "assembly" and not fname.lower().endswith(('.fa', '.fa.gz', '.fasta', '.fasta.gz')):
print_error("In " + args.input.name + " specified file " + fname + " of type 'assembly' has invalid file extension. Valid extensions are '.fa', '.fa.gz', '.fasta' and '.fasta.gz'.")
# check if microbiome_path files exist
# for fname in input_table["microbiome_path"]:
# if not os.path.isfile(fname):
# sys.exit("In " + args.input.name + " specified file " + fname + " does not exist!")
# NOTE not possible for urls, will be checked afterwards during channel creation for microbiome files
# check if condition names unique
if len(input_table["condition"]) != len(input_table["condition"].drop_duplicates()):
sys.exit("Input file " + args.input.name + " contains duplicated conditions! Please specify unique conditions.")
# check if weight_path is valid
for type, weights_path in zip(input_table["type"], input_table["weights_path"]):
if not type == 'assembly' and not pd.isnull(weights_path):
sys.exit("Input file " + args.input.name + " contains 'weights_path' specified for type '" + type + "'! Currently input weights are only supported for type 'assembly'.")
if not pd.isnull(weights_path) and not weights_path.lower().endswith('.tsv'):
sys.exit("In " + args.input.name + " specified 'weights_path' " + weights_path + " has invalid file extension. The extension must be '.tsv'.")
# microbiome_id - microbiome_path - microbiome_type
microbiomes = input_table[["microbiome_path", "type", "weights_path"]].drop_duplicates().rename({"type":"microbiome_type"}, axis=1)
microbiomes["microbiome_id"] = range(len(microbiomes))
if len(microbiomes) != len(microbiomes["microbiome_path"].drop_duplicates()):
sys.exit("Conflicting types or weights were specified for the same microbiome path!")
microbiomes[["microbiome_id", "microbiome_path", "microbiome_type", "weights_path"]].to_csv(args.microbiomes, sep="\t", index=False)
# condition id - condition name - microbiome id
conditions = input_table.merge(microbiomes)[["condition", "microbiome_id"]].rename({"condition":"condition_name"}, axis=1) # conditions unique (checked in nextflow)
conditions["condition_id"] = range(len(conditions))
conditions[["condition_id", "condition_name", "microbiome_id"]].to_csv(args.conditions, sep="\t", index=False)
# allele id - allele name
unique_alleles = { allele for allele_list in input_table["alleles"] for allele in allele_list.split(' ') }
alleles = pd.DataFrame({"allele_name":list(unique_alleles)})
alleles["allele_id"] = range(len(alleles))
alleles[["allele_id", "allele_name"]].to_csv(args.alleles, sep="\t", index=False)
# condition id - allele id
conditions_alleles = pd.DataFrame([ (row["condition"], allele_name) for _, row in input_table.iterrows() for allele_name in row["alleles"].split(' ') ], columns = ["condition_name", "allele_name"])
conditions_alleles = conditions_alleles.merge(conditions).merge(alleles)[["condition_id", "allele_id"]]
conditions_alleles.to_csv(args.conditions_alleles, sep="\t", index=False)
input_table_cp.to_csv("samplesheet.valid.csv", index=False)
print("Done!")
def main(args=None):
args = parse_args(args)
check_samplesheet(args)
if __name__ == "__main__":
sys.exit(main())
|
# Repository: wis-software/rocketchat-tests-based-on-splinter
from argparse import ArgumentParser
from sys import stderr
from time import sleep
from rocketchat_API.rocketchat import RocketChat
from base import SplinterTestCase
LOCALHOST = 'http://127.0.0.1:8006'
class SplinterWizardInit(SplinterTestCase):
def __init__(self, addr, username, password, wait=10, **kwargs):
SplinterTestCase.__init__(self, addr, **kwargs)
self.addr = addr
self.username = username
self.password = password
self.wait = wait
self.bot_name = 'meeseeks'
self.bot_password = '<PASSWORD>'
def _wait_until_loading_is_completed(self, header, selector):
for _ in range(self.wait):
title = self.find_by_css(selector)
if title.text.lower() == header:
return True
sleep(1)
return False
def test_administrator_info(self):
# Admin info
header = self.find_by_css('.setup-wizard-forms__header-title')
assert header.text.lower() in 'admin info'
self.browser.fill('registration-name', self.username)
self.browser.fill('registration-username', self.username)
self.browser.fill(
            # email reconstructed from the redacted source; the domain is an assumption
            'registration-email', '{}@example.com'.format(self.username)
)
self.browser.fill('registration-pass', self.password)
submit_btn = self.find_by_css(
'.rc-button.rc-button--primary.setup-wizard-forms__footer-next'
)
assert submit_btn
submit_btn.click()
def test_organisation_info(self):
assert self._wait_until_loading_is_completed(
'organization info',
'.setup-wizard-forms__header-title'
)
submit_btn = self.find_by_css(
'.rc-button.rc-button--primary.setup-wizard-forms__footer-next'
)
assert submit_btn
submit_btn.click()
def test_server_information(self):
assert self._wait_until_loading_is_completed(
'server info',
'.setup-wizard-forms__header-title'
)
submit_btn = self.find_by_css(
'.rc-button.rc-button--primary.setup-wizard-forms__footer-next'
)
assert submit_btn
submit_btn.click()
def test_server_registration(self):
assert self._wait_until_loading_is_completed(
'register server',
'.setup-wizard-forms__header-title'
)
tariff_plan = self.find_by_css(
'.setup-wizard-forms__content-register-radio'
)
assert tariff_plan
tariff_plan.last.click()
submit_btn = self.find_by_css(
'.rc-button.rc-button--primary.setup-wizard-forms__footer-next'
)
assert submit_btn
submit_btn.click()
def test_fin(self):
assert self._wait_until_loading_is_completed(
'your workspace is ready to use 🎉',
'.setup-wizard-info__content-title.setup-wizard-final__box-title'
)
submit_btn = self.find_by_css(
'.rc-button.rc-button--primary.js-finish'
)
assert submit_btn
submit_btn.click()
def test_creating_bot_account(self):
options_btn = self.browser.find_by_css(
'.sidebar__toolbar-button.rc-tooltip.rc-tooltip--down.js-button'
)
options_btn.last.click()
administration_btn = self.browser.find_by_css('.rc-popover__item-text')
administration_btn.click()
users_btn = self.browser.driver.find_elements_by_css_selector(
'a.sidebar-item__link[aria-label="Users"]')
self.browser.driver.execute_script("arguments[0].click();",
users_btn[0])
add_user_btn = self.find_by_css('button[aria-label="Add User"]')
assert add_user_btn
add_user_btn.click()
input_name_el = self.find_by_css('input#name')
assert input_name_el
input_name_el.first.fill(self.bot_name)
input_username_el = self.find_by_css('input#username')
assert input_username_el
input_username_el.first.fill(self.bot_name)
input_email_el = self.find_by_css('input#email')
assert input_email_el
        # email reconstructed from the redacted source; the domain is an assumption
        input_email_el.first.fill('{}@example.com'.format(self.bot_name))
verified_btn = self.find_by_css('label.rc-switch__label')
assert verified_btn
verified_btn.first.click()
input_password_el = self.find_by_css('input#password')
assert input_password_el
input_password_el.first.fill(self.bot_password)
verified_btn = self.find_by_css('label.rc-switch__label')
assert verified_btn
verified_btn.last.click()
role_option = self.find_by_css('option[value="bot"]')
assert role_option
role_option.first.click()
add_role_btn = self.find_by_css('button#addRole')
assert add_role_btn
add_role_btn.first.click()
# Do not send welcome email
welcome_ckbx = self.find_by_css('label[for="sendWelcomeEmail"]')
assert welcome_ckbx
welcome_ckbx.first.click()
save_btn = self.find_by_css('.rc-button.rc-button--primary.save')
assert save_btn
save_btn.first.click()
def test_adding_permissions_to_bot(self):
permissions = {
'view-full-other-user-info': True
}
perms_btn = self.browser.driver.find_elements_by_css_selector(
'a.sidebar-item__link[aria-label="Permissions"]'
)
assert perms_btn
self.browser.driver.execute_script("arguments[0].click();",
perms_btn[0])
for name in permissions:
checkbox = self.browser.driver.find_element_by_css_selector(
'input.role-permission[name="perm[bot][{}]"]'.format(name)
)
assert checkbox
if permissions[name] != bool(checkbox.get_attribute('checked')):
checkbox.click()
exit_btn = self.find_by_css(
'.sidebar-flex__close-button'
)
assert exit_btn
exit_btn.click()
def test_create_necessary_rooms(self):
groups = [
'hr',
'leave-coordination'
]
rocket = RocketChat(
self.username,
self.password,
server_url=self.addr
)
for name in groups:
rocket.groups_create(name, members=['meeseeks'])
def main():
parser = ArgumentParser(description='usage: %prog [options] arguments')
parser.add_argument('-a', '--host', dest='host', type=str,
help='allows specifying domain or IP '
'of the Rocket.Chat host')
parser.add_argument('-u', '--username', dest='username', type=str,
help='allows specifying admin username')
parser.add_argument('-p', '--password', dest='password', type=str,
help='allows specifying admin password')
parser.add_argument('-w', '--wait', dest='wait', type=int,
help='allows specifying time '
'for waiting loading of page(secs)')
options = parser.parse_args()
if not options.host:
options.host = LOCALHOST
stderr.write(
'Host is not specified. Defaults to {}.\n'.format(options.host)
)
if not options.username:
parser.error('Username is not specified')
if not options.password:
parser.error('Password is not specified')
if not options.wait:
options.wait = 100
stderr.write(
'Waiting time is not specified. Defaults to {}.\n'
.format(options.wait)
)
test_cases = SplinterWizardInit(
options.host,
options.username,
options.password,
wait=options.wait
)
test_cases.run()
if __name__ == "__main__":
main()
|
import traceback
import ujson as json
from insanic import __version__
from insanic.conf import settings
from insanic.request import Request
from sanic.response import BaseHTTPResponse
from incendiary.xray.utils import abbreviate_for_xray, get_safe_dict
from aws_xray_sdk.core.models import http
from aws_xray_sdk.ext.util import calculate_segment_name, construct_xray_header
async def before_request(request: Request) -> None:
"""
The request middleware that runs when Sanic receives a
    request. Starts a segment if the sampling decision determines
    that the request should be traced.
"""
xray_recorder = request.app.xray_recorder
headers = request.headers
xray_header = construct_xray_header(headers)
name = calculate_segment_name(request.host, xray_recorder)
# custom decision to skip if INCENDIARY_XRAY_ENABLED is false
sampling_decision = xray_recorder.sampler.calculate_sampling_decision(
trace_header=xray_header,
recorder=xray_recorder,
service_name=request.host,
method=request.method,
path=request.path,
)
segment = xray_recorder.begin_segment(
name=name,
traceid=xray_header.root,
parent_id=xray_header.parent,
sampling=sampling_decision,
)
if segment.sampled:
segment.save_origin_trace_header(xray_header)
segment.put_annotation("insanic_version", __version__)
segment.put_annotation(
"service_version", settings.get("APPLICATION_VERSION", "?")
)
segment.put_http_meta(http.URL, request.url)
segment.put_http_meta(http.METHOD, request.method)
segment.put_http_meta(http.USER_AGENT, headers.get("User-Agent"))
client_ip = headers.get(settings.FORWARDED_FOR_HEADER) or headers.get(
"HTTP_X_FORWARDED_FOR"
)
if client_ip:
segment.put_http_meta(http.CLIENT_IP, client_ip)
segment.put_http_meta(http.X_FORWARDED_FOR, True)
else:
segment.put_http_meta(http.CLIENT_IP, request.remote_addr)
attributes = [
"args",
"content_type",
"cookies",
"data",
"host",
"ip",
"method",
"path",
"scheme",
"url",
]
for attr in attributes:
if hasattr(request, attr):
payload = getattr(request, attr)
if isinstance(payload, dict):
payload = abbreviate_for_xray(get_safe_dict(payload))
payload = json.dumps(payload)
segment.put_metadata(f"{attr}", payload, "request")
async def after_request(request: Request, response: BaseHTTPResponse) -> BaseHTTPResponse:
"""
Ends the segment before response is returned.
"""
xray_recorder = request.app.xray_recorder
segment = xray_recorder.current_segment()
if segment.sampled:
# setting user was moved from _before_request,
# because calling request.user authenticates, and if
# authenticators are not set for request, will end not being
# able to authenticate correctly
user = request.user
if user.id:
segment.set_user(user.id)
segment.put_annotation("user__level", user.level)
segment.put_http_meta(http.STATUS, response.status)
cont_len = response.headers.get("Content-Length")
        # truncate the response body if it is too long
segment.put_annotation("response", response.body.decode()[:1000])
if cont_len:
segment.put_http_meta(http.CONTENT_LENGTH, int(cont_len))
if hasattr(response, "exception"):
stack = traceback.extract_stack(limit=xray_recorder.max_trace_back)
segment.add_exception(response.exception, stack)
xray_recorder.end_segment()
return response
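# Hedged wiring sketch (not from the source): with plain Sanic these middlewares could be
# attached via app.register_middleware; the exact hook names in an Insanic app may differ.
#   app.register_middleware(before_request, attach_to="request")
#   app.register_middleware(after_request, attach_to="response")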
|
# Repository: kistlin/xknx
"""Unit test for Sensor objects."""
from unittest.mock import AsyncMock
import pytest
from xknx import XKNX
from xknx.devices import Sensor
from xknx.dpt import DPTArray
from xknx.telegram import GroupAddress, Telegram
from xknx.telegram.apci import GroupValueRead, GroupValueResponse, GroupValueWrite
class TestSensor:
"""Test class for Sensor objects."""
@pytest.mark.parametrize(
"value_type,raw_payload,expected_state",
[
# DPT-14 values are according to ETS group monitor values
(
"absolute_temperature",
DPTArray((0x44, 0xD7, 0xD2, 0x8B)),
1726.579,
),
(
"acceleration",
DPTArray((0x45, 0x94, 0xD8, 0x5D)),
4763.045,
),
(
"volume_liquid_litre",
DPTArray((0x00, 0x00, 0x01, 0x00)),
256,
),
(
"volume_m3",
DPTArray((0x00, 0x00, 0x01, 0x00)),
256,
),
(
"active_energy",
DPTArray((0x26, 0x37, 0x49, 0x7F)),
641157503,
),
(
"active_energy_kwh",
DPTArray((0x37, 0x5, 0x5, 0xEA)),
923076074,
),
(
"activity",
DPTArray((0x45, 0x76, 0x0, 0xA3)),
3936.04,
),
(
"amplitude",
DPTArray((0x45, 0x9A, 0xED, 0x8)),
4957.629,
),
(
"angle",
DPTArray((0xE4,)),
322,
),
(
"angle_deg",
DPTArray((0x44, 0x5C, 0x20, 0x2B)),
880.5026,
),
(
"angle_rad",
DPTArray((0x44, 0x36, 0x75, 0x1)),
729.8282,
),
(
"angular_frequency",
DPTArray((0x43, 0xBC, 0x20, 0x8D)),
376.2543,
),
(
"angular_momentum",
DPTArray((0xC2, 0x75, 0xB7, 0xB5)),
-61.4294,
),
(
"angular_velocity",
DPTArray((0xC4, 0xD9, 0x10, 0xB3)),
-1736.522,
),
(
"apparant_energy",
DPTArray((0xD3, 0xBD, 0x1E, 0xA5)),
-742580571,
),
(
"apparant_energy_kvah",
DPTArray((0x49, 0x40, 0xC9, 0x9)),
1228982537,
),
(
"area",
DPTArray((0x45, 0x63, 0x1E, 0xCD)),
3633.925,
),
(
"brightness",
DPTArray((0xC3, 0x56)),
50006,
),
(
"capacitance",
DPTArray((0x45, 0xC9, 0x1D, 0x9D)),
6435.702,
),
(
"charge_density_surface",
DPTArray((0x45, 0xDB, 0x66, 0x99)),
7020.825,
),
(
"charge_density_volume",
DPTArray((0xC4, 0x8C, 0x33, 0xD7)),
-1121.62,
),
(
"color_temperature",
DPTArray((0x6C, 0x95)),
27797,
),
(
"common_temperature",
DPTArray((0x45, 0xD9, 0xC6, 0x3F)),
6968.781,
),
(
"compressibility",
DPTArray((0x45, 0x89, 0x94, 0xAB)),
4402.583,
),
(
"conductance",
DPTArray((0x45, 0xA6, 0x28, 0xF9)),
5317.122,
),
(
"counter_pulses",
DPTArray((0x9D,)),
-99,
),
(
"current",
DPTArray((0xCA, 0xCC)),
51916,
),
(
"delta_time_hrs",
DPTArray((0x47, 0x80)),
18304,
),
(
"delta_time_min",
DPTArray((0xB9, 0x7B)),
-18053,
),
(
"delta_time_ms",
DPTArray((0x58, 0x77)),
22647,
),
(
"delta_time_sec",
DPTArray((0xA3, 0x6A)),
-23702,
),
(
"density",
DPTArray((0x44, 0xA5, 0xCB, 0x27)),
1326.349,
),
(
"electrical_conductivity",
DPTArray((0xC4, 0xC6, 0xF5, 0x6E)),
-1591.67,
),
(
"electric_charge",
DPTArray((0x46, 0x14, 0xF6, 0xA0)),
9533.656,
),
(
"electric_current",
DPTArray((0x45, 0xAD, 0x45, 0x90)),
5544.695,
),
(
"electric_current_density",
DPTArray((0x45, 0x7C, 0x57, 0xF6)),
4037.498,
),
(
"electric_dipole_moment",
DPTArray((0x45, 0x58, 0xF1, 0x73)),
3471.091,
),
(
"electric_displacement",
DPTArray((0xC5, 0x34, 0x8B, 0x0)),
-2888.688,
),
(
"electric_field_strength",
DPTArray((0xC6, 0x17, 0x1C, 0x39)),
-9671.056,
),
(
"electric_flux",
DPTArray((0x45, 0x8F, 0x6C, 0xFD)),
4589.624,
),
(
"electric_flux_density",
DPTArray((0xC6, 0x0, 0x50, 0xA8)),
-8212.164,
),
(
"electric_polarization",
DPTArray((0x45, 0xF8, 0x89, 0xC6)),
7953.222,
),
(
"electric_potential",
DPTArray((0xC6, 0x18, 0xA4, 0xAF)),
-9769.171,
),
(
"electric_potential_difference",
DPTArray((0xC6, 0xF, 0x1D, 0x6)),
-9159.256,
),
(
"electromagnetic_moment",
DPTArray((0x45, 0x82, 0x48, 0xAE)),
4169.085,
),
(
"electromotive_force",
DPTArray((0x45, 0xBC, 0xEF, 0xEB)),
6045.99,
),
(
"energy",
DPTArray((0x45, 0x4B, 0xB3, 0xF8)),
3259.248,
),
(
"enthalpy",
DPTArray((0x76, 0xDD)),
287866.88,
),
(
"flow_rate_m3h",
DPTArray((0x99, 0xEA, 0xC0, 0x55)),
-1712668587,
),
(
"force",
DPTArray((0x45, 0x9E, 0x2C, 0xE1)),
5061.61,
),
(
"frequency",
DPTArray((0x45, 0xC2, 0x3C, 0x44)),
6215.533,
),
(
"heatcapacity",
DPTArray((0xC5, 0xB3, 0x56, 0x7E)),
-5738.812,
),
(
"heatflowrate",
DPTArray((0x44, 0xEC, 0x80, 0x7A)),
1892.015,
),
(
"heat_quantity",
DPTArray((0xC5, 0xA6, 0xB6, 0xD5)),
-5334.854,
),
(
"humidity",
DPTArray((0x7E, 0xE1)),
577044.48,
),
(
"impedance",
DPTArray((0x45, 0xDD, 0x79, 0x6D)),
7087.178,
),
(
"illuminance",
DPTArray((0x7C, 0x5E)),
366346.24,
),
(
"kelvin_per_percent",
DPTArray((0xFA, 0xBD)),
-441384.96,
),
(
"length",
DPTArray((0xC5, 0x9D, 0xAE, 0xC5)),
-5045.846,
),
(
"length_mm",
DPTArray((0x56, 0xB9)),
22201,
),
(
"light_quantity",
DPTArray((0x45, 0x4A, 0xF5, 0x68)),
3247.338,
),
(
"long_delta_timesec",
DPTArray((0x45, 0xB2, 0x17, 0x54)),
1169299284,
),
(
"luminance",
DPTArray((0x45, 0x18, 0xD9, 0x76)),
2445.591,
),
(
"luminous_flux",
DPTArray((0x45, 0xBD, 0x16, 0x9)),
6050.754,
),
(
"luminous_intensity",
DPTArray((0x46, 0xB, 0xBE, 0x7E)),
8943.623,
),
(
"magnetic_field_strength",
DPTArray((0x44, 0x15, 0xF1, 0xAD)),
599.7762,
),
(
"magnetic_flux",
DPTArray((0xC5, 0xCB, 0x3C, 0x98)),
-6503.574,
),
(
"magnetic_flux_density",
DPTArray((0x45, 0xB6, 0xBD, 0x42)),
5847.657,
),
(
"magnetic_moment",
DPTArray((0xC3, 0x8E, 0x7F, 0x73)),
-284.9957,
),
(
"magnetic_polarization",
DPTArray((0x45, 0x8C, 0xFA, 0xCB)),
4511.349,
),
(
"magnetization",
DPTArray((0x45, 0xF7, 0x9D, 0xA2)),
7923.704,
),
(
"magnetomotive_force",
DPTArray((0xC6, 0x4, 0xC2, 0xDA)),
-8496.713,
),
(
"mass",
DPTArray((0x45, 0x8F, 0x70, 0xA4)),
4590.08,
),
(
"mass_flux",
DPTArray((0xC6, 0x7, 0x34, 0xFF)),
-8653.249,
),
(
"mol",
DPTArray((0xC4, 0xA0, 0xF4, 0x68)),
-1287.638,
),
(
"momentum",
DPTArray((0xC5, 0x27, 0xAA, 0x5B)),
-2682.647,
),
(
"percent",
DPTArray((0xE3,)),
89,
),
(
"percentU8",
DPTArray((0x6B,)),
107,
),
(
"percentV8",
DPTArray((0x20,)),
32,
),
(
"percentV16",
DPTArray((0x8A, 0x2F)),
-30161,
),
(
"phaseanglerad",
DPTArray((0x45, 0x54, 0xAC, 0x2E)),
3402.761,
),
(
"phaseangledeg",
DPTArray((0xC5, 0x25, 0x13, 0x38)),
-2641.201,
),
(
"power",
DPTArray((0x45, 0xCB, 0xE2, 0x5C)),
6524.295,
),
(
"power_2byte",
DPTArray((0x6D, 0x91)),
116736.0,
),
(
"power_density",
DPTArray((0x65, 0x3E)),
54968.32,
),
(
"powerfactor",
DPTArray((0xC5, 0x35, 0x28, 0x21)),
-2898.508,
),
(
"ppm",
DPTArray((0x7F, 0x74)),
625213.44,
),
(
"pressure",
DPTArray((0xC5, 0xE6, 0xE6, 0x63)),
-7388.798,
),
(
"pressure_2byte",
DPTArray((0x7C, 0xF4)),
415498.24,
),
(
"pulse",
DPTArray((0xFC,)),
252,
),
(
"rain_amount",
DPTArray((0xE0, 0xD0)),
-75366.4,
),
(
"reactance",
DPTArray((0x45, 0xB0, 0x50, 0x91)),
5642.071,
),
(
"reactive_energy",
DPTArray((0x1A, 0x49, 0x6D, 0xA7)),
441019815,
),
(
"reactive_energy_kvarh",
DPTArray((0xCC, 0x62, 0x5, 0x31)),
-865991375,
),
(
"resistance",
DPTArray((0xC5, 0xFC, 0x5F, 0xC2)),
-8075.97,
),
(
"resistivity",
DPTArray((0xC5, 0x57, 0x76, 0xC3)),
-3447.423,
),
(
"rotation_angle",
DPTArray((0x2D, 0xDC)),
11740,
),
(
"scene_number",
DPTArray((0x1,)),
2,
),
(
"self_inductance",
DPTArray((0xC4, 0xA1, 0xB0, 0x6)),
-1293.501,
),
(
"solid_angle",
DPTArray((0xC5, 0xC6, 0xE5, 0x47)),
-6364.66,
),
(
"sound_intensity",
DPTArray((0xC4, 0xF2, 0x56, 0xE6)),
-1938.716,
),
(
"speed",
DPTArray((0xC5, 0xCD, 0x1C, 0x6A)),
-6563.552,
),
(
"stress",
DPTArray((0x45, 0xDC, 0xA8, 0xF2)),
7061.118,
),
(
"surface_tension",
DPTArray((0x46, 0xB, 0xAC, 0x11)),
8939.017,
),
(
"temperature",
DPTArray((0x77, 0x88)),
315883.52,
),
(
"temperature_a",
DPTArray((0xF1, 0xDB)),
-257720.32,
),
(
"temperature_difference",
DPTArray((0xC6, 0xC, 0x50, 0xBC)),
-8980.184,
),
(
"temperature_difference_2byte",
DPTArray((0xA9, 0xF4)),
-495.36,
),
(
"temperature_f",
DPTArray((0x67, 0xA9)),
80322.56,
),
(
"thermal_capacity",
DPTArray((0x45, 0x83, 0xEA, 0xB3)),
4221.337,
),
(
"thermal_conductivity",
DPTArray((0xC5, 0x9C, 0x4D, 0x22)),
-5001.642,
),
(
"thermoelectric_power",
DPTArray((0x41, 0xCF, 0x9E, 0x4F)),
25.9523,
),
(
"time_1",
DPTArray((0x5E, 0x1E)),
32071.68,
),
(
"time_2",
DPTArray((0xFB, 0x29)),
-405995.52,
),
(
"time_period_100msec",
DPTArray((0x6A, 0x35)),
27189,
),
(
"time_period_10msec",
DPTArray((0x32, 0x3)),
12803,
),
(
"time_period_hrs",
DPTArray((0x29, 0xDE)),
10718,
),
(
"time_period_min",
DPTArray((0x0, 0x54)),
84,
),
(
"time_period_msec",
DPTArray((0x93, 0xC7)),
37831,
),
(
"time_period_sec",
DPTArray((0xE0, 0xF5)),
57589,
),
(
"time_seconds",
DPTArray((0x45, 0xEC, 0x91, 0x7C)),
7570.186,
),
(
"torque",
DPTArray((0xC5, 0x9, 0x23, 0x5F)),
-2194.211,
),
(
"voltage",
DPTArray((0x6D, 0xBF)),
120504.32,
),
(
"volume",
DPTArray((0x46, 0x16, 0x98, 0x43)),
9638.065,
),
(
"volume_flow",
DPTArray((0x7C, 0xF5)),
415825.92,
),
(
"volume_flux",
DPTArray((0xC5, 0x4, 0x2D, 0x72)),
-2114.84,
),
(
"weight",
DPTArray((0x45, 0x20, 0x10, 0xE8)),
2561.057,
),
(
"work",
DPTArray((0x45, 0x64, 0x5D, 0xBE)),
3653.859,
),
(
"wind_speed_ms",
DPTArray((0x7D, 0x98)),
469237.76,
),
(
"wind_speed_kmh",
DPTArray((0x68, 0x0)),
0.0,
),
],
)
async def test_sensor_value_types(
self,
value_type,
raw_payload,
expected_state,
):
"""Test sensor value types."""
xknx = XKNX()
sensor = Sensor(
xknx,
"TestSensor",
group_address_state="1/2/3",
value_type=value_type,
)
await sensor.process(
Telegram(
destination_address=GroupAddress("1/2/3"),
payload=GroupValueWrite(value=raw_payload),
)
)
assert sensor.resolve_state() == expected_state
async def test_always_callback_sensor(self):
"""Test always callback sensor."""
xknx = XKNX()
sensor = Sensor(
xknx,
"TestSensor",
group_address_state="1/2/3",
always_callback=False,
value_type="volume_liquid_litre",
)
after_update_callback = AsyncMock()
sensor.register_device_updated_cb(after_update_callback)
payload = DPTArray((0x00, 0x00, 0x01, 0x00))
# set initial payload of sensor
sensor.sensor_value.value = 256
telegram = Telegram(
destination_address=GroupAddress("1/2/3"), payload=GroupValueWrite(payload)
)
response_telegram = Telegram(
destination_address=GroupAddress("1/2/3"),
payload=GroupValueResponse(payload),
)
# verify not called when always_callback is False
await sensor.process(telegram)
after_update_callback.assert_not_called()
after_update_callback.reset_mock()
sensor.always_callback = True
# verify called when always_callback is True
await sensor.process(telegram)
after_update_callback.assert_called_once()
after_update_callback.reset_mock()
# verify not called when processing read responses
await sensor.process(response_telegram)
after_update_callback.assert_not_called()
#
# SYNC
#
async def test_sync(self):
"""Test sync function / sending group reads to KNX bus."""
xknx = XKNX()
sensor = Sensor(
xknx, "TestSensor", value_type="temperature", group_address_state="1/2/3"
)
await sensor.sync()
assert xknx.telegrams.qsize() == 1
telegram = xknx.telegrams.get_nowait()
assert telegram == Telegram(
destination_address=GroupAddress("1/2/3"), payload=GroupValueRead()
)
#
# HAS GROUP ADDRESS
#
def test_has_group_address(self):
"""Test sensor has group address."""
xknx = XKNX()
sensor = Sensor(
xknx, "TestSensor", value_type="temperature", group_address_state="1/2/3"
)
assert sensor.has_group_address(GroupAddress("1/2/3"))
assert not sensor.has_group_address(GroupAddress("1/2/4"))
#
# TEST PROCESS
#
async def test_process(self):
"""Test process / reading telegrams from telegram queue."""
xknx = XKNX()
sensor = Sensor(
xknx, "TestSensor", value_type="temperature", group_address_state="1/2/3"
)
telegram = Telegram(
destination_address=GroupAddress("1/2/3"),
payload=GroupValueWrite(DPTArray((0x06, 0xA0))),
)
await sensor.process(telegram)
assert sensor.sensor_value.value == 16.96
assert sensor.sensor_value.telegram.payload.value == DPTArray((0x06, 0xA0))
assert sensor.resolve_state() == 16.96
# test HomeAssistant device class
assert sensor.ha_device_class() == "temperature"
async def test_process_callback(self):
"""Test process / reading telegrams from telegram queue. Test if callback is called."""
xknx = XKNX()
sensor = Sensor(
xknx, "TestSensor", group_address_state="1/2/3", value_type="temperature"
)
after_update_callback = AsyncMock()
sensor.register_device_updated_cb(after_update_callback)
telegram = Telegram(
destination_address=GroupAddress("1/2/3"),
payload=GroupValueWrite(DPTArray((0x01, 0x02))),
)
await sensor.process(telegram)
after_update_callback.assert_called_with(sensor)
assert sensor.last_telegram == telegram
|
# File: emout/plot/basic_plot.py
import copy
import emout.utils as utils
import matplotlib
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.colors import Colormap
import matplotlib.colors as mcolors
_r = 0.98
_d = 0.5
mycmap = mcolors.LinearSegmentedColormap('gray-jet', {
'red': ((0.00, 0.3, 0.3),
(_d*(1-_r), 0.3, 0.3),
(0.35*_r+(1-_r), 0, 0),
(0.66*_r+(1-_r), 1, 1),
(0.89*_r+(1-_r), 1, 1),
(1.00, 0.5, 0.5)),
'green': ((0.00, 0.3, 0.3),
(_d*(1-_r), 0.3, 0.3),
(0.125*_r+(1-_r), 0, 0),
(0.375*_r+(1-_r), 1, 1),
(0.640*_r+(1-_r), 1, 1),
(0.910*_r+(1-_r), 0, 0),
(1.000, 0, 0)),
'blue': ((0.00, 0.3, 0.3),
(_d*(1-_r), 0.3, 0.3),
(0.00*_r+(1-_r), 0.5, 0.5),
(0.11*_r+(1-_r), 1, 1),
(0.34*_r+(1-_r), 1, 1),
(0.65*_r+(1-_r), 0, 0),
(1.00, 0, 0))
})
def figsize_with_2d(data2d, dpi=10):
"""2次元データから図のサイズを計算する.
Parameters
----------
data2d : numpy.ndarray
2次元データ
dpi : int, optional
1データを何pixelで表すか, by default 10
Returns
-------
(float, float)
図のサイズ
"""
px = 1/plt.rcParams['figure.dpi'] * dpi
figsize = (data2d.shape[1]*px, data2d.shape[0]*px)
return figsize
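# Worked example (assumes matplotlib's default figure.dpi of 100, which may differ locally):
# for data of shape (50, 100) and dpi=10, px = 1/100 * 10 = 0.1, so figsize == (10.0, 5.0) inches.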
def plot_2dmap(data2d,
mesh=None,
savefilename=None,
cmap=mycmap,
mask_color='gray',
vmin=None,
vmax=None,
figsize=None,
xlabel=None,
ylabel=None,
title=None,
interpolation='bilinear',
dpi=10,
**kwargs):
"""2次元カラーマップをプロットする.
Parameters
----------
data2d : numpy.ndarray
2次元データ
mesh : (numpy.ndarray, numpy.ndarray), optional
メッシュ, by default None
savefilename : str, optional
保存するファイル名(Noneの場合保存しない), by default None
cmap : matplotlib.Colormap or str or None, optional
カラーマップ, by default cm.coolwarm
mask_color : str
マスクされた位置の色, by default 'gray'
vmin : float, optional
最小値, by default None
vmax : float, optional
最大値, by default None
figsize : (float, float), optional
図のサイズ, by default None
xlabel : str, optional
x軸のラベル, by default None
ylabel : str, optional
y軸のラベル, by default None
title : str, optional
タイトル, by default None
interpolation : str, optional
用いる補間方法, by default 'bilinear'
dpi : int, optional
解像度(figsizeが指定された場合は無視される), by default 10
Returns
-------
AxesImage or None
プロットしたimageデータ(保存した場合None)
"""
if mesh is None:
x = list(range(data2d.shape[1]))
y = list(range(data2d.shape[0]))
mesh = np.meshgrid(x, y)
if cmap is not None:
if isinstance(cmap, str):
cmap = copy.copy(cm.get_cmap(str(cmap)))
else:
cmap = copy.copy(cmap)
cmap.set_bad(color=mask_color)
extent = [mesh[0][0, 0], mesh[0][-1, -1],
mesh[1][0, 0], mesh[1][-1, -1]]
img = plt.imshow(data2d,
interpolation=interpolation,
cmap=cmap,
origin='lower',
vmin=vmin,
vmax=vmax,
extent=extent,
aspect='auto')
plt.colorbar()
if title is not None:
plt.title(title)
if xlabel is not None:
plt.xlabel(xlabel)
if ylabel is not None:
plt.ylabel(ylabel)
if savefilename is not None:
plt.gcf().savefig(savefilename)
plt.close(plt.gcf())
return None
else:
return img
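# Minimal usage sketch for plot_2dmap (hypothetical data, not part of the original module):
#   import numpy as np
#   img = plot_2dmap(np.random.rand(64, 128), title="example", xlabel="x", ylabel="y")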
def plot_2d_contour(data2d,
mesh=None,
levels=None,
colors=['black'],
cmap=None,
alpha=1,
vmin=None,
vmax=None,
savefilename=None,
figsize=None,
xlabel=None,
ylabel=None,
title=None,
dpi=10,
fmt='%1.1f',
fontsize=12,
**kwargs):
"""2次元等高線をプロットする.
Parameters
----------
data2d : numpy.ndarray
2次元データ
mesh : (numpy.ndarray, numpy.ndarray), optional
メッシュ, by default None
levels : int
等高線数, by default None
alpha : float
透明度(0.0~1.0), by default 1
savefilename : str, optional
保存するファイル名(Noneの場合保存しない), by default None
cmap : matplotlib.Colormap or str or None, optional
カラーマップ, by default None
mask_color : str
マスクされた位置の色, by default 'gray'
vmin : float, optional
最小値, by default None
vmax : float, optional
最大値, by default None
figsize : (float, float), optional
図のサイズ, by default None
xlabel : str, optional
x軸のラベル, by default None
ylabel : str, optional
y軸のラベル, by default None
title : str, optional
タイトル, by default None
interpolation : str, optional
用いる補間方法, by default 'bilinear'
dpi : int, optional
解像度(figsizeが指定された場合は無視される), by default 10
fmt : str
clabelの形式, by default '%1.1f'
fontsize : str
clabelのフォントサイズ, by default 12
Returns
-------
AxesImage or None
プロットしたimageデータ(保存した場合None)
"""
if mesh is None:
x = list(range(data2d.shape[1]))
y = list(range(data2d.shape[0]))
mesh = np.meshgrid(x, y)
kwargs = {
'alpha': alpha,
'vmin': vmin,
'vmax': vmax,
}
if cmap is None:
kwargs['colors'] = colors
else:
kwargs['cmap'] = cmap
if levels is not None:
kwargs['levels'] = levels
cont = plt.contour(*mesh, data2d, **kwargs)
cont.clabel(fmt=fmt, fontsize=fontsize)
if title is not None:
plt.title(title)
if xlabel is not None:
plt.xlabel(xlabel)
if ylabel is not None:
plt.ylabel(ylabel)
if savefilename is not None:
plt.gcf().savefig(savefilename)
plt.close(plt.gcf())
return None
else:
return cont
def plot_surface(x, y, z, value,
ax3d=None,
add_colorbar=False,
savefilename=None,
cmap=cm.jet,
mask_color='gray',
vmin=None,
vmax=None,
figsize=None,
xlabel=None,
ylabel=None,
zlabel=None,
title=None,
ninterp=1,
function='linear',
dpi=10):
"""3次元表面プロットをする.
Parameters
----------
x : (numpy.ndarray, numpy.ndarray), optional
x座標のメッシュ
y : (numpy.ndarray, numpy.ndarray), optional
y座標のメッシュ
z : (numpy.ndarray, numpy.ndarray), optional
z座標のメッシュ
val : (numpy.ndarray, numpy.ndarray), optional
値のメッシュ
ax3d : Axes3D
Axes3Dオブジェクト, by default None
savefilename : str, optional
保存するファイル名(Noneの場合保存しない), by default None
cmap : matplotlib.Colormap or str or None, optional
カラーマップ, by default cm.coolwarm
vmin : float, optional
最小値, by default None
vmax : float, optional
最大値, by default None
figsize : (float, float), optional
図のサイズ, by default None
xlabel : str, optional
x軸のラベル, by default None
ylabel : str, optional
y軸のラベル, by default None
zlabel : str, optional
z軸のラベル, by default None
title : str, optional
タイトル, by default None
dpi : int, optional
解像度(figsizeが指定された場合は無視される), by default 10
Returns
-------
AxesImage or None
プロットしたimageデータ(保存した場合None)
"""
if savefilename is not None:
if figsize is None:
fig = plt.figure()
else:
if figsize == 'auto':
figsize = figsize_with_2d(x, dpi=dpi)
fig = plt.figure(figsize=figsize)
else:
fig = plt.gcf()
if ax3d is None:
ax3d = fig.gca(projection='3d')
if cmap is not None:
if isinstance(cmap, str):
cmap = copy.copy(cm.get_cmap(str(cmap)))
else:
cmap = copy.copy(cmap)
cmap.set_bad(color=mask_color)
if ninterp is not None:
x = utils.interp2d(x, ninterp, method=function)
y = utils.interp2d(y, ninterp, method=function)
z = utils.interp2d(z, ninterp, method=function)
value = utils.interp2d(value, ninterp)
if vmin is None:
vmin = value.min()
if vmax is None:
vmax = value.max()
norm = matplotlib.colors.Normalize(vmin, vmax)
mappable = cm.ScalarMappable(cmap=cmap, norm=norm)
mappable.set_array([])
value_colors = mappable.to_rgba(value)
surf = ax3d.plot_surface(x, y, z,
facecolors=value_colors,
vmin=vmin,
vmax=vmax,
shade=False)
if add_colorbar:
plt.colorbar(mappable, ax=ax3d)
if title is not None:
ax3d.set_title(title)
if xlabel is not None:
ax3d.set_xlabel(xlabel)
if ylabel is not None:
ax3d.set_ylabel(ylabel)
if zlabel is not None:
ax3d.set_zlabel(zlabel)
if savefilename is not None:
fig.savefig(savefilename)
plt.close(fig)
return None
else:
return surf
def plot_line(data1d,
x=None,
savefilename=None,
vmin=None,
vmax=None,
figsize=None,
xlabel=None,
ylabel=None,
label=None,
title=None):
"""1次元データをプロットする.
Parameters
----------
data1d : array-like or scalar
プロットする1次元データ
x : array-like or scalar
横軸となる1次元データ, by default None
savefilename : str, optional
保存するファイル名, by default None
vmin : float, optional
最小値, by default None
vmax : float, optional
最大値, by default None
figsize : (float, float), optional
図のサイズ, by default None
xlabel : str, optional
横軸のラベル, by default None
ylabel : str, optional
縦軸のラベル, by default None
label : str, optional
ラベル, by default None
title : str, optional
タイトル, by default None
Returns
-------
Line2D or None
プロットデータを表す線オブジェクト(保存した場合None)
"""
if savefilename is not None:
if figsize is None:
fig = plt.figure()
else:
fig = plt.figure(figsize=figsize)
if x is None:
line = plt.plot(data1d, label=label)
else:
line = plt.plot(x, data1d, label=label)
plt.ylim([vmin, vmax])
if title is not None:
plt.title(title)
if xlabel is not None:
plt.xlabel(xlabel)
if ylabel is not None:
plt.ylabel(ylabel)
if savefilename is not None:
fig.savefig(savefilename)
plt.close(fig)
return None
else:
return line
def plot_2d_vector(x_data2d,
y_data2d,
mesh=None,
savefilename=None,
color=None,
scale=1,
scaler='standard',
skip=1,
easy_to_read=True,
figsize=None,
xlabel=None,
ylabel=None,
title=None,
dpi=10):
"""2次元ベクトル図をプロットする.
Parameters
----------
x_data2d, y_data2d : numpy.ndarray
2次元データ
mesh : (numpy.ndarray, numpy.ndarray), optional
メッシュ, by default None
savefilename : str, optional
保存するファイル名(Noneの場合保存しない), by default None
color : str
ベクトルの色, by default None
scale : float
ベクトルの大きさ係数(最終的な大きさにこの値を掛ける), by default 1
skip : int
プロットするデータ間隔, by default 1
easy_to_read : bool
ベクトルを見やすい大きさにスケーリングするならTrue, by default True
figsize : (float, float), optional
図のサイズ, by default None
xlabel : str, optional
x軸のラベル, by default None
ylabel : str, optional
y軸のラベル, by default None
title : str, optional
タイトル, by default None
interpolation : str, optional
用いる補間方法, by default 'bilinear'
dpi : int, optional
解像度(figsizeが指定された場合は無視される), by default 10
Returns
-------
AxesImage or None
プロットしたimageデータ(保存した場合None)
"""
fig = None
if savefilename is not None:
if figsize is None:
fig = plt.figure()
else:
if figsize == 'auto':
figsize = figsize_with_2d(x_data2d, dpi=dpi)
fig = plt.figure(figsize=figsize)
if mesh is None:
x = list(range(x_data2d.shape[1]))
y = list(range(x_data2d.shape[0]))
mesh = np.meshgrid(x, y)
x = mesh[0]
y = mesh[1]
U = np.array(x_data2d)
V = np.array(y_data2d)
x_skip = skip if type(skip) == int else skip[0]
y_skip = skip if type(skip) == int else skip[1]
x = x[::y_skip, ::x_skip]
y = y[::y_skip, ::x_skip]
U = U[::y_skip, ::x_skip]
V = V[::y_skip, ::x_skip]
norm = np.sqrt(U**2 + V**2)
if scaler == 'standard':
norm_max = np.nanmax(np.abs(norm))
U /= norm_max
V /= norm_max
elif scaler == 'normal':
U /= norm
V /= norm
elif scaler == 'log':
U = U / norm * np.log(norm+1)
V = V / norm * np.log(norm+1)
    # Linearly scale the vectors to an easily readable size
if easy_to_read:
dx = (x.max() - x.min()) / x.shape[0]
multiplier = dx * 1.2
norm_mean = np.nanmean(np.sqrt(U**2 + V**2))
U *= scale / norm_mean * multiplier
V *= scale / norm_mean * multiplier
img = plt.quiver(x,
y,
U,
V,
angles='xy',
scale_units='xy',
scale=1,
)
if title is not None:
plt.title(title)
if xlabel is not None:
plt.xlabel(xlabel)
if ylabel is not None:
plt.ylabel(ylabel)
if savefilename is not None:
fig.savefig(savefilename)
plt.close(fig)
return None
else:
return img
|
# File: app/nanoleaf/state.py
import colorsys
import re
from app.nanoleaf.model import AuroraObject
from app.nanoleaf.exceptions import BadRequestException
class State(AuroraObject):
def __init__(self, requester):
super().__init__(requester)
@property
def color_mode(self):
"""Returns the current color mode."""
return self._requester.request(method="GET", endpoint="state/colorMode")
@property
def on(self):
"""Returns True if the device is on, False if it's off"""
return self._requester.request(method="GET", endpoint="state/on/value")
@on.setter
def on(self, value: bool):
"""Turns the device on/off. True = on, False = off"""
data = {"on": value}
self._requester.request(method="PUT", endpoint="state", data=data)
@property
def off(self):
"""Returns True if the device is off, False if it's on"""
return not self.on
@off.setter
def off(self, value: bool):
"""Turns the device on/off. True = off, False = on"""
self.on = not value
def on_toggle(self):
"""Switches the on/off state of the device"""
self.on = not self.on
@property
def brightness(self):
"""Returns the brightness of the device (0-100)"""
return self._requester.request(method="GET", endpoint="state/brightness/value")
@brightness.setter
def brightness(self, level):
"""Sets the brightness to the given level (0-100)"""
data = {"brightness": {"value": level}}
self._requester.request(method="PUT", endpoint="state", data=data)
@property
def brightness_min(self):
"""Returns the minimum brightness possible. (This always returns 0)"""
return self._requester.request(method="GET", endpoint="state/brightness/min")
@property
def brightness_max(self):
"""Returns the maximum brightness possible. (This always returns 100)"""
return self._requester.request(method="GET", endpoint="state/brightness/max")
def brightness_raise(self, level):
"""Raise the brightness of the device by a relative amount (negative lowers brightness)"""
data = {"brightness": {"increment": level}}
self._requester.request(method="PUT", endpoint="state", data=data)
def brightness_lower(self, level):
"""Lower the brightness of the device by a relative amount (negative raises brightness)"""
self.brightness_raise(-level)
@property
def hue(self):
"""Returns the hue of the device (0-360)"""
return self._requester.request(method="GET", endpoint="state/hue/value")
@hue.setter
def hue(self, level):
"""Sets the hue to the given level (0-360)"""
data = {"hue": {"value": level}}
self._requester.request(method="PUT", endpoint="state", data=data)
@property
def hue_min(self):
"""Returns the minimum hue possible. (This always returns 0)"""
return self._requester.request(method="GET", endpoint="state/hue/min")
@property
def hue_max(self):
"""Returns the maximum hue possible. (This always returns 360)"""
return self._requester.request(method="GET", endpoint="state/hue/max")
def hue_raise(self, level):
"""Raise the hue of the device by a relative amount (negative lowers hue)"""
data = {"hue": {"increment": level}}
self._requester.request(method="PUT", endpoint="state", data=data)
def hue_lower(self, level):
"""Lower the hue of the device by a relative amount (negative raises hue)"""
self.hue_raise(-level)
@property
def saturation(self):
"""Returns the saturation of the device (0-100)"""
return self._requester.request(method="GET", endpoint="state/sat/value")
@saturation.setter
def saturation(self, level):
"""Sets the saturation to the given level (0-100)"""
data = {"sat": {"value": level}}
self._requester.request(method="PUT", endpoint="state", data=data)
@property
def saturation_min(self):
"""Returns the minimum saturation possible. (This always returns 0)"""
self._requester.request(method="GET", endpoint="state/sat/min")
@property
def saturation_max(self):
"""Returns the maximum saturation possible. (This always returns 100)"""
self._requester.request(method="GET", endpoint="state/sat/max")
def saturation_raise(self, level):
"""Raise the saturation of the device by a relative amount (negative lowers saturation)"""
data = {"sat": {"increment": level}}
self._requester.request(method="PUT", endpoint="state", data=data)
def saturation_lower(self, level):
"""Lower the saturation of the device by a relative amount (negative raises saturation)"""
self.saturation_raise(-level)
@property
def color_temperature(self):
"""Returns the color temperature of the device (0-100)"""
return self._requester.request(method="GET", endpoint="state/ct/value")
@color_temperature.setter
def color_temperature(self, level):
"""Sets the color temperature to the given level (0-100)"""
data = {"ct": {"value": level}}
self._requester.request(method="PUT", endpoint="state", data=data)
@property
def color_temperature_min(self):
"""Returns the minimum color temperature possible. (This always returns 1200)"""
return self._requester.request(method="GET", endpoint="state/ct/min")
@property
def color_temperature_max(self):
"""Returns the maximum color temperature possible. (This always returns 6500)"""
return self._requester.request(method="GET", endpoint="state/ct/max")
def color_temperature_raise(self, level):
"""Raise the color temperature of the device by a relative amount (negative lowers color temperature)"""
data = {"ct": {"increment": level}}
self._requester.request(method="PUT", endpoint="state", data=data)
def color_temperature_lower(self, level):
"""Lower the color temperature of the device by a relative amount (negative raises color temperature)"""
self.color_temperature_raise(-level)
# TODO: Shame on all these magic numbers. SHAME.
@property
def rgb(self):
"""The color of the device, as represented by 0-255 RGB values"""
hue = self.hue
saturation = self.saturation
brightness = self.brightness
if hue is None or saturation is None or brightness is None:
return None
rgb = colorsys.hsv_to_rgb(hue / 360, saturation / 100, brightness / 100)
return [int(rgb[0] * 255), int(rgb[1] * 255), int(rgb[2] * 255)]
@rgb.setter
def rgb(self, color):
"""Set the color of the device, as represented by either a hex string or a list of 0-255 RGB values"""
try:
red, green, blue = color
except ValueError:
try:
hexcolor = color
reg_match = re.match("^([A-Fa-f0-9]{6})$", hexcolor)
if reg_match:
red = int(hexcolor[:2], 16)
green = int(hexcolor[2:-2], 16)
blue = int(hexcolor[-2:], 16)
else:
print("Error: Color must be in valid hex format.")
return
except ValueError:
print("Error: Color must have one hex value or three 0-255 values.")
return
if not 0 <= red <= 255:
print("Error: Red value out of range! (0-255)")
return
if not 0 <= green <= 255:
print("Error: Green value out of range! (0-255)")
return
if not 0 <= blue <= 255:
print("Error: Blue value out of range! (0-255)")
return
hsv = colorsys.rgb_to_hsv(red / 255, green / 255, blue / 255)
hue = int(hsv[0] * 360)
saturation = int(hsv[1] * 100)
brightness = int(hsv[2] * 100)
data = {"hue": {"value": hue}, "sat": {"value": saturation}, "brightness": {"value": brightness}}
self._requester.request(method="PUT", endpoint="state", data=data)
|
"""Routines to generate spatial and temporal partitions."""
import numpy as np
from attr import dataclass, field
__all__ = [
"Mesh",
"MeshArrays",
"MeshPartitions",
"Partition",
"TimePartition",
]
@dataclass(frozen=True)
class Partition:
"""Construct a spatial partition.
:param float lower_bound:
The partition lower bound.
:param float upper_bound:
The partition upper bound.
:param int num_segments:
        The partition number of segments.
:param bool endpoint:
Indicate whether to consider the upper bound as part of the
partition. By default, the upper bound is excluded
(``endpoint = False``).
:raises ValueError:
If ``upper_bound`` is less than ``lower_bound``.
:raises ValueError:
If ``num_segments`` is a negative integer or zero.
:raises ValueError:
If any of ``lower_bound`` or ``upper_bound`` is ``nan``.
"""
# TODO: Make Partition instances iterable.
# Lower bound.
lower_bound: float
# Upper bound.
upper_bound: float
# Mesh number of segments.
num_segments: int
# Indicate whether to consider the upper bound as part of the partition.
# If `False`, the upper bound is excluded.
endpoint: bool = False
def __attrs_post_init__(self) -> None:
"""Post-initialization procedure."""
if np.isnan(self.lower_bound):
raise ValueError("'nan' is not a valid value for 'lower_bound.")
if np.isnan(self.upper_bound):
raise ValueError("'nan' is not a valid value for 'upper_bound'.")
if not self.upper_bound > self.lower_bound:
raise ValueError(
"'upper_bound' must be greater than 'lower_bound'."
)
if not self.num_segments >= 1:
raise ValueError(
"'num_segments' must be a positive, non-zero integer."
)
@classmethod
def with_size(
cls: type["Partition"],
size: float,
lower_bound: float,
num_segments: int,
endpoint: bool = False,
) -> "Partition":
"""Create a partition with a given lower bound and size.
This method is a convenient alternative to construct a ``Partition``
instance when we want to specify its size and lower bound location.
:param float size:
The partition size (length).
:param float lower_bound:
Location of the new partition lower bound.
:param int num_segments:
The partition number of segments.
:param bool endpoint:
Whether or not to include the endpoint in the partition. It is
``False`` by default.
:rtype: Partition
"""
return cls(
lower_bound=lower_bound,
upper_bound=lower_bound + size,
num_segments=num_segments,
endpoint=endpoint,
)
@classmethod
def make_origin_centered_unit(
cls: type["Partition"], num_segments: int, endpoint: bool = False
) -> "Partition":
"""Get a partition of unit length centered at the origin.
:param int num_segments:
The partition number of segments.
:param bool endpoint:
Whether or not to include the endpoint in the partition. It is
``False`` by default.
:rtype: Partition
"""
return cls(
lower_bound=-0.5,
upper_bound=0.5,
num_segments=num_segments,
endpoint=endpoint,
)
def origin_centered_unit(self) -> "Partition":
"""Get a similar partition of unit length centered at the origin.
The new ``Partition`` instance shares the same number of segments
and the ``endpoint`` attribute as the current partition. However,
its lower and upper bounds are different.
:rtype: Partition
"""
return self.make_origin_centered_unit(
num_segments=self.num_segments,
endpoint=self.endpoint,
)
def origin_centered(self) -> "Partition":
"""Get a similar partition centered at the origin.
The new ``Partition`` instance shares the same number of segments
and the ``endpoint`` attribute as the current partition and has the
same size. However, its lower and upper bounds change.
:rtype: Partition
"""
partition_size = self.size
return Partition(
lower_bound=-partition_size / 2,
upper_bound=partition_size / 2,
num_segments=self.num_segments,
endpoint=self.endpoint,
)
def scaled(self, factor: float) -> "Partition":
"""Make a similar partition under a scaling transformation.
The new ``Partition`` instance shares the same number of segments
and the ``endpoint`` attribute as the current partition.
:param float factor:
A scale factor. The upper and lower bounds of the new partition
will be proportional to the bounds of the current one,
being ``factor`` the proportionality coefficient. Accordingly, the
size of the new partition will be scaled by the same factor too.
:rtype: Partition
"""
return Partition(
lower_bound=self.lower_bound * factor,
upper_bound=self.upper_bound * factor,
num_segments=self.num_segments,
endpoint=self.endpoint,
)
def translated(self, offset: float) -> "Partition":
"""Displace this partition by a fixed number.
The new ``Partition`` instance shares the same number of segments
and the ``endpoint`` attribute as the current partition and has the
same size. However, its lower and upper bounds change due to the
translation.
:param float offset:
The lower and upper bounds of the new partition will be
displaced by the amount set by ``offset``.
:rtype: Partition
"""
return Partition(
lower_bound=self.lower_bound + offset,
upper_bound=self.upper_bound + offset,
num_segments=self.num_segments,
endpoint=self.endpoint,
)
@property
def size(self) -> float:
"""Give the partition length.
:rtype: float
"""
return self.upper_bound - self.lower_bound
@property
def step_size(self) -> float:
"""Partition step size.
:rtype: float
"""
return (self.upper_bound - self.lower_bound) / self.num_segments
@property
def midpoint(self) -> float:
"""Return the partition midpoint.
:rtype: float
"""
return (self.lower_bound + self.upper_bound) / 2
@property
def array(self) -> np.ndarray:
"""Return an array with the partition points.
:rtype: numpy.ndarray
"""
endpoint = self.endpoint
num_segments = self.num_segments + (1 if endpoint else 0)
return np.linspace(
self.lower_bound,
self.upper_bound,
num=num_segments,
endpoint=endpoint,
)
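# Illustrative example (not part of the original module): Partition(lower_bound=0.0,
# upper_bound=1.0, num_segments=4).array gives [0.0, 0.25, 0.5, 0.75]; with endpoint=True
# the same partition also includes the upper bound, giving five points.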
@dataclass(frozen=True)
class TimePartition:
"""Construct a time partition.
:param float time_step:
The partition time step.
:param int num_steps:
The partition number of steps.
:param float ini_time:
The partition initial time. By default, it is zero.
:param bool endpoint:
        Indicate whether to consider the finish time as part of the
        partition. By default, the finish time is included
        (``endpoint = True``).
:raises ValueError:
If ``time_step`` is negative or zero.
:raises ValueError:
If ``num_steps`` is a negative integer or zero.
:raises ValueError:
If any of ``time_step`` or ``ini_time`` is ``nan``.
"""
# TODO: Make TimePartition instances iterable.
# Partition time step.
time_step: float
# Partition number of steps.
num_steps: int
# Partition initial time.
ini_time: float = 0
# Indicate whether to include the end time as part of the mesh.
# If `False`, the end time is excluded.
endpoint: bool = True
def __attrs_post_init__(self) -> None:
"""Post-initialization procedure."""
if np.isnan(self.time_step):
raise ValueError("'nan' is not a valid value for 'time_step.")
if np.isnan(self.ini_time):
raise ValueError("'nan' is not a valid value for 'ini_time'.")
if not self.time_step > 0:
raise ValueError(
"'time_step' must be a positive, non-zero number."
)
if not self.num_steps >= 1:
raise ValueError(
"'num_steps' must be a positive, non-zero integer."
)
@property
def finish_time(self) -> float:
"""Partition finish time.
:rtype: float
"""
return self.ini_time + self.num_steps * self.time_step
@property
def duration(self) -> float:
"""Give the partition duration.
:rtype: float
"""
return self.finish_time - self.ini_time
@property
def array(self) -> np.ndarray:
"""Return an array with the partition points.
:rtype: numpy.ndarray
"""
endpoint = self.endpoint
num_steps = self.num_steps + (1 if endpoint else 0)
return np.linspace(
self.ini_time,
self.finish_time,
num=num_steps,
endpoint=endpoint,
)
# Mesh attributes types.
# See bug https://github.com/python/mypy/issues/9980.
MeshPartitions = tuple[Partition, ...] # type: ignore
MeshArrays = tuple[np.ndarray, ...] # type: ignore
# Variable types for arguments used in transformation methods.
MeshScalingFactors = tuple[float, ...] # type: ignore
MeshTranslationOffsets = tuple[float, ...] # type: ignore
# Error messages.
MESH_DIMENSION_ERROR = (
"The mesh maximum allowed dimension is three. Therefore, you should "
"supply the 'partitions' argument a tuple with at most three elements."
)
@dataclass(frozen=True)
class Mesh:
"""Construct a spatial mesh from several partitions.
:param tuple[Partition, ...] partitions:
A tuple of ``Partition`` instances for each dimension of the
mesh. The tuple must have at most three elements.
"""
# Partitions that form the mesh.
partitions: MeshPartitions
# Mesh sparse arrays.
_arrays: MeshArrays = field(init=False, default=None, repr=False)
def __attrs_post_init__(self) -> None:
"""Post-initialization tasks."""
if self.dimension > 3:
raise ValueError(MESH_DIMENSION_ERROR)
partition_arrays = [partition.array for partition in self.partitions]
arrays = np.meshgrid(*partition_arrays, indexing="ij", sparse=True)
object.__setattr__(self, "_arrays", tuple(arrays))
@property
def dimension(self) -> int:
"""Give the mesh dimension.
It is one for a 1D mesh, two for a 2D mesh, and three for a
3D mesh.
:rtype: int
"""
return len(self.partitions)
@property
def size(self) -> float:
"""Get the mesh size.
For a 1D mesh, it is the length of its only partition. For a
2D mesh, it is the area of the region delimited by its partitions.
For a 3D mesh, it is the volume.
:rtype: float
"""
size = 1.0
for partition in self.partitions:
size *= partition.size
return size
@property
def element_size(self) -> float:
"""Size of a mesh partition element.
:rtype: float
"""
return float(
np.prod([partition.step_size for partition in self.partitions])
)
@property
def num_elements(self) -> int:
"""Get the number of elements that compose the mesh.
:rtype: int
"""
return int(
np.prod([partition.num_segments for partition in self.partitions])
)
@property
def arrays(self) -> MeshArrays:
"""Return the NumPy arrays representing the mesh.
**NOTE**: The returned arrays are sparse.
:rtype: tuple[numpy.ndarray, ...]
"""
return self._arrays
@property
def shape(self):
"""Shape of the mesh arrays after being broadcast.
:rtype: tuple[int, ...]
"""
array_shapes = [array.shape for array in self.arrays]
return np.broadcast_shapes(*array_shapes)
def origin_centered_unit(self) -> "Mesh":
"""Get a new mesh of unit volume whose center lies at the origin.
This method applies a similar transformation to its internal
partitions to achieve the intended result.
:rtype: Mesh
"""
centered_partitions = []
for partition in self.partitions:
centered_partition = partition.origin_centered_unit()
centered_partitions.append(centered_partition)
return Mesh(MeshPartitions(centered_partitions))
def origin_centered(self) -> "Mesh":
"""Get a new mesh whose center lies at the origin.
This method applies a similar transformation to its internal
partitions to achieve the intended result.
:rtype: Mesh
"""
centered_partitions = []
for partition in self.partitions:
centered_partition = partition.origin_centered()
centered_partitions.append(centered_partition)
return Mesh(MeshPartitions(centered_partitions))
def scaled(self, factors: MeshScalingFactors) -> "Mesh":
"""Get a new mesh by applying a scaling transformation.
This method applies a similar transformation to its internal
partitions to achieve the intended result.
:param tuple[float, ...] factors:
A tuple with the same number of elements as this mesh dimension.
:rtype: Mesh
"""
scaled_partitions = []
for partition, factor in zip(self.partitions, factors):
scaled_partition = partition.scaled(factor=factor)
scaled_partitions.append(scaled_partition)
return Mesh(MeshPartitions(scaled_partitions))
def translated(self, offsets: MeshTranslationOffsets) -> "Mesh":
"""Get a new mesh by applying a translation.
This method applies a similar transformation to its internal
partitions to achieve the intended result.
:param tuple[float, ...] offsets:
A tuple with the same number of elements as this mesh dimension.
:rtype: Mesh
"""
        translated_partitions = []
        for partition, offset in zip(self.partitions, offsets):
            translated_partition = partition.translated(offset=offset)
            translated_partitions.append(translated_partition)
        return Mesh(MeshPartitions(translated_partitions))
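# A minimal usage sketch for the partition classes above (not part of the
# original module). It assumes only what is visible here: the TimePartition
# signature (time_step, num_steps, ini_time=0, endpoint=True) and that numpy
# is imported as ``np`` at the top of this file.
if __name__ == "__main__":
    time_partition = TimePartition(time_step=0.5, num_steps=4)
    # finish_time = 0 + 4 * 0.5 = 2.0, and the array includes the endpoint.
    assert time_partition.finish_time == 2.0
    assert time_partition.duration == 2.0
    print(time_partition.array)  # [0.  0.5 1.  1.5 2. ]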
|
import locale
import logging
from subprocess import Popen, PIPE, CalledProcessError
from collections import UserDict
import rebuild_tool.exceptions as ex
from rebuild_tool.pkg_source import PkgSrcArchive, set_class_attrs
from rebuild_tool.utils import subprocess_popen_call, ChangeDir
logger = logging.getLogger(__name__)
class PkgsContainer(UserDict):
@set_class_attrs
def add(self, package, pkg_dir):
'''
Adds new DnfArchive object to self.data
'''
self[package] = DnfArchive(package, pkg_dir)
class DnfArchive(PkgSrcArchive):
'''
Contains methods to download from dnf, unpack, edit and pack srpm
'''
@property
def dependencies(self):
'''
Returns all dependencies of the package found in selected repo
'''
proc_data = subprocess_popen_call(["dnf", "repoquery", "--arch=src",
"--disablerepo=*", "--enablerepo=" + type(self).repo,
"--requires", self.package])
if proc_data['returncode']:
if proc_data['stderr'] == "Error: Unknown repo: '{0}'\n".format(type(self).repo):
raise ex.UnknownRepoException('Repository {} is probably disabled'.format(
type(self).repo))
all_deps = set(proc_data['stdout'].splitlines()[1:])
return all_deps
def download(self):
'''
Download srpm of package from selected repo using dnf.
'''
proc_data = subprocess_popen_call(["dnf", "download", "--disablerepo=*",
"--enablerepo=" + type(self).repo,
"--destdir", self.pkg_dir,
"--source", self.package])
if proc_data['returncode']:
if proc_data['stderr'] == "Error: Unknown repo: '{0}'\n".format(type(self).repo):
raise ex.UnknownRepoException('Repository {} is probably disabled'.format(
type(self).repo))
else:
raise ex.DownloadFailException(proc_data['stderr'])
self.srpm_file = self.get_file('.src.rpm')
def unpack(self):
'''
Unpacks srpm archive
'''
with ChangeDir(self.pkg_dir):
proc1 = Popen(["rpm2cpio", self.srpm_file], stdout=PIPE, stderr=PIPE)
proc2 = Popen(["cpio", "-idmv"], stdin=proc1.stdout, stdout=PIPE, stderr=PIPE)
stream_data = proc2.communicate()
stderr_str = stream_data[1].decode(locale.getpreferredencoding())
if proc2.returncode:
logger.error(stderr_str)
raise CalledProcessError(cmd='rpm2cpio', returncode=proc2.returncode)
self.spec_file = self.get_file('.spec')
def pack(self, save_dir=None):
'''
Builds a srpm using rpmbuild.
        Generated srpm is stored in the directory specified by save_dir.
'''
if not save_dir:
save_dir = self.pkg_dir
try:
msg = Popen(['rpmbuild',
'--define', '_sourcedir {0}'.format(save_dir),
'--define', '_builddir {0}'.format(save_dir),
'--define', '_srcrpmdir {0}'.format(save_dir),
'--define', '_rpmdir {0}'.format(save_dir),
'--define', 'scl_prefix {0}'.format(type(self).prefix),
'-bs', self.spec_file], stdout=PIPE,
stderr=PIPE).communicate()[0].strip()
except OSError:
logger.error('Rpmbuild failed for specfile: {0} and save_dir: {1}'.format(
self.spec_file, self.pkg_dir))
self.srpm_file = self.get_file('.src.rpm')
|
# Copyright 2009-2017 <NAME>.
# This program is distributed under the MIT license.
'''Defines several functions that may be useful when working with dicts.'''
from __future__ import generator_stop
import collections
from python_toolbox import cute_iter_tools
from python_toolbox import comparison_tools
def filter_items(d, condition, double=False, force_dict_type=None):
'''
Get new dict with items from `d` that satisfy the `condition` functions.
`condition` is a function that takes a key and a value.
The newly created dict will be of the same class as `d`, e.g. if you passed
an ordered dict as `d`, the result will be an ordered dict, using the
correct order.
Specify `double=True` to get a tuple of two dicts instead of one. The
second dict will have all the rejected items.
'''
# todo future: possibly shallow-copy `d` to allow for dict classes that
# have more state, (like default factory.)
if force_dict_type is not None:
dict_type = force_dict_type
else:
dict_type = type(d) if (type(d).__name__ != 'dictproxy') else dict
if double:
return tuple(
map(
dict_type,
cute_iter_tools.double_filter(
lambda key_value: condition(key_value[0], key_value[1]),
d.items()
)
)
)
else:
return dict_type(
(key, value) for (key, value) in d.items() if condition(key, value)
)
def get_tuple(d, iterable):
'''Get a tuple of values corresponding to an `iterable` of keys.'''
return tuple(d[key] for key in iterable)
def get_contained(d, container):
'''Get a list of the values in the dict whose keys are in `container`.'''
return [value for (key, value) in d.items() if (key in container)]
def fancy_string(d, indent=0):
'''Show a dict as a string, slightly nicer than dict.__repr__.'''
small_space = ' ' * indent
big_space = ' ' * (indent + 4)
huge_space = ' ' * (indent + 8)
def show(thing, indent=0):
space = ' ' * indent
enter_then_space = '\n' + space
return repr(thing).replace('\n', enter_then_space)
temp1 = (
(big_space + repr(key) + ':\n' + huge_space + show(value, indent + 8))
for (key, value) in list(d.items()))
temp2 = small_space + '{\n' + ',\n'.join(temp1) + '\n' + small_space +'}'
return temp2
def devour_items(d):
'''Iterator that pops (key, value) pairs from `d` until it's empty.'''
while d:
yield d.popitem()
def devour_keys(d):
    '''Iterator that pops keys from `d` until it's exhausted (i.e. empty).'''
while d:
key = next(iter(d.keys()))
del d[key]
yield key
def sum_dicts(dicts):
'''
Return the sum of a bunch of dicts i.e. all the dicts merged into one.
If there are any collisions, the latest dicts in the sequence win.
'''
result = {}
for dict_ in dicts:
result.update(dict_)
return result
def remove_keys(d, keys_to_remove):
'''
Remove keys from a dict.
`keys_to_remove` is allowed to be either an iterable (in which case it will
be iterated on and keys with the same name will be removed), a container
(in which case this function will iterate over the keys of the dict, and if
they're contained they'll be removed), or a filter function (in which case
this function will iterate over the keys of the dict, and if they pass the
filter function they'll be removed.)
If key doesn't exist, doesn't raise an exception.
'''
if isinstance(keys_to_remove, collections.abc.Iterable):
for key in keys_to_remove:
try:
del d[key]
except KeyError:
pass
else:
if isinstance(keys_to_remove, collections.abc.Container):
filter_function = lambda value: value in keys_to_remove
else:
assert isinstance(keys_to_remove, collections.abc.Callable)
filter_function = keys_to_remove
for key in list(d.keys()):
if filter_function(key):
del d[key]
def get_sorted_values(d, key=None):
'''
Get the values of dict `d` as a `tuple` sorted by their respective keys.
'''
kwargs = {'key': key,} if key is not None else {}
return get_tuple(d, sorted(d.keys(), **kwargs))
def reverse(d):
'''
Reverse a `dict`, creating a new `dict` where keys and values are switched.
Example:
>>> reverse({'one': 1, 'two': 2, 'three': 3})
    {1: 'one', 2: 'two', 3: 'three'}
This function requires that:
    1. The values are distinct, i.e. no value appears more than once.
    2. All the values are hashable.
'''
new_d = {}
for key, value in d.items():
if value in new_d:
raise Exception(
f"Value {value} appeared twice! Once with a key of {key} and "
f"then again with a key of {new_d[value]}. This function is "
f"intended only for dicts with distinct values."
)
new_d[value] = key
return new_d
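# A short, hypothetical usage sketch for the helpers above (not part of the
# original module); it only exercises functions defined in this file.
if __name__ == '__main__':
    d = {'one': 1, 'two': 2, 'three': 3}
    assert filter_items(d, lambda key, value: value % 2 == 0) == {'two': 2}
    assert reverse(d) == {1: 'one', 2: 'two', 3: 'three'}
    remove_keys(d, ['one', 'missing'])  # missing keys are silently ignored
    assert d == {'two': 2, 'three': 3}
    assert get_sorted_values({'b': 2, 'a': 1}) == (1, 2)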
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
class View(nn.Module):
def __init__(self, size):
super(View, self).__init__()
self.size = size
def forward(self, tensor):
return tensor.view(self.size)
class VAE(nn.Module):
"""Encoder-Decoder architecture for both WAE-MMD and WAE-GAN."""
def __init__(self, z_dim=32, nc=3):
super(VAE, self).__init__()
self.z_dim = z_dim
self.nc = nc
self.encoder = nn.Sequential(
            nn.Conv2d(nc, 128, 4, 2, 1, bias=False),              # B, 128, 16, 16 (for 32x32 inputs)
            nn.BatchNorm2d(128),
            nn.ReLU(True),
            nn.Conv2d(128, 256, 4, 2, 1, bias=False),             # B, 256, 8, 8
            nn.BatchNorm2d(256),
            nn.ReLU(True),
            nn.Conv2d(256, 512, 4, 2, 1, bias=False),             # B, 512, 4, 4
            nn.BatchNorm2d(512),
            nn.ReLU(True),
            nn.Conv2d(512, 1024, 4, 2, 1, bias=False),            # B, 1024, 2, 2
            nn.BatchNorm2d(1024),
            nn.ReLU(True),
            View((-1, 1024*2*2)),                                 # B, 1024*2*2
)
self.fc_mu = nn.Linear(1024*2*2, z_dim) # B, z_dim
self.fc_logvar = nn.Linear(1024*2*2, z_dim) # B, z_dim
self.decoder = nn.Sequential(
            nn.Linear(z_dim, 1024*4*4),                           # B, 1024*4*4
            View((-1, 1024, 4, 4)),                               # B, 1024, 4, 4
            nn.ConvTranspose2d(1024, 512, 4, 2, 1, bias=False),   # B, 512, 8, 8
            nn.BatchNorm2d(512),
            nn.ReLU(True),
            nn.ConvTranspose2d(512, 256, 4, 2, 1, bias=False),    # B, 256, 16, 16
            nn.BatchNorm2d(256),
            nn.ReLU(True),
            nn.ConvTranspose2d(256, 128, 4, 2, 1, bias=False),    # B, 128, 32, 32
            nn.BatchNorm2d(128),
            nn.ReLU(True),
            nn.ConvTranspose2d(128, nc, 1),                       # B, nc, 32, 32
)
self.weight_init()
def weight_init(self):
for block in self._modules:
try:
for m in self._modules[block]:
kaiming_init(m)
            except TypeError:
                # Non-iterable children (e.g. the fc layers) are initialized directly.
                kaiming_init(self._modules[block])
def forward(self, x):
z = self._encode(x)
mu, logvar = self.fc_mu(z), self.fc_logvar(z)
z = self.reparameterize(mu, logvar)
x_recon = self._decode(z)
return x_recon, z, mu, logvar
def reparameterize(self, mu, logvar):
stds = (0.5 * logvar).exp()
epsilon = torch.randn(*mu.size())
if mu.is_cuda:
stds, epsilon = stds.cuda(), epsilon.cuda()
latents = epsilon * stds + mu
return latents
def _encode(self, x):
return self.encoder(x)
def _decode(self, z):
return self.decoder(z)
class Discriminator(nn.Module):
"""Adversary architecture(Discriminator) for WAE-GAN."""
def __init__(self, z_dim=10):
super(Discriminator, self).__init__()
self.z_dim = z_dim
self.net = nn.Sequential(
nn.Linear(z_dim, 512),
nn.ReLU(True),
nn.Linear(512, 512),
nn.ReLU(True),
nn.Linear(512, 512),
nn.ReLU(True),
nn.Linear(512, 512),
nn.ReLU(True),
nn.Linear(512, 1),
nn.Sigmoid()
)
self.weight_init()
def weight_init(self):
for block in self._modules:
for m in self._modules[block]:
kaiming_init(m)
def forward(self, z):
return self.net(z)
def kaiming_init(m):
if isinstance(m, (nn.Linear, nn.Conv2d)):
        init.kaiming_normal_(m.weight)
if m.bias is not None:
m.bias.data.fill_(0)
elif isinstance(m, (nn.BatchNorm1d, nn.BatchNorm2d)):
m.weight.data.fill_(1)
if m.bias is not None:
m.bias.data.fill_(0)
def normal_init(m, mean, std):
if isinstance(m, (nn.Linear, nn.Conv2d)):
m.weight.data.normal_(mean, std)
if m.bias.data is not None:
m.bias.data.zero_()
elif isinstance(m, (nn.BatchNorm2d, nn.BatchNorm1d)):
m.weight.data.fill_(1)
if m.bias.data is not None:
m.bias.data.zero_()
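# A minimal shape-check sketch (not part of the original file). The spatial
# sizes assume 32x32 inputs, which is what the 1024*2*2 flatten size in the
# encoder implies.
if __name__ == '__main__':
    vae = VAE(z_dim=32, nc=3)
    x = torch.randn(2, 3, 32, 32)
    x_recon, z, mu, logvar = vae(x)
    assert x_recon.shape == (2, 3, 32, 32)
    assert mu.shape == logvar.shape == (2, 32)
    disc = Discriminator(z_dim=32)
    assert disc(z).shape == (2, 1)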
|
import argparse
import sys
import os
import os.path
import requests
import json
from flask import Flask, request, render_template, abort
from werkzeug.utils import secure_filename
ALLOWED_IMG_EXTENSIONS = set(['tiff', 'jpg', 'jpeg', 'png', 'gif', 'bmp', 'ico', 'pbm', 'pgm', 'ppm'])
UPLOAD_FOLDER = '/tmp/smash/uploads'
HOD_APIKEY_FILENAME = 'hod.apikey'
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
app.config['MAX_CONTENT_LENGTH'] = 16 * 1024 * 1024
def load_apikey():
"""
Loads the HoD API key from the configured apikeys directory
:returns: the HoD API key string
"""
with open(os.path.join(app.config['APIKEY_DIR'], HOD_APIKEY_FILENAME), 'r') as f:
apikey = f.read()
return apikey.rstrip("\n\r")
@app.route('/')
def hello_world():
"""
Implements the homepage
"""
return render_template('index.html')
@app.route('/upload', methods=['GET'])
def upload():
"""
Implements the upload page form
"""
return render_template('upload.html')
def allowed_img_file(filename):
"""
Is the image file being uploaded of an acceptable type?
:param filename: filename of the file being uploaded
:returns: True if the filename is acceptable, False otherwise
"""
return '.' in filename and filename.rsplit('.', 1)[1] in ALLOWED_IMG_EXTENSIONS
def wait_for_async_job(async_response):
"""
Waits for an asynchronous HoD job to finish
:param async_response: The response of an asynchronous request to HoD, obtained via the requests library
:returns: The response of the job status call when the status is a final one, obtained via the requests library
"""
apikey = load_apikey()
jobid = async_response.json()['jobID']
unfinished = True
while unfinished:
s = requests.get('https://api.havenondemand.com/1/job/status/{0}'.format(jobid), params={'apikey': apikey})
status = s.json()['status']
        unfinished = status not in ['finished', 'failed']
return s
def do_ocr(filepath):
"""
Does OCR on the provided filepath
:param filepath: Path of a file to do OCR on
:returns: All the OCR'd text from the document, concatenated into one string
"""
apikey = load_apikey()
params = {'apikey': apikey,
'job': '{ "actions": [ { "name": "ocrdocument", "version": "v1", "params": {"file": "doc", "mode": "document_photo"} } ] }' }
files = {'doc': open(filepath, 'rb') }
r = requests.post('https://api.havenondemand.com/1/job/', params=params, files=files)
jobid = r.json()['jobID']
s = wait_for_async_job(r)
texts = []
if s.json()['status'] == 'finished':
for action in s.json()['actions']:
for text_block in action['result']['text_block']:
texts.append(text_block['text'])
return texts
def does_index_exist(index_name):
"""
Checks whether a given index exists
:param index_name: The name of the index to check for
:returns: True if index exists, False otherwise
"""
apikey = load_apikey()
params = {'apikey': apikey}
r = requests.get('https://api.havenondemand.com/1/api/sync/listresources/v1', params=params)
for private_resource in r.json()['private_resources']:
if private_resource['resource'] == index_name:
return True
return False
def create_index(index_name):
"""
Creates an index with the given name
:param index_name: The name of the index to create
"""
apikey = load_apikey()
params = {'apikey': apikey, 'index': index_name, 'flavor': 'explorer'}
r = requests.get('https://api.havenondemand.com/1/api/sync/createtextindex/v1', params=params)
def check_smash_index():
smash_index_name = 'smashdata'
if not does_index_exist(smash_index_name):
create_index(smash_index_name)
def index(filename, title, text):
"""
Indexes a document into the HoD text index
:param filename: The name of the file represented by title and text - becomes the reference of the indexed document
:param title: The title of the indexed document
:param text: The content of the indexed document
"""
apikey = load_apikey()
document = {'title': title, 'reference': filename, 'content': text}
documents = [document]
j = { 'document' : documents }
params = {'apikey': apikey, 'index': 'smash', 'json': json.dumps(j)}
r = requests.post('https://api.havenondemand.com/1/api/async/addtotextindex/v1/', params=params)
status = wait_for_async_job(r)
@app.route('/upload', methods=['POST'])
def do_upload():
"""
Implements the action completed by submitting the upload form.
Conducts OCR on the submitted image file and indexes the resulting text
Renders a new webpage
"""
title = request.form['title']
f = request.files['doc']
if f and allowed_img_file(f.filename):
filename = secure_filename(f.filename)
filepath = os.path.join(app.config['UPLOAD_FOLDER'], filename)
f.save(filepath)
texts = do_ocr(filepath)
os.remove(filepath)
text = ' '.join(texts)
index(f.filename, title, text)
return render_template('doupload.html')
else:
abort(400)
@app.route('/query', methods=['GET'])
def query():
"""
Renders a webpage with the initial state of the query form
"""
return render_template('query_form.html')
@app.route('/query', methods=['POST'])
def doquery():
"""
Gets the query results from the submitted query via HoD and renders the results
"""
apikey = load_apikey()
querytext = request.form['querytext']
params = {'apikey': apikey, 'text': querytext, 'indexes':'smash', 'print':'all'}
r = requests.get('https://api.havenondemand.com/1/api/sync/querytextindex/v1', params=params)
documents = [ {'title': d['title'], 'content': d['content']} for d in r.json()['documents'] ]
return render_template('queryresults.html', documents=documents)
def configure_app(args):
"""
Configures the app from the command line arguments
:params args: Arguments obtained from argparse
"""
app.config['APIKEY_DIR'] = args.apikeydir
# Let's get to work!
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Run SMASH')
    parser.add_argument('--apikeydir', '-a', default='.apikeys')
args = parser.parse_args()
configure_app(args)
check_smash_index()
app.run(host='0.0.0.0')
|
import h5py
import json
import gzip
layer_name_dict = {
'Merge': 'mergeLayer',
'Dense': 'denseLayer',
'Dropout': 'dropoutLayer',
'Flatten': 'flattenLayer',
'Embedding': 'embeddingLayer',
'BatchNormalization': 'batchNormalizationLayer',
'LeakyReLU': 'leakyReLULayer',
'PReLU': 'parametricReLULayer',
'ParametricSoftplus': 'parametricSoftplusLayer',
'ThresholdedLinear': 'thresholdedLinearLayer',
'ThresholdedReLu': 'thresholdedReLuLayer',
'LSTM': 'rLSTMLayer',
'GRU': 'rGRULayer',
'Convolution2D': 'convolution2DLayer',
'MaxPooling2D': 'maxPooling2DLayer',
'Convolution1D': 'convolution1DLayer',
'MaxPooling1D': 'maxPooling1DLayer'
}
layer_params_dict = {
'Merge': ['layers', 'mode', 'concat_axis', 'dot_axes'],
'Dense': ['weights', 'activation'],
'Dropout': ['p'],
'Flatten': [],
'Embedding': ['weights', 'mask_zero'],
'BatchNormalization': ['weights', 'epsilon'],
'LeakyReLU': ['alpha'],
'PReLU': ['weights'],
'ParametricSoftplus': ['weights'],
'ThresholdedLinear': ['theta'],
'ThresholdedReLu': ['theta'],
'LSTM': ['weights', 'activation', 'inner_activation', 'return_sequences'],
'GRU': ['weights', 'activation', 'inner_activation', 'return_sequences'],
'Convolution2D': ['weights', 'nb_filter', 'nb_row', 'nb_col', 'border_mode', 'subsample', 'activation'],
'MaxPooling2D': ['pool_size', 'stride', 'ignore_border'],
'Convolution1D': ['weights', 'nb_filter', 'filter_length', 'border_mode', 'subsample_length', 'activation'],
'MaxPooling1D': ['pool_length', 'stride', 'ignore_border']
}
layer_weights_dict = {
'Dense': ['W', 'b'],
'Embedding': ['E'],
'BatchNormalization': ['gamma', 'beta', 'mean', 'std'],
'PReLU': ['alphas'],
'ParametricSoftplus': ['alphas', 'betas'],
'LSTM': ['W_xi', 'W_hi', 'b_i', 'W_xc', 'W_hc', 'b_c', 'W_xf', 'W_hf', 'b_f', 'W_xo', 'W_ho', 'b_o'],
'GRU': ['W_xz', 'W_hz', 'b_z', 'W_xr', 'W_hr', 'b_r', 'W_xh', 'W_hh', 'b_h'],
'Convolution2D': ['W', 'b'],
'Convolution1D': ['W', 'b']
}
def appr_f32_prec(arr):
arr_formatted = []
for item in arr:
if type(item) is list:
arr_formatted.append(appr_f32_prec(item))
elif type(item) is float:
arr_formatted.append(float('{:.7f}'.format(item)))
else:
arr_formatted.append(item)
return arr_formatted
def get_layer_params(layer, weights_file, layer_num, param_num_offset):
layer_params = []
for param in layer_params_dict[layer['name']]:
if param == 'weights':
weights = {}
weight_names = layer_weights_dict[layer['name']]
for p, name in enumerate(weight_names):
                arr = weights_file.get('layer_{}/param_{}'.format(layer_num, p + param_num_offset))[()]
if arr.dtype == 'float32':
weights[name] = appr_f32_prec(arr.tolist())
else:
weights[name] = arr.tolist()
layer_params.append(weights)
elif param == 'layers':
# for merge layer
merge_branches = []
param_num_offset_update = param_num_offset
for merge_branch in layer['layers']:
merge_branch_layers = []
for merge_branch_layer in merge_branch['layers']:
merge_branch_layer_params = get_layer_params(merge_branch_layer, weights_file, layer_num, param_num_offset_update)
if merge_branch_layer['name'] in layer_weights_dict:
param_num_offset_update += len(layer_weights_dict[merge_branch_layer['name']])
merge_branch_layers.append({
'layerName': layer_name_dict[merge_branch_layer['name']],
'parameters': merge_branch_layer_params
})
merge_branches.append(merge_branch_layers)
layer_params.append(merge_branches)
elif param in layer:
layer_params.append(layer[param])
return layer_params
def serialize(model_json_file, weights_hdf5_file, save_filepath, compress):
with open(model_json_file, 'r') as f:
model_metadata = json.load(f)
weights_file = h5py.File(weights_hdf5_file, 'r')
layers = []
num_activation_layers = 0
for k, layer in enumerate(model_metadata['layers']):
if layer['name'] == 'Activation':
num_activation_layers += 1
prev_layer_name = model_metadata['layers'][k-1]['name']
idx_activation = layer_params_dict[prev_layer_name].index('activation')
layers[k-num_activation_layers]['parameters'][idx_activation] = layer['activation']
continue
layer_params = get_layer_params(layer, weights_file, k, 0)
layers.append({
'layerName': layer_name_dict[layer['name']],
'parameters': layer_params
})
if compress:
with gzip.open(save_filepath, 'wb') as f:
f.write(json.dumps(layers).encode('utf8'))
else:
with open(save_filepath, 'w') as f:
json.dump(layers, f)
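# Hypothetical invocation sketch (not part of the original script); the file
# names below are placeholders for a Keras model JSON, its matching HDF5
# weights file, and the desired output path.
if __name__ == '__main__':
    serialize('model.json', 'model_weights.h5', 'model_serialized.json.gz', compress=True)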
|
# -*- coding: utf-8 -*-
# Copyright (c), 2011, the txyoga authors. See the LICENSE file for details.
"""
Serializable REST errors.
"""
from zope.interface import implements
from twisted.web import http, resource
from txyoga import interface
class RESTErrorPage(resource.Resource):
"""
An alternative to C{ErrorPage} for REST APIs.
Wraps a L{SerializableError}, and produces a pretty serializable form.
"""
def __init__(self, exception):
resource.Resource.__init__(self)
self.exception = exception
def render(self, request):
encoder = request.encoder
request.setHeader("Content-Type", encoder.contentType)
request.setResponseCode(self.exception.responseCode)
return encoder(self.exception)
class SerializableError(Exception):
"""
An error that can be serialized.
"""
implements(interface.ISerializableError)
responseCode = http.BAD_REQUEST
def __init__(self, message, details=None):
self.message = message
self.details = details if details is not None else {}
class UnsupportedContentType(SerializableError):
"""
Raised when the provided content type is unsupported.
This happens on POST or PUT.
"""
responseCode = http.UNSUPPORTED_MEDIA_TYPE
def __init__(self, supportedContentTypes, providedContentType):
message = "no acceptable decoder available for given content type"
details = {"supportedContentTypes": supportedContentTypes,
"providedContentType": providedContentType}
SerializableError.__init__(self, message, details)
class MissingContentType(SerializableError):
"""
Raised when the client failed to specify the content type.
"""
responseCode = http.UNSUPPORTED_MEDIA_TYPE
def __init__(self, supportedContentTypes):
message = "request didn't specify a content type"
details = {"supportedContentTypes": supportedContentTypes}
SerializableError.__init__(self, message, details)
class UnacceptableRequest(SerializableError):
"""
Raised when the requested resource could not be provided in one of the
accepted content types.
"""
responseCode = http.NOT_ACCEPTABLE
def __init__(self, supportedContentTypes, acceptedContentTypes):
message = "no acceptable encoder available"
details = {"supportedContentTypes": supportedContentTypes,
"acceptedContentTypes": acceptedContentTypes}
SerializableError.__init__(self, message, details)
class PaginationError(SerializableError):
"""
Raised when there was a problem computing pagination.
"""
class MissingElementError(SerializableError):
"""
Raised when an element that was expected to exist didn't.
This could be raised when attempting to remove or get an element.
"""
responseCode = http.NOT_FOUND
def __init__(self, identifier):
message = "missing element"
details = {"identifier": identifier}
SerializableError.__init__(self, message, details)
UNSPECIFIED = object()
class InvalidElementStateError(SerializableError):
"""
Raised when trying to put an element in an invalid state.
"""
responseCode = http.FORBIDDEN
def __init__(self, state):
message = "Invalid element state"
details = {"state": state}
SerializableError.__init__(self, message, details)
class ElementStateMissingAttributeError(SerializableError):
"""
Raised when some element state is missing a required attribute.
"""
responseCode = http.FORBIDDEN
def __init__(self, state, missingAttribute):
message = "Missing attribute"
details = {"state": state, "missingAttribute": missingAttribute}
SerializableError.__init__(self, message, details)
class AttributeValueUpdateError(SerializableError):
"""
Raised when attempting to update an element with some state that
has at least one immutable (non-updatable) attribute with a
different value than that in the existing element.
"""
responseCode = http.FORBIDDEN
def __init__(self, attribute, newValue, currentValue=UNSPECIFIED):
message = ("attribute update not allowed and provided value differs"
" from existing value, update aborted")
details = {"attribute": attribute, "newValue": newValue}
if currentValue is not UNSPECIFIED:
details["currentValue"] = currentValue
SerializableError.__init__(self, message, details)
class IdentifierError(SerializableError):
"""
Raised when attempting to put an element somewhere that does not
match its identification.
"""
responseCode = http.FORBIDDEN
def __init__(self, expected, actual):
message = "new element did not have specified identifying attribute"
details = {"actualIdentifyingAttribute": repr(actual),
"expectedIdentifyingAttribute": repr(expected)}
SerializableError.__init__(self, message, details)
class DuplicateElementError(SerializableError):
"""
Raised when an element is added to a collection that already has an
element with that identifier.
"""
responseCode = http.FORBIDDEN
def __init__(self, identifier):
message = "duplicate element"
details = {"identifier": identifier}
SerializableError.__init__(self, message, details)
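# A sketch of how an additional error type would follow the pattern above
# (illustrative only, not part of the original module): pick a response code
# and put the machine-readable context into ``details``.
class ExampleQuotaError(SerializableError):
    """
    Illustrative only: raised when a collection refuses to grow any further.
    """
    responseCode = http.FORBIDDEN
    def __init__(self, limit):
        message = "collection limit reached"
        details = {"limit": limit}
        SerializableError.__init__(self, message, details)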
|
import pymel.core as pm
import os
import System.blueprint as blueprint
import System.utils as utils
#reload(blueprint)
reload(utils)
CLASS_NAME = "HingeJoint"
TITLE = "Hinge Joint"
DESCRIPTION = "Creates 3 joints (the middle joint acting as a hinge joint). Ideal use: arm/leg"
ICON = "%s/Icons/_hinge.png" %os.environ["RIGGING_TOOL_ROOT"]
#ICON = "%s/nwModularRiggingTool/Icons/_hinge.png" %pm.internalVar(userScriptDir = True)
class HingeJoint(blueprint.Blueprint):
def __init__(self, _userSpecifiedName, _hookObj):
jointInfo = [ ["root_joint", [0.0, 0.0, 0.0]], ["hinge_joint", [4.0, 0.0, -1.0]], ["end_joint", [8.0, 0.0, 0.0]] ]
blueprint.Blueprint.__init__(self, CLASS_NAME, _userSpecifiedName, jointInfo, _hookObj)
def Install_custom(self, _joints):
pm.select(clear = True)
ikJoints = []
if not self.mirrored:
index = 0
# Create IK joints
for joint in self.jointInfo:
ikJoints.append(pm.joint(name = "%s:IK_%s" %(self.moduleNamespace, joint[0]), position = joint[1], absolute = True, rotationOrder = "xyz"))
pm.setAttr("%s.visibility" %ikJoints[index], 0)
# Orient parent joint after children
if index != 0:
pm.joint(ikJoints[index - 1], edit = True, orientJoint = "xyz", secondaryAxisOrient = "yup")
index += 1
# Mirror module
else:
rootJointName = self.jointInfo[0][0]
tempDuplicateNodes = pm.duplicate("%s:IK_%s" %(self.originalModule, rootJointName), renameChildren = True)
# Make sure entire hierarchy is being stored in tempDuplicateNodes list
pm.select(tempDuplicateNodes[0], hierarchy = True)
tempDuplicateNodes = pm.ls(selection = True)
pm.delete(tempDuplicateNodes.pop())
mirrorXY = False
mirrorYZ = False
mirrorXZ = False
if self.mirrorPlane == "XY":
mirrorXY = True
elif self.mirrorPlane == "YZ":
mirrorYZ = True
elif self.mirrorPlane == "XZ":
mirrorXZ = True
mirrorBehavior = False
if self.rotationFunction == "behaviour":
mirrorBehavior = True
mirrorJoints = pm.mirrorJoint(tempDuplicateNodes[0], mirrorXY = mirrorXY, mirrorYZ = mirrorYZ, mirrorXZ = mirrorXZ, mirrorBehavior = mirrorBehavior)
pm.delete(tempDuplicateNodes)
pm.xform(mirrorJoints[0], worldSpace = True, absolute = True, translation = pm.xform("%s:%s" %(self.moduleNamespace, rootJointName), query = True, worldSpace = True, translation = True))
for i in range(3):
jointName = self.jointInfo[i][0]
newName = pm.rename(mirrorJoints[i], "%s:IK_%s" %(self.moduleNamespace, jointName))
ikJoints.append(newName)
utils.AddNodeToContainer(self.containerName, ikJoints)
# Publish attributes in container
for joint in ikJoints:
jointName = utils.StripAllNamespaces(joint)[1]
pm.container(self.containerName, edit = True, publishAndBind = ["%s.rotate" %joint, "%s_R" %jointName])
pm.setAttr("%s.preferredAngleY" %ikJoints[0], -50.0)
pm.setAttr("%s.preferredAngleY" %ikJoints[1], 50.0)
# Setup stretchy segments
ikNodes = utils.RP_2segment_stretchy_IK(ikJoints[0], ikJoints[1], ikJoints[2], self.containerName)
locators = (ikNodes[0], ikNodes[1], ikNodes[2])
distanceNodes = ikNodes[3]
# Point constrain to translation controls
constraints = []
for i in range(3):
constraints.append(pm.pointConstraint(self.GetTranslationControl(_joints[i]), locators[i], maintainOffset = False))
pm.parent(locators[i], "%s:module_grp" %self.moduleNamespace, absolute = True)
pm.setAttr("%s.visibility" %locators[i], 0)
utils.AddNodeToContainer(self.containerName, constraints)
# Create preferred angle representation
scaleTarget = self.GetTranslationControl(_joints[1])
preferredAngleRep = self.CreatePreferredAngleRepresentation(ikJoints[1], scaleTarget)
pm.setAttr("%s.axis" %preferredAngleRep, lock = True)
def UI_custom(self):
joints = self.GetJoints()
self.CreateRotationOrderUIControl(joints[0])
self.CreateRotationOrderUIControl(joints[1])
def Lock_phase1(self):
        # Gather and return all required information from this module's control objects
        # jointPositions = list of joint positions, from root down the hierarchy
# jointOrientations = list of orientations, or list of axis information (orientJoint and secondaryAxisOrient for joint command)
# # These are passed in the following tuple: (orientation, None) or (None, axisInfo)
# jointRotationOrder = list of joint rotation orders (integer values gathered with getAttr)
# jointPreferredAngles = list of joint preferred angles, optional (can pass None)
# hookObject = self.FindHookObjectForLock()
# rootTransform = bool, either true or false. True = rotate, translate and scale on root joint. False = rotate only
# moduleInfo = (jointPositions, jointOrientations, jointRotationOrders, jointPreferredAngles, hookObject, rootTransform)
# return moduleInfo
jointPositions = []
jointOrientationValues = []
jointRotationOrders = []
jointPreferredAngles = []
pm.lockNode(self.containerName, lock = False, lockUnpublished = False)
ikHandle = "%s:IK_%s_ikHandle" %(self.moduleNamespace, self.jointInfo[0][0])
pm.delete(ikHandle)
for i in range(3):
jointName = self.jointInfo[i][0]
ikJointName = "%s:IK_%s" %(self.moduleNamespace, jointName)
pm.makeIdentity(ikJointName, rotate = True, translate = False, scale = False, apply = True)
jointPositions.append(pm.xform(ikJointName, query = True, worldSpace = True, translation = True))
jointRotationOrders.append(pm.getAttr("%s:%s.rotateOrder" %(self.moduleNamespace, jointName)))
if i < 2:
jointOrientX = pm.getAttr("%s.jointOrientX" %ikJointName)
jointOrientY = pm.getAttr("%s.jointOrientY" %ikJointName)
jointOrientZ = pm.getAttr("%s.jointOrientZ" %ikJointName)
jointOrientationValues.append( (jointOrientX, jointOrientY, jointOrientZ) )
joint_preferredAngle_X = pm.getAttr("%s.preferredAngleX" %ikJointName)
joint_preferredAngle_Y = pm.getAttr("%s.preferredAngleY" %ikJointName)
joint_preferredAngle_Z = pm.getAttr("%s.preferredAngleZ" %ikJointName)
jointPreferredAngles.append( (joint_preferredAngle_X, joint_preferredAngle_Y, joint_preferredAngle_Z) )
jointOrientations = (jointOrientationValues, None)
hookObject = self.FindHookObjectForLock()
rootTransform = False
moduleInfo = (jointPositions, jointOrientations, jointRotationOrders, jointPreferredAngles, hookObject, rootTransform)
return moduleInfo
|
#!/bin/env python
"""
Acquire a series of images using the XPP Rayonix detector with the
LCLS data acquisition system and a server running on a "mond" node
Setup:
source ~schotte/Software/Lauecollect/setup_env.sh
DAQ Control: check Sync Sequence 3 - Target State: Allocate
(if grayed out: daq.diconnect())
xpphome -> LSLS tab -> Event Sequencer -> Event Code Sequence 3 -> Start
ssh daq-xpp-mon05
ssh daq-xpp-mon06
~xppopr/experiments/xppj1216/software/start_zmqsend.sh:
source /reg/d/iocCommon/All/xpp_env.sh
export TIME=`date +%s`
export NAME="zmqsend.$HOSTNAME.$TIME"
source /reg/g/psdm/etc/ana_env.sh
$PROCSERV --logfile /tmp/$NAME --name zmqsend 40000 ./zmqsend.cmd
~xppopr/experiments/xppj1216/software/start_zmqsend.sh:
source /reg/g/psdm/etc/ana_env.sh
`which mpirun` -n 12 python /reg/neh/home/cpo/ipsana/xppj1216/zmqpub.py
Monitor status of servers:
telnet daq-xpp-mon05 40000
telnet daq-xpp-mon06 40000
Control-X, Control-R to restart
Author: <NAME>, Jan 26, 2016 - Feb 1, 2016
"""
from time import time
import zmq
from logging import error,warn,info,debug
from numpy import nan,argsort,array
from threading import Thread
from os.path import basename
from thread import start_new_thread
__version__ = "1.0.2" # multiple command port number
class DAQImages(object):
context = zmq.Context()
socket = context.socket(zmq.SUB)
servers = ["daq-xpp-mon05","daq-xpp-mon06"]
ports = range(12300,12300+12)
cmd_ports = range(12399,12399+5)
for server in servers:
for port in ports: socket.connect("tcp://%s:%d" % (server,port))
socket.setsockopt(zmq.SUBSCRIBE, 'rayonix')
socket.setsockopt(zmq.RCVTIMEO,1000) # ms
cancelled = False
completed = False
def __init__(self):
self.cmd_socket = self.context.socket(zmq.PUB)
for port in self.cmd_ports:
try: self.cmd_socket.bind("tcp://*:%s" % port); break
except zmq.ZMQError: pass # Address already in use
def get(self,nimages):
"""nimages: number of images to retreive"""
images = []; fiducials = []
for i in range(0,nimages):
try:
topic = self.socket.recv()
except Exception,msg:
error("Rayonix shmem: Image %2d/%d: recv: %s" % (i+1,nimages,msg))
break
fiducial = self.socket.recv_pyobj()
image = self.socket.recv_pyobj()
t = "Rayonix shmem: Image %d/%d %r: %d" % (i+1,nimages,image.shape,fiducial)
if len(fiducials)>0: t += " (%+g)" % (fiducial-fiducials[-1])
info(t)
images.append(image); fiducials.append(fiducial)
# The images are not guaranteed to be received in the order acquired.
# Sort the images by "fiducials" timestamp.
order = argsort(fiducials)
images = [images[i] for i in order]
return images
def save_images(self,filenames):
"""Receive a series images from a server running on the
"mond" nodes and save them as TIFF files.
filename: list of absolute pathnames
Returns immediately. Cancel with "abort".
"""
self.completed = False
start_new_thread(self.__save_images__,(filenames,))
def __save_images__(self,filenames):
"""Receive a series images from a server running on the
"mond" nodes and save them as TIFF files.
filename: list of absolute pathnames
        Returns after the requested number of images have been received or
        a timeout (1 s) has occurred.
"""
self.cancelled = False
self.completed = False
nimages = len(filenames)
images = []; fiducials = []; threads = []
for i in range(0,nimages):
if self.cancelled:
info("Rayonix shmem: Image reception cancelled.")
break
try:
topic = self.socket.recv()
except Exception,msg:
error("Image %d/%d: recv: %s" % (i+1,nimages,msg))
break
fiducial = self.socket.recv_pyobj()
image = self.socket.recv_pyobj()
t = "Image %2d/%d %r: %d" % (i+1,nimages,image.shape,fiducial)
if len(fiducials)>0: t += " (%+g)" % (fiducial-fiducials[-1])
info(t)
images.append(image); fiducials.append(fiducial)
thread = Thread(target=save_image,args=(image,filenames[i]))
thread.start()
threads.append(thread)
debug("Rayonix shmem: Waiting for all images to be saved...")
for thread in threads: thread.join()
debug("Rayonix shmem: All images saved.")
# The images are not guaranteed to be received in the order acquired.
# Sort the images by "fiducials" timestamp.
# The "fiducial" timestamp in a 17-bit counter running at 360 Hz.
# It wraps back to 0 from 131039, exactly every 364 seconds.
##fiducials = array(fiducials)
period = 131040
if len(fiducials)>0 and max(fiducials)-min(fiducials) > period/2:
fiducials[fiducials<period/2] += period
order = argsort(fiducials)
if not all(sorted(order) == order):
debug("Rayonix shmem: Resorting images...")
temp_names = [f+".tmp" for f in filenames]
for f,t in zip(filenames,temp_names): move(f,t)
temp_names = [temp_names[i] for i in order]
for t,f in zip(temp_names,filenames): move(t,f)
debug("Rayonix shmem: Images resorted...")
self.completed = True
def abort(self):
"""Cancel series acquisition"""
info("Cancelling image reception...")
self.cancelled = True
__bin_factor__ = 4
def get_bin_factor(self):
"""binning: integer, e.g. 1,2,4,8"""
return self.__bin_factor__
def set_bin_factor(self,binning):
"""binning: integer, e.g. 1,2,4,8"""
debug("Rayonix shmem: bin factor %s" % binning)
self.cmd_socket.send("cmd",zmq.SNDMORE)
self.cmd_socket.send_pyobj(binning)
self.__bin_factor__ = binning
bin_factor = property(get_bin_factor,set_bin_factor)
daq_shmem_client = DAQImages()
def move(src,dest):
"""Rename of move a file or a different directory, overwriting an exising
file"""
from os.path import basename,exists
from os import rename,remove
try:
if exists(dest): remove(dest)
rename(src,dest)
except OSError,msg: warn("Failed to move %r to %r: %s" % (src,dest,msg))
def save_image(image,filename):
from numimage import numimage
##debug("Saving image %r..." % basename(filename))
numimage(image).save(filename,"MCCD")
##debug("Image saved %r" % basename(filename))
if __name__ == "__main__":
import logging
logging.basicConfig(level=logging.DEBUG,format="%(asctime)s: %(message)s")
print("images = daq_shmem_client.get(20)")
print("daq_shmem_client.bin_factor = 4")
|
import numpy as np
"""
basic implementation of a Recurrent Neural Network from scratch,
to train a model to learn to add any number pair given in binary array format
developer--><NAME>
"""
class RecurrentNeuralNetwork:
def __init__(self,hidden_size=10):
"""hidden_size is number of neurons in hidden layer"""
self.hidden_size=hidden_size
self.activation={"sigmoid":(self.sigmoid,self.sig_grad),
"RELU":(self.RELU,self.RELU_grad),
"tanh":(self.tanh,self.tanh_grad)}
def fit(self,X,Y):
"""input your training dataset
X: input array 3D
        Y: output array 3D
        axis0- number of data samples
        axis1- ordered steps (time steps) of data
axis2- input array for each step"""
#add a slot for threshold weight in each inputs
X=np.append(X,np.ones((X.shape[0],X.shape[1],1)),axis=2)
# store sizes of datasets
self.input_size=X.shape[2]
self.output_size=Y.shape[2]
self.X=X
self.Y=Y
def tanh(self,x):
"""for hyperbolic tangent activation"""
return np.tanh(x)
def tanh_grad(self,x):
"""gradiant through tanh function"""
return np.minimum(1-self.tanh(x)**2,1e2)
def RELU(self,x):
"""for RELU activation"""
return np.maximum(x,0)
def RELU_grad(self,x):
"""gradient through RELU function"""
return np.sign(x)
def sigmoid(self,x):
"""sigmoid activation"""
return 1/(1+np.exp(-x))
def sig_grad(self,x):
"""gradiant through sigmoid function"""
return x*(1-x)
def train(self,rate=1,activation="sigmoid"):
"""train the model on the dataset provided , rate: learning rate"""
activate,actv_grad=self.activation[activation]
# initialise our weights randomly for hidden and output layers and recursion of previous layers
hidden_weight=2*np.random.random((self.input_size,self.hidden_size))-1
output_weight=2*np.random.random((self.hidden_size,self.output_size))-1
recurent_weight=2*np.random.random((self.hidden_size,self.hidden_size))-1
        # iterate through all data in the dataset
for i,X1 in enumerate(self.X):
            # corresponding output
Y1=self.Y[i]
            # lists to store our outputs to help find gradients of all time steps
hidden_layers=list()
output_gradients=list()
#initially we set our feedback vector to zero
hiddenlayer=np.zeros((1,self.hidden_size))
hidden_layers.append(hiddenlayer)
#keep track of error
total_errors=0
# forward propagate in time steps finding output of the RNN
for time,X in enumerate(X1):
                # hidden state is a function of both the input at the current time step and the hidden state of the previous time step
                # note we can also use other activations like RELU or tanh, which may affect performance
hiddenlayer= activate(np.dot(X,hidden_weight)+np.dot(hidden_layers[-1],recurent_weight))
outputlayer= activate(np.dot(hiddenlayer,output_weight))
                # calculate error
error= Y1[time]-outputlayer
total_errors+=np.abs(error[0,0])
#gradient of output layer
outputGradient=error*actv_grad(outputlayer)
#we store the hidden layers and output gradients to calculate the gradients of weight vectors
hidden_layers.append(np.atleast_2d(hiddenlayer))
output_gradients.append(np.atleast_2d(outputGradient))
#initialise all gradients zero
output_weight_gradient=np.zeros_like(output_weight)
hidden_weight_gradient=np.zeros_like(hidden_weight)
recurent_weight_gradient=np.zeros_like(recurent_weight)
#we use this to store the gradient of cost function (of future time) wrt time steps (in current time) on which it depends
future_gradients=np.zeros(self.hidden_size)
# iterate in reverse order, backpropagation through time!
for time,X in enumerate(X1[::-1]):
time=X1.shape[0]-time-1
#recursively set current gradients and all future gradients linked to this time step
hidden_layer_gradients=(np.dot(future_gradients,recurent_weight.T)+ np.dot(output_gradients[time],output_weight.T))*actv_grad(hidden_layers[time+1])
#sum of gradients of error in each time step
output_weight_gradient+=hidden_layers[time+1].T.dot(output_gradients[time])
hidden_weight_gradient+=np.atleast_2d(X).T.dot(hidden_layer_gradients)
recurent_weight_gradient+=np.dot(hidden_layers[time].T,hidden_layer_gradients)
#use this in next iteration to set gradients linked to past
future_gradients=hidden_layer_gradients
# update out weights by the learning rate
hidden_weight += rate * hidden_weight_gradient
output_weight+=rate * output_weight_gradient
recurent_weight += rate * recurent_weight_gradient
# print error in intervals
if i %1000==0:
print("iteration: {0}\t\t error: {1}".format(i,total_errors))
#we save our weights
self.hidden_weight=hidden_weight
self.output_weight=output_weight
self.recurent_weight=recurent_weight
def predict(self,X):
"""predict the output of X"""
#add slot for thresholds
X=np.append(X,np.ones((X.shape[0],X.shape[1],1)),axis=2)
output=np.zeros((X.shape[0],X.shape[1],self.output_size))
        # set feedback to zero initially
prev_hiddenlayer=np.zeros((1,self.hidden_size))
        # iterate through all input data and do prediction
for j,X2 in enumerate(X):
for time,X1 in enumerate(X2):
hiddenlayer= self.sigmoid(np.dot(X1,self.hidden_weight)+np.dot(prev_hiddenlayer,self.recurent_weight))
outputlayer= self.sigmoid(np.dot(hiddenlayer,self.output_weight))
output[j,time]=outputlayer
prev_hiddenlayer=hiddenlayer
return output
###we train RNN to learn how to add two numbers
# we generate 10,000 random pairs of numbers whose sum is below 2^8
max_val = 2**8
a=np.random.randint(0,high=max_val/2,size=(10000,2,1),dtype=np.uint8)
#convert to binary format
b= np.transpose(np.unpackbits(a, axis=2),(2,1,0))
# reverse order to keep LSB (least significant bit) first
b=b[::-1].transpose((2,0,1))
#sum the pairs with LSB first
sum=np.atleast_3d(np.unpackbits(np.sum(a,axis=1,dtype=np.uint8),axis=1).T[::-1].T)
# create an instance of our model; we will use 8 neurons in the hidden layer, which may be changed according to requirements
rnn=RecurrentNeuralNetwork(hidden_size=8)
#train on first 9980 data
rnn.fit(b[:9980],sum[:9980])
rnn.train(rate=1)
#print prediction for last 20 row wise
print(np.round(rnn.predict(b[9980:])).astype(int).transpose(2,0,1))
#and print the actual sums
print(sum[9980:].transpose(2,0,1))
|
from param import Param
import tensorflow as tf
from gpflow import transforms
float_type = tf.float64
jitter_level = 1e-6
class Kernel:
def __init__(self,sf0,ell0,name="kernel",learning_rate=0.01,
summ=False,fix_sf=False,fix_ell=False):
with tf.name_scope(name):
sf = Param(sf0,
transform=transforms.Log1pe(),
name="sf",
learning_rate = learning_rate,
summ = summ,
fixed = fix_sf)
ell = Param(ell0,
transform=transforms.Log1pe(),
name="ell",
learning_rate = learning_rate,
summ = summ,
fixed = fix_ell)
self.sf = sf()
self.ell = ell()
self.fix_sf = fix_sf
self.fix_ell = fix_ell
def square_dist(self,X,X2=None):
X = X / self.ell
Xs = tf.reduce_sum(tf.square(X), 1)
if X2 is None:
return -2 * tf.matmul(X, X, transpose_b=True) + \
tf.reshape(Xs, (-1, 1)) + tf.reshape(Xs, (1, -1))
else:
X2 = X2 / self.ell
X2s = tf.reduce_sum(tf.square(X2), 1)
return -2 * tf.matmul(X, X2, transpose_b=True) + \
tf.reshape(Xs, (-1, 1)) + tf.reshape(X2s, (1, -1))
class OperatorKernel(Kernel):
def __init__(self,sf0,ell0,ktype="id",learning_rate=0.01,
summ=False,block=True,name="OperatorKernel",fix_sf=False,
fix_ell=False):
super().__init__(sf0 = sf0,
ell0 = ell0,
name = name,
learning_rate = learning_rate,
summ = summ,
fix_sf = fix_sf,
fix_ell = fix_ell)
self.ndims = len(ell0)
self.ktype=ktype
self.block = block
def RBF(self,X,X2=None):
if X2 is None:
return self.sf**2 * tf.exp(-self.square_dist(X) / 2)
else:
return self.sf**2 * tf.exp(-self.square_dist(X, X2) / 2)
def HessianDivergenceFree(self,X,X2=None):
D = tf.shape(X)[1]
N = tf.shape(X)[0]
M = tf.shape(X2)[0]
X_expd = tf.expand_dims(X,-1) / self.ell
X2_expd = tf.transpose(tf.expand_dims(X2,-1),perm=[2,1,0])/ self.ell
diff = tf.subtract(X_expd,X2_expd)
diff1 = tf.transpose(tf.expand_dims(diff,-1),perm=[0,2,1,3])
diff2 = tf.transpose(tf.expand_dims(diff,-1),perm=[0,2,3,1])
term1 = tf.multiply(diff1,diff2)
term2 = tf.multiply(
tf.expand_dims(tf.expand_dims(tf.cast(D,dtype=float_type) - 1.0 - self.square_dist(X, X2),-1),-1),
tf.eye(D, batch_shape=[N,M],dtype=float_type))
H = term1 + term2
return H
def HessianCurlFree(self,X,X2=None):
D = tf.shape(X)[1]
N = tf.shape(X)[0]
M = tf.shape(X2)[0]
X = X / self.ell
X2 = X2 / self.ell
X_expd = tf.expand_dims(X,-1)
X2_expd = tf.transpose(tf.expand_dims(X2,-1),perm=[2,1,0])
diff = tf.subtract(X_expd,X2_expd)
diff1 = tf.transpose(tf.expand_dims(diff,-1),perm=[0,2,1,3])
diff2 = tf.transpose(tf.expand_dims(diff,-1),perm=[0,2,3,1])
term1 = tf.multiply(diff1,diff2)
H = tf.eye(D, batch_shape=[N,M],dtype=float_type) - term1
return H
def HessianIdentity(self,X,X2=None):
D = tf.shape(X)[1]
N = tf.shape(X)[0]
M = tf.shape(X2)[0]
H = tf.ones([N,M,D,D],dtype=float_type)
return H
def K(self,X,X2=None):
if X2 is None:
rbf_term = self.RBF(X)
X2 = X
else:
rbf_term = self.RBF(X,X2)
if self.ktype == "id":
# hes_term = self.HessianIdentity(X,X2)
return rbf_term
elif self.ktype == "df":
hes_term = self.HessianDivergenceFree(X,X2)
elif self.ktype == "cf":
hes_term = self.HessianCurlFree(X,X2)
else:
raise ValueError("Bad kernel type passed to `ktype`")
rbf_term = tf.expand_dims(tf.expand_dims(rbf_term,-1),-1)
K = rbf_term * hes_term / tf.square(self.ell)
if self.block:
K = self.tfblock(K)
return K
def Ksymm(self,X):
raise NotImplementedError()
def Kdiag(self,X):
raise NotImplementedError()
def tfblock(self,tensor):
'''
input : tensor of shape NxM,DxD
returns : tensor of shape (ND)x(MD)
'''
N = tf.shape(tensor)[0]
M = tf.shape(tensor)[1]
D = self.ndims
stacked_list = []
for d in range(D):
t = tf.stack([tf.reshape(tensor[:,:,p,d],[N,M]) for p in range(D)],axis=1)
t = tf.transpose(tf.reshape(t,[N*D,M]))
stacked_list.append(t)
reshaped = tf.stack(stacked_list,axis=1)
reshaped = tf.transpose(tf.reshape(reshaped,[M*D,N*D]))
return reshaped
class RBF(Kernel):
'''
Taken from GPFlow
'''
def __init__(self,sf0,ell0,name="RBFKernel",eta=0.01,summ=False,
fix_sf=False,fix_ell=False):
super().__init__(sf0,ell0,name=name,learning_rate=eta,summ=summ,
fix_sf=fix_sf,fix_ell=fix_ell)
def K(self,X,X2=None):
if X2 is None:
return self.sf**2 * tf.exp(-self.square_dist(X) / 2)
else:
return self.sf**2 * tf.exp(-self.square_dist(X, X2) / 2)
def Ksymm(self,X):
return self.sf**2 * tf.exp(-self.square_dist(X) / 2)
def Kdiag(self,X):
return tf.fill(tf.stack([tf.shape(X)[0]]), tf.squeeze(self.sf**2))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from CTFd.models import Teams, Users
from CTFd.utils import set_config
from tests.helpers import (
create_ctfd,
destroy_ctfd,
login_as_user,
login_with_mlc,
register_user,
)
def test_oauth_not_configured():
"""Test that OAuth redirection fails if OAuth settings aren't configured"""
app = create_ctfd()
with app.app_context():
with app.test_client() as client:
r = client.get("/oauth", follow_redirects=False)
assert r.location == "http://localhost/login"
r = client.get(r.location)
resp = r.get_data(as_text=True)
assert "OAuth Settings not configured" in resp
destroy_ctfd(app)
def test_oauth_configured_flow():
"""Test that MLC integration works properly but does not allow registration (account creation) if disabled"""
app = create_ctfd(user_mode="teams")
app.config.update(
{
"OAUTH_CLIENT_ID": "ctfd_testing_client_id",
"OAUTH_CLIENT_SECRET": "ctfd_testing_client_secret",
"OAUTH_AUTHORIZATION_ENDPOINT": "http://auth.localhost/oauth/authorize",
"OAUTH_TOKEN_ENDPOINT": "http://auth.localhost/oauth/token",
"OAUTH_API_ENDPOINT": "http://api.localhost/user",
}
)
with app.app_context():
set_config("registration_visibility", "private")
assert Users.query.count() == 1
assert Teams.query.count() == 0
client = login_with_mlc(app, raise_for_error=False)
assert Users.query.count() == 1
# Users shouldn't be able to register because registration is disabled
resp = client.get("http://localhost/login").get_data(as_text=True)
assert "Public registration is disabled" in resp
set_config("registration_visibility", "public")
client = login_with_mlc(app)
# Users should be able to register now
assert Users.query.count() == 2
user = Users.query.filter_by(email="<EMAIL>").first()
assert user.oauth_id == 1337
assert user.team_id == 1
# Teams should be created
assert Teams.query.count() == 1
team = Teams.query.filter_by(id=1).first()
assert team.oauth_id == 1234
client.get("/logout")
# Users should still be able to login if registration is disabled
set_config("registration_visibility", "private")
client = login_with_mlc(app)
with client.session_transaction() as sess:
assert sess["id"]
assert sess["nonce"]
assert sess["hash"]
destroy_ctfd(app)
def test_oauth_login_upgrade():
"""Test that users who use MLC after having registered will be associated with their MLC account"""
app = create_ctfd(user_mode="teams")
app.config.update(
{
"OAUTH_CLIENT_ID": "ctfd_testing_client_id",
"OAUTH_CLIENT_SECRET": "ctfd_testing_client_secret",
"OAUTH_AUTHORIZATION_ENDPOINT": "http://auth.localhost/oauth/authorize",
"OAUTH_TOKEN_ENDPOINT": "http://auth.localhost/oauth/token",
"OAUTH_API_ENDPOINT": "http://api.localhost/user",
}
)
with app.app_context():
register_user(app)
assert Users.query.count() == 2
set_config("registration_visibility", "private")
# Users should still be able to login
client = login_as_user(app)
client.get("/logout")
user = Users.query.filter_by(id=2).first()
assert user.oauth_id is None
assert user.team_id is None
login_with_mlc(app)
assert Users.query.count() == 2
# Logging in with MLC should insert an OAuth ID and team ID
user = Users.query.filter_by(id=2).first()
assert user.oauth_id
assert user.verified
assert user.team_id
destroy_ctfd(app)
|
import io
import logging
import os
import tempfile
import pyfuse3
from aiofile import AIOFile, Reader
log = logging.getLogger(__name__)
def flags_can_write(flags):
if flags & 0x03 == os.O_RDWR:
return True
if flags & 0x03 == os.O_WRONLY:
return True
return False
class BaseFileContext:
def __init__(self, context, flags=None):
self.context = context
self.current_id = None
self.aiterator = None
self.buffer = None
self.bufferfile = None
self.flags = flags
self.flush_count = 0
async def _flush(self, fp):
raise NotImplementedError()
async def _write_to(self, fp):
raise NotImplementedError()
async def _invalidate(self):
pass
def is_write(self):
return flags_can_write(self.flags)
def is_new_file(self):
return False
async def close(self):
if self.buffer is None and self.is_new_file() and self.is_write():
await self._ensure_buffer()
await self.flush()
self.buffer = None
await self._invalidate()
async def read(self, offset, size):
f = await self._ensure_buffer()
f.seek(offset)
return f.read(size)
async def write(self, offset, buf):
f = await self._ensure_buffer()
f.seek(offset)
f.write(buf)
return len(buf)
async def flush(self):
if self.bufferfile is None:
return
self.flush_count += 1
self.bufferfile.close()
self.bufferfile = None
if not self.is_write():
return
async with AIOFile(self.buffer, 'rb') as afp:
reader = Reader(afp, chunk_size=4096)
#reader.mode = 'rb'
#reader.peek = lambda x=None: True
await self._flush(reader)
os.remove(self.buffer)
async def readdir(self, start_id, token):
if self.aiterator is None:
self.current_id = 0
self.aiterator = self.__aiter__()
if start_id != self.current_id:
return None
self.current_id += 1
try:
            entry = await self.aiterator.__anext__()
            inode = self.get_inode(entry)
            log.info('Result: name={}, inode={}'.format(entry.name, inode))
            pyfuse3.readdir_reply(
                token, entry.name.encode('utf8'),
                await self.context.getattr(inode),
                self.current_id)
except StopAsyncIteration:
log.info('Finished')
return None
async def _ensure_buffer(self):
if self.bufferfile is not None:
return self.bufferfile
if self.buffer is None:
with tempfile.NamedTemporaryFile(delete=False) as f:
await self._write_to(f)
self.buffer = f.name
mode = 'rb'
if self.flags is None:
pass
elif self.flags & 0x03 == os.O_RDWR and self.flags & os.O_APPEND:
mode = 'a+b'
elif self.flags & 0x03 == os.O_RDWR:
mode = 'r+b'
elif self.flags & 0x03 == os.O_WRONLY and self.flags & os.O_APPEND:
mode = 'ab'
elif self.flags & 0x03 == os.O_WRONLY:
mode = 'wb'
log.info('buffer: file={2}, flags={0:08x}, mode={1}'.format(
self.flags, mode, self.buffer
))
self.bufferfile = open(self.buffer, mode)
return self.bufferfile
class Project(BaseFileContext):
def __init__(self, context, osfproject):
super(Project, self).__init__(context)
self.osfproject = osfproject
def __aiter__(self):
return self.osfproject.storages.__aiter__()
def get_inode(self, storage):
return self.context.inodes.get_storage_inode(storage)
class Folder(BaseFileContext):
def __init__(self, context, storage, folder):
super(Folder, self).__init__(context)
self.storage = storage
self.folder = folder
def __aiter__(self):
return self._get_folders_and_files().__aiter__()
def get_inode(self, file):
return self.context.inodes.get_file_inode(self.storage, file)
async def _get_folders_and_files(self):
if self.storage == self.folder:
async for f in self.storage.child_folders:
yield f
async for f in self.storage.child_files:
yield f
else:
async for f in self.folder.folders:
yield f
async for f in self.folder.files:
yield f
class File(BaseFileContext):
def __init__(self, context, storage, file_, flags):
super(File, self).__init__(context, flags)
self.storage = storage
self.file_ = file_
async def _write_to(self, fp):
await self.file_.write_to(fp)
async def _flush(self, fp):
await self.file_.update(fp)
async def _invalidate(self):
self.context.inodes.clear_inode_cache(self.storage, self.file_.path)
class NewFile(BaseFileContext):
def __init__(self, context, storage, path, flags):
super(NewFile, self).__init__(context, flags)
self.storage = storage
self.path = path
def is_new_file(self):
return True
async def _write_to(self, fp):
pass
async def _flush(self, fp):
await self.storage.create_file(self.path, fp)
async def _invalidate(self):
self.context.inodes.clear_inode_cache(self.storage, self.path)
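# Hedged usage sketch (added, not part of the original module): `context`,
# `storage` and `file_` stand for whatever the surrounding FUSE implementation
# supplies when an OSF file is opened for reading and writing.
async def _example_file_roundtrip(context, storage, file_):
    handle = File(context, storage, file_, flags=os.O_RDWR)
    data = await handle.read(offset=0, size=4096)
    await handle.write(offset=0, buf=b'new content')
    await handle.flush()
    await handle.close()
    return data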
|
import io
import copy
import uuid
import numpy as np
from .. import util
from .. import visual
from ..constants import log
def load_collada(file_obj, resolver=None, **kwargs):
"""
Load a COLLADA (.dae) file into a list of trimesh kwargs.
Parameters
----------
file_obj : file object
Containing a COLLADA file
resolver : trimesh.visual.Resolver or None
For loading referenced files, like texture images
kwargs : **
Passed to trimesh.Trimesh.__init__
Returns
-------
loaded : list of dict
kwargs for Trimesh constructor
"""
import collada
# load scene using pycollada
c = collada.Collada(file_obj)
# Create material map from Material ID to trimesh material
material_map = {}
for m in c.materials:
effect = m.effect
material_map[m.id] = _parse_material(effect, resolver)
# name : kwargs
meshes = {}
# list of dict
graph = []
for node in c.scene.nodes:
_parse_node(node=node,
parent_matrix=np.eye(4),
material_map=material_map,
meshes=meshes,
graph=graph,
resolver=resolver)
# create kwargs for load_kwargs
result = {'class': 'Scene',
'graph': graph,
'geometry': meshes}
return result
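# Hedged usage sketch (added, not part of trimesh): 'model.dae' is a
# placeholder path; the returned kwargs are normally consumed by trimesh's
# generic scene loading machinery.
def _example_load_collada():
    with open('model.dae', 'rb') as f:
        kwargs = load_collada(f)
    # 'geometry' maps primitive ids to Trimesh constructor kwargs and
    # 'graph' describes the scene transforms that reference those ids
    return kwargs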
def export_collada(mesh, **kwargs):
"""
Export a mesh or a list of meshes as a COLLADA .dae file.
Parameters
-----------
mesh: Trimesh object or list of Trimesh objects
The mesh(es) to export.
Returns
-----------
export: str, string of COLLADA format output
"""
import collada
meshes = mesh
if not isinstance(mesh, (list, tuple, set, np.ndarray)):
meshes = [mesh]
c = collada.Collada()
nodes = []
for i, m in enumerate(meshes):
# Load uv, colors, materials
uv = None
colors = None
mat = _unparse_material(None)
if m.visual.defined:
if m.visual.kind == 'texture':
mat = _unparse_material(m.visual.material)
uv = m.visual.uv
elif m.visual.kind == 'vertex':
colors = (m.visual.vertex_colors / 255.0)[:, :3]
c.effects.append(mat.effect)
c.materials.append(mat)
# Create geometry object
vertices = collada.source.FloatSource(
'verts-array', m.vertices.flatten(), ('X', 'Y', 'Z'))
normals = collada.source.FloatSource(
'normals-array', m.vertex_normals.flatten(), ('X', 'Y', 'Z'))
input_list = collada.source.InputList()
input_list.addInput(0, 'VERTEX', '#verts-array')
input_list.addInput(1, 'NORMAL', '#normals-array')
arrays = [vertices, normals]
if uv is not None:
texcoords = collada.source.FloatSource(
'texcoords-array', uv.flatten(), ('U', 'V'))
input_list.addInput(2, 'TEXCOORD', '#texcoords-array')
arrays.append(texcoords)
if colors is not None:
idx = 2
            if uv is not None:
idx = 3
colors = collada.source.FloatSource('colors-array',
colors.flatten(), ('R', 'G', 'B'))
input_list.addInput(idx, 'COLOR', '#colors-array')
arrays.append(colors)
geom = collada.geometry.Geometry(
c, uuid.uuid4().hex, uuid.uuid4().hex, arrays
)
indices = np.repeat(m.faces.flatten(), len(arrays))
matref = u'material{}'.format(i)
triset = geom.createTriangleSet(indices, input_list, matref)
geom.primitives.append(triset)
c.geometries.append(geom)
matnode = collada.scene.MaterialNode(matref, mat, inputs=[])
geomnode = collada.scene.GeometryNode(geom, [matnode])
node = collada.scene.Node(u'node{}'.format(i), children=[geomnode])
nodes.append(node)
scene = collada.scene.Scene('scene', nodes)
c.scenes.append(scene)
c.scene = scene
b = io.BytesIO()
c.write(b)
b.seek(0)
return b.read()
def _parse_node(node,
parent_matrix,
material_map,
meshes,
graph,
resolver=None):
"""
Recursively parse COLLADA scene nodes.
"""
import collada
# Parse mesh node
if isinstance(node, collada.scene.GeometryNode):
geometry = node.geometry
# Create local material map from material symbol to actual material
local_material_map = {}
for mn in node.materials:
symbol = mn.symbol
m = mn.target
if m.id in material_map:
local_material_map[symbol] = material_map[m.id]
else:
local_material_map[symbol] = _parse_material(m, resolver)
# Iterate over primitives of geometry
for i, primitive in enumerate(geometry.primitives):
if isinstance(primitive, collada.polylist.Polylist):
primitive = primitive.triangleset()
if isinstance(primitive, collada.triangleset.TriangleSet):
vertex = primitive.vertex
vertex_index = primitive.vertex_index
vertices = vertex[vertex_index].reshape(
len(vertex_index) * 3, 3)
# Get normals if present
normals = None
if primitive.normal is not None:
normal = primitive.normal
normal_index = primitive.normal_index
normals = normal[normal_index].reshape(
len(normal_index) * 3, 3)
# Get colors if present
colors = None
s = primitive.sources
if ('COLOR' in s and len(s['COLOR'])
> 0 and len(primitive.index) > 0):
color = s['COLOR'][0][4].data
color_index = primitive.index[:, :, s['COLOR'][0][0]]
colors = color[color_index].reshape(
len(color_index) * 3, -1)
faces = np.arange(
vertices.shape[0]).reshape(
vertices.shape[0] // 3, 3)
# Get UV coordinates if possible
vis = None
if primitive.material in local_material_map:
material = copy.copy(
local_material_map[primitive.material])
uv = None
if len(primitive.texcoordset) > 0:
texcoord = primitive.texcoordset[0]
texcoord_index = primitive.texcoord_indexset[0]
uv = texcoord[texcoord_index].reshape(
(len(texcoord_index) * 3, 2))
vis = visual.texture.TextureVisuals(
uv=uv, material=material)
primid = u'{}.{}'.format(geometry.id, i)
meshes[primid] = {
'vertices': vertices,
'faces': faces,
'vertex_normals': normals,
'vertex_colors': colors,
'visual': vis}
graph.append({'frame_to': primid,
'matrix': parent_matrix,
'geometry': primid})
# recurse down tree for nodes with children
elif isinstance(node, collada.scene.Node):
if node.children is not None:
for child in node.children:
# create the new matrix
matrix = np.dot(parent_matrix, node.matrix)
# parse the child node
_parse_node(
node=child,
parent_matrix=matrix,
material_map=material_map,
meshes=meshes,
graph=graph,
resolver=resolver)
elif isinstance(node, collada.scene.CameraNode):
# TODO: convert collada cameras to trimesh cameras
pass
elif isinstance(node, collada.scene.LightNode):
# TODO: convert collada lights to trimesh lights
pass
def _load_texture(file_name, resolver):
"""
Load a texture from a file into a PIL image.
"""
from PIL import Image
file_data = resolver.get(file_name)
image = Image.open(util.wrap_as_stream(file_data))
return image
def _parse_material(effect, resolver):
"""
Turn a COLLADA effect into a trimesh material.
"""
import collada
# Compute base color
baseColorFactor = np.ones(4)
baseColorTexture = None
if isinstance(effect.diffuse, collada.material.Map):
try:
baseColorTexture = _load_texture(
effect.diffuse.sampler.surface.image.path, resolver)
except BaseException:
log.warning('unable to load base texture',
exc_info=True)
elif effect.diffuse is not None:
baseColorFactor = effect.diffuse
# Compute emission color
emissiveFactor = np.zeros(3)
emissiveTexture = None
if isinstance(effect.emission, collada.material.Map):
try:
emissiveTexture = _load_texture(
                effect.emission.sampler.surface.image.path, resolver)
except BaseException:
log.warning('unable to load emissive texture',
exc_info=True)
elif effect.emission is not None:
emissiveFactor = effect.emission[:3]
# Compute roughness
roughnessFactor = 1.0
if (not isinstance(effect.shininess, collada.material.Map)
and effect.shininess is not None):
roughnessFactor = np.sqrt(2.0 / (2.0 + effect.shininess))
# Compute metallic factor
metallicFactor = 0.0
# Compute normal texture
normalTexture = None
if effect.bumpmap is not None:
try:
normalTexture = _load_texture(
effect.bumpmap.sampler.surface.image.path, resolver)
except BaseException:
log.warning('unable to load bumpmap',
exc_info=True)
# Compute opacity
if (effect.transparent is not None
and not isinstance(effect.transparent, collada.material.Map)):
baseColorFactor = tuple(
np.append(baseColorFactor[:3], float(effect.transparent[3])))
return visual.material.PBRMaterial(
emissiveFactor=emissiveFactor,
emissiveTexture=emissiveTexture,
normalTexture=normalTexture,
baseColorTexture=baseColorTexture,
baseColorFactor=baseColorFactor,
metallicFactor=metallicFactor,
roughnessFactor=roughnessFactor)
def _unparse_material(material):
"""
Turn a trimesh material into a COLLADA material.
"""
import collada
# TODO EXPORT TEXTURES
if isinstance(material, visual.material.PBRMaterial):
diffuse = material.baseColorFactor
if diffuse is not None:
diffuse = list(diffuse)
emission = material.emissiveFactor
if emission is not None:
emission = [float(emission[0]), float(emission[1]),
float(emission[2]), 1.0]
shininess = material.roughnessFactor
if shininess is not None:
shininess = 2.0 / shininess**2 - 2.0
effect = collada.material.Effect(
uuid.uuid4().hex, params=[], shadingtype='phong',
diffuse=diffuse, emission=emission,
specular=[1.0, 1.0, 1.0, 1.0], shininess=float(shininess)
)
material = collada.material.Material(
uuid.uuid4().hex, 'pbrmaterial', effect
)
else:
effect = collada.material.Effect(
uuid.uuid4().hex, params=[], shadingtype='phong'
)
material = collada.material.Material(
uuid.uuid4().hex, 'defaultmaterial', effect
)
return material
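# Hedged illustration (added): _parse_material maps Blinn-Phong shininess to
# PBR roughness via roughness = sqrt(2 / (2 + shininess)) and
# _unparse_material applies the inverse, shininess = 2 / roughness**2 - 2, so
# a value should survive the round trip up to floating point error.
def _example_roughness_roundtrip(shininess=32.0):
    roughness = np.sqrt(2.0 / (2.0 + shininess))
    recovered = 2.0 / roughness ** 2 - 2.0
    assert abs(recovered - shininess) < 1e-9
    return roughness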
def load_zae(file_obj, resolver=None, **kwargs):
"""
Load a ZAE file, which is just a zipped DAE file.
Parameters
-------------
file_obj : file object
Contains ZAE data
resolver : trimesh.visual.Resolver
Resolver to load additional assets
kwargs : dict
Passed to load_collada
Returns
------------
loaded : dict
Results of loading
"""
# a dict, {file name : file object}
archive = util.decompress(file_obj,
file_type='zip')
# load the first file with a .dae extension
file_name = next(i for i in archive.keys()
if i.lower().endswith('.dae'))
# a resolver so the loader can load textures / etc
resolver = visual.resolvers.ZipResolver(archive)
# run the regular collada loader
loaded = load_collada(archive[file_name],
resolver=resolver,
**kwargs)
return loaded
# only provide loaders if `pycollada` is installed
_collada_loaders = {}
_collada_exporters = {}
if util.has_module('collada'):
_collada_loaders['dae'] = load_collada
_collada_loaders['zae'] = load_zae
_collada_exporters['dae'] = export_collada
|
"""
ReBATE was primarily developed at the University of Pennsylvania by:
- <NAME> (<EMAIL>)
- <NAME> (<EMAIL>)
- <NAME> (<EMAIL>)
- and many more generous open source contributors
Permission is hereby granted, free of charge, to any person obtaining a copy of this software
and associated documentation files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
#Initialize hardcoded argument version of rebate.py
import rebate.IO as io
import rebate.Common as cmn
import rebate.relieff as R
import rebate.surf as S
import rebate.multisurf as MS
import rebate.Turf as T
import time as tm
import sys
import os
###############################################################################
#Setup Options ################################################
options = dict()
options['filename'] = 'data/GAMETES_Epistasis_2-Way_20atts_0.4H_EDM-1_1.txt'
options['basename'] = 'GAMETES_Epistasis_2-Way_20atts_0.4H_EDM-1_1.txt'
options['dir_path'] = 'data'
options['testdata'] = None
options['phenotypename'] = "Class"
options['discretelimit'] = 10
options['neighbors'] = 10
options['missingdata'] = 'NA'
options['algorithm'] = 'relieff'
options['turfpct'] = '0'
options['verbose'] = False
options['debug'] = True
options['topattr'] = 0
options['outputdir'] = '.'
#########################################
#Below is just a copy of the required code from rebate.py#########################
V = options['verbose']
turfpct = int(options['turfpct'])
algorithm = options['algorithm']
if(algorithm != 'relieff' and algorithm != 'surf' and algorithm != 'surfstar' and algorithm != 'multisurfstar' and algorithm != 'multisurf'):
print("algorithm " + algorithm + " is not available")
print("Use relieff, surf, surfstar, multisurfstar, or multisurf")
sys.exit(1)
if(V):
print("-------------- Python Version --------------")
print(sys.version)
print("--------------------------------------------")
#-----------------------------------------------------------------------------#
input_file = options['filename']
if(os.path.exists(input_file)):
header, data = io.np_read_data_tst(input_file,options)
else:
print("File " + input_file + " does NOT exist!")
sys.exit(1)
#-----------------------------------------------------------------------------#
x, y = io.getxy(header, data, options)
#-----------------------------------------------------------------------------#
# if there is test data, test it for compatibility
if(options['testdata'] != None):
testdata = options['testdata']
if(os.path.exists(testdata)):
theader, tdata = io.test_testdata(header, testdata, options)
else:
print("File " + testdata + " does NOT exist!")
sys.exit(2)
#-----------------------------------------------------------------------------#
var = cmn.getVariables(header, x, y, options)
attr = cmn.getAttributeInfo(header, x, var, options)
cheader = []
for i in header:
if attr[i][0] == 'continuous':
cheader.append(i)
if(V):
print("--------------- Parameters ---------------")
print("datafile: " + options['basename'])
print("datatype: " + var['dataType'])
print("attributes: " + str(var['NumAttributes']))
if(var['dataType'] == 'mixed'):
print(" continuous: " + str(var['cpct'][1]))
print(" discrete: " + str(var['dpct'][1]))
print("instances: " + str(var['datalen']))
print("missing: " + str(var['mdcnt']))
print("classtype: " + var['classType'])
if(var['classType'] == 'multiclass'):
yset = var['phenoTypeList']
print(" classes: " + str(len(yset)))
print("classname: " + var['phenoTypeName'])
print("algorithm: " + options['algorithm'])
print("--------------------------------------------")
sys.stdout.flush()
#-----------------------------------------------------------------------------#
# create distance array and remove intermediate data
# if missing and/or mixed data use the mixedDistance function
#
begin = tm.time()
diffs, cidx, didx = cmn.dtypeArray(header, attr, var)
if(var['mdcnt'] > 0):
import mmDistance as md
distArray = md.getDistances(x[:,cidx], x[:,didx], var, diffs[cidx])
disttype = "missing"
else:
distArray = cmn.getDistances(x, attr, var, cidx, didx, cheader)
disttype = "discrete/continuous/mixed"
if(V):
ctime = "[" + tm.strftime("%H:%M:%S") + "]"
print(ctime + " " + disttype + " distance array time(sec) = "
+ str(tm.time()-begin))
sys.stdout.flush()
#############################################################################
def test_relieff_GWAS_Sim():
""" Test ReliefF on GWAS_Sim """
Scores = R.runReliefF(header,x,y,attr,var,distArray,options)
print("ReliefF + GWAS_Sim ")
print(str(Scores))
#Check that score list is not empty
    assert Scores is not None
#Check that a score for all features is output
assert len(Scores) == 20 #GWAS simulated dataset
#Check that all scores fall between -1 and 1
assert max(Scores) <= 1 and min(Scores) >= -1
    #Check that the predictive features (expected at indices 18 and 19) have the top scores.
indexTopScore = Scores.index(max(Scores))
assert indexTopScore == 18 or indexTopScore == 19
Scores.pop(indexTopScore)
indexTopScore = Scores.index(max(Scores))
assert indexTopScore == 18
def test_surf_GWAS_Sim():
""" Test SURF on GWAS_Sim """
#New parameters
options['algorithm'] = 'surf'
Scores = S.runSURF(header, x, y, attr, var, distArray, options)
print("SURF + GWAS_Sim ")
print(str(Scores))
#Check that score list is not empty
    assert Scores is not None
#Check that a score for all features is output
assert len(Scores) == 20 #GWAS simulated dataset
#Check that all scores fall between -1 and 1
assert max(Scores) <= 1 and min(Scores) >= -1
    #Check that the predictive features (expected at indices 18 and 19) have the top scores.
indexTopScore = Scores.index(max(Scores))
assert indexTopScore == 18 or indexTopScore == 19
Scores.pop(indexTopScore)
indexTopScore = Scores.index(max(Scores))
assert indexTopScore == 18
def test_surfstar_GWAS_Sim():
""" Test SURF* on GWAS_Sim """
#New parameters
options['algorithm'] = 'surfstar'
Scores = S.runSURF(header, x, y, attr, var, distArray, options)
print("SURF* + GWAS_Sim ")
print(str(Scores))
#Check that score list is not empty
    assert Scores is not None
#Check that a score for all features is output
assert len(Scores) == 20 #GWAS simulated dataset
#Check that all scores fall between -1 and 1
assert max(Scores) <= 1 and min(Scores) >= -1
    #Check that the predictive features (expected at indices 18 and 19) have the top scores.
indexTopScore = Scores.index(max(Scores))
assert indexTopScore == 18 or indexTopScore == 19
Scores.pop(indexTopScore)
indexTopScore = Scores.index(max(Scores))
assert indexTopScore == 18
def test_multisurfstar_GWAS_Sim():
""" Test MultiSURF* on GWAS_Sim """
#New parameters
options['algorithm'] = 'multisurfstar'
Scores = MS.runMultiSURF(header, x, y, attr, var, distArray, options)
print("MultiSURF* + GWAS_Sim ")
print(str(Scores))
#Check that score list is not empty
    assert Scores is not None
#Check that a score for all features is output
assert len(Scores) == 20 #GWAS simulated dataset
#Check that all scores fall between -1 and 1
assert max(Scores) <= 1 and min(Scores) >= -1
    #Check that the predictive features (expected at indices 18 and 19) have the top scores.
indexTopScore = Scores.index(max(Scores))
assert indexTopScore ==18 or indexTopScore == 19
Scores.pop(indexTopScore)
indexTopScore = Scores.index(max(Scores))
assert indexTopScore == 18
def test_multisurf_GWAS_Sim():
""" Test MultiSURF on GWAS_Sim """
#New parameters
options['algorithm'] = 'multisurf'
Scores = MS.runMultiSURF(header, x, y, attr, var, distArray, options)
print("MultiSURF + GWAS_Sim ")
print(str(Scores))
#Check that score list is not empty
    assert Scores is not None
#Check that a score for all features is output
assert len(Scores) == 20 #GWAS simulated dataset
#Check that all scores fall between -1 and 1
assert max(Scores) <= 1 and min(Scores) >= -1
    #Check that the predictive features (expected at indices 18 and 19) have the top scores.
indexTopScore = Scores.index(max(Scores))
assert indexTopScore ==18 or indexTopScore == 19
Scores.pop(indexTopScore)
indexTopScore = Scores.index(max(Scores))
assert indexTopScore == 18
def test_multisurf_turf_GWAS_Sim():
""" Test MultiSURF with TuRF on GWAS_Sim """
#New parameters
options['algorithm'] = 'multisurf'
options['turfpct'] = '50'
turfpct = int(options['turfpct'])
pct = float(turfpct)/100.0
iterations = int(1/float(pct))
fun = MS.runMultiSURF
tempx = None
tempVar = None
tempfullscores = None
templost = None
temptable = None
Scores,tempx,tempVar,templost,temptable = T.runTurf(header,x,y,attr,var,distArray,pct,iterations,fun,options,cmn)
options['algorithm'] = algorithm + "-turf"
print("MultiSURF with TuRF + 6-bit Multiplexer ")
print(str(Scores))
#Check that score list is not empty
    assert Scores is not None
#Check that a score for all features is output
    assert len(Scores) == 10 #10 of the 20 GWAS features remain after TuRF removes 50%
#Check that all scores fall between -1 and 1
assert max(Scores) <= 1 and min(Scores) >= -1
    #Check that the predictive features (expected at indices 8 and 9 after TuRF removes half the features) have the top scores.
indexTopScore = Scores.index(max(Scores))
assert indexTopScore == 8 or indexTopScore == 9
Scores.pop(indexTopScore)
indexTopScore = Scores.index(max(Scores))
assert indexTopScore == 8
#################################
|
import warnings
from typing import Optional, Tuple, Any, Literal
from pandas.core.dtypes.common import is_numeric_dtype
from statsmodels.api import stats
from statsmodels.formula.api import ols
import numpy as np
import pandas as pd
import scipy.stats as sp
import seaborn as sns
import matplotlib.pyplot as plt
__all__ = ['anova', 'anova_for_all', 'kruskal', 'kruskal_for_all', 'kruskal_one_vs_all', 'strip_and_boxplot', 'swarm_and_boxplot']
def anova(dataset: pd.DataFrame, test_col: str, target_col: str) -> float:
"""Performs a one-way ANOVA F-test for groups in test_col with values in target_col.
Note that ANOVA tests reqire independently, normally distributed samples with
homoscedastic groups. If those assumptions are not met, consider using the
(less powerful) Kruskal-Wallis H-test.
Args:
dataset: dataset to check
test_col: Categorical column containing classes to check
target_col: numerical column to check categorical column against
Returns:
The p-value of the ANOVA F-statistic
"""
lm = ols(f'{target_col} ~ C({test_col})', data=dataset).fit()
result = stats.anova_lm(lm)
return result.iloc[0, -1]
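# Hedged usage sketch (added): the 'group'/'value' column names below are
# placeholders for any categorical/numerical pair in a real dataset.
def _example_anova():
    df = pd.DataFrame({
        'group': ['a'] * 5 + ['b'] * 5,
        'value': [1.0, 1.2, 0.9, 1.1, 1.0, 2.0, 2.1, 1.9, 2.2, 2.0],
    })
    # a small p-value suggests the group means differ
    return anova(df, test_col='group', target_col='value')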
def anova_for_all(dataset: pd.DataFrame, target_col: str, significance: float = 0.05) -> pd.DataFrame:
"""Performs a one-way ANOVA F-test for all categorical columns against target_col.
Performs a one-way ANOVA F-test to all tuples of the form
(categorical col, target_col) in order to test whether the medians in each
of the classes are equal.
    Note that ANOVA tests require independent, normally distributed samples with
homoscedastic groups. If those assumptions are not met, consider using the
(less powerful) Kruskal-Wallis H-test.
Args:
dataset: dataset to check
target_col: numerical column to check categorical column against
significance: If set, only return values with p-value <= significance
Returns:
A dataframe consisting of column names and p-values
"""
result_dict = {}
col_names = dataset.select_dtypes(object).columns
for col in col_names:
try:
pr_f = anova(dataset, col, target_col)
if pr_f <= significance:
result_dict[col] = pr_f
except Exception as e:
print(f'Error evaluating column {col}: {e}')
df = pd.DataFrame(data=result_dict.items(), columns=['Column', 'p-val'])
df['Bonf_p'] = df['p-val'] * len(df)
return df.set_index('Column').sort_values(by='p-val')
def kruskal(dataset: pd.DataFrame, test_col: str, target_col: str, nan_policy: str = 'propagate') -> float:
"""Applies Kruskal-Wallis H-test to a single column
Applies Kruskal-Wallis H-test to (test col, target_col) in order to
test whether the medians in each of the classes in test_col are equal.
Args:
dataset: dataset to check
test_col: Categorical column containing classes to check
target_col: numerical column to check categorical column against
nan_policy: One of {'handle', 'omit', 'propagate', 'raise'}.
'handle' removes nan values in categorical columns and treats them
as an own class, then passes 'omit' to scipy.stats.kruskal.
All other will be passed to scipy.stats.kruskal
Returns:
The p-value of the Kruskal-Wallis H-statistic
"""
column = dataset[test_col]
if nan_policy == 'handle' and column.dtype.name != 'category':
column = column.fillna('__n_o_n_e__')
# From scipi.stats.kruskal:
# Due to the assumption that H has a chi square distribution, the number of
# samples in each group must not be too small. A typical rule is that each
# sample must have at least 5 measurements.
if column.nunique() == 1:
warnings.warn(f'Ignoring column {test_col}: Only contains one class.')
return np.nan
if len(dataset) / 5 < column.nunique():
warnings.warn(f'Ignoring column {test_col}: Too few (<5) samples in each class.')
return np.nan
samples = [dataset[column == value][target_col] for value in column.unique() if not pd.isna(value)]
_nan_policy = nan_policy if nan_policy != 'handle' else 'omit'
p_value = sp.kruskal(*samples, nan_policy=_nan_policy).pvalue
if np.isnan(p_value):
warnings.warn(f"Obtained nan for column {test_col}. This may happen if your input contained "
f"nan values. In that case, consider setting nan_policy='handle'.")
return p_value
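# Hedged usage sketch (added): demonstrates nan_policy='handle', which keeps
# missing group labels as their own class; column names are placeholders.
def _example_kruskal():
    df = pd.DataFrame({
        'group': ['a'] * 5 + ['b'] * 5 + [None] * 5,
        'value': [1.0, 1.1, 0.9, 1.2, 1.0,
                  2.0, 2.1, 1.9, 2.2, 2.0,
                  3.0, 3.1, 2.9, 3.2, 3.0],
    })
    return kruskal(df, test_col='group', target_col='value', nan_policy='handle')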
def kruskal_for_all(dataset: pd.DataFrame,
target_col: str,
significance: float = 1,
nan_policy: str = 'propagate') -> pd.DataFrame:
"""Applies Kruskal-Wallis H-test to all columns
Applies Kruskal-Wallis H-test to all tuples of the form
(col, target_col) in order to test whether the medians in each
of the classes are equal. If target_col is numeric, kruskal
checks categorical columns and vice versa.
Args:
dataset: dataset to check
target_col: numerical column to check categorical columns against or
categorical column to check numerical columns against
significance: If set, only return values with p-value <= significance
nan_policy: One of {'handle', 'omit', 'propagate', 'raise'}.
'handle' removes nan values in categorical columns and treats them
as an own category, then passes 'omit' to scipy.stats.kruskal.
All other will be passed to scipy.stats.kruskal
Returns:
A dataframe consisting of column names and p-values
"""
result_dict = {}
if num_vs_cat_mode := is_numeric_dtype(dataset[target_col]):
col_names = dataset.select_dtypes([object, 'datetime', 'category']).columns
else:
col_names = dataset.select_dtypes(np.number).columns
for col in col_names:
try:
if num_vs_cat_mode:
pr_f = kruskal(dataset, col, target_col, nan_policy=nan_policy)
else:
pr_f = kruskal(dataset, target_col, col, nan_policy=nan_policy)
except ValueError as e:
warnings.warn(str(e))
pr_f = 1
if not significance or pr_f <= significance:
result_dict[col] = [pr_f, dataset[col].nunique(dropna=(nan_policy != 'handle'))]
result_col_name = f'p({target_col})'
df = pd.DataFrame.from_dict(
result_dict, orient='index', columns=[result_col_name, 'nunique']
).astype({result_col_name: float, 'nunique': int})
df[f'Bonf_{result_col_name}'] = df[result_col_name] * len(df)
return df.sort_values(by=result_col_name)
def kruskal_one_vs_all(dataset: pd.DataFrame,
cat_col: str,
target_col: str,
significance: float = 1,
nan_policy: str = "omit",
include_stats: bool = True) -> pd.DataFrame:
"""Applies Kruskal-Wallis H-test to all categories in a specified column
Applies Kruskal-Wallis H-test to all tuples of the form
(categorical col == x, categorical col != x) in order to test whether the
specific category has a significantly different distribution
of target_col
Args:
dataset: dataset to check
cat_col: Categorical column including different classes for analysis
target_col: numerical column to check categorical column against
significance: If set, only return values with p-value <= significance
nan_policy: passed to scipy.stats.kruskal
include_stats: Whether to include sample mean and std in the result
Returns:
A dataframe consisting of classes and p-values
"""
result_dict = dict()
categories = dataset[cat_col].unique()
num_cat = len(categories)
for category in categories:
in_cat = dataset[dataset[cat_col] == category][target_col]
nin_cat = dataset[dataset[cat_col] != category][target_col]
pr_f = sp.kruskal(in_cat, nin_cat, nan_policy=nan_policy).pvalue
if not significance or pr_f <= significance:
result_dict[category] = [pr_f, pr_f * num_cat, len(in_cat)]
if include_stats:
result_dict[category] += [in_cat.mean(), nin_cat.mean(), in_cat.std(), nin_cat.std()]
columns = ['p', 'bonf(p)', 'n']
if include_stats:
columns += ['in_mean', 'nin_mean', 'in_std', 'nin_std']
df = pd.DataFrame.from_dict(
result_dict, orient='index', columns=columns
)
return df.sort_values(by='p')
def _combined_boxplot(kind: Literal['stripplot', 'swarmplot'],
common_kwargs: dict,
boxplot_kwargs: dict,
pointplot_kwargs: dict,
figsize: Optional[Tuple[int, int]] = None):
ax = common_kwargs.get('ax', None)
if not ax:
fig, ax = plt.subplots(figsize=figsize)
common_kwargs['ax'] = ax
pointplot = getattr(sns, kind)
pointplot(**common_kwargs, **pointplot_kwargs)
sns.boxplot(**common_kwargs, **boxplot_kwargs, width=.5, color='white', fliersize=0)
plt.xticks(rotation=45)
def strip_and_boxplot(data: pd.DataFrame,
x: str,
y: str,
hue: Optional[str] = None,
figsize: Tuple[int, int] = (12, 8),
alpha: float = 1,
ax: Any = None,
strip_kwargs: Optional[dict] = None,
box_kwargs: Optional[dict] = None) -> None:
strip_kwargs, box_kwargs = strip_kwargs or dict(), box_kwargs or dict()
common_kwargs = dict(data=data, x=x, y=y)
if ax:
common_kwargs['ax'] = ax
pointplot_kwargs = dict(hue=hue, alpha=alpha, jitter=.15, **strip_kwargs)
boxplot_kwargs = box_kwargs
return _combined_boxplot("stripplot", common_kwargs, boxplot_kwargs, pointplot_kwargs, figsize=figsize)
def swarm_and_boxplot(data: pd.DataFrame,
x: str,
y: str,
hue: Optional[str] = None,
figsize: Tuple[int, int] = (12, 8),
alpha: float = 1,
ax: Any = None,
swarm_kwargs: Optional[dict] = None,
box_kwargs: Optional[dict] = None) -> None:
swarm_kwargs, box_kwargs = swarm_kwargs or dict(), box_kwargs or dict()
    common_kwargs = dict(data=data, x=x, y=y)
    if ax:
        common_kwargs['ax'] = ax
pointplot_kwargs = dict(hue=hue, alpha=alpha, **swarm_kwargs)
boxplot_kwargs = box_kwargs
return _combined_boxplot("swarmplot", common_kwargs, boxplot_kwargs, pointplot_kwargs, figsize=figsize)
|
"""
implementation of the MNIST/Fashion MNIST database
"""
import os
import numpy as np
import gzip
import urllib.request
import tensorflow as tf
from tensorflow.python.platform import gfile # pylint: disable=E0611
DEFAULT_SOURCE_URL = 'https://storage.googleapis.com/cvdf-datasets/mnist/'
# For Fashion MNIST, use the following link: 'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/'
def _check_download_file(filename, dir_name, source_url):
"""Download the data from source url, unless it's already here.
Args:
filename: string, name of the file in the directory.
dir_name: string, path to working directory.
source_url: url to download from if file doesn't exist.
Returns:
Path to resulting file.
"""
if not gfile.Exists(dir_name):
gfile.MakeDirs(dir_name)
filepath = os.path.join(dir_name, filename)
if not gfile.Exists(filepath):
urllib.request.urlretrieve(source_url, filepath)
with gfile.GFile(filepath) as f:
size = f.size()
print('Successfully downloaded', filename, size, 'bytes.')
return filepath
def _read32(bytestream):
dt = np.dtype(np.uint32).newbyteorder('>')
return np.frombuffer(bytestream.read(4), dtype=dt)[0]
def _dense_to_one_hot(labels_dense, num_classes):
"""Convert class labels from scalars to one-hot vectors."""
num_labels = labels_dense.shape[0]
index_offset = np.arange(num_labels) * num_classes
labels_one_hot = np.zeros((num_labels, num_classes))
labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
return labels_one_hot
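# Hedged illustration (added): for labels [1, 0, 2] and num_classes=3 the
# helper above yields the rows [[0, 1, 0], [1, 0, 0], [0, 0, 1]].
def _example_dense_to_one_hot():
    labels = np.array([1, 0, 2])
    return _dense_to_one_hot(labels, num_classes=3)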
def _extract_images(f):
"""Extract the images into a 4D uint8 np array [index, y, x, depth].
Args:
f: A file object that can be passed into a gzip reader.
Returns:
data: A 4D uint8 np array [index, y, x, depth].
Raises:
ValueError: If the bytestream does not start with 2051.
"""
with gzip.GzipFile(fileobj=f) as bytestream:
magic = _read32(bytestream)
if magic != 2051:
raise ValueError('Invalid magic number %d in MNIST image file: %s' % (magic, f.name))
num_images = _read32(bytestream)
rows = _read32(bytestream)
cols = _read32(bytestream)
buf = bytestream.read(rows * cols * num_images)
images = np.frombuffer(buf, dtype=np.uint8)
images = images.reshape(num_images, rows, cols, 1)
images = images.astype(np.float32) / 255
return images
def _extract_labels(f):
"""Extract the labels into a 1D uint8 numpy array [index].
Args:
f: A file object that can be passed into a gzip reader.
Returns:
labels: a 1D uint8 numpy array.
Raises:
    ValueError: If the bytestream doesn't start with 2049.
"""
with gzip.GzipFile(fileobj=f) as bytestream:
magic = _read32(bytestream)
assert magic == 2049, 'Invalid magic number %d in MNIST label file: %s' % (magic, f.name)
num_items = _read32(bytestream)
buf = bytestream.read(num_items)
labels = np.frombuffer(buf, dtype=np.uint8)
return labels.astype(np.int32)
def _read_data_sets(db_dir, source_url):
TRAIN_IMAGES = 'train-images-idx3-ubyte.gz'
TRAIN_LABELS = 'train-labels-idx1-ubyte.gz'
TEST_IMAGES = 't10k-images-idx3-ubyte.gz'
TEST_LABELS = 't10k-labels-idx1-ubyte.gz'
local_file = _check_download_file(TRAIN_IMAGES, db_dir, source_url + TRAIN_IMAGES)
with gfile.Open(local_file, 'rb') as f:
train_images = _extract_images(f)
local_file = _check_download_file(TRAIN_LABELS, db_dir, source_url + TRAIN_LABELS)
with gfile.Open(local_file, 'rb') as f:
train_labels = _extract_labels(f)
local_file = _check_download_file(TEST_IMAGES, db_dir, source_url + TEST_IMAGES)
with gfile.Open(local_file, 'rb') as f:
test_images = _extract_images(f)
local_file = _check_download_file(TEST_LABELS, db_dir, source_url + TEST_LABELS)
with gfile.Open(local_file, 'rb') as f:
test_labels = _extract_labels(f)
return train_images, train_labels, test_images, test_labels
def create_dataset(
db_dir,
batch_size,
vector_fmt=False,
one_hot=True,
validation_size=5000,
source_url=DEFAULT_SOURCE_URL,
dataset_fmt=False
):
"""
main function to create the mnist data set
Args:
        db_dir: string, directory of the files of the mnist database.
batch_size: integer or placeholder, training batch size.
vector_fmt: the datapoints are in the vectorized (-1, 784) or image (-1, 28, 28, 1) format.
one_hot: boolean, labels are one-hot represented or not.
validation_size: integer, number of samples in the validation set.
        source_url: url to download from if file doesn't exist.
        dataset_fmt: boolean, if True return the tf.data.Dataset objects directly instead of iterator tensors.
Returns:
images, labels
the initializer operators for the datasets
number of samples in each subset of the database
"""
# read data from files
train_images, train_labels, test_images, test_labels = _read_data_sets(db_dir, source_url)
if one_hot:
train_labels = _dense_to_one_hot(train_labels, num_classes=10)
test_labels = _dense_to_one_hot(test_labels, num_classes=10)
if vector_fmt:
train_images = np.reshape(train_images, newshape=(-1, 784))
test_images = np.reshape(test_images, newshape=(-1, 784))
# separate the validation data
if not 0 <= validation_size <= len(train_images):
raise ValueError(
'Validation size should be between 0 and {}. Received: {}.'.format(len(train_images), validation_size)
)
validation_images = train_images[:validation_size]
validation_labels = train_labels[:validation_size]
train_images = train_images[validation_size:]
train_labels = train_labels[validation_size:]
number_samples = {'train': len(train_labels), 'validation': len(validation_labels), 'test': len(test_labels)}
# create training dataset
train_db = tf.data.Dataset.from_tensor_slices((train_images, train_labels))
train_db = train_db.shuffle(number_samples['train']).repeat()
train_db = train_db.batch(batch_size)
# prefetch data
train_db = train_db.prefetch(1)
# create validation dataset
valid_db = tf.data.Dataset.from_tensor_slices((validation_images, validation_labels))
valid_db = valid_db.batch(number_samples['validation'])
# create test dataset
test_db = tf.data.Dataset.from_tensor_slices((test_images, test_labels))
test_db = test_db.batch(number_samples['test'])
if dataset_fmt:
return train_db, valid_db, test_db, number_samples
# define the iterator and different initializers
iterator = tf.data.Iterator.from_structure(train_db.output_types, train_db.output_shapes)
images, labels = iterator.get_next()
train_init_op = iterator.make_initializer(train_db)
valid_init_op = iterator.make_initializer(valid_db)
test_init_op = iterator.make_initializer(test_db)
init_op = {'train': train_init_op, 'validation': valid_init_op, 'test': test_init_op}
return images, labels, init_op, number_samples
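# Hedged usage sketch (added): assumes the TF1-style graph/session workflow
# that the iterator code above targets; 'mnist_data' is a placeholder path.
def _example_create_dataset():
    images, labels, init_op, n_samples = create_dataset('mnist_data', batch_size=128)
    with tf.Session() as sess:
        sess.run(init_op['train'])
        batch_images, batch_labels = sess.run([images, labels])
    return batch_images.shape, n_samples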
class MNISTDataset:
def __init__(self):
self._train_images = None
self._train_labels = None
self._validation_images = None
self._validation_labels = None
self._test_images = None
self._test_labels = None
self._number_samples = {'train': 0, 'validation': 0, 'test': 0}
self._seed = None
self._index_pos = 0
self._shuffled_index = [0]
@property
def number_samples(self):
return self._number_samples
@property
def train_data(self):
return self._train_images, self._train_labels
@property
def validation(self):
return self._validation_images, self._validation_labels
@property
def test_data(self):
return self._test_images, self._test_labels
def _prepare_samples(self, db_dir, vector_fmt, one_hot, validation_size):
# read data from files
self._train_images, self._train_labels, self._test_images, self._test_labels = _read_data_sets(
db_dir, source_url=DEFAULT_SOURCE_URL
)
if one_hot:
self._train_labels = _dense_to_one_hot(self._train_labels, num_classes=10)
self._test_labels = _dense_to_one_hot(self._test_labels, num_classes=10)
if vector_fmt:
self._train_images = np.reshape(self._train_images, newshape=(-1, 784))
self._test_images = np.reshape(self._test_images, newshape=(-1, 784))
# separate the validation data
if not 0 <= validation_size <= len(self._train_images):
raise ValueError(
'Validation size should be between 0 and {}. Received: {}.'.format(
len(self._train_images), validation_size
)
)
self._validation_images = self._train_images[:validation_size]
self._validation_labels = self._train_labels[:validation_size]
self._train_images = self._train_images[validation_size:]
self._train_labels = self._train_labels[validation_size:]
self._number_samples = {
'train': len(self._train_labels),
'validation': len(self._validation_labels),
'test': len(self._test_labels)
}
def _reset_shuffled_index(self):
np.random.seed(self._seed)
self._shuffled_index = np.arange(0, self._number_samples['train'])
np.random.shuffle(self._shuffled_index)
self._index_pos = 0
# update seed for reproducibility and avoiding conflicts with other rand calls
self._seed = np.random.randint(1000, 1000000)
def create_dataset(self, db_dir, vector_fmt=False, one_hot=True, validation_size=5000, seed=None):
self._seed = np.random.randint(1000, 1000000) if (seed is None) else seed
# read database samples from file or download them if necessary
self._prepare_samples(db_dir, vector_fmt, one_hot, validation_size)
self._reset_shuffled_index()
def next_batch(self, batch_size):
if (self._index_pos + batch_size) >= self._number_samples['train']:
self._reset_shuffled_index()
index = self._shuffled_index[self._index_pos:(self._index_pos + batch_size)]
self._index_pos += batch_size
return self._train_images[index], self._train_labels[index]
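# Hedged usage sketch (added): the in-memory MNISTDataset sidesteps the
# tf.data pipeline entirely; 'mnist_data' is a placeholder directory.
def _example_mnist_dataset():
    db = MNISTDataset()
    db.create_dataset('mnist_data', vector_fmt=True, seed=42)
    batch_x, batch_y = db.next_batch(64)
    return batch_x.shape, batch_y.shape  # (64, 784) and (64, 10)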
|
import os
import sys
import zipfile
from .. import exception
def parseargs(p):
"""
Add arguments and `func` to `p`.
:param p: ArgumentParser
:return: ArgumentParser
"""
p.set_defaults(func=func)
p.description = "package and compress (archive) files"
p.usage = (
'%(prog)s -l [OPTION]... ZIPFILE...\n'
' %(prog)s -t [OPTION]... ZIPFILE...\n'
' %(prog)s -e [OPTION]... ZIPFILE TARGET\n'
' %(prog)s -c [OPTION]... ZIPFILE SOURCE...\n'
)
p.add_argument('FILE', nargs='+')
p.add_argument('target', nargs='?')
p.add_argument(
"-c",
"--create",
action="store_true",
dest="create",
help="create zipfile from source.",
)
p.add_argument(
"-e",
"--extract",
action="store_true",
dest="extract",
help="extract zipfile into target directory.",
)
p.add_argument(
"-l", "--list", action="store_true", dest="list", help="list files in zipfile."
)
p.add_argument(
"-t",
"--test",
action="store_true",
dest="test",
help="test if a zipfile is valid.",
)
return p
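# Hedged usage sketch (added, not part of pycoreutils): wires the parser to a
# standalone ArgumentParser outside the usual pycoreutils dispatcher; the
# archive and file names are placeholders, and attaching `parser` to the
# Namespace is an assumption func() relies on for its usage errors.
def _example_standalone():
    import argparse
    parser = argparse.ArgumentParser()
    args = parseargs(parser).parse_args(['-c', 'archive.zip', 'a.txt', 'b.txt'])
    args.parser = parser
    args.func(args)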
def func(args):
if args.list:
list_(args)
elif args.test:
test(args)
elif args.extract:
extract(args)
elif args.create:
create(args)
else:
args.parser.print_usage(sys.stderr)
sys.exit(1)
def create(args):
    # the parsed Namespace keeps positionals in FILE (and possibly target),
    # so rebuild the flat argument list the original indexing expects
    files = args.FILE + ([args.target] if args.target else [])
    if len(files) < 2:
        args.parser.print_usage(sys.stderr)
        sys.exit(1)
def addToZip(zf, path, zippath):
if os.path.isfile(path):
zf.write(path, zippath, zipfile.ZIP_DEFLATED)
elif os.path.isdir(path):
for nm in os.listdir(path):
addToZip(zf, os.path.join(path, nm), os.path.join(zippath, nm))
else:
exception.StdErrException("Can't store {0}".format(path))
    zf = zipfile.ZipFile(files[0], 'w', allowZip64=True)
    for src in files[1:]:
addToZip(zf, src, os.path.basename(src))
zf.close()
def extract(args):
    files = args.FILE + ([args.target] if args.target else [])
    if len(files) != 2:
        args.parser.print_usage(sys.stderr)
        sys.exit(1)
    zf = zipfile.ZipFile(files[0], 'r')
    out = files[1]
for path in zf.namelist():
if path.startswith('./'):
tgt = os.path.join(out, path[2:])
else:
tgt = os.path.join(out, path)
tgtdir = os.path.dirname(tgt)
if not os.path.exists(tgtdir):
os.makedirs(tgtdir)
fp = open(tgt, 'wb')
fp.write(zf.read(path))
fp.close()
zf.close()
def test(args):
    files = args.FILE + ([args.target] if args.target else [])
    if len(files) != 1:
        args.parser.print_usage(sys.stderr)
        sys.exit(1)
    zf = zipfile.ZipFile(files[0], 'r')
badfile = zf.testzip()
if badfile:
sys.stderr("Error on file {0}\n".format(badfile))
sys.exit(1)
else:
print("{0} tested ok".format(args[0]) + "\n")
sys.exit(0)
def list_(args):
    files = args.FILE + ([args.target] if args.target else [])
    if len(files) != 1:
        args.parser.print_usage(sys.stderr)
        sys.exit(1)
    zf = zipfile.ZipFile(files[0], 'r')
zf.printdir()
zf.close()
|
# -*- coding: utf-8 -*-
def gldas_variables():
return [('Air Temperature', 'Tair_f_inst'),
('Canopy Water Amount', 'CanopInt_inst'),
('Downward Heat Flux In Soil', 'Qg_tavg'),
('Evaporation Flux From Canopy', 'ECanop_tavg'),
('Evaporation Flux From Soil', 'ESoil_tavg'),
('Potential Evaporation Flux', 'PotEvap_tavg'),
('Precipitation Flux', 'Rainf_f_tavg'),
('Rainfall Flux', 'Rainf_tavg'),
('Root Zone Soil Moisture', 'RootMoist_inst'),
('Snowfall Flux', 'Snowf_tavg'),
('Soil Temperature', 'SoilTMP0_10cm_inst'),
('Specific Humidity', 'Qair_f_inst'),
('Subsurface Runoff Amount', 'Qsb_acc'),
('Surface Air Pressure', 'Psurf_f_inst'),
('Surface Albedo', 'Albedo_inst'),
('Surface Downwelling Longwave Flux In Air', 'LWdown_f_tavg'),
('Surface Downwelling Shortwave Flux In Air', 'SWdown_f_tavg'),
('Surface Net Downward Longwave Flux', 'Lwnet_tavg'),
('Surface Net Downward Shortwave Flux', 'Swnet_tavg'),
('Surface Runoff Amount', 'Qs_acc'),
('Surface Snow Amount', 'SWE_inst'),
('Surface Snow Melt Amount', 'Qsm_acc'),
('Surface Snow Thickness', 'SnowDepth_inst'),
('Surface Temperature', 'AvgSurfT_inst'),
('Surface Upward Latent Heat Flux', 'Qle_tavg'),
('Surface Upward Sensible Heat Flux', 'Qh_tavg'),
('Transpiration Flux From Veg', 'Tveg_tavg'),
('Water Evaporation Flux', 'Evap_tavg'),
('Wind Speed', 'Wind_f_inst')]
def timeintervals():
return [
('All Available Times', 'alltimes'),
('2010s', '2010s'),
('2000s', '2000s'),
('1990s', '1990s'),
('1980s', '1980s'),
('1970s', '1970s'),
('1960s', '1960s'),
('1950s', '1950s'),
]
def wms_colors():
return [
('SST-36', 'sst_36'),
('Greyscale', 'greyscale'),
('Rainbow', 'rainbow'),
('OCCAM', 'occam'),
('OCCAM Pastel', 'occam_pastel-30'),
('Red-Blue', 'redblue'),
('NetCDF Viewer', 'ncview'),
('ALG', 'alg'),
('ALG 2', 'alg2'),
('Ferret', 'ferret'),
]
def geojson_colors():
return [
('White', '#ffffff'),
        ('Transparent', 'rgba(0,0,0,0)'),
('Red', '#ff0000'),
('Green', '#00ff00'),
('Blue', '#0000ff'),
('Black', '#000000'),
('Pink', '#ff69b4'),
('Orange', '#ffa500'),
('Teal', '#008080'),
('Purple', '#800080'),
]
def get_charttypes():
return [
('Full Timeseries (Single-Line Plot)', 'timeseries'),
('Monthly Analysis (Box Plot)', 'monthbox'),
('Monthly Analysis (Multi-Line Plot)', 'monthmulti'),
('Yearly Analysis (Box Plot)', 'yearbox'),
('Yearly Analysis (Multi-Line Plot)', 'yearmulti'),
]
def worldregions():
return (
('All World Regions', ''),
('Antarctica', 'Antarctica'),
('Asiatic Russia', 'Asiatic Russia'),
('Australia/New Zealand', 'Australia/New Zealand'),
('Caribbean', 'Caribbean'),
('Central America', 'Central America'),
('Central Asia', 'Central Asia'),
('Eastern Africa', 'Eastern Africa'),
('Eastern Asia', 'Eastern Asia'),
('Eastern Europe', 'Eastern Europe'),
('European Russia', 'European Russia'),
('Melanesia', 'Melanesia'),
('Micronesia', 'Micronesia'),
('Middle Africa', 'Middle Africa'),
('Northern Africa', 'Northern Africa'),
('Northern America', 'Northern America'),
('Northern Europe', 'Northern Europe'),
('Polynesia', 'Polynesia'),
('South America', 'South America'),
('Southeastern Asia', 'Southeastern Asia'),
('Southern Africa', 'Southern Africa'),
('Southern Asia', 'Southern Asia'),
('Southern Europe', 'Southern Europe'),
('Western Africa', 'Western Africa'),
('Western Asia', 'Western Asia'),
('Western Europe', 'Western Europe'),
('None', 'none')
)
def countries():
return ['Afghanistan', 'Albania', 'Algeria', 'American Samoa', 'Andorra', 'Angola', 'Anguilla', 'Antarctica',
'Antigua and Barbuda', 'Argentina', 'Armenia', 'Aruba', 'Australia', 'Austria', 'Azerbaijan', 'Bahamas',
'Bahrain', 'Baker Island', 'Bangladesh', 'Barbados', 'Belarus', 'Belgium', 'Belize', 'Benin', 'Bermuda',
'Bhutan', 'Bolivia', 'Bonaire', 'Bosnia and Herzegovina', 'Botswana', 'Bouvet Island', 'Brazil',
'British Indian Ocean Territory', 'British Virgin Islands', 'Brunei Darussalam', 'Bulgaria', 'Burkina Faso',
'Burundi', 'Cambodia', 'Cameroon', 'Canada', 'Cape Verde', 'Cayman Islands', 'Central African Republic',
'Chad', 'Chile', 'China', 'Christmas Island', 'Cocos Islands', 'Colombia', 'Comoros', 'Congo', 'Congo DRC',
'Cook Islands', 'Costa Rica', "Côte d'Ivoire", 'Croatia', 'Cuba', 'Curacao', 'Cyprus', 'Czech Republic',
'Denmark', 'Djibouti', 'Dominica', 'Dominican Republic', 'Ecuador', 'Egypt', 'El Salvador',
'Equatorial Guinea', 'Eritrea', 'Estonia', 'Ethiopia', 'Falkland Islands', 'Faroe Islands', 'Fiji',
'Finland', 'France', 'French Guiana', 'French Polynesia', 'French Southern Territories', 'Gabon', 'Gambia',
'Georgia', 'Germany', 'Ghana', 'Gibraltar', 'Glorioso Island', 'Greece', 'Greenland', 'Grenada',
'Guadeloupe', 'Guam', 'Guatemala', 'Guernsey', 'Guinea', 'Guinea-Bissau', 'Guyana', 'Haiti',
'Heard Island and McDonald Islands', 'Honduras', 'Howland Island', 'Hungary', 'Iceland', 'India',
'Indonesia', 'Iran', 'Iraq', 'Ireland', 'Isle of Man', 'Israel', 'Italy', 'Jamaica', 'Jan Mayen', 'Japan',
'Jarvis Island', 'Jersey', 'Johnston Atoll', 'Jordan', 'Juan De Nova Island', 'Kazakhstan', 'Kenya',
'Kiribati', 'Kuwait', 'Kyrgyzstan', 'Laos', 'Latvia', 'Lebanon', 'Lesotho', 'Liberia', 'Libya',
'Liechtenstein', 'Lithuania', 'Luxembourg', 'Madagascar', 'Malawi', 'Malaysia', 'Maldives', 'Mali', 'Malta',
'Marshall Islands', 'Martinique', 'Mauritania', 'Mauritius', 'Mayotte', 'Mexico', 'Micronesia',
'Midway Islands', 'Moldova', 'Monaco', 'Mongolia', 'Montenegro', 'Montserrat', 'Morocco', 'Mozambique',
'Myanmar', 'Namibia', 'Nauru', 'Nepal', 'Netherlands', 'New Caledonia', 'New Zealand', 'Nicaragua', 'Niger',
'Nigeria', 'Niue', 'Norfolk Island', 'North Korea', 'Northern Mariana Islands', 'Norway', 'Oman',
'Pakistan', 'Palau', 'Palestinian Territory', 'Panama', 'Papua New Guinea', 'Paraguay', 'Peru',
'Philippines', 'Pitcairn', 'Poland', 'Portugal', 'Puerto Rico', 'Qatar', 'Réunion', 'Romania',
'Russian Federation', 'Rwanda', 'Saba', 'Saint Barthelemy', 'Saint Eustatius', 'Saint Helena',
'Saint Kitts and Nevis', 'Saint Lucia', 'Saint Martin', 'Saint Pierre and Miquelon',
'Saint Vincent and the Grenadines', 'Samoa', 'San Marino', 'Sao Tome and Principe', 'Saudi Arabia',
'Senegal', 'Serbia', 'Seychelles', 'Sierra Leone', 'Singapore', 'Sint Maarten', 'Slovakia', 'Slovenia',
'Solomon Islands', 'Somalia', 'South Africa', 'South Georgia', 'South Korea', 'South Sudan', 'Spain',
'Sri Lanka', 'Sudan', 'Suriname', 'Svalbard', 'Swaziland', 'Sweden', 'Switzerland', 'Syria', 'Tajikistan',
'Tanzania', 'Thailand', 'The Former Yugoslav Republic of Macedonia', 'Timor-Leste', 'Togo', 'Tokelau',
'Tonga', 'Trinidad and Tobago', 'Tunisia', 'Turkey', 'Turkmenistan', 'Turks and Caicos Islands', 'Tuvalu',
'Uganda', 'Ukraine', 'United Arab Emirates', 'United Kingdom', 'United States', 'Uruguay',
'US Virgin Islands', 'Uzbekistan', 'Vanuatu', 'Vatican City', 'Venezuela', 'Vietnam', 'Wake Island',
'Wallis and Futuna', 'Yemen', 'Zambia', 'Zimbabwe']
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# LICENSE
#
# Copyright (C) 2010-2018 GEM Foundation, <NAME>, <NAME>,
# <NAME>.
#
# The Hazard Modeller's Toolkit is free software: you can redistribute
# it and/or modify it under the terms of the GNU Affero General Public
# License as published by the Free Software Foundation, either version
# 3 of the License, or (at your option) any later version.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>
#
# DISCLAIMER
#
# The software Hazard Modeller's Toolkit (openquake.hmtk) provided herein
# is released as a prototype implementation on behalf of
# scientists and engineers working within the GEM Foundation (Global
# Earthquake Model).
#
# It is distributed for the purpose of open collaboration and in the
# hope that it will be useful to the scientific, engineering, disaster
# risk and software design communities.
#
# The software is NOT distributed as part of GEM's OpenQuake suite
# (https://www.globalquakemodel.org/tools-products) and must be considered as a
# separate entity. The software provided herein is designed and implemented
# by scientific staff. It is not developed to the design standards, nor
# subject to same level of critical review by professional software
# developers, as GEM's OpenQuake software suite.
#
# Feedback and contribution to the software is welcome, and can be
# directed to the hazard scientific staff of the GEM Model Facility
# (<EMAIL>).
#
# The Hazard Modeller's Toolkit (openquake.hmtk) is therefore distributed WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
# for more details.
#
# The GEM Foundation, and the authors of the software, assume no
# liability for use of the software.
# -*- coding: utf-8 -*-
'''
Class to implement a set of functionalities for selecting events from
an earthquake catalogue
'''
import numpy as np
from collections import OrderedDict
from datetime import datetime
from copy import deepcopy
from openquake.hazardlib.geo.point import Point
from openquake.hazardlib.geo.mesh import Mesh
from openquake.hmtk.seismicity.catalogue import Catalogue
from openquake.hmtk.seismicity.utils import decimal_time
def _check_depth_limits(input_dict):
'''Returns the default upper and lower depth values if not in dictionary
:param input_dict:
Dictionary corresponding to the kwargs dictionary of calling function
:returns:
'upper_depth': Upper seismogenic depth (float)
'lower_depth': Lower seismogenic depth (float)
'''
if ('upper_depth' in input_dict.keys()) and input_dict['upper_depth']:
if input_dict['upper_depth'] < 0.:
raise ValueError('Upper seismogenic depth must be positive')
else:
upper_depth = input_dict['upper_depth']
else:
upper_depth = 0.0
if ('lower_depth' in input_dict.keys()) and input_dict['lower_depth']:
if input_dict['lower_depth'] < upper_depth:
raise ValueError('Lower depth must take a greater value than'
' upper depth!')
else:
lower_depth = input_dict['lower_depth']
else:
lower_depth = np.inf
return upper_depth, lower_depth
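# Hedged illustration (added): with no limits supplied the helper falls back
# to the full seismogenic depth range, otherwise it echoes the given bounds.
def _example_check_depth_limits():
    assert _check_depth_limits({}) == (0.0, np.inf)
    assert _check_depth_limits({'upper_depth': 5.0, 'lower_depth': 30.0}) == (5.0, 30.0)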
def _get_decimal_from_datetime(time):
'''
As the decimal time function requires inputs in the form of numpy
arrays need to convert each value in the datetime object to a single
numpy array
'''
# Get decimal seconds from seconds + microseconds
    temp_seconds = float(time.second) + (float(time.microsecond) / 1.0E6)
return decimal_time(np.array([time.year], dtype=int),
np.array([time.month], dtype=int),
np.array([time.day], dtype=int),
np.array([time.hour], dtype=int),
np.array([time.minute], dtype=int),
                        np.array([temp_seconds], dtype=float))
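# Hedged illustration (added): a timestamp half way through a non-leap year
# should map to roughly year + 0.5 in decimal time.
def _example_decimal_from_datetime():
    value = _get_decimal_from_datetime(datetime(2001, 7, 2, 12, 0, 0))
    return value  # length-one array, expected to be close to 2001.5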
class CatalogueSelector(object):
'''
Class to implement methods for selecting subsets of the catalogue
according to various attribute criteria.
:attr catalogue: The catalogue to which the selection is applied as
instance of openquake.hmtk.seismicity.catalogue.Catalogue
:attr create_copy: Boolean to indicate whether to create copy of the
original catalogue before selecting {default = True}
'''
def __init__(self, master_catalogue, create_copy=True):
'''
Instantiate
:param master_catalogue:
Instance of openquake.hmtk.seismicity.catalogue.Catalogue class
        :param bool create_copy: Option to create copy of the class before
selecting (i.e. preserving original class)
'''
self.catalogue = master_catalogue
self.copycat = create_copy
def select_catalogue(self, valid_id):
'''
Method to post-process the catalogue based on the selection options
:param numpy.ndarray valid_id:
Boolean vector indicating whether each event is selected (True)
or not (False)
:returns:
Catalogue of selected events as instance of
openquake.hmtk.seismicity.catalogue.Catalogue class
'''
if not np.any(valid_id):
# No events selected - create clean instance of class
output = Catalogue()
output.processes = self.catalogue.processes
elif np.all(valid_id):
if self.copycat:
output = deepcopy(self.catalogue)
else:
output = self.catalogue
else:
if self.copycat:
output = deepcopy(self.catalogue)
else:
output = self.catalogue
output.purge_catalogue(valid_id)
return output
def within_polygon(self, polygon, distance=None, **kwargs):
'''
Select earthquakes within polygon
:param polygon:
            Polygon as instance of nhlib.geo.polygon.Polygon class
:param float distance:
Buffer distance (km) (can take negative values)
:returns:
Instance of :class:`openquake.hmtk.seismicity.catalogue.Catalogue`
containing only selected events
'''
if distance:
# If a distance is specified then dilate the polyon by distance
zone_polygon = polygon.dilate(distance)
else:
zone_polygon = polygon
# Make valid all events inside depth range
upper_depth, lower_depth = _check_depth_limits(kwargs)
valid_depth = np.logical_and(
self.catalogue.data['depth'] >= upper_depth,
self.catalogue.data['depth'] < lower_depth)
# Events outside polygon returned to invalid assignment
catalogue_mesh = Mesh(self.catalogue.data['longitude'],
self.catalogue.data['latitude'],
self.catalogue.data['depth'])
valid_id = np.logical_and(valid_depth,
zone_polygon.intersects(catalogue_mesh))
return self.select_catalogue(valid_id)
def circular_distance_from_point(self, point, distance, **kwargs):
'''
Select earthquakes within a distance from a Point
:param point:
Centre point as instance of nhlib.geo.point.Point class
:param float distance:
Distance (km)
:returns:
Instance of :class:`openquake.hmtk.seismicity.catalogue.Catalogue`
containing only selected events
'''
        if kwargs['distance_type'] == 'epicentral':
locations = Mesh(
self.catalogue.data['longitude'],
self.catalogue.data['latitude'],
np.zeros(len(self.catalogue.data['longitude']), dtype=float))
point = Point(point.longitude, point.latitude, 0.0)
else:
locations = self.catalogue.hypocentres_as_mesh()
is_close = point.closer_than(locations, distance)
return self.select_catalogue(is_close)
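    # Hedged usage sketch (added, not part of the original module): `catalogue`
    # stands for a populated openquake.hmtk Catalogue instance; the call below
    # would keep only the events within 50 km epicentral distance of a point:
    #
    #   selector = CatalogueSelector(catalogue, create_copy=True)
    #   target = Point(longitude=30.0, latitude=40.0, depth=0.0)
    #   subset = selector.circular_distance_from_point(
    #       target, distance=50.0, distance_type='epicentral')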
def cartesian_square_centred_on_point(self, point, distance, **kwargs):
'''
Select earthquakes from within a square centered on a point
:param point:
Centre point as instance of nhlib.geo.point.Point class
:param distance:
Distance (km)
:returns:
Instance of :class:`openquake.hmtk.seismicity.catalogue.Catalogue`
class containing only selected events
'''
point_surface = Point(point.longitude, point.latitude, 0.)
        # As distance is the half-width of the square, find the bounding points due north, east, south and west of the centre
north_point = point_surface.point_at(distance, 0., 0.)
east_point = point_surface.point_at(distance, 0., 90.)
south_point = point_surface.point_at(distance, 0., 180.)
west_point = point_surface.point_at(distance, 0., 270.)
is_long = np.logical_and(
self.catalogue.data['longitude'] >= west_point.longitude,
self.catalogue.data['longitude'] < east_point.longitude)
is_surface = np.logical_and(
is_long,
np.logical_and(
self.catalogue.data['latitude'] >= south_point.latitude,
self.catalogue.data['latitude'] < north_point.latitude))
upper_depth, lower_depth = _check_depth_limits(kwargs)
is_valid = np.logical_and(
is_surface,
np.logical_and(
self.catalogue.data['depth'] >= upper_depth,
self.catalogue.data['depth'] < lower_depth))
return self.select_catalogue(is_valid)
def within_joyner_boore_distance(self, surface, distance, **kwargs):
'''
Select events within a Joyner-Boore distance of a fault
:param surface:
Fault surface as instance of
nhlib.geo.surface.base.SimpleFaultSurface or as instance of
nhlib.geo.surface.ComplexFaultSurface
:param float distance:
Rupture distance (km)
:returns:
Instance of :class:`openquake.hmtk.seismicity.catalogue.Catalogue`
containing only selected events
'''
upper_depth, lower_depth = _check_depth_limits(kwargs)
rjb = surface.get_joyner_boore_distance(
self.catalogue.hypocentres_as_mesh())
is_valid = np.logical_and(
rjb <= distance,
np.logical_and(self.catalogue.data['depth'] >= upper_depth,
self.catalogue.data['depth'] < lower_depth))
return self.select_catalogue(is_valid)
def within_rupture_distance(self, surface, distance, **kwargs):
'''
Select events within a rupture distance from a fault surface
:param surface:
Fault surface as instance of nhlib.geo.surface.base.BaseSurface
:param float distance:
Rupture distance (km)
:returns:
Instance of :class:`openquake.hmtk.seismicity.catalogue.Catalogue`
containing only selected events
'''
# Check for upper and lower depths
upper_depth, lower_depth = _check_depth_limits(kwargs)
rrupt = surface.get_min_distance(self.catalogue.hypocentres_as_mesh())
is_valid = np.logical_and(
rrupt <= distance,
np.logical_and(self.catalogue.data['depth'] >= upper_depth,
self.catalogue.data['depth'] < lower_depth))
return self.select_catalogue(is_valid)
def within_time_period(self, start_time=None, end_time=None):
'''
Select earthquakes occurring within a given time period
:param start_time:
Earliest time (as datetime.datetime object)
:param end_time:
Latest time (as datetime.datetime object)
:returns:
Instance of :class:`openquake.hmtk.seismicity.catalogue.Catalogue`
containing only selected events
'''
if not start_time:
if not end_time:
# No times input, therefore skip everything and return catalog
return self.catalogue
else:
start_time = np.min(self.catalogue.data['year'])
else:
start_time = _get_decimal_from_datetime(start_time)
if not end_time:
end_time = _get_decimal_from_datetime(datetime.now())
else:
end_time = _get_decimal_from_datetime(end_time)
# Get decimal time values
time_value = self.catalogue.get_decimal_time()
is_valid = np.logical_and(time_value >= start_time,
time_value < end_time)
return self.select_catalogue(is_valid)
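# Illustrative sketch (hypothetical names, not part of the module): selecting
# events recorded between two dates with the method above.
#
#     from datetime import datetime
#     nineties = selector.within_time_period(start_time=datetime(1990, 1, 1),
#                                            end_time=datetime(2000, 1, 1))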
def within_depth_range(self, lower_depth=None, upper_depth=None):
'''
Selects events within a specified depth range
:param float lower_depth:
Lower depth for consideration
:param float upper_depth:
Upper depth for consideration
:returns:
Instance of :class:`openquake.hmtk.seismicity.catalogue.Catalogue`
containing only selected events
'''
if not lower_depth:
if not upper_depth:
# No limiting depths defined - so return entire catalogue!
return self.catalogue
else:
lower_depth = np.inf
if not upper_depth:
upper_depth = 0.0
is_valid = np.logical_and(self.catalogue.data['depth'] >= upper_depth,
self.catalogue.data['depth'] < lower_depth)
return self.select_catalogue(is_valid)
def within_magnitude_range(self, lower_mag=None, upper_mag=None):
'''
Selects events within a specified magnitude range
:param float lower_mag:
Lower magnitude for consideration
:param float upper_mag:
Upper magnitude for consideration
:returns:
Instance of openquake.hmtk.seismicity.catalogue.Catalogue class containing
only selected events
'''
if not lower_mag:
if not upper_mag:
# No limiting magnitudes defined - return entire catalogue!
return self.catalogue
else:
lower_mag = -np.inf
if not upper_mag:
upper_mag = np.inf
is_valid = np.logical_and(
self.catalogue.data['magnitude'] >= lower_mag,
self.catalogue.data['magnitude'] < upper_mag)
return self.select_catalogue(is_valid)
def create_cluster_set(self, vcl):
"""
For a given catalogue and list of cluster IDs this function splits
the catalogue into a dictionary containing an individual catalogue
of events within each cluster
:param numpy.ndarray vcl:
Cluster ID list
:returns:
Dictionary of instances of the
:class:`openquake.hmtk.seismicity.catalogue.Catalogue` class, where
each instance is the catalogue of events in the corresponding cluster
"""
num_clust = np.max(vcl)
cluster_set = []
for clid in range(0, num_clust + 1):
idx = np.where(vcl == clid)[0]
cluster_cat = deepcopy(self.catalogue)
cluster_cat.select_catalogue_events(idx)
cluster_set.append((clid, cluster_cat))
return OrderedDict(cluster_set)
def within_bounding_box(self, limits):
"""
Selects the earthquakes within a bounding box.
:parameter limits:
A list or a numpy array with four elements in the following order:
- min x (longitude)
- min y (latitude)
- max x (longitude)
- max y (latitude)
:returns:
Returns a :class:`openquake.hmtk.seismicity.catalogue.Catalogue` instance
"""
is_valid = np.logical_and(
self.catalogue.data['longitude'] >= limits[0],
np.logical_and(self.catalogue.data['longitude'] <= limits[2],
np.logical_and(self.catalogue.data['latitude'] >= limits[1],
self.catalogue.data['latitude'] <= limits[3])))
return self.select_catalogue(is_valid)
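# Illustrative sketch (hypothetical values, not part of the module): chaining a
# bounding-box selection with a magnitude cut; each call returns a Catalogue, so
# a new selector can be built on the reduced catalogue.
#
#     boxed = selector.within_bounding_box([20.0, 38.0, 24.0, 42.0])
#     strong = CatalogueSelector(boxed).within_magnitude_range(lower_mag=5.0)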
|
import json
import cachetools
from botocore.client import Config
from botocore.exceptions import ClientError
from s1crets.core import DictQuery
from s1crets.providers.base import BaseProvider, DefaultValue, args_cache_key
from s1crets.providers.aws.base import ServiceWrapper
@cachetools.cached(cache={}, key=args_cache_key)
class SecretProvider(BaseProvider):
def __init__(self, sts_args={}, cache_args={}, retry=3, timeout=5, **kwargs):
config = Config(connect_timeout=timeout, read_timeout=timeout,
retries={'total_max_attempts': retry})
self.ssm = ServiceWrapper('ssm', boto_config=config, **sts_args)
super().__init__(sts_args=sts_args, cache_args=cache_args)
def get(self, path, default=DefaultValue, decrypt=True, cached=True, **kwargs):
if cached:
try:
# we take decrypt's value into account, so we can store both
# encrypted and decrypted data in the cache
return self.cache.get('keys', path, decrypt)
except KeyError:
# not in cache
pass
if not path.startswith('/aws/reference/secretsmanager/'):
# if the path references the Parameter Store, just try to fetch
# the value
try:
res = self.ssm.get_parameter(Name=path,
WithDecryption=decrypt)
self.cache.set('keys', path, res['Parameter']['Value'], decrypt)
return res['Parameter']['Value']
except ClientError as e:
if e.response['Error']['Code'] == 'ParameterNotFound':
if default is not DefaultValue:
# if the parameter cannot be found and we've got a
# default value, return it, instead of raising
# "KeyError" exception
return default
else:
raise KeyError(path)
# if it's not ParameterNotFound and we haven't got a default
# value, re-raise the exception
raise
# Secrets Manager values are mostly JSON, so to make it possible to
# reference data stored in them the same way as with Parameter Store,
# iterate backwards over the path on the / separator and try to find
# the key that we need to get
fetch = True
res = None
for i, c in list(enumerate(path))[::-1]:
if path[:i+1] == '/aws/reference/secretsmanager/':
# don't go below the above path, there is nothing there for us
break
if fetch:
try:
res = self.ssm.get_parameter(Name=path[:i+1],
WithDecryption=decrypt)
break
except ClientError as e:
if e.response['Error']['Code'] != 'ParameterNotFound':
# let other exceptions through
raise
fetch = False
if c == '/':
fetch = True
# no such key
if res is None:
raise KeyError(path)
try:
# is it a JSON?
res = json.loads(res['Parameter']['Value'])
except Exception:
# no
self.cache.set('keys', path, res['Parameter']['Value'], decrypt)
return res['Parameter']['Value']
if not path[i+2:]:
# if the remainder of the path is empty, the SM value was referenced
self.cache.set('keys', path, res, decrypt)
return res
# otherwise a token inside the JSON was referenced, try to return that,
# with handling nonexistent/default cases
subkey = path[i+2:].split('/')
if default is DefaultValue:
res = DictQuery(res).get(subkey, DefaultValue)
if res is DefaultValue:
# no such key
raise KeyError(path)
else:
self.cache.set('keys', path, res, decrypt)
return res
else:
val = DictQuery(res).get(subkey, default)
if val != default:
self.cache.set('keys', path, val, decrypt)
return val
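# Illustrative sketch (hypothetical paths, not part of the module): fetching a
# plain Parameter Store value and a key nested inside a Secrets Manager JSON
# secret through the /aws/reference/secretsmanager/ prefix handled above.
#
#     sp = SecretProvider()
#     db_pass = sp.get('/myapp/prod/db_password', default=None)
#     api_key = sp.get('/aws/reference/secretsmanager/myapp/prod/creds/api_key')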
def get_by_path(self, path, decrypt=True, recursive=True, cached=True):
try:
if cached:
return self.cache.get('paths', path, decrypt, recursive)
except KeyError:
# not in cache
pass
params = {}
kwargs = {}
while True:
r = self.ssm.get_parameters_by_path(Path=path, Recursive=recursive,
WithDecryption=decrypt, **kwargs)
for param in r.get('Parameters', []):
params[param['Name']] = param['Value']
if 'NextToken' not in r or r['NextToken'] is None:
# we've got all params
break
# set the next token
kwargs['NextToken'] = r['NextToken']
self.cache.set('paths', path, params, decrypt, recursive)
return params
def update(self, path, value):
next_token = None
search_next_page = True
# the loop stops when the response does not contain NextToken or we have found the data
while search_next_page:
try:
p_dict = {"Filters": [{'Key': 'Name', 'Values': [path]}]}
if next_token:
p_dict['NextToken'] = next_token
res = self.ssm.describe_parameters(**p_dict)
except ClientError as e:
if e.response['Error']['Code'] == 'ParameterNotFound':
raise KeyError(path)
raise
orig_params = res.get('Parameters', [])
if not orig_params:
if 'NextToken' not in res:
# cannot find the path in the parameter store
raise KeyError(path)
else:
# cannot find the path in the current page; search the next page
next_token = res['NextToken']
else:
search_next_page = False
if len(orig_params) > 1:
raise KeyError('describe_parameters returned other than one ({}) parameters on path {}'.format(
len(orig_params), path))
kwargs = self.dict_filt(orig_params[0], ('Name', 'Type', 'KeyId', 'Description'))
self.ssm.put_parameter(Value=value, Overwrite=True, **kwargs)
# remove path from the key_cache
try:
self.cache.delete('keys', path)
except KeyError:
pass
# and simply drop all entries from path_cache
self.cache.clear('paths')
return value
def path_exists(self, path, **kwargs):
# we're using describe_parameters here, so we can check for paths and
# exact keys as well
next_token = None
is_path_in_parameter_storage = None
# the loop stops when the response does not contain NextToken or we have found the data
while is_path_in_parameter_storage is None:
try:
p_dict = {"Filters": [{'Key': 'Name', 'Values': [path]}]}
if next_token:
p_dict['NextToken'] = next_token
res = self.ssm.describe_parameters(**p_dict)
except ClientError as e:
if e.response['Error']['Code'] == 'ParameterNotFound':
is_path_in_parameter_storage = False
else:
raise
orig_params = res.get('Parameters', [])
# cannot find it on this page
if not orig_params:
if 'NextToken' not in res:
# cannot find the path in the parameter store
is_path_in_parameter_storage = False
else:
# cannot find the path in the current page; search the next page
next_token = res['NextToken']
else:
is_path_in_parameter_storage = True
return is_path_in_parameter_storage
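# Illustrative sketch (hypothetical paths, not part of the module): bulk reads
# and existence checks; update() also invalidates the key/path caches as shown
# above.
#
#     params = sp.get_by_path('/myapp/prod/', recursive=True)
#     if sp.path_exists('/myapp/prod/db_password'):
#         sp.update('/myapp/prod/db_password', 'new-value')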
|
__author__ = 'jfb_000'
from AddressBook import Book
def testbook(bookobj, firstname=None, lastname=None, phonenumber=None, emailaddress=None, street=None, city=None,
state=None, country=None):
print("----------------TEST-------------------")
# create book
print("This is " + bookobj.ownerid + "'s address book")
print("The maximum amount of contacts is " + str(bookobj.maxContacts()))
print("Number of contacts in address book: " + str(bookobj.numberOfContacts()))
# add contact with all values
bookobj.addContact(firstname, lastname, phonenumber, emailaddress, street, city, state, country)
print("Number of contacts in address book: " + str(bookobj.numberOfContacts()))
# find contact via phone number
if phonenumber:
testphonenumber = phonenumber
contactkeylist = bookobj.findContacts(phonenumber=testphonenumber)
if contactkeylist:
print("The contact(s) with phone number " + testphonenumber + " is:")
for key in contactkeylist:
bookobj.findContactByKey(key).dispContact()
else:
print("No contact with the phone number " + testphonenumber + " was found.")
# find contact via street and city
if street and city:
teststreet = street
testcity = city
contactkeylist = bookobj.findContacts(street=teststreet, city=testcity)
if contactkeylist:
print("The contact(s) with address " + teststreet + " " + testcity + " is:")
for key in contactkeylist:
bookobj.findContactByKey(key).dispContact()
else:
print("No contact with the address " + teststreet + " " + testcity + " was found.")
# testemail = '<EMAIL>'
# contact = bookobj.findContact(email=testemail)
# if contact:
# print("The contact with email " + testemail + " is " + contact.firstname + " " + contact.lastname)
# else:
# print("No contact with the email " + testemail + " was found.")
# contact = bookobj.findContactByName(newcontact.firstname, newcontact.lastname)
# contact2 = bookobj.findContactByName('Jesse')
# contact.dispContact()
# bookobj.removeContact(contact2)
# contact.delLastName()
# bookobj.removeContact(contact)
# print("Number of contacts in address book: " + str(bookobj.numberOfContacts()))
# num = bookobj.maxContacts()
# print("The maximum amount of contacts is " + str(bookobj.maxContacts()))
# def testcontact(firstname=None, lastname=None, phonenumber=None, emailaddress=None, street=None, city=None,
# country=None):
# print("----------------TEST-------------------")
# contactobj = Contact(firstname, lastname, phonenumber, emailaddress, street, city, country)
# print("Contact's first name is " + contactobj.firstName)
# if contactobj.lastname is not None:
# print("Contact's last name is " + contactobj.lastname)
# else:
# print('No last name')
# if contactobj.phonenumber is not None:
# print("Contact's phone number is " + contactobj.phonenumber)
# else:
# print('No phone number')
# if contactobj.emailaddress is not None:
# print("Contact's email address is " + contactobj.emailaddress)
# else:
# print('No email address')
# if contactobj.street is not None:
# print("Contact's street is " + contactobj.street)
# else:
# print('No street')
# if contactobj.city is not None:
# print("Contact's city is " + contactobj.city)
# else:
# print('No city')
# if contactobj.country is not None:
# print("Contact's country is " + contactobj.country)
# else:
# print('No country')
|
import jinja2
page = {}
page['title'] = 'Shkola'
page['item_path'] = '../src/'
page['google_signin_client_id'] = ""
page['google_site_verification'] = ""
page['exit'] = "EXIT"
page["debug_checkall"] = True
page["user_picture"] = "https://lh5.googleusercontent.com/-3VJ2UlD0Y3U/AAAAAAAAAAI/AAAAAAAAAAA/AMZuucnCsCk0v-JmKlQX7QXTrFI--Y_WXA/s96-c/photo.jpg"
page["user_name"] = "<NAME>"
page["q_number"] = "QN"
page["stats"] = {
'1': {'correct': 1, 'incorrect': 0},
'2': {'correct': 2, 'incorrect': 1},
'3': {'correct': 1, 'incorrect': 1}
}
page["total_bar"] = {"star1": 0, "star2": 0, "star3": 0, "missed": 0}
for k, v in page["stats"].items():
if k == "1":
page["total_bar"]["star1"] = page["total_bar"]["star1"] + v["correct"]
page["total_bar"]["missed"] = page["total_bar"]["missed"] + v["incorrect"]
elif k == "2":
page["total_bar"]["star2"] = page["total_bar"]["star2"] + v["correct"]
page["total_bar"]["missed"] = page["total_bar"]["missed"] + v["incorrect"]
elif k == "3":
page["total_bar"]["star3"] = page["total_bar"]["star3"] + v["correct"]
page["total_bar"]["missed"] = page["total_bar"]["missed"] + v["incorrect"]
page["root"] = "ROOT"
page["q_id"] = "QQQQ"
page["l_id"] = "LLLL"
page["year"] = "PRVI"
page["theme"] = "BROJEVI"
page["subtheme"] = "Operacije"
page["difficulty"] = 1
page["next"] = "NEXT"
page["skip"] = "SKIP"
page["back"] = "PREV"
page["question"] = "TEXT<br>TEXT<br>TEXT<br>TEXT<br>TEXT<br>TEXT<br>TEXT<br>TEXT<br>TEXT<br>" \
"TEXT<br>TEXT<br>TEXT<br>TEXT<br>TEXT<br>TEXT<br>TEXT<br>TEXT<br>TEXT<br>TEXT<br>" \
"TEXT<br>TEXT<br>TEXT<br>TEXT<br>TEXT<br>TEXT<br>TEXT<br>TEXT<br>TEXT<br>TEXT<br>"
page['menu'] = [
{
'name' : 'Zadaci',
'submenu' : {
'id' : 'zadaci',
'options' : [
{
'name' : 'Cetvrti',
'link' : 'C',
'submenu' : {
'id' : 'cetvrti',
'options' : [
{ 'name' : 'Brojevi', 'link' : '1'},
{ 'name' : 'Geometrija', 'link' : '2'},
{ 'name' : 'Razlomci', 'link' : '3'}
]
}
},
{
'name' : 'Treci',
'link' : 'T',
'submenu' : {
'id' : 'treci',
'options' : [
{ 'name' : 'Brojevi', 'link' : '1'},
{ 'name' : 'Geometrija', 'link' : '2'},
{ 'name' : 'Razlomci', 'link' : '3'}
]
}
}
]
}
},
{
'name' : 'Rezultati',
'link' : 'R'
}
]
file_loader = jinja2.FileSystemLoader("..")
env = jinja2.Environment(loader=file_loader)
template = env.get_template("rs/test.html.j2")
print(template.render(template_params=page))
|
import pronouncing
import pyphen
from num2words import num2words as n2w
from syllables import estimate
from lib.constants import (
BANNED_WORDS,
BANNED_PHRASES,
CHARS_ONLY,
PRONUNCIATION_OVERRIDES,
LICK_STRESSES,
LICK_NOTES
)
dic = pyphen.Pyphen(lang="en_UK")
def isLick(title: str):
"""
Returns whether or not the argument is pronounced in a way that matches THE LICC.
:param title: the string to be tested
:return: True, if it matches, False if not.
"""
if containsBanned(title):
return False
clean = cleanStr(title)
stresses = getTitleStresses(clean)
return LICK_STRESSES.match(stresses) is not None
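# Illustrative sketch (hypothetical title, not part of the module): isLick()
# rejects banned titles, cleans the string, converts it to a stress pattern and
# matches that against the LICK_STRESSES pattern from lib.constants.
#
#     if isLick("Some Candidate Wikipedia Title"):
#         print("matches THE LICC")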
def getHyphenation(title: str):
"""Splits the title into words and its words into possible hyphenations.
:param title: The string to split and hyphenate
:return: A list (representing the whole title) containing lists (representing words)
containing strings (representing hyphenated parts of the word)
"""
return [dic.inserted(word).split("-") for word in title.split()]
def adjustHyphenation(hyphenation: list):
"""
Adjusts a list of possible hyphenations in the format of getHyphenation(str),
so that the number of (deep) elements is equal to the number of notes used by THE LICC.
Note that this modifies the argument list.
:param hyphenation: A list in the format of what getHyphenation(str) returns
:return: the argument or None if it couldn't be adjusted.
"""
for wordIndex in range(len(hyphenation)):
word = hyphenation[wordIndex]
for syllableIndex in range(len(word)):
syllable = word[syllableIndex]
if estimate(syllable) > 1:
half = int(len(syllable) / 2) + 1
word.insert(syllableIndex, syllable[:half])
word.insert(syllableIndex + 1, syllable[half:])
word.remove(syllable)
if sum(map(lambda l: len(l), hyphenation)) == LICK_NOTES:
return hyphenation
def containsBanned(title: str):
"""
Return True if banned words or phrases in string.
This implementation is slow, but it was fast to write and I don't care about
speed for this script.
"""
def _containsBannedWord(title: str):
for word in title.split():
word = CHARS_ONLY.sub("", word.lower())
if word in BANNED_WORDS:
return True
return False
def _containsBannedPhrase(title: str):
for phrase in BANNED_PHRASES:
if phrase in title.lower():
return True
return False
return _containsBannedWord(title) or _containsBannedPhrase(title)
def getTitleStresses(title: str):
"""Takes a wikipedia title and gets the combined stresses of all words.
>>> getTitleStresses('Teenage Mutant Ninja Turtles')
'12101010'
Args:
title: String, title of a wikipedia page.
Returns:
String, stresses of each syllable as 0, 1, and 2s.
"""
title_words = title.split()
title_stresses = ""
while title_words:
word = title_words.pop(0)
word_stresses = getWordStresses(word)
# If word was a long number, it may have been parsed into several words.
if isinstance(word_stresses, list):
title_words = word_stresses + title_words
elif isinstance(word_stresses, str):
title_stresses += word_stresses
return title_stresses
def getWordStresses(word: str):
word = numbersToWords(word)
if " " in word:
return word.split()
for override, stresses in PRONUNCIATION_OVERRIDES:
if word.lower() == override.lower():
return stresses
try:
phones = pronouncing.phones_for_word(word)
stresses = pronouncing.stresses(phones[0])
except IndexError:
# Hacky way of discarding candidate title
return "?"
return stresses
def numbersToWords(word):
ordinal_number_endings = ("nd", "rd", "st", "th")
if word.isdigit():
if len(word) == 4:
try:
word = n2w(word, to="year")
except Exception:
# Hacky way of discarding candidate title
return "9"
else:
try:
word = n2w(word)
except Exception:
# Hacky way of discarding candidate title
return "9"
if word[:-2].isdigit() and word[-2:] in ordinal_number_endings:
word = word[:-2]  # keep the numeric part for the ordinal conversion
try:
word = n2w(word, to="ordinal")
except Exception:
# Hacky way of discarding candidate title
return "9"
return word
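# Expected behaviour sketch (assuming num2words defaults; outputs are
# illustrative): four-digit numbers are read as years, other digits as
# cardinals, and "st"/"nd"/"rd"/"th" forms as ordinals; unparseable input is
# discarded via the "9" fallback above.
#
#     numbersToWords("1984")  # -> "nineteen eighty-four"
#     numbersToWords("42")    # -> "forty-two"
#     numbersToWords("3rd")   # -> "third"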
def cleanStr(s: str):
"""Remove characters that the pronouncing dictionary doesn't like.
This isn't very efficient, but it's readable at least. :-)
>>> cleanStr('fooBar123')
'fooBar123'
>>> cleanStr('Hello ([world])')
'Hello world'
>>> cleanStr('{hello-world}')
'hello world'
Args:
s: String to be stripped of offending characters
Returns:
String without offending characters
"""
DEL_CHARS = ["(", ")", "[", "]", "{", "}", ",", ":", ";", "."]
SWAP_CHARS = [("-", " ")]
for char in DEL_CHARS:
s = s.replace(char, "")
for char, replacement in SWAP_CHARS:
s = s.replace(char, replacement)
return s
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'variables': {
'chromium_code': 1,
},
'targets': [
{
# GN version: //ui/wm
'target_name': 'wm',
'type': '<(component)',
'dependencies': [
'../../base/base.gyp:base',
'../../skia/skia.gyp:skia',
'../aura/aura.gyp:aura',
'../compositor/compositor.gyp:compositor',
'../events/devices/events_devices.gyp:events_devices',
'../events/events.gyp:events',
'../events/events.gyp:events_base',
'../events/platform/events_platform.gyp:events_platform',
'../gfx/gfx.gyp:gfx_geometry',
'../gfx/gfx.gyp:gfx',
'../resources/ui_resources.gyp:ui_resources',
'../base/ui_base.gyp:ui_base',
],
'defines': [
'WM_IMPLEMENTATION',
],
'sources': [
# Note: sources list duplicated in GN build.
'core/accelerator_delegate.h',
'core/accelerator_filter.cc',
'core/accelerator_filter.h',
'core/base_focus_rules.cc',
'core/base_focus_rules.h',
'core/capture_controller.cc',
'core/capture_controller.h',
'core/compound_event_filter.cc',
'core/compound_event_filter.h',
'core/coordinate_conversion.cc',
'core/coordinate_conversion.h',
'core/cursor_manager.cc',
'core/cursor_manager.h',
'core/default_activation_client.cc',
'core/default_activation_client.h',
'core/default_screen_position_client.cc',
'core/default_screen_position_client.h',
'core/easy_resize_window_targeter.cc',
'core/easy_resize_window_targeter.h',
'core/focus_controller.cc',
'core/focus_controller.h',
'core/focus_rules.h',
'core/image_grid.cc',
'core/image_grid.h',
'core/input_method_event_filter.cc',
'core/input_method_event_filter.h',
'core/masked_window_targeter.cc',
'core/masked_window_targeter.h',
'core/native_cursor_manager.h',
'core/native_cursor_manager_delegate.h',
'core/nested_accelerator_dispatcher_linux.cc',
'core/nested_accelerator_dispatcher_win.cc',
'core/nested_accelerator_dispatcher.cc',
'core/nested_accelerator_dispatcher.h',
'core/nested_accelerator_delegate.h',
'core/nested_accelerator_controller.cc',
'core/nested_accelerator_controller.h',
'core/shadow.cc',
'core/shadow.h',
'core/shadow_controller.cc',
'core/shadow_controller.h',
'core/shadow_types.cc',
'core/shadow_types.h',
'core/transient_window_controller.cc',
'core/transient_window_controller.h',
'core/transient_window_manager.cc',
'core/transient_window_manager.h',
'core/transient_window_observer.h',
'core/transient_window_stacking_client.cc',
'core/transient_window_stacking_client.h',
'core/user_activity_detector.cc',
'core/user_activity_detector.h',
'core/user_activity_observer.h',
'core/visibility_controller.cc',
'core/visibility_controller.h',
'core/window_animations.cc',
'core/window_animations.h',
'core/window_modality_controller.cc',
'core/window_modality_controller.h',
'core/window_util.cc',
'core/window_util.h',
'core/wm_core_switches.cc',
'core/wm_core_switches.h',
'core/wm_state.cc',
'core/wm_state.h',
'wm_export.h',
],
},
{
# GN version: //ui/wm:test_support
'target_name': 'wm_test_support',
'type': 'static_library',
'dependencies': [
'../../skia/skia.gyp:skia',
'../aura/aura.gyp:aura',
'../events/events.gyp:events',
'../events/events.gyp:events_base',
],
'sources': [
'test/wm_test_helper.cc',
'test/wm_test_helper.h',
],
},
{
# GN version: //ui/wm:wm_unittests
'target_name': 'wm_unittests',
'type': 'executable',
'dependencies': [
'../../base/base.gyp:base',
'../../base/base.gyp:test_support_base',
'../../skia/skia.gyp:skia',
'../../testing/gtest.gyp:gtest',
'../aura/aura.gyp:aura',
'../aura/aura.gyp:aura_test_support',
'../base/ui_base.gyp:ui_base',
'../compositor/compositor.gyp:compositor',
'../events/events.gyp:events',
'../events/events.gyp:events_base',
'../events/platform/events_platform.gyp:events_platform',
'../gfx/gfx.gyp:gfx',
'../gfx/gfx.gyp:gfx_geometry',
'wm',
'wm_test_support',
],
'sources': [
'test/run_all_unittests.cc',
'core/compound_event_filter_unittest.cc',
'core/cursor_manager_unittest.cc',
'core/focus_controller_unittest.cc',
'core/input_method_event_filter_unittest.cc',
'core/image_grid_unittest.cc',
'core/nested_accelerator_controller_unittest.cc',
'core/shadow_controller_unittest.cc',
'core/shadow_unittest.cc',
'core/transient_window_manager_unittest.cc',
'core/transient_window_stacking_client_unittest.cc',
'core/user_activity_detector_unittest.cc',
'core/visibility_controller_unittest.cc',
'core/window_animations_unittest.cc',
'core/window_util_unittest.cc',
],
},
],
}
|
# -*- coding: utf-8 -*-
# Import Python libs
from __future__ import absolute_import
# Import Salt Libs
from salt.modules import win_dism as dism
# Import Salt Testing Libs
from salttesting import TestCase
from salttesting.helpers import ensure_in_syspath
from salttesting.mock import (
MagicMock,
patch
)
ensure_in_syspath('../../')
dism.__salt__ = {}
dism.__grains__ = {}
class WinDismTestCase(TestCase):
def test_add_capability(self):
'''
Test installing a capability with DISM
'''
mock = MagicMock()
with patch.dict(dism.__salt__, {'cmd.run_all': mock}):
with patch.dict(dism.__grains__, {'osversion': 10}):
dism.add_capability("test")
mock.assert_called_once_with(
['DISM', '/Quiet', '/Online', '/Add-Capability',
'/CapabilityName:test', '/NoRestart'])
def test_add_capability_with_extras(self):
'''
Test installing a capability with DISM
'''
mock = MagicMock()
with patch.dict(dism.__salt__, {'cmd.run_all': mock}):
with patch.dict(dism.__grains__, {'osversion': 10}):
dism.add_capability("test", "life", True)
mock.assert_called_once_with(
['DISM', '/Quiet', '/Online', '/Add-Capability',
'/CapabilityName:test', '/Source:life', '/LimitAccess',
'/NoRestart'])
def test_remove_capability(self):
'''
Test uninstalling a capability with DISM
'''
mock = MagicMock()
with patch.dict(dism.__salt__, {'cmd.run_all': mock}):
with patch.dict(dism.__grains__, {'osversion': 10}):
dism.remove_capability("test")
mock.assert_called_once_with(
['DISM', '/Quiet', '/Online', '/Remove-Capability',
'/CapabilityName:test', '/NoRestart'])
def test_get_capabilities(self):
'''
Test getting all the capabilities
'''
capabilities = "Capability Identity : Capa1\r\n State : Installed\r\n" \
"Capability Identity : Capa2\r\n State : Disabled\r\n"
mock = MagicMock(return_value=capabilities)
with patch.dict(dism.__salt__, {'cmd.run': mock}):
with patch.dict(dism.__grains__, {'osversion': 10}):
out = dism.get_capabilities()
mock.assert_called_once_with(
['DISM', '/Online', '/Get-Capabilities'])
self.assertEqual(out, ['Capa1', 'Capa2'])
def test_installed_capabilities(self):
'''
Test getting all the installed capabilities
'''
capabilities = "Capability Identity : Capa1\r\n State : Installed\r\n" \
"Capability Identity : Capa2\r\n State : Disabled\r\n"
mock = MagicMock(return_value=capabilities)
with patch.dict(dism.__salt__, {'cmd.run': mock}):
with patch.dict(dism.__grains__, {'osversion': 10}):
out = dism.installed_capabilities()
mock.assert_called_once_with(
['DISM', '/Online', '/Get-Capabilities'])
self.assertEqual(out, ["Capa1"])
def test_available_capabilities(self):
'''
Test getting all the available capabilities
'''
capabilities = "Capability Identity : Capa1\r\n State : Installed\r\n" \
"Capability Identity : Capa2\r\n State : Not Present\r\n"
mock = MagicMock(return_value=capabilities)
with patch.dict(dism.__salt__, {'cmd.run': mock}):
with patch.dict(dism.__grains__, {'osversion': 10}):
out = dism.available_capabilities()
mock.assert_called_once_with(
['DISM', '/Online', '/Get-Capabilities'])
self.assertEqual(out, ["Capa2"])
def test_add_feature(self):
'''
Test installing a feature with DISM
'''
mock = MagicMock()
with patch.dict(dism.__salt__, {'cmd.run_all': mock}):
dism.add_feature("test")
mock.assert_called_once_with(
['DISM', '/Quiet', '/Online', '/Enable-Feature',
'/FeatureName:test', '/NoRestart'])
def test_add_feature_with_extras(self):
'''
Test installing a feature with DISM
'''
mock = MagicMock()
with patch.dict(dism.__salt__, {'cmd.run_all': mock}):
dism.add_feature('sponge', 'bob', 'C:\\temp', True, True)
mock.assert_called_once_with(
['DISM', '/Quiet', '/Online', '/Enable-Feature',
'/FeatureName:sponge', '/PackageName:bob', '/Source:C:\\temp',
'/LimitAccess', '/All', '/NoRestart'])
def test_remove_feature(self):
'''
Test uninstalling a capability with DISM
'''
mock = MagicMock()
with patch.dict(dism.__salt__, {'cmd.run_all': mock}):
dism.remove_feature("test")
mock.assert_called_once_with(
['DISM', '/Quiet', '/Online', '/Disable-Feature',
'/FeatureName:test', '/NoRestart'])
def test_remove_feature_with_extras(self):
'''
Test uninstalling a capability with DISM
'''
mock = MagicMock()
with patch.dict(dism.__salt__, {'cmd.run_all': mock}):
dism.remove_feature('sponge', True)
mock.assert_called_once_with(
['DISM', '/Quiet', '/Online', '/Disable-Feature',
'/FeatureName:sponge', '/Remove', '/NoRestart'])
def test_get_features(self):
'''
Test getting all the features
'''
features = "Feature Name : Capa1\r\n State : Enabled\r\n" \
"Feature Name : Capa2\r\n State : Disabled\r\n"
mock = MagicMock(return_value=features)
with patch.dict(dism.__salt__, {'cmd.run': mock}):
out = dism.get_features()
mock.assert_called_once_with(['DISM', '/Online', '/Get-Features'])
self.assertEqual(out, ['Capa1', 'Capa2'])
def test_installed_features(self):
'''
Test getting all the installed features
'''
features = "Feature Name : Capa1\r\n State : Enabled\r\n" \
"Feature Name : Capa2\r\n State : Disabled\r\n"
mock = MagicMock(return_value=features)
with patch.dict(dism.__salt__, {'cmd.run': mock}):
out = dism.installed_features()
mock.assert_called_once_with(['DISM', '/Online', '/Get-Features'])
self.assertEqual(out, ["Capa1"])
def test_available_features(self):
'''
Test getting all the available features
'''
features = "Feature Name : Capa1\r\n State : Enabled\r\n" \
"Feature Name : Capa2\r\n State : Disabled\r\n"
mock = MagicMock(return_value=features)
with patch.dict(dism.__salt__, {'cmd.run': mock}):
out = dism.available_features()
mock.assert_called_once_with(['DISM', '/Online', '/Get-Features'])
self.assertEqual(out, ["Capa2"])
def test_add_package(self):
'''
Test installing a package with DISM
'''
mock = MagicMock()
with patch.dict(dism.__salt__, {'cmd.run_all': mock}):
dism.add_package("test")
mock.assert_called_once_with(
['DISM', '/Quiet', '/Online', '/Add-Package',
'/PackagePath:test', '/NoRestart'])
def test_add_package_with_extras(self):
'''
Test installing a package with DISM
'''
mock = MagicMock()
with patch.dict(dism.__salt__, {'cmd.run_all': mock}):
dism.add_package('sponge', True, True)
mock.assert_called_once_with(
['DISM', '/Quiet', '/Online', '/Add-Package',
'/PackagePath:sponge', '/IgnoreCheck', '/PreventPending',
'/NoRestart'])
def test_remove_package(self):
'''
Test uninstalling a package with DISM
'''
mock = MagicMock()
with patch.dict(dism.__salt__, {'cmd.run_all': mock}):
dism.remove_package("test")
mock.assert_called_once_with(
['DISM', '/Quiet', '/Online', '/Remove-Package', '/NoRestart',
'/PackagePath:test'])
def test_installed_packages(self):
'''
Test getting all the installed features
'''
features = "Package Identity : Capa1\r\n State : Installed\r\n" \
"Package Identity : Capa2\r\n State : Installed\r\n"
mock = MagicMock(return_value=features)
with patch.dict(dism.__salt__, {'cmd.run': mock}):
out = dism.installed_packages()
mock.assert_called_once_with(['DISM', '/Online', '/Get-Packages'])
self.assertEqual(out, ['Capa1', 'Capa2'])
if __name__ == '__main__':
from integration import run_tests
run_tests(WinDismTestCase, needs_daemon=False)
|
# scripts/apply_def_template.py
#!/usr/bin/env python3
# Copyright 2020 Efabless Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import re
import click
import shutil
import argparse
import subprocess
@click.command()
@click.option("-t", "--def-template", "templateDEF", required=True, help="Template DEF")
@click.argument("userDEF")
def cli(templateDEF, userDEF):
scriptsDir = os.path.dirname(__file__)
def remove_power_pins(DEF):
templateDEFOpener = open(DEF,"r")
if templateDEFOpener.mode == 'r':
templateDEFSections =templateDEFOpener.read().split("PINS")
templateDEFOpener.close()
PINS = templateDEFSections[1].split("- ")
OUT_PINS=[" ;"]
cnt = 0
for pin in PINS[1:]:
if pin.find("USE GROUND") + pin.find("USE POWER") == -2:
cnt+=1
OUT_PINS.append(pin)
OUT_PINS[0] = " "+str(cnt) + OUT_PINS[0] + PINS[0].split(";")[1]
OUT_PINS[-1] = OUT_PINS[-1].replace("END ", "")
OUT_PINS[-1] = OUT_PINS[-1] + "END "
templateDEFSections[1] = "- ".join(OUT_PINS)
templateDEFOpener = open(DEF,"w")
templateDEFOpener.write("PINS".join(templateDEFSections))
templateDEFOpener.close()
newTemplateDEF = f"{userDEF}.template.tmp"
shutil.copy(templateDEF, newTemplateDEF)
templateDEF = newTemplateDEF
remove_power_pins(templateDEF)
subprocess.check_output([
"openroad",
"-python",
f"{scriptsDir}/defutil.py",
"--output", userDEF,
"--input-lef", "/dev/null",
userDEF, templateDEF
], stderr=subprocess.PIPE)
#read template Def
templateDEFOpener = open(templateDEF,"r")
if templateDEFOpener.mode == 'r':
templateDEFContent =templateDEFOpener.read()
templateDEFOpener.close()
#read user Def
userDEFOpener = open(userDEF,"r")
if userDEFOpener.mode == 'r':
userDEFContent =userDEFOpener.read()
userDEFOpener.close()
def copyStringWithWord(word, f_rom, t_o):
pattern = re.compile(r'\b%s\b\s*\([^)]*\)\s*\([^)]*\)' % word)
instances = re.findall(pattern, f_rom)
if len(instances) == 1:
str_from = instances[0]
tmp = re.sub(pattern, str_from, t_o)
return tmp
return None
# Copy DIEAREA
word='DIEAREA'
userDEFContent = copyStringWithWord(word, templateDEFContent, userDEFContent)
if userDEFContent is not None:
userDEFOpener = open(userDEF,"w")
userDEFOpener.write(userDEFContent)
userDEFOpener.close()
else:
raise Exception("DIEAREA not found in DEF")
if __name__ == '__main__':
cli()
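# Illustrative invocation (hypothetical file names): strips power pins from a
# copy of the template DEF, applies it to the user DEF via defutil.py, then
# copies the template's DIEAREA into the user DEF in place.
#
#     python3 scripts/apply_def_template.py --def-template template.def user.def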
|
from typing import Callable, Iterable, List, Tuple, Optional, Any, Dict, Hashable
import logging
from multiprocessing import TimeoutError
import os
import time
import collections
import threading
import queue
import copy
import gc
import sys
import itertools
try:
from joblib.parallel import BatchedCalls, parallel_backend
from joblib._parallel_backends import SafeFunction
except ImportError:
BatchedCalls = None
parallel_backend = None
SafeFunction = None
import ray
from ray.util import log_once
logger = logging.getLogger(__name__)
RAY_ADDRESS_ENV = "RAY_ADDRESS"
def _put_in_dict_registry(
obj: Any, registry_hashable: Dict[Hashable, ray.ObjectRef]
) -> ray.ObjectRef:
if obj not in registry_hashable:
ret = ray.put(obj)
registry_hashable[obj] = ret
else:
ret = registry_hashable[obj]
return ret
def _put_in_list_registry(
obj: Any, registry: List[Tuple[Any, ray.ObjectRef]]
) -> ray.ObjectRef:
try:
ret = next((ref for o, ref in registry if o is obj))
except StopIteration:
ret = ray.put(obj)
registry.append((obj, ret))
return ret
def ray_put_if_needed(
obj: Any,
registry: Optional[List[Tuple[Any, ray.ObjectRef]]] = None,
registry_hashable: Optional[Dict[Hashable, ray.ObjectRef]] = None,
) -> ray.ObjectRef:
"""ray.put obj in object store if it's not an ObjRef and bigger than 100 bytes,
with support for list and dict registries"""
if isinstance(obj, ray.ObjectRef) or sys.getsizeof(obj) < 100:
return obj
ret = obj
if registry_hashable is not None:
try:
ret = _put_in_dict_registry(obj, registry_hashable)
except TypeError:
if registry is not None:
ret = _put_in_list_registry(obj, registry)
elif registry is not None:
ret = _put_in_list_registry(obj, registry)
return ret
def ray_get_if_needed(obj: Any) -> Any:
"""If obj is an ObjectRef, do ray.get, otherwise return obj"""
if isinstance(obj, ray.ObjectRef):
return ray.get(obj)
return obj
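# Illustrative sketch (hypothetical objects; requires ray.init() to have been
# called): the registries let a repeated large argument be put into the object
# store once and reused by reference afterwards.
#
#     registry, registry_hashable = [], {}
#     big_list = list(range(100_000))          # unhashable -> list registry
#     ref1 = ray_put_if_needed(big_list, registry, registry_hashable)
#     ref2 = ray_put_if_needed(big_list, registry, registry_hashable)
#     assert ref1 is ref2
#     assert ray_get_if_needed(ref1) == big_list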
if BatchedCalls is not None:
class RayBatchedCalls(BatchedCalls):
"""Joblib's BatchedCalls with basic Ray object store management
This functionality is provided through the put_items_in_object_store,
which uses external registries (list and dict) containing objects
and their ObjectRefs."""
def put_items_in_object_store(
self,
registry: Optional[List[Tuple[Any, ray.ObjectRef]]] = None,
registry_hashable: Optional[Dict[Hashable, ray.ObjectRef]] = None,
):
"""Puts all applicable (kw)args in self.items in object store
Takes two registries - list for unhashable objects and dict
for hashable objects. The registries are a part of a Pool object.
The method iterates through all entries in items list (usually,
there will be only one, but the number depends on joblib Parallel
settings) and puts all of the args and kwargs into the object
store, updating the registries.
If an arg or kwarg is already in a registry, it will not be
put again, and instead, the cached object ref will be used."""
new_items = []
for func, args, kwargs in self.items:
args = [
ray_put_if_needed(arg, registry, registry_hashable) for arg in args
]
kwargs = {
k: ray_put_if_needed(v, registry, registry_hashable)
for k, v in kwargs.items()
}
new_items.append((func, args, kwargs))
self.items = new_items
def __call__(self):
# Exactly the same as in BatchedCalls, with the
# difference being that it gets args and kwargs from
# object store (which have been put in there by
# put_items_in_object_store)
# Set the default nested backend to self._backend but do
# not set the change the default number of processes to -1
with parallel_backend(self._backend, n_jobs=self._n_jobs):
return [
func(
*[ray_get_if_needed(arg) for arg in args],
**{k: ray_get_if_needed(v) for k, v in kwargs.items()},
)
for func, args, kwargs in self.items
]
def __reduce__(self):
# Exactly the same as in BatchedCalls, with the
# difference being that it returns RayBatchedCalls
# instead
if self._reducer_callback is not None:
self._reducer_callback()
# no need pickle the callback.
return (
RayBatchedCalls,
(self.items, (self._backend, self._n_jobs), None, self._pickle_cache),
)
else:
RayBatchedCalls = None
# Helper function to divide a by b and round the result up.
def div_round_up(a, b):
return -(-a // b)
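# e.g. div_round_up(10, 4) == 3 and div_round_up(8, 4) == 2; the double
# negation rounds up using floor division, without going through floats.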
class PoolTaskError(Exception):
def __init__(self, underlying):
self.underlying = underlying
class ResultThread(threading.Thread):
"""Thread that collects results from distributed actors.
It winds down when either:
- A pre-specified number of objects has been processed
- When the END_SENTINEL (submitted through self.add_object_ref())
has been received and all objects received before that have been
processed.
Initialize the thread with total_object_refs = float('inf') to wait for the
END_SENTINEL.
Args:
object_refs (List[RayActorObjectRefs]): ObjectRefs to Ray Actor calls.
Thread tracks whether they are ready. More ObjectRefs may be added
with add_object_ref (or _add_object_ref internally) until the object
count reaches total_object_refs.
single_result (bool): Should be True if the thread is managing a function
with a single result (like apply_async). False if the thread is managing
a function with a List of results.
callback (Callable): called only once at the end of the thread
if no results were errors. If single_result=True, and result is
not an error, callback is invoked with the result as the only
argument. If single_result=False, callback is invoked with
a list of all the results as the only argument.
error_callback (Callable): called only once on the first result
that errors. Should take an Exception as the only argument.
If no result errors, this callback is not called.
total_object_refs (int): Number of ObjectRefs that this thread
expects to be ready. May be more than len(object_refs) since
more ObjectRefs can be submitted after the thread starts.
If None, defaults to len(object_refs). If float("inf"), thread runs
until END_SENTINEL (submitted through self.add_object_ref())
has been received and all objects received before that have
been processed.
"""
END_SENTINEL = None
def __init__(
self,
object_refs,
single_result=False,
callback=None,
error_callback=None,
total_object_refs=None,
):
threading.Thread.__init__(self, daemon=True)
self._got_error = False
self._object_refs = []
self._num_ready = 0
self._results = []
self._ready_index_queue = queue.Queue()
self._single_result = single_result
self._callback = callback
self._error_callback = error_callback
self._total_object_refs = total_object_refs or len(object_refs)
self._indices = {}
# Thread-safe queue used to add ObjectRefs to fetch after creating
# this thread (used to lazily submit for imap and imap_unordered).
self._new_object_refs = queue.Queue()
for object_ref in object_refs:
self._add_object_ref(object_ref)
def _add_object_ref(self, object_ref):
self._indices[object_ref] = len(self._object_refs)
self._object_refs.append(object_ref)
self._results.append(None)
def add_object_ref(self, object_ref):
self._new_object_refs.put(object_ref)
def run(self):
unready = copy.copy(self._object_refs)
aggregated_batch_results = []
# Run for a specific number of objects if self._total_object_refs is finite.
# Otherwise, process all objects received prior to the stop signal, given by
# self.add_object(END_SENTINEL).
while self._num_ready < self._total_object_refs:
# Get as many new IDs from the queue as possible without blocking,
# unless we have no IDs to wait on, in which case we block.
while True:
try:
block = len(unready) == 0
new_object_ref = self._new_object_refs.get(block=block)
if new_object_ref is self.END_SENTINEL:
# Receiving the END_SENTINEL object is the signal to stop.
# Store the total number of objects.
self._total_object_refs = len(self._object_refs)
else:
self._add_object_ref(new_object_ref)
unready.append(new_object_ref)
except queue.Empty:
# queue.Empty means no result was retrieved if block=False.
break
[ready_id], unready = ray.wait(unready, num_returns=1)
try:
batch = ray.get(ready_id)
except ray.exceptions.RayError as e:
batch = [e]
# The exception callback is called only once on the first result
# that errors. If no result errors, it is never called.
if not self._got_error:
for result in batch:
if isinstance(result, Exception):
self._got_error = True
if self._error_callback is not None:
self._error_callback(result)
break
else:
aggregated_batch_results.append(result)
self._num_ready += 1
self._results[self._indices[ready_id]] = batch
self._ready_index_queue.put(self._indices[ready_id])
# The regular callback is called only once on the entire List of
# results as long as none of the results were errors. If any results
# were errors, the regular callback is never called; instead, the
# exception callback is called on the first erroring result.
#
# This callback is called outside the while loop to ensure that it's
# called on the entire list of results, not just a single batch.
if not self._got_error and self._callback is not None:
if not self._single_result:
self._callback(aggregated_batch_results)
else:
# On a thread handling a function with a single result
# (e.g. apply_async), we call the callback on just that result
# instead of on a list encapsulating that result
self._callback(aggregated_batch_results[0])
def got_error(self):
# Should only be called after the thread finishes.
return self._got_error
def result(self, index):
# Should only be called on results that are ready.
return self._results[index]
def results(self):
# Should only be called after the thread finishes.
return self._results
def next_ready_index(self, timeout=None):
try:
return self._ready_index_queue.get(timeout=timeout)
except queue.Empty:
# queue.Queue signals a timeout by raising queue.Empty.
raise TimeoutError
class AsyncResult:
"""An asynchronous interface to task results.
This should not be constructed directly.
"""
def __init__(
self, chunk_object_refs, callback=None, error_callback=None, single_result=False
):
self._single_result = single_result
self._result_thread = ResultThread(
chunk_object_refs, single_result, callback, error_callback
)
self._result_thread.start()
def wait(self, timeout=None):
"""
Returns once the result is ready or the timeout expires (does not
raise TimeoutError).
Args:
timeout: timeout in milliseconds.
"""
self._result_thread.join(timeout)
def get(self, timeout=None):
self.wait(timeout)
if self._result_thread.is_alive():
raise TimeoutError
results = []
for batch in self._result_thread.results():
for result in batch:
if isinstance(result, PoolTaskError):
raise result.underlying
elif isinstance(result, Exception):
raise result
results.extend(batch)
if self._single_result:
return results[0]
return results
def ready(self):
"""
Returns true if the result is ready, else false if the tasks are still
running.
"""
return not self._result_thread.is_alive()
def successful(self):
"""
Returns true if none of the submitted tasks errored, else false. Should
only be called once the result is ready (can be checked using `ready`).
"""
if not self.ready():
raise ValueError(f"{self!r} not ready")
return not self._result_thread.got_error()
class IMapIterator:
"""Base class for OrderedIMapIterator and UnorderedIMapIterator."""
def __init__(self, pool, func, iterable, chunksize=None):
self._pool = pool
self._func = func
self._next_chunk_index = 0
self._finished_iterating = False
# List of bools indicating if the given chunk is ready or not for all
# submitted chunks. Ordering mirrors that in the in the ResultThread.
self._submitted_chunks = []
self._ready_objects = collections.deque()
try:
self._iterator = iter(iterable)
except TypeError:
# for compatibility with prior releases, encapsulate non-iterable in a list
iterable = [iterable]
self._iterator = iter(iterable)
if isinstance(iterable, collections.abc.Iterator):
# Got iterator (which has no len() function).
# Make default chunksize 1 instead of using _calculate_chunksize().
# Indicate unknown queue length, requiring explicit stopping.
self._chunksize = chunksize or 1
result_list_size = float("inf")
else:
self._chunksize = chunksize or pool._calculate_chunksize(iterable)
result_list_size = div_round_up(len(iterable), self._chunksize)
self._result_thread = ResultThread([], total_object_refs=result_list_size)
self._result_thread.start()
for _ in range(len(self._pool._actor_pool)):
self._submit_next_chunk()
def _submit_next_chunk(self):
# The full iterable has already been submitted, so no-op.
if self._finished_iterating:
return
actor_index = len(self._submitted_chunks) % len(self._pool._actor_pool)
chunk_iterator = itertools.islice(self._iterator, self._chunksize)
# Check whether we have run out of samples.
# This consumes the original iterator, so we convert to a list and back
chunk_list = list(chunk_iterator)
if len(chunk_list) < self._chunksize:
# Reached end of self._iterator
self._finished_iterating = True
if len(chunk_list) == 0:
# Nothing to do, return.
return
chunk_iterator = iter(chunk_list)
new_chunk_id = self._pool._submit_chunk(
self._func, chunk_iterator, self._chunksize, actor_index
)
self._submitted_chunks.append(False)
# Wait for the result
self._result_thread.add_object_ref(new_chunk_id)
# If we submitted the final chunk, notify the result thread
if self._finished_iterating:
self._result_thread.add_object_ref(ResultThread.END_SENTINEL)
def __iter__(self):
return self
def __next__(self):
return self.next()
def next(self):
# Should be implemented by subclasses.
raise NotImplementedError
class OrderedIMapIterator(IMapIterator):
"""Iterator to the results of tasks submitted using `imap`.
The results are returned in the same order that they were submitted, even
if they don't finish in that order. Only one batch of tasks per actor
process is submitted at a time - the rest are submitted as results come in.
Should not be constructed directly.
"""
def next(self, timeout=None):
if len(self._ready_objects) == 0:
if self._finished_iterating and (
self._next_chunk_index == len(self._submitted_chunks)
):
# Finish when all chunks have been dispatched and processed
# Notify the calling process that the work is done.
raise StopIteration
# This loop will break when the next index in order is ready or
# self._result_thread.next_ready_index() raises a timeout.
index = -1
while index != self._next_chunk_index:
start = time.time()
index = self._result_thread.next_ready_index(timeout=timeout)
self._submit_next_chunk()
self._submitted_chunks[index] = True
if timeout is not None:
timeout = max(0, timeout - (time.time() - start))
while (
self._next_chunk_index < len(self._submitted_chunks)
and self._submitted_chunks[self._next_chunk_index]
):
for result in self._result_thread.result(self._next_chunk_index):
self._ready_objects.append(result)
self._next_chunk_index += 1
return self._ready_objects.popleft()
class UnorderedIMapIterator(IMapIterator):
"""Iterator to the results of tasks submitted using `imap`.
The results are returned in the order that they finish. Only one batch of
tasks per actor process is submitted at a time - the rest are submitted as
results come in.
Should not be constructed directly.
"""
def next(self, timeout=None):
if len(self._ready_objects) == 0:
if self._finished_iterating and (
self._next_chunk_index == len(self._submitted_chunks)
):
# Finish when all chunks have been dispatched and processed
# Notify the calling process that the work is done.
raise StopIteration
index = self._result_thread.next_ready_index(timeout=timeout)
self._submit_next_chunk()
for result in self._result_thread.result(index):
self._ready_objects.append(result)
self._next_chunk_index += 1
return self._ready_objects.popleft()
@ray.remote(num_cpus=0)
class PoolActor:
"""Actor used to process tasks submitted to a Pool."""
def __init__(self, initializer=None, initargs=None):
if initializer:
initargs = initargs or ()
initializer(*initargs)
def ping(self):
# Used to wait for this actor to be initialized.
pass
def run_batch(self, func, batch):
results = []
for args, kwargs in batch:
args = args or ()
kwargs = kwargs or {}
try:
results.append(func(*args, **kwargs))
except Exception as e:
results.append(PoolTaskError(e))
return results
# https://docs.python.org/3/library/multiprocessing.html#module-multiprocessing.pool
class Pool:
"""A pool of actor processes that is used to process tasks in parallel.
Args:
processes: number of actor processes to start in the pool. Defaults to
the number of cores in the Ray cluster if one is already running,
otherwise the number of cores on this machine.
initializer: function to be run in each actor when it starts up.
initargs: iterable of arguments to the initializer function.
maxtasksperchild: maximum number of tasks to run in each actor process.
After a process has executed this many tasks, it will be killed and
replaced with a new one.
ray_address: address of the Ray cluster to run on. If None, a new local
Ray cluster will be started on this machine. Otherwise, this will
be passed to `ray.init()` to connect to a running cluster. This may
also be specified using the `RAY_ADDRESS` environment variable.
ray_remote_args: arguments used to configure the Ray Actors making up
the pool.
"""
def __init__(
self,
processes: Optional[int] = None,
initializer: Optional[Callable] = None,
initargs: Optional[Iterable] = None,
maxtasksperchild: Optional[int] = None,
context: Any = None,
ray_address: Optional[str] = None,
ray_remote_args: Optional[Dict[str, Any]] = None,
):
self._closed = False
self._initializer = initializer
self._initargs = initargs
self._maxtasksperchild = maxtasksperchild or -1
self._actor_deletion_ids = []
self._registry: List[Tuple[Any, ray.ObjectRef]] = []
self._registry_hashable: Dict[Hashable, ray.ObjectRef] = {}
self._current_index = 0
self._ray_remote_args = ray_remote_args or {}
self._pool_actor = None
if context and log_once("context_argument_warning"):
logger.warning(
"The 'context' argument is not supported using "
"ray. Please refer to the documentation for how "
"to control ray initialization."
)
processes = self._init_ray(processes, ray_address)
self._start_actor_pool(processes)
def _init_ray(self, processes=None, ray_address=None):
# Initialize ray. If ray is already initialized, we do nothing.
# Else, the priority is:
# ray_address argument > RAY_ADDRESS > start new local cluster.
if not ray.is_initialized():
# Cluster mode.
if ray_address is None and RAY_ADDRESS_ENV in os.environ:
logger.info(
"Connecting to ray cluster at address='{}'".format(
os.environ[RAY_ADDRESS_ENV]
)
)
ray.init()
elif ray_address is not None:
logger.info(f"Connecting to ray cluster at address='{ray_address}'")
ray.init(address=ray_address)
# Local mode.
else:
logger.info("Starting local ray cluster")
ray.init(num_cpus=processes)
ray_cpus = int(ray.state.cluster_resources()["CPU"])
if processes is None:
processes = ray_cpus
if processes <= 0:
raise ValueError("Processes in the pool must be >0.")
if ray_cpus < processes:
raise ValueError(
"Tried to start a pool with {} processes on an "
"existing ray cluster, but there are only {} "
"CPUs in the ray cluster.".format(processes, ray_cpus)
)
return processes
def _start_actor_pool(self, processes):
self._pool_actor = None
self._actor_pool = [self._new_actor_entry() for _ in range(processes)]
ray.get([actor.ping.remote() for actor, _ in self._actor_pool])
def _wait_for_stopping_actors(self, timeout=None):
if len(self._actor_deletion_ids) == 0:
return
if timeout is not None:
timeout = float(timeout)
_, deleting = ray.wait(
self._actor_deletion_ids,
num_returns=len(self._actor_deletion_ids),
timeout=timeout,
)
self._actor_deletion_ids = deleting
def _stop_actor(self, actor):
# Check and clean up any outstanding IDs corresponding to deletions.
self._wait_for_stopping_actors(timeout=0.0)
# The deletion task will block until the actor has finished executing
# all pending tasks.
self._actor_deletion_ids.append(actor.__ray_terminate__.remote())
def _new_actor_entry(self):
# NOTE(edoakes): The initializer function can't currently be used to
# modify the global namespace (e.g., import packages or set globals)
# due to a limitation in cloudpickle.
# Cache the PoolActor with options
if not self._pool_actor:
self._pool_actor = PoolActor.options(**self._ray_remote_args)
return (self._pool_actor.remote(self._initializer, self._initargs), 0)
def _next_actor_index(self):
if self._current_index == len(self._actor_pool) - 1:
self._current_index = 0
else:
self._current_index += 1
return self._current_index
# Batch should be a list of tuples: (args, kwargs).
def _run_batch(self, actor_index, func, batch):
actor, count = self._actor_pool[actor_index]
object_ref = actor.run_batch.remote(func, batch)
count += 1
assert self._maxtasksperchild == -1 or count <= self._maxtasksperchild
if count == self._maxtasksperchild:
self._stop_actor(actor)
actor, count = self._new_actor_entry()
self._actor_pool[actor_index] = (actor, count)
return object_ref
def apply(
self,
func: Callable,
args: Optional[Tuple] = None,
kwargs: Optional[Dict] = None,
):
"""Run the given function on a random actor process and return the
result synchronously.
Args:
func: function to run.
args: optional arguments to the function.
kwargs: optional keyword arguments to the function.
Returns:
The result.
"""
return self.apply_async(func, args, kwargs).get()
def apply_async(
self,
func: Callable,
args: Optional[Tuple] = None,
kwargs: Optional[Dict] = None,
callback: Callable[[Any], None] = None,
error_callback: Callable[[Exception], None] = None,
):
"""Run the given function on a random actor process and return an
asynchronous interface to the result.
Args:
func: function to run.
args: optional arguments to the function.
kwargs: optional keyword arguments to the function.
callback: callback to be executed on the result once it is finished
only if it succeeds.
            error_callback: callback to be executed on the result once it is
                finished, only if the task errors. The exception raised by
                the task will be passed as the only argument to the callback.
Returns:
AsyncResult containing the result.
"""
self._check_running()
func = self._convert_to_ray_batched_calls_if_needed(func)
object_ref = self._run_batch(self._next_actor_index(), func, [(args, kwargs)])
return AsyncResult([object_ref], callback, error_callback, single_result=True)
def _convert_to_ray_batched_calls_if_needed(self, func: Callable) -> Callable:
"""Convert joblib's BatchedCalls to RayBatchedCalls for ObjectRef caching.
This converts joblib's BatchedCalls callable, which is a collection of
        functions with their args and kwargs to be run sequentially in an
Actor, to a RayBatchedCalls callable, which provides identical
functionality in addition to a method which ensures that common
args and kwargs are put into the object store just once, saving time
        and memory. That method is then run.
If func is not a BatchedCalls instance, it is returned without changes.
The ObjectRefs are cached inside two registries (_registry and
_registry_hashable), which are common for the entire Pool and are
cleaned on close."""
if RayBatchedCalls is None:
return func
        original_func = func
# SafeFunction is a Python 2 leftover and can be
# safely removed.
if isinstance(func, SafeFunction):
func = func.func
if isinstance(func, BatchedCalls):
func = RayBatchedCalls(
func.items,
(func._backend, func._n_jobs),
func._reducer_callback,
func._pickle_cache,
)
# go through all the items and replace args and kwargs with
# ObjectRefs, caching them in registries
func.put_items_in_object_store(self._registry, self._registry_hashable)
else:
            func = original_func
return func
def _calculate_chunksize(self, iterable):
chunksize, extra = divmod(len(iterable), len(self._actor_pool) * 4)
if extra:
chunksize += 1
return chunksize
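    # Worked example of the heuristic above (illustrative comment, not in the
    # original source): with 100 items and an 8-actor pool,
    # divmod(100, 8 * 4) == (3, 4); the non-zero remainder bumps the chunksize
    # to 4, so the 100 items are submitted as 25 round-robin batches.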
def _submit_chunk(self, func, iterator, chunksize, actor_index, unpack_args=False):
chunk = []
while len(chunk) < chunksize:
try:
args = next(iterator)
if not unpack_args:
args = (args,)
chunk.append((args, {}))
except StopIteration:
break
# Nothing to submit. The caller should prevent this.
assert len(chunk) > 0
return self._run_batch(actor_index, func, chunk)
def _chunk_and_run(self, func, iterable, chunksize=None, unpack_args=False):
if not hasattr(iterable, "__len__"):
iterable = list(iterable)
if chunksize is None:
chunksize = self._calculate_chunksize(iterable)
iterator = iter(iterable)
chunk_object_refs = []
while len(chunk_object_refs) * chunksize < len(iterable):
actor_index = len(chunk_object_refs) % len(self._actor_pool)
chunk_object_refs.append(
self._submit_chunk(
func, iterator, chunksize, actor_index, unpack_args=unpack_args
)
)
return chunk_object_refs
def _map_async(
self,
func,
iterable,
chunksize=None,
unpack_args=False,
callback=None,
error_callback=None,
):
self._check_running()
object_refs = self._chunk_and_run(
func, iterable, chunksize=chunksize, unpack_args=unpack_args
)
return AsyncResult(object_refs, callback, error_callback)
def map(self, func: Callable, iterable: Iterable, chunksize: Optional[int] = None):
"""Run the given function on each element in the iterable round-robin
on the actor processes and return the results synchronously.
Args:
func: function to run.
iterable: iterable of objects to be passed as the sole argument to
func.
chunksize: number of tasks to submit as a batch to each actor
process. If unspecified, a suitable chunksize will be chosen.
Returns:
A list of results.
"""
return self._map_async(
func, iterable, chunksize=chunksize, unpack_args=False
).get()
def map_async(
self,
func: Callable,
iterable: Iterable,
chunksize: Optional[int] = None,
callback: Callable[[List], None] = None,
error_callback: Callable[[Exception], None] = None,
):
"""Run the given function on each element in the iterable round-robin
on the actor processes and return an asynchronous interface to the
results.
Args:
func: function to run.
iterable: iterable of objects to be passed as the only argument to
func.
chunksize: number of tasks to submit as a batch to each actor
process. If unspecified, a suitable chunksize will be chosen.
callback: Will only be called if none of the results were errors,
and will only be called once after all results are finished.
A Python List of all the finished results will be passed as the
only argument to the callback.
error_callback: callback executed on the first errored result.
The Exception raised by the task will be passed as the only
argument to the callback.
Returns:
AsyncResult
"""
return self._map_async(
func,
iterable,
chunksize=chunksize,
unpack_args=False,
callback=callback,
error_callback=error_callback,
)
def starmap(self, func, iterable, chunksize=None):
"""Same as `map`, but unpacks each element of the iterable as the
arguments to func like: [func(*args) for args in iterable].
"""
return self._map_async(
func, iterable, chunksize=chunksize, unpack_args=True
).get()
def starmap_async(
self,
func: Callable,
iterable: Iterable,
callback: Callable[[List], None] = None,
error_callback: Callable[[Exception], None] = None,
):
"""Same as `map_async`, but unpacks each element of the iterable as the
arguments to func like: [func(*args) for args in iterable].
"""
return self._map_async(
func,
iterable,
unpack_args=True,
callback=callback,
error_callback=error_callback,
)
def imap(self, func: Callable, iterable: Iterable, chunksize: Optional[int] = 1):
"""Same as `map`, but only submits one batch of tasks to each actor
process at a time.
This can be useful if the iterable of arguments is very large or each
        task's arguments consume a large amount of resources.
The results are returned in the order corresponding to their arguments
in the iterable.
Returns:
OrderedIMapIterator
"""
self._check_running()
return OrderedIMapIterator(self, func, iterable, chunksize=chunksize)
def imap_unordered(
self, func: Callable, iterable: Iterable, chunksize: Optional[int] = 1
):
"""Same as `map`, but only submits one batch of tasks to each actor
process at a time.
This can be useful if the iterable of arguments is very large or each
        task's arguments consume a large amount of resources.
The results are returned in the order that they finish.
Returns:
UnorderedIMapIterator
"""
self._check_running()
return UnorderedIMapIterator(self, func, iterable, chunksize=chunksize)
def _check_running(self):
if self._closed:
raise ValueError("Pool not running")
def __enter__(self):
self._check_running()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.terminate()
def close(self):
"""Close the pool.
Prevents any more tasks from being submitted on the pool but allows
outstanding work to finish.
"""
self._registry.clear()
self._registry_hashable.clear()
for actor, _ in self._actor_pool:
self._stop_actor(actor)
self._closed = True
gc.collect()
def terminate(self):
"""Close the pool.
Prevents any more tasks from being submitted on the pool and stops
outstanding work.
"""
if not self._closed:
self.close()
for actor, _ in self._actor_pool:
ray.kill(actor)
def join(self):
"""Wait for the actors in a closed pool to exit.
If the pool was closed using `close`, this will return once all
outstanding work is completed.
If the pool was closed using `terminate`, this will return quickly.
"""
if not self._closed:
raise ValueError("Pool is still running")
self._wait_for_stopping_actors()
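
# Minimal usage sketch of the pool API defined above. Illustrative only and
# not part of the original module; it assumes the enclosing class is exposed
# as `Pool`, as in `ray.util.multiprocessing`.
def _pool_usage_sketch():
    def square(x):
        return x * x

    with Pool(processes=4) as pool:
        print(pool.apply(square, (3,)))     # 9, runs on one actor
        print(pool.map(square, range(10)))  # results in input order
        for result in pool.imap_unordered(square, range(10), chunksize=2):
            print(result)                   # results in completion order
    pool.join()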
|
# Repository: SeitaroShinagawa/ClipBERT
from torch.optim import Adam, Adamax, SGD
from src.optimization.adamw import AdamW
def setup_optimizer(model, opts, model_type="transformer"):
"""model_type: str, one of [transformer, cnn]"""
if model_type == "transformer":
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = build_optimizer_w_lr_mul(
param_optimizer, opts.learning_rate,
opts.weight_decay, no_decay=no_decay,
lr_mul=opts.transformer_lr_mul,
lr_mul_prefix=opts.transformer_lr_mul_prefix)
if opts.optim == 'adam':
OptimCls = Adam
elif opts.optim == 'adamax':
OptimCls = Adamax
elif opts.optim == 'adamw':
OptimCls = AdamW
else:
raise ValueError('invalid optimizer')
optimizer = OptimCls(optimizer_grouped_parameters,
lr=opts.learning_rate, betas=opts.betas)
else:
assert model_type == "cnn"
parameters = list(model.named_parameters())
if opts.cnn_optim == "sgd":
optimizer_grouped_parameters = build_optimizer_w_lr_mul(
parameters, opts.cnn_learning_rate,
opts.cnn_weight_decay,
lr_mul=opts.cnn_lr_mul,
lr_mul_prefix=opts.cnn_lr_mul_prefix)
optimizer = SGD(optimizer_grouped_parameters,
lr=opts.cnn_learning_rate,
momentum=opts.cnn_sgd_momentum,
weight_decay=opts.cnn_weight_decay)
elif opts.cnn_optim == "adamw":
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = build_optimizer_w_lr_mul(
parameters, opts.cnn_learning_rate,
opts.cnn_weight_decay, no_decay=no_decay,
lr_mul=opts.cnn_lr_mul,
lr_mul_prefix=opts.cnn_lr_mul_prefix)
optimizer = AdamW(
optimizer_grouped_parameters,
lr=opts.cnn_learning_rate, betas=opts.betas)
else:
raise ValueError("Only support SGD/adamW for cnn.")
return optimizer
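
# Illustrative sketch (not part of the original module): the attributes that
# setup_optimizer reads from `opts` for the transformer branch, collected in a
# plain namespace. The concrete values are placeholders.
def _setup_optimizer_sketch(model):
    from types import SimpleNamespace
    opts = SimpleNamespace(
        learning_rate=1e-4, weight_decay=0.01, betas=(0.9, 0.98),
        optim="adamw", transformer_lr_mul=1.0, transformer_lr_mul_prefix="",
    )
    return setup_optimizer(model, opts, model_type="transformer")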
def build_optimizer_w_lr_mul(model_param_optimizer, learning_rate,
weight_decay, no_decay=[], lr_mul=1,
lr_mul_prefix=""):
# Prepare optimizer
if lr_mul_prefix == "":
param_optimizer = model_param_optimizer
param_top = []
else:
# top layer has larger learning rate
param_top = [(n, p) for n, p in model_param_optimizer
if lr_mul_prefix in n and p.requires_grad]
param_optimizer = [(n, p) for n, p in model_param_optimizer
if lr_mul_prefix not in n and p.requires_grad]
optimizer_grouped_parameters = []
if len(param_top):
optimizer_grouped_parameters.append(
{'params': [p for n, p in param_top
if not any(nd in n for nd in no_decay)],
'lr': lr_mul*learning_rate,
'weight_decay': weight_decay})
if len(no_decay):
optimizer_grouped_parameters.append(
{'params': [p for n, p in param_top
if any(nd in n for nd in no_decay)],
'lr': lr_mul*learning_rate,
'weight_decay': 0.0})
if len(param_optimizer):
optimizer_grouped_parameters.append(
{'params': [p for n, p in param_optimizer
if not any(nd in n for nd in no_decay)],
'weight_decay': weight_decay})
if len(no_decay):
optimizer_grouped_parameters.append(
{'params': [p for n, p in param_optimizer
if any(nd in n for nd in no_decay)],
'weight_decay': 0.0})
return optimizer_grouped_parameters
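
# Illustrative sketch (not part of the original module): how the grouping above
# behaves on a toy model. Parameters whose names contain lr_mul_prefix get
# lr_mul * learning_rate, parameters matching no_decay get zero weight decay,
# and everything else falls into the default groups.
def _lr_mul_grouping_sketch():
    import torch.nn as nn
    model = nn.Sequential(nn.Linear(4, 4), nn.LayerNorm(4))
    groups = build_optimizer_w_lr_mul(
        list(model.named_parameters()),
        learning_rate=1e-4,
        weight_decay=0.01,
        no_decay=['bias'],
        lr_mul=10,
        lr_mul_prefix="1.",  # matches the LayerNorm submodule in this toy model
    )
    for group in groups:
        print(len(group['params']), group.get('lr', 'base lr'), group['weight_decay'])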
def setup_e2e_optimizer(model, opts):
"""model_type: str, one of [transformer, cnn]"""
transformer_param_optimizer = [
(n, p) for n, p in list(model.named_parameters())
if "transformer" in n and p.requires_grad]
cnn_param_optimizer = [
(n, p) for n, p in list(model.named_parameters())
if "cnn" in n and p.requires_grad]
    transformer_grouped_parameters = build_e2e_optimizer_w_lr_mul(
transformer_param_optimizer,
opts.learning_rate, opts.weight_decay,
lr_mul=opts.transformer_lr_mul,
lr_mul_prefix=opts.transformer_lr_mul_prefix)
cnn_grouped_parameters = build_e2e_optimizer_w_lr_mul(
cnn_param_optimizer,
opts.cnn_learning_rate, opts.cnn_weight_decay,
lr_mul=opts.cnn_lr_mul, lr_mul_prefix=opts.cnn_lr_mul_prefix)
optimizer_grouped_parameters = []
    optimizer_grouped_parameters.extend(transformer_grouped_parameters)
optimizer_grouped_parameters.extend(cnn_grouped_parameters)
if opts.optim == 'adam':
OptimCls = Adam
elif opts.optim == 'adamax':
OptimCls = Adamax
elif opts.optim == 'adamw':
OptimCls = AdamW
else:
raise ValueError('invalid optimizer')
optimizer = OptimCls(optimizer_grouped_parameters,
lr=opts.learning_rate, betas=opts.betas)
return optimizer
def build_e2e_optimizer_w_lr_mul(
model_param_optimizer, learning_rate, weight_decay,
lr_mul=1, lr_mul_prefix=""):
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
# Prepare optimizer
if lr_mul_prefix == "":
param_optimizer = model_param_optimizer
param_top = []
else:
# top layer has larger learning rate
param_top = [(n, p) for n, p in model_param_optimizer
if lr_mul_prefix in n and p.requires_grad]
param_optimizer = [(n, p) for n, p in model_param_optimizer
if lr_mul_prefix not in n and p.requires_grad]
optimizer_grouped_parameters = [
{'params': [p for n, p in param_top
if not any(nd in n for nd in no_decay)],
'lr': lr_mul*learning_rate,
'weight_decay': weight_decay},
{'params': [p for n, p in param_top
if any(nd in n for nd in no_decay)],
'lr': lr_mul*learning_rate,
'weight_decay': 0.0},
{'params': [p for n, p in param_optimizer
if not any(nd in n for nd in no_decay)],
'weight_decay': weight_decay},
{'params': [p for n, p in param_optimizer
if any(nd in n for nd in no_decay)],
'weight_decay': 0.0}]
return optimizer_grouped_parameters
|
from torch.utils.data import Dataset
import numpy as np
import matplotlib.pyplot as plt
import utils.forward_kinematics as fk
import torch
import utils.data_utils as data_utils
import os
import pickle as pkl
class H36motion(Dataset):
def __init__(self, path_to_data, actions, input_n=10, output_n=10, dct_n=20, split=0, sample_rate=2, use_dct=True, train_3d=False):
"""
read h36m data to get the dct coefficients.
:param path_to_data:
:param actions: actions to read
:param input_n: past frame length
:param output_n: future frame length
:param dct_n: number of dct coeff. used
:param split: 0 train, 1 test, 2 validation
:param sample_rate: 2
:param data_mean: mean of expmap
:param data_std: standard deviation of expmap
"""
print('NOTE THAT WE HAVE REMOVED DATA MEAN AND DATA STD')
self.dct_n = dct_n
self.input_n = input_n
self.output_n = output_n
self.path_to_data = path_to_data
self.use_dct = use_dct
self.split = split
self.train_3d = train_3d
        subs = [[1, 6, 7, 8, 9], [5], [11]]  # train / test / validation subjects
acts = data_utils.define_actions(actions)
subjs = subs[split]
self.sequences_expmap, self.sequences_3d, self.all_seqs = self.load_data(subjs, acts, sample_rate, input_n + output_n, input_n=input_n)
self.all_seqs = np.concatenate(self.all_seqs, 0)
self.reduced_seqs_expmap = self.sequences_expmap[:,:,self.dimensions_to_use]
self.reduced_seqs_3d = self.sequences_3d[:,:,self.dimensions_to_use_3d]
if use_dct and self.train_3d:
self.input_dct_seq, self.output_dct_seq = self.get_dct(self.reduced_seqs_3d)
elif use_dct and not self.train_3d:
self.input_dct_seq, self.output_dct_seq = self.get_dct(self.reduced_seqs_expmap)
def get_dct(self, seqs, seq_are_3d=False):
if seq_are_3d:
dims_to_use = self.dimensions_to_use_3d
else:
dims_to_use = self.dimensions_to_use
seqs = seqs.transpose(0, 2, 1)
seqs = seqs.reshape(-1, self.input_n + self.output_n)
seqs = seqs.transpose()
dct_m_in, _ = data_utils.get_dct_matrix(self.input_n + self.output_n)
dct_m_out, _ = data_utils.get_dct_matrix(self.input_n + self.output_n)
# padding the observed sequence so that it has the same length as observed + future sequence
pad_idx = np.repeat([self.input_n - 1], self.output_n)
i_idx = np.append(np.arange(0, self.input_n), pad_idx)
input_dct_seq = np.matmul(dct_m_in[:self.dct_n, :], seqs[i_idx, :])
input_dct_seq = input_dct_seq.transpose().reshape([-1, len(dims_to_use), self.dct_n])
output_dct_seq = np.matmul(dct_m_out[:self.dct_n], seqs)
output_dct_seq = output_dct_seq.transpose().reshape([-1, len(dims_to_use), self.dct_n])
return input_dct_seq, output_dct_seq
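    # Illustrative note (not in the original source) on the padding in get_dct
    # above: with input_n=10 and output_n=25, i_idx is [0, 1, ..., 9] followed
    # by the index 9 repeated 25 times, so the "input" DCT is computed over a
    # 35-frame sequence whose future part is the last observed frame held
    # constant, while the "output" DCT uses the full ground-truth sequence.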
def read_sequence(self, subject, action, subaction, sample_rate):
print("Reading subject {0}, action {1}, subaction {2}".format(subject, action, subaction))
filename = '{0}/S{1}/{2}_{3}.txt'.format(self.path_to_data, subject, action, subaction)
sequence = data_utils.readCSVasFloat(filename)
sampled_sequence = sequence[::sample_rate, :]
num_frames = len(sampled_sequence)
        # return the sub-sampled sequence so that frame indices match num_frames
        return sampled_sequence, num_frames
def get_subsequence(self, sequence, num_frames, seq_len):
fs = np.arange(0, num_frames - seq_len + 1)
fs_sel = fs
for i in np.arange(seq_len - 1):
fs_sel = np.vstack((fs_sel, fs + i + 1))
fs_sel = fs_sel.transpose()
seq_sel = sequence[fs_sel, :]
return seq_sel
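    # Illustrative note (not in the original source) on get_subsequence above:
    # with num_frames=5 and seq_len=3, fs_sel becomes
    # [[0, 1, 2], [1, 2, 3], [2, 3, 4]], i.e. every sliding window of seq_len
    # consecutive frames, and seq_sel stacks the corresponding frames into an
    # array of shape (num_windows, seq_len, n_features).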
def find_indices_srnn(self, frame_num1, frame_num2, seq_len, input_n=10):
"""
Adapted from https://github.com/una-dinosauria/human-motion-prediction/blob/master/src/seq2seq_model.py#L478
        which originally comes from
        https://github.com/asheshjain399/RNNexp/blob/master/structural_rnn/CRFProblems/H3.6m/processdata.py#L325
        It is used in order to find the same action indices as in SRNN.
"""
# Used a fixed dummy seed, following
# https://github.com/asheshjain399/RNNexp/blob/srnn/structural_rnn/forecastTrajectories.py#L29
SEED = 1234567890
rng = np.random.RandomState(SEED)
T1 = frame_num1 - 150
T2 = frame_num2 - 150 # seq_len
idxo1 = None
idxo2 = None
for _ in np.arange(0, 4):
idx_ran1 = rng.randint(16, T1)
idx_ran2 = rng.randint(16, T2)
idxs1 = np.arange(idx_ran1 + 50 - input_n, idx_ran1 + 50 - input_n + seq_len)
idxs2 = np.arange(idx_ran2 + 50 - input_n, idx_ran2 + 50 - input_n + seq_len)
if idxo1 is None:
idxo1 = idxs1
idxo2 = idxs2
else:
idxo1 = np.vstack((idxo1, idxs1))
idxo2 = np.vstack((idxo2, idxs2))
return idxo1, idxo2
def load_data(self, subjects, actions, sample_rate, seq_len, input_n=10):
"""
adapted from
https://github.com/una-dinosauria/human-motion-prediction/src/data_utils.py#L216
        :param subjects: list of subject ids to load
        :param actions: list of action names to load
        :param sample_rate: frame sub-sampling rate
        :param seq_len: past frame length + future frame length
        :param input_n: past frame length
        :return: exponential-map sequences, 3d sequences, list of sampled sub-sequences
"""
cache_name = os.path.join(self.path_to_data, '_'.join(['learn_traj3', str(subjects), str(actions), str(sample_rate), str(seq_len), str(input_n)]) + '.pkl')
if os.path.isfile(cache_name):
print('loading data from cache: {}'.format(cache_name))
sequences_expmap, sequences_3d, complete_seq, sampled_seq = pkl.load(open(cache_name, 'rb'))
else:
sampled_seq, complete_seq = [], []
for subj in subjects:
for action in actions:
sequence1, num_frames1 = self.read_sequence(subj, action, 1, sample_rate)
sequence2, num_frames2 = self.read_sequence(subj, action, 2, sample_rate)
if subj == 5:
# subject 5 is the testing subject, we use a specific scheme to extract the frame idxs
# such that they are the same as in related work
fs_sel1, fs_sel2 = self.find_indices_srnn(num_frames1, num_frames2, seq_len, input_n=input_n)
seq_sel1 = sequence1[fs_sel1, :]
seq_sel2 = sequence2[fs_sel2, :]
else:
seq_sel1 = self.get_subsequence(sequence1, num_frames1, seq_len)
seq_sel2 = self.get_subsequence(sequence2, num_frames2, seq_len)
sampled_seq.append(seq_sel1), sampled_seq.append(seq_sel2)
complete_seq.append(sequence1), complete_seq.append(sequence2)
sequences_expmap = np.concatenate(sampled_seq, axis=0)
complete_seq = np.concatenate(complete_seq, axis=0)
zeroed = sequences_expmap.copy()
zeroed[:, :, 0:6] = 0
sequences_3d = H36motion.expmap2xyz(zeroed)
self.data_std = np.std(complete_seq, axis=0)
self.data_mean = np.mean(complete_seq, axis=0)
self.dimensions_to_ignore, self.dimensions_to_use = [], []
self.dimensions_to_ignore.extend(list(np.where(self.data_std < 1e-4)[0]))
self.dimensions_to_use.extend(list(np.where(self.data_std >= 1e-4)[0]))
self.data_std[self.dimensions_to_ignore] = 1.0
self.data_mean[self.dimensions_to_ignore] = 0.0
# first 6 elements are global translation and global rotation
self.dimensions_to_use = self.dimensions_to_use[6:]
joint_to_ignore_3d = np.array([0, 1, 6, 11, 16, 20, 23, 24, 28, 31])
self.dimensions_to_ignore_3d = np.concatenate((joint_to_ignore_3d * 3, joint_to_ignore_3d * 3 + 1, joint_to_ignore_3d * 3 + 2))
self.dimensions_to_use_3d = np.setdiff1d(np.arange(sequences_3d.shape[-1]), self.dimensions_to_ignore_3d)
print('Saving data to cache: {}...'.format(cache_name))
pkl.dump([sequences_expmap, sequences_3d, complete_seq, sampled_seq], open(cache_name, 'wb'))
return sequences_expmap, sequences_3d, sampled_seq
@staticmethod
def expmap2xyz(expmap):
"""
convert expmaps to joint locations
"""
shape_in = expmap.shape
if len(shape_in) == 3:
expmap = expmap.reshape(shape_in[0]*shape_in[1], -1)
parent, offset, rotInd, expmapInd = fk._some_variables()
if isinstance(expmap, torch.Tensor):
xyz = fk.fkl_torch(expmap, parent, offset, rotInd, expmapInd)
else:
xyz = fk.fkl_torch(torch.from_numpy(expmap), parent, offset, rotInd, expmapInd)
if len(shape_in) == 3:
xyz = xyz.reshape(shape_in[0], shape_in[1], -1)
return xyz
def __len__(self):
return np.shape(self.input_dct_seq)[0]
def __getitem__(self, item):
return self.input_dct_seq[item], self.output_dct_seq[item], self.all_seqs[item]
def test_visualization():
    from torch.utils.data import DataLoader
    from utils import viz  # assumed plotting helper providing plot_predictions (import path is a guess)
acts = data_utils.define_actions('walking')
data_dir = '/run/media/bob/ext/human_exponential_format/h3.6m/dataset/'
test_dataset = H36motion(path_to_data=data_dir, actions=acts[0], input_n=10, output_n=25, split=1, sample_rate=2, dct_n=35, use_dct=False, train_3d=True)
loader = DataLoader(dataset=test_dataset, batch_size=1, shuffle=True, num_workers=0, pin_memory=True)
for batch in loader:
        # __getitem__ returns (input_dct_seq, output_dct_seq, all_seqs)
        _, _, all_seqs = batch
fig = plt.figure()
ax = plt.gca(projection='3d')
plt.cla()
viz.plot_predictions(all_seqs[0, :, :], all_seqs[0, :, :], fig, ax, 'Pose', is_3d=True)
plt.pause(1)
if __name__ == '__main__':
test_visualization()
|
# Repository: corbanvilla/AlHosLetMeIn
import cv2
import numpy as np
import queue
import asyncio
import time
import findfaces
import face_recognition
from findfaces import FaceBox
from aiortc import VideoStreamTrack
from av import VideoFrame
from loguru import logger as log
from database.database import SessionLocal, engine
from database import models, crud
from recognition import cosine_similarity, find_closest_face_match
from face_box_helper import coordinates_to_face_boxs
# Initialize database
models.Base.metadata.create_all(bind=engine)
db = SessionLocal()
# Global frame variable to pass between threads
current_face = None # start w/ an empty frame
latest_frame = None
frame_lock = False # mutex-lock. More efficient than doing a deep-copy
known_faces = crud.get_all_users(db)
log.info(f'Loaded {len(known_faces)} profiles from database!')
class FaceStreamTrack(VideoStreamTrack):
"""
    A video track that returns the most recently analyzed and annotated face crop.
"""
def __init__(self, track):
super().__init__() # don't forget this!
self.track = track
self.last_frame = None
self.frame_counter = 0
self.update_frames = 2
# Start our worker thread
self.worker = asyncio.create_task(self._face_analyzer_thread())
async def recv(self):
self.frame_counter += 1
# Grab the frame
frame = await self.track.recv()
# If it's been x frames since our last update
global frame_lock
if self.frame_counter >= self.update_frames \
and not frame_lock:
# Assign it to our global variable
frame_lock = True
global latest_frame
latest_frame = frame.to_ndarray(format="bgr24")
frame_lock = False
# Reset frame counter
self.frame_counter = 0
# Return whatever we have queued up
if current_face is not None:
new_frame = VideoFrame.from_ndarray(current_face, format="bgr24")
else:
new_frame = VideoFrame(300, 300, format="bgr24")
new_frame.pts = frame.pts
new_frame.time_base = frame.time_base
return new_frame
async def _face_analyzer_thread(self):
"""
Separate worker thread to analyze last images seen
"""
log.debug("Starting face worker thread")
def reset_processed_frame():
"""
Helper function to clear our latest_frame and update lock
"""
# Blank frame
global latest_frame
latest_frame = None
# Disable lock
global frame_lock
frame_lock = False
log.debug('Frame lock: disabled!')
def get_process_frame():
"""
Helper function to get frame and enable lock if frame is not none
"""
global latest_frame
global frame_lock
if latest_frame is not None and not frame_lock:
# Enable lock
frame_lock = True
log.debug('Frame lock: enabled!')
return latest_frame
return None
while True:
img = get_process_frame()
# If we don't have any frames to analyze, sleep+reset
if img is None:
await asyncio.sleep(.1)
continue
#if not img:
            # # We received an empty frame. Release and sleep
# reset_processed_frame()
# await asyncio.sleep(.1)
# continue
# Find faces
#faces = findfaces.get_face_locations(img)
locs = face_recognition.face_locations(img, model="cnn") #[(face.top_y, face.bottom_x, face.bottom_y, face.top_x)]
faces = coordinates_to_face_boxs(locs)
log.info(f'Found {len(faces)} faces!')
# TODO - implement find largest face pattern
if len(faces) == 0:
log.info('No faces found in last frame....')
reset_processed_frame()
continue
face = faces[0]
# Get face encoding
# This is just how locations need to be formatted for this function
log.debug(f'Attempting to get encodings with coordinates: {locs}')
encodings = face_recognition.face_encodings(img, locs)
if len(encodings) == 0:
log.error(f'Face found but unable to get encoding!')
reset_processed_frame()
continue
# Grab the first/only encoding
log.debug("Got face encoding!")
encoding = encodings[0]
# Get our closest match
log.debug("Searching for closest match...")
match, score = find_closest_face_match(known_faces, encoding)
log.debug(f'Match found: {match} ({score})')
# Figure our alhosn status
alhosn_status = crud.get_alhosn_status(db, match)
log.debug(f'Profile alhosn: {alhosn_status}')
color = (0, 0, 255) # default red
if alhosn_status == "green":
color = (0, 255, 0)
elif alhosn_status == "gray":
color = (100, 100, 100)
# Image manipulation
try:
img = self._crop_face_from_image(img, face)
img = self._scale_image_to_height(img, desired_height=300)
img = self._draw_inner_rectangle(img, rgb=color)
except Exception as e:
log.error(f'Issue processing frame: {e}')
reset_processed_frame()
continue
# Update our global var
global current_face
current_face = img
# Reset mutex
reset_processed_frame()
@staticmethod
def _crop_face_from_image(img, face: FaceBox, buffer_percent=10) -> np.ndarray:
"""
Crops an image around a face.
Adds a small buffer relative to the face size.
"""
# Set aliases from our class
x1 = face.top_x
x2 = face.bottom_x
y1 = face.top_y
y2 = face.bottom_y
# Create a buffer that's 10% of face height
buffer_amount = int(buffer_percent/100 * (y2 - y1))
x1 -= buffer_amount
x2 += buffer_amount
y1 -= buffer_amount
y2 += buffer_amount
# Slice our array
# return img[x1:x2, y1:y2]
return img[y1:y2, x1:x2]
@staticmethod
def _scale_image_to_height(img, desired_height: int) -> np.ndarray:
"""
Scales an image down to a desired height.
This function makes sure to maintain the image
aspect ratio.
"""
# Get image dims
height, width, _ = img.shape
# Calculate what our scale factor should be
scale_factor = desired_height / height
# Calculate new image dimensions
# new_dims = int(width * scale_factor), int(height * scale_factor)
new_dims = 300, 300
# Do the actual resize operation
img = cv2.resize(img, new_dims, interpolation=cv2.INTER_AREA)
return img
@staticmethod
def _draw_inner_rectangle(img, buffer_percent=10, rgb=(0, 255, 0), weight=5) -> np.ndarray:
"""
Draws a rectangle inside an image.
Will indent buffer_percent according to both
the height and the width values.
"""
# Get image dims
height, width, _ = img.shape
# Calculate buffer amounts
rectangle_buffer_x = int(height * (buffer_percent/100))
rectangle_buffer_y = int(width * (buffer_percent/100))
# Bottom left is at 0+buffer
rec_x1 = rectangle_buffer_x
rec_y1 = rectangle_buffer_y
# Top right is at width-buffer:
rec_x2 = width - rectangle_buffer_x
rec_y2 = height - rectangle_buffer_y
# Draw the actual rectangle
cv2.rectangle(img, (rec_y1, rec_x1), (rec_y2, rec_x2), rgb, weight)
return img
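
# Illustrative sketch (not part of the original module): how the three static
# helpers above compose on a single frame outside of the streaming loop.
# `img` is a BGR ndarray and `face` a FaceBox from findfaces.
def _annotate_face_sketch(img, face):
    crop = FaceStreamTrack._crop_face_from_image(img, face, buffer_percent=10)
    crop = FaceStreamTrack._scale_image_to_height(crop, desired_height=300)
    return FaceStreamTrack._draw_inner_rectangle(crop, rgb=(0, 255, 0))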
|
from __future__ import absolute_import, division, print_function
from .timeline.models import TimeLine
from .timeline.processing import derivative_filtered
import matplotlib
# Select the non-interactive backend before pyplot is imported
matplotlib.use("agg")
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from matplotlib.ticker import MultipleLocator
from matplotlib.colors import ListedColormap
def new_cmap(cmap_name, step=25, cmin=0., cmax=0.7):
"""Create a new color map that goes smoothly to white"""
cm = plt.cm.get_cmap(cmap_name, 256)
newcolors = cm(np.linspace(cmin, cmax, 256 - step))
vals = np.zeros((step, 4))
for i in range(newcolors.shape[1]):
vals[:, i] = np.linspace(1., newcolors[0, i], step)
return ListedColormap(np.vstack([vals, newcolors]))
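
# Illustrative usage of new_cmap above (not part of the original module):
# build a "white into viridis" colormap and apply it to an image plot.
def _new_cmap_sketch():
    data = np.random.rand(32, 32)
    cmap = new_cmap("viridis", step=25, cmin=0.0, cmax=0.7)
    plt.imshow(data, cmap=cmap)
    plt.colorbar()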
def plot_time_line(t, v, dv=0., function=None, func_kwargs=None, ax=None, **plot_kwargs):
"""
Plot the time line data of a trigger window and optionally the fit
:param t: array-like
time values in micro s
:param v:
voltage values in mV
:param dv:
voltage uncertainties in mV
    :param ax: matplotlib axes, optional
        axes to plot into; defaults to the current axes
:param function: str
function identifier for `fastespy.fitting.TimeLine` class
:param func_kwargs: dict, optional
result dict from fitting
:param plot_kwargs: dict, optional
additional kwargs for plotting
:return: matplotlib ax object
"""
data_col = plot_kwargs.pop('data_col', plt.cm.tab10(0.))
data_ls = plot_kwargs.pop('data_ls', '-')
data_label = plot_kwargs.pop('data_label', 'data')
    func_col = plot_kwargs.pop('func_col', plt.cm.tab10(0.1))
func_ls = plot_kwargs.pop('func_ls', '--')
func_label = plot_kwargs.pop('func_label', 'fit')
if ax is None:
ax = plt.gca()
    if np.any(dv > 0.):
ax.fill_between(t, v - dv,
y2=v + dv,
alpha=0.2, color=data_col)
ax.plot(t, v, color=data_col, ls=data_ls, label=data_label, **plot_kwargs)
ax.set_xlabel(r"Time ($\mu$s)")
ax.set_ylabel(r"Voltage (mV)")
if function is not None and func_kwargs is not None:
func = TimeLine(numcomp=func_kwargs['numcomp'], function=function)
ax.plot(t, func(t, **func_kwargs['fitarg']),
ls=func_ls, color=func_col, label=func_label, **plot_kwargs)
chi2dof = func_kwargs['chi2'] / func_kwargs['dof']
string = ''
for k in func_kwargs['value'].keys():
if 'tr' in k or 'td' in k:
ttype = 'rise' if 'tr' in k else 'decay'
it = int(k.split('_')[-1])
string += "$t_{{\\mathrm{{{1:s}}}, {0:n}}} = ({2:.2f} \\pm {3:.2f})\\mu$s\n".format(
it+1,
ttype,
func_kwargs['value'][k],
func_kwargs['error'][k])
string += "$\\chi^2 / \\mathrm{{d.o.f.}} = {0:.2f}$".format(chi2dof)
leg = ax.legend(title=string, fontsize='x-small')
else:
leg = ax.legend(fontsize='x-small')
plt.setp(leg.get_title(), fontsize='x-small')
return ax
def plot_time_line_derivative(t, v, fSample, ax=None,
fmax = 1.e6, norder = 3,
yunit='mV',
plot_unfiltered=False,
**plot_kwargs):
"""
Plot the time line data of a trigger window and optionally the fit
:param t: array-like
time values in micro s
:param v:
voltage values in mV
:param fSample: float
sampling frequency
:param fmax:
maximum frequency above which filter is applied, see scipy.signal.butter function
:param norder:
filter order
:param plot_unfiltered: bool, optional
if true, plot unfiltered derivative
:param plot_kwargs: dict, optional
additional kwargs for plotting
:return: matplotlib ax object
"""
label = plot_kwargs.pop('label', 'Derivative')
# dv and dv_filter are in units of v * fSample,
# which is in Hz = 1 / s
# therefore divide by 1e6 to get it in mu s
dv, dv_filter = derivative_filtered(v,
fSample=fSample,
fmax=fmax,
norder=norder)
if ax is None:
ax = plt.gca()
if plot_unfiltered:
ax.plot(t, dv / 1e6, label=label, **plot_kwargs)
if norder > 0:
ax.plot(t, dv_filter / 1e6, label=label +' filtered', **plot_kwargs)
ax.set_xlabel(r"Time ($\mu$s)")
ax.set_ylabel(r"$dU/dt$ ({0:s}$\,\mu\mathrm{{s}}^{{-1}}$)".format(yunit))
ax.legend(fontsize='x-small')
return ax
def plot_time_line_and_derivative(
t, v,
dv=0.,
function=None,
func_kwargs=None,
fig=None,
fSample=None,
fmax = 1.e6,
norder = 3,
kwargs_timeline={},
kwargs_derivative={}
):
gs = gridspec.GridSpec(3, 1)
kwargs_derivative.setdefault('lw', 1.)
if fig is None:
fig = plt.figure(figsize=(6, 6), tight_layout=True)
ax_t_vs_v = fig.add_subplot(gs[:2, 0])
ax_dvdt = fig.add_subplot(gs[2, 0])
plot_time_line(t, v, dv=dv, function=function, func_kwargs=func_kwargs, ax=ax_t_vs_v, **kwargs_timeline)
plot_time_line_derivative(t, v, fSample, ax=ax_dvdt,
fmax=fmax, norder=norder,
**kwargs_derivative)
ax_t_vs_v.set_xlabel('')
ax_t_vs_v.tick_params(labelbottom=False)
fig.subplots_adjust(wspace=0.05, hspace=0.05)
v = ax_t_vs_v.get_xlim()
ax_dvdt.set_xlim(v)
ax_t_vs_v.xaxis.set_minor_locator(MultipleLocator(5))
ax_dvdt.xaxis.set_minor_locator(MultipleLocator(5))
ax_t_vs_v.yaxis.set_minor_locator(MultipleLocator(5))
ax_dvdt.yaxis.set_minor_locator(MultipleLocator(5))
ax_t_vs_v.grid(which='both', lw=0.5, ls='-', color='0.8')
ax_dvdt.grid(which='both', lw=0.5, ls='-', color='0.8')
return fig, ax_t_vs_v, ax_dvdt
def plot_2d_hist(x,y,
fig=None, ax_2d=None, ax_x=None, ax_y=None,
bins_x=100, bins_y=100,
mesh_kwargs={},
add_cbar=False,
add_contours=True,
axes_lims=[0.01,0.99],
quantiles=[0.05,0.95],
hist_2d_kwargs={},
contour_kwargs={},
hist_x_kwargs={},
hist_y_kwargs={}):
"""
Create a 2d histogram with projected histograms
Returns
-------
fig, ax for 2d hist, ax for x hist, ax for y hist, bins for x, bins for y
Notes
-----
Adapted from https://matplotlib.org/examples/pylab_examples/scatter_hist.html
"""
mesh_kwargs.setdefault('cmap', plt.cm.Blues)
if fig is None:
fig = plt.figure(1, figsize=(8, 8))
if ax_2d is None or ax_x is None or ax_y is None:
# definitions for the axes
left, width = 0.1, 0.65
bottom, height = 0.1, 0.65
bottom_h = left_h = left + width + 0.02
rect_scatter = [left, bottom, width, height]
rect_histx = [left, bottom_h, width, 0.2]
rect_histy = [left_h, bottom, 0.2, height]
ax_2d = plt.axes(rect_scatter)
ax_x = plt.axes(rect_histx)
ax_y = plt.axes(rect_histy)
ax_x.tick_params(labelbottom=False)
ax_y.tick_params(labelleft=False)
# the 2d histogram
n, b1, b2 = np.histogram2d(x, y, bins=[bins_x, bins_y], **hist_2d_kwargs)
b1cen = 0.5 * (b1[1:] + b1[:-1])
b2cen = 0.5 * (b2[1:] + b2[:-1])
b11, b22 = np.meshgrid(b1, b2, indexing='ij')
c = ax_2d.pcolormesh(b11, b22, n, **mesh_kwargs)
if add_contours:
levels = contour_kwargs.pop('levels',
np.linspace(n.min(), n.max(), 7)[1:-1])
contours = ax_2d.contour(b1cen, b2cen, n.T, levels, **contour_kwargs)
ax_2d.clabel(contours, **contour_kwargs)
if add_cbar:
plt.colorbar(c)
nx, _, _ = ax_x.hist(x, bins=bins_x, **hist_x_kwargs)
ny, _, _ = ax_y.hist(y, bins=bins_y, orientation='horizontal', **hist_y_kwargs)
# add quantiles to hist plots
# first compute cdf of histograms
cdf_x = np.cumsum(nx)
cdf_x = (cdf_x - cdf_x[0]) / (cdf_x[-1] - cdf_x[0])
cdf_y = np.cumsum(ny)
cdf_y = (cdf_y - cdf_y[0]) / (cdf_y[-1] - cdf_y[0])
# compute quantiles
if quantiles is not None:
q_x = np.interp(quantiles, xp=cdf_x, fp=b1cen)
q_y = np.interp(quantiles, xp=cdf_y, fp=b2cen)
np.set_printoptions(precision=2)
print("x={0} quantile values are {1}".format(quantiles, q_x))
print("y={0} quantile values are {1}".format(quantiles, q_y))
for i in range(2):
ax_x.axvline(q_x[i], ls=':', color='k')
ax_y.axhline(q_y[i], ls=':', color='k')
# compute axes lims
if axes_lims is not None:
xlims = np.interp(axes_lims, xp=cdf_x, fp=b1cen)
ylims = np.interp(axes_lims, xp=cdf_y, fp=b2cen)
ax_x.set_xlim(xlims)
ax_y.set_ylim(ylims)
ax_2d.set_xlim(xlims)
ax_2d.set_ylim(ylims)
else:
ax_x.set_xlim(ax_2d.get_xlim())
ax_y.set_ylim(ax_2d.get_ylim())
return fig, ax_2d, ax_x, ax_y, bins_x, bins_y
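
# Illustrative usage of plot_2d_hist above (not part of the original module):
# correlated Gaussian samples with quantile markers on the projections.
def _plot_2d_hist_sketch():
    np.random.seed(42)
    x = np.random.normal(size=5000)
    y = 0.5 * x + np.random.normal(scale=0.5, size=5000)
    return plot_2d_hist(x, y, bins_x=50, bins_y=50,
                        quantiles=[0.05, 0.95], add_contours=True)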
def plot_scatter_w_hist(x,y,
fig=None, ax_2d=None, ax_x=None, ax_y=None,
bins_x=100, bins_y=100,
scatter_kwargs={},
hist_x_kwargs={},
hist_y_kwargs={}):
"""
Create a scatter plot with projected histograms
Returns
-------
fig, ax for 2d hist, ax for x hist, ax for y hist, bins for x, bins for y
Notes
-----
Adapted from https://matplotlib.org/examples/pylab_examples/scatter_hist.html
"""
if fig is None:
fig = plt.figure(1, figsize=(8, 8))
if ax_2d is None or ax_x is None or ax_y is None:
# definitions for the axes
left, width = 0.1, 0.65
bottom, height = 0.1, 0.65
bottom_h = left_h = left + width + 0.02
rect_scatter = [left, bottom, width, height]
rect_histx = [left, bottom_h, width, 0.2]
rect_histy = [left_h, bottom, 0.2, height]
ax_2d = plt.axes(rect_scatter)
ax_x = plt.axes(rect_histx)
ax_y = plt.axes(rect_histy)
ax_x.tick_params(labelbottom=False)
ax_y.tick_params(labelleft=False)
ax_2d.scatter(x, y, **scatter_kwargs)
ax_x.hist(x, bins=bins_x, **hist_x_kwargs)
ax_y.hist(y, bins=bins_y, orientation='horizontal', **hist_y_kwargs)
ax_x.set_xlim(ax_2d.get_xlim())
ax_y.set_ylim(ax_2d.get_ylim())
return fig, ax_2d, ax_x, ax_y, bins_x, bins_y
def plot_metric(history, ax=None, metric="loss", **kwargs):
"""
Plot the evolution of a classification metric
with epocks
Parameters
----------
history: keras history object
the classification history
ax: matplotlib axes object
axes for plotting
metric: string
name of metric to plot
kwargs: dict
additional kwargs passed to plot
Returns
-------
matplotlib axes object
"""
if ax is None:
ax = plt.gca()
label = kwargs.pop('label', '')
ax.semilogy(history.epoch, history.history[metric], label='Train ' + label, **kwargs)
kwargs.pop('ls', None)
ax.semilogy(history.epoch, history.history[f'val_{metric}'], label='Val ' + label, ls='--', **kwargs)
ax.set_xlabel('Epoch')
ax.set_ylabel(metric)
return ax
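
# Illustrative usage of plot_metric above (not part of the original module):
# any object exposing `epoch` and `history` like a keras History works, so a
# small stand-in is used here instead of training a real model.
def _plot_metric_sketch():
    class _FakeHistory:
        epoch = list(range(5))
        history = {"loss": [1.0, 0.6, 0.4, 0.3, 0.25],
                   "val_loss": [1.1, 0.8, 0.6, 0.5, 0.45]}
    ax = plot_metric(_FakeHistory(), metric="loss", label="toy run")
    ax.legend()
    return ax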
|
from django import forms
from .models import Team, Tournament, Match, Score
from player.models import Player
from performance.models import BattingInnings, BowlingInnings
class TeamCreationForm(forms.ModelForm):
logo = forms.ImageField(required=False)
class Meta:
model = Team
fields = [
'name',
'owner',
'logo'
]
labels = {
'name': 'Team Name',
'owner': 'Team Owner',
'logo': 'Team Logo'
}
class TournamentCreationForm(forms.ModelForm):
#place = forms.CharField(widget=forms.Textarea, max_length=100)
class Meta:
model = Tournament
fields = [
'name',
'place',
'start_date',
'end_date',
'image',
]
labels = {
'name': 'Tournament Name',
'start_date': 'Tournament Starting Date',
'end_date': 'Tournament Ending Date',
'image' : 'Select an Image',
}
class MatchCreationForm(forms.ModelForm):
def __init__(self, tournament, *args, **kwargs):
super(MatchCreationForm, self).__init__(*args, **kwargs)
self.fields['team_1'] = forms.ModelChoiceField(
queryset=Team.objects.filter(tournament=tournament)
)
self.fields['team_2'] = forms.ModelChoiceField(
queryset=Team.objects.filter(tournament=tournament)
)
class Meta:
model = Match
fields = [
'team_1',
'team_2',
'overs'
]
class ScoreUpdateForm(forms.Form):
def __init__(self, player1, player2, *args, **kwargs):
super(ScoreUpdateForm, self).__init__(*args, **kwargs)
if player1 is not None and player2 is not None:
self.fields['batsman'] = forms.ChoiceField(
choices=[(player1.player.id, str(player1.player)), (player2.player.id, str(player2.player))]
)
self.fields['out_batsman'] = forms.ChoiceField(
choices=[(player1.player.id, str(player1.player)), (player2.player.id, str(player2.player))]
)
self.fields['extra_type'] = forms.ChoiceField(
choices=[
('Wide', 'Wide'),
('NoBall', 'NoBall'),
('DeadBall', 'DeadBall')
]
)
self.fields['wicket_type'] = forms.ChoiceField(
choices=[
('RunOut', 'RunOut'),
('Catch', 'Catch'),
('Bowled', 'Bowled'),
('Lbw', 'Lbw'),
('Stumps', 'Stumps'),
('HitWicket', 'HitWicket')
]
)
ball_number = forms.IntegerField()
    over_number = forms.IntegerField()
batsman = forms.CharField(max_length=11)
run = forms.IntegerField(required=False)
extra_type = forms.CharField(max_length=11, required=False,)
extra_run = forms.IntegerField(required=False)
is_wicket = forms.BooleanField(required=False)
wicket_type = forms.CharField(max_length=11, required=False)
six = forms.BooleanField(required=False)
four = forms.BooleanField(required=False)
out_batsman = forms.CharField(max_length=11)
commentary = forms.CharField(widget=forms.Textarea(attrs={'cols': '70', 'rows': '3'}))
is_extra = forms.BooleanField(required=False)
class SelectBatsmanForm(forms.Form):
def __init__(self, players, *args, **kwargs):
super(SelectBatsmanForm, self).__init__(*args, **kwargs)
if players is not None:
self.fields['player'] = forms.ChoiceField(
choices=[(player.player.id, str(player.player)) for player in players]
)
player = forms.CharField(max_length=12)
class SelectBowlerForm(forms.Form):
def __init__(self, players, *args, **kwargs):
super(SelectBowlerForm, self).__init__(*args, **kwargs)
if players is not None:
self.fields['player'] = forms.ChoiceField(
choices=[(player.player.id, str(player.player)) for player in players]
)
player = forms.CharField(max_length=12)
class TossForm(forms.Form):
def __init__(self, match, *args, **kwargs):
super(TossForm, self).__init__(*args, **kwargs)
self.fields['toss_winner'] = forms.ChoiceField(
choices=[ (match.team_1.id, str(match.team_1)), (match.team_2.id, str(match.team_2))]
)
self.fields['toss_winner_choice'] = forms.ChoiceField(
choices=[('Batting', 'Batting'),
('Bowling', 'Bowling')]
)
toss_winner = forms.CharField(max_length=11)
toss_winner_choice = forms.CharField(max_length=10)
class OverForm(forms.Form):
overs = forms.IntegerField()
class OpenerForm(forms.Form):
def __init__(self, team, *args, **kwargs):
super(OpenerForm, self).__init__(*args, **kwargs)
self.fields['striker'] = forms.ChoiceField(
choices=[(player.id, str(player)) for player in team.players.all()]
)
self.fields['non_striker'] = forms.ChoiceField(
choices=[(player.id, str(player)) for player in team.players.all()]
)
striker = forms.CharField(required=False)
non_striker = forms.CharField(required=False)
class WinnerForm(forms.Form):
def __init__(self, team1, team2, *args, **kwargs):
super(WinnerForm, self).__init__(*args, **kwargs)
self.fields['winner'] = forms.ChoiceField(
choices=[(team1.id, str(team1)), (team2.id, str(team2))]
)
winner = forms.CharField(required=False)
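
# Illustrative sketch (not part of the original module): how the dynamic forms
# above are typically instantiated in a view. `tournament` and `match` are
# assumed to be existing model instances, and the tournament FK on Match is an
# assumption about the model.
def _forms_usage_sketch(request, tournament, match):
    match_form = MatchCreationForm(tournament, request.POST or None)
    toss_form = TossForm(match, request.POST or None)
    if match_form.is_valid():
        new_match = match_form.save(commit=False)
        new_match.tournament = tournament  # assumed FK field on Match
        new_match.save()
    return match_form, toss_form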
|
import time
import http.client
from SidebarPage import SidebarPage
class TestIMS(SidebarPage):
def cornerTitle(self):
return 'Testing'
def error(self, msg):
self.write(f'<p style="color:red">{self.htmlEncode(msg)}</p>')
def writeMsg(self, msg):
self.write(f'<p>{self.htmlEncode(msg)}</p>')
def writeTest(self, msg):
self.write(f'<h4>{msg}</h4>')
def getDoc(self, path, headers=None):
if headers is None:
headers = {}
con = self._httpConnection(self._host)
con.request('GET', path, headers=headers)
return con.getresponse()
def writeContent(self):
self.writeln('<h2>Test If-Modified-Since support in Webware</h2>')
if self.request().environ().get('paste.testing'):
            self.writeln('<p>This test requires a running web server.</p>')
return
d = self.request().serverDictionary()
self._host = d['HTTP_HOST'] # includes the port
self._httpConnection = (
http.client.HTTPSConnection if d.get('wsgi.url_scheme') == 'https'
else http.client.HTTPConnection)
servletPath = self.request().servletPath()
# pick a static file which is served up by Webware's UnknownFileHandler
self.runTest(f'{servletPath}/PSP/Examples/psplogo.png')
def runTest(self, path):
self.writeTest(f'Opening <code>{path}</code>')
rsp = self.getDoc(path)
originalSize = size = len(rsp.read())
if rsp.status != 200:
self.error(f'Expected status of 200, received {rsp.status}.')
return
if size > 0:
self.writeMsg(
                f'Received: {rsp.status} {rsp.reason},'
f' document size = {size} (as expected).')
else:
self.error(f'Document size is: {size}')
return
lastMod = rsp.getheader('Last-Modified', '')
if lastMod:
self.writeMsg(f'Last modified: {lastMod}')
else:
self.error('No Last-Modified header found.')
return
# Retrieve document again with IMS and expect a 304 not modified
self.writeTest(
f'Opening <code>{path}</code><br>'
f'with If-Modified-Since: {lastMod}')
rsp = self.getDoc(path, {'If-Modified-Since': lastMod})
size = len(rsp.read())
if rsp.status != 304:
self.error(f'Expected status of 304, received {rsp.status}.')
return
if size:
self.error(f'Expected 0 length document, received {size} bytes.')
return
self.writeMsg(
f'Received {rsp.status} {rsp.reason},'
f' document size = {size} (as expected).')
arpaFormat = '%a, %d %b %Y %H:%M:%S GMT'
t = time.strptime(lastMod, arpaFormat)
t = (t[0] - 1,) + t[1:] # one year before last modification
beforeMod = time.strftime(arpaFormat, time.gmtime(time.mktime(t)))
self.writeTest(
f'Opening <code>{path}</code><br>'
f'with If-Modified-Since: {beforeMod}')
rsp = self.getDoc(path, {'If-Modified-Since': beforeMod})
size = len(rsp.read())
lastMod = rsp.getheader('Last-Modified', '')
self.writeMsg(f'Last modified: {lastMod}')
if rsp.status != 200:
self.error(
f'Expected status of 200, received {rsp.status} {rsp.reason}.')
return
if size != originalSize:
self.error(
f'Received: {rsp.status} {rsp.reason},'
f' document size = {size}, expected size = {originalSize}.')
return
self.writeMsg(
f'Received: {rsp.status} {rsp.reason},'
f' document size = {size} (as expected).')
self.writeTest(f'{self.__class__.__name__} passed.')
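
# Illustrative sketch (not part of the original module): the conditional GET
# round trip that runTest exercises, reduced to plain http.client calls
# against an assumed local server and path.
def _ims_round_trip_sketch(host="localhost:8080", path="/PSP/Examples/psplogo.png"):
    con = http.client.HTTPConnection(host)
    con.request('GET', path)
    rsp = con.getresponse()
    rsp.read()
    last_mod = rsp.getheader('Last-Modified', '')
    # A repeat request with If-Modified-Since should yield 304 and an empty body.
    con = http.client.HTTPConnection(host)
    con.request('GET', path, headers={'If-Modified-Since': last_mod})
    rsp = con.getresponse()
    return rsp.status, len(rsp.read())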
|
# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "LICENSE.txt" file accompanying this file.
# This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, express or implied.
# See the License for the specific language governing permissions and limitations under the License.
import logging
import boto3
import pytest
from assertpy import assert_that
from remote_command_executor import RemoteCommandExecutor
from utils import get_username_for_os
from tests.common.assertions import assert_errors_in_logs
from tests.common.utils import get_installed_parallelcluster_version
@pytest.mark.dimensions("eu-central-1", "c5.xlarge", "ubuntu1804", "*")
@pytest.mark.usefixtures("instance", "scheduler")
def test_create_wrong_os(region, os, pcluster_config_reader, clusters_factory, architecture, amis_dict):
"""Test error message when os provide is different from the os of custom AMI"""
# ubuntu1804 is specified in the config file but an AMI of centos7 is provided
wrong_os = "centos7"
logging.info("Asserting os fixture is different from wrong_os variable")
assert_that(os != wrong_os).is_true()
cluster_config = pcluster_config_reader(custom_ami=amis_dict.get(wrong_os))
cluster = clusters_factory(cluster_config, raise_on_error=False)
_assert_head_node_is_running(region, cluster)
username = get_username_for_os(wrong_os)
remote_command_executor = RemoteCommandExecutor(cluster, username=username)
logging.info("Verifying error in logs")
assert_errors_in_logs(
remote_command_executor,
["/var/log/cfn-init.log"],
["RuntimeError", fr"custom AMI.+{wrong_os}.+base.+os.+config file.+{os}"],
)
@pytest.mark.dimensions("ca-central-1", "c5.xlarge", "alinux2", "*")
@pytest.mark.usefixtures("instance", "os", "scheduler")
def test_create_wrong_pcluster_version(
region, pcluster_config_reader, clusters_factory, pcluster_ami_without_standard_naming
):
"""Test error message when AMI provided was baked by a pcluster whose version is different from current version"""
current_version = get_installed_parallelcluster_version()
wrong_version = "2.8.1"
logging.info("Asserting wrong_version is different from current_version")
assert_that(current_version != wrong_version).is_true()
# Retrieve an AMI without 'aws-parallelcluster-<version>' in its name.
# Therefore, we can bypass the version check in CLI and test version check of .bootstrapped file in Cookbook.
wrong_ami = pcluster_ami_without_standard_naming(wrong_version)
cluster_config = pcluster_config_reader(custom_ami=wrong_ami)
cluster = clusters_factory(cluster_config, raise_on_error=False)
_assert_head_node_is_running(region, cluster)
remote_command_executor = RemoteCommandExecutor(cluster)
logging.info("Verifying error in logs")
assert_errors_in_logs(
remote_command_executor,
["/var/log/cloud-init-output.log"],
["error_exit", fr"AMI was created.+{wrong_version}.+is.+used.+{current_version}"],
)
def _assert_head_node_is_running(region, cluster):
logging.info("Asserting the head node is running")
head_node_state = (
boto3.client("ec2", region_name=region)
.describe_instances(Filters=[{"Name": "ip-address", "Values": [cluster.head_node_ip]}])
.get("Reservations")[0]
.get("Instances")[0]
.get("State")
.get("Name")
)
assert_that(head_node_state).is_equal_to("running")
|
# -*- coding: utf-8 -*-
"""Views for anything that relates to adminstration of members.
Login and logout stays in SVPB
"""
import os
from django.contrib.auth.decorators import user_passes_test
from django.core.management import call_command
from django.core.urlresolvers import reverse_lazy
from django.utils.decorators import method_decorator
from django.utils.safestring import mark_safe
from django.views.generic import View, FormView, CreateView, DeleteView
from django.utils.html import format_html
from django.contrib import messages
from django.contrib.messages.views import SuccessMessageMixin
from django.shortcuts import redirect, get_object_or_404
from django.contrib.auth.models import User, Group
from post_office import mail
from post_office.models import EmailTemplate
from pwgen import pwgen
from sendfile import sendfile
import mitglieder.forms
from arbeitsplan import forms
from arbeitsplan.tables import ImpersonateTable
from mitglieder.tables import MitgliederTable
from arbeitsplan.views import FilteredListView
from mitglieder.forms import (ActivateForm,
MitgliederAddForm,
AccountEdit,
AccountOtherEdit,
PersonMitgliedsnummer,
)
from arbeitsplan.models import Mitglied
from svpb.forms import MitgliederInactiveResetForm
from svpb.settings import SENDFILE_ROOT
from svpb.views import isVorstandMixin, isVorstand
#-------------------------------------------------
class ActivateView(FormView):
template_name = "registration/justForm.html"
form_class = ActivateForm
success_url = "/"
def get_context_data(self, **kwargs):
context = super(ActivateView, self).get_context_data(**kwargs)
context['title'] = "Aktivieren Sie Ihre SVPB-Konto"
context['intro_text'] = format_html("""<b>Willkommen bei der ersten Nutzung Ihres SVPB-Kontos</b>
<p>
Vor der Nutzung dieser Webseite bitten wir Sie um folgendes:
<ul>
<li>Bitte überprüfen Sie Ihre email-Adresse und korrigieren Sie diese
gegebenenfalls </li>
<li>Bitte stimmen Sie der Nutzung der Webseite zu </li>
<li>Bitte stimmen Sie zu, dass der SVPB Ihnen emails
im Zusammenhang mit dem
Arbeitsplan schicken darf. </li>
<li>Bitte vergeben Sie ein neues Passwort! (Die beiden Eingaben
müssen übereinstimmen) </li>
</ul>
Ohne diese Zustimmungn können Sie diese Webseite leider nicht nutzen!
""")
context['post_text'] = ""
context['todo_text'] = ""
return context
def get_initial(self):
initial = super(ActivateView, self).get_initial()
initial['email'] = self.request.user.email
return initial
def form_valid(self, form):
from django.utils import timezone
        # set the user active, store its email, remember the consent date
self.request.user.email = form.cleaned_data['email']
self.request.user.is_active = True
self.request.user.set_password(form.cleaned_data['<PASSWORD>'])
self.request.user.mitglied.zustimmungsDatum = timezone.now()
self.request.user.save()
self.request.user.mitglied.save()
return super(ActivateView, self).form_valid(form)
def preparePassword(accountList=None):
"""For the given accounts, prepare the passwords and the PDFs for the letters
Arguments:
- `accountList`: List of User objects
Returns:
- List of tuples: (user object, PDF file)
"""
from jinja2 import Template
import codecs, subprocess
r = []
# print "preparing passwords for: ", accountList
for u in accountList:
pw = pwgen(6, no_symbols=True, no_ambiguous=True)
u.set_password(pw)
u.save()
r.append({'user': u,
'mitglied': u.mitglied,
'password': pw,
'status': u.mitglied.get_status_display(),
'geburtsdatum': u.mitglied.geburtsdatum.strftime('%d.%m.%Y'),
})
# generate the PDF
# assume the template is in templates
templateText = EmailTemplate.objects.get(name='newUserLaTeX')
# print templateText.content
rendered = Template(templateText.content).render(dicts=r)
# print rendered
# and now process this via latex:
f = codecs.open('letters.tex', 'w', 'utf-8')
f.write(rendered)
f.close()
# TODO: use better file names, protect against race conditions
retval = subprocess.call (["xelatex",
'-interaction=batchmode',
"letters.tex"])
## retval = subprocess.call (["xelatex",
## '-interaction=batchmode',
## "letters.tex"])
# move this file into a directory where only Vorstand has access!
# remove an older letter first; ignore errors here
import shutil, os
try:
os.remove (os.path.join (SENDFILE_ROOT, 'letters.pdf'))
except:
pass
shutil.move("letters.pdf", SENDFILE_ROOT)
return r
class AccountAdd(SuccessMessageMixin, isVorstandMixin, CreateView):
model = Mitglied
title = "Mitglied hinzufügen"
template_name = "mitglied_form.html"
form_class = MitgliederAddForm
success_url = "/accounts"
def get_context_data(self, **kwargs):
context = super(AccountAdd, self).get_context_data(**kwargs)
context['title'] = self.title
return context
def form_valid(self, form):
# create User and Mitglied based on cleaned data
# first, make some sanity checks to provide warnings
u = User(first_name=form.cleaned_data['firstname'],
last_name=form.cleaned_data['lastname'],
is_active=False,
username=form.cleaned_data['mitgliedsnummer'],
email=form.cleaned_data['email'],
)
u.set_password('<PASSWORD>')
u.save()
m = u.mitglied
m.user = u
m.geburtsdatum = form.cleaned_data['geburtsdatum']
m.mitgliedsnummer = form.cleaned_data['mitgliedsnummer']
m.ort = form.cleaned_data['ort']
m.plz = form.cleaned_data['plz']
m.strasse = form.cleaned_data['strasse']
m.gender = form.cleaned_data['gender']
m.status = form.cleaned_data['status']
m.arbeitlast = form.cleaned_data['arbeitslast']
m.festnetz = form.cleaned_data['festnetz']
m.mobil = form.cleaned_data['mobil']
m.save()
u.save()
messages.success(self.request,
format_html(
u"Nutzer {} {} (Nummer: {}, Account: {}) "
u"wurde erfolgreich angelegt",
u.first_name,
u.last_name, m.mitgliedsnummer,
u.username
))
try:
r = preparePassword([u])
print u"PAssword erzeugt: ", r
# copy the produced PDF to the SENDFILE_ROOT directory
messages.success(self.request,
format_html(
u'Das Anschreiben mit Password kann '
u'<a href="{}">hier</a>'
u' heruntergeladen werden.',
u'letters.pdf'
))
except Exception as e:
print "Fehler bei password: ", e
messages.error(self.request,
u"Das Password für den Nutzer konnte nicht gesetzt werden "
u"oder das Anschreiben nicht erzeugt werden. Bitten Sie das "
u"neue Mitglied, sich über die Webseite selbst ein Password zu "
u"generieren.")
return redirect(self.success_url)
class AccountEdit(SuccessMessageMixin, FormView):
template_name = "registration/justForm.html"
form_class = AccountEdit
success_url = "/"
post_text = format_html("""
<p>
Sie haben Ihr Passwort vergessen? Sie können es <a href="{{% url "password_reset_recover" %}}">
hier zurücksetzen</a>.
<p>
""")
def get_context_data(self, **kwargs):
context = super(AccountEdit, self).get_context_data(**kwargs)
context['title'] = "Aktualisieren Sie Ihr SVPB-Konto"
context['post_text'] = self.post_text
return context
def fillinUser(self, user):
initial = {}
initial['email'] = user.email
initial['strasse'] = user.mitglied.strasse
initial['plz'] = user.mitglied.plz
initial['ort'] = user.mitglied.ort
initial['geburtsdatum'] = user.mitglied.geburtsdatum
initial['festnetz'] = user.mitglied.festnetz
initial['mobil'] = user.mitglied.mobil
return initial
def get_initial(self):
initial = super(AccountEdit, self).get_initial()
initial.update(self.fillinUser(self.get_user()))
return initial
def storeUser (self, form, user):
user.email = form.cleaned_data['email']
user.mitglied.strasse = form.cleaned_data['strasse']
user.mitglied.plz = form.cleaned_data['plz']
user.mitglied.ort = form.cleaned_data['ort']
user.mitglied.geburtsdatum = form.cleaned_data['geburtsdatum']
user.mitglied.festnetz = form.cleaned_data['festnetz']
user.mitglied.mobil = form.cleaned_data['mobil']
def get_user(self):
return self.request.user
def form_valid(self, form):
if form.has_changed():
user = self.get_user()
self.storeUser(form, user)
user.save()
user.mitglied.save()
# print ({'user': user.__dict__,
# 'mitglied': user.mitglied.__dict__})
# print type(user)
# print user.last_name
# print type(user.mitglied)
            # inform the relevant Vorstand in charge of membership
mail.send(['<EMAIL>'],
template="updatedProfile",
context={'user': user,
'mitglied': user.mitglied,
'changed': form.changed_data},
priority='now',
)
messages.success(self.request,
format_html(
u"Das Profil {} {} ({}) wurde erfolgreich aktualisiert.",
user.first_name, user.last_name,
user.mitglied.mitgliedsnummer))
else:
messages.success(self.request,
"Sie haben keine Änderungen vorgenommen."
)
return super(AccountEdit, self).form_valid(form)
class AccountOtherEdit(isVorstandMixin, AccountEdit):
form_class = AccountOtherEdit
post_text = ""
def get_context_data(self, **kwargs):
context = super(AccountOtherEdit, self).get_context_data(**kwargs)
context['title'] = "Bearbeiten Sie das SVPB-Konto eines Mitgliedes"
return context
def fillinUser(self, user):
initial = super(AccountOtherEdit, self).fillinUser(user)
initial['vorname'] = user.first_name
initial['nachname'] = user.last_name
initial['arbeitslast'] = user.mitglied.arbeitslast
initial['status'] = user.mitglied.status
initial['aktiv'] = user.is_active
initial['boots_app'] = user.groups.filter(name='Boote').exists()
return initial
def storeUser(self, form, user):
super(AccountOtherEdit, self).storeUser(form, user)
user.first_name = form.cleaned_data['vorname']
user.last_name = form.cleaned_data['nachname']
user.is_active = form.cleaned_data['aktiv']
user.mitglied.arbeitslast = form.cleaned_data['arbeitslast']
user.mitglied.status = form.cleaned_data['status']
# assign BOOTE group
group_boots = Group.objects.get(name="Boote")
if (form.cleaned_data['boots_app']):
user.groups.add(group_boots)
else:
user.groups.remove(group_boots)
def get_user(self):
userid = self.kwargs['id']
user = get_object_or_404(User, pk=int(userid))
return user
class AccountLetters(isVorstandMixin, View):
"""Check whether this user is allowed to download a letters.pdf file
"""
def get(self, request):
return sendfile(request,
os.path.join(SENDFILE_ROOT,
"letters.pdf"))
class AccountList(SuccessMessageMixin, isVorstandMixin, FilteredListView):
model = User
template_name = "mitglieder_tff.html"
title = "Mitglieder bearbeiten"
# filterform_class = forms.NameFilterForm
filterform_class = PersonMitgliedsnummer
filtertile = "Mitglieder nach Vor- oder Nachnamen filtern"
tabletitle = "Alle Mitglieder"
tableClass = MitgliederTable
filterconfig = [('first_name', 'first_name__icontains'),
('last_name', 'last_name__icontains'),
('mitgliedsnummer', 'mitglied__mitgliedsnummer__icontains'),
]
intro_text = mark_safe("""Diese Seite zeigt eine Liste aller Mitglieder an.
Sie dient vor allem dazu, einzelne Mitglieder-Konten zu finden und zu editieren.
Eine Übersicht über gemeldete, zugeteilte, erbrachte und akzeptierte
Arbeitsstunden findet sich separat in der <a href="/arbeitsplan/salden/">Saldenübersicht</a>.
""")
class AccountInactiveReset(FormView):
"""Für allen nicht-aktiven Accounts neue Passwörter erzeugen und PDF anlegen.
"""
template_name = "inactive_reset.html"
form_class = MitgliederInactiveResetForm
success_url = "accounts/"
def form_valid(self, form):
if 'reset' in self.request.POST:
userQs = User.objects.filter(is_active=False)
try:
r = preparePassword(userQs)
print "PAssword erzeugt: ", r
# copy the produced PDF to the SENDFILE_ROOT directory
messages.success(self.request,
format_html(
'Das Anschreiben mit Password kann '
'<a href="{}">hier</a>'
' heruntergeladen werden.',
'accounts/letters.pdf'
))
except Exception as e:
print "Fehler bei password: ", e
messages.error(self.request,
u"Ein Password konnte nicht gesetzt werden "
u"oder das Anschreiben nicht erzeugt werden. "
u"Bitte benachrichtigen Sie den Administrator.")
return redirect(self.success_url)
class AccountDelete(SuccessMessageMixin, isVorstandMixin, DeleteView):
model = User
success_url = reverse_lazy("accountList")
# success_url = "/accounts/list"
template_name = "user_confirm_delete.html"
# success_message = "%(first_name) %(last_name) wurde gelöscht!"
success_message = "Mitglied wurde gelöscht!"
class MitgliederExcel(View):
"""For Vorstand, send back an Excel file with all
the Mitglieder in various filtering combinations"""
@method_decorator(user_passes_test(isVorstand, login_url="/keinVorstand/"))
def get(self, request):
if isVorstand(request.user):
# call the command to prepare the excel file
# repeated name; TODO: move this from here and mitgliedExcel.py into settings
filename = "mitglieder.xlsx"
basepath = SENDFILE_ROOT
call_command('mitgliedExcel')
return sendfile(request,
os.path.join(basepath, filename))
else:
return redirect ("keinVorstand")
class PasswordChange(FormView):
template_name = "password_change.html"
form_class = mitglieder.forms.PasswordChange
success_url = reverse_lazy("main")
def form_valid(self, form):
try:
u = self.request.user
u.set_password(form.cleaned_data['<PASSWORD>'])
u.save()
messages.success(self.request,
u'Ihr Passwort wurde erfolgreich geändert'
)
except Exception as e:
messages.error(self.request,
u'Ihre Passwortänderung ist fehlgeschlagen: ' +
str(e),
)
return super(PasswordChange, self).form_valid(form)
class ImpersonateListe(isVorstandMixin, FilteredListView):
"""Show a table with all Mitglieder,
pick one to impersonate.
Needs a suitable linked Column to point
to impersonate/user-id
"""
title = "Darzustellenden Nutzer auswählen"
tableClass = ImpersonateTable
tabletitle = "Mitglieder"
model = User
filterform_class = forms.NameFilterForm
filterconfig = [('first_name', 'first_name__icontains'),
('last_name', 'last_name__icontains'),
]
intro_text = """Sie können die Identität eines
anderen Nutzers annehmen,
beispielsweise um Meldungen oder Leistungen für diesen einzutragen.
<p>
Bitte gehen Sie verantwortlich mit dieser Möglichkeit um!
<p>
Beachten Sie: Diese Funktion funktioniert nicht bei Mitgliedern
mit Sonderstatus (z.B. Administratoren dieser Webseite).
"""
def get_data(self):
return (self.model.objects
.filter(is_active=True)
.filter(is_staff=False)
.filter(is_superuser=False)
.exclude(id=self.request.user.id))
pass
|
<reponame>affinis-lab/car-detection-module
import cv2
from keras.callbacks import ModelCheckpoint
from keras.models import Model
from keras.layers import Input, Flatten, Dense, Reshape, Lambda
from keras.layers import Conv2D, BatchNormalization, LeakyReLU, MaxPooling2D, Dropout, Activation, \
GlobalAveragePooling2D
import numpy as np
from keras.models import load_model
import tensorflow as tf
from keras.optimizers import Adam
from read_data import GroundTruth
from utils import decode_netout, compute_overlap, compute_ap
from preprocessing import BatchGenerator
class TinyYolo():
def __init__(self, input_size, config):
self.config = config
self.true_boxes = Input(shape=(1, 1, 1, self.config['model']['max_obj'], 4))
self.nb_box = len(self.config['model']['anchors']) // 2
self.class_wt = np.ones(self.config['model']['nb_class'], dtype='float32')
input_image = Input(shape=(input_size, input_size, 3))
# Layer 1
x = Conv2D(16, (3,3), strides=(1,1), padding='same', name='conv_1', use_bias=False)(input_image)
x = BatchNormalization(name='norm_1')(x)
x = LeakyReLU(alpha=0.1)(x)
x = MaxPooling2D(pool_size=(2, 2))(x)
# Layer 2 - 5
for i in range(0,4):
x = Conv2D(32*(2**i), (3,3), strides=(1,1), padding='same', name='conv_' + str(i+2), use_bias=False)(x)
x = BatchNormalization(name='norm_' + str(i+2))(x)
x = LeakyReLU(alpha=0.1)(x)
x = MaxPooling2D(pool_size=(2, 2))(x)
# Layer 6
x = Conv2D(512, (3,3), strides=(1,1), padding='same', name='conv_6', use_bias=False)(x)
x = BatchNormalization(name='norm_6')(x)
x = LeakyReLU(alpha=0.1)(x)
x = MaxPooling2D(pool_size=(2, 2), strides=(1,1), padding='same')(x)
# Layer 7
x = Conv2D(1024, (3,3), strides=(1,1), padding='same', name='conv_' + str(7), use_bias=False)(x)
x = BatchNormalization(name='norm_' + str(7))(x)
x = LeakyReLU(alpha=0.1)(x)
# Layer 8
x = Conv2D(512, (3, 3), strides=(1, 1), padding='same', name='conv_' + str(8), use_bias=False)(x)
x = BatchNormalization(name='norm_' + str(8))(x)
x = LeakyReLU(alpha=0.1)(x)
# Object detection layer
output = Conv2D(2 * (4 + 1 + self.config['model']['nb_class']),
(1, 1), strides=(1, 1),
padding='same',
name='DetectionLayer',
kernel_initializer='lecun_normal')(x)
output = Reshape((self.config['model']['grid_h'], self.config['model']['grid_w'], self.nb_box,
4 + 1 + self.config['model']['nb_class']))(output)
output = Lambda(lambda args: args[0])([output, self.true_boxes])
self.model = Model([input_image, self.true_boxes], output)
# Load pretrained model
pretrained = load_model('yolov2-tiny-coco.h5', custom_objects={'custom_loss': self.custom_loss, 'tf': tf})
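# Copy weights layer-by-layer from the pretrained tiny-YOLOv2 COCO model,
# stopping before the freshly initialized detection head (and any new class/dropout layers).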
idx = 0
for layer in self.model.layers:
if layer.name.startswith("DetectionLayer"):
break
if layer.name.startswith("class_conv") or layer.name.startswith("dropout"):
break
layer.set_weights(pretrained.get_layer(index=idx).get_weights())
idx += 1
for l in self.config['model']['frozen_layers']:
self.model.get_layer("conv_" + str(l)).trainable = False
self.model.get_layer("norm_" + str(l)).trainable = False
#self.model.summary()
def normalize(self, image):
return image / 255.
def custom_loss(self, y_true, y_pred):
mask_shape = tf.shape(y_true)[:4]
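# Build a (batch, grid_h, grid_w, nb_box, 2) tensor of cell offsets so the predicted
# x/y offsets (relative to their cell) can be converted to absolute grid coordinates.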
cell_x = tf.to_float(
tf.reshape(tf.tile(tf.range(self.config['model']['grid_w']), [self.config['model']['grid_h']]),
(1, self.config['model']['grid_h'], self.config['model']['grid_w'], 1, 1)))
cell_y = tf.transpose(cell_x, (0, 2, 1, 3, 4))
cell_grid = tf.tile(tf.concat([cell_x, cell_y], -1), [self.config['train']['batch_size'], 1, 1, self.nb_box, 1])
coord_mask = tf.zeros(mask_shape)
conf_mask = tf.zeros(mask_shape)
class_mask = tf.zeros(mask_shape)
seen = tf.Variable(0.)
total_loss = tf.Variable(0.)
total_recall = tf.Variable(0.)
total_boxes = tf.Variable(self.config['model']['grid_h'] * self.config['model']['grid_w'] *
self.config['model']['num_boxes'] * self.config['train']['batch_size'])
"""
Adjust prediction
"""
### adjust x and y
pred_box_xy = tf.sigmoid(y_pred[..., :2]) + cell_grid
### adjust w and h tf.exp(
pred_box_wh = tf.exp(y_pred[..., 2:4]) * np.reshape(self.config['model']['anchors'], [1, 1, 1, self.nb_box, 2])
### adjust confidence
pred_box_conf = tf.sigmoid(y_pred[..., 4])
### adjust class probabilities
pred_box_class = y_pred[..., 5:]
"""
Adjust ground truth
"""
### adjust x and y
true_box_xy = y_true[..., 0:2] # relative position to the containing cell
### adjust w and h
true_box_wh = y_true[..., 2:4] # number of cells across, horizontally and vertically
### adjust confidence
true_wh_half = true_box_wh / 2.
true_mins = true_box_xy - true_wh_half
true_maxes = true_box_xy + true_wh_half
pred_wh_half = pred_box_wh / 2.
pred_mins = pred_box_xy - pred_wh_half
pred_maxes = pred_box_xy + pred_wh_half
intersect_mins = tf.maximum(pred_mins, true_mins)
intersect_maxes = tf.minimum(pred_maxes, true_maxes)
intersect_wh = tf.maximum(intersect_maxes - intersect_mins, 0.)
intersect_areas = intersect_wh[..., 0] * intersect_wh[..., 1]
true_areas = true_box_wh[..., 0] * true_box_wh[..., 1]
pred_areas = pred_box_wh[..., 0] * pred_box_wh[..., 1]
union_areas = pred_areas + true_areas - intersect_areas
iou_scores = tf.truediv(intersect_areas, union_areas)
true_box_conf = iou_scores * y_true[..., 4]
### adjust class probabilities
true_box_class = tf.argmax(y_true[..., 5:], -1)
"""
Determine the masks
"""
### coordinate mask: simply the position of the ground truth boxes (the predictors)
coord_mask = tf.expand_dims(y_true[..., 4], axis=-1) * self.config['model']['coord_scale']
### confidence mask: penalize predictors + penalize boxes with low IOU
# penalize the confidence of the boxes, which have IOU with some ground truth box < 0.6
true_xy = self.true_boxes[..., 0:2]
true_wh = self.true_boxes[..., 2:4]
true_wh_half = true_wh / 2.
true_mins = true_xy - true_wh_half
true_maxes = true_xy + true_wh_half
pred_xy = tf.expand_dims(pred_box_xy, 4)
pred_wh = tf.expand_dims(pred_box_wh, 4)
pred_wh_half = pred_wh / 2.
pred_mins = pred_xy - pred_wh_half
pred_maxes = pred_xy + pred_wh_half
intersect_mins = tf.maximum(pred_mins, true_mins)
intersect_maxes = tf.minimum(pred_maxes, true_maxes)
intersect_wh = tf.maximum(intersect_maxes - intersect_mins, 0.)
intersect_areas = intersect_wh[..., 0] * intersect_wh[..., 1]
true_areas = true_wh[..., 0] * true_wh[..., 1]
pred_areas = pred_wh[..., 0] * pred_wh[..., 1]
union_areas = pred_areas + true_areas - intersect_areas
iou_scores = tf.truediv(intersect_areas, union_areas)
best_ious = tf.reduce_max(iou_scores, axis=4)
#conf_mask = conf_mask + tf.to_float(best_ious < 0.5) * (1 - y_true[..., 4]) * self.no_object_scale
# penalize the confidence of the boxes, which are responsible for corresponding ground truth box
#conf_mask = conf_mask + y_true[..., 4] * self.object_scale
conf_mask_neg = tf.to_float(best_ious < 0.4) * (1 - y_true[..., 4]) * self.config['model']['no_obj_scale']
conf_mask_pos = y_true[..., 4] * self.config['model']['obj_scale']
### class mask: simply the position of the ground truth boxes (the predictors)
class_mask = y_true[..., 4] * tf.gather(self.class_wt, true_box_class) * self.config['model']['class_scale']
"""
Warm-up training
"""
no_boxes_mask = tf.to_float(coord_mask < self.config['model']['coord_scale'] / 2.)
seen = tf.assign_add(seen, 1.)
true_box_xy, true_box_wh, coord_mask = tf.cond(tf.less(seen, self.config['train']['warmup_batches'] + 1),
lambda: [true_box_xy + (0.5 + cell_grid) * no_boxes_mask,
true_box_wh + tf.ones_like(true_box_wh) * \
np.reshape(self.config['model']['anchors'],
[1, 1, 1, self.nb_box, 2]) * no_boxes_mask,
tf.ones_like(coord_mask)],
lambda: [true_box_xy,
true_box_wh,
coord_mask])
"""
Finalize the loss
"""
nb_coord_box = tf.reduce_sum(tf.to_float(coord_mask > 0.0))
#nb_conf_box = tf.reduce_sum(tf.to_float(conf_mask > 0.0))
nb_conf_box_neg = tf.reduce_sum(tf.to_float(conf_mask_neg > 0.0))
nb_conf_box_pos = tf.subtract(tf.to_float(total_boxes), nb_conf_box_neg) #tf.reduce_sum(tf.to_float(conf_mask_pos > 0.0))
nb_class_box = tf.reduce_sum(tf.to_float(class_mask > 0.0))
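# Take square roots of the box dimensions so the w/h loss penalizes relative rather
# than absolute size errors, as in the original YOLO formulation.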
true_box_wh = tf.sqrt(true_box_wh)
pred_box_wh = tf.sqrt(pred_box_wh)
loss_xy = tf.reduce_sum(tf.square(true_box_xy - pred_box_xy) * coord_mask) / (nb_coord_box + 1e-6) / 2.
loss_wh = tf.reduce_sum(tf.square(true_box_wh - pred_box_wh) * coord_mask) / (nb_coord_box + 1e-6) / 2.
loss_conf_neg = tf.reduce_sum(tf.square(true_box_conf - pred_box_conf) * conf_mask_neg) / (nb_conf_box_neg + 1e-6) / 2.
loss_conf_pos = tf.reduce_sum(tf.square(true_box_conf - pred_box_conf) * conf_mask_pos) / (nb_conf_box_pos + 1e-6) / 2
loss_conf = loss_conf_neg + loss_conf_pos
loss_class = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=true_box_class, logits=pred_box_class)
loss_class = tf.reduce_sum(loss_class * class_mask) / (nb_class_box + 1e-6)
loss = tf.cond(tf.less(seen, self.config['train']['warmup_batches'] + 1),
lambda: loss_xy + loss_wh + loss_conf + loss_class + 10,
lambda: loss_xy + loss_wh + loss_conf + loss_class)
if self.config['train']['debug']:
nb_true_box = tf.reduce_sum(y_true[..., 4])
nb_pred_box = tf.reduce_sum(tf.to_float(true_box_conf > 0.3) * tf.to_float(pred_box_conf > 0.25))
current_recall = nb_pred_box / (nb_true_box + 1e-6)
total_recall = tf.assign_add(total_recall, current_recall)
total_loss = tf.assign_add(total_loss, loss)
#loss = tf.Print(loss, [m2], message='\nPred box conf \t', summarize=1000)
loss = tf.Print(loss, [loss_xy], message='\nLoss XY \t', summarize=1000)
loss = tf.Print(loss, [loss_wh], message='Loss WH \t', summarize=1000)
loss = tf.Print(loss, [nb_conf_box_neg], message='Nb Conf Box Negative \t', summarize=1000)
loss = tf.Print(loss, [nb_conf_box_pos], message='Nb Conf Box Positive \t', summarize=1000)
loss = tf.Print(loss, [loss_conf_neg], message='Loss Conf Negative \t', summarize=1000)
loss = tf.Print(loss, [loss_conf_pos], message='Loss Conf Positive \t', summarize=1000)
loss = tf.Print(loss, [loss_conf], message='Loss Conf \t', summarize=1000)
loss = tf.Print(loss, [loss_class], message='Loss Class \t', summarize=1000)
loss = tf.Print(loss, [loss], message='Total Loss \t', summarize=1000)
loss = tf.Print(loss, [total_loss / seen], message='Average Loss \t', summarize=1000)
#loss = tf.Print(loss, [y_true[..., 5:]], message='\nYtrue \t', summarize=1000)
#loss = tf.Print(loss, [true_box_class], message='True box class \t', summarize=1000)
#loss = tf.Print(loss, [pred_box_class], message=' Pred box class \t', summarize=1000)
loss = tf.Print(loss, [nb_pred_box], message='Number of pred boxes \t', summarize=1000)
loss = tf.Print(loss, [nb_true_box], message='Number of true boxes \t', summarize=1000)
loss = tf.Print(loss, [current_recall], message='Current Recall \t', summarize=1000)
loss = tf.Print(loss, [total_recall / seen], message='Average Recall \t', summarize=1000)
return loss
def train(self):
############################################
# Make train and validation generators
############################################
objectReader = GroundTruth(self.config)
objectReader.load_json()
data = objectReader.objects_all()
np.random.shuffle(data)
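# 80/20 split of the shuffled data into training and validation instances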
size = int(len(data) * 0.8)
train_instances, validation_instances = data[:size], data[size:]
np.random.shuffle(train_instances)
np.random.shuffle(validation_instances)
checkpoint = ModelCheckpoint('weights_coco.h5',
monitor='val_loss',
verbose=1,
save_best_only=True,
mode='auto',
period=1)
train_generator = BatchGenerator(train_instances,
self.config['generator_config'],
norm=self.normalize)
valid_generator = BatchGenerator(validation_instances,
self.config['generator_config'],
norm=self.normalize,
jitter=False)
############################################
# Compile the model
############################################
optimizer = Adam(lr=self.config['train']['learning_rate'], beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
self.model.compile(loss=self.custom_loss, optimizer=optimizer)
############################################
# Start the training process
############################################
self.model.fit_generator(generator=train_generator,
steps_per_epoch=len(train_generator),
epochs= self.config['train']['nb_epochs'],
verbose=2 if self.config['train']['debug'] else 1,
validation_data=valid_generator,
validation_steps=len(valid_generator),
workers=3,
callbacks=[checkpoint],
max_queue_size=16)
############################################
# Compute mAP on the validation set
############################################
average_precisions = self.evaluate(valid_generator)
# print evaluation
for label, average_precision in average_precisions.items():
print('car', '{:.4f}'.format(average_precision))
print('mAP: {:.4f}'.format(sum(average_precisions.values()) / len(average_precisions)))
def evaluate(self,
generator,
iou_threshold=0.3,
score_threshold=0.3,
max_detections=100,
save_path=None):
""" Evaluate a given dataset using a given model.
code originally from https://github.com/fizyr/keras-retinanet
# Arguments
generator : The generator that represents the dataset to evaluate.
model : The model to evaluate.
iou_threshold : The threshold used to consider when a detection is positive or negative.
score_threshold : The score confidence threshold to use for detections.
max_detections : The maximum number of detections to use per image.
save_path : The path to save images with visualized detections to.
# Returns
A dict mapping class names to mAP scores.
"""
# gather all detections and annotations
all_detections = [[None for i in range(generator.num_classes())] for j in range(generator.size())]
all_annotations = [[None for i in range(generator.num_classes())] for j in range(generator.size())]
for i in range(generator.size()):
raw_image = generator.load_image(i)
raw_height, raw_width, raw_channels = raw_image.shape
# make the boxes and the labels
pred_boxes = self.predict(raw_image)
score = np.array([box.score for box in pred_boxes])
pred_labels = np.array([box.label for box in pred_boxes])
if len(pred_boxes) > 0:
pred_boxes = np.array([[box.xmin * raw_width, box.ymin * raw_height, box.xmax * raw_width,
box.ymax * raw_height, box.score] for box in pred_boxes])
else:
pred_boxes = np.array([[]])
# sort the boxes and the labels according to scores
score_sort = np.argsort(-score)
pred_labels = pred_labels[score_sort]
pred_boxes = pred_boxes[score_sort]
# copy detections to all_detections
for label in range(generator.num_classes()):
all_detections[i][label] = pred_boxes[pred_labels == label, :]
annotations = generator.load_annotation(i)
# copy detections to all_annotations
for label in range(generator.num_classes()):
all_annotations[i][label] = annotations[annotations[:, 4] == label, :4].copy()
# compute mAP by comparing all detections and all annotations
average_precisions = {}
for label in range(generator.num_classes()):
false_positives = np.zeros((0,))
true_positives = np.zeros((0,))
scores = np.zeros((0,))
num_annotations = 0.0
for i in range(generator.size()):
detections = all_detections[i][label]
annotations = all_annotations[i][label]
num_annotations += annotations.shape[0]
detected_annotations = []
for d in detections:
scores = np.append(scores, d[4])
if annotations.shape[0] == 0:
false_positives = np.append(false_positives, 1)
true_positives = np.append(true_positives, 0)
continue
overlaps = compute_overlap(np.expand_dims(d, axis=0), annotations)
assigned_annotation = np.argmax(overlaps, axis=1)
max_overlap = overlaps[0, assigned_annotation]
if max_overlap >= iou_threshold and assigned_annotation not in detected_annotations:
false_positives = np.append(false_positives, 0)
true_positives = np.append(true_positives, 1)
detected_annotations.append(assigned_annotation)
else:
false_positives = np.append(false_positives, 1)
true_positives = np.append(true_positives, 0)
# no annotations -> AP for this class is 0 (is this correct?)
if num_annotations == 0:
average_precisions[label] = 0
continue
# sort by score
indices = np.argsort(-scores)
false_positives = false_positives[indices]
true_positives = true_positives[indices]
# compute false positives and true positives
false_positives = np.cumsum(false_positives)
true_positives = np.cumsum(true_positives)
# compute recall and precision
recall = true_positives / num_annotations
precision = true_positives / np.maximum(true_positives + false_positives, np.finfo(np.float64).eps)
# compute average precision
average_precision = compute_ap(recall, precision)
average_precisions[label] = average_precision
return average_precisions
def predict(self, image):
image_h, image_w, _ = image.shape
image = cv2.resize(image, (416, 416))
image = self.normalize(image)
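# OpenCV loads images as BGR; reverse the channel order to RGB before feeding the network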
input_image = image[:, :, ::-1]
input_image = np.expand_dims(input_image, 0)
dummy_array = np.zeros((1, 1, 1, 1, self.config['model']['max_obj'], 4))
netout = self.model.predict([input_image, dummy_array])[0]
boxes = decode_netout(netout, self.config['model']['anchors'], self.config['model']['nb_class'])
return boxes
|
from __future__ import absolute_import
from __future__ import unicode_literals
import os
from datetime import datetime
from django.test import SimpleTestCase
from corehq.apps.app_manager.tests.util import TestXmlMixin
from corehq.form_processor.interfaces.processor import FormProcessorInterface
from corehq.form_processor.utils import convert_xform_to_json
from phonelog.utils import SumoLogicLog
class TestSumologic(SimpleTestCase, TestXmlMixin):
root = os.path.dirname(__file__)
file_path = ('data',)
def setUp(self):
self.domain = 'test_domain'
self.received_on = datetime.utcnow()
def _get_xform(self, filename):
xform = FormProcessorInterface(self.domain).new_xform(convert_xform_to_json(self.get_xml(filename)))
xform.received_on = self.received_on
return xform
def test_log_error(self):
xform = self._get_xform('log_subreport')
compiled_log = SumoLogicLog(self.domain, xform).log_subreport()
expected_log = (
"[log_date=2018-02-13T15:19:30.622-05] [log_submission_date={received}] [log_type=maintenance] "
"[domain={domain}] [username=t1] [device_id=014915000230428] [app_version=260] "
"[cc_version=2.43] [msg=Succesfully submitted 1 device reports to server.]"
).format(domain=self.domain, received=self.received_on)
self.assertEqual(expected_log, compiled_log)
def test_usererror(self):
xform = self._get_xform('usererror_subreport')
compiled_log = SumoLogicLog(self.domain, xform).user_error_subreport()
expected_log = (
"[log_date=2018-02-22T17:21:21.201-05] [log_submission_date={received}] [log_type=error-config] "
"[domain={domain}] [username=t1] [device_id=014915000230428] [app_version=260] "
"[cc_version=2.43] [msg=This is a test user error] [app_id=73d5f08b9d55fe48602906a89672c214] "
"[user_id=37cc2dcdb1abf5c16bab0763f435e6b7] [session=session] [expr=an expression]"
).format(domain=self.domain, received=self.received_on)
self.assertEqual(expected_log, compiled_log)
def test_forceclose(self):
xform = self._get_xform('forceclose_subreport')
compiled_log = SumoLogicLog(self.domain, xform).force_close_subreport()
expected_log = (
"[log_date=2018-02-22T17:21:21.232-05] [log_submission_date={received}] [log_type=forceclose] "
"[domain={domain}] [username=t1] [device_id=014915000230428] [app_version=260] "
"[cc_version=2.43] "
"""[msg=java.lang.RuntimeException: Unable to start activity ComponentInfo{{org.commcare.dalvik.debug/org.commcare.activities.MenuActivity}}: java.lang.RuntimeException
at android.app.ActivityThread.performLaunchActivity(ActivityThread.java:2416)
at android.app.ActivityThread.handleLaunchActivity(ActivityThread.java:2476)
at android.app.ActivityThread.-wrap11(ActivityThread.java)
at android.app.ActivityThread$H.handleMessage(ActivityThread.java:1344)
at android.os.Handler.dispatchMessage(Handler.java:102)
at android.os.Looper.loop(Looper.java:148)
at android.app.ActivityThread.main(ActivityThread.java:5417)
at java.lang.reflect.Method.invoke(Native Method)
at com.android.internal.os.ZygoteInit$MethodAndArgsCaller.run(ZygoteInit.java:726)
at com.android.internal.os.ZygoteInit.main(ZygoteInit.java:616)
Caused by: java.lang.RuntimeException
at org.commcare.activities.MenuActivity.onCreateSessionSafe(MenuActivity.java:35)
at org.commcare.activities.SessionAwareHelper.onCreateHelper(SessionAwareHelper.java:21)
at org.commcare.activities.SessionAwareCommCareActivity.onCreate(SessionAwareCommCareActivity.java:20)
at android.app.Activity.performCreate(Activity.java:6251)
at android.app.Instrumentation.callActivityOnCreate(Instrumentation.java:1107)
at android.app.ActivityThread.performLaunchActivity(ActivityThread.java:2369)
... 9 more] [app_id=73d5f08b9d55fe48602906a89672c214] """
"[user_id=37cc2dcdb1abf5c16bab0763f435e6b7] [session=readable_session] [device_model=Nexus 7]"
).format(domain=self.domain, received=self.received_on)
self.assertEqual(expected_log, compiled_log)
|
<reponame>briis/pysecspy<filename>pysecspy/secspy_data.py
"""SecuritySpy Data."""
import datetime
import json
import logging
import time
from collections import OrderedDict
_LOGGER = logging.getLogger(__name__)
CAMERA_KEYS = {
"state",
"recordingSettings_A",
"recordingSettings_C",
"recordingSettings_M",
"recording_mode_a",
"recording_mode_c",
"recording_mode_m",
"isOnline",
"enabled",
"reason",
"lastMotion",
"isMotionDetected",
}
EVENT_SMART_DETECT_ZONE = "smart"
EVENT_MOTION = "motion"
EVENT_DISCONNECT = "disconnect"
EVENT_LENGTH_PRECISION = 3
MAX_SUPPORTED_CAMERAS = 256
MAX_EVENT_HISTORY_IN_STATE_MACHINE = MAX_SUPPORTED_CAMERAS * 2
PROCESSED_EVENT_EMPTY = {
"event_start": None,
"event_on": False,
"event_type": None,
"event_online": True,
"event_length": 0,
"event_object": [],
}
REASON_CODES = {"128": "Human", "256": "Vehicle"}
def process_camera(server_id, server_credential, camera, include_events):
"""Process the camera json."""
# If additional keys are checked, update CAMERA_KEYS
camera_id = camera["number"]
# Get if camera is online
online = camera["connected"] == "yes"
# Get if camera is enabled
enabled = camera.get("enabled")
# Get Recording Mode
if camera.get("recordingSettings_A") is not None:
recording_mode_a = camera.get("recordingSettings_A")
else:
recording_mode_a = camera["mode-a"] == "armed"
if camera.get("recordingSettings_C") is not None:
recording_mode_c = camera.get("recordingSettings_C")
else:
recording_mode_c = camera["mode-c"] == "armed"
if camera.get("recordingSettings_M") is not None:
recording_mode_m = camera.get("recordingSettings_M")
else:
recording_mode_m = camera["mode-m"] == "armed"
# Live Image
base_url = f"{server_credential['host']}:{server_credential['port']}"
base_stream = f"rtsp://{base_url}/stream?auth={server_credential['token']}"
live_stream = f"{base_stream}&cameraNum={camera_id}&codec=h264"
# Jpeg Image
image_width = str(camera["width"])
image_height = str(camera["height"])
latest_image = f"http://{base_url}/image?auth={server_credential['token']}&cameraNum={camera_id}&width={image_width}&height={image_height}&quality=75"
# PTZ
ptz_capabilities = camera.get("ptzcapabilities")
preset_list = []
if ptz_capabilities is not None and int(ptz_capabilities) > 0:
# Build a list of PTZ Presets
for preset in range(1, 10):
if camera.get(f"preset-name-{preset}") is not None:
preset_list.append(camera.get(f"preset-name-{preset}"))
# Other Settings
ip_address = "Local" if camera["devicetype"] == "Local" else camera.get("address")
camera_update = {
"name": str(camera["name"]),
"type": "camera",
"model": str(camera["devicename"]),
"online": online,
"enabled": enabled,
"recording_mode_a": recording_mode_a,
"recording_mode_c": recording_mode_c,
"recording_mode_m": recording_mode_m,
"ip_address": ip_address,
"live_stream": live_stream,
"latest_image": latest_image,
"image_width": image_width,
"image_height": image_height,
"fps": str(camera["current-fps"]),
"video_format": str(camera["video-format"]),
"ptz_capabilities": ptz_capabilities,
"ptz_presets": preset_list,
}
if server_id is not None:
camera_update["server_id"] = server_id
if include_events:
# Get the last time motion occurred
if camera.get("timesincelastmotion") is not None:
last_update = int(time.time()) + int(camera["timesincelastmotion"])
camera_update["last_motion"] = datetime.datetime.fromtimestamp(
last_update / 1000
).strftime("%Y-%m-%d %H:%M:%S")
else:
camera_update["last_motion"] = None
return camera_update
def camera_update_from_ws_frames(
state_machine, server_credential, action_json, data_json
):
"""Convert a websocket frame to internal format."""
if action_json["modelKey"] != "camera":
raise ValueError("Model key must be camera")
camera_id = action_json["id"]
if not state_machine.has_device(camera_id):
_LOGGER.debug("Skipping non-adopted camera: %s", data_json)
return None, None
camera = state_machine.update(camera_id, data_json)
if data_json.keys().isdisjoint(CAMERA_KEYS):
_LOGGER.debug("Skipping camera data: %s", data_json)
return None, None
_LOGGER.debug("Processing camera: %s", camera)
processed_camera = process_camera(None, server_credential, camera, True)
return camera_id, processed_camera
def event_from_ws_frames(state_machine, action_json, data_json):
"""Convert a websocket frame to internal format.
20140927091955 1 3 ARM_C
20190927091955 2 3 ARM_M
20190927092026 3 3 MOTION 760 423 320 296
20190927092026 4 3 CLASSIFY HUMAN 99
20190927092026 5 3 TRIGGER_M 9
20190927092036 6 3 MOTION 0 432 260 198
20190927092036 7 3 CLASSIFY HUMAN 5 VEHICLE 95
20190927092040 8 X NULL
20190927092050 9 3 FILE /Volumes/VolName/Cam/2019-07-26/26-07-2019 15-52-00 C Cam.m4v
20190927092055 10 3 DISARM_M
20190927092056 11 3 OFFLINE
20210519172650 24 0 MOTION_END
"""
if action_json["modelKey"] != "event":
raise ValueError("Model key must be event")
action = action_json["action"]
event_id = action_json["id"]
if action == "add":
device_id = data_json.get("camera")
if device_id is None:
return None, None
state_machine.add(event_id, data_json)
event = data_json
elif action == "update":
event = state_machine.update(event_id, data_json)
if not event:
return None, None
device_id = event.get("camera")
else:
raise ValueError("The action must be add or update")
_LOGGER.debug("Processing event: %s", event)
processed_event = process_event(event)
return device_id, processed_event
def camera_event_from_ws_frames(state_machine, action_json, data_json):
"""Create processed events from the camera model."""
if "isMotionDetected" not in data_json and "timesincelastmotion" not in data_json and "isOnline" not in data_json:
return None
camera_id = action_json["id"]
start_time = None
event_length = 0
event_on = False
is_online = data_json.get("isOnline")
time_since = data_json.get("timesincelastmotion")
last_motion = (int(time.time()) + int(time_since)) if time_since is not None else None
is_motion_detected = data_json.get("isMotionDetected")
if is_motion_detected is None:
start_time = state_machine.get_motion_detected_time(camera_id)
event_on = start_time is not None
else:
if is_motion_detected:
event_on = True
start_time = last_motion
state_machine.set_motion_detected_time(camera_id, start_time)
else:
start_time = state_machine.get_motion_detected_time(camera_id)
state_machine.set_motion_detected_time(camera_id, None)
if last_motion is None:
last_motion = round(time.time() * 1000)
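# Event length in seconds, rounded to EVENT_LENGTH_PRECISION decimal places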
if start_time is not None and last_motion is not None:
event_length = round(
(float(last_motion) - float(start_time)) / 1000, EVENT_LENGTH_PRECISION
)
return {
"event_on": event_on,
"event_type": "motion",
"event_start": start_time,
"event_length": event_length,
"event_score": 0,
"event_online": is_online,
}
def process_event(event):
"""Convert an event to our format."""
start = event.get("start")
end = event.get("end")
event_type = event.get("type")
event_reason = event.get("reason")
event_online = event.get("isOnline")
event_length = 0
start_time = None
if start:
start_time = _process_timestamp(start)
if end:
event_length = round(
(float(end) / 1000) - (float(start) / 1000), EVENT_LENGTH_PRECISION
)
event_object = (
"None" if event_reason not in REASON_CODES else REASON_CODES.get(event_reason)
)
processed_event = {
"event_on": False,
"event_type": event_type,
"event_start": start_time,
"event_length": event_length,
"event_object": event_object,
"event_online": event_online,
}
if event_type in (EVENT_MOTION, EVENT_SMART_DETECT_ZONE):
processed_event["last_motion"] = start_time
if not end:
processed_event["event_on"] = True
return processed_event
def _process_timestamp(time_stamp):
return datetime.datetime.strptime(time_stamp, "%Y%m%d%H%M%S").strftime(
"%Y-%m-%d %H:%M:%S"
)
class SecspyDeviceStateMachine:
"""A simple state machine for events."""
def __init__(self):
"""Init the state machine."""
self._devices = {}
self._motion_detected_time = {}
def has_device(self, device_id):
"""Check to see if a device id is in the state machine."""
return device_id in self._devices
def update(self, device_id, new_json):
"""Update an device in the state machine."""
self._devices.setdefault(device_id, {}).update(new_json)
return self._devices[device_id]
def set_motion_detected_time(self, device_id, timestamp):
"""Set device motion start detected time."""
self._motion_detected_time[device_id] = timestamp
def get_motion_detected_time(self, device_id):
"""Get device motion start detected time."""
return self._motion_detected_time.get(device_id)
class SecspyEventStateMachine:
"""A simple state machine for cameras."""
def __init__(self):
"""Init the state machine."""
self._events = FixSizeOrderedDict(max_size=MAX_EVENT_HISTORY_IN_STATE_MACHINE)
def add(self, event_id, event_json):
"""Add an event to the state machine."""
self._events[event_id] = event_json
def update(self, event_id, new_event_json):
"""Update an event in the state machine and return the merged event."""
event_json = self._events.get(event_id)
if event_json is None:
return None
event_json.update(new_event_json)
return event_json
class FixSizeOrderedDict(OrderedDict):
"""A fixed size ordered dict."""
def __init__(self, *args, max_size=0, **kwargs):
"""Create the FixSizeOrderedDict."""
self._max_size = max_size
super().__init__(*args, **kwargs)
def __setitem__(self, key, value):
"""Set an update up to the max size."""
OrderedDict.__setitem__(self, key, value)
if self._max_size > 0:
if len(self) > self._max_size:
self.popitem(False)
|
import pytz
from bs4 import BeautifulSoup
from datetime import datetime
import requests
import os
from github import Github
# local import
from dotenv import load_dotenv
def get_github_repo(access_token, repo_name):
"""
get github repository info
:param access_token: Personal Access Token from Github
:param repo_name: repository name
return repository object
"""
g = Github(access_token)
repository = g.get_user().get_repo(repo_name)
return repository
def get_repo_specific_file_content(repository, file_path):
"""
get the specific file from github repository
:param repository: repository object
:param file_path: file path
return raw content of the decoded target file
"""
target_file = repository.get_contents("source/_posts" + file_path)
raw_content = target_file.decoded_content
return raw_content.decode('utf-8')
def preprocess(content, target_path):
"""
preprocess the raw content
:param content: the decoded target file
:param target_path: path of the markdown post (relative to source/_posts)
return content_head(dict), content_body(str)
"""
def rindex(lst, val):
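# return the index of the last occurrence of val in lst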
lst.reverse()
i = lst.index(val)
lst.reverse()
return len(lst) - i - 1
# separate head and body part
content_head_row \
= content[0:content.rfind("---") + 3].replace("---", "").strip().split("\n")
content_body_split_start = rindex(content.split("\n"), "---")
content_body_row = content.split("\n")[content_body_split_start + 1:]
# head preprocessing
content_head = {}
for head in content_head_row:
colon = head.find(':')
key = head[:colon]
value = head[colon + 1:].replace('"', '').replace("\u200d", '').strip()
if key == 'img':
value = f"https://github.com/ruby-kim/ruby-kim.github.io/blob/master{value}?raw=true"
content_head[key] = value
# body preprocessing
content_body = []
target_path_name = '/'.join(target_path.split("/")[1:]).replace(".md", "")
for body in content_body_row:
if '![]' in body and '.png)' in body:
uploaded_date = content_head["date"].split()[0].replace('-', '/')
# extract the bare image filename from the markdown image tag
img_filename = body.replace("![](", "").replace(")", "")
body = body.replace(img_filename, f"https://github.com/ruby-kim/ruby-kim.github.io/blob/master/"
f"{uploaded_date + '/' + target_path_name + '/' + img_filename}?raw=true")
content_body.append(body)
return content_head, '\n'.join(content_body)
class GithubBlog:
def __init__(self, blogUrl):
self.url = blogUrl
self.xml = blogUrl + "/atom.xml"
self.contents = []
self.curTime = datetime.now(pytz.utc).isoformat()
self.md_head = {}
self.md_body = ""
def parsing_md(self, target_path):
# local params
load_dotenv()
repo = get_github_repo(os.environ.get('MY_GITHUB_BLOG_BACKUP'), 'koBlog_backup')
# # Github action params
# repo = get_github_repo(os.environ['MY_GITHUB_BLOG_BACKUP'], 'koBlog_backup')
file = get_repo_specific_file_content(repo, target_path)
self.md_head, self.md_body = preprocess(file, target_path)
return self.md_head, self.md_body
def parsing_xml(self):
html = requests.get(self.xml)
soup = BeautifulSoup(html.text, "html.parser")
for elem in soup.find_all("entry"):
article = {
"title": elem.find("title").get_text(),
"link": elem.find("link").get("href"),
"published": elem.find("published").get_text("published"),
"updated": elem.find("updated").get_text("updated"),
"category": elem.find("category").get("term").replace("\u200d", ""),
"tags": [c.get("term")
for idx, c in enumerate(elem.find_all("category")) if idx != 0],
}
self.contents.append(article)
|
<reponame>theshiv303/kegbot-server
from builtins import str
from builtins import object
from pykeg.backend import get_kegbot_backend
from pykeg.core import models
from pykeg import config
from pykeg.core.util import get_version_object
from pykeg.core.util import set_current_request
from pykeg.core.util import must_upgrade
from pykeg.util import dbstatus
from pykeg.web.api.util import is_api_request
from pykeg.plugin import util as plugin_util
from django.conf import settings
from django.http import HttpResponse
from django.shortcuts import render
from django.utils import timezone
import logging
logger = logging.getLogger(__name__)
# Requests are always allowed for these path prefixes.
PRIVACY_EXEMPT_PATHS = (
"/account/activate",
"/accounts/",
"/admin/",
"/media/",
"/setup/",
"/sso/login",
"/sso/logout",
)
PRIVACY_EXEMPT_PATHS += getattr(settings, "KEGBOT_EXTRA_PRIVACY_EXEMPT_PATHS", ())
def _path_allowed(path, kbsite):
for p in PRIVACY_EXEMPT_PATHS:
if path.startswith(p):
return True
return False
class CurrentRequestMiddleware(object):
"""Set/clear the current request."""
def __init__(self, get_response):
self.get_response = get_response
def __call__(self, request):
set_current_request(request)
try:
response = self.get_response(request)
finally:
set_current_request(None)
return response
class IsSetupMiddleware(object):
"""Adds `.need_setup`, `.need_upgrade`, and `.kbsite` to the request."""
def __init__(self, get_response):
self.get_response = get_response
def __call__(self, request):
request.need_setup = False
request.need_upgrade = False
request.kbsite = None
# Skip all checks if we're in the setup wizard.
if request.path.startswith("/setup"):
request.session = {}
request.session["_auth_user_backend"] = None
return self.get_response(request)
# First confirm the database is working.
try:
dbstatus.check_db_status()
except dbstatus.DatabaseNotInitialized:
logger.warning("Database is not initialized, sending to setup ...")
request.need_setup = True
request.need_upgrade = True
except dbstatus.NeedMigration:
logger.warning("Database needs migration, sending to setup ...")
request.need_upgrade = True
# If the database looks good, check the data.
if not request.need_setup:
installed_version = models.KegbotSite.get_installed_version()
if installed_version is None:
logger.warning("Kegbot not installed, sending to setup ...")
request.need_setup = True
else:
request.installed_version_string = str(installed_version)
if must_upgrade(installed_version, get_version_object()):
logger.warning("Kegbot upgrade required, sending to setup ...")
request.need_upgrade = True
# Lastly verify the kbsite record.
if not request.need_setup:
request.kbsite = models.KegbotSite.objects.get(name="default")
if not request.kbsite.is_setup:
logger.warning("Setup incomplete, sending to setup ...")
request.need_setup = True
return self.get_response(request)
def process_view(self, request, view_func, view_args, view_kwargs):
if is_api_request(request):
# API endpoints handle "setup required" differently.
return None
if request.need_setup:
return self._setup_required(request)
elif request.need_upgrade:
return self._upgrade_required(request)
return None
def _setup_required(self, request):
return render(request, "setup_wizard/setup_required.html", status=403)
def _upgrade_required(self, request):
context = {
"installed_version": getattr(request, "installed_version_string", None),
}
return render(request, "setup_wizard/upgrade_required.html", context=context, status=403)
class KegbotSiteMiddleware(object):
def __init__(self, get_response):
self.get_response = get_response
def __call__(self, request):
if request.kbsite and not request.need_setup:
timezone.activate(request.kbsite.timezone)
request.plugins = dict(
(p.get_short_name(), p) for p in list(plugin_util.get_plugins().values())
)
request.backend = get_kegbot_backend()
return self.get_response(request)
class PrivacyMiddleware(object):
"""Enforces site privacy settings.
Must be installed after ApiRequestMiddleware (in request order) to
access is_kb_api_request attribute.
"""
def __init__(self, get_response):
self.get_response = get_response
def __call__(self, request):
return self.get_response(request)
def process_view(self, request, view_func, view_args, view_kwargs):
if not hasattr(request, "kbsite"):
return None
elif _path_allowed(request.path, request.kbsite):
return None
elif request.is_kb_api_request:
# api.middleware will enforce access requirements.
return None
privacy = request.kbsite.privacy
if privacy == "public":
return None
elif privacy == "staff":
if not request.user.is_staff:
return render(request, "kegweb/staff_only.html", status=401)
return None
elif privacy == "members":
if not request.user.is_authenticated or not request.user.is_active:
return render(request, "kegweb/members_only.html", status=401)
return None
return HttpResponse(
"Server misconfigured, unknown privacy setting:%s" % privacy, status=500
)
|
import thorpy
import parameters
def make_alert(title, text, font_size=None, font_color=None, ok_text="Ok"):
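# Build a modal box containing a red title, a body text and a single Ok button.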
from thorpy.miscgui.launchers.launcher import make_ok_box
e_title = thorpy.make_text(title, thorpy.style.TITLE_FONT_SIZE, (255,0,0))
e_text = thorpy.make_text(text, font_size, font_color)
box = make_ok_box([e_title,e_text], ok_text=ok_text)
return box
##def make_choice(title, text, font_size=None, font_color=None, ok_text="Ok",
## cancel_text="Cancel"):
## from thorpy.miscgui.launchers.launcher import make_ok_cancel_box
## e_title = thorpy.make_text(title, thorpy.style.TITLE_FONT_SIZE, (255,0,0))
## e_text = thorpy.make_text(text, font_size, font_color)
## box = make_ok_cancel_box([e_title,e_text], ok_text=ok_text,
## cancel_text=cancel_text)
## return box
def launch_blocking_alert(title, text, parent=None, font_size=None, font_color=None,
ok_text="Ok", transp=True):
if font_size is None: font_size = thorpy.style.FONT_SIZE
if font_color is None: font_color = thorpy.style.FONT_COLOR
box_alert = make_alert(title, text, font_size, font_color, ok_text)
box_alert.center()
if transp:
color_transp = tuple(list(thorpy.style.DEF_COLOR)[:3]+[parameters.TRANSP_MENU])
box_alert.set_main_color(color_transp)
from thorpy.menus.tickedmenu import TickedMenu
m = TickedMenu(box_alert)
box_alert.get_elements_by_text(ok_text)[0].user_func = thorpy.functions.quit_menu_func
box_alert.get_elements_by_text(ok_text)[0].user_params = {}
m.play()
box_alert.unblit()
if parent:
parent.partial_blit(None, box_alert.get_fus_rect())
box_alert.update()
##def launch_blocking_choice(title, text, parent=None, font_size=None, font_color=None,
## ok_text="Ok", cancel_text="Cancel", transp=True):
## if font_size is None: font_size = thorpy.style.FONT_SIZE
## if font_color is None: font_color = thorpy.style.FONT_COLOR
## box_alert = make_choice(title, text, font_size, font_color, ok_text,
## cancel_text)
## box_alert.center()
## if transp:
## color_transp = tuple(list(thorpy.style.DEF_COLOR)[:3]+[parameters.TRANSP_MENU])
## box_alert.set_main_color(color_transp)
## from thorpy.menus.tickedmenu import TickedMenu
## m = TickedMenu(box_alert)
## box_alert.get_elements_by_text(cancel_text)[0].user_func = thorpy.functions.quit_menu_func
## box_alert.get_elements_by_text(cancel_text)[0].user_params = {}
## #
## box_alert.get_elements_by_text(ok_text)[0].user_func = thorpy.functions.quit_menu_func
## box_alert.get_elements_by_text(ok_text)[0].user_params = {}
## m.play()
## box_alert.unblit()
## if parent:
## parent.partial_blit(None, box_alert.get_fus_rect())
## box_alert.update()
def launch_blocking_choices(text, choices, parent=None, title_fontsize=None,
title_fontcolor=None):
"""choices are tuple (text,func)"""
if title_fontsize is None: title_fontsize = thorpy.style.FONT_SIZE
if title_fontcolor is None: title_fontcolor = thorpy.style.FONT_COLOR
elements = [thorpy.make_button(t,f) for t,f in choices]
ghost = thorpy.make_group(elements)
e_text = thorpy.make_text(text, title_fontsize, title_fontcolor)
box = thorpy.Box.make([e_text, ghost])
box.center()
from thorpy.miscgui.reaction import ConstantReaction
for e in elements:
reac = ConstantReaction(thorpy.constants.THORPY_EVENT,
thorpy.functions.quit_menu_func,
{"id":thorpy.constants.EVENT_UNPRESS,
"el":e})
box.add_reaction(reac)
from thorpy.menus.tickedmenu import TickedMenu
m = TickedMenu(box)
m.play()
box.unblit()
if parent:
parent.partial_blit(None, box.get_fus_rect())
box.update()
|
<gh_stars>0
'''
'''
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
Test.Summary = '''
Slice selfhealing test
'''
## Test description:
# Preload the cache with the entire asset to be range requested.
# Reload remap rule with slice plugin
# Request content through the slice plugin
Test.SkipUnless(
Condition.PluginExists('slice.so'),
Condition.PluginExists('cache_range_requests.so'),
Condition.PluginExists('xdebug.so'),
)
Test.ContinueOnFail = False
# configure origin server
server = Test.MakeOriginServer("server", lookup_key="{%uuid}")
# Define ATS and configure
ts = Test.MakeATSProcess("ts", command="traffic_server")
# default root
req_header_chk = {"headers":
"GET / HTTP/1.1\r\n" +
"Host: www.example.com\r\n" +
"uuid: none\r\n" +
"\r\n",
"timestamp": "1469733493.993",
"body": "",
}
res_header_chk = {"headers":
"HTTP/1.1 200 OK\r\n" +
"Connection: close\r\n" +
"\r\n",
"timestamp": "1469733493.993",
"body": "",
}
server.addResponse("sessionlog.json", req_header_chk, res_header_chk)
# set up slice plugin with remap host into cache_range_requests
ts.Disk.remap_config.AddLines([
'map http://slice/ http://127.0.0.1:{}/'.format(server.Variables.Port) +
' @plugin=slice.so @pparam=--blockbytes-test=3 @pparam=--remap-host=cache_range_requests',
'map http://cache_range_requests/ http://127.0.0.1:{}/'.format(server.Variables.Port) +
' @plugin=cache_range_requests.so @pparam=--consider-ims',
])
ts.Disk.plugin_config.AddLine('xdebug.so')
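# xdebug.so lets the curl commands below request diagnostic headers via "x-debug: x-cache"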
ts.Disk.records_config.update({
'proxy.config.diags.debug.enabled': 0,
'proxy.config.diags.debug.tags': 'cache_range_requests|slice',
})
curl_and_args = 'curl -s -D /dev/stdout -o /dev/stderr -x localhost:{}'.format(ts.Variables.port) + ' -H "x-debug: x-cache"'
# Test case: 2nd slice out of date (refetch and continue)
req_header_2ndold1 = {"headers":
"GET /second HTTP/1.1\r\n" +
"Host: www.example.com\r\n" +
"uuid: etagold-1\r\n" +
"Range: bytes=3-5\r\n"
"\r\n",
"timestamp": "1469733493.993",
"body": "",
}
res_header_2ndold1 = {"headers":
"HTTP/1.1 206 Partial Content\r\n" +
"Accept-Ranges: bytes\r\n" +
"Cache-Control: max-age=5000\r\n" +
"Connection: close\r\n" +
"Content-Range: bytes 3-4/5\r\n" +
'Etag: "etagold"\r\n' +
"\r\n",
"timestamp": "1469733493.993",
"body": "aa"
}
server.addResponse("sessionlog.json", req_header_2ndold1, res_header_2ndold1)
req_header_2ndnew0 = {"headers":
"GET /second HTTP/1.1\r\n" +
"Host: www.example.com\r\n" +
"uuid: etagnew-0\r\n" +
"Range: bytes=0-2\r\n"
"\r\n",
"timestamp": "1469733493.993",
"body": "",
}
res_header_2ndnew0 = {"headers":
"HTTP/1.1 206 Partial Content\r\n" +
"Accept-Ranges: bytes\r\n" +
"Cache-Control: max-age=5000\r\n" +
"Connection: close\r\n" +
"Content-Range: bytes 0-2/5\r\n" +
'Etag: "etagnew"\r\n' +
"\r\n",
"timestamp": "1469733493.993",
"body": "bbb"
}
server.addResponse("sessionlog.json", req_header_2ndnew0, res_header_2ndnew0)
req_header_2ndnew1 = {"headers":
"GET /second HTTP/1.1\r\n" +
"Host: www.example.com\r\n" +
"uuid: etagnew-1\r\n" +
"Range: bytes=3-5\r\n"
"\r\n",
"timestamp": "1469733493.993",
"body": "",
}
res_header_2ndnew1 = {"headers":
"HTTP/1.1 206 Partial Content\r\n" +
"Accept-Ranges: bytes\r\n" +
"Cache-Control: max-age=5000\r\n" +
"Connection: close\r\n" +
"Content-Range: bytes 3-4/5\r\n" +
'Etag: "etagnew"\r\n' +
"\r\n",
"timestamp": "1469733493.993",
"body": "bb"
}
server.addResponse("sessionlog.json", req_header_2ndnew1, res_header_2ndnew1)
# 0 Test - Preload reference etagnew-0
tr = Test.AddTestRun("Preload reference etagnew-0")
ps = tr.Processes.Default
ps.StartBefore(server, ready=When.PortOpen(server.Variables.Port))
ps.StartBefore(Test.Processes.ts)
ps.Command = curl_and_args + ' http://cache_range_requests/second -r 0-2 -H "uuid: etagnew-0"'
ps.ReturnCode = 0
ps.Streams.stderr = "gold/bbb.gold"
ps.Streams.stdout.Content = Testers.ContainsExpression("etagnew", "expected etagnew")
tr.StillRunningAfter = ts
# 1 Test - Preload reference etagold-1
tr = Test.AddTestRun("Preload slice etagold-1")
ps = tr.Processes.Default
ps.Command = curl_and_args + ' http://cache_range_requests/second -r 3-5 -H "uuid: etagold-1"'
ps.ReturnCode = 0
ps.Streams.stderr = "gold/aa.gold"
ps.Streams.stdout.Content = Testers.ContainsExpression("etagold", "expected etagold")
tr.StillRunningAfter = ts
# 2 Test - Request second slice via slice plugin, with instructions to fetch new 2nd slice
tr = Test.AddTestRun("Request 2nd slice (expect refetch)")
ps = tr.Processes.Default
ps.Command = curl_and_args + ' http://slice/second -r 3- -H "uuid: etagnew-1"'
ps.ReturnCode = 0
ps.Streams.stderr = "gold/bb.gold"
ps.Streams.stdout.Content = Testers.ContainsExpression("etagnew", "expected etagnew")
tr.StillRunningAfter = ts
# 3 Test - Request fully healed asset via slice plugin
tr = Test.AddTestRun("Request full healed slice")
ps = tr.Processes.Default
ps.Command = curl_and_args + ' http://slice/second'
ps.ReturnCode = 0
ps.Streams.stderr.Content = Testers.ContainsExpression("bbbbb", "expected bbbbb content")
ps.Streams.stdout.Content = Testers.ContainsExpression("etagnew", "expected etagnew")
tr.StillRunningAfter = ts
# Test case: reference slice out of date (abort connection, heal reference)
req_header_refold0 = {"headers":
"GET /reference HTTP/1.1\r\n" +
"Host: www.example.com\r\n" +
"uuid: etagold-0\r\n" +
"Range: bytes=0-2\r\n"
"\r\n",
"timestamp": "1469733493.993",
"body": "",
}
res_header_refold0 = {"headers":
"HTTP/1.1 206 Partial Content\r\n" +
"Accept-Ranges: bytes\r\n" +
"Cache-Control: max-age=5000\r\n" +
"Connection: close\r\n" +
"Content-Range: bytes 0-2/5\r\n" +
'Etag: "etagold"\r\n' +
"\r\n",
"timestamp": "1469733493.993",
"body": "aaa"
}
server.addResponse("sessionlog.json", req_header_refold0, res_header_refold0)
req_header_refnew0 = {"headers":
"GET /reference HTTP/1.1\r\n" +
"Host: www.example.com\r\n" +
"uuid: etagnew-0\r\n" +
"Range: bytes=0-2\r\n"
"\r\n",
"timestamp": "1469733493.993",
"body": "",
}
res_header_refnew0 = {"headers":
"HTTP/1.1 206 Partial Content\r\n" +
"Accept-Ranges: bytes\r\n" +
"Cache-Control: max-age=5000\r\n" +
"Connection: close\r\n" +
"Content-Range: bytes 0-2/5\r\n" +
'Etag: "etagnew"\r\n' +
"\r\n",
"timestamp": "1469733493.993",
"body": "bbb"
}
server.addResponse("sessionlog.json", req_header_refnew0, res_header_refnew0)
req_header_refnew1 = {"headers":
"GET /reference HTTP/1.1\r\n" +
"Host: www.example.com\r\n" +
"uuid: etagnew-1\r\n" +
"Range: bytes=3-5\r\n"
"\r\n",
"timestamp": "1469733493.993",
"body": "",
}
res_header_refnew1 = {"headers":
"HTTP/1.1 206 Partial Content\r\n" +
"Accept-Ranges: bytes\r\n" +
"Cache-Control: max-age=5000\r\n" +
"Connection: close\r\n" +
"Content-Range: bytes 3-4/5\r\n" +
'Etag: "etagnew"\r\n' +
"\r\n",
"timestamp": "1469733493.993",
"body": "bb"
}
server.addResponse("sessionlog.json", req_header_refnew1, res_header_refnew1)
# 4 Test - Preload reference etagold-0
tr = Test.AddTestRun("Preload reference etagold-0")
ps = tr.Processes.Default
ps.Command = curl_and_args + ' http://cache_range_requests/reference -r 0-2 -H "uuid: etagold-0"'
ps.ReturnCode = 0
ps.Streams.stderr = "gold/aaa.gold"
ps.Streams.stdout.Content = Testers.ContainsExpression("etagold", "expected etagold")
tr.StillRunningAfter = ts
# 5 Test - Preload reference etagnew-1
tr = Test.AddTestRun("Preload slice etagnew-1")
ps = tr.Processes.Default
ps.Command = curl_and_args + ' http://cache_range_requests/reference -r 3-5 -H "uuid: etagnew-1"'
ps.ReturnCode = 0
ps.Streams.stderr = "gold/bb.gold"
ps.Streams.stdout.Content = Testers.ContainsExpression("etagnew", "expected etagnew")
tr.StillRunningAfter = ts
# 6 Test - Request reference slice via slice plugin, with instructions to fetch new 2nd slice -- this will send the old header, but abort and refetch it
tr = Test.AddTestRun("Request 2nd slice (expect abort)")
ps = tr.Processes.Default
ps.Command = curl_and_args + ' http://slice/reference -r 3- -H "uuid: etagnew-0" -w "SENT: \'%{size_download}\'"'
# ps.ReturnCode = 0 # curl will fail here
ps.Streams.stdout.Content = Testers.ContainsExpression("etagold", "expected etagold")
ps.Streams.stdout.Content += Testers.ContainsExpression("SENT: '0'", "expected empty payload")
tr.StillRunningAfter = ts
# 7 Test - Request full healed asset via slice plugin
tr = Test.AddTestRun("Request full healed slice")
ps = tr.Processes.Default
ps.Command = curl_and_args + ' http://slice/reference'
ps.ReturnCode = 0
ps.Streams.stderr.Content = Testers.ContainsExpression("bbbbb", "expected bbbbb content")
ps.Streams.stdout.Content = Testers.ContainsExpression("etagnew", "expected etagnew")
tr.StillRunningAfter = ts
# Request results in 200, not 206 (server not support range requests)
req_header_200 = {"headers":
"GET /code200 HTTP/1.1\r\n" +
"Host: www.example.com\r\n" +
"uuid: code200\r\n" +
"Range: bytes=3-5\r\n"
"\r\n",
"timestamp": "1469733493.993",
"body": "",
}
res_header_200 = {"headers":
"HTTP/1.1 200 OK\r\n" +
"Cache-Control: max-age=5000\r\n" +
"Connection: close\r\n" +
'Etag: "etag"\r\n' +
"\r\n",
"timestamp": "1469733493.993",
"body": "ccccc"
}
server.addResponse("sessionlog.json", req_header_200, res_header_200)
# 8 test - Request through slice but get a 200 back
tr = Test.AddTestRun("Request gets a 200")
ps = tr.Processes.Default
ps.Command = curl_and_args + ' http://slice/code200 -r 3-5 -H "uuid: code200"'
ps.ReturnCode = 0
ps.Streams.stderr.Content = Testers.ContainsExpression("ccccc", "expected full ccccc content")
ps.Streams.stdout.Content = Testers.ContainsExpression("200 OK", "expected 200")
tr.StillRunningAfter = ts
# Test for asset gone
# Preload
req_header_assetgone0 = {"headers":
"GET /assetgone HTTP/1.1\r\n" +
"Host: www.example.com\r\n" +
"uuid: assetgone-0\r\n" +
"Range: bytes=0-2\r\n"
"\r\n",
"timestamp": "1469733493.993",
"body": "",
}
res_header_assetgone0 = {"headers":
"HTTP/1.1 206 Partial Content\r\n" +
"Accept-Ranges: bytes\r\n" +
"Cache-Control: max-age=5000\r\n" +
"Connection: close\r\n" +
"Content-Range: bytes 0-2/5\r\n" +
'Etag: "etag"\r\n' +
"\r\n",
"timestamp": "1469733493.993",
"body": "aaa"
}
server.addResponse("sessionlog.json", req_header_assetgone0, res_header_assetgone0)
# 9 test - Preload reference slice
tr = Test.AddTestRun("Preload reference assetgone-0")
ps = tr.Processes.Default
ps.Command = curl_and_args + ' http://slice/assetgone -r 0-2 -H "uuid: assetgone-0"'
ps.ReturnCode = 0
ps.Streams.stderr = "gold/aaa.gold"
ps.Streams.stdout.Content = Testers.ContainsExpression("etag", "expected etag")
tr.StillRunningAfter = ts
# 10 test - Fetch full asset, 2nd slice should trigger 404 response
tr = Test.AddTestRun("Fetch full asset")
ps = tr.Processes.Default
ps.Command = curl_and_args + ' http://slice/assetgone'
#ps.ReturnCode = 0 # curl will return non zero
ps.Streams.stderr = "gold/aaa.gold"
ps.Streams.stdout.Content = Testers.ContainsExpression("etag", "expected etag")
ps.Streams.stdout.Content += Testers.ContainsExpression("Content-Length: 5", "expected header of content-length 5")
tr.StillRunningAfter = ts
# 11 test - Fetch full asset again, full blown 404
tr = Test.AddTestRun("Fetch full asset, 404")
ps = tr.Processes.Default
ps.Command = curl_and_args + ' http://slice/assetgone'
#ps.ReturnCode = 0 # curl will return non zero
ps.Streams.stdout.Content = Testers.ContainsExpression("404 Not Found", "Expected 404")
tr.StillRunningAfter = ts
# Overriding the built-in ERROR check since we expect to see logSliceErrors
ts.Disk.diags_log.Content = Testers.ContainsExpression("logSliceError", "logSliceErrors generated")
|
<filename>fish/fishbase.py
import os
import types
UNIX_CREDENTIALS_FILE = u'.fluidDBcredentials'
UNIX_USER_CREDENTIALS_FILE = u'.fluidDBcredentials.%s'
CRED_FILE_VAR = 'FISH_CREDENTIALS_FILE'
WIN_CRED_FILE = 'c:\\fish\\credentials.txt'
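# Extensions whose payloads are treated as textual; formatted_tag_value() checks
# a tag's MIME type against these values before deciding how to render it.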
TEXTUAL_MIMES = {
'txt': None,
'csv': 'text/plain',
'html': 'text/html',
'xml': 'text/xml',
'htm': 'text/html',
'css': 'text/css',
'js': 'text/javascript',
'vcf': 'text/vcard',
'plain': 'text/plain',
'svg': 'image/svg+xml',
'ps': 'application/postscript',
'eps': 'application/postscript',
'rss': 'application/rss+xml',
'atom': 'application/atom+xml',
'xhtml': 'application/xhtml+xml',
}
toStr = unicode
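# Resolve the credentials file for the current platform: a dotfile in $HOME on
# POSIX (optionally per-user), otherwise the FISH_CREDENTIALS_FILE environment
# variable or the default Windows path.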
def get_credentials_file(username=None):
if os.name == 'posix':
homeDir = os.path.expanduser('~')
file = ((UNIX_USER_CREDENTIALS_FILE % username) if username
else UNIX_CREDENTIALS_FILE)
return os.path.join(homeDir, file)
elif os.name:
e = os.environ
return e[CRED_FILE_VAR] if CRED_FILE_VAR in e else WIN_CRED_FILE
else:
return None
def get_user_file(file, username):
if os.name == 'posix':
return os.path.join(os.path.expanduser(u'~'), file['unix'] % username)
elif os.name:
return file[u'windows'] % username
else:
return None
def expandpath(file):
if os.name == 'posix' and file.startswith('~'):
if file == '~':
return os.path.expanduser(u'~')
else:
n = file.find('/')
if n >= 0:
return os.path.join(os.path.expanduser(file[:n]), file[n+1:])
return file
class Dummy:
pass
class O:
"""
This class is used to represent objects locally.
Missing tags are normally set to O.
The tags are stored in self.tags and there is usually
either self.about or self.id set.
"""
def __init__(self, tags=None, about=None, id=None):
self.about = about
self.id = id
self.tags = tags if tags else {}
self.types = {}
for t in self.tags:
self.types[t] = type(self.tags[t])
def __str__(self):
keys = self.tags.keys()
keys.sort()
return u'\n'.join([u' %s=%s' % (key, toStr(self.tags[key]
if not self.tags[key] is O
else u'(not present)'))
for key in keys])
def __unicode__(self):
keys = self.tags.keys()
keys.sort()
return u'\n'.join([formatted_tag_value(key, self.tags[key])
for key in keys
if not key.startswith('_')])
def typedval(self, t):
return (self.tags[t], self.types[t])
def u(self, key):
return self.tags[key]
def toJSON(self):
return {'item': 'object', 'tags': self.tags}
def get(self, tag, retNone=True):
try:
return self.tags[tag]
except KeyError:
if retNone:
return None
else:
raise
def formatted_tag_value(tag, value, terse=False, prefix=u' ', mime=None):
lhs = u'' if terse else u'%s%s = ' % (prefix, tag)
if mime and not mime in TEXTUAL_MIMES.values():
return (u'%s<Non-primitive value of type %s (size %d)>'
% (lhs, unicode(mime), len(value)))
    elif value is None:
return u'%s%s' % (u'' if terse else prefix, tag)
elif type(value) == unicode:
return u'%s"%s"' % (lhs, value)
elif type(value) == type(''):
return '%s"%s"' % (lhs.encode('UTF-8'), value)
elif type(value) in (list, tuple):
vals = value[:]
if len(vals) < 2:
return u'%s[%s]' % (lhs, (u'"%s"' % unicode(vals[0])
if len(vals) == 1 else u''))
else:
return u'%s[\n %s\n ]' % (lhs,
u',\n '.join(u'"%s"' % unicode(v) for v in vals))
else:
return u'%s%s' % (lhs, toStr(value))
|
import numpy as np
import os
from PIL import Image
from skimage.measure import compare_ssim
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import tqdm
import scipy.misc
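# Slide a height x width window over the image with the given stride, run each
# patch through the module-level autoencoder `aae` (defined further down), and
# record the SSIM between every patch and its reconstruction.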
def crop(im, height=64, width=64, stride=1):
img_height = im.shape[0]
img_width = im.shape[1]
ssim = []
for i in range(0, (img_height - height)//stride + 1):
img = np.array([])
for j in range(0, (img_width - width)//stride + 1):
img = np.append(img, im[i*stride: i*stride + height, j*stride: j*stride + width])
img = img.reshape((j+1), height, width, 3)
re_im = aae.autoencoder.predict(img/255, batch_size=128)
ssim.append([compare_ssim(img[l]/255, re_im[l], multichannel=True) for l in range(img.shape[0])])
return ssim, j+1, i+1
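# Per-pixel count of how many sliding windows cover each position; used later to
# normalize the summed patch votes into a [0, 1] anomaly map.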
def window(img_height, img_width, height=64, width=64, stride=1):
wind = np.zeros([img_height, img_width])
ones = np.ones([height, width])
for i in range(0, (img_height - height)//stride + 1):
for j in range(0, (img_width - width)//stride + 1):
wind[i*stride: i*stride + height, j*stride: j*stride + width] = \
wind[i*stride: i*stride + height, j*stride: j*stride + width] + ones
return wind
def mse(image_A, image_B):
err = np.sum((image_A.astype("float") - image_B.astype("float")) ** 2)
err /= float(image_A.shape[0] * image_A.shape[1])
return err
images_folder = 'data/train_data/'
images = os.listdir(images_folder)
threshold = 0.9777527527527528
from adversarial_autoencoder import *
aae = AdversarialAutoencoder()
aae.autoencoder.load_weights('trainings/no_gan/models/low_ae_autoencoder.h5')
wind = window(640, 640, stride=8)
plt.imshow(wind)
plt.savefig('window.png')
plt.close()
attack_prop = range(0, 110, 10)
mse_of_attack_ones = {}
mse_of_attack_zeros = {}
mse_of_attack_mix = {}
mse_of_one_node = []
all_zeros = np.zeros([640, 640])
for i in attack_prop:
mse_of_attack_ones[i] = []
mse_of_attack_zeros[i] = []
mse_of_attack_mix[i] = []
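# For each training image: mark patches whose reconstruction SSIM falls below the
# threshold, then simulate a fraction k of "attacker" patches that override their
# vote with always-one, always-zero, or random decisions, and compare the
# resulting maps against an all-zero (clean) reference via MSE.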
for image in tqdm.tqdm(images):
im_read = np.array(Image.open(os.path.join(images_folder, image)), dtype='uint8')
ssim, rows, cols = crop(im_read, stride=8)
prediction = np.zeros([640, 640])
ones = np.ones([64, 64])
ssim = np.asarray(ssim)
ssim = ssim.reshape(rows, cols)
new_dir = 'postprocessing_with_attacks/{}/'.format(image)
os.makedirs(new_dir, exist_ok=True)
for i in range(0, (640 - 64)//8 + 1):
for j in range(0, (640 - 64)//8 + 1):
if ssim[i, j] <= threshold:
prediction[i*8: i*8 + 64, j*8: j*8 + 64] = prediction[i*8: i*8 + 64, j*8: j*8 + 64] + ones
mse_of_one_node.append(mse(all_zeros, prediction/wind))
for k in attack_prop:
attacks = np.random.binomial(1, k/100, [rows, cols])
prediction_ones = np.zeros([640, 640])
prediction_zeros = np.zeros([640, 640])
prediction_mix = np.zeros([640, 640])
for i in range(0, (640 - 64)//8 + 1):
for j in range(0, (640 - 64)//8 + 1):
if attacks[i, j] == 0:
if ssim[i, j] <= threshold:
prediction_ones[i*8: i*8 + 64, j*8: j*8 + 64] = \
prediction_ones[i*8: i*8 + 64, j*8: j*8 + 64] + ones
prediction_zeros[i*8: i*8 + 64, j*8: j*8 + 64] = \
prediction_zeros[i*8: i*8 + 64, j*8: j*8 + 64] + ones
prediction_mix[i*8: i*8 + 64, j*8: j*8 + 64] = \
prediction_mix[i*8: i*8 + 64, j*8: j*8 + 64] + ones
else:
prediction_ones[i*8: i*8 + 64, j*8: j*8 + 64] = \
prediction_ones[i*8: i*8 + 64, j*8: j*8 + 64] + ones
prediction_zeros[i*8: i*8 + 64, j*8: j*8 + 64] = \
prediction_zeros[i*8: i*8 + 64, j*8: j*8 + 64]
prediction_mix[i*8: i*8 + 64, j*8: j*8 + 64] = \
prediction_mix[i*8: i*8 + 64, j*8: j*8 + 64] + np.random.binomial(1, 0.5, 1)[0]*ones
mse_of_attack_ones[k].append(mse(all_zeros, prediction_ones/wind))
mse_of_attack_zeros[k].append(mse(all_zeros, prediction_zeros/wind))
mse_of_attack_mix[k].append(mse(all_zeros, prediction_mix/wind))
fig, axs = plt.subplots(1, 5)
axs[0].imshow(im_read)
axs[1].imshow(prediction/wind)
axs[2].imshow(prediction_ones/wind, vmin=0.0, vmax=1.0)
axs[3].imshow(prediction_zeros/wind)
axs[4].imshow(prediction_mix/wind)
axs[0].axis('off')
axs[1].axis('off')
axs[2].axis('off')
axs[3].axis('off')
axs[4].axis('off')
fig.savefig(new_dir + 'attack_{}.png'.format(k))
plt.close()
mean_ones = []
mean_zeros = []
mean_mix = []
for i in sorted(mse_of_attack_ones.keys()):
mean_ones.append(np.mean(mse_of_attack_ones[i]))
mean_zeros.append(np.mean(mse_of_attack_zeros[i]))
mean_mix.append(np.mean(mse_of_attack_mix[i]))
plt.figure()
plt.xlabel('% of attackers')
plt.ylabel('MSE')
plt.semilogy(list(attack_prop), mean_ones, c='C0', label='Always One attack')
plt.semilogy(list(attack_prop), mean_zeros, c='C1', label='Always Zero attack')
plt.semilogy(list(attack_prop), mean_mix, c='C2', label='Random attack')
plt.semilogy(list(attack_prop), np.repeat(np.mean(mse_of_one_node), len(list(attack_prop))), c='C4',
label='Centralized Node')
plt.legend()
plt.grid()
plt.savefig('postprocessing_with_attacks/mse.png')
plt.close()
|
from abc import ABCMeta, abstractmethod
from monitor import HANAServerDBOperatorService
from monitor import HANAServerOSOperatorService
from util import MonitorUtility
from util import MonitorConst as Mc
import traceback
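# Initializers used to bootstrap the HANA monitor: create tables/views, collect a
# hardware overview (disk, memory, CPU, OS) from each configured server over SSH,
# and generate SID records from the configured mappings.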
class MonitorInitializer(metaclass=ABCMeta):
    """The root class for the initializers.
    initialize -- performs the initialization job
    """
def __init__(self):
self._os_operator = HANAServerOSOperatorService.instance()
self._db_operator = HANAServerDBOperatorService.instance()
def get_os_operator(self):
return self._os_operator
def get_db_operator(self):
return self._db_operator
@abstractmethod
def initialize(self):
"""abstract method, needs to be overwritten in child classes"""
pass
class TableInitializer(MonitorInitializer):
"""Initializer for tables, creating all the tables and views, inserting basic records"""
def __init__(self):
self.__logger = MonitorUtility.get_logger(Mc.LOGGER_MONITOR_INIT_TABLE)
super().__init__()
def initialize(self):
self._db_operator.execute_from_script(Mc.get_init_sql_file())
class ServerInfoInitializer(MonitorInitializer):
"""Initializer for Server Info, get overview of disk, memory cpu and operation system"""
def __init__(self):
self.__logger = MonitorUtility.get_logger(Mc.LOGGER_MONITOR_INIT_SERVER)
super().__init__()
def __get_server_info(self, ssh, server_id, mount_point):
"""get the overview of disk, memory, CPU and Operation System"""
self._os_operator.collect_os_info(ssh, server_id, None)
self._os_operator.collect_disk_info(ssh, server_id, mount_point, None)
self._os_operator.collect_mem_info(ssh, server_id, None)
self._os_operator.collect_cpu_info(ssh, server_id, None)
return self._os_operator.get_server_info_by_server_id(server_id)
def initialize(self):
"""get the overview of disk, memory, CPU and Operation System for all configured servers"""
try:
self.__logger.info("Start initializing server info...")
for location in self._db_operator.get_locations():
location_id = location[Mc.FIELD_LOCATION_ID]
server_name_list = self._db_operator.get_server_full_names(location_id)
if not server_name_list:
self.__logger.error("Server name list is empty, please initialize server name list first!")
return
for server in server_name_list:
self.__logger.debug("Processing:{0}".format(server))
server_id = server[Mc.FIELD_SERVER_ID]
server_name = server[Mc.FIELD_SERVER_FULL_NAME]
mount_point = server[Mc.FIELD_MOUNT_POINT]
ssh = self._os_operator.open_ssh_connection(server_name)
if ssh is None:
self.__logger.warning("Failed to connect {0}".format(server_name))
continue
server_info = self.__get_server_info(ssh, server_id, mount_point)
self.__logger.info(server_info)
if server_info is not None:
self._db_operator.update_server_info(server_info, server_id)
ssh.close()
self.__logger.info("Initializing {0} is done.".format(server_name))
self.__logger.info("Successfully initialized server info for location:{0}...".format(location_id))
self.__logger.info("Successfully initialized server info...")
except Exception as ex:
self.__logger.error("Error:{0} happened in initializing server info!".format(ex))
self.__logger.exception(traceback.format_exc())
class SidInitializer(MonitorInitializer):
"""generate all the SIDs by the configured mapping"""
def __init__(self):
self.__logger = MonitorUtility.get_logger(Mc.LOGGER_MONITOR_INIT_SID)
super().__init__()
def initialize(self):
"""generate all the SIDs by the configured mapping"""
try:
self.__logger.info("Start generating SIDs by the configured mapping...")
for location in self._db_operator.get_locations():
location_id = location[Mc.FIELD_LOCATION_ID]
sid_mappings = self._db_operator.get_sid_mappings()
if not sid_mappings:
self.__logger.error("SID mapping is empty!")
return
server_name_list = self._db_operator.get_server_full_names(location_id)
if not server_name_list:
self.__logger.error("Server name list is empty, please initialize server name list first!")
return
for mapping in sid_mappings:
sid_start = mapping[Mc.FIELD_SID_START]
sid_end = mapping[Mc.FIELD_SID_END]
employee_id = mapping[Mc.FIELD_EMPLOYEE_ID]
sid_list = MonitorUtility.gen_sid_list(sid_start, sid_end)
if not sid_list:
self.__logger.debug(
"Failed to generate SIDs for the SID mapping {0}-{1}.".format(sid_start, sid_end))
continue
# sid_info_list = []
# for server in server_name_list:
# # prepare the parameter list for the SQL:
# # insert into VAN_MONITOR.T_SID_INFO (SERVER_ID, SID, SID_USER, EMPLOYEE_ID) values (?,?,?)
# sid_info_list.extend(
# [(server[0], sid, "".join([sid.lower(), "adm"]), employee_id) for sid in sid_list])
self._db_operator.insert_sid_info(employee_id, server_name_list, sid_list)
self.__logger.info("Successfully generated SIDs by the configured "
"mapping for location:{0}...".format(location_id))
self.__logger.info("Successfully generated SIDs by the configured mapping!")
except Exception as ex:
self.__logger.error("Error happened in generating SID info:{0}!".format(ex))
self.__logger.exception(traceback.format_exc())
class InitController:
"""Facade of the SidInitializer and ServerInfoInitializer"""
def __init__(self):
self.table_initializer = TableInitializer()
self.server_initializer = ServerInfoInitializer()
self.sid_initializer = SidInitializer()
def init_server_info(self):
self.server_initializer.initialize()
def init_sid_info(self):
self.sid_initializer.initialize()
def init_tables(self):
self.table_initializer.initialize()
def init_all(self):
# self.init_tables()
# self.init_sid_info()
self.init_server_info()
if __name__ == '__main__':
InitController().init_all()
# InitController().init_sid_info()
|
import warnings
import copy
import math as m
import numpy as nu
from scipy import integrate, optimize
import scipy
if int(scipy.__version__.split('.')[1]) < 10: #pragma: no cover
from scipy.maxentropy import logsumexp
else:
from scipy.misc import logsumexp
from galpy.potential_src.Potential import evaluateRforces, evaluatezforces,\
evaluatePotentials, evaluatephiforces, evaluateDensities
from galpy.util import galpyWarning
import galpy.util.bovy_plot as plot
import galpy.util.bovy_symplecticode as symplecticode
import galpy.util.bovy_coords as coords
#try:
from galpy.orbit_src.integrateFullOrbit import integrateFullOrbit_c, _ext_loaded
ext_loaded= _ext_loaded
from galpy.util.bovy_conversion import physical_conversion
from galpy.orbit_src.OrbitTop import OrbitTop
_ORBFITNORMRADEC= 360.
_ORBFITNORMDIST= 10.
_ORBFITNORMPMRADEC= 4.
_ORBFITNORMVLOS= 200.
class FullOrbit(OrbitTop):
"""Class that holds and integrates orbits in full 3D potentials"""
def __init__(self,vxvv=[1.,0.,0.9,0.,0.1],vo=220.,ro=8.0,zo=0.025,
solarmotion=nu.array([-10.1,4.0,6.7])):
"""
NAME:
__init__
PURPOSE:
           initialize a full orbit
INPUT:
vxvv - initial condition [R,vR,vT,z,vz,phi]
vo - circular velocity at ro (km/s)
ro - distance from vantage point to GC (kpc)
zo - offset toward the NGP of the Sun wrt the plane (kpc)
solarmotion - value in [-U,V,W] (km/s)
OUTPUT:
(none)
HISTORY:
2010-08-01 - Written - Bovy (NYU)
2014-06-11 - Added conversion kwargs to physical coordinates - Bovy (IAS)
"""
OrbitTop.__init__(self,vxvv=vxvv,
ro=ro,zo=zo,vo=vo,solarmotion=solarmotion)
return None
def integrate(self,t,pot,method='symplec4_c',dt=None):
"""
NAME:
integrate
PURPOSE:
integrate the orbit
INPUT:
t - list of times at which to output (0 has to be in this!)
pot - potential instance or list of instances
method= 'odeint' for scipy's odeint
'leapfrog' for a simple leapfrog implementation
'leapfrog_c' for a simple leapfrog implementation in C
'rk4_c' for a 4th-order Runge-Kutta integrator in C
'rk6_c' for a 6-th order Runge-Kutta integrator in C
'dopr54_c' for a Dormand-Prince integrator in C (generally the fastest)
dt= (None) if set, force the integrator to use this basic stepsize; must be an integer divisor of output stepsize
OUTPUT:
           (none) (get the actual orbit using getOrbit())
HISTORY:
2010-08-01 - Written - Bovy (NYU)
"""
#Reset things that may have been defined by a previous integration
if hasattr(self,'_orbInterp'): delattr(self,'_orbInterp')
if hasattr(self,'rs'): delattr(self,'rs')
self.t= nu.array(t)
self._pot= pot
self.orbit= _integrateFullOrbit(self.vxvv,pot,t,method,dt)
@physical_conversion('energy')
def Jacobi(self,*args,**kwargs):
"""
NAME:
Jacobi
PURPOSE:
calculate the Jacobi integral of the motion
INPUT:
Omega - pattern speed of rotating frame
t= time
pot= potential instance or list of such instances
OUTPUT:
Jacobi integral
HISTORY:
2011-04-18 - Written - Bovy (NYU)
"""
if not 'OmegaP' in kwargs or kwargs['OmegaP'] is None:
OmegaP= 1.
if not 'pot' in kwargs or kwargs['pot'] is None:
try:
pot= self._pot
except AttributeError:
raise AttributeError("Integrate orbit or specify pot=")
else:
pot= kwargs['pot']
if isinstance(pot,list):
for p in pot:
if hasattr(p,'OmegaP'):
OmegaP= p.OmegaP()
break
else:
if hasattr(pot,'OmegaP'):
OmegaP= pot.OmegaP()
kwargs.pop('OmegaP',None)
else:
OmegaP= kwargs.pop('OmegaP')
#Make sure you are not using physical coordinates
old_physical= kwargs.get('use_physical',None)
kwargs['use_physical']= False
if not isinstance(OmegaP,(int,float)) and len(OmegaP) == 3:
if isinstance(OmegaP,list): thisOmegaP= nu.array(OmegaP)
else: thisOmegaP= OmegaP
out= self.E(*args,**kwargs)-nu.dot(thisOmegaP,
self.L(*args,**kwargs).T).T
else:
out= self.E(*args,**kwargs)-OmegaP*self.L(*args,**kwargs)[:,2]
if not old_physical is None:
kwargs['use_physical']= old_physical
else:
kwargs.pop('use_physical')
return out
@physical_conversion('energy')
def E(self,*args,**kwargs):
"""
NAME:
E
PURPOSE:
calculate the energy
INPUT:
t - (optional) time at which to get the energy
pot= potential instance or list of such instances
OUTPUT:
energy
HISTORY:
2010-09-15 - Written - Bovy (NYU)
"""
if not 'pot' in kwargs or kwargs['pot'] is None:
try:
pot= self._pot
except AttributeError:
raise AttributeError("Integrate orbit or specify pot=")
if 'pot' in kwargs and kwargs['pot'] is None:
kwargs.pop('pot')
else:
pot= kwargs.pop('pot')
if len(args) > 0:
t= args[0]
else:
t= 0.
#Get orbit
thiso= self(*args,**kwargs)
onet= (len(thiso.shape) == 1)
if onet:
return evaluatePotentials(thiso[0],thiso[3],pot,
phi=thiso[5],t=t)\
+thiso[1]**2./2.\
+thiso[2]**2./2.\
+thiso[4]**2./2.
else:
return nu.array([evaluatePotentials(thiso[0,ii],thiso[3,ii],
pot,phi=thiso[5,ii],
t=t[ii])\
+thiso[1,ii]**2./2.\
+thiso[2,ii]**2./2.\
+thiso[4,ii]**2./2. for ii in range(len(t))])
@physical_conversion('energy')
def ER(self,*args,**kwargs):
"""
NAME:
ER
PURPOSE:
calculate the radial energy
INPUT:
t - (optional) time at which to get the energy
pot= potential instance or list of such instances
OUTPUT:
radial energy
HISTORY:
2013-11-30 - Written - Bovy (IAS)
"""
if not 'pot' in kwargs or kwargs['pot'] is None:
try:
pot= self._pot
except AttributeError:
raise AttributeError("Integrate orbit or specify pot=")
if 'pot' in kwargs and kwargs['pot'] is None:
kwargs.pop('pot')
else:
pot= kwargs.pop('pot')
if len(args) > 0:
t= args[0]
else:
t= 0.
#Get orbit
thiso= self(*args,**kwargs)
onet= (len(thiso.shape) == 1)
if onet:
return evaluatePotentials(thiso[0],0.,pot,
phi=thiso[5],t=t)\
+thiso[1]**2./2.\
+thiso[2]**2./2.
else:
return nu.array([evaluatePotentials(thiso[0,ii],0.,
pot,phi=thiso[5,ii],
t=t[ii])\
+thiso[1,ii]**2./2.\
+thiso[2,ii]**2./2. for ii in range(len(t))])
@physical_conversion('energy')
def Ez(self,*args,**kwargs):
"""
NAME:
Ez
PURPOSE:
calculate the vertical energy
INPUT:
t - (optional) time at which to get the energy
pot= potential instance or list of such instances
OUTPUT:
vertical energy
HISTORY:
2013-11-30 - Written - Bovy (IAS)
"""
if not 'pot' in kwargs or kwargs['pot'] is None:
try:
pot= self._pot
except AttributeError:
raise AttributeError("Integrate orbit or specify pot=")
if 'pot' in kwargs and kwargs['pot'] is None:
kwargs.pop('pot')
else:
pot= kwargs.pop('pot')
if len(args) > 0:
t= args[0]
else:
t= 0.
#Get orbit
thiso= self(*args,**kwargs)
onet= (len(thiso.shape) == 1)
if onet:
return evaluatePotentials(thiso[0],thiso[3],pot,
phi=thiso[5],t=t)\
-evaluatePotentials(thiso[0],0.,pot,
phi=thiso[5],t=t)\
+thiso[4]**2./2.
else:
return nu.array([evaluatePotentials(thiso[0,ii],thiso[3,ii],
pot,phi=thiso[5,ii],
t=t[ii])\
-evaluatePotentials(thiso[0,ii],0.,
pot,phi=thiso[5,ii],
t=t[ii])\
+thiso[4,ii]**2./2. for ii in range(len(t))])
def e(self,analytic=False,pot=None):
"""
NAME:
e
PURPOSE:
calculate the eccentricity
INPUT:
analytic - compute this analytically
pot - potential to use for analytical calculation
OUTPUT:
eccentricity
HISTORY:
2010-09-15 - Written - Bovy (NYU)
"""
if analytic:
self._setupaA(pot=pot,type='adiabatic')
(rperi,rap)= self._aA.calcRapRperi(self)
return (rap-rperi)/(rap+rperi)
if not hasattr(self,'orbit'):
raise AttributeError("Integrate the orbit first")
if not hasattr(self,'rs'):
self.rs= nu.sqrt(self.orbit[:,0]**2.+self.orbit[:,3]**2.)
return (nu.amax(self.rs)-nu.amin(self.rs))/(nu.amax(self.rs)+nu.amin(self.rs))
@physical_conversion('position')
def rap(self,analytic=False,pot=None,**kwargs):
"""
NAME:
rap
PURPOSE:
return the apocenter radius
INPUT:
analytic - compute this analytically
pot - potential to use for analytical calculation
OUTPUT:
R_ap
HISTORY:
2010-09-20 - Written - Bovy (NYU)
"""
if analytic:
self._setupaA(pot=pot,type='adiabatic')
(rperi,rap)= self._aA.calcRapRperi(self)
return rap
if not hasattr(self,'orbit'):
raise AttributeError("Integrate the orbit first")
if not hasattr(self,'rs'):
self.rs= nu.sqrt(self.orbit[:,0]**2.+self.orbit[:,3]**2.)
return nu.amax(self.rs)
@physical_conversion('position')
def rperi(self,analytic=False,pot=None,**kwargs):
"""
NAME:
rperi
PURPOSE:
return the pericenter radius
INPUT:
analytic - compute this analytically
pot - potential to use for analytical calculation
OUTPUT:
R_peri
HISTORY:
2010-09-20 - Written - Bovy (NYU)
"""
if analytic:
self._setupaA(pot=pot,type='adiabatic')
(rperi,rap)= self._aA.calcRapRperi(self)
return rperi
if not hasattr(self,'orbit'):
raise AttributeError("Integrate the orbit first")
if not hasattr(self,'rs'):
self.rs= nu.sqrt(self.orbit[:,0]**2.+self.orbit[:,3]**2.)
return nu.amin(self.rs)
@physical_conversion('position')
def zmax(self,analytic=False,pot=None,**kwargs):
"""
NAME:
zmax
PURPOSE:
return the maximum vertical height
INPUT:
analytic - compute this analytically
pot - potential to use for analytical calculation
OUTPUT:
Z_max
HISTORY:
2010-09-20 - Written - Bovy (NYU)
2012-06-01 - Added analytic calculation - Bovy (IAS)
"""
if analytic:
self._setupaA(pot=pot,type='adiabatic')
zmax= self._aA.calczmax(self)
return zmax
if not hasattr(self,'orbit'):
raise AttributeError("Integrate the orbit first")
return nu.amax(nu.fabs(self.orbit[:,3]))
def fit(self,vxvv,vxvv_err=None,pot=None,radec=False,lb=False,
customsky=False,lb_to_customsky=None,pmllpmbb_to_customsky=None,
tintJ=10,ntintJ=1000,integrate_method='dopr54_c',
disp=False,
**kwargs):
"""
NAME:
fit
PURPOSE:
fit an Orbit to data using the current orbit as the initial
condition
INPUT:
vxvv - [:,6] array of positions and velocities along the orbit
vxvv_err= [:,6] array of errors on positions and velocities along the orbit (if None, these are set to 0.01)
pot= Potential to fit the orbit in
Keywords related to the input data:
radec= if True, input vxvv and vxvv_err are [ra,dec,d,mu_ra, mu_dec,vlos] in [deg,deg,kpc,mas/yr,mas/yr,km/s] (all J2000.0; mu_ra = mu_ra * cos dec); the attributes of the current Orbit are used to convert between these coordinates and Galactocentric coordinates
lb= if True, input vxvv and vxvv_err are [long,lat,d,mu_ll, mu_bb,vlos] in [deg,deg,kpc,mas/yr,mas/yr,km/s] (mu_ll = mu_ll * cos lat); the attributes of the current Orbit are used to convert between these coordinates and Galactocentric coordinates
customsky= if True, input vxvv and vxvv_err are [custom long,custom lat,d,mu_customll, mu_custombb,vlos] in [deg,deg,kpc,mas/yr,mas/yr,km/s] (mu_ll = mu_ll * cos lat) where custom longitude and custom latitude are a custom set of sky coordinates (e.g., ecliptic) and the proper motions are also expressed in these coordinats; you need to provide the functions lb_to_customsky and pmllpmbb_to_customsky to convert to the custom sky coordinates (these should have the same inputs and outputs as lb_to_radec and pmllpmbb_to_pmrapmdec); the attributes of the current Orbit are used to convert between these coordinates and Galactocentric coordinates
obs=[X,Y,Z,vx,vy,vz] - (optional) position and velocity of observer
(in kpc and km/s) (default=Object-wide default)
Cannot be an Orbit instance with the orbit of the reference point, as w/ the ra etc. functions
ro= distance in kpc corresponding to R=1. (default: taken from object)
vo= velocity in km/s corresponding to v=1. (default: taken from object)
lb_to_customsky= function that converts l,b,degree=False to the custom sky coordinates (like lb_to_radec); needs to be given when customsky=True
pmllpmbb_to_customsky= function that converts pmll,pmbb,l,b,degree=False to proper motions in the custom sky coordinates (like pmllpmbb_to_pmrapmdec); needs to be given when customsky=True
Keywords related to the orbit integrations:
tintJ= (default: 10) time to integrate orbits for fitting the orbit
ntintJ= (default: 1000) number of time-integration points
integrate_method= (default: 'dopr54_c') integration method to use
disp= (False) display the optimizer's convergence message
OUTPUT:
max of log likelihood
HISTORY:
2014-06-17 - Written - Bovy (IAS)
TEST:
from galpy.potential import LogarithmicHaloPotential; lp= LogarithmicHaloPotential(normalize=1.); from galpy.orbit import Orbit; o= Orbit(vxvv=[1.,0.1,1.1,0.1,0.02,0.]); ts= numpy.linspace(0,10,1000); o.integrate(ts,lp); outts= [0.,0.1,0.2,0.3,0.4]; vxvv= numpy.array([o.R(outts),o.vR(outts),o.vT(outts),o.z(outts),o.vz(outts),o.phi(outts)]).T; of= Orbit(vxvv=[1.02,0.101,1.101,0.101,0.0201,0.001]); of._orb.fit(vxvv,pot=lp,radec=False,tintJ=10,ntintJ=1000)
"""
if pot is None:
try:
pot= self._pot
except AttributeError:
raise AttributeError("Integrate orbit first or specify pot=")
if radec or lb or customsky:
obs, ro, vo= self._parse_radec_kwargs(kwargs,vel=True,dontpop=True)
else:
obs, ro, vo= None, None, None
if customsky \
and (lb_to_customsky is None or pmllpmbb_to_customsky is None):
raise IOError('if customsky=True, the functions lb_to_customsky and pmllpmbb_to_customsky need to be given')
new_vxvv, maxLogL= _fit_orbit(self,vxvv,vxvv_err,pot,radec=radec,lb=lb,
customsky=customsky,
lb_to_customsky=lb_to_customsky,
pmllpmbb_to_customsky=pmllpmbb_to_customsky,
tintJ=tintJ,ntintJ=ntintJ,
integrate_method=integrate_method,
ro=ro,vo=vo,obs=obs,disp=disp)
#Setup with these new initial conditions
self.vxvv= new_vxvv
return maxLogL
def plotEz(self,*args,**kwargs):
"""
NAME:
plotEz
PURPOSE:
plot Ez(.) along the orbit
INPUT:
bovy_plot.bovy_plot inputs
OUTPUT:
figure to output device
HISTORY:
2014-06-16 - Written - Bovy (IAS)
"""
if kwargs.pop('normed',False):
kwargs['d2']= 'Eznorm'
else:
kwargs['d2']= 'Ez'
self.plot(*args,**kwargs)
def plotER(self,*args,**kwargs):
"""
NAME:
plotER
PURPOSE:
plot ER(.) along the orbit
INPUT:
bovy_plot.bovy_plot inputs
OUTPUT:
figure to output device
HISTORY:
2014-06-16 - Written - Bovy (IAS)
"""
if kwargs.pop('normed',False):
kwargs['d2']= 'ERnorm'
else:
kwargs['d2']= 'ER'
self.plot(*args,**kwargs)
def plotEzJz(self,*args,**kwargs):
"""
NAME:
plotEzJz
PURPOSE:
plot E_z(.)/sqrt(dens(R)) along the orbit
INPUT:
pot= Potential instance or list of instances in which the orbit was
integrated
d1= - plot Ez vs d1: e.g., 't', 'z', 'R', 'vR', 'vT', 'vz'
+bovy_plot.bovy_plot inputs
OUTPUT:
figure to output device
HISTORY:
2010-08-08 - Written - Bovy (NYU)
"""
labeldict= {'t':r'$t$','R':r'$R$','vR':r'$v_R$','vT':r'$v_T$',
'z':r'$z$','vz':r'$v_z$','phi':r'$\phi$',
'x':r'$x$','y':r'$y$','vx':r'$v_x$','vy':r'$v_y$'}
if not 'pot' in kwargs:
try:
pot= self._pot
except AttributeError:
raise AttributeError("Integrate orbit first or specify pot=")
else:
pot= kwargs.pop('pot')
d1= kwargs.pop('d1','t')
self.EzJz= [(evaluatePotentials(self.orbit[ii,0],self.orbit[ii,3],
pot,t=self.t[ii])-
evaluatePotentials(self.orbit[ii,0],0.,pot,
phi= self.orbit[ii,5],t=self.t[ii])+
self.orbit[ii,4]**2./2.)/\
nu.sqrt(evaluateDensities(self.orbit[ii,0],0.,pot,phi=self.orbit[ii,5],t=self.t[ii]))\
for ii in range(len(self.t))]
if not 'xlabel' in kwargs:
kwargs['xlabel']= labeldict[d1]
if not 'ylabel' in kwargs:
kwargs['ylabel']= r'$E_z/\sqrt{\rho}$'
if d1 == 't':
plot.bovy_plot(nu.array(self.t),nu.array(self.EzJz)/self.EzJz[0],
*args,**kwargs)
elif d1 == 'z':
plot.bovy_plot(self.orbit[:,3],nu.array(self.EzJz)/self.EzJz[0],
*args,**kwargs)
elif d1 == 'R':
plot.bovy_plot(self.orbit[:,0],nu.array(self.EzJz)/self.EzJz[0],
*args,**kwargs)
elif d1 == 'vR':
plot.bovy_plot(self.orbit[:,1],nu.array(self.EzJz)/self.EzJz[0],
*args,**kwargs)
elif d1 == 'vT':
plot.bovy_plot(self.orbit[:,2],nu.array(self.EzJz)/self.EzJz[0],
*args,**kwargs)
elif d1 == 'vz':
plot.bovy_plot(self.orbit[:,4],nu.array(self.EzJz)/self.EzJz[0],
*args,**kwargs)
def _integrateFullOrbit(vxvv,pot,t,method,dt):
"""
NAME:
_integrateFullOrbit
PURPOSE:
integrate an orbit in a Phi(R,z,phi) potential
INPUT:
vxvv - array with the initial conditions stacked like
[R,vR,vT,z,vz,phi]; vR outward!
pot - Potential instance
t - list of times at which to output (0 has to be in this!)
method - 'odeint' or 'leapfrog'
dt - if set, force the integrator to use this basic stepsize; must be an integer divisor of output stepsize
OUTPUT:
[:,5] array of [R,vR,vT,z,vz,phi] at each t
HISTORY:
2010-08-01 - Written - Bovy (NYU)
"""
#First check that the potential has C
if '_c' in method:
if isinstance(pot,list):
allHasC= nu.prod([p.hasC for p in pot])
else:
allHasC= pot.hasC
if not allHasC and ('leapfrog' in method or 'symplec' in method):
method= 'leapfrog'
elif not allHasC:
method= 'odeint'
if method.lower() == 'leapfrog':
#go to the rectangular frame
this_vxvv= nu.array([vxvv[0]*nu.cos(vxvv[5]),
vxvv[0]*nu.sin(vxvv[5]),
vxvv[3],
vxvv[1]*nu.cos(vxvv[5])-vxvv[2]*nu.sin(vxvv[5]),
vxvv[2]*nu.cos(vxvv[5])+vxvv[1]*nu.sin(vxvv[5]),
vxvv[4]])
#integrate
out= symplecticode.leapfrog(_rectForce,this_vxvv,
t,args=(pot,),rtol=10.**-8)
#go back to the cylindrical frame
R= nu.sqrt(out[:,0]**2.+out[:,1]**2.)
phi= nu.arccos(out[:,0]/R)
phi[(out[:,1] < 0.)]= 2.*nu.pi-phi[(out[:,1] < 0.)]
vR= out[:,3]*nu.cos(phi)+out[:,4]*nu.sin(phi)
vT= out[:,4]*nu.cos(phi)-out[:,3]*nu.sin(phi)
out[:,3]= out[:,2]
out[:,4]= out[:,5]
out[:,0]= R
out[:,1]= vR
out[:,2]= vT
out[:,5]= phi
elif ext_loaded and \
(method.lower() == 'leapfrog_c' or method.lower() == 'rk4_c' \
or method.lower() == 'rk6_c' or method.lower() == 'symplec4_c' \
or method.lower() == 'symplec6_c' or method.lower() == 'dopr54_c'):
warnings.warn("Using C implementation to integrate orbits",
galpyWarning)
#go to the rectangular frame
this_vxvv= nu.array([vxvv[0]*nu.cos(vxvv[5]),
vxvv[0]*nu.sin(vxvv[5]),
vxvv[3],
vxvv[1]*nu.cos(vxvv[5])-vxvv[2]*nu.sin(vxvv[5]),
vxvv[2]*nu.cos(vxvv[5])+vxvv[1]*nu.sin(vxvv[5]),
vxvv[4]])
#integrate
tmp_out, msg= integrateFullOrbit_c(pot,this_vxvv,
t,method,dt=dt)
#go back to the cylindrical frame
R= nu.sqrt(tmp_out[:,0]**2.+tmp_out[:,1]**2.)
phi= nu.arccos(tmp_out[:,0]/R)
phi[(tmp_out[:,1] < 0.)]= 2.*nu.pi-phi[(tmp_out[:,1] < 0.)]
vR= tmp_out[:,3]*nu.cos(phi)+tmp_out[:,4]*nu.sin(phi)
vT= tmp_out[:,4]*nu.cos(phi)-tmp_out[:,3]*nu.sin(phi)
out= nu.zeros((len(t),6))
out[:,0]= R
out[:,1]= vR
out[:,2]= vT
out[:,5]= phi
out[:,3]= tmp_out[:,2]
out[:,4]= tmp_out[:,5]
elif method.lower() == 'odeint' or not ext_loaded:
vphi= vxvv[2]/vxvv[0]
init= [vxvv[0],vxvv[1],vxvv[5],vphi,vxvv[3],vxvv[4]]
intOut= integrate.odeint(_FullEOM,init,t,args=(pot,),
rtol=10.**-8.)#,mxstep=100000000)
out= nu.zeros((len(t),6))
out[:,0]= intOut[:,0]
out[:,1]= intOut[:,1]
out[:,2]= out[:,0]*intOut[:,3]
out[:,3]= intOut[:,4]
out[:,4]= intOut[:,5]
out[:,5]= intOut[:,2]
#post-process to remove negative radii
neg_radii= (out[:,0] < 0.)
out[neg_radii,0]= -out[neg_radii,0]
out[neg_radii,5]+= m.pi
return out
def _FullEOM(y,t,pot):
"""
NAME:
_FullEOM
PURPOSE:
implements the EOM, i.e., the right-hand side of the differential
equation
INPUT:
y - current phase-space position
t - current time
pot - (list of) Potential instance(s)
OUTPUT:
dy/dt
HISTORY:
2010-04-16 - Written - Bovy (NYU)
"""
l2= (y[0]**2.*y[3])**2.
return [y[1],
l2/y[0]**3.+evaluateRforces(y[0],y[4],pot,phi=y[2],t=t),
y[3],
1./y[0]**2.*(evaluatephiforces(y[0],y[4],pot,phi=y[2],t=t)-
2.*y[0]*y[1]*y[3]),
y[5],
evaluatezforces(y[0],y[4],pot,phi=y[2],t=t)]
def _rectForce(x,pot,t=0.):
"""
NAME:
_rectForce
PURPOSE:
returns the force in the rectangular frame
INPUT:
x - current position
t - current time
pot - (list of) Potential instance(s)
OUTPUT:
force
HISTORY:
2011-02-02 - Written - Bovy (NYU)
"""
#x is rectangular so calculate R and phi
R= nu.sqrt(x[0]**2.+x[1]**2.)
phi= nu.arccos(x[0]/R)
sinphi= x[1]/R
cosphi= x[0]/R
if x[1] < 0.: phi= 2.*nu.pi-phi
#calculate forces
Rforce= evaluateRforces(R,x[2],pot,phi=phi,t=t)
phiforce= evaluatephiforces(R,x[2],pot,phi=phi,t=t)
return nu.array([cosphi*Rforce-1./R*sinphi*phiforce,
sinphi*Rforce+1./R*cosphi*phiforce,
evaluatezforces(R,x[2],pot,phi=phi,t=t)])
def _fit_orbit(orb,vxvv,vxvv_err,pot,radec=False,lb=False,
customsky=False,lb_to_customsky=None,
pmllpmbb_to_customsky=None,
tintJ=100,ntintJ=1000,integrate_method='dopr54_c',
ro=None,vo=None,obs=None,disp=False):
"""Fit an orbit to data in a given potential"""
#Import here, because otherwise there is an infinite loop of imports
from galpy.actionAngle import actionAngleIsochroneApprox
#Mock this up, bc we want to use its orbit-integration routines
class mockActionAngleIsochroneApprox(actionAngleIsochroneApprox):
def __init__(self,tintJ,ntintJ,pot,integrate_method='dopr54_c'):
self._tintJ= tintJ
self._ntintJ=ntintJ
self._tsJ= nu.linspace(0.,self._tintJ,self._ntintJ)
self._pot= pot
self._integrate_method= integrate_method
return None
tmockAA= mockActionAngleIsochroneApprox(tintJ,ntintJ,pot,
integrate_method=integrate_method)
opt_vxvv= optimize.fmin_powell(_fit_orbit_mlogl,orb.vxvv,
args=(vxvv,vxvv_err,pot,radec,lb,
customsky,lb_to_customsky,
pmllpmbb_to_customsky,
tmockAA,
ro,vo,obs),
disp=disp)
maxLogL= -_fit_orbit_mlogl(opt_vxvv,vxvv,vxvv_err,pot,radec,lb,
customsky,lb_to_customsky,pmllpmbb_to_customsky,
tmockAA,
ro,vo,obs)
return (opt_vxvv,maxLogL)
def _fit_orbit_mlogl(new_vxvv,vxvv,vxvv_err,pot,radec,lb,
customsky,lb_to_customsky,pmllpmbb_to_customsky,
tmockAA,
ro,vo,obs):
"""The log likelihood for fitting an orbit"""
#Use this _parse_args routine, which does forward and backward integration
iR,ivR,ivT,iz,ivz,iphi= tmockAA._parse_args(True,False,
new_vxvv[0],
new_vxvv[1],
new_vxvv[2],
new_vxvv[3],
new_vxvv[4],
new_vxvv[5])
if radec or lb or customsky:
#Need to transform to (l,b), (ra,dec), or a custom set
#First transform to X,Y,Z,vX,vY,vZ (Galactic)
X,Y,Z = coords.galcencyl_to_XYZ(iR.flatten(),iphi.flatten(),
iz.flatten(),
Xsun=obs[0]/ro,
Ysun=obs[1]/ro,
Zsun=obs[2]/ro)
vX,vY,vZ = coords.galcencyl_to_vxvyvz(ivR.flatten(),ivT.flatten(),
ivz.flatten(),iphi.flatten(),
vsun=nu.array(\
obs[3:6])/vo)
bad_indx= (X == 0.)*(Y == 0.)*(Z == 0.)
if True in bad_indx: X[bad_indx]+= ro/10000.
lbdvrpmllpmbb= coords.rectgal_to_sphergal(X*ro,Y*ro,Z*ro,
vX*vo,vY*vo,vZ*vo,
degree=True)
if lb:
orb_vxvv= nu.array([lbdvrpmllpmbb[:,0],
lbdvrpmllpmbb[:,1],
lbdvrpmllpmbb[:,2],
lbdvrpmllpmbb[:,4],
lbdvrpmllpmbb[:,5],
lbdvrpmllpmbb[:,3]]).T
elif radec:
#Further transform to ra,dec,pmra,pmdec
radec= coords.lb_to_radec(lbdvrpmllpmbb[:,0],
lbdvrpmllpmbb[:,1],degree=True)
pmrapmdec= coords.pmllpmbb_to_pmrapmdec(lbdvrpmllpmbb[:,4],
lbdvrpmllpmbb[:,5],
lbdvrpmllpmbb[:,0],
lbdvrpmllpmbb[:,1],
degree=True)
orb_vxvv= nu.array([radec[:,0],radec[:,1],
lbdvrpmllpmbb[:,2],
pmrapmdec[:,0],pmrapmdec[:,1],
lbdvrpmllpmbb[:,3]]).T
elif customsky:
#Further transform to ra,dec,pmra,pmdec
customradec= lb_to_customsky(lbdvrpmllpmbb[:,0],
lbdvrpmllpmbb[:,1],degree=True)
custompmrapmdec= pmllpmbb_to_customsky(lbdvrpmllpmbb[:,4],
lbdvrpmllpmbb[:,5],
lbdvrpmllpmbb[:,0],
lbdvrpmllpmbb[:,1],
degree=True)
orb_vxvv= nu.array([customradec[:,0],customradec[:,1],
lbdvrpmllpmbb[:,2],
custompmrapmdec[:,0],custompmrapmdec[:,1],
lbdvrpmllpmbb[:,3]]).T
else:
#shape=(2tintJ-1,6)
orb_vxvv= nu.array([iR.flatten(),ivR.flatten(),ivT.flatten(),
iz.flatten(),ivz.flatten(),iphi.flatten()]).T
out= 0.
for ii in range(vxvv.shape[0]):
sub_vxvv= (orb_vxvv-vxvv[ii,:].flatten())**2.
#print(sub_vxvv[nu.argmin(nu.sum(sub_vxvv,axis=1))])
if not vxvv_err is None:
sub_vxvv/= vxvv_err[ii,:]**2.
else:
sub_vxvv/= 0.01**2.
out+= logsumexp(-0.5*nu.sum(sub_vxvv,axis=1))
return -out
|
<reponame>itsmesatwik/pants
"""Install next gen sequencing analysis tools not currently packaged.
"""
import os
from fabric.api import *
from fabric.contrib.files import *
from shared import (_if_not_installed, _make_tmp_dir,
_get_install, _get_install_local, _make_copy, _configure_make,
_java_install,
_symlinked_java_version_dir, _fetch_and_unpack, _python_make)
@_if_not_installed("faToTwoBit")
def install_ucsc_tools(env):
"""Install useful executables from UCSC.
todo: install from source to handle 32bit and get more programs
http://hgdownload.cse.ucsc.edu/admin/jksrc.zip
"""
tools = ["liftOver", "faToTwoBit", "bedToBigBed",
"bigBedInfo", "bigBedSummary", "bigBedToBed",
"bigWigInfo", "bigWigSummary", "bigWigToBedGraph", "bigWigToWig",
"fetchChromSizes", "wigToBigWig", "faSize", "twoBitInfo",
"faCount"]
url = "http://hgdownload.cse.ucsc.edu/admin/exe/linux.x86_64/"
install_dir = os.path.join(env.system_install, "bin")
for tool in tools:
with cd(install_dir):
if not exists(tool):
env.safe_sudo("wget %s%s" % (url, tool))
env.safe_sudo("chmod a+rwx %s" % tool)
# --- Alignment tools
@_if_not_installed("bowtie")
def install_bowtie(env):
"""Install the bowtie short read aligner.
"""
version = "0.12.7"
url = "http://downloads.sourceforge.net/project/bowtie-bio/bowtie/%s/" \
"bowtie-%s-src.zip" % (version, version)
_get_install(url, env, _make_copy("find -perm -100 -name 'bowtie*'"))
@_if_not_installed("bwa")
def install_bwa(env):
version = "0.5.9"
url = "http://downloads.sourceforge.net/project/bio-bwa/bwa-%s.tar.bz2" % (
version)
def _fix_makefile():
arch = run("uname -m")
# if not 64bit, remove the appropriate flag
if arch.find("x86_64") == -1:
run("sed -i.bak -r -e 's/-O2 -m64/-O2/g' Makefile")
_get_install(url, env, _make_copy("ls -1 bwa solid2fastq.pl qualfa2fq.pl",
_fix_makefile))
@_if_not_installed("bfast")
def install_bfast(env):
version = "0.6.4"
vext = "e"
url = "http://downloads.sourceforge.net/project/bfast/bfast/%s/bfast-%s%s.tar.gz"\
% (version, version, vext)
_get_install(url, env, _configure_make)
@_if_not_installed("perm")
def install_perm(env):
version = "0.3.3"
url = "http://perm.googlecode.com/files/PerM%sSource.zip" % version
def gcc44_makefile_patch():
gcc_cmd = "g++44"
with settings(hide('warnings', 'running', 'stdout', 'stderr'),
warn_only=True):
result = run("%s -v" % gcc_cmd)
print result.return_code
if result.return_code == 0:
sed("makefile", "g\+\+", gcc_cmd)
_get_install(url, env, _make_copy("ls -1 perm", gcc44_makefile_patch))
@_if_not_installed("gmap")
def install_gmap(env):
version = "2010-07-27"
url = "http://research-pub.gene.com/gmap/src/gmap-gsnap-%s.tar.gz" % version
_get_install(url, env, _configure_make)
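# Some download sites require a session cookie: fetch the referring page first to
# obtain it, then download the target with the referer and the saved cookies.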
def _wget_with_cookies(ref_url, dl_url):
run("wget --cookies=on --keep-session-cookies --save-cookies=cookie.txt %s"
% (ref_url))
run("wget --referer=%s --cookies=on --load-cookies=cookie.txt "
"--keep-session-cookies --save-cookies=cookie.txt %s" %
(ref_url, dl_url))
@_if_not_installed("novoalign")
def install_novoalign(env):
base_version = "V2.07.09"
cs_version = "V1.01.09"
_url = "http://www.novocraft.com/downloads/%s/" % base_version
ref_url = "http://www.novocraft.com/main/downloadpage.php"
base_url = "%s/novocraft%s.gcc.tar.gz" % (_url, base_version)
cs_url = "%s/novoalignCS%s.gcc.tar.gz" % (_url, cs_version)
install_dir = os.path.join(env.system_install, "bin")
with _make_tmp_dir() as work_dir:
with cd(work_dir):
_wget_with_cookies(ref_url, base_url)
run("tar -xzvpf novocraft%s.gcc.tar.gz" % base_version)
with cd("novocraft"):
for fname in ["isnovoindex", "novo2maq", "novo2paf",
"novo2sam.pl", "novoalign", "novobarcode",
"novoindex", "novope2bed.pl", "novorun.pl",
"novoutil"]:
env.safe_sudo("mv %s %s" % (fname, install_dir))
with _make_tmp_dir() as work_dir:
with cd(work_dir):
_wget_with_cookies(ref_url, cs_url)
run("tar -xzvpf novoalignCS%s.gcc.tar.gz" % cs_version)
with cd("novoalignCS"):
for fname in ["novoalignCS"]:
env.safe_sudo("mv %s %s" % (fname, install_dir))
@_if_not_installed("lastz")
def install_lastz(env):
version = "1.02.00"
url = "http://www.bx.psu.edu/miller_lab/dist/" \
"lastz-%s.tar.gz" % version
_get_install(url, env, _make_copy("find -perm -100 -name 'lastz'"))
@_if_not_installed("MosaikAligner")
def install_mosaik(env):
repository = "git clone git://github.com/wanpinglee/MOSAIK.git"
def _chdir_src(work_cmd):
def do_work(env):
with cd("src"):
work_cmd(env)
return do_work
_get_install(repository, env, _chdir_src(_make_copy("ls -1 ../bin/*")))
# --- Utilities
@_if_not_installed("samtools")
def install_samtools(env):
version = "0.1.17"
url = "http://downloads.sourceforge.net/project/samtools/samtools/" \
"%s/samtools-%s.tar.bz2" % (version, version)
_get_install(url, env, _make_copy("find -perm -100 -type f"))
@_if_not_installed("fastq_quality_boxplot_graph.sh")
def install_fastx_toolkit(env):
version = "0.0.13"
gtext_version = "0.6"
url_base = "http://hannonlab.cshl.edu/fastx_toolkit/"
fastx_url = "%sfastx_toolkit-%s.tar.bz2" % (url_base, version)
gtext_url = "%slibgtextutils-%s.tar.bz2" % (url_base, gtext_version)
def _remove_werror(env):
sed("configure", " -Werror", "")
_get_install(gtext_url, env, _configure_make, post_unpack_fn=_remove_werror)
_get_install(fastx_url, env, _configure_make, post_unpack_fn=_remove_werror)
@_if_not_installed("SolexaQA.pl")
def install_solexaqa(env):
version = "1.4"
url = "http://downloads.sourceforge.net/project/solexaqa/src/" \
"SolexaQA_v.%s.pl.zip" % version
with _make_tmp_dir() as work_dir:
with cd(work_dir):
run("wget %s" % url)
run("unzip %s" % os.path.basename(url))
env.safe_sudo("mv SolexaQA.pl %s" % os.path.join(env.system_install, "bin"))
@_if_not_installed("fastqc")
def install_fastqc(env):
version = "0.9.1"
url = "http://www.bioinformatics.bbsrc.ac.uk/projects/fastqc/" \
"fastqc_v%s.zip" % version
executable = "fastqc"
install_dir = _symlinked_java_version_dir("fastqc", version, env)
if install_dir:
with _make_tmp_dir() as work_dir:
with cd(work_dir):
run("wget %s" % (url))
run("unzip %s" % os.path.basename(url))
with cd("FastQC"):
env.safe_sudo("chmod a+rwx %s" % executable)
env.safe_sudo("mv * %s" % install_dir)
env.safe_sudo("ln -s %s/%s %s/bin/%s" % (install_dir, executable,
env.system_install, executable))
@_if_not_installed("intersectBed")
def install_bedtools(env):
repository = "git clone git://github.com/arq5x/bedtools.git"
_get_install(repository, env, _make_copy("ls -1 bin/*"))
@_if_not_installed("sabre")
def install_sabre(env):
repo = "git clone git://github.com/najoshi/sabre.git"
_get_install(repo, env, _make_copy("find -perm -100 -name 'sabre*'"))
_shrec_run = """
#!/usr/bin/perl
use warnings;
use strict;
use FindBin qw($RealBin);
use Getopt::Long;
my @java_args;
my @args;
foreach (@ARGV) {
if (/^\-X/) {push @java_args,$_;}
else {push @args,$_;}}
system("java -cp $RealBin @java_args Shrec @args");
"""
@_if_not_installed("shrec")
def install_shrec(env):
version = "2.2"
url = "http://downloads.sourceforge.net/project/shrec-ec/SHREC%%20%s/bin.zip" % version
install_dir = _symlinked_java_version_dir("shrec", version, env)
if install_dir:
shrec_script = "%s/shrec" % install_dir
with _make_tmp_dir() as work_dir:
with cd(work_dir):
run("wget %s" % (url))
run("unzip %s" % os.path.basename(url))
env.safe_sudo("mv *.class %s" % install_dir)
for line in _shrec_run.split("\n"):
if line.strip():
append(shrec_script, line, use_sudo=env.use_sudo)
env.safe_sudo("chmod a+rwx %s" % shrec_script)
env.safe_sudo("ln -s %s %s/bin/shrec" % (shrec_script, env.system_install))
# -- Analysis
def install_picard(env):
version = "1.52"
url = "http://downloads.sourceforge.net/project/picard/" \
"picard-tools/%s/picard-tools-%s.zip" % (version, version)
_java_install("picard", version, url, env)
def install_gatk(env):
version = "1.1-35-ge253f6f"
ext = ".tar.bz2"
url = "ftp://ftp.broadinstitute.org/pub/gsa/GenomeAnalysisTK/"\
"GenomeAnalysisTK-%s%s" % (version, ext)
_java_install("gatk", version, url, env)
def install_gatk_queue(env):
version = "1.0.4052"
ext = ".tar.bz2"
url = "ftp://ftp.broadinstitute.org/pub/gsa/Queue/"\
"Queue-%s%s" % (version, ext)
_java_install("gatk_queue", version, url, env)
def install_snpeff(env):
version = "1_9_5"
genomes = ["hg37.61", "mm37.61"]
url = "http://downloads.sourceforge.net/project/snpeff/" \
"snpEff_v%s_core.zip" % version
genome_url_base = "http://downloads.sourceforge.net/project/snpeff/"\
"databases/v%s/snpEff_v%s_%s.zip"
install_dir = _symlinked_java_version_dir("snpeff", version, env)
if install_dir:
with _make_tmp_dir() as work_dir:
with cd(work_dir):
dir_name = _fetch_and_unpack(url)
with cd(dir_name):
env.safe_sudo("mv *.jar %s" % install_dir)
run("sed -i.bak -r -e 's/data_dir = \.\/data\//data_dir = %s\/data/' %s" %
(install_dir.replace("/", "\/"), "snpEff.config"))
run("chmod a+r *.config")
env.safe_sudo("mv *.config %s" % install_dir)
data_dir = os.path.join(install_dir, "data")
env.safe_sudo("mkdir %s" % data_dir)
for org in genomes:
if not exists(os.path.join(data_dir, org)):
gurl = genome_url_base % (version, version, org)
_fetch_and_unpack(gurl, need_dir=False)
env.safe_sudo("mv data/%s %s" % (org, data_dir))
@_if_not_installed("freebayes")
def install_freebayes(env):
repository = "git clone --recursive git://github.com/ekg/freebayes.git"
_get_install(repository, env, _make_copy("ls -1 bin/*"))
def _install_samtools_libs(env):
repository = "svn co --non-interactive " \
"https://samtools.svn.sourceforge.net/svnroot/samtools/trunk/samtools"
def _samtools_lib_install(env):
lib_dir = os.path.join(env.system_install, "lib")
include_dir = os.path.join(env.system_install, "include", "bam")
run("make")
env.safe_sudo("mv -f libbam* %s" % lib_dir)
env.safe_sudo("mkdir -p %s" % include_dir)
env.safe_sudo("mv -f *.h %s" % include_dir)
check_dir = os.path.join(env.system_install, "include", "bam")
if not exists(check_dir):
_get_install(repository, env, _samtools_lib_install)
@_if_not_installed("tophat")
def install_tophat(env):
_install_samtools_libs(env)
version = "1.2.0"
def _fixseqan_configure_make(env):
"""Upgrade local copy of SeqAn before compiling to fix errors.
http://seqanswers.com/forums/showthread.php?t=9082
"""
with cd("src/SeqAn-1.1"):
run("wget http://www.seqan.de/uploads/media/Seqan_Release_1.2.zip")
run("rm -rf seqan")
run("unzip Seqan_Release_1.2.zip")
_configure_make(env)
url = "http://tophat.cbcb.umd.edu/downloads/tophat-%s.tar.gz" % version
_get_install(url, env, _fixseqan_configure_make)
@_if_not_installed("cufflinks")
def install_cufflinks(env):
# XXX problems on CentOS with older default version of boost libraries
_install_samtools_libs(env)
version = "1.0.1"
url = "http://cufflinks.cbcb.umd.edu/downloads/cufflinks-%s.tar.gz" % version
_get_install(url, env, _configure_make)
# --- Assembly
@_if_not_installed("ABYSS")
def install_abyss(env):
    # XXX check for no sparsehash on non-ubuntu systems
version = "1.2.7"
url = "http://www.bcgsc.ca/downloads/abyss/abyss-%s.tar.gz" % version
def _remove_werror(env):
sed("configure", " -Werror", "")
_get_install(url, env, _configure_make, post_unpack_fn=_remove_werror)
def install_transabyss(env):
version = "1.2.0"
url = "http://www.bcgsc.ca/platform/bioinfo/software/trans-abyss/" \
"releases/%s/trans-ABySS-v%s.tar.gz" % (version, version)
_get_install_local(url, env, _make_copy(do_make=False))
@_if_not_installed("velvetg")
def install_velvet(env):
version = "1.0.13"
url = "http://www.ebi.ac.uk/~zerbino/velvet/velvet_%s.tgz" % version
_get_install(url, env, _make_copy("find -perm -100 -name 'velvet*'"))
def install_trinity(env):
version = "03122011"
url = "http://downloads.sourceforge.net/project/trinityrnaseq/" \
"trinityrnaseq-%s.tgz" % version
_get_install_local(url, env, _make_copy())
# --- ChIP-seq
@_if_not_installed("macs14")
def install_macs(env):
version = "1.4.0rc2"
url = "http://macs:<EMAIL>/MACS/src/"\
"MACS-%s.tar.gz" % version
_get_install(url, env, _python_make)
|
<reponame>Reiningecho90/Raspberry-Pi-0W-Rocket-Project<filename>Launch.py
# Imports
from datetime import datetime
import smbus
import math
import time
import sys
import pandas as pd
import RPi.GPIO as GPIO
# GPIO initialization
GPIO.setmode(GPIO.BOARD)
GPIO.setup(18, GPIO.OUT)
pwm = GPIO.PWM(18, 100)
pwm.start(0)
# Register
power_mgmt_1 = 0x6b
power_mgmt_2 = 0x6c
def save_to_file():
angles = (int(read_word_2c(0x3b)) / 131,
int(read_word_2c(0x3d)) / 131,)
    # Append one "x, y" sample row; a context manager ensures the file is closed
    with open('MPUData.csv', 'a') as file:
        file.write(str(angles[0]) + ', ' + str(angles[1]) + '\n')
def read_byte(reg):
return bus.read_byte_data(address, reg)
def read_word(reg):
h = bus.read_byte_data(address, reg)
l = bus.read_byte_data(address, reg+1)
value = (h << 8) + l
return value
def read_word_2c(reg):
val = read_word(reg)
if (val >= 0x8000):
return -((65535 - val) + 1)
else:
return val
def dist(a,b):
return math.sqrt((a*a)+(b*b))
def get_y_rotation(x,y,z):
    radians = math.atan2(x, dist(y,z))
    return math.degrees(radians)
def get_x_rotation(x,y,z):
radians = math.atan2(y, dist(x,z))
return math.degrees(radians)
bus = smbus.SMBus(1)
address = 0x68
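# Writing 0 to the power management register takes the MPU6050 out of sleep mode
# so it starts producing accelerometer samples.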
bus.write_byte_data(address, power_mgmt_1, 0)
# Misc value definitions
count = 0
status = True
# Countdown timer
time.sleep(600)
# Main launch loop
while status:
deploy = False
spike = False
spike_t = 0
dep_time = 0
#deployment auto-sequence
if count == 168:
GPIO.output(18, True)
pwm.ChangeDutyCycle(5)
time.sleep(1)
pwm.ChangeDutyCycle(0)
GPIO.output(18, False)
dep_time = str(datetime.now())
a_x = read_word_2c(0x3b)
a_y = read_word_2c(0x3d)
a_z = read_word_2c(0x3f)
# Data to screen output
print ("\033c")
print ("Accel")
print ("------")
print ("\n")
sys.stdout.write(f"Scaled X: {a_x / 16348}")
if a_x / 16348 > 1.2:
spike = True
spike_t = str(datetime.now())
sys.stdout.flush()
print ("\n")
sys.stdout.write(f"Scaled Y: {a_y / 16348}")
if a_y / 16348 > 1.2:
spike = True
spike_t = str(datetime.now())
sys.stdout.flush()
print ("\n")
sys.stdout.write(f"Scaled Z: {a_z / 16348}")
if a_z /16348 > 1.2:
spike = True
spike_t = str(datetime.now())
sys.stdout.flush()
fail = False
# Data-spike output/failsafe
if spike_t != dep_time:
fail = True
deploy = False
elif spike_t == dep_time and count > 50:
print ("\n")
sys.stdout.write("MPU6050 Data Read: MPU HAS CONFIRMATION OF NOMINAL PARACHUTE DEPLOY")
sys.stdout.flush()
deploy = True
print ("\n")
print (count)
if not deploy and fail:
sys.stdout.write("MPU6050 Data Read: CURRENT MPU DATA HAS SHOWN THAT PARAHUTE DEPLOY SEQUENCE MAY HAVE BEEN ANOMINAL!")
sys.stdout.flush()
save_to_file()
time.sleep(0.1)
count = count+1
# TD confirmation
if not spike and count > 168:
continue
elif spike and count > 168:
sys.stdout.write("\n")
sys.stdout.flush()
sys.stdout.write("\n")
sys.stdout.flush()
sys.stdout.write("Tango Delta, touchdown confirmed.")
sys.stdout.flush()
sys.stdout.write("\n")
sys.stdout.flush()
sys.stdout.write("Switching to ground control systems and preparing for data review.")
sys.stdout.flush()
status = False
elif spike and count > 300:
sys.stdout.write("Tango Delta anominal, touchdown timing failure.")
sys.stdout.flush()
status = False
else:
continue
status = True
time.sleep(5)
# Data review and shutdown
while status:
print ("\n")
print ("Preparing data review systems, stand by for post-flight review.")
data_read = pd.read_csv('MPUData.csv', sep=', ', header=None, engine='python')
time.sleep(10)
    for value in data_read.values.flatten():
if abs(value) > 5:
sys.stdout.write("\n")
sys.stdout.flush()
            sys.stdout.write("\n")
sys.stdout.flush()
sys.stdout.write(f"Anamoly found, G-Force value exceded nominal forces, force was equal to: {value} at point of anamoly, note that anomaly may have occured at parachute deploy but G limit still applies for deploy.")
sys.stdout.flush()
else:
sys.stdout.write("\n")
sys.stdout.flush()
sys.stdout.write("No anamolies found, craft safe on the ground, proceeding to post-flight calibration.")
sys.stdout.flush()
GPIO.output(18, True)
pwm.ChangeDutyCycle(0)
sys.stdout.write("Post flight calibration done, exiting program...")
sys.stdout.flush()
status = False
|
<filename>openrave/docs/breathe/__init__.py
from docutils import nodes
from docutils.parsers.rst.directives import unchanged_required
import os
import sys
import copy
from docutils.parsers import rst
from breathe.builder import RstBuilder, BuilderFactory
from breathe.finder import FinderFactory, NoMatchesError, MultipleMatchesError
from breathe.parser import DoxygenParserFactory, DoxygenIndexParser
from breathe.renderer.rst.doxygen import DoxygenToRstRendererFactoryCreator
from breathe.finder.doxygen import DoxygenItemFinderFactoryCreator, ItemMatcherFactory
import docutils.nodes
import sphinx.addnodes
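# Sphinx directives that look up documented entities in Doxygen XML output via the
# finder/matcher factories and render the matches into reStructuredText nodes.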
class BaseDirective(rst.Directive):
def __init__(self, builder_factory, finder_factory, matcher_factory, project_info_factory, *args):
rst.Directive.__init__(self, *args)
self.builder_factory = builder_factory
self.finder_factory = finder_factory
self.matcher_factory = matcher_factory
self.project_info_factory = project_info_factory
# Directives
# ----------
class DoxygenIndexDirective(BaseDirective):
required_arguments = 0
optional_arguments = 2
option_spec = {
"path" : unchanged_required,
"project" : unchanged_required,
}
has_content = False
def run(self):
project_info = self.project_info_factory.create_project_info(self.options)
finder = self.finder_factory.create_finder(project_info)
# try:
data_object = finder.root()
# except
builder = self.builder_factory.create_builder(project_info, self.state.document)
nodes = builder.build(data_object)
return nodes
class DoxygenFunctionDirective(BaseDirective):
required_arguments = 1
optional_arguments = 1
option_spec = {
"path" : unchanged_required,
"project" : unchanged_required,
}
has_content = False
def run(self):
function_name = self.arguments[0]
project_info = self.project_info_factory.create_project_info(self.options)
finder = self.finder_factory.create_finder(project_info)
matcher = self.matcher_factory.create_name_type_matcher(function_name, "function")
try:
data_object = finder.find_one(matcher)
except NoMatchesError, e:
warning = 'doxygenfunction: Cannot find function "%s" in doxygen xml output' % function_name
return [ docutils.nodes.warning( "", docutils.nodes.paragraph("", "", docutils.nodes.Text(warning))),
self.state.document.reporter.warning( warning, line=self.lineno) ]
builder = self.builder_factory.create_builder(project_info, self.state.document)
nodes = builder.build(data_object)
return nodes
class DoxygenStructDirective(BaseDirective):
kind = "struct"
required_arguments = 1
optional_arguments = 1
option_spec = {
"path" : unchanged_required,
"project" : unchanged_required,
}
has_content = False
def run(self):
struct_name = self.arguments[0]
project_info = self.project_info_factory.create_project_info(self.options)
finder = self.finder_factory.create_finder(project_info)
# try:
matcher = self.matcher_factory.create_name_type_matcher(struct_name, self.kind)
try:
data_object = finder.find_one(matcher)
except NoMatchesError, e:
warning = 'doxygen%s: Cannot find %s "%s" in doxygen xml output' % (self.kind, self.kind, struct_name)
return [ docutils.nodes.warning( "", docutils.nodes.paragraph("", "", docutils.nodes.Text(warning))),
self.state.document.reporter.warning( warning, line=self.lineno) ]
builder = self.builder_factory.create_builder(project_info, self.state.document)
nodes = builder.build(data_object)
return nodes
class DoxygenClassDirective(DoxygenStructDirective):
kind = "class"
class DoxygenEnumDirective(DoxygenStructDirective):
kind = "enum"
class DoxygenTypedefDirective(DoxygenStructDirective):
kind = "typedef"
# Setup Administration
# --------------------
class DirectiveContainer(object):
def __init__(self, directive, builder, finder_factory, matcher_factory, project_info_factory):
self.directive = directive
self.builder = builder
self.finder_factory = finder_factory
self.matcher_factory = matcher_factory
self.project_info_factory = project_info_factory
# Required for sphinx to inspect
self.required_arguments = directive.required_arguments
self.optional_arguments = directive.optional_arguments
self.option_spec = directive.option_spec
self.has_content = directive.has_content
def __call__(self, *args):
return self.directive(self.builder, self.finder_factory, self.matcher_factory, self.project_info_factory, *args)
class ProjectInfo(object):
def __init__(self, name, path):
self._name = name
self._path = path
def name(self):
return self._name
def path(self):
return self._path
class ProjectInfoFactory(object):
def __init__(self):
self.projects = {}
self.default_project = None
self.project_count = 0
self.project_info_store = {}
def update(self, projects, default_project):
self.projects = projects
self.default_project = default_project
def default_path(self):
return self.projects[self.default_project]
def create_project_info(self, options):
name = ""
path = self.default_path()
if options.has_key("project"):
try:
path = self.projects[ options["project"] ]
name = options["project"]
except KeyError, e:
sys.stderr.write(
"Unable to find project '%s' in breathe_projects dictionary" % options["project"]
)
if options.has_key("path"):
path = options["path"]
try:
return self.project_info_store[path]
except KeyError:
if not name:
name = "project%s" % self.project_count
self.project_count += 1
project_info = ProjectInfo(name, path)
self.project_info_store[path] = project_info
return project_info
class DoxygenDirectiveFactory(object):
directives = {
"doxygenindex" : DoxygenIndexDirective,
"doxygenfunction" : DoxygenFunctionDirective,
"doxygenstruct" : DoxygenStructDirective,
"doxygenclass" : DoxygenClassDirective,
"doxygenenum" : DoxygenEnumDirective,
"doxygentypedef" : DoxygenTypedefDirective,
}
def __init__(self, builder_factory, finder_factory, matcher_factory, project_info_factory):
self.builder_factory = builder_factory
self.finder_factory = finder_factory
self.matcher_factory = matcher_factory
self.project_info_factory = project_info_factory
def create_index_directive_container(self):
return self.create_directive_container("doxygenindex")
def create_function_directive_container(self):
return self.create_directive_container("doxygenfunction")
def create_struct_directive_container(self):
return self.create_directive_container("doxygenstruct")
def create_enum_directive_container(self):
return self.create_directive_container("doxygenenum")
def create_typedef_directive_container(self):
return self.create_directive_container("doxygentypedef")
def create_class_directive_container(self):
return self.create_directive_container("doxygenclass")
def create_directive_container(self, type_):
return DirectiveContainer(
self.directives[type_],
self.builder_factory,
self.finder_factory,
self.matcher_factory,
self.project_info_factory
)
def get_config_values(self, app):
# All DirectiveContainers maintain references to this project info factory
# so we can update this to update them
self.project_info_factory.update(
app.config.breathe_projects,
app.config.breathe_default_project
)
class NodeNotFoundError(Exception):
    """Raised when none of the node sources provides the requested node type."""
class NodeFactory(object):
def __init__(self, *args):
self.sources = args
def __getattr__(self, node_name):
for source in self.sources:
try:
return getattr(source, node_name)
except AttributeError:
pass
raise NodeNotFoundError(node_name)
# Setup
# -----
def setup(app):
parser_factory = DoxygenParserFactory()
matcher_factory = ItemMatcherFactory()
item_finder_factory_creator = DoxygenItemFinderFactoryCreator(parser_factory, matcher_factory)
index_parser = DoxygenIndexParser()
finder_factory = FinderFactory(index_parser, item_finder_factory_creator)
node_factory = NodeFactory(docutils.nodes, sphinx.addnodes)
renderer_factory_creator = DoxygenToRstRendererFactoryCreator(node_factory, parser_factory)
builder_factory = BuilderFactory(RstBuilder, renderer_factory_creator)
project_info_factory = ProjectInfoFactory()
directive_factory = DoxygenDirectiveFactory(builder_factory, finder_factory, matcher_factory, project_info_factory)
app.add_directive(
"doxygenindex",
directive_factory.create_index_directive_container(),
)
app.add_directive(
"doxygenfunction",
directive_factory.create_function_directive_container(),
)
app.add_directive(
"doxygenstruct",
directive_factory.create_struct_directive_container(),
)
app.add_directive(
"doxygenenum",
directive_factory.create_enum_directive_container(),
)
app.add_directive(
"doxygentypedef",
directive_factory.create_typedef_directive_container(),
)
app.add_directive(
"doxygenclass",
directive_factory.create_class_directive_container(),
)
app.add_config_value("breathe_projects", {}, True)
app.add_config_value("breathe_default_project", "", True)
app.connect("builder-inited", directive_factory.get_config_values)
|
# ____ ____
# / /\/ /
# /___/ \ / Copyright (c) 2021, Xilinx®.
# \ \ \/ Author: <NAME> <<EMAIL>>
# \ \
# / /
# /___/ /\
# \ \ / \
# \___\/\___\
#
# Licensed under the Apache License, Version 2.0
#
import os
import sys
from colcon_core.plugin_system import satisfies_version
from colcon_acceleration.subverb import (
AccelerationSubverbExtensionPoint,
get_vitis_dir,
get_build_dir,
run,
get_vivado_dir,
get_vitis_hls_dir,
get_platform_dir,
)
from colcon_acceleration.verb import yellow, red
class VppSubverb(AccelerationSubverbExtensionPoint):
"""Vitis v++ compiler wrapper.
    TODO: Document build process with v++. Document environment variables
NOTE 1: hardcoded build directory path
TODO: REMOVE
- compile: colcon vitis v++ "-c -t sw_emu --config ../../test/src/zcu102.cfg -k vadd -I../../test/src ../../test/src/vadd.cpp -o vadd.xo"
- link: colcon vitis v++ "-l -t sw_emu --config ../../test/src/zcu102.cfg ./vadd.xo -o vadd.xclbin"
"""
def __init__(self): # noqa: D107
super().__init__()
satisfies_version(
AccelerationSubverbExtensionPoint.EXTENSION_POINT_VERSION, "^1.0"
)
def add_arguments(self, *, parser): # noqa: D102
parser.description += (
"\n\n"
"The Vitis compiler is a standalone command line utility for both compiling "
"kernel accelerator functions into Xilinx object (.xo) files, and linking "
"them with other .xo files and supported platforms to build an FPGA binary. \n"
)
argument = parser.add_argument(
"args",
nargs="?",
help='v++ compiler arguments provided as a String ("example arguments"). ',
)
# try:
# from argcomplete.completers import ChoicesCompleter
# except ImportError:
# pass
# else:
# options = []
# argument.completer = ChoicesCompleter(options)
def main(self, *, context): # noqa: D102
vitis_dir = get_vitis_dir()
build_dir = get_build_dir()
# create the "build/v++"" directory (if it doesn't exist already)
# NOTE 1: hardcoded
vpp_dir = build_dir + "/v++"
cmd = "mkdir -p " + vpp_dir
outs, errs = run(cmd, shell=True, timeout=20)
if errs:
red(
"Something went wrong while creating the build/v++ directory.\n"
+ "Review the output: "
+ errs
)
sys.exit(1)
        # compose the command line, starting with the environment variables:
# XILINX_VIVADO=<path-to-ros2-ws>/xilinx/vivado PATH=<path-to-ros2-ws>/xilinx/vitis_hls/bin:$PATH
# <path-to-ros2-ws>/xilinx/vitis/bin/v++
#
cmd = ""
cmd += "cd " + vpp_dir + " && " # head to build dir
cmd += " PLATFORM_REPO_PATHS=" + get_platform_dir() # add target device dir
cmd += " XILINX_VIVADO=" + get_vivado_dir() # add Vivado dir
cmd += " XILINX_VITIS=" + get_vitis_dir() # add Vitis dir
cmd += " XILINX_HLS=" + get_vitis_hls_dir() # add Vitis HLS dir
cmd += " PATH=$PATH:" + get_vitis_hls_dir() + "/bin" # add HLS bin to path
cmd += " " + get_vitis_dir() + "/bin/v++ " # full path of v++ compiler
# add args
if context.args.args:
cmd += context.args.args
else:
cmd += "--help"
yellow(cmd)
os.system(cmd)
|
import os
import json
import time
import torch
from nas_201_api import NASBench201API as API
from xautodl.models import get_cell_based_tiny_net
from fvcore.nn import FlopCountAnalysis, parameter_count
from matrix_transform import build_matrix
NODE_TYPE_DICT = {
"none": 0,
"skip_connect": 1,
"nor_conv_1x1": 2,
"nor_conv_3x3": 3,
"avg_pool_3x3": 4
}
def main(api):
dataset = {}
for index, arch_str in enumerate(api):
arch_dict = {}
matrix = build_matrix(arch_str)
arch_dict['cell_adjacency'] = matrix
cifar10_valid_dict = api.get_more_info(index, 'cifar10-valid', 199, hp='200', is_random=False)
cifar10_dict = api.get_more_info(index, 'cifar10', 199, hp='200', is_random=False)
cifar10_val_acc = cifar10_valid_dict['valid-accuracy']
cifar10_test_acc = cifar10_dict['test-accuracy']
# print(cifar10_val_acc)
# print(cifar10_test_acc)
arch_dict['cifar10_val_acc'] = cifar10_val_acc
arch_dict['cifar10_test_acc'] = cifar10_test_acc
cifar100_dict = api.get_more_info(index, 'cifar100', 199, hp='200', is_random=False)
cifar100_val_acc = cifar100_dict['valid-accuracy']
cifar100_test_acc = cifar100_dict['test-accuracy']
        # print(cifar100_val_acc)
        # print(cifar100_test_acc)
arch_dict['cifar100_val_acc'] = cifar100_val_acc
arch_dict['cifar100_test_acc'] = cifar100_test_acc
        imagenet16_dict = api.get_more_info(index, 'ImageNet16-120', 199, hp='200', is_random=False)
        imagenet16_val_acc = imagenet16_dict['valid-accuracy']
        imagenet16_test_acc = imagenet16_dict['test-accuracy']
        # print(imagenet16_val_acc)
        # print(imagenet16_test_acc)
        arch_dict['imagenet16_val_acc'] = imagenet16_val_acc
        arch_dict['imagenet16_test_acc'] = imagenet16_test_acc
info = api.query_meta_info_by_index(index, '200')
cifar10_cost_metrics = info.get_compute_costs('cifar10-valid')
cifar10_flops = cifar10_cost_metrics['flops']
cifar10_params = cifar10_cost_metrics['params']
cifar10_latency = cifar10_cost_metrics['latency']
# print(cifar10_flops, cifar10_params, cifar10_latency)
# arch_dict['cifar10_flops'] = cifar10_flops
# arch_dict['cifar10_params'] = cifar10_params
arch_dict['cifar10_latency'] = cifar10_latency
cifar100_cost_metrics = info.get_compute_costs('cifar100')
cifar100_flops = cifar100_cost_metrics['flops']
cifar100_params = cifar100_cost_metrics['params']
cifar100_latency = cifar100_cost_metrics['latency']
# print(cifar100_flops, cifar100_params, cifar100_latency)
# arch_dict['cifar100_flops'] = cifar100_flops
# arch_dict['cifar100_params'] = cifar100_params
arch_dict['cifar100_latency'] = cifar100_latency
image16_cost_metrics = info.get_compute_costs('ImageNet16-120')
image16_flops = image16_cost_metrics['flops']
image16_params = image16_cost_metrics['params']
image16_latency = image16_cost_metrics['latency']
# print(image16_flops, image16_params, image16_latency)
# arch_dict['image16_flops'] = image16_flops
# arch_dict['image16_params'] = image16_params
arch_dict['imagenet16_latency'] = image16_latency
for network_type in ['cifar10-valid', 'cifar100', 'ImageNet16-120']:
total_flops, total_params, opt_flops, opt_params = calculate_cell_opt_flops_params(api, index, network_type)
arch_dict['{}_total_flops'.format(network_type)] = total_flops
arch_dict['{}_total_params'.format(network_type)] = total_params
arch_dict['{}_opt_flops'.format(network_type)] = opt_flops
arch_dict['{}_opt_params'.format(network_type)] = opt_params
arch_dict['arch_str'] = arch_str
dataset[index] = arch_dict
print('***************************No. {} arch***************************'.format(index))
assert len(dataset) == len(api), 'Wrong length of dataset'
return dataset
def calculate_cell_opt_flops_params(api, index=0, network_type='cifar10-valid'):
config = api.get_net_config(index, network_type)
network = get_cell_based_tiny_net(config)
img_sz = None
if 'cifar' in network_type:
img_sz = 32
if 'ImageNet16-120' in network_type:
img_sz = 16
assert img_sz is not None, 'img_sz is None'
inputs = torch.randn(1,3,img_sz, img_sz)
network.eval()
#1 cal total flops and params
flops_obj = FlopCountAnalysis(network, inputs)
total_flops = flops_obj.total()
params_dict = parameter_count(network)
total_params = params_dict['']
#2 extract each opt flops and params in each cell
extract_op = lambda item:[NODE_TYPE_DICT[item[0]],item[1]]
opts = api.str2lists(config['arch_str']) # [(('nor_conv_3x3', 0),), (('nor_conv_3x3', 0), ('avg_pool_3x3', 1)), (('skip_connect', 0), ('nor_conv_3x3', 1), ('skip_connect', 2))]
opts_type = [] # [[3, 0], [3, 0], [4, 1], [1, 0], [3, 1], [1, 2]]
for node_ops in opts:
for op in node_ops:
opts_type.append(extract_op(op))
N = config['N']
cells_idx_list = [i for i in range(N)] + [j+1+N for j in range(N)] + [k+2+2*N for k in range(N)] # [0, 1, 2, 3, 4, 6, 7, 8, 9, 10, 12, 13, 14, 15, 16]
flops_dict = flops_obj.by_module()
opt_flops = {}
opt_params = {}
for cell_idx in cells_idx_list:
cell_opt_flops = []
cell_opt_params = []
for opt_idx in range(len(opts_type)):
key = 'cells.{}.layers.{}.op'.format(cell_idx, opt_idx)
cell_opt_flops.append(int(flops_dict[key]))
cell_opt_params.append(params_dict[key])
opt_flops['cells{}'.format(cell_idx)] = cell_opt_flops
opt_params['cells{}'.format(cell_idx)] = cell_opt_params
return int(total_flops), total_params, opt_flops, opt_params
if __name__ == '__main__':
start = time.time()
api = API('./data/NAS-Bench-201-v1_1-096897.pth', verbose=False)
save_path = './data'
file_name = 'nasbench201_with_edge_flops_and_params.json'
# file_name = 'target.json'
save_file = os.path.join(save_path, file_name)
if os.path.exists(save_file):
os.remove(save_file)
dataset = main(api)
    with open(save_file, 'w') as f:
        json.dump(dataset, f)
print('all ok!!!!!!!!!!!!! using {} seconds'.format(time.time()-start))
|
import concurrent.futures
import logging
import time
import traceback
from pybatfish.exception import BatfishException
from concurrent.futures import TimeoutError
from timeit import default_timer as timer
from RouterConfiguration import router_configurator
import rt_comparator
from GNS3 import gns3_interface
from settings import GNS_RESTART_INTERVAL
from utils import *
def get_delta_commands(routers, prev, new, prev_args=None, new_args=None):
"""
Finds all features which need to change to switch from one test case to the next, and returns corresponding commands
:param routers: Routers which are used in the current test iteration
:param prev: Features which were enabled in the last run
:param new: Features which should be enabled in the new run
:param prev_args: Args used in the last run, if None features are only compared for enabled / disabled
:param new_args: Args to be used in the next run, if None features are only compared for enabled / disabled
:return: Commands to issue to the routers, dict indexed by router name
"""
commands = {router.name: [] for router in routers}
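    # Illustrative shape of the result (router names here are hypothetical):
    #   {'r1': ['<command for r1>', ...], 'r2': [...]}  -- one list of commands per router,
    #   filled in below by router_configurator.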
if prev_args is not None and new_args is not None:
disable_features = [f for f in prev if prev_args[f] != new_args[f]]
enable_features = [f for f in new if prev_args[f] != new_args[f]]
else:
enable_features = [f for f in new if f not in prev]
disable_features = [f for f in prev if f not in new]
router_configurator.disable_features(commands, disable_features)
router_configurator.enable_features(commands, enable_features)
return commands
def write_result(path, name, comp, p_err, init_err):
"""
Write the result of a test successfully triggering a discrepancy to disk in the results folder
:param path: Path used to run Metha
:param name: Name of the test case triggering a discrepancy
:param comp: Result of the datacompy comparison of the routing tables
:param p_err: Optional dataframe of parsing errors
:param init_err: Optional dataframe of initialization errors
"""
if p_err is not None and not p_err.empty:
with open(f'{path}../results/{name}_parse_errors.csv', 'w') as f:
f.write(p_err.to_csv(index=False))
if init_err is not None and not init_err.empty:
with open(f'{path}../results/{name}_init_issues.csv', 'w') as f:
f.write(init_err.to_csv(index=False))
with open(f'{path}../results/{name}_report.txt', 'w') as f:
f.write(comp.report())
with open(f'{path}../results/{name}_GNS_only.csv', 'w') as f:
f.write(comp.df2_unq_rows.to_csv(index=False))
with open(f'{path}../results/{name}_SUT_only.csv', 'w') as f:
f.write(comp.df1_unq_rows.to_csv(index=False))
def write_except(path, name):
"""
Append the stacktrace of a test resulting in an exception to an accumulation file on disk
:param path: Path of the file to append the stacktrace to
:param name: Name of the test case which triggered the exception
"""
with open(path, 'a') as f:
f.write(f'{name}:\n')
traceback.print_exc(file=f)
f.write('\n\n')
class TestRunner:
def __init__(self, path, topo, system, router_features=None):
"""
:param path: Subdirectory where all tests and results are saved
:param topo: The topology which is being tested
:param system: The system under test
:param router_features: Features of the different routers in triplet format
"""
self.topo = topo
self.path = path
self.gp = self.set_up_testbed()
self.test_num = 0
self.system = system
self.router_features = router_features
if router_features is not None:
self.last_args = {router_feature: -1 for router_feature in self.router_features}
else:
self.last_args = None
def set_up_testbed(self):
"""
Sets up the GNS3 project for this test run, including initial configuration of the routers
:return: GNS3 project
"""
for router in self.topo.routers:
router.write_config(f'{self.path}base_configs/configs/')
self.topo.to_json(f'{self.path}base_configs/')
gp = gns3_interface.setup_gns_from_topology(f'{self.path}base_configs/topology.json')
gp.start_nodes()
return gp
def restart_gns(self):
"""
Deletes current GNS3 project and creates a new GNS3 project with freshly started routers
"""
self.gp.delete()
self.gp = self.set_up_testbed()
if self.router_features is not None:
self.last_args = {router_feature: -1 for router_feature in self.router_features}
else:
self.last_args = None
for router in self.topo.routers:
router.enabled_features = {}
def configure_routers(self, configs):
"""
Configure the GNS3 routers and write the resulting configs to disk
:param configs: configurations for each router, dict indexed by router name
"""
config_outputs = {}
path = f'{self.path}test{self.test_num}'
with concurrent.futures.ThreadPoolExecutor(max_workers=8) as executor:
future_to_node = {
executor.submit(self.gp.nodes[node].send_config, configs[node]): self.gp.nodes[node] for node in
self.gp.nodes
}
for future in concurrent.futures.as_completed(future_to_node):
node = future_to_node[future]
config_outputs[node.name] = future.result()
executor.submit(node.write_config, f'{path}/configs/')
executor.shutdown(wait=True)
with open(f'{path}/configuration_outputs.txt', 'w') as f:
for node in config_outputs:
f.write(f'{node}\n')
if config_outputs[node] is not None:
f.write(config_outputs[node])
f.write('\n' * 3)
def set_router_args(self, args):
"""
Sets the feature arguments on routers
:param args: arguments as dict from (router, feature, arg) triplets to parameter values
"""
router_configurator.set_args_from_translation(args)
def clear_routing_tables(self):
"""
Clears the routing tables of the GNS3 routers
"""
with concurrent.futures.ThreadPoolExecutor(max_workers=8) as executor:
for node in self.gp.nodes:
executor.submit(self.gp.nodes[node].clear_routing_table)
executor.shutdown(wait=True)
time.sleep(1)
def run_test(self, cur_features, cur_args=None):
"""
Run a test case with specified features and args
:param cur_features: Features which should be enabled in this test case
:param cur_args: Arguments for the enabled features, if None the last args are used instead
:return: Result of the test: 0 if comparison ok, 1 for difference, 2 for sut crash, 3 for timeout
"""
logger = logging.getLogger('network-testing')
logger.info(f'Running test case with features {str_repr(cur_features)}')
if self.test_num % GNS_RESTART_INTERVAL == GNS_RESTART_INTERVAL-1:
self.restart_gns()
start = timer()
last_features = [(router, *f) for router in self.topo.routers for f in router.enabled_features]
commands = get_delta_commands(self.topo.routers, last_features, cur_features, self.last_args, cur_args)
self.configure_routers(commands)
self.clear_routing_tables()
try:
(comp, p_err, init_err) = rt_comparator.run_comparison(
f'{self.path}test{self.test_num}/',
self.gp,
self.topo.get_adjacency_info(),
self.system
)
if comp.matches():
res = 0
else:
write_result(self.path, f'test{self.test_num}', comp, p_err, init_err)
res = 1
except BatfishException:
write_except(f'{self.path}../results/crashing_tests.txt', f'test{self.test_num}')
res = 2
except TimeoutError:
write_except(f'{self.path}../results/timed_out_tests.txt', f'test{self.test_num}')
res = 3
end = timer()
with open(f'{self.path}../results/total_runtimes.txt', 'a') as f:
f.write(f'test{self.test_num}: {end - start}\n')
self.test_num += 1
self.last_args = cur_args
return res
|
<filename>example/vedio_scripts/game.py
import pygame as pg
import gym_gvgai as gvg
class Game:
def __init__(self, game, lvl):
self.env = gvg.make('gvgai-' + game + '-' + lvl + '-v0')
self.stateObs = self.env.reset()
size = (len(self.stateObs), len(self.stateObs[0]))
self.transpose = size[0] < size[1]
if self.transpose:
self.size = (size[1]*2, size[0]*2)
else:
self.size = (size[0]*2, size[1]*2)
self.done = False
self.score = 0
self.frame = 0
self.nAction = self.env.action_space.n
def start(self, agent, maxT=1000, printLog=True, visualized=True, fps=10):
if visualized:
clk = pg.time.Clock()
screen = pg.display.set_mode(self.size)
for i in range(maxT):
clk.tick(fps)
for event in pg.event.get():
if event.type == pg.QUIT:
pg.quit()
self.update(agent, printLog)
self.draw(screen)
pg.display.flip()
if self.done:
print('---------------------------\nFinish. Final score = %d' % self.score)
return
else:
for i in range(maxT):
self.update(agent, printLog)
if self.done:
print('---------------------------\nFinish. Final score = %d' % self.score)
return
def humanPlay(self):
        print('Use the arrow keys to move and the z/x keys to take other actions (if they exist in this game).')
screen = pg.display.set_mode(self.size)
while not self.done:
evt = pg.event.wait()
if evt.type == pg.QUIT:
pg.quit()
self.done = True
elif evt.type == pg.KEYDOWN:
self.playerAct(self.parseKey(evt))
if self.done:
print('---------------------------\nFinish. Final score = %d' % self.score)
return
self.draw(screen)
pg.display.flip()
def parseKey(self, evt):
if evt.key == pg.K_z:
if self.nAction > 5:
return 1
else:
return 0
if evt.key == pg.K_x:
if self.nAction > 6:
return 2
else:
return 0
elif evt.key == pg.K_UP:
return self.nAction-1
elif evt.key == pg.K_DOWN:
return self.nAction-2
elif evt.key == pg.K_RIGHT:
return self.nAction - 3
elif evt.key == pg.K_LEFT:
return self.nAction - 4
else:
return 0
def playerAct(self, actionID):
self.stateObs, reward, self.done, debug = self.env.step(actionID)
self.score += reward
self.frame += 1
print('frame%d, action:%d, reward:%d, score:%d' % (self.frame, actionID, reward, self.score))
def update(self, agent, printLog=True):
action_id = agent.act(self.stateObs, self.env.action_space)
self.stateObs, reward, self.done, debug = self.env.step(action_id)
self.score += reward
self.frame += 1
if printLog:
print('frame%d, action:%d, reward:%d, score:%d' % (self.frame, action_id, reward, self.score))
def draw(self, screen):
buffer = pg.pixelcopy.make_surface(self.stateObs[:, :, :3])
pa = pg.PixelArray(buffer)
if self.transpose:
pa = pa.transpose()
screen.blit(pg.transform.scale(pa.make_surface(), self.size), (0, 0))
|
<reponame>NKI-AI/direct<filename>direct/train.py
# coding=utf-8
# Copyright (c) DIRECT Contributors
import argparse
import functools
import logging
import os
import pathlib
import sys
import urllib.parse
from collections import defaultdict
from typing import Callable, Dict, List, Optional, Union
import numpy as np
import torch
from omegaconf import DictConfig
from direct.cli.utils import check_train_val
from direct.common.subsample import build_masking_function
from direct.data.datasets import build_dataset_from_input
from direct.data.lr_scheduler import WarmupMultiStepLR
from direct.data.mri_transforms import build_mri_transforms
from direct.environment import setup_training_environment
from direct.launch import launch
from direct.types import PathOrString
from direct.utils import remove_keys, set_all_seeds, str_to_class
from direct.utils.dataset import get_filenames_for_datasets_from_config
from direct.utils.io import check_is_valid_url, read_json
logger = logging.getLogger(__name__)
def parse_noise_dict(noise_dict: dict, percentile: float = 1.0, multiplier: float = 1.0):
logger.info("Parsing noise dictionary...")
output: Dict = defaultdict(dict)
for filename in noise_dict:
data_per_volume = noise_dict[filename]
for slice_no in data_per_volume:
curr_data = data_per_volume[slice_no]
if percentile != 1.0:
lower_clip = np.percentile(curr_data, 100 * (1 - percentile))
upper_clip = np.percentile(curr_data, 100 * percentile)
curr_data = np.clip(curr_data, lower_clip, upper_clip)
            output[filename][int(slice_no)] = (curr_data * multiplier) ** 2  # squared, clipped-and-scaled noise values
return output
def get_root_of_file(filename: PathOrString):
"""Get the root directory of the file or URL to file.
Examples
--------
>>> get_root_of_file('/mnt/archive/data.txt')
>>> /mnt/archive
>>> get_root_of_file('https://aiforoncology.nl/people')
>>> https://aiforoncology.nl/
Parameters
----------
filename: pathlib.Path or str
Returns
-------
pathlib.Path or str
"""
if check_is_valid_url(str(filename)):
filename = urllib.parse.urljoin(str(filename), ".")
else:
filename = pathlib.Path(filename).parents[0]
return filename
def build_transforms_from_environment(env, dataset_config: DictConfig) -> Callable:
mri_transforms_func = functools.partial(
build_mri_transforms,
forward_operator=env.engine.forward_operator,
backward_operator=env.engine.backward_operator,
mask_func=build_masking_function(**dataset_config.transforms.masking),
)
return mri_transforms_func(**remove_keys(dataset_config.transforms, "masking")) # type: ignore
def build_training_datasets_from_environment(
env,
datasets_config: List[DictConfig],
lists_root: Optional[PathOrString] = None,
data_root: Optional[PathOrString] = None,
initial_images: Optional[Union[List[pathlib.Path], None]] = None,
initial_kspaces: Optional[Union[List[pathlib.Path], None]] = None,
pass_text_description: bool = True,
pass_dictionaries: Optional[Dict[str, Dict]] = None,
):
datasets = []
for idx, dataset_config in enumerate(datasets_config):
if pass_text_description:
if not "text_description" in dataset_config:
dataset_config.text_description = f"ds{idx}" if len(datasets_config) > 1 else None
else:
dataset_config.text_description = None
transforms = build_transforms_from_environment(env, dataset_config)
dataset_args = {"transforms": transforms, "dataset_config": dataset_config}
if initial_images is not None:
dataset_args.update({"initial_images": initial_images})
if initial_kspaces is not None:
dataset_args.update({"initial_kspaces": initial_kspaces})
if data_root is not None:
dataset_args.update({"data_root": data_root})
filenames_filter = get_filenames_for_datasets_from_config(dataset_config, lists_root, data_root)
dataset_args.update({"filenames_filter": filenames_filter})
if pass_dictionaries is not None:
dataset_args.update({"pass_dictionaries": pass_dictionaries})
dataset = build_dataset_from_input(**dataset_args)
logger.debug("Transforms %s / %s :\n%s", idx + 1, len(datasets_config), transforms)
datasets.append(dataset)
logger.info(
"Data size for %s (%s/%s): %s.",
dataset_config.text_description, # type: ignore
idx + 1,
len(datasets_config),
len(dataset),
)
return datasets
def setup_train(
run_name: str,
training_root: Union[pathlib.Path, None],
validation_root: Union[pathlib.Path, None],
base_directory: pathlib.Path,
cfg_filename: PathOrString,
force_validation: bool,
initialization_checkpoint: PathOrString,
initial_images: Optional[Union[List[pathlib.Path], None]],
initial_kspace: Optional[Union[List[pathlib.Path], None]],
noise: Optional[Union[List[pathlib.Path], None]],
device: str,
num_workers: int,
resume: bool,
machine_rank: int,
mixed_precision: bool,
debug: bool,
):
env = setup_training_environment(
run_name,
base_directory,
cfg_filename,
device,
machine_rank,
mixed_precision,
debug=debug,
)
# Trigger cudnn benchmark and remove the associated cache
torch.backends.cudnn.benchmark = True
torch.cuda.empty_cache()
if initial_kspace is not None and initial_images is not None:
raise ValueError("Cannot both provide initial kspace or initial images.")
# Create training data
training_dataset_args = {"env": env, "datasets_config": env.cfg.training.datasets, "pass_text_description": True}
pass_dictionaries = {}
if noise is not None:
if not env.cfg.physics.use_noise_matrix:
raise ValueError("cfg.physics.use_noise_matrix is null, yet command line passed noise files.")
noise = [read_json(fn) for fn in noise]
pass_dictionaries["loglikelihood_scaling"] = [
parse_noise_dict(_, percentile=0.999, multiplier=env.cfg.physics.noise_matrix_scaling) for _ in noise
]
training_dataset_args.update({"pass_dictionaries": pass_dictionaries})
if training_root is not None:
training_dataset_args.update({"data_root": training_root})
# Get the lists_root. Assume now the given path is with respect to the config file.
lists_root = get_root_of_file(cfg_filename)
if lists_root is not None:
training_dataset_args.update({"lists_root": lists_root})
if initial_images is not None:
training_dataset_args.update({"initial_images": initial_images[0]})
if initial_kspace is not None:
training_dataset_args.update({"initial_kspaces": initial_kspace[0]})
# Build training datasets
training_datasets = build_training_datasets_from_environment(**training_dataset_args)
training_data_sizes = [len(_) for _ in training_datasets]
logger.info("Training data sizes: %s (sum=%s).", training_data_sizes, sum(training_data_sizes))
# Create validation data
if "validation" in env.cfg:
validation_dataset_args = {
"env": env,
"datasets_config": env.cfg.validation.datasets,
"pass_text_description": True,
}
if validation_root is not None:
validation_dataset_args.update({"data_root": validation_root})
lists_root = get_root_of_file(cfg_filename)
if lists_root is not None:
validation_dataset_args.update({"lists_root": lists_root})
if initial_images is not None:
validation_dataset_args.update({"initial_images": initial_images[1]})
if initial_kspace is not None:
validation_dataset_args.update({"initial_kspaces": initial_kspace[1]})
# Build validation datasets
validation_data = build_training_datasets_from_environment(**validation_dataset_args)
else:
logger.info("No validation data.")
validation_data = None
# Create the optimizers
logger.info("Building optimizers.")
optimizer_params = [{"params": env.engine.model.parameters()}]
for curr_model_name in env.engine.models:
# TODO(jt): Can get learning rate from the config per additional model too.
curr_learning_rate = env.cfg.training.lr
logger.info("Adding model parameters of %s with learning rate %s.", curr_model_name, curr_learning_rate)
optimizer_params.append(
{
"params": env.engine.models[curr_model_name].parameters(),
"lr": curr_learning_rate,
}
)
optimizer: torch.optim.Optimizer = str_to_class("torch.optim", env.cfg.training.optimizer)( # noqa
optimizer_params,
lr=env.cfg.training.lr,
weight_decay=env.cfg.training.weight_decay,
) # noqa
# Build the LR scheduler, we use a fixed LR schedule step size, no adaptive training schedule.
solver_steps = list(
range(
env.cfg.training.lr_step_size,
env.cfg.training.num_iterations,
env.cfg.training.lr_step_size,
)
)
lr_scheduler = WarmupMultiStepLR(
optimizer,
solver_steps,
env.cfg.training.lr_gamma,
warmup_factor=1 / 3.0,
warmup_iterations=env.cfg.training.lr_warmup_iter,
warmup_method="linear",
)
# Just to make sure.
torch.cuda.empty_cache()
# Check the initialization checkpoint
if env.cfg.training.model_checkpoint:
if initialization_checkpoint:
logger.warning(
"`--initialization-checkpoint is set, and config has a set `training.model_checkpoint`: %s. "
"Will overwrite config variable with the command line: %s.",
env.cfg.training.model_checkpoint,
initialization_checkpoint,
)
# Now overwrite this in the configuration, so the correct value is dumped.
env.cfg.training.model_checkpoint = str(initialization_checkpoint)
else:
initialization_checkpoint = env.cfg.training.model_checkpoint
env.engine.train(
optimizer,
lr_scheduler,
training_datasets,
env.experiment_dir,
validation_datasets=validation_data,
resume=resume,
initialization=initialization_checkpoint,
start_with_validation=force_validation,
num_workers=num_workers,
)
def train_from_argparse(args: argparse.Namespace):
# This sets MKL threads to 1.
# DataLoader can otherwise bring a lot of difficulties when computing CPU FFTs in the transforms.
torch.set_num_threads(1)
os.environ["OMP_NUM_THREADS"] = "1"
# Disable Tensorboard warnings.
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
if args.initialization_images is not None and args.initialization_kspace is not None:
sys.exit("--initialization-images and --initialization-kspace are mutually exclusive.")
check_train_val(args.initialization_images, "initialization-images")
check_train_val(args.initialization_kspace, "initialization-kspace")
check_train_val(args.noise, "noise")
set_all_seeds(args.seed)
run_name = args.name if args.name is not None else os.path.basename(args.cfg_file)[:-5]
# TODO(jt): Duplicate params
launch(
setup_train,
args.num_machines,
args.num_gpus,
args.machine_rank,
args.dist_url,
run_name,
args.training_root,
args.validation_root,
args.experiment_dir,
args.cfg_file,
args.force_validation,
args.initialization_checkpoint,
args.initialization_images,
args.initialization_kspace,
args.noise,
args.device,
args.num_workers,
args.resume,
args.machine_rank,
args.mixed_precision,
args.debug,
)
|
<reponame>olivier-nexmo/py-nexmo-rent_numbers
############################################################
##### Title: Search Owned Numbers #####
##### Author: <NAME> #####
##### Date: 09 May 2018 #####
##### Updated: #####
##### Compatibility: Python 3 #####
############################################################
import requests
import sys
import math
import os
import time
from requests_toolbelt.utils import dump
############
# SETTINGS #
############
### API KEY ###
params_keys = {
'api_key': os.getenv('nexmo_api_key'),
'api_secret': os.getenv('nexmo_api_secret')
}
### GLOBAL PARAMETERS ###
page_size = 5 # default 10, max 100
pattern = '' # A matching pattern (not required)
search_pattern = '' # Strategy for matching pattern. Expected values: 0 (starts with, default), 1 (anywhere), 2 (ends with). (not required)
moHttpUrl = '' # moHttpUrl: A URL-encoded URI to the webhook endpoint that handles inbound messages. Your webhook endpoint must be active before you make this request; Nexmo makes a GET request to your endpoint and checks that it returns a 200 OK response. Set to an empty string to clear.
moSmppSysType = '' # moSmppSysType: The associated system type for your SMPP client. For example inbound
# voiceCallbackValue has to be used together with voiceCallbackType parameter.
voiceCallbackType = '' # voiceCallbackType: The voice webhook type. Possible values are sip, tel, or app
voiceCallbackValue = '' # voiceCallbackValue: A SIP URI, telephone number or Application ID
voiceStatusCallback = '' # voiceStatusCallback: A webhook URI for Nexmo to send a request to when a call ends.
params_global = {
'pattern': pattern,
'search_pattern': search_pattern
}
numberList = []
###########
# CODE #
###########
def roundup(x):
return int(math.ceil(x))
def countOwnedNumbers():
params = dict(params_keys.items())
try:
response = requests.get(base_url + action_search, params=params)
virtual_numbers = response.json()
return virtual_numbers['count']
except requests.exceptions.RequestException as e:
print(e)
sys.exit(1)
def listOwnedNumbers(maxPageSize, idx):
new_params = {
'size': maxPageSize,
'index': idx
}
params = dict(params_keys.items() | new_params.items() | params_global.items())
try:
response = requests.get(base_url + action_search, params=params)
#data = dump.dump_all(response)
#print(data.decode('utf-8'))
virtual_numbers = response.json()
for number in virtual_numbers['numbers']:
print("Updating {}".format(number))
# Need to wait a second - 1 request per second with the developer API
time.sleep(1)
updateOwnedNumber(number['country'], number['msisdn'])
except requests.exceptions.RequestException as e:
print(e)
sys.exit(1)
def updateOwnedNumber(country,msisdn):
# Required
# country: The two character country code in ISO 3166-1 alpha-2 format.
# msisdn: An available inbound virtual number. For example, 447700900000.
# Not required
    # moHttpUrl: A URL-encoded URI to the webhook endpoint that handles inbound messages. Your webhook endpoint must be active before you make this request; Nexmo makes a GET request to your endpoint and checks that it returns a 200 OK response. Set to an empty string to clear.
# moSmppSysType: The associated system type for your SMPP client. For example inbound
# voiceCallbackType: The voice webhook type. Possible values are sip, tel, or app
# voiceCallbackValue: A SIP URI, telephone number or Application ID
# voiceStatusCallback: A webhook URI for Nexmo to send a request to when a call ends.
# voiceCallbackValue has to be used together with voiceCallbackType parameter.
new_params = {
'country': country,
'msisdn': msisdn,
'moHttpUrl': moHttpUrl,
'moSmppSysType': moSmppSysType,
}
# can't send empty values for voiceCallbackType and voiceCallbackValue
if voiceCallbackType:
voice_params = {
'voiceCallbackValue': voiceCallbackValue,
'voiceCallbackType': voiceCallbackType,
'voiceStatusCallback': voiceStatusCallback
}
params = dict(params_keys.items() | new_params.items() | voice_params.items())
else:
params = dict(params_keys.items() | new_params.items())
print("Country {}, MSISDN {}".format(country, msisdn))
try:
print(params)
response = requests.post(base_url + action_update, params=params)
#data = dump.dump_all(response)
#print(data.decode('utf-8'))
decoded_response = response.json()
print("{}-{}".format(decoded_response['error-code'], decoded_response['error-code-label']))
except requests.exceptions.RequestException as e:
print(e)
sys.exit(1)
base_url = 'https://rest.nexmo.com'
version = ''
action_search = '/account/numbers'
action_update = '/number/update'
ownedNumbers = countOwnedNumbers()
pagination = roundup(ownedNumbers / page_size)
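# e.g. with 23 owned numbers and page_size = 5, roundup(23 / 5) = roundup(4.6) = 5 pages of results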
# Need to wait a second - 1 request per second with the developer API
time.sleep(1)
if (pagination == 1):
listOwnedNumbers(page_size, 1)
else:
for x in range(1, pagination + 1):
# Need to wait a second - 1 request per second with the developer API
time.sleep(1)
listOwnedNumbers(page_size, x)
|
<reponame>topblue/RootTheBox
# -*- coding: utf-8 -*-
'''
Created on Mar 13, 2012
@author: moloch
Copyright 2012 Root the Box
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License
----------------------------------------------------------------------------
This is the main file the defines what URLs get routed to what handlers
'''
import sys
import logging
from os import urandom, path, _exit
from modules.Menu import Menu
from modules.Recaptcha import Recaptcha
from modules.CssTheme import CssTheme
from libs.ConsoleColors import *
from libs.Scoreboard import score_bots
from libs.BotManager import BotManager, ping_bots
from libs.GameHistory import GameHistory
from libs.EventManager import EventManager
from libs.ConfigManager import ConfigManager
from tornado import netutil
from tornado.web import Application
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop, PeriodicCallback
from handlers.BotnetHandlers import *
from handlers.UserHandlers import *
from handlers.AdminHandlers import *
from handlers.ErrorHandlers import *
from handlers.PublicHandlers import *
from handlers.MarketHandlers import *
from handlers.UpgradeHandlers import *
from handlers.MissionsHandler import *
from handlers.PastebinHandlers import *
from handlers.ScoreboardHandlers import *
from handlers.FileUploadHandlers import *
from handlers.NotificationHandlers import *
from handlers.StaticFileHandler import StaticFileHandler
### Singletons
io_loop = IOLoop.instance()
config = ConfigManager.instance()
game_history = GameHistory.instance()
### Main URL Configuration
# First get base URLs that all game types will require
urls = [
# Static Handlers - StaticFileHandler.py
(r'/static/(.*\.(jpg|png|css|js|ico|swf|flv|eot|svg|ttf|woff|otf))',
StaticFileHandler, {'path': 'static/'}),
(r'/avatars/(.*\.(png|jpeg|jpg|gif|bmp))',
StaticFileHandler, {'path': 'files/avatars/'}),
# FileUploadHandlers - FileUploadHandlers.py
(r'/user/shares/delete', FileDeleteHandler),
(r'/user/shares/download(.*)', FileDownloadHandler),
(r'/user/share/files', FileUploadHandler),
# PasteBin - PastebinHandlers.py
(r'/user/share/pastebin', PasteHandler),
(r'/user/share/pastebin/create', CreatePasteHandler),
(r'/user/share/pastebin/display', DisplayPasteHandler),
(r'/user/share/pastebin/delete', DeletePasteHandler),
# Mission handlers - MissionHandlers.py
(r'/user/missions', MissionsHandler),
(r'/user/missions/capture',FlagSubmissionHandler),
(r'/user/missions/(flag|buyout)', MissionsHandler),
(r'/user/missions/firstlogin', FirstLoginHandler),
(r'/user/missions/boxes', BoxHandler),
(r'/user/missions/hint', PurchaseHintHandler),
### BOTNET URLS ###
# Bot Handlers - BotHandlers.py
(r'/botnet/connect', BotSocketHandler),
(r'/botnet/climonitor', BotCliMonitorSocketHandler),
(r'/botnet/webmonitor', BotWebMonitorSocketHandler),
(r'/user/bots/download/(windows|linux|monitor)', BotDownloadHandler),
(r'/user/bots/webmonitor', BotWebMonitorHandler),
### BLACK MARKET URLS ###
# This is only relevent if the black market is enabled
(r'/scoreboard/wall_of_sheep', ScoreboardWallOfSheepHandler),
# Market handlers - MarketHandlers.py
(r'/user/market', MarketViewHandler),
(r'/user/market/details', MarketDetailsHandler),
# Upgrade handlers - UpgradeHandlers.py
(r'/password_security', PasswordSecurityHandler),
(r'/federal_reserve', FederalReserveHandler),
(r'/federal_reserve/json/(.*)', FederalReserveAjaxHandler),
(r'/source_code_market', SourceCodeMarketHandler),
(r'/source_code_market/download', SourceCodeMarketDownloadHandler),
(r'/swat', SwatHandler),
# User handlers - UserHandlers.py
(r'/user', HomeHandler),
(r'/user/settings', SettingsHandler),
(r'/user/settings/(.*)', SettingsHandler),
(r'/logout', LogoutHandler),
# Admin Handlers - AdminHandlers.py
(r'/admin/game', AdminGameHandler),
(r'/admin/ban/(add|clear|config)', AdminBanHammerHandler),
(r'/admin/regtoken/(.*)', AdminRegTokenHandler),
(r'/admin/create/(.*)', AdminCreateHandler),
(r'/admin/edit/(.*)', AdminEditHandler),
(r'/admin/view/(.*)', AdminViewHandler),
(r'/admin/delete/(.*)', AdminDeleteHandler),
(r'/admin/ajax/objects(.*)', AdminAjaxObjectDataHandler),
(r'/admin/upgrades/source_code_market(.*)', AdminSourceCodeMarketHandler),
(r'/admin/upgrades/swat(.*)', AdminSwatHandler),
(r'/admin/lock', AdminLockHandler),
(r'/admin/configuration', AdminConfigurationHandler),
(r'/admin/export/(.*)', AdminExportHandler),
(r'/admin/import/xml', AdminImportXmlHandler),
(r'/admin/logviewer', AdminLogViewerHandler),
(r'/admin/logviewer/wsocket', AdminLogViewerSocketHandler),
(r'/admin/garbage', AdminGarbageCfgHandler),
    # Notification handlers - NotificationHandlers.py
(r'/notifications/all', AllNotificationsHandler),
(r'/notifications/wsocket/updates', NotifySocketHandler),
# Scoreboard Handlers - ScoreboardHandlers.py
(r'/scoreboard', ScoreboardHandler),
(r'/scoreboard/history', ScoreboardHistoryHandler),
(r'/scoreboard/ajax/(.*)', ScoreboardAjaxHandler),
(r'/scoreboard/wsocket/game_data', ScoreboardDataSocketHandler),
(r'/scoreboard/wsocket/game_history', ScoreboardHistorySocketHandler),
(r'/teams', TeamsHandler),
# Public handlers - PublicHandlers.py
(r'/login', LoginHandler),
(r'/registration', RegistrationHandler),
(r'/about', AboutHandler),
(r'/', HomePageHandler),
(r'/robots(|\.txt)', FakeRobotsHandler),
# Error handlers - ErrorHandlers.py
(r'/403', UnauthorizedHandler),
(r'/(.*).php', NoobHandler),
(r'/admin', NoobHandler),
(r'/(.*)phpmyadmin(.*)', NoobHandler),
(r'/administrator(.*)', NoobHandler)
]
# This one has to be last
urls.append((r'/(.*)', NotFoundHandler))
app = Application(
# URL handler mappings
urls,
# Randomly generated secret key
cookie_secret=urandom(32).encode('hex'),
    # IP addresses that can access the admin interface
admin_ips=config.admin_ips,
# Template directory
template_path='templates/',
    # Requests that do not pass @authorized will be
    # redirected here
forbidden_url='/403',
    # Requests that do not pass @authenticated will be
    # redirected here
login_url='/login',
# UI Modules
ui_modules={
"Menu": Menu,
"CssTheme": CssTheme,
"Recaptcha": Recaptcha,
},
# Enable XSRF protected forms; not optional
xsrf_cookies=True,
# Current domain settings
domain=config.domain,
port=config.listen_port,
# Anti-bruteforce
automatic_ban=False,
blacklist_threshold=10,
blacklisted_ips=[],
failed_logins={},
# Special file directories
source_code_market_dir=path.abspath('files/source_code_market/'),
# Notifier WebSocket
ws_connect=config.ws_connect,
# Debug mode
debug=config.debug,
# Flag used to start the game
game_started=False,
# Callback functions
score_bots_callback = PeriodicCallback(
score_bots,
config.bot_reward_interval,
io_loop=io_loop
),
history_callback = PeriodicCallback(
game_history.take_snapshot,
config.history_snapshot_interval,
io_loop=io_loop
),
# Application version
version='0.4.0',
)
# Main entry point
def start_server():
''' Main entry point for the application '''
if config.debug:
logging.warn("Debug mode is enabled; some security measures will be ignored")
# Setup server object
if config.use_ssl:
server = HTTPServer(app,
ssl_options={
"certfile": config.certfile,
"keyfile": config.keyfile,
},
xheaders=config.x_headers
)
else:
server = HTTPServer(app, xheaders=config.x_headers)
sockets = netutil.bind_sockets(config.listen_port)
server.add_sockets(sockets)
try:
io_loop.start()
except KeyboardInterrupt:
sys.stdout.write('\r' + WARN + 'Shutdown Everything!\n')
except:
logging.exception("Main i/o loop threw exception")
finally:
io_loop.stop()
_exit(0)
|
<reponame>AdamPrzybyla/Impansible
from distutils.core import setup
setup(
name = 'robotframework-impansible',
packages = ['Impansible'],
version = '0.11',
license='MIT',
description = 'Robotframework library to access all ansible internal modules.',
long_description='''Impansible
===============
.. contents::
Introduction
------------
Impansible_ is a `Robot Framework`_ test
library for access to all Ansible internal modules.
All Ansible modules are available as Robotframework's keywords.
The Impansible library can be used without Robotframework.
Impansible is operating system independent and supports Python 2.7 as well
as Python 3.x or newer.
Documentation
-------------
See `keyword documentation`_ for available keywords and more information
about the library in general.
For general information about using test libraries with Robot Framework, see
`Robot Framework User Guide`_.
Installation
------------
The recommended installation method is using pip_::
pip install --upgrade robotframework-impansible
With recent versions of ``pip`` it is possible to install directly from the
GitHub_ repository. To install latest source from the master branch, use
this command::
pip install git+https://github.com/AdamPrzybyla/Impansible.git
Alternatively you can download the source distribution from PyPI_, extract
it, and install it using one of the following commands, depending on whether you are
using Python or Jython::
python setup.py install
Usage
-----
The library can be used locally if the first parameter is set to "local",
or remotely if the first parameter is set to a hostname.
You need to export SSH keys or provide the proper credentials.
If you have root access you need to set the ansible_password variable,
but for sudo access you need to set the ansible_become_password and ansible_user
variables.
The keyword documentation can be found on this site: `keyword Documentation`_
.. code:: robotframework
*** variables ***
${PAC} mtr
#${ansible_password} <PASSWORD>
${ansible_become_password} xxxxxxxxx
${ansible_user} user_user
*** settings ***
library Impansible
library Collections
library OperatingSystem
*** test cases ***
test 1
${x}= Setup localhost
log ${x}
${y}= get from dictionary ${x} ansible_facts
${h}= get from dictionary ${y} ansible_hostname
${z}= get from dictionary ${y} ansible_distribution
Should be Equal ${z} Ubuntu
Should Contain ${h} tester
test 2
[Timeout] 600
${x}= apt localhost package=${PAC} state=present
${x}= get from dictionary ${x} invocation
${y}= get from dictionary ${x} module_args
${s}= get from dictionary ${y} state
Should be Equal ${s} present
${w}= Run which ${PAC}
Should Contain ${w} ${PAC}
test 3
[Timeout] 600
${x}= apt localhost package=${PAC} state=absent
${x}= get from dictionary ${x} invocation
${y}= get from dictionary ${x} module_args
${s}= get from dictionary ${y} state
Should be Equal ${s} absent
${w}= Run which ${PAC}
Should not Contain ${w} ${PAC}
test 4
${x}= apt localhost package=python-openssl state=present
${c}= get certificate localhost host=www.onet.pl port=443 proxy_host=172.16.58.3
${e}= get from dictionary ${c} expired
Should not be True ${e}
test 5
${x}= nitz2
log ${x}
test 6
${w}= command localhost uname -a
${w}= get from dictionary ${w} stdout
Should Contain ${w} GNU/Linux
test 7
${x}= python requirements info localhost
${x}= get from dictionary ${x} ansible_facts
${x}= get from dictionary ${x} discovered_interpreter_python
Should Contain ${x} python
Support
-------
If the provided documentation is not enough, there are various support forums
available:
- `robotframework-users`_ mailing list
.. _Impansible: https://github.com/AdamPrzybyla/Impansible
.. _github: https://github.com/AdamPrzybyla/Impansible
.. _Robot Framework: http://robotframework.org
.. _Robot Framework User Guide: http://robotframework.org/robotframework/latest/RobotFrameworkUserGuide.html#using-test-libraries
.. _PyPI: https://pypi.python.org/pypi/robotframework-impansible
.. _keyword Documentation: https://adamprzybyla.github.io/robotframework-Impansible.html
.. _pip: http://pip-installer.org
.. _robotframework-users: http://groups.google.com/group/robotframework-users
''',
author = '<NAME>',
author_email = '<EMAIL>',
url = 'https://github.com/AdamPrzybyla/impansible',
download_url = 'https://github.com/AdamPrzybyla/Impansible/archive/v_11.tar.gz',
keywords = ['robotframework', 'ansible', 'automatisation','nsm'],
install_requires=[
'ansible>=2.9,<2.10',
'robotframework',
'robotframework-nsm',
],
classifiers=[
'Development Status :: 3 - Alpha',
'Topic :: Software Development :: Build Tools',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 2',
],
)
|
# -*- coding:utf-8 -*-
import collections
import math
import numpy as np
import os
import random
import tensorflow as tf
import zipfile
from scipy.sparse import lil_matrix
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
config = tf.ConfigProto()
config.gpu_options.per_process_gpu_memory_fraction = 0.5
session = tf.Session(config = config)
def read_data(filename):
with zipfile.ZipFile(filename) as f:
fread = f.namelist()[0]
content = f.read(fread)
data = tf.compat.as_str(content).split()
return data
filename = "text8.zip"
words = read_data(filename)
print('Data size %d' % len(words))
print('Sample string %s' % words[:50])
vocabulary_size = 30000
def build_dataset(words):
count = [["UNK", -1]]
wordscounts = collections.Counter(words)
words_common = wordscounts.most_common(vocabulary_size - 1)
count.extend(words_common)
dictionary = dict()
for word, _ in count:
dictionary[word] = len(dictionary)
data = list()
unk_count = 0
for word in words:
if word in dictionary:
index = dictionary[word]
else:
index = 0
unk_count = unk_count + 1
data.append(index)
count[0][1] = unk_count
reverse_dictionary = dict(zip(dictionary.values(), dictionary.keys()))
return data, count, dictionary, reverse_dictionary
data, count, dictionary, reverse_dictionary = build_dataset(words)
print('Most common words (+UNK)', count[:5])
print('Sample data', data[:10])
del words
data_index = 0
def generate_batch(batch_size, num_skips, skip_window):
global data_index
assert batch_size % num_skips == 0
assert num_skips <= 2 * skip_window
batch = np.ndarray(shape=(batch_size), dtype=np.int32)
labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)
weights = np.ndarray(shape=(batch_size), dtype=np.float32)
span = 2 * skip_window + 1
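    # i.e. the sliding window holds the center word plus skip_window words on each side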
buffer = collections.deque(maxlen=span)
for _ in range(span):
buffer.append(data[data_index])
data_index = (data_index + 1) % len(data)
for i in range(batch_size // num_skips):
target = skip_window
targets_to_avoid = [ skip_window ]
for j in range(num_skips):
while target in targets_to_avoid:
target = random.randint(0, span - 1)
targets_to_avoid.append(target)
batch[i * num_skips + j] = buffer[skip_window]
labels[i * num_skips + j, 0] = buffer[target]
weights[i * num_skips + j] = abs(1.0/(target - skip_window))
buffer.append(data[data_index])
data_index = (data_index + 1) % len(data)
return batch, labels, weights
print('data:', [reverse_dictionary[di] for di in data[:8]])
for num_skips, skip_window in [(2, 1), (8, 4)]:
data_index = 0
batch, labels, weights = generate_batch(
batch_size = 8,
num_skips = num_skips,
skip_window = skip_window)
print('\nwith num_skips = %d and skip_window = %d:' % (num_skips, skip_window))
print(' batch:', [reverse_dictionary[bi] for bi in batch])
print(' labels:', [reverse_dictionary[li] for li in labels.reshape(8)])
print(' weights:', [w for w in weights])
cooc_data_index = 0
dataset_size = len(data)
skip_window = 4
num_skips = 8
cooc_mat = lil_matrix((vocabulary_size, vocabulary_size), dtype=np.float32)
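# LIL (list-of-lists) sparse format is used here because it supports efficient
# incremental element updates while the co-occurrence counts are being accumulated.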
print(cooc_mat.shape)
def generate_cooc(batch_size,num_skips,skip_window):
data_index = 0
    print('Running %d iterations to compute the co-occurrence matrix' % (dataset_size // batch_size))
for i in range(dataset_size//batch_size):
if i > 0 and i % 100000 == 0:
print('\tFinished %d iterations' % i)
batch, labels, weights = generate_batch(
batch_size = batch_size,
num_skips = num_skips,
skip_window = skip_window) # increments data_index automatically
labels = labels.reshape(-1)
for inp,lbl,w in zip(batch,labels,weights):
cooc_mat[inp,lbl] += (1.0*w)
generate_cooc(8,num_skips,skip_window)
print('Sample chunks of the co-occurrence matrix')
rand_target_idx = np.random.randint(0,vocabulary_size,10).tolist()
for i in range(10):
idx_target = i
ith_row = cooc_mat.getrow(idx_target)
ith_row_dense = ith_row.toarray('C').reshape(-1)
while np.sum(ith_row_dense) < 10 or np.sum(ith_row_dense)>50000:
idx_target = np.random.randint(0,vocabulary_size)
ith_row = cooc_mat.getrow(idx_target)
ith_row_dense = ith_row.toarray('C').reshape(-1)
print('\nTarget Word: "%s"' % reverse_dictionary[idx_target])
sort_indices = np.argsort(ith_row_dense).reshape(-1) # indices with highest count of ith_row_dense
sort_indices = np.flip(sort_indices,axis=0) # reverse the array (to get max values to the start)
# printing several context words to make sure cooc_mat is correct
print('Context word:',end='')
for j in range(10):
idx_context = sort_indices[j]
print('"%s"(id:%d,count:%.2f), '%(reverse_dictionary[idx_context],idx_context,ith_row_dense[idx_context]),end='')
print()
if __name__ == '__main__':
batch_size = 128
embedding_size = 192 # Dimension of the embedding vector.
    # We pick a random validation set to sample nearest neighbors. Half of the
    # validation samples are limited to words with a low numeric ID, which by
    # construction are also the most frequent.
valid_size = 16 # Random set of words to evaluate similarity on.
valid_window = 100 # Only pick dev samples in the head of the distribution.
    # Validation set consists of 8 frequent words (IDs < 100) and 8 words from a less frequent range (IDs 1000-1099)
valid_examples = np.array(random.sample(range(valid_window), valid_size//2))
valid_examples = np.append(valid_examples,random.sample(range(1000,1000+valid_window), valid_size//2))
epsilon = 1 # used for the stability of log in the loss function
graph = tf.Graph()
with graph.as_default(), tf.device('/cpu:0'):
# Input data.
train_dataset = tf.placeholder(tf.int32, shape=[batch_size],name='train_dataset')
train_labels = tf.placeholder(tf.int32, shape=[batch_size],name='train_labels')
valid_dataset = tf.constant(valid_examples, dtype=tf.int32,name='valid_dataset')
# Variables.
embeddings = tf.Variable(
tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0),name='embeddings')
bias_embeddings = tf.Variable(tf.random_uniform([vocabulary_size],0.0,0.01,dtype=tf.float32),name='embeddings_bias')
# Model.
# Look up embeddings for inputs.
embed_in = tf.nn.embedding_lookup(embeddings, train_dataset)
embed_out = tf.nn.embedding_lookup(embeddings, train_labels)
embed_bias_in = tf.nn.embedding_lookup(bias_embeddings,train_dataset)
embed_bias_out = tf.nn.embedding_lookup(bias_embeddings,train_labels)
# weights used in the cost function
weights_x = tf.placeholder(tf.float32,shape=[batch_size],name='weights_x')
x_ij = tf.placeholder(tf.float32,shape=[batch_size],name='x_ij')
# Compute the loss defined in the paper. Note that I'm not following the exact equation given (which is computing a pair of words at a time)
# I'm calculating the loss for a batch at one time, but the calculations are identical.
# I also made an assumption about the bias, that it is a smaller type of embedding
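    # In other words, the code below implements the GloVe weighted least-squares objective
    #   J = mean_ij f(X_ij) * (w_i . w~_j + b_i + b~_j - log(X_ij + epsilon))^2
    # where f(X_ij) = min(1, (X_ij / 100)^0.75) is supplied through the weights_x
    # placeholder and the raw co-occurrence count X_ij through x_ij.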
loss = tf.reduce_mean(
weights_x * (tf.reduce_sum(embed_in*embed_out, axis=1) + embed_bias_in + embed_bias_out - tf.log(epsilon+x_ij))**2)
# Optimizer.
optimizer = tf.train.AdagradOptimizer(1.0).minimize(loss)
# Compute the similarity between minibatch examples and all embeddings.
norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))
normalized_embeddings = embeddings / norm
valid_embeddings = tf.nn.embedding_lookup(
normalized_embeddings, valid_dataset)
similarity = tf.matmul(valid_embeddings, tf.transpose(normalized_embeddings))
num_steps = 100001
session = tf.InteractiveSession()
tf.global_variables_initializer().run()
print('Initialized')
average_loss = 0
for step in range(num_steps):
batch_data, batch_labels, batch_weights = generate_batch(
        batch_size, num_skips, skip_window)  # generate a single batch (data, labels, co-occurrence weights)
batch_weights = [] # weighting used in the loss function
batch_xij = [] # weighted frequency of finding i near j
for inp,lbl in zip(batch_data,batch_labels.reshape(-1)):
batch_weights.append((np.asscalar(cooc_mat[inp,lbl])/100.0)**0.75)
batch_xij.append(cooc_mat[inp,lbl])
batch_weights = np.clip(batch_weights,-100,1)
batch_xij = np.asarray(batch_xij)
feed_dict = {train_dataset : batch_data.reshape(-1), train_labels : batch_labels.reshape(-1),
weights_x:batch_weights,x_ij:batch_xij}
_, l = session.run([optimizer, loss], feed_dict=feed_dict)
average_loss += l
if step % 2000 == 0:
if step > 0:
average_loss = average_loss / 2000
print('Average loss at step %d: %f' % (step, average_loss))
average_loss = 0
# note that this is expensive (~20% slowdown if computed every 500 steps)
if step % 10000 == 0:
sim = similarity.eval()
for i in range(valid_size):
valid_word = reverse_dictionary[valid_examples[i]]
top_k = 8 # number of nearest neighbors
nearest = (-sim[i, :]).argsort()[1:top_k+1]
log = 'Nearest to %s:' % valid_word
for k in range(top_k):
close_word = reverse_dictionary[nearest[k]]
log = '%s %s,' % (log, close_word)
print(log)
final_embeddings = normalized_embeddings.eval()
|
from collections import OrderedDict
from django.conf import settings
from settings.config import Config
from utility.filesystem import load_yaml, save_yaml, remove_dir, remove_file
from utility.data import Collection, sorted_keys
from utility.time import Time
import os
import threading
import copy
class EnvironmentError(Exception):
pass
class MetaEnvironment(type):
def get_db_path(self, name = None):
env_name = self.get_active_env() if name is None else name
return "{}-{}.db".format(settings.BASE_DATA_PATH, env_name)
def get_env_path(self):
return "{}.env.sh".format(settings.BASE_DATA_PATH)
def get_module_path(self, name = None):
env_name = self.get_active_env() if name is None else name
return os.path.join(settings.MODULE_BASE_PATH, env_name)
def load_data(self, reset = False):
if reset or not self.data:
save_data = False
with self.lock:
self.data = load_yaml(settings.RUNTIME_PATH)
if self.data is None:
time = self.time.now
self.data = {
'active': settings.DEFAULT_ENV_NAME,
'environments': {
settings.DEFAULT_ENV_NAME: {
'repo': settings.DEFAULT_RUNTIME_REPO,
'base_image': settings.DEFAULT_RUNTIME_IMAGE,
'created': time,
'updated': time
}
}
}
save_data = True
else:
for name, config in self.data['environments'].items():
self.data['environments'][name]['created'] = self.time.to_datetime(config['created'])
self.data['environments'][name]['updated'] = self.time.to_datetime(config['updated'])
if save_data:
self.save_data()
def save_data(self):
with self.lock:
data = copy.deepcopy(self.data)
for name, config in data['environments'].items():
data['environments'][name]['created'] = self.time.to_string(config['created'])
data['environments'][name]['updated'] = self.time.to_string(config['updated'])
save_yaml(settings.RUNTIME_PATH, data)
def save_env_vars(self, name = None):
self.load_data()
env_name = self.get_active_env() if name is None else name
variables = {
'ZIMAGI_ENVIRONMENT': env_name
}
with self.lock:
if env_name not in self.data['environments']:
raise EnvironmentError("Environment {} is not defined".format(env_name))
for field_name, field_value in self.data['environments'][env_name].items():
variables["ZIMAGI_{}".format(field_name.upper())] = field_value if field_value is not None else ''
Config.save(self.get_env_path(), variables)
def delete_env_vars(self):
with self.lock:
Config.remove(self.get_env_path())
def get_env_defaults(self):
return {
'repo': settings.DEFAULT_RUNTIME_REPO,
'base_image': settings.DEFAULT_RUNTIME_IMAGE
}
def get_all_env(self):
self.load_data()
env_data = OrderedDict()
with self.lock:
env_names = sorted_keys(self.data['environments'], 'created')
for env_name in env_names:
env_data[env_name] = self.get_env(env_name)
return env_data
def get_env(self, name = None):
self.load_data()
env_name = self.get_active_env() if name is None else name
with self.lock:
if env_name not in self.data['environments']:
raise EnvironmentError("Environment {} is not defined".format(env_name))
env_data = copy.deepcopy(self.data['environments'][env_name])
if not os.path.isfile(self.get_env_path()):
env_data['runtime_image'] = None
self.save_env(env_name, **env_data)
env_data['name'] = env_name
return Collection(**env_data)
def save_env(self, name = None, **fields):
self.load_data()
active_env = self.get_active_env()
env_name = active_env if name is None else name
        time = self.time.now
defaults = self.get_env_defaults()
with self.lock:
if env_name not in self.data['environments']:
self.data['environments'][env_name] = {}
for field_name, field_value in fields.items():
if field_name in defaults and field_value is None:
field_value = defaults[field_name]
self.data['environments'][env_name][field_name] = field_value
for field_name, default_value in defaults.items():
if field_name not in self.data['environments'][env_name]:
self.data['environments'][env_name][field_name] = default_value
if 'created' not in self.data['environments'][env_name]:
self.data['environments'][env_name]['created'] = time
self.data['environments'][env_name]['updated'] = time
self.save_data()
        # If the environment being saved is the currently active one, refresh the exported environment variables
if name is None or env_name == active_env:
self.save_env_vars(env_name)
def delete_env(self, name = None, remove_module_path = False):
self.load_data()
active_env = self.get_active_env()
env_name = active_env if name is None else name
with self.lock:
            # If the environment being deleted is the currently active one, fall back to the default environment
if name is None or env_name == active_env:
self.data['active'] = settings.DEFAULT_ENV_NAME
if env_name != settings.DEFAULT_ENV_NAME:
self.data['environments'].pop(env_name)
remove_file(self.get_db_path(env_name))
if remove_module_path:
remove_dir(self.get_module_path(env_name))
self.save_data()
        # If the active environment was deleted, remove its exported environment variables
if name is None or env_name == active_env:
self.delete_env_vars()
def get_active_env(self):
self.load_data()
with self.lock:
return self.data['active']
def set_active_env(self, name):
self.load_data()
with self.lock:
if name not in self.data['environments']:
raise EnvironmentError("Environment {} is not defined".format(name))
self.data['active'] = name
self.save_data()
class Environment(object, metaclass = MetaEnvironment):
time = Time()
lock = threading.Lock()
data = {}
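# Illustrative usage sketch (assumes Django settings and the data paths above are
# configured; the repo value is a made-up example). The metaclass exposes all the
# state helpers directly on the Environment class:
#   Environment.save_env('staging', repo='registry.example.com/zimagi')
#   Environment.set_active_env('staging')
#   env = Environment.get_env()   # Collection with name, repo, base_image, created, updated
#   Environment.delete_env('staging')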
|
#
# Copyright (c) 2019, Infosys Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import requests
import json
import time
import threading
import s1apTC as s1
from flask import Flask, request, Blueprint
sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..', 'Common'))
from genericProxy import GenericProxy
from genericProxy import app_send
from genericProxy import app_receive
from genericProxy import ctx_data
from genericProxy import clear_buffer
sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..', 'Logger'))
import igniteLogger
# currentDir = os.path.dirname(__file__)
configuration_file = os.path.join(os.path.dirname(__file__), '..', '..', 'Common', 'configuration.json')
with open(configuration_file) as configuration:
config_file = json.loads(configuration.read())
app = Flask(__name__)
# Register the generic proxy blueprints: send, receive, context data and clear buffer
app.register_blueprint(app_send)
app.register_blueprint(app_receive)
app.register_blueprint(ctx_data)
app.register_blueprint(clear_buffer)
def s1apProxyStart(enodeBType):
if enodeBType=="source":
s1ap_proxy=GenericProxy("s1ap")
igniteLogger.logger.info("S1ap Proxy created")
s1ap_proxy.create_sut_socket()
igniteLogger.logger.info("S1ap SUT socket created")
s1ap_proxy.runProxy()
elif enodeBType == "target":
s1aptarget_proxy=GenericProxy("s1ap_target")
igniteLogger.logger.info("S1ap Target Proxy created")
s1aptarget_proxy.create_sut_socket()
igniteLogger.logger.info("S1ap Target SUT socket created")
s1aptarget_proxy.runProxy()
def runner(enodeBType):
if enodeBType=="source":
app.run(config_file["s1ap"]["ignite_ip"],config_file["s1ap"]["tc_port"])
elif enodeBType=="target":
app.run(config_file["s1ap_target"]["ignite_ip"], config_file["s1ap_target"]["tc_port"])
s1apArg = sys.argv
s1ap_thread= threading.Thread(target=s1apProxyStart,args=(s1apArg[1],))
s1ap_runner= threading.Thread(target=runner, args=(s1apArg[1],))
s1ap_runner.start()
s1ap_thread.start()
time.sleep(3)
if s1apArg[1] == "source":
s1_setup_request = json.loads(open('../../../Test/MessageTemplates/S1AP/s1setup_request.json').read())
print("send S1 set up")
igniteLogger.logger.info("\n-------------------------------\nsend source s1 set up\n----------------------")
igniteLogger.logger.info("\n---------------------------------------\nSend S1Setup Request to MME\n---------------------------------------")
s1.sendS1ap('s1_setup_request',s1_setup_request,None)
igniteLogger.logger.info("\n---------------------------------------\nS1 Setup Response received from MME\n---------------------------------------")
s1.receiveS1ap()
elif s1apArg[1] == "target":
s1_setup_request = json.loads(open('../../../Test/MessageTemplates/S1AP/s1setup_request_target.json').read())
print("s1 set up target")
igniteLogger.logger.info("\n-------------------------------\nsend target s1 set up\n----------------------")
igniteLogger.logger.info("\n---------------------------------------\nSend S1Setup Request to MME\n---------------------------------------")
s1.sendS1ap('s1_setup_request_target',s1_setup_request,None)
igniteLogger.logger.info("\n---------------------------------------\nS1 Setup Response received from MME\n---------------------------------------")
s1.receiveS1ap(target=True)
print ("\n-------------------------------------\nSetup Successful\n---------------------------------------")
|
<reponame>HyechurnJang/archon<filename>application/asa/manager.py
# -*- coding: utf-8 -*-
################################################################################
# _____ _ _____ _ #
# / ____(_) / ____| | | #
# | | _ ___ ___ ___ | (___ _ _ ___| |_ ___ _ __ ___ ___ #
# | | | / __|/ __/ _ \ \___ \| | | / __| __/ _ \ '_ ` _ \/ __| #
# | |____| \__ \ (_| (_) | ____) | |_| \__ \ || __/ | | | | \__ \ #
# \_____|_|___/\___\___/ |_____/ \__, |___/\__\___|_| |_| |_|___/ #
# __/ | #
# |___/ #
# _ __ _____ _ _____ ______ #
# | |/ / / ____| | |/ ____| ____| #
# | ' / ___ _ __ ___ __ _ | (___ ___ | | (___ | |__ #
# | < / _ \| '__/ _ \/ _` | \___ \ / _ \| |\___ \| __| #
# | . \ (_) | | | __/ (_| | ____) | (_) | |____) | |____ #
# |_|\_\___/|_| \___|\__,_| |_____/ \___/|_|_____/|______| #
# #
################################################################################
# #
# Copyright (c) 2016 Cisco Systems #
# All Rights Reserved. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT #
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the #
# License for the specific language governing permissions and limitations #
# under the License. #
# #
################################################################################
import time
import pygics
import asadipy
import archon
from archon import *
from models import *
from .settings import *
#===============================================================================
# Create your manager here.
#===============================================================================
class HealthMonitor(pygics.Task):
def __init__(self, manager):
pygics.Task.__init__(self, tick=HEALTH_MON_SEC)
self.manager = manager
self.health = {'_tstamp' : []}
for i in reversed(range(0, HEALTH_MON_CNT)):
self.health['_tstamp'].append('00:00:00')
self.start()
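    # run() maintains self.health as a fixed-length ring buffer: on every tick the
    # oldest sample of each series ('_tstamp', '<domain>/cpu', '<domain>/core/<i>',
    # '<domain>/mem', '<domain>/disk') is dropped and the latest reading appended,
    # so each series always holds HEALTH_MON_CNT points.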
def run(self):
now = time.strftime("%H:%M:%S", time.localtime(time.time()))
stats = self.manager.Stat()
health = {'_tstamp' : self.health['_tstamp'][1:]}
health['_tstamp'].append(now)
for domain_name in stats:
dn_cpu = domain_name + '/cpu'
dn_core = domain_name + '/core'
dn_mem = domain_name + '/mem'
dn_disk = domain_name + '/disk'
if dn_cpu in self.health: health[dn_cpu] = self.health[dn_cpu][1:]
else: health[dn_cpu] = [None for i in range(0, HEALTH_MON_CNT - 1)]
health[dn_cpu].append(stats[domain_name]['cpu']['total']['1min'])
for i in range(0, len(stats[domain_name]['cpu']['core'])):
dn_core_num = dn_core + '/%d' % i
if dn_core_num in self.health: health[dn_core_num] = self.health[dn_core_num][1:]
else: health[dn_core_num] = [None for j in range(0, HEALTH_MON_CNT - 1)]
health[dn_core_num].append(stats[domain_name]['cpu']['core'][i]['1min'])
if dn_mem in self.health: health[dn_mem] = self.health[dn_mem][1:]
else: health[dn_mem] = [None for i in range(0, HEALTH_MON_CNT - 1)]
health[dn_mem].append(stats[domain_name]['memory']['used_percent'])
if dn_disk in self.health: health[dn_disk] = self.health[dn_disk][1:]
else: health[dn_disk] = [None for i in range(0, HEALTH_MON_CNT - 1)]
health[dn_disk].append(stats[domain_name]['disk']['used_percent'])
self.health = health
def getHealth(self):
return self.health
class Manager(archon.ManagerAbstraction, asadipy.MultiDomain):
def __init__(self):
asadipy.MultiDomain.__init__(self, debug=MANAGER_DEBUG)
domains = Domain.objects.all()
for domain in domains:
asadipy.MultiDomain.addDomain(self, domain.name, domain.ip, domain.user, domain.password)
self.ipusers = {}
ipusers = IpUser.objects.all()
for ipuser in ipusers:
self.ipusers['%s-%s' % (ipuser.domain, ipuser.ip)] = {'user': ipuser.user, 'domain' : ipuser.domain, 'ip' : ipuser.ip}
self.healthmon = HealthMonitor(self)
def addDomain(self, domain_name, ip, user, pwd):
try: Domain.objects.get(name=domain_name)
except:
ret = asadipy.MultiDomain.addDomain(self, domain_name, ip, user, pwd)
if ret: Domain.objects.create(name=domain_name, ip=ip, user=user, password=pwd)
return ret
return False
def delDomain(self, domain_name):
try: domain = Domain.objects.get(name=domain_name)
except: return False
asadipy.MultiDomain.delDomain(self, domain_name)
domain.delete()
return True
def addIpUser(self, domain, ip, user):
key = '%s-%s' % (domain, ip)
if key in self.ipusers: return False
self.ipusers['%s-%s' % (domain, ip)] = {'user': user, 'domain' : domain, 'ip' : ip}
IpUser.objects.create(domain=domain, ip=ip, user=user)
return True
def getIpUser(self, domain, ip):
key = '%s-%s' % (domain, ip)
if key in self.ipusers: return self.ipusers['%s-%s' % (domain, ip)]['user']
return 'N/A'
def delIpUser(self, domain, ip):
key = '%s-%s' % (domain, ip)
if key in self.ipusers:
self.ipusers.pop('%s-%s' % (domain, ip))
IpUser.objects.filter(domain=domain, ip=ip).delete()
return True
return False
def getHealth(self):
return self.healthmon.getHealth()
def getSummary(self, R, M, V):
return {
'name' : 'ASA',
'icon' : 'asa/ASA_Default.png',
'desc' : 'Adaptive Security Appliance Monitoring Application',
'link' : '/asa/overview',
'view' : DIV()
}
|
<filename>surpyval/tests/test_real_data.py
import pytest
import numpy as np
import lifelines
import surpyval as surv
from collections import namedtuple
from lifelines.datasets import *
from reliability.Fitters import *
# Datasets in x, c, n: as namedtuples
SurvivalData = namedtuple('SurvivalData', ['x', 'c', 'n', 'name'])
IntervalSurvivalData = namedtuple('IntervalSurvivalData',
['left', 'right', 'name'])
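# The datasets below are converted to surpyval's x/c/n convention: x holds the
# observed times, c the censoring flags (0 = observed event, 1 = right censored,
# hence c = 1 - df['observed']) and n the count of observations at each x.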
# Canadian Senators
df = load_canadian_senators()
x = df['diff_days'].values
c = 1 - df['observed'].astype(int)
zero_idx = (x == 0)
x = x[~zero_idx]
c = c[~zero_idx]
n = np.ones_like(x)
canadian_senators = SurvivalData(x, c, n, 'canadian_senators')
# Bacteria...
df = load_c_botulinum_lag_phase()
left = df['lower_bound_days']
right = df['upper_bound_days']
bacteria = IntervalSurvivalData(left, right, 'bacteria')
# Political Durations
df = load_dd()
x = df['duration'].values
c = 1 - df['observed'].astype(int)
n = np.ones_like(x)
politics = SurvivalData(x, c, n, 'politics')
df = load_diabetes()
left = df['left']
right = df['right']
diabetes = IntervalSurvivalData(left, right, 'diabetes')
# g3 (lifelines dataset)
df = load_g3()
x = df['time'].values
c = 1 - df['event'].astype(int)
n = np.ones_like(x)
g3 = SurvivalData(x, c, n, 'g3')
# gbsg2: German Breast Cancer Study Group 2 data
df = load_gbsg2()
x = df['time'].values
c = 1 - df['cens'].astype(int)
n = np.ones_like(x)
gbsg2 = SurvivalData(x, c, n, 'gbsg2')
# holly_molly_polly (lifelines dataset)
df = load_holly_molly_polly()
x = df['T'].values
c = np.zeros_like(x)
n = np.ones_like(x)
holly_molly_polly = SurvivalData(x, c, n, 'holly_molly_polly')
# kidney
df = load_kidney_transplant()
x = df['time'].values
c = 1 - df['death'].astype(int)
n = np.ones_like(x)
kidney = SurvivalData(x, c, n, 'kidney')
# larynx
df = load_larynx()
x = df['time'].values
c = np.zeros_like(x)
n = np.ones_like(x)
larynx = SurvivalData(x, c, n, 'larynx')
# leukemia
df = load_leukemia()
x = df['t'].values
c = 1 - df['status'].astype(int)
n = np.ones_like(x)
leukemia = SurvivalData(x, c, n, 'leukemia')
# lung
df = load_lung()
x = df['time'].dropna()
c = np.zeros_like(x)
n = np.ones_like(x)
lung = SurvivalData(x, c, n, 'lung')
# lupus
df = load_lupus()
x = df['time_elapsed_between_estimated_onset_and_diagnosis_(months)'].dropna()
c = 1 - df['dead'].astype(int)
n = np.ones_like(x)
lupus = SurvivalData(x, c, n, 'lupus')
# lymph
df = load_lymph_node()
x = df['survtime'].dropna()
c = 1 - df['censdead'].astype(int)
n = np.ones_like(x)
lymph = SurvivalData(x, c, n, 'lymph')
# lymphoma
df = load_lymphoma()
x = df['Time'].dropna()
c = 1 - df['Censor'].astype(int)
n = np.ones_like(x)
lymphoma = SurvivalData(x, c, n, 'lymphoma')
# mice
df = load_mice()
left = df['l']
right = df['u']
mice = IntervalSurvivalData(left, right, 'mice')
# aids
df = load_multicenter_aids_cohort_study()
x = df['T'].dropna()
c = 1 - df['D'].astype(int)
n = np.ones_like(x)
aids = SurvivalData(x, c, n, 'aids')
# nh4
df = load_nh4()
x = df['Week'].dropna()
c = 1 - df['Censored'].astype(int)
n = np.ones_like(x)
nh4 = SurvivalData(x, c, n, 'nh4')
# panel
df = load_panel_test()
x = df['t'].dropna()
c = 1 - df['E'].astype(int)
n = np.ones_like(x)
panel = SurvivalData(x, c, n, 'panel')
# recur
df = load_recur()
x = df['AGE'].dropna()
c = 1 - df['CENSOR'].astype(int)
n = np.ones_like(x)
recur = SurvivalData(x, c, n, 'recur')
# reg
df = load_regression_dataset()
x = df['T'].dropna()
c = 1 - df['E'].astype(int)
n = np.ones_like(x)
reg = SurvivalData(x, c, n, 'reg')
# rossi
df = load_rossi()
x = df['week'].dropna()
c = 1 - df['arrest'].astype(int)
n = np.ones_like(x)
rossi = SurvivalData(x, c, n, 'rossi')
# static
df = load_static_test()
x = df['t'].dropna() + 1e-10
c = 1 - df['E'].astype(int)
n = np.ones_like(x)
static = SurvivalData(x, c, n, 'static')
# walton
df = load_waltons()
x = df['T'].dropna()
c = 1 - df['E'].astype(int)
n = np.ones_like(x)
walton = SurvivalData(x, c, n, 'walton')
def id_func(val):
if isinstance(val, SurvivalData):
return val.name
elif isinstance(val, IntervalSurvivalData):
return val.name
xcn_datasets = [
canadian_senators,
politics,
g3,
gbsg2,
holly_molly_polly,
kidney,
larynx,
leukemia,
lung,
lupus,
lymph,
lymphoma,
aids,
nh4,
panel,
recur,
reg,
rossi,
# static,
walton
]
int_datasets = [
bacteria,
diabetes,
mice
]
wf = lifelines.WeibullFitter()
lnf = lifelines.LogNormalFitter()
llf = lifelines.LogLogisticFitter()
ef = lifelines.ExponentialFitter()
DISTS = {
'Weibull': (wf, surv.Weibull),
'Exponential': (ef, surv.Exponential),
'LogNormal': (lnf, surv.LogNormal),
'LogLogistic': (llf, surv.LogLogistic)
}
REL_DISTS = {
'Exponential': (Fit_Exponential_1P, surv.Exponential),
'Weibull': (Fit_Weibull_2P, surv.Weibull),
'Gamma': (Fit_Gamma_2P, surv.Gamma),
'LogNormal': (Fit_Lognormal_2P, surv.LogNormal),
'LogLogistic': (Fit_Loglogistic_2P, surv.LogLogistic),
'Normal': (Fit_Normal_2P, surv.Normal),
'Gumbel': (Fit_Gumbel_2P, surv.Gumbel),
'Beta': (Fit_Beta_2P, surv.Beta),
}
def generate_case():
for i, data in enumerate(xcn_datasets):
yield data
def generate_real_cases():
for dist in DISTS.keys():
for data in xcn_datasets:
yield data, dist
def generate_real_cases_reliability():
for dist in REL_DISTS.keys():
for data in xcn_datasets:
yield data, dist
def generate_real_cases_int():
for dist in DISTS.keys():
for data in int_datasets:
yield data, dist
def params_with_xcn_data_rel(data, surpyval_fitter, rel_fitter):
if surpyval_fitter.name == 'Beta':
x = data.x/(data.x.max()+1)
else:
x = data.x
f, s = surv.xcn_to_fs(x, data.c, data.n)
if s == []:
s = None
rel_model = rel_fitter(f, s)
if surpyval_fitter.name == 'Exponential':
rel_params = rel_model.Lambda
elif surpyval_fitter.name in ['Weibull', 'Gamma',
'LogLogistic', 'Beta']:
rel_params = np.array([rel_model.alpha, rel_model.beta])
elif surpyval_fitter.name in ['LogNormal', 'Normal', 'Gumbel']:
rel_params = np.array([rel_model.mu, rel_model.sigma])
surp_est = surpyval_fitter.fit(x,
data.c,
data.n)
if np.allclose(rel_params, surp_est.params, 1e-1):
return True
else:
        # reliability's fits can be less accurate, so instead check that
        # surpyval's log-likelihood is at least as good, within a small tolerance:
return (surp_est.neg_ll() - (-rel_model.loglik)) < 1e-5
def params_with_xcn_data(data, surpyval_fitter, lifelines_fitter):
ll_est = lifelines_fitter.fit(data.x,
1 - data.c,
weights=data.n).params_.values
surp_est = surpyval_fitter.fit(data.x,
data.c,
data.n).params
if surpyval_fitter.name == 'Exponential':
surp_est = 1./surp_est
return ll_est, surp_est
def params_with_int_data(data, surpyval_fitter, lifelines_fitter):
ll_est = lifelines_fitter.fit_interval_censoring(data.left,
data.right).params_.values
surp_est = surpyval_fitter.fit(xl=data.left,
xr=data.right).params
if surpyval_fitter.name == 'Exponential':
surp_est = 1./surp_est
return ll_est, surp_est
@pytest.mark.parametrize("data",
generate_case(),
ids=id_func)
def test_weibull_offset_with_real(data):
    # Known issue: these datasets are too far from Weibull-distributed for the offset fit to work
if data.name in ['gbsg2',
'kidney',
'lymph',
'aids']:
assert True
else:
surpyval_fitter = surv.Weibull
fitted = surpyval_fitter.fit(data.x,
data.c,
data.n,
offset=True)
assert fitted.res.success or ('Desired error' in fitted.res.message)
@pytest.mark.parametrize("data,dist",
generate_real_cases(),
ids=id_func)
def test_against_lifelines_with_real_data(data, dist):
ll_fitter = DISTS[dist][0]
surp_fitter = DISTS[dist][1]
assert np.allclose(*params_with_xcn_data(data,
surp_fitter,
ll_fitter), 1e-1)
@pytest.mark.parametrize("data,dist",
generate_real_cases_int(),
ids=id_func)
def test_against_lifelines_with_real_data_interval(data, dist):
ll_fitter = DISTS[dist][0]
surp_fitter = DISTS[dist][1]
assert np.allclose(*params_with_int_data(data,
surp_fitter,
ll_fitter), 1e-1)
@pytest.mark.parametrize("data,dist",
generate_real_cases_reliability(),
ids=id_func)
def test_against_reliability_with_real_data(data, dist):
rel_fitter = REL_DISTS[dist][0]
surp_fitter = REL_DISTS[dist][1]
assert params_with_xcn_data_rel(data, surp_fitter, rel_fitter)
|
<filename>qt-creator-opensource-src-4.6.1/tests/system/suite_editors/tst_rename_macros/test.py
############################################################################
#
# Copyright (C) 2016 The Qt Company Ltd.
# Contact: https://www.qt.io/licensing/
#
# This file is part of Qt Creator.
#
# Commercial License Usage
# Licensees holding valid commercial Qt licenses may use this file in
# accordance with the commercial license agreement provided with the
# Software or, alternatively, in accordance with the terms contained in
# a written agreement between you and The Qt Company. For licensing terms
# and conditions see https://www.qt.io/terms-conditions. For further
# information use the contact form at https://www.qt.io/contact-us.
#
# GNU General Public License Usage
# Alternatively, this file may be used under the terms of the GNU
# General Public License version 3 as published by the Free Software
# Foundation with exceptions as appearing in the file LICENSE.GPL3-EXCEPT
# included in the packaging of this file. Please review the following
# information to ensure the GNU General Public License requirements will
# be met: https://www.gnu.org/licenses/gpl-3.0.html.
#
############################################################################
source("../../shared/qtcreator.py")
cppEditorStr = ":Qt Creator_CppEditor::Internal::CPPEditorWidget"
def main():
global cppEditorStr
folder = prepareTemplate(os.path.abspath(os.path.join(os.getcwd(), "..", "shared",
"simplePlainCPP")))
if folder == None:
test.fatal("Could not prepare test files - leaving test")
return
proFile = os.path.join(folder, "testfiles.pro")
startApplication("qtcreator" + SettingsPath)
if not startedWithoutPluginError():
return
openQmakeProject(proFile)
if not testRenameMacroAfterSourceModification():
return
headerName = "anothertestfile.h"
addCPlusPlusFile(headerName, "C++ Header File", "testfiles.pro",
expectedHeaderName=headerName)
if not testRenameMacroAfterSourceMoving():
return
invokeMenuItem("File", "Save All")
invokeMenuItem("File", "Exit")
def testRenameMacroAfterSourceModification():
def __deleteAnyClass__():
global cppEditorStr
if platform.system() == 'Darwin':
type(cppEditorStr, "<Meta+Left>")
else:
type(cppEditorStr, "<Home>")
markText(cppEditorStr, "Down", 5)
type(cppEditorStr, "<Delete>")
test.log("Testing rename macro after modifying source.")
formerTexts = {}
content = openDocumentPlaceCursor("testfiles.Headers.testfile\\.h",
"class AnyClass", __deleteAnyClass__)
if not content:
return False
formerTexts["testfiles.Headers.testfile\\.h"] = content
content = openDocumentPlaceCursor("testfiles.Sources.testfile\\.cpp", "SOME_MACRO_NAME(a)")
if not content:
return False
formerTexts["testfiles.Sources.testfile\\.cpp"] = content
performMacroRenaming('SOME_OTHER_MACRO_NAME')
verifyChangedContent(formerTexts, "SOME_MACRO_NAME", "SOME_OTHER_MACRO_NAME")
revertChanges(formerTexts)
return True
def testRenameMacroAfterSourceMoving():
def __cut__():
global cppEditorStr
if platform.system() == 'Darwin':
type(cppEditorStr, "<Meta+Left>")
else:
type(cppEditorStr, "<Home>")
markText(cppEditorStr, "Down", 4)
invokeMenuItem("Edit", "Cut")
def __paste__():
global cppEditorStr
type(cppEditorStr, "<Return>")
invokeMenuItem("Edit", "Paste")
def __insertInclude__():
global cppEditorStr
typeLines(cppEditorStr, ['', '#include "anothertestfile.h"'])
test.log("Testing rename macro after moving source.")
formerTexts = {}
content = openDocumentPlaceCursor("testfiles.Headers.testfile\\.h",
"#define SOME_MACRO_NAME( X )\\", __cut__)
if not content:
return False
formerTexts["testfiles.Headers.testfile\\.h"] = content
content = openDocumentPlaceCursor("testfiles.Headers.anothertestfile\\.h",
"#define ANOTHERTESTFILE_H", __paste__)
if not content:
return False
formerTexts["testfiles.Headers.anothertestfile\\.h"] = content
content = openDocumentPlaceCursor('testfiles.Sources.testfile\\.cpp',
'#include "testfile.h"', __insertInclude__)
if not content:
return False
formerTexts["testfiles.Sources.testfile\\.cpp"] = content
placeCursorToLine(cppEditorStr, "SOME_MACRO_NAME(a)")
performMacroRenaming("COMPLETELY_DIFFERENT_MACRO_NAME")
verifyChangedContent(formerTexts, "SOME_MACRO_NAME", "COMPLETELY_DIFFERENT_MACRO_NAME")
revertChanges(formerTexts)
return True
def performMacroRenaming(newMacroName):
for i in range(10):
type(cppEditorStr, "<Left>")
invokeContextMenuItem(waitForObject(cppEditorStr), "Refactor",
"Rename Symbol Under Cursor")
waitForSearchResults()
validateSearchResult(2)
replaceLineEdit = waitForObject("{leftWidget={text='Replace with:' type='QLabel' "
"unnamed='1' visible='1'} "
"type='Core::Internal::WideEnoughLineEdit' unnamed='1' "
"visible='1' "
"window=':Qt Creator_Core::Internal::MainWindow'}")
replaceEditorContent(replaceLineEdit, newMacroName)
clickButton(waitForObject("{text='Replace' type='QToolButton' unnamed='1' visible='1' "
"window=':Qt Creator_Core::Internal::MainWindow'}"))
def verifyChangedContent(origTexts, replacedSymbol, replacement):
global cppEditorStr
successfullyCompared = []
for fileName,text in origTexts.iteritems():
if openDocument(fileName):
successfullyCompared.append(test.compare(waitForObject(cppEditorStr).plainText,
text.replace(replacedSymbol, replacement),
"Verifying content of %s" %
simpleFileName(fileName)))
else:
successfullyCompared.append(False)
test.fail("Failed to open document %s" % simpleFileName(fileName))
if successfullyCompared.count(True) == len(origTexts):
test.passes("Successfully compared %d changed files" % len(origTexts))
else:
test.fail("Verified %d files - %d have been successfully changed and %d failed to "
"change correctly." % (len(origTexts), successfullyCompared.count(True),
successfullyCompared.count(False)))
def revertChanges(files):
for f in files:
simpleName = simpleFileName(f)
if openDocument(f):
try:
invokeMenuItem('File', 'Revert "%s" to Saved' % simpleName)
clickButton(waitForObject(":Revert to Saved.Proceed_QPushButton"))
test.log("Reverted changes inside %s" % simpleName)
except:
test.warning("File '%s' cannot be reverted." % simpleName,
"Maybe it has not been changed at all.")
else:
test.fail("Could not open %s for reverting changes" % simpleName)
|
<filename>reframe/frontend/executors/__init__.py<gh_stars>0
import abc
import sys
import reframe.core.debug as debug
import reframe.core.logging as logging
import reframe.core.runtime as runtime
from reframe.core.environments import EnvironmentSnapshot
from reframe.core.exceptions import (AbortTaskError, JobNotStartedError,
ReframeFatalError, TaskExit)
from reframe.frontend.printer import PrettyPrinter
from reframe.frontend.statistics import TestStats
from reframe.utility.sandbox import Sandbox
ABORT_REASONS = (KeyboardInterrupt, ReframeFatalError, AssertionError)
class RegressionTask:
"""A class representing a :class:`RegressionTest` through the regression
pipeline."""
def __init__(self, check, listeners=[]):
self._check = check
self._failed_stage = None
self._current_stage = None
self._exc_info = (None, None, None)
self._environ = None
self._listeners = list(listeners)
# Test case has finished, but has not been waited for yet
self.zombie = False
@property
def check(self):
return self._check
@property
def exc_info(self):
return self._exc_info
@property
def failed(self):
return self._failed_stage is not None
@property
def failed_stage(self):
return self._failed_stage
def _notify_listeners(self, callback_name):
for l in self._listeners:
callback = getattr(l, callback_name)
callback(self)
def _safe_call(self, fn, *args, **kwargs):
self._current_stage = fn.__name__
try:
with logging.logging_context(self._check) as logger:
logger.debug('entering stage: %s' % self._current_stage)
return fn(*args, **kwargs)
except ABORT_REASONS:
self.fail()
raise
except BaseException as e:
self.fail()
raise TaskExit from e
def setup(self, *args, **kwargs):
self._safe_call(self._check.setup, *args, **kwargs)
self._environ = EnvironmentSnapshot()
def compile(self):
self._safe_call(self._check.compile)
def compile_wait(self):
self._safe_call(self._check.compile_wait)
def run(self):
self._safe_call(self._check.run)
self._notify_listeners('on_task_run')
def wait(self):
self._safe_call(self._check.wait)
self.zombie = False
def poll(self):
finished = self._safe_call(self._check.poll)
if finished:
self.zombie = True
self._notify_listeners('on_task_exit')
return finished
def sanity(self):
self._safe_call(self._check.sanity)
def performance(self):
self._safe_call(self._check.performance)
def cleanup(self, *args, **kwargs):
self._safe_call(self._check.cleanup, *args, **kwargs)
self._notify_listeners('on_task_success')
def fail(self, exc_info=None):
self._failed_stage = self._current_stage
self._exc_info = exc_info or sys.exc_info()
self._notify_listeners('on_task_failure')
def resume(self):
self._environ.load()
def abort(self, cause=None):
logging.getlogger().debug('aborting: %s' % self._check.info())
exc = AbortTaskError()
exc.__cause__ = cause
try:
# FIXME: we should perhaps extend the RegressionTest interface
# for supporting job cancelling
if not self.zombie and self._check.job:
self._check.job.cancel()
except JobNotStartedError:
self.fail((type(exc), exc, None))
except BaseException:
self.fail()
else:
self.fail((type(exc), exc, None))
class TaskEventListener:
@abc.abstractmethod
def on_task_run(self, task):
"""Called whenever the run() method of a RegressionTask is called."""
@abc.abstractmethod
def on_task_exit(self, task):
"""Called whenever a RegressionTask finishes."""
@abc.abstractmethod
def on_task_failure(self, task):
"""Called when a regression test has failed."""
@abc.abstractmethod
def on_task_success(self, task):
"""Called when a regression test has succeeded."""
class Runner:
"""Responsible for executing a set of regression tests based on an
execution policy."""
def __init__(self, policy, printer=None, max_retries=0):
self._policy = policy
self._printer = printer or PrettyPrinter()
self._max_retries = max_retries
self._stats = TestStats()
self._policy.stats = self._stats
self._policy.printer = self._printer
self._sandbox = Sandbox()
self._environ_snapshot = EnvironmentSnapshot()
def __repr__(self):
return debug.repr(self)
@property
def policy(self):
return self._policy
@property
def stats(self):
return self._stats
def runall(self, checks):
try:
self._printer.separator('short double line',
'Running %d check(s)' % len(checks))
self._printer.timestamp('Started on', 'short double line')
self._printer.info('')
self._runall(checks)
if self._max_retries:
self._retry_failed(checks)
finally:
# Print the summary line
num_failures = self._stats.num_failures()
num_cases = self._stats.num_cases(run=0)
self._printer.status(
'FAILED' if num_failures else 'PASSED',
'Ran %d test case(s) from %d check(s) (%d failure(s))' %
(num_cases, len(checks), num_failures), just='center'
)
self._printer.timestamp('Finished on', 'short double line')
self._environ_snapshot.load()
def _partition_supported(self, check, partition):
if self._policy.skip_system_check:
return True
return check.supports_system(partition.name)
def _environ_supported(self, check, environ):
ret = True
if self._policy.only_environs:
ret = environ.name in self._policy.only_environs
if self._policy.skip_environ_check:
return ret
else:
return ret and check.supports_environ(environ.name)
def _retry_failed(self, checks):
rt = runtime.runtime()
while (self._stats.num_failures() and
rt.current_run < self._max_retries):
failed_checks = [
c for c in checks if c.name in
set([t.check.name for t in self._stats.tasks_failed()])
]
rt.next_run()
self._printer.separator(
'short double line',
'Retrying %d failed check(s) (retry %d/%d)' %
(len(failed_checks), rt.current_run, self._max_retries)
)
self._runall(failed_checks)
def _runall(self, checks):
system = runtime.runtime().system
self._policy.enter()
for c in checks:
self._policy.enter_check(c)
for p in system.partitions:
if not self._partition_supported(c, p):
self._printer.status('SKIP',
'skipping %s' % p.fullname,
just='center',
level=logging.VERBOSE)
continue
self._policy.enter_partition(c, p)
for e in p.environs:
if not self._environ_supported(c, e):
self._printer.status('SKIP',
'skipping %s for %s' %
(e.name, p.fullname),
just='center',
level=logging.VERBOSE)
continue
self._sandbox.system = p
self._sandbox.environ = e
self._sandbox.check = c
self._policy.enter_environ(self._sandbox.check,
self._sandbox.system,
self._sandbox.environ)
self._environ_snapshot.load()
self._policy.run_check(self._sandbox.check,
self._sandbox.system,
self._sandbox.environ)
self._policy.exit_environ(self._sandbox.check,
self._sandbox.system,
self._sandbox.environ)
self._policy.exit_partition(c, p)
self._policy.exit_check(c)
self._policy.exit()
class ExecutionPolicy:
"""Base abstract class for execution policies.
An execution policy implements the regression check pipeline."""
def __init__(self):
# Options controlling the check execution
self.skip_system_check = False
self.force_local = False
self.skip_environ_check = False
self.skip_sanity_check = False
self.skip_performance_check = False
self.keep_stage_files = False
self.only_environs = None
self.printer = None
self.strict_check = False
# Scheduler options
self.sched_flex_alloc_tasks = None
self.sched_account = None
self.sched_partition = None
self.sched_reservation = None
self.sched_nodelist = None
self.sched_exclude_nodelist = None
self.sched_options = []
# Task event listeners
self.task_listeners = []
self.stats = None
def __repr__(self):
return debug.repr(self)
def enter(self):
pass
def exit(self):
pass
def enter_check(self, check):
self.printer.separator(
'short single line',
'started processing %s (%s)' % (check.name, check.descr)
)
def exit_check(self, check):
self.printer.separator(
'short single line',
'finished processing %s (%s)\n' % (check.name, check.descr)
)
def enter_partition(self, c, p):
pass
def exit_partition(self, c, p):
pass
def enter_environ(self, c, p, e):
pass
def exit_environ(self, c, p, e):
pass
@abc.abstractmethod
def run_check(self, c, p, e):
"""Run a check with on a specific system partition with a specific environment.
Keyword arguments:
c -- the check to run.
p -- the system partition to run the check on.
e -- the environment to run the check with.
"""
if self.strict_check:
c.strict_check = True
if self.force_local:
c.local = True
@abc.abstractmethod
def getstats(self):
"""Return test case statistics of the run."""
|
import time
import onionGpio
from OmegaExpansion import oledExp
from requests import get
from dns import resolver
from datetime import datetime
oledExp.driverInit(1)
oledExp.setBrightness(0)
oledExp.setTextColumns()
gpio_rled = onionGpio.OnionGpio(17)
gpio_gled = onionGpio.OnionGpio(16)
gpio_bled = onionGpio.OnionGpio(15)
gpio_rled.setOutputDirection(0)
gpio_gled.setOutputDirection(0)
gpio_bled.setOutputDirection(0)
time.sleep(0.25)  # brief white flash to confirm the LED works (all channels default on at init)
flag_global_error = False
def color_blink(r,g,b,duration=0.25,sleep=0.25):
#LED GPIO 1 means LOW and 0 means HIGH
gpio_rled.setValue(1-r)
gpio_gled.setValue(1-g)
gpio_bled.setValue(1-b)
if duration > 0:
time.sleep(duration)
gpio_rled.setValue(1)
gpio_gled.setValue(1)
gpio_bled.setValue(1)
time.sleep(sleep)
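# color_blink drives the RGB channels active-low: color_blink(1, 0, 0) flashes red
# for 0.25 s, while a negative duration (e.g. color_blink(0, 1, 0, duration=-1))
# skips the turn-off step and leaves the colour latched on.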
def led_start():
global flag_global_error
flag_global_error = False
color_blink(0,0,1)
def led_error(blink=True):
global flag_global_error
flag_global_error = True
color_blink(1,0,0)
color_blink(1,0,0)
if blink == False:
color_blink(1,0,0,duration=-1)
def led_success(blink=True):
color_blink(0,1,0)
if blink == False:
color_blink(0,1,0,duration=-1)
def check_website(url, name, line):
oledExp.setCursor(line,0)
try:
get(url,timeout=5).text
oledExp.write(name + " OK")
led_success() #All Good
except:
oledExp.write(name + " BAD")
print ("HTTP Request Failed")
led_error()
while True:
led_start()
now = datetime.now()
current_time = now.strftime("%H:%M:%S")
oledExp.setCursor(0,0)
oledExp.write(current_time)
#DNS Try
oledExp.setCursor(1,0)
try:
res = resolver.Resolver()
res.nameservers = ['10.0.0.3']
answers = res.query('prafiles.in', lifetime=5)
oledExp.write("DNS Good")
except:
oledExp.write("DNS Bad")
led_error()
continue
#Internet HTTP Try
check_website('https://monitor.prafiles.in',"Personal",2)
check_website('https://brabo-platform.nl-dev.solulever.com/',"NL-Dev",3)
check_website('https://dev.solulever.com',"E2E",4)
check_website('https://ep.solulever.com',"EP",5)
check_website('https://lightscameraazadi.in',"LCA",6)
if flag_global_error:
led_error(blink=False)
else:
led_success(blink=False)
time.sleep(10)
oledExp.clear()
time.sleep(50)
#readings = get_interface_val('ppp0')
#oledExp.setCursor(2,0)
#oledExp.write("WAN Mbps Rx " + str (readings['rx_rate']) + " Tx " + str (readings['tx_rate']))
#readings = get_interface_val('eth0')
#oledExp.setCursor(3,0)
#oledExp.write("ETH Mbps Rx " + str (readings['rx_rate']) + " Tx " + str (readings['tx_rate']))
|
from http.cookiejar import CookieJar
import pandas as pd
import requests as req
from strategy.keep_increasing import check as ki_check
import utils
import time
import settings
import talib as tl
settings.init()
def check():
'''
    TDX (Tongdaxin) end-of-session stock picking method.
    Start screening at 14:30 and run the following steps in order:
    Step 1: daily gain between 3% and 5%.
    Step 2: filter by volume ratio (lb): sort by volume ratio descending and drop stocks with a ratio < 1.
    Step 3: drop all stocks with a turnover rate below 5% or above 10%.
    Step 4: sort the result by free-float market cap and drop stocks below 5 billion or above 20 billion CNY.
    Step 5: use trading volume: keep stocks whose volume keeps expanding (a staircase-like pattern is best)
            and drop stocks whose volume swings unstably between high and low.
    Step 6: check the candlestick pattern: in the short term watch the 5/10/20-day moving averages; the best
            setup is a bullish upward fan together with the 60-day moving average.
            If the price sits below an important candlestick level, the stock has usually spiked and fallen
            back recently, meaning heavy overhead supply in a dense trading zone; remove these as well.
            Keep stocks with no overhead resistance, so a rally comes more easily.
    Step 7: after the steps above, make the final selection. Use the intraday chart to spot strong stocks:
            those that outperform the index are rising against the trend, and in a market where the strong
            stay strong, only strong stocks maximise returns; ideally they also belong to the current hot
            theme sectors, which gives stronger support.
            Overlay the remaining quality stocks on the Shanghai Composite intraday chart; a stock should
            stay above the index's intraday price line all day, which shows a solid gain, good sentiment,
            profits for everyone on board, and more momentum for the next day's rally.
    Step 8: the remaining stocks are all very strong preferred picks; depending on market conditions there
            may sometimes be none at all, which is normal, so stay patient and persistent.
            Watch the remaining stocks after 14:30: a new intraday high marks a target stock, and the best
            entry is a pullback to the moving average that does not break below it.
            Also set take-profit and stop-loss levels. The essence of short-term trading is speed, precision
            and decisiveness: get in and out quickly, and if a profitable position stops developing as
            expected, just exit. Technique is about mastery, not quantity.
'''
XUE_QIU_URL = 'https://xueqiu.com/service/screener/screen'
headers = {
'Sec-Fetch-Dest': 'empty',
'Sec-Fetch-Mode': 'cors',
'Sec-Fetch-Site': 'same-origin',
'Host': 'xueqiu.com',
'Referer': 'https://xueqiu.com/hq/screener',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:98.0) Gecko/20100101 Firefox/98.0',
'X-Requested-With': 'XMLHttpRequest'
}
cookies = {
'bid': 'ce0e1db716e5ae3f04441cebb326ee79_l1779t9i',
'device_id': '38d9067979c607dbdd6526aa90691a7a',
'remember': '1',
'xq_is_login': '1',
's': 'dc11uia7zl',
'u': '8338841301',
'xq_a_token': '<KEY>',
'xq_r_token': '4364be70192304440149919b82a4419b66843900',
'xqat': '<KEY>',
'xq_id_token': '<KEY>',
'acw_tc': '2760779d16485318957215495e4f03bd2a4adcd84464a4f5a1ce44eb8ec327'
}
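    # The screener parameters below map onto the steps described in the docstring:
    #   pct='3_5'                      -> step 1: daily gain 3%-5%
    #   volume_ratio='1_1000'          -> step 2: volume ratio >= 1
    #   tr='5_10'                      -> step 3: turnover rate 5%-10%
    #   fmc='4500000000_15000000000'   -> step 4: free-float market cap 4.5e9-1.5e10 CNY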
params = dict(
category='CN', exchange='sh_sz', areacode=None, indcode=None, order_by='symbol', order='desc',
current=None, pct='3_5', volume_ratio='1_1000', tr='5_10', fmc='4500000000_15000000000',
size=1000, only_count=0, page=1,
_=int(time.time() * 1000))
j = req.get(XUE_QIU_URL, params=params, cookies=cookies, headers=headers)
from datetime import datetime
end_date = datetime.now().strftime('%Y%m%d')
if j.status_code == 200:
good_stocks = pd.DataFrame(j.json()['data']['list'])
good_stocks['fmc'] = good_stocks['fmc'] / 1.0e8
good_stocks['keep_increase'] = [keep_increase(i) for i in good_stocks['symbol']]
good_stocks.to_csv(f'result/短线策略-{end_date}.csv')
return good_stocks
def keep_increase(code):
if code.startswith('SZ30'):
return None
data = utils.read_data(code)
return ki_check('', data, threshold=5) and ki_check('', data, threshold=10) and ki_check('', data, threshold=20)
if __name__ == '__main__':
data = check()
print(data)
|
<reponame>BrendaH/django-machina
import os
import pytest
from django.conf import settings
from django.core.files import File
from django.urls import reverse
from faker import Faker
from machina.core.db.models import get_model
from machina.core.loading import get_class
from machina.test.factories import (
AttachmentFactory, ForumReadTrackFactory, PostFactory, create_forum, create_topic
)
from machina.test.testcases import BaseClientTestCase
faker = Faker()
Attachment = get_model('forum_attachments', 'Attachment')
ForumReadTrack = get_model('forum_tracking', 'ForumReadTrack')
Post = get_model('forum_conversation', 'Post')
Topic = get_model('forum_conversation', 'Topic')
TopicReadTrack = get_model('forum_tracking', 'TopicReadTrack')
PermissionHandler = get_class('forum_permission.handler', 'PermissionHandler')
assign_perm = get_class('forum_permission.shortcuts', 'assign_perm')
remove_perm = get_class('forum_permission.shortcuts', 'remove_perm')
class TestAttachmentView(BaseClientTestCase):
@pytest.yield_fixture(autouse=True)
def setup(self):
# Permission handler
self.perm_handler = PermissionHandler()
# Set up a top-level forum
self.top_level_forum = create_forum()
# Set up a topic and some posts
self.topic = create_topic(forum=self.top_level_forum, poster=self.user)
self.post = PostFactory.create(topic=self.topic, poster=self.user)
# Set up an attachment
f = open(settings.MEDIA_ROOT + '/attachment.jpg', 'rb')
self.attachment_file = File(f)
self.attachment = AttachmentFactory.create(
post=self.post, file=self.attachment_file)
# Mark the forum as read
ForumReadTrackFactory.create(forum=self.top_level_forum, user=self.user)
# Assign some permissions
assign_perm('can_read_forum', self.user, self.top_level_forum)
assign_perm('can_download_file', self.user, self.top_level_forum)
yield
# teardown
# --
self.attachment_file.close()
attachments = Attachment.objects.all()
for attachment in attachments:
try:
attachment.file.delete()
except: # noqa: E722
pass
def test_browsing_works(self):
# Setup
correct_url = reverse('forum_conversation:attachment', kwargs={'pk': self.attachment.id})
# Run
response = self.client.get(correct_url, follow=True)
# Check
assert response.status_code == 200
def test_cannot_be_browsed_by_users_who_cannot_download_forum_files(self):
# Setup
remove_perm('can_download_file', self.user, self.top_level_forum)
correct_url = reverse('forum_conversation:attachment', kwargs={'pk': self.attachment.id})
# Run
response = self.client.get(correct_url, follow=True)
# Check
assert response.status_code == 403
def test_embed_the_correct_http_headers_in_the_response(self):
# Setup
correct_url = reverse('forum_conversation:attachment', kwargs={'pk': self.attachment.id})
filename = os.path.basename(self.attachment.file.name)
# Run
response = self.client.get(correct_url, follow=True)
# Check
assert response.status_code == 200
assert response['Content-Type'] == 'image/jpeg'
assert response['Content-Disposition'] == 'attachment; filename={}'.format(filename)
def test_is_able_to_handle_unknown_file_content_types(self):
# Setup
f = open(settings.MEDIA_ROOT + '/attachment.kyz', 'rb')
attachment_file = File(f)
attachment = AttachmentFactory.create(
post=self.post, file=attachment_file)
correct_url = reverse('forum_conversation:attachment', kwargs={'pk': attachment.id})
# Run
response = self.client.get(correct_url, follow=True)
# Check
assert response.status_code == 200
assert response['Content-Type'] == 'text/plain'
attachment_file.close()
attachment.file.delete()
|
<reponame>mustafa-travisci/lto-api.python
import requests
import json
from lto.transactions import from_data as tx_from_data, SetScript
from lto.accounts import Account
from lto import crypto
class PublicNode(object):
def __init__(self, url, api_key=''):
self.url = url
self.api_key = api_key
@staticmethod
def __addr(account_or_address):
return account_or_address.address if isinstance(account_or_address, Account) else account_or_address
def wrapper(self, api, post_data='', host='', headers=None):
if headers is None:
headers = {}
if not host:
host = self.url
if self.api_key:
headers = {"X-API-Key": self.api_key}
if post_data:
r = requests.post('%s%s' % (host, api), data=post_data,
headers=crypto.merge_dicts(headers, {'content-type': 'application/json'}))
else:
r = requests.get('%s%s' % (host, api), headers=headers)
if r.status_code != 200:
method = 'POST' if post_data else 'GET'
try:
error = json.loads(r.text)
except:
error = r.text
raise Exception(
'{} {}{} responded with {} {}'.format(method, host, api, r.status_code, r.reason),
error
)
r.raise_for_status()
return r.json()
def broadcast(self, transaction):
data = json.dumps(transaction.to_json())
response = self.wrapper(api='/transactions/broadcast', post_data=data)
return tx_from_data(response)
def compile(self, script_source):
compiled_script = self.wrapper(api='/utils/script/compile', post_data=script_source)['script']
return SetScript(compiled_script)
def height(self):
return self.wrapper('/blocks/height')['height']
def last_block(self):
return self.wrapper('/blocks/last')
def block(self, n):
return self.wrapper('/blocks/at/%d' % n)
def tx(self, id):
response = self.wrapper('/transactions/info/%s' % id)
return tx_from_data(response)
def lease_list(self, address):
return self.wrapper(api='/leasing/active/{}'.format(self.__addr(address)))
def get_data(self, address):
return self.wrapper(api='/addresses/data/{}'.format(self.__addr(address)))
def get_data_by_key(self, address, key):
return self.wrapper(api='/addresses/data/{}/{}'.format(self.__addr(address), key))
def sponsorship_list(self, address):
return self.wrapper(api='/sponsorship/status/{}'.format(self.__addr(address)))
def association_list(self, address):
return self.wrapper(api='/associations/status/{}'.format(self.__addr(address)))
def node_status(self):
return self.wrapper(api='/node/status')
def balance(self, address):
try:
return self.wrapper('/addresses/balance/%s' % self.__addr(address))['balance']
except:
return -1
def balance_details(self, address):
return self.wrapper('/addresses/balance/details/%s' % self.__addr(address))
def validate_address(self, address):
return self.wrapper('/addresses/validate/{}'.format(address))['valid']
def data_of(self, address):
data = self.wrapper('/addresses/data/%s' % self.__addr(address))
dict = {}
for entry in data:
dict[entry['key']] = entry['value']
return dict
def transactions(self, address, limit=100, after=''):
return self.wrapper('/transactions/address/%s/limit/%d%s' % (
self.__addr(address), limit, "" if after == "" else "?after={}".format(after)))
def sign_transaction(self, transaction):
data = json.dumps(transaction.to_json())
return(self.wrapper(api='/transactions/sign', post_data=data))
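# Illustrative usage sketch (the node URL and transaction id are made-up examples):
#   node = PublicNode('https://nodes.lto.network')
#   print(node.height())
#   print(node.balance('3J...someAddress'))
#   tx = node.tx('some-transaction-id')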
|
#!/usr/bin/env python
"""
Postprocess the outputs of a PISA analysis.
"""
from __future__ import absolute_import
from argparse import ArgumentParser
from collections import OrderedDict
from os.path import basename
import sys
import numpy as np
from pisa.utils.fileio import mkdir
from pisa.utils.log import logging, set_verbosity
from pisa.utils.postprocess import Postprocessor
from pisa.utils.scripting import get_script, parse_command
__all__ = ['SCRIPT', 'parse_args', 'postproc_profile_scan',
'postproc_discrete_hypo', 'postproc_inj_param_scan',
'postproc_syst_tests', 'parse_hypo_testing_subcommand', 'main']
__author__ = '<NAME>, <NAME>'
__license__ = '''Copyright (c) 2014-2017, The IceCube Collaboration
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.'''
SCRIPT = basename(get_script())
def parse_args(command, description):
"""Parse command line args, where `command` defines what args are to be
displayed / accepted...
Parameters
----------
command : string
Command passed to the script
description : string
Description line(s) to print to terminal in help message (i.e. if -h
or --help is passed)
Returns
-------
init_args_d : dict
Command line arguments passed via the command line, in a dictionary
"""
assert command in ['profile_scan', 'hypo_testing', 'inj_param_scan',
'syst_tests']
parser = ArgumentParser(description=description)
if command == 'inj_param_scan':
parser.add_argument(
'-d', '--dir', required=True,
metavar='DIR', type=str, action='append',
help='''Directory containing output of hypo_testing.py.
Repeat this argument to plot multiple significance lines on
the same plot. Note that if you do then none of the fits or
the minimiser info will be plotted'''
)
parser.add_argument(
'--dir-label', type=str, action='append',
help="""A unique name from which to identify each the above
directories can be identified. Repeat this argument for as
many times as you have directories. If no labels are
specified here they will be constructed using the truth
information in the files. So either specify one for
every directory or none at all."""
)
if command in ['hypo_testing', 'syst_tests']:
parser.add_argument(
'-d', '--dir', required=True,
metavar='DIR', type=str,
help='''Directory containing output of hypo_testing.py.'''
)
if command == 'hypo_testing':
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument(
'--asimov', action='store_true',
help='''Analyze the Asimov trials in the specified
directories.'''
)
group.add_argument(
'--llr', action='store_true',
help='''Analyze the LLR trials in the specified
directories.'''
)
if command == 'profile_scan':
parser.add_argument(
'--infile', metavar='FILE', type=str, required=True,
help='''Output file of profile_scan.py to processs.'''
)
parser.add_argument(
'--best-fit-infile', metavar='FILE', type=str, default=None,
help='''Output file of profile_scan.py containing the best
fit to add to the plots, if available.'''
)
parser.add_argument(
'--projection-infile', metavar='FILE',
type=str, action='append', default=None,
help='''If you want to add projections to your plots e.g. 1D
projections to 2D plots you can specify them here. Repeat this
argument to specify multiple projections.'''
)
parser.add_argument(
'--other-contour', metavar='FILE',
type=str, action='append', default=None,
help='''If you want to add other contours to your plots e.g.
Other experiments then specify them here. This is expected to
be a json dictionary with the following keys: vars, contour,
label, color, linestyle and (optionally) the best_fit point.'''
)
parser.add_argument(
'--pseudo-experiments', metavar='DIR',
type=str, default=None,
help='''If you want to overlay pseudo experiment fits from
the hypo_testing.py script on to the contours to check
coverage, set the directory here. Note that this will overlay
all of the hX_hypo_to_hY_fid fit results on to the contour
so you can select the appropriate one after the script is run.'''
)
parser.add_argument(
'--detector', type=str, default='',
help='''Name of detector to put in histogram titles.'''
)
parser.add_argument(
'--selection', type=str, default='',
help='''Name of selection to put in histogram titles.'''
)
if command == 'hypo_testing':
parser.add_argument(
'--llr-plots', action='store_true', default=False,
help='''Flag to make the LLR plots. This will give the
actual analysis results.'''
)
parser.add_argument(
'--fit-information', action='store_true', default=False,
help='''Flag to make tex files containing the
fiducial fit params and metric.'''
)
parser.add_argument(
'--minim-information', action='store_true', default=False,
help='''Flag to make plots of the minimiser information i.e. status,
number of iterations, time taken etc.'''
)
parser.add_argument(
'--individual-posteriors', action='store_true',
default=False,
help='''Flag to plot individual posteriors.'''
)
parser.add_argument(
'--combined-posteriors', action='store_true', default=False,
help='''Flag to plot combined posteriors for each h0 and h1
combination.'''
)
parser.add_argument(
'--individual-overlaid-posteriors', action='store_true',
default=False,
            help='''Flag to plot individual overlaid posteriors. Overlaid
            here means that a plot will be made with each of the h0 and h1
            returned values on the same axes for each of the fiducial h0
            and h1 pseudos.'''
)
parser.add_argument(
'--combined-overlaid-posteriors', action='store_true',
default=False,
help='''Flag to plot combined overlaid posteriors.'''
)
parser.add_argument(
'--individual-scatter', action='store_true', default=False,
help='''Flag to plot individual 2D scatter plots of posteriors.'''
)
parser.add_argument(
'--combined-individual-scatter',
action='store_true', default=False,
help='''Flag to plot all 2D scatter plots of one systematic
with every other systematic on one plot for each h0 and h1
combination.'''
)
parser.add_argument(
'--combined-scatter', action='store_true', default=False,
help='''Flag to plot all 2D scatter plots on one plot for each
h0 and h1 combination.'''
)
parser.add_argument(
'--correlation-matrix', action='store_true', default=False,
help='''Flag to plot the correlation matrices for each h0 and h1
combination.'''
)
parser.add_argument(
'--threshold', type=float, default=0.0,
            help='''Sets the threshold used to remove 'outlier' trials.
Ideally this will not be needed at all, but it is there in case
of e.g. failed minimiser. The higher this value, the more outliers
will be included. Do not set this parameter if you want all trials
to be included.'''
)
parser.add_argument(
'--extra-points', type=str, action='append',
help='''Extra lines to be added to the LLR plots. This is useful,
for example, when you wish to add specific LLR fit values to the
plot for comparison. These should be supplied as a single value
e.g. x1 or as a path to a file with the value provided in one
            column that can be interpreted by numpy genfromtxt. Repeat this
argument in conjunction with the extra points label below to
specify multiple (and uniquely identifiable) sets of extra
points.'''
)
parser.add_argument(
'--extra-points-labels', type=str, action='append',
help='''The label(s) for the extra points above.'''
)
if command == 'inj_param_scan':
parser.add_argument(
'--inj-param-units', type=str, default=None,
help="""If you know the units that you injected the parameter
with and you expect that the script will not be able to find
this by looking at the fit parameters in the config file
(i.e. theta13 may be defined in degrees in the config file
but you injected it in radians) then use this argument to
explicitly set it for use in the plot labels."""
)
parser.add_argument(
'--significances', action='store_true', default=False,
help='''Flag to make the Asimov significance plots. This will
give the actual results of the study.'''
)
parser.add_argument(
'--minim-information', action='store_true', default=False,
help='''Flag to make plots of the minimiser information i.e.
status, number of iterations, time taken etc.'''
)
parser.add_argument(
'--individual-fits', action='store_true', default=False,
help='''Flag to make plots of all of the best fit parameters
separated by the fitted parameter.'''
)
parser.add_argument(
'--combined-fits', action='store_true', default=False,
help='''Flag to make plots of all of the best fit parameters joined
together.'''
)
parser.add_argument(
'--extra-points', type=str, action='append', metavar='LIST',
help='''Extra points to be added to the plots. This is useful,
for example, when you wish to add LLR results to the plot.
These should be supplied as a list of tuples e.g.
"[(x1,y1),(x2,y2)]" or "[(x1,y1,y1err),(x2,y2,y2err)]" or
"[(x1,y1,y1uperr,y1downerr),(x2,y2,y2uperr,y2downerr)]" or
as a path to a file with the values provided in columns that
            can be interpreted by numpy genfromtxt. Repeat this argument in
conjunction with the extra points label below to specify
multiple (and uniquely identifiable) sets of extra points.'''
)
parser.add_argument(
'--extra-points-labels', type=str, action='append',
help='''The label(s) for the extra points above.'''
)
parser.add_argument(
'--outdir', metavar='DIR', type=str, default=None,
help='''Store all output plots to this directory. This will make
further subdirectories, if needed, to organise the output plots.'''
)
parser.add_argument(
'--pdf', action='store_true',
help='''Produce pdf plot(s).'''
)
parser.add_argument(
'--png', action='store_true',
help='''Produce png plot(s).'''
)
parser.add_argument(
'-v', action='count', default=None,
help='''set verbosity level'''
)
if command == 'profile_scan':
args = parser.parse_args(sys.argv[2:])
else: # inj_param_scan, syst_tests, and hypo_testing
args = parser.parse_args(sys.argv[3:])
init_args_d = vars(args)
set_verbosity(init_args_d.pop('v'))
init_args_d['formats'] = []
if args.png:
init_args_d['formats'].append('png')
if args.pdf:
init_args_d['formats'].append('pdf')
if init_args_d['formats']:
logging.info('Files will be saved in format(s) %s',
init_args_d['formats'])
return init_args_d
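# Illustrative sketch (not part of the original script): one way parse_args could
# be exercised for the 'profile_scan' command. The argv values (the file name
# 'scan.json' and the output directory 'plots/') are placeholders.
def _example_profile_scan_args():
    """Sketch only; mimics invoking `<script> profile_scan --infile scan.json ...`."""
    sys.argv = [SCRIPT, 'profile_scan',
                '--infile', 'scan.json', '--outdir', 'plots/', '--png']
    return parse_args(command='profile_scan',
                      description='Process the output files of profile_scan')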
def postproc_profile_scan(return_outputs=False):
"""Process the output files of profile_scan"""
init_args_d = parse_args(
description=postproc_profile_scan.__doc__,
command='profile_scan'
)
if init_args_d['pseudo_experiments'] is not None:
fluctuate_fid = True
fluctuate_data = False
else:
fluctuate_fid = None
fluctuate_data = None
mkdir(init_args_d['outdir'])
postprocessor = Postprocessor(
analysis_type='profile_scan',
detector=init_args_d['detector'],
selection=init_args_d['selection'],
outdir=init_args_d['outdir'],
formats=init_args_d['formats'],
scan_file=init_args_d['infile'],
best_fit_file=init_args_d['best_fit_infile'],
projection_files=init_args_d['projection_infile'],
other_contours=init_args_d['other_contour'],
pseudo_experiments=init_args_d['pseudo_experiments'],
fluctuate_fid=fluctuate_fid,
fluctuate_data=fluctuate_data
)
# 1D profile scans
if len(postprocessor.all_bin_cens) == 1:
postprocessor.plot_1d_scans()
# 2D profile scans
elif len(postprocessor.all_bin_cens) == 2:
postprocessor.plot_2d_scans()
if (postprocessor.all_bin_names[0] == 'theta23'
and postprocessor.all_bin_names[1] == 'deltam31'):
postprocessor.add_deltam32_sin2theta23()
postprocessor.plot_2d_scans(
xlabel='sin2theta23',
xunits='dimensionless',
ylabel='deltam32'
)
else:
raise NotImplementedError(
            'Postprocessing of profile scans in anything other than 1D or '
            '2D is not implemented in this script.'
)
if return_outputs:
return postprocessor
def postproc_discrete_hypo(return_outputs=False):
"""Hypothesis testing: How do two hypotheses compare for
describing MC or data?
This computes significances, etc. from the logfiles recorded by the
`hypo_testing.py` script, for either Asimov or llr analysis. Plots and
tables are produced in the case of llr analysis."""
# TODO:
#
# 1) Some of the "combined" plots currently make it impossible to read the
# axis labels. Come up with a better way of doing this. Could involve
# making legends and just labelling the axes alphabetically.
init_args_d = parse_args(
description=postproc_discrete_hypo.__doc__,
command='hypo_testing'
)
if init_args_d['asimov']:
# TODO - Something like the necessary function is there with
# calculate_deltachi2_significances but exactly how to output
# this should probably be thought about
raise NotImplementedError(
'Postprocessing of Asimov analysis not implemented yet.'
)
# Otherwise: llr analysis
if init_args_d['outdir'] is None:
raise ValueError('Must specify --outdir when processing llr results.')
postprocessor = Postprocessor(
analysis_type='hypo_testing',
test_type='analysis',
logdir=init_args_d['dir'],
detector=init_args_d['detector'],
selection=init_args_d['selection'],
outdir=init_args_d['outdir'],
formats=init_args_d['formats'],
fluctuate_fid=True,
fluctuate_data=False,
extra_points=init_args_d['extra_points'],
extra_points_labels=init_args_d['extra_points_labels']
)
    trial_nums = list(postprocessor.data_sets[
        postprocessor.labels.dict['data']
    ]['h0_fit_to_h1_fid'].keys())
if init_args_d['threshold'] != 0.0:
logging.info('Outlying trials will be removed with a '
'threshold of %.2f', init_args_d['threshold'])
postprocessor.purge_outlying_trials(
trial_nums=np.array(trial_nums),
thresh=init_args_d['threshold']
)
else:
logging.info('All trials will be included in the analysis.')
if init_args_d['llr_plots']:
if len(trial_nums) != 1:
postprocessor.make_llr_plots()
else:
raise ValueError(
"llr plots were requested but only 1 trial "
"was found in the logdir."
)
if init_args_d['fit_information']:
postprocessor.make_fiducial_fit_files()
if init_args_d['minim_information']:
postprocessor.make_fit_information_plots()
if init_args_d['individual_posteriors']:
postprocessor.make_posterior_plots()
if init_args_d['combined_posteriors']:
postprocessor.make_posterior_plots(combined=True)
if init_args_d['individual_overlaid_posteriors']:
postprocessor.make_overlaid_posterior_plots()
if init_args_d['combined_overlaid_posteriors']:
postprocessor.make_overlaid_posterior_plots(combined=True)
if init_args_d['individual_scatter']:
postprocessor.make_scatter_plots()
if init_args_d['combined_individual_scatter']:
postprocessor.make_scatter_plots(combined=True, singlesyst=True)
if init_args_d['combined_scatter']:
postprocessor.make_scatter_plots(combined=True)
if init_args_d['correlation_matrix']:
postprocessor.make_scatter_plots(matrix=True)
def postproc_inj_param_scan(return_outputs=False):
"""Hypothesis testing: How do two hypotheses compare for
describing MC or data?
This computes significances, etc. from the logfiles recorded by the
`hypo_testing.py` script for a scan over some injected parameter.
The main result will be an Asimov sensitivity curve as a function of
    this injected parameter."""
init_args_d = parse_args(
description=postproc_inj_param_scan.__doc__,
command='inj_param_scan'
)
postprocessor = Postprocessor(
analysis_type='hypo_testing',
test_type='injparamscan',
logdir=init_args_d['dir'],
detector=init_args_d['detector'],
selection=init_args_d['selection'],
outdir=init_args_d['outdir'],
formats=init_args_d['formats'],
fluctuate_fid=False,
fluctuate_data=False,
extra_points=init_args_d['extra_points'],
extra_points_labels=init_args_d['extra_points_labels'],
inj_param_units=init_args_d['inj_param_units']
)
if len(postprocessor.data_sets) == 1:
        if set(postprocessor.wh_to_th[0]['params'].keys()) == {'bestfit', 'altit'}:
if init_args_d['individual_fits'] or init_args_d['combined_fits']:
raise ValueError(
"You have requested to make plots of the best fit "
"points of the systematic parameters but this is "
"not possible snce there are none included in "
"this analysis."
)
if init_args_d['significances']:
postprocessor.make_asimov_significance_plots()
if init_args_d['minim_information']:
postprocessor.make_fit_information_plots()
if init_args_d['individual_fits']:
postprocessor.make_asimov_fit_parameter_plots()
if init_args_d['combined_fits']:
postprocessor.make_asimov_fit_parameter_plots(combined=True)
else:
        if (init_args_d['individual_fits'] or init_args_d['combined_fits']
or init_args_d['minim_information']):
raise ValueError(
"You have specified multiple input directories but have "
"also requested to make plots of the fit parameters or the "
"minimiser information. Multiple input directories are "
"only compatible with plotting the significances overlaid."
)
def postproc_syst_tests(return_outputs=False):
"""Hypothesis testing: How do two hypotheses compare for
describing MC or data?
This script/module computes significances, etc. from the logfiles recorded
    by the `systematics_tests.py` script. That is, it looks at how the fits
change for three different N-1 tests:
1) Where one of the systematics is fixed to the baseline value.
2) Where one of the systematics is injected *off* baseline but fixed
*on* baseline in the hypotheses.
3) Same as 2, but the systematic is not fixed and so the minimiser is
    allowed to try to correct for the incorrect hypothesis."""
init_args_d = parse_args(
description=postproc_syst_tests.__doc__,
command='syst_tests'
)
postprocessor = Postprocessor(
analysis_type='hypo_testing',
test_type='systtests',
logdir=init_args_d['dir'],
detector=init_args_d['detector'],
selection=init_args_d['selection'],
outdir=init_args_d['outdir'],
formats=init_args_d['formats'],
fluctuate_fid=False,
fluctuate_data=False
)
postprocessor.make_systtest_plots()
HYPO_TESTING_COMMANDS = OrderedDict([
('discrete_hypo', postproc_discrete_hypo),
('inj_param_scan', postproc_inj_param_scan),
('syst_tests', postproc_syst_tests)
])
HYPO_TESTING_DESCR = (
'Process the outputs produced by pisa-analysis script'
)
HYPO_TESTING_SUBCOMMAND_STR = '\n'.join([
' {0:16s} Processes outputs of pisa-analysis {0} ...'.format(cmd)
for cmd in HYPO_TESTING_COMMANDS.keys()
])
HYPO_TESTING_USAGE = '''{0} hypo_testing [<subcommand>] [<args>]
The subcommands that can be issued are:
{1}
Run
{0} hypo_testing <subcommand> -h
to see the valid arguments for each of the above subcommands
'''.format(SCRIPT, HYPO_TESTING_SUBCOMMAND_STR)
def parse_hypo_testing_subcommand(return_outputs=False):
"""Parse command line args for hypo_testing subcommand"""
return parse_command(command_depth=1,
commands=HYPO_TESTING_COMMANDS,
description=HYPO_TESTING_DESCR,
usage=HYPO_TESTING_USAGE,
return_outputs=return_outputs)
MAIN_CMD_SPEC = dict(
commands=OrderedDict([
('hypo_testing', parse_hypo_testing_subcommand),
('profile_scan', postproc_profile_scan)
]),
description='Postprocess outputs generated by a PISA analysis.',
usage='''{0} <command> [<subcommand>] [<args>]
The commands that can be issued are:
hypo_testing Processes output from one of the hypo_testing commands.
profile_scan Processes output from profile_scan.
Run
{0} <command> -h
to see the possible subcommands/arguments to each command.'''.format(SCRIPT)
)
def main(return_outputs=False):
"""main"""
return parse_command(command_depth=0, return_outputs=return_outputs,
**MAIN_CMD_SPEC)
if __name__ == '__main__':
outputs = main(return_outputs=True) # pylint: disable=invalid-name
|
import warnings, os
# We don't want warnings in dependencies to show up in bioscrape's tests.
with warnings.catch_warnings():
warnings.simplefilter("ignore")
import numpy as np
import pylab as plt
import random
import pytest
import test_utils
from bioscrape.simulator import *
from bioscrape.types import *
# Seed RNG value. All tests use this value.
seed = 54173
# Set true to get more diagnostic prints
debug = False
# Parameter ranges to randomly choose parameters (on a log scale)
param_min = -4
param_max = 4
# parameter names required for each propensity (general will be treated by
# itself)
propensity_param_requirements = {
'massaction':['k'],
'hillpositive':['k', 'K', 'n'],
'hillnegative':['k', 'K', 'n'],
'proportionalhillpositive':["k", "K", "n"],
'proportionalhillnegative':["k", "K", "n"]
}
# species (passed in as parameters) required for each propensity (general
# will be treated by itself)
propensity_species_requirements = {
'hillpositive':['s1'],
'hillnegative':['s1'],
'proportionalhillpositive':['s1', 'd'],
'proportionalhillnegative':['s1', 'd'],
"massaction":[]
}
all_prop_types = ['hillpositive',
'proportionalhillpositive',
'hillnegative',
'proportionalhillnegative',
'massaction', 'general']
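# For example (sketch, not part of the original tests): per the requirement tables
# above, a 'hillpositive' propensity for the reaction A + B --> C would be passed
# as a tuple like
#     (["A", "B"], ["C"], "hillpositive", {"k": 1.0, "K": 5.0, "n": 2.0, "s1": "A"})
# where the numeric values are placeholders.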
TEST_NAME = "random_propensities"
def random_prop_model(prop_type):
'''
Returns a randomish model with a specified propensity type. Set to always
return the same model, for any particular propensity type.
WARNING: To produce consistent Models, this function resets the random seeds
used during Model construction. This may have unexpected effects on random
number generation outside this function as a side-effect.
'''
test_utils.set_seed(seed)
#Will always consider the reaction: A+B-->C
inputs = ["A", "B"]
outputs = ["C"]
all_species = inputs + outputs
x0 = {"A":25, "B": 25, "C":0}
if debug:
print('simulating propensity type ', prop_type)
param_dict = {}
# Here we will use a random(ish) rational function
if prop_type == 'general':
rate_str = "(1+"
numerator_terms = np.random.randint(0, 5)
denominator_terms = np.random.randint(0, 5)
for i in range(numerator_terms):
coef = str(round(np.exp(np.random.uniform(low = param_min,
high = param_max)), 3))
exp = str(round(np.random.uniform(low = 0,high = param_max), 3))
species = all_species[np.random.randint(len(all_species))]
rate_str += coef + "*" + species + "^" + exp + "+"
rate_str = rate_str[:-1] + ")"
rate_str += "/(1+"
for i in range(denominator_terms):
coef =str(round(np.exp(np.random.uniform(low = param_min,
high = param_max)), 3))
exp = str(round(np.random.uniform(low = 0,high = param_max), 3))
species = all_species[np.random.randint(len(all_species))]
rate_str += coef + "*" + species + "^" + exp + "+"
rate_str = rate_str[:-1] + ")"
param_dict['rate'] = rate_str
else:
required_params = propensity_param_requirements[prop_type]
required_species = propensity_species_requirements[prop_type]
param_dict = {}
for p in required_params:
param_dict[p] = \
round(np.exp(np.random.uniform(low = param_min,
high = param_max)), 3)
for i in range(len(required_species)):
k = required_species[i]
param_dict[k] = inputs[i]
if debug:
print('\t params =', param_dict)
rxn = (inputs, outputs, prop_type, param_dict)
M = Model(reactions = [rxn], initial_condition_dict = x0)
M.set_species(x0)
return M
# def test_debug():
# import bioscrape.sbmlutil
# bioscrape.sbmlutil.import_sbml("frozen_sbml_outputs/random_propensities/hillnegative.sbml.tmp")
@pytest.mark.parametrize('prop_type', all_prop_types)
def test_random_propensity_outputs(prop_type):
test_results = dict()
model = random_prop_model(prop_type)
timepoints = np.arange(0, 50, .01)
results_d = py_simulate_model(timepoints, Model = model, stochastic = False, return_dataframe = False).py_get_result()
results_s = py_simulate_model(timepoints, Model = model, stochastic = True, return_dataframe = False).py_get_result()
test_results[prop_type + "_deterministic"] = results_d
test_results[prop_type + "_stochastic"] = results_s
test_utils.check_sim_results(TEST_NAME, test_results)
@pytest.mark.parametrize('prop_type', all_prop_types)
def test_random_propensity_sbml(prop_type):
model_dict = dict()
model_dict[prop_type] = random_prop_model(prop_type)
test_utils.check_sbml_IO(TEST_NAME, model_dict)
def debug_random_prop_tests():
'''
This is not a test.
Plot frozen results for debugging purposes.
'''
propensity_types = ['hillpositive', 'proportionalhillpositive',
'hillnegative', 'proportionalhillnegative',
'massaction', 'general']
colors = {
'massaction':'blue',
'hillpositive': 'cyan',
'hillnegative': 'red',
'proportionalhillpositive': 'orange',
'proportionalhillnegative': 'purple',
'general': 'black'
}
test_loc = os.path.join(test_utils.frozen_results_loc, TEST_NAME)
plt.figure()
for prop_type in propensity_types:
results_d = np.load(os.path.join(test_loc,
prop_type + "_deterministic.npy"))
plt.plot(results_d[:,0], results_d[:,3],
label = "deterministic "+str(prop_type),
# +"params = "+str(param_dict),
color = colors[prop_type])
results_s = np.load(os.path.join(test_loc,
prop_type + "_stochastic.npy"))
plt.plot(results_s[:,0], results_s[:,3], ":",
label = "stochastic "+str(prop_type),
# +"params = "+str(param_dict),
color = colors[prop_type])
# plt.legend()
plt.xlabel("time")
plt.ylabel("C")
plt.legend()
plt.show()
|
#!/usr/bin/python
import utmp
from UTMPCONST import *
import time, pwd, grp, os, string, sys, socket, popen2
from stat import *
from string import lower
def getrealname(gec):
    # get real name from the gecos field
return string.split(gec,",",1)[0]
def formatidle(t):
if t<30:
return ""
if t<80:
r = "%ss" % int(t)
return r
if t<60*80:
return "%sm" % int(t/60)
if t<60*60*28:
return "%sh" % int(t/60/60)
if t<60*60*24*20:
return "%sd" % int(t/60/60/24)
return "DEAD"
def userlist(u, now, user=""):
u.setutent()
tnow = time.mktime(now)
header = 0
output = [] # list of output lines, without header
while 1:
b = u.getutent_dict()
if not b:
break
if b['ut_type'] == USER_PROCESS:
username = b['ut_user']
if user and b['ut_user']<>user:
continue
try:
pwnam = pwd.getpwnam(username)
except KeyError:
pwnam = '?'
tty = b['ut_line']
t = time.localtime(b['ut_tv'][0])
then = time.mktime(t)
if tnow<then: # login in the future?
login = "FUTURE"
elif t[7] == now[7] and t[0] == now[0]: # today
login = time.strftime("%H:%M", t)
elif tnow-then<60*60*24*7: # this week
login = time.strftime("%a", t)
elif tnow-then<60*60*24*365.: # this year
login = time.strftime("%d-%b", t)
else: # way down in the past
login = time.strftime("%Y", t)
location = b['ut_host']
tty = b['ut_line']
try:
s = os.stat("/dev/"+tty)
p = s[ST_MODE] & 060
if tnow<s[ST_ATIME]:
idle = 0
else:
idle = tnow-s[ST_ATIME]
idle = formatidle(idle)
if p:
p = ' '
else:
p = '*'
except:
p = '?'
if p == '?':
continue
            # length sanitization
username = username[:12]
#realname = realname[:22]
login = login[:6]
location = location[:30]
if not header:
#print 60*"-"
print "%-12s%-7s%-4s%-2s%-8s%-30s" % \
("USERNAME","Login","Idle","", "TTY","Location")
#print 60*"-"
header = 1
output.append( "%-12s%-7s%4s%2s%-8s%-30s" %
(username,login,idle,p,tty,location) )
output.sort()
for i in output:
print i
return output
def lastlogin(u, user):
lastlogin = 0, ""
u.setutent()
while 1:
b = u.getutent_dict()
if not b:
break
if b['ut_type'] in (USER_PROCESS, DEAD_PROCESS) and \
b['ut_user'] == user and \
b['ut_tv'][0]>lastlogin[0]:
lastlogin = b['ut_tv'][0], b['ut_host']
u = utmp.UtmpRecord(WTMP_FILE)
while 1:
b = u.getutent_dict()
if not b:
break
if b['ut_type'] in (USER_PROCESS, DEAD_PROCESS) and \
b['ut_user'] == user and \
b['ut_tv'][0]>lastlogin[0]:
lastlogin = b['ut_tv'][0], b['ut_host']
u.endutent()
return lastlogin
def userplan(homedir):
try:
f = open(homedir+"/.plan", "r")
print "Plan:"
while 1:
l = f.readline()
if not l:
break
print string.rstrip(l)
except:
pass
def oneuser(u, user):
pwent = pwd.getpwnam(user)
rn = getrealname(pwent[4])
print "Login name: %-30s In real life: %s" % (user, rn)
print " Directory: %-30s Shell: %s" % (pwent[5], pwent[6])
print " %-30s Group: [%s]" % ("", grp.getgrgid(pwent[3])[0])
l, h = lastlogin(u, user)
if not l:
print "Never logged in."
else:
r = "Last login %-30s " % time.strftime("%A, %d-%b-%Y %H:%M", time.localtime(l))
if h:
r = r+'from: '+h
print r
print
userplan(pwent[5])
print
if len(sys.argv) == 2 and "@" in sys.argv[1]: # remote
user, host = string.split(sys.argv[1], "@", 1)
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
FINGER_PORT = 79
s.connect( (host, FINGER_PORT) )
s.send(user + '\r\n')
while 1:
buf = s.recv(1024)
if not buf: break
sys.stdout.write(buf)
sys.stdout.flush()
except socket.error, why:
print "ERROR:", why
sys.exit(0)
now = time.localtime(time.time())
a = utmp.UtmpRecord()
if len(sys.argv) == 1: # list of all local users
r = userlist(a, now)
if not r:
print "No such processes."
else:
#first find out if user exists
user = sys.argv[1]
try:
pwd.getpwnam(user)
r = userlist(a, now, user)
if not r:
print '"%s" isn\'t logged in.' % user
print
oneuser(a, user)
except KeyError:
print '"%s" does not match any user of this system.' % user
a.endutent()
|
#!/usr/bin/env python3
"""
Functions to launch and manage Redis servers.
"""
import os
import random
import signal
import sys
import traceback
from multiprocessing import Process, Queue
from time import sleep
from toil.lib.bioio import logger
from cactus.shared.common import cactus_call
from cactus.pipeline.dbServerCommon import getHostName, findOccupiedPorts
MAX_REDIS_PORT = 65535
# The name of the snapshot that Redis outputs.
REDIS_SNAPSHOT_NAME = "dump.rdb"
class RedisServer:
def __init__(self, dbElem, fileStore=None, existingSnapshotID=None, snapshotExportID=None):
self.dbElem = dbElem
self.logPath = None
self.fileStore = fileStore
self.existingSnapshotID = existingSnapshotID
self.snapshotExportID = snapshotExportID
self.databaseDir = None
def runServer(self):
"""
Run a redis-server. This function launches a separate python process that manages the server.
Writing to the special key "TERMINATE" signals this thread to safely shut
down the DB and save the results. After finishing, the data will
eventually be written to snapshotFile.
Returns a tuple containing an updated version of the database config dbElem and the
path to the log file.
"""
self.databaseDir = self.fileStore.getLocalTempDir()
# log file can be saved in a subdirectory of where the snapshot is being saved
self.logPath = os.path.join(self.databaseDir, "redis.log")
open(self.logPath, 'a').close()
self.dbElem.setDbHost(getHostName())
# Find a suitable port to run on.
try:
occupiedPorts = findOccupiedPorts()
unoccupiedPorts = set(range(1025, MAX_REDIS_PORT)) - occupiedPorts
port = random.choice(list(unoccupiedPorts))
except:
logger.warning("Can't find which ports are occupied--likely netstat is not installed."
" Choosing a random port to start the DB on, good luck!")
port = random.randint(1025, MAX_REDIS_PORT)
self.dbElem.setDbPort(port)
try:
cactus_call(shell=False, parameters=['redis-server','--version'])
except:
raise RuntimeError("redis-server is not installed")
process = RedisServerProcess(self)
process.daemon = True
process.start()
if not self.blockUntilServerIsRunning():
try:
with open(self.logPath) as f:
log = f.read()
except:
log = ''
raise RuntimeError("Unable to launch redis-server in time. Log: %s" % log)
return process, self.dbElem, self.logPath
def blockUntilServerIsRunning(self, createTimeout=1800):
"""Check status until it's successful, an error is found, or we timeout.
Returns True if the redis-server is now running, False if something went wrong."""
success = False
for i in range(createTimeout):
if self.isServerFailed():
logger.critical('Error starting Redis server.')
success = False
break
if self.isServerRunning():
logger.info('Redis server running.')
success = True
break
sleep(1)
return success
def blockUntilServerIsFinished(self, timeout=1800, timeStep=10):
"""Wait for the redis-server log to indicate that it shut down properly.
        Returns True if the server shut down; raises RuntimeError if the timeout expires."""
for i in range(0, timeout, timeStep):
with open(self.logPath) as f:
log = f.read()
if 'ready to exit' in log:
return True
sleep(timeStep)
raise RuntimeError("Timeout reached while waiting for redis server.")
def isServerRunning(self):
"""Check if the server started running."""
success = False
with open(self.logPath) as f:
for line in f:
if line.lower().find("accept connections") >= 0:
success = True
return success
def isServerFailed(self):
"""Does the server log contain an error?"""
isFailed = False
with open(self.logPath) as f:
for line in f:
if line.lower().find("error") >= 0:
isFailed = True
break
return isFailed
def getTuningOptions(self):
"""Get the appropriate redis-server tuning parameters (bucket size, etc.)"""
# these are some hardcoded defaults. should think about moving to config
# TODO: check if every necessary tuning option is added (maybe to be merged with getServerOptions())
tuningOptions = "--maxmemory 1000Gb"
# override default redis-server settings if they are present in the
# experiment xml file.
if self.dbElem.getDbTuningOptions() is not None:
tuningOptions = self.dbElem.getDbTuningOptions()
if self.dbElem.getDbCreateTuningOptions() is not None:
tuningOptions = self.dbElem.getDbCreateTuningOptions()
return tuningOptions
def getServerOptions(self):
# these are some hardcoded defaults. should think about moving to config
# TODO: check if every necessary option is added
serverOptions = "--timeout 0 --databases 1 --protected-mode no --maxclients 200"
if self.dbElem.getDbServerOptions() is not None:
serverOptions = self.dbElem.getDbServerOptions()
return serverOptions
def getServerCommand(self, snapshotDir):
"""Get a redis-server command line with the proper options (in popen-type list format)."""
serverOptions = self.getServerOptions()
tuning = self.getTuningOptions()
cmd = ["redis-server", "--port", str(self.dbElem.getDbPort())]
cmd += serverOptions.split()
# Configure background snapshots, but set the interval between
# snapshots to ~ 10 days so it'll never trigger. We are only
# interested in the snapshot that the DB creates on termination.
cmd += ["--save", "", "--save", "1000000", "1000000"]
cmd += ["--dir", snapshotDir, "--dbfilename", REDIS_SNAPSHOT_NAME]
cmd += ["--logfile", 'redis.log']
cmd += tuning.split()
return cmd
def getRemoteParams(self):
"""Get parameters to supply to redis-cli to connect to the right DB."""
host = self.dbElem.getDbHost() or 'localhost'
return ['-p', str(self.dbElem.getDbPort()), '-h', host]
def stopServer(self):
"""Attempt to send the terminate signal to a redis-server."""
cactus_call(parameters=['redis-cli'] + self.getRemoteParams() + ['set', 'TERMINATE', '1'])
class RedisServerProcess(Process):
"""Independent process that babysits the redis-server process.
Waits for the TERMINATE flag to be set, then kills the DB and
copies the final snapshot to snapshotExportID.
"""
exceptionMsg = Queue()
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
super(RedisServerProcess, self).__init__()
def run(self):
"""Run the tryRun method, signaling the main thread if an exception occurs."""
try:
self.tryRun(*self.args, **self.kwargs)
except BaseException:
self.exceptionMsg.put("".join(traceback.format_exception(*sys.exc_info())))
raise
def tryRun(self, redisServer):
snapshotDir = redisServer.databaseDir
snapshotPath = os.path.join(snapshotDir, REDIS_SNAPSHOT_NAME)
if redisServer.existingSnapshotID is not None:
# Extract the existing snapshot to the snapshot
# directory so it will be automatically loaded
redisServer.fileStore.readGlobalFile(redisServer.existingSnapshotID, userPath=snapshotPath)
process = cactus_call(server=True, shell=False,
parameters=redisServer.getServerCommand(snapshotDir),
port=redisServer.dbElem.getDbPort())
redisServer.blockUntilServerIsRunning()
if redisServer.existingSnapshotID is not None:
# Clear the termination flag from the snapshot
cactus_call(parameters=["redis-cli"] + redisServer.getRemoteParams() + ["del", "TERMINATE"])
while True:
# Check for the termination signal
terminateFlag = cactus_call(parameters=["redis-cli"] + redisServer.getRemoteParams() + ["get", "TERMINATE"],
swallowStdErr=True, check_output=True)
if terminateFlag.strip() != '1':
# No terminate signal sent yet
pass
else:
# Terminate signal received
break
# Check that the DB is still alive
if process.poll() is not None or redisServer.isServerFailed():
with open(redisServer.logPath) as f:
raise RuntimeError("redis server failed. Log: %s" % f.read())
sleep(60)
process.send_signal(signal.SIGINT)
process.wait()
redisServer.blockUntilServerIsFinished()
if redisServer.snapshotExportID is not None:
if not os.path.exists(snapshotPath):
with open(redisServer.logPath) as f:
raise RuntimeError("redis-server did not leave a snapshot on termination,"
" but a snapshot was requested. Log: %s" % f.read())
# Export the snapshot file to the file store
redisServer.fileStore.jobStore.updateFile(redisServer.snapshotExportID, snapshotPath)
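# Illustrative sketch (not part of the original module): the intended lifecycle of
# RedisServer, following the runServer()/stopServer() docstrings above. `dbElem`
# and `fileStore` are placeholders for a database config element and a Toil file
# store supplied by the surrounding cactus workflow.
def _example_redis_lifecycle(dbElem, fileStore):
    server = RedisServer(dbElem, fileStore=fileStore)
    process, dbElem, logPath = server.runServer()  # launch redis-server in a child process
    try:
        # ... talk to the DB here, e.g. via 'redis-cli' plus server.getRemoteParams() ...
        pass
    finally:
        server.stopServer()                    # sets the special TERMINATE key
        server.blockUntilServerIsFinished()    # wait for the final snapshot to be written
    return process, logPath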
|
# import copy
from .Data import Data, DataDict
from .Node import Node, Operator
from .Layer import Layer
import gc
from pathlib import Path
from time import gmtime, strftime
from typing import List, Tuple, Dict
class Pipeline(Node):
"""Pipeline works with DataLayer and Layer"""
def __init__(self, *nodes: List[Node], **kwargs: dict):
super().__init__(**kwargs)
_nodes_ = []
for node in nodes:
if isinstance(node, (Node, Operator)):
_nodes_.append(node)
else:
raise Exception(f'Unknown node type = {type(node)}')
# if isinstance(node, Pipeline):
# _nodes_.extend(node.nodes)
self.nodes = _nodes_
self.layers = None
        # TODO: sort out the problem with shapes; it would be good to generate them automatically
self.shapes = kwargs['shapes']
assert len(self.shapes) == len(self.nodes), 'Data and nodes shapes do not match.'
# self.current_fit = 0
# self.current_predict = 0
def _compile_(self) -> None:
layers = []
for node, num in zip(self.nodes, self.shapes):
layer = Layer(*[node.copy for _ in range(num)])
layers.append(layer)
self.layers = layers
return None
def save(self, prefix: Path = None) -> None:
if self.layers is None:
            raise Exception('Fit your model first.')
suffix = strftime("%y_%m_%d_%H_%M_%S", gmtime())
pipeline_name = self.name + suffix
for i, layer in enumerate(self.layers):
suffix_lyr = layer.name + '_' + str(i)
prefix_lyr = prefix / pipeline_name / suffix_lyr
layer.save(prefix_lyr)
return None
def load(self, prefix: Path = None):
self._compile_()
for i, layer in enumerate(self.layers):
suffix_lyr = layer.name + '_' + str(i)
prefix_lyr = prefix / suffix_lyr
layer.load(prefix_lyr)
return None
def fit(self, x: DataDict, y: DataDict) -> Tuple[DataDict, DataDict]:
self._compile_()
for layer in self.layers:
assert len(x) == len(y) == len(layer), 'Invalid shapes.'
x, y = layer.fit(x, y)
return x, y
def predict_forward(self, x: DataDict) -> DataDict:
if self.layers is None:
            raise Exception('Fit your model first.')
for layer in self.layers:
x = layer.predict_forward(x)
return x
def predict_backward(self, y: DataDict) -> DataDict:
if self.layers is None:
            raise Exception('Fit your model first.')
for layer in self.layers[::-1]:
y = layer.predict_backward(y)
return y
def predict(self, x: DataDict) -> DataDict:
y2 = self.predict_forward(x)
y1 = self.predict_backward(y2)
return y1
def fit_predict(self, x: DataDict, y: DataDict) -> DataDict:
x2, y2 = self.fit(x, y)
y1 = self.predict_backward(y2)
# y1 = self.predict(x)
return y1
def __str__(self) -> str:
pipe = f'({self.name}: '
for node in self.nodes:
pipe += str(node)
if node != self.nodes[-1]:
pipe += ' -> '
pipe += ')'
return pipe
# def fit_step(self, x, y):
# self.current_fit += 1
# assert self.current_fit <= len(self.nodes)
# x2, y2 = self._fit_until_(x, y)
# return x2, y2
# def _fit_until_(self, x, y):
# i = self.current_fit
# assert i >= 0
# layers = []
# for node in self.nodes:
# assert len(x) == len(y)
# layer = self.next_layer(node, len(x))
# x, y = layer.fit(x, y)
# layers.append(layer)
# self.layers = layers
# return x, y
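# Illustrative sketch (not part of the original module): assembling and using a
# Pipeline, based on the fit()/predict() methods above. `node_a`, `node_b` and the
# DataDict arguments are placeholders for concrete Node implementations and data;
# the `name` keyword is assumed to be accepted by the Node base class.
def _example_pipeline_usage(node_a, node_b, x_train, y_train, x_new):
    pipe = Pipeline(node_a, node_b, name='demo', shapes=[1, 1])
    pipe.fit(x_train, y_train)   # compiles one Layer per node and fits them in order
    return pipe.predict(x_new)   # forward pass through the layers, then backward pass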
|
# woof/partitioned_producer.py
import logging
import random
from kafka import KafkaProducer
from kafka.errors import KafkaTimeoutError
from kafka.partitioner.default import DefaultPartitioner
from .common import CURRENT_PROD_BROKER_VERSION
from .transactions import make_kafka_safe
log = logging.getLogger("woof")
BATCH_SEND_DEFAULT_INTERVAL = 20
BATCH_SEND_MSG_COUNT = 32
REQUEST_TIMEOUT_MS = 2000
MAX_BLOCK_MS = 1000
class PartitionedProducer(object):
"""
use send() to send to any topic and distribute based on key
"""
def __init__(self, broker,
partitioner=None, # Note if the earlier hash is needed, need to explicitly pass dumb_hash
is_async=False,
req_acks=None, # unused - here for legacy support
ack_timeout=None, # unused - here for legacy support
codec=None,
batch_send=False,
batch_send_every_n=BATCH_SEND_MSG_COUNT,
batch_send_every_t=BATCH_SEND_DEFAULT_INTERVAL, # unused - here for legacy support
retries=3,
key_serializer=make_kafka_safe,
value_serializer=make_kafka_safe,
**kwargs):
try:
self.is_async = is_async
if partitioner is not None:
_partitioner = CustomPartitioner(partitioner)
else:
_partitioner = DefaultPartitioner()
kwargs['api_version'] = kwargs.get('api_version',
CURRENT_PROD_BROKER_VERSION)
self.prod = KafkaProducer(bootstrap_servers=broker,
key_serializer=key_serializer,
value_serializer=value_serializer,
batch_size=batch_send_every_n,
retries=retries,
partitioner=_partitioner,
request_timeout_ms=REQUEST_TIMEOUT_MS,
max_block_ms=MAX_BLOCK_MS,
**kwargs)
except Exception as e1:
log.error("[partitionedproducer log] GEN err %s \n", str(e1))
raise
def send(self, topic, key, *msg):
try:
for _msg in msg:
self.prod.send(topic, key=key, value=_msg)
# for async flush will happen in background
if not self.is_async:
self.prod.flush()
except KafkaTimeoutError as e:
log.error(
"[feedproducer log] KafkaTimeoutError err %s topic %s \n",
str(e), topic)
raise e
except Exception as e1:
log.error("[feedproducer log] GEN err %s topic %s \n", str(e1),
topic)
raise e1
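# Illustrative sketch (not part of the original module): constructing a
# PartitionedProducer and sending messages keyed by user id, so that all messages
# for the same key land on the same partition. The broker address, topic name and
# payloads are placeholders.
def _example_partitioned_send():
    producer = PartitionedProducer("localhost:9092")
    producer.send("user_events", "user-42", '{"event": "login"}', '{"event": "click"}')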
# Note if the earlier hash is needed, need to explicitly pass dumb_hash
def dumb_hash(key):
sum = 0
str_key = str(key)
for s in str_key:
sum += ord(s)
log.debug("[feedproducer log] dumb_hash , key = %s", sum)
return sum
class CustomPartitioner(object):
_hash_map = {}
def __init__(self, hasher):
CustomPartitioner._hash_map[1] = hasher
@classmethod
def __call__(cls, key, all_partitions, available):
if key is None:
if available:
return random.choice(available)
return random.choice(all_partitions)
idx = cls._hash_map[1](key)
idx &= 0x7fffffff
idx %= len(all_partitions)
return all_partitions[idx]
class CyclicPartitionedProducer(KafkaProducer):
"""
use send() to send to any topic and distribute keys cyclically in partitions
"""
def __init__(self,
broker,
is_async=True,
key_serializer=make_kafka_safe,
value_serializer=make_kafka_safe,
random_start=True,
**kwargs):
self.partition_cycles = {}
self.random_start = random_start
self.is_async = is_async
kwargs['api_version'] = kwargs.get('api_version',
CURRENT_PROD_BROKER_VERSION)
super(CyclicPartitionedProducer, self).__init__(
bootstrap_servers=broker,
key_serializer=key_serializer,
value_serializer=value_serializer,
**kwargs)
def _partition(self, topic, partition, key, value, serialized_key,
serialized_value):
if partition is not None:
assert partition >= 0
assert partition in self._metadata.partitions_for_topic(
topic), 'Unrecognized partition'
return partition
all_partitions = list(self._metadata.partitions_for_topic(topic))
n_partitions = len(all_partitions)
try:
offset = (self.partition_cycles[topic] + 1) % n_partitions
except:
if self.random_start:
offset = random.randint(0, n_partitions - 1)
else:
offset = 0
self.partition_cycles[topic] = offset
return all_partitions[offset]
def send(self, topic, key, *msg):
try:
for _msg in msg:
super(CyclicPartitionedProducer, self).send(topic,
key=key,
value=_msg)
# for async flush will happen in background
if not self.is_async:
                self.flush()
except KafkaTimeoutError as e:
log.error(
"[feedproducer log] KafkaTimeoutError err %s topic %s \n",
str(e), topic)
raise e
except Exception as e1:
log.error("[feedproducer log] GEN err %s topic %s \n", str(e1),
topic)
raise e1
|
#!/usr/bin/env python
# Copyright (C) 2012, Code for America
# This is open source software, released under a standard 3-clause
# BSD-style license; see the file LICENSE for details.
import os
import math
import datetime
import smtplib
from email.mime.text import MIMEText
from threading import Thread
from optparse import OptionParser
import logging
from collections import defaultdict
import imp
import requests
import dateutil
from dateutil.parser import parse as parse_date
from db import DB
from models import Subscription, UpdateInfoItem, Base
# Default configuration
DEFAULT_CONFIG_PATH = os.path.join(os.path.dirname(__file__), 'configuration.py')
DEFAULT_NOTIFIERS_DIR = os.path.join(os.path.dirname(__file__), 'notifiers')
DEFAULT_TEMPLATE_PATH = os.path.join(os.path.dirname(__file__), 'templates')
# Max number of SRs to return per request (per spec it's 50)
SR_INFO_CHUNK_SIZE = 50
# logging
logger = logging.getLogger(__name__)
logger.addHandler(logging.StreamHandler())
# These will be set by configure()
config = None
db = None
def config_from_file(path, base_configuration=None):
'''Load a configuration dictionary from a file path.
    This is basically the same as config.from_pyfile in Flask.
This version exists so we don't have the whole Flask dependency in updater.
One minor difference - second param is a basic configuration dictionary to update
rather than a silent switch.'''
config_module = imp.new_module('config')
config_module.__file__ = path
try:
execfile(path, config_module.__dict__)
except IOError, e:
e.strerror = 'Unable to load configuration file (%s)' % e.strerror
raise
results = base_configuration or {}
for key in dir(config_module):
if key.isupper():
results[key] = getattr(config_module, key)
return results
def configure(path=None):
global config, db
if not path:
path = os.path.abspath(os.environ.get('UPDATER_CONFIGURATION', os.environ.get('SRTRACKER_CONFIGURATION', DEFAULT_CONFIG_PATH)))
config = config_from_file(path)
# Where to get notification plugins
config['NOTIFIERS_DIR'] = os.path.abspath(config.get('NOTIFIERS_DIR', DEFAULT_NOTIFIERS_DIR))
# Set default template path
config['TEMPLATE_PATH'] = os.path.abspath(config.get('TEMPLATE_PATH', DEFAULT_TEMPLATE_PATH))
db = DB(config['DB_STRING'])
# Paths for templating and linking
if not config['SRTRACKER_URL'].endswith('/'):
config['SRTRACKER_URL'] = config['SRTRACKER_URL'] + '/'
if 'SR_DETAILS_URL' not in config:
config['SR_DETAILS_URL'] = '%srequests/{sr_id}' % config['SRTRACKER_URL']
if 'SR_TRACKER_IMG' not in config:
config['SR_TRACKER_IMG'] = '%sstatic/img/' % config['SRTRACKER_URL']
if 'SR_UNSUBSCRIBE_URL' not in config:
config['SR_UNSUBSCRIBE_URL'] = '%sunsubscribe/{key}' % config['SRTRACKER_URL']
# FIXME: start using this
utczone = dateutil.tz.tzutc()
def parse_date_utc(date_string):
'''Returns a naive date in UTC representing the passed-in date string.'''
parsed = parse_date(date_string)
if parsed.tzinfo:
        parsed = parsed.astimezone(utczone).replace(tzinfo=None)
    return parsed
def get_updates(since):
url = '%s/requests.json' % config['OPEN311_SERVER']
params = {
'updated_after': since.isoformat(),
'page_size': config['OPEN311_PAGE_SIZE'],
'extensions': 'true'
}
if config['OPEN311_API_KEY']:
params['api_key'] = config['OPEN311_API_KEY']
# paging starts at 1 (not 0)
page = 1
results = []
while page:
params['page'] = page
request = requests.get(url, params=params)
if request.status_code == requests.codes.ok:
result = request.json
results.extend(result)
page = len(result) > 0 and page + 1 or 0
else:
# TODO: raise exception?
break
return results
def updated_srs_by_time():
updates = []
with db() as session:
last_update_info = session.query(UpdateInfoItem).filter(UpdateInfoItem.key == 'date').first()
# Bail out if we don't actually have any subscriptions
if not session.query(Subscription).first():
# but first we should clear out the last updated time
if last_update_info:
session.delete(last_update_info)
# TODO: should we raise an exception here instead?
return updates
# add 1 second to the time so we don't grab the latest previous result even if it wasn't updated
last_update_date = parse_date(last_update_info.value) + datetime.timedelta(seconds=1)
srs = get_updates(last_update_date)
# actually find the updated subscriptions
latest_update = None
for sr in srs:
# Some SRs may come back without a service_request_id if the SR was
# of the "batch" type (which should have a "token")
if 'service_request_id' in sr:
updated_subscriptions = session.query(Subscription).filter(Subscription.sr_id == sr['service_request_id'])
for subscription in updated_subscriptions:
updates.append((subscription.method, subscription.contact, subscription.key, sr))
if sr['status'] == 'closed':
session.delete(subscription)
# track the latest update time so we know when to start from next time we poll
sr_update_time = parse_date(sr['updated_datetime'])
if latest_update == None or latest_update < sr_update_time:
latest_update = sr_update_time
# in case of systems that are slow to update or batch updates (e.g. nightly),
# don't update the last update time unless we actually got some results
# and set the last update time to the most recent SR we received
if latest_update:
last_update_info.value = latest_update.isoformat()
return updates
def send_notifications(notifications):
# split up notifications by method
by_method = defaultdict(list)
for notification in notifications:
by_method[notification[0]].append(notification)
notifiers = get_notifiers()
for method, notes in by_method.iteritems():
if method in notifiers:
for notifier in notifiers[method]:
logger.debug('Sending %d notifications via %s', len(notes), notifier.__name__)
notifier.send_notifications(notes, config)
else:
logger.error('No notifier for "%s" - skipping %d notifications', method, len(notes))
def poll_and_notify():
logger.debug('Getting updates from Open311...')
notifications = updated_srs_by_time()
logger.debug('Sending %d notifications...', len(notifications))
# Need to unhardcode "email" updates so we can support things like SMS, Twitter, etc.
# Should break up the list by update method and have a thread pool for each
if config['THREADED_UPDATES']:
notification_count = len(notifications)
max_threads = config['EMAIL_MAX_THREADS']
per_thread = int(math.ceil(float(notification_count) / max_threads))
threads = []
# Create threads
for i in range(max_threads):
thread_notifications = notifications[i * per_thread:(i + 1) * per_thread]
if len(thread_notifications):
thread = Thread(target=send_notifications, args=(thread_notifications,))
thread.start()
threads.append(thread)
# Wait for threads to finish
for thread in threads:
thread.join()
else:
send_notifications(notifications)
def get_notifiers():
notifiers = defaultdict(list) # organized by type
for file_name in os.listdir(config['NOTIFIERS_DIR']):
module_name, ext = os.path.splitext(file_name)
if ext == '.py' or os.path.isdir(os.path.join(config['NOTIFIERS_DIR'], file_name)):
# Warning: this will raise ImportError if the file isn't importable (that's a good thing)
module_info = imp.find_module(module_name, [config['NOTIFIERS_DIR']])
module = None
try:
module = imp.load_module(module_name, *module_info)
finally:
# find_module opens the module's file, so be sure to close it here (!)
if module_info[0]:
module_info[0].close()
if module:
logger.debug('Loading notifier: "%s"' % module.__name__)
method = 'NOTIFICATION_METHOD' in dir(module) and module.NOTIFICATION_METHOD or module_name
if 'send_notifications' not in dir(module):
logger.warning('Notifier "%s" not loaded - Notifiers must implement the function send_notifications(notifications, options)' % module_name)
else:
notifiers[method].append(module)
return notifiers
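# Illustrative sketch (not part of this module): the minimal shape of a notifier
# plugin as discovered by get_notifiers() above, i.e. a .py file (or package) in
# NOTIFIERS_DIR that defines send_notifications(notifications, config) and,
# optionally, NOTIFICATION_METHOD. Shown as comments because it would live in its
# own file; all names are placeholders.
#
#     NOTIFICATION_METHOD = 'email'
#
#     def send_notifications(notifications, config):
#         for method, contact, key, sr in notifications:
#             pass  # deliver an update about service request `sr` to `contact`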
def subscribe(request_id, method, address):
'''
    Create a new subscription to the request identified by request_id.
@param request_id: The request to subscribe to
@param method: The type of subscription (e.g. 'email' or 'sms')
    @param address: The address to send updates to (e.g. '<EMAIL>' or '63055512345')
'''
# TODO: validate the subscription by seeing if the request_id exists via Open311?
with db() as session:
subscription = get_subscription(request_id, method, address)
if subscription:
return subscription.key
else:
subscription = Subscription(
sr_id=request_id,
method=method,
contact=address)
session.add(subscription)
# If we haven't ever updated, set the last update date
last_update_info = session.query(UpdateInfoItem).filter(UpdateInfoItem.key == 'date').first()
if not last_update_info:
# TODO: get the SR's updated_datetime and use that
session.add(UpdateInfoItem(key='date', value=datetime.datetime.now()))
return subscription.key
return False
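# Illustrative sketch (not part of the original module): subscribing an email
# address to updates for a service request and later removing it by key. The
# request id and address are placeholders.
def _example_subscription_roundtrip():
    key = subscribe('12-00001234', 'email', 'resident@example.com')
    if key and subscription_exists('12-00001234', 'email', 'resident@example.com'):
        unsubscribe_with_key(key)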
def get_subscription(request_id, method, address):
'''
Get the subscription associated with a given request_id, method, and address
@param request_id: The request to subscribe to
@param method: The type of subscription (e.g. 'email' or 'sms')
    @param address: The address to send updates to (e.g. '<EMAIL>' or '63055512345')
'''
with db() as session:
existing = session.query(Subscription).\
filter(Subscription.sr_id == request_id).\
filter(Subscription.method == method).\
filter(Subscription.contact == address).\
first()
return existing
def subscription_exists(request_id, method, address):
'''
Check whether a subscription already exists for the given request id with the specified method and address.
@param request_id: The request to subscribe to
@param method: The type of subscription (e.g. 'email' or 'sms')
    @param address: The address to send updates to (e.g. '<EMAIL>' or '63055512345')
'''
return get_subscription(request_id, method, address) != None
def subscription_for_key(unique_id):
'''
Get a subscription object associated with a given unique key.
'''
with db() as session:
subscription = session.query(Subscription).filter(Subscription.key == unique_id).first()
return subscription
return None
def unsubscribe(request_id, method, address):
'''
Remove a subscription if it exists
@param request_id: The request to subscribe to
@param method: The type of subscription (e.g. 'email' or 'sms')
    @param address: The address to send updates to (e.g. '<EMAIL>' or '63055512345')
'''
with db() as session:
existing = session.query(Subscription).\
filter(Subscription.sr_id == request_id).\
filter(Subscription.method == method).\
filter(Subscription.contact == address).\
first()
if existing:
session.delete(existing)
return True
return False
def unsubscribe_with_key(unique_id):
'''
Remove a subscription with a given key if it exists.
Returns true if the subscription existed and was removed and false otherwise.
@param unique_id: The key for the subscription to remove
'''
with db() as session:
subscription = session.query(Subscription).filter(Subscription.key == unique_id).first()
if subscription:
session.delete(subscription)
return True
return False
def initialize():
with db() as session:
# Ensure we have a last updated date
last_update_info = session.query(UpdateInfoItem).filter(UpdateInfoItem.key == 'date').first()
a_subscription = session.query(Subscription).first()
if a_subscription and not last_update_info:
# this is an invalid state! Could raise an error, but just attempt to repair for now
# default to 12am this morning for endpoints that update daily
start_date = datetime.datetime.combine(datetime.date.today(), datetime.time())
session.add(UpdateInfoItem(key='date', value=start_date))
logger.warning('Found a subscription but no last updated time.\nSetting last update to %s', start_date)
def initialize_db():
with db() as session:
db.create(Base)
try:
session.execute('ALTER TABLE subscriptions ADD key character varying')
session.execute('CREATE UNIQUE INDEX ON subscriptions (key)')
except:
print 'Failed to add "key" column to subscriptions. It is probably already present.'
finally:
session.commit()
print 'Adding keys for any subscriptions without them...'
added_keys = 0
for subscription in session.query(Subscription).all():
if not subscription.key:
subscription.key = subscription.generate_uuid()
added_keys += 1
print 'Added %d keys.' % added_keys
if __name__ == "__main__":
parser = OptionParser()
parser.add_option("-i", "--initialize", dest="initialize_db", action="store_true", help="Initialize the database.")
parser.add_option("-c", "--config", dest="config_path", help="Path to a configuration file.")
# parser.add_option("-d", "--date", dest="start_date", help="Start datetime in the format 'YYYY-MM-DDTHH:MM:SS'", default=None)
(options, args) = parser.parse_args()
configure(options.config_path)
if options.initialize_db:
initialize_db()
else:
initialize()
poll_and_notify()
else:
config = configure()
|
# /*
# * Copyright 2010-2016 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# *
# * Licensed under the Apache License, Version 2.0 (the "License").
# * You may not use this file except in compliance with the License.
# * A copy of the License is located at
# *
# * http://aws.amazon.com/apache2.0
# *
# * or in the "license" file accompanying this file. This file is distributed
# * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# * express or implied. See the License for the specific language governing
# * permissions and limitations under the License.
# */
# This class implements the progressive backoff logic for auto-reconnect.
# It manages the reconnect wait time for the current reconnect, controlling
# when to increase it and when to reset it.
import time
import threading
import logging
class progressiveBackoffCore:
# Logger
_logger = logging.getLogger(__name__)
def __init__(self, srcBaseReconnectTimeSecond=1, srcMaximumReconnectTimeSecond=32, srcMinimumConnectTimeSecond=20):
# The base reconnection time in seconds, default 1
self._baseReconnectTimeSecond = srcBaseReconnectTimeSecond
# The maximum reconnection time in seconds, default 32
self._maximumReconnectTimeSecond = srcMaximumReconnectTimeSecond
        # The minimum time in seconds that a connection must be maintained in order to be considered stable
# Default 20
self._minimumConnectTimeSecond = srcMinimumConnectTimeSecond
        # Current backOff time in seconds, initialized to 1
self._currentBackoffTimeSecond = 1
# Handler for timer
self._resetBackoffTimer = None
# For custom progressiveBackoff timing configuration
def configTime(self, srcBaseReconnectTimeSecond, srcMaximumReconnectTimeSecond, srcMinimumConnectTimeSecond):
if srcBaseReconnectTimeSecond < 0 or srcMaximumReconnectTimeSecond < 0 or srcMinimumConnectTimeSecond < 0:
self._logger.error("init: Negative time configuration detected.")
raise ValueError("Negative time configuration detected.")
if srcBaseReconnectTimeSecond >= srcMinimumConnectTimeSecond:
self._logger.error("init: Min connect time should be bigger than base reconnect time.")
raise ValueError("Min connect time should be bigger than base reconnect time.")
self._baseReconnectTimeSecond = srcBaseReconnectTimeSecond
self._maximumReconnectTimeSecond = srcMaximumReconnectTimeSecond
self._minimumConnectTimeSecond = srcMinimumConnectTimeSecond
self._currentBackoffTimeSecond = 1
# Block the reconnect logic for _currentBackoffTimeSecond
# Update the currentBackoffTimeSecond for the next reconnect
# Cancel the in-waiting timer for resetting backOff time
# This should get called only when a disconnect/reconnect happens
def backOff(self):
self._logger.debug("backOff: current backoff time is: " + str(self._currentBackoffTimeSecond) + " sec.")
if self._resetBackoffTimer is not None:
# Cancel the timer
self._resetBackoffTimer.cancel()
# Block the reconnect logic
time.sleep(self._currentBackoffTimeSecond)
# Update the backoff time
if self._currentBackoffTimeSecond == 0:
# This is the first attempt to connect, set it to base
self._currentBackoffTimeSecond = self._baseReconnectTimeSecond
else:
# r_cur = min(2^n*r_base, r_max)
self._currentBackoffTimeSecond = min(self._maximumReconnectTimeSecond, self._currentBackoffTimeSecond * 2)
# Start the timer for resetting _currentBackoffTimeSecond
# Will be cancelled upon calling backOff
def startStableConnectionTimer(self):
self._resetBackoffTimer = threading.Timer(self._minimumConnectTimeSecond, self._connectionStableThenResetBackoffTime)
self._resetBackoffTimer.start()
def stopStableConnectionTimer(self):
if self._resetBackoffTimer is not None:
# Cancel the timer
self._resetBackoffTimer.cancel()
# Timer callback to reset _currentBackoffTimeSecond
# If the connection is stable for longer than _minimumConnectTimeSecond,
# reset the currentBackoffTimeSecond to _baseReconnectTimeSecond
def _connectionStableThenResetBackoffTime(self):
self._logger.debug("stableConnection: Resetting the backoff time to: " + str(self._baseReconnectTimeSecond) + " sec.")
self._currentBackoffTimeSecond = self._baseReconnectTimeSecond
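# Illustrative sketch (not part of the original file): how a reconnect loop might
# drive progressiveBackoffCore. `attempt_connect` is a placeholder for the
# caller's own connect routine, assumed to return True on success.
def _example_reconnect_loop(attempt_connect):
    backoff = progressiveBackoffCore(srcBaseReconnectTimeSecond=1,
                                     srcMaximumReconnectTimeSecond=32,
                                     srcMinimumConnectTimeSecond=20)
    while not attempt_connect():
        backoff.backOff()                 # sleeps 1s, 2s, 4s, ... capped at 32s
    backoff.startStableConnectionTimer()  # resets to the base time after 20s of stable connection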
|
"""Unit tests for orbitpy.mission module.
The following tests cover the different ways in which the mission can be specified in the JSON string and executed.
TODO: In each test, the output is compared against results computed in July 2021 (thus representing the "truth" data). The truth data is expected to be present in the folder ``test_data``.
**Tests:**
* ``test_scenario_1``: 1 satellite, no instrument ; propagation only, auto/ custom-time-step. The mission epoch is same, different from the satellite orbit-state date.
* ``test_scenario_2``: 1 satellite, 1 instrument ; propagation (custom time-step), grid-coverage (2 (auto) grids, default and custom-grid res), data-metrics calculation.
* ``test_scenario_3``: 1 satellite, 1 instrument ; propagation, pointing-options coverage, data-metrics calculation, contact-finder (ground-station only).
* ``test_scenario_4``: 1 satellite, 1 instrument ; propagation, pointing-options with grid-coverage, data-metrics calculation, contact-finder (ground-station only).
* ``test_scenario_5``: 1 satellite, multiple ground-stations ; propagation, contact-finder (ground-station only).
* ``test_scenario_6``: Multiple satellites from constellation; propagation, contact-finder (ground-station, inter-satellite).
* ``test_scenario_7``: Multiple satellites from constellation, single-instrument per satellite ; propagation, pointing-options-coverage, data-metrics calculation, contact-finder (inter-satellite only).
* TODO ``test_scenario_8``: Multiple satellites from list, multiple instruments per satellite, multiple ground-stations ; propagation, grid-coverage, data-metrics calculation, contact-finder (ground-station and inter-satellite).
"""
import os, shutil
import unittest
import pandas as pd
import orbitpy
from orbitpy.mission import Mission
from orbitpy.propagator import J2AnalyticalPropagator
from orbitpy.util import Spacecraft, GroundStation
class TestSettings(unittest.TestCase): #TODO
pass
class TestMission(unittest.TestCase):
@classmethod
def setUpClass(cls):
# Create new working directory to store output of all the class functions.
cls.dir_path = os.path.dirname(os.path.realpath(__file__))
cls.out_dir = os.path.join(cls.dir_path, 'temp')
if os.path.exists(cls.out_dir):
shutil.rmtree(cls.out_dir)
os.makedirs(cls.out_dir)
def test_scenario_1(self):
""" 1 satellite, no instrument ; propagation only, auto, custom-time-step. The mission epoch is same, different from the satellite orbit-state date.
"""
# auto propagation step-size, mission-date different from spacecraft orbitstate date
mission_json_str = '{ "epoch":{"@type":"GREGORIAN_UTC", "year":2021, "month":3, "day":25, "hour":15, "minute":6, "second":8}, \
"duration": 0.1, \
"spacecraft": { \
"spacecraftBus":{"orientation":{"referenceFrame": "NADIR_POINTING", "convention": "REF_FRAME_ALIGNED"} \
}, \
"orbitState": {"date":{"@type":"GREGORIAN_UTC", "year":2021, "month":2, "day":25, "hour":6, "minute":0, "second":0}, \
"state":{"@type": "KEPLERIAN_EARTH_CENTERED_INERTIAL", "sma": 6878.137, "ecc": 0.001, "inc": 45, "raan": 35, "aop": 145, "ta": -25} \
} \
}, \
"settings": {"outDir": "temp/", "opaque_atmos_height":30} \
}'
mission = Mission.from_json(mission_json_str)
self.assertAlmostEqual(mission.epoch.GetJulianDate(), 2459299.1292592594)
self.assertAlmostEqual(mission.duration, 0.1)
self.assertEqual(len(mission.spacecraft), 1)
self.assertAlmostEqual(mission.spacecraft[0], Spacecraft.from_dict({ "spacecraftBus":{"orientation":{"referenceFrame": "NADIR_POINTING", "convention": "REF_FRAME_ALIGNED"}},
"orbitState": {"date":{"@type":"GREGORIAN_UTC", "year":2021, "month":2, "day":25, "hour":6, "minute":0, "second":0},
"state":{"@type": "KEPLERIAN_EARTH_CENTERED_INERTIAL", "sma": 6878.137, "ecc": 0.001, "inc": 45, "raan": 35, "aop": 145, "ta": -25}}
}))
self.assertEqual(mission.propagator, J2AnalyticalPropagator.from_dict({"@type": "J2 ANALYTICAL PROPAGATOR", "stepSize": 173.31598026839598})) # corresponds to time-step calculated considering horizon angle = 136.0373... deg and time-resolution factor = 0.25
self.assertIsNone(mission.grid)
self.assertIsNone(mission.groundStation)
self.assertAlmostEqual(mission.propagator.stepSize, 173.31598026839598)
self.assertEqual(mission.settings.outDir, "temp/")
self.assertIsNone(mission.settings.coverageType)
self.assertEqual(mission.settings.propTimeResFactor, 0.25)
self.assertEqual(mission.settings.gridResFactor, 0.9)
self.assertEqual(mission.settings.opaque_atmos_height, 30)
out_info = mission.execute()
# custom propagation step-size, mission-date same as spacecraft orbitstate date, custom propTimeResFactor = 1/8
mission_json_str = '{ "epoch":{"@type":"GREGORIAN_UTC", "year":2021, "month":2, "day":25, "hour":6, "minute":0, "second":0}, \
"duration": 0.1, \
"spacecraft": { \
"spacecraftBus":{"orientation":{"referenceFrame": "NADIR_POINTING", "convention": "REF_FRAME_ALIGNED"} \
}, \
"orbitState": {"date":{"@type":"GREGORIAN_UTC", "year":2021, "month":2, "day":25, "hour":6, "minute":0, "second":0}, \
"state":{"@type": "KEPLERIAN_EARTH_CENTERED_INERTIAL", "sma": 6878.137, "ecc": 0.001, "inc": 45, "raan": 35, "aop": 145, "ta": -25} \
} \
}, \
"settings": {"outDir": "temp/", "propTimeResFactor": 0.125} \
}'
mission = Mission.from_json(mission_json_str)
self.assertAlmostEqual(mission.epoch.GetJulianDate(), 2459270.75)
self.assertAlmostEqual(mission.propagator, J2AnalyticalPropagator.from_dict({"@type": "J2 ANALYTICAL PROPAGATOR", "stepSize": 86.657990134197990})) # corresponds to time-step calculated considering horizon angle = 136.0373... deg and time-resolution factor = 1/8
self.assertEqual(mission.settings.propTimeResFactor, 1/8)
out_info = mission.execute()
def test_scenario_2(self):
""" 1 satellite, 1 instrument ; propagation (custom time-step), (field-of-regard) grid-coverage (2 (auto) grids, default and custom-grid res), basic-sensor data-metrics calculation.
"""
# check warnings are issued.
mission_json_str = '{ "epoch":{"@type":"GREGORIAN_UTC", "year":2021, "month":2, "day":25, "hour":6, "minute":0, "second":0}, \
"duration": 0.1, \
"spacecraft": { \
"spacecraftBus":{"orientation":{"referenceFrame": "NADIR_POINTING", "convention": "REF_FRAME_ALIGNED"} \
}, \
"orbitState": {"date":{"@type":"GREGORIAN_UTC", "year":2021, "month":2, "day":25, "hour":6, "minute":0, "second":0}, \
"state":{"@type": "KEPLERIAN_EARTH_CENTERED_INERTIAL", "sma": 6878.137, "ecc": 0.001, "inc": 45, "raan": 35, "aop": 145, "ta": -25} \
}, \
"instrument": { "orientation": {"referenceFrame": "SC_BODY_FIXED", "convention": "REF_FRAME_ALIGNED"}, \
"fieldOfViewGeometry": {"shape": "CIRCULAR", "diameter":15 }, \
"maneuver":{"maneuverType": "CIRCULAR", "diameter":10}, \
"@id":"bs1", "@type":"Basic Sensor" \
} \
}, \
"propagator": {"@type": "J2 ANALYTICAL PROPAGATOR", "stepSize": 200}, \
"settings": {"outDir": "temp/", "coverageType": "GRID COVERAGE"} \
}'
with self.assertWarns(Warning): # check for warning that user specified step-size is greater than auto-calculated step-size.
mission = Mission.from_json(mission_json_str)
mission = Mission.from_json(mission_json_str)
with self.assertWarns(Warning): # check for warning that grid has not been specified.
out_info = mission.execute()
# check execution with single grid.
mission_json_str = '{ "epoch":{"@type":"GREGORIAN_UTC", "year":2018, "month":5, "day":15, "hour":12, "minute":12, "second":12}, \
"duration": 0.5, \
"spacecraft": [{ \
"spacecraftBus":{"orientation":{"referenceFrame": "NADIR_POINTING", "convention": "REF_FRAME_ALIGNED"} \
}, \
"orbitState": {"date":{"@type":"GREGORIAN_UTC", "year":2018, "month":5, "day":15, "hour":12, "minute":12, "second":12}, \
"state":{"@type": "KEPLERIAN_EARTH_CENTERED_INERTIAL", "sma": 6878.137, "ecc": 0.001, "inc": 30, "raan": 35, "aop": 145, "ta": -25} \
}, \
"instrument": { "orientation": {"referenceFrame": "SC_BODY_FIXED", "convention": "REF_FRAME_ALIGNED"}, \
"fieldOfViewGeometry": {"shape": "CIRCULAR", "diameter":15 }, \
"maneuver":{"maneuverType": "CIRCULAR", "diameter":10}, \
"@id":"bs1", "@type":"Basic Sensor" \
} \
}], \
"propagator": {"@type": "J2 ANALYTICAL PROPAGATOR", "stepSize": 60}, \
"grid": [{"@type": "autogrid", "@id": "cus", "latUpper":2, "latLower":0, "lonUpper":180, "lonLower":-180}, {"@type": "autogrid", "@id": "auto", "latUpper":20, "latLower":0, "lonUpper":180, "lonLower":-180, "gridRes": 1}], \
"settings": {"outDir": "temp/", "coverageType": "GRID COVERAGE", "gridResFactor": 0.5} \
}'
mission = Mission.from_json(mission_json_str)
self.assertEqual(mission.propagator, J2AnalyticalPropagator.from_dict({"@type": "J2 ANALYTICAL PROPAGATOR", "stepSize": 60}))
self.assertEqual(len(mission.grid), 2)
# 0.5917400590151374 is the grid-resolution calculated for the 15 deg FOV sensor at altitude of 500km and gridResFactor = 0.5
self.assertEqual(mission.grid[0].num_points, 1820) # ~ 4*pi/ (0.5917400590151374*pi/180 * 0.5917400590151374*pi/180) * ((2*pi)*(2*pi/180))/(4*pi)
# 1 deg grid resolution is input in the specifications
self.assertEqual(mission.grid[1].num_points, 7402) # ~ 4*pi/ (pi/180 * pi/180) * ((2*pi)*(20*pi/180)/(4*pi))
out_info = mission.execute()
def test_scenario_3(self):
""" 1 satellite, 1 instrument ; propagation, pointing-options coverage, basic-sensor data-metrics calculation.
"""
mission_json_str = '{ "epoch":{"@type":"GREGORIAN_UTC", "year":2021, "month":3, "day":12, "hour":23, "minute":12, "second":12}, \
"duration": 0.05, \
"spacecraft": { \
"spacecraftBus":{"orientation":{"referenceFrame": "NADIR_POINTING", "convention": "REF_FRAME_ALIGNED"} \
}, \
"orbitState": {"date":{"@type":"GREGORIAN_UTC", "year":2021, "month":2, "day":15, "hour":12, "minute":12, "second":12}, \
"state":{"@type": "KEPLERIAN_EARTH_CENTERED_INERTIAL", "sma": 7078.137, "ecc": 0.001, "inc": 98, "raan": 35, "aop": 145, "ta": -25} \
}, \
"instrument": [{ "orientation": {"referenceFrame": "SC_BODY_FIXED", "convention": "REF_FRAME_ALIGNED"}, \
"fieldOfViewGeometry": {"shape": "rectangular", "angleHeight":15, "angleWidth":10 }, \
"pointingOption":[{"referenceFrame": "NADIR_POINTING", "convention": "SIDE_LOOK", "sideLookAngle":0}, \
{"referenceFrame": "NADIR_POINTING", "convention": "SIDE_LOOK", "sideLookAngle":-15}, \
{"referenceFrame": "NADIR_POINTING", "convention": "SIDE_LOOK", "sideLookAngle":15}], \
"@id":"bs1", "@type":"Basic Sensor" \
}] \
}, \
"settings": {"outDir": "temp/", "coverageType": "POINTING OPTIONS COVERAGE"} \
}'
mission = Mission.from_json(mission_json_str)
self.assertAlmostEqual(mission.epoch.GetJulianDate(), 2459286.4668055554)
self.assertEqual(mission.propagator, J2AnalyticalPropagator.from_dict({"@type": "J2 ANALYTICAL PROPAGATOR", "stepSize": 6.820899943040534}))
out_info = mission.execute()
def test_scenario_4(self):
""" 1 satellite, 1 SAR instrument ; propagation, pointing-options with grid-coverage and access-file correction (since sidelooking instrument with narrow At-fov), SAR data-metrics calculation.
Using default propagation step-size and grid-resolution. The scene FOV has angleWidth = instrument FOV angleWidth. The scene FOV angleHeight is larger to allow for coarser propagation step-size and grid-resolution.
"""
mission_json_str = '{ "epoch":{"@type":"GREGORIAN_UTC", "year":2018, "month":5, "day":15, "hour":12, "minute":12, "second":12}, \
"duration": 0.1, \
"spacecraft": { \
"spacecraftBus":{"orientation":{"referenceFrame": "NADIR_POINTING", "convention": "REF_FRAME_ALIGNED"} \
}, \
"orbitState": {"date":{"@type":"GREGORIAN_UTC", "year":2018, "month":5, "day":15, "hour":12, "minute":12, "second":12}, \
"state":{"@type": "KEPLERIAN_EARTH_CENTERED_INERTIAL", "sma": 7078.137, "ecc": 0.001, "inc": 98, "raan": 35, "aop": 145, "ta": -25} \
}, \
"instrument": { "@type": "Synthetic Aperture Radar", "@id": "sar1", \
"orientation": { "convention": "SIDE_LOOK", "sideLookAngle": 20.5 }, \
"antenna":{"shape": "RECTANGULAR", "height": 10.7, "width": 2.16, "apertureEfficiency": 0.6, "apertureExcitation": "UNIFORM"}, \
"sceneFieldOfViewGeometry": {"shape": "RECTANGULAR", "angleHeight":5, "angleWidth":6.233630110575892}, \
"pulseWidth": 33.4e-6, \
"operatingFrequency": 1.2757e9, "peakTransmitPower": 1000, "chirpBandwidth": 19e6, \
"minimumPRF": 1463, "maximumPRF": 1686, "radarLoss": 3.5, "systemNoiseFigure": 5.11, \
"pointingOption":[{"referenceFrame": "NADIR_POINTING", "convention": "SIDE_LOOK", "sideLookAngle":-20.5}, \
{"referenceFrame": "NADIR_POINTING", "convention": "SIDE_LOOK", "sideLookAngle":20.5}] \
} \
}, \
"grid": [{"@type": "autogrid", "@id": 1, "latUpper":2, "latLower":0, "lonUpper":180, "lonLower":-180}, {"@type": "autogrid", "@id": 2, "latUpper":22, "latLower":20, "lonUpper":180, "lonLower":-180}], \
"settings": {"outDir": "temp/", "coverageType": "POINTING OPTIONS WITH GRID COVERAGE"} \
}'
mission = Mission.from_json(mission_json_str)
self.assertAlmostEqual(mission.epoch.GetJulianDate(), 2458254.0084722224)
self.assertEqual(mission.propagator, J2AnalyticalPropagator.from_dict({"@type": "J2 ANALYTICAL PROPAGATOR", "stepSize": 2.2600808214710266}))
self.assertEqual(mission.grid[0].num_points, 2906)
self.assertEqual(mission.grid[1].num_points, 2710)
out_info = mission.execute()
def test_scenario_5(self):
""" 1 satellite, multiple ground-stations ; propagation, contact-finder (ground-station only).
"""
mission_json_str = '{ "epoch":{"@type":"GREGORIAN_UTC", "year":2021, "month":3, "day":25, "hour":15, "minute":6, "second":8}, \
"duration": 0.5, \
"spacecraft": { \
"spacecraftBus":{"orientation":{"referenceFrame": "NADIR_POINTING", "convention": "REF_FRAME_ALIGNED"} \
}, \
"orbitState": {"date":{"@type":"GREGORIAN_UTC", "year":2021, "month":2, "day":25, "hour":6, "minute":0, "second":0}, \
"state":{"@type": "KEPLERIAN_EARTH_CENTERED_INERTIAL", "sma": 6878.137, "ecc": 0.001, "inc": 98, "raan": 35, "aop": 145, "ta": -25} \
} \
}, \
"groundStation":[{"name": "TrollSAR", "latitude": -72.0029, "longitude": 2.5257, "altitude":0}, \
{"name": "CONAE", "latitude": -31.52, "longitude": -64.46, "altitude":0}], \
"settings": {"outDir": "temp/"} \
}'
mission = Mission.from_json(mission_json_str)
self.assertEqual(len(mission.groundStation), 2)
self.assertIsInstance(mission.groundStation[0], GroundStation)
self.assertEqual(mission.groundStation[0], GroundStation.from_dict({"name": "TrollSAR", "latitude": -72.0029, "longitude": 2.5257, "altitude":0}))
self.assertIsInstance(mission.groundStation[1], GroundStation)
self.assertEqual(mission.groundStation[1], GroundStation.from_dict({"name": "CONAE", "latitude": -31.52, "longitude": -64.46, "altitude":0}))
out_info = mission.execute()
def test_scenario_6(self):
""" Multiple satellites from constellation, common instrument; propagation, contact-finder (ground-station, inter-satellite only).
"""
mission_json_str = '{ "epoch":{"@type":"JULIAN_DATE_UT1", "jd":2459270.75}, \
"duration": 0.25, \
"constellation": { "@type": "Walker Delta Constellation", \
"date":{"@type": "JULIAN_DATE_UT1", "jd":2459270.75}, \
"numberSatellites": 8, \
"numberPlanes": 1, \
"relativeSpacing": 1, \
"alt": 700, \
"ecc": 0.001, \
"inc": 45, \
"aop": 135, \
"@id": "abc" \
}, \
"groundStation":{"name": "CONAE", "latitude": -31.52, "longitude": -64.46, "altitude":0}, \
"settings": {"outDir": "temp/"} \
}'
mission = Mission.from_json(mission_json_str)
self.assertEqual(len(mission.spacecraft), 8)
# check the assigned spacecraft's ids.
self.assertEqual(mission.spacecraft[0]._id, "spc_abc_11")
self.assertEqual(mission.spacecraft[1]._id, "spc_abc_12")
self.assertEqual(mission.spacecraft[2]._id, "spc_abc_13")
self.assertEqual(mission.spacecraft[3]._id, "spc_abc_14")
self.assertEqual(mission.spacecraft[4]._id, "spc_abc_15")
self.assertEqual(mission.spacecraft[5]._id, "spc_abc_16")
self.assertEqual(mission.spacecraft[6]._id, "spc_abc_17")
self.assertEqual(mission.spacecraft[7]._id, "spc_abc_18")
out_info = mission.execute()
# test the satellites initial Keplerian states to confirm that the constellation is generated correctly.
state_sat0_fl = self.out_dir + '/sat0/state_keplerian.csv'
(epoch_JDUT1, step_size, duration) = orbitpy.util.extract_auxillary_info_from_state_file(state_sat0_fl)
state_sat0_row0 = pd.read_csv(state_sat0_fl, skiprows=4, nrows=2)
self.assertEqual(epoch_JDUT1, 2459270.75)
self.assertAlmostEqual(step_size, 211.50955372780942)
self.assertEqual(duration, 0.25)
self.assertAlmostEqual(state_sat0_row0['sma [km]'][0], 7078.137)
self.assertAlmostEqual(state_sat0_row0['ecc'][0], 0.001)
self.assertAlmostEqual(state_sat0_row0['inc [deg]'][0], 45)
self.assertAlmostEqual(state_sat0_row0['raan [deg]'][0]%360, 0)
self.assertAlmostEqual(state_sat0_row0['aop [deg]'][0], 135.0)
self.assertAlmostEqual(state_sat0_row0['ta [deg]'][0]%360, 0.0)
state_sat1_fl = self.out_dir + '/sat1/state_keplerian.csv'
(epoch_JDUT1, step_size, duration) = orbitpy.util.extract_auxillary_info_from_state_file(state_sat1_fl)
state_sat1_row0 = pd.read_csv(state_sat1_fl, skiprows=4, nrows=2)
self.assertEqual(epoch_JDUT1, 2459270.75)
self.assertAlmostEqual(step_size, 211.50955372780942)
self.assertEqual(duration, 0.25)
self.assertAlmostEqual(state_sat1_row0['sma [km]'][0], 7078.137)
self.assertAlmostEqual(state_sat1_row0['ecc'][0], 0.001)
self.assertAlmostEqual(state_sat1_row0['inc [deg]'][0], 45)
self.assertAlmostEqual(state_sat1_row0['raan [deg]'][0]%360, 0)
self.assertAlmostEqual(state_sat1_row0['aop [deg]'][0], 135.0)
self.assertAlmostEqual(state_sat1_row0['ta [deg]'][0]%360, 45)
state_sat2_fl = self.out_dir + '/sat2/state_keplerian.csv'
(epoch_JDUT1, step_size, duration) = orbitpy.util.extract_auxillary_info_from_state_file(state_sat2_fl)
state_sat2_row0 = pd.read_csv(state_sat2_fl, skiprows=4, nrows=2)
self.assertEqual(epoch_JDUT1, 2459270.75)
self.assertAlmostEqual(step_size, 211.50955372780942)
self.assertEqual(duration, 0.25)
self.assertAlmostEqual(state_sat2_row0['sma [km]'][0], 7078.137)
self.assertAlmostEqual(state_sat2_row0['ecc'][0], 0.001)
self.assertAlmostEqual(state_sat2_row0['inc [deg]'][0], 45)
self.assertAlmostEqual(state_sat2_row0['raan [deg]'][0]%360, 0)
self.assertAlmostEqual(state_sat2_row0['aop [deg]'][0], 135.0)
self.assertAlmostEqual(state_sat2_row0['ta [deg]'][0]%360, 90)
state_sat3_fl = self.out_dir + '/sat3/state_keplerian.csv'
(epoch_JDUT1, step_size, duration) = orbitpy.util.extract_auxillary_info_from_state_file(state_sat3_fl)
state_sat3_row0 = pd.read_csv(state_sat3_fl, skiprows=4, nrows=2)
self.assertEqual(epoch_JDUT1, 2459270.75)
self.assertAlmostEqual(step_size, 211.50955372780942)
self.assertEqual(duration, 0.25)
self.assertAlmostEqual(state_sat3_row0['sma [km]'][0], 7078.137)
self.assertAlmostEqual(state_sat3_row0['ecc'][0], 0.001)
self.assertAlmostEqual(state_sat3_row0['inc [deg]'][0], 45)
self.assertAlmostEqual(state_sat3_row0['raan [deg]'][0]%360, 0)
self.assertAlmostEqual(state_sat3_row0['aop [deg]'][0], 135.0)
self.assertAlmostEqual(state_sat3_row0['ta [deg]'][0]%360, 135)
state_sat4_fl = self.out_dir + '/sat4/state_keplerian.csv'
(epoch_JDUT1, step_size, duration) = orbitpy.util.extract_auxillary_info_from_state_file(state_sat4_fl)
state_sat4_row0 = pd.read_csv(state_sat4_fl, skiprows=4, nrows=2)
self.assertEqual(epoch_JDUT1, 2459270.75)
self.assertAlmostEqual(step_size, 211.50955372780942)
self.assertEqual(duration, 0.25)
self.assertAlmostEqual(state_sat4_row0['sma [km]'][0], 7078.137)
self.assertAlmostEqual(state_sat4_row0['ecc'][0], 0.001)
self.assertAlmostEqual(state_sat4_row0['inc [deg]'][0], 45)
self.assertAlmostEqual(state_sat4_row0['raan [deg]'][0]%360, 0)
self.assertAlmostEqual(state_sat4_row0['aop [deg]'][0], 135.0)
self.assertAlmostEqual(state_sat4_row0['ta [deg]'][0]%360, 180, delta=0.001)
state_sat5_fl = self.out_dir + '/sat5/state_keplerian.csv'
(epoch_JDUT1, step_size, duration) = orbitpy.util.extract_auxillary_info_from_state_file(state_sat5_fl)
state_sat5_row0 = pd.read_csv(state_sat5_fl, skiprows=4, nrows=2)
self.assertEqual(epoch_JDUT1, 2459270.75)
self.assertAlmostEqual(step_size, 211.50955372780942)
self.assertEqual(duration, 0.25)
self.assertAlmostEqual(state_sat5_row0['sma [km]'][0], 7078.137)
self.assertAlmostEqual(state_sat5_row0['ecc'][0], 0.001)
self.assertAlmostEqual(state_sat5_row0['inc [deg]'][0], 45)
self.assertAlmostEqual(state_sat5_row0['raan [deg]'][0]%360, 0)
self.assertAlmostEqual(state_sat5_row0['aop [deg]'][0], 135.0)
self.assertAlmostEqual(state_sat5_row0['ta [deg]'][0]%360, 225)
state_sat6_fl = self.out_dir + '/sat6/state_keplerian.csv'
(epoch_JDUT1, step_size, duration) = orbitpy.util.extract_auxillary_info_from_state_file(state_sat6_fl)
state_sat6_row0 = pd.read_csv(state_sat6_fl, skiprows=4, nrows=2)
self.assertEqual(epoch_JDUT1, 2459270.75)
self.assertAlmostEqual(step_size, 211.50955372780942)
self.assertEqual(duration, 0.25)
self.assertAlmostEqual(state_sat6_row0['sma [km]'][0], 7078.137)
self.assertAlmostEqual(state_sat6_row0['ecc'][0], 0.001)
self.assertAlmostEqual(state_sat6_row0['inc [deg]'][0], 45)
self.assertAlmostEqual(state_sat6_row0['raan [deg]'][0]%360, 0)
self.assertAlmostEqual(state_sat6_row0['aop [deg]'][0], 135.0)
self.assertAlmostEqual(state_sat6_row0['ta [deg]'][0]%360, 270)
state_sat7_fl = self.out_dir + '/sat7/state_keplerian.csv'
(epoch_JDUT1, step_size, duration) = orbitpy.util.extract_auxillary_info_from_state_file(state_sat7_fl)
state_sat7_row0 = pd.read_csv(state_sat7_fl, skiprows=4, nrows=2)
self.assertEqual(epoch_JDUT1, 2459270.75)
self.assertAlmostEqual(step_size, 211.50955372780942)
self.assertEqual(duration, 0.25)
self.assertAlmostEqual(state_sat7_row0['sma [km]'][0], 7078.137)
self.assertAlmostEqual(state_sat7_row0['ecc'][0], 0.001)
self.assertAlmostEqual(state_sat7_row0['inc [deg]'][0], 45)
self.assertAlmostEqual(state_sat7_row0['raan [deg]'][0]%360, 0)
self.assertAlmostEqual(state_sat7_row0['aop [deg]'][0], 135.0)
self.assertAlmostEqual(state_sat7_row0['ta [deg]'][0]%360, 315)
def test_scenario_7(self):
""" Multiple satellites from list, multiple instruments per satellite ; propagation, grid-coverage, data-metrics calculation, contact-finder (inter-satellite).
Spacecraft #1 : No instruments.
Spacecraft #2 : 1 instrument (Basic Sensor).
Spacecraft #3 : 2 instruments (Passive Optical Scanner, SAR)
"""
mission_json_str = '{ "epoch":{"@type":"GREGORIAN_UTC", "year":2021, "month":3, "day":25, "hour":15, "minute":6, "second":8}, \
"duration": 0.1, \
"spacecraft": [{ \
"@id": "spc1", \
"spacecraftBus":{"orientation":{"referenceFrame": "NADIR_POINTING", "convention": "REF_FRAME_ALIGNED"} \
}, \
"orbitState": {"date":{"@type":"GREGORIAN_UTC", "year":2021, "month":2, "day":25, "hour":6, "minute":0, "second":0}, \
"state":{"@type": "KEPLERIAN_EARTH_CENTERED_INERTIAL", "sma": 6878.137, "ecc": 0.001, "inc": 98, "raan": 35, "aop": 145, "ta": -25} \
} \
}, \
{ \
"@id": "spc2", \
"spacecraftBus":{"orientation":{"referenceFrame": "NADIR_POINTING", "convention": "REF_FRAME_ALIGNED"} \
}, \
"orbitState": {"date":{"@type":"GREGORIAN_UTC", "year":2021, "month":2, "day":25, "hour":6, "minute":0, "second":0}, \
"state":{"@type": "KEPLERIAN_EARTH_CENTERED_INERTIAL", "sma": 6878.137, "ecc": 0.001, "inc": 98, "raan": 35, "aop": 145, "ta": -35} \
}, \
"instrument": { "orientation": {"referenceFrame": "SC_BODY_FIXED", "convention": "REF_FRAME_ALIGNED"}, \
"fieldOfViewGeometry": {"shape": "rectangular", "angleHeight":15, "angleWidth":10 }, \
"@id":"bs", "@type":"Basic Sensor" } \
}, \
{ \
"@id": "spc3", \
"spacecraftBus":{"orientation":{"referenceFrame": "NADIR_POINTING", "convention": "REF_FRAME_ALIGNED"} \
}, \
"orbitState": {"date":{"@type":"GREGORIAN_UTC", "year":2021, "month":2, "day":25, "hour":6, "minute":0, "second":0}, \
"state":{"@type": "KEPLERIAN_EARTH_CENTERED_INERTIAL", "sma": 6878.137, "ecc": 0.001, "inc": 98, "raan": 35, "aop": 145, "ta": -145} \
}, \
"instrument": [{"@type": "Passive Optical Scanner", "@id": "opt1",\
"fieldOfViewGeometry": { "shape": "RECTanGULAR", "angleHeight": 0.628, "angleWidth": 115.8 }, \
"sceneFieldOfViewGeometry": { "shape": "RECTanGULAR", "angleHeight": 5, "angleWidth": 115.8 }, \
"scanTechnique": "WhiskBROOM", \
"orientation": { "referenceFrame": "SC_BODY_FIXED", "convention": "SIDE_loOK", "sideLookAngle": 0 }, \
"numberDetectorRows": 256, "numberDetectorCols": 1, \
"detectorWidth": 30e-6, "focalLength": 0.7, "operatingWavelength": 4.2e-6, "bandwidth": 1.9e-6, \
"quantumEff": 0.5, "targetBlackBodyTemp": 290, "bitsPerPixel": 8, "opticsSysEff": 0.75, \
"numOfReadOutE": 25, "apertureDia": 0.26, "Fnum": 2.7, "atmosLossModel": "LOWTRAN7" \
}, \
{"@type": "Synthetic Aperture Radar", "@id": "sar1", \
"orientation": { "convention": "SIDE_LOOK", "sideLookAngle": 20.5 }, \
"antenna":{"shape": "RECTANGULAR", "height": 10.7, "width": 2.16, "apertureEfficiency": 0.6, "apertureExcitation": "UNIFORM"}, \
"sceneFieldOfViewGeometry": {"shape": "RECTANGULAR", "angleHeight":5, "angleWidth":6.233630110575892}, \
"pulseWidth": 33.4e-6, \
"operatingFrequency": 1.2757e9, "peakTransmitPower": 1000, "chirpBandwidth": 19e6, \
"minimumPRF": 1463, "maximumPRF": 1686, "radarLoss": 3.5, "systemNoiseFigure": 5.11 \
}] \
}], \
"grid": [{"@type": "autogrid", "@id": 1, "latUpper":2, "latLower":0, "lonUpper":180, "lonLower":-180, "gridRes": 1}, {"@type": "autogrid", "@id": 2, "latUpper":22, "latLower":20, "lonUpper":180, "lonLower":-180, "gridRes": 1}], \
"settings": {"outDir": "temp/", "coverageType": "GRID COVERAGE"} \
}'
mission = Mission.from_json(mission_json_str)
self.assertEqual(len(mission.spacecraft), 3)
out_info = mission.execute()
'''
def test_scenario_x(self):
""" Hydrology paper """
mission_json_str = '{ "epoch":{"@type":"GREGORIAN_UTC", "year":2021, "month":1, "day":27, "hour":18, "minute":43, "second":5}, \
"duration": 3, \
"constellation": { "@type": "Walker Delta Constellation", \
"date":{"@type":"GREGORIAN_UTC", "year":2021, "month":1, "day":27, "hour":18, "minute":43, "second":5}, \
"numberSatellites": 8, \
"numberPlanes": 1, \
"relativeSpacing": 1, \
"alt": 705, \
"ecc": 0.0001, \
"inc": 98.2, \
"aop": 302.6503 \
}, \
"groundStation":{"name": "AtMetro", "latitude": 33, "longitude": -98, "altitude":0, "minimumElevation":35}, \
"propagator": {"@type": "J2 ANALYTICAL PROPAGATOR", "stepSize": 4}, \
"settings": {"outDir": "temp/"} \
}'
mission = Mission.from_json(mission_json_str)
out_info = mission.execute()
'''
|
# ohduran/ring: tests/test_redis.py
import ring
from .test_func_sync import redis_client
import pytest
__all__ = ('redis_client', )
@pytest.mark.parametrize('expire', [
1,
None,
])
def test_redis(redis_client, expire):
@ring.redis(redis_client, 'ring-test', expire=expire)
def f(a, b):
r = a * 100 + b
return str(r).encode('utf-8')
assert f.key(1, 2) == 'ring-test:1:2'
f.delete(1, 2)
assert False is f.has(1, 2)
assert None is f.get(1, b=2)
assert 102 == int(f(1, b=2))
assert 102 == int(redis_client.get(f.key(1, 2)))
assert True is f.has(1, 2)
if expire is None:
with pytest.raises(TypeError):
f.touch(1, 2)
else:
f.touch(1, 2)
@ring.redis(redis_client, 'ring-test', expire=expire, coder='json')
def f(a, b):
r = a * 100 + b
return r
mv = f.execute_many(
{'a': 1, 'b': 2},
(1, 4),
)
assert mv == [102, 104]
with pytest.raises(AttributeError):
f.delete_many()
f.delete(1, 2)
f.delete(1, 4)
mv = f.get_many(
(1, 2),
{'a': 1, 'b': 4},
)
assert mv == [None, None]
mv = f.update_many(
{'a': 1, 'b': 2},
(5, 1),
)
assert mv == [102, 501]
mv = f.get_many(
(1, 2),
(1, 4),
(5, 1),
)
assert mv == [102, None, 501]
def test_redis_hash(redis_client):
@ring.redis_hash(redis_client, 'test-hash-key', 'test-field')
def f(a, b):
r = a * 100 + b
return str(r).encode('utf-8')
# delete previous test
f.delete(1, 2)
f.delete(3, 4)
f.delete(5, 6)
f.delete(7, 8)
assert f.key(1, 2) == 'test-field:1:2'
f.delete(1, 2)
assert False is f.has(1, 2)
assert None is f.get(1, b=2)
assert 102 == int(f(1, b=2))
assert f.key(3, 4) == 'test-field:3:4'
assert 102 == int(f.get(1, b=2))
assert 304 == int(f(3, b=4))
mv = f.get_many(
(1, 2),
(3, 4),
)
assert mv == [b'102', b'304']
with pytest.raises(AttributeError):
f.delete_many()
f.delete(1, 2)
f.delete(3, 4)
mv = f.get_many(
(1, 2),
(3, 4),
)
assert mv == [None, None]
mv = f.update_many(
{'a': 5, 'b': 6},
(7, 8),
)
assert mv == [b'506', b'708']
mv = f.get_many(
(1, 2),
(3, 4),
(5, 6),
(7, 8),
)
assert mv == [None, None, b'506', b'708']
|
from ..archive import Archive
from ..individual import Individual
from ..operators import crowding_distance
import unittest
class TestArchive(unittest.TestCase):
def setUp(self):
self.archive = Archive()
def test_should_constructor_create_a_non_null_object(self):
self.assertIsNotNone(self.archive)
def test_should_adding_one_solution_work_properly(self):
x = Individual([2, 2])
x.costs = [-1.0, 5.0, 9.0]
x.costs_signed = [-1.0, 5.0, 9.0, 0]
self.archive.add(x)
self.assertEqual(1, self.archive.size())
self.assertEqual(x, self.archive._contents[0])
def test_should_adding_two_solutions_work_properly_if_one_is_dominated(self):
dominated_solution = Individual([1, 2])
dominated_solution.costs = [2.0, 2.0, 0]
dominated_solution.costs_signed = [2.0, 2.0, 0]
dominant_solution = Individual([1, 1])
dominant_solution.costs = [1.0, 1.0, 0]
dominant_solution.costs_signed = [1.0, 1.0, 0]
self.archive.add(dominated_solution)
self.archive.add(dominant_solution)
self.assertEqual(1, self.archive.size())
self.assertEqual(dominant_solution, self.archive._contents[0])
def test_should_adding_two_solutions_work_properly_if_both_are_non_dominated(self):
x = Individual([1, 2])
x.costs = [1.0, 0.0, 0]
x.costs_signed = [1.0, 0.0, 0]
y = Individual([1, 1])
y.costs = [0.0, 1.0, 0]
y.costs_signed = [0.0, 1.0, 0]
self.archive.add(x)
self.archive.add(y)
self.assertEqual(2, self.archive.size())
self.assertTrue(x in self.archive._contents and
y in self.archive._contents)
def test_should_adding_four_solutions_work_properly_if_one_dominates_the_others(self):
x = Individual([1, 2])
x.costs = [1.0, 1.0]
x.costs_signed = [1.0, 1.0, 0]
y = Individual([1, 2])
y.costs = [0.0, 2.0]
y.costs_signed = [0.0, 2.0, 0]
z = Individual([1, 2])
z.costs = [0.5, 1.5]
z.costs_signed = [0.5, 1.5, 0]
v = Individual([1, 2])
v.costs = [0.0, 0.0]
v.costs_signed = [0.0, 0.0, 0]
self.archive.add(x)
self.archive.add(y)
self.archive.add(z)
self.archive.add(v)
self.assertEqual(1, self.archive.size())
self.assertEqual(v, self.archive._contents[0])
def test_should_adding_three_solutions_work_properly_if_two_of_them_are_equal(self):
x = Individual([1, 2])
x.costs = [1.0, 1.0]
x.costs_signed = [1.0, 1.0, 0.0]
y = Individual([1, 2])
y.costs = [0.0, 2.0]
y.costs_signed = [0.0, 2.0, 0.0]
z = Individual([1, 2])
z.costs = [1.0, 1.0]
z.costs_signed = [1.0, 1.0, 0.0]
self.archive.add(x)
self.archive.add(y)
result = self.archive.add(z)
self.assertEqual(2, self.archive.size())
self.assertFalse(result)
self.assertTrue(x in self.archive._contents
or y in self.archive._contents)
def test_crowding_distance_truncate(self):
# the cost values for the test function: 16 non-dominated solutions, half of them should be rejected by their
# crowding distance
test_costs = [[12, 0],
[11.5, 0.5],
[11, 1],
[10.8, 1.2],
[10.5, 1.5],
[10.3, 1.8],
[9.5, 2],
[9, 2.5],
[7, 3],
[5, 4],
[2.5, 6],
[2, 10],
[1.5, 11],
[1, 11.5],
[0.8, 11.7],
[0, 12]]
for cost in test_costs:
x = Individual(cost)
x.costs = cost
cost.append(0.)
x.costs_signed = cost
x.features = {'crowding_distance': 0}
res = self.archive.add(x)
crowding_distance(self.archive._contents)
# test that the algorithm adds every non-dominated solution to the list
self.assertEqual(self.archive.size(), len(test_costs))
# order and truncate the elements according to their crowding distance;
# the length of the list should be the given number
self.archive.truncate(8, 'crowding_distance')
# if ordered by crowding distance the following items should be contained by the new list
# the two endpoints
x1 = Individual([0, 12])
x1.costs = [0, 12]
x1.costs_signed = [0, 12, 0.]
x2 = Individual([12, 0])
x2.costs = [12, 0]
x2.costs_signed = [12, 0, 0.]
self.assertIn(x1, self.archive._contents)
self.assertIn(x2, self.archive._contents)
x3 = Individual([1.5, 11])
x3.costs = [1.5, 11]
x3.costs_signed = [1.5, 11., 0.]
self.assertIn(x3, self.archive._contents)
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath("."))
# -- Project information -----------------------------------------------------
project = "pycl_fft"
copyright = "2021, <NAME>"
author = "<NAME>"
import pkg_resources
version = pkg_resources.get_distribution("pycl_fft").version
release = version
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named "sphinx.ext.*") or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.intersphinx",
"sphinx.ext.linkcode",
"sphinx.ext.ifconfig",
"sphinx.ext.doctest",
"sphinx_copybutton"
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "furo"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
intersphinx_mapping = {
"python": ("https://docs.python.org/3", None),
"numpy": ("https://docs.scipy.org/doc/numpy/", None),
"scipy": ("https://docs.scipy.org/doc/scipy/reference/", None),
"pyopencl": ("https://documen.tician.de/pyopencl", None),
"pytest": ("https://docs.pytest.org/en/latest/", None),
}
latex_elements = {
"maxlistdepth": "99",
}
# autodoc_mock_imports = ["sympy"]
import os
on_rtd = os.environ.get("READTHEDOCS") == "True"
# setup copy button thing
def setup(app):
app.add_config_value("on_rtd", on_rtd, "env")
doctest_global_setup = """
import pyopencl as cl
import pyopencl.array as cla
"""
copybutton_prompt_text = r">>> |\.\.\. |\$ |In \[\d*\]: | {2,5}\.\.\.: | {5,8}: "
copybutton_prompt_is_regexp = True
import sys
import inspect
linkcode_revision = "main"
linkcode_url = "https://github.com/zachjweiner/pycl-fft/blob/" \
+ linkcode_revision + "/{filepath}#L{linestart}-L{linestop}"
def linkcode_resolve(domain, info):
if domain != "py" or not info["module"]:
return None
modname = info["module"]
topmodulename = modname.split(".")[0]
fullname = info["fullname"]
submod = sys.modules.get(modname)
if submod is None:
return None
obj = submod
for part in fullname.split("."):
try:
obj = getattr(obj, part)
except Exception:
return None
try:
modpath = pkg_resources.require(topmodulename)[0].location
filepath = os.path.relpath(inspect.getsourcefile(obj), modpath)
if filepath is None:
return
except Exception:
return None
try:
source, lineno = inspect.getsourcelines(obj)
except OSError:
return None
else:
linestart, linestop = lineno, lineno + len(source) - 1
return linkcode_url.format(
filepath=filepath, linestart=linestart, linestop=linestop)
import pycl_fft
pycl_fft.clfft.Transform = pycl_fft.clfft.Transform.__wrapped__
pycl_fft.vkfft.Transform = pycl_fft.vkfft.Transform.__wrapped__
rst_prolog = """
.. |vkfft| replace:: :mod:`VkFFT`
.. _vkfft: https://github.com/DTolm/VkFFT
.. |clfft| replace:: :mod:`clFFT`
.. _clfft: https://github.com/clMathLibraries/clFFT
.. |scipy| replace:: :mod:`scipy`
.. _scipy: https://docs.scipy.org/doc/scipy/reference/
"""
|
from tkinter import *
from tkinter import messagebox
import sqlite3
from sqlite3 import Error
import os,sys
from datetime import datetime,date
py = sys.executable
class ret(Tk):
def __init__(self):
super().__init__()
self.iconbitmap(r'libico.ico')
self.title("Return")
self.maxsize(420,280)
self.canvas = Canvas(width=500, height=417, bg='black')
self.canvas.pack()
self.photo = PhotoImage(file='ret.png')
self.canvas.create_image(-20, -20, image=self.photo, anchor=NW)
self.cal = 0
a = StringVar()
def days_between(d1, d2):
if d2 <= d1:
return 0
else:
d1 = datetime.strptime(d1, "%Y-%m-%d")
d2 = datetime.strptime(d2, "%Y-%m-%d")
return abs((d2 - d1).days)
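# Worked example: days_between("2021-01-01", "2021-01-11") returns 10; it returns 0
# when the second date is not later than the first.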
def qui():
if len(a.get()) == 0:
messagebox.showerror("Error","Please Enter The Book Id")
else:
try:
self.conn = sqlite3.connect('library_administration.db')
self.mycursor = self.conn.cursor()
self.mycursor.execute("Select SID from issue where BID = ?", [a.get()])
sid = list(self.mycursor.fetchone())
self.mycursor.execute("Select Books_Issued from students where Student_Id = ?", [sid[0]])
gsid = list(self.mycursor.fetchone())
gsid[0] = gsid[0] - 1
self.mycursor.execute("Select BID from issue where BID = ?",[a.get()])
temp = self.mycursor.fetchone()
self.mycursor.execute("Select Fine from students where Student_Id = ?", [sid[0]])
fine = self.mycursor.fetchone()
self.mycursor.execute("Select Return_date from issue where BID = ? and SID = ?", [a.get(), sid[0]])
temp1 = self.mycursor.fetchone()
da = str(date.today())
ea = str(temp1[0])
self.cal = days_between(ea, da)
self.cal += int(fine[0])
if da <= ea and int(self.cal) == 0:
self.mycursor.execute("DELETE FROM issue WHERE BID = ?", [a.get()])
self.mycursor.execute("update books set Availiability = 1 where Book_Id = ?", [a.get()])
self.mycursor.execute("update students set Books_Issued = ? where Student_Id = ?", [gsid[0],sid[0]])
self.conn.commit()
self.conn.close()
messagebox.showinfo('Info', 'Successfully Returned')
d = messagebox.askyesno("Confirm", "Return more books?")
if d:
self.destroy()
os.system('%s %s' % (py, 'ret.py'))
else:
self.destroy()
elif len(temp) > 0:
if int(self.cal) > 0:
messagebox.showinfo('Warning', 'Please return/renew books on time to avoid termination of your ID')
self.mycursor.execute("Update students set Fine = ? where Student_Id = ?",[int(self.cal), sid[0]])
self.mycursor.execute("DELETE FROM issue WHERE BID = ?", [a.get()])
self.mycursor.execute("update books set Availiability = 1 where Book_Id = ?", [a.get()])
self.mycursor.execute("update students set Books_Issued = ? where Student_Id = ?", [gsid[0],sid[0]])
self.conn.commit()
self.conn.close()
messagebox.showinfo('Info', 'Successfully Returned')
d = messagebox.askyesno("Confirm", "Return more books?")
if d:
self.destroy()
os.system('%s %s' % (py, 'ret.py'))
else:
self.destroy()
else:
self.mycursor.execute("DELETE FROM issue WHERE BID = ?", [a.get()])
self.mycursor.execute("update books set Availiability = 1 where Book_Id = ?", [a.get()])
self.mycursor.execute("update students set Books_Issued = ? where Student_Id = ?", [gsid[0],sid[0]])
self.conn.commit()
self.conn.close()
messagebox.showinfo('Info', 'Successfully Returned')
d = messagebox.askyesno("Confirm", "Return more books?")
if d:
self.destroy()
os.system('%s %s' % (py, 'ret.py'))
else:
self.destroy()
else:
messagebox.showinfo("Oop's", "Book not yet issued")
except Error:
messagebox.showerror("Error","Something Goes Wrong")
Label(self, text='Return Book', fg='red',font=('arial', 35, 'bold')).pack()
Label(self, text='Enter Book ID', font=('Comic Sans MS', 15, 'bold')).place(x=20, y=120)
Entry(self, textvariable=a, width=40).place(x=165, y=124)
Button(self, text="Return", width=25, command=qui).place(x=180, y=180)
ret().mainloop()
|
# samples/containerregistry/manage_task.py
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import os
from azure.identity import DefaultAzureCredential
from azure.mgmt.containerregistry import ContainerRegistryManagementClient
from azure.mgmt.resource import ResourceManagementClient
def main():
SUBSCRIPTION_ID = os.environ.get("SUBSCRIPTION_ID", None)
GROUP_NAME = "testgroupx"
TASK = "taskxxyyzz"
REGISTRIES = "registriesxxyyzz"
# Create client
# For other authentication approaches, please see: https://pypi.org/project/azure-identity/
resource_client = ResourceManagementClient(
credential=DefaultAzureCredential(),
subscription_id=SUBSCRIPTION_ID
)
containerregistry_client = ContainerRegistryManagementClient(
credential=DefaultAzureCredential(),
subscription_id=SUBSCRIPTION_ID,
api_version="2019-12-01-preview"
)
# Create resource group
resource_client.resource_groups.create_or_update(
GROUP_NAME,
{"location": "eastus"}
)
# - init depended resources -
registries = containerregistry_client.registries.begin_create(
GROUP_NAME,
REGISTRIES,
{
"location": "eastus",
"tags": {
"key": "value"
},
"sku": {
"name": "Premium"
},
"admin_user_enabled": True
}
).result()
# - end -
# Create task
task = containerregistry_client.tasks.begin_create(
GROUP_NAME,
REGISTRIES,
TASK,
{
"location": "eastus",
"tags": {
"testkey": "value"
},
"status": "Enabled",
"platform": {
"os": "Linux",
"architecture": "amd64"
},
"agent_configuration": {
"cpu": "2"
},
"step": {
"type": "Docker",
"context_path": "https://github.com/SteveLasker/node-helloworld",
"image_names": [
"testtask:v1"
],
"docker_file_path": "DockerFile",
"is_push_enabled": True,
"no_cache": False,
},
"trigger": {
"base_image_trigger": {
"name": "myBaseImageTrigger",
"base_image_trigger_type": "Runtime",
"update_trigger_payload_type": "Default",
"status": "Enabled"
}
}
}
).result()
print("Create task:\n{}".format(task))
# Get task
task = containerregistry_client.tasks.get(
GROUP_NAME,
REGISTRIES,
TASK
)
print("Get task:\n{}".format(task))
# Update task
task = containerregistry_client.tasks.begin_update(
GROUP_NAME,
REGISTRIES,
TASK,
{
"location": "eastus",
"tags": {
"testkey": "value"
},
"status": "Enabled",
"platform": {
"os": "Linux",
"architecture": "amd64"
},
"agent_configuration": {
"cpu": "2"
},
"step": {
"type": "Docker",
"context_path": "https://github.com/SteveLasker/node-helloworld",
"image_names": [
"testtask:v1"
],
"docker_file_path": "DockerFile",
"is_push_enabled": True,
"no_cache": False,
},
"trigger": {
"base_image_trigger": {
"name": "myBaseImageTrigger",
"base_image_trigger_type": "Runtime",
"update_trigger_payload_type": "Default",
"status": "Enabled"
}
}
}
).result()
print("Update task:\n{}".format(task))
# Delete task
task = containerregistry_client.tasks.begin_delete(
GROUP_NAME,
REGISTRIES,
TASK
).result()
print("Delete task.\n")
# Delete Group
resource_client.resource_groups.begin_delete(
GROUP_NAME
).result()
if __name__ == "__main__":
main()
|
# netvisionhcm/app-server
# example usage: python yolo_video.py -i video.mp4 -o video_out.avi
import argparse
import glob
import time
import logging
import cv2
import numpy as np
import threading
from pathlib import Path
from ai_logging import LOG
class ObjectDetectionEngine(object):
def __init__(self):
self.height = 720
self.width = 1280
self.confidence = 0.5
self.threshold = 0.4
self.weights = glob.glob("yolo/*.weights")[0]
self.labels = glob.glob("yolo/*.txt")[0]
self.cfg = glob.glob("yolo/*.cfg")[0]
self.num_net = 10
self.net_dict = {}
self.lock = threading.Lock()
LOG.info("Using {} weights ,{} configs and {} labels.".format(
self.weights, self.cfg, self.labels))
self.class_names = list()
with open(self.labels, "r") as f:
self.class_names = [cname.strip() for cname in f.readlines()]
self.COLORS = np.random.randint(0, 255, size=(len(self.class_names), 3), dtype="uint8")
for i in range(self.num_net):
start_time = time.time()
#net = cv2.dnn.readNetFromCaffe('ssd/MobileNetSSD_deploy.prototxt', 'ssd/MobileNetSSD_deploy.caffemodel')
net = cv2.dnn.readNetFromDarknet(self.cfg, self.weights)
#net.setPreferableBackend(cv2.dnn.DNN_BACKEND_CUDA)
#net.setPreferableTarget(cv2.dnn.DNN_TARGET_CUDA)
layer = net.getLayerNames()
layer = [layer[i[0] - 1] for i in net.getUnconnectedOutLayers()]
self.net_dict[i] = {'net':net,'layer':layer,'status':1}
#print('Creation time:',time.time()-start_time)
#self.writer = None
def get_available_net(self):
for i in range(self.num_net):
ret_dict = self.net_dict[i]
if ret_dict['status'] == 1:
self.net_dict[i]['status'] = 0
return ret_dict['net'],ret_dict['layer'],i
return None, None, None
def perform_detection(self,frm):
(H, W) = frm.shape[:2]
blob = cv2.dnn.blobFromImage(frm, 1/255.0, (416, 416),
swapRB=True, crop=False)
self.lock.acquire()
net,layer,idx = self.get_available_net()
self.lock.release()
net.setInput(blob)
#start_time = time.time()
layerOutputs = net.forward(layer)
#end_time = time.time()
boxes = []
classIds = []
confidences = []
results = []
for output in layerOutputs:
for detection in output:
scores = detection[5:]
classID = np.argmax(scores)
confidence = scores[classID]
if confidence > self.confidence:
box = detection[0:4] * np.array([W, H, W, H])
(centerX, centerY, width, height) = box.astype("int")
x = int(centerX - (width/2))
y = int(centerY - (height/2))
boxes.append([x, y, int(width), int(height)])
classIds.append(classID)
confidences.append(float(confidence))
idxs = cv2.dnn.NMSBoxes(
boxes, confidences, self.confidence, self.threshold)
if len(idxs) > 0:
for i in idxs.flatten():
(x, y) = (boxes[i][0], boxes[i][1])
(w, h) = (boxes[i][2], boxes[i][3])
#color = [int(c) for c in self.COLORS[classIds[i]]]
#cv2.rectangle(frm, (x, y), (x + w, y + h), color, 2)
label = self.class_names[classIds[i]]
#if label not in self.object_db:
# continue
results.append((x,y,x + w,y + h,label))
'''
text = "{}: {:.4f}".format(
self.class_names[classIds[i]], confidences[i])
cv2.putText(frm, text, (x, y - 5),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 0, 0), 2)
fps_label = "FPS: %.2f" % (1 / (end_time - start_time))
cv2.putText(frm, fps_label, (0, 25),
cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 2)
'''
self.lock.acquire()
self.net_dict[idx]['status'] = 1
self.lock.release()
return results
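# A minimal usage sketch (hypothetical file names; assumes the yolo/ weight, config and
# label files referenced in __init__ are present on disk):
#
#   engine = ObjectDetectionEngine()
#   frame = cv2.imread("sample.jpg")
#   for (x1, y1, x2, y2, label) in engine.perform_detection(frame):
#       cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)
#       cv2.putText(frame, label, (x1, y1 - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
#   cv2.imwrite("sample_out.jpg", frame)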
|
import numpy as np
import sys
def same_dist_elems(arr):
"""
Smart little script to check if indices are equidistant.
Found at https://stackoverflow.com/questions/58741961/how-to-check-if-consecutive-elements-of-array-are-evenly-spaced
Parameters
----------
arr : array_like
Input array
Returns
-------
bool
boolean value, True if array is equidistantly spaced, False otherwise
"""
diff = arr[1] - arr[0]
for x in range(1, len(arr) - 1):
if arr[x + 1] - arr[x] != diff:
return False
return True
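# Worked examples: same_dist_elems([0, 2, 4, 6]) -> True; same_dist_elems([0, 2, 5]) -> False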
def progressbar(it, prefix="", size=60, file=sys.stdout):
"""
Function to generate a progress bar. Does not work ideally... Found on stackexchange
"""
count = len(it)
def show(j):
x = int(size*j/count)
file.write("%s[%s%s] %i/%i\r" % (prefix, "#"*x, "."*(size-x), j, count))
file.flush()
show(0)
for i, item in enumerate(it):
yield item
show(i+1)
file.write("\n")
file.flush()
def mult(*args):
# Multiply elements one by one
result = 1
for x in args:
result = result * x
return result
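# Worked example: mult(2, 3, 4) -> 24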
def interp(x, y, wl_ran=(300, 1200), delta_lambda=1, kind='cubic', lowlim=400, uplim=1100):
"""
This function interpolates values between given input table values.
Parameters
----------
x : array_like
Input array of x values, eg. wavelength
Ex: np.array([100, 217, 350])
y : array_like
Input array of y values, e.g. quantum efficiency or mirror reflectance.
Ex: np.array([0.1, 0.7, 0.85])
wl_ran : tuple
wavelength span. Entries must be integers
delta_lambda : float
wavelength resolution, in nm.
kind : string
type of interpolation. Valid options are 'linear', 'quadratic' and 'cubic'.
lowlim : float
lower wavelength limit. Below this value, throughput will be set to 0
uplim : float
upper wavelength limit. Above this value, throughput will be set to 0
Returns
-------
interpolated : array_like
Interpolated values between wl_start and wl_stop, with sharp cutoff beyond the specified limits.
Notes
-----
Check interp1d for more options.
"""
from scipy.interpolate import interp1d #Load neccessary package
import numpy as np
f = interp1d(x, y, kind=kind, fill_value="extrapolate") #interpolates, and extrapolates if the given table does not cover the wavelength range
# xnew = np.linspace(wl_ran[0], wl_ran[1], num=int((wl_ran[1]-wl_ran[0])/delta_lambda), endpoint=True) #Generates new x-values
xnew = np.arange(wl_ran[0], wl_ran[1], delta_lambda)
interp = f(xnew) #"Raw" interpolation
interpol= np.asarray([i if i>0 else 0 for i in interp]) #recast as numpy array for easier handling, and throws away values below 0
interpolated = np.stack((xnew,interpol), axis=-1) #Combine new x-values and interpolated
# To remove values below lower limit
for i in range(interpolated.shape[0]):
if interpolated[i,0]<lowlim:
interpolated[i,1]=0
if interpolated[i,0] > lowlim:
break
#To remove values above upper limit
for i in reversed(range(interpolated.shape[0])): #Start from top and goes down
if interpolated[i,0]>uplim:
interpolated[i,1]=0
if interpolated[i,0] < uplim:
break
return interpolated
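# A usage sketch with hypothetical table values: interpolate a four-point
# quantum-efficiency table onto a 1 nm grid over 300-1200 nm; values outside the
# default 400-1100 nm limits are zeroed.
#
#   wl_tbl = np.array([300, 550, 800, 1200])
#   qe_tbl = np.array([0.10, 0.55, 0.80, 0.85])
#   qe = interp(wl_tbl, qe_tbl, wl_ran=(300, 1200), delta_lambda=1)
#   # qe[:, 0] holds the wavelengths, qe[:, 1] the interpolated (and clipped) values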
def loadfunc(*args, wls, kind='cubic'):
result = 1
for x in args: #takes several input arrays
loaded = np.loadtxt(x)
if not loaded.shape[0] == (wls[1]-wls[0]): #if input is not of the correct length, this will interpolate
temp = interp(loaded[:,0], loaded[:,1], wl_ran=wls, kind=kind, lowlim=wls[0]-50, uplim=wls[1]+50)[:,1]
else:
temp = loaded
result = result * temp
return result
def CCD_maker(CCD_size, subpix=10, var=0.05, var2=0.05, grid_loss=0.6, smooth=5):
"""
This function creates a CCD composed of subpixels, with a separating grid between all full pixels.
The grid will have some loss.
Parameters
----------
CCD_size : array_like
Input size of CCD (in full pixels).
Ex: (10, 10)
subpix : int
Number of subpixels in each full pixel
var : float
Variation of noise, (in the gaussian noise)
var2 : float
Variation, relative variation from 0
grid_loss: float
Loss in the grid. 1 = everything gets through, 0 = nothing gets through
smooth : float
Smoothness factor, previously called "stepsize". It is the number of subpixels to correlate over during that phase. Must be larger than 1.
Returns
-------
CCD : ndarray
Output array of the CCD with the specified subpixel amount and size.
Notes
-----
Once used, remember to save the created CCD, so as not to run the script again. It can take quite a while to make big arrays.
Examples
-------
>>> new_CCD = CCD_maker((240, 240), 10, 0.3, 0.7, 5)
array([[0.59858663, 0.59919131, 0.59980866, ..., 0.59164421, 0.59224492,
0.59108706],
...,
[0.63641557, 0.88710319, 0.60372464, ..., 0.91472067, 0.65503371,
0.96646196]])
"""
import numpy as np
import sys
gridsize = subpix #"size of pixels" in # of subpixels
x_size = CCD_size[0]*gridsize
y_size = CCD_size[1]*gridsize # number of subpixels
S = smooth #stepsize "smoothness", previously =5
CCD = np.random.normal(1-var, var, [x_size, y_size])#*var #Noise matrix
# noise = np.random.standard_normal((x_size, y_size))*var #Noise matrix
# CCD = np.ones((x_size,y_size)) #"Clean" matrix
# CCD = CCD-noise #Subtracts noise from "clean" CCD matrix
CCD2=np.zeros(CCD.shape)
#Correlate the subpixels
N = 3 # number of times to correlate
for t in np.arange(0,N):
for i in np.arange(0,x_size):
for j in np.arange(0,y_size):
bit = CCD[i:i+S, j:j+S-1] #cuts out a bit to treat
CCD2[i, j] = np.sum(np.sum(bit)/np.size(bit)) #correlates surrounding subpixels
sys.stdout.write('.'); sys.stdout.flush(); #"Progress bar", just for visuals
#Introduces grid, to mimic the actual pixels - separates the subpixels by a grid with a slight loss defined by the grid_loss variable.
grid = np.ones((CCD.shape[0], CCD.shape[1])) #Set up grid
grid[0::gridsize,:]=grid_loss #Sets gridloss for every 'gridsize' row (10)
grid[:,0::gridsize]=grid_loss #sets gridloss for every 'gridsize' coloumn (10)
#to see a visualization of this, use the variable explorer - type: %varexp --imshow grid
# noise2 = np.random.standard_normal((x_size, y_size))*var2
noise2 = np.random.normal(0, var2, [x_size, y_size])#*var2
# CCD2 = CCD2+noise2+1
CCD2 = CCD2-noise2
CCD2 = CCD2/np.mean(CCD2)
# CCD2 = CCD2/np.mean(CCD2)
CCD = CCD2*grid #overlays the grid on the CCD
# CCD = CCD/np.max(CCD)
return CCD
def psf_maker(file_name, wl_endpoints=(350, 1100), f=1, size=101, res=(100, 100)):
"""
Creates a new file containing the full-color PSF, computing each wavelength directly rather than interpolating between precomputed slices.
Parameters
----------
file_name : str
Desired name of the file.
wl_endpoints : tuple, optional
Two values that mark the first and last colors. The default is (350, 1100).
f : float
factor to multiply in the sigma values
size : int, optional
Size of the PSF. The default is 101.
res : tuple, optional
Resolution of the meshgrid used in the 2D Gaussian. Will affect the size of the PSF inversely: larger values mean a smaller PSF. Just a tweakable parameter. The default is (100, 100).
Returns
-------
.npy and .hdf5 files containing the PSF
"""
import os
import numpy as np
import h5py
path = os.getcwd() #Get current working directory
file_path = path + "/" + file_name +".hdf5" #Set up path to save file later
'''
numColors = int( (wl_endpoints[1]-wl_endpoints[0])/step) # Number of colors
x_size = size[0]
y_size = size[1] #Extracts from the size input
z = np.float128(np.zeros((res, res, numColors))) # Setup empty array for PSF-slices
x = np.float128(np.linspace(-x_size, x_size, res)) #Preparation for meshgrid
y = np.float128(np.linspace(-y_size, y_size, res))
xx, yy = np.meshgrid(x, y) #define meshgrid
for i in range(wl_endpoints[0], wl_endpoints[1], step): # for-loop to create one psf for each color
sigma_x = np.float128(np.log(i)+0.5*i/100) # Used in the 2D Gaussian
sigma_y = np.float128(np.log(i)+0.5*i/100)
# 2D Gaussian function, that takes sigma_x and _y as input variables. Also takes in the meshgrid xx and yy
zz = (1/(2*np.pi*sigma_x*sigma_y) * np.exp(-((xx)**2/(2*sigma_x**2)
+ (yy)**2/(2*sigma_y**2))))
zz = zz/np.sum(zz) # Normalizes, so the total value (the sum of the array) =1
z[:,:,i-350] = zz # put psf-"slice" into larger 3D array
'''
step=1
numColors = int( (wl_endpoints[1]-wl_endpoints[0])/step) # Number of colors
x_size = res[0]
y_size = res[1] #Extracts from the size input
z = np.zeros((size, size, numColors)) # Setup empty array for PSF-slices
x = np.linspace(-x_size, x_size, size) #Preparation for meshgrid
y = np.linspace(-y_size, y_size, size)
xx, yy = np.meshgrid(x, y) #define meshgrid
for i in range(wl_endpoints[0], wl_endpoints[1], step): # for-loop to create one psf for each color
# sigma_x = np.log(i)+f*i/100 # Used in the 2D Gaussian, old one
# sigma_y = np.log(i)+f*i/100
sigma_x = f*0.014285714285714285 * i + 20.714285714285715 # empirically determined slope, linear increase
sigma_y = f*0.014285714285714285 * i + 20.714285714285715
# 2D Gaussian function, that takes sigma_x and _y as input variables. Also takes in the meshgrid xx and yy
zz = (1/(2*np.pi*sigma_x*sigma_y) * np.exp(-((xx)**2/(2*sigma_x**2)
+ (yy)**2/(2*sigma_y**2))))
zz = zz/np.sum(zz) # Normalizes, so the total value (the sum of the array) =1
z[:,:,i-wl_endpoints[0]] = zz # put psf-"slice" into larger 3D array
if os.path.exists(file_path) == True: #If file already exists, it will be deleted
os.remove(file_path)
# Saving the psf as a hdf5 file in order to store the large file, using h5py
psf_file = h5py.File(file_path, "a")
psf_file.create_dataset('psf', data=z, dtype='f') # Place dataset in the .hdf5 file
np.save(file_name + "_raw.npy", z) #Save as .npy binary file
return print("New PSF done, saved as", file_name, ".npy")
def psf_interp(input_psf_images, input_psf_wl, wl_endpoints=(350, 1100), delta_lambda=1):
import sys
from scipy.interpolate import interp1d
print('\nInterpolating missing wavelengths in PSF... \n')
ran = range(wl_endpoints[0], wl_endpoints[1], delta_lambda) #set for-loop range
res = input_psf_images.shape[0] # Width of the input psf, so the created psf will have the same size
psf = np.zeros((input_psf_images.shape[0], input_psf_images.shape[1], wl_endpoints[1]-wl_endpoints[0])) #Creates empty array for the new psf
for i in range(res):
for j in range(res):
f_test = interp1d(input_psf_wl, input_psf_images[i,j,:], kind='quadratic', fill_value="extrapolate") #sets up interpolation function
psf[i,j,:] = f_test(ran) # interpolates at the wavelengths specified in the range
sys.stdout.write('.'); sys.stdout.flush(); #"Progress bar", just for visuals
print(' ')
print('Interpolation done')
print(' ')
return psf
def func_jitter (entries, gain, dt):
"""
Generates two jitter arrays, in x- and y.
Parameters
----------
entries : int
Number of entries in the desired jitter arrays
gain : float
Gain of the ADCS.
dt : int
Time delay
Returns
-------
x, y : array-like
Jitter in x- and y-directions
"""
x = np.zeros((entries+dt)) #allocates for arrays
y = np.zeros((entries+dt))
for i in range(entries+dt-1): #set up for loop
x[i+1] = x[i]+np.random.normal()-gain*x[i-dt] #next entry will be previous, plus a Gaussian number,
y[i+1] = y[i]+np.random.normal()-gain*y[i-dt] # and the correction is subtracted from the i-dt'th entry
x = x[dt-1:-1] #Cut off the initial dt entries.
y = y[dt-1:-1]
return x, y
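# A usage sketch with hypothetical values: 1000 jitter samples with an ADCS gain of
# 0.2 and a correction delay of 5 samples.
#
#   x_jit, y_jit = func_jitter(1000, gain=0.2, dt=5)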
def func_slit(slit_size=[10,100], pos=[499, 499], image_size=[1000,1000]):
""" Creates a slit "mask" to overlay images.
Parameters
----------
slit_size : array_like, int
Half-width and half-height of the slit: the resulting opening spans 2*width by 2*height sub-pixels.
pos : array_like, int
Position of the centre of the slit, measured in subpixels.
image_size : array_like, int
Size of mask. Should be identical to the size of the image upon which the mask is overlaid.
Returns
-------
mask : array_like
Mask is zero everywhere except in the slit, where the value is 1.
"""
width = slit_size[0] #Loads in size of slit
height = slit_size[1]
x_low = pos[0] - width #Finds boundaries
x_up = pos[0] + width
y_low = pos[1] - height
y_up = pos[1] + height
mask = np.zeros(image_size) #Creates empty mask
mask[y_low:y_up, x_low:x_up] = mask[y_low:y_up, x_low:x_up]+1 #Fills in the slit, so that only the slit has any throughput
return mask
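# Usage sketch for func_slit (sizes and position are made-up values): the open area of the mask
# covers (2*width) x (2*height) sub-pixels.
def _demo_func_slit():
    import numpy as np
    mask = func_slit(slit_size=[10, 100], pos=[499, 499], image_size=[1000, 1000])
    return int(np.sum(mask))  # -> (2*10) * (2*100) = 4000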
def mag(mag_star, mag_ref=0):
"""Calculates the brightness difference based on magnitudes
Parameters
----------
mag_star : float
Magnitude of input star
mag_ref : float
magnitude of reference star"""
return 10**(0.4*((mag_ref)-(mag_star)))
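# Worked example for mag(): a star that is 5 magnitudes fainter than the reference is 100 times dimmer.
def _demo_mag():
    return mag(mag_star=5.0, mag_ref=0.0)  # 10**(0.4*(0 - 5)) = 0.01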
# def jitter_im(x, y, psf_size):
# ''' Creates a jitter "image" - a matrix of the same dimensions (x & y) as the psf, used in the folding function
# NOTE: Will round of the position of the jitter to nearest subpixel!
# Parameters
# ----------
# x : array
# Input jitter x-coord.
# y : array
# Input jitter y-coord.
# psf_size : int, two values
# Size of the psf.
# Returns
# -------
# jitter : array
# Jitter image, where each point where the jitter "stops" has a +1 value. All other points are zero.
# '''
# jitter=np.zeros(psf_size) # Setup image
# # jitter2=np.zeros(psf_size)
# for i in range(len(x)):
# jitter[(x[i]+(psf_size[0]/2)).astype(int), (y[i]+(psf_size[1]/2)).astype(int)]= jitter[(x[i]+(psf_size[0]/2)).astype(int), (y[i]+(psf_size[1]/2)).astype(int)]+1
# # jitter2[x[i].astype(int)+int(np.floor(psf_size[0]/2)), y[i].astype(int)+int(np.floor(psf_size[1]/2))]= jitter[x[i].astype(int)+int(np.floor(psf_size[0]/2)), y[i].astype(int)+int(np.floor(psf_size[1]/2))]+1 # Create jitter "image". +1 to every point where the jitter "hits"
# return jitter#, jitter2
def jitter_im(x, y, psf_size):
''' Creates a jitter "image" - a matrix of the same dimensions (x & y) as the psf, used in the folding function
NOTE: Will round off the position of the jitter to the nearest subpixel!
Parameters
----------
x : array
Input jitter x-coord.
y : array
Input jitter y-coord.
psf_size : int, two values
Size of the psf.
Returns
-------
jitter : array
Jitter image, where each point where the jitter "stops" has a +1 value. All other points are zero.
'''
jitter=np.zeros(psf_size) # Setup image
# jitter2=np.zeros(psf_size)
for i in range(len(x)):
rang1 = (x[i]+(psf_size[0]/2)).astype(int)
rang2 = (y[i]+(psf_size[1]/2)).astype(int)
# print(rang1, rang2)
jitter[rang1, rang2] = jitter[rang1, rang2]+1
# Create jitter "image". +1 to every point where the jitter "hits"
return jitter#, jitter2
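# Usage sketch for jitter_im: turn a generated jitter time series into a jitter "image" of the same
# size as the PSF, as done later in setup(). The 101x101 size mirrors the value hard-coded there, and
# the sketch assumes the jitter excursions stay inside that frame.
def _demo_jitter_im():
    import numpy as np
    x_j, y_j = func_jitter(entries=1000, gain=0.15, dt=5)
    jit = jitter_im(x=x_j, y=y_j, psf_size=(101, 101))
    return jit.shape, int(np.sum(jit))  # the counts sum to the number of jitter samples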
def folding(psf_image, jitter_image, mode='same', boundary='fill'):
#Clutter function, might as well just use signal.convolve2d
from scipy import signal
folded=signal.convolve2d(psf_image, jitter_image, mode=mode, boundary=boundary) #convolves the psf slice and the jitter image
return folded
#The correct disperser::::
def spatial_dispersion(wl_endpoints, jit_img, psf_ends, pos, image_size, dispersion, eff,
mask_img, steps=1, secondary_source='n', plot='n'):
import sys
from scipy import signal
from astropy.convolution import AiryDisk2DKernel
x_pos=pos[0]
y_pos=pos[1] #load in position of "zeroth order"
im_disp = np.zeros((image_size[0],image_size[1])) # empty image
im_disp_lambda = np.zeros((image_size[0],image_size[1]))
x_dispersion = dispersion[0] #load in dispersions
y_dispersion = dispersion[1]
numColors = int( (wl_endpoints[1]-wl_endpoints[0])) #total number of colours to iterate
# print("Number of colors to iterate: " + str(numColors))
# print(' ')
if plot=='y': #this part is not useful atm
import matplotlib.pyplot as plt
plt.figure()
from matplotlib.colors import LinearSegmentedColormap
N = 256 #8-bit value, to fix colours
colspec = plt.cm.get_cmap('Spectral') #Fetches colourmap to use later
vals = np.ones((N,4)) #Setup for colormap
temp = np.linspace(psf_ends[0], psf_ends[1], numColors)
for i in range(0, numColors, steps):
# for i in range(0,101, steps):
im = np.zeros((image_size[0],image_size[1])) #create temp. image
psf = AiryDisk2DKernel(temp[i], x_size=jit_img.shape[0], y_size=jit_img.shape[0]).array #PSF for this colour
if secondary_source == 'y': #To account for the secondary light source perhaps not being fully within the psf
# fold = folding(psf_img[:,:,i], jit_img)
fold = signal.convolve2d(psf[:,:], jit_img, mode='same', boundary='fill') #fold psf and jitter (psf is already a single 2D slice here)
fold = fold[0:jit_img.shape[1], 0:jit_img.shape[0]] #cut down to regular shape
else:
fold = signal.convolve2d(psf[:,:], jit_img, mode='same', boundary='fill') #fold as usual, if no sec. sources
# fold=fold/np.sum(fold)
foo = int(psf.shape[0]/2)
# im[0+x_pos-foo:len(jitter)+x_pos-foo, 0+y_pos-foo:len(jitter)+y_pos-foo] = im[0+x_pos-foo:len(jitter)+x_pos-foo, 0+y_pos-foo:len(jitter)+y_pos-foo] + fold*magni
im[0+y_pos-foo:len(fold)+y_pos-foo, 0+x_pos-foo:len(fold)+x_pos-foo] = fold #im[0+y_pos-foo:len(fold)+y_pos-foo, 0+x_pos-foo:len(fold)+x_pos-foo] + fold#*magni
immask = im*mask_img #mask is "overlaid" by multiplying
roll_x = np.roll(immask, int(np.modf(x_dispersion[i])[1]), axis=1) #move/disperse the light
roll_y = np.roll(roll_x, int(np.modf(y_dispersion[i])[1]), axis=0) #also in the y-direction
dx = abs(np.modf(x_dispersion[i])[0]) #residual amount (decimal amounts are shifted to the next sub-pixel)
dy = abs(np.modf(y_dispersion[i])[0])
foob = roll_y*(eff[i]*(1-dx)*(1-dy)) #multiply by efficiency
im_disp = im_disp + foob # Add the rolled image to the final, and multiply by the "effectivity"
roll_dx = np.roll(roll_y, 1, axis=1) # Roll the residual to the next subpixel
eff_dx = eff[i] * dx * (1-dy) # effectivity of the x-residual
roll_dy = np.roll(roll_y, 1, axis=0) # Roll the residual to the next subpixel, y-wise
eff_dy = eff[i] * dy * (1-dx) # y-residual eff.
roll_dxy = np.roll(roll_dx, 1, axis=0) # roll the image one step in both x- and y-wise.
eff_dxy = eff[i]* dx * dy #and eff.
baar = roll_dx*eff_dx + roll_dy*eff_dy + roll_dxy*eff_dxy
im_disp = im_disp + baar #add all residuals and multiply by their respective effectivities.
im_disp_lambda = im_disp_lambda+((foob+baar)*(i+wl_endpoints[0])) #fill in im_disp, and multiply by wavelength i
# im_disp_lambda = im_disp_lambda+(i+wl_endpoints[0]) #fill in im_disp, and multiply by wavelength i
# sys.stdout.write('/'); sys.stdout.flush(); #"Progress bar", just for visuals
##### Plotting #####
if plot == 'y':
vals[:, 0] = np.linspace(0, colspec(1-i/750)[0], N) #Making new colourmap values
vals[:, 1] = np.linspace(0, colspec(1-i/750)[1], N) #the /750 is to normalize the colormap, so values fall between 0 and 1
vals[:, 2] = np.linspace(0, colspec(1-i/750)[2], N)
vals[:, 3] = np.linspace(0, 1, N) #alpha, for making the cmap transparent
newcmp = LinearSegmentedColormap.from_list(name='Spectral', colors=vals) #Creates new cmp, based on vals
plt.imshow(roll_y, cmap=newcmp) # Show array
if plot=='y':
plt.title('Color dispersion of sample spectrum', size=18)
plt.xlabel('Sub-pixel', size=13)
plt.ylabel('Sub-pixel', size=13)
return im_disp, im_disp_lambda
def ccd_interp(inCCD, wls, img, img_wl):
"""
Interpolator used to find the subpixel sensitivity for all wavelengths (not just the ones created by ccd_maker)
Parameters
----------
inCCD : array
Input CCD array, can be made using ccd_maker.
wls : array
Corresponding wavelengths. Must have the same size as the depth of inCCD
img : array
Input image, from disperser2.
img_wl : array
Input image wavelengths.
Returns
-------
new_img : array
Image "multiplied" by the CCD, using the interpolated sensitivities for each subpixel.
"""
import sys
from scipy.interpolate import interp1d
if wls.shape[0] != inCCD.shape[2]:
raise TypeError("Wavelength array and input CCD depth not same size")
if not inCCD.shape[0:2] == img.shape[0:2] == img_wl.shape:
raise TypeError("CCD and image not same size")
new_img = np.zeros((img.shape[0], img.shape[1]))
for i in range(0, inCCD.shape[0]):
for j in range(0, inCCD.shape[1]):
interp = interp1d(wls, inCCD[i,j,:], kind="slinear", fill_value="extrapolate")
new_img[i,j] = img[i,j]*interp(img_wl[i,j])
sys.stdout.write('.'); sys.stdout.flush();
return new_img
def read_out(dispersed):
'''
Will sum up the "photons" in the y-direction of the input dispersed image.
Parameters
----------
dispersed : array, 2 dimensional
Dispersed image-array.
Returns
-------
counts : array
Array of counts in the y-direction.
'''
counts = np.array(())
for i in range(dispersed.shape[1]):
counts = np.append(counts, np.sum(dispersed[:,i]))
return counts
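# Note on read_out: it is equivalent to a column-wise sum, so np.sum(dispersed, axis=0) is a
# vectorised alternative. The random image below is purely for illustration.
def _demo_read_out():
    import numpy as np
    img = np.random.rand(50, 80)
    return np.allclose(read_out(img), img.sum(axis=0))  # -> True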
def read_outx(dispersed):
'''
Will sum up the "photons" in the X-direction of the input dispersed image.
'''
counts = np.array(())
for i in range(dispersed.shape[0]):
counts = np.append(counts, np.sum(dispersed[i,:]))
return counts
def bin_sum(inp, bin_size):
"""
Returns a binned version of inp, with each bin being bin_size in each dimension. The bins are summed up.
Parameters
----------
inp : array_like
Input array. Must be 2D.
bin_size : int
Bin size. Division of input shape and bin_size should be a whole number, i.e. no 8.333 etc.
Returns
-------
binned : array
Array of inp.shape/bin_size in shape, with the bins summed up.
"""
# Check if bin_size is whole divisor of inp.shape
if not np.modf(inp.shape[0]/bin_size)[0] == 0 == np.modf(inp.shape[1]/bin_size)[0]:
raise TypeError("Input shape and bin size divided must be a whole number. (mod = 0)")
temp = np.zeros((inp.shape[0], int(inp.shape[1]/bin_size) )) #Create empty matrix for first step
summed = np.zeros((int(inp.shape[0]/bin_size), int(inp.shape[1]/bin_size) )) #Empty matrix for second step
for x in range(0, inp.shape[1], bin_size): #Range for 1st
j = range(0+x, bin_size+x) #Bin range. ex. 20-30 if bin_size is 10
for i in range(0, inp.shape[0]): # over all columns
temp[i, int(j[0]/bin_size)]= sum(inp[i,j]) #sum, and add to temp
for x in range(0, inp.shape[0], bin_size): #2nd step, repeat 1st step, but for rows
i = range(0+x, bin_size+x) #row bin-range.
for j in range(0, summed.shape[1]):
summed[int(i[0]/bin_size), j]= sum(temp[i,j]) #sum and add to result-matrix
return summed
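# Worked example for bin_sum: binning a 4x4 array with bin_size=2 sums each 2x2 block,
# so an all-ones input gives a 2x2 array of fours.
def _demo_bin_sum():
    import numpy as np
    return bin_sum(np.ones((4, 4)), 2)  # -> [[4., 4.], [4., 4.]]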
def noise(size, image, RON=5):
noise = np.zeros((size[0], size[1]))
for i in range(size[0]):
for j in range(size[1]):
noise[i,j] = (np.sqrt(image[i,j])+RON)*np.random.normal(0,1)
return noise
def convert_plate_pix(plate_scale, pix_size):
"""
Plate scale is calculated with the equation:
P = 206265 / (D*f/#)
206265 is the amount of arcsecs in a radian.
D is the diameter of the telescope
f/# is the f-number: Focal length/Diameter
( http://www-supernova.lbl.gov/~sed/telescope/obsguide/platescale.html )
For a telescope of 20 cm, and focal length of 50 cm, the plate scale is 412.53 arcsec/mm
Parameters
----------
plate_scale : float
Must be in arcsec/mm.
pix_size : float
Must be in mm/pixel.
Returns
-------
convert_factor : float
How large an angle on the sky a single pixel width covers, in arcsec per pixel.
"""
convert_factor = plate_scale * pix_size # [arcsec per pix] = [arcsec/mm] * [mm/pix]
return convert_factor
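# Worked example for convert_plate_pix: with the 412.53 arcsec/mm plate scale quoted in the docstring
# and an assumed 0.01 mm pixel pitch, one pixel covers about 4.13 arcsec on the sky.
def _demo_convert_plate_pix():
    return convert_plate_pix(plate_scale=412.53, pix_size=0.01)  # -> ~4.1253 arcsec/pix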
def convert_slit(unit, size, convert_factor):
if not type(unit) == str:
raise TypeError("unit must be a string")
if not ((unit == 'pix') or (unit == 'ang')):
raise TypeError("unit must be either 'ang' or 'pix'")
if unit == 'ang':
slit_size = np.divide(size, convert_factor)
if unit == 'pix':
slit_size = size
return slit_size
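# Usage sketch for convert_slit: a slit given in angular units ('ang', here taken to mean arcsec)
# is converted to pixels with the plate-scale factor from convert_plate_pix(); 'pix' input is passed
# through unchanged. The numbers are made up.
def _demo_convert_slit():
    factor = convert_plate_pix(plate_scale=412.53, pix_size=0.01)  # arcsec per pixel
    return convert_slit(unit='ang', size=[20, 400], convert_factor=factor)  # -> ~[4.85, 96.96] pixels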
def setup(input_file):
import warnings
in_spec = np.loadtxt(input_file.in_spec) #Input science spectrum
in_spec2 = np.loadtxt(input_file.in_spec2) #Input science spectrum
col_area = input_file.col_area # Collecting area
sub_pixel = input_file.sub_pixel # Amount of sub-pixels per full pixel
img_size = input_file.img_size #Size of the CCD, in pixels
pl_scale = input_file.pl_scale # Plate scale
pix_size = input_file.pix_size # Pixel size
bg_spec = np.loadtxt(input_file.bg_spec) # Background spectrum, i.e. zodiacal light
exp = input_file.exp # Exposure time
wl_ran = input_file.wl_ran # Wavelength range
eta_in = input_file.eta_in # Spectral troughput of the entire system. Requires at minimum the CCD QE
slit = input_file.slit # Slit size. Unit first, then width and height
#psf = np.load(input_file.psf) # Point Spread Function of the optics etc.
#psf_col = input_file.psf_col
#psf_col = np.arange(300, 1000)
disper = np.load(input_file.disper) #Dispersion of the spectrograph
####### Optionals ########
if not input_file.jitter:
jitter = ''
else:
jitter = np.load(input_file.jitter) # Spacecraft jitter
step = input_file.step # Step size. Only needed if jitter is left empty
in_CCD = np.load(input_file.in_CCD) # Input CCD imperfections. Sub-pixel variations
CCD_col = input_file.CCD_col # CCD colours, respective to each slice in in_CCD
img_size[0] = img_size[0]*sub_pixel
img_size[1] = img_size[1]*sub_pixel
pl_arc_mm = convert_plate_pix(pl_scale, pix_size=pix_size)
disper[0] = disper[0]*sub_pixel
# disper[1] = disper[1]*sub_pixel
if not ((type(wl_ran[0]) == int) or (type(wl_ran[1]) == int)):
raise TypeError("wl_ran must be a tuple with two integers")
span = wl_ran[1]-wl_ran[0]
foo = 1
args = eta_in
for x in args: #takes several input arrays
loaded = np.loadtxt(x)
if not loaded.shape[0] == span: #if input is not of the correct length, this will interpolate
temp = interp(loaded[:,0], loaded[:,1], wl_ran=wl_ran, kind='cubic', lowlim=wl_ran[0]-50, uplim=wl_ran[1]+50)[:,1]
else:
temp = loaded
foo = foo * temp
eta_in = foo
del foo, args, temp, loaded
#Handling the input spectrum and SEC/TEC
if not in_spec.shape[0] == span:
in_spec = interp(x=in_spec[:,0], y=in_spec[:,1], wl_ran=wl_ran, kind='cubic', lowlim=wl_ran[0]-50, uplim=wl_ran[1]+50)
if not eta_in.shape[0] == span:
raise TypeError("eta_in must cover the range of wavelengths: " + str(span) + " entries, from " + str(wl_ran[0]) +" to " +str(wl_ran[1]))
spec_eff = in_spec[:,1] * col_area * eta_in
if not in_spec2.shape[0] == span:
in_spec2 = interp(x=in_spec2[:,0], y=in_spec2[:,1], wl_ran=wl_ran, kind='cubic', lowlim=wl_ran[0]-50, uplim=wl_ran[1]+50)
if not eta_in.shape[0] == span:
raise TypeError("eta_in must cover the range of wavelengths: " + str(span) + " entries, from " + str(wl_ran[0]) +" to " +str(wl_ran[1]))
spec_eff2 = in_spec2[:,1] * col_area * eta_in
#Slit is created here
slit_size = convert_slit(unit = slit[0], size = slit[1:3], convert_factor = pl_arc_mm) #Convert slit size to pixels
slit_size[0] = slit_size[0]*sub_pixel #Convert to subpixels
slit_size[1] = slit_size[1]*sub_pixel
slitpos = [150, 249] #Slit position on the sub-pixel CCD image. Arbitrary position..
mask = func_slit(slit_size = np.floor(slit_size).astype(int), pos=slitpos, image_size=img_size) #Generate mask used to overlay before actual dispersion later.
#Background spectrum gets handled here. A background image of exp = 1s will be created, and can be scaled and overlaid on the final image
# new_bg = input("Do you wish to generate a new background? (y/n): ")
new_bg = "n"
if new_bg == 'y':
if not bg_spec.shape[0] == span: #interpolate if values are missing
bg_spec = interp(x=bg_spec[:,0], y=bg_spec[:,1], wl_ran=wl_ran, kind='cubic', lowlim=wl_ran[0]-50, uplim=wl_ran[1]+50)
print("\nInterpolated missing values in background spectrum")
detector_area = (pl_arc_mm*img_size[0]/sub_pixel)*(pl_arc_mm*img_size[1]/sub_pixel) #Collecting area of the detector measured in arcsec^2
bg_spec = bg_spec*detector_area #Multiply by detector area
bg_psf = np.ones((101, 101, wl_ran[1]-wl_ran[0]))
x_j, y_j = func_jitter(entries=(exp*step), gain=0.15, dt=5) #This jitter will be a single point at the center of the jitter image
bg_jit = jitter_im(x= x_j, y= y_j, psf_size=(bg_psf[:,:,0].shape[0], bg_psf[:,:,0].shape[0]) ) #Creating jitter "image"
background, background_wl = spatial_dispersion(wl_endpoints=wl_ran, jit_img=bg_jit, psf_img=bg_psf, pos=slitpos, image_size=img_size, dispersion=disper, eff = bg_spec[:,1], mask_img=mask, steps=1, plot='n' )
np.save('background.npy', background) #saving the background image for later use.
del x_j, y_j, bg_jit, background_wl, bg_spec #getting rid of unnecessary variables
else:
background = np.load('../sample_values/background.npy')
try: #If jitter is not defined, new jitter will be generated
jitter
except NameError:
try:
step
except NameError:
raise NameError("Either jitter or step must be specified")
x_j, y_j = func_jitter(entries=(exp*step), gain=0.15, dt=5)
# x_j, y_j = simfun.jitter(entries=(exp*step), gain=0.02, dt=10)
jitter = np.stack((x_j, y_j), axis=-1)
spec_eff = spec_eff/step #If the generated jitter is used, the spectrum must be in step size, not seconds
spec_eff2 = spec_eff2/step
with warnings.catch_warnings(): #This is to suppress the potential "FutureWarning" error message. Comparing np-array to str etc. Might cause errors down the line?
warnings.simplefilter(action='ignore', category=FutureWarning)
if jitter == '': #if jitter is an empty str, it will also be generated.
if step == '': #step must be specified
raise TypeError("If jitter is unspecified, step must be explicitly specified")
x_j, y_j = func_jitter(entries=(exp*step), gain=0.15, dt=5) #New jitter, will have epx*step length
# x_j, y_j = simfun.jitter(entries=(exp*step), gain=0.02, dt=10)
jitter = np.stack((x_j, y_j), axis=-1)
spec_eff = spec_eff/step #If the generated jitter is used, the spectrum must be in step size, not seconds
spec_eff2 = spec_eff2/step
jitter = jitter_im(x= jitter[:,0], y= jitter[:,1], psf_size=(101, 101) )
return spec_eff, spec_eff2, jitter, x_j, y_j, img_size, sub_pixel, pl_arc_mm, disper, mask, slitpos, background
def int_r(r1, rang):
"""
Interpolator
"""
from scipy.interpolate import interp1d
x= np.arange(len(r1))
xnew = np.arange(0, len(r1), 0.001)
f1 = interp1d(x, r1, kind=3, fill_value="extrapolate")
# f2 = interp1d(x, r2, kind=3, fill_value="extrapolate")
r1_int = f1(xnew)
# r2_int = f2(xnew)
return r1_int
# def int_r(r1, r2, rang):
# """
# Interpolator
# """
# from scipy.interpolate import interp1d
# x= np.arange(len(r1))
# xnew = np.arange(0, len(r1), 0.001)
# f1 = interp1d(x, r1, kind=3, fill_value="extrapolate")
# f2 = interp1d(x, r2, kind=3, fill_value="extrapolate")
# r1_int = f1(xnew)
# r2_int = f2(xnew)
# return r1_int, r2_int
def noise2d(x, RON=5):
"""
Function to generate noise of a 2D array.
Parameters
----------
x : array
Input array. Could be a simulated spectrum
RON : float
Read-out noise of the CCD, measured in photo-electrons. The default is 5.
Returns
-------
noise : array
Noise-array, same dimensions as the input array. The two can then be added together for the "actual image"
The noise is calculated with the \hyperlink{noise2d}{noise2d} function.
It uses the following equation:
N_{i,j} = [sqrt(counts_{i,j})+RON]*\mathcal{N}(0,1)
with N_{i,j} being the noise of entry i,j, counts_{i,j} is the value of the i,j'th entry, RON is the
Read-Out Noise of the CCD, and \mathcal{N}(0,1) is a random number drawn from a normal distribution,
between 0 and 1.
"""
noise = np.zeros((x.shape))
for i in range(x.shape[0]):
for j in range(x.shape[1]):
noise[i,j] = (np.sqrt(x[i,j])+RON)*np.random.normal(0,1)
return noise
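# Usage sketch for noise2d: generate a noise frame for a small, flat fake image; the output has the
# same shape as the input and can simply be added to it. The 100-count level is an arbitrary choice.
def _demo_noise2d():
    import numpy as np
    img = np.full((10, 10), 100.0)  # flat frame of 100 counts per sub-pixel
    return (img + noise2d(img, RON=5)).shape  # -> (10, 10)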
def noise_test(x):
noise = np.sqrt(abs(x))*np.random.normal(0, 1)
return noise
def sinusoidal(size, frequency, amplitude, phase):
"""
Function to generate sinusoidal jitter
Parameters
----------
size : int
Size of the desired output array.
frequency : list
List with frequencies
amplitude : list
List of amplitudes. Must be the same size as the frequency list
phase : float
Phase of the sinusoidal.
Returns
-------
x : array
Sum of the cosine components evaluated at each of the `size` indices.
"""
if not (len(frequency)) == (len(amplitude)):
raise TypeError("Frequency array must be same length as amplitude array")
x = np.zeros((size))
for j in range(len(frequency)):
for i in range(size):
x[i] = x[i] + amplitude[j] * np.cos(frequency[j] * i - phase)
return x
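# Usage sketch for sinusoidal: two superposed cosine components as a simple model of periodic
# pointing jitter. Frequencies, amplitudes and phase are arbitrary illustration values.
def _demo_sinusoidal():
    return sinusoidal(size=1000, frequency=[0.01, 0.05], amplitude=[2.0, 0.5], phase=0.0)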
# def sinusoidal(size, frequency, amplitude, phase):
# x = np.zeros((size))
# y = np.zeros((size))
# frequency_x = frequency[0]
# frequency_y = frequency[1]
# amplitude_x = amplitude[0]
# amplitude_y = amplitude[1]
# phase_x = phase[0]
# phase_y = phase[1]
# for i in range(0, size):
# x[i] = amplitude_x * np.cos(frequency_x * i - phase_x) #next x-value, using the new phase
# # x[i] = x[i] + noise_test(x[i])
# y[i] = amplitude_y * np.sin(frequency_y * i - phase_y) #and new y-value of coord.
# # y[i] = y[i] + noise_test(y[i])
# return x, y
def prep_func(image, CCD, sub_pixel, wl_ran, interpolate = "n"):
spec = read_out(bin_sum(image*CCD, sub_pixel)+noise2d(bin_sum(image, sub_pixel)))
if interpolate == 'y':
spec = int_r(spec, wl_ran) #interpolate to higher resolution
return spec
def transmission_spec_func(spectrum1, spectrum2, wl_ran, disper, slitpos, img_size):
"""
Function used to process two stellar spectra, so it is possible to analyze the transmission spectrum of an exoplanetary
atmosphere.
The two input spectra (already binned and read out, i.e. the 2D images collapsed column-wise) are cross-correlated and
shifted so they have the largest correlation (best alignment). Afterwards, a linear regression is made to find the
wavelength/pixel relation, and a moving-mean (Gaussian) filter is applied to smooth the result.
"""
import scipy.signal
# import input_file as inp
# CCD = np.load(inp.in_CCD)
# rout = bin_sum(image, sub_pixel)
# r1 = read_out(rout)
# rin = bin_sum(image2, sub_pixel)
# r2 = read_out(rin) #sum image and readout
# r1, r2 = int_r(r1, r2, wl_ran) #Interpolate to higher resolution
# if noiseinp == "y":
# no = noise1d(rout)
# ni = noise1d(rin)
# else:
# no=0
# ni=0
# if move == "y":
autocor = scipy.signal.correlate(spectrum1, spectrum1, mode="same") #perform autocorr.
cor = scipy.signal.correlate(spectrum1, spectrum2, mode="same") #Regular correlation
first = np.argmax(autocor)
second = np.argmax(cor)
delta = first-second #amount of sub-pixels to move r1, for the two spectra to overlap
# if noiseinp == "y":
# r1, r2 = int_r(spectrum1, spectrum2, wl_ran)
spectrum1 = np.roll(spectrum1, delta) #Move r1
del first, second, autocor, cor#, rout, rin
# if not move == "y":
# delta = 0
pos = (disper[0]+slitpos[0])*100.0 #Position of each wavelength on the detector
from scipy.stats import linregress
a, b, r, p, s = linregress(pos, np.arange(wl_ran[0], wl_ran[1])) #Linear regression to find the lambda/pixel correlation
wavelength = a*np.arange(img_size[1]*100.0)+(b)
del a, b, r, p, s,
wave = wavelength[np.max(np.where(wavelength<wl_ran[0]))+1:np.min(np.where(wavelength>wl_ran[1]))-1] #remove outlying entries where the spectrum is not present (below wl_ran[0] and above wl_ran[1])
spectrum1 = spectrum1[np.max(np.where(wavelength<wl_ran[0]))+1:np.min(np.where(wavelength>wl_ran[1]))-1]
spectrum2 = spectrum2[np.max(np.where(wavelength<wl_ran[0]))+1:np.min(np.where(wavelength>wl_ran[1]))-1]
from astropy.convolution import convolve, Gaussian1DKernel
spectrum1 = convolve(spectrum1,kernel = Gaussian1DKernel(4246.6)) #Moving Mean filter by convolution. Kernel is Gaussian, input is sigma
spectrum2 = convolve(spectrum2,kernel = Gaussian1DKernel(4246.6)) #https://docs.astropy.org/en/stable/convolution/
return spectrum1, spectrum2, wave, delta
def photon_convert(wavelength_array, flux_array, stellar_radius, distance):
"""
Function to convert stellar flux from SPECTRUM into photon counts per second per cm^2.
Parameters
----------
wavelength_array : array
Array with each entry being the wavelength, in cm
flux_array : array
SPECTRUM fluxes. Has to be in erg/s/cm^2/Å
stellar_radius : float
Stellar radius of the target star.
distance : float
Distance to target star, in the same unit as stellar_radius
Returns
-------
spec : array
Photon counts per second per cm^2 in the specified wavelength. [No. of photons/s/cm^2].
"""
import astropy.constants as co
flux = np.zeros((flux_array.shape[0]))
for i in range(flux_array.shape[0]):
flux[i] = np.pi*flux_array[i]*(stellar_radius/distance)**2 #The pi is a geometric factor. See 1979ApJS...40....1K
spec = wavelength_array*flux/(co.h.cgs.value * co.c.cgs.value)
return spec
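# Worked example for photon_convert: a flux of 1 erg/s/cm^2/AA at 500 nm (5e-5 cm) from a Sun-like
# star placed at 10 pc. The photon energy is hc/lambda ~ 3.97e-12 erg and the geometric dilution
# pi*(R/d)^2 ~ 1.6e-17, so the photon rate comes out around 4e-6 photons/s/cm^2/AA. The radius and
# distance values are illustrative assumptions.
def _demo_photon_convert():
    import numpy as np
    wavelength_cm = np.array([5.0e-5])   # 500 nm expressed in cm
    flux = np.array([1.0])               # erg/s/cm^2/AA
    r_sun_cm = 6.96e10
    ten_pc_cm = 3.086e19
    return photon_convert(wavelength_cm, flux, stellar_radius=r_sun_cm, distance=ten_pc_cm)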
#Trash or old functions be here:
'''
# def transmission_spec_func(image, image2, sub_pixel, wl_ran, disper, slitpos, img_size, move="y", noiseinp="n"):
# """
# Function used to process two stellar spectrum, so it is possible to analyze the transmission spectrum of an exoplanetary
# atmosphere.
# First, the two spectra will be summed, and read-out (2D image is collapsed column-wise). Then, the spectra are shifted
# so they have the largest correlation (best alignment). Afterwards, a linear regression is made to find the wavelength/pixel
# relation. and a moving mean filter is overlaid to smooth.
# """
# import scipy.signal
# import input_file as inp
# CCD = np.load(inp.in_CCD)
# rout = bin_sum(image, sub_pixel)
# r1 = read_out(rout)
# rin = bin_sum(image2, sub_pixel)
# r2 = read_out(rin) #sum image and readout
# r1, r2 = int_r(r1, r2, wl_ran) #Interpolate to higher resolution
# if noiseinp == "y":
# no = noise1d(rout)
# ni = noise1d(rin)
# else:
# no=0
# ni=0
# if move == "y":
# autocor = scipy.signal.correlate(r1, r1, mode="same") #perform autocorr.
# cor = scipy.signal.correlate(r1, r2, mode="same") #Regular correlation
# first = np.argmax(autocor)
# second = np.argmax(cor)
# delta = first-second #amount of sub-pixels to move r1, for the two spectra to overlap
# if noiseinp == "y":
# rout = read_out(bin_sum(image*CCD, sub_pixel)+no)
# rin = read_out(bin_sum(image2*CCD, sub_pixel)+ni)
# r1, r2 = int_r(rout, rin, wl_ran)
# r1 = np.roll(r1, delta) #Move r1
# del first, second, autocor, cor, rout, rin
# if not move == "y":
# delta = 0
# pos = (disper[0]+slitpos[0])*100.0 #Position of each wavelength on the detector
# from scipy.stats import linregress
# a, b, r, p, s = linregress(pos, np.arange(wl_ran[0], wl_ran[1])) #Linear regression to find the lambda/pixel correlation
# wavelength = a*np.arange(img_size[1]*100.0)+(b)
# del a, b, r, p, s,
# wave = wavelength[np.max(np.where(wavelength<wl_ran[0]))+1:np.min(np.where(wavelength>wl_ran[1]))-1] #remove outlying entries, where the spectrum is not present (belo 300 nm, and above 1000)
# r1 = r1[np.max(np.where(wavelength<wl_ran[0]))+1:np.min(np.where(wavelength>wl_ran[1]))-1]
# r2 = r2[np.max(np.where(wavelength<wl_ran[0]))+1:np.min(np.where(wavelength>wl_ran[1]))-1]
# from astropy.convolution import convolve, Gaussian1DKernel
# r1 = convolve(r1,kernel = Gaussian1DKernel(4246.6)) #Moving Mean filter by convolution. Kernel is Gaussian, input is sigma
# r2 = convolve(r2,kernel = Gaussian1DKernel(4246.6)) #https://docs.astropy.org/en/stable/convolution/
# return r1, r2, wave, delta
def disperser(wl_endpoints, jit_img, psf_img, pos, image_size, dispersion, eff,
mask_img, steps=1, secondary_source='n', plot='n'):
import sys
from scipy import signal
x_pos=pos[0]
y_pos=pos[1] #load in position of "zeroth order"
im_disp = np.zeros((image_size[0],image_size[1])) # empty image
im_disp_lambda = np.zeros((image_size[0],image_size[1]))
x_dispersion = dispersion[0] #load in dispersions
y_dispersion = dispersion[1]
numColors = int( (wl_endpoints[1]-wl_endpoints[0])) #total number of colours to iterate
print("Number of colors to iterate: " + str(numColors))
print(' ')
if plot=='y': #this part is not useful atm
import matplotlib.pyplot as plt
plt.figure()
from matplotlib.colors import LinearSegmentedColormap
N = 256 #8-bit value, to fix colours
colspec = plt.cm.get_cmap('Spectral') #Fetches colourmap to use later
vals = np.ones((N,4)) #Setup for colormap
for i in range(0, numColors, steps):
# for i in range(0,101, steps):
im = np.zeros((image_size[0],image_size[1])) #create temp. image
if secondary_source == 'y': #To account for the secondary light source perhaps not being fully within the psf
# fold = folding(psf_img[:,:,i], jit_img)
fold = signal.convolve2d(psf_img[:,:,i], jit_img, mode='same', boundary='fill') #fold psf and jitter
fold = fold[0:jit_img.shape[1], 0:jit_img.shape[0]] #cut down to regular shape
else:
fold = signal.convolve2d(psf_img[:,:,i], jit_img, mode='same', boundary='fill') #fold as usual, if no sec. sources
# fold=fold/np.sum(fold)
foo = int(psf_img.shape[0]/2)
# im[0+x_pos-foo:len(jitter)+x_pos-foo, 0+y_pos-foo:len(jitter)+y_pos-foo] = im[0+x_pos-foo:len(jitter)+x_pos-foo, 0+y_pos-foo:len(jitter)+y_pos-foo] + fold*magni
im[0+y_pos-foo:len(fold)+y_pos-foo, 0+x_pos-foo:len(fold)+x_pos-foo] = fold #im[0+y_pos-foo:len(fold)+y_pos-foo, 0+x_pos-foo:len(fold)+x_pos-foo] + fold#*magni
immask = im*mask_img #mask is "overlaid" by multiplying
roll_x = np.roll(immask, int(np.modf(x_dispersion[i])[1]), axis=1) #move/disperse the light
roll_y = np.roll(roll_x, int(np.modf(y_dispersion[i])[1]), axis=0) #also in the y-direction
dx = abs(np.modf(x_dispersion[i])[0]) #residual amount (decimal amounts are shifted to the next sub-pixel)
dy = abs(np.modf(y_dispersion[i])[0])
foob = roll_y*(eff[i]*(1-dx)*(1-dy)) #multiply by efficiency
im_disp = im_disp + foob # Add the rolled image to the final, and multiply by the "effectivity"
roll_dx = np.roll(roll_y, 1, axis=1) # Roll the residual to the next subpixel
eff_dx = eff[i] * dx * (1-dy) # effectivity of the x-residual
roll_dy = np.roll(roll_y, 1, axis=0) # Roll the residual to the next subpixel, y-wise
eff_dy = eff[i] * dy * (1-dx) # y-residual eff.
roll_dxy = np.roll(roll_dx, 1, axis=0) # roll the image one step in both x- and y-wise.
eff_dxy = eff[i]* dx * dy #and eff.
baar = roll_dx*eff_dx + roll_dy*eff_dy + roll_dxy*eff_dxy
im_disp = im_disp + baar #add all residuals and multiply by their respective effectivities.
im_disp_lambda = im_disp_lambda+((foob+baar)*(i+wl_endpoints[0])) #fill in im_disp, and multiply by wavelength i
# im_disp_lambda = im_disp_lambda+(i+wl_endpoints[0]) #fill in im_disp, and multiply by wavelength i
sys.stdout.write('/'); sys.stdout.flush(); #"Progress bar", just for visuals
##### Plotting #####
if plot == 'y':
vals[:, 0] = np.linspace(0, colspec(1-i/750)[0], N) #Making new colourmap values
vals[:, 1] = np.linspace(0, colspec(1-i/750)[1], N) #the /750 is to normalize the colormap, so values fall between 0 and 1
vals[:, 2] = np.linspace(0, colspec(1-i/750)[2], N)
vals[:, 3] = np.linspace(0, 1, N) #alpha, for making the cmap transparent
newcmp = LinearSegmentedColormap.from_list(name='Spectral', colors=vals) #Creates new cmp, based on vals
plt.imshow(roll_y, cmap=newcmp) # Show array
if plot=='y':
plt.title('Color dispersion of sample spectrum', size=18)
plt.xlabel('Sub-pixel', size=13)
plt.ylabel('Sub-pixel', size=13)
return im_disp, im_disp_lambda
'''
"""
def jitter(steps=1000, dt=10, time_delay=5, gain=0.2, amplitude_act=0.1, amplitude_sens=0.1):
Jitter generator
Parameters
----------
steps : int
desired number of entries in position vector
dt : int
"batch size", how many indices to run through before correction
time_delay : int
index that is subtraced from the dt'th index, used to correct for.
gain : float
gain of correction. RoT - should be around 1/time_delay
amplitude_* : float
size of noise added under correction
Returns
-------
x, y : array_like
Vectors with x and y positions, of the size specified (+1)
x = np.zeros(steps+1) #Allocates vectors for x and y position
y = np.zeros(steps+1)
k = 0
for j in range(int(steps/dt)):
jitt = np.random.randn(1,2) #Generate random noise to add to position
for i in range(1,dt): #Jitter will be added to position, cumulatively
x[k+i] = x[k+i-1]+jitt[0,0] #Takes previous position, adds jitter
y[k+i] = y[k+i-1]+jitt[0,1]
jitt = np.random.randn(1,2)*0.05 #Generate new jitter, to add to next postition
x_correction = gain*(-x[k+i-time_delay])+amplitude_act*np.random.randn()+amplitude_sens*np.random.randn() #Generates the correction term,
# but for the index "time_delay" ago - so for time_delay= 5, x[k+9-5] = x[k+4].
# print(x_correction)
y_correction = gain*(-y[k+i-time_delay])+amplitude_act*np.random.randn()+amplitude_sens*np.random.randn()
x[k+i+1] = x[k+i] + x_correction #correction term is added to the last entry in the small batch of "steps"
y[k+i+1] = y[k+i] + y_correction
k=k+dt #K is updated, and the whole thing runs again, this time for index +dt.
x = x[0:steps]
y = y[0:steps] #Cut off the last step, as it is just filler
return x, y
def PSF(sigma_x, sigma_y, res=100, x_size=30, y_size=30, x0=0, y0=0):
""" """This function creates a mesh of the PSF as a 2D Gaussian
Parameters
----------
sigma_x, sigma_y : float
std. The "spread" of the function
res : int
number of steps in grid. "Resolution" of PSF
x_size, y_size : int
Size of created grid
x0, y0 : float
x and y position on the detector
""" """
import numpy as np
x = np.linspace(-x_size, x_size, res)
y = np.linspace(-y_size, y_size, res)
x, y = np.meshgrid(x, y) #define meshgrid
z = (1/(2*np.pi*sigma_x*sigma_y) * np.exp(-((x)**2/(2*sigma_x**2)
+ (y)**2/(2*sigma_y**2)))) #2 Gaussian functions
z = z/np.sum(z)
return x, y, z
def psf_maker(res=101, wl_start=350, wl_stop=1100, si=[30, 30]):
""" """
Creates 10 psf's. Mainly for quickly testing the psf-interpolator.
Parameters
----------
res : int, optional
Size of the psf. The default is 101.
wl_start : int, optional
Start wavelength. The default is 350.
wl_stop : int, optional
Stop wavelength. The default is 1100.
si : TYPE, optional
DESCRIPTION. The default is [30, 30].
Returns
-------
psf_temp : array
PSF to interpolate.
ran : array
List of corresponding wavelengths. Each entry is the wavelength of the PSF slice in psf_temp
""" """
#Load packages
import numpy as np
ran = np.linspace(wl_start, wl_stop, 10) #set for loop index
foo = np.zeros((res, res)) #create empty grid to store in the psf
for i in ran:
sigma_x = np.log(i)+0.5*i/100 # Used in the 2D Gaussian
sigma_y = np.log(i)+0.5*i/100
x, y, z = PSF(sigma_x, sigma_y, res=res, x_size=si[0], y_size=si[1]) #2D Gaussian
foo = np.dstack((foo,z)) #appends the new psf to the 3d matrix
psf_temp = foo[:,:,1:ran.shape[0]+1] #cuts off the initial layer of 0's
del foo
print(' ')
print('psf done')
return psf_temp, ran
def disp_func(wl_start, wl_stop, delta_lambda):
return 1.15*np.arange(wl_start, wl_stop, delta_lambda)-800
def disperser(image, psf, magni, mask, eff, dispers=(350, 1100, 1), CCDsize=(1000,1000), cols=(0, 750), stepsize=5, exposure=100, plot='n', save='n'):
import sys
if plot == 'y':
import matplotlib.pyplot as plt
from matplotlib.colors import LinearSegmentedColormap
N = 256 #8-bit value, to fix colours
colspec = plt.cm.get_cmap('Spectral') #Fetches colourmap to use later
vals = np.ones((N,4))
plt.figure()
im_disp = np.zeros(CCDsize)
x_j, y_j = jitter(steps=100, gain=0.2, amplitude_act=3, amplitude_sens=3)
ux = int(np.floor(psf.shape[0]/2))
ox = int(np.floor(psf.shape[0]/2)+1)
uy = int(np.floor(psf.shape[1]/2))
oy = int(np.floor(psf.shape[1]/2)+1)
wl_start = dispers[0]
wl_stop = dispers[1]
delta_lambda = dispers[2]
dispersion=disp_func(wl_start, wl_stop, delta_lambda)
print(' ')
print('Number of colours complete:')
print(' 10 20 30 40 50 60 70 80 90')
for k in range(cols[0],cols[1], stepsize):
x_pos = 499 #generates star position
y_pos = 499 #has to be here, otherwise jitter won't be applied properly
x_0 = x_pos
y_0 = y_pos
for i in range(exposure):
image[x_pos-ux:x_pos+ox, y_pos-uy:y_pos+oy] = image[x_pos-ux:x_pos+ox, y_pos-uy:y_pos+oy]+psf[:,:,k]*magni #adds psf values to selected area of image array
x_pos = x_0+x_j[i]
y_pos = y_0+y_j[i] #updates coordinates based on jitter
x_pos = int(np.around(x_pos))
y_pos = int(np.around(y_pos)) # rounds off the coordinates, as matrix can only take int as index
image_masked = image[:,:]*mask #Overlay slit mask
roll = np.roll(image_masked, int(dispersion[k]), axis=1)*eff[k]
im_disp = im_disp + roll + np.random.standard_normal((1000, 1000))*0.001 #Disperses the colours, using np.roll
sys.stdout.write('/'); sys.stdout.flush(); #"Progress bar", just for visuals
##### Plotting #####
if plot == 'y':
vals[:, 0] = np.linspace(0, colspec(1-k/750)[0], N) #Making new colourmap values
vals[:, 1] = np.linspace(0, colspec(1-k/750)[1], N) #the /750 is to normalize the colormap, so values fall between 0 and 1
vals[:, 2] = np.linspace(0, colspec(1-k/750)[2], N)
vals[:, 3] = np.linspace(0, 1, N) #alpha, for making the cmap transparent
newcmp = LinearSegmentedColormap.from_list(name='Spectral', colors=vals) #Creates new cmp, based on vals
plt.imshow(roll, cmap=newcmp) # Show array
if plot == 'y':
plt.title('Color dispersion of sample spectrum', size=18)
plt.xlabel('Sub-pixel', size=13)
plt.ylabel('Sub-pixel', size=13)
if save == 'y':
plt.savefig('disp.png', dpi=400)
return im_disp
def disperser2_copy(jit_img, psf_img, pos, image_size, dispersion, eff, magni, mask_img, steps=1, plot='n'):
'''
Parameters
----------
jitter : array of float64
Jitter "image".
psf : _hl.dataset.Dataset
Point Spread Function image. Must be a 3D array with depth equal to the number of colors
pos : list
Position of the star. Two values in a list.
image_size : tuble or list
Two integers that determine the size of the image. Must have the same dimensions as the mask and jitter
dispersion : tuble
Requires two entries, one for dispersion in the x- and one for the y-direction. Must have same length as number of colors
eff : array of float64
Spectral effeciency/throughput. Must be same lenght as number of colors
magni : float
Magnitude of the star.
mask : array of float64
Slit mask. Must have same dimensions as image.
steps : int, optional
Size of color "steps" to include in the disperser. The default is 1 - so all colors are included.
plot : string, optional
Toggles plotting of the color-dispersion, mainly for visuals. The default is 'n'.
Returns
-------
im_disp : array of float64
Dispersed image, 2D array.
'''
import sys
x_pos=pos[0]
y_pos=pos[1]
im_disp = np.zeros((image_size[0],image_size[1]))
x_dispersion = dispersion[0]
y_dispersion = dispersion[1]
if plot=='y':
import matplotlib.pyplot as plt
plt.figure()
from matplotlib.colors import LinearSegmentedColormap
N = 256 #8-bit value, to fix colours
colspec = plt.cm.get_cmap('Spectral') #Fetches colourmap to use later
vals = np.ones((N,4)) #Setup for colormap
for i in range(0,750, steps):
# for i in range(0,101, steps):
im = np.zeros((image_size[0],image_size[1]))
fold = folding(psf_img[:,:,i], jit_img)
fold=fold/np.sum(fold)
foo = int(psf_img.shape[0]/2)
# im[0+x_pos-foo:len(jitter)+x_pos-foo, 0+y_pos-foo:len(jitter)+y_pos-foo] = im[0+x_pos-foo:len(jitter)+x_pos-foo, 0+y_pos-foo:len(jitter)+y_pos-foo] + fold*magni
im[0+y_pos-foo:len(fold)+y_pos-foo, 0+x_pos-foo:len(fold)+x_pos-foo] = fold #im[0+y_pos-foo:len(fold)+y_pos-foo, 0+x_pos-foo:len(fold)+x_pos-foo] + fold#*magni
immask = im*mask_img
roll_x = np.roll(immask, int(np.modf( x_dispersion[i])[1]), axis=1)
roll_y = np.roll(roll_x, int(np.modf(y_dispersion[i])[1]), axis=0)
dx = abs(np.modf(x_dispersion[i])[0])
dy = abs(np.modf(y_dispersion[i])[0])
im_disp = im_disp + roll_y*(eff[i]*(1-dx)*(1-dy)) # Add the rolled image to the final, and multiply by the "effectivity"
roll_dx = np.roll(roll_y, 1, axis=1) # Roll the residual to the next subpixel
eff_dx = eff[i] * dx * (1-dy) # effectivity of the x-residual
roll_dy = np.roll(roll_y, 1, axis=0) # Roll the residual to the next subpixel, y-wise
eff_dy = eff[i] * dy * (1-dx) # y-residual eff.
roll_dxy = np.roll(roll_dx, 1, axis=0) # roll the image one step in both x- and y-wise.
eff_dxy = eff[i]* dx * dy #and eff.
im_disp = im_disp + roll_dx*eff_dx + roll_dy*eff_dy + roll_dxy*eff_dxy #add all residuals and multiply by their respective effectivities.
sys.stdout.write('/'); sys.stdout.flush(); #"Progress bar", just for visuals
##### Plotting #####
if plot == 'y':
vals[:, 0] = np.linspace(0, colspec(1-i/750)[0], N) #Making new colourmap values
vals[:, 1] = np.linspace(0, colspec(1-i/750)[1], N) #the /750 is to normalize the colormap, so values fall between 0 and 1
vals[:, 2] = np.linspace(0, colspec(1-i/750)[2], N)
vals[:, 3] = np.linspace(0, 1, N) #alpha, for making the cmap transparent
newcmp = LinearSegmentedColormap.from_list(name='Spectral', colors=vals) #Creates new cmp, based on vals
plt.imshow(roll_y, cmap=newcmp) # Show array
if plot=='y':
plt.title('Color dispersion of sample spectrum', size=18)
plt.xlabel('Sub-pixel', size=13)
plt.ylabel('Sub-pixel', size=13)
return im_disp
"""
|
'''
@brief Leg-Rest Pos Recommendation with MLP Regressor
@author <NAME> <<EMAIL>>
@date 2021. 05. 21
'''
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeRegressor
from sklearn.neural_network import MLPRegressor
import progressbar
'''
Presets & Hyper-parameters
'''
CONFIGURATION_FILE_PATH = "./data/train/data_config.csv"
DATASET_PATH = "./data/train/"
pd.set_option('display.width', 200) # for display width
# FEATURE_LENGTH = 30 # n-dimensional data feature only use
# NUMBER_OF_SAMPLES = 299 # number of augmented data
# FEATURE_MAX_LENGTH = 115 # Maximum feature length
# NUMBER_OF_RANDOM_SELECTION = 5
# MAX_TRAIN_ITERATION = -1 # infinity
'''
1. Load configuration file
'''
data_config = pd.read_csv(CONFIGURATION_FILE_PATH, header=0, index_col=0)
'''
2. data extraction
'''
X = data_config.loc[:, ['user_height', 'user_weight', 'user_age']]
bmr = 66.47+(13.75*X['user_weight'])+(5*X['user_height'])-(6.76*X['user_age'])
bmi = X['user_weight']/(X['user_height']/100*X['user_height']/100)
X["bmr"] = bmr
X["bmi"] = bmi
ys = data_config.loc[:, ['bestfit_angle_standard']]
yr = data_config.loc[:, ['bestfit_angle_relax']]
'''
Regression Model (MLPRegressor)
'''
print("------ Regression Model Evaluation (@standard) ------")
X_train, X_test, y_train, y_test = train_test_split(X, np.ravel(ys), test_size=0.33, shuffle=True)
model_standard = MLPRegressor(
hidden_layer_sizes=25,
activation='relu',
verbose=False,
solver='adam',
learning_rate_init=0.001,
random_state=1,
tol=0.000001,
max_iter=100000).fit(X_train, y_train)
print("* R2 Score with Trainset (@standard) :", model_standard.score(X_train, y_train))
print("* R2 Score with Testset (@standard) :", model_standard.score(X_test, y_test))
print("------ Regression Model Evaluation (@relax) ------")
X_train, X_test, y_train, y_test = train_test_split(X, np.ravel(yr), test_size=0.33, shuffle=True)
model_relax = MLPRegressor(
hidden_layer_sizes=25,
activation='relu',
verbose=False,
solver='adam',
learning_rate_init=0.001,
random_state=1,
tol=0.000001,
max_iter=100000).fit(X_train, y_train)
print("* R-squared Score with Trainset (@relax) :", model_relax.score(X_train, y_train))
print("* R-squared Score with Testset (@relax) :", model_relax.score(X_test, y_test))
'''
Output File Generation
'''
min_age = 10
max_age = 80
ages = np.array([min_age+i for i in range(max_age-min_age+1)])
min_height = 150
max_height = 200
heights = np.array([min_height+i for i in range(max_height-min_height+1)])
min_weight = 40
max_weight = 100
weights = np.array([min_weight+i for i in range(max_weight-min_weight+1)])
bar = progressbar.ProgressBar(maxval=len(ages)*len(heights)*len(weights), widgets=[progressbar.Bar('=', '[', ']'), ' ', progressbar.Percentage()])
bar.start()
output_standard = pd.DataFrame(columns=['age','height','weight','legrest'])
output_relax = pd.DataFrame(columns=['age','height','weight','legrest'])
count = 0
for a in ages:
for h in heights:
for w in weights:
bmr = 66.47+(13.75*w)+(5*h)-(6.76*a)
bmi = w/(h/100*h/100)
pvs = model_standard.predict([[h,w,a,bmr,bmi]]) # feature order matches the training columns: height, weight, age, bmr, bmi
pvr = model_relax.predict([[h,w,a,bmr,bmi]])
output_standard = output_standard.append({'age':a, 'height':h, 'weight':w, 'legrest':pvs[0]}, ignore_index=True)
output_relax = output_relax.append({'age':a, 'height':h, 'weight':w, 'legrest':pvr[0]}, ignore_index=True)
count = count+1
bar.update(count)
bar.finish()
output_standard.to_csv('result_standard.csv', index=False)
output_relax.to_csv('result_relax.csv', index=False)
print("saved results")
|
from src.dqn import DQN
from glob import glob
import random, os
import numpy as np
from retro.scripts import playback_movie
from src import actions_builder, env_creator
import math
from src import utils
from collections import deque
logger = utils.get_logger(__name__)
def train_on_random_movie(dqn):
human_games = glob("human_games/**")
movie = random.sample(human_games, 1)[0]
train_from_movie(dqn, movie)
def train_from_movie(dqn, movie_file):
logger.info("Training on movie {}".format(movie_file))
env, movie, duration, = playback_movie.load_movie(movie_file)
env = env_creator.wrap_environment(env)
dqn.reset(env)
memory = []
total_reward = 0
total_steps = 0
while movie.step():
total_steps += 1
keys = []
for i in range(16):
keys.append(movie.get_key(i))
keys = list(map(float, keys))[:12]
actions = np.where((dqn.ACTIONS == np.array(keys)).all(axis=1))
if len(actions[0]) != 1: # np.where returns a tuple; check the number of matching action rows
raise ValueError("keys array not present in actions", keys)
else:
action = dqn.ACTIONS[actions[0]][0]
state, action, new_state, reward, done, info, new_action = dqn.step(env, action)
total_reward += reward
if len(memory) > 0:
memory[-1][-1] = action
memory.append([state, action, new_state, reward, done, info, new_action])
dqn.learn_from_memory(memory)
dqn.model.save_weights("weights/alvaro_dqn_model.h5")
logger.info("Total reward {}, total_steps {}".format(total_reward, total_steps))
memory.clear()
del memory
env.close()
movie.close()
def enqueue_episode(episodes, total_reward, total_steps, memory):
if len(episodes) < episodes.maxlen:
episodes.append((total_reward, total_steps, memory))
return
for index, episode in enumerate(episodes):
reward, steps, memory = episode
if total_reward > reward:
episodes[index] = (total_reward, total_steps, memory)
break
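# Usage sketch for enqueue_episode (the rewards and empty memories below are placeholders): the deque
# keeps up to maxlen episodes, and a new episode displaces the first stored one with a lower reward.
def _demo_enqueue_episode():
    from collections import deque
    buffer = deque(maxlen=5)
    for reward in (1.0, 3.0, 2.0, 5.0, 4.0):
        enqueue_episode(buffer, reward, 100, [])
    enqueue_episode(buffer, 6.0, 100, [])  # replaces the reward-1.0 entry
    return [r for r, _, _ in buffer]  # -> [6.0, 3.0, 2.0, 5.0, 4.0]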
def train_on_env(dqn, env, epochs=1, train_steps=500, render=False,
manual_interventions_enabled=True,
manual_intervention_epsilon=0.8,
manual_intervention_duration=200):
episodes = deque(maxlen=5)
for epoch in range(epochs):
episode_steps = 0
done = False
prev_info = None
total_reward = 0
memory = []
first_x, last_x = None, None
max_x = 0
pushing_wall = False
real_rewards = []
dqn.reset(env)
while not done:
episode_steps += 1
#extras = [[(sum(real_rewards[-20:]))/20]]
extras = [[]]
state, action, new_state, reward, done, info, new_action, extra_info = dqn.step(env, _extra_info=extras)
real_rewards.append(reward)
reward = 0
max_x = max(max_x, info["x"])
total_reward += reward
if render:
env.render()
if done:
sum_last_rewards = sum(real_rewards)
memory[-1][3] = sum_last_rewards
if not done:
if episode_steps % train_steps == 0 and episode_steps > 0:
sum_last_rewards = sum(real_rewards[:])
logger.info("- trigger online batch training (reward {}, max_x {})".format(sum_last_rewards, max_x))
#memory[-1][3] = sum_last_rewards
dqn.learn_from_memory(memory[:])
memory.append([state, action, new_state, reward, done, info, new_action, extra_info])
prev_info = info
enqueue_episode(episodes, total_reward, episode_steps, memory)
#for total_reward, total_steps, memory in episodes:
# logger.info("training on memory with reward {}, steps {}".format(total_reward, total_steps))
# dqn.learn_from_memory(memory)
dqn.learn_from_memory(memory)
#[dqn.learn_from_memory(mem) for mem in full_memories]
dqn.model.save_weights("weights/alvaro_dqn_model.h5")
logger.info("Total reward {}, total_steps {}, max_x {}".format(
sum(real_rewards), episode_steps, max_x))
|
# Copyright (c) 2020 Graphcore Ltd. All Rights Reserved.
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This file has been modified by Graphcore Ltd.
import re
import logging
import operator
from tensorflow.python.distribute import distribution_strategy_context as distribute_ctx
import tensorflow as tf
from tensorflow.python.training import optimizer
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import array_ops
from tensorflow.python import ipu
from tensorflow.python.ipu.ops import cross_replica_ops
from tensorflow.python.framework import ops
from math import sqrt
from functools import reduce
class AdamWeightDecayOptimizer(tf.compat.v1.train.Optimizer):
"""A basic Adam optimizer that includes "correct" L2 weight decay."""
def __init__(self,
learning_rate,
loss_scaling,
weight_decay_rate=0.01,
beta_1=0.9,
beta_2=0.999,
epsilon=1e-6,
exclude_from_weight_decay=[
"GroupNorm", "Group_Norm", "LayerNorm", "Layer_Norm",
"bias"
],
name="AdamWeightDecayOptimizer",
weights_dtype=tf.float16,
debiasing=True,
outline_grad_fn=True):
"""Constructs a AdamWeightDecayOptimizer."""
super(AdamWeightDecayOptimizer, self).__init__(False, name)
self.learning_rate = tf.cast(learning_rate, dtype=weights_dtype)
self.loss_scaling = loss_scaling
self.weight_decay_rate = weight_decay_rate
self.beta_1 = beta_1
self.beta_2 = beta_2
self.epsilon = epsilon
self.exclude_from_weight_decay = exclude_from_weight_decay
self.debiasing = debiasing
if self.debiasing:
self.step = tf.get_variable('adam_step_counter',
dtype=tf.int32,
initializer=[1],
trainable=False)
self.outline_grad_fn = outline_grad_fn
def apply_gradients(self, grads_and_vars, global_step=None, name=None):
"""See base class."""
assignments = []
for (grad, param) in grads_and_vars:
if grad is None or param is None:
continue
param_name = self._get_variable_name(param.name)
m = tf.get_variable(name=param_name + "/adam_m",
shape=param.shape.as_list(),
dtype=tf.float32,
trainable=False,
initializer=tf.zeros_initializer())
v = tf.get_variable(name=param_name + "/adam_v",
shape=param.shape.as_list(),
dtype=tf.float32,
trainable=False,
initializer=tf.zeros_initializer())
def grad_fn(grad, param, m, v):
cast_grad = tf.cast(grad, dtype=tf.float32)
cast_grad = cast_grad / self.loss_scaling
# Standard Adam update.
next_m = (tf.multiply(self.beta_1, m) +
tf.multiply(1.0 - self.beta_1, cast_grad))
next_v = (tf.multiply(self.beta_2, v) +
tf.multiply(1.0 - self.beta_2, tf.square(cast_grad)))
update = tf.cast(next_m / (tf.sqrt(next_v) + self.epsilon),
param.dtype)
# Beta scaling of momentum and velocity
if self.debiasing:
beta_1_power = tf.math.pow(tf.cast(self.beta_1, tf.float32), tf.cast(self.step + 1, tf.float32))
beta_2_power = tf.math.pow(self.beta_2, tf.cast(self.step + 1, tf.float32))
bias_correction = tf.cast(tf.math.sqrt(1 - beta_2_power) / (1 - beta_1_power), tf.float16)
else:
bias_correction = tf.cast(tf.constant(1), tf.float16)
# Just adding the square of the weights to the loss function is *not*
# the correct way of using L2 regularization/weight decay with Adam,
# since that will interact with the m and v parameters in strange ways.
#
# Instead we want to decay the weights in a manner that doesn't interact
# with the m/v parameters. This is equivalent to adding the square
# of the weights to the loss with plain (non-momentum) SGD.
if self._do_use_weight_decay(param_name):
update += self.weight_decay_rate * param
update_with_lr = tf.cast(self.learning_rate, param.dtype) * update * bias_correction
next_param = param - update_with_lr
return next_param, next_v, next_m
if self.outline_grad_fn:
grad_fn = ipu.outlined_function(grad_fn, unique_sharding=True)
next_param, next_v, next_m = grad_fn(grad, param, m, v)
assignments.extend(
[param.assign(next_param),
m.assign(next_m),
v.assign(next_v)])
# We add the update for the step
if self.debiasing:
assignments.extend([self.step.assign(self.step + 1)])
return tf.group(*assignments, name=name)
def _do_use_weight_decay(self, param_name):
"""Whether to use L2 weight decay for `param_name`."""
if not self.weight_decay_rate:
return False
if self.exclude_from_weight_decay:
for r in self.exclude_from_weight_decay:
if re.search(r, param_name) is not None:
return False
return True
def _get_variable_name(self, param_name):
"""Get the variable name from the tensor name."""
m = re.match("^(.*):\\d+$", param_name)
if m is not None:
param_name = m.group(1)
return param_name
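# Illustrative sketch of how an optimizer such as AdamWeightDecayOptimizer can be wired into a
# TF1-style training graph. The loss tensor, learning-rate schedule and loss_scaling value are
# placeholders, not settings taken from the actual training configs.
def _example_build_train_op(loss, learning_rate, loss_scaling=128.0):
    optimizer = AdamWeightDecayOptimizer(
        learning_rate=learning_rate,
        loss_scaling=loss_scaling,
        weight_decay_rate=0.01)
    # Gradients are computed on the scaled loss; the optimizer divides the scaling back out
    # inside grad_fn before applying the Adam update.
    tvars = tf.trainable_variables()
    grads = tf.gradients(loss * loss_scaling, tvars)
    return optimizer.apply_gradients(zip(grads, tvars))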
class MixedPrecisionAdamWeightDecayOptimizer(tf.train.Optimizer):
"""A basic Adam optimizer that includes "correct" L2 weight decay."""
def __init__(self,
learning_rate,
loss_scaling,
weight_decay_rate=0.01,
beta_1=0.9,
beta_2=0.999,
epsilon=1e-6,
exclude_from_weight_decay=[
"GroupNorm", "Group_Norm", "LayerNorm", "Layer_Norm",
"bias"
],
name="AdamWeightDecayOptimizer",
weights_dtype=tf.float16,
debiasing=True,
outline_grad_fn=True):
"""Constructs a AdamWeightDecayOptimizer."""
super(MixedPrecisionAdamWeightDecayOptimizer, self).__init__(False, name)
self.learning_rate = learning_rate
self.loss_scaling = loss_scaling
self.weight_decay_rate = weight_decay_rate
self.beta_1 = tf.cast(beta_1, tf.float16)
self.beta_2 = beta_2
self.epsilon = epsilon * self.loss_scaling
self.exclude_from_weight_decay = exclude_from_weight_decay
self.debiasing = debiasing
if self.debiasing:
self.step = tf.get_variable('adam_step_counter',
dtype=tf.int32,
initializer=[1],
trainable=False)
self.outline_grad_fn = outline_grad_fn
def apply_gradients(self, grads_and_vars, global_step=None, name=None):
"""See base class."""
assignments = []
ordered_grad_and_vars = sorted(
grads_and_vars, key=lambda x: reduce(operator.mul, x[0].shape, 1))
for (grad, param) in ordered_grad_and_vars:
if grad is None or param is None:
continue
param_name = self._get_variable_name(param.name)
m = tf.get_variable(name=param_name + "/adam_m",
shape=param.shape.as_list(),
dtype=tf.float16,
trainable=False,
initializer=tf.zeros_initializer())
v = tf.get_variable(name=param_name + "/adam_v",
shape=param.shape.as_list(),
dtype=tf.float32,
trainable=False,
initializer=tf.zeros_initializer())
def grad_fn(grad, param, m, v):
param_dtype = param.dtype
# Standard Adam update.
next_m = (tf.multiply(self.beta_1, m) +
tf.multiply(1.0 - self.beta_1, grad))
cast_grad = tf.cast(grad, dtype=tf.float32)
next_v = (tf.multiply(self.beta_2, v) +
tf.multiply(1.0 - self.beta_2, tf.square(cast_grad)))
# Beta scaling of momentum and velocity
if self.debiasing:
beta_1_power = tf.math.pow(tf.cast(self.beta_1, tf.float32), tf.cast(self.step + 1, tf.float32))
beta_2_power = tf.math.pow(self.beta_2, tf.cast(self.step + 1, tf.float32))
bias_correction = tf.cast(tf.math.sqrt(1 - beta_2_power) / (1 - beta_1_power), tf.float32)
else:
bias_correction = tf.cast(tf.constant(1), tf.float32)
update = tf.cast(next_m, tf.float32) / (tf.sqrt(next_v) + self.epsilon)
# Just adding the square of the weights to the loss function is *not*
# the correct way of using L2 regularization/weight decay with Adam,
# since that will interact with the m and v parameters in strange ways.
#
# Instead we want to decay the weights in a manner that doesn't interact
# with the m/v parameters. This is equivalent to adding the square
# of the weights to the loss with plain (non-momentum) SGD.
param_fp32 = tf.cast(param, tf.float32)
if self._do_use_weight_decay(param_name):
update += self.weight_decay_rate * param_fp32
lr_32 = tf.cast(self.learning_rate, tf.float32)
update_with_lr = lr_32 * update * bias_correction
next_param = tf.cast(param_fp32 - update_with_lr, param_dtype)
return next_param, next_v, next_m
if self.outline_grad_fn:
grad_fn = ipu.outlined_function(grad_fn, unique_sharding=True)
next_param, next_v, next_m = grad_fn(grad, param, m, v)
assignments.extend(
[param.assign(next_param),
m.assign(next_m),
v.assign(next_v)])
if self.debiasing:
assignments.extend([self.step.assign(self.step + 1)])
return tf.group(*assignments, name=name)
def _do_use_weight_decay(self, param_name):
"""Whether to use L2 weight decay for `param_name`."""
if not self.weight_decay_rate:
return False
if self.exclude_from_weight_decay:
for r in self.exclude_from_weight_decay:
if re.search(r, param_name) is not None:
return False
return True
def _get_variable_name(self, param_name):
"""Get the variable name from the tensor name."""
m = re.match("^(.*):\\d+$", param_name)
if m is not None:
param_name = m.group(1)
return param_name
class LAMBOptimizer(tf.compat.v1.train.Optimizer):
"""LAMB (Layer-wise Adaptive Moments optimizer for Batch training).
This class has been adapted by Graphcore Ltd from NVIDIA code at
https://github.com/NVIDIA/DeepLearningExamples/
"""
    # A new optimizer that includes correct L2 weight decay, adaptive
    # element-wise updating, and layer-wise adaptation. The LAMB optimizer
    # was proposed by <NAME>, <NAME>, <NAME>, <NAME>,
    # <NAME>, and <NAME> in a paper titled Reducing BERT
    # Pre-Training Time from 3 Days to 76 Minutes (arxiv.org/abs/1904.00962)
#
def __init__(self,
learning_rate,
loss_scaling=1.0,
weight_decay_rate=0.0,
beta_1=0.9,
beta_2=0.999,
epsilon=1e-4,
exclude_from_weight_decay=None,
exclude_from_layer_adaptation=None,
name="LAMBOptimizer",
high_precision=True,
use_nvlamb=False,
debiasing=True,
weight_clipping=0,
clipping_value=1.0,
outline_grad_fn=True):
"""Constructs a LAMBOptimizer."""
super(LAMBOptimizer, self).__init__(False, name)
        # We assume that the weights are input in fp16.
        # The high_precision flag makes the last part of the optimizer run in fp32;
        # if it is False, the weight update is done in fp16.
self.beta_1 = tf.cast(beta_1, dtype=tf.float32)
self.beta_2 = tf.cast(beta_2, dtype=tf.float32)
self.loss_scaling = loss_scaling
self.epsilon = tf.cast(epsilon, dtype=tf.float16)
logging.info("Setting Epsilon to {}".format(epsilon))
self.high_precision = high_precision
if self.high_precision:
logging.info("Configured LAMB to use fp32 intermediate results")
self.target_type = tf.float32
else:
logging.info("Configured LAMB to use fp16 intermediate results")
self.target_type = tf.float16
self.weight_decay_rate = weight_decay_rate
self.weight_clip = weight_clipping
if self.weight_clip:
logging.info("Clipping the norm of the weights at {}".format(
self.weight_clip))
else:
logging.info("Not clipping the norm of the weights.")
self.learning_rate = learning_rate
self.exclude_from_weight_decay = exclude_from_weight_decay
self.exclude_from_layer_adaptation = exclude_from_layer_adaptation
        # If True, use the NVLAMB implementation (see the NVIDIA blog post linked below):
self.use_nvlamb = use_nvlamb
if self.use_nvlamb:
logging.info("Using NVLAMB")
# If true we debias the momenta (M and V) and if it is false we don't
self.debiasing = debiasing
if self.debiasing or self.use_nvlamb:
logging.info("Using debiasing for M and V tensors")
else:
logging.info("Not using debiasing for M and V tensors")
if self.use_nvlamb or self.debiasing:
self.step = tf.get_variable('lamb_step_counter',
dtype=tf.int32,
initializer=[1],
trainable=False)
# https://developer.nvidia.com/blog/pretraining-bert-with-layer-wise-adaptive-learning-rates/
# -----
self.clipping_value = tf.cast(clipping_value, dtype=tf.float32)
self.outline_grad_fn = outline_grad_fn
def clipped_norm(self, gradients_list):
        # We compute the total (global) norm of the gradients
squared_gradients = [
tf.reduce_sum(
tf.square(tf.cast(g, dtype=tf.float32) / self.loss_scaling))
for g in gradients_list
]
global_norm = tf.add_n(squared_gradients)
global_norm = tf.sqrt(global_norm)
clipped_global_norm = tf.maximum(global_norm, self.clipping_value)
return clipped_global_norm
def apply_gradients(self, grads_and_vars, global_step=None, name=None):
if distribute_ctx.has_strategy():
return distribute_ctx.get_replica_context().merge_call(
self.core_apply_gradients, args=(grads_and_vars, global_step, name))
else:
return self.core_apply_gradients(None, grads_and_vars, global_step, name)
def core_apply_gradients(self, distribution, grads_and_vars, global_step=None, name=None):
"""See base class."""
assignments = []
if self.use_nvlamb:
global_norm = self.clipped_norm([g for g, v in grads_and_vars])
        # We sort the gradients and variables by the size of the variables (smallest first)
ordered_grad_and_vars = sorted(
grads_and_vars, key=lambda x: reduce(operator.mul, x[0].shape, 1))
for (grad, param) in ordered_grad_and_vars:
if grad is None or param is None:
continue
param_name = self._get_variable_name(param.name)
# Momentum
m = tf.get_variable(name=f"{param_name}/lamb_m",
shape=param.shape.as_list(),
dtype=tf.float32,
trainable=False,
initializer=tf.zeros_initializer())
# Velocity
v = tf.get_variable(name=f"{param_name}/lamb_v",
shape=param.shape.as_list(),
dtype=tf.float32,
trainable=False,
initializer=tf.zeros_initializer())
def grad_fn(grad, param, m, v):
# We convert the gradient to fp32 and we rescale it
cast_grad = tf.cast(grad, dtype=tf.float32)
cast_grad = cast_grad / self.loss_scaling
if self.use_nvlamb:
                    # We clip the gradients by the (floored) global norm
cast_grad = cast_grad * self.clipping_value / global_norm
# Standard Adam update.
next_m = (tf.multiply(self.beta_1, m) +
tf.multiply(1.0 - self.beta_1, cast_grad))
next_v = (tf.multiply(self.beta_2, v) +
tf.multiply(1.0 - self.beta_2, tf.square(cast_grad)))
                # Bias correction (debiasing) of momentum and velocity
if self.debiasing:
m_hat = next_m / (1.0 - tf.pow(
self.beta_1, tf.cast(self.step, dtype=tf.float32))) # x10
v_hat = next_v / (1.0 - tf.pow(
self.beta_2, tf.cast(self.step, dtype=tf.float32))
) # x1000
else:
m_hat = next_m
v_hat = next_v
# TODO: Check if it is possible to convert to fp16 here.
# m_hat = tf.cast(m_hat, dtype = tf.float16)
# v_hat = tf.cast(v_hat, dtype = tf.float16)
update = m_hat / (tf.sqrt(tf.math.abs(v_hat)) +
tf.cast(self.epsilon, dtype=v_hat.dtype))
# Just adding the square of the weights to the loss function is *not*
# the correct way of using L2 regularization/weight decay with Adam,
# since that will interact with the m and v parameters in strange ways.
#
                # Instead we want to decay the weights in a manner that doesn't interact
# with the m/v parameters. This is equivalent to adding the square
# of the weights to the loss with plain (non-momentum) SGD.
if self._do_use_weight_decay(param_name):
update += tf.cast(self.weight_decay_rate,
dtype=update.dtype) * tf.cast(
param, dtype=update.dtype)
reshaped_update = tf.reshape(update, [-1])
ratio = 1.0
if self._do_layer_adaptation(param_name):
reshaped_param = tf.reshape(param, [-1])
# Norms are then computed in fp32
w_norm = linalg_ops.norm(tf.cast(reshaped_param,
dtype=tf.float32),
ord=2,
axis=-1)
u_norm = linalg_ops.norm(reshaped_update, ord=2, axis=-1)
if self.weight_clip:
w_norm = tf.math.minimum(
w_norm, tf.cast(self.weight_clip, dtype=w_norm.dtype))
                    # We set the ratio to 1 if either the w norm or the u norm is 0
ratio = array_ops.where(
math_ops.greater(w_norm, 0),
array_ops.where(
math_ops.greater(u_norm, 0),
(tf.cast(w_norm, dtype=tf.float32) / u_norm),
tf.constant(1.0, dtype=tf.float32, shape=w_norm.shape)),
tf.constant(1.0, dtype=tf.float32, shape=w_norm.shape))
                    # We reshape the ratio so that it broadcasts against the update
ratio = tf.reshape(ratio, shape=ratio.shape.as_list() + [1])
# We combine the learning rate and the ratio at fp32
ratio = ratio * self.learning_rate
                # We now downcast to the target precision for the next operation
                # (if a fused scaled add is available this cast is not needed)
ratio = tf.cast(ratio, dtype=self.target_type)
reshaped_update = tf.cast(reshaped_update, dtype=self.target_type)
update_with_lr = ratio * reshaped_update
# Backward transform to the same as param
update_with_lr = tf.reshape(update_with_lr, shape=param.shape)
update_with_lr = tf.cast(update_with_lr, dtype=param.dtype)
next_param = param - update_with_lr
return next_param, next_m, next_v
if self.outline_grad_fn:
grad_fn = ipu.outlined_function(grad_fn, unique_sharding=True)
next_param, next_m, next_v = grad_fn(grad, param, m, v)
            # We add the assignments for the parameter and its moment slots
assignments.extend(
[param.assign(next_param),
m.assign(next_m),
v.assign(next_v)])
# We add the update for the step
if self.use_nvlamb or self.debiasing:
assignments.extend([self.step.assign(self.step + 1)])
return tf.group(*assignments, name=name)
def _do_use_weight_decay(self, param_name):
"""Whether to use L2 weight decay for `param_name`."""
if not self.weight_decay_rate:
return False
if self.exclude_from_weight_decay:
for r in self.exclude_from_weight_decay:
if re.search(r, param_name) is not None:
return False
return True
def _do_layer_adaptation(self, param_name):
"""Whether to do layer-wise learning rate adaptation for
`param_name`."""
if self.exclude_from_layer_adaptation:
for r in self.exclude_from_layer_adaptation:
if re.search(r, param_name) is not None:
return False
return True
def _get_variable_name(self, param_name):
"""Get the variable name from the tensor name."""
m = re.match("^(.*):\\d+$", param_name)
if m is not None:
param_name = m.group(1)
return param_name
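# Note on the layer adaptation above (comment only, added for clarity): for a
# layer with weight norm ||w|| = 3.0 and update norm ||u|| = 0.5 the trust
# ratio is ||w|| / ||u|| = 6.0, so the applied step is learning_rate * 6.0 * u.
# Layers whose updates are small relative to their weights therefore take
# proportionally larger steps, which is what lets LAMB tolerate very large
# batch sizes.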
class StageMomentumOptimizer(tf.compat.v1.train.Optimizer):
"""
    Applies different learning-rate and momentum decay factors to the weights of different pipeline stages.
"""
def __init__(self,
learning_rate,
momentum,
trainable_variables,
stage_weights=None,
stage_lr_decay=None,
stage_mom_decay=None):
super(StageMomentumOptimizer,
self).__init__(False, name="StageMomentumOptimizer")
        self.stage_weights = stage_weights
        self.lr = learning_rate
        self.momentum = momentum
        self.tvars = trainable_variables
        self.stage_lr_decay = {}
        self.stage_mom_decay = {}
        self.momentum_offset = 0.2
        self.lr_offset = 0.2
        # When stage_weights is None (the default), apply_gradients falls back
        # to a plain MomentumOptimizer, so the per-stage decay tables and the
        # maximum stage index are not needed.
        if self.stage_weights:
            self.max_stage_weights = max(self.stage_weights.keys())
            if stage_lr_decay:
                self.stage_lr_decay = stage_lr_decay
            else:
                self.lr_factor = (1.0 - self.lr_offset) / self.max_stage_weights
                for k in self.stage_weights.keys():
                    self.stage_lr_decay[k] = self.lr_offset + k * self.lr_factor
            if stage_mom_decay:
                self.stage_mom_decay = stage_mom_decay
            else:
                self.mom_factor = (1.0 - self.momentum_offset) / \
                    self.max_stage_weights
                for k in self.stage_weights.keys():
                    self.stage_mom_decay[k] = self.momentum_offset + \
                        k * self.mom_factor
def compute_gradients(self, loss, var_list=None, **kwargs):
if not var_list:
var_list = self.tvars
return tf.train.MomentumOptimizer(
self.lr, self.momentum).compute_gradients(loss,
var_list=var_list,
**kwargs)
def apply_gradients(self, grads_and_vars, global_step=None, name=None):
ops = []
if not self.stage_weights:
            return tf.train.MomentumOptimizer(
                self.lr,
                momentum=self.momentum).apply_gradients(
                    grads_and_vars, global_step=global_step, name=name)
for stage, weights in self.stage_weights.items():
lr_decay = self.stage_lr_decay[stage]
mom_decay = self.stage_mom_decay[stage]
lr = self.lr * lr_decay
mom = self.momentum * mom_decay
grads_and_vars_opt = [(g, v) for g, v in grads_and_vars
if v.name in weights]
            ops.append(
                tf.train.MomentumOptimizer(lr, momentum=mom).apply_gradients(
                    grads_and_vars_opt, name=name))
        return tf.group(*ops, name=name)
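# Worked example of the default stage decay (comment only): with 14 stages
# (keys 0..13) and lr_offset = 0.2, lr_factor = (1.0 - 0.2) / 13, so stage 0
# trains with 0.2 * learning_rate and stage 13 with the full learning_rate,
# increasing linearly in between; the momentum decay follows the same scheme
# with momentum_offset = 0.2.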
def mixed_precision_global_norm(t_list, dtype=tf.float32):
"""Computes the global norm of multiple tensors.
Given a tuple or list of tensors `t_list`, this operation returns the
global norm of the elements in all tensors in `t_list`. The global norm is
computed as:
`global_norm = sqrt(sum([l2norm(t)**2 for t in t_list]))`
Any entries in `t_list` that are of type None are ignored.
Args:
t_list: A tuple or list of mixed `Tensors`
dtype: datatype of the norm
Returns:
A 0-D (scalar) `Tensor` of type `float`.
"""
t_list = list(t_list)
squared_norms = []
for t in t_list:
with ops.colocate_with(t):
squared_norm = tf.reduce_sum(tf.pow(tf.cast(t, dtype), 2))
squared_norms.append(squared_norm)
return tf.sqrt(tf.reduce_sum(tf.stack(squared_norms)), name="global_norm")
def mixed_precision_clip_by_global_norm(t_list, clip_norm):
"""Clips values of multiple tensors by the ratio of the sum of their norms.
Given a tuple or list of tensors `t_list`, and a clipping ratio `clip_norm`,
this operation returns a list of clipped tensors `list_clipped`
and the global norm (`global_norm`) of all tensors in `t_list`.
To perform the clipping, the values `t_list[i]` are set to:
t_list[i] * clip_norm / max(global_norm, clip_norm)
where:
global_norm = sqrt(sum([l2norm(t)**2 for t in t_list]))
If `clip_norm > global_norm` then the entries in `t_list` remain as they are,
otherwise they are all shrunk by the global ratio.
If `global_norm == infinity` then the entries in `t_list` are all set to `NaN`
to signal that an error occurred.
Any of the entries of `t_list` that are of type `None` are ignored.
Args:
t_list: A tuple or list of mixed `Tensors`
clip_norm: A 0-D (scalar) `Tensor` > 0. The clipping ratio.
Returns:
t_list_clipped: A list of `Tensors` of the same type as `t_list`.
norm: A 0-D (scalar) `Tensor` representing the global norm.
"""
t_list = list(t_list)
norm = mixed_precision_global_norm(t_list, tf.float32)
scale_for_finite = clip_norm * tf.minimum(1.0 / norm, 1.0 / clip_norm)
scale = tf.where(tf.is_finite(norm), scale_for_finite, float("nan"))
t_list_clipped = []
for t in t_list:
_scale = tf.cast(scale, t.dtype)
with ops.colocate_with(t):
t_list_clipped.append(t * _scale)
return t_list_clipped, norm
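# Worked example (comment only): for two tensors holding [3.0] (fp16) and
# [4.0] (fp32), global_norm = sqrt(3**2 + 4**2) = 5.0.  With clip_norm = 2.5
# the scale is 2.5 * min(1/5.0, 1/2.5) = 0.5, so the clipped tensors become
# [1.5] and [2.0] and their joint norm is exactly clip_norm; when
# global_norm <= clip_norm the scale is 1.0 and the tensors are unchanged.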
class GlobalNormClippingOptimizer(optimizer.Optimizer):
def __init__(self, optimizer, clip_norm=1.0, name="GlobalNormClippingOptimizer"):
super(GlobalNormClippingOptimizer, self).__init__(False, name)
self._optimizer = optimizer
self._clip_norm = clip_norm
self._slots = optimizer._slots
def compute_gradients(self, loss, var_list=None, **kwargs):
"""
Compute gradients using the underlying optimizer.
"""
return self._optimizer.compute_gradients(loss, var_list=var_list, **kwargs)
def apply_gradients(self, grads_and_vars, global_step=None, name=None):
"""
Clips gradients by norm first, then applies gradients.
"""
# Unzip gradients and variables
gradients, variables = list(zip(*grads_and_vars))
# Clip gradients by global norm
(gradients, _) = mixed_precision_clip_by_global_norm(gradients, clip_norm=self._clip_norm)
# Apply gradients
return self._optimizer.apply_gradients(list(zip(gradients, variables)), global_step=global_step, name=name)
def variables(self):
"""
Forwards the variables from the underlying optimizer.
"""
return self._optimizer.variables()
def get_slot_names(self):
"""
Forwards the get_slot_names from the underlying optimizer.
"""
return self._optimizer.get_slot_names()
def get_slot(self, var, name):
"""
Forwards the get_slot from the underlying optimizer.
"""
return self._optimizer.get_slot(var, name)
def _zeros_slot(self, var, slot_name, op_name):
"""
Forwards the _zeros_slot from the underlying optimizer.
"""
return self._optimizer._zeros_slot(var, slot_name, op_name)
def get_optimizer(learning_rate, loss_scaling, num_replicas, opts):
"""Configure and return the optimizer"""
scale_down_grads_factor = loss_scaling
if opts['reduction_type'] == "mean":
scale_down_grads_factor *= opts['gradient_accumulation_count']
elif opts['reduction_type'] == "sum":
# The cross replica optimizer will normalise by the number
# of replicas. We need to undo this normalising by upscaling
# the gradients by the number of replicas.
scale_down_grads_factor /= num_replicas
scaled_learning_rate = learning_rate / scale_down_grads_factor
# When using replicated tensor sharding, do not use outlining
# in the optimizer
outline_optimizer_grad_fn = not opts["replicated_tensor_sharding"]
if opts['optimizer'].lower() == 'sgd':
optimizer = tf.train.GradientDescentOptimizer(scaled_learning_rate)
elif opts['optimizer'].lower() == 'momentum':
optimizer = tf.train.MomentumOptimizer(scaled_learning_rate,
momentum=opts['momentum'],
use_nesterov=False)
elif opts['optimizer'].lower() == 'adam':
optimizer = tf.train.AdamOptimizer(scaled_learning_rate,
beta1=opts["beta1"],
beta2=opts["beta2"],
epsilon=opts["epsilon"])
elif opts['optimizer'].lower() == 'adamw':
optimizer = AdamWeightDecayOptimizer(
learning_rate,
loss_scaling=scale_down_grads_factor,
beta_1=opts["beta1"],
beta_2=opts["beta2"],
weight_decay_rate=opts["weight_decay_rate"],
epsilon=opts["epsilon"],
debiasing=opts["use_debiasing"],
outline_grad_fn=outline_optimizer_grad_fn,
)
optimizer = GlobalNormClippingOptimizer(
optimizer, clip_norm=scale_down_grads_factor * num_replicas)
elif opts['optimizer'].lower() == 'mpadamw':
optimizer = MixedPrecisionAdamWeightDecayOptimizer(
learning_rate,
loss_scaling=scale_down_grads_factor,
beta_1=opts["beta1"],
beta_2=opts["beta2"],
weight_decay_rate=opts["weight_decay_rate"],
epsilon=opts["epsilon"],
debiasing=opts["use_debiasing"],
outline_grad_fn=outline_optimizer_grad_fn,
)
optimizer = GlobalNormClippingOptimizer(
optimizer, clip_norm=scale_down_grads_factor * num_replicas)
elif opts['optimizer'].lower() == 'lamb':
optimizer = LAMBOptimizer(
learning_rate,
loss_scaling=scale_down_grads_factor,
beta_1=opts["beta1"],
beta_2=opts["beta2"],
weight_decay_rate=opts["weight_decay_rate"],
high_precision=opts["increase_optimiser_precision"],
use_nvlamb=opts["use_nvlamb"],
epsilon=opts["epsilon"],
debiasing=opts["use_debiasing"],
exclude_from_layer_adaptation=["bias", "beta", "gamma"],
exclude_from_weight_decay=["bias", "beta"],
outline_grad_fn=outline_optimizer_grad_fn,
)
elif opts['optimizer'].lower() == 'custom':
tvars = tf.trainable_variables()
stage_weights = {}
        # The network is split into 14 sections to optimize (1 embedding + 12 encoder layers + 1 loss)
for i in range(14):
stage_weights[i] = []
        for weight_ in tvars:
            if "embeddings" in weight_.name:
                stage_weights[0].append(weight_.name)
            elif 'squad' in weight_.name:
                stage_weights[13].append(weight_.name)
            else:
                # Encoder weights go to stage "layer index + 1"
                pattern = r"layer_(\d+)"
                layer_num = re.findall(pattern, weight_.name)[0]
                stage_weights[int(layer_num) + 1].append(weight_.name)
optimizer = StageMomentumOptimizer(learning_rate,
opts['momentum'],
tvars,
stage_weights=stage_weights)
else:
raise ValueError(f"Optimizer {opts['optimizer']} not recognised")
if num_replicas > 1:
optimizer = ipu.optimizers.cross_replica_optimizer.CrossReplicaOptimizer(
optimizer)
return optimizer
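# ---------------------------------------------------------------------------
# Minimal usage sketch for get_optimizer (illustration only, not part of the
# original training script; the concrete values below are assumptions).  The
# opts keys are the ones read by get_optimizer above; num_replicas = 1 keeps
# the sketch independent of the IPU cross-replica wrapper, and the TF1-style
# `tf` import at the top of this file is assumed.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    _example_opts = {
        "optimizer": "momentum",
        "momentum": 0.9,
        "reduction_type": "mean",
        "gradient_accumulation_count": 4,
        "replicated_tensor_sharding": False,
    }
    _example_optimizer = get_optimizer(
        learning_rate=1e-3, loss_scaling=1.0, num_replicas=1, opts=_example_opts)
    print("Built optimizer:", type(_example_optimizer).__name__)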
|
import unittest.mock as mock
from unittest.mock import Mock, MagicMock
import unittest
import cloudpickle
from queue import Empty as EmptyQueue
from mlagents.envs.subprocess_env_manager import (
SubprocessEnvManager,
EnvironmentResponse,
EnvironmentCommand,
worker,
StepResponse,
)
from mlagents.envs.base_unity_environment import BaseUnityEnvironment
def mock_env_factory(worker_id: int):
return mock.create_autospec(spec=BaseUnityEnvironment)
class MockEnvWorker:
def __init__(self, worker_id, resp=None):
self.worker_id = worker_id
self.process = None
self.conn = None
self.send = Mock()
self.recv = Mock(return_value=resp)
self.waiting = False
class SubprocessEnvManagerTest(unittest.TestCase):
def test_environments_are_created(self):
SubprocessEnvManager.create_worker = MagicMock()
env = SubprocessEnvManager(mock_env_factory, 2)
# Creates two processes
env.create_worker.assert_has_calls(
[
mock.call(0, env.step_queue, mock_env_factory),
mock.call(1, env.step_queue, mock_env_factory),
]
)
self.assertEqual(len(env.env_workers), 2)
def test_worker_step_resets_on_global_done(self):
env_mock = Mock()
env_mock.reset = Mock(return_value="reset_data")
env_mock.global_done = True
def mock_global_done_env_factory(worker_id: int):
return env_mock
mock_parent_connection = Mock()
mock_step_queue = Mock()
step_command = EnvironmentCommand("step", (None, None, None, None))
close_command = EnvironmentCommand("close")
mock_parent_connection.recv.side_effect = [step_command, close_command]
mock_parent_connection.send = Mock()
worker(
mock_parent_connection,
mock_step_queue,
cloudpickle.dumps(mock_global_done_env_factory),
0,
)
# recv called twice to get step and close command
self.assertEqual(mock_parent_connection.recv.call_count, 2)
expected_step_response = StepResponse(
all_brain_info="reset_data", timer_root=mock.ANY
)
# worker returns the data from the reset
mock_step_queue.put.assert_called_with(
EnvironmentResponse("step", 0, expected_step_response)
)
def test_reset_passes_reset_params(self):
SubprocessEnvManager.create_worker = lambda em, worker_id, step_queue, env_factory: MockEnvWorker(
worker_id, EnvironmentResponse("reset", worker_id, worker_id)
)
manager = SubprocessEnvManager(mock_env_factory, 1)
params = {"test": "params"}
manager.reset(params, False)
manager.env_workers[0].send.assert_called_with("reset", (params, False, None))
def test_reset_collects_results_from_all_envs(self):
SubprocessEnvManager.create_worker = lambda em, worker_id, step_queue, env_factory: MockEnvWorker(
worker_id, EnvironmentResponse("reset", worker_id, worker_id)
)
manager = SubprocessEnvManager(mock_env_factory, 4)
params = {"test": "params"}
res = manager.reset(params)
for i, env in enumerate(manager.env_workers):
env.send.assert_called_with("reset", (params, True, None))
env.recv.assert_called()
# Check that the "last steps" are set to the value returned for each step
self.assertEqual(
manager.env_workers[i].previous_step.current_all_brain_info, i
)
assert res == list(map(lambda ew: ew.previous_step, manager.env_workers))
def test_step_takes_steps_for_all_non_waiting_envs(self):
SubprocessEnvManager.create_worker = lambda em, worker_id, step_queue, env_factory: MockEnvWorker(
worker_id, EnvironmentResponse("step", worker_id, worker_id)
)
manager = SubprocessEnvManager(mock_env_factory, 3)
manager.step_queue = Mock()
manager.step_queue.get_nowait.side_effect = [
EnvironmentResponse("step", 0, StepResponse(0, None)),
EnvironmentResponse("step", 1, StepResponse(1, None)),
EmptyQueue(),
]
step_mock = Mock()
last_steps = [Mock(), Mock(), Mock()]
manager.env_workers[0].previous_step = last_steps[0]
manager.env_workers[1].previous_step = last_steps[1]
manager.env_workers[2].previous_step = last_steps[2]
manager.env_workers[2].waiting = True
manager._take_step = Mock(return_value=step_mock)
res = manager.step()
for i, env in enumerate(manager.env_workers):
if i < 2:
env.send.assert_called_with("step", step_mock)
manager.step_queue.get_nowait.assert_called()
# Check that the "last steps" are set to the value returned for each step
self.assertEqual(
manager.env_workers[i].previous_step.current_all_brain_info, i
)
self.assertEqual(
manager.env_workers[i].previous_step.previous_all_brain_info,
last_steps[i].current_all_brain_info,
)
assert res == [
manager.env_workers[0].previous_step,
manager.env_workers[1].previous_step,
]
|
import lvgl as lv
from audio import Player
# RESOURCES_ROOT = "S:/Users/liujuncheng/workspace/iot/esp32/solution/MicroPython/smart_panel/smart_panel/"
RESOURCES_ROOT = "S:/data/pyamp/"
functionImage = [
RESOURCES_ROOT + "images/prev.png",
RESOURCES_ROOT + "images/play.png",
RESOURCES_ROOT + "images/next.png",
RESOURCES_ROOT + "images/favorite.png"]
currentMusic = 0
musicData = [
{
"title":"Counting Stars",
"album":"OneRepublic",
"album_url": RESOURCES_ROOT + "images/album_one_republic.jpg",
"url":"file://data/pyamp/audio/test_long.mp3",
"duration":11,
"favorite": False
},
{
"title":"Aube",
"album":"Darius",
"album_url": RESOURCES_ROOT + "images/album_darius.jpg",
"url":"file://data/pyamp/audio/spring.mp3",
"duration":155,
"favorite": False
},
]
start = False
anim = None
playedTime = None
slider = None
anim_timeline = None
player = None
durationTime = 0
currentValue = 0
image = [None, None, None, None]
albumCover = None
songTitle = None
albumTitle = None
totalTime = None
music_alive = False
def music_back_click_callback(e, win):
global anim_timeline
global start
global player
global music_alive
start = False
# stop animation
if (anim_timeline != None):
lv.anim_timeline_stop(anim_timeline)
lv.anim_timeline_del(anim_timeline)
anim_timeline = None
if (player != None):
player.pause()
player.close()
player = None
# load smart panel desktop
if (music_alive):
from smart_panel import load_smart_panel
load_smart_panel()
music_alive = False
def music_back_press_callback(e, image):
image.set_zoom(280)
def music_back_release_callback(e, image):
image.set_zoom(250)
def setLabelValue(label, value):
global slider
global anim
global start
global anim_timeline
global durationTime
    minute = value // 60  # whole minutes
second = value % 60
# if (slider.is_dragged() == True):
# print("drag: %d" % value)
# start = False
#
# lv.anim_timeline_stop(anim_timeline)
# lv.anim_timeline_del(anim_timeline)
# anim_timeline = None
#
# slider.set_value(value, lv.ANIM.ON)
# anim.set_time((durationTime - currentValue) * 1000)
# anim.set_values(currentValue, durationTime)
# anim_timeline = lv.anim_timeline_create()
# lv.anim_timeline_add(anim_timeline, 0, anim)
label.set_text('%02d:%02d'%(minute, second))
def setSpentTime(slider, value):
global playedTime
global currentValue
global durationTime
global image
global start
global anim_timeline
global player
global albumCover
global songTitle
global albumTitle
global totalTime
global currentMusic
global musicData
if (value >= durationTime):
# currentMusic += 1
# if (len(musicData) == currentMusic):
# currentMusic = 0
start = False
reset_music()
else:
currentValue = value
setLabelValue(playedTime, value)
slider.set_value(value, lv.ANIM.ON)
def cb(data):
print(data)
def reset_music():
global albumCover
global songTitle
global albumTitle
global totalTime
global musicData
global currentMusic
global durationTime
global slider
global anim
global image
global start
global currentValue
global anim_timeline
global playedTime
global player
if (anim_timeline != None):
lv.anim_timeline_stop(anim_timeline)
lv.anim_timeline_del(anim_timeline)
anim_timeline = None
albumCover.set_src(musicData[currentMusic]["album_url"])
songTitle.set_text(musicData[currentMusic]["title"])
albumTitle.set_text(musicData[currentMusic]["album"])
durationTime = musicData[currentMusic]["duration"]
currentValue = 0
slider.set_range(0, durationTime)
slider.set_value(0, lv.ANIM.ON)
anim.set_time(durationTime * 1000)
anim.set_values(0, durationTime)
anim_timeline = lv.anim_timeline_create()
lv.anim_timeline_add(anim_timeline, 0, anim)
setLabelValue(totalTime, durationTime)
setLabelValue(playedTime, 0)
if (player != None):
player.pause()
player.close()
player = None
if (start == False):
image[1].set_src(RESOURCES_ROOT + "images/play.png")
else:
image[1].set_src(RESOURCES_ROOT + "images/pause.png")
lv.anim_timeline_start(anim_timeline)
player = Player()
player.open()
player.play(musicData[currentMusic]["url"], sync=False)
player.on(cb)
if (musicData[currentMusic]["favorite"] == False):
image[3].set_src(RESOURCES_ROOT + "images/favorite.png")
else:
image[3].set_src(RESOURCES_ROOT + "images/favorited.png")
def controller_click_cb(e, func):
global anim
global start
global anim_timeline
global durationTime
global player
global image
global currentValue
global musicData
global currentMusic
print(func, anim_timeline)
if (func == "play"):
if (start == False):
start = True
if (currentValue == durationTime):
currentValue = 0
anim.set_time((durationTime - currentValue) * 1000)
anim.set_values(currentValue, durationTime)
anim_timeline = lv.anim_timeline_create()
lv.anim_timeline_add(anim_timeline, 0, anim)
lv.anim_timeline_start(anim_timeline)
image[1].set_src(RESOURCES_ROOT + "images/pause.png")
if (player == None):
player = Player()
player.open()
player.play(musicData[currentMusic]["url"], sync=False)
player.on(cb)
else:
player.resume()
# state = player.getState()
# print(state)
# if (state == 2):
# player.resume()
# image[1].set_src(RESOURCES_ROOT + "images/pause.png")
# else:
# player.pause()
# image[1].set_src(RESOURCES_ROOT + "images/play.png")
else:
start = False
image[1].set_src(RESOURCES_ROOT + "images/play.png")
lv.anim_timeline_stop(anim_timeline)
lv.anim_timeline_del(anim_timeline)
anim_timeline = None
anim.set_time((durationTime - currentValue) * 1000)
anim.set_values(currentValue, durationTime)
anim_timeline = lv.anim_timeline_create()
lv.anim_timeline_add(anim_timeline, 0, anim)
player.pause()
elif (func == "fav"):
if (musicData[currentMusic]["favorite"] == False):
image[3].set_src(RESOURCES_ROOT + "images/favorited.png")
musicData[currentMusic]["favorite"] = True
else:
musicData[currentMusic]["favorite"] = False
image[3].set_src(RESOURCES_ROOT + "images/favorite.png")
elif (func == "next"):
currentMusic += 1
if (len(musicData) == currentMusic):
currentMusic = 0
reset_music()
elif (func == "prev"):
currentMusic -= 1
if (currentMusic < 0):
currentMusic = len(musicData) - 1
reset_music()
class Music:
def createPage(self):
global anim
global playedTime
global durationTime
global slider
global audio_src
global player
global image
global music_alive
global currentMusic
global albumCover
global songTitle
global albumTitle
global totalTime
global anim_timeline
global scr
print("Enter Music")
# init scr
scr = lv.obj()
win = lv.obj(scr)
win.set_size(scr.get_width(), scr.get_height())
win.set_style_border_opa(0, 0)
win.set_style_radius(0, 0)
win.set_style_bg_color(lv.color_black(), 0)
win.clear_flag(lv.obj.FLAG.SCROLLABLE)
backImg=lv.img(win)
backImg.set_src(RESOURCES_ROOT + "images/back.png")
backImg.set_style_align(lv.ALIGN.LEFT_MID, 0)
backImg.add_flag(lv.obj.FLAG.CLICKABLE)
backImg.add_event_cb(lambda e: music_back_click_callback(e, win), lv.EVENT.CLICKED, None)
backImg.add_event_cb(lambda e: music_back_press_callback(e, backImg), lv.EVENT.PRESSED, None)
backImg.add_event_cb(lambda e: music_back_release_callback(e, backImg), lv.EVENT.RELEASED, None)
backImg.set_ext_click_area(30)
albumCover = lv.img(win)
albumCover.set_style_pad_left(12, 0)
albumCover.set_style_pad_top(10, 0)
songTitle = lv.label(win)
songTitle.set_style_text_font(lv.font_montserrat_20, 0)
songTitle.set_style_text_color(lv.color_white(), 0)
songTitle.align_to(albumCover, lv.ALIGN.TOP_LEFT, 130, 3)
albumTitle = lv.label(win)
albumTitle.set_style_text_font(lv.font_montserrat_16, 0)
albumTitle.set_style_text_color(lv.color_make(0xCC, 0xCC, 0xCC), 0)
albumTitle.align_to(songTitle, lv.ALIGN.OUT_BOTTOM_LEFT, 0, 12)
props = [lv.STYLE.BG_COLOR, 0]
transition_dsc = lv.style_transition_dsc_t()
transition_dsc.init(props, lv.anim_t.path_linear, 300, 0, None)
style_main = lv.style_t()
style_indicator = lv.style_t()
style_pressed_color = lv.style_t()
style_main.init()
style_main.set_bg_opa(lv.OPA.COVER)
style_main.set_bg_color(lv.color_make(0x66, 0x66, 0x66))
style_main.set_radius(lv.RADIUS.CIRCLE)
style_main.set_line_dash_width(1)
style_indicator.init()
style_indicator.set_bg_opa(lv.OPA.COVER)
style_indicator.set_bg_color(lv.color_white())
style_indicator.set_radius(lv.RADIUS.CIRCLE)
style_indicator.set_transition(transition_dsc)
style_pressed_color.init()
style_pressed_color.set_bg_color(lv.color_white())
# Create a slider and add the style
slider = lv.slider(win)
slider.remove_style_all() # Remove the styles coming from the theme
slider.add_style(style_main, lv.PART.MAIN)
slider.add_style(style_indicator, lv.PART.INDICATOR)
slider.add_style(style_pressed_color, lv.PART.INDICATOR | lv.STATE.PRESSED)
slider.align_to(albumTitle, lv.ALIGN.OUT_BOTTOM_LEFT, 0, 25)
slider.set_size(140, 1)
anim = lv.anim_t()
anim.init()
anim.set_var(slider)
playedTime = lv.label(win)
setLabelValue(playedTime, 0)
playedTime.set_style_text_font(lv.font_montserrat_16, 0)
playedTime.set_style_text_color(lv.color_white(), 0)
playedTime.align_to(slider, lv.ALIGN.OUT_BOTTOM_LEFT, 0, 15)
totalTime = lv.label(win)
totalTime.set_style_text_font(lv.font_montserrat_16, 0)
totalTime.set_style_text_color(lv.color_white(), 0)
totalTime.align_to(slider, lv.ALIGN.OUT_BOTTOM_RIGHT, 0, 15)
func_col_dsc = [80, 80, 80, 80, lv.GRID_TEMPLATE.LAST]
func_row_dsc = [40, lv.GRID_TEMPLATE.LAST]
funcContainer = lv.obj(win)
funcContainer.set_style_bg_opa(0x00, 0)
funcContainer.set_style_border_opa(0x00, 0)
funcContainer.set_layout(lv.LAYOUT_GRID.value)
funcContainer.set_grid_dsc_array(func_col_dsc, func_row_dsc)
funcContainer.set_grid_align(lv.GRID_ALIGN.SPACE_BETWEEN, lv.GRID_ALIGN.SPACE_BETWEEN)
funcContainer.set_align(lv.ALIGN.BOTTOM_MID)
funcContainer.set_size(320, 70)
for i in range(4):
image[i] = lv.img(funcContainer)
image[i].set_src(functionImage[i])
image[i].add_flag(lv.obj.FLAG.CLICKABLE)
image[i].set_ext_click_area(20)
image[i].set_grid_cell(lv.GRID_ALIGN.CENTER, i, 1, lv.GRID_ALIGN.CENTER, 0, 1)
if (i == 0):
image[i].add_event_cb(lambda e: controller_click_cb(e, "prev"), lv.EVENT.CLICKED, None)
elif (i == 1):
image[i].add_event_cb(lambda e: controller_click_cb(e, "play"), lv.EVENT.CLICKED, None)
elif (i == 2):
image[i].add_event_cb(lambda e: controller_click_cb(e, "next"), lv.EVENT.CLICKED, None)
elif (i == 3):
image[i].add_event_cb(lambda e: controller_click_cb(e, "fav"), lv.EVENT.CLICKED, None)
anim.set_custom_exec_cb(lambda a1, val: setSpentTime(slider, val))
reset_music()
from smart_panel import needAnimation
if (needAnimation):
lv.scr_load_anim(scr, lv.SCR_LOAD_ANIM.MOVE_LEFT, 500, 0, True)
else:
lv.scr_load_anim(scr, lv.SCR_LOAD_ANIM.NONE, 0, 0, True)
music_alive = True
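# ---------------------------------------------------------------------------
# Usage sketch (comment only, an assumption about the surrounding app): the
# smart_panel launcher is expected to open this page with
#     Music().createPage()
# after lvgl and the display driver have been initialised, mirroring how
# load_smart_panel() is called from music_back_click_callback above.
# ---------------------------------------------------------------------------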
|
# main.py (repository: TaaoWen/PSC_AgentBasedModellingCode)
'''
Agent Based Modelling
This Python code is used as the practical (Agent Based Modelling) for the module "Programming for Social Science".
This is the main file, and Agents classes are stored in "agentframework.py".
More detail can be found in comments below.
@author: <NAME>
@Version: Final
'''
# =============================================================================
# # ####################################Pay attention (Read before simulation)####################################
# # Please use "Ctrl + 5" to remove the comments for code block surrounded by "==="
# # Please use "Ctrl + 4" to comment in code block surrounded by "==="
# # Please use "Ctrl + 1" to remove or add comments for multiple lines together
# # It will be very troublesome and may introduce ERRORS if adding or removing "#" manually line by line!
# # If there is any problem, please contact me via <EMAIL>
# # or read "http://e-callisto.org/cospar2018/SpyderKeyboardShortcutsEditor.pdf", the top of the 2nd page
# =============================================================================
import sys
import random
import operator
import time
import tkinter
import matplotlib
# Please keep the next line for the GUI (animation), but comment it out for the other figures and outcomes
matplotlib.use('TkAgg')
import matplotlib.pyplot
import matplotlib.animation
import agentframework
import requests
import bs4
# Function: Write total amount stored by all the agents to txt file
def write_store_to_output(write_str):
"""
Write total amount stored by all the agents to "output_store.txt"
Note: The output does not contain the initial total amount
because it is 0 by default setting
Parameters
----------
write_str: str
str needs to be output in the .txt file
"""
# open the .txt file
with open("output_store.txt", "a+") as f:
        f.write(write_str)  # write the str
f.write("\n") # New line
# Function: Write environment to txt file
def write_environment_to_output(write_str):
"""
Write environment to "output_environment.txt"
The size of the matrix of the environment is the same as the matrix read
from "in.txt" because the code only changes the value of element and does
not change the size
Parameters
----------
write_str: str
str needs to be output in the .txt file
"""
# open the .txt file
with open("output_environment.txt", "a+") as f:
        # Write every value of this row except the last one, each followed by ","
        for j in range(len(write_str)-1):
            f.write(str(write_str[j]))  # write the value
            f.write(',')  # write ","
        f.write(str(write_str[-1]))  # Write the last value WITHOUT "," at the end of the line
        f.write("\n")  # New line
# Function: Obtain the distance between two agents based on the Euclidean Distance
def distance_between(agents_row_a, agents_row_b):
"""
Obtain the distance between two agents based on the Euclidean Distance
Parameters
----------
agents_row_a: agentframework.Agent
The framework of the first agent
agents_row_b: agentframework.Agent
The framework of the second agent
Returns
-------
distance_obtain: float
The distance between two agents based on Euclidean distance
"""
# Euclidean Distance
distance_obtain = (((agents_row_a.x - agents_row_b.x)**2) +
((agents_row_a.y - agents_row_b.y)**2))**0.5
return distance_obtain
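# =============================================================================
# # Worked example (comment only): for agent A at (x, y) = (0, 0) and agent B
# # at (3, 4), distance_between returns ((3**2) + (4**2))**0.5 = 5.0
# =============================================================================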
# Function: Calculate the distance between each pair of agent based on function "calculate_distance_0"
def calculate_distance_0(agents):
"""
    Obtain the timing to calculate the distance between each pair of agents,
    where both agentA and agentB run over all agents, and agentA != agentB
Parameters
----------
agents: list
The list of agents
Returns
-------
need_time: float
Timing needed to obtain the distance for all pair of agents based on this function
max_dis: float
The maximum distance between agents
min_dis: float
The minimum distance between agents
"""
# Initial setting for max and min distance
max_dis = distance_between(agents[0], agents[1])
min_dis = max_dis
start_time = time.time() # Time begin
# agentA and agentB are both from 1 to end
for i in range(0, num_of_agents, 1):
for j in range(0, num_of_agents, 1):
# agentA != agentB
if i != j:
# # Uncomment next line to print the distance between each pair of agents
# print("The distance between Agent", agents_row_a.ID, \
# "and Agent", agents_row_b.ID, \
# "is", distance_between(agents_row_a, agents_row_b))
# Update the max and min distance
max_dis = max(max_dis, distance_between(agents[i], agents[j]))
min_dis = min(min_dis, distance_between(agents[i], agents[j]))
end_time = time.time() # Time end
need_time = end_time - start_time # Time calculate
# # Uncomment next lines to print the max and min distances, as well as the timing
# print("Maximum distance is", max_dis, "and minimum distance is", min_dis)
# print("Running time is", need_time)
return need_time, max_dis, min_dis
# Function: Calculate the distance between each pair of agent based on function "calculate_distance_1"
def calculate_distance_1(agents):
"""
    Obtain the timing to calculate the distance between each pair of agents,
    where both agentA and agentB run over all agents, but the distance is ONLY calculated
when agents_row_a.ID > agents_row_b.ID
Parameters
----------
agents: list
The list of agents
Returns
-------
need_time: float
Timing needed to obtain the distance for all pair of agents based on this function
max_dis: float
The maximum distance between agents
min_dis: float
The minimum distance between agents
"""
# Initial setting for max and min distance
max_dis = distance_between(agents[0], agents[1])
min_dis = max_dis
start_time = time.time() # Time begin
# agentA and agentB are both from 1 to end
for i in range(0, num_of_agents, 1):
for j in range(0, num_of_agents, 1):
# distance is ONLY calculated when agents_row_a.ID > agents_row_b.ID
if i > j:
# # Uncomment next line to print the distance between each pair of agents
# print("The distance between Agent", agents_row_a.ID, \
# "and Agent", agents_row_b.ID, \
# "is", distance_between(agents_row_a, agents_row_b))
# Update the max and min distance
max_dis = max(max_dis, distance_between(agents[i], agents[j]))
min_dis = min(min_dis, distance_between(agents[i], agents[j]))
end_time = time.time() # Time end
need_time = end_time - start_time # Time calculate
# # Uncomment next lines to print the max and min distances, as well as the timing
# print("Maximum distance is", max_dis, "and minimum distance is", min_dis)
# print("Running time is", need_time)
return need_time, max_dis, min_dis
# Function: Calculate the distance between each pair of agent based on function "calculate_distance_2"
def calculate_distance_2(agents):
"""
    Obtain the timing to calculate the distance between each pair of agents,
    where agentA runs over all agents and agentB runs from agentA + 1 to the end (NOT including agentA)
Parameters
----------
agents: list
The list of agents
Returns
-------
need_time: float
Timing needed to obtain the distance for all pair of agents based on this function
max_dis: float
The maximum distance between agents
min_dis: float
The minimum distance between agents
"""
# Initial setting for max and min distance
max_dis = distance_between(agents[0], agents[1])
min_dis = max_dis
start_time = time.time() # Time begin
# agentA is from 1 to end
for i in range(0, num_of_agents, 1):
# agentB is from agentA to end (NOT include agentA)
for j in range(i + 1, num_of_agents, 1):
# # Uncomment next line to print the distance between each pair of agents
# print("The distance between Agent", agents_row_a.ID, \
# "and Agent", agents_row_b.ID, \
# "is", distance_between(agents_row_a, agents_row_b))
# Update the max and min distance
max_dis = max(max_dis, distance_between(agents[i], agents[j]))
min_dis = min(min_dis, distance_between(agents[i], agents[j]))
end_time = time.time() # Time end
need_time = end_time - start_time # Time calculate
# # Uncomment next lines to print the max and min distances, as well as the timing
# print("Maximum distance is", max_dis, "and minimum distance is", min_dis)
# print("Running time is", need_time)
return need_time, max_dis, min_dis
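# =============================================================================
# # Note on the three timing functions above (comment only): with
# # num_of_agents = 10, calculate_distance_0 evaluates 10 * 9 = 90 ordered
# # pairs, while calculate_distance_1 and calculate_distance_2 each evaluate
# # 10 * 9 / 2 = 45 unordered pairs, so their timings are roughly half of
# # calculate_distance_0
# =============================================================================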
# =============================================================================
# # =============================================================================
# # # =============================================================================
# # # # Code below is for initial setting
# # # =============================================================================
# # =============================================================================
# Environment and other setting
random.seed(0) # This line can be removed for randomization
num_of_iterations = 50 # Number of iterations
environment = [] # List of environment that will be read later
# Sheep setting, please note "sheep" and "agent" are the same meaning in this code
agents = [] # List of sheep (agent) that will be given later
num_of_agents = 10 # Number of sheep
neighbourhood = 20 # Sheep share store with neighbor sheep within this distance "neighbourhood"
times_for_move = 1.1 # Sheep move quickly if their store is "times_for_move" times the average storage
born_iteration_sheep = 5 # New sheep are born every "born_iteration_sheep" iterations
new_sheep_partion = 0.2 # The number of new sheep is "new_sheep_partion" of the number of alive sheep
# Wolves setting
wolves = [] # List of wolves that will be given later
num_of_wolves = 5 # Number of wolves
required_distance = 30 # Wolves eat sheep within this distance "required_distance"
unit_step_wovle = 5 # The unit that the wolf moves at each iteration in each direction
born_iteration_wolves = 10 # New wolves are born every "born_iteration_sheep" iterations
new_wolves_partion = 0.2 # The number of new wolves is "new_wolves_partion" of the number of alive wolves
wolves_dead_criterion = 5 # Wolves die when they eat "wolves_dead_criterion" sheep
# Please NOTE
# Living sheep are represented by blue points and dead sheep are represented by red points
# Living wolves are represented by black points and dead wolves are represented by yellow points
# =============================================================================
# # Uncomment next lines to read model parameters from the command line
# # The input will replace the value given above, so do not need to comment in lines above
# num_of_agents = int(sys.argv[1]) # Number of sheep
# num_of_iterations = int(sys.argv[2]) # Number of iterations
# neighbourhood = int(sys.argv[3]) # Sheep share store with neighbor sheep within this distance "neighbourhood"
# =============================================================================
# Read the environment from txt file
f = open("in.txt") # open the file
for line in f: # read all lines
parsed_line = str.split(line,",") # divide each line by ","
rowlist = [] # line initializing
for value in parsed_line: # read all numbers in one line
rowlist.append(float(value))
environment.append(rowlist)
f.close() # close the file
# =============================================================================
# # Uncomment next lines to visualize the environment without agents
# matplotlib.pyplot.xlim(0, len(environment[0])) # range of x axis
# matplotlib.pyplot.ylim(0, len(environment)) # range of y axis
# matplotlib.pyplot.imshow(environment) # show the figure
# matplotlib.pyplot.show()
# =============================================================================
# Make the wolves, the number is "num_of_wolves"
for i in range(num_of_wolves):
wolves.append(agentframework.Wolves(wolves,agents,environment,i))
# # Uncomment next line to print the initial position of all wolves
# print(wolves[i])
# Make the sheep, the number is "num_of_agents"
for i in range(num_of_agents):
# Position is randomly given by default, you can also input the position of agents
# by using "agents.append(agentframework.Agent(environment,agents,i,ypos,xpos))"
# where "ypos" and "xpos" are "float" that indicate the position of this agent
# More details can be found in the next code block
agents.append(agentframework.Agent(environment,agents,i))
# # Uncomment next line to print the initial position of all sheep
# print(agents[i])
# =============================================================================
# # Uncomment next lines to read the agent position from website
# # Please comment out the "Make the sheep" code block above if you want to read positions from the website (this code block)
# r = requests.get('http://www.geog.leeds.ac.uk/courses/computing/practicals/python/agent-framework/part9/data.html')
# content = r.text
# soup = bs4.BeautifulSoup(content, 'html.parser')
# td_ys = soup.find_all(attrs={"class" : "y"})
# td_xs = soup.find_all(attrs={"class" : "x"})
#
# for i in range(num_of_agents):
# y = int(td_ys[i].text)
# x = int(td_xs[i].text)
# agents.append(agentframework.Agent(environment, agents, i, y, x))
# # # Uncomment next line to print the initial position of all agents
# # print(agents[i])
# =============================================================================
# =============================================================================
# # Uncomment next lines to test if "distance_between" function can work normally.
# print("The position of agent A is", agents[0]) # information of agent A
# print("The position of agent B is", agents[1]) # information of agent B
# print("The distance between agent A and B is", distance_between(agents[0],agents[1])) # distance between agent A and B
# =============================================================================
# =============================================================================
# # Uncomment next lines to find the agent with the largest x (furthest east)
# matplotlib.pyplot.xlim(0, len(environment[0])) # range of x axis
# matplotlib.pyplot.ylim(0, len(environment)) # range of y axis
# for i in range(num_of_agents): # all agents are given in black color
# matplotlib.pyplot.scatter(agents[i].x,agents[i].y, color = 'black')
# sorted_agents = sorted(agents, key = lambda a: a.x) # sort the agent based on x
# # agent with largest x is given by red color
# matplotlib.pyplot.scatter(sorted_agents[len(agents)-1].x,sorted_agents[len(agents)-1].y, color = 'red')
# matplotlib.pyplot.show()
# =============================================================================
# =============================================================================
# # Uncomment next lines to obtain the timings, the maximum distances,
# # and the minimum distances from three functions
# need_time0, max_dis0, min_dis0 = calculate_distance_0(agents)
# need_time1, max_dis1, min_dis1 = calculate_distance_1(agents)
# need_time2, max_dis2, min_dis2 = calculate_distance_2(agents)
# =============================================================================
# =============================================================================
# # Uncomment next lines to obtain the timings for three function under different number of agents
# num_of_agents_list = [10,20,50,100,200,500,1000,2000] # To test the timings for different number of agents
# # timing initializing
# running_time0 = []
# running_time1 = []
# running_time2 = []
# for num_of_agents in num_of_agents_list:
# # Print the current number of agents
# print("Now, the number of agents is", num_of_agents)
# agents = []
# # make the agents
# for i in range(num_of_agents):
# # Position is randomly given by default, you can input the position manually (refer to comments above)
# agents.append(agentframework.Agent(environment, agents, i))
# # # Uncomment next line to print the initial position of all agents
# # print(agents[i])
#
# # obtain the timings, the maximum distances, and the minimum distances from three functions
# need_time0, max_dis0, min_dis0 = calculate_distance_0(agents)
# running_time0.append(need_time0)
# need_time1, max_dis1, min_dis1 = calculate_distance_1(agents)
# running_time1.append(need_time1)
# need_time2, max_dis2, min_dis2 = calculate_distance_2(agents)
# running_time2.append(need_time2)
#
# # Calculate the maximum time it takes for any run, then set the axis limit
# max_time = max(running_time0)
# max_time = max(max_time, max(running_time1))
# max_time = max(max_time, max(running_time2))
# # Set the axis limits
# matplotlib.pyplot.ylim(0, 1.1 * max(num_of_agents_list))
# matplotlib.pyplot.xlim(0, 1.1 * max_time)
# # visualize the timings obtained from different functions
# for i in range(len(num_of_agents_list)):
# # Please note the color for each function
# matplotlib.pyplot.scatter(running_time0[i],num_of_agents_list[i], color="red")
# matplotlib.pyplot.scatter(running_time1[i],num_of_agents_list[i], color="black")
# matplotlib.pyplot.scatter(running_time2[i],num_of_agents_list[i], color="green")
# # name of label and legend
# matplotlib.pyplot.xlabel("Timing")
# matplotlib.pyplot.ylabel("Number of agents")
# matplotlib.pyplot.legend(["Function0","Function1","Function2"])
# matplotlib.pyplot.show()
# =============================================================================
# =============================================================================
# # Uncomment next lines to test if each agent has the information of other agents.
# print("This is the original information from Agent 1:", agents[1])
# print("This is the information of Agent 1 from Agent 0:", agents[0].agents[1])
# =============================================================================
# =============================================================================
# # =============================================================================
# # # =============================================================================
# # # # Code above is for initial setting
# # # =============================================================================
# # =============================================================================
# =============================================================================
# # =============================================================================
# # # Code below is for Basic figures
# # =============================================================================
# =============================================================================
# =============================================================================
# for j in range(num_of_iterations): # each iteration
# # # Uncomment next line to randomise the order of agents if you want
# # # and you can also uncomment the code block below to obtain the normal order
# # random.shuffle(agents)
#
# # Obtain the average store of all agents before actions
# store_total = 0
# for i in range(num_of_agents):
# store_total += agents[i].store
# store_average = store_total/num_of_agents
# # # Uncomment next line to print the average store of all agents in this step
# # print("Average store for step", j, "is", store_average)
#
# # # Uncomment next line to print the step of the movement
# # print("It is", j, "step")
#
# # Action of the sheep
# for i in range(num_of_agents): # each sheep
# if agents[i].state == 1: # Only living sheep can move, eat, and share
# # # Uncomment next line to print the position of agent before moving
# # print("Before moving",agents[i])
# agents[i].move(times_for_move,store_average) # move
# # # Uncomment next line to print the position of agent after moving
# # print("After moving",agents[i])
#
# agents[i].eat() # sheep eat the environment, they will not leave negative values and sick up their store
# agents[i].share_with_neighbours(neighbourhood) # Share the stores with neighbour agents within the distance
#
# # # Uncomment next line to print the position of each agent in each step
# # print(agents[i])
#
# # Action of the wolves
# for i in range(num_of_wolves): # each wolf
# # If eat more than 'wolves_dead_criterion' sheep, this wolf will die
# if wolves[i].eatSheep >= wolves_dead_criterion:
# wolves[i].state = 0 # die
#
# # living wolf eats and moves
# if wolves[i].state == 1:
# wolves[i].move(unit_step_wovle) # move
# wolves[i].find_eat(required_distance) # eat sheep within the distance
#
# # # Uncomment next line to check the state of sheep
# # for i in range(num_of_agents):
# # print("The state for sheep " + str(agents[i].ID) \
# # + " is " + str(agents[i].state))
#
# # New sheep born
# if (j + 1) % born_iteration_sheep == 0: # identify the step that is suitable to born
# # Measure the number of living sheep
# alive_number = 0 # initializing
# for i in range(num_of_agents):
# if agents[i].state == 1:
# alive_number += 1
# # add_number is the new sheep born from the living sheep (rounding)
# add_number = round(new_sheep_partion * alive_number)
# # Current (new) number of sheep
# new_num_of_agents = num_of_agents + add_number
# # make the position of the new sheep (from "num_of_agents" to "new_num_of_agents")
# for i in range(num_of_agents,new_num_of_agents,1):
# agents.append(agentframework.Agent(environment,agents,i))
# # Update the number of sheep
# num_of_agents = new_num_of_agents
# # print("Current total number of sheep is",num_of_agents)
#
# # New wolves born
# if (j + 1) % born_iteration_wolves == 0: # identify the step that is suitable to born
# # Measure the number of living wolves
# alive_number = 0 # initializing
# for i in range(num_of_wolves):
# if wolves[i].state == 1:
# alive_number += 1
# # add_number is the new wolves born from the living wolves (rounding)
# add_number = round(new_wolves_partion * alive_number)
# # Current (new) number of wolves
# new_num_of_wolves = num_of_wolves + add_number
# # make the position of the new wolves (from "num_of_wolves" to "new_num_of_wolves")
# for i in range(num_of_wolves,new_num_of_wolves,1):
# wolves.append(agentframework.Wolves(wolves,agents,environment,i))
# # Update the number of wolves
# num_of_wolves = new_num_of_wolves
# # print("Current total number of wolves is",num_of_wolves)
#
# # =============================================================================
# # # Uncomment next lines to output total amount stored by all the agents to txt file
# # # The output does not contain the initial total amount because it is 0 by default setting
# # totalStored = 0 # initializing
# # for i in range(num_of_agents):
# # totalStored += agents[i].store
# # # write the str to txt file
# # write_store_to_output("After the movement and eating of step " + str(j) + \
# # ", and the total amount stored by all the agents is " + str(totalStored))
# # =============================================================================
#
# # Uncomment next lines to display environment and agent
# matplotlib.pyplot.xlim(0, len(environment[0]))
# matplotlib.pyplot.ylim(0, len(environment))
# matplotlib.pyplot.imshow(environment)
# #print("Final states")
# for i in range(num_of_agents): # visualize the sheep
# # # Uncomment next lines to print the state for all sheep at the end.
# # print("The state for sheep", agents[i].ID, "is", agents[i].state)
#
# # Living sheep are represented by blue points and dead sheep are represented by red points
# if agents[i].state == 1: # Living sheep
# matplotlib.pyplot.scatter(agents[i].x,agents[i].y, color = 'blue')
# else: # Dead sheep
# matplotlib.pyplot.scatter(agents[i].x,agents[i].y, color = 'red')
#
# for i in range(num_of_wolves): # visualize the wolves
# # # Uncomment next lines to print the state for all wolves at the end.
# # print("Wolf", wolves[i].ID, "eated total", wolves[i].eatSheep, "sheep")
#
# # Living wolves are represented by black points and dead wolves are represented by yellow points
# if wolves[i].state == 1: # Living wolves
# matplotlib.pyplot.scatter(wolves[i].x,wolves[i].y, color = 'black')
# else: # Dead wolves
# matplotlib.pyplot.scatter(wolves[i].x,wolves[i].y, color = 'yellow')
# matplotlib.pyplot.show()
#
# # =============================================================================
# # # Uncomment next lines to write out the environment as .txt file
# # for i in range(len(environment)):
# # write_environment_to_output(environment[i])
# # =============================================================================
#
# # =============================================================================
# # # Uncomment next lines to obtain the normal order if using 'shuffle' function above
# # print("Random order")
# # for i in range(num_of_agents): # print the agents in random order
# # print(agents[i])
# # sorted_agents = sorted(agents, key = lambda a: a.ID) # sort based on ID
# # print("Normal order")
# # for i in range(num_of_agents): # print the agents in normal order
# # print(sorted_agents[i])
# # =============================================================================
# =============================================================================
# =============================================================================
# # =============================================================================
# # # Code above is for Basic figures
# # =============================================================================
# =============================================================================
# =============================================================================
# # =============================================================================
# # # Code below is for Animation
# # =============================================================================
# =============================================================================
# =============================================================================
# # Figure initializing
# fig = matplotlib.pyplot.figure(figsize=(7, 7))
# ax = fig.add_axes([0, 0, 1, 1])
#
# # Parameters initializing
# carry_on = True # stop or not
# jIteration = 0 # iteration indicator
#
# # update (main) function for Animation
# def update(frame_number):
#
# fig.clear()
# # Parameter globalization
# global jIteration
# global num_of_agents
# global num_of_wolves
# global carry_on
#
# # Plot the environment before agents
# matplotlib.pyplot.xlim(0, len(environment[0]))
# matplotlib.pyplot.ylim(0, len(environment))
# matplotlib.pyplot.imshow(environment)
#
# # Obtain the average store of all agents before actions
# store_total = 0
# for i in range(num_of_agents):
# store_total += agents[i].store
# store_average = store_total/num_of_agents
#
# # Action of the sheep
# for i in range(num_of_agents): # each sheep
# if agents[i].state == 1: # Only living sheep can move, eat, and share
# agents[i].move(times_for_move,store_average) # move
# agents[i].eat() # sheep eat from the environment; they will not leave negative values and may sick up their store
# agents[i].share_with_neighbours(neighbourhood) # Share the stores with neighbour agents within the distance
#
# # Action of the wolves
# for i in range(num_of_wolves): # each wolf
# # If a wolf has eaten at least 'wolves_dead_criterion' sheep, it dies
# if wolves[i].eatSheep >= wolves_dead_criterion:
# wolves[i].state = 0 # die
#
# # Wolf eats and moves
# if wolves[i].state == 1: # living wolves
# wolves[i].move(unit_step_wovle) # move
# wolves[i].find_eat(required_distance) # eat sheep within the distance
#
# # New sheep born
# if (jIteration + 1) % born_iteration_sheep == 0: # check whether this is a sheep birth step
# # Measure the number of living sheep
# alive_number = 0 # initializing
# for i in range(num_of_agents):
# if agents[i].state == 1:
# alive_number += 1
# # add_number is the new sheep born from the living sheep (rounding)
# add_number = round(new_sheep_partion * alive_number)
# # Current (new) number of sheep
# new_num_of_agents = num_of_agents + add_number
# # make the position of the new sheep (from "num_of_agents" to "new_num_of_agents")
# for i in range(num_of_agents,new_num_of_agents,1):
# agents.append(agentframework.Agent(environment,agents,i))
# # Update the number of sheep
# num_of_agents = new_num_of_agents
# # print("Current total number of sheep is",num_of_agents)
#
# # New wolves born
# if (jIteration + 1) % born_iteration_wolves == 0:
# # Measure the number of living wolves
# alive_number = 0 # initializing
# for i in range(num_of_wolves):
# if wolves[i].state == 1:
# alive_number += 1
# # add_number is the new wolves born from the living wolves (rounding)
# add_number = round(new_wolves_partion * alive_number)
# # Current (new) number of wolves
# new_num_of_wolves = num_of_wolves + add_number
# # make the position of the new wolves (from "num_of_wolves" to "new_num_of_wolves")
# for i in range(num_of_wolves,new_num_of_wolves,1):
# wolves.append(agentframework.Wolves(wolves,agents,environment,i))
# # Update the number of wolves
# num_of_wolves = new_num_of_wolves
# # print("Current total number of wolves is",num_of_wolves)
#
# jIteration += 1 # iteration + 1 manually
#
# # Stop condition based on a random number
# if random.random() < 0.001:
# carry_on = False # stop indicator
# print("stopping condition")
#
# # Plot the sheep and wolves in this iteration
# for i in range(num_of_agents): # visualize the sheep
# # Living sheep are represented by blue points and dead sheep are represented by red points
# if agents[i].state == 1: # Living sheep
# matplotlib.pyplot.scatter(agents[i].x,agents[i].y, color = 'blue')
# else: # Dead sheep
# matplotlib.pyplot.scatter(agents[i].x,agents[i].y, color = 'red')
# for i in range(num_of_wolves): # visualize the wolves
# # Living wolves are represented by black points and dead wolves are represented by yellow points
# if wolves[i].state == 1: # Living wolves
# matplotlib.pyplot.scatter(wolves[i].x,wolves[i].y, color = 'black')
# else: # Dead wolves
# matplotlib.pyplot.scatter(wolves[i].x,wolves[i].y, color = 'yellow')
#
#
# # Stop condition function: (1) Step number (2) Random number
# def gen_function(b = [0]):
# a = 0
# global carry_on #Not actually needed as we're not assigning, but clearer
# while (a < num_of_iterations) & (carry_on) : # two stop conditions
# yield a # Returns control and waits next call.
# a = a + 1
#
# # Animation
# animation = matplotlib.animation.FuncAnimation(fig, update, frames=gen_function, repeat=False)
#
# matplotlib.pyplot.show()
# =============================================================================
# =============================================================================
# # =============================================================================
# # # =============================================================================
# # # # Code above is for Animation
# # # =============================================================================
# # =============================================================================
# =============================================================================
# # =============================================================================
# # # Code below is for GUI Setting
# # =============================================================================
# =============================================================================
# Define the run function
def run():
    global animation # keep a module-level reference so the animation is not garbage-collected when run() returns
    animation = matplotlib.animation.FuncAnimation(fig, update, frames=gen_function, repeat=False)
    canvas.draw()
# Figure initializing
fig = matplotlib.pyplot.figure(figsize=(7, 7))
ax = fig.add_axes([0, 0, 1, 1])
# GUI design setting
root = tkinter.Tk()
root.wm_title("Model")
canvas = matplotlib.backends.backend_tkagg.FigureCanvasTkAgg(fig, master=root)
canvas._tkcanvas.pack(side=tkinter.TOP, fill=tkinter.BOTH, expand=1)
menu = tkinter.Menu(root)
root.config(menu=menu)
model_menu = tkinter.Menu(menu)
menu.add_cascade(label="Model", menu=model_menu)
model_menu.add_command(label="Run model", command=run)
# Parameters initializing
carry_on = True # stop or not
jIteration = 0 # iteration indicator
# update (main) function for Animation
def update(frame_number):
fig.clear()
# Parameter globalization
global jIteration
global num_of_agents
global num_of_wolves
global carry_on
# Plot the environment before agents
matplotlib.pyplot.xlim(0, len(environment[0]))
matplotlib.pyplot.ylim(0, len(environment))
matplotlib.pyplot.imshow(environment)
# Obtain the average store of all agents before actions
store_total = 0
for i in range(num_of_agents):
store_total += agents[i].store
store_average = store_total/num_of_agents
# Action of the sheep
for i in range(num_of_agents): # each sheep
if agents[i].state == 1: # Only living sheep can move, eat, and share
agents[i].move(times_for_move,store_average) # move
            agents[i].eat() # sheep eat from the environment; they will not leave negative values and may sick up their store
agents[i].share_with_neighbours(neighbourhood) # Share the stores with neighbour agents within the distance
# Action of the wolves
for i in range(num_of_wolves): # each wolf
        # If a wolf has eaten at least 'wolves_dead_criterion' sheep, it dies
if wolves[i].eatSheep >= wolves_dead_criterion:
wolves[i].state = 0 # die
# Wolf eats and moves
if wolves[i].state == 1: # living wolves
wolves[i].move(unit_step_wovle) # move
wolves[i].find_eat(required_distance) # eat sheep within the distance
# New sheep born
    if (jIteration + 1) % born_iteration_sheep == 0: # check whether this is a sheep birth step
# Measure the number of living sheep
alive_number = 0 # initializing
for i in range(num_of_agents):
if agents[i].state == 1:
alive_number += 1
# add_number is the new sheep born from the living sheep (rounding)
add_number = round(new_sheep_partion * alive_number)
# Current (new) number of sheep
new_num_of_agents = num_of_agents + add_number
# make the position of the new sheep (from "num_of_agents" to "new_num_of_agents")
for i in range(num_of_agents,new_num_of_agents,1):
agents.append(agentframework.Agent(environment,agents,i))
# Update the number of sheep
num_of_agents = new_num_of_agents
# print("Current total number of sheep is",num_of_agents)
# New wolves born
if (jIteration + 1) % born_iteration_wolves == 0:
# Measure the number of living wolves
alive_number = 0
for i in range(num_of_wolves):
if wolves[i].state == 1:
alive_number += 1
# add_number is the new wolves born from the living wolves (rounding)
add_number = round(new_wolves_partion * alive_number)
# Current (new) number of wolves
new_num_of_wolves = num_of_wolves + add_number
# make the position of the new wolves (from "num_of_wolves" to "new_num_of_wolves")
for i in range(num_of_wolves,new_num_of_wolves,1):
wolves.append(agentframework.Wolves(wolves,agents,environment,i))
# Update the number of wolves
num_of_wolves = new_num_of_wolves
# print("Current total number of wolves is",num_of_wolves)
jIteration += 1 # iteration + 1 manually
    # Stop condition based on a random number
if random.random() < 0.001:
carry_on = False # stop indicator
print("stopping condition")
# Plot the sheep and wolves in this iteration
for i in range(num_of_agents): # visualize the sheep
# Living sheep are represented by blue points and dead sheep are represented by red points
if agents[i].state == 1: # Living sheep
matplotlib.pyplot.scatter(agents[i].x,agents[i].y, color = 'blue')
else: # Dead sheep
matplotlib.pyplot.scatter(agents[i].x,agents[i].y, color = 'red')
for i in range(num_of_wolves): # visualize the wolves
# Living wolves are represented by black points and dead wolves are represented by yellow points
if wolves[i].state == 1: # Living wolves
matplotlib.pyplot.scatter(wolves[i].x,wolves[i].y, color = 'black')
else: # Dead wolves
matplotlib.pyplot.scatter(wolves[i].x,wolves[i].y, color = 'yellow')
# Stop condition function: (1) Step number (2) Random number
def gen_function(b = [0]):
a = 0
global carry_on #Not actually needed as we're not assigning, but clearer
    while (a < num_of_iterations) and carry_on: # two stop conditions
yield a # Returns control and waits next call.
a = a + 1
tkinter.mainloop()
# =============================================================================
# # =============================================================================
# # # =============================================================================
# # # # Code above is for GUI Setting
# # # =============================================================================
# # =============================================================================
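# =============================================================================
# # Illustrative, self-contained sketch (not part of the original model): it
# # isolates the FuncAnimation pattern used above, in which a generator
# # function supplies frame numbers and a module-level flag can end the run
# # early. All demo_* names are made up for this sketch; the real model plots
# # sheep and wolves rather than a random point. It is left commented out,
# # like the other alternative sections above.
# =============================================================================
# import random
# import matplotlib.pyplot
# import matplotlib.animation
#
# demo_carry_on = True
# demo_fig = matplotlib.pyplot.figure()
#
# def demo_update(frame_number):
#     global demo_carry_on
#     demo_fig.clear()
#     matplotlib.pyplot.scatter(random.random(), random.random())
#     if random.random() < 0.01: # same random stopping idea as the model above
#         demo_carry_on = False
#
# def demo_gen_function():
#     a = 0
#     while (a < 50) and demo_carry_on: # frame limit or early stop
#         yield a # hand a frame number to demo_update, then wait for the next call
#         a = a + 1
#
# demo_animation = matplotlib.animation.FuncAnimation(
#     demo_fig, demo_update, frames=demo_gen_function, repeat=False)
# matplotlib.pyplot.show()
# =============================================================================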
|
from heartbeat import BaseTest
import urllib2
import json
import nose.tools
import os
from nose.plugins.skip import SkipTest
class Test(BaseTest):
def __init__(self, *args):
self.proc = None
super(Test, self).__init__(*args)
def test_telemetry(self):
"""
Test that telemetry metrics are correctly registered and increment / decrement
"""
# This test is flaky https://github.com/elastic/beats/issues/8966
raise SkipTest
if os.name == "nt":
# This test is currently skipped on windows because file permission
# configuration isn't implemented on Windows yet
raise SkipTest
server = self.start_server("hello world", 200)
try:
self.setup_dynamic(["-E", "http.enabled=true"])
cfg_file = "test.yml"
self.write_dyn_config(
cfg_file, self.http_cfg(
"http://localhost:{}".format(server.server_port))
)
self.wait_until(lambda: self.output_has(lines=1))
self.assert_stats({
"http": {
"monitor_starts": 1,
"monitor_stops": 0,
"endpoint_starts": 1,
"endpoint_stops": 0,
}
})
self.assert_state({
"http": {
"monitors": 1,
"endpoints": 1,
}
})
tcp_hosts = ["localhost:123", "localhost:456"]
self.write_dyn_config(
cfg_file, self.tcp_cfg(*tcp_hosts)
)
for tcp_host in tcp_hosts:
self.wait_until(lambda: self.log_contains(
"Start job 'tcp-tcp@{}".format(tcp_host)))
init_lines = self.output_lines()
self.wait_until(lambda: self.output_has(lines=init_lines+2))
self.assert_stats({
"http": {
"monitor_starts": 1,
"monitor_stops": 1,
"endpoint_starts": 1,
"endpoint_stops": 1,
},
"tcp": {
"monitor_starts": 1,
"monitor_stops": 0,
"endpoint_starts": 2,
"endpoint_stops": 0,
}
})
self.assert_state({
"tcp": {
"monitors": 1,
"endpoints": 2,
}
})
finally:
self.proc.check_kill_and_wait()
server.shutdown()
@staticmethod
def assert_state(expected={}):
stats = json.loads(urllib2.urlopen(
"http://localhost:5066/state").read())
total_monitors = 0
total_endpoints = 0
for proto in ("http", "tcp", "icmp"):
proto_expected = expected.get(proto, {})
monitors = proto_expected.get("monitors", 0)
endpoints = proto_expected.get("endpoints", 0)
total_monitors += monitors
total_endpoints += endpoints
nose.tools.assert_dict_equal(stats['heartbeat'][proto], {
'monitors': monitors,
'endpoints': endpoints,
})
nose.tools.assert_equal(stats['heartbeat']['monitors'], total_monitors)
nose.tools.assert_equal(
stats['heartbeat']['endpoints'], total_endpoints)
@staticmethod
def assert_stats(expected={}):
stats = json.loads(urllib2.urlopen(
"http://localhost:5066/stats").read())
for proto in ("http", "tcp", "icmp"):
proto_expected = expected.get(proto, {})
nose.tools.assert_dict_equal(stats['heartbeat'][proto], {
'monitor_starts': proto_expected.get("monitor_starts", 0),
'monitor_stops': proto_expected.get("monitor_stops", 0),
'endpoint_starts': proto_expected.get("endpoint_starts", 0),
'endpoint_stops': proto_expected.get("endpoint_stops", 0),
})
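# Hedged sketch (not part of the original test): the /stats request issued by
# assert_stats above, rewritten for Python 3, where urllib2 maps to
# urllib.request. The host and port mirror the hard-coded endpoint used above,
# which is only reachable while the beat runs with http.enabled=true. Kept
# commented out so this Python 2 module still imports cleanly.
#
# import json
# import urllib.request
#
# def fetch_stats(host="localhost", port=5066):
#     """Return the parsed JSON body of the beat's /stats endpoint."""
#     url = "http://{}:{}/stats".format(host, port)
#     with urllib.request.urlopen(url) as resp:
#         return json.loads(resp.read().decode("utf-8"))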
|
#!/usr/bin/env python
#coding=utf-8
#======================================================================
#Program: Diffusion Weighted MRI Reconstruction
#Module: $RCSfile: spherical_splines.py,v $
#Language: Python
#Author: $Author: bjian $
#Date: $Date: 2009/04/09 06:04:19 $
#Version: $Revision: 1.8 $
#=====================================================================
from numpy import arange, array, dot, ones, zeros, eye, r_, c_, linalg, loadtxt, max
from math import log, sqrt, fabs, pi
import os
def q(z):
"""Evaluate q(z) = \int_0^1 (1-h)^2/sqrt(1-2*h*z+h*h)} dh """
if z>=1.0: return 0.5
"""
    G. Wahba, Spline interpolation and smoothing on the sphere,
SIAM J. SCI. STAT. COMPUT., 2(1) 1981, pp. 5-16. [Equation (3.4)]
http://www.stat.wisc.edu/%7Ewahba/ftp1/oldie/sphspl.pdf
W = (1-z)/2.0
C = 2*sqrt(W)
A = log(1+1.0/sqrt(W))
return 0.5*(A*(12*W*W - 4*W) -6*C*W + 6*W + 1)
"""
"""
    Taijeron et al.,
Spline interpolation and smoothing on hyperspheres,
SIAM J. SCI. COMPUT., 15(5) 1994, pp. 1111-1125. [Table 1]
"""
try:
S = sqrt(2-2*z)
N = (1-z)*log(2-2*z)
L = (1-z)*log(sqrt(2/(1-z))+1)
return 0.5*(-L*(3*z-1)+3*S*(z-1)+4-3*z)
except:
return 0.5
def sanity_test():
"""A sanity test compared with numerical approximation. """
dh = .0001
hh = arange(dh/2.0,1,dh)
g = lambda z: sum([dh*(1-h)**2/sqrt(1-2*h*z+h*h) for h in hh])
print max([fabs(q(z)-g(z)) for z in arange(0,1,0.01)]) #8.33332181038e-010
# The reproducing kernel: see Wahba (3.3) and Taijeron et al. (45)
R = lambda z: (0.25*(q(z)+q(-z)) - 1/6.0)/(2*pi)
def assemble_kernel_matrix(x,v):
"""Assemble the kernel matrix from a given set of directions. """
#use R(fabs(dot(i,j))) in case angles are considered in the range (0,pi/2)
return array([R(dot(i,j)) for i in x for j in v]).reshape(len(x),len(v))
def test(gradient_file, signal_file, knots_file, _lambda = 0):
"""
    Fit the spherical thin-plate spline model to discrete signal data.
Reference:
Ferreira et al. Directional Log-Spline Distributions,
Bayesian Analysis 3(2) 2008, pp. 297-316 [Eq.(3)]
In [331]: c = test('81vectors.txt','3fib.mhd')
2.92138710735e-013
In [332]: c = test('81vectors.txt','2fib.mhd')
2.37209920248e-013
In [333]: c = test('81vectors.txt','1fib.mhd')
3.90984974495e-013
"""
import mhd_utils
import flt_utils
g = loadtxt(gradient_file) #diffusion-mri/Python/data/81vectors.txt
v = loadtxt(knots_file)
#vv = r_[v,-v]
#gg = r_[g,-g]
_R = assemble_kernel_matrix(g, v)
#_one = ones([81,1])
#_eye = eye(81)
#_R = assemble_kernel_matrix(gg, vv)
_one_column = ones([len(g),1])
_one_row = ones([1,len(v)])
#_eye = eye(81*2)
#A = r_[c_[_one,_R + _lambda * _eye], c_[0, _one.T]]
A = r_[c_[_one_column,_R], c_[0, _one_row]]
    ext = os.path.splitext(signal_file)[1] # file extension, which selects the reader
    if ext == '.flt':
        [dsize,s] = flt_utils.read_flt_file(signal_file)
    elif ext == '.mhd':
        [s,dsize] = mhd_utils.load_raw_data_with_mhd(signal_file)
else:
return
_zero_row = zeros([1,s.shape[1]])
c = dot(linalg.pinv(A),r_[s,_zero_row])
#return A,s,c,_R
#c = dot(linalg.pinv(A),r_[s,s,[[0]]])
print max(abs(c[0] + dot(_R,c[1::]) - s))
return A,c,s,g,_R
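# Hedged usage sketch (not part of the original module): it only illustrates
# how the bordered interpolation system in test() is assembled, using a few
# hard-coded unit directions in place of the gradient/knot files, which are
# not shipped with this script. All demo_* names are made up here.
if __name__ == '__main__':
    demo_dirs = array([[1.0, 0.0, 0.0],
                       [0.0, 1.0, 0.0],
                       [0.0, 0.0, 1.0]]) # illustrative unit vectors
    K = assemble_kernel_matrix(demo_dirs, demo_dirs) # 3x3 reproducing-kernel matrix
    one_column = ones([len(demo_dirs), 1])
    one_row = ones([1, len(demo_dirs)])
    A_demo = r_[c_[one_column, K], c_[0, one_row]] # same bordered system as in test()
    print(A_demo.shape) # (4, 4): kernel block plus the constant-term border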
|